Пример #1
0
def model3D(img=(84, 256, 256)):
    """Build a 3D conv/deconv segmentation network for WMH volumes.

    Args:
        img: (depth, height, width) of the input volume; also used as the
            output_shape of the final transposed convolution.

    Returns:
        A tg.Sequential model ending in a 2-channel voxel-wise softmax.
    """
    conv_stride = (1, 1, 1)
    pool_stride = (2, 2, 2)
    with tf.name_scope('WMH'):
        seq = tg.Sequential()
        # Encoder: conv -> batch-norm -> max-pool -> relu.
        seq.add(Conv3D(input_channels=1, num_filters=10,
                       kernel_size=(3, 3, 3), stride=conv_stride,
                       padding='SAME'))
        seq.add(TFBatchNormalization(name='b1'))
        seq.add(MaxPool3D(poolsize=(2, 2, 2), stride=pool_stride,
                          padding='SAME'))
        # Spatial size after pooling; reused as the output shape of the
        # first transposed convolution below.
        layer_size1 = updateConvLayerSize(img, pool_stride)
        seq.add(RELU())
        seq.add(Conv3D(input_channels=10, num_filters=20,
                       kernel_size=(3, 3, 3), stride=conv_stride,
                       padding='SAME'))
        seq.add(TFBatchNormalization(name='b2'))
        seq.add(RELU())
        # Decoder: upsample back to the pooled size, then to the input size.
        seq.add(Conv3D_Tranpose1(input_channels=20, num_filters=10,
                                 output_shape=layer_size1,
                                 kernel_size=(3, 3, 3),
                                 stride=conv_stride, padding='SAME'))
        seq.add(RELU())
        seq.add(Conv3D_Tranpose1(input_channels=10, num_filters=3,
                                 output_shape=img, kernel_size=(3, 3, 3),
                                 stride=(2, 2, 2), padding='SAME'))
        seq.add(RELU())
        # Final voxel-wise classification into 2 classes.
        seq.add(Conv3D(input_channels=3, num_filters=2,
                       kernel_size=(3, 3, 3), stride=conv_stride,
                       padding='SAME'))
        seq.add(Softmax())
    return seq
Пример #2
0
def model(nclass, h, w, c):
    """Build an all-convolutional CIFAR-10 classifier.

    Args:
        nclass: number of output classes.
        h, w, c: input height, width and channel count.

    Returns:
        A tg.Sequential model: nine conv layers (batch-norm and dropout
        alternating between them), global average pooling, and softmax.
    """
    # (in_channels, out_channels, kernel, stride) for each conv layer;
    # the two stride-2 convs do the downsampling in place of pooling.
    specs = [
        (c, 96, (3, 3), (1, 1)),
        (96, 96, (3, 3), (1, 1)),
        (96, 96, (3, 3), (2, 2)),
        (96, 192, (3, 3), (1, 1)),
        (192, 192, (3, 3), (1, 1)),
        (192, 192, (3, 3), (2, 2)),
        (192, 192, (3, 3), (1, 1)),
        (192, 192, (1, 1), (1, 1)),
        (192, nclass, (1, 1), (1, 1)),
    ]
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        for idx, (in_ch, out_ch, ksize, strd) in enumerate(specs):
            seq.add(Conv2D(input_channels=in_ch, num_filters=out_ch,
                           kernel_size=ksize, stride=strd, padding='SAME'))
            seq.add(RELU())
            # Even-indexed layers get batch-norm (named b1, b3, ... b9 as
            # in the original); odd-indexed layers get dropout instead.
            if idx % 2 == 0:
                seq.add(TFBatchNormalization(name='b%d' % (idx + 1)))
                h, w = same(in_height=h, in_width=w, stride=strd,
                            kernel_size=ksize)
            else:
                h, w = same(in_height=h, in_width=w, stride=strd,
                            kernel_size=ksize)
                seq.add(Dropout(0.5))
        # Global average pooling over the remaining spatial extent.
        seq.add(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
Пример #3
0
def model():
    """Build a small convolutional network for 28x28 MNIST digits.

    Returns:
        A tg.Sequential model ending in a 10-way softmax.
    """
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()
        # Conv block 1: 1 -> 32 channels, then 2x2 max-pool and LRN.
        seq.add(Conv2D(input_channels=1, num_filters=32,
                       kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(TFBatchNormalization(name='b1'))
        h, w = same(in_height=28, in_width=28, stride=(1, 1),
                    kernel_size=(3, 3))
        seq.add(RELU())
        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2),
                    kernel_size=(2, 2))
        seq.add(LRN())
        # Conv block 2: 32 -> 64 channels, then 2x2 max-pool and LRN.
        seq.add(Conv2D(input_channels=32, num_filters=64,
                       kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(TFBatchNormalization(name='b2'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1),
                    kernel_size=(3, 3))
        seq.add(RELU())
        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2),
                    kernel_size=(2, 2))
        seq.add(LRN())
        # Fully-connected head: flatten -> 128 -> 256 -> 10-way softmax,
        # with tanh activations and dropout between linear layers.
        seq.add(Flatten())
        seq.add(Linear(int(h * w * 64), 128))
        seq.add(TFBatchNormalization(name='b3'))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(128, 256))
        seq.add(TFBatchNormalization(name='b4'))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(256, 10))
        seq.add(Softmax())
    return seq
Пример #4
0
 def __init__(self, input, BN_name, kernel=(3, 3, 3), iterate=1):
     """Build a block of two channel-preserving 3D conv layers.

     The block is Conv3D -> BN -> RELU -> Conv3D -> BN (no trailing
     activation), collected into self.layers.

     Args:
         input: channel count; used as both input_channels and
             num_filters, so the block preserves channel count.
             NOTE(review): shadows the `input` builtin.
         BN_name: prefix for batch-norm layer names; a running counter
             (self.int_) is appended to keep names unique.
         kernel: 3D kernel size for both convolutions.
         iterate: stored on the instance; not used in this constructor.
     """
     self.layers = []
     # Counter appended to BN_name so each TFBatchNormalization gets a
     # distinct name.
     self.int_ = 0
     self.input = input
     self.kernel = kernel
     self.iterate = iterate
     # First conv + batch-norm + RELU (stride 1, SAME padding).
     self.layers.append(
         Conv3D(input_channels=input,
                num_filters=input,
                kernel_size=kernel,
                stride=(1, 1, 1),
                padding='SAME'))
     self.layers.append(TFBatchNormalization(BN_name + str(self.int_)))
     self.int_ += 1
     self.layers.append(RELU())
     # Second conv + batch-norm; no activation is appended after it.
     self.layers.append(
         Conv3D(input_channels=input,
                num_filters=input,
                kernel_size=kernel,
                stride=(1, 1, 1),
                padding='SAME'))
     self.layers.append(TFBatchNormalization(BN_name + str(self.int_)))
     self.int_ += 1
Пример #5
0
def Vanilla_Classifier(X_train, y_train, X_valid, y_valid, restore):
    """Train and validate a simple fully-connected classifier.

    Args:
        X_train, y_train: training images (N, h, w, c) and one-hot labels.
        X_valid, y_valid: validation images and one-hot labels.
        restore: when 1, restore weights from ./var/2/model.ckpt before
            training.

    Side effects: saves a checkpoint to ./var/2/model.ckpt every epoch
    and prints the best validation accuracy (Python 2 print statement).
    """
    batchsize = 100
    learning_rate = 0.001
    _, h, w, c = X_train.shape
    _, nclass = y_train.shape

    g = tf.Graph()
    with g.as_default():

        data_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize)
        data_valid = tg.SequentialIterator(X_valid, y_valid, batchsize=batchsize)

        X_ph = tf.placeholder('float32', [None, h, w, c])
        # y_ph = tf.placeholder('float32', [None, nclass])
        # One label placeholder per output component (only nclass here).
        y_phs = []
        for comp in [nclass]:
            y_phs.append(tf.placeholder('float32', [None, comp]))

        dim = int(h*w*c)
        scope = 'encoder'
        start = tg.StartNode(input_vars=[X_ph])
        # Feature stack: sigmoid/batch-norm/relu then flatten to h*w*c.
        h1_Node = tg.HiddenNode(prev=[start], 
                                layers=[Sigmoid(),
                                        TFBatchNormalization(name= scope + '/vanilla1'),
                                        RELU(),
                                        Flatten(),
                                        Sigmoid(),
                                        TFBatchNormalization(name=scope + '/vanilla2')])

        # Linear classifier head with softmax output.
        h2_Node = tg.HiddenNode(prev=[h1_Node],
                                layers=[Linear(prev_dim=dim, this_dim=nclass),
                                        Softmax()])                                
        end_nodes = [tg.EndNode(prev=[h2_Node])]

        graph = Graph(start=[start], end=end_nodes)

        # Separate symbolic outputs for train-mode and test-mode passes.
        train_outs_sb = graph.train_fprop()
        test_outs = graph.test_fprop()

        # Loss: mean squared error between softmax output and one-hot label.
        ttl_mse = []
        # import pdb; pdb.set_trace()
        for y_ph, out in zip(y_phs, train_outs_sb):
            #ttl_mse.append(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_ph, out)))
            ttl_mse.append(tf.reduce_mean((y_ph-out)**2))


        mse = sum(ttl_mse)
        #optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse)

        saver = tf.train.Saver()
        vardir = './var/2'
        if not os.path.exists(vardir):
            os.makedirs(vardir)

        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        tf.set_random_seed(1)
        init = tf.global_variables_initializer()


        with tf.Session(config = tf.ConfigProto(gpu_options = gpu_options)) as sess:
            # print '=======session start'
            sess.run(init)
            if restore == 1:
                re_saver = tf.train.Saver()
                re_saver.restore(sess, vardir + "/model.ckpt")
                print("Model restored.")
            max_epoch = 100
            temp_acc = []

            for epoch in range(max_epoch):

                # Training pass: optimize, then accumulate error/accuracy
                # from the train-mode outputs.
                train_error = 0
                train_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_train:
                    feed_dict = {X_ph:X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch

                    sess.run(optimizer, feed_dict=feed_dict)
                    train_outs = sess.run(train_outs_sb, feed_dict=feed_dict)
                    train_error += total_mse(train_outs, [ys])[0]
                    train_accuracy += total_accuracy(train_outs, [ys])[0]
                    ttl_examples += len(X_batch)               

                # Validation pass using the test-mode forward prop.
                valid_error = 0
                valid_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_valid:
                    feed_dict = {X_ph:X_batch}  
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch

                    valid_outs = sess.run(test_outs, feed_dict=feed_dict)
                    valid_error += total_mse(valid_outs, [ys])[0]
                    valid_accuracy += total_accuracy(valid_outs, [ys])[0]
                    ttl_examples += len(X_batch)

                # Checkpoint every epoch; record per-epoch validation accuracy.
                save_path = saver.save(sess, vardir + "/model.ckpt")
                # print("Model saved in file: %s" % save_path)
                temp_acc.append(valid_accuracy/float(ttl_examples))
            print 'max accuracy is:\t', max(temp_acc)
Пример #6
0
def Encoder_Classifier(X_train, y_train, X_valid, y_valid, restore):
    """Train and validate a conv-encoder + linear-softmax classifier.

    Args:
        X_train, y_train: training images (N, h, w, c) and one-hot labels.
        X_valid, y_valid: validation images and one-hot labels.
        restore: when 1, restore weights from ./var/1/model.ckpt before
            training.

    Side effects: saves a checkpoint to ./var/1/model.ckpt after training
    and prints the best validation accuracy (Python 2 print statement).
    """


    batchsize = 100
    learning_rate = 0.001
    _, h, w, c = X_train.shape
    _, nclass = y_train.shape

    g = tf.Graph()
    with g.as_default():

        data_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize)
        data_valid = tg.SequentialIterator(X_valid, y_valid, batchsize=batchsize)

        X_ph = tf.placeholder('float32', [None, h, w, c])

        # One label placeholder per output component (only nclass here).
        y_phs = []
        for comp in [nclass]:
            y_phs.append(tf.placeholder('float32', [None, comp]))


        start = tg.StartNode(input_vars=[X_ph])

        # Spatial sizes after each VALID 5x5 conv (strides 1, 2, 2).
        # NOTE(review): this `valid` call uses filters=/strides= keywords,
        # unlike the kernel_size=/stride= form used elsewhere — confirm
        # the helper's signature accepts both.
        h1, w1 = valid(h, w, filters=(5,5), strides=(1,1))
        h2, w2 = valid(h1, w1, filters=(5,5), strides=(2,2))
        h3, w3 = valid(h2, w2, filters=(5,5), strides=(2,2))
        flat_dim = int(h3*w3*32)
        scope = 'encoder'
        bottleneck_dim = 300
        # Encoder: three VALID convs -> flatten -> 300-d -> tanh bottleneck.
        enc_hn = tg.HiddenNode(prev=[start],
                               layers=[Conv2D(input_channels=c, num_filters=32, kernel_size=(5,5), stride=(1,1), padding='VALID'),
                                       TFBatchNormalization(name=scope + '/genc1'),
                                       RELU(),
                                       Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                                       TFBatchNormalization(name=scope + '/genc2'),
                                       RELU(),
                                       Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                                       TFBatchNormalization(name=scope + '/genc3'),
                                       RELU(),
                                       Flatten(),
                                       Linear(flat_dim, 300),
                                       TFBatchNormalization(name=scope + '/genc4'),
                                       RELU(),
                                       Linear(300, bottleneck_dim),
                                       Tanh()
                                       ])

        # Classifier head on top of the bottleneck.
        h2_Node = tg.HiddenNode(prev=[enc_hn],
                                layers=[Linear(prev_dim=bottleneck_dim, this_dim=nclass),
                                        Softmax()])

        end_nodes = [tg.EndNode(prev=[h2_Node])]

        graph = Graph(start=[start], end=end_nodes)

        # Separate symbolic outputs for train-mode and test-mode passes.
        train_outs_sb = graph.train_fprop()
        test_outs = graph.test_fprop()

        # Loss: mean squared error between softmax output and one-hot label.
        ttl_mse = []
        # import pdb; pdb.set_trace()
        for y_ph, out in zip(y_phs, train_outs_sb):
            #ttl_mse.append(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_ph, out)))
            ttl_mse.append(tf.reduce_mean((y_ph-out)**2))


        mse = sum(ttl_mse)
        #optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse)

        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9)

        # saver_init = tf.train.Saver()
        saver = tf.train.Saver()
        vardir = './var/1'
        if not os.path.exists(vardir):
            os.makedirs(vardir)

        tf.set_random_seed(1)
        init = tf.global_variables_initializer()

        with tf.Session(config = tf.ConfigProto(gpu_options = gpu_options)) as sess:
            sess.run(init)
            if restore == 1:
                re_saver = tf.train.Saver()
                re_saver.restore(sess, vardir + "/model.ckpt")
                print("Model restored.")

            # save_path = saver_init.save(sess, vardir + "/init.ckpt")
            # print("Model saved in file: %s" % save_path)
            max_epoch = 2
            temp_acc = []
            for epoch in range(max_epoch):
                # print 'epoch:', epoch
                # Training pass: optimize, then accumulate error/accuracy.
                train_error = 0
                train_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_train:
                    feed_dict = {X_ph:X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch
                        # import pdb; pdb.set_trace() 
                    sess.run(optimizer, feed_dict=feed_dict)
                    train_outs = sess.run(train_outs_sb, feed_dict=feed_dict)
                    train_error += total_mse(train_outs, [ys])[0]
                    train_accuracy += total_accuracy(train_outs, [ys])[0]
                    ttl_examples += len(X_batch)

                # Validation pass using the test-mode forward prop.
                valid_error = 0
                valid_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_valid:
                    feed_dict = {X_ph:X_batch}  
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch

                    valid_outs = sess.run(test_outs, feed_dict=feed_dict)
                    valid_error += total_mse(valid_outs, [ys])[0]
                    valid_accuracy += total_accuracy(valid_outs, [ys])[0]
                    ttl_examples += len(X_batch)


                temp_acc.append(valid_accuracy/float(ttl_examples))
            # Single checkpoint after the final epoch (unlike the
            # per-epoch saving in Vanilla_Classifier).
            save_path = saver.save(sess, vardir + "/model.ckpt")
            print("Model saved in file: %s" % save_path)
            print 'max accuracy is:\t', max(temp_acc)        
Пример #7
0
Файл: gan.py Проект: Shirlly/GAN
    def discriminator(self):
        """Build the GAN discriminator graph.

        self.generator() must have been called first: it creates
        self.gen_hn (the generator output node), self.noise_sn and
        self.y_sn, all of which are wired into this graph.

        Returns:
            (real_ph, real_train, real_valid, fake_train, fake_valid,
            dis_var_list): placeholder for real images, symbolic
            train/test outputs for the real and fake branches, and all
            global variables created under the 'Discriminator' scope.
        """
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # Spatial sizes after the three VALID convs in the trunk.
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                real_ph = tf.placeholder('float32', [None, self.h, self.w, 1],
                                         name='real')
                real_sn = tg.StartNode(input_vars=[real_ph])

                # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake')
                # fake_sn = tg.StartNode(input_vars=[fake_ph])

                # Shared conv trunk applied to both the real images and
                # the generator output (self.gen_hn), ending in a
                # bottleneck feature vector.
                disc_hn = tg.HiddenNode(
                    prev=[real_sn, self.gen_hn],
                    layers=[
                        Conv2D(input_channels=1,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c1'),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c2'),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c3'),
                        LeakyRELU(),
                        #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        #    RELU(),
                        Flatten(),
                        Linear(flat_dim, self.bottleneck_dim),
                        TFBatchNormalization(name=scope + '/l1'),
                        LeakyRELU(),
                        # Dropout(0.5),
                    ])

                # Class head: nclass-way softmax over the bottleneck.
                class_hn = tg.HiddenNode(prev=[disc_hn],
                                         layers=[
                                             Linear(self.bottleneck_dim,
                                                    self.nclass),
                                             Softmax()
                                         ])

                # Real/fake head: a single raw logit (sigmoid commented out).
                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.bottleneck_dim, 1),
                        #  Sigmoid()
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                # Real branch: from the real-image placeholder.
                graph = tg.Graph(start=[real_sn],
                                 end=[real_class_en, real_judge_en])

                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()
                # dis_var_list = graph.variables
                # for var in dis_var_list:
                # print var.name

                # Fake branch: from the generator's noise/label inputs,
                # through self.gen_hn into the same shared trunk.
                graph = tg.Graph(start=[self.noise_sn, self.y_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                # print('========')
                # for var in graph.variables:
                # print var.name

                # Collect every variable created under this name scope so
                # the discriminator can be optimized separately.
                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)
                # for var in dis_var_list:
                #     print(var.name)
                #
                # print('=========')
                # for var in tf.global_variables():
                #     print(var.name)
                # import pdb; pdb.set_trace()
                # print()

            # graph = tg.Graph(start=[G_sn], end=[class_en, judge_en])
            # class_train_sb, judge_train_sb = graph.train_fprop() # symbolic outputs
            # class_test_sb, judge_test_sb = graph.test_fprop() # symbolic outputs

        return real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
Пример #8
0
Файл: gan.py Проект: Shirlly/GAN
    def generator(self):
        """Build the conditional (label + noise) generator graph.

        Creates self.noise_sn, self.y_sn and self.gen_hn, which the
        discriminator reuses.

        Returns:
            (y_ph, noise_ph, G_train_sb, G_test_sb, gen_var_list): the
            label and noise placeholders, symbolic train/test generator
            outputs, and all global variables created under the
            'Generator' scope.
        """
        self.generator_called = True
        with self.tf_graph.as_default():
            scope = 'Generator'
            with tf.name_scope(scope):
                # X_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='X')
                # X_sn = tg.StartNode(input_vars=[X_ph])
                noise_ph = tf.placeholder('float32',
                                          [None, self.bottleneck_dim],
                                          name='noise')
                self.noise_sn = tg.StartNode(input_vars=[noise_ph])

                # Spatial sizes a matching encoder would produce; used to
                # size the transposed-conv output shapes below.
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                print('h1:{}, w1:{}'.format(h1, w1))
                print('h2:{}, w2:{}'.format(h2, w2))
                print('h3:{}, w3:{}'.format(h3, w3))
                print('flat dim:{}'.format(flat_dim))

                # enc_hn = tg.HiddenNode(prev=[X_sn],
                #                        layers=[Conv2D(input_channels=1, num_filters=32, kernel_size=(5,5), stride=(1,1), padding='VALID'),
                #                                RELU(),
                #                                Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                #                                RELU(),
                #                                Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                #                                RELU(),
                #                             #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                #                             #    RELU(),
                #                                Flatten(),
                #                                Linear(flat_dim, 300),
                #                                RELU(),
                #                                # seq.add(Dropout(0.5))
                #                                Linear(300, self.bottleneck_dim),
                #                                Tanh(),
                #                                ])

                y_ph = tf.placeholder('float32', [None, self.nclass], name='y')
                self.y_sn = tg.StartNode(input_vars=[y_ph])

                # Concatenate noise and label along the feature axis.
                noise_hn = tg.HiddenNode(prev=[self.noise_sn, self.y_sn],
                                         input_merge_mode=Concat(1))

                # Decoder: linear projection, then transposed convs that
                # upsample 1x1 -> 2x2 -> (h2,w2) -> (h1,w1) -> (h,w).
                self.gen_hn = tg.HiddenNode(
                    prev=[noise_hn],
                    layers=[
                        # NOTE(review): the "+ 10" is presumably
                        # self.nclass for the concatenated label — confirm.
                        Linear(self.bottleneck_dim + 10, flat_dim),
                        RELU(),

                        ######[ Method 0 ]######
                        #    Reshape((-1, h3, w3, 32)),
                        #    Conv2D_Transpose(input_channels=32, num_filters=100, output_shape=(h2,w2),
                        #                     kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        ######[ End Method 0 ]######

                        ######[ Method 1 ]######
                        Reshape((-1, 1, 1, flat_dim)),
                        Conv2D_Transpose(input_channels=flat_dim,
                                         num_filters=200,
                                         output_shape=(2, 2),
                                         kernel_size=(2, 2),
                                         stride=(1, 1),
                                         padding='VALID'),
                        TFBatchNormalization(name=scope + '/dc1'),
                        RELU(),
                        Conv2D_Transpose(input_channels=200,
                                         num_filters=100,
                                         output_shape=(h2, w2),
                                         kernel_size=(9, 9),
                                         stride=(1, 1),
                                         padding='VALID'),
                        ######[ End Method 1 ]######
                        TFBatchNormalization(name=scope + '/dc2'),
                        RELU(),
                        Conv2D_Transpose(input_channels=100,
                                         num_filters=50,
                                         output_shape=(h1, w1),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        TFBatchNormalization(name=scope + '/dc3'),
                        RELU(),
                        Conv2D_Transpose(input_channels=50,
                                         num_filters=1,
                                         output_shape=(self.h, self.w),
                                         kernel_size=(5, 5),
                                         stride=(1, 1),
                                         padding='VALID'),
                        SetShape((-1, self.h, self.w, 1)),
                        Sigmoid()
                    ])

                y_en = tg.EndNode(prev=[self.gen_hn])
                graph = tg.Graph(start=[self.noise_sn, self.y_sn], end=[y_en])

                # Single-output graph: take element 0 of the fprop lists.
                G_train_sb = graph.train_fprop()[0]
                G_test_sb = graph.test_fprop()[0]
                # import pdb; pdb.set_trace()
                # Collect every variable created under this name scope.
                gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return y_ph, noise_ph, G_train_sb, G_test_sb, gen_var_list
Пример #9
0
    def generator(self):
        """Build an encoder-decoder generator conditioned on real images.

        Encodes self.real_ph to a bottleneck, sums it with the noise
        input, and decodes back to an image. Creates self.gen_real_sn,
        self.noise_sn and self.gen_hn (reused by the discriminator).

        Returns:
            (self.y_ph, self.noise_ph, G_train_sb, G_test_sb,
            gen_var_list): existing label/noise placeholders, symbolic
            train/test generator outputs, and all global variables
            created under the 'Generator' scope.
        """
        self.generator_called = True
        with self.tf_graph.as_default():
            scope = 'Generator'
            with tf.name_scope(scope):
                # Spatial sizes after each VALID conv of the encoder;
                # also used as transposed-conv output shapes below.
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                print('h1:{}, w1:{}'.format(h1, w1))
                print('h2:{}, w2:{}'.format(h2, w2))
                print('h3:{}, w3:{}'.format(h3, w3))
                print('flat dim:{}'.format(flat_dim))

                self.gen_real_sn = tg.StartNode(input_vars=[self.real_ph])

                # Encoder: three VALID convs -> flatten -> 300-d ->
                # tanh bottleneck of size self.bottleneck_dim.
                enc_hn = tg.HiddenNode(
                    prev=[self.gen_real_sn],
                    layers=[
                        Conv2D(input_channels=self.c,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc1'),
                        RELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc2'),
                        RELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc3'),
                        RELU(),
                        #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        #    RELU(),
                        Flatten(),
                        Linear(flat_dim, 300),
                        TFBatchNormalization(name=scope + '/genc4'),
                        RELU(),
                        Linear(300, self.bottleneck_dim),
                        Tanh(),
                    ])

                self.noise_sn = tg.StartNode(input_vars=[self.noise_ph])

                # Decoder: noise and encoded image are merged by
                # element-wise Sum, then upsampled back to (h, w, c).
                self.gen_hn = tg.HiddenNode(
                    prev=[self.noise_sn, enc_hn],
                    input_merge_mode=Sum(),
                    layers=[
                        Linear(self.bottleneck_dim, flat_dim),
                        RELU(),

                        ######[ Method 0 ]######
                        #    Reshape((-1, h3, w3, 32)),
                        #    Conv2D_Transpose(input_channels=32, num_filters=100, output_shape=(h2,w2),
                        #                     kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        ######[ End Method 0 ]######

                        ######[ Method 1 ]######
                        Reshape((-1, 1, 1, flat_dim)),
                        #    Reshape((-1, h))
                        Conv2D_Transpose(input_channels=flat_dim,
                                         num_filters=200,
                                         output_shape=(h3, w3),
                                         kernel_size=(h3, w3),
                                         stride=(1, 1),
                                         padding='VALID'),
                        #    BatchNormalization(layer_type='conv', dim=200, short_memory=0.01),
                        TFBatchNormalization(name=scope + '/g1'),
                        RELU(),
                        Conv2D_Transpose(input_channels=200,
                                         num_filters=100,
                                         output_shape=(h2, w2),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        #    BatchNormalization(layer_type='conv', dim=100, short_memory=0.01),
                        ######[ End Method 1 ]######
                        TFBatchNormalization(name=scope + '/g2'),
                        RELU(),
                        Conv2D_Transpose(input_channels=100,
                                         num_filters=50,
                                         output_shape=(h1, w1),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        #    BatchNormalization(layer_type='conv', dim=50, short_memory=0.01),
                        TFBatchNormalization(name=scope + '/g3'),
                        RELU(),
                        Conv2D_Transpose(input_channels=50,
                                         num_filters=self.c,
                                         output_shape=(self.h, self.w),
                                         kernel_size=(5, 5),
                                         stride=(1, 1),
                                         padding='VALID'),
                        SetShape((-1, self.h, self.w, self.c)),
                        Sigmoid()
                    ])

                # NOTE(review): these recomputed h, w values are not used
                # anywhere below — looks like leftover scratch work.
                h, w = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1))
                h, w = valid(h, w, kernel_size=(5, 5), stride=(2, 2))
                h, w = valid(h, w, kernel_size=(5, 5), stride=(2, 2))
                h, w = valid(h, w, kernel_size=(h3, w3), stride=(1, 1))

                y_en = tg.EndNode(prev=[self.gen_hn])

                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[y_en])

                # Single-output graph: take element 0 of the fprop lists.
                G_train_sb = graph.train_fprop()[0]
                G_test_sb = graph.test_fprop()[0]
                # Collect every variable created under this name scope.
                gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.y_ph, self.noise_ph, G_train_sb, G_test_sb, gen_var_list
Пример #10
0
    def discriminator_allconv(self):
        """Build the all-convolutional discriminator.

        A single conv trunk (all-conv style: only 3x3/1x1 convolutions,
        stride-2 convs instead of pooling, global average pooling at the
        end) is shared by two heads: a class head producing nclass logits
        and a judge head producing one real/fake logit.  Two graphs are
        wired over the same nodes: one fed by the real-image placeholder
        and one fed by the generator.

        Returns:
            (real_ph, real_train, real_valid, fake_train, fake_valid,
            dis_var_list): the real-image placeholder, the symbolic
            [class_out, judge_out] outputs for the real and fake branches
            in train/test mode, and the variables collected under `scope`.
        """
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                dis_real_sn = tg.StartNode(input_vars=[self.real_ph])

                # Track the spatial size through the SAME-padded conv stack:
                # only the two stride-2 convs shrink (h, w).  The result
                # sizes the global average pooling at the end of the trunk.
                h, w = same(in_height=self.h,
                            in_width=self.w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(2, 2),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))

                h, w = same(in_height=h,
                            in_width=w,
                            stride=(2, 2),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(1, 1))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))

                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(1, 1))
                print('h, w', h, w)
                print('===============')

                # NOTE: the batch-norm layer names are prefixed with `scope`
                # so their variables are matched by the
                # tf.get_collection(..., scope=scope) call below, consistent
                # with the other generator/discriminator builders in this
                # file (tf.name_scope does not prefix variable names).
                disc_hn = tg.HiddenNode(
                    prev=[dis_real_sn, self.gen_hn],
                    layers=[
                        Dropout(0.2),
                        Conv2D(input_channels=self.c,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name=scope + '/b1'),
                        Conv2D(input_channels=96,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        Dropout(0.5),
                        Conv2D(input_channels=96,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(2, 2),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name=scope + '/b3'),
                        Conv2D(input_channels=96,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name=scope + '/b5'),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(2, 2),
                               padding='SAME'),
                        LeakyRELU(),
                        Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name=scope + '/b7'),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(1, 1),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        Dropout(0.5),
                        # 1x1 conv down to nclass channels, then global
                        # average pooling over the remaining (h, w) map.
                        Conv2D(input_channels=192,
                               num_filters=self.nclass,
                               kernel_size=(1, 1),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name=scope + '/b9'),
                        AvgPooling(poolsize=(h, w),
                                   stride=(1, 1),
                                   padding='VALID'),
                        Flatten(),
                    ])

                print('h,w', h, w)
                print('==============')
                # Both heads emit raw logits; the Softmax/Sigmoid activations
                # are deliberately omitted here (presumably applied by the
                # loss downstream).
                class_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.nclass, self.nclass),
                    ])

                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.nclass, 1),
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                graph = tg.Graph(start=[dis_real_sn],
                                 end=[real_class_en, real_judge_en])
                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()

                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
Пример #11
0
    def discriminator(self):
        """Build the image discriminator.

        A shared convolutional trunk feeds two heads: a softmax classifier
        over self.nclass classes and a sigmoid real/fake judge.  Two graphs
        are wired over the same nodes: one starting from the real-image
        placeholder and one from the generator.

        Returns:
            (real_ph, real_train, real_valid, fake_train, fake_valid,
            dis_var_list)
        """
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # Spatial size after each VALID conv; flat_dim sizes the
                # Flatten -> bottleneck projection at the end of the trunk.
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)

                real_start = tg.StartNode(input_vars=[self.real_ph])

                # Conv trunk: three 5x5 VALID convs (strides 1, 2, 2), each
                # followed by batch norm and a leaky ReLU, then a linear
                # bottleneck.
                trunk_layers = []
                for idx, (in_ch, conv_stride) in enumerate(
                        [(self.c, (1, 1)), (32, (2, 2)), (32, (2, 2))],
                        start=1):
                    trunk_layers.append(
                        Conv2D(input_channels=in_ch,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=conv_stride,
                               padding='VALID'))
                    trunk_layers.append(
                        TFBatchNormalization(name='%s/d%d' % (scope, idx)))
                    trunk_layers.append(LeakyRELU())
                trunk_layers.extend([
                    Flatten(),
                    Linear(flat_dim, self.bottleneck_dim),
                    TFBatchNormalization(name=scope + '/d4'),
                    LeakyRELU(),
                ])

                trunk_hn = tg.HiddenNode(prev=[real_start, self.gen_hn],
                                         layers=trunk_layers)

                class_head = tg.HiddenNode(
                    prev=[trunk_hn],
                    layers=[Linear(self.bottleneck_dim, self.nclass),
                            Softmax()])

                judge_head = tg.HiddenNode(
                    prev=[trunk_hn],
                    layers=[Linear(self.bottleneck_dim, 1),
                            Sigmoid()])

                real_class_en = tg.EndNode(prev=[class_head])
                real_judge_en = tg.EndNode(prev=[judge_head])

                fake_class_en = tg.EndNode(prev=[class_head])
                fake_judge_en = tg.EndNode(prev=[judge_head])

                real_graph = tg.Graph(start=[real_start],
                                      end=[real_class_en, real_judge_en])
                real_train = real_graph.train_fprop()
                real_valid = real_graph.test_fprop()

                fake_graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                      end=[fake_class_en, fake_judge_en])
                fake_train = fake_graph.train_fprop()
                fake_valid = fake_graph.test_fprop()

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
Пример #12
0
    def discriminator(self):
        """Build the character-CNN discriminator over real/generated text.

        Real inputs (integer char ids) are one-hot encoded and transposed so
        the sentence axis becomes the channel axis, then a shared conv trunk
        feeds a softmax class head and a real/fake judge head.  Two graphs
        are wired: one from the real-text placeholder, one from the
        generator's noise input.

        Returns:
            (real_ph, real_train, real_valid, fake_train, fake_valid,
            dis_var_list)
        """
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # Spatial size after each VALID conv over the
                # (char_embed_dim, word_len) plane; (hf, wf) sizes the
                # Flatten -> bottleneck projection below.  (A stale
                # image-based valid()/flat_dim computation that was
                # immediately overwritten has been removed.)
                h1, w1 = valid(self.char_embed_dim,
                               self.word_len,
                               kernel_size=(self.char_embed_dim, 3),
                               stride=(1, 1))
                print('h1:{}, w1:{}'.format(h1, w1))
                h2, w2 = valid(h1, w1, kernel_size=(1, 3), stride=(1, 1))
                print('h2:{}, w2:{}'.format(h2, w2))
                h3, w3 = valid(h2, w2, kernel_size=(1, 3), stride=(1, 1))
                print('h3:{}, w3:{}'.format(h3, w3))
                hf, wf = h3, w3
                n_filters = 100

                real_sn = tg.StartNode(input_vars=[self.real_ph])

                # One-hot encode char ids and move the sentence axis to the
                # channel position expected by the conv trunk.
                real_hn = tg.HiddenNode(prev=[real_sn],
                                        layers=[
                                            OneHot(self.char_embed_dim),
                                            Transpose(perm=[0, 3, 2, 1])
                                        ])

                disc_hn = tg.HiddenNode(
                    prev=[real_hn, self.gen_hn],
                    layers=[
                        Conv2D(input_channels=self.sent_len,
                               num_filters=100,
                               kernel_size=(self.char_embed_dim, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d1'),
                        LeakyRELU(),
                        Conv2D(input_channels=100,
                               num_filters=100,
                               kernel_size=(1, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d2'),
                        LeakyRELU(),
                        Conv2D(input_channels=100,
                               num_filters=100,
                               kernel_size=(1, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d3'),
                        LeakyRELU(),
                        Flatten(),
                        Linear(int(hf * wf * n_filters), self.bottleneck_dim),
                        TFBatchNormalization(name=scope + '/d4'),
                        LeakyRELU(),
                    ])

                class_hn = tg.HiddenNode(prev=[disc_hn],
                                         layers=[
                                             Linear(self.bottleneck_dim,
                                                    self.nclass),
                                             Softmax()
                                         ])

                # Judge head emits a raw logit; Sigmoid is deliberately
                # omitted (presumably applied by the loss downstream).
                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.bottleneck_dim, 1),
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                graph = tg.Graph(start=[real_sn],
                                 end=[real_class_en, real_judge_en])

                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()

                graph = tg.Graph(start=[self.noise_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
Пример #13
0
def model3D_2(img=(84, 256, 256)):
    """Build a small 3D encoder-decoder segmentation network.

    Encoder: 5x5x5 conv (1 -> 8 channels) + batch norm + max-pool + ReLU,
    then a 3x3x3 conv (8 -> 16).  Decoder: two transposed convs that
    restore first the post-pool size and then the full input volume,
    ending in a two-channel softmax.

    Args:
        img: (depth, height, width) of the input volume.

    Returns:
        A tg.Sequential model.
    """
    with tf.name_scope('WMH_2Chan_Input'):
        unit_stride = (1, 1, 1)
        down_stride = (2, 2, 2)
        # Size after the single max-pool; the first transposed conv
        # upsamples back to this, the second back to the full volume.
        half_size = updateConvLayerSize(img, down_stride)

        stack = [
            Conv3D(input_channels=1,
                   num_filters=8,
                   kernel_size=(5, 5, 5),
                   stride=unit_stride,
                   padding='SAME'),
            TFBatchNormalization(name='b1'),
            MaxPool3D(poolsize=(2, 2, 2), stride=down_stride, padding='SAME'),
            RELU(),
            Conv3D(input_channels=8,
                   num_filters=16,
                   kernel_size=(3, 3, 3),
                   stride=unit_stride,
                   padding='SAME'),
            TFBatchNormalization(name='b2'),
            Conv3D_Tranpose1(input_channels=16,
                             num_filters=8,
                             output_shape=half_size,
                             kernel_size=(3, 3, 3),
                             stride=unit_stride,
                             padding='SAME'),
            TFBatchNormalization(name='b4'),
            RELU(),
            Conv3D_Tranpose1(input_channels=8,
                             num_filters=2,
                             output_shape=img,
                             kernel_size=(3, 3, 3),
                             stride=down_stride,
                             padding='SAME'),
            Softmax(),
        ]

        seq = tg.Sequential()
        for layer in stack:
            seq.add(layer)
    return seq
Пример #14
0
def CNN_Classifier(X_train, y_train, X_valid, y_valid, restore):
    """Train a deep 2x2-conv CNN classifier and report validation accuracy.

    Builds a tensorgraph graph of ten 2x2 VALID convolutions (each with
    batch norm + ReLU), a stride-2 max-pool, and a final linear + softmax
    layer; optimizes mean squared error against the one-hot labels with
    Adam; checkpoints to ./var/5; prints the best validation accuracy.

    Args:
        X_train, y_train: training images of shape (N, h, w, c) and one-hot
            labels of shape (N, nclass) -- shapes are read off the arrays.
        X_valid, y_valid: validation split with the same layout.
        restore: if == 1, restore weights from ./var/5/model.ckpt before
            training.
    """
    batchsize = 64
    learning_rate = 0.001
    # Infer geometry directly from the data arrays.
    _, h, w, c = X_train.shape
    _, nclass = y_train.shape

    g = tf.Graph()
    with g.as_default():
        data_train = tg.SequentialIterator(X_train,
                                           y_train,
                                           batchsize=batchsize)
        data_valid = tg.SequentialIterator(X_valid,
                                           y_valid,
                                           batchsize=batchsize)

        X_ph = tf.placeholder('float32', [None, h, w, c])

        # One placeholder per output component (here only the class labels).
        y_phs = []
        for comp in [nclass]:
            y_phs.append(tf.placeholder('float32', [None, comp]))

        start = tg.StartNode(input_vars=[X_ph])

        # Shrink (h, w) once per 2x2 VALID conv (ten of them) plus the final
        # stride-2 stage, so `dim` below matches the flattened trunk output.
        # NOTE(review): this `valid` takes strides=/filters= kwargs, unlike
        # the stride=/kernel_size= variant used elsewhere in this file --
        # presumably a different helper; confirm.
        h, w = valid(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(2, 2), filters=(2, 2))
        # import pdb; pdb.set_trace()
        #h1, w1 = valid(ch_embed_dim, word_len, strides=(1,1), filters=(ch_embed_dim,4))
        num = 32
        # Flattened size of the trunk output feeding the final Linear layer.
        dim = int(h * w * num)

        # Conv trunk: ten 2x2 convs with batch norm + ReLU (dropout after
        # the eighth), a stride-2 max-pool, then flatten.
        h1_Node = tg.HiddenNode(prev=[start],
                                layers=[
                                    Conv2D(input_channels=c,
                                           num_filters=num,
                                           padding='VALID',
                                           kernel_size=(2, 2),
                                           stride=(1, 1)),
                                    TFBatchNormalization(name='layer1'),
                                    RELU(),
                                    Conv2D(input_channels=num,
                                           num_filters=num,
                                           padding='VALID',
                                           kernel_size=(2, 2),
                                           stride=(1, 1)),
                                    TFBatchNormalization(name='layer2'),
                                    RELU(),
                                    Conv2D(input_channels=num,
                                           num_filters=num,
                                           padding='VALID',
                                           kernel_size=(2, 2),
                                           stride=(1, 1)),
                                    TFBatchNormalization(name='layer3'),
                                    RELU(),
                                    Conv2D(input_channels=num,
                                           num_filters=num,
                                           padding='VALID',
                                           kernel_size=(2, 2),
                                           stride=(1, 1)),
                                    TFBatchNormalization(name='layer4'),
                                    RELU(),
                                    Conv2D(input_channels=num,
                                           num_filters=num,
                                           padding='VALID',
                                           kernel_size=(2, 2),
                                           stride=(1, 1)),
                                    TFBatchNormalization(name='layer5'),
                                    RELU(),
                                    Conv2D(input_channels=num,
                                           num_filters=num,
                                           padding='VALID',
                                           kernel_size=(2, 2),
                                           stride=(1, 1)),
                                    TFBatchNormalization(name='layer6'),
                                    RELU(),
                                    Conv2D(input_channels=num,
                                           num_filters=num,
                                           padding='VALID',
                                           kernel_size=(2, 2),
                                           stride=(1, 1)),
                                    TFBatchNormalization(name='layer7'),
                                    RELU(),
                                    Conv2D(input_channels=num,
                                           num_filters=num,
                                           padding='VALID',
                                           kernel_size=(2, 2),
                                           stride=(1, 1)),
                                    TFBatchNormalization(name='layer8'),
                                    RELU(),
                                    Dropout(dropout_below=0.5),
                                    Conv2D(input_channels=num,
                                           num_filters=num,
                                           padding='VALID',
                                           kernel_size=(2, 2),
                                           stride=(1, 1)),
                                    TFBatchNormalization(name='layer9'),
                                    RELU(),
                                    Conv2D(input_channels=num,
                                           num_filters=num,
                                           padding='VALID',
                                           kernel_size=(2, 2),
                                           stride=(1, 1)),
                                    TFBatchNormalization(name='layer10'),
                                    RELU(),
                                    MaxPooling(poolsize=(2, 2),
                                               stride=(2, 2),
                                               padding='VALID'),
                                    Reshape(shape=(-1, dim))
                                ])

        # Classification head.
        h2_Node = tg.HiddenNode(
            prev=[h1_Node],
            layers=[Linear(prev_dim=dim, this_dim=nclass),
                    Softmax()])

        end_nodes = [tg.EndNode(prev=[h2_Node])]

        # NOTE(review): bare `Graph` (not tg.Graph) -- presumably imported
        # directly at the top of the file; verify.
        graph = Graph(start=[start], end=end_nodes)

        train_outs_sb = graph.train_fprop()
        test_outs = graph.test_fprop()

        # MSE against one-hot targets is the training objective here
        # (softmax cross-entropy left commented out below).
        ttl_mse = []
        # import pdb; pdb.set_trace()
        for y_ph, out in zip(y_phs, train_outs_sb):
            #ttl_mse.append(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_ph, out)))
            ttl_mse.append(tf.reduce_mean((y_ph - out)**2))

        mse = sum(ttl_mse)
        #optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse)

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        saver = tf.train.Saver()
        # Checkpoint directory; created if absent.
        vardir = './var/5'
        if not os.path.exists(vardir):
            os.makedirs(vardir)

        tf.set_random_seed(1)
        init = tf.global_variables_initializer()
        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options)) as sess:

            sess.run(init)
            # Optionally resume from the previous checkpoint.
            if restore == 1:
                re_saver = tf.train.Saver()
                re_saver.restore(sess, vardir + "/model.ckpt")
                print("Model restored.")

            max_epoch = 100
            temp_acc = []
            for epoch in range(max_epoch):
                # print 'epoch:', epoch
                # One optimizer pass per batch, then a second forward pass
                # to accumulate training error/accuracy.
                train_error = 0
                train_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_train:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch
                        # import pdb; pdb.set_trace()
                    sess.run(optimizer, feed_dict=feed_dict)
                    train_outs = sess.run(train_outs_sb, feed_dict=feed_dict)
                    train_error += total_mse(train_outs, [ys])[0]
                    train_accuracy += total_accuracy(train_outs, [ys])[0]
                    ttl_examples += len(X_batch)

                # Validation pass using the test-mode (no-dropout) outputs.
                valid_error = 0
                valid_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_valid:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch

                    valid_outs = sess.run(test_outs, feed_dict=feed_dict)
                    valid_error += total_mse(valid_outs, [ys])[0]
                    valid_accuracy += total_accuracy(valid_outs, [ys])[0]
                    ttl_examples += len(X_batch)

                temp_acc.append(valid_accuracy / float(ttl_examples))
            save_path = saver.save(sess, vardir + "/model.ckpt")
            print("Model saved in file: %s" % save_path)
            # NOTE(review): Python 2 print statement below, while the rest of
            # this function uses print() -- this line fails to parse on
            # Python 3; confirm the intended interpreter version.
            print 'max accuracy is:\t', max(temp_acc)