Example #1
    def test(self):

        init = tf.global_variables_initializer()

        with tf.Session() as sess:

            sess.run(init)

            # restore the trained z- and y-encoders
            self.saver_z.restore(sess, self.encode_z_model)
            self.saver_y.restore(sess, self.encode_y_model)

            realbatch_array, _ = MnistData.getNextBatch(self.ds_train, self.label_y, 0, 50,
                                                        self.batch_size)

            output_image, label_y = sess.run([self.fake_images, self.e_y], feed_dict={self.images: realbatch_array})

            # one-hot labels predicted by the y-encoder
            # label_y = tf.argmax(label_y, 1)

            print(label_y)

            save_images(output_image, [8, 8], './{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0))
            save_images(realbatch_array, [8, 8], './{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0))

            gen_img = cv2.imread('./{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0), 0)
            real_img = cv2.imread('./{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0), 0)


            cv2.imshow("test_EGan", gen_img)
            cv2.imshow("Real_Image", real_img)

            cv2.waitKey(-1)  # a non-positive delay blocks until a key is pressed

            print("Test finish!")
Example #2
    def train(self):

        opti_D = tf.train.AdamOptimizer(learning_rate=self.learning_rate_dis, beta1=0.5).minimize(self.loss, var_list=self.d_vars)
        opti_G = tf.train.AdamOptimizer(learning_rate=self.learning_rate_gen, beta1=0.5).minimize(self.G_fake_loss, var_list=self.g_vars)

        init = tf.global_variables_initializer()

        with tf.Session() as sess:

            sess.run(init)

            summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)

            #self.saver.restore(sess , self.model_path)

            batch_num = 0
            e = 0
            step = 0

            while e <= self.max_epoch:

                rand = 0  # offset into the training set (kept fixed rather than random)

                while batch_num < len(self.ds_train) // self.batch_size:

                    step += 1
                    realbatch_array, real_y = MnistData.getNextBatch(self.ds_train, self.label_y, rand, batch_num, self.batch_size)

                    batch_z = np.random.normal(0, 1, size=[self.batch_size, self.sample_size])

                    # optimize D
                    _, summary_str = sess.run([opti_D, summary_op], feed_dict={self.images: realbatch_array, self.z: batch_z, self.y: real_y})
                    summary_writer.add_summary(summary_str, step)
                    # optimize G
                    _, summary_str = sess.run([opti_G, summary_op], feed_dict={self.images: realbatch_array, self.z: batch_z, self.y: real_y})
                    summary_writer.add_summary(summary_str, step)
                    batch_num += 1

                    if step % 1 == 0:  # log every step

                        D_loss = sess.run(self.loss, feed_dict={self.images: realbatch_array, self.z: batch_z, self.y: real_y})
                        fake_loss = sess.run(self.G_fake_loss, feed_dict={self.z: batch_z, self.y: real_y})
                        print("EPOCH %d step %d: D loss = %.7f, G loss = %.7f" % (e, step, D_loss, fake_loss))

                    if np.mod(step, 50) == 1:

                        sample_images = sess.run(self.fake_images, feed_dict={self.z: batch_z, self.y: sample_label()})
                        save_images(sample_images[0:64], [8, 8], './{}/train_{:02d}_{:04d}.png'.format(self.sample_path, e, step))
                        # save the model
                        self.saver.save(sess, self.model_path)

                e += 1
                batch_num = 0

            save_path = self.saver.save(sess, self.model_path)
            print("Model saved in file: %s" % save_path)
Example #3
    def train_ez(self):

        opti_EZ = tf.train.AdamOptimizer(learning_rate=0.01, beta1=0.5).minimize(self.loss_z,
                                                                                 var_list=self.enz_vars)
        init = tf.global_variables_initializer()

        with tf.Session() as sess:

            sess.run(init)
            #summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)

            # restore the trained GAN before fitting the z-encoder
            self.saver.restore(sess, self.model_path)

            batch_num = 0
            e = 0
            step = 0

            while e <= self.max_epoch:

                rand = 0  # offset into the training set (kept fixed rather than random)

                while batch_num < len(self.ds_train) // self.batch_size:

                    step += 1

                    _, label_y = MnistData.getNextBatch(self.ds_train, self.label_y, rand, batch_num,
                                                        self.batch_size)
                    batch_z = np.random.normal(0, 1, size=[self.batch_size, self.sample_size])

                    # optimize the z-encoder
                    sess.run(opti_EZ, feed_dict={self.y: label_y, self.z: batch_z})
                    batch_num += 1

                    if step % 10 == 0:

                        ez_loss = sess.run(self.loss_z, feed_dict={self.y: label_y, self.z: batch_z})
                        #summary_writer.add_summary(ez_loss, step)
                        print("EPOCH %d step %d EZ loss %.7f" % (e, step, ez_loss))

                    if np.mod(step, 50) == 0:

                        # sample_images = sess.run(self.fake_images, feed_dict={self.e_y:})
                        # save_images(sample_images[0:64], [8, 8],
                        #             './{}/train_{:02d}_{:04d}.png'.format(self.sample_path, e, step))
                        self.saver_z.save(sess, self.encode_z_model)

                e += 1
                batch_num = 0

            save_path = self.saver_z.save(sess, self.encode_z_model)
            print("Model saved in file: %s" % save_path)
Example #4
    def __init__(self, batch_size, max_epoch, build_model_flag, model_path,
                 encode_z_model, encode_y_model, data, label, extend_value,
                 network_type, sample_size, sample_path, log_dir,
                 gen_learning_rate, dis_learning_rate, info_reg_coeff):

        self.batch_size = batch_size
        self.max_epoch = max_epoch
        self.model_path = model_path
        self.encode_z_model = encode_z_model
        self.encode_y_model = encode_y_model
        self.ds_train = data
        self.label_y = label
        self.extend_value = extend_value
        self.type = network_type
        self.sample_size = sample_size
        self.sample_path = sample_path
        self.log_dir = log_dir
        self.learning_rate_gen = gen_learning_rate
        self.learning_rate_dis = dis_learning_rate
        self.info_reg_coeff = info_reg_coeff
        self.log_vars = []
        #self.output_dist= MeanBernoulli(28*28)
        self.channel = 1
        self.y_dim = 10

        self.output_size = MnistData().image_size
        self.build_model = build_model_flag

        self.images = tf.placeholder(tf.float32, [
            self.batch_size, self.output_size, self.output_size, self.channel
        ])
        self.z = tf.placeholder(tf.float32,
                                [self.batch_size, self.sample_size])
        self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim])

        self.weights1, self.biases1 = self.get_gen_variables()
        self.weights2, self.biases2 = self.get_dis_variables()

        # build_model_flag selects one of four network variants
        if self.build_model == 0:
            self.build_model1()
        elif self.build_model == 1:
            self.build_model2()
        elif self.build_model == 2:
            self.build_model3()
        else:
            self.build_model4()
Example #5
    sample_size = 64
    dis_learn_rate = 0.0002
    gen_learn_rate = 0.0002

    exp_name = "mnist_%s" % timestamp

    log_dir = os.path.join(root_log_dir, exp_name)
    checkpoint_dir = os.path.join(root_checkpoint_dir, exp_name)

    mkdir_p(log_dir)
    mkdir_p(checkpoint_dir)
    mkdir_p(sample_path)
    mkdir_p(encode_z_checkpoint_dir)
    mkdir_p(encode_y_checkpoint_dir)

    data, label = MnistData().load_mnist()

    infoGan = Gan(batch_size=batch_size,
                  max_epoch=max_epoch,
                  build_model_flag=build_model_flag,
                  model_path=root_checkpoint_dir,
                  encode_z_model=encode_z_checkpoint_dir,
                  encode_y_model=encode_y_checkpoint_dir,
                  data=data,
                  label=label,
                  extend_value=FLAGS.extend,
                  network_type="mnist",
                  sample_size=sample_size,
                  sample_path=sample_path,
                  log_dir=log_dir,
                  gen_learning_rate=gen_learn_rate,
Example #6
#!/usr/bin/env python
import numpy as np
from utils import MnistData


def optimal_system_size(matrix):
    # choose the smallest n at which a singular value drops below half
    # the mean of all preceding singular values
    singulars = np.linalg.svd(matrix, compute_uv=False)
    for i in range(1, len(singulars)):
        if singulars[i] < np.mean(singulars[:i]) / 2:
            print('Optimal n for linear system would be', i)
            return
    print('Optimal n for linear system would be', len(singulars))


data = MnistData(50000, normalize=False)
(u, y) = data.get('trn', i=0)

# Test optimal system size
print('In the beginning...')
optimal_system_size(u)

print('After dimension-wise mean subtraction...')
u -= np.mean(u, axis=1, keepdims=True)
optimal_system_size(u)

print('After variance normalization...')
u /= np.maximum(np.std(u, axis=1, keepdims=True), 1e-10)
optimal_system_size(u)
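As a quick sanity check of the half-the-running-mean criterion (not part of the original script), a rank-5 matrix plus weak noise should make optimal_system_size report an n close to 5:

# synthetic check: exact rank-5 signal plus small full-rank noise
rng = np.random.RandomState(0)
low_rank = rng.randn(100, 5).dot(rng.randn(5, 200))
noisy = low_rank + 0.01 * rng.randn(100, 200)
optimal_system_size(noisy)  # expected to print an n near 5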