Example #1
    def test(self, save_dir):

        # init variables
        tf.global_variables_initializer().run()

        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        load_model_status, global_step = self.load(self.ckpt_dir)
        assert load_model_status == True, '[!] Loading weights FAILED...'
        print(" [*] Loading weights SUCCESS...")

        test_set = np.reshape(self.dataset.test.images, [-1, 28, 28, 1])
        test_corrupt = test_set + np.random.normal(
            0, self.sigma / 255.0, np.shape(test_set)).astype('float32')

        feed_dict_test = {self.Y: test_corrupt}
        recon = self.sess.run(self.Y_, feed_dict=feed_dict_test)

        groundtruth = np.clip(255 * np.squeeze(test_set), 0,
                              255).astype('uint8')
        outputimage = np.clip(255 * np.squeeze(recon), 0, 255).astype('uint8')
        # calculate PSNR
        avg_psnr_test = cal_psnr(groundtruth, outputimage)

        #saving sample images
        ims(save_dir + "/denoised.png", merge(outputimage[:100], [10, 10]))
        ims(save_dir + "/noisy.png",
            merge(np.squeeze(test_corrupt)[:100], [10, 10]))

        print("---- TEST SET ---- Average PSNR %.3f ---" % avg_psnr_test)
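For reference, `cal_psnr` is used but not defined in this snippet; assuming it computes an average PSNR between two uint8 image batches of the same shape, a minimal NumPy sketch could look like this (the helper's exact behaviour here is an assumption, not the repository's actual implementation):

import numpy as np

def cal_psnr(im1, im2):
    # hypothetical helper: mean PSNR over two uint8 arrays of identical shape
    mse = np.mean((im1.astype(np.float64) - im2.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 20.0 * np.log10(255.0 / np.sqrt(mse))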
Example #2
    def train(self):
        for i in range(self.trials):
            visualization = self.mnist.test.next_batch(self.batchsize)[0]
            reshaped_vis = visualization.reshape(self.batchsize,128,128)
            base_dir = "reconstruction_"+str(i)+ "/"
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)
            ims(base_dir+"base.jpg",merge(reshaped_vis[:64],[8,8]))
            # train
            saver = tf.train.Saver(max_to_keep=2)
            with tf.Session() as sess:
                generation_loss = []
                latent_loss = []
                sess.run(tf.initialize_all_variables())
                for epoch in range(self.epoch):
                    for idx in range(int(self.n_samples / self.batchsize)):
                        batch = self.mnist.train.next_batch(self.batchsize)[0]
                        _, gen_loss, lat_loss = sess.run((self.optimizer, self.generation_loss, self.latent_loss), feed_dict={self.images: batch})
                        # dumb hack to print cost every epoch
                        if idx % (self.n_samples - 3) == 0:
                            print("epoch %d: genloss %f latloss %f" % (epoch, np.mean(gen_loss), np.mean(lat_loss)))
                            generation_loss.append(np.mean(gen_loss))
                            latent_loss.append(np.mean(lat_loss))
                            saver.save(sess, os.getcwd()+"/training/train",global_step=epoch)
                            generated_test = sess.run(self.generated_images, feed_dict={self.images: visualization})
                            generated_test = generated_test.reshape(self.batchsize,128,128)
                            ims(base_dir+str(epoch)+".jpg",merge(generated_test[:64],[8,8]))
                # write the per-epoch losses out and close the files properly
                with open('gen_loss_' + str(self.n_z) + '_latent.txt', 'w') as gen_loss_file:
                    for item in generation_loss:
                        gen_loss_file.write("%s\n" % item)
                with open('lat_loss_' + str(self.n_z) + '_latent.txt', 'w') as lat_loss_file:
                    for item in latent_loss:
                        lat_loss_file.write("%s\n" % item)
        return
Example #3
 def train(self):
     # get images, not labels
     visualization = self.mnist.train.next_batch(self.batchsize)[0]
     reshaped_vis = visualization.reshape(self.batchsize, 28, 28)
     ims("results/base.jpg", merge(reshaped_vis[:64], [8, 8]))
     # train
     saver = tf.train.Saver(max_to_keep=2)
     with tf.Session() as sess:
         sess.run(tf.initialize_all_variables())
         for epoch in range(10):
             for idx in range(int(self.n_samples / self.batchsize)):
                 batch = self.mnist.train.next_batch(self.batchsize)[0]
                 _, gen_loss, lat_loss = sess.run(
                     (self.optimizer, self.generation_loss,
                      self.latent_loss),
                     feed_dict={self.images: batch})
                 # dumb hack to print cost every epoch
                 if idx % (self.n_samples - 3) == 0:
                     print("epoch %d: genloss %f latloss %f" % (
                         epoch, np.mean(gen_loss), np.mean(lat_loss)))
                     saver.save(sess,
                                os.getcwd() + "/training/train",
                                global_step=epoch)
                     # Note: there is a problem in this code. Except during the first
                     # epoch, the visualization batch may also be drawn as training data,
                     # which is logically incorrect: reconstructing samples the model has
                     # already seen during training gives a misleading picture.
                     generated_test = sess.run(
                         self.generated_images,
                         feed_dict={self.images: visualization})
                     generated_test = generated_test.reshape(
                         self.batchsize, 28, 28)
                     ims("results/" + str(epoch) + ".jpg",
                         merge(generated_test[:64], [8, 8]))
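The comments above point out that `visualization` comes from the training split and can therefore reappear in later training batches. A small, hedged adjustment (assuming the usual `input_data`-style MNIST object) is to draw the fixed reference batch from the test split instead, as Example #12 below does:

     # draw the reference batch from the held-out test split so it never
     # overlaps with the batches used for training
     visualization = self.mnist.test.next_batch(self.batchsize)[0]
     reshaped_vis = visualization.reshape(self.batchsize, 28, 28)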
Example #4
    def evaluate(self, valid_corrupt, iter_num, summary_merged,
                 summary_writer):

        feed_dict_eval = {self.X: self.valid_set, self.Y: valid_corrupt}
        valid_output, valid_mse, valid_sure = self.sess.run(
            [self.Y_, self.mse, self.sure], feed_dict=feed_dict_eval)

        groundtruth = np.clip(255 * np.squeeze(self.valid_set), 0,
                              255).astype('uint8')
        noisyimage = np.clip(255 * np.squeeze(valid_corrupt), 0,
                             255).astype('uint8')
        outputimage = np.clip(255 * np.squeeze(valid_output), 0,
                              255).astype('uint8')
        # calculate PSNR
        avg_psnr = cal_psnr(groundtruth, outputimage)
        psnr_summary = self.sess.run(summary_merged,
                                     feed_dict={self.eval_psnr: avg_psnr})
        summary_writer.add_summary(psnr_summary, iter_num)

        #saving sample images
        ims(self.sample_dir + "/" + str(iter_num) + ".png",
            merge(outputimage[:100], [10, 10]))

        print("VALID, mse: %.6f, sure: %.6f" % (valid_mse, valid_sure))
        print("---- Validation Set ---- Average PSNR %.3f ---" % avg_psnr)
Example #5
 def train(self):
     visualization = self.mnist.train.next_batch(self.batchsize)[0]
     reshaped_vis = visualization.reshape(self.batchsize, 28, 28)
     ims("results/base.jpg", merge(reshaped_vis[:64], [8, 8]))
     # train
     saver = tf.train.Saver(max_to_keep=2)
     with tf.Session() as sess:
         sess.run(tf.initialize_all_variables())
         for epoch in range(10):
             for idx in range(int(self.n_samples / self.batchsize)):
                 batch = self.mnist.train.next_batch(self.batchsize)[0]
                 _, gen_loss, lat_loss = sess.run(
                     (self.optimizer, self.generation_loss,
                      self.latent_loss),
                     feed_dict={self.images: batch})
                 # dumb hack to print cost every epoch
                 if idx % (self.n_samples - 3) == 0:
                     print("epoch %d: genloss %f latloss %f" % (
                         epoch, np.mean(gen_loss), np.mean(lat_loss)))
                     saver.save(sess,
                                os.getcwd() + "/training/train",
                                global_step=epoch)
                     generated_test = sess.run(
                         self.generated_images,
                         feed_dict={self.images: visualization})
                     generated_test = generated_test.reshape(
                         self.batchsize, 28, 28)
Example #6
def build_model():
    """ Graph Input """
    IMG = tf.placeholder(tf.float32, [None, 100 * 100])

    # train data
    imgpath = '/home/lu/cs/DCASE_NEW/DATA/data1/newtrain1/'
    img_arr, lab_arr = GetFlatPix(imgpath)  # (17750,10000),(17750,15)
    n_train = img_arr.shape[0]
    '''encoding'''
    mu, sigma = Encoder(IMG, reuse=False)
    '''sampling by re-parameterization technique'''
    eps = tf.random_normal(shape=tf.shape(mu))  # defaults: mean 0, stddev 1, float32
    z = mu + tf.exp(sigma / 2) * eps
    '''decoding'''
    out_img = Decoder(z, reuse=False)
    out_flat = tf.reshape(out_img, [-1, 100 * 100])
    '''vae_loss'''
    recon_loss = tf.reduce_sum(tf.squared_difference(out_flat, IMG), 1)
    kl_loss = 0.5 * tf.reduce_sum(
        tf.exp(sigma) - 1. - sigma + tf.square(mu), 1)
    vae_loss = tf.reduce_mean(recon_loss + kl_loss)
    """ Training """
    # optimizers
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(vae_loss)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # create the results folder if it does not exist under the current path
    if not os.path.exists('results/'):
        os.makedirs('results/')

    for step in range(100000):

        start = (step * batch_size) % n_train
        end = min(start + batch_size, n_train)
        _, total_loss, RE_loss, KL_loss = sess.run(
            [optimizer, vae_loss, recon_loss, kl_loss],
            feed_dict={IMG: img_arr[start:end]})

        if step % 1000 == 0:  # train
            print('step:%d,Total_Loss:%f,RE_loss:%f,KL_loss:%f' %
                  (step, total_loss, np.mean(RE_loss), np.mean(KL_loss)))

            # test
            """ Testing """
            # Sampling from a random z: once training is done, take the decoder on
            # its own and feed z to it
            ZN = tf.placeholder(tf.float32, [None, n_latent])
            fake_images = Decoder(ZN, reuse=True)

            z_batch = np.random.normal(0., 1, (batch_size, n_latent)).astype(
                np.float32)
            samples = sess.run(fake_images, feed_dict={ZN: z_batch})
            samples_img = np.reshape(samples, (-1, 100, 100, 1))
            from scipy.misc import imsave as ims
            # save the generated images
            ims("./results/" + str(step) + ".jpg",
                merge(samples_img[0:batch_size], [8, 8]))
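Two things in this snippet are worth flagging. First, the `ZN` placeholder and the `Decoder(ZN, reuse=True)` op are re-created on every logging step, so the graph keeps growing; a hedged variant (reusing the names defined above) builds the sampling branch once, before the training loop, and only runs it inside the logging branch:

# build the sampling path once, right after the training ops are defined
ZN = tf.placeholder(tf.float32, [None, n_latent])
fake_images = Decoder(ZN, reuse=True)

# inside the `if step % 1000 == 0:` branch, only run the already-built op
z_batch = np.random.normal(0., 1, (batch_size, n_latent)).astype(np.float32)
samples = sess.run(fake_images, feed_dict={ZN: z_batch})

Second, `from scipy.misc import imsave as ims` relies on a function that newer SciPy releases no longer ship; `imageio.imwrite` is a common drop-in replacement.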
Example #7
    def save_imgs(self, epoch):
        r, c = 3, 3
        noise = np.random.normal(0, 1, (r * c, 100))
        gen_imgs = self.generator.predict(noise)

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5
        #ims("images/pokemon_%d.png" % epoch,utils.merge(gen_imgs,[3,3]))
        ims('images/pokemon_%d.png'%epoch, utils.merge(gen_imgs,[3,3]))
Example #8
 def train(self):
     for i in range(15000):
         xtrain, _ = self.mnist.train.next_batch(self.batch_size)
         cs, gen_loss, lat_loss, _ = self.sess.run([self.cs, self.generation_loss, self.latent_loss, self.train_op], feed_dict={self.images: xtrain})
         if i % 300 == 0:
             print("iter %d genloss %f latloss %f" % (i, gen_loss, lat_loss))
             results = np.transpose(np.array(cs), [0, 1, 2])[-1]
             results_square = np.reshape(results, [-1, 28, 28])
             print(results_square.shape)
             ims("results/"+str(i)+".jpg",merge(results_square,[8,8]))
Example #9
def merge(images, size):
    h, w = images.shape[1], images.shape[2]
    img = np.zeros((h * size[0], w * size[1]))

    for idx, image in enumerate(images):
        i = idx % size[1]
        j = idx // size[1]  # integer division so the slice indices stay ints
        img[j*h:j*h+h, i*w:i*w+w] = image
        ims("individual_images/" + str(idx)+".jpg", image)

    return img
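For reference, this `merge` tiles the first `size[0] * size[1]` single-channel images into one grid; a small usage sketch (shapes are illustrative, and note that this particular variant also writes each tile to individual_images/ via `ims`, so that directory must exist):

import os
import numpy as np

os.makedirs("individual_images", exist_ok=True)
batch = np.random.rand(64, 28, 28)   # e.g. 64 MNIST-sized images
grid = merge(batch, [8, 8])          # -> array of shape (8 * 28, 8 * 28)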
    def train(self):
        self.validation_error = 100000.0
        try:
            data = self.mnist.train
            if self.n_test == 0:
                validation, val_labels = next_batch_partial(
                    data, self.batchsize, self.n_train)
            else:
                validation = data.images[self.n_train:]
                val_labels = data.labels[self.n_train:]

            reshaped_val = validation.reshape(-1, 28, 28)
            ims(os.path.join(self.results_dir, "base.jpg"),
                merge(reshaped_val[:64], [8, 8]))
            # train
            self.best = None
            self.best_epoch = 0
            saver = tf.train.Saver(max_to_keep=2)
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                last_epochs_completed = -1
                while(
                        data.epochs_completed < self.max_epochs and
                        data.epochs_completed - self.best_epoch <
                        self.max_epochs_without_improvement
                ):
                    if math.isnan(float(self.validation_error)):
                        # Quit early on nan since it will just propagate
                        # and be the final result anyway
                        break
                    batch, batch_labels = next_batch_partial(
                        data, self.batchsize, self.n_train)
                    _, gen_loss, lat_loss = sess.run(
                        (self.optimizer, self.calc_generation_loss,
                         self.calc_latent_loss),
                        feed_dict={self.images: batch})
                    if last_epochs_completed != data.epochs_completed:
                        last_epochs_completed = data.epochs_completed
                        self.print_epoch(
                            last_epochs_completed, gen_loss, lat_loss,
                            saver, sess, validation
                        )
        except Exception as exc:
            print("Exception occurred: %s" % exc)
            self.validation_error = 100000.0
Example #11
    def test_imgs(self):
        r, c = 3, 3
        noise = np.random.normal(0, 1, (r * c, 100))

        # load json and create model
        json_file = open('weights/generator.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = model_from_json(loaded_model_json)
        weightlist = glob.glob('weights/*.h5')
        cnt = 0
        for weight in weightlist:
            # load weights into new model
            loaded_model.load_weights(weight)
            gen_imgs = loaded_model.predict(noise)  # predict with the model the weights were just loaded into
            # Rescale images 0 - 1
            gen_imgs = 0.5 * gen_imgs + 0.5
            ims('images/test_pokemon_%d.png'%cnt, utils.merge(gen_imgs,[3,3]))
            cnt = cnt+save_interval
Example #12
    def train(self):
        #visualization = self.mnist.train.next_batch(self.batch_size)[0]  # we do not need label (= [1])
        visualization = self.mnist.test.next_batch(
            self.batch_size)[0]  # we do not need label (= [1])
        reshaped_vis = visualization.reshape(self.batch_size, 28, 28)

        # get first 64 images and merge in to one image of 8 x 8 grid, and SAVE
        # for reference to compare generated images
        ims("results/base.jpg", merge(reshaped_vis[:64], [8, 8]))

        # train
        saver = tf.train.Saver(max_to_keep=2)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            for epoch in range(self.epoch):
                for idx in range(self.train_num):
                    batch = self.mnist.train.next_batch(self.batch_size)[0]

                    _, gen_loss, lat_loss = sess.run(
                        (self.optimizer, self.generation_loss,
                         self.latent_loss),
                        feed_dict={self.images: batch})

                    # dumb hack to print cost every epoch
                    # if idx % (self.n_samples - 3) == 0: # original code. it works, but it seems wrong
                    if idx % (self.train_num - 3) == 0:
                        print("epoch %d: gen_loss %f lat_loss %f" %
                              (epoch, np.mean(gen_loss), np.mean(lat_loss)))
                        saver.save(sess,
                                   os.getcwd() + "/training/train",
                                   global_step=epoch)

                        # generate test
                        generated_test = sess.run(
                            self.generated_images,
                            feed_dict={self.images: visualization})
                        generated_test = generated_test.reshape(
                            self.batch_size, 28, 28)
                        ims("results/" + str(epoch) + ".jpg",
                            merge(generated_test[:64], [8, 8]))
Example #13
    def train(self):
        visualization = self.mnist.train.next_batch(self.batchsize)[0]
        reshaped_vis = visualization.reshape(self.batchsize, 28, 28)
        print(np.shape(reshaped_vis))
        ims('results/base2.jpg', merge(reshaped_vis[:64], [8, 8]))

        saver = tf.train.Saver(max_to_keep=2)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for epoch in range(10):
                for idx in range(int(self.n_samples / self.batchsize)):
                    batch = self.mnist.train.next_batch(self.batchsize)[0]
                    _, gen_loss, lat_loss = sess.run((self.optimizer, self.generation_loss, self.latent_loss),
                                                     feed_dict={self.images: batch})

                    if idx % (self.n_samples - 3) == 0:
                        print('epoch %d: genloss %f latloss %f' % (epoch, np.mean(gen_loss), np.mean(lat_loss)))
                        saver.save(sess, os.getcwd() + '/training/train', global_step=epoch)
                        generated_test = sess.run(self.generated_images, feed_dict={self.images: visualization})
                        generated_test = generated_test.reshape(self.batchsize, 28, 28)
                        ims('results/' + str(epoch) + '.jpg', merge(generated_test[:64], [8, 8]))
Example #14
    def train(self):
        train_loss_d = train_loss_g = train_loss_vae = 0.5
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        n_samples = mnist.train.num_examples
        for epoch in range(10):
            for idx in range(int(n_samples / self.batchsize)):
                # z = np.random.uniform(-1.0,1.0, size=(self.batchsize, self.zdim)).astype(np.float32)
                batch = mnist.train.next_batch(self.batchsize)[0].reshape([self.batchsize,self.imgdim,self.imgdim,1])
                for i in range(4):
                    train_loss_g, _ = self.sess.run([self.g_loss, self.g_optim],feed_dict={self.images: batch})

                train_loss_d, _ = self.sess.run([self.d_loss, self.d_optim],feed_dict={self.images: batch})

                train_loss_vae, _ = self.sess.run([self.vae_loss, self.vae_optim],feed_dict={self.images: batch})

                print("%d: %f %f %f" % (idx, train_loss_d, train_loss_g, train_loss_vae))

                if idx % 15 == 0:
                    generated_test = self.sess.run(self.G, feed_dict={self.images: batch})
                    generated_test = generated_test.reshape(self.batchsize,28,28)
                    ims("results/"+str(idx + epoch*100000)+".jpg",merge(generated_test[:64],[8,8]))
Example #15
 def train(self):
     next_batch = get_test_images(digits_low_samples, self.batchsize)
     visualization = next(next_batch)
     print(visualization.shape)
     reshaped_vis = visualization.reshape(self.batchsize, 28, 28)
     ims("results/base.jpg", merge(reshaped_vis[:64], [8, 8]))
     # train
     saver = tf.train.Saver(max_to_keep=2)
     self.sess = tf.Session()
     sess = self.sess
     sess.run(tf.global_variables_initializer())
     for epoch in range(1000):
         batch = next(next_batch)
         _, gen_loss, lat_loss = sess.run(
             (self.optimizer, self.generation_loss, self.latent_loss),
             feed_dict={self.images: batch})
         if epoch % 100 == 0:
             print("epoch %d: genloss %f latloss %f" %
                   (epoch, np.mean(gen_loss), np.mean(lat_loss)))
             saver.save(sess,
                        os.getcwd() + "/training/train",
                        global_step=epoch)
             generated_test = sess.run(
                 self.generated_images,
                 feed_dict={self.images: visualization})
             generated_test = generated_test.reshape(self.batchsize, 28, 28)
             ims("results/" + str(epoch) + ".jpg",
                 merge(generated_test[:64], [8, 8]))
     sample_images = sess.run(
         self.sample_images,
         feed_dict={self.num_samples: num_low_samples_digits_generate})
     np.save("../vae_samples.npy", np.reshape(sample_images, (-1, 28 * 28)))
     sample_images = sample_images.reshape(num_low_samples_digits_generate,
                                           28, 28)
     ims("results/" + "vae_samples.jpg", merge(sample_images[:64], [8, 8]))
    def print_epoch(self, epoch, gen_loss, lat_loss, saver, sess,
                    validation):

        saver.save(sess, os.path.join(self.results_dir, 'checkpoints',
                                      'checkpoint'),
                   global_step=epoch)
        val_ims, val_error = sess.run(
            [self.generate_images, self.calc_generation_loss],
            feed_dict={self.images: validation})
        fn="{:04d}.jpg".format(epoch)
        ims(os.path.join(self.results_dir, fn),
            merge(val_ims.reshape(-1, 28, 28)[:64], [8, 8]))

        self.validation_error = float(np.mean(val_error))
        print("epoch {:02d}: genloss {:7.3f} latloss {:7.3f} "
              "validation_genloss {:7.3f}".format(
                  epoch,
                  np.mean(gen_loss), np.mean(lat_loss), self.validation_error))

        if self.best is None or self.validation_error < self.best:
            self.best_epoch = epoch
            self.best = self.validation_error
Example #17
def train():
    visualization = mnist.train.next_batch(batchsize)[0]
    reshaped_vis = visualization.reshape(batchsize, 28, 28)
    print(reshaped_vis.shape)
    ims("results/base.jpg", merge(reshaped_vis[:64], [8, 8]))
    # train
    saver = tf.train.Saver(max_to_keep=2)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(epochs):
            for idx in range(int(n_samples / batchsize)):
                batch = mnist.train.next_batch(batchsize)[0]
                _, gen_loss, lat_loss = sess.run(
                    (optimizer, generation_loss, KL_loss),
                    feed_dict={images: batch})
                # dumb hack to print cost every epoch
                if idx % (n_samples - 3) == 0:
                    print("epoch %d: genloss %f latloss %f" %
                          (epoch, np.mean(gen_loss), np.mean(lat_loss)))
                    saver.save(sess,
                               os.getcwd() + "/training/train",
                               global_step=epoch)
                    generated_test = sess.run(
                        generated_images, feed_dict={images: visualization})
                    generated_test = generated_test.reshape(batchsize, 28, 28)
                    ims("results/" + str(epoch) + ".jpg",
                        merge(generated_test[:64], [8, 8]))

        x_sample, y_sample = mnist.test.next_batch(2000)
        z_mu = sess.run(z_mean, feed_dict={images: x_sample})
        print(z_mu.shape, '===')
        plt.figure(figsize=(8, 6))
        plt.scatter(z_mu[:, 0], z_mu[:, 1], c=y_sample)
        plt.colorbar()
        plt.grid()
        plt.show()
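The scatter plot assumes a 2-D latent space (`z_mean` has two columns). If `mnist` was loaded with `one_hot=True`, the labels also need to be converted to class indices before being passed as the colour argument; a hedged tweak:

plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, axis=1), cmap='jet')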
Example #18
 def train(self):
     visualization_im, visualization_lab = self.mnist.train.next_batch(self.batchsize)
     reshaped_vis_im = visualization_im.reshape(self.batchsize,28,28)
     ims("results/base.jpg",merge(reshaped_vis_im[:64],[8,8]))
     # train
     saver = tf.train.Saver(max_to_keep=2)
     with tf.Session() as sess:
         sess.run(tf.initialize_all_variables())
         for epoch in range(30):
             for idx in range(int(self.n_samples / self.batchsize)):
                 batch_images, batch_labels = self.mnist.train.next_batch(
                     self.batchsize)
                 _, gen_loss, lat_loss = sess.run(
                     (self.optimizer, self.generation_loss,
                      self.latent_loss), feed_dict={
                          self.images: batch_images,
                          self.labels: batch_labels})
                 # dumb hack to print cost every epoch
                 if idx % (self.n_samples - 3) == 0:
                     print("epoch {}: genloss {} latloss {}".format(epoch, np.mean(gen_loss), np.mean(lat_loss)))
                     saver.save(sess, os.getcwd()+"/training/train",global_step=epoch)
                     generated_test = sess.run(self.generated_images, feed_dict={self.images: visualization_im, self.labels: visualization_lab})
                     generated_test = generated_test.reshape(self.batchsize,28,28)
                     ims("results/"+str(epoch)+".jpg",merge(generated_test[:64],[8,8]))
Example #19
    d_optim = tf.train.AdamOptimizer(learningrate, beta1=beta1).minimize(d_loss, var_list=d_vars)
    g_optim = tf.train.AdamOptimizer(learningrate, beta1=beta1).minimize(g_loss, var_list=g_vars)
    tf.global_variables_initializer().run()

    saver = tf.train.Saver(max_to_keep=10)

    counter = 1
    start_time = time.time()

    display_z = np.random.uniform(-1, 1, [batchsize, Z_DIM]).astype(np.float32)

    realfiles = data[0:64]
    realim = [get_image(batch_file, 64, 64) for batch_file in realfiles]
    real_img = np.array(realim).astype(np.float32)
    ims("results/imagenet/real.jpg", merge(real_img,[8,8]))

    if train:
        for epoch in range(10):
            batch_idx = (len(data)//batchsize)-2
            for idx in range(batch_idx):
                batch_files = data[idx*batchsize:(idx+1)*batchsize]
                batchim = [get_image(batch_file, 64, 64) for batch_file in batch_files]
                batch_images = np.array(batchim).astype(np.float32)

                batch_z = np.random.uniform(-1, 1, [batchsize, Z_DIM]).astype(np.float32)

                sess.run([d_optim],feed_dict={ images: batch_images, zin: batch_z })
                sess.run([g_optim],feed_dict={ zin: batch_z })

                counter += 1
    def test(self):
        with tf.Session() as sess:
            self.saver = tf.train.Saver()

            self.sess = sess
            sess.run(tf.global_variables_initializer())
            self.saver.restore(sess, 'models/TeacherStudent_MNIST_TO_Fashion')

            myPredict = self.predict()

            myIndex = 2

            testX = np.concatenate(
                (self.mnist_train_x, self.mnistFashion_train_x), axis=0)
            testY = np.concatenate(
                (self.mnist_train_y, self.mnistFashion_train_y), axis=0)
            index = [i for i in range(np.shape(testX)[0])]
            random.shuffle(index)
            testX = testX[index]
            testY = testY[index]
            batch_z = np.random.uniform(
                -1, 1, [self.batch_size, self.z_dim]).astype(np.float32)

            g_outputs = self.sess.run(self.GAN_output,
                                      feed_dict={
                                          self.inputs:
                                          testX[0:self.batch_size],
                                          self.z: batch_z,
                                          self.y: testY[0:self.batch_size]
                                      })

            g_outputs = np.reshape(g_outputs, (-1, 28, 28, 1))
            ims("results/" + "mnistToFashion_gan" + str(0) + ".png",
                merge(g_outputs[:36], [6, 6]))

            real1 = testX[0:self.batch_size]
            my11 = np.reshape(real1, (-1, 28, 28, 1))
            ims("results/" + "mnistToFashion_real" + str(0) + ".png",
                merge(my11[:36], [6, 6]))

            my11 = self.sess.run(
                self.output1,
                feed_dict={self.inputs: testX[0:self.batch_size]})
            my11 = np.reshape(my11, (-1, 28, 28, 1))
            ims("results/" + "mnistToFashion_reco" + str(0) + ".png",
                merge(my11[:36], [6, 6]))

            mnistAccuracy = self.Calculate_accuracy(self.mnist_test_x,
                                                    self.mnist_label_test)
            mnistFashionAccuracy = self.Calculate_accuracy(
                self.mnistFashion_test_x, self.mnistFashion_test_labels)

            z_mean, z_log_sigma_sq = My_Encoder_mnist(self.inputs,
                                                      "encoder1",
                                                      batch_size=64,
                                                      reuse=True)
            continous_variables = z_mean + z_log_sigma_sq * tf.random_normal(
                tf.shape(z_mean), 0, 1, dtype=tf.float32)

            z_in = tf.placeholder(tf.float32, [self.batch_size, self.z_dim])

            code1 = tf.concat((z_in, self.y, self.labels), axis=1)
            reco1 = Generator_mnist("generator1", code1, reuse=True)

            myTest_x = self.mnist_train_x[0:self.batch_size]
            myTest_y = self.mnist_train_y[0:self.batch_size]
            myTest_label = self.mnist_label[0:self.batch_size]

            myindex = 10
            myTest_x = self.mnistFashion_train_x[myindex *
                                                 self.batch_size:(myindex +
                                                                  1) *
                                                 self.batch_size]
            myTest_y = self.mnistFashion_train_y[myindex *
                                                 self.batch_size:(myindex +
                                                                  1) *
                                                 self.batch_size]
            myTest_label = self.mnistFashion_label[myindex *
                                                   self.batch_size:(myindex +
                                                                    1) *
                                                   self.batch_size]

            minx = -2.0
            minx = 0
            diff = 4.0 / 64.0
            for i in range(32):
                myNew = []
                myCodes = sess.run(continous_variables,
                                   feed_dict={self.inputs: myTest_x})
                for j in range(64):
                    myCodes[0, i] = minx + j * diff
                    reco = sess.run(reco1,
                                    feed_dict={
                                        z_in: myCodes,
                                        self.y: myTest_y,
                                        self.labels: myTest_label
                                    })
                    myNew.append(reco[0])

                myNew = np.array(myNew)
                myNew = np.reshape(myNew, (-1, 28, 28))
                ims("results/" + "PPP" + str(i) + ".png",
                    merge(myNew[:64], [8, 8]))

            bc = 0
Example #21
    def train(self):
        visualization = self.mnist.train.next_batch(self.batchsize)[0]
        reshaped_vis = visualization.reshape(self.batchsize, 28, 28)
        ims("results/base.jpg", merge(reshaped_vis[:64], [8, 8]))
        # train
        saver = tf.train.Saver(max_to_keep=2)
        with tf.Session() as sess:
            sess.run(tf.initialize_all_variables())
            for epoch in range(10):
                for idx in range(int(self.n_samples / self.batchsize)):
                    batch = self.mnist.train.next_batch(self.batchsize)[0]
                    _, gen_loss, lat_loss = sess.run(
                        (self.optimizer, self.generation_loss,
                         self.latent_loss),
                        feed_dict={self.images: batch})
                    # dumb hack to print cost every epoch
                    if idx % (self.n_samples - 3) == 0:
                        print("epoch %d: genloss %f latloss %f" % (
                            epoch, np.mean(gen_loss), np.mean(lat_loss)))
                        saver.save(sess,
                                   os.getcwd() + "/training/train",
                                   global_step=epoch)
                        generated_test = sess.run(
                            self.generated_images,
                            feed_dict={self.images: visualization})
                        generated_test = generated_test.reshape(
                            self.batchsize, 28, 28)
                        ims("results/" + str(epoch) + ".jpg",
                            merge(generated_test[:64], [8, 8]))
Example #22
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer(), feed_dict={keep_prob: 0.9})
    lastvalue = np.ones((4))
    
    if isWeight:
        saver.restore(sess, 'models/Dropout_Sinmple_6_Gaussian')
        tIndex = 7
        x_fixed = x_train[batch_size * tIndex:batch_size * tIndex + batch_size]
        x_fixed = x_test[batch_size * tIndex:batch_size * tIndex + batch_size]
        x_fixed = np.reshape(x_fixed, (-1, 28 * 28))

        reco,dropoutSamples = sess.run([y,dropout_samples], feed_dict={x_hat: x_fixed, x: x_fixed, keep_prob: 1.0})
        reco = np.reshape(reco, (-1, 28, 28))
        x_fixed = np.reshape(x_fixed, (-1, 28, 28))
        ims("results/" + "MNIST__Real_Bernoulli" + str(0) + ".png", merge(x_fixed[:64], [8, 8]))
        ims("results/" + "MNIST__Reco_Bernoulli" + str(0) + ".png", merge(reco[:64], [8, 8]))

        # yy = sess.run(y,feed_dict={x_hat: x_fixed, x: x_fixed, keep_prob: 1.0})
        # yy = np.reshape(yy,(-1,28,28))
        # ims("results/" + "VAE" + str(0) + ".jpg", merge(yy[:64], [8, 8]))

        z_in1 = tf.placeholder(tf.float32, shape=[None, dim_z])
        z_in2 = tf.placeholder(tf.float32, shape=[None, dim_z])
        z_in3 = tf.placeholder(tf.float32, shape=[None, dim_z])
        z_in4 = tf.placeholder(tf.float32, shape=[None, dim_z])
        z_in5 = tf.placeholder(tf.float32, shape=[None, dim_z])
        z_in6 = tf.placeholder(tf.float32, shape=[None, dim_z])

        mix_in = tf.placeholder(tf.float32, shape=[None, 6])
        x_in1 = tf.placeholder(tf.float32, shape=[None, 28 * 28])
Example #23
        y4 = sess.run(yy, feed_dict={z_in2: z_samples, keep_prob: 0.9})

        z_samples = z1_samples + z2_samples + z3_samples + z4_samples
        y5 = sess.run(yy, feed_dict={z_in2: z_samples, keep_prob: 0.9})

        yyArr = np.zeros((batch_size, 28 * 28))
        yyArr[0:10, :] = x_fixed[0:10]
        yyArr[10:20, :] = y1[0:10]
        yyArr[20:30, :] = y2[0:10]
        yyArr[30:40, :] = y3[0:10]
        yyArr[40:50, :] = y4[0:10]
        yyArr[50:60, :] = y5[0:10]

        yyArr = np.reshape(yyArr, (-1, 28, 28))

        ims("results/" + "VAE" + str(0) + ".jpg", merge(yyArr[:60], [6, 10]))
        bc = 0
    else:
        lastvalue = np.ones((10))
        valid_reco = Give_Reconstruction(x_hat, x)
        x_valid = x_test[0:batch_size]
        x_valid = np.reshape(x_valid, (-1, 28 * 28))
        bestScore = 100000

        n_epochs = 100
        for epoch in range(n_epochs):
            # Random shuffling
            np.random.shuffle(train_total_data)
            train_data_ = train_total_data[:, :-mnist_data.NUM_LABELS]

            # Loop over all batches
Example #24
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    weight = 'gen.h5'
    loaded_model.load_weights(weight)

    Hash = sys.argv[1]
    stat2 = sys.argv[2]
    if stat2[-1] == '/':
        pass
    else:
        stat2 = stat2 + '/'

    # .tsv files
    if Hash[-4:] == '.tsv':
        A = pandas.read_csv(Hash, header=None)
        for i in range(len(A)):
            ims(stat2 + str(A[0][i]) + '.png', hash_to_im(A[0][i][4:-8]))

    # .txt files
    if Hash[-4:] == '.txt':
        f = open(Hash)
        b = f.read().split('\n')
        f.close()
        for i in range(len(b)):
            if len(b[i]) == 52:

                ims(stat2 + b[i] + '.png', hash_to_im(str(b[i])[4:-8]))
    # Hash
    else:
        ims(stat2 + Hash + '.png', hash_to_im(Hash[4:-8]))
Example #25
    def train(self):
        batchsize = 100
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            start_time = time.time()
            for epoch in range(501):
                for idx in range(int(self.n_samples / batchsize)):
                    batch = self.mnist.unbalance01.next_batch(batchsize)[0]
                    _, gen_loss, lat_loss = sess.run(
                        (self.optimizer, self.generation_loss,
                         self.latent_loss),
                        feed_dict={
                            self.images: batch,
                            self.batchsize: batchsize
                        })
                    # print("epoch {}: genloss {} latloss{}".format(epoch, np.mean(gen_loss), np.mean(lat_loss)))
                    if idx == range(int(self.n_samples /
                                        batchsize))[-1] and epoch % 50 == 0:
                        end_time = time.time()
                        print(
                            "Training epoch {}: --- {} seconds --- genloss {} latloss{}"
                            .format(epoch, end_time - start_time,
                                    np.mean(gen_loss), np.mean(lat_loss)))

                        # plt.scatter(latent_var[class0_idx,0],latent_var[class0_idx,1],c='red', s=1)
                        # plt.scatter(latent_var[class1_idx,0],latent_var[class1_idx,1],c='blue', s=1)
                        # plt.savefig('latent_plots/unbalance01_epoch{}_{}.png'.format(epoch, idx))
                        # plt.close()

                        # ratio: 10:1
                        num_random_samples = 900
                        im_w, im_h = 30, 30
                        r = np.random.RandomState(1234)
                        randoms = np.array([
                            r.normal(0, 1, 20)
                            for _ in range(num_random_samples)
                        ],
                                           dtype=np.float32)
                        random_samples = np.zeros((1, 28, 28))
                        for i in range(num_random_samples // 100):
                            generated_test = sess.run(
                                self.generated_images,
                                feed_dict={
                                    self.guessed_z:
                                    randoms[i * 100:(i + 1) * 100],
                                    self.batchsize: batchsize
                                })
                            generated_test = generated_test.reshape(
                                batchsize, 28, 28)
                            random_samples = np.concatenate(
                                (random_samples, generated_test), axis=0)
                        random_samples = random_samples[1:]
                        if epoch == 500:
                            ims("results/vae_10to1.jpg",
                                merge(random_samples, [im_w, im_h]))
                        # if epoch == 500:
                        #     for i, image in enumerate(random_samples):
                        #         ims('results_random_samples/random01_epoch500_10to1/temp{}.jpg'.format(i), image)

                        # # ratio: 100:1
                        # num_random_samples = 10000
                        # im_w, im_h = 100, 100
                        # r = np.random.RandomState(1234)
                        # randoms = np.array([r.normal(0, 1, 20) for _ in range(num_random_samples)], dtype = np.float32)
                        # random_samples = np.zeros((1,28,28))
                        # for i in range(num_random_samples//100):
                        #     generated_test = sess.run(self.generated_images, feed_dict={self.guessed_z: randoms[i*100:(i+1)*100], self.batchsize: batchsize})
                        #     generated_test = generated_test.reshape(batchsize,28,28)
                        #     random_samples = np.concatenate((random_samples, generated_test), axis=0)
                        # random_samples = random_samples[1:]
                        # if epoch == 500:
                        #     ims("results/random01_epoch500_100to1/all_images.jpg",merge(random_samples,[im_w, im_h]))
                        # # if epoch == 500:
                        # #     for i, image in enumerate(random_samples):
                        # #         ims('results_random_samples/random_epoch500_100to1/temp{}.jpg'.format(i), image)

                        # ratio: 1000:1
                        # num_random_samples = 96100
                        # im_w, im_h = 310, 310
                        # r = np.random.RandomState(1234)
                        # randoms = np.array([r.normal(0, 1, 20) for _ in range(num_random_samples)], dtype = np.float32)
                        # random_samples = np.zeros((1,28,28))
                        # for i in range(num_random_samples//100):
                        #     st()
                        #     generated_test = sess.run(self.generated_images, feed_dict={self.guessed_z: randoms[i*100:(i+1)*100], self.batchsize: batchsize})
                        #     generated_test = generated_test.reshape(batchsize,28,28)
                        #     random_samples = np.concatenate((random_samples, generated_test), axis=0)
                        # random_samples = random_samples[1:]
                        # if epoch == 500:
                        #     ims("results/random01_epoch500_1000to1/all_images.jpg",merge(random_samples,[im_w, im_h]))
                        # # if epoch == 500:
                        # #     for i, image in enumerate(random_samples):
                        # #         ims('results_random_samples/random_epoch500_100to1/temp{}.jpg'.format(i), image)

                        start_time = time.time()
            _, tot_loss, loss_likelihood, loss_divergence = sess.run(
                (train_op, loss, neg_marginal_likelihood, KL_divergence),
                feed_dict={
                    x_hat: batch_xs_input,
                    x: batch_xs_target,
                    keep_prob: 1.0
                })

            print(
                "epoch %d: L_tot %03.2f L_likelihood %03.2f L_divergence %03.2f"
                % (epoch, tot_loss, loss_likelihood, loss_divergence))
        # print cost every epoch

        y_PRR = sess.run(y, feed_dict={x_hat: x_fixed, keep_prob: 1})
        y_RPR = np.reshape(y_PRR, (-1, 64, 64, 3))
        ims("results/" + "VAE" + str(epoch) + ".jpg",
            merge2(y_RPR[:64], [8, 8]))

        loss_likelihood = loss_likelihood * -1
        if bestScore > loss_likelihood:
            bestScore = loss_likelihood
            saver.save(sess, "models/Dropout_Simple_Celeba4_Bernoulli")

        if epoch > 0:
            x_fixed_image = np.reshape(x_fixed, (-1, 64, 64, 3))
            ims("results/" + "Real" + str(epoch) + ".jpg",
                merge2(x_fixed_image[:64], [8, 8]))

    # saver.save(sess, "F:/Third_Experiment/Dropout_Simple_Celeba4")
Example #27
        batch_labels = np.random.multinomial(1,
                                             discrete_len * [float(1.0 / discrete_len)],
                                             size=[batch_size])

        myArr = []
        for k in range(10):
            batch_labels = np.zeros((batch_size, discrete_len))
            batch_labels[:, k] = 1  # set the one-hot class before building the codes so it actually takes effect
            batch_codes = np.concatenate((batch_labels, np.random.uniform(-1, 1, size=(batch_size, continous_len))),
                                         axis=1)
            batch_z = np.random.uniform(-1, 1, [batch_size, noise_len]).astype(np.float32)

            y_PRR = sess.run(Generated_Imags, feed_dict={x_hat: batch_images, z_in: batch_z, y_in: batch_codes})
            y_RPR = np.reshape(y_PRR, (-1, 32, 32, 3))

            ims("results/" + "myh" + str(0) + ".png", merge2(y_RPR, [8, 8]))

            for k1 in range(8):
                myArr.append(y_RPR[k1])

        myArr = np.array(myArr)
        ims("results/" + "myh" + str(0) + ".png", merge2(myArr, [10, 8]))
        ims("results/" + "real" + str(0) + ".png", merge2(batch_images, [8, 8]))

        b1 = 0
        b = 0

        b = 0
    else:

        batch_images = x_train[0:batch_size]
Example #28
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 5000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")

INITIAL_LEARNING_RATE = 0.01
NUM_CLASSES = 10
FT_DIM = 3
CODE_LEN = 20

imHeight = 28
imWidth = 28
numCh = 1


# Get images and labels
images_tr, labels_tr = mnistip.distorted_inputs(randFlip=True)

init = tf.initialize_all_variables()
sess = tf.InteractiveSession()
sess.run(init)
tf.train.start_queue_runners(sess=sess)

_images, _labels = sess.run([images_tr, labels_tr])        
reshaped_vis = np.squeeze(_images)
ims("results/base.jpg",merge(reshaped_vis[:64],[8,8]))
    
print(_images.shape)
Example #29
def build_model():
    """ Graph Input """
    IMG = tf.placeholder(tf.float32, [None, 10000], name='real_img')
    IMG_matrix = tf.reshape(IMG, [-1, 100, 100, 1], name='img_matrix')
    ZN = tf.placeholder(tf.float32, [None, 100], name='noise')
    '''train data'''
    imgpath = './newtrain1/'
    img_arr, lab_arr = GetFlatPix(imgpath)  # (17750,10000),(17750,15)
    n_train = img_arr.shape[0]
    """ Loss Function """
    G_sample = generator(ZN, is_training=True, reuse=False)  #
    # print(G_sample)#outputs = tf.convert_to_tensor(inputs)
    D_real_out, D_real_pred = discriminator(IMG_matrix,
                                            is_training=True,
                                            reuse=False)

    D_fake_out, D_fake_pred = discriminator(G_sample,
                                            is_training=True,
                                            reuse=True)

    # get loss for discriminator (sigmoid cross-entropy is used here)
    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=D_real_out, labels=tf.ones_like(D_real_out)))
    d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=D_fake_out, labels=tf.zeros_like(D_fake_out)))
    d_loss = d_loss_real + d_loss_fake

    # get loss for generator
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=D_fake_out, labels=tf.ones_like(D_fake_out)))
    """ Training """
    # ------------------ Option 1 ------------------
    # # divide trainable variables into a group for D and a group for G
    # t_vars = tf.trainable_variables()
    # d_vars = [var for var in t_vars if 'dis_' in var.name]
    # g_vars = [var for var in t_vars if 'gen_' in var.name]
    #
    # # optimizers
    # with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
    #     d_optimizer = tf.train.AdamOptimizer(1e-3, beta1=0.5) \
    #               .minimize(d_loss, var_list=d_vars)
    #     g_optimizer = tf.train.AdamOptimizer(1e-3, beta1=0.5) \
    #               .minimize(g_loss, var_list=g_vars)

    # -----------------------------------------------

    # ------------------ Option 2 ------------------
    gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 scope='generator')
    dis_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 scope='discriminator')

    gen_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                       scope='generator')
    with tf.control_dependencies(gen_update_ops):
        train_gen = tf.train.AdamOptimizer(1e-3, beta1=0.5).minimize(
            g_loss, var_list=gen_vars)

    dis_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                       scope='discriminator')
    with tf.control_dependencies(dis_update_ops):
        train_dis = tf.train.AdamOptimizer(1e-3, beta1=0.5).minimize(
            d_loss, var_list=dis_vars)

    # """" Testing """
    # # for test
    # fake_images = generator(ZN, is_training=False, reuse=True)  # for testing
    """ Summary """
    d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
    d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
    d_loss_sum = tf.summary.scalar("d_loss", d_loss)
    g_loss_sum = tf.summary.scalar("g_loss", g_loss)

    # final summary operations
    g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])
    d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])

    # ------------------ start training ------------------
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # saver to save model
    saver = tf.train.Saver()
    # summary writer
    writer = tf.summary.FileWriter(logdir=LOGS_DIRECTORY,
                                   graph=sess.graph)  # changed

    if not os.path.exists('result_DCGAN/'):
        os.makedirs('result_DCGAN/')
    '''loop for train'''
    start_time = time.time()
    for step in range(200000):

        start = (step * batch_size) % n_train
        end = min(start + batch_size, n_train)

        img_batch = img_arr[start:end]
        # generator noise to feed to the generator
        noise_batch = np.random.uniform(-1, 1,
                                        [batch_size, z_dim]).astype(np.float32)

        # (1) Keep G fixed and train D first; D sees both real sample images and generator images
        _, summary_str, dl = sess.run([train_dis, d_sum, d_loss],
                                      feed_dict={
                                          IMG: img_batch,
                                          ZN: noise_batch
                                      })
        writer.add_summary(summary_str, step)

        # (2) Keep D fixed and train G (G only needs the noise feed)
        _, summary_str, gl = sess.run([train_gen, g_sum, g_loss],
                                      feed_dict={ZN: noise_batch})
        writer.add_summary(summary_str, step)

        if step % 1000 == 0:
            print('step:%d,discriminator_loss:%f,generator_loss:%f' %
                  (step, dl, gl))
            """ Testing """
            fake_images = generator(ZN, is_training=False, reuse=True)  # for testing
            final_samples = sess.run(fake_images, feed_dict={ZN: noise_batch})
            final_samples = np.reshape(final_samples, (-1, 100, 100, 1))
            from scipy.misc import imsave as ims
            # save the generated images
            ims("./result_DCGAN/" + str(step) + ".jpg",
                merge(final_samples[0:batch_size], [8, 8]))

    # save model for final step
    save_path = saver.save(sess, MODEL_DIRECTORY)
    print("Model saved in file: %s" % save_path)
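Option 2 above only finds variables and update ops if the generator and discriminator are actually built under variable scopes named 'generator' and 'discriminator'; otherwise the scope-filtered collections come back empty. A minimal, hypothetical generator skeleton consistent with that assumption (the layer sizes are illustrative):

import tensorflow as tf

def generator(z, is_training=True, reuse=False):
    # layers live under this exact scope so that
    # tf.get_collection(..., scope='generator') picks up their variables and UPDATE_OPS
    with tf.variable_scope('generator', reuse=reuse):
        net = tf.layers.dense(z, 1024, activation=tf.nn.relu)
        net = tf.layers.batch_normalization(net, training=is_training)
        out = tf.layers.dense(net, 100 * 100, activation=tf.nn.sigmoid)
        return tf.reshape(out, [-1, 100, 100, 1])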
Example #30
def train():

    X_p = tf.placeholder(tf.float32, [None, img_dim])
    # kp=tf.placeholder(tf.float32)
    # encoding(img(x_hat)=>encoder=>mean,variance)
    mu, sigma = gaussian_MLP_encoder(X_p, 512, 100, keep_prob=0.5)

    # sampling by re-parameterization technique
    eps = tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)
    z_composed = mu + tf.exp(sigma / 2) * eps

    # decoding(compose_z=>decoder=>img'(y))  #x is real img
    y = bernoulli_MLP_decoder(z_composed, 512, 10000,
                              keep_prob=0.5)  # y produced by the sigmoid output
    y = tf.clip_by_value(y, 1e-8, 1 - 1e-8)  #avoid loss NAN

    # loss
    recon_loss = -tf.reduce_sum(X_p * tf.log(y) + (1 - X_p) * tf.log(1 - y), 1)
    kl_loss = 0.5 * tf.reduce_sum(
        tf.square(mu) + tf.square(sigma) - tf.log(1e-8 + tf.square(sigma)) - 1,
        1)

    recon_loss = tf.reduce_mean(recon_loss)
    kl_loss = tf.reduce_mean(kl_loss)
    loss_op = recon_loss + kl_loss

    #optimizer
    train_op = tf.train.AdamOptimizer().minimize(loss_op)

    #start session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    if not os.path.exists('mlp_result/'):
        os.makedirs('mlp_result/')

    for step in range(100000):

        start = (step * batch_size) % n_train
        end = min(start + batch_size, n_train)
        _, total_loss, RE_loss, KL_loss = sess.run(
            [train_op, loss_op, recon_loss, kl_loss],
            feed_dict={X_p: img_arr[start:end]})

        if step % 1000 == 0:  # train
            print('step:%d,Total_Loss:%f,RE_loss:%f,KL_loss:%f' %
                  (step, total_loss, RE_loss, KL_loss))

            # test
            """ Testing """
            # Sampling from a random z: once training is done, take the decoder on
            # its own and feed z to it
            ZN = tf.placeholder(tf.float32, [None, n_latent])
            fake_images = decoder(ZN, 10000, 512)

            z_batch = np.random.normal(0., 1, (batch_size, n_latent)).astype(
                np.float32)
            samples = sess.run(fake_images, feed_dict={ZN: z_batch})
            samples_img = np.reshape(samples, (-1, 100, 100, 1))
            from scipy.misc import imsave as ims
            # save the generated images
            ims("./mlp_result/" + str(step) + ".jpg",
                merge(samples_img[0:batch_size], [8, 8]))
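One thing to check in this snippet: `z_composed = mu + tf.exp(sigma / 2) * eps` treats `sigma` as the log-variance, while the KL term squares it as if it were a standard deviation. Under the log-variance reading, the closed-form KL matches the one used in Example #6; a hedged, consistent version would be:

# KL(q(z|x) || N(0, I)) with sigma interpreted as the log-variance, matching
# the reparameterization z_composed = mu + exp(sigma / 2) * eps used above
kl_loss = 0.5 * tf.reduce_sum(
    tf.exp(sigma) + tf.square(mu) - 1.0 - sigma, 1)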
Example #31
    loaded_model = model_from_json(loaded_model_json)
    weight='gen.h5'
    loaded_model.load_weights(weight)
    
    
    Hash=sys.argv[1]
    stat2=sys.argv[2]
    if stat2[-1]=='/':
        pass
    else:
        stat2=stat2+'/'
        
    # .tsv files
    if Hash[-4:]=='.tsv':
        A=pandas.read_csv(Hash,header=None)
        for i in range(len(A)):
            ims(stat2+str(A[0][i])+'.png',hash_to_im(A[0][i][4:-8]))
            
    # .txt files
    if Hash[-4:]=='.txt':
        f = open(Hash)
        b=f.read().split('\n')
        f.close()
        for i in range(len(b)):
            if len(b[i])==52:
                
                ims(stat2+b[i]+'.png',hash_to_im(str(b[i])[4:-8]))
    # Hash
    else:
        ims(stat2+Hash+'.png',hash_to_im(Hash[4:-8]))
Example #32
        )
        a4 = mpimg.imread(
            'F:/Third_Experiment/NewExperiment/InfoVAE/results/myselection/InfoVAE_womanToMan3.png'
        )
        a5 = mpimg.imread(
            'F:/Third_Experiment/NewExperiment/InfoVAE/results/myselection/InfoVAE_womanToMan4.png'
        )

        myImage = np.zeros((5 * 64, 10 * 64, 3))
        myImage[0:64, 0:64 * 10, 0:3] = a1
        myImage[64:64 * 2, 0:64 * 10, 0:3] = a2
        myImage[64 * 2:64 * 3, 0:64 * 10, 0:3] = a3
        myImage[64 * 3:64 * 4, 0:64 * 10, 0:3] = a4
        myImage[64 * 4:64 * 5, 0:64 * 10, 0:3] = a5

        ims("results/" + "InfoVAE_Emotion" + str(00) + ".png", myImage)

        bc = 0
        # load dataset
        # load dataset
        file_dir = "C:/commonData/rendered_chairs/rendered_chairs/"
        files = file_name(file_dir)
        data_files = files
        data_files = sorted(data_files)
        data_files = np.array(data_files)  # for tl.iterate.minibatches
        n_examples = np.shape(data_files)[0]

        # load dataset
        count1 = 0
        image_size = 64