def sample(self, n_batches):
    self.session.run(tf.initialize_all_variables())
    self.load()

    all_voxels = []
    all_imgs = []
    all_zs = []
    for i in xrange(n_batches):
        # Draw a batch of latent codes and decode them into voxel grids and rendered images.
        batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_size])
        all_zs.append(batch_z)
        voxels = self.voxels.eval(session=self.session,
                                  feed_dict={self.z: batch_z, self.train_flag: False})
        imgs = self.final_imgs.eval(session=self.session,
                                    feed_dict={self.z: batch_z, self.train_flag: False})
        all_voxels.append(np.array(voxels))
        all_imgs.append(np.array(imgs))

    all_voxels = np.concatenate(all_voxels, axis=0)
    all_imgs = np.concatenate(all_imgs, axis=0)
    all_zs = np.vstack(all_zs)
    print all_voxels.shape

    # Make sure the output folder exists before saving (mirrors test()).
    create_folder("results/PrGAN{}".format(self.dataset_name))
    np.save("results/PrGAN{}".format(self.dataset_name), all_zs)
    ops.save_voxels(all_voxels, "results/PrGAN{}".format(self.dataset_name))
    ops.save_separate_images(all_imgs, "results/PrGAN{}".format(self.dataset_name))
def test(self, encs):
    self.session.run(tf.initialize_all_variables())
    self.load()

    # Decode a precomputed set of latent codes loaded from disk.
    z = np.load(encs)
    voxels = self.voxels.eval(session=self.session,
                              feed_dict={self.z: z, self.train_flag: False})
    imgs = self.final_imgs.eval(session=self.session,
                                feed_dict={self.z: z, self.train_flag: False})

    create_folder("results/PrGAN{}".format(self.dataset_name))
    ops.save_voxels(voxels, "results/PrGAN{}".format(self.dataset_name))
    ops.save_separate_images(imgs, "results/PrGAN{}".format(self.dataset_name))
def sample(self, n_batches):
    self.session.run(tf.initialize_all_variables())
    self.load()

    all_voxels = []
    for i in xrange(n_batches):
        # Sample latent codes and decode them into voxel grids.
        batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_size])
        voxels = self.voxels.eval(session=self.session,
                                  feed_dict={self.z: batch_z, self.train_flag: False})
        all_voxels.append(np.array(voxels))

    all_voxels = np.concatenate(all_voxels, axis=0)
    print all_voxels.shape

    # Make sure the output folder exists before saving.
    create_folder("results/VoxelGAN{}".format(self.dataset_name))
    ops.save_voxels(all_voxels, "results/VoxelGAN{}".format(self.dataset_name))
def train(self):
    if not os.path.exists(os.path.join("data", self.dataset_name)):
        print "No GAN training files found. Training aborted. =("
        return

    dataset_files = glob.glob("data/" + self.dataset_name + "/*.png")
    dataset_files = np.array(dataset_files)
    n_files = dataset_files.shape[0]
    sample_z = np.random.uniform(-1, 1, [self.batch_size, self.z_size])
    training_step = 0

    self.session.run(tf.initialize_all_variables())
    self.load()

    for epoch in xrange(self.n_iterations):
        rand_idxs = np.random.permutation(range(n_files))
        n_batches = n_files // self.batch_size

        for batch_i in xrange(n_batches):
            idxs_i = rand_idxs[batch_i * self.batch_size:(batch_i + 1) * self.batch_size]
            imgs_batch = ops.load_imgbatch(dataset_files[idxs_i], color=False)
            #imgs_batch = ops.load_voxelbatch(dataset_files[idxs_i])
            batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_size])

            # Evaluate discriminator outputs on fake and real batches, plus the generator loss.
            dloss_fake = self.D_fake.eval(session=self.session,
                                          feed_dict={self.z: batch_z, self.train_flag: False})
            dloss_real = self.D_real.eval(session=self.session,
                                          feed_dict={self.images: imgs_batch, self.train_flag: False})
            gloss = self.G_loss.eval(session=self.session,
                                     feed_dict={self.z: batch_z, self.images: imgs_batch,
                                                self.train_flag: False})

            # Skip the discriminator update when its average accuracy exceeds the margin,
            # giving the generator a chance to catch up.
            train_discriminator = True
            margin = 0.8
            dacc_real = np.mean(dloss_real)
            dacc_fake = np.mean(np.ones_like(dloss_fake) - dloss_fake)
            dacc = (dacc_real + dacc_fake) * 0.5
            #print np.mean(dloss_real)
            #print np.mean(dloss_fake)
            if dacc > margin:
                train_discriminator = False
            #if dloss_fake > 1.0-margin or dloss_real > 1.0-margin:
            #    train_generator = False
            #if train_discriminator is False and train_generator is False:
            #    train_generator = train_discriminator = True

            print "EPOCH[{}], BATCH[{}/{}]".format(epoch, batch_i, n_batches)
            print "Discriminator avg acc: {}".format(dacc)
            print "Discriminator real mean: {}".format(np.mean(dloss_real))
            print "Discriminator fake mean: {}".format(np.mean(dloss_fake))
            print "Generator Loss: {}".format(gloss)

            # Update discriminator
            if train_discriminator:
                print "***Discriminator trained.***"
                self.session.run(self.D_optim,
                                 feed_dict={self.images: imgs_batch, self.z: batch_z,
                                            self.train_flag: True})

            # Update generator
            #if dacc > 0.9:
            #    self.session.run(self.G_optim_classic, feed_dict={self.z: batch_z})
            #if dacc > margin + 1.0:
            self.session.run(self.G_optim_classic,
                             feed_dict={self.z: batch_z, self.images: imgs_batch,
                                        self.train_flag: True})
            #self.session.run(self.G_optim, feed_dict={self.z: batch_z, self.images: imgs_batch, self.train_flag: True})

            if batch_i % 50 == 0:
                # Periodically decode a fixed latent batch, save renderings and voxels,
                # and write a checkpoint.
                rendered_images = self.G.eval(session=self.session,
                                              feed_dict={self.z: sample_z, self.images: imgs_batch,
                                                         self.train_flag: False})
                rendered_images = np.array(rendered_images)
                voxels = self.voxels.eval(session=self.session,
                                          feed_dict={self.z: sample_z, self.images: imgs_batch,
                                                     self.train_flag: False})
                voxels = np.array(voxels)

                create_folder("results/{}".format(self.dataset_name))
                ops.save_images(rendered_images, [8, 8],
                                "results/{}/{}.png".format(self.dataset_name,
                                                           epoch * n_batches + batch_i))
                ops.save_images(imgs_batch, [8, 8], "sanity_chairs.png")
                ops.save_voxels(voxels, "results/{}".format(self.dataset_name))

                print "Saving checkpoint..."
                create_folder('checkpoint/{}'.format(self.dataset_name))
                self.saver.save(self.session,
                                'checkpoint/{}/model.ckpt'.format(self.dataset_name),
                                global_step=training_step)
                print "***CHECKPOINT SAVED***"

            training_step += 1

            # Log per-batch losses so the training curves can be plotted later.
            self.history["generator"].append(gloss)
            self.history["discriminator_real"].append(dloss_real)
            self.history["discriminator_fake"].append(dloss_fake)
            np.save(os.path.join(self.logpath, "generator.npy"),
                    np.array(self.history["generator"]))
            np.save(os.path.join(self.logpath, "discriminator_real.npy"),
                    np.array(self.history["discriminator_real"]))
            np.save(os.path.join(self.logpath, "discriminator_fake.npy"),
                    np.array(self.history["discriminator_fake"]))
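# Minimal usage sketch for the methods above. This is illustrative only: the class
# name `PrGAN` is inferred from the result paths, and its constructor arguments
# (dataset_name, batch_size, z_size) are assumptions, not the actual signature.
# It also assumes `tensorflow as tf` and `numpy as np` are imported at module level.
if __name__ == "__main__":
    with tf.Session() as sess:
        model = PrGAN(sess, dataset_name="chairs", batch_size=64, z_size=201)
        model.train()                            # optimize G and D, writing periodic checkpoints
        model.sample(n_batches=4)                # decode random latent batches to voxels/images
        model.test("results/PrGANchairs.npy")    # decode a previously saved set of latent codes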