# Assumed imports (TF1-era Keras API); dataset.*, read_image, plot_image,
# plot_images and img_renorm are this repository's own data-loading and
# plotting utilities.
import numpy as np
import tensorflow as tf
from keras import backend as K


def test_vae(vae):
    # Reconstruct a small batch from each CelebA split and plot inputs next to
    # their reconstructions.
    for part in ('train', 'val', 'test'):
        input_images = dataset.fetch_smallbatch_from_celeba('CelebA', part=part)
        rec_images = vae.get_layer('decoder').predict(
            vae.get_layer('encoder').predict(input_images)[0])
        plot_image(img_renorm(input_images), img_renorm(rec_images))

def random(vae):
    z_shape = K.int_shape(vae.get_layer('encoder').outputs[0])
    z = K.random_normal(shape=(z_shape[1],))
    z = z.eval(session=tf.Session())
    z = np.expand_dims(z, axis=0)
    modified_image = vae.get_layer('decoder').predict(z)
    plot_image(img_renorm(modified_image), img_renorm(modified_image))

def test_trans(translator, discriminator, image_file_name, target_gender):
    # Translate a single image to the target gender and report the
    # discriminator's source (real/fake) and classification outputs for both
    # the input and the translated image.
    image = read_image(image_file_name)
    image = np.expand_dims(image, axis=0)
    target_gender = np.expand_dims(target_gender, axis=0)
    translated_img = translator.predict([image, target_gender])
    r_src, _, r_cls = discriminator.predict(image)
    g_src, _, g_cls = discriminator.predict(translated_img)
    plot_image(img_renorm(image), img_renorm(translated_img))
    print('input: ' + str(r_src) + ' , ' + str(r_cls) +
          ' - translated: ' + str(g_src) + ' , ' + str(g_cls))

def trans_attribute(vae, image_file_name, attribute_vectors_file, attr_trans_dic):
    # Shift one image's latent code along precomputed attribute vectors, scaled
    # by the strengths in attr_trans_dic, then decode and plot the result.
    # allow_pickle is needed to load the dict saved by compute_attribute_vector
    # on recent NumPy versions.
    attribute_vectors = np.load(attribute_vectors_file, allow_pickle=True).item()
    image = read_image(image_file_name)
    image = np.expand_dims(image, axis=0)
    z = vae.get_layer('encoder').predict(image)[0]
    for attr, trans in attr_trans_dic.items():
        z[0] += attribute_vectors[attr] * trans
    modified_image = vae.get_layer('decoder').predict(z)
    plot_image(img_renorm(image), img_renorm(modified_image))

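# Usage sketch (not part of the original file): this assumes a trained `vae`
# is already in memory and that 'attribute_vectors.npy' was written by
# compute_attribute_vector below. The image path, file name and shift
# strengths are illustrative; 'Smiling' and 'Eyeglasses' are standard CelebA
# attribute names.
def example_trans_attribute(vae):
    trans_attribute(vae,
                    'CelebA/img_align_celeba/000001.jpg',
                    'attribute_vectors.npy',
                    {'Smiling': 1.0, 'Eyeglasses': 0.5})
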
def compute_attribute_vector(vae, attrs, attribute_vectors_file, batch_size=32):
    # For each attribute, average the latent codes of images that have it and
    # of images that do not, then store the difference (positive mean minus
    # negative mean) as that attribute's direction in latent space.
    sess = K.get_session()
    encoder = vae.get_layer('encoder')
    z_shape = K.int_shape(encoder.outputs[0])

    pos_vectors = np.zeros((len(attrs), z_shape[1]), np.float32)
    neg_vectors = np.zeros((len(attrs), z_shape[1]), np.float32)
    pos_nums = np.zeros((len(attrs), 1), np.int32)
    neg_nums = np.zeros((len(attrs), 1), np.int32)

    data, total_num = dataset.load_full_celeba_with_labels('CelebA', batch_size, attrs)
    iterator = data.make_one_shot_iterator()
    next_element = iterator.get_next()

    while True:
        try:
            images, labels = sess.run(next_element)
            z = encoder.predict(images)[0]
            for i in range(len(attrs)):
                # CelebA labels are +1 / -1 for attribute present / absent.
                pos_idx = np.argwhere(labels[:, i] == 1)[:, 0]
                neg_idx = np.argwhere(labels[:, i] == -1)[:, 0]
                pos_vec = np.sum(z[pos_idx, :], 0)
                neg_vec = np.sum(z[neg_idx, :], 0)
                pos_nums[i][0] += len(pos_idx)
                neg_nums[i][0] += len(neg_idx)
                pos_vectors[i] += pos_vec
                neg_vectors[i] += neg_vec
        except tf.errors.OutOfRangeError:
            break

    pos_vectors /= pos_nums
    neg_vectors /= neg_nums

    attribute_vectors = {}
    pos_images = vae.get_layer('decoder').predict(pos_vectors)
    neg_images = vae.get_layer('decoder').predict(neg_vectors)
    for i in range(len(attrs)):
        attribute_vectors[attrs[i]] = pos_vectors[i] - neg_vectors[i]
        # Draw the decoded mean positive/negative faces for debugging.
        print(attrs[i])
        plot_image([img_renorm(pos_images[i])], [img_renorm(neg_images[i])])

    np.save(attribute_vectors_file, attribute_vectors)

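# Usage sketch (not part of the original file): compute latent directions for
# a few standard CelebA attributes and save them for the translation helpers
# in this module. The attribute list and output file name are illustrative.
def example_compute_attribute_vectors(vae):
    attrs = ['Male', 'Smiling', 'Eyeglasses', 'Blond_Hair']
    compute_attribute_vector(vae, attrs, 'attribute_vectors.npy')
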
def test_gan(self, epoch):
    # Flip every attribute label, translate a small batch from each split, and
    # print the discriminator's real/fake and classification outputs.
    # batch_size is assumed to be a module-level constant.
    for part in ('train', 'val', 'test'):
        ds = dataset.load_celeba('CelebA', batch_size, part=part,
                                 consumer='translator', smallbatch=10)
        element = ds.make_one_shot_iterator().get_next()
        sess = K.get_session()
        imgs, labels = sess.run(element)
        labels = 1 - labels
        print(labels)
        rec_imgs = self.generator.predict([imgs, labels])
        src_real, _, cls_real = self.discriminator.predict(imgs)
        src_fake, _, cls_fake = self.discriminator.predict(rec_imgs)
        for r, f, sr, cr, sf, cf in zip(imgs, rec_imgs, src_real, cls_real,
                                        src_fake, cls_fake):
            plot_images([img_renorm(r), img_renorm(f)])
            print('real: ' + str(sr) + ' cls ' + str(cr) +
                  ' fake: ' + str(sf) + ' cls ' + str(cf))

def trans_attributes(vae, image_file_name, attribute_vectors_file, attrs):
    # For each attribute, decode the latent code shifted in both the positive
    # and negative direction, then plot all results together with the original.
    attribute_vectors = np.load(attribute_vectors_file, allow_pickle=True).item()
    image = read_image('CelebA/img_align_celeba/' + image_file_name)
    image = np.expand_dims(image, axis=0)
    z = vae.get_layer('encoder').predict(image)[0]
    z_v = [z[0] + attribute_vectors[attr] for attr in attrs]
    z_v2 = [z[0] - attribute_vectors[attr] for attr in attrs]
    z_v.extend(z_v2)
    z_v = np.asarray(z_v, dtype=np.float32)
    modified_images = vae.get_layer('decoder').predict(z_v)
    images = np.append(modified_images, image, axis=0)
    plot_images(img_renorm(images))

def merge_2(vae, image_file_name1, image_file_name2):
    # Linearly interpolate between the latent codes of two images in nine steps
    # and decode each intermediate point.
    image1 = read_image(image_file_name1)
    image1 = np.expand_dims(image1, axis=0)
    z1 = vae.get_layer('encoder').predict(image1)[0]
    image2 = read_image(image_file_name2)
    image2 = np.expand_dims(image2, axis=0)
    z2 = vae.get_layer('encoder').predict(image2)[0]
    z_v = []
    for i in range(9):
        z = (z2[0] * i + z1[0] * (8 - i)) / 8
        z_v.append(z)
    z_v = np.asarray(z_v, dtype=np.float32)
    images = vae.get_layer('decoder').predict(z_v)
    plot_images(img_renorm(images))

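# Usage sketch (not part of the original file): morph between two CelebA
# crops; both file names are illustrative placeholders.
def example_merge_2(vae):
    merge_2(vae,
            'CelebA/img_align_celeba/000001.jpg',
            'CelebA/img_align_celeba/000002.jpg')
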
def test_gan(self, epoch):
    for part in ('train', 'val', 'test'):
        images = dataset.fetch_smallbatch_from_celeba('CelebA', part=part)
        rec_imgs = self.generator.predict(images)
        plot_image(img_renorm(images), img_renorm(rec_imgs), epoch=epoch)