    def evaluate_inception(self):
        incep_batch_size = self.cfg.EVAL.INCEP_BATCH_SIZE
        logits, _ = load_inception_inference(
            self.sess, self.classes, incep_batch_size,
            self.cfg.EVAL.INCEP_CHECKPOINT_DIR)
        pred_op = tf.nn.softmax(logits)

        z = tf.placeholder(tf.float32, [self.bs, self.model.z_dim], name='z')
        cond = tf.placeholder(tf.float32, [self.bs] + [self.model.embed_dim],
                              name='cond')
        eval_gen, _, _ = self.model.generator(z,
                                              cond,
                                              reuse=False,
                                              is_training=False)

        saver = tf.train.Saver(tf.global_variables('g_net'))
        could_load, _ = load(saver, self.sess, self.cfg.CHECKPOINT_DIR)
        if could_load:
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
            raise RuntimeError(
                'Could not load the checkpoints of the generator')

        print('Generating images...')

        size = self.cfg.EVAL.SIZE
        n_batches = size // self.bs

        w, h, c = self.model.image_dims
        samples = np.zeros((n_batches * self.bs, w, h, c))
        for i in range(n_batches):
            print("\rGenerating batch %d/%d" % (i + 1, n_batches),
                  end="",
                  flush=True)

            sample_z = np.random.normal(0, 1, size=(self.bs, self.model.z_dim))
            _, _, embed, _, _ = self.dataset.test.next_batch(self.bs,
                                                             4,
                                                             embeddings=True)
            start = i * self.bs
            end = start + self.bs

            gen_batch = self.sess.run(eval_gen,
                                      feed_dict={
                                          z: sample_z,
                                          cond: embed
                                      })
            samples[start:end] = denormalize_images(gen_batch)

        print('\nComputing inception score...')
        mean, std = inception_score.get_inception_score(samples,
                                                        self.sess,
                                                        incep_batch_size,
                                                        10,  # number of splits
                                                        pred_op,
                                                        verbose=True)
        print('Inception Score | mean:', "%.2f" % mean, 'std:', "%.2f" % std)
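
Note: denormalize_images is not defined in these snippets. A minimal sketch of what it presumably does, assuming tanh-scaled generator outputs in [-1, 1] mapped back to [0, 255] (the range convention is an assumption, not confirmed by the source):

import numpy as np

def denormalize_images(images):
    # Assumed behavior: map tanh outputs in [-1, 1] to pixel values in [0, 255].
    return np.clip((images + 1.0) * 127.5, 0.0, 255.0)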
Example #2
    def evaluate_fid(self):
        incep_batch_size = self.cfg.EVAL.INCEP_BATCH_SIZE
        # The second argument is the number of classes (hard-coded to 20 here)
        _, layers = load_inception_inference(self.sess, 20, incep_batch_size,
                                             self.cfg.EVAL.INCEP_CHECKPOINT_DIR)
        pool3 = layers['PreLogits']
        act_op = tf.reshape(pool3, shape=[incep_batch_size, -1])

        if not os.path.exists(self.cfg.EVAL.ACT_STAT_PATH):
            print('Computing activation statistics for real images')
            fid.compute_and_save_activation_statistics(self.cfg.EVAL.R_IMG_PATH, self.sess, incep_batch_size, act_op,
                                                       self.cfg.EVAL.ACT_STAT_PATH, verbose=True)

        print('Loading activation statistics for the real images')
        stats = np.load(self.cfg.EVAL.ACT_STAT_PATH)
        mu_real = stats['mu']
        sigma_real = stats['sigma']

        z = tf.placeholder(tf.float32, [self.bs, self.model.z_dim], name='z')
        cond = tf.placeholder(tf.float32, [self.bs] + [self.model.embed_dim], name='cond')
        eval_gen, _, _ = self.model.generator(z, cond, reuse=False)

        saver = tf.train.Saver(tf.global_variables('g_net'))
        could_load, _ = load(saver, self.sess, self.cfg.CHECKPOINT_DIR)
        if could_load:
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
            raise RuntimeError('Could not load the checkpoints of the generator')

        print('Generating images...')

        fid_size = self.cfg.EVAL.SIZE
        n_batches = fid_size // self.bs

        w, h, c = self.model.image_dims
        samples = np.zeros((n_batches * self.bs, w, h, c))
        for i in range(n_batches):
            start = i * self.bs
            end = start + self.bs

            sample_z = np.random.normal(0, 1, size=(self.bs, self.model.z_dim))
            _, _, embed, _, _ = self.dataset.test.next_batch(self.bs, 4, embeddings=True)

            samples[start: end] = denormalize_images(self.sess.run(eval_gen, feed_dict={z: sample_z, cond: embed}))

        print('Computing activation statistics for generated images...')
        mu_gen, sigma_gen = fid.calculate_activation_statistics(samples, self.sess, incep_batch_size, act_op,
                                                                verbose=True)
        print("calculate FID:", end=" ", flush=True)
        try:
            FID = fid.calculate_frechet_distance(mu_gen, sigma_gen, mu_real, sigma_real)
        except Exception as e:
            print(e)
            FID = 500

        print(FID)
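
For reference, fid.calculate_frechet_distance computes the standard FID between the two fitted Gaussians, FID = ||mu1 - mu2||^2 + Tr(S1 + S2 - 2 (S1 S2)^(1/2)). A minimal sketch of that textbook formula (not necessarily this repo's exact implementation):

import numpy as np
from scipy import linalg

def frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    # ||mu1 - mu2||^2 + Tr(S1 + S2 - 2 * sqrtm(S1 @ S2))
    diff = mu1 - mu2
    covmean, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
    if not np.isfinite(covmean).all():
        # Regularize near-singular covariance products.
        offset = np.eye(sigma1.shape[0]) * eps
        covmean, _ = linalg.sqrtm((sigma1 + offset) @ (sigma2 + offset), disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    return float(diff @ diff + np.trace(sigma1 + sigma2 - 2.0 * covmean))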
Example #3
def main(_):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default():
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            with tf.device("/gpu:%d" % FLAGS.gpu):
                logits, _ = load_inception_inference(sess, FLAGS.num_classes,
                                                     FLAGS.batch_size,
                                                     FLAGS.checkpoint_dir)
                pred_op = tf.nn.softmax(logits)

                images = load_inception_data(FLAGS.img_folder)
                mean, std = get_inception_score(images, sess, FLAGS.batch_size,
                                                FLAGS.splits, pred_op)
                print('mean:', "%.2f" % mean, 'std:', "%.2f" % std)
Example #4
def calculate_fid_given_paths():
    """ Calculates the FID of two paths. """
    real_img_path = FLAGS.real_img_folder
    gen_img_path = FLAGS.gen_img_folder
    if not os.path.exists(real_img_path):
        raise RuntimeError("Invalid path: %s" % real_img_path)
    if not os.path.exists(gen_img_path):
        raise RuntimeError("Invalid path %s" % gen_img_path)

    with tf.Session() as sess:
        _, layers = load_inception_inference(sess, FLAGS.num_classes, FLAGS.batch_size, FLAGS.checkpoint_dir)
        pool3 = layers['PreLogits']
        act_op = tf.reshape(pool3, shape=[FLAGS.batch_size, -1])

        m1, s1 = _handle_path(real_img_path, sess, act_op)
        m2, s2 = _handle_path(gen_img_path, sess, act_op)
        fid_dist = calculate_frechet_distance(m1, s1, m2, s2)
        return fid_dist
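
_handle_path is not shown here. A hypothetical sketch of what it likely does, reusing load_inception_data and calculate_activation_statistics from the snippets above (the .npz caching branch is an assumption):

import numpy as np

def _handle_path(path, sess, act_op):
    # Assumed behavior: reuse cached statistics for an .npz file,
    # otherwise load the folder's images and fit mu/sigma to their activations.
    if path.endswith('.npz'):
        stats = np.load(path)
        return stats['mu'], stats['sigma']
    images = load_inception_data(path)
    return calculate_activation_statistics(images, sess, FLAGS.batch_size, act_op)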
Example #5
def main(unused_argv=None):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default():
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            with tf.device("/gpu:%d" % FLAGS.gpu):
                _, layers = load_inception_inference(sess, FLAGS.num_classes,
                                                     FLAGS.batch_size,
                                                     FLAGS.checkpoint_dir)

                pool3 = layers['PreLogits']
                act_op = tf.reshape(pool3, shape=[FLAGS.batch_size, -1])

                real_images = load_inception_data(FLAGS.real_img_folder,
                                                  alphabetic=True)
                gen_images = load_inception_data(FLAGS.gen_img_folder,
                                                 alphabetic=True)
                compute_imd(sess, real_images, gen_images, act_op)
Example #6
	def evaluate_inception(self):
		incep_batch_size = self.cfg.EVAL.INCEP_BATCH_SIZE
		logits, _ = load_inception_inference(self.sess, self.cfg.EVAL.NUM_CLASSES, incep_batch_size,
											 self.cfg.EVAL.INCEP_CHECKPOINT_DIR)
		pred_op = tf.nn.softmax(logits)
		
		z = tf.placeholder(tf.float32, [self.bs, self.model.stagei.z_dim], name='z')
		cond = tf.placeholder(tf.float32, [self.bs] + [self.model.stagei.embed_dim], name='cond')
		stagei_gen, _, _ = self.model.stagei.generator(z, cond, reuse=False, is_training=False)
		eval_gen, _, _ = self.model.generator(stagei_gen, cond, reuse=False, is_training=False)
		self.Retrieval.eval(self.bs)
		saver = tf.train.Saver(tf.global_variables('g_net') + tf.global_variables('vf_') +
							   tf.global_variables('sf_') + tf.global_variables('att'))
		could_load, _ = load(saver, self.sess, self.model.stagei.cfg.CHECKPOINT_DIR)
		
		if could_load:
			print(" [*] Load SUCCESS")
		else:
			print(" [!] Load failed...")
			raise RuntimeError('Could not load the checkpoints of stage I')

		saver = tf.train.Saver(tf.global_variables('stageII_g_net'))
		could_load, _ = load(saver, self.sess, self.cfg.CHECKPOINT_DIR)
		if could_load:
			print(" [*] Load SUCCESS")
		else:
			print(" [!] Load failed...")
			raise RuntimeError('Could not load the checkpoints of stage II')

		print('Generating batches...')

		size = self.cfg.EVAL.SIZE
		n_batches = size // self.bs

		all_preds = []
		for i in range(n_batches):
			print("\rGenerating batch %d/%d" % (i + 1, n_batches), end="", flush=True)

			# Sample noise matching the stage I z placeholder defined above
			sample_z = np.random.normal(0, 1, size=(self.bs, self.model.stagei.z_dim))
			_, _, embed, _, _ = self.dataset.test.next_batch(self.bs, 1, embeddings=True)
			im_feats, sent_feats, labels = self.test_data_loader.get_batch(i, self.bs, phase='incep')

			# Embed the sentences, then generate a batch and scale it up for inception
			sent_emb = self.sess.run(self.Retrieval.sent_embed_tensor,
									feed_dict={
												self.Retrieval.image_placeholder_test: im_feats,
												self.Retrieval.sent_placeholder_test: sent_feats,
											  })			
			gen_batch = self.sess.run(eval_gen, feed_dict={z: sample_z, cond: sent_emb})

			samples = denormalize_images(gen_batch)
			incep_samples = np.empty((self.bs, 299, 299, 3))
			for sample_idx in range(self.bs):
				incep_samples[sample_idx] = prep_incep_img(samples[sample_idx])

			# Run prediction for current batch
			pred = self.sess.run(pred_op, feed_dict={'inputs:0': incep_samples})
			all_preds.append(pred)

		# Stack the per-batch predictions into a single (N, num_classes) array
		all_preds = np.concatenate(all_preds, 0)

		print('\nComputing inception score...')
		mean, std = inception_score.get_inception_from_predictions(all_preds, 10)
		print('Inception Score | mean:', "%.2f" % mean, 'std:', "%.2f" % std)
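
prep_incep_img is also undefined in the snippet. A plausible sketch, assuming the Inception graph expects 299x299x3 inputs and handles any value scaling internally (the resize method and that assumption are not confirmed by the source):

import numpy as np
from skimage.transform import resize

def prep_incep_img(img):
    # Assumed behavior: resize a [0, 255] HWC image to the 299x299x3
    # input shape expected by the Inception network.
    return resize(img, (299, 299, 3), preserve_range=True)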
Example #7
    cond = tf.placeholder(tf.float32, [None, 1024], name='cond')
    z = tf.placeholder(tf.float32, [None, sample_size], name='z')
    gen_op, _, _ = pggan.generator(z, cond, stages=stage, t=False)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        saver = tf.train.Saver(tf.global_variables('g_net'))
        could_load, _ = load(saver, sess, pggan_checkpoint_dir_read)
        if not could_load:
            raise RuntimeError('Could not load stage %d' % stage)

        logits, _ = load_inception_inference(sess, cfg.EVAL.NUM_CLASSES,
                                             incep_batch_size,
                                             incep_checkpoint_dir)
        pred_op = tf.nn.softmax(logits)

        size = 50000  # conventional sample count for inception score
        n_batches = size // batch_size

        all_preds = []
        for i in range(n_batches):
            print("\rGenerating batch %d/%d" % (i + 1, n_batches),
                  end="",
                  flush=True)

            sample_z = np.random.normal(0, 1, size=(batch_size, sample_size))
            _, _, embed, _, _ = dataset.test.next_batch(batch_size,
                                                        4,
                                                        embeddings=True)