Example #1
    def test_calculate_frechet_distance(self):
        mu1, sigma1 = np.ones((16, )), np.ones((16, 16))
        mu2, sigma2 = mu1 * 2, sigma1 * 2

        score = fid_utils.calculate_frechet_distance(mu1=mu1,
                                                     mu2=mu2,
                                                     sigma1=sigma1,
                                                     sigma2=sigma2)

        assert type(score) == np.float64

        # Inputs check
        bad_mu2, bad_sigma2 = np.ones((15, 15)), np.ones((15, 15))
        with pytest.raises(ValueError):
            fid_utils.calculate_frechet_distance(mu1=mu1,
                                                 mu2=bad_mu2,
                                                 sigma1=sigma1,
                                                 sigma2=bad_sigma2)
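
For reference, the quantity under test is the Fréchet distance between the Gaussians N(mu1, sigma1) and N(mu2, sigma2). Below is a minimal standalone sketch of the usual formula, assuming SciPy is available; frechet_distance_ref is an illustrative name and not part of fid_utils:

import numpy as np
from scipy import linalg

def frechet_distance_ref(mu1, sigma1, mu2, sigma2):
    # Squared distance between the means plus a trace term on the covariances:
    # ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2 * sqrt(sigma1 @ sigma2))
    diff = mu1 - mu2
    # Matrix square root of sigma1 @ sigma2; numerical error can introduce a
    # negligible imaginary component, which is discarded below.
    covmean, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    return diff @ diff + np.trace(sigma1 + sigma2 - 2.0 * covmean)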
Example #2
def fid_score(num_real_samples,
              num_fake_samples,
              netG,
              dataset,
              seed=0,
              device=None,
              batch_size=50,
              verbose=True,
              stats_file=None,
              log_dir='./log'):
    """
    Computes the FID score using functions that keep images in memory, for both speed and
    fidelity: because no images are written to disk, the scores are not affected by the
    differing read/write implementations of imaging libraries.

    Args:
        num_real_samples (int): The number of real images to use for FID.
        num_fake_samples (int): The number of fake images to use for FID.
        netG (Module): Torch Module object representing the generator model.
        dataset (str/Dataset): The name of a default dataset to load, or a custom Dataset object.
        seed (int): The random seed to use.
        device (str/torch.device): Device identifier to use for computation.
        batch_size (int): The batch size to feedforward for inference.
        verbose (bool): If True, prints progress.
        stats_file (str): The statistics file to load from if there is already one.
        log_dir (str): Directory where feature statistics can be stored.

    Returns:
        float: Scalar FID score.
    """
    start_time = time.time()

    # Check inputs
    if device is None:
        device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")

    if isinstance(dataset, str):
        default_datasets = {
            'cifar10',
            'cifar100',
            'stl10_48',
            'imagenet_32',
            'imagenet_128',
            'celeba_64',
            'celeba_128',
            'lsun_bedroom',
            'fake_data',
        }
        if dataset not in default_datasets:
            raise ValueError('For default datasets, must be one of {}'.format(
                default_datasets))

    elif issubclass(type(dataset), torch.utils.data.Dataset):
        if stats_file is None:
            raise ValueError(
                "stats_file to save/load from cannot be empty if using a custom dataset."
            )

        if not stats_file.endswith('.npz'):
            stats_file = stats_file + '.npz'

    else:
        raise ValueError(
            'dataset must be either a Dataset object or a string.')

    # Make sure the random seeds are fixed
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)

    # Setup directories
    inception_path = os.path.join(log_dir, 'metrics', 'inception_model')

    # Setup the inception graph
    inception_utils.create_inception_graph(inception_path)

    # Start producing statistics for real and fake images
    # Limit TensorFlow's GPU memory usage so the session can coexist with the
    # PyTorch generator on the same device.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.2
    config.gpu_options.allow_growth = True

    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())

        m_real, s_real = compute_real_dist_stats(num_samples=num_real_samples,
                                                 sess=sess,
                                                 dataset=dataset,
                                                 batch_size=batch_size,
                                                 verbose=verbose,
                                                 stats_file=stats_file,
                                                 log_dir=log_dir,
                                                 seed=seed)

        m_fake, s_fake = compute_gen_dist_stats(netG=netG,
                                                num_samples=num_fake_samples,
                                                sess=sess,
                                                device=device,
                                                seed=seed,
                                                batch_size=batch_size,
                                                verbose=verbose)

        FID_score = fid_utils.calculate_frechet_distance(mu1=m_real,
                                                         sigma1=s_real,
                                                         mu2=m_fake,
                                                         sigma2=s_fake)

        print("INFO: FID: {} [Time Taken: {:.4f} secs]".format(
            FID_score,
            time.time() - start_time))

        return float(FID_score)
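
A hypothetical call to fid_score, assuming netG is an already-constructed generator Module that compute_gen_dist_stats can sample from; the sample counts and log directory are illustrative only:

# Illustrative values only; netG is assumed to be defined elsewhere.
score = fid_score(num_real_samples=10000,
                  num_fake_samples=10000,
                  netG=netG,
                  dataset='cifar10',
                  seed=0,
                  batch_size=50,
                  verbose=True,
                  log_dir='./log')
print('FID:', score)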
Example #3
def fid_score(num_real_samples,
              num_fake_samples,
              netG,
              device,
              seed,
              dataset_name,
              batch_size=50,
              verbose=True,
              stats_file=None,
              log_dir='./log'):
    """
    Computes the FID score using functions that keep images in memory, for both speed and
    fidelity: because no images are written to disk, the scores are not affected by the
    differing read/write implementations of imaging libraries.

    Args:
        num_real_samples (int): The number of real images to use for FID.
        num_fake_samples (int): The number of fake images to use for FID.
        netG (Module): Torch Module object representing the generator model.
        device (torch.device): Device to use for computation; its index is used to select the GPU.
        seed (int): The random seed to use.
        dataset_name (str): The name of the dataset to load.
        batch_size (int): The batch size to feedforward for inference.
        verbose (bool): If True, prints progress.
        stats_file (str): The statistics file to load from if there is already one.
        log_dir (str): Directory where feature statistics can be stored.

    Returns:
        float: Scalar FID score.
    """
    start_time = time.time()

    # Make sure the random seeds are fixed
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)

    # Setup directories
    inception_path = os.path.join(log_dir, 'metrics', 'inception_model')

    # Setup the inception graph
    inception_utils.create_inception_graph(inception_path)

    # Start producing statistics for real and fake images
    if device and device.index is not None:
        # Avoid unbounded memory usage
        gpu_options = tf.GPUOptions(allow_growth=True,
                                    per_process_gpu_memory_fraction=0.15,
                                    visible_device_list=str(device.index))
        config = tf.ConfigProto(gpu_options=gpu_options)

    else:
        config = tf.ConfigProto(device_count={'GPU': 0})

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        m_real, s_real = compute_real_dist_stats(num_samples=num_real_samples,
                                                 sess=sess,
                                                 dataset_name=dataset_name,
                                                 batch_size=batch_size,
                                                 verbose=verbose,
                                                 stats_file=stats_file,
                                                 log_dir=log_dir,
                                                 seed=seed)

        m_fake, s_fake = compute_gen_dist_stats(netG=netG,
                                                num_samples=num_fake_samples,
                                                sess=sess,
                                                device=device,
                                                seed=seed,
                                                batch_size=batch_size,
                                                verbose=verbose)

        FID_score = fid_utils.calculate_frechet_distance(mu1=m_real,
                                                         sigma1=s_real,
                                                         mu2=m_fake,
                                                         sigma2=s_fake)

        print("INFO: FID Score: {} [Time Taken: {:.4f} secs]".format(
            FID_score,
            time.time() - start_time))

        return float(FID_score)
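
This variant builds the session configuration with the TensorFlow 1.x API (tf.GPUOptions, tf.ConfigProto). Below is a minimal sketch of the same GPU/CPU selection written against tf.compat.v1, for environments where only TensorFlow 2 is installed; the memory fraction and device handling simply mirror the code above and are not requirements of the library:

import tensorflow as tf
import torch

# Mirrors the device selection used elsewhere in these examples.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

if device.type == 'cuda' and device.index is not None:
    # Pin the session to the chosen GPU and cap its memory usage so it can
    # coexist with the PyTorch generator on the same device.
    gpu_options = tf.compat.v1.GPUOptions(
        allow_growth=True,
        per_process_gpu_memory_fraction=0.15,
        visible_device_list=str(device.index))
    config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
else:
    # No usable GPU: keep the TensorFlow session on the CPU.
    config = tf.compat.v1.ConfigProto(device_count={'GPU': 0})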