Example #1
    def test_polynomial_mmd_averages(self):

        # Average the polynomial-kernel MMD over 4 random subsets of size 1.
        scores = kid_utils.polynomial_mmd_averages(codes_g=self.codes_g,
                                                   codes_r=self.codes_r,
                                                   n_subsets=4,
                                                   subset_size=1)

        # One MMD estimate per subset, each a numpy scalar.
        assert len(scores) == 4
        assert isinstance(scores[0], np.float64)
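For context, here is a minimal NumPy sketch of the quantity this helper is expected to compute: KID averages an unbiased estimate of the squared MMD under a degree-3 polynomial kernel (gamma = 1/dim, coef0 = 1, as in the KID paper) over random subsets of the real and generated feature sets. This is an illustrative sketch under those assumptions, not the library's actual implementation, and it needs subset_size >= 2 for the unbiased estimator to be defined.

import numpy as np

def _poly_kernel(X, Y, degree=3, coef0=1.0):
    # Polynomial kernel with gamma = 1 / feature_dim, as used for KID.
    gamma = 1.0 / X.shape[1]
    return (gamma * X.dot(Y.T) + coef0) ** degree

def _mmd2_unbiased(K_XX, K_XY, K_YY):
    # Unbiased MMD^2 estimator: drop the diagonal terms of the
    # within-set kernel matrices.
    m, n = K_XX.shape[0], K_YY.shape[0]
    term_xx = (K_XX.sum() - np.trace(K_XX)) / (m * (m - 1))
    term_yy = (K_YY.sum() - np.trace(K_YY)) / (n * (n - 1))
    return term_xx + term_yy - 2.0 * K_XY.mean()

def mmd_averages_sketch(codes_g, codes_r, n_subsets, subset_size, seed=0):
    # One MMD^2 estimate per random subset pair, mirroring the shape of
    # the scores checked in the test above.
    rng = np.random.default_rng(seed)
    scores = []
    for _ in range(n_subsets):
        g = codes_g[rng.choice(len(codes_g), subset_size, replace=False)]
        r = codes_r[rng.choice(len(codes_r), subset_size, replace=False)]
        scores.append(_mmd2_unbiased(_poly_kernel(g, g),
                                     _poly_kernel(g, r),
                                     _poly_kernel(r, r)))
    return np.array(scores)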
Example #2
import os
import random
import time

import numpy as np
import tensorflow as tf
import torch

# NOTE: inception_utils, kid_utils, compute_real_dist_feat and
# compute_gen_dist_feat are assumed to be imported from the surrounding
# metrics package; only standard-library and third-party imports are shown.


def kid_score(num_samples,
              netG,
              dataset,
              seed=0,
              device=None,
              num_subsets=10,
              batch_size=50,
              verbose=True,
              feat_file=None,
              log_dir='./log',
              **kwargs):
    """
    Computes KID score.

    Args:
        num_samples (int): The number of real and fake images to use for KID.
        netG (Module): Torch Module object representing the generator model.
        dataset (str/Dataset): The name of a default dataset to load, or a custom Dataset object.
        seed (int): The random seed to use.
        device (str/torch.device): Device to use for computation. Defaults to CUDA if available.
        num_subsets (int): Number of subsets to compute average MMD.
        batch_size (int): The batch size to feedforward for inference.
        verbose (bool): If True, prints progress.
        feat_file (str): The path to specific inception features for real images.
        log_dir (str): Directory where features can be stored.
        **kwargs: Additional keyword arguments passed to compute_real_dist_feat.

    Returns:
        tuple: Scalar mean and std of KID scores computed.
    """
    start_time = time.time()

    # Check inputs
    if device is None:
        device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")

    if isinstance(dataset, str):
        default_datasets = {
            'cifar10',
            'cifar100',
            'stl10_48',
            'imagenet_32',
            'imagenet_128',
            'celeba_64',
            'celeba_128',
            'lsun_bedroom',
            'fake_data',
        }
        if dataset not in default_datasets:
            raise ValueError(
                'For default datasets, dataset must be one of {}.'.format(
                    default_datasets))

    elif isinstance(dataset, torch.utils.data.Dataset):
        if feat_file is None:
            raise ValueError(
                "feat_file to save/load from cannot be empty if using a custom dataset."
            )

        if not feat_file.endswith('.npz'):
            feat_file = feat_file + '.npz'

    else:
        raise ValueError(
            'dataset must be either a Dataset object or a string.')

    # Make sure the random seeds are fixed
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)

    # Directories
    inception_path = './inception_model'

    # Setup the inception graph
    inception_utils.create_inception_graph(inception_path)

    # Start producing features for real and fake images
    # if device.index is not None:
    #     # Avoid unbounded memory usage
    #     gpu_options = tf.compat.v1.GPUOptions(allow_growth=True,
    #                                 per_process_gpu_memory_fraction=0.15,
    #                                 visible_device_list=str(device.index))
    #     config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)

    # else:
    #     config = tf.compat.v1.ConfigProto(device_count={'GPU': 0})

    config = tf.compat.v1.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.2
    config.gpu_options.allow_growth = True

    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())

        real_feat = compute_real_dist_feat(num_samples=num_samples,
                                           sess=sess,
                                           dataset=dataset,
                                           batch_size=batch_size,
                                           verbose=verbose,
                                           feat_file=feat_file,
                                           log_dir=log_dir,
                                           seed=seed,
                                           **kwargs)

        fake_feat = compute_gen_dist_feat(netG=netG,
                                          num_samples=num_samples,
                                          sess=sess,
                                          device=device,
                                          seed=seed,
                                          batch_size=batch_size,
                                          verbose=verbose)

        # Compute the KID score
        subset_size = num_samples // num_subsets
        scores = kid_utils.polynomial_mmd_averages(real_feat,
                                                   fake_feat,
                                                   n_subsets=num_subsets,
                                                   subset_size=subset_size)

        mmd_score, mmd_std = float(np.mean(scores)), float(np.std(scores))

        print("INFO: KID: {:.4f} ± {:.4f} [Time Taken: {:.4f} secs]".format(
            mmd_score, mmd_std,
            time.time() - start_time))

        return mmd_score, mmd_std
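A minimal usage sketch for the kid_score defined above, assuming a torch_mimicry-style setup; build_generator() is a hypothetical placeholder for whatever constructs your generator Module:

import torch

# Hypothetical helper: replace with your own generator construction.
netG = build_generator().to(
    torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'))

kid_mean, kid_std = kid_score(num_samples=10000,
                              netG=netG,
                              dataset='cifar10',
                              num_subsets=10,
                              batch_size=50,
                              log_dir='./log')
print("KID: {:.4f} +/- {:.4f}".format(kid_mean, kid_std))

An alternative variant of the same function follows, parameterized by num_subsets and subset_size (from which the total sample count is derived) rather than by num_samples, and requiring an explicit torch.device.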
def kid_score(num_subsets,
              subset_size,
              netG,
              device,
              seed,
              dataset_name,
              batch_size=50,
              verbose=True,
              feat_file=None,
              log_dir='./log'):
    """
    Computes KID score.

    Args:
        num_subsets (int): Number of subsets to compute average MMD.
        subset_size (int): Size of subset for computing MMD.
        netG (Module): Torch Module object representing the generator model.
        device (torch.device): Torch device object to use for computation.
        seed (int): The random seed to use.
        dataset_name (str): The name of the dataset to load.
        batch_size (int): The batch size to feedforward for inference.
        verbose (bool): If True, prints progress.
        feat_file (str): The path to specific inception features for real images.
        log_dir (str): Directory where features can be stored.

    Returns:
        tuple: Scalar mean and std of KID scores computed.
    """
    start_time = time.time()

    # Make sure the random seeds are fixed
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)

    # Directories
    inception_path = os.path.join(log_dir, 'metrics', 'inception_model')

    # Setup the inception graph
    inception_utils.create_inception_graph(inception_path)

    # Decide sample size
    num_samples = int(num_subsets * subset_size)

    # Start producing features for real and fake images
    if device.index is not None:
        # Avoid unbounded memory usage
        gpu_options = tf.compat.v1.GPUOptions(
            allow_growth=True,
            per_process_gpu_memory_fraction=0.15,
            visible_device_list=str(device.index))
        config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)

    else:
        config = tf.compat.v1.ConfigProto(device_count={'GPU': 0})

    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())

        real_feat = compute_real_dist_feat(num_samples=num_samples,
                                           sess=sess,
                                           dataset_name=dataset_name,
                                           batch_size=batch_size,
                                           verbose=verbose,
                                           feat_file=feat_file,
                                           log_dir=log_dir,
                                           seed=seed)

        fake_feat = compute_gen_dist_feat(netG=netG,
                                          num_samples=num_samples,
                                          sess=sess,
                                          device=device,
                                          seed=seed,
                                          batch_size=batch_size,
                                          verbose=verbose)

        # Compute the KID score
        scores = kid_utils.polynomial_mmd_averages(real_feat,
                                                   fake_feat,
                                                   n_subsets=num_subsets,
                                                   subset_size=subset_size)

        mmd_score, mmd_std = float(np.mean(scores)), float(np.std(scores))

        print("INFO: KID: {:.4f} ± {:.4f} [Time Taken: {:.4f} secs]".format(
            mmd_score, mmd_std,
            time.time() - start_time))

        return mmd_score, mmd_std
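A corresponding usage sketch for this variant: it derives num_samples = num_subsets * subset_size internally, and device must be a torch.device because device.index is read when configuring the TensorFlow session. As above, build_generator() is a hypothetical placeholder:

import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
netG = build_generator().to(device)  # hypothetical helper

# 10 subsets of 1000 samples each -> 10000 real and fake images in total.
kid_mean, kid_std = kid_score(num_subsets=10,
                              subset_size=1000,
                              netG=netG,
                              device=device,
                              seed=0,
                              dataset_name='cifar10',
                              batch_size=50,
                              log_dir='./log')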