def compute_Precision_and_Recall(ref_features,
                                 eval_features,
                                 num_gpus=1,
                                 save_txt=None,
                                 save_path=None):
    """Compute k-NN precision and recall between two feature sets.

        Args:
            ref_features (np.ndarray): Features of the reference (real) samples.
            eval_features (np.ndarray): Features of the evaluated (generated) samples.
            num_gpus (int): Number of GPUs used.
            save_txt (string): If truthy, results are written to a text file.
            save_path (string): Absolute path to directory where the result
                textfile is saved (required when save_txt is set).

    """
    print('Running ...')
    it_start = time()
    metric_results = np.zeros([1, 3], dtype=np.float32)

    # Calculate k-NN precision and recall.
    state = knn_precision_recall_features(ref_features,
                                          eval_features,
                                          num_gpus=num_gpus,
                                          nhood_sizes=[3],
                                          row_batch_size=25000,
                                          col_batch_size=50000)

    # Store results. Column 0 is a placeholder (no sweep variable here);
    # kept so the output format matches the sweep functions in this file.
    metric_results[0, 0] = 0.0
    metric_results[0, 1] = state['precision'][0]
    metric_results[0, 2] = state['recall'][0]

    # Print progress.
    print('Precision: %0.3f' % state['precision'][0])
    print('Recall: %0.3f' % state['recall'][0])
    print('Iteration time: %gs\n' % (time() - it_start))

    # Save results.
    if save_txt:
        result_file = os.path.join(save_path, 'Result.txt')
        header = 'Header,precision,recall'
        np.savetxt(result_file,
                   metric_results,
                   header=header,
                   delimiter=',',
                   comments='')
def run_ipr(R, E, subfolder, config):
    """
    Runs Improved Precision and Recall score.
    :param R: array of R points.
    :param E: array of E points.
    :param subfolder: subfolder to save results to.
    :param config: dictionary containing method hyperparameters.
    :return: precision and recall scores.
    """
    nhood_sizes = config['nhood_sizes']
    default = config['default']
    # Silence TensorFlow's C++ logging (3 = errors only).
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # Use the session as a context manager so it is closed on exit
    # (the previous version leaked the session).
    with tf.Session() as sess:
        with sess.as_default():
            pr = ipr.knn_precision_recall_features(R, E, nhood_sizes=nhood_sizes,
                        row_batch_size=500, col_batch_size=100, num_gpus=1)
    print('IPR precision: {0}, recall: {1}'.format(pr['precision'], pr['recall']))

    if not os.path.exists(subfolder):
        os.makedirs(subfolder)
    txt_file_path = os.path.join(subfolder, 'IPR_results.txt')
    pkl_file_path = os.path.join(subfolder, 'IPR_results.pkl')

    # Append human-readable stats; pickle the raw result dict alongside.
    with open(txt_file_path, 'a') as f:
        f.writelines([
            '\n--- IPR stats\n',
            'nhood_sizes: {0}\n'.format(nhood_sizes)
        ])
        f.writelines(['{0}: {1}\n'.format(k, v) for k, v in pr.items()])
        print('IPR scores saved.')
    with open(pkl_file_path, 'wb') as f:
        pickle.dump(pr, f)

    # Report the scores for the configured default neighborhood size.
    default_idx = nhood_sizes.index(default)
    return pr['precision'][default_idx], pr['recall'][default_idx]
# Beispiel #3
# 0
def compute_stylegan_truncation(datareader,
                                minibatch_size,
                                num_images,
                                truncations,
                                num_gpus,
                                random_seed,
                                save_txt=None,
                                save_path=None):
    """StyleGAN truncation sweep. (Fig. 4)

        For each truncation psi value, embeds real and generated images with
        VGG-16 and computes k-NN precision and recall between the two sets.

        Args:
            datareader (): FFHQ datareader object.
            minibatch_size (int): Minibatch size.
            num_images (int): Number of images used to evaluate precision and recall.
            truncations (list): List of truncation psi values.
            save_txt (string): Name of result file.
            save_path (string): Absolute path to directory where result textfile is saved.
            num_gpus (int): Number of GPUs used.
            random_seed (int): Random seed.

    """
    print('Running StyleGAN truncation sweep...')
    rnd = np.random.RandomState(random_seed)
    fmt = dict(func=dnnlib.tflib.convert_images_to_uint8)

    # Feature extractor (VGG-16) and generator (StyleGAN).
    feature_net = initialize_feature_extractor()
    Gs = initialize_stylegan()

    results = np.zeros([len(truncations), 3], dtype=np.float32)
    for row, psi in enumerate(truncations):
        print('Truncation %g' % psi)
        t0 = time()

        # Embed real images.
        print('Reading real images...')
        real_feats = np.zeros([num_images, feature_net.output_shape[1]],
                              dtype=np.float32)
        lo = 0
        while lo < num_images:
            hi = min(lo + minibatch_size, num_images)
            real_batch, _ = datareader.get_minibatch_np(hi - lo)
            real_feats[lo:hi] = feature_net.run(real_batch,
                                                num_gpus=num_gpus,
                                                assume_frozen=True)
            lo = hi

        # Embed generated images at this truncation.
        print('Generating images...')
        fake_feats = np.zeros([num_images, feature_net.output_shape[1]],
                              dtype=np.float32)
        lo = 0
        while lo < num_images:
            hi = min(lo + minibatch_size, num_images)
            latents = rnd.randn(hi - lo, *Gs.input_shape[1:])
            images = Gs.run(latents,
                            None,
                            truncation_psi=psi,
                            truncation_cutoff=18,
                            randomize_noise=True,
                            output_transform=fmt)
            fake_feats[lo:hi] = feature_net.run(images,
                                                num_gpus=num_gpus,
                                                assume_frozen=True)
            lo = hi

        # k-NN precision and recall for this psi.
        state = knn_precision_recall_features(real_feats,
                                              fake_feats,
                                              num_gpus=num_gpus)

        # One result row per truncation value: (psi, precision, recall).
        results[row] = (psi, state['precision'][0], state['recall'][0])

        print('Precision: %0.3f' % state['precision'][0])
        print('Recall: %0.3f' % state['recall'][0])
        print('Iteration time: %gs\n' % (time() - t0))

    # Save results.
    if save_txt:
        np.savetxt(os.path.join(save_path, 'stylegan_truncation.txt'),
                   results,
                   header='truncation_psi,precision,recall',
                   delimiter=',',
                   comments='')
# Beispiel #4
# 0
                   batchSize] = model.sample(nSamples=batchSize)
     nSampled += batchSize
 #print(sampled_fetch)
 print("Evaluating")
 if nEvalSamples < data.shape[0]:
     evalData = getDataBatch(nEvalSamples)
 else:
     evalData = data
 #evalData=data[:min([nEvalSamples, data.shape[0]])]
 logp = np.mean(
     model.getLogP(
         inputs=DRMM.DataIn(data=evalData, mask=np.ones_like(evalData))))
 with sess.as_default():
     #Precision and recall code from: https://github.com/kynkaat/improved-precision-and-recall-metric
     precrecall = knn_precision_recall_features(evalData,
                                                sampled_fetch,
                                                row_batch_size=10000)
 precision = precrecall['precision'][0]
 recall = precrecall['recall'][0]
 f1 = 2.0 * (recall * precision) / (recall + precision + 1e-8)
 print("F1 {}, logp {}".format(f1, logp))
 logFileName = "Results/benchmark_precrecall.csv"
 if not os.path.isfile(logFileName):
     logFile = open(logFileName, "w")
     logFile.write(
         "dataset,datasetIdx,nLayers,nComponentsPerLayer,nParameters,precision,recall,f1,logp\n"
     )
 else:
     logFile = open(logFileName, "a")
 #logFile.write("dataset,datasetIdx,nLayers,nComponentsPerLayer,sampleQuality")
 logFile.write("{},{},{},{},{},{},{},{},{}\n".format(