Example 1
# Module-level imports assumed by these excerpts (StyleGAN2-style layout).
import dnnlib
import dnnlib.tflib as tflib
import pretrained_networks
from metrics import metric_base
from metrics.metric_defaults import metric_defaults


def run(network_pkl,
        metrics,
        dataset,
        data_dir,
        mirror_augment,
        include_I=False,
        is_vae=False,
        mapping_nodup=False,
        avg_mv_for_I=False):
    print('Evaluating metrics "%s" for "%s"...' %
          (','.join(metrics), network_pkl))
    tflib.init_tf()
    # Resolve a local path or pretrained-network alias to an actual pickle file.
    network_pkl = pretrained_networks.get_path_or_url(network_pkl)
    # Dataset reader configuration; shuffle_mb=0 disables shuffling for deterministic evaluation.
    dataset_args = dnnlib.EasyDict(tfrecord_dir=dataset, shuffle_mb=0)
    num_gpus = dnnlib.submit_config.num_gpus
    # Bundle the requested metrics (looked up in metric_defaults) into a single group.
    metric_group = metric_base.MetricGroup(
        [metric_defaults[metric] for metric in metrics])
    metric_group.run(network_pkl,
                     data_dir=data_dir,
                     dataset_args=dataset_args,
                     mirror_augment=mirror_augment,
                     num_gpus=num_gpus,
                     include_I=include_I,
                     is_vae=is_vae,
                     mapping_nodup=mapping_nodup,
                     avg_mv_for_I=avg_mv_for_I)
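Functions like the one above read dnnlib.submit_config.num_gpus, which is only populated when the call is dispatched through dnnlib's submission machinery, so they are normally invoked via dnnlib.submit_run rather than called directly. Below is a minimal driver sketch assuming the StyleGAN2-style dnnlib API; the module path 'run_metrics.run', the pretrained-network alias, the dataset name, and the result directory are placeholder assumptions.

import dnnlib

def main():
    # Placeholder evaluation arguments; adjust to the actual network pickle, dataset and metrics.
    kwargs = dnnlib.EasyDict(
        network_pkl='gdrive:networks/stylegan2-ffhq-config-f.pkl',
        metrics=['fid50k'],
        dataset='ffhq',
        data_dir='datasets',
        mirror_augment=False,
    )
    sc = dnnlib.SubmitConfig()
    sc.num_gpus = 1                                # read back inside run() as dnnlib.submit_config.num_gpus
    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    sc.run_dir_root = 'results'
    sc.run_desc = 'run-metrics'
    dnnlib.submit_run(sc, 'run_metrics.run', **kwargs)  # dispatches run() defined in run_metrics.py

if __name__ == '__main__':
    main()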
Example 2
def run_eval(dataset, resolution, result_dir, DiffAugment, num_gpus, batch_size,
             total_kimg, ema_kimg, num_samples, gamma, fmap_base, fmap_max,
             latent_size, mirror_augment, impl, metrics, resume, resume_kimg,
             num_repeats, eval):
    # Build a TFRecords dataset from the raw images at the target resolution
    # and record how many samples it contains.
    dataset, total_samples = dataset_tool.create_from_images(dataset, resolution)
    print('Evaluating metrics "%s" for "%s"...' % (','.join(metrics), resume))
    tflib.init_tf()
    resume = pretrained_networks.get_path_or_url(resume)
    dataset_args = dnnlib.EasyDict(tfrecord_dir=dataset, num_samples=num_samples or total_samples,
                                   resolution=resolution, from_tfrecords=True)
    metric_group = metric_base.MetricGroup([metric_defaults[metric] for metric in metrics],
                                           num_repeats=num_repeats)
    metric_group.run(resume, dataset_args=dataset_args, num_gpus=num_gpus)
Example 3
def run(network_pkl, metrics, dataset, data_dir, mirror_augment, rho_steps):
    print('Evaluating metrics "%s" for "%s"...' % (','.join(metrics), network_pkl))
    tflib.init_tf()
    network_pkl = pretrained_networks.get_path_or_url(network_pkl)
    dataset_args = dnnlib.EasyDict(tfrecord_dir=dataset, shuffle_mb=0)
    num_gpus = dnnlib.submit_config.num_gpus
    metric_group = metric_base.MetricGroup([metric_defaults[metric] for metric in metrics])
    # Evaluate the metrics at rho_steps evenly spaced values of rho in [0, 1];
    # with a single step, only rho=1 is evaluated.
    if rho_steps > 1:
        rho_sweep = np.linspace(0, 1, rho_steps)
    else:
        rho_sweep = [1]
    for rho in rho_sweep:
        print(rho)
        metric_group.run(network_pkl, data_dir=data_dir, dataset_args=dataset_args, mirror_augment=mirror_augment, num_gpus=num_gpus, rho=rho)
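For reference, the sweep above simply spaces the rho evaluation points evenly over [0, 1]; a quick check with NumPy (values shown for rho_steps=5):

import numpy as np

print(np.linspace(0, 1, 5))   # -> [0.   0.25 0.5  0.75 1.  ]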
Example 4
def run(network_pkl, metrics, dataset, data_dir, mirror_augment):
    print('Evaluating metrics "%s" for "%s"...' %
          (','.join(metrics), network_pkl))
    tflib.init_tf()
    network_pkl = pretrained_networks.get_path_or_url(network_pkl)
    # max_label_size="full" makes the dataset reader return the full conditioning labels.
    dataset_args = dnnlib.EasyDict(tfrecord_dir=dataset,
                                   shuffle_mb=0,
                                   max_label_size="full")
    num_gpus = dnnlib.submit_config.num_gpus
    metric_group = metric_base.MetricGroup(
        [metric_defaults[metric] for metric in metrics])
    metric_group.run(network_pkl,
                     data_dir=data_dir,
                     dataset_args=dataset_args,
                     mirror_augment=mirror_augment,
                     num_gpus=num_gpus)
Example 5
def run(network_pkl, metrics, dataset, data_dir, mirror_augment, paths):
    print("Evaluating metrics %s for %s..." % (",".join(metrics), network_pkl))
    tflib.init_tf()

    network_pkl = pretrained_networks.get_path_or_url(network_pkl)
    dataset_args = dnnlib.EasyDict(tfrecord_dir=dataset, shuffle_mb=0)
    num_gpus = dnnlib.submit_config.num_gpus
    metric_group = metric_base.MetricGroup([metric_defaults[metric] for metric in metrics])
    # TensorFlow session options forwarded to the metric runner; dotted keys follow the
    # dnnlib.tflib convention ("rnd.*" seeds the RNGs, the rest map onto tf.ConfigProto fields).
    tf_config = {
        "rnd.np_random_seed": 1000,
        "allow_soft_placement": True,
        "gpu_options.per_process_gpu_memory_fraction": 1.0
    }

    metric_group.run(network_pkl, data_dir=data_dir, dataset_args=dataset_args, tf_config=tf_config,
        mirror_augment=mirror_augment, num_gpus=num_gpus, paths=paths)
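The dotted keys in tf_config follow the dnnlib.tflib convention: "rnd.*" entries seed the random number generators, while the remaining dotted paths are applied to the tf.ConfigProto of the session. As a minimal sketch (an assumption about how these options are ultimately consumed, not part of the example above), the same dictionary can be handed directly to tflib.init_tf():

import dnnlib.tflib as tflib

tf_config = {
    "rnd.np_random_seed": 1000,                           # fixed seed for reproducible evaluation
    "allow_soft_placement": True,                         # -> tf.ConfigProto.allow_soft_placement
    "gpu_options.per_process_gpu_memory_fraction": 1.0,   # -> tf.ConfigProto.gpu_options...
}
tflib.init_tf(tf_config)   # creates the default TensorFlow session with these options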
Example 6
def run(network_pkl, metrics, dataset, data_dir, mirror_augment):
    print('Evaluating metrics "%s" for "%s"...' %
          (','.join(metrics), network_pkl))
    tflib.init_tf()
    # Evaluate every network-*.pkl snapshot found in the given directory, in sorted filename order.
    pkls = [
        v for v in os.listdir(network_pkl)
        if v.startswith('network') and v.endswith('.pkl')
    ]
    pkls.sort()
    for pkl in pkls:
        net_pkl = pretrained_networks.get_path_or_url(
            os.path.join(network_pkl, pkl))
        print('Process pkl %s' % pkl)
        dataset_args = dnnlib.EasyDict(tfrecord_dir=dataset, shuffle_mb=0)
        num_gpus = dnnlib.submit_config.num_gpus
        metric_group = metric_base.MetricGroup(
            [metric_defaults[metric] for metric in metrics])
        metric_group.run(net_pkl,
                         data_dir=data_dir,
                         dataset_args=dataset_args,
                         mirror_augment=mirror_augment,
                         num_gpus=num_gpus)
        metric_group.update_autosummaries()  # push this snapshot's results into dnnlib's autosummaries
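update_autosummaries() feeds each snapshot's metric values into dnnlib's autosummary mechanism, so the metric curve across snapshots can be exported to TensorBoard. A rough sketch of that mechanism, assuming the dnnlib.tflib.autosummary API and a hypothetical FID value:

import tensorflow as tf
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary, save_summaries

tflib.init_tf()
writer = tf.summary.FileWriter('results/metrics-tb')   # TF1-style event writer
autosummary('Metrics/fid50k', 7.3)                      # hypothetical value for one snapshot
save_summaries(writer, global_step=1)                   # e.g. snapshot index or kimg
writer.close()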