# Example 1
def run_auto(dataset, data_dir, result_dir, config_id, num_gpus, resolution,
             total_kimg, gamma, mirror_augment, metrics, train_auto):
    """Assemble and submit a StyleGAN2 autoencoder ("hrae") training run.

    Builds the option dictionaries for the training loop, encoder/decoder
    networks, optimizer, loss, schedule and snapshot grid, then hands the
    whole bundle to dnnlib.submit_run().

    NOTE(review): config_id, gamma, mirror_augment, metrics and train_auto
    are accepted but never read here — presumably kept for signature
    compatibility with sibling launchers; confirm against callers.
    """
    # Training-loop options.
    train = EasyDict(run_func_name='training.training_loop.training_auto_loop')
    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.image_snapshot_ticks = 10
    train.network_snapshot_ticks = 125

    # Network, optimizer and loss options.
    Enc = EasyDict(func_name='training.networks_stylegan2.Encoder')
    Dec = EasyDict(func_name='training.networks_stylegan2.Decoder')
    opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
    loss = EasyDict(func_name='training.loss.auto_l1')

    # Options for TrainingSchedule and setup_snapshot_image_grid().
    sched = EasyDict(lrate=0.003, minibatch_size=64, minibatch_gpu=64)
    grid = EasyDict(size='1080p', layout='random')

    # Dataset selection.
    dataset_args = EasyDict(tfrecord_dir=dataset,
                            resolution=resolution,
                            num_threads=4)

    # Submission config and tflib.init_tf() options.
    sc = dnnlib.SubmitConfig()
    tf_config = {'rnd.np_random_seed': 1000}

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True

    # Human-readable run description used for the result directory name.
    desc = 'stylegan2-hrae' + '-' + dataset + '-%dgpu' % num_gpus + '-auto'

    # Flatten everything into the kwargs consumed by the run function.
    kwargs = EasyDict(train)
    kwargs.update(Enc_args=Enc, Dec_args=Dec, opt_args=opt, loss_args=loss,
                  dataset_args=dataset_args, sched_args=sched,
                  grid_args=grid, tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
def run(result_dir,
        num_gpus,
        total_kimg,
        mirror_augment,
        metrics,
        resume_pkl,
        G_pkl,
        I_fmap_base=8,
        fmap_decay=0.15,
        n_samples_per=10,
        module_list=None,
        latent_type='uniform',
        batch_size=32,
        batch_per_gpu=16,
        random_seed=1000,
        fmap_min=16,
        fmap_max=512,
        dlatent_size=10,
        I_nf_scale=4,
        arch='resnet'):
    """Assemble and submit an inference-network training run.

    Configures the inference network, its optimizer, loss, schedule and
    metrics, then submits everything through dnnlib.submit_run().

    NOTE(review): mirror_augment, fmap_decay and arch are accepted but
    never read here — presumably kept for signature compatibility with
    sibling launchers; confirm against callers.
    """
    print('module_list:', module_list)

    # Training-loop options.
    train = EasyDict(
        run_func_name='training.training_loop_infernet.training_loop_infernet'
    )
    train.total_kimg = total_kimg

    # Inference-network options. fmap_base follows the StyleGAN shrink
    # convention (configs A-E): 2 << I_fmap_base.
    module_list = _str_to_list(module_list)
    I = EasyDict(func_name='training.vc_networks2.infer_modular',
                 dlatent_size=dlatent_size,
                 fmap_min=fmap_min,
                 fmap_max=fmap_max,
                 module_list=module_list,
                 I_nf_scale=I_nf_scale,
                 fmap_base=2 << I_fmap_base)

    # Optimizer and loss options.
    I_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
    loss = EasyDict(func_name='training.loss_inference.I_loss',
                    latent_type=latent_type,
                    dlatent_size=dlatent_size)

    # Options for TrainingSchedule.
    sched = EasyDict(lrate=0.002,
                     tick_kimg=1,
                     minibatch_size=batch_size,
                     minibatch_gpu=batch_per_gpu)

    # Resolve metric names to their default argument dicts.
    metrics = [metric_defaults[x] for x in metrics]

    # Submission config and tflib.init_tf() options.
    sc = dnnlib.SubmitConfig()
    tf_config = {'rnd.np_random_seed': random_seed}

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True

    # Human-readable run description used for the result directory name.
    desc = 'inference_net' + '-%dgpu' % num_gpus

    # Flatten everything into the kwargs consumed by the run function.
    kwargs = EasyDict(train)
    kwargs.update(I_args=I, I_opt_args=I_opt, loss_args=loss,
                  sched_args=sched,
                  metric_arg_list=metrics,
                  tf_config=tf_config,
                  resume_pkl=resume_pkl,
                  G_pkl=G_pkl,
                  n_samples_per=n_samples_per)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)