Example #1
def run(
        dataset,
        data_dir,
        result_dir,
        config_id,
        num_gpus,
        total_kimg,
        gamma,
        mirror_augment,
        metrics,
        resume_pkl,
        D_global_size=3,
        C_global_size=0,  # Global C_latents.
        sb_C_global_size=4,
        C_local_hfeat_size=0,  # Local heatmap*features learned C_latents.
        C_local_heat_size=0,  # Local heatmap learned C_latents.
        n_samples_per=10,
        module_list=None,
        single_const=True,
        model_type='spatial_biased'):
    # print('module_list:', module_list)
    train = EasyDict(
        run_func_name='training.training_loop_dsp.training_loop_dsp'
    )  # Options for training loop.
    if model_type == 'spatial_biased':
        G = EasyDict(
            func_name=
            'training.spatial_biased_networks.G_main_spatial_biased_dsp',
            mapping_fmaps=128,
            fmap_max=128,
            latent_size=D_global_size + sb_C_global_size,
            dlatent_size=D_global_size + sb_C_global_size,
            D_global_size=D_global_size,
            sb_C_global_size=sb_C_global_size
        )  # Options for generator network.
        desc = 'spatial_biased_net'
    elif model_type == 'sb_general':
        G = EasyDict(
            func_name=
            'training.spatial_biased_networks.G_main_spatial_biased_dsp',
            synthesis_func='G_synthesis_sb_general_dsp',
            mapping_fmaps=128,
            fmap_max=128,
            latent_size=D_global_size + C_global_size + sb_C_global_size +
            C_local_hfeat_size + C_local_heat_size,
            dlatent_size=D_global_size + C_global_size + sb_C_global_size +
            C_local_hfeat_size + C_local_heat_size,
            D_global_size=D_global_size,
            C_global_size=C_global_size,
            sb_C_global_size=sb_C_global_size,
            C_local_hfeat_size=C_local_hfeat_size,
            C_local_heat_size=C_local_heat_size,
            use_noise=False)  # Options for generator network.
        desc = 'sb_general_net'
    elif model_type == 'sb_modular':
        module_list = _str_to_list(module_list)
        key_ls, size_ls, count_dlatent_size, _ = split_module_names(
            module_list)
        for i, key in enumerate(key_ls):
            if key.startswith('D_global'):
                D_global_size = size_ls[i]
                break
        print('D_global_size:', D_global_size)
        G = EasyDict(
            func_name=
            'training.spatial_biased_networks.G_main_spatial_biased_dsp',
            synthesis_func='G_synthesis_sb_modular',
            mapping_fmaps=128,
            fmap_max=128,
            latent_size=count_dlatent_size,
            dlatent_size=count_dlatent_size,
            D_global_size=D_global_size,
            module_list=module_list,
            single_const=single_const,
            use_noise=False)  # Options for generator network.
        desc = 'sb_modular_net'
    elif model_type == 'sb_singlelayer_modi':
        G = EasyDict(func_name='training.simple_networks.G_main_simple_dsp',
                     synthesis_func='G_synthesis_sb_singlelayer_modi_dsp',
                     mapping_fmaps=128,
                     fmap_max=128,
                     latent_size=D_global_size + sb_C_global_size,
                     dlatent_size=D_global_size + sb_C_global_size,
                     D_global_size=D_global_size,
                     sb_C_global_size=sb_C_global_size
                     )  # Options for generator network.
        desc = 'sb_singlelayer_net'
    elif model_type == 'stylegan2':
        G = EasyDict(
            func_name=
            'training.spatial_biased_networks.G_main_spatial_biased_dsp',
            dlatent_avg_beta=None,
            mapping_fmaps=128,
            fmap_max=128,
            latent_size=12,
            D_global_size=D_global_size,
            sb_C_global_size=sb_C_global_size
        )  # Options for generator network.
        desc = 'stylegan2_net'
    elif model_type == 'simple':
        G = EasyDict(func_name='training.simple_networks.G_main_simple_dsp',
                     latent_size=D_global_size + sb_C_global_size,
                     dlatent_size=D_global_size + sb_C_global_size,
                     D_global_size=D_global_size,
                     sb_C_global_size=sb_C_global_size
                     )  # Options for generator network.
        desc = 'simple_net'
    else:
        raise ValueError('Unsupported model type: ' + model_type)

    if model_type == 'simple':
        D = EasyDict(func_name='training.simple_networks.D_simple_dsp'
                     )  # Options for discriminator network.
    else:
        D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2',
                     fmap_max=128)  # Options for discriminator network.
        # D         = EasyDict(func_name='training.spatial_biased_networks.D_with_discrete_dsp', fmap_max=128)  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(
        func_name='training.loss.G_logistic_ns_dsp',
        D_global_size=D_global_size)  # Options for generator loss.
    D_loss = EasyDict(
        func_name='training.loss.D_logistic_r1_dsp',
        D_global_size=D_global_size)  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(
        size='1080p',
        layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]

    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset, max_label_size='full')

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G,
                  D_args=D,
                  G_opt_args=G_opt,
                  D_opt_args=D_opt,
                  G_loss_args=G_loss,
                  D_loss_args=D_loss,
                  traversal_grid=True)
    if model_type == 'sb_modular':
        n_continuous = 0
        for i, key in enumerate(key_ls):
            m_name = key.split('-')[0]
            if (m_name in LATENT_MODULES) and (not m_name == 'D_global'):
                n_continuous += size_ls[i]
    else:
        n_continuous = C_global_size + sb_C_global_size + \
            C_local_hfeat_size + C_local_heat_size
    kwargs.update(dataset_args=dataset_args,
                  sched_args=sched,
                  grid_args=grid,
                  metric_arg_list=metrics,
                  tf_config=tf_config,
                  resume_pkl=resume_pkl,
                  n_discrete=D_global_size,
                  n_continuous=n_continuous,
                  n_samples_per=n_samples_per)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
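
All of these run() variants assemble their configuration out of EasyDict objects
and pass the result to dnnlib.submit_run. For readers without the StyleGAN2
codebase at hand, this is a minimal sketch of the attribute-access dict the
snippets assume (the real class lives in dnnlib.util):

class EasyDict(dict):
    """Minimal sketch of dnnlib's EasyDict: a dict that allows attribute access."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
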
Example #2
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma,
        mirror_augment, metrics):
    train = EasyDict(run_func_name='training.training_loop.training_loop'
                     )  # Options for training loop.
    G = EasyDict(func_name='training.networks_stylegan2.G_main'
                 )  # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2'
                 )  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg'
                      )  # Options for generator loss.
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1'
                      )  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(
        size='8k', layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = float(
        os.environ['G_LR']) if 'G_LR' in os.environ else 0.002
    sched.D_lrate_base = float(
        os.environ['D_LR']) if 'D_LR' in os.environ else 0.002
    sched.G_lrate_base *= float(
        os.environ['G_LR_MULT']) if 'G_LR_MULT' in os.environ else 1.0
    sched.D_lrate_base *= float(
        os.environ['D_LR_MULT']) if 'D_LR_MULT' in os.environ else 1.0
    G_opt.beta2 = float(
        os.environ['G_BETA2']) if 'G_BETA2' in os.environ else 0.99
    D_opt.beta2 = float(
        os.environ['D_BETA2']) if 'D_BETA2' in os.environ else 0.99
    print('G_lrate: %f' % sched.G_lrate_base)
    print('D_lrate: %f' % sched.D_lrate_base)
    print('G_beta2: %f' % G_opt.beta2)
    print('D_beta2: %f' % D_opt.beta2)
    sched.minibatch_size_base = int(
        os.environ['BATCH_SIZE']) if 'BATCH_SIZE' in os.environ else num_gpus
    sched.minibatch_gpu_base = int(
        os.environ['BATCH_PER']) if 'BATCH_PER' in os.environ else 1
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = 'stylegan2'

    desc += '-' + dataset
    resolution = int(
        os.environ['RESOLUTION']) if 'RESOLUTION' in os.environ else 64
    dataset_args = EasyDict(tfrecord_dir=dataset, resolution=resolution)

    assert num_gpus in [
        1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192
    ]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10

    if 'FMAP_BASE' in os.environ:
        G.fmap_base = D.fmap_base = int(os.environ['FMAP_BASE']) << 10
    else:
        G.fmap_base = D.fmap_base = 16 << 10  # default
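    # Note: the FMAP_BASE block above always runs, so its result overrides the
    # config-based fmap_base shrink applied a few lines earlier.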

    print('G_fmap_base: %d' % G.fmap_base)
    print('D_fmap_base: %d' % D.fmap_base)

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {
            128: 0.0015,
            256: 0.002,
            512: 0.003,
            1024: 0.003
        }
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G,
                  D_args=D,
                  G_opt_args=G_opt,
                  D_opt_args=D_opt,
                  G_loss_args=G_loss,
                  D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args,
                  sched_args=sched,
                  grid_args=grid,
                  metric_arg_list=metrics,
                  tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
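
Example #2 reads most of its hyperparameters from environment variables through
the repetitive pattern float(os.environ['X']) if 'X' in os.environ else default.
A compact equivalent, sketched with a hypothetical helper name (env_float is not
part of the original code):

import os

def env_float(name, default):
    """Hypothetical helper: read a float hyperparameter from the environment."""
    return float(os.environ.get(name, default))

# Equivalent to the conditionals in Example #2:
# sched.G_lrate_base = env_float('G_LR', 0.002)
# sched.D_lrate_base = env_float('D_LR', 0.002)
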
Example #3
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma,
        mirror_augment, metrics, image_snapshot_ticks, network_snapshot_ticks,
        resume_pkl):
    train = EasyDict(run_func_name='training.training_loop.training_loop'
                     )  # Options for training loop.
    G = EasyDict(func_name='training.networks_stylegan2.G_main'
                 )  # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2'
                 )  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg'
                      )  # Options for generator loss.
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1'
                      )  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(
        size='8k', layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().
    try:
        pkl, kimg = misc.locate_latest_pkl(result_dir)
        train.resume_pkl = pkl
        train.resume_kimg = kimg
    except Exception:
        print('Could not find a valid snapshot, starting over')
    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = image_snapshot_ticks
    train.network_snapshot_ticks = network_snapshot_ticks
    if resume_pkl is not None:  # Keep any auto-located snapshot unless one is given explicitly.
        train.resume_pkl = resume_pkl
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = 'stylegan2'

    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {
            128: 0.0015,
            256: 0.002,
            512: 0.003,
            1024: 0.003
        }
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G,
                  D_args=D,
                  G_opt_args=G_opt,
                  D_opt_args=D_opt,
                  G_loss_args=G_loss,
                  D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args,
                  sched_args=sched,
                  grid_args=grid,
                  metric_arg_list=metrics,
                  tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
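
Example #3 relies on misc.locate_latest_pkl to auto-resume from the newest
snapshot. That helper is fork-specific and not shown here; the following is a
plausible sketch under the assumption that snapshots are written as
network-snapshot-<kimg>.pkl inside zero-padded run directories:

import glob
import os
import re

def locate_latest_pkl(result_dir):
    """Sketch: return (path, kimg) of the newest network snapshot."""
    pkls = sorted(glob.glob(os.path.join(result_dir, '0*', 'network-snapshot-*.pkl')))
    if not pkls:
        raise FileNotFoundError('no network snapshots under %s' % result_dir)
    latest = pkls[-1]
    kimg = int(re.search(r'network-snapshot-(\d+)\.pkl', latest).group(1))
    return latest, float(kimg)
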
Example #4
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma,
        mirror_augment, metrics):
    train = EasyDict(
        run_func_name=
        'training.training_loop.training_loop_mirror_v6.training_loop')
    G = EasyDict(func_name='training.networks.networks_stylegan2.G_main')
    D = EasyDict(
        func_name=
        'training.networks.networks_stylegan2_discriminator_new_rotation.D_stylegan2_new_rotaion'
    )  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
    D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
    G_loss = EasyDict(
        func_name=
        'training.loss.loss_G_new_rotation_squared_euclidean_10_interpolate_50_percent_uniform_dist.G_logistic_ns_pathreg'
    )
    D_loss = EasyDict(
        func_name=
        'training.loss.loss_D_logistic_r1_new_rotation_euclidean.D_logistic_r1_new_rotation'
    )
    sched = EasyDict()
    grid = EasyDict(size='1080p', layout='random')
    sc = dnnlib.SubmitConfig()
    tf_config = {'rnd.np_random_seed': 1000}

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4

    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = 'stylegan2'
    G.style_mixing_prob = None

    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id
    desc += '-squared_euclidean_10_interpolate_50_percent'
    desc += '-v7-256'

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {
            128: 0.0015,
            256: 0.002,
            512: 0.003,
            1024: 0.003
        }
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G,
                  D_args=D,
                  G_opt_args=G_opt,
                  D_opt_args=D_opt,
                  G_loss_args=G_loss,
                  D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args,
                  sched_args=sched,
                  grid_args=grid,
                  metric_arg_list=metrics,
                  tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
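
Several examples shrink the networks by setting fmap_base (8 << 10 is 8192), and
Example #5 below also passes fmap_decay and fmap_min/fmap_max. In the StyleGAN
family these parameters feed a per-resolution feature-map formula; this is a
sketch of that convention, not code taken from these examples:

def nf(stage, fmap_base=8 << 10, fmap_decay=1.0, fmap_min=1, fmap_max=512):
    """Approximate feature-map count for one resolution stage (StyleGAN convention)."""
    fmaps = fmap_base / (2.0 ** (stage * fmap_decay))
    return int(min(max(fmaps, fmap_min), fmap_max))
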
Example #5
def run(dataset,
        data_dir,
        result_dir,
        config_id,
        num_gpus,
        total_kimg,
        gamma,
        mirror_augment,
        metrics,
        resume_pkl,
        fmap_decay=0.15,
        D_lambda=1,
        C_lambda=1,
        MI_lambda=1,
        cls_alpha=0,
        n_samples_per=10,
        module_list=None,
        single_const=True,
        model_type='spatial_biased',
        phi_blurry=0.5,
        latent_type='uniform'):

    train = EasyDict(
        run_func_name='training.training_loop_vid.training_loop_vid'
    )  # Options for training loop.

    D_global_size = 0

    module_list = _str_to_list(module_list)
    key_ls, size_ls, count_dlatent_size, _ = split_module_names(module_list)
    for i, key in enumerate(key_ls):
        if key.startswith('D_global'):
            D_global_size += size_ls[i]
            break
    print('D_global_size:', D_global_size)
    print('key_ls:', key_ls)
    print('size_ls:', size_ls)
    print('count_dlatent_size:', count_dlatent_size)

    if model_type == 'vid_model':
        G = EasyDict(func_name='training.vid_networks.G_main_vid',
                     synthesis_func='G_synthesis_vid_modular',
                     fmap_min=16,
                     fmap_max=512,
                     fmap_decay=fmap_decay,
                     latent_size=count_dlatent_size,
                     dlatent_size=count_dlatent_size,
                     D_global_size=D_global_size,
                     module_list=module_list,
                     single_const=single_const,
                     use_noise=True)  # Options for generator network.
        I = EasyDict(func_name='training.vid_networks.vid_head',
                     dlatent_size=count_dlatent_size,
                     D_global_size=D_global_size,
                     fmap_max=512)
        D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2',
                     fmap_max=512)  # Options for discriminator network.
        I_info = EasyDict()
        desc = model_type
    elif model_type == 'vid_with_cls':
        G = EasyDict(func_name='training.vid_networks.G_main_vid',
                     synthesis_func='G_synthesis_vid_modular',
                     fmap_min=16,
                     fmap_max=512,
                     fmap_decay=fmap_decay,
                     latent_size=count_dlatent_size,
                     dlatent_size=count_dlatent_size,
                     D_global_size=D_global_size,
                     module_list=module_list,
                     single_const=single_const,
                     use_noise=True)  # Options for generator network.
        I = EasyDict(func_name='training.vid_networks.vid_head',
                     dlatent_size=count_dlatent_size,
                     D_global_size=D_global_size,
                     fmap_max=512)
        I_info = EasyDict(
            func_name='training.info_gan_networks.info_gan_head_cls',
            dlatent_size=count_dlatent_size,
            D_global_size=D_global_size,
            fmap_decay=fmap_decay,
            fmap_min=16,
            fmap_max=512)
        D = EasyDict(
            func_name='training.info_gan_networks.D_info_gan_stylegan2',
            fmap_max=512)  # Options for discriminator network.
        desc = model_type
    elif model_type == 'vid_naive_cluster_model':
        G = EasyDict(func_name='training.vid_networks.G_main_vid',
                     synthesis_func='G_synthesis_vid_modular',
                     fmap_min=16,
                     fmap_max=512,
                     fmap_decay=fmap_decay,
                     latent_size=count_dlatent_size,
                     dlatent_size=count_dlatent_size,
                     D_global_size=D_global_size,
                     module_list=module_list,
                     single_const=single_const,
                     use_noise=True)  # Options for generator network.
        I = EasyDict(func_name='training.vid_networks.vid_naive_cluster_head',
                     dlatent_size=count_dlatent_size,
                     D_global_size=D_global_size,
                     fmap_max=512)  # Options for estimator network.
        D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2',
                     fmap_max=512)  # Options for discriminator network.
        I_info = EasyDict()
        desc = model_type
    elif model_type == 'vid_blurry_model':
        G = EasyDict(func_name='training.vid_networks.G_main_vid',
                     synthesis_func='G_synthesis_vid_modular',
                     fmap_min=16,
                     fmap_max=512,
                     fmap_decay=fmap_decay,
                     latent_size=count_dlatent_size,
                     dlatent_size=count_dlatent_size,
                     D_global_size=D_global_size,
                     module_list=module_list,
                     single_const=single_const,
                     use_noise=True)  # Options for generator network.
        I = EasyDict(func_name='training.vid_networks.vid_naive_cluster_head',
                     dlatent_size=count_dlatent_size,
                     D_global_size=D_global_size,
                     fmap_max=512)  # Options for estimator network.
        D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2',
                     fmap_max=512)  # Options for discriminator network.
        I_info = EasyDict()
        desc = model_type
    else:
        raise ValueError('Unsupported model type: ' + model_type)

    G_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for discriminator optimizer.
    I_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for estimator optimizer.
    if model_type == 'vid_model':
        G_loss = EasyDict(
            func_name='training.loss_vid.G_logistic_ns_vid',
            D_global_size=D_global_size,
            C_lambda=C_lambda,
            latent_type=latent_type)  # Options for generator loss.
        D_loss = EasyDict(
            func_name='training.loss_vid.D_logistic_r1_vid',
            D_global_size=D_global_size,
            latent_type=latent_type)  # Options for discriminator loss.
        I_loss = EasyDict(func_name='training.loss_vid.I_vid',
                          D_global_size=D_global_size,
                          latent_type=latent_type,
                          C_lambda=C_lambda,
                          MI_lambda=MI_lambda)  # Options for estimator loss.
    elif model_type == 'vid_with_cls':
        G_loss = EasyDict(
            func_name='training.loss_vid.G_logistic_ns_vid',
            D_global_size=D_global_size,
            C_lambda=C_lambda,
            cls_alpha=cls_alpha,
            latent_type=latent_type)  # Options for generator loss.
        D_loss = EasyDict(
            func_name='training.loss_vid.D_logistic_r1_info_gan_vid',
            D_global_size=D_global_size,
            latent_type=latent_type)  # Options for discriminator loss.
        I_loss = EasyDict(func_name='training.loss_vid.I_vid',
                          D_global_size=D_global_size,
                          latent_type=latent_type,
                          C_lambda=C_lambda,
                          MI_lambda=MI_lambda)  # Options for estimator loss.
    elif model_type == 'vid_naive_cluster_model':
        G_loss = EasyDict(
            func_name='training.loss_vid.G_logistic_ns_vid_naive_cluster',
            D_global_size=D_global_size,
            C_lambda=C_lambda,
            latent_type=latent_type)  # Options for generator loss.
        D_loss = EasyDict(
            func_name='training.loss_vid.D_logistic_r1_vid',
            D_global_size=D_global_size,
            latent_type=latent_type)  # Options for discriminator loss.
        I_loss = EasyDict()  # Options for estimator loss.
        I_opt = EasyDict()
    elif model_type == 'vid_blurry_model':
        G_loss = EasyDict(
            func_name='training.loss_vid.G_logistic_ns_vid_naive_cluster',
            D_global_size=D_global_size,
            C_lambda=C_lambda,
            latent_type=latent_type)  # Options for generator loss.
        D_loss = EasyDict(
            func_name='training.loss_vid.D_logistic_r1_vid',
            D_global_size=D_global_size,
            latent_type=latent_type)  # Options for discriminator loss.
        I_loss = EasyDict(func_name='training.loss_vid.I_vid_blurry',
                          D_global_size=D_global_size,
                          latent_type=latent_type,
                          C_lambda=C_lambda,
                          MI_lambda=MI_lambda,
                          phi=phi_blurry)  # Options for estimator loss.
    else:
        raise ValueError('Unsupported loss type: ' + model_type)

    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(
        size='1080p',
        layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = sched.I_lrate_base = 0.002
    sched.minibatch_size_base = 16
    sched.minibatch_gpu_base = 8
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]

    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset, max_label_size='full')

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        # I.fmap_base = G.fmap_base = D.fmap_base = 8 << 10
        I.fmap_base = G.fmap_base = D.fmap_base = 2 << 8

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(
        G_args=G,
        D_args=D,
        I_args=I,
        I_info_args=I_info,
        G_opt_args=G_opt,
        D_opt_args=D_opt,
        I_opt_args=I_opt,
        G_loss_args=G_loss,
        D_loss_args=D_loss,
        I_loss_args=I_loss,
        use_vid_head=(model_type == 'vid_model'),
        use_vid_head_with_cls=(model_type == 'vid_with_cls'),
        use_vid_naive_cluster=(model_type == 'vid_naive_cluster_model'),
        use_vid_blurry=(model_type == 'vid_blurry_model'),
        traversal_grid=True)
    n_continuous = 0
    for i, key in enumerate(key_ls):
        m_name = key.split('-')[0]
        if (m_name in LATENT_MODULES) and (not m_name == 'D_global'):
            n_continuous += size_ls[i]

    kwargs.update(dataset_args=dataset_args,
                  sched_args=sched,
                  grid_args=grid,
                  metric_arg_list=metrics,
                  tf_config=tf_config,
                  resume_pkl=resume_pkl,
                  n_discrete=D_global_size,
                  n_continuous=n_continuous,
                  n_samples_per=n_samples_per,
                  C_lambda=C_lambda,
                  MI_lambda=MI_lambda)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
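
The helpers _str_to_list and split_module_names used by Examples #1 and #5 are
not shown. Judging from how their results are consumed (keys such as
'D_global-3' whose numeric suffix is a latent size), they plausibly behave like
this hypothetical sketch; the fourth return value is unused above and left as
None:

def _str_to_list(s):
    """Hypothetical: parse '[D_global-3,C_global-10]' into ['D_global-3', 'C_global-10']."""
    return [x.strip() for x in s.strip('[]').split(',') if x.strip()]

def split_module_names(module_list):
    """Hypothetical: split 'name-size' entries into keys, sizes, and the total latent size."""
    key_ls, size_ls = [], []
    for entry in module_list:
        key_ls.append(entry)
        size_ls.append(int(entry.rsplit('-', 1)[1]))
    return key_ls, size_ls, sum(size_ls), None
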
Example #6
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma,
        mirror_augment, metrics):
    train = EasyDict(run_func_name='training.training_loop.training_loop'
                     )  # Options for training loop.
    G = EasyDict(func_name='training.networks_stylegan2.G_main'
                 )  # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2'
                 )  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg'
                      )  # Options for generator loss.
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1'
                      )  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(
        size='8k', layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = 'stylegan2'

    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id not in ['config-f', 'config-l']:
        G.fmap_base = D.fmap_base = 8 << 10

    # Config L: Generator training only
    if config_id == 'config-l':
        # Use labels as latent vector input
        dataset_args.max_label_size = "full"
        # Deactivate methods specific for GAN training
        G.truncation_psi = None
        G.randomize_noise = False
        G.style_mixing_prob = None
        G.dlatent_avg_beta = None
        G.conditional_labels = False
        # Refinement training
        G_loss.func_name = 'training.loss.G_reconstruction'
        train.run_func_name = 'training.training_loop.training_loop_refinement'
        # G.freeze_layers = ["mapping", "noise"]#, "4x4", "8x8", "16x16", "32x32"]
        # Network for refinement
        train.resume_pkl = "nets/stylegan2-ffhq-config-f.pkl"  # TODO init net
        train.resume_with_new_nets = True
        # Maintenance tasks
        sched.tick_kimg_base = 1  # 1 tick = 1000 images (metric update)
        sched.tick_kimg_dict = {}
        train.image_snapshot_ticks = 5  # Save every 5000 images
        train.network_snapshot_ticks = 10  # Save every 10000 images
        # Training parameters
        sched.G_lrate_base = 1e-4
        train.G_smoothing_kimg = 0.0
        sched.minibatch_size_base = sched.minibatch_gpu_base * num_gpus  # 4 per GPU

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {
            128: 0.0015,
            256: 0.002,
            512: 0.003,
            1024: 0.003
        }
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G,
                  D_args=D,
                  G_opt_args=G_opt,
                  D_opt_args=D_opt,
                  G_loss_args=G_loss,
                  D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args,
                  sched_args=sched,
                  grid_args=grid,
                  metric_arg_list=metrics,
                  tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
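
A hypothetical invocation of Example #6's config-l refinement path (all argument
values are illustrative, and 'fid50k' assumes the usual key in metric_defaults):

run(dataset='ffhq', data_dir='datasets', result_dir='results',
    config_id='config-l', num_gpus=1, total_kimg=25000,
    gamma=None, mirror_augment=True, metrics=['fid50k'])
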
Example #7
def run(
    dataset,
    data_dir,
    result_dir,
    config_id,
    num_gpus,
    total_kimg,
    gamma,
    mirror_augment,
    metrics,
    resume_pkl=None,
    resume_kimg=None,
):
    train = EasyDict(
        run_func_name="training.training_loop.training_loop",
        # training resume options:
        resume_pkl=resume_pkl,  # Network pickle to resume training from, None = train from scratch.
        resume_kimg=resume_kimg,  # Assumed training progress at the beginning. Affects reporting and training schedule.
    )  # Options for training loop.
    G = EasyDict(func_name="training.networks_stylegan2.G_main"
                 )  # Options for generator network.
    D = EasyDict(func_name="training.networks_stylegan2.D_stylegan2"
                 )  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(func_name="training.loss.G_logistic_ns_pathreg"
                      )  # Options for generator loss.
    D_loss = EasyDict(func_name="training.loss.D_logistic_r1"
                      )  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(
        size="8k", layout="random")  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {"rnd.np_random_seed": 1000}  # Options for tflib.init_tf().

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = "stylegan2"

    desc += "-" + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += "-%dgpu" % num_gpus

    assert config_id in _valid_configs
    desc += "-" + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != "config-f":
        G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith("config-e"):
        D_loss.gamma = 100
        if "Gorig" in config_id:
            G.architecture = "orig"
        if "Gskip" in config_id:
            G.architecture = "skip"  # (default)
        if "Gresnet" in config_id:
            G.architecture = "resnet"
        if "Dorig" in config_id:
            D.architecture = "orig"
        if "Dskip" in config_id:
            D.architecture = "skip"
        if "Dresnet" in config_id:
            D.architecture = "resnet"  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ["config-a", "config-b", "config-c", "config-d"]:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {
            128: 0.0015,
            256: 0.002,
            512: 0.003,
            1024: 0.003,
        }
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = "G_synthesis_stylegan_revised"
        D.func_name = "training.networks_stylegan2.D_stylegan"

    # Configs A-C: Disable path length regularization.
    if config_id in ["config-a", "config-b", "config-c"]:
        G_loss = EasyDict(func_name="training.loss.G_logistic_ns")

    # Configs A-B: Disable lazy regularization.
    if config_id in ["config-a", "config-b"]:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == "config-a":
        G = EasyDict(func_name="training.networks_stylegan.G_style")
        D = EasyDict(func_name="training.networks_stylegan.D_basic")

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(
        G_args=G,
        D_args=D,
        G_opt_args=G_opt,
        D_opt_args=D_opt,
        G_loss_args=G_loss,
        D_loss_args=D_loss,
    )
    kwargs.update(
        dataset_args=dataset_args,
        sched_args=sched,
        grid_args=grid,
        metric_arg_list=metrics,
        tf_config=tf_config,
    )
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
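
Every example ends in dnnlib.submit_run(**kwargs). Stripped of its run-directory
and logging setup, the core effect is to resolve run_func_name to a function and
call it with the remaining keyword arguments; a simplified sketch under that
assumption:

import importlib

def submit_run_sketch(submit_config, run_func_name, **run_func_kwargs):
    """Simplified stand-in for dnnlib.submit_run: resolve and invoke the training loop."""
    module_name, func_name = run_func_name.rsplit('.', 1)
    func = getattr(importlib.import_module(module_name), func_name)
    func(**run_func_kwargs)
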
Example #8
def run(config_id, gamma, height, width, cond):
    train = EasyDict(run_func_name='training.diagnostic.create_initial_pkl'
                     )  # Options for training loop.
    G = EasyDict(func_name='training.networks_stylegan2.G_main'
                 )  # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2'
                 )  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99,
                     epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg'
                      )  # Options for generator loss.
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1'
                      )  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(
        size='8k', layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    sched.minibatch_size_base = 192
    sched.minibatch_gpu_base = 3
    D_loss.gamma = 10
    desc = 'stylegan2'

    dataset_args = EasyDict()  # (tfrecord_dir=dataset)

    if cond:
        desc += '-cond'
        dataset_args.max_label_size = 'full'  # conditioned on full label

    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {
            128: 0.0015,
            256: 0.002,
            512: 0.003,
            1024: 0.003
        }
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    if gamma is not None:
        D_loss.gamma = gamma

    G.update(resolution_h=height)
    G.update(resolution_w=width)
    D.update(resolution_h=height)
    D.update(resolution_w=width)

    sc.submit_target = dnnlib.SubmitTarget.DIAGNOSTIC
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G,
                  D_args=D,
                  tf_config=tf_config,
                  config_id=config_id,
                  resolution_h=height,
                  resolution_w=width)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_desc = desc
    # dnnlib.submit_run(**kwargs)
    dnnlib.submit_diagnostic(**kwargs)
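
Example #8 only materializes an initial network pickle for a given (possibly
non-square) resolution instead of launching training. A hypothetical call for a
conditional 384x640 model:

run(config_id='config-f', gamma=None, height=384, width=640, cond=True)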