def run(dataset,
        data_dir,
        result_dir,
        config_id,
        num_gpus,
        total_kimg,
        mirror_augment,
        metrics,
        resume_G_pkl,
        n_batch=2,
        n_batch_per_gpu=1,
        D_global_size=0,
        C_global_size=10,
        model_type='hd_dis_model',
        latent_type='uniform',
        resume_pkl=None,
        n_samples_per=4,
        D_lambda=0,
        C_lambda=1,
        epsilon_in_loss=3,
        random_eps=True,
        M_lrmul=0.1,
        resolution_manual=1024,
        pretrained_type='with_stylegan2',
        traj_lambda=None,
        level_I_kimg=1000,
        use_level_training=False,
        resume_kimg=0,
        use_std_in_m=False,
        prior_latent_size=512,
        M_mapping_fmaps=512,
        hyperplane_lambda=1,
        hyperdir_lambda=1):
    """Configure and submit an HD-disentanglement training run.

    Builds the network, optimizer, loss, schedule and grid option dicts
    for `training.training_loop_hd.training_loop_hd`, then hands them to
    `dnnlib.submit_run` for local execution.

    Args:
        dataset: TFRecord dataset name under `data_dir`.
        data_dir: Root directory containing the dataset.
        result_dir: Root directory for run outputs.
        config_id: One of `_valid_configs` (e.g. 'config-f').
        num_gpus: Number of GPUs; must be 1, 2, 4 or 8.
        total_kimg: Training length in thousands of images.
        mirror_augment: Whether to enable mirror augmentation.
        metrics: List of metric names; each must be a key of `metric_defaults`.
        resume_G_pkl: Pickle with the pretrained generator to resume from.
        model_type: 'hd_dis_model', 'hd_dis_model_with_cls', or
            'hd_hyperplane' — selects network/loss variants below.
        (Remaining keyword arguments are forwarded into the option dicts.)

    Raises:
        AssertionError: If `num_gpus` or `config_id` is invalid.
    """
    # Options for training loop with pretrained HD.
    train = EasyDict(run_func_name='training.training_loop_hd.training_loop_hd')

    use_hyperplane = (model_type == 'hd_hyperplane')

    # Options for dismapper network M. The hyperplane variant uses a
    # different network function and takes no `mapping_fmaps` argument.
    if use_hyperplane:
        M = EasyDict(
            func_name='training.hd_networks.net_M_hyperplane',
            C_global_size=C_global_size,
            D_global_size=D_global_size,
            latent_size=prior_latent_size,
            mapping_lrmul=M_lrmul,
            use_std_in_m=use_std_in_m)
    else:
        M = EasyDict(
            func_name='training.hd_networks.net_M',
            C_global_size=C_global_size,
            D_global_size=D_global_size,
            latent_size=prior_latent_size,
            mapping_fmaps=M_mapping_fmaps,
            mapping_lrmul=M_lrmul,
            use_std_in_m=use_std_in_m)

    # Options for recognizor network I (identical for all model types).
    I = EasyDict(
        func_name='training.hd_networks.net_I',
        C_global_size=C_global_size,
        D_global_size=D_global_size)

    # Auxiliary classifier head, only for the with-cls model type.
    if model_type == 'hd_dis_model_with_cls':
        I_info = EasyDict(func_name='training.hd_networks.net_I_info',
                          C_global_size=C_global_size,
                          D_global_size=D_global_size)
    else:
        I_info = EasyDict()

    # Options for discriminator optimizer.
    I_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)

    # Options for discriminator loss. Both variants share these arguments;
    # the hyperplane loss additionally takes `hyperdir_lambda`.
    I_loss = EasyDict(
        latent_type=latent_type,
        D_global_size=D_global_size,
        C_global_size=C_global_size,
        D_lambda=D_lambda,
        C_lambda=C_lambda,
        epsilon=epsilon_in_loss,
        random_eps=random_eps,
        traj_lambda=traj_lambda,
        resolution_manual=resolution_manual,
        use_std_in_m=use_std_in_m,
        model_type=model_type,
        hyperplane_lambda=hyperplane_lambda,
        prior_latent_size=prior_latent_size)
    if use_hyperplane:
        I_loss.func_name = 'training.loss_hd.IandM_hyperplane_loss'
        I_loss.hyperdir_lambda = hyperdir_lambda
    else:
        I_loss.func_name = 'training.loss_hd.IandM_loss'

    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(
        size='1080p',
        layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.I_lrate_base = 0.002
    sched.minibatch_size_base = n_batch
    sched.minibatch_gpu_base = n_batch_per_gpu

    # Resolve metric names to their default argument dicts. Use a distinct
    # name so the `metrics` parameter is not shadowed by a local of a
    # different type.
    metric_arg_list = [metric_defaults[x] for x in metrics]

    desc = 'hd_disentanglement'
    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        I.fmap_base = 8 << 10

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True

    # Assemble the final keyword set for the training loop and submit.
    kwargs = EasyDict(train)
    kwargs.update(I_args=I,
                  M_args=M,
                  I_opt_args=I_opt,
                  I_loss_args=I_loss,
                  resume_G_pkl=resume_G_pkl)
    kwargs.update(dataset_args=dataset_args,
                  sched_args=sched,
                  grid_args=grid,
                  use_hd_with_cls=(model_type == 'hd_dis_model_with_cls'),
                  use_hyperplane=use_hyperplane,
                  metric_arg_list=metric_arg_list,
                  tf_config=tf_config,
                  resume_pkl=resume_pkl,
                  n_discrete=D_global_size,
                  n_continuous=C_global_size,
                  n_samples_per=n_samples_per,
                  resolution_manual=resolution_manual,
                  pretrained_type=pretrained_type,
                  level_I_kimg=level_I_kimg,
                  use_level_training=use_level_training,
                  resume_kimg=resume_kimg,
                  use_std_in_m=use_std_in_m,
                  prior_latent_size=prior_latent_size)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)