def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma, mirror_augment, metrics):
    train = EasyDict(run_func_name='training.training_loop.training_loop_mirror_v6_remove_half_fl_fr.training_loop')  # Options for training loop.
    G = EasyDict(func_name='training.networks.networks_stylegan2.G_main')  # Options for generator network.
    D = EasyDict(func_name='training.networks.networks_stylegan2_discriminator_new_rotation.D_stylegan2_new_rotaion')  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(func_name='training.loss.loss_G_new_rotation_squared_euclidean_10_interpolate_50_percent_uniform_dist_int_penalty.G_logistic_ns_pathreg')  # Options for generator loss.
    D_loss = EasyDict(func_name='training.loss.loss_D_logistic_r1_new_rotation_euclidean_square.D_logistic_r1_new_rotation')  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(size='1080p', layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    # train.resume_pkl = './results/00200-stylegan2-car_labels_v7_oversample_filter-2gpu-config-f-squared_euclidean_10_interpolate_50_percent_int_reg-256/network-snapshot-000887.pkl'
    # train.resume_kimg = 887.2
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = 'stylegan2'
    G.style_mixing_prob = None

    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id
    desc += '-squared_euclidean_10_interpolate_50_percent_int_reg_remove_half_fl_fr_no_noise_square'
    desc += '-256'

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)

def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma, mirror_augment, metrics):
    train = EasyDict(run_func_name='training.training_loop.training_loop')  # Options for training loop.
    G = EasyDict(func_name='training.networks_stylegan2.G_main')  # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2')  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg')  # Options for generator loss.
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1')  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(size='8k', layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 2
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = 'stylegan2'

    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)

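# For reference, a minimal sketch of how this entry point might be invoked
# directly; in the original codebase these arguments are normally assembled by
# an argparse-driven main(). The dataset name, paths, and metric key below are
# illustrative placeholders ('fid50k' must exist as a key in metric_defaults):
#
#     run(dataset='ffhq',            # expects TFRecords under <data_dir>/<dataset>
#         data_dir='datasets',
#         result_dir='results',
#         config_id='config-f',
#         num_gpus=1,
#         total_kimg=25000,
#         gamma=None,                # None keeps the per-config R1 gamma set above
#         mirror_augment=True,
#         metrics=['fid50k'])
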
def create_model(config_id='config-f', gamma=None, height=512, width=512, cond=None, label_size=0):
    train = EasyDict(run_func_name='training.diagnostic.create_initial_pkl')  # Options for training loop.
    G = EasyDict(func_name='training.networks_stylegan2.G_main')  # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2')  # Options for discriminator network.
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1')  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    sched.minibatch_size_base = 192
    sched.minibatch_gpu_base = 3
    D_loss.gamma = 10
    desc = 'stylegan2'
    dataset_args = EasyDict()  # (tfrecord_dir=dataset)

    if cond:
        desc += '-cond'
        dataset_args.max_label_size = 'full'  # conditioned on full label

    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')  # (unused in create_model)

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    if gamma is not None:
        D_loss.gamma = gamma

    G.update(resolution_h=height, resolution_w=width)
    D.update(resolution_h=height, resolution_w=width)

    sc.submit_target = dnnlib.SubmitTarget.DIAGNOSTIC
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G, D_args=D, tf_config=tf_config, config_id=config_id,
                  resolution_h=height, resolution_w=width, label_size=label_size)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_diagnostic(**kwargs)
    # NB: the filename hardcodes 'config-f' irrespective of config_id.
    return f'network-initial-config-f-{height}x{width}-{label_size}.pkl'

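# A usage sketch for create_model(); the resolution below is illustrative.
# Since the returned filename hardcodes 'config-f', callers that rely on the
# name should pass config-f:
#
#     initial_pkl = create_model(config_id='config-f', height=640, width=384, label_size=0)
#     print(initial_pkl)  # -> 'network-initial-config-f-640x384-0.pkl'
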
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma, mirror_augment, metrics, resume_run_id=None):
    train = EasyDict(run_func_name='training.training_loop.training_loop')  # Options for training loop.
    G = EasyDict(func_name='training.networks_stylegan2.G_main')  # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2')  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg')  # Options for generator loss.
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1')  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(size='8k', layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    if resume_run_id is not None:
        # Resume from the run ID of a given results directory.
        ids = sorted(get_valid_runids(result_dir))

        if resume_run_id == 'recent':
            resume_run_id = ids[-1][0]
        else:
            try:
                resume_run_id = int(resume_run_id)
            except ValueError:
                raise RuntimeError('--resume argument is invalid (must be number, or "recent"): {}'.format(resume_run_id))

        try:
            rundir_name = next(x[1] for x in ids if x[0] == resume_run_id)
        except StopIteration:
            raise RuntimeError('Could not find results directory with run ID {} (options: {})'.format(resume_run_id, [x[0] for x in ids]))

        # Find the latest kimg & pkl file.
        rundir = os.path.join(result_dir, rundir_name)
        pkls = [name for name in os.listdir(rundir)
                if name.startswith('network-snapshot-') and name.endswith('.pkl')]
        kimgs = sorted([(int(pkl.replace('network-snapshot-', '').replace('.pkl', '')), pkl) for pkl in pkls],
                       key=lambda x: x[0])
        if len(kimgs) == 0:
            raise RuntimeError('No network-snapshot-[0-9].pkl files found in {}'.format(rundir))
        max_kimg = kimgs[-1][0]
        pkl_name = kimgs[-1][1]

        # Recover the elapsed wall-clock time from the log file.
        logfilepath = os.path.join(rundir, 'log.txt')
        with open(logfilepath, 'r') as f:
            logfile = f.read()
        for line in logfile.splitlines():
            if 'kimg {}'.format(max_kimg) in line:
                if 'time ' not in line:
                    raise RuntimeError('Invalid log file: {}'.format(logfilepath))
                line = line.split('time ')[1]
                if 'sec/tick' not in line:
                    raise RuntimeError('Invalid log file: {}'.format(logfilepath))
                line = line.split('sec/tick')[0].strip()

                # Parse the 'Nd Nh Nm Ns' duration format into seconds.
                total_seconds_formatted = line
                total_seconds = 0
                if 'd' in line:
                    arr = line.split('d')
                    days = int(arr[0].strip())
                    total_seconds += days * 24 * 60 * 60
                    line = arr[1]
                if 'h' in line:
                    arr = line.split('h')
                    hours = int(arr[0].strip())
                    total_seconds += hours * 60 * 60
                    line = arr[1]
                if 'm' in line:
                    arr = line.split('m')
                    mins = int(arr[0].strip())
                    total_seconds += mins * 60
                    line = arr[1]
                if 's' in line:
                    arr = line.split('s')
                    secs = int(arr[0].strip())
                    total_seconds += secs
                    line = arr[1]
                break

        # Set resume args for the training loop.
        train.resume_pkl = os.path.join(rundir, pkl_name)
        train.resume_kimg = max_kimg
        train.resume_time = total_seconds
        print('Resuming from run {}: kimg {}, time {}'.format(rundir_name, max_kimg, total_seconds_formatted))

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 1
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = 'stylegan2'

    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)

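# get_valid_runids() is called above but not defined in this section. The
# sketch below is a hypothetical reconstruction, assuming result
# subdirectories follow the '<run_id>-<desc>' naming produced by
# dnnlib.submit_run() (e.g. '00003-stylegan2-ffhq-1gpu-config-f'); if the real
# helper is defined elsewhere in the file, this version is redundant.
import os
import re

def get_valid_runids(result_dir):
    # Yield (run_id, dirname) for every subdirectory whose name starts with an
    # integer run ID followed by a dash.
    for name in sorted(os.listdir(result_dir)):
        match = re.match(r'^(\d+)-', name)
        if match and os.path.isdir(os.path.join(result_dir, name)):
            yield int(match.group(1)), name
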
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma, mirror_augment, metrics,
        resume_G_pkl=None, n_batch=2, n_batch_per_gpu=1, D_global_size=0, C_global_size=10,
        model_type='hd_dis_model', latent_type='uniform', resume_pkl=None, n_samples_per=4,
        D_lambda=0, C_lambda=1, epsilon_in_loss=3, random_eps=True, M_lrmul=0.1,
        resolution_manual=1024, pretrained_type='with_stylegan2', traj_lambda=None,
        level_I_kimg=1000, use_level_training=False, resume_kimg=0, use_std_in_m=False,
        prior_latent_size=512, stylegan2_dlatent_size=512, stylegan2_mapping_fmaps=512,
        M_mapping_fmaps=512, hyperplane_lambda=1, hyperdir_lambda=1):
    train = EasyDict(run_func_name='training.training_loop_hdwG.training_loop_hdwG')  # Options for training loop.
    G = EasyDict(func_name='training.hd_networks_stylegan2.G_main',
                 latent_size=prior_latent_size, dlatent_size=stylegan2_dlatent_size,
                 mapping_fmaps=stylegan2_mapping_fmaps, mapping_lrmul=M_lrmul,
                 style_mixing_prob=None, dlatent_avg_beta=None, truncation_psi=None,
                 normalize_latents=False, structure='fixed')  # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2')  # Options for discriminator network.

    # Select the latent-mapping network M and the inference network I per model type.
    if model_type == 'hd_hyperplane':
        M = EasyDict(func_name='training.hd_networks.net_M_hyperplane',
                     C_global_size=C_global_size, D_global_size=D_global_size,
                     latent_size=prior_latent_size, mapping_lrmul=M_lrmul,
                     use_std_in_m=use_std_in_m)
        I = EasyDict(func_name='training.hd_networks.net_I',
                     C_global_size=C_global_size, D_global_size=D_global_size)
    elif model_type == 'vc_gan_preprior':
        M = EasyDict(func_name='training.hd_networks.net_M_vc',
                     C_global_size=C_global_size, D_global_size=D_global_size,
                     latent_size=prior_latent_size, mapping_lrmul=M_lrmul,
                     use_std_in_m=use_std_in_m)
        I = EasyDict(func_name='training.hd_networks.net_I',
                     C_global_size=C_global_size, D_global_size=D_global_size)
    elif model_type == 'vc_gan':
        M = EasyDict(func_name='training.hd_networks.net_M_empty',
                     C_global_size=C_global_size, D_global_size=D_global_size,
                     latent_size=prior_latent_size, mapping_lrmul=M_lrmul,
                     use_std_in_m=use_std_in_m)
        I = EasyDict(func_name='training.hd_networks.net_I',
                     C_global_size=C_global_size, D_global_size=D_global_size)
        G.mapping_func = 'G_mapping_hd_dis_to_dlatent'
    else:
        M = EasyDict(func_name='training.hd_networks.net_M',
                     C_global_size=C_global_size, D_global_size=D_global_size,
                     latent_size=prior_latent_size, mapping_fmaps=M_mapping_fmaps,
                     mapping_lrmul=M_lrmul, use_std_in_m=use_std_in_m)
        I = EasyDict(func_name='training.hd_networks.net_I',
                     C_global_size=C_global_size, D_global_size=D_global_size)

    if model_type == 'hd_dis_model_with_cls':
        I_info = EasyDict(func_name='training.hd_networks.net_I_info',
                          C_global_size=C_global_size, D_global_size=D_global_size)
    else:
        I_info = EasyDict()

    I_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
    D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)

    if model_type == 'vc_gan':
        I_loss = EasyDict(func_name='training.loss_hdwG.IandG_vc_loss',
                          latent_type=latent_type, D_global_size=D_global_size,
                          C_global_size=C_global_size, D_lambda=D_lambda, C_lambda=C_lambda,
                          epsilon=epsilon_in_loss, random_eps=random_eps,
                          traj_lambda=traj_lambda, resolution_manual=resolution_manual,
                          use_std_in_m=use_std_in_m, model_type=model_type,
                          hyperplane_lambda=hyperplane_lambda,
                          prior_latent_size=prior_latent_size,
                          hyperdir_lambda=hyperdir_lambda)
    else:
        I_loss = EasyDict(func_name='training.loss_hdwG.IandMandG_hyperplane_loss',
                          latent_type=latent_type, D_global_size=D_global_size,
                          C_global_size=C_global_size, D_lambda=D_lambda, C_lambda=C_lambda,
                          epsilon=epsilon_in_loss, random_eps=random_eps,
                          traj_lambda=traj_lambda, resolution_manual=resolution_manual,
                          use_std_in_m=use_std_in_m, model_type=model_type,
                          hyperplane_lambda=hyperplane_lambda,
                          prior_latent_size=prior_latent_size,
                          hyperdir_lambda=hyperdir_lambda)

    D_loss = EasyDict(func_name='training.loss.D_logistic_r1')  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(size='1080p', layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = n_batch
    sched.minibatch_gpu_base = n_batch_per_gpu
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = 'hdwG_disentanglement'

    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        # G.fmap_base = D.fmap_base = 8 << 10
        if resolution_manual <= 256:
            I.fmap_base = 2 << 8
            G.fmap_base = 2 << 10
            D.fmap_base = 2 << 8
        else:
            I.fmap_base = 8 << 10
            G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        # sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.002
        # sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        sched.minibatch_size_base = n_batch  # (default)
        # sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = n_batch_per_gpu  # (default)
        # sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        # G.synthesis_func = 'hd_networks_stylegan2.G_synthesis_stylegan_revised'
        G.synthesis_func = 'G_synthesis_stylegan_revised_hd'
        # D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G, D_args=D, I_args=I, M_args=M,
                  I_opt_args=I_opt, D_opt_args=D_opt,
                  I_loss_args=I_loss, D_loss_args=D_loss,
                  resume_pkl=resume_pkl, resume_G_pkl=resume_G_pkl)
    kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid,
                  use_hd_with_cls=(model_type == 'hd_dis_model_with_cls'),
                  use_hyperplane=(model_type == 'hd_hyperplane'),
                  metric_arg_list=metrics, tf_config=tf_config,
                  n_discrete=D_global_size, n_continuous=C_global_size,
                  n_samples_per=n_samples_per, resolution_manual=resolution_manual,
                  pretrained_type=pretrained_type, level_I_kimg=level_I_kimg,
                  use_level_training=use_level_training, resume_kimg=resume_kimg,
                  use_std_in_m=use_std_in_m, prior_latent_size=prior_latent_size,
                  latent_type=latent_type)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)

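# A hypothetical invocation of this disentanglement variant; the dataset and
# pickle paths are placeholders, and resume_G_pkl points at a pretrained
# StyleGAN2 generator as suggested by the pretrained_type='with_stylegan2' default:
#
#     run(dataset='ffhq', data_dir='datasets', result_dir='results',
#         config_id='config-f', num_gpus=1, total_kimg=10000, gamma=None,
#         mirror_augment=False, metrics=['fid50k'],
#         model_type='vc_gan_preprior',
#         C_global_size=10,          # number of continuous latent factors
#         latent_type='uniform',
#         resume_G_pkl='nets/stylegan2-ffhq-config-f.pkl')  # placeholder path
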
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma, mirror_augment, metrics):
    train = EasyDict(run_func_name='training.training_loop.training_loop')  # Options for training loop.
    G = EasyDict(func_name='training.networks_stylegan2.G_main')  # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2')  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg')  # Options for generator loss.
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1')  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(size='8k', layout='random')  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}  # Options for tflib.init_tf().

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = 'stylegan2'

    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id not in ['config-f', 'config-l']:
        G.fmap_base = D.fmap_base = 8 << 10

    # Config L: Generator training only.
    if config_id == 'config-l':
        # Use labels as the latent vector input.
        dataset_args.max_label_size = "full"

        # Deactivate methods specific to GAN training.
        G.truncation_psi = None
        G.randomize_noise = False
        G.style_mixing_prob = None
        G.dlatent_avg_beta = None
        G.conditional_labels = False

        # Refinement training.
        G_loss.func_name = 'training.loss.G_reconstruction'
        train.run_func_name = 'training.training_loop.training_loop_refinement'
        # G.freeze_layers = ["mapping", "noise"]  # , "4x4", "8x8", "16x16", "32x32"

        # Network for refinement.
        train.resume_pkl = "nets/stylegan2-ffhq-config-f.pkl"  # TODO init net
        train.resume_with_new_nets = True

        # Maintenance tasks.
        sched.tick_kimg_base = 1  # 1 tick = 1,000 images (metrics update per tick)
        sched.tick_kimg_dict = {}
        train.image_snapshot_ticks = 5  # Save every 5,000 images.
        train.network_snapshot_ticks = 10  # Save every 10,000 images.

        # Training parameters.
        sched.G_lrate_base = 1e-4
        train.G_smoothing_kimg = 0.0
        sched.minibatch_size_base = sched.minibatch_gpu_base * num_gpus  # 4 per GPU

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip'  # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet'  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)

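# A hypothetical call for the 'config-l' refinement path above; it presumes a
# TFRecord dataset carrying full labels and the pretrained pickle at the
# hardcoded nets/stylegan2-ffhq-config-f.pkl. The dataset name is a placeholder:
#
#     run(dataset='ffhq_labeled',    # placeholder; must provide full labels
#         data_dir='datasets', result_dir='results', config_id='config-l',
#         num_gpus=2, total_kimg=1000, gamma=None, mirror_augment=False,
#         metrics=[])
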
def run(
    dataset,
    data_dir,
    result_dir,
    config_id,
    num_gpus,
    total_kimg,
    gamma,
    mirror_augment,
    metrics,
    resume_pkl=None,
    resume_kimg=None,
):
    train = EasyDict(
        run_func_name="training.training_loop.training_loop",
        # Training resume options:
        resume_pkl=resume_pkl,    # Network pickle to resume training from, None = train from scratch.
        resume_kimg=resume_kimg,  # Assumed training progress at the beginning. Affects reporting and training schedule.
    )  # Options for training loop.
    G = EasyDict(func_name="training.networks_stylegan2.G_main")  # Options for generator network.
    D = EasyDict(func_name="training.networks_stylegan2.D_stylegan2")  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)  # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)  # Options for discriminator optimizer.
    G_loss = EasyDict(func_name="training.loss.G_logistic_ns_pathreg")  # Options for generator loss.
    D_loss = EasyDict(func_name="training.loss.D_logistic_r1")  # Options for discriminator loss.
    sched = EasyDict()  # Options for TrainingSchedule.
    grid = EasyDict(size="8k", layout="random")  # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()  # Options for dnnlib.submit_run().
    tf_config = {"rnd.np_random_seed": 1000}  # Options for tflib.init_tf().

    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = "stylegan2"

    desc += "-" + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += "-%dgpu" % num_gpus

    assert config_id in _valid_configs
    desc += "-" + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != "config-f":
        G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith("config-e"):
        D_loss.gamma = 100
        if "Gorig" in config_id: G.architecture = "orig"
        if "Gskip" in config_id: G.architecture = "skip"  # (default)
        if "Gresnet" in config_id: G.architecture = "resnet"
        if "Dorig" in config_id: D.architecture = "orig"
        if "Dskip" in config_id: D.architecture = "skip"
        if "Dresnet" in config_id: D.architecture = "resnet"  # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ["config-a", "config-b", "config-c", "config-d"]:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        sched.minibatch_size_base = 32  # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4  # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = "G_synthesis_stylegan_revised"
        D.func_name = "training.networks_stylegan2.D_stylegan"

    # Configs A-C: Disable path length regularization.
    if config_id in ["config-a", "config-b", "config-c"]:
        G_loss = EasyDict(func_name="training.loss.G_logistic_ns")

    # Configs A-B: Disable lazy regularization.
    if config_id in ["config-a", "config-b"]:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == "config-a":
        G = EasyDict(func_name="training.networks_stylegan.G_style")
        D = EasyDict(func_name="training.networks_stylegan.D_basic")

    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)

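# Since this variant takes resume_pkl and resume_kimg explicitly, the helper
# below is a hypothetical convenience for resuming from the newest snapshot of
# a finished run, assuming the 'network-snapshot-<kimg>.pkl' naming used
# elsewhere in this file.
import os

def latest_snapshot(run_dir):
    # Return (pkl_path, kimg) for the newest network-snapshot-*.pkl in run_dir.
    pkls = [n for n in os.listdir(run_dir)
            if n.startswith("network-snapshot-") and n.endswith(".pkl")]
    if not pkls:
        raise RuntimeError("no network-snapshot-*.pkl files in {}".format(run_dir))
    kimg, name = max((int(n[len("network-snapshot-"):-len(".pkl")]), n) for n in pkls)
    return os.path.join(run_dir, name), kimg

# Hypothetical usage:
#     pkl, kimg = latest_snapshot("results/00002-stylegan2-ffhq-1gpu-config-f")
#     run("ffhq", "datasets", "results", "config-f", 1, 25000, None, True,
#         ["fid50k"], resume_pkl=pkl, resume_kimg=kimg)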