def prepare_train_eval(rank, world_size, run_name, train_config, model_config, hdf5_path_train):
    cfgs = dict2clsattr(train_config, model_config)
    prev_ada_p, step, best_step, best_fid, best_fid_checkpoint_path, mu, sigma, inception_model = \
        None, 0, 0, None, None, None, None, None

    if cfgs.distributed_data_parallel:
        print("Use GPU: {} for training.".format(rank))
        setup(rank, world_size)
        torch.cuda.set_device(rank)

    writer = SummaryWriter(log_dir=join('./logs', run_name)) if rank == 0 else None
    if rank == 0:
        logger = make_logger(run_name, None)
        logger.info('Run name : {run_name}'.format(run_name=run_name))
        logger.info(train_config)
        logger.info(model_config)
    else:
        logger = None

    ##### load dataset #####
    if rank == 0:
        logger.info('Load train datasets...')
    train_dataset = LoadDataset(cfgs.dataset_name, cfgs.data_path, train=True, download=True,
                                resize_size=cfgs.img_size, hdf5_path=hdf5_path_train,
                                random_flip=cfgs.random_flip_preprocessing)
    if cfgs.reduce_train_dataset < 1.0:
        num_train = int(cfgs.reduce_train_dataset * len(train_dataset))
        train_dataset, _ = torch.utils.data.random_split(train_dataset,
                                                         [num_train, len(train_dataset) - num_train])
    if rank == 0:
        logger.info('Train dataset size : {dataset_size}'.format(dataset_size=len(train_dataset)))

    if rank == 0:
        logger.info('Load {mode} datasets...'.format(mode=cfgs.eval_type))
    eval_mode = (cfgs.eval_type == 'train')
    eval_dataset = LoadDataset(cfgs.dataset_name, cfgs.data_path, train=eval_mode, download=True,
                               resize_size=cfgs.img_size, hdf5_path=None, random_flip=False)
    if rank == 0:
        logger.info('Eval dataset size : {dataset_size}'.format(dataset_size=len(eval_dataset)))

    if cfgs.distributed_data_parallel:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
        cfgs.batch_size = cfgs.batch_size // world_size
    else:
        train_sampler = None

    train_dataloader = DataLoader(train_dataset, batch_size=cfgs.batch_size, shuffle=(train_sampler is None),
                                  pin_memory=True, num_workers=cfgs.num_workers, sampler=train_sampler,
                                  drop_last=True)
    eval_dataloader = DataLoader(eval_dataset, batch_size=cfgs.batch_size, shuffle=False,
                                 pin_memory=True, num_workers=cfgs.num_workers, drop_last=False)

    ##### build model #####
    if rank == 0:
        logger.info('Build model...')
    module = __import__('models.{architecture}'.format(architecture=cfgs.architecture), fromlist=['something'])
    if rank == 0:
        logger.info('Modules are located on models.{architecture}.'.format(architecture=cfgs.architecture))
    Gen = module.Generator(cfgs.z_dim, cfgs.shared_dim, cfgs.img_size, cfgs.g_conv_dim, cfgs.g_spectral_norm,
                           cfgs.attention, cfgs.attention_after_nth_gen_block, cfgs.activation_fn,
                           cfgs.conditional_strategy, cfgs.num_classes, cfgs.g_init, cfgs.G_depth,
                           cfgs.mixed_precision).to(rank)
    Dis = module.Discriminator(cfgs.img_size, cfgs.d_conv_dim, cfgs.d_spectral_norm, cfgs.attention,
                               cfgs.attention_after_nth_dis_block, cfgs.activation_fn,
                               cfgs.conditional_strategy, cfgs.hypersphere_dim, cfgs.num_classes,
                               cfgs.nonlinear_embed, cfgs.normalize_embed, cfgs.d_init, cfgs.D_depth,
                               cfgs.mixed_precision).to(rank)

    if cfgs.ema:
        if rank == 0:
            logger.info('Prepare EMA for G with decay of {}.'.format(cfgs.ema_decay))
        Gen_copy = module.Generator(cfgs.z_dim, cfgs.shared_dim, cfgs.img_size, cfgs.g_conv_dim,
                                    cfgs.g_spectral_norm, cfgs.attention, cfgs.attention_after_nth_gen_block,
                                    cfgs.activation_fn, cfgs.conditional_strategy, cfgs.num_classes,
                                    initialize=False, G_depth=cfgs.G_depth,
                                    mixed_precision=cfgs.mixed_precision).to(rank)
        Gen_ema = ema(Gen, Gen_copy, cfgs.ema_decay, cfgs.ema_start)
    else:
        Gen_copy, Gen_ema = None, None

    if rank == 0:
        logger.info(count_parameters(Gen))
        logger.info(Gen)
        logger.info(count_parameters(Dis))
        logger.info(Dis)

    ### define loss functions and optimizers
    G_loss = {'vanilla': loss_dcgan_gen, 'least_square': loss_lsgan_gen,
              'hinge': loss_hinge_gen, 'wasserstein': loss_wgan_gen}
    D_loss = {'vanilla': loss_dcgan_dis, 'least_square': loss_lsgan_dis,
              'hinge': loss_hinge_dis, 'wasserstein': loss_wgan_dis}

    if cfgs.optimizer == "SGD":
        G_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, Gen.parameters()), cfgs.g_lr,
                                      momentum=cfgs.momentum, nesterov=cfgs.nesterov)
        D_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, Dis.parameters()), cfgs.d_lr,
                                      momentum=cfgs.momentum, nesterov=cfgs.nesterov)
    elif cfgs.optimizer == "RMSprop":
        G_optimizer = torch.optim.RMSprop(filter(lambda p: p.requires_grad, Gen.parameters()), cfgs.g_lr,
                                          momentum=cfgs.momentum, alpha=cfgs.alpha)
        D_optimizer = torch.optim.RMSprop(filter(lambda p: p.requires_grad, Dis.parameters()), cfgs.d_lr,
                                          momentum=cfgs.momentum, alpha=cfgs.alpha)
    elif cfgs.optimizer == "Adam":
        G_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Gen.parameters()), cfgs.g_lr,
                                       [cfgs.beta1, cfgs.beta2], eps=1e-6)
        D_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Dis.parameters()), cfgs.d_lr,
                                       [cfgs.beta1, cfgs.beta2], eps=1e-6)
    else:
        raise NotImplementedError

    if cfgs.LARS_optimizer:
        G_optimizer = LARS(optimizer=G_optimizer, eps=1e-8, trust_coef=0.001)
        D_optimizer = LARS(optimizer=D_optimizer, eps=1e-8, trust_coef=0.001)

    ##### load checkpoints if needed #####
    if cfgs.checkpoint_folder is None:
        checkpoint_dir = make_checkpoint_dir(cfgs.checkpoint_folder, run_name)
    else:
        when = "current" if cfgs.load_current is True else "best"
        if not exists(abspath(cfgs.checkpoint_folder)):
            raise NotADirectoryError
        checkpoint_dir = make_checkpoint_dir(cfgs.checkpoint_folder, run_name)
        g_checkpoint_dir = glob.glob(join(checkpoint_dir, "model=G-{when}-weights-step*.pth".format(when=when)))[0]
        d_checkpoint_dir = glob.glob(join(checkpoint_dir, "model=D-{when}-weights-step*.pth".format(when=when)))[0]
        Gen, G_optimizer, trained_seed, run_name, step, prev_ada_p = load_checkpoint(Gen, G_optimizer,
                                                                                     g_checkpoint_dir)
        Dis, D_optimizer, trained_seed, run_name, step, prev_ada_p, best_step, best_fid, best_fid_checkpoint_path = \
            load_checkpoint(Dis, D_optimizer, d_checkpoint_dir, metric=True)
        if rank == 0:
            logger = make_logger(run_name, None)
        if cfgs.ema:
            g_ema_checkpoint_dir = glob.glob(join(checkpoint_dir,
                                                  "model=G_ema-{when}-weights-step*.pth".format(when=when)))[0]
            Gen_copy = load_checkpoint(Gen_copy, None, g_ema_checkpoint_dir, ema=True)
            Gen_ema.source, Gen_ema.target = Gen, Gen_copy
        writer = SummaryWriter(log_dir=join('./logs', run_name)) if rank == 0 else None
        if cfgs.train_configs['train']:
            assert cfgs.seed == trained_seed, "Seed for sampling random numbers should be same!"
        if rank == 0:
            logger.info('Generator checkpoint is {}'.format(g_checkpoint_dir))
            logger.info('Discriminator checkpoint is {}'.format(d_checkpoint_dir))
        if cfgs.freeze_layers > -1:
            prev_ada_p, step, best_step, best_fid, best_fid_checkpoint_path = None, 0, 0, None, None

    ##### wrap models with DP and convert BN to Sync BN #####
    if world_size > 1:
        if cfgs.distributed_data_parallel:
            if cfgs.synchronized_bn:
                process_group = torch.distributed.new_group([w for w in range(world_size)])
                Gen = torch.nn.SyncBatchNorm.convert_sync_batchnorm(Gen, process_group)
                Dis = torch.nn.SyncBatchNorm.convert_sync_batchnorm(Dis, process_group)
                if cfgs.ema:
                    Gen_copy = torch.nn.SyncBatchNorm.convert_sync_batchnorm(Gen_copy, process_group)
            Gen = DDP(Gen, device_ids=[rank], broadcast_buffers=False, find_unused_parameters=True)
            Dis = DDP(Dis, device_ids=[rank], broadcast_buffers=False, find_unused_parameters=True)
            if cfgs.ema:
                Gen_copy = DDP(Gen_copy, device_ids=[rank], broadcast_buffers=False,
                               find_unused_parameters=True)
        else:
            Gen = DataParallel(Gen, output_device=rank)
            Dis = DataParallel(Dis, output_device=rank)
            if cfgs.ema:
                Gen_copy = DataParallel(Gen_copy, output_device=rank)
            if cfgs.synchronized_bn:
                Gen = convert_model(Gen).to(rank)
                Dis = convert_model(Dis).to(rank)
                if cfgs.ema:
                    Gen_copy = convert_model(Gen_copy).to(rank)

    ##### load the inception network and prepare first/second moments for calculating FID #####
    if cfgs.eval:
        inception_model = InceptionV3().to(rank)
        if world_size > 1 and cfgs.distributed_data_parallel:
            toggle_grad(inception_model, on=True)
            inception_model = DDP(inception_model, device_ids=[rank], broadcast_buffers=False,
                                  find_unused_parameters=True)
        elif world_size > 1 and cfgs.distributed_data_parallel is False:
            inception_model = DataParallel(inception_model, output_device=rank)

        mu, sigma = prepare_inception_moments(dataloader=eval_dataloader, generator=Gen,
                                              eval_mode=cfgs.eval_type, inception_model=inception_model,
                                              splits=1, run_name=run_name, logger=logger, device=rank)

    worker = make_worker(
        cfgs=cfgs,
        run_name=run_name,
        best_step=best_step,
        logger=logger,
        writer=writer,
        n_gpus=world_size,
        gen_model=Gen,
        dis_model=Dis,
        inception_model=inception_model,
        Gen_copy=Gen_copy,
        Gen_ema=Gen_ema,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        train_dataloader=train_dataloader,
        eval_dataloader=eval_dataloader,
        G_optimizer=G_optimizer,
        D_optimizer=D_optimizer,
        G_loss=G_loss[cfgs.adv_loss],
        D_loss=D_loss[cfgs.adv_loss],
        prev_ada_p=prev_ada_p,
        rank=rank,
        checkpoint_dir=checkpoint_dir,
        mu=mu,
        sigma=sigma,
        best_fid=best_fid,
        best_fid_checkpoint_path=best_fid_checkpoint_path,
    )

    if cfgs.train_configs['train']:
        step = worker.train(current_step=step, total_step=cfgs.total_step)

    if cfgs.eval:
        is_save = worker.evaluation(step=step, standing_statistics=cfgs.standing_statistics,
                                    standing_step=cfgs.standing_step)

    if cfgs.save_images:
        worker.save_images(is_generate=True, png=True, npz=True,
                           standing_statistics=cfgs.standing_statistics, standing_step=cfgs.standing_step)

    if cfgs.image_visualization:
        worker.run_image_visualization(nrow=cfgs.nrow, ncol=cfgs.ncol,
                                       standing_statistics=cfgs.standing_statistics,
                                       standing_step=cfgs.standing_step)

    if cfgs.k_nearest_neighbor:
        worker.run_nearest_neighbor(nrow=cfgs.nrow, ncol=cfgs.ncol,
                                    standing_statistics=cfgs.standing_statistics,
                                    standing_step=cfgs.standing_step)

    if cfgs.interpolation:
        assert cfgs.architecture in ["big_resnet", "biggan_deep"], \
            "StudioGAN does not support interpolation analysis except for big_resnet and biggan_deep."
        worker.run_linear_interpolation(nrow=cfgs.nrow, ncol=cfgs.ncol, fix_z=True, fix_y=False,
                                        standing_statistics=cfgs.standing_statistics,
                                        standing_step=cfgs.standing_step)
        worker.run_linear_interpolation(nrow=cfgs.nrow, ncol=cfgs.ncol, fix_z=False, fix_y=True,
                                        standing_statistics=cfgs.standing_statistics,
                                        standing_step=cfgs.standing_step)

    if cfgs.frequency_analysis:
        worker.run_frequency_analysis(num_images=len(train_dataset) // cfgs.num_classes,
                                      standing_statistics=cfgs.standing_statistics,
                                      standing_step=cfgs.standing_step)

    if cfgs.tsne_analysis:
        worker.run_tsne(dataloader=eval_dataloader,
                        standing_statistics=cfgs.standing_statistics,
                        standing_step=cfgs.standing_step)
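# Usage sketch (added for illustration; not part of the original source). This entry point
# follows the torch.multiprocessing convention of taking `rank` first, so a launcher along
# these lines could drive it. The config paths and run name below are hypothetical.
if __name__ == "__main__":
    import json
    import torch
    import torch.multiprocessing as mp

    train_config = json.load(open("configs/train_config.json"))   # hypothetical path
    model_config = json.load(open("configs/model_config.json"))   # hypothetical path
    world_size = torch.cuda.device_count()
    if world_size > 1:
        # one process per GPU; mp.spawn passes the process index as the first argument (rank)
        mp.spawn(prepare_train_eval,
                 args=(world_size, "example_run", train_config, model_config, None),
                 nprocs=world_size)
    else:
        prepare_train_eval(0, world_size, "example_run", train_config, model_config, None)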
def validation(encoder, decoder, val_loader, vocab_size, args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder.to(device)
    decoder.to(device)
    encoder.eval()
    decoder.eval()
    criterion = nn.CrossEntropyLoss().to(device)

    if args.mode == "val":
        encoder, decoder, _, _ = load_checkpoint(encoder, decoder, None, device, args, False)

    total_step = math.ceil(len(val_loader.dataset.caption_lengths) / val_loader.batch_sampler.batch_size)

    references = list()  # references (true captions) for calculating BLEU-4 score
    hypotheses = list()  # hypotheses (predictions)

    with torch.no_grad():
        for i, (imgs, caps, caplens, allcaps) in enumerate(val_loader):
            imgs = imgs.to(device)
            caps = caps.to(device)
            caplens = caplens.to(device)

            imgs = encoder(imgs)
            if args.model == "lstm":
                scores = decoder(imgs, caps)
                loss = criterion(scores.view(-1, vocab_size), caps.view(-1))
            elif args.model == "attention":
                scores, caps_sorted, decode_lengths, alphas, sort_ind = decoder(imgs, caps, caplens)
                targets = caps_sorted[:, 1:]
                scores_copy = scores.clone()
                scores = pack_padded_sequence(scores, decode_lengths, batch_first=True).data
                targets = pack_padded_sequence(targets, decode_lengths, batch_first=True).data
                loss = criterion(scores, targets)
                # doubly stochastic attention regularization
                loss += 1.0 * ((1.0 - alphas.sum(dim=1)) ** 2).mean()
                scores = scores_copy

            stats = "Step [%d/%d], Loss: %.4f, Perplexity: %5.4f" % (
                i + 1, total_step, loss.item(), np.exp(loss.item()))
            print("\r" + stats, end="")
            sys.stdout.flush()
            if (i + 1) % args.print_every == 0:
                print("\r" + stats)

            # References
            if args.model == "attention":
                allcaps = allcaps[sort_ind]  # because images were sorted in the decoder
            for j in range(allcaps.shape[0]):
                img_caps = allcaps[j].tolist()
                img_captions = list(
                    map(lambda c: [w for w in c
                                   if w not in {val_loader.dataset.vocab("<start>"),
                                                val_loader.dataset.vocab("<pad>")}],
                        img_caps))
                references.append(img_captions)

            # Hypotheses
            _, preds = torch.max(scores, dim=2)
            preds = preds.tolist()
            temp_preds = list()
            for j, _ in enumerate(preds):
                if args.model == "attention":
                    temp_preds.append(preds[j][:decode_lengths[j]])  # remove pads
                elif args.model == "lstm":
                    temp_preds.append(preds[j])
            hypotheses.extend(temp_preds)

            assert len(references) == len(hypotheses)

    bleu4 = corpus_bleu(references, hypotheses)
    print("\n\nBLEU-4 - {}".format(bleu4))
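# Shape note (added for clarity; not part of the original source). nltk's corpus_bleu expects,
# per image, a list of reference token sequences plus one flat hypothesis sequence; integer
# word ids work as tokens because BLEU only tests n-gram equality. A minimal self-contained check:
from nltk.translate.bleu_score import corpus_bleu

refs = [[[2, 7, 9, 3], [2, 7, 12, 3]]]   # one image with two reference captions (token ids)
hyps = [[2, 7, 9, 3]]                    # one predicted caption for that image
print(corpus_bleu(refs, hyps))           # 1.0 here, since the hypothesis matches a reference exactly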
def load_framework(seed, disable_debugging_API, num_workers, config_path, checkpoint_folder,
                   reduce_train_dataset, standing_statistics, standing_step, freeze_layers, load_current,
                   eval_type, dataset_name, num_classes, img_size, data_path, architecture,
                   conditional_strategy, hypersphere_dim, nonlinear_embed, normalize_embed,
                   g_spectral_norm, d_spectral_norm, activation_fn, attention,
                   attention_after_nth_gen_block, attention_after_nth_dis_block, z_dim, shared_dim,
                   g_conv_dim, d_conv_dim, G_depth, D_depth, optimizer, batch_size, d_lr, g_lr,
                   momentum, nesterov, alpha, beta1, beta2, total_step, adv_loss, cr, g_init, d_init,
                   random_flip_preprocessing, prior, truncated_factor, ema, ema_decay, ema_start,
                   synchronized_bn, mixed_precision, hdf5_path_train, train_config, model_config, **_):
    if seed == 0:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        fix_all_seed(seed)
        cudnn.benchmark = False
        cudnn.deterministic = True

    if disable_debugging_API:
        torch.autograd.set_detect_anomaly(False)

    n_gpus = torch.cuda.device_count()
    default_device = torch.cuda.current_device()

    check_flag_0(batch_size, n_gpus, standing_statistics, ema, freeze_layers, checkpoint_folder)
    assert batch_size % n_gpus == 0, "batch_size should be divisible by the number of gpus"
    if n_gpus == 1:
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')

    prev_ada_p, step, best_step, best_fid, best_fid_checkpoint_path = None, 0, 0, None, None
    standing_step = standing_step if standing_statistics is True else batch_size

    run_name = make_run_name(RUN_NAME_FORMAT, framework=config_path.split('/')[-1][:-5], phase='train')

    logger = make_logger(run_name, None)
    writer = SummaryWriter(log_dir=join('./logs', run_name))
    logger.info('Run name : {run_name}'.format(run_name=run_name))
    logger.info(train_config)
    logger.info(model_config)

    logger.info('Loading train datasets...')
    train_dataset = LoadDataset(dataset_name, data_path, train=True, download=True, resize_size=img_size,
                                hdf5_path=hdf5_path_train, random_flip=random_flip_preprocessing)
    if reduce_train_dataset < 1.0:
        num_train = int(reduce_train_dataset * len(train_dataset))
        train_dataset, _ = torch.utils.data.random_split(train_dataset,
                                                         [num_train, len(train_dataset) - num_train])
    logger.info('Train dataset size : {dataset_size}'.format(dataset_size=len(train_dataset)))

    logger.info('Loading {mode} datasets...'.format(mode=eval_type))
    eval_mode = (eval_type == 'train')
    eval_dataset = LoadDataset(dataset_name, data_path, train=eval_mode, download=True,
                               resize_size=img_size, hdf5_path=None, random_flip=False)
    logger.info('Eval dataset size : {dataset_size}'.format(dataset_size=len(eval_dataset)))

    logger.info('Building model...')
    if architecture == "dcgan":
        assert img_size == 32, "Sorry, StudioGAN does not support dcgan models for generation of images larger than 32x32 resolution."

    module = __import__('models.{architecture}'.format(architecture=architecture), fromlist=['something'])
    logger.info('Modules are located on models.{architecture}'.format(architecture=architecture))
    Gen = module.Generator(z_dim, shared_dim, img_size, g_conv_dim, g_spectral_norm, attention,
                           attention_after_nth_gen_block, activation_fn, conditional_strategy,
                           num_classes, g_init, G_depth, mixed_precision).to(default_device)
    Dis = module.Discriminator(img_size, d_conv_dim, d_spectral_norm, attention,
                               attention_after_nth_dis_block, activation_fn, conditional_strategy,
                               hypersphere_dim, num_classes, nonlinear_embed, normalize_embed,
                               d_init, D_depth, mixed_precision).to(default_device)

    if ema:
        print('Preparing EMA for G with decay of {}'.format(ema_decay))
        Gen_copy = module.Generator(z_dim, shared_dim, img_size, g_conv_dim, g_spectral_norm, attention,
                                    attention_after_nth_gen_block, activation_fn, conditional_strategy,
                                    num_classes, initialize=False, G_depth=G_depth,
                                    mixed_precision=mixed_precision).to(default_device)
        Gen_ema = ema_(Gen, Gen_copy, ema_decay, ema_start)
    else:
        Gen_copy, Gen_ema = None, None

    logger.info(count_parameters(Gen))
    logger.info(Gen)
    logger.info(count_parameters(Dis))
    logger.info(Dis)

    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True,
                                  num_workers=num_workers, drop_last=True)
    eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size, shuffle=True, pin_memory=True,
                                 num_workers=num_workers, drop_last=False)

    G_loss = {'vanilla': loss_dcgan_gen, 'least_square': loss_lsgan_gen,
              'hinge': loss_hinge_gen, 'wasserstein': loss_wgan_gen}
    D_loss = {'vanilla': loss_dcgan_dis, 'least_square': loss_lsgan_dis,
              'hinge': loss_hinge_dis, 'wasserstein': loss_wgan_dis}

    if optimizer == "SGD":
        G_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, Gen.parameters()), g_lr,
                                      momentum=momentum, nesterov=nesterov)
        D_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, Dis.parameters()), d_lr,
                                      momentum=momentum, nesterov=nesterov)
    elif optimizer == "RMSprop":
        G_optimizer = torch.optim.RMSprop(filter(lambda p: p.requires_grad, Gen.parameters()), g_lr,
                                          momentum=momentum, alpha=alpha)
        D_optimizer = torch.optim.RMSprop(filter(lambda p: p.requires_grad, Dis.parameters()), d_lr,
                                          momentum=momentum, alpha=alpha)
    elif optimizer == "Adam":
        G_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Gen.parameters()), g_lr,
                                       [beta1, beta2], eps=1e-6)
        D_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Dis.parameters()), d_lr,
                                       [beta1, beta2], eps=1e-6)
    elif optimizer == "AdaBelief":
        G_optimizer = AdaBelief(filter(lambda p: p.requires_grad, Gen.parameters()), g_lr,
                                [beta1, beta2], eps=1e-12, rectify=False)
        D_optimizer = AdaBelief(filter(lambda p: p.requires_grad, Dis.parameters()), d_lr,
                                [beta1, beta2], eps=1e-12, rectify=False)
    else:
        raise NotImplementedError

    if checkpoint_folder is not None:
        when = "current" if load_current is True else "best"
        if not exists(abspath(checkpoint_folder)):
            raise NotADirectoryError
        checkpoint_dir = make_checkpoint_dir(checkpoint_folder, run_name)
        g_checkpoint_dir = glob.glob(join(checkpoint_dir, "model=G-{when}-weights-step*.pth".format(when=when)))[0]
        d_checkpoint_dir = glob.glob(join(checkpoint_dir, "model=D-{when}-weights-step*.pth".format(when=when)))[0]
        Gen, G_optimizer, trained_seed, run_name, step, prev_ada_p = load_checkpoint(Gen, G_optimizer,
                                                                                     g_checkpoint_dir)
        Dis, D_optimizer, trained_seed, run_name, step, prev_ada_p, best_step, best_fid, best_fid_checkpoint_path = \
            load_checkpoint(Dis, D_optimizer, d_checkpoint_dir, metric=True)
        logger = make_logger(run_name, None)
        if ema:
            g_ema_checkpoint_dir = glob.glob(join(checkpoint_dir,
                                                  "model=G_ema-{when}-weights-step*.pth".format(when=when)))[0]
            Gen_copy = load_checkpoint(Gen_copy, None, g_ema_checkpoint_dir, ema=True)
            Gen_ema.source, Gen_ema.target = Gen, Gen_copy
        writer = SummaryWriter(log_dir=join('./logs', run_name))
        if train_config['train']:
            assert seed == trained_seed, "seed for sampling random numbers should be same!"
        logger.info('Generator checkpoint is {}'.format(g_checkpoint_dir))
        logger.info('Discriminator checkpoint is {}'.format(d_checkpoint_dir))
        if freeze_layers > -1:
            prev_ada_p, step, best_step, best_fid, best_fid_checkpoint_path = None, 0, 0, None, None
    else:
        checkpoint_dir = make_checkpoint_dir(checkpoint_folder, run_name)

    if n_gpus > 1:
        Gen = DataParallel(Gen, output_device=default_device)
        Dis = DataParallel(Dis, output_device=default_device)
        if ema:
            Gen_copy = DataParallel(Gen_copy, output_device=default_device)
        if synchronized_bn:
            Gen = convert_model(Gen).to(default_device)
            Dis = convert_model(Dis).to(default_device)
            if ema:
                Gen_copy = convert_model(Gen_copy).to(default_device)

    if train_config['eval']:
        inception_model = InceptionV3().to(default_device)
        if n_gpus > 1:
            inception_model = DataParallel(inception_model, output_device=default_device)
        mu, sigma = prepare_inception_moments(dataloader=eval_dataloader, generator=Gen,
                                              eval_mode=eval_type, inception_model=inception_model,
                                              splits=1, run_name=run_name, logger=logger,
                                              device=default_device)
    else:
        mu, sigma, inception_model = None, None, None

    train_eval = Train_Eval(
        run_name=run_name,
        best_step=best_step,
        dataset_name=dataset_name,
        eval_type=eval_type,
        logger=logger,
        writer=writer,
        n_gpus=n_gpus,
        gen_model=Gen,
        dis_model=Dis,
        inception_model=inception_model,
        Gen_copy=Gen_copy,
        Gen_ema=Gen_ema,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        train_dataloader=train_dataloader,
        eval_dataloader=eval_dataloader,
        freeze_layers=freeze_layers,
        conditional_strategy=conditional_strategy,
        pos_collected_numerator=model_config['model']['pos_collected_numerator'],
        z_dim=z_dim,
        num_classes=num_classes,
        hypersphere_dim=hypersphere_dim,
        d_spectral_norm=d_spectral_norm,
        g_spectral_norm=g_spectral_norm,
        G_optimizer=G_optimizer,
        D_optimizer=D_optimizer,
        batch_size=batch_size,
        g_steps_per_iter=model_config['optimization']['g_steps_per_iter'],
        d_steps_per_iter=model_config['optimization']['d_steps_per_iter'],
        accumulation_steps=model_config['optimization']['accumulation_steps'],
        total_step=total_step,
        G_loss=G_loss[adv_loss],
        D_loss=D_loss[adv_loss],
        contrastive_lambda=model_config['loss_function']['contrastive_lambda'],
        margin=model_config['loss_function']['margin'],
        tempering_type=model_config['loss_function']['tempering_type'],
        tempering_step=model_config['loss_function']['tempering_step'],
        start_temperature=model_config['loss_function']['start_temperature'],
        end_temperature=model_config['loss_function']['end_temperature'],
        weight_clipping_for_dis=model_config['loss_function']['weight_clipping_for_dis'],
        weight_clipping_bound=model_config['loss_function']['weight_clipping_bound'],
        gradient_penalty_for_dis=model_config['loss_function']['gradient_penalty_for_dis'],
        gradient_penalty_lambda=model_config['loss_function']['gradient_penalty_lambda'],
        deep_regret_analysis_for_dis=model_config['loss_function']['deep_regret_analysis_for_dis'],
        regret_penalty_lambda=model_config['loss_function']['regret_penalty_lambda'],
        cr=cr,
        cr_lambda=model_config['loss_function']['cr_lambda'],
        bcr=model_config['loss_function']['bcr'],
        real_lambda=model_config['loss_function']['real_lambda'],
        fake_lambda=model_config['loss_function']['fake_lambda'],
        zcr=model_config['loss_function']['zcr'],
        gen_lambda=model_config['loss_function']['gen_lambda'],
        dis_lambda=model_config['loss_function']['dis_lambda'],
        sigma_noise=model_config['loss_function']['sigma_noise'],
        diff_aug=model_config['training_and_sampling_setting']['diff_aug'],
        ada=model_config['training_and_sampling_setting']['ada'],
        prev_ada_p=prev_ada_p,
        ada_target=model_config['training_and_sampling_setting']['ada_target'],
        ada_length=model_config['training_and_sampling_setting']['ada_length'],
        prior=prior,
        truncated_factor=truncated_factor,
        ema=ema,
        latent_op=model_config['training_and_sampling_setting']['latent_op'],
        latent_op_rate=model_config['training_and_sampling_setting']['latent_op_rate'],
        latent_op_step=model_config['training_and_sampling_setting']['latent_op_step'],
        latent_op_step4eval=model_config['training_and_sampling_setting']['latent_op_step4eval'],
        latent_op_alpha=model_config['training_and_sampling_setting']['latent_op_alpha'],
        latent_op_beta=model_config['training_and_sampling_setting']['latent_op_beta'],
        latent_norm_reg_weight=model_config['training_and_sampling_setting']['latent_norm_reg_weight'],
        default_device=default_device,
        print_every=train_config['print_every'],
        save_every=train_config['save_every'],
        checkpoint_dir=checkpoint_dir,
        evaluate=train_config['eval'],
        mu=mu,
        sigma=sigma,
        best_fid=best_fid,
        best_fid_checkpoint_path=best_fid_checkpoint_path,
        mixed_precision=mixed_precision,
        train_config=train_config,
        model_config=model_config,
    )

    if train_config['train']:
        step = train_eval.train(current_step=step, total_step=total_step)

    if train_config['eval']:
        is_save = train_eval.evaluation(step=step, standing_statistics=standing_statistics,
                                        standing_step=standing_step)

    if train_config['save_images']:
        train_eval.save_images(is_generate=True, png=True, npz=True,
                               standing_statistics=standing_statistics, standing_step=standing_step)

    if train_config['image_visualization']:
        train_eval.run_image_visualization(nrow=train_config['nrow'], ncol=train_config['ncol'],
                                           standing_statistics=standing_statistics,
                                           standing_step=standing_step)

    if train_config['k_nearest_neighbor']:
        train_eval.run_nearest_neighbor(nrow=train_config['nrow'], ncol=train_config['ncol'],
                                        standing_statistics=standing_statistics,
                                        standing_step=standing_step)

    if train_config['interpolation']:
        assert architecture in ["big_resnet", "biggan_deep"], \
            "Interpolation is not supported except for big_resnet and biggan_deep."
        train_eval.run_linear_interpolation(nrow=train_config['nrow'], ncol=train_config['ncol'],
                                            fix_z=True, fix_y=False,
                                            standing_statistics=standing_statistics,
                                            standing_step=standing_step)
        train_eval.run_linear_interpolation(nrow=train_config['nrow'], ncol=train_config['ncol'],
                                            fix_z=False, fix_y=True,
                                            standing_statistics=standing_statistics,
                                            standing_step=standing_step)

    if train_config['frequency_analysis']:
        train_eval.run_frequency_analysis(num_images=len(train_dataset) // num_classes,
                                          standing_statistics=standing_statistics,
                                          standing_step=standing_step)
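# Reference sketch (added; not part of the original source). The ema/ema_ helper used above is
# an exponential moving average over generator weights in StudioGAN-style codebases; the class
# below is a minimal illustration of that idea, assuming source and target share parameter names.
import torch


class EmaSketch(object):
    def __init__(self, source, target, decay=0.9999, start_iter=0):
        self.source, self.target = source, target
        self.decay, self.start_iter = decay, start_iter

    def update(self, iteration):
        # before start_iter, track the source exactly; afterwards, blend exponentially
        decay = 0.0 if iteration < self.start_iter else self.decay
        with torch.no_grad():
            source_params = dict(self.source.named_parameters())
            for name, target_param in self.target.named_parameters():
                target_param.copy_(decay * target_param + (1.0 - decay) * source_params[name])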
def prepare_train_eval(cfgs, hdf5_path_train, **_):
    if cfgs.seed == -1:
        cudnn.benchmark, cudnn.deterministic = True, False
    else:
        fix_all_seed(cfgs.seed)
        cudnn.benchmark, cudnn.deterministic = False, True

    n_gpus, default_device = torch.cuda.device_count(), torch.cuda.current_device()
    if n_gpus == 1:
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')

    if cfgs.disable_debugging_API:
        torch.autograd.set_detect_anomaly(False)

    check_flag_0(cfgs.batch_size, n_gpus, cfgs.freeze_layers, cfgs.checkpoint_folder,
                 cfgs.architecture, cfgs.img_size)

    run_name = make_run_name(RUN_NAME_FORMAT, framework=cfgs.config_path.split('/')[3][:-5], phase='train')

    prev_ada_p, step, best_step, best_fid, best_fid_checkpoint_path, mu, sigma, inception_model = \
        None, 0, 0, None, None, None, None, None

    logger = make_logger(run_name, None)
    writer = SummaryWriter(log_dir=join('./logs', run_name))
    logger.info('Run name : {run_name}'.format(run_name=run_name))
    logger.info(cfgs.train_configs)
    logger.info(cfgs.model_configs)

    ##### load dataset #####
    logger.info('Loading train datasets...')
    train_dataset = LoadDataset(cfgs.dataset_name, cfgs.data_path, train=True, download=True,
                                resize_size=cfgs.img_size, hdf5_path=hdf5_path_train,
                                random_flip=cfgs.random_flip_preprocessing)
    if cfgs.reduce_train_dataset < 1.0:
        num_train = int(cfgs.reduce_train_dataset * len(train_dataset))
        train_dataset, _ = torch.utils.data.random_split(train_dataset,
                                                         [num_train, len(train_dataset) - num_train])
    logger.info('Train dataset size : {dataset_size}'.format(dataset_size=len(train_dataset)))

    logger.info('Loading {mode} datasets...'.format(mode=cfgs.eval_type))
    eval_mode = (cfgs.eval_type == 'train')
    eval_dataset = LoadDataset(cfgs.dataset_name, cfgs.data_path, train=eval_mode, download=True,
                               resize_size=cfgs.img_size, hdf5_path=None, random_flip=False)
    logger.info('Eval dataset size : {dataset_size}'.format(dataset_size=len(eval_dataset)))

    train_dataloader = DataLoader(train_dataset, batch_size=cfgs.batch_size, shuffle=True, pin_memory=True,
                                  num_workers=cfgs.num_workers, drop_last=True)
    eval_dataloader = DataLoader(eval_dataset, batch_size=cfgs.batch_size, shuffle=True, pin_memory=True,
                                 num_workers=cfgs.num_workers, drop_last=False)

    ##### build model #####
    logger.info('Building model...')
    module = __import__('models.{architecture}'.format(architecture=cfgs.architecture), fromlist=['something'])
    logger.info('Modules are located on models.{architecture}'.format(architecture=cfgs.architecture))
    Gen = module.Generator(cfgs.z_dim, cfgs.shared_dim, cfgs.img_size, cfgs.g_conv_dim, cfgs.g_spectral_norm,
                           cfgs.attention, cfgs.attention_after_nth_gen_block, cfgs.activation_fn,
                           cfgs.conditional_strategy, cfgs.num_classes, cfgs.g_init, cfgs.G_depth,
                           cfgs.mixed_precision).to(default_device)
    Dis = module.Discriminator(cfgs.img_size, cfgs.d_conv_dim, cfgs.d_spectral_norm, cfgs.attention,
                               cfgs.attention_after_nth_dis_block, cfgs.activation_fn,
                               cfgs.conditional_strategy, cfgs.hypersphere_dim, cfgs.num_classes,
                               cfgs.nonlinear_embed, cfgs.normalize_embed, cfgs.d_init, cfgs.D_depth,
                               cfgs.mixed_precision).to(default_device)

    if cfgs.ema:
        print('Preparing EMA for G with decay of {}'.format(cfgs.ema_decay))
        Gen_copy = module.Generator(cfgs.z_dim, cfgs.shared_dim, cfgs.img_size, cfgs.g_conv_dim,
                                    cfgs.g_spectral_norm, cfgs.attention, cfgs.attention_after_nth_gen_block,
                                    cfgs.activation_fn, cfgs.conditional_strategy, cfgs.num_classes,
                                    initialize=False, G_depth=cfgs.G_depth,
                                    mixed_precision=cfgs.mixed_precision).to(default_device)
        Gen_ema = ema(Gen, Gen_copy, cfgs.ema_decay, cfgs.ema_start)
    else:
        Gen_copy, Gen_ema = None, None

    logger.info(count_parameters(Gen))
    logger.info(Gen)
    logger.info(count_parameters(Dis))
    logger.info(Dis)

    ### define loss functions and optimizers
    G_loss = {'vanilla': loss_dcgan_gen, 'least_square': loss_lsgan_gen,
              'hinge': loss_hinge_gen, 'wasserstein': loss_wgan_gen}
    D_loss = {'vanilla': loss_dcgan_dis, 'least_square': loss_lsgan_dis,
              'hinge': loss_hinge_dis, 'wasserstein': loss_wgan_dis}

    if cfgs.optimizer == "SGD":
        G_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, Gen.parameters()), cfgs.g_lr,
                                      momentum=cfgs.momentum, nesterov=cfgs.nesterov)
        D_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, Dis.parameters()), cfgs.d_lr,
                                      momentum=cfgs.momentum, nesterov=cfgs.nesterov)
    elif cfgs.optimizer == "RMSprop":
        G_optimizer = torch.optim.RMSprop(filter(lambda p: p.requires_grad, Gen.parameters()), cfgs.g_lr,
                                          momentum=cfgs.momentum, alpha=cfgs.alpha)
        D_optimizer = torch.optim.RMSprop(filter(lambda p: p.requires_grad, Dis.parameters()), cfgs.d_lr,
                                          momentum=cfgs.momentum, alpha=cfgs.alpha)
    elif cfgs.optimizer == "Adam":
        G_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Gen.parameters()), cfgs.g_lr,
                                       [cfgs.beta1, cfgs.beta2], eps=1e-6)
        D_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Dis.parameters()), cfgs.d_lr,
                                       [cfgs.beta1, cfgs.beta2], eps=1e-6)
    else:
        raise NotImplementedError

    ##### load checkpoints if needed #####
    if cfgs.checkpoint_folder is None:
        checkpoint_dir = make_checkpoint_dir(cfgs.checkpoint_folder, run_name)
    else:
        when = "current" if cfgs.load_current is True else "best"
        if not exists(abspath(cfgs.checkpoint_folder)):
            raise NotADirectoryError
        checkpoint_dir = make_checkpoint_dir(cfgs.checkpoint_folder, run_name)
        g_checkpoint_dir = glob.glob(join(checkpoint_dir, "model=G-{when}-weights-step*.pth".format(when=when)))[0]
        d_checkpoint_dir = glob.glob(join(checkpoint_dir, "model=D-{when}-weights-step*.pth".format(when=when)))[0]
        Gen, G_optimizer, trained_seed, run_name, step, prev_ada_p = load_checkpoint(Gen, G_optimizer,
                                                                                     g_checkpoint_dir)
        Dis, D_optimizer, trained_seed, run_name, step, prev_ada_p, best_step, best_fid, best_fid_checkpoint_path = \
            load_checkpoint(Dis, D_optimizer, d_checkpoint_dir, metric=True)
        logger = make_logger(run_name, None)
        if cfgs.ema:
            g_ema_checkpoint_dir = glob.glob(join(checkpoint_dir,
                                                  "model=G_ema-{when}-weights-step*.pth".format(when=when)))[0]
            Gen_copy = load_checkpoint(Gen_copy, None, g_ema_checkpoint_dir, ema=True)
            Gen_ema.source, Gen_ema.target = Gen, Gen_copy
        writer = SummaryWriter(log_dir=join('./logs', run_name))
        if cfgs.train_configs['train']:
            assert cfgs.seed == trained_seed, "seed for sampling random numbers should be same!"
        logger.info('Generator checkpoint is {}'.format(g_checkpoint_dir))
        logger.info('Discriminator checkpoint is {}'.format(d_checkpoint_dir))
        if cfgs.freeze_layers > -1:
            prev_ada_p, step, best_step, best_fid, best_fid_checkpoint_path = None, 0, 0, None, None

    ##### wrap models with DP and convert BN to Sync BN #####
    if n_gpus > 1:
        Gen = DataParallel(Gen, output_device=default_device)
        Dis = DataParallel(Dis, output_device=default_device)
        if cfgs.ema:
            Gen_copy = DataParallel(Gen_copy, output_device=default_device)
        if cfgs.synchronized_bn:
            Gen = convert_model(Gen).to(default_device)
            Dis = convert_model(Dis).to(default_device)
            if cfgs.ema:
                Gen_copy = convert_model(Gen_copy).to(default_device)

    ##### load the inception network and prepare first/second moments for calculating FID #####
    if cfgs.eval:
        inception_model = InceptionV3().to(default_device)
        if n_gpus > 1:
            inception_model = DataParallel(inception_model, output_device=default_device)
        mu, sigma = prepare_inception_moments(dataloader=eval_dataloader, generator=Gen,
                                              eval_mode=cfgs.eval_type, inception_model=inception_model,
                                              splits=1, run_name=run_name, logger=logger,
                                              device=default_device)

    worker = make_worker(
        cfgs=cfgs,
        run_name=run_name,
        best_step=best_step,
        logger=logger,
        writer=writer,
        n_gpus=n_gpus,
        gen_model=Gen,
        dis_model=Dis,
        inception_model=inception_model,
        Gen_copy=Gen_copy,
        Gen_ema=Gen_ema,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        train_dataloader=train_dataloader,
        eval_dataloader=eval_dataloader,
        G_optimizer=G_optimizer,
        D_optimizer=D_optimizer,
        G_loss=G_loss[cfgs.adv_loss],
        D_loss=D_loss[cfgs.adv_loss],
        prev_ada_p=prev_ada_p,
        default_device=default_device,
        checkpoint_dir=checkpoint_dir,
        mu=mu,
        sigma=sigma,
        best_fid=best_fid,
        best_fid_checkpoint_path=best_fid_checkpoint_path,
    )

    if cfgs.train_configs['train']:
        step = worker.train(current_step=step, total_step=cfgs.total_step)

    if cfgs.eval:
        is_save = worker.evaluation(step=step, standing_statistics=cfgs.standing_statistics,
                                    standing_step=cfgs.standing_step)

    if cfgs.save_images:
        worker.save_images(is_generate=True, png=True, npz=True,
                           standing_statistics=cfgs.standing_statistics, standing_step=cfgs.standing_step)

    if cfgs.image_visualization:
        worker.run_image_visualization(nrow=cfgs.nrow, ncol=cfgs.ncol,
                                       standing_statistics=cfgs.standing_statistics,
                                       standing_step=cfgs.standing_step)

    if cfgs.k_nearest_neighbor:
        worker.run_nearest_neighbor(nrow=cfgs.nrow, ncol=cfgs.ncol,
                                    standing_statistics=cfgs.standing_statistics,
                                    standing_step=cfgs.standing_step)

    if cfgs.interpolation:
        assert cfgs.architecture in ["big_resnet", "biggan_deep"], \
            "Interpolation is not supported except for big_resnet and biggan_deep."
        worker.run_linear_interpolation(nrow=cfgs.nrow, ncol=cfgs.ncol, fix_z=True, fix_y=False,
                                        standing_statistics=cfgs.standing_statistics,
                                        standing_step=cfgs.standing_step)
        worker.run_linear_interpolation(nrow=cfgs.nrow, ncol=cfgs.ncol, fix_z=False, fix_y=True,
                                        standing_statistics=cfgs.standing_statistics,
                                        standing_step=cfgs.standing_step)

    if cfgs.frequency_analysis:
        worker.run_frequency_analysis(num_images=len(train_dataset) // cfgs.num_classes,
                                      standing_statistics=cfgs.standing_statistics,
                                      standing_step=cfgs.standing_step)
def train(encoder, decoder, data_loader, vocab_size, args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder.to(device)
    decoder.to(device)
    encoder.train()
    decoder.train()

    params = list(decoder.parameters()) + list(encoder.embed.parameters())
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.Adam(params=params, lr=0.001)

    start_epoch = 0
    if args.cont_train:
        encoder, decoder, optimizer, start_epoch = load_checkpoint(encoder, decoder, optimizer,
                                                                   device, args)

    total_step = math.ceil(len(data_loader.dataset.caption_lengths) / data_loader.batch_sampler.batch_size)

    print("----- TRAINING STARTED of {} from epoch # {} -----".format(args.model, start_epoch))

    for epoch in range(1, args.epochs + 1):
        for step in range(1, total_step + 1):
            # sample a batch whose captions all share the same length
            indices = data_loader.dataset.get_indices()
            new_sampler = data.sampler.SubsetRandomSampler(indices=indices)
            data_loader.batch_sampler.sampler = new_sampler
            images, captions, caplens, _ = next(iter(data_loader))

            images = images.to(device)
            captions = captions.to(device)
            caplens = caplens.to(device)

            features = encoder(images)
            if args.model == "lstm":
                scores = decoder(features, captions)
                loss = criterion(scores.view(-1, vocab_size), captions.view(-1))
            elif args.model == "attention":
                scores, caps_sorted, decode_lengths, alphas, _ = decoder(features, captions, caplens)
                targets = caps_sorted[:, 1:]  # removing <start>
                scores = pack_padded_sequence(scores, decode_lengths, batch_first=True).data
                targets = pack_padded_sequence(targets, decode_lengths, batch_first=True).data
                loss = criterion(scores, targets)
                # doubly stochastic attention regularization
                loss += 1.0 * ((1.0 - alphas.sum(dim=1)) ** 2).mean()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            stats = "Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f" % (
                epoch, args.epochs, step, total_step, loss.item(), np.exp(loss.item()))
            print("\r" + stats, end="")
            sys.stdout.flush()
            if step % args.print_every == 0:
                print("\r" + stats)

        if epoch % args.save_every == 0:
            torch.save(
                {
                    "encoder": encoder.state_dict(),
                    "decoder": decoder.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "epoch": start_epoch + epoch,
                    "train_step": step,
                },
                os.path.join(args.model_dir, args.model,
                             "model-{}-{}.pkl".format(args.model, start_epoch + epoch)),
            )
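# Why the packing above drops padding (added note; not part of the original source):
# pack_padded_sequence flattens the batch time-major and keeps only the real tokens, so the
# cross-entropy never sees pad positions. A tiny self-contained check:
import torch
from torch.nn.utils.rnn import pack_padded_sequence

padded = torch.tensor([[1, 2, 3],
                       [4, 5, 0]])                               # second row ends in one pad token
packed = pack_padded_sequence(padded, [3, 2], batch_first=True)  # lengths sorted in descending order
print(packed.data)                                               # tensor([1, 4, 2, 5, 3]) -- the pad is gone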
def train_framework(seed, num_workers, config_path, reduce_train_dataset, load_current, type4eval_dataset,
                    dataset_name, num_classes, img_size, data_path, architecture, conditional_strategy,
                    hypersphere_dim, nonlinear_embed, normalize_embed, g_spectral_norm, d_spectral_norm,
                    activation_fn, attention, attention_after_nth_gen_block, attention_after_nth_dis_block,
                    z_dim, shared_dim, g_conv_dim, d_conv_dim, G_depth, D_depth, optimizer, batch_size,
                    d_lr, g_lr, momentum, nesterov, alpha, beta1, beta2, total_step, adv_loss,
                    consistency_reg, g_init, d_init, random_flip_preprocessing, prior, truncated_factor,
                    latent_op, ema, ema_decay, ema_start, synchronized_bn, hdf5_path_train,
                    train_config, model_config, **_):
    fix_all_seed(seed)
    cudnn.benchmark = False  # benchmark mode is not good for generators with undetermined input sizes
    cudnn.deterministic = True

    n_gpus = torch.cuda.device_count()
    default_device = torch.cuda.current_device()
    second_device = default_device if n_gpus == 1 else default_device + 1

    assert batch_size % n_gpus == 0, "batch_size should be divisible by the number of gpus"
    if n_gpus == 1:
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')

    start_step, best_step, best_fid, best_fid_checkpoint_path = 0, 0, None, None

    run_name = make_run_name(RUN_NAME_FORMAT, framework=config_path.split('/')[3][:-5], phase='train')

    logger = make_logger(run_name, None)
    writer = SummaryWriter(log_dir=join('./logs', run_name))
    logger.info('Run name : {run_name}'.format(run_name=run_name))
    logger.info(train_config)
    logger.info(model_config)

    logger.info('Loading train datasets...')
    train_dataset = LoadDataset(dataset_name, data_path, train=True, download=True, resize_size=img_size,
                                hdf5_path=hdf5_path_train, consistency_reg=consistency_reg,
                                random_flip=random_flip_preprocessing)
    if reduce_train_dataset < 1.0:
        num_train = int(reduce_train_dataset * len(train_dataset))
        train_dataset, _ = torch.utils.data.random_split(train_dataset,
                                                         [num_train, len(train_dataset) - num_train])
    logger.info('Train dataset size : {dataset_size}'.format(dataset_size=len(train_dataset)))

    logger.info('Loading {mode} datasets...'.format(mode=type4eval_dataset))
    eval_mode = (type4eval_dataset == 'train')
    eval_dataset = LoadDataset(dataset_name, data_path, train=eval_mode, download=True,
                               resize_size=img_size, hdf5_path=None, random_flip=False)
    logger.info('Eval dataset size : {dataset_size}'.format(dataset_size=len(eval_dataset)))

    logger.info('Building model...')
    if architecture == "dcgan":
        assert img_size == 32, "Sorry, StudioGAN does not support dcgan models for generation of images larger than 32x32 resolution."

    module = __import__('models.{architecture}'.format(architecture=architecture), fromlist=['something'])
    logger.info('Modules are located on models.{architecture}'.format(architecture=architecture))
    Gen = module.Generator(z_dim, shared_dim, img_size, g_conv_dim, g_spectral_norm, attention,
                           attention_after_nth_gen_block, activation_fn, conditional_strategy,
                           num_classes, synchronized_bn, g_init, G_depth).to(default_device)
    Dis = module.Discriminator(img_size, d_conv_dim, d_spectral_norm, attention,
                               attention_after_nth_dis_block, activation_fn, conditional_strategy,
                               hypersphere_dim, num_classes, nonlinear_embed, normalize_embed,
                               synchronized_bn, d_init, D_depth).to(default_device)

    if ema:
        print('Preparing EMA for G with decay of {}'.format(ema_decay))
        Gen_copy = module.Generator(z_dim, shared_dim, img_size, g_conv_dim, g_spectral_norm, attention,
                                    attention_after_nth_gen_block, activation_fn, conditional_strategy,
                                    num_classes, synchronized_bn=False, initialize=False,
                                    G_depth=G_depth).to(default_device)
        Gen_ema = ema_(Gen, Gen_copy, ema_decay, ema_start)
    else:
        Gen_copy, Gen_ema = None, None

    if n_gpus > 1:
        Gen = DataParallel(Gen, output_device=second_device)
        Dis = DataParallel(Dis, output_device=second_device)
        if ema:
            Gen_copy = DataParallel(Gen_copy, output_device=second_device)
        if synchronized_bn:
            patch_replication_callback(Gen)
            patch_replication_callback(Dis)

    logger.info(count_parameters(Gen))
    logger.info(Gen)
    logger.info(count_parameters(Dis))
    logger.info(Dis)

    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True,
                                  num_workers=num_workers, drop_last=True)
    eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size, shuffle=True, pin_memory=True,
                                 num_workers=num_workers, drop_last=False)

    G_loss = {'vanilla': loss_dcgan_gen, 'hinge': loss_hinge_gen, 'wasserstein': loss_wgan_gen}
    D_loss = {'vanilla': loss_dcgan_dis, 'hinge': loss_hinge_dis, 'wasserstein': loss_wgan_dis}

    if optimizer == "SGD":
        G_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, Gen.parameters()), g_lr,
                                      momentum=momentum, nesterov=nesterov)
        D_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, Dis.parameters()), d_lr,
                                      momentum=momentum, nesterov=nesterov)
    elif optimizer == "RMSprop":
        G_optimizer = torch.optim.RMSprop(filter(lambda p: p.requires_grad, Gen.parameters()), g_lr,
                                          momentum=momentum, alpha=alpha)
        D_optimizer = torch.optim.RMSprop(filter(lambda p: p.requires_grad, Dis.parameters()), d_lr,
                                          momentum=momentum, alpha=alpha)
    elif optimizer == "Adam":
        G_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Gen.parameters()), g_lr,
                                       [beta1, beta2])
        D_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Dis.parameters()), d_lr,
                                       [beta1, beta2])
    elif optimizer == "AdamP":
        # NOTE: this branch currently falls back to torch.optim.Adam; swap in the AdamP
        # optimizer from the adamp package if it is available in the environment.
        G_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Gen.parameters()), g_lr,
                                       betas=(beta1, beta2))
        D_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Dis.parameters()), d_lr,
                                       betas=(beta1, beta2))
    else:
        raise NotImplementedError

    checkpoint_dir = make_checkpoint_dir(train_config['checkpoint_folder'], run_name)

    if train_config['checkpoint_folder'] is not None:
        when = "current" if load_current is True else "best"
        g_checkpoint_dir = glob.glob(join(checkpoint_dir, "model=G-{when}-weights-step*.pth".format(when=when)))[0]
        d_checkpoint_dir = glob.glob(join(checkpoint_dir, "model=D-{when}-weights-step*.pth".format(when=when)))[0]
        Gen, G_optimizer, trained_seed, run_name, start_step, best_step = load_checkpoint(Gen, G_optimizer,
                                                                                          g_checkpoint_dir)
        Dis, D_optimizer, trained_seed, run_name, start_step, best_step, best_fid, best_fid_checkpoint_path = \
            load_checkpoint(Dis, D_optimizer, d_checkpoint_dir, metric=True)
        logger = make_logger(run_name, None)
        if ema:
            g_ema_checkpoint_dir = glob.glob(join(checkpoint_dir,
                                                  "model=G_ema-{when}-weights-step*.pth".format(when=when)))[0]
            Gen_copy = load_checkpoint(Gen_copy, None, g_ema_checkpoint_dir, ema=True)
            Gen_ema.source, Gen_ema.target = Gen, Gen_copy
        writer = SummaryWriter(log_dir=join('./logs', run_name))
        assert seed == trained_seed, "seed for sampling random numbers should be same!"
        logger.info('Generator checkpoint is {}'.format(g_checkpoint_dir))
        logger.info('Discriminator checkpoint is {}'.format(d_checkpoint_dir))

    if train_config['eval']:
        inception_model = InceptionV3().to(default_device)
        inception_model = DataParallel(inception_model, output_device=second_device)
        mu, sigma, is_score, is_std = prepare_inception_moments_eval_dataset(
            dataloader=eval_dataloader, generator=Gen, eval_mode=type4eval_dataset,
            inception_model=inception_model, splits=10, run_name=run_name, logger=logger,
            device=second_device)
    else:
        mu, sigma, inception_model = None, None, None

    logger.info('Start training...')
    trainer = Trainer(
        run_name=run_name,
        best_step=best_step,
        dataset_name=dataset_name,
        type4eval_dataset=type4eval_dataset,
        logger=logger,
        writer=writer,
        n_gpus=n_gpus,
        gen_model=Gen,
        dis_model=Dis,
        inception_model=inception_model,
        Gen_copy=Gen_copy,
        Gen_ema=Gen_ema,
        train_dataloader=train_dataloader,
        eval_dataloader=eval_dataloader,
        conditional_strategy=conditional_strategy,
        z_dim=z_dim,
        num_classes=num_classes,
        hypersphere_dim=hypersphere_dim,
        d_spectral_norm=d_spectral_norm,
        g_spectral_norm=g_spectral_norm,
        G_optimizer=G_optimizer,
        D_optimizer=D_optimizer,
        batch_size=batch_size,
        g_steps_per_iter=model_config['optimization']['g_steps_per_iter'],
        d_steps_per_iter=model_config['optimization']['d_steps_per_iter'],
        accumulation_steps=model_config['optimization']['accumulation_steps'],
        total_step=total_step,
        G_loss=G_loss[adv_loss],
        D_loss=D_loss[adv_loss],
        contrastive_lambda=model_config['loss_function']['contrastive_lambda'],
        tempering_type=model_config['loss_function']['tempering_type'],
        tempering_step=model_config['loss_function']['tempering_step'],
        start_temperature=model_config['loss_function']['start_temperature'],
        end_temperature=model_config['loss_function']['end_temperature'],
        gradient_penalty_for_dis=model_config['loss_function']['gradient_penalty_for_dis'],
        gradient_penelty_lambda=model_config['loss_function']['gradient_penelty_lambda'],  # key spelled this way in the config
        weight_clipping_for_dis=model_config['loss_function']['weight_clipping_for_dis'],
        weight_clipping_bound=model_config['loss_function']['weight_clipping_bound'],
        consistency_reg=consistency_reg,
        consistency_lambda=model_config['loss_function']['consistency_lambda'],
        diff_aug=model_config['training_and_sampling_setting']['diff_aug'],
        prior=prior,
        truncated_factor=truncated_factor,
        ema=ema,
        latent_op=latent_op,
        latent_op_rate=model_config['training_and_sampling_setting']['latent_op_rate'],
        latent_op_step=model_config['training_and_sampling_setting']['latent_op_step'],
        latent_op_step4eval=model_config['training_and_sampling_setting']['latent_op_step4eval'],
        latent_op_alpha=model_config['training_and_sampling_setting']['latent_op_alpha'],
        latent_op_beta=model_config['training_and_sampling_setting']['latent_op_beta'],
        latent_norm_reg_weight=model_config['training_and_sampling_setting']['latent_norm_reg_weight'],
        default_device=default_device,
        second_device=second_device,
        print_every=train_config['print_every'],
        save_every=train_config['save_every'],
        checkpoint_dir=checkpoint_dir,
        evaluate=train_config['eval'],
        mu=mu,
        sigma=sigma,
        best_fid=best_fid,
        best_fid_checkpoint_path=best_fid_checkpoint_path,
        train_config=train_config,
        model_config=model_config,
    )

    if conditional_strategy == 'ContraGAN' and train_config['train']:
        trainer.run_ours(current_step=start_step, total_step=total_step)
    elif train_config['train']:
        trainer.run(current_step=start_step, total_step=total_step)
    elif train_config['eval']:
        is_save = trainer.evaluation(step=start_step)

    if train_config['k_nearest_neighbor'] > 0:
        trainer.K_Nearest_Neighbor(train_config['criterion_4_k_nearest_neighbor'],
                                   train_config['number_of_nearest_samples'],
                                   random.randrange(num_classes))
def main(args):
    ckpt = load_checkpoint(args.path, args.key)
    cores = []
    net_info = module_filter(ckpt)
    consumption = ensemble_net_info(net_info)
    print_net(consumption)
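# Invocation sketch (added; not part of the original source). The original CLI definition is
# not shown here, so the flag names below are assumptions made purely for illustration.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Inspect per-module consumption of a checkpoint.")
    parser.add_argument("--path", type=str, required=True, help="path to the checkpoint file")
    parser.add_argument("--key", type=str, default=None, help="key to select within the checkpoint")
    main(parser.parse_args())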
def train_framework(dataset_name, architecture, num_classes, img_size, data_path, eval_dataset,
                    hdf5_path_train, hdf5_path_valid, train_rate, auxiliary_classifier,
                    projection_discriminator, contrastive_training, hyper_dim, nonlinear_embed,
                    normalize_embed, g_spectral_norm, d_spectral_norm, attention, reduce_class,
                    at_after_th_gen_block, at_after_th_dis_block, leaky_relu, g_init, d_init, latent_op,
                    consistency_reg, make_positive_aug, synchronized_bn, ema, ema_decay, ema_start,
                    adv_loss, z_dim, shared_dim, g_conv_dim, d_conv_dim, batch_size, total_step,
                    truncated_factor, prior, d_lr, g_lr, beta1, beta2, batch4metrics, config, **_):
    fix_all_seed(config['seed'])
    cudnn.benchmark = True  # benchmark mode is not good for generators with undetermined input sizes
    cudnn.deterministic = False

    n_gpus = torch.cuda.device_count()
    default_device = torch.cuda.current_device()
    second_device = default_device if n_gpus == 1 else default_device + 1

    assert batch_size % n_gpus == 0, "batch_size should be divisible by the number of gpus"
    if n_gpus == 1:
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')

    start_step = 0
    best_val_fid, best_checkpoint_fid_path, best_val_is, best_checkpoint_is_path = None, None, None, None

    run_name = make_run_name(RUN_NAME_FORMAT, framework=config['config_path'].split('/')[3][:-5],
                             phase='train', config=config)

    logger = make_logger(run_name, None)
    writer = SummaryWriter(log_dir=join('./logs', run_name))
    logger.info('Run name : {run_name}'.format(run_name=run_name))
    logger.info(config)

    logger.info('Loading train datasets...')
    train_dataset = LoadDataset(dataset_name, data_path, train=True, download=True, resize_size=img_size,
                                hdf5_path=hdf5_path_train, consistency_reg=consistency_reg,
                                make_positive_aug=make_positive_aug)
    if train_rate < 1.0:
        num_train = int(train_rate * len(train_dataset))
        train_dataset, _ = torch.utils.data.random_split(train_dataset,
                                                         [num_train, len(train_dataset) - num_train])
    logger.info('Train dataset size : {dataset_size}'.format(dataset_size=len(train_dataset)))

    logger.info('Loading valid datasets...')
    valid_dataset = LoadDataset(dataset_name, data_path, train=False, download=True,
                                resize_size=img_size, hdf5_path=hdf5_path_valid)
    logger.info('Valid dataset size : {dataset_size}'.format(dataset_size=len(valid_dataset)))

    logger.info('Building model...')
    module = __import__('models.{architecture}'.format(architecture=architecture), fromlist=['something'])
    logger.info('Modules are located on models.{architecture}'.format(architecture=architecture))

    num_classes = int(reduce_class * num_classes)
    Gen = module.Generator(z_dim, shared_dim, g_conv_dim, g_spectral_norm, attention, at_after_th_gen_block,
                           leaky_relu, auxiliary_classifier, projection_discriminator, num_classes,
                           contrastive_training, synchronized_bn, g_init).to(default_device)
    Dis = module.Discriminator(d_conv_dim, d_spectral_norm, attention, at_after_th_dis_block, leaky_relu,
                               auxiliary_classifier, projection_discriminator, hyper_dim, num_classes,
                               contrastive_training, nonlinear_embed, normalize_embed, synchronized_bn,
                               d_init).to(default_device)

    if ema:
        print('Preparing EMA for G with decay of {}'.format(ema_decay))
        Gen_copy = module.Generator(z_dim, shared_dim, g_conv_dim, g_spectral_norm, attention,
                                    at_after_th_gen_block, leaky_relu, auxiliary_classifier,
                                    projection_discriminator, num_classes, contrastive_training,
                                    synchronized_bn=False, initialize=False).to(default_device)
        Gen_ema = ema_(Gen, Gen_copy, ema_decay, ema_start)
    else:
        Gen_copy, Gen_ema = None, None

    if n_gpus > 1:
        Gen = DataParallel(Gen, output_device=second_device)
        Dis = DataParallel(Dis, output_device=second_device)
        if ema:
            Gen_copy = DataParallel(Gen_copy, output_device=second_device)
        if config['synchronized_bn']:
            patch_replication_callback(Gen)
            patch_replication_callback(Dis)

    logger.info(count_parameters(Gen))
    logger.info(Gen)
    logger.info(count_parameters(Dis))
    logger.info(Dis)

    if reduce_class != 1.0:
        assert dataset_name in ["TINY_ILSVRC2012", "ILSVRC2012"], \
            "reduce_class mode can not be applied on the CIFAR10 dataset"
        n_train = int(reduce_class * len(train_dataset))
        n_valid = int(reduce_class * len(valid_dataset))
        train_weights = [1.0] * n_train + [0.0] * (len(train_dataset) - n_train)
        valid_weights = [1.0] * n_valid + [0.0] * (len(valid_dataset) - n_valid)
        train_sampler = torch.utils.data.sampler.WeightedRandomSampler(train_weights, len(train_weights))
        valid_sampler = torch.utils.data.sampler.WeightedRandomSampler(valid_weights, len(valid_weights))
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler,
                                      shuffle=False, pin_memory=True, num_workers=config['num_workers'],
                                      drop_last=True)
        evaluation_dataloader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=batch4metrics,
                                           shuffle=False, pin_memory=True,
                                           num_workers=config['num_workers'], drop_last=False)
    else:
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True,
                                      num_workers=config['num_workers'], drop_last=True)
        evaluation_dataloader = DataLoader(valid_dataset, batch_size=batch4metrics, shuffle=True,
                                           pin_memory=True, num_workers=config['num_workers'],
                                           drop_last=False)

    G_loss = {'vanilla': loss_dcgan_gen, 'hinge': loss_hinge_gen, 'wasserstein': loss_wgan_gen}
    D_loss = {'vanilla': loss_dcgan_dis, 'hinge': loss_hinge_dis, 'wasserstein': loss_wgan_dis}

    G_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Gen.parameters()), g_lr, [beta1, beta2])
    D_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, Dis.parameters()), d_lr, [beta1, beta2])

    checkpoint_dir = make_checkpoint_dir(config['checkpoint_folder'], run_name, config)

    if config['checkpoint_folder'] is not None:
        logger = make_logger(run_name, config['log_output_path'])
        g_checkpoint_dir = glob.glob(os.path.join(checkpoint_dir,
                                                  "model=G-step=" + str(config['step']) + "*.pth"))[0]
        d_checkpoint_dir = glob.glob(os.path.join(checkpoint_dir,
                                                  "model=D-step=" + str(config['step']) + "*.pth"))[0]
        Gen, G_optimizer, seed, run_name, start_step = load_checkpoint(Gen, G_optimizer, g_checkpoint_dir)
        Dis, D_optimizer, seed, run_name, start_step, best_val_fid, best_checkpoint_fid_path, \
            best_val_is, best_checkpoint_is_path = load_checkpoint(Dis, D_optimizer, d_checkpoint_dir,
                                                                   metric=True)
        if ema:
            g_ema_checkpoint_dir = glob.glob(os.path.join(checkpoint_dir,
                                                          "model=G_ema-step=" + str(config['step']) + "*.pth"))[0]
            Gen_copy = load_checkpoint(Gen_copy, None, g_ema_checkpoint_dir, ema=ema)
            Gen_ema.source, Gen_ema.target = Gen, Gen_copy
        writer = SummaryWriter(log_dir=join('./logs', run_name))
        assert config['seed'] == seed, "seed for sampling random numbers should be same!"
        logger.info('Generator checkpoint is {}'.format(g_checkpoint_dir))
        logger.info('Discriminator checkpoint is {}'.format(d_checkpoint_dir))

    if config['eval']:
        inception_model = InceptionV3().to(default_device)
        inception_model = DataParallel(inception_model, output_device=second_device)
        mu, sigma, is_score, is_std = prepare_inception_moments_eval_dataset(
            dataloader=evaluation_dataloader, inception_model=inception_model, reduce_class=reduce_class,
            splits=10, logger=logger, device=second_device, eval_dataset=eval_dataset)
    else:
        mu, sigma, inception_model = None, None, None

    logger.info('Start training...')
    trainer = Trainer(
        run_name=run_name,
        logger=logger,
        writer=writer,
        n_gpus=n_gpus,
        gen_model=Gen,
        dis_model=Dis,
        inception_model=inception_model,
        Gen_copy=Gen_copy,
        Gen_ema=Gen_ema,
        train_dataloader=train_dataloader,
        evaluation_dataloader=evaluation_dataloader,
        G_loss=G_loss[adv_loss],
        D_loss=D_loss[adv_loss],
        auxiliary_classifier=auxiliary_classifier,
        contrastive_training=contrastive_training,
        contrastive_lambda=config['contrastive_lambda'],
        softmax_posterior=config['softmax_posterior'],
        contrastive_softmax=config['contrastive_softmax'],
        hyper_dim=config['hyper_dim'],
        tempering=config['tempering'],
        discrete_tempering=config['discrete_tempering'],
        tempering_times=config['tempering_times'],
        start_temperature=config['start_temperature'],
        end_temperature=config['end_temperature'],
        gradient_penalty_for_dis=config['gradient_penalty_for_dis'],
        lambda4lp=config['lambda4lp'],
        lambda4gp=config['lambda4gp'],
        weight_clipping_for_dis=config['weight_clipping_for_dis'],
        weight_clipping_bound=config['weight_clipping_bound'],
        latent_op=latent_op,
        latent_op_rate=config['latent_op_rate'],
        latent_op_step=config['latent_op_step'],
        latent_op_step4eval=config['latent_op_step4eval'],
        latent_op_alpha=config['latent_op_alpha'],
        latent_op_beta=config['latent_op_beta'],
        latent_norm_reg_weight=config['latent_norm_reg_weight'],
        consistency_reg=consistency_reg,
        consistency_lambda=config['consistency_lambda'],
        make_positive_aug=make_positive_aug,
        G_optimizer=G_optimizer,
        D_optimizer=D_optimizer,
        default_device=default_device,
        second_device=second_device,
        batch_size=batch_size,
        z_dim=z_dim,
        num_classes=num_classes,
        truncated_factor=truncated_factor,
        prior=prior,
        g_steps_per_iter=config['g_steps_per_iter'],
        d_steps_per_iter=config['d_steps_per_iter'],
        accumulation_steps=config['accumulation_steps'],
        lambda4ortho=config['lambda4ortho'],
        print_every=config['print_every'],
        save_every=config['save_every'],
        checkpoint_dir=checkpoint_dir,
        evaluate=config['eval'],
        mu=mu,
        sigma=sigma,
        best_val_fid=best_val_fid,
        best_checkpoint_fid_path=best_checkpoint_fid_path,
        best_val_is=best_val_is,
        best_checkpoint_is_path=best_checkpoint_is_path,
        config=config,
    )

    if contrastive_training:
        trainer.run_ours(current_step=start_step, total_step=total_step)
    else:
        trainer.run(current_step=start_step, total_step=total_step)
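# Note on the reduce_class sampling above (added; not part of the original source).
# WeightedRandomSampler with 0/1 weights draws, with replacement, only from indices whose
# weight is 1.0, which is how the class subset is carved out of the full dataset. Tiny demo:
import torch

weights = [1.0, 1.0, 0.0, 0.0]  # keep only the first two examples
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, num_samples=8)
print(list(sampler))            # only indices 0 and 1 ever appear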