# Register modules to checkpoint
checkpoint_io.register_modules(generator=generator)

# Get model file
model_file = config['test']['model_file']

# Distributions
ydist = get_ydist(nlabels, device=device)
zdist = get_zdist('gauss', config['z_dist']['dim'], device=device)

# Test generator
generator_test = generator

# Evaluator
evaluator = Evaluator(generator_test, zdist,
                      batch_size=config['test']['batch_size'],
                      device=device)
evaluator_single = Evaluator(generator_test, zdist,
                             batch_size=config['test']['batch_size'],
                             device=device)

# Load checkpoint
load_dict = checkpoint_io.load(model_file)
it = load_dict.get('it', -1)
epoch_idx = load_dict.get('epoch_idx', -1)

# Pick a random but fixed seed
seed = torch.randint(0, 10000, (1,))[0]

# Evaluation Loop
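# The helpers get_ydist and get_zdist are not shown in this snippet. Below is a
# minimal sketch of what they plausibly return, assuming 'gauss' maps to a
# standard normal and labels are drawn uniformly; the names and exact behavior
# here are assumptions for illustration, not the repository's definitive code.
import torch
from torch import distributions

def get_zdist_sketch(dist_name, dim, device=None):
    # 'gauss': standard normal over the latent dimension.
    if dist_name == 'gauss':
        mu = torch.zeros(dim, device=device)
        scale = torch.ones(dim, device=device)
        zdist = distributions.Normal(mu, scale)
    elif dist_name == 'uniform':
        zdist = distributions.Uniform(-torch.ones(dim, device=device),
                                      torch.ones(dim, device=device))
    else:
        raise NotImplementedError(dist_name)
    zdist.dim = dim  # callers can read the latent dimension off the distribution
    return zdist

def get_ydist_sketch(nlabels, device=None):
    # Uniform categorical over the nlabels class labels.
    logits = torch.zeros(nlabels, device=device)
    return distributions.Categorical(logits=logits)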
grid = np.reshape(grid, [-1, 2]).astype(np.float32)
grid = torch.from_numpy(grid).cuda()
grid_y = np.zeros([1000 * 1000]).astype(np.int64)
grid_y = torch.from_numpy(grid_y).cuda()

# Test generator
if config['training']['take_model_average']:
    generator_test = copy.deepcopy(generator)
    checkpoint_io.register_modules(generator_test=generator_test)
else:
    generator_test = generator

# Evaluator
evaluator = Evaluator(generator_test, zdist, ydist,
                      batch_size=batch_size, device=device)

# Train
tstart = t0 = time.time()

# Load checkpoint if it exists
try:
    load_dict = checkpoint_io.load(model_file)
except FileNotFoundError:
    it = epoch_idx = -1
else:
    it = load_dict.get('it', -1)
    epoch_idx = load_dict.get('epoch_idx', -1)
    logger.load_stats('stats.p')
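# `grid` arrives at the snippet above already built; given the 1000 * 1000
# labels it is paired with, it is presumably a 1000x1000 meshgrid over a 2D
# data domain. A hedged sketch of how such a grid could be constructed (the
# [-2, 2] range is an assumption):
import numpy as np

side = np.linspace(-2.0, 2.0, 1000)
xx, yy = np.meshgrid(side, side)      # two (1000, 1000) coordinate maps
grid = np.stack([xx, yy], axis=-1)    # (1000, 1000, 2) points
# The reshape above then flattens this to (1000 * 1000, 2) float32 points.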
if config['test']['use_model_average']:
    generator_test = copy.deepcopy(generator)
    checkpoint_io.register_modules(generator_test=generator_test)
else:
    generator_test = generator

# Distributions
ydist = get_ydist(nlabels, device=device)
zdist = get_zdist(config['z_dist']['type'], config['z_dist']['dim'],
                  device=device)

# Evaluator
evaluator = Evaluator(generator_test, zdist, ydist,
                      batch_size=batch_size, device=device)

# Load checkpoint if it exists
load_dict = checkpoint_io.load(args.oldmodel)
it = load_dict.get('it', -1)
epoch_idx = load_dict.get('epoch_idx', -1)

# Inception score
if config['test']['compute_inception']:
    print('Computing inception score...')
    inception_mean, inception_std = evaluator.compute_inception_score()
    print('Inception score: %.4f +- %.4f' % (inception_mean, inception_std))

# Samples
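# compute_inception_score returns the mean and standard deviation of the
# Inception Score, IS = exp( E_x KL( p(y|x) || p(y) ) ). As a reference, here
# is a sketch of that computation over precomputed class posteriors; `probs`
# (an (N, classes) array of Inception softmax outputs) and the 10-way split
# are assumptions, not the Evaluator's exact internals.
import numpy as np

def inception_score_from_probs(probs, splits=10):
    scores = []
    for part in np.array_split(probs, splits):
        py = part.mean(axis=0, keepdims=True)  # marginal p(y) for this split
        # Per-sample KL divergence KL(p(y|x) || p(y)), with eps for stability.
        kl = (part * (np.log(part + 1e-12) - np.log(py + 1e-12))).sum(axis=1)
        scores.append(np.exp(kl.mean()))
    return float(np.mean(scores)), float(np.std(scores))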
utils.save_images(x_real, path.join(out_dir, 'real.png'))

# Test generator
if config['training']['take_model_average']:
    generator_test = copy.deepcopy(generator)
    checkpoint_io.register_modules(generator_test=generator_test)
else:
    generator_test = generator

# Evaluator
# NNN = 8000
x_real, _ = utils.get_nsamples(test_loader, NNN)
evaluator = Evaluator(generator_test, zdist, ydist,
                      batch_size=batch_size, device=device,
                      fid_real_samples=x_real, inception_nsamples=NNN,
                      fid_sample_size=NNN)

# Train
tstart = t0 = time.time()
it = -1
epoch_idx = -1

# Reinitialize model average if needed
if (config['training']['take_model_average']
        and config['training']['model_average_reinit']):
    update_average(generator_test, generator, 0.)

# Learning rate annealing
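# update_average is called above with beta=0 to reinitialize the moving
# average. A minimal sketch of a parameter-wise exponential moving average,
# assuming the common convention target = beta * target + (1 - beta) * source
# (an assumption about the helper, not its definitive implementation):
import torch

def update_average_sketch(model_tgt, model_src, beta):
    param_dict_src = dict(model_src.named_parameters())
    for p_name, p_tgt in model_tgt.named_parameters():
        p_src = param_dict_src[p_name]
        with torch.no_grad():
            # beta=0 copies the source weights outright (reinitialization);
            # beta close to 1 tracks a slow-moving average of the source.
            p_tgt.copy_(beta * p_tgt + (1. - beta) * p_src)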
# Test generator
if config['training']['take_model_average']:
    generator_test = copy.deepcopy(generator)
    checkpoint_io.register_modules(generator_test=generator_test)
else:
    generator_test = generator

# Evaluator
# evaluator = Evaluator(generator_test, zdist, ydist,
#                       batch_size=batch_size, device=device)
x_real_FID, _ = utils.get_nsamples(test_loader, NNN)
evaluator = Evaluator(generator_test, zdist, ydist,
                      batch_size=batch_size, device=device,
                      fid_real_samples=x_real_FID, inception_nsamples=NNN,
                      fid_sample_size=NNN)

it = -1
epoch_idx = -1

# Reinitialize model average if needed
if (config['training']['take_model_average']
        and config['training']['model_average_reinit']):
    update_average(generator_test, generator, 0.)

# Learning rate annealing
g_scheduler = build_lr_scheduler(g_optimizer, config, last_epoch=it)
d_scheduler = build_lr_scheduler(d_optimizer, config, last_epoch=it)
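# build_lr_scheduler is called with last_epoch=it so the schedule resumes from
# the loaded iteration. A sketch assuming a step decay driven by two config
# keys, lr_anneal_every and lr_anneal; the key names are assumptions. Note that
# resuming a StepLR with last_epoch >= 0 requires 'initial_lr' to be present in
# the optimizer's param groups, which loading an optimizer checkpoint provides.
from torch import optim

def build_lr_scheduler_sketch(optimizer, config, last_epoch=-1):
    return optim.lr_scheduler.StepLR(
        optimizer,
        step_size=config['training']['lr_anneal_every'],
        gamma=config['training']['lr_anneal'],
        last_epoch=last_epoch)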
def perform_evaluation(run_name, image_type):
    out_dir = os.path.join(os.getcwd(), '..', 'output', run_name)
    checkpoint_dir = os.path.join(out_dir, 'chkpts')
    checkpoints = sorted(glob.glob(os.path.join(checkpoint_dir, '*')))
    evaluation_dict = {}

    for point in checkpoints:
        # Parse the iteration number out of the checkpoint filename and only
        # evaluate checkpoints saved every 10k iterations.
        model_file = point.split('/')[-1]
        iter_num = int(model_file.split('_')[1].split('.')[0])
        if iter_num % 10000 != 0:
            continue

        config = load_config('../configs/fr_default.yaml', None)
        is_cuda = torch.cuda.is_available()
        checkpoint_io = CheckpointIO(checkpoint_dir=checkpoint_dir)
        device = torch.device("cuda:0" if is_cuda else "cpu")

        generator, discriminator = build_models(config)

        # Put models on gpu if needed
        generator = generator.to(device)
        discriminator = discriminator.to(device)

        # Use multiple GPUs if possible
        generator = nn.DataParallel(generator)
        discriminator = nn.DataParallel(discriminator)

        generator_test_9 = copy.deepcopy(generator)
        generator_test_99 = copy.deepcopy(generator)
        generator_test_999 = copy.deepcopy(generator)
        generator_test_9999 = copy.deepcopy(generator)

        # Register modules to checkpoint
        checkpoint_io.register_modules(
            generator=generator,
            generator_test_9=generator_test_9,
            generator_test_99=generator_test_99,
            generator_test_999=generator_test_999,
            generator_test_9999=generator_test_9999,
            discriminator=discriminator,
        )

        # Load checkpoint
        load_dict = checkpoint_io.load(model_file)

        # Distributions
        ydist = get_ydist(config['data']['nlabels'], device=device)
        zdist = get_zdist(config['z_dist']['type'], config['z_dist']['dim'],
                          device=device)
        z_sample = torch.Tensor(np.load('z_data.npy')).to(device)

        # for name, model in zip(
        #         ['0_', '09_', '099_', '0999_', '09999_'],
        #         [generator, generator_test_9, generator_test_99,
        #          generator_test_999, generator_test_9999]):
        for name, model in zip(
                ['099_', '0999_', '09999_'],
                [generator_test_99, generator_test_999, generator_test_9999]):
            # Evaluator
            evaluator = Evaluator(model, zdist, ydist, device=device)

            # Generate 10000 samples in batches of 1000
            x_sample = []
            for i in range(10):
                x = evaluator.create_samples(z_sample[i * 1000:(i + 1) * 1000])
                x_sample.append(x)
            x_sample = torch.cat(x_sample)
            x_sample = x_sample / 2 + 0.5  # map from [-1, 1] to [0, 1]

            if not os.path.exists('fake_data'):
                os.makedirs('fake_data')
            for i in range(10000):
                torchvision.utils.save_image(x_sample[i, :, :, :],
                                             'fake_data/{}.png'.format(i))

            # FID between the generated images and the folder of real images
            # (the remaining arguments are presumably batch size, CUDA flag,
            # and Inception feature dimension).
            fid_score = calculate_fid_given_paths(
                ['fake_data', image_type + '_real'], 50, True, 2048)
            print(iter_num, name, fid_score)
            os.system("rm -rf " + "fake_data")
            evaluation_dict[(iter_num, name[:-1])] = {'FID': fid_score}

    if not os.path.exists('evaluation_data/' + run_name):
        os.makedirs('evaluation_data/' + run_name)
    pickle.dump(
        evaluation_dict,
        open('evaluation_data/' + run_name + '/eval_fid.p', 'wb'))
def main():
    pp = pprint.PrettyPrinter(indent=1)
    pp.pprint({
        'data': config['data'],
        'generator': config['generator'],
        'discriminator': config['discriminator'],
        'clusterer': config['clusterer'],
        'training': config['training']
    })
    is_cuda = torch.cuda.is_available()

    # Short hands
    batch_size = config['training']['batch_size']
    log_every = config['training']['log_every']
    inception_every = config['training']['inception_every']
    backup_every = config['training']['backup_every']
    sample_nlabels = config['training']['sample_nlabels']
    nlabels = config['data']['nlabels']
    sample_nlabels = min(nlabels, sample_nlabels)

    checkpoint_dir = path.join(out_dir, 'chkpts')

    # Create missing directories
    if not path.exists(out_dir):
        os.makedirs(out_dir)
    if not path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    # Checkpointer
    checkpoint_io = CheckpointIO(checkpoint_dir=checkpoint_dir)

    device = torch.device("cuda:0" if is_cuda else "cpu")

    train_dataset, _ = get_dataset(
        name=config['data']['type'],
        data_dir=config['data']['train_dir'],
        size=config['data']['img_size'],
        deterministic=config['data']['deterministic'])

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        num_workers=config['training']['nworkers'],
        shuffle=True,
        pin_memory=True,
        sampler=None,
        drop_last=True)

    # Create models
    generator, discriminator = build_models(config)

    # Put models on gpu if needed
    generator = generator.to(device)
    discriminator = discriminator.to(device)

    for name, module in discriminator.named_modules():
        if isinstance(module, nn.Sigmoid):
            print('Found sigmoid layer in discriminator; not compatible with BCE with logits')
            exit()

    g_optimizer, d_optimizer = build_optimizers(generator, discriminator, config)

    devices = [int(x) for x in args.devices]
    generator = nn.DataParallel(generator, device_ids=devices)
    discriminator = nn.DataParallel(discriminator, device_ids=devices)

    # Register modules to checkpoint
    checkpoint_io.register_modules(generator=generator,
                                   discriminator=discriminator,
                                   g_optimizer=g_optimizer,
                                   d_optimizer=d_optimizer)

    # Logger
    logger = Logger(log_dir=path.join(out_dir, 'logs'),
                    img_dir=path.join(out_dir, 'imgs'),
                    monitoring=config['training']['monitoring'],
                    monitoring_dir=path.join(out_dir, 'monitoring'))

    # Distributions
    ydist = get_ydist(nlabels, device=device)
    zdist = get_zdist(config['z_dist']['type'], config['z_dist']['dim'],
                      device=device)

    ntest = config['training']['ntest']
    x_test, y_test = utils.get_nsamples(train_loader, ntest)
    x_cluster, y_cluster = utils.get_nsamples(train_loader,
                                              config['clusterer']['nimgs'])
    x_test, y_test = x_test.to(device), y_test.to(device)
    z_test = zdist.sample((ntest, ))
    utils.save_images(x_test, path.join(out_dir, 'real.png'))
    logger.add_imgs(x_test, 'gt', 0)

    # Test generator
    if config['training']['take_model_average']:
        print('Taking model average')
        bad_modules = [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]
        for model in [generator, discriminator]:
            for name, module in model.named_modules():
                for bad_module in bad_modules:
                    if isinstance(module, bad_module):
                        print('Batch norm in discriminator not compatible with exponential moving average')
                        exit()
        generator_test = copy.deepcopy(generator)
        checkpoint_io.register_modules(generator_test=generator_test)
    else:
        generator_test = generator

    clusterer = get_clusterer(config)(discriminator=discriminator,
                                      x_cluster=x_cluster,
                                      x_labels=y_cluster,
                                      gt_nlabels=config['data']['nlabels'],
                                      **config['clusterer']['kwargs'])

    # Load checkpoint if it exists
    it = (utils.get_most_recent(checkpoint_dir, 'model')
          if args.model_it == -1 else args.model_it)
    it, epoch_idx, loaded_clusterer = checkpoint_io.load_models(
        it=it, load_samples='supervised' != config['clusterer']['name'])

    if loaded_clusterer is None:
        print('Initializing new clusterer. The first clustering can be quite slow.')
        clusterer.recluster(discriminator=discriminator)
        checkpoint_io.save_clusterer(clusterer, it=0)
        np.savez(os.path.join(checkpoint_dir, 'cluster_samples.npz'), x=x_cluster)
    else:
        print('Using loaded clusterer')
        clusterer = loaded_clusterer

    # Evaluator
    evaluator = Evaluator(
        generator_test,
        zdist,
        ydist,
        train_loader=train_loader,
        clusterer=clusterer,
        batch_size=batch_size,
        device=device,
        inception_nsamples=config['training']['inception_nsamples'])

    # Trainer
    trainer = Trainer(generator,
                      discriminator,
                      g_optimizer,
                      d_optimizer,
                      gan_type=config['training']['gan_type'],
                      reg_type=config['training']['reg_type'],
                      reg_param=config['training']['reg_param'])

    # Training loop
    print('Start training...')
    while it < args.nepochs * len(train_loader):
        epoch_idx += 1

        for x_real, y in train_loader:
            it += 1

            x_real, y = x_real.to(device), y.to(device)
            z = zdist.sample((batch_size, ))
            y = clusterer.get_labels(x_real, y).to(device)

            # Discriminator updates
            dloss, reg = trainer.discriminator_trainstep(x_real, y, z)
            logger.add('losses', 'discriminator', dloss, it=it)
            logger.add('losses', 'regularizer', reg, it=it)

            # Generator updates
            gloss = trainer.generator_trainstep(y, z)
            logger.add('losses', 'generator', gloss, it=it)

            if config['training']['take_model_average']:
                update_average(generator_test, generator,
                               beta=config['training']['model_average_beta'])

            # Print stats
            if it % log_every == 0:
                g_loss_last = logger.get_last('losses', 'generator')
                d_loss_last = logger.get_last('losses', 'discriminator')
                d_reg_last = logger.get_last('losses', 'regularizer')
                print('[epoch %0d, it %4d] g_loss = %.4f, d_loss = %.4f, reg=%.4f'
                      % (epoch_idx, it, g_loss_last, d_loss_last, d_reg_last))

            if (it % config['training']['recluster_every'] == 0
                    and it > config['training']['burnin_time']):
                # Print cluster distribution for online methods
                if it % 100 == 0 and config['training']['recluster_every'] <= 100:
                    print(f'[epoch {epoch_idx}, it {it}], distribution: '
                          f'{clusterer.get_label_distribution(x_real)}')
                clusterer.recluster(discriminator=discriminator, x_batch=x_real)

            # (i) Sample if necessary
            if it % config['training']['sample_every'] == 0:
                print('Creating samples...')
                x = evaluator.create_samples(
                    z_test, clusterer.get_labels(x_test, y_test).to(device))
                logger.add_imgs(x, 'all', it)

                for y_inst in range(sample_nlabels):
                    x = evaluator.create_samples(z_test, y_inst)
                    logger.add_imgs(x, '%04d' % y_inst, it)

            # (ii) Compute inception if necessary
            if it % inception_every == 0 and it > 0:
                print('PyTorch Inception score...')
                inception_mean, inception_std = evaluator.compute_inception_score()
                logger.add('metrics', 'pt_inception_mean', inception_mean, it=it)
                logger.add('metrics', 'pt_inception_stddev', inception_std, it=it)
                print(f'[epoch {epoch_idx}, it {it}] '
                      f'pt_inception_mean: {inception_mean}, '
                      f'pt_inception_stddev: {inception_std}')

            # (iii) Backup if necessary
            if it % backup_every == 0:
                print('Saving backup...')
                checkpoint_io.save('model_%08d.pt' % it, it=it)
                checkpoint_io.save_clusterer(clusterer, int(it))
                logger.save_stats('stats_%08d.p' % it)

                if it > 0:
                    checkpoint_io.save('model.pt', it=it)
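# utils.get_nsamples is used throughout these snippets to pull a fixed number
# of real samples out of a data loader. A minimal sketch of that behavior,
# shown for context as an assumption rather than the repository's exact code:
import torch

def get_nsamples_sketch(data_loader, N):
    x, y, n = [], [], 0
    for x_next, y_next in data_loader:
        x.append(x_next)
        y.append(y_next)
        n += x_next.size(0)
        if n >= N:
            break
    # Concatenate the collected batches and truncate to exactly N samples.
    return torch.cat(x)[:N], torch.cat(y)[:N]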
if config['test']['use_model_average']:
    generator_test = copy.deepcopy(generator)
    checkpoint_io.register_modules(generator_test=generator_test)
else:
    generator_test = generator

# Distributions
ydist = get_ydist(nlabels, device=device)
zdist = get_zdist(config['z_dist']['type'], config['z_dist']['dim'],
                  device=device)

# Evaluator
evaluator = Evaluator(generator_test, zdist, ydist,
                      batch_size=batch_size, device=device)

# Print the same weight slice before and after loading to verify that the
# checkpoint is actually applied
print(generator.module.resnet_0_0.conv_0.weight[1, 1, :, :])

# Load checkpoint if it exists
load_dict = checkpoint_io.load(model_file)
it = load_dict.get('it', -1)
epoch_idx = load_dict.get('epoch_idx', -1)
print(generator.module.resnet_0_0.conv_0.weight[1, 1, :, :])

# DATA and save_dir are defined elsewhere in the script
TrainModeSave = DATA
torch.save(generator.module.state_dict(),
           save_dir + TrainModeSave + 'Pre_generator')
utils.save_images(x_real, path.join(out_dir, 'real.png'))

# Test generator
if config['training']['take_model_average']:
    generator_test = copy.deepcopy(generator)
    checkpoint_io.register_modules(generator_test=generator_test)
else:
    generator_test = generator

# Evaluator
if inception_every > 0 and compute_fid:
    # This will also compute FID.
    # Load fid_sample_size (1024) many real samples.
    fid_real_samples, _ = utils.get_nsamples(train_loader, fid_sample_size)
    evaluator = Evaluator(generator_test, zdist, ydist,
                          batch_size=batch_size, device=device,
                          fid_real_samples=fid_real_samples,
                          fid_sample_size=fid_sample_size)
else:
    evaluator = Evaluator(generator_test, zdist, ydist,
                          batch_size=batch_size, device=device)

# Train
tstart = t0 = time.time()
it = epoch_idx = -1

# Load checkpoint if it exists
it = checkpoint_io.load('model.pt')
if it != -1:
    logger.load_stats('stats.p')
    if adaptive_beta:
        # Set reg_param to the last logged beta value
        reg_param = logger.stats['learning_rates']['beta_value'][-1]
path = "Final_cifar_nopid_sigmoid.1_" for epoch_id in range(40, 80): model_name = "/home/kunxu/Workspace/GAN_PID/output/" + path config = load_config(os.path.join(model_name, "config.yaml"), 'configs/default.yaml') generator, discriminator = build_models(config) generator = torch.nn.DataParallel(generator) zdist = get_zdist(config['z_dist']['type'], config['z_dist']['dim'], device=device) ydist = get_ydist(1, device=device) checkpoint_io = CheckpointIO(checkpoint_dir="./tmp") checkpoint_io.register_modules(generator_test=generator) evaluator = Evaluator(generator, zdist, ydist, batch_size=100, device=device) ckptpath = os.path.join(model_name, "chkpts", "model_{:08d}.pt".format(epoch_id * 10000 + 9999)) print(ckptpath) load_dict = checkpoint_io.load(ckptpath) img_list = [] for i in range(500): ztest = zdist.sample((100, )) x = evaluator.create_samples(ztest) img_list.append(x.cpu().numpy()) img_list = np.concatenate(img_list, axis=0) m, s = evaluation(img_list) all_results.append([float(m), float(s)])