# Load experiment setting config = get_config(opts.config) max_iter = config['max_iter'] display_size = config['display_size'] config['vgg_model_path'] = opts.output_path # Setup model and data loader if opts.trainer == 'MUNIT': trainer = MUNIT_Trainer(config) elif opts.trainer == 'UNIT': trainer = UNIT_Trainer(config) else: sys.exit("Only support MUNIT|UNIT") trainer.cuda() train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders( config) train_display_images_a = torch.stack( [train_loader_a.dataset[i] for i in range(display_size)]).cuda() train_display_images_b = torch.stack( [train_loader_b.dataset[i] for i in range(display_size)]).cuda() test_display_images_a = torch.stack( [test_loader_a.dataset[i] for i in range(display_size)]).cuda() test_display_images_b = torch.stack( [test_loader_b.dataset[i] for i in range(display_size)]).cuda() # Setup logger and output folders model_name = os.path.splitext(os.path.basename(opts.config))[0] train_writer = tensorboardX.SummaryWriter( os.path.join(opts.output_path + "/logs", model_name)) output_directory = os.path.join(opts.output_path + "/outputs", model_name) checkpoint_directory, image_directory = prepare_sub_folder(
def main():
    """Train a MUNIT or UNIT model as described by a YAML config file.

    Side effects: creates log/output directories under --output_path,
    writes TensorBoard summaries, periodic sample images, an HTML index,
    and model checkpoints. Terminates the process with sys.exit() once
    config['max_iter'] iterations have completed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str,
                        default='configs/edges2handbags_folder.yaml',
                        help='Path to the config file.')
    parser.add_argument('--output_path', type=str, default='.',
                        help="outputs path")
    # BUG FIX: the original passed default='150000' together with
    # action="store_true". A store_true flag must default to False;
    # the truthy string made opts.resume always true, so a checkpoint
    # resume was attempted on every run, even fresh ones.
    parser.add_argument("--resume", action="store_true")
    parser.add_argument('--trainer', type=str, default='MUNIT',
                        help="MUNIT|UNIT")
    opts = parser.parse_args()

    cudnn.benchmark = True

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']
    config['vgg_model_path'] = opts.output_path

    # Setup model and data loader
    if opts.trainer == 'MUNIT':
        trainer = MUNIT_Trainer(config)
    elif opts.trainer == 'UNIT':
        trainer = UNIT_Trainer(config)
    else:
        sys.exit("Only support MUNIT|UNIT")
    trainer.cuda()
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)
    # Fixed batches of display images, reused for every periodic sample dump
    # so successive snapshots are visually comparable.
    train_display_images_a = torch.stack(
        [train_loader_a.dataset[i] for i in range(display_size)]).cuda()
    train_display_images_b = torch.stack(
        [train_loader_b.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_a = torch.stack(
        [test_loader_a.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_b = torch.stack(
        [test_loader_b.dataset[i] for i in range(display_size)]).cuda()

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    train_writer = tensorboardX.SummaryWriter(
        os.path.join(opts.output_path + "/logs", model_name))
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder

    # Start training
    iterations = trainer.resume(
        checkpoint_directory, hyperparameters=config) if opts.resume else 0
    while True:
        for it, (images_a, images_b) in enumerate(zip(train_loader_a, train_loader_b)):
            trainer.update_learning_rate()
            images_a, images_b = images_a.cuda().detach(), images_b.cuda(
            ).detach()

            with Timer("Elapsed time in update: %f"):
                # Main training code
                trainer.dis_update(images_a, images_b, config)
                trainer.gen_update(images_a, images_b, config)
                # Synchronize so the Timer measures the real GPU work.
                torch.cuda.synchronize()

            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
                write_loss(iterations, trainer, train_writer)

            # Write images
            if (iterations + 1) % config['image_save_iter'] == 0:
                with torch.no_grad():
                    test_image_outputs = trainer.sample(
                        test_display_images_a, test_display_images_b)
                    train_image_outputs = trainer.sample(
                        train_display_images_a, train_display_images_b)
                write_2images(test_image_outputs, display_size,
                              image_directory, 'test_%08d' % (iterations + 1))
                write_2images(train_image_outputs, display_size,
                              image_directory, 'train_%08d' % (iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')

            if (iterations + 1) % config['image_display_iter'] == 0:
                with torch.no_grad():
                    image_outputs = trainer.sample(train_display_images_a,
                                                   train_display_images_b)
                write_2images(image_outputs, display_size,
                              image_directory, 'train_current')

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)
            iterations += 1
            if iterations >= max_iter:
                sys.exit('Finish training')
def main(argv):
    """Legacy MUNIT training entry point (pre-PyTorch-0.4 era).

    NOTE(review): this block relies on names defined elsewhere in the
    file — a module-level `parser`, `Variable` (with the long-removed
    `volatile=True` flag), `izip` (Python 2 itertools), and a
    `tensorboard` module with a SummaryWriter. It only runs on the old
    toolchain it was written for — confirm before reuse.
    """
    (opts, args) = parser.parse_args(argv)
    cudnn.benchmark = True
    model_name = os.path.splitext(os.path.basename(opts.config))[0]

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']

    # Setup model and data loader
    trainer = MUNIT_Trainer(config)
    trainer.cuda()
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)
    # volatile=True was the pre-0.4 way to disable autograd for these
    # fixed display batches (inference only).
    test_display_images_a = Variable(torch.stack(
        [test_loader_a.dataset[i] for i in range(display_size)]).cuda(), volatile=True)
    test_display_images_b = Variable(torch.stack(
        [test_loader_b.dataset[i] for i in range(display_size)]).cuda(), volatile=True)
    train_display_images_a = Variable(torch.stack(
        [train_loader_a.dataset[i] for i in range(display_size)]).cuda(), volatile=True)
    train_display_images_b = Variable(torch.stack(
        [train_loader_b.dataset[i] for i in range(display_size)]).cuda(), volatile=True)

    # Setup logger and output folders
    train_writer = tensorboard.SummaryWriter(os.path.join(
        opts.log, model_name))
    output_directory = os.path.join(opts.outputs, model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder

    # Start training
    iterations = trainer.resume(checkpoint_directory) if opts.resume else 0
    while True:
        for it, (images_a, images_b) in enumerate(izip(train_loader_a,
                                                       train_loader_b)):
            trainer.update_learning_rate()
            images_a, images_b = Variable(images_a.cuda()), Variable(
                images_b.cuda())

            # Main training code
            trainer.dis_update(images_a, images_b, config)
            trainer.gen_update(images_a, images_b, config)

            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
                write_loss(iterations, trainer, train_writer)

            # Write images
            if (iterations + 1) % config['image_save_iter'] == 0:
                # Test set images
                image_outputs = trainer.sample(test_display_images_a,
                                               test_display_images_b)
                write_images(
                    image_outputs, display_size,
                    '%s/gen_test%08d.jpg' % (image_directory, iterations + 1))
                # Train set images
                image_outputs = trainer.sample(train_display_images_a,
                                               train_display_images_b)
                write_images(
                    image_outputs, display_size,
                    '%s/gen_train%08d.jpg' % (image_directory, iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')
            # Same period as above: additionally refresh a fixed-name
            # "latest sample" image (gen.jpg) for quick inspection.
            if (iterations + 1) % config['image_save_iter'] == 0:
                image_outputs = trainer.sample(test_display_images_a,
                                               test_display_images_b)
                write_images(image_outputs, display_size,
                             '%s/gen.jpg' % image_directory)

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)
            iterations += 1
            if iterations >= max_iter:
                return
sys.exit("Only support MUNIT|UNIT.") os.exit() trainer.cuda() dataset_letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] samples = list() dataset_probs = list() augmentation = list() for i in range(config['n_datasets']): samples.append(config['sample_' + dataset_letters[i]]) dataset_probs.append(config['prob_' + dataset_letters[i]]) augmentation.append(config['transform_' + dataset_letters[i]]) _, test_loader_list = get_all_data_loaders(config, config['n_datasets'], samples, augmentation, config['trim']) # Setup logger and output folders. model_name = os.path.splitext(os.path.basename(opts.config))[0] output_directory = os.path.join(opts.output_path + "/outputs", model_name) checkpoint_directory, image_directory = prepare_sub_folder(output_directory) shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml')) # Copy config file to output folder. # Creating isomorphic directory. if not os.path.exists(os.path.join(image_directory, 'isomorphic')): os.mkdir(os.path.join(image_directory, 'isomorphic')) # Start test.
# Load experiment setting if opts.resume: config = get_config('./outputs/' + opts.name + '/config.yaml') else: config = get_config(opts.config) max_iter = config['max_iter'] display_size = config['display_size'] config['vgg_model_path'] = opts.output_path # Setup model and data loader if opts.trainer == 'DGNet': trainer = DGNet_Trainer(config, gpu_ids) # from trainer ->DGNet_Trainer trainer.cuda() random.seed(7) #fix random result train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders( config) # function from utils.py # 随机选出一些图片,然后根据选出的任意两张id不同的图片,输出一些训练过程中产生的重构图片或合成图片。 train_a_rand = random.permutation( train_loader_a.dataset.img_num)[0:display_size] train_b_rand = random.permutation( train_loader_b.dataset.img_num)[0:display_size] test_a_rand = random.permutation(test_loader_a.dataset.img_num)[0:display_size] test_b_rand = random.permutation(test_loader_b.dataset.img_num)[0:display_size] train_display_images_a = torch.stack( [train_loader_a.dataset[i][0] for i in train_a_rand]).cuda() train_display_images_ap = torch.stack( [train_loader_a.dataset[i][2] for i in train_a_rand]).cuda() train_display_images_b = torch.stack(
default='.', help="outputs path") parser.add_argument("--resume", action="store_true") opts = parser.parse_args() cudnn.benchmark = True # Load experiment setting config = get_config(opts.config) max_iter = config['max_iter'] # Setup model and data loader trainer = Trainer(config) # trainer.cuda() (train_loader_a, train_loader_b, test_loader_a, test_loader_b) = get_all_data_loaders(config) # Setup logger and output folders model_name = os.path.splitext(os.path.basename(opts.config))[0] train_writer = tensorboardX.SummaryWriter( os.path.join(opts.output_path + "/logs", model_name)) output_directory = os.path.join(opts.output_path + "/outputs", model_name) checkpoint_directory, image_directory = prepare_sub_folder(output_directory) # copy config file to output folder shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml')) # Start training iterations = trainer.resume(checkpoint_directory, hyperparameters=config) if opts.resume else 0 while True: it = 0
if opts.a2b==1: new_size = config['new_size_a'] else: new_size = config['new_size_b'] start = time.time() n_rand = 3 if opts.trainer == 'SECUNIT' or opts.trainer == 'CDUNIT': trainer.resume(opts.checkpoint, hyperparameters=config) trainer.cuda() trainer.eval() config['batch_size'] = 8 img_count_a, img_count_b = 0, 0 train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(config, train_bool=False) base_dir = Path(opts.output_folder) orig_dir = Path(base_dir, "orig") orig_dir.mkdir(parents=True, exist_ok=True) tran_dir = Path(base_dir, "fake") tran_dir.mkdir(exist_ok=True) seg_dir = Path(base_dir, "seg") seg_dir.mkdir(exist_ok=True) style_dir = Path(base_dir, "style") style_dir.mkdir(exist_ok=True) with torch.no_grad(): img_count_a, img_count_b = enum_loader(train_loader_a, train_loader_b, orig_dir, tran_dir, seg_dir, style_dir, opts, img_count_a, img_count_b, n_rand) img_count_a, img_count_b = enum_loader(test_loader_a, test_loader_b, orig_dir, tran_dir, seg_dir, style_dir, opts, img_count_a, img_count_b, n_rand)
# Load experiment setting config = get_config(opts.config) max_iter = config['max_iter'] display_size = config['display_size'] config['vgg_model_path'] = opts.output_path # Setup model and data loader if opts.trainer == 'IMPNet': trainer = IPMNet_Trainer(config) trainer.cuda() random.seed(7) # fix random result train_loader_a, train_loader_b, test_loader_a, test_loader_b, train_mask_loader_a, train_mask_loader_b,\ test_mask_loader_a, test_mask_loader_b, train_texture_loader_a, train_texture_loader_b,test_texture_loader_a,\ test_texture_loader_b = get_all_data_loaders(config) train_a_rand = random.permutation(len(train_loader_a.dataset))[0:display_size] train_b_rand = random.permutation(len(train_loader_b.dataset))[0:display_size] test_a_rand = random.permutation(len(test_loader_a.dataset))[0:display_size] test_b_rand = random.permutation(len(test_loader_b.dataset))[0:display_size] train_display_images_a = torch.stack( [train_loader_a.dataset[i] for i in train_a_rand]).cuda() train_display_mask_a = torch.stack( [train_mask_loader_a.dataset[i] for i in train_a_rand]).cuda() train_display_texture_a = torch.stack( [train_texture_loader_a.dataset[i] for i in train_a_rand]).cuda() train_display_images_a, train_display_mask_a, train_display_texture_a = randomcrop( train_display_images_a, train_display_mask_a, train_display_texture_a, config['crop_image_height'], config['crop_image_width'])
def main():
    """Train a UNIT model using CUDA-stream data prefetchers.

    Relies on a module-level `opts` (parsed CLI options) and project
    helpers (`UNIT_Trainer`, `data_prefetcher`, `get_all_data_loaders`,
    write_* utilities). Writes TensorBoard logs, sample images, an HTML
    index, and checkpoints; exits via sys.exit() at max_iter.
    """
    cudnn.benchmark = True

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']
    config['vgg_model_path'] = opts.output_path

    # Setup model and data loader
    trainer = UNIT_Trainer(config)
    if torch.cuda.is_available():
        trainer.cuda(config['gpuID'])
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    writer = SummaryWriter(os.path.join(opts.output_path + "/logs", model_name))
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder
    print('start training !!')

    # Start training
    iterations = trainer.resume(
        checkpoint_directory, hyperparameters=config) if opts.resume else 0
    TraindataA = data_prefetcher(train_loader_a)
    TraindataB = data_prefetcher(train_loader_b)
    testdataA = data_prefetcher(test_loader_a)
    testdataB = data_prefetcher(test_loader_b)
    while True:
        dataA = TraindataA.next()
        dataB = TraindataB.next()
        if dataA is None or dataB is None:
            # A loader is exhausted: rebuild both prefetchers to restart
            # the epoch, keeping the A/B streams in lockstep.
            TraindataA = data_prefetcher(train_loader_a)
            TraindataB = data_prefetcher(train_loader_b)
            dataA = TraindataA.next()
            dataB = TraindataB.next()

        with Timer("Elapsed time in update: %f"):
            # Main training code
            for _ in range(3):
                trainer.content_update(dataA, dataB, config)
            trainer.dis_update(dataA, dataB, config)
            trainer.gen_update(dataA, dataB, config)
            # torch.cuda.synchronize()
        trainer.update_learning_rate()

        # Dump training stats in log file
        if (iterations + 1) % config['log_iter'] == 0:
            print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
            write_loss(iterations, trainer, writer)

        if (iterations + 1) % config['image_save_iter'] == 0:
            testa = testdataA.next()
            testb = testdataB.next()
            # BUG FIX: the original checked dataA/dataB here, i.e. the
            # *train* batches, so an exhausted test prefetcher handed
            # None (or a short batch) straight into trainer.sample().
            # The staleness check must inspect the test batches.
            if testa is None or testb is None or testa.size(
                    0) != display_size or testb.size(0) != display_size:
                testdataA = data_prefetcher(test_loader_a)
                testdataB = data_prefetcher(test_loader_b)
                testa = testdataA.next()
                testb = testdataB.next()
            with torch.no_grad():
                test_image_outputs = trainer.sample(testa, testb)
                train_image_outputs = trainer.sample(dataA, dataB)
            if test_image_outputs is not None and train_image_outputs is not None:
                write_2images(test_image_outputs, display_size,
                              image_directory, 'test_%08d' % (iterations + 1))
                write_2images(train_image_outputs, display_size,
                              image_directory, 'train_%08d' % (iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')

        if (iterations + 1) % config['image_display_iter'] == 0:
            with torch.no_grad():
                image_outputs = trainer.sample(dataA, dataB)
            if image_outputs is not None:
                write_2images(image_outputs, display_size,
                              image_directory, 'train_current')

        # Save network weights
        if (iterations + 1) % config['snapshot_save_iter'] == 0:
            trainer.save(checkpoint_directory, iterations)
        iterations += 1
        if iterations >= max_iter:
            writer.close()
            sys.exit('Finish training')
parser.add_argument("--resume", action="store_true") opts = parser.parse_args() # Load experiment setting config = get_config(opts.config) max_iter = config['max_iter'] display_lize = config['display_size'] config['vgg_model_path'] = opts.output_path # Setup model and data loader trainer = AGUIT_Trainer(config) trainer.cuda() # load labeled and unlabeled dataloaders train_loader_l, train_loader_u = get_all_data_loaders(config) test_display_images_l = [ torch.stack([train_loader_l.dataset[i][0] for i in range(display_lize)]).cuda(), torch.stack([train_loader_l.dataset[i][1] for i in range(display_lize)]).cuda() ] # Setup logger and output folders model_name = os.path.splitext(os.path.basename(opts.config))[0] train_writer = tensorboardX.SummaryWriter( os.path.join(opts.output_path + "/logs", model_name)) output_directory = os.path.join(opts.output_path + "/outputs", model_name) checkpoint_directory, image_directory = prepare_sub_folder(output_directory) shutil.copy(opts.config,
trainer = MUNIT_Trainer(config, resume_epoch=opts.load, snapshot_dir=opts.snapshot_dir) trainer.cuda() dataset_letters = eval(opts.dataset_letters) samples = list() dataset_probs = list() augmentation = list() for i in range(config['n_datasets']): samples.append(config['sample_' + dataset_letters[i]]) dataset_probs.append(config['prob_' + dataset_letters[i]]) augmentation.append(config['transform_' + dataset_letters[i]]) train_loader_list, test_loader_list = get_all_data_loaders( config, config['n_datasets'], samples, augmentation, config['trim'], opts.dataset_letters) # Setup logger and output folders. model_name = os.path.splitext(os.path.basename(opts.config))[0] output_directory = os.path.join(opts.output_path + "/outputs", model_name) checkpoint_directory, image_directory = prepare_sub_folder(output_directory) shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml')) # Copy config file to output folder. # Creating isomorphic directory. if not os.path.exists(os.path.join(image_directory, 'isomorphic')): os.mkdir(os.path.join(image_directory, 'isomorphic')) # Start test.
def main():
    """Self-contained MUNIT/UNIT training entry point.

    All imports are function-local so this main can be copied as a unit;
    it pins the process to GPU 0 via CUDA_VISIBLE_DEVICES.
    """
    from utils import get_all_data_loaders, prepare_sub_folder, write_html, write_loss, get_config, write_2images, Timer
    import argparse
    from torch.autograd import Variable
    from trainer import MUNIT_Trainer, UNIT_Trainer
    import torch.backends.cudnn as cudnn
    import torch
    # try:
    #     from itertools import izip as zip
    # except ImportError:  # will be 3.x series
    #     pass
    import os
    import sys
    import tensorboardX
    import shutil

    os.environ["CUDA_VISIBLE_DEVICES"] = str(0)
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str,
                        default='configs/edges2handbags_folder.yaml',
                        help='Path to the config file.')
    parser.add_argument('--output_path', type=str, default='.',
                        help="outputs path")
    parser.add_argument("--resume", action="store_true")
    parser.add_argument('--trainer', type=str, default='MUNIT',
                        help="MUNIT|UNIT")
    opts = parser.parse_args()

    cudnn.benchmark = True
    # English summary of the (Chinese) note below: cudnn.benchmark lets
    # cuDNN auto-tune the fastest convolution algorithms. It helps when
    # input shapes/types are mostly fixed; if they change every
    # iteration, re-tuning each time can actually slow training down.
    ''' Note: https://www.pytorchtutorial.com/when-should-we-set-cudnn-benchmark-to-true/
    大部分情况下,设置这个 flag 可以让内置的 cuDNN 的 auto-tuner 自动寻找最适合当前配置的高效算法,来达到优化运行效率的问题
    1. 如果网络的输入数据维度或类型上变化不大,设置 torch.backends.cudnn.benchmark = true 可以增加运行效率;
    2. 如果网络的输入数据在每次 iteration 都变化的话,会导致 cnDNN 每次都会去寻找一遍最优配置,这样反而会降低运行效率。
    '''

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']
    config['vgg_model_path'] = opts.output_path

    # Setup model and data loader
    if opts.trainer == 'MUNIT':
        trainer = MUNIT_Trainer(config)
    elif opts.trainer == 'UNIT':
        trainer = UNIT_Trainer(config)
    else:
        sys.exit("Only support MUNIT|UNIT")
    trainer.cuda()
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)
    # Fixed display batches reused for every periodic image dump.
    train_display_images_a = torch.stack(
        [train_loader_a.dataset[i] for i in range(display_size)]).cuda()
    train_display_images_b = torch.stack(
        [train_loader_b.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_a = torch.stack(
        [test_loader_a.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_b = torch.stack(
        [test_loader_b.dataset[i] for i in range(display_size)]).cuda()

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    train_writer = tensorboardX.SummaryWriter(
        os.path.join(opts.output_path + "/logs", model_name))
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder

    # Start training
    iterations = trainer.resume(
        checkpoint_directory, hyperparameters=config) if opts.resume else 0
    while True:
        for it, (images_a, images_b) in enumerate(zip(train_loader_a,
                                                      train_loader_b)):
            trainer.update_learning_rate()
            images_a, images_b = images_a.cuda().detach(), images_b.cuda(
            ).detach()

            with Timer("Elapsed time in update: %f"):
                # Main training code
                trainer.dis_update(images_a, images_b, config)
                trainer.gen_update(images_a, images_b, config)
                # Synchronize so the Timer reflects real GPU time.
                torch.cuda.synchronize()

            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
                write_loss(iterations, trainer, train_writer)

            # Write images
            if (iterations + 1) % config['image_save_iter'] == 0:
                with torch.no_grad():
                    test_image_outputs = trainer.sample(
                        test_display_images_a, test_display_images_b)
                    train_image_outputs = trainer.sample(
                        train_display_images_a, train_display_images_b)
                write_2images(test_image_outputs, display_size,
                              image_directory, 'test_%08d' % (iterations + 1))
                write_2images(train_image_outputs, display_size,
                              image_directory, 'train_%08d' % (iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')

            if (iterations + 1) % config['image_display_iter'] == 0:
                with torch.no_grad():
                    image_outputs = trainer.sample(train_display_images_a,
                                                   train_display_images_b)
                write_2images(image_outputs, display_size,
                              image_directory, 'train_current')

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)
            iterations += 1
            if iterations >= max_iter:
                sys.exit('Finish training')
if __name__ == '__main__': os.environ['CUDA_VISIBLE_DEVICES'] = '7' device = torch.device( 'cuda:0') if torch.cuda.is_available() else torch.device('cpu') # Load config config = Config().parser.parse_args() torch.backends.cudnn.benchmark = True if torch.cuda.is_available(): torch.cuda.manual_seed(config.seed) torch.manual_seed(config.seed) max_iter = config.max_iter display_size = config.display_size # Achieve data loader train_loader_a, train_loader_b, _, _ = get_all_data_loaders(config) train_display_images_a = torch.stack( [train_loader_a.dataset[i] for i in range(display_size)]).to(device) train_display_images_b = torch.stack( [train_loader_b.dataset[i] for i in range(display_size)]).to(device) # Main models model = V2VModel(config).to(device) # Setup logger and output folders model_name = config.model_name train_writer = SummaryWriter(config.log_dir) output_directory = os.path.join(config.output_path + '/outputs', model_name) checkpoint_directory, image_directory = prepare_sub_folder( output_directory)
# Load experiment setting config = get_config(opts.config) max_iter = config['max_iter'] display_size = config['display_size'] config['vgg_model_path'] = opts.output_path # Setup model and data loader if opts.trainer == 'MUNIT': trainer = MUNIT_Trainer(config) elif opts.trainer == 'UNIT': trainer = UNIT_Trainer(config) else: sys.exit("Only support MUNIT|UNIT") trainer.cuda() train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(config) train_display_images_a = Variable(torch.stack([train_loader_a.dataset[i] for i in range(display_size)]).cuda(), volatile=True) train_display_images_b = Variable(torch.stack([train_loader_b.dataset[i] for i in range(display_size)]).cuda(), volatile=True) test_display_images_a = Variable(torch.stack([test_loader_a.dataset[i] for i in range(display_size)]).cuda(), volatile=True) test_display_images_b = Variable(torch.stack([test_loader_b.dataset[i] for i in range(display_size)]).cuda(), volatile=True) # Setup logger and output folders model_name = os.path.splitext(os.path.basename(opts.config))[0] train_writer = tensorboardX.SummaryWriter(os.path.join(opts.output_path + "/logs", model_name)) output_directory = os.path.join(opts.output_path + "/outputs", model_name) checkpoint_directory, image_directory = prepare_sub_folder(output_directory) shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml')) # copy config file to output folder # Start training iterations = trainer.resume(checkpoint_directory, hyperparameters=config) if opts.resume else 0 while True:
########################## Load model ################################################# trainer = MUNIT_Trainer(config) state_dict = torch.load(opts.checkpoint) trainer.gen_a.load_state_dict(state_dict['a']) trainer.gen_b.load_state_dict(state_dict['b']) trainer.cuda() trainer.eval() encode_a = trainer.gen_a.encode # encoder A function decode_a = trainer.gen_a.decode # decoder A function encode_b = trainer.gen_b.encode # encoder B function decode_b = trainer.gen_b.decode # decoder B function ########################## Load data ################################################# _, _, test_loader_a, test_loader_b = get_all_data_loaders(config) if not os.path.exists(output_dir + '/' + 'output_images'): os.makedirs(output_dir + '/' + 'output_images') ############### Extract the tensors and vectors ###################################### # Note that the batch size is set as 300. with torch.no_grad(): for images in test_loader_a: if images.size(0) != batch_size: continue images = images.cuda().detach() # Start testing test_images_a = images.cpu().numpy() c_a, s_a = encode_a(images) c_a_np = c_a.cpu().numpy()
def main(argv):
    """MUNIT training entry point using a file logger + tensorboard_logger.

    NOTE(review): depends on a module-level `parser` and on project
    helpers (`prepare_logging_folders`, `create_logger`,
    `tensorboard_logger`, `MUNIT_Trainer`) defined outside this block.
    Data loaders here yield dicts with 'A'/'B' image tensors and
    'A_paths' — presumably paired-domain batches; verify against the
    loader implementation.
    """
    (opts, args) = parser.parse_args(argv)
    cudnn.benchmark = True

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']

    # Setup logger and output folders
    output_subfolders = prepare_logging_folders(config['output_root'],
                                                config['experiment_name'])
    logger = create_logger(
        os.path.join(output_subfolders['logs'], 'train_log.log'))
    shutil.copy(opts.config, os.path.join(
        output_subfolders['logs'], 'config.yaml'))  # copy config file to output folder
    tb_logger = tensorboard_logger.Logger(output_subfolders['logs'])
    logger.info('============ Initialized logger ============')
    logger.info('Config File: {}'.format(opts.config))

    # Setup model and data loader
    trainer = MUNIT_Trainer(config, opts)
    trainer.cuda()
    loaders = get_all_data_loaders(config)
    # One fixed validation batch, reused for every TensorBoard image dump.
    val_display_images = next(iter(loaders['val']))
    logger.info('Test images: {}'.format(val_display_images['A_paths']))

    # Start training
    iterations = trainer.resume(
        opts.model_path, hyperparameters=config) if opts.resume else 0
    while True:
        for it, images in enumerate(loaders['train']):
            trainer.update_learning_rate()
            images_a = images['A']
            images_b = images['B']
            images_a, images_b = Variable(images_a.cuda()), Variable(
                images_b.cuda())

            # Main training code
            trainer.dis_update(images_a, images_b, config)
            trainer.gen_update(images_a, images_b, config)

            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                for tag, value in trainer.loss.items():
                    tb_logger.scalar_summary(tag, value, iterations)
                val_output_imgs = trainer.sample(
                    Variable(val_display_images['A'].cuda()),
                    Variable(val_display_images['B'].cuda()))
                tb_imgs = []
                # Unbind the batch dim and tile the samples side by side
                # (concat along width) into one image per output head.
                for imgs in val_output_imgs.values():
                    tb_imgs.append(torch.cat(torch.unbind(imgs, 0), dim=2))
                tb_logger.image_summary(list(val_output_imgs.keys()),
                                        tb_imgs, iterations)
            if (iterations + 1) % config['print_iter'] == 0:
                logger.info(
                    "Iteration: {:08}/{:08} Discriminator Loss: {:.4f} Generator Loss: {:.4f}"
                    .format(iterations + 1, max_iter,
                            trainer.loss['D/total'], trainer.loss['G/total']))

            # Write images
            # if (iterations + 1) % config['image_save_iter'] == 0:
            #     val_output_imgs = trainer.sample(
            #         Variable(val_display_images['A'].cuda()),
            #         Variable(val_display_images['B'].cuda()))
            #
            #     for key, imgs in val_output_imgs.items():
            #         key = key.replace('/', '_')
            #         write_images(imgs, config['display_size'], '{}/{}_{:08}.jpg'.format(output_subfolders['images'], key, iterations+1))
            #
            #     logger.info('Saved images to: {}'.format(output_subfolders['images']))

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(output_subfolders['models'], iterations)
            iterations += 1
            if iterations >= max_iter:
                return