# --- fragment of a multi-tag image-translation training loop; the enclosing
# loop header is outside this chunk. Indentation reconstructed from syntax. ---
# One combined update; returns generator adversarial / style / reconstruction
# losses and the discriminator adversarial loss.
G_adv, G_sty, G_rec, D_adv = trainer.update(x, y, i, j, j_trg)
if (iterations + 1) % config['image_save_iter'] == 0:
    # For every tag, pick a random (source, target) attribute pair and dump
    # a translation grid named after the tag/attribute names.
    # NOTE(review): this for-loop overwrites the outer i, j, j_trg, x used by
    # trainer.update above — confirm that is intended in the full file.
    for i in range(len(train_iters)):
        j, j_trg = random.sample(list(range(len(train_iters[i]))), 2)
        x, _ = train_iters[i][j].next()
        x_trg, _ = train_iters[i][j_trg].next()
        # Re-arm the prefetchers we just consumed from.
        train_iters[i][j].preload()
        train_iters[i][j_trg].preload()
        test_image_outputs = trainer.sample(x, x_trg, j, j_trg, i)
        write_2images(
            test_image_outputs, config['batch_size'], image_directory,
            'sample_%08d_%s_%s_to_%s' % (iterations + 1,
                                         config['tags'][i]['name'],
                                         config['tags'][i]['attributes'][j]['name'],
                                         config['tags'][i]['attributes'][j_trg]['name']))
# Wait for queued CUDA work so the timing below is accurate.
torch.cuda.synchronize()
if (iterations + 1) % config['log_iter'] == 0:
    write_loss(iterations, trainer, train_writer)
    now = time.time()
    print(
        f"[#{iterations + 1:06d}|{total_iterations:d}] {now - start:5.2f}s"
    )
    start = now
if (iterations + 1) % config['snapshot_save_iter'] == 0:
    trainer.save(checkpoint_directory, iterations)
# --- fragment of a style-transfer GAN training loop fed by producer queues;
# the loop header and the teardown's loop boundary are outside this chunk. ---
# Pull the next art/content batches from the worker queues, move to GPU,
# convert NHWC -> NCHW, and turn gradients on for the update.
# assumes q_*.get() returns a dict with an 'image' array — TODO confirm
batch_art = normalize_arr_of_imgs(torch.tensor(q_art.get()['image'], requires_grad=False).cuda()).permute(0,3,1,2).requires_grad_()
batch_content = normalize_arr_of_imgs(torch.tensor(q_content.get()['image'], requires_grad=False).cuda()).permute(0,3,1,2).requires_grad_()

# Training update
trainer.update_learning_rate()
# discr_success tracks the discriminator's moving success rate; the last
# argument flips between D- and G-favoring updates once it crosses win_rate.
discr_success = trainer.update(batch_art, batch_content, opts, discr_success, alpha, discr_success >= win_rate)

# Dump training stats in log file
if step % 10 == 0:
    write_loss(step, trainer, train_writer)

# Save network weights
if (step+1) % opts.save_freq == 0:
    trainer.save(checkpoint_directory, step)

if step % 50 == 0:
    # %.8s / %.5s truncate the stringified floats to 8 / 5 characters.
    print("Iteration: %08d/%08d, dloss = %.8s, gloss = %.8s, discr_success = %.5s" % (step, opts.max_iter, trainer.discr_loss.item(), trainer.gener_loss.item(), discr_success))

# Write images
if (step+1) % 100 == 0:
    # Free the training batches before sampling to limit peak GPU memory.
    del batch_art, batch_content
    torch.cuda.empty_cache()
    with torch.no_grad():
        samp = trainer.sample(test)
    image_outputs = [denormalize_arr_of_imgs(samp[0]), denormalize_arr_of_imgs(samp[1])]
    write_2images(image_outputs, opts.display_size, image_directory, 'test_%08d' % (step + 1))
    del samp, image_outputs
    torch.cuda.empty_cache()

# --- post-training teardown; placement relative to the loop reconstructed ---
print("Training is finished. Terminate jobs.")
for p in jobs:
    # join() first so workers finish cleanly; terminate() is then a no-op
    # safety net for any that are still alive.
    p.join()
    p.terminate()
print("Done.")
# --- fragment of a council-GAN training loop (FID eval branch + image dumps);
# the matching `if` of this `else` and the loop header are outside this chunk,
# and the chunk is cut off mid-call at the end. ---
else:
    # B->A FID against precomputed statistics (m1_1_b2a, s1_1_b2a).
    _ = test_fid(dataset_for_fid_A, tmp_path_im_b2a, iterations, train_writer, 'A', m1_1_b2a, s1_1_b2a)

# Write images
if (iterations + 1) % config['image_save_iter'] == 0:
    with torch.no_grad():
        test_image_outputs = trainer.sample(
            test_display_images_a, test_display_images_b)
        train_image_outputs = trainer.sample(
            train_display_images_a, train_display_images_b)
        # One grid row per council member, hence display_size * council_size.
        # write_2images returns the rendered a2b / b2a grids for TensorBoard.
        test_gen_a2b_im, test_gen_b2a_im = write_2images(
            test_image_outputs,
            display_size * config['council']['council_size'],
            image_directory, 'test_%08d' % (iterations + 1),
            do_a2b=config['do_a2b'], do_b2a=config['do_b2a'])
        train_gen_a2b_im, train_gen_b2a_im = write_2images(
            train_image_outputs,
            display_size * config['council']['council_size'],
            image_directory, 'train_%08d' % (iterations + 1),
            do_a2b=config['do_a2b'], do_b2a=config['do_b2a'])
        if config['do_a2b']:
            train_writer.add_image('a2b/train', train_gen_a2b_im, iterations)
            train_writer.add_image('a2b/test', test_gen_a2b_im,  # (chunk truncated here)
if trainer.train_seg: trainer.segmentation_head_update( images_as, images_bs, sem_a, sem_b, config['adaptation']['sem_seg_lambda'], comet_exp) # Write images if (iterations + 1) % config["image_save_iter"] == 0: with torch.no_grad(): test_image_outputs = trainer.sample( test_display_images_a, test_display_images_b) train_image_outputs = trainer.sample( train_display_images_a, train_display_images_b) write_2images( test_image_outputs, display_size, image_directory, "test_%08d" % (iterations + 1), comet_exp, ) write_2images( train_image_outputs, display_size, image_directory, "train_%08d" % (iterations + 1), comet_exp, ) if (iterations + 1) % config["image_display_iter"] == 0: with torch.no_grad(): image_outputs = trainer.sample(train_display_images_a, train_display_images_b)
def main():
    """Train a UNIT model: build trainer and prefetching loaders, then run
    the update / log / sample / snapshot loop until max_iter.

    Reads CLI options from the module-level `opts` (config path, output
    path, resume flag) and the YAML config for all hyperparameters.
    Exits via sys.exit when max_iter is reached.
    """
    cudnn.benchmark = True
    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']
    config['vgg_model_path'] = opts.output_path

    # Setup model and data loader
    trainer = UNIT_Trainer(config)
    if torch.cuda.is_available():
        trainer.cuda(config['gpuID'])
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    writer = SummaryWriter(os.path.join(opts.output_path + "/logs", model_name))
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder
    print('start training !!')

    # Start training
    iterations = trainer.resume(
        checkpoint_directory, hyperparameters=config) if opts.resume else 0
    TraindataA = data_prefetcher(train_loader_a)
    TraindataB = data_prefetcher(train_loader_b)
    testdataA = data_prefetcher(test_loader_a)
    testdataB = data_prefetcher(test_loader_b)
    while True:
        dataA = TraindataA.next()
        dataB = TraindataB.next()
        if dataA is None or dataB is None:
            # An epoch of either domain is exhausted: restart both prefetchers
            # so the two domains stay paired.
            TraindataA = data_prefetcher(train_loader_a)
            TraindataB = data_prefetcher(train_loader_b)
            dataA = TraindataA.next()
            dataB = TraindataB.next()
        with Timer("Elapsed time in update: %f"):
            # Main training code: several content updates per D/G update.
            for _ in range(3):
                trainer.content_update(dataA, dataB, config)
            trainer.dis_update(dataA, dataB, config)
            trainer.gen_update(dataA, dataB, config)
            # torch.cuda.synchronize()
        trainer.update_learning_rate()

        # Dump training stats in log file
        if (iterations + 1) % config['log_iter'] == 0:
            print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
            write_loss(iterations, trainer, writer)

        if (iterations + 1) % config['image_save_iter'] == 0:
            testa = testdataA.next()
            testb = testdataB.next()
            # BUG FIX: this check previously tested the *training* batches
            # (dataA/dataB), so an exhausted test prefetcher could hand None
            # (or a short last batch) straight to trainer.sample. Validate
            # the freshly fetched test batches instead.
            if (testa is None or testb is None
                    or testa.size(0) != display_size
                    or testb.size(0) != display_size):
                testdataA = data_prefetcher(test_loader_a)
                testdataB = data_prefetcher(test_loader_b)
                testa = testdataA.next()
                testb = testdataB.next()
            with torch.no_grad():
                test_image_outputs = trainer.sample(testa, testb)
                train_image_outputs = trainer.sample(dataA, dataB)
            if test_image_outputs is not None and train_image_outputs is not None:
                write_2images(test_image_outputs, display_size,
                              image_directory, 'test_%08d' % (iterations + 1))
                write_2images(train_image_outputs, display_size,
                              image_directory, 'train_%08d' % (iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')

        if (iterations + 1) % config['image_display_iter'] == 0:
            with torch.no_grad():
                image_outputs = trainer.sample(dataA, dataB)
            if image_outputs is not None:
                write_2images(image_outputs, display_size,
                              image_directory, 'train_current')

        # Save network weights
        if (iterations + 1) % config['snapshot_save_iter'] == 0:
            trainer.save(checkpoint_directory, iterations)
        iterations += 1
        if iterations >= max_iter:
            writer.close()
            sys.exit('Finish training')
# --- fragment of an epoch-based training loop; the inner-loop header is
# outside this chunk. Indentation reconstructed from syntax. ---
trainer.gen_update(images_a, images_b, config)
torch.cuda.synchronize()

# Dump training stats in log file
if (iterations + 1) % config['log_iter'] == 0:
    print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
    write_loss(iterations, trainer, train_writer)
trainer.update_learning_rate()
iterations += 1

# --- per-epoch tail: sample fixed display batches and checkpoint ---
with torch.no_grad():
    test_image_outputs = trainer.sample(test_display_images_a,
                                        test_display_images_b)
    train_image_outputs = trainer.sample(train_display_images_a,
                                         train_display_images_b)
write_2images(test_image_outputs, display_size, image_directory,
              'test_%08d' % epoch)
write_2images(train_image_outputs, display_size, image_directory,
              'train_%08d' % epoch)
epoch += 1
# Save network weights (note: epoch was already incremented above)
trainer.save(checkpoint_directory, epoch)
if epoch >= config['epoch']:
    sys.exit('Finish training')
    # NOTE(review): the two statements below follow sys.exit in the original
    # token stream — if they are inside this branch they are unreachable;
    # confirm intended placement in the full file.
    train_writer.close()
torch.cuda.empty_cache()
# --- fragment of a semi-supervised GAN training loop (labelled images_l,
# unlabelled images_u); the enclosing epoch/iteration loop is outside this
# chunk. ---
loss_dis = trainer.dis_update(images_l, images_u, labels_l, config)
loss_gen = trainer.gen_update(images_l, images_u, labels_l, config)
# Flush queued CUDA work so the wall-clock timing below is meaningful.
torch.cuda.synchronize()

# Log
if (iterations + 1) % config['log_iter'] == 0:
    write_loss(iterations, trainer, train_writer)
    now = time.time()
    print("[epoch:{:02d}#{:05d}|{:d}]genLoss:{:5.2f}, "
          "disLoss:{:5.2f}, with {:5.2f}s".format(
              epoch, iterations + 1, max_iter, loss_gen, loss_dis,
              now - start))
    start = now

# Write images
if (iterations + 1) % config['image_save_iter'] == 0:
    with torch.no_grad():
        test_image_outputs = trainer.sample(*test_display_images_l)
    # BUG FIX: 'display_lize' was a typo for 'display_size' and raised a
    # NameError the first time an image dump was attempted.
    write_2images(test_image_outputs, display_size, image_directory,
                  'test_%08d' % (iterations + 1))

# Save network weights
if (iterations + 1) % config['snapshot_save_iter'] == 0:
    trainer.save(checkpoint_directory, iterations)

iterations += 1
if iterations >= max_iter:
    sys.exit('Finish training')
epoch += 1
# --- fragment of a MUNIT-style training loop; the chunk opens mid-print
# (the loss values being formatted) and is cut off at the end. ---
      % (loss_dis_total, loss_gen_total, loss_recon_x, loss_recon_s,
         loss_recon_c, loss_cycrecon, loss_vgg))

# Dump training stats in log file
if (iterations + 1) % config['log_iter'] == 0:
    print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
    write_loss(iterations, trainer, train_writer)

# Write images
if (iterations + 1) % config['image_save_iter'] == 0:
    with torch.no_grad():
        test_image_outputs = trainer.sample(test_display_images_a,
                                            test_display_images_b)
        train_image_outputs = trainer.sample(train_display_images_a,
                                             train_display_images_b)
    # The saved filenames embed the current generator loss for quick triage.
    write_2images(test_image_outputs, display_size, image_directory,
                  'test_%08d+%04f' % (iterations + 1, loss_gen_total))
    write_2images(train_image_outputs, display_size, image_directory,
                  'train_%08d+%04f' % (iterations + 1, loss_gen_total))
    # HTML
    write_html(output_directory + "/index.html", iterations + 1,
               config['image_save_iter'], 'images')

if (iterations + 1) % config['image_display_iter'] == 0:
    with torch.no_grad():
        image_outputs = trainer.sample(train_display_images_a,
                                       train_display_images_b)
    write_2images(image_outputs, display_size, image_directory,
                  'train_current')

# Save network weights
if (iterations + 1) % config['snapshot_save_iter'] == 0:  # (chunk truncated)
# --- fragment of a re-ID GAN training loop; the enclosing loop is outside
# this chunk. ---
write_loss(iterations, trainer, train_writer)
# Retrieval metrics exposed by the trainer after its evaluation pass.
rank1 = trainer.rank_1
rank5 = trainer.rank_5
rank10 = trainer.rank_10
mAP0 = trainer.mAP_zero
mAP05 = trainer.mAP_half
# NOTE(review): mAP0 and mAPn1 are not used below — confirm they are
# consumed elsewhere in the full file.
mAPn1 = trainer.mAP_neg_one
mAP_list.append(mAP05)
rank1_list.append(rank1.numpy())
rank5_list.append(rank5.numpy())
rank10_list.append(rank10.numpy())
# save generated images in every round; image_outputs is deleted after each
# dump to keep GPU memory flat across the three sampling passes.
with torch.no_grad():
    image_outputs = trainer.sample_ab(train_display_images_aba,
                                      train_display_images_abb)
    write_2images(image_outputs, display_size, image_directory,
                  'train_ab_%08d' % (iterations + 1))
    del image_outputs
with torch.no_grad():
    image_outputs = trainer.sample_aa(train_display_images_aaa,
                                      train_display_images_aab)
    write_2images(image_outputs, display_size, image_directory,
                  'train_aa_%08d' % (iterations + 1))
    del image_outputs
with torch.no_grad():
    image_outputs = trainer.sample_bb(train_display_images_bba,
                                      train_display_images_bbb)
    write_2images(image_outputs, display_size, image_directory,
                  'train_bb_%08d' % (iterations + 1))
    del image_outputs
# regenerate data loaders in every epoch
train_loader_a, train_loader_b, _, _ = get_mix_data_loaders(config)
# --- fragment of an ArtGAN evaluation script; the chunk opens mid-call
# (config values being formatted into a startup banner). ---
    config['lr'], config['lr_policy'], config['step_size'], config['gamma'],
    config['init'], config['dim'], config['ngf'], config['ndf'],
    config['content_data_path'], config['art_data_path'],
    config['discr_loss_weight'], config['transformer_loss_weight'],
    config['feature_loss_weight'], config['discr_success_rate']
))
myNet = ArtGAN(opts).cuda()
# NOTE(review): both 'opts' and 'options' are referenced in this chunk —
# confirm both objects exist (and are distinct on purpose) in the full file.
# NOTE(review): initial_step is never used below — confirm it is needed.
initial_step = myNet.resume_eval(options.trained_network)
torch.backends.cudnn.benchmark = True
transform = transforms.Compose([transforms.Resize(opts.image_size),
                                transforms.ToTensor()])
dataset = data.ImageFolder(options.input_path, transform=transform)
loader = DataLoader(dataset=dataset, batch_size=1, num_workers=0)
for it, images in enumerate(loader):
    # Normalize, run the generator without gradients, denormalize, save one
    # stylized output per input image.
    test = normalize_arr_of_imgs(images.cuda().detach())
    with torch.no_grad():
        _, samp = myNet.sample(test)
    image_outputs = [denormalize_arr_of_imgs(samp)]
    write_2images(image_outputs, 1, options.output_path, 'test_%08d' % (it+1))
def main():
    """Training entry point for the MUNIT/UNIT image-translation trainer.

    Parses CLI options, builds the chosen trainer and the four data loaders,
    snapshots fixed display batches, then runs the standard
    update / log / sample / checkpoint loop until max_iter, exiting via
    sys.exit. All imports are local to main() in this script (kept as-is).
    """
    from utils import get_all_data_loaders, prepare_sub_folder, write_html, write_loss, get_config, write_2images, Timer
    import argparse
    from torch.autograd import Variable
    from trainer import MUNIT_Trainer, UNIT_Trainer
    import torch.backends.cudnn as cudnn
    import torch
    # try:
    # from itertools import izip as zip
    # except ImportError: # will be 3.x series
    # pass
    import os
    import sys
    import tensorboardX
    import shutil

    # Pin the process to GPU 0.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(0)
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str,
                        default='configs/edges2handbags_folder.yaml',
                        help='Path to the config file.')
    parser.add_argument('--output_path', type=str, default='.',
                        help="outputs path")
    parser.add_argument("--resume", action="store_true")
    parser.add_argument('--trainer', type=str, default='MUNIT',
                        help="MUNIT|UNIT")
    opts = parser.parse_args()

    cudnn.benchmark = True
    # (translated from the original Chinese note) See
    # https://www.pytorchtutorial.com/when-should-we-set-cudnn-benchmark-to-true/
    # In most cases this flag lets cuDNN's auto-tuner pick the most efficient
    # algorithms for the current configuration:
    # 1. if the input sizes/types barely change between iterations,
    #    torch.backends.cudnn.benchmark = True improves throughput;
    # 2. if the inputs change every iteration, cuDNN re-searches for the best
    #    configuration each time, which can actually reduce throughput.

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']
    config['vgg_model_path'] = opts.output_path

    # Setup model and data loader
    if opts.trainer == 'MUNIT':
        trainer = MUNIT_Trainer(config)
    elif opts.trainer == 'UNIT':
        trainer = UNIT_Trainer(config)
    else:
        sys.exit("Only support MUNIT|UNIT")
    trainer.cuda()
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)
    # Fixed batches used for the periodic qualitative image dumps.
    train_display_images_a = torch.stack(
        [train_loader_a.dataset[i] for i in range(display_size)]).cuda()
    train_display_images_b = torch.stack(
        [train_loader_b.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_a = torch.stack(
        [test_loader_a.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_b = torch.stack(
        [test_loader_b.dataset[i] for i in range(display_size)]).cuda()

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    train_writer = tensorboardX.SummaryWriter(
        os.path.join(opts.output_path + "/logs", model_name))
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder

    # Start training
    iterations = trainer.resume(
        checkpoint_directory, hyperparameters=config) if opts.resume else 0
    while True:
        for it, (images_a, images_b) in enumerate(zip(train_loader_a,
                                                      train_loader_b)):
            trainer.update_learning_rate()
            images_a, images_b = images_a.cuda().detach(), images_b.cuda(
            ).detach()

            with Timer("Elapsed time in update: %f"):
                # Main training code
                trainer.dis_update(images_a, images_b, config)
                trainer.gen_update(images_a, images_b, config)
                torch.cuda.synchronize()

            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
                write_loss(iterations, trainer, train_writer)

            # Write images
            if (iterations + 1) % config['image_save_iter'] == 0:
                with torch.no_grad():
                    test_image_outputs = trainer.sample(
                        test_display_images_a, test_display_images_b)
                    train_image_outputs = trainer.sample(
                        train_display_images_a, train_display_images_b)
                write_2images(test_image_outputs, display_size,
                              image_directory, 'test_%08d' % (iterations + 1))
                write_2images(train_image_outputs, display_size,
                              image_directory, 'train_%08d' % (iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')

            if (iterations + 1) % config['image_display_iter'] == 0:
                with torch.no_grad():
                    image_outputs = trainer.sample(train_display_images_a,
                                                   train_display_images_b)
                write_2images(image_outputs, display_size,
                              image_directory, 'train_current')

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)
            iterations += 1
            if iterations >= max_iter:
                sys.exit('Finish training')
# Main training code trainer.dis_update(images_a, images_b, config) trainer.gen_update(images_a, images_b, config) torch.cuda.synchronize() # Dump training stats in log file if (iterations + 1) % config['log_iter'] == 0: print("Iteration: %08d/%08d" % (iterations + 1, max_iter)) write_loss(iterations, trainer, train_writer) # Write images if (iterations + 1) % config['image_save_iter'] == 0: with torch.no_grad(): test_image_outputs = trainer.sample(test_display_images_a, test_display_images_b) train_image_outputs = trainer.sample(train_display_images_a, train_display_images_b) test_a2b_filename, test_b2a_filename = write_2images(test_image_outputs, display_size, image_directory, 'test_%08d' % (iterations + 1)) train_a2b_filename, train_b2a_filename = write_2images(train_image_outputs, display_size, image_directory, 'train_%08d' % (iterations + 1)) train_writer.add_image('train_a2b', img_fn_to_tensor(train_a2b_filename), iterations) train_writer.add_image('train_b2a', img_fn_to_tensor(train_b2a_filename), iterations) train_writer.add_image('test_a2b', img_fn_to_tensor(test_a2b_filename), iterations) train_writer.add_image('test_b2a', img_fn_to_tensor(test_b2a_filename), iterations) # HTML write_html(output_directory + "/index.html", iterations + 1, config['image_save_iter'], 'images') if (iterations + 1) % config['image_display_iter'] == 0: with torch.no_grad(): image_outputs = trainer.sample(train_display_images_a, train_display_images_b) write_2images(image_outputs, display_size, image_directory, 'train_current') # Save network weights if (iterations + 1) % config['snapshot_save_iter'] == 0:
images_a, images_b = Variable(images_a.cuda()), Variable(images_b.cuda()) # Main training code trainer.dis_update(images_a, images_b, config) trainer.gen_update(images_a, images_b, config) # Dump training stats in log file if (iterations + 1) % config['log_iter'] == 0: print("Iteration: %08d/%08d" % (iterations + 1, max_iter)) write_loss(iterations, trainer, train_writer) # Write images if (iterations + 1) % config['image_save_iter'] == 0: # Test set images image_outputs = trainer.sample(test_display_images_a, test_display_images_b) write_2images(image_outputs, display_size, image_directory, 'test_%08d' % (iterations + 1)) # Train set images image_outputs = trainer.sample(train_display_images_a, train_display_images_b) write_2images(image_outputs, display_size, image_directory, 'train_%08d' % (iterations + 1)) # HTML write_html(output_directory + "/index.html", iterations + 1, config['image_save_iter'], 'images') if (iterations + 1) % config['image_display_iter'] == 0: train_display_images_a = Variable(torch.stack([train_loader_a.dataset[i] for i in range(display_size)]).cuda(), volatile=True) train_display_images_b = Variable(torch.stack([train_loader_b.dataset[i] for i in range(display_size)]).cuda(), volatile=True) image_outputs = trainer.sample(train_display_images_a, train_display_images_b) write_2images(image_outputs, display_size, image_directory, 'train_current') # Save network weights if (iterations + 1) % config['snapshot_save_iter'] == 0: trainer.save(checkpoint_directory, iterations)
def main():
    """Training entry point for a MUNIT/UNIT trainer.

    Parses CLI options, builds the chosen trainer and data loaders,
    snapshots fixed display batches, then runs the standard
    update / log / sample / checkpoint loop until max_iter, exiting via
    sys.exit.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str,
                        default='configs/edges2handbags_folder.yaml',
                        help='Path to the config file.')
    parser.add_argument('--output_path', type=str, default='.',
                        help="outputs path")
    # BUG FIX: this flag previously carried default='150000' together with
    # action="store_true". A store_true flag must default to False; the
    # truthy string default made `opts.resume` always truthy, so training
    # silently resumed from the checkpoint directory even when --resume was
    # not passed (and the '150000' value itself was never used).
    parser.add_argument("--resume", action="store_true")
    parser.add_argument('--trainer', type=str, default='MUNIT',
                        help="MUNIT|UNIT")
    opts = parser.parse_args()

    cudnn.benchmark = True
    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']
    config['vgg_model_path'] = opts.output_path

    # Setup model and data loader
    if opts.trainer == 'MUNIT':
        trainer = MUNIT_Trainer(config)
    elif opts.trainer == 'UNIT':
        trainer = UNIT_Trainer(config)
    else:
        sys.exit("Only support MUNIT|UNIT")
    trainer.cuda()
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)
    # Fixed batches used for the periodic qualitative image dumps.
    train_display_images_a = torch.stack(
        [train_loader_a.dataset[i] for i in range(display_size)]).cuda()
    train_display_images_b = torch.stack(
        [train_loader_b.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_a = torch.stack(
        [test_loader_a.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_b = torch.stack(
        [test_loader_b.dataset[i] for i in range(display_size)]).cuda()

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    train_writer = tensorboardX.SummaryWriter(
        os.path.join(opts.output_path + "/logs", model_name))
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder

    # Start training
    iterations = trainer.resume(
        checkpoint_directory, hyperparameters=config) if opts.resume else 0
    while True:
        for it, (images_a, images_b) in enumerate(zip(train_loader_a,
                                                      train_loader_b)):
            trainer.update_learning_rate()
            images_a, images_b = images_a.cuda().detach(), images_b.cuda(
            ).detach()

            with Timer("Elapsed time in update: %f"):
                # Main training code
                trainer.dis_update(images_a, images_b, config)
                trainer.gen_update(images_a, images_b, config)
                torch.cuda.synchronize()

            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
                write_loss(iterations, trainer, train_writer)

            # Write images
            if (iterations + 1) % config['image_save_iter'] == 0:
                with torch.no_grad():
                    test_image_outputs = trainer.sample(
                        test_display_images_a, test_display_images_b)
                    train_image_outputs = trainer.sample(
                        train_display_images_a, train_display_images_b)
                write_2images(test_image_outputs, display_size,
                              image_directory, 'test_%08d' % (iterations + 1))
                write_2images(train_image_outputs, display_size,
                              image_directory, 'train_%08d' % (iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')

            if (iterations + 1) % config['image_display_iter'] == 0:
                with torch.no_grad():
                    image_outputs = trainer.sample(train_display_images_a,
                                                   train_display_images_b)
                write_2images(image_outputs, display_size,
                              image_directory, 'train_current')

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)
            iterations += 1
            if iterations >= max_iter:
                sys.exit('Finish training')
# --- fragment of a multi-tag image-translation training loop; the enclosing
# loop header is outside this chunk. Indentation reconstructed from syntax. ---
# Re-arm the prefetcher consumed by the preceding (out-of-view) fetch.
train_iters[i][j].preload()
G_adv, G_sty, G_rec, D_adv = trainer.update(x, y, i, j, j_trg)
if (iterations + 1) % config['image_save_iter'] == 0:
    # For every tag, pick a random (source, target) attribute pair and dump
    # a translation grid.
    # NOTE(review): this for-loop overwrites the outer i, j, j_trg, x used by
    # trainer.update above — confirm that is intended in the full file.
    for i in range(len(train_iters)):
        j, j_trg = random.sample(list(range(len(train_iters[i]))), 2)
        x, _ = train_iters[i][j].next()
        x_trg, _ = train_iters[i][j_trg].next()
        train_iters[i][j].preload()
        train_iters[i][j_trg].preload()
        test_image_outputs = trainer.sample(x, x_trg, j, j_trg, i)
        write_2images(test_image_outputs, config['batch_size'],
                      image_directory,
                      'sample_%08d_%2d' % (iterations + 1, i))
# Wait for queued CUDA work so the timing below is accurate.
torch.cuda.synchronize()
if (iterations + 1) % config['log_iter'] == 0:
    write_loss(iterations, trainer, train_writer)
    now = time.time()
    print(
        f"[#{iterations + 1:06d}|{total_iterations:d}] {now - start:5.2f}s"
    )
    start = now
if (iterations + 1) % config['snapshot_save_iter'] == 0:
    trainer.save(checkpoint_directory, iterations)
# --- fragment of a MUNIT-style training loop; the loop header is outside
# this chunk, which is cut off at the end. ---
trainer.gen_update(images_a, images_b, config)
torch.cuda.synchronize()

# Dump training stats in log file
if (iterations + 1) % config['log_iter'] == 0:
    print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
    write_loss(iterations, trainer, train_writer)

# Write images
if (iterations + 1) % config['image_save_iter'] == 0:
    with torch.no_grad():
        test_image_outputs = trainer.sample(
            test_display_images_a, test_display_images_b)
        train_image_outputs = trainer.sample(
            train_display_images_a, train_display_images_b)
    write_2images(test_image_outputs, display_size, image_directory,
                  'test_%08d' % (iterations + 1))
    write_2images(train_image_outputs, display_size, image_directory,
                  'train_%08d' % (iterations + 1))
    # HTML
    write_html(output_directory + "/index.html", iterations + 1,
               config['image_save_iter'], 'images')

if (iterations + 1) % config['image_display_iter'] == 0:
    with torch.no_grad():
        image_outputs = trainer.sample(train_display_images_a,
                                       train_display_images_b)
    write_2images(image_outputs, display_size, image_directory,
                  'train_current')

# Save network weights
if (iterations + 1) % config['snapshot_save_iter'] == 0:  # (chunk truncated)
# TensorBoard writers for train/test curves and image grids.
# BUG FIX: the train log subdirectory was the typo '/trian'.
train_writer = tensorboardX.SummaryWriter(
    os.path.join(config['log_patch'] + '/train'))
test_writer = tensorboardX.SummaryWriter(
    os.path.join(config['log_patch'] + '/test'))
print(config)

if __name__ == "__main__":
    # Endless two-domain training loop; relies on trainer / train_a /
    # train_b / iterations defined at module level (outside this chunk).
    while True:
        for it, (images_a, images_b) in enumerate(zip(train_a, train_b)):
            trainer.update_learning_rate()
            images_a, images_b = images_a.cuda().detach(), images_b.cuda(
            ).detach()

            # Main training code
            trainer.dis_update(images_a, images_b, config)
            trainer.gen_update(images_a, images_b, config)
            torch.cuda.synchronize()
            print("step:{}".format(iterations))

            # Periodic losses + qualitative samples.
            # NOTE(review): sampling assumed to be gated by this modulus —
            # confirm against the original file's layout.
            if iterations % 10 == 0:
                write_loss(iterations, trainer, train_writer)
                with torch.no_grad():
                    test_image_outputs = trainer.sample(
                        test_display_images_a, test_display_images_b)
                    train_image_outputs = trainer.sample(
                        train_display_images_a, train_display_images_b)
                # BUG FIX: test images were written to train_writer and
                # train images to test_writer; route each to its own writer.
                write_2images(test_image_outputs, config['display_size'],
                              'test', iterations, test_writer)
                write_2images(train_image_outputs, config['display_size'],
                              'train', iterations, train_writer)
            iterations += 1