def main(json_path='options/val_tsms.json'):
    """Validate a stage-2 model described by a JSON option file.

    Builds a test DataLoader from the option file, loads the stage-2 model,
    then for every test image saves the estimated image E to
    ``opt['path']['images']`` and logs the per-image and average PSNR
    against the ground-truth image H.

    Args:
        json_path: Default path to the option JSON file; can be overridden
            on the command line with ``-opt``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, default=json_path,
                        help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)

    # ----------------------------------------
    # configure logger
    # ----------------------------------------
    logger_name = 'val_msmd_patch'
    utils_logger.logger_info(
        logger_name, os.path.join(opt['path']['log'], logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # NOTE(review): if the option file lists several phases, each iteration
    # overwrites test_set/test_loader, so only the LAST dataset is evaluated
    # below — confirm the option file contains a single phase.
    for phase, dataset_opt in opt['datasets'].items():
        test_set = define_Dataset(phase, dataset_opt)
        test_loader = DataLoader(test_set,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=1,
                                 drop_last=False,
                                 pin_memory=True)

    model = define_Model(opt, stage2=True)
    model.load()

    avg_psnr = 0.0
    idx = 0

    for test_data in test_loader:
        idx += 1
        # NOTE(review): '.png' is appended to the full basename, so a path
        # ending in an extension yields e.g. 'name.ext.png' — presumably the
        # dataset provides extension-less names; verify against the dataset.
        image_name = os.path.basename(test_data['L_path'][0])
        image_name = image_name + '.png'
        save_img_path = os.path.join(opt['path']['images'], image_name)

        model.feed_data(test_data)
        model.test()

        visuals = model.current_visuals()
        E_img = util.tensor2uint(visuals['E'])
        H_img = util.tensor2uint(visuals['H'])

        # -----------------------
        # save estimated image E
        # -----------------------
        util.imsave(E_img, save_img_path)

        # -----------------------
        # calculate PSNR (4-pixel border excluded)
        # -----------------------
        current_psnr = util.calculate_psnr(E_img, H_img, border=4)
        logger.info('{:->4d}--> {:>10s} | {:<4.2f}dB'.format(
            idx, image_name, current_psnr))
        avg_psnr += current_psnr

    # Fix: guard against an empty test loader, which previously raised
    # ZeroDivisionError here.
    avg_psnr = avg_psnr / idx if idx > 0 else 0.0

    # testing log
    message_te = '\tVal_PSNR_avg: {:<.2f}dB'.format(avg_psnr)
    logger.info(message_te)
def main(json_path='options/train_sr.json'):
    """Train the stage-1 SR model described by a JSON option file.

    Prepares options (resuming from the last 'G1' checkpoint if present),
    seeds all RNGs, builds train/val DataLoaders, then runs the training
    loop, periodically saving the model and logging validation PSNR.

    Args:
        json_path: Default path to the option JSON file; can be overridden
            on the command line with ``-opt``.
    """

    '''
    # ----------------------------------------
    # Step--1 (prepare opt)
    # ----------------------------------------
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, default=json_path,
                        help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)
    util.mkdirs(
        (path for key, path in opt['path'].items() if 'pretrained' not in key))

    # ----------------------------------------
    # update opt: resume from the latest 'G1' checkpoint, if any
    # ----------------------------------------
    init_iter, init_path_G = option.find_last_checkpoint(opt['path']['models'],
                                                         net_type='G1')
    opt['path']['pretrained_netG1'] = init_path_G
    current_step = init_iter
    border = opt['scale']  # PSNR border equals the SR scale factor

    # ----------------------------------------
    # save opt to a '../option.json' file
    # ----------------------------------------
    option.save(opt)

    # ----------------------------------------
    # return None for missing key
    # ----------------------------------------
    opt = option.dict_to_nonedict(opt)

    # ----------------------------------------
    # configure logger
    # ----------------------------------------
    logger_name = 'train'
    utils_logger.logger_info(
        logger_name, os.path.join(opt['path']['log'], logger_name + '.log'))
    logger = logging.getLogger(logger_name)
    logger.info(option.dict2str(opt))

    # ----------------------------------------
    # seed: fixed from options or random, applied to every RNG
    # ----------------------------------------
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    logger.info('Random seed: {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    '''
    # ----------------------------------------
    # Step--2 (creat dataloader)
    # ----------------------------------------
    '''
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = define_Dataset(phase, dataset_opt)
            train_size = int(
                math.ceil(
                    len(train_set) / dataset_opt['dataloader_batch_size']))
            logger.info('Number of train images: {:,d}, iters: {:,d}'.format(
                len(train_set), train_size))
            train_loader = DataLoader(
                train_set,
                batch_size=dataset_opt['dataloader_batch_size'],
                shuffle=dataset_opt['dataloader_shuffle'],
                num_workers=dataset_opt['dataloader_num_workers'],
                drop_last=True,
                pin_memory=True)
        elif phase == 'val':
            val_loader = DataLoader(define_Dataset(phase, dataset_opt),
                                    batch_size=1,
                                    shuffle=False,
                                    num_workers=1,
                                    drop_last=False,
                                    pin_memory=True)
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)

    '''
    # ----------------------------------------
    # Step--3 (model_1)
    # ----------------------------------------
    '''
    model_1 = define_Model(opt, stage1=True)
    model_1.init_train()

    for epoch in range(100000):  # effectively "keep running"
        for i, train_data in enumerate(train_loader):
            current_step += 1

            model_1.update_learning_rate(current_step)
            model_1.feed_data(train_data)
            model_1.optimize_parameters(current_step)

            # -------------------------------
            # periodically save the model
            # -------------------------------
            if current_step % opt['train']['checkpoint_save'] == 0:
                model_1.save(current_step)

            # -------------------------------
            # model_1 testing
            # -------------------------------
            if current_step % opt['train']['checkpoint_test'] == 0:
                # training info
                logs = model_1.current_log()  # such as loss
                message_tr = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
                    epoch, current_step, model_1.current_learning_rate())
                for k, v in logs.items():  # merge log information into message
                    message_tr += '\t{:s}: {:.3e}'.format(k, v)

                avg_psnr = 0.0
                idx = 0
                for val_data in val_loader:
                    idx += 1
                    model_1.feed_data(val_data)
                    model_1.test()

                    visuals = model_1.current_visuals()
                    E_img = util.tensor2uint(visuals['E'])
                    H_img = util.tensor2uint(visuals['H'])

                    # -----------------------
                    # calculate PSNR
                    # -----------------------
                    current_psnr = util.calculate_psnr(E_img, H_img,
                                                       border=border)
                    avg_psnr += current_psnr

                # Fix: guard against an empty val loader, which previously
                # raised ZeroDivisionError here.
                avg_psnr = avg_psnr / idx if idx > 0 else 0.0

                # testing log
                message_val = '\tStage SR Val_PSNR_avg: {:<.2f}dB'.format(
                    avg_psnr)
                message = message_tr + message_val
                logger.info(message)

    logger.info('End of Stage SR training.')
def main(json_path='options/train_msrresnet_psnr.json'):
    """Train an MSRResNet (PSNR-oriented) model from a JSON option file.

    Prepares options (resuming from the last 'G' checkpoint if present),
    seeds all RNGs, builds train/test DataLoaders, then runs the training
    loop: per-iteration optimization with periodic logging, model saving,
    and PSNR evaluation (saving each estimated image).

    Args:
        json_path: Default path to the option JSON file; can be overridden
            on the command line with ``-opt``.
    """

    '''
    # ----------------------------------------
    # Step--1 (prepare opt)
    # ----------------------------------------
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, default=json_path,
                        help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)
    util.mkdirs(
        (path for key, path in opt['path'].items() if 'pretrained' not in key))

    # ----------------------------------------
    # update opt: resume from the latest 'G' checkpoint, if any
    # ----------------------------------------
    init_iter, init_path_G = option.find_last_checkpoint(opt['path']['models'],
                                                         net_type='G')
    opt['path']['pretrained_netG'] = init_path_G
    current_step = init_iter
    border = opt['scale']  # PSNR border equals the SR scale factor

    # ----------------------------------------
    # save opt to a '../option.json' file
    # ----------------------------------------
    option.save(opt)

    # ----------------------------------------
    # return None for missing key
    # ----------------------------------------
    opt = option.dict_to_nonedict(opt)

    # ----------------------------------------
    # configure logger
    # ----------------------------------------
    logger_name = 'train'
    utils_logger.logger_info(
        logger_name, os.path.join(opt['path']['log'], logger_name + '.log'))
    logger = logging.getLogger(logger_name)
    logger.info(option.dict2str(opt))

    # ----------------------------------------
    # seed: fixed from options or random, applied to every RNG
    # ----------------------------------------
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    logger.info('Random seed: {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    '''
    # ----------------------------------------
    # Step--2 (creat dataloader)
    # ----------------------------------------
    '''
    # NOTE(review): define_Dataset is called here with only dataset_opt,
    # while sibling scripts pass (phase, dataset_opt) — confirm which
    # signature this project's define_Dataset expects.
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = define_Dataset(dataset_opt)
            train_size = int(
                math.ceil(
                    len(train_set) / dataset_opt['dataloader_batch_size']))
            logger.info('Number of train images: {:,d}, iters: {:,d}'.format(
                len(train_set), train_size))
            train_loader = DataLoader(
                train_set,
                batch_size=dataset_opt['dataloader_batch_size'],
                shuffle=dataset_opt['dataloader_shuffle'],
                num_workers=dataset_opt['dataloader_num_workers'],
                drop_last=True,
                pin_memory=True)
        elif phase == 'test':
            test_loader = DataLoader(define_Dataset(dataset_opt),
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=1,
                                     drop_last=False,
                                     pin_memory=True)
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)

    '''
    # ----------------------------------------
    # Step--3 (initialize model)
    # ----------------------------------------
    '''
    model = define_Model(opt)
    model.init_train()
    logger.info(model.info_network())
    logger.info(model.info_params())

    '''
    # ----------------------------------------
    # Step--4 (main training)
    # ----------------------------------------
    '''
    for epoch in range(100):  # keep running
        for i, train_data in enumerate(train_loader):
            current_step += 1

            # -------------------------------
            # 1) update learning rate
            # -------------------------------
            model.update_learning_rate(current_step)

            # -------------------------------
            # 2) feed patch pairs
            # -------------------------------
            model.feed_data(train_data)

            # -------------------------------
            # 3) optimize parameters
            # -------------------------------
            model.optimize_parameters(current_step)

            # -------------------------------
            # 4) training information
            # -------------------------------
            if current_step % opt['train']['checkpoint_print'] == 0:
                logs = model.current_log()  # such as loss
                message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
                    epoch, current_step, model.current_learning_rate())
                for k, v in logs.items():  # merge log information into message
                    message += '{:s}: {:.3e} '.format(k, v)
                logger.info(message)

            # -------------------------------
            # 5) save model
            # -------------------------------
            if current_step % opt['train']['checkpoint_save'] == 0:
                logger.info('Saving the model.')
                model.save(current_step)

            # -------------------------------
            # 6) testing
            # -------------------------------
            if current_step % opt['train']['checkpoint_test'] == 0:

                avg_psnr = 0.0
                idx = 0

                for test_data in test_loader:
                    idx += 1
                    image_name_ext = os.path.basename(test_data['L_path'][0])
                    img_name, ext = os.path.splitext(image_name_ext)

                    # one sub-directory per test image
                    img_dir = os.path.join(opt['path']['images'], img_name)
                    util.mkdir(img_dir)

                    model.feed_data(test_data)
                    model.test()

                    visuals = model.current_visuals()
                    E_img = util.tensor2uint(visuals['E'])
                    H_img = util.tensor2uint(visuals['H'])

                    # -----------------------
                    # save estimated image E
                    # -----------------------
                    save_img_path = os.path.join(
                        img_dir, '{:s}_{:d}.png'.format(img_name,
                                                        current_step))
                    util.imsave(E_img, save_img_path)

                    # -----------------------
                    # calculate PSNR
                    # -----------------------
                    current_psnr = util.calculate_psnr(E_img, H_img,
                                                       border=border)
                    logger.info('{:->4d}--> {:>10s} | {:<4.2f}dB'.format(
                        idx, image_name_ext, current_psnr))
                    avg_psnr += current_psnr

                # Fix: guard against an empty test loader, which previously
                # raised ZeroDivisionError here.
                avg_psnr = avg_psnr / idx if idx > 0 else 0.0

                # testing log
                logger.info(
                    '<epoch:{:3d}, iter:{:8,d}, Average PSNR : {:<.2f}dB\n'.
                    format(epoch, current_step, avg_psnr))

    logger.info('Saving the final model.')
    model.save('latest')
    logger.info('End of training.')
def main(json_path='options/train_drunet.json'): ''' # ---------------------------------------- # Step--1 (prepare opt) # ---------------------------------------- ''' parser = argparse.ArgumentParser() parser.add_argument('-opt', type=str, default=json_path, help='Path to option JSON file.') parser.add_argument('--launcher', default='pytorch', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--dist', default=False) opt = option.parse(parser.parse_args().opt, is_train=True) util.mkdirs((path for key, path in opt['path'].items() if 'pretrained' not in key)) # ---------------------------------------- # update opt # ---------------------------------------- # -->-->-->-->-->-->-->-->-->-->-->-->-->- init_iter, init_path_G = option.find_last_checkpoint(opt['path']['models'], net_type='G') opt['path']['pretrained_netG'] = init_path_G current_step = init_iter border = 0 # --<--<--<--<--<--<--<--<--<--<--<--<--<- # ---------------------------------------- # save opt to a '../option.json' file # ---------------------------------------- option.save(opt) # ---------------------------------------- # return None for missing key # ---------------------------------------- opt = option.dict_to_nonedict(opt) opt['dist'] = parser.parse_args().dist # ---------------------------------------- # configure logger # ---------------------------------------- logger_name = 'train' utils_logger.logger_info(logger_name, os.path.join(opt['path']['log'], logger_name+'.log')) logger = logging.getLogger(logger_name) logger.info(option.dict2str(opt)) # ---------------------------------------- # distributed settings # ---------------------------------------- if opt['dist']: init_dist('pytorch') opt['rank'], opt['world_size'] = get_dist_info() print(str(opt['rank']) + '----' + str(opt['world_size'])) # ---------------------------------------- # seed # ---------------------------------------- seed = opt['train']['manual_seed'] if seed is None: seed = 
random.randint(1, 10000) logger.info('Random seed: {}'.format(seed)) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) ''' # ---------------------------------------- # Step--2 (creat dataloader) # ---------------------------------------- ''' # ---------------------------------------- # 1) create_dataset # 2) creat_dataloader for train and test # ---------------------------------------- for phase, dataset_opt in opt['datasets'].items(): if phase == 'train': train_set = define_Dataset(dataset_opt) train_size = int(math.ceil(len(train_set) / dataset_opt['dataloader_batch_size'])) logger.info('Number of train images: {:,d}, iters: {:,d}'.format(len(train_set), train_size)) if opt['dist']: train_sampler = DistributedSampler(train_set, shuffle=dataset_opt['dataloader_shuffle'], drop_last=True, seed=seed+opt['rank']) train_loader = DataLoader(train_set, batch_size=dataset_opt['dataloader_batch_size']//opt['num_gpu'], shuffle=False, num_workers=dataset_opt['dataloader_num_workers']//opt['num_gpu'], drop_last=True, pin_memory=True, sampler=train_sampler) else: train_loader = DataLoader(train_set, batch_size=dataset_opt['dataloader_batch_size'], shuffle=dataset_opt['dataloader_shuffle'], num_workers=dataset_opt['dataloader_num_workers'], drop_last=True, pin_memory=True) elif phase == 'test': test_set = define_Dataset(dataset_opt) test_loader = DataLoader(test_set, batch_size=1, shuffle=False, num_workers=1, drop_last=False, pin_memory=True) else: raise NotImplementedError("Phase [%s] is not recognized." % phase) ''' # ---------------------------------------- # Step--3 (initialize model) # ---------------------------------------- ''' model = define_Model(opt) model.init_train() if opt['rank'] == 0: logger.info(model.info_params())