def main(json_path='options/train_sr.json'):
    '''
    Stage-1 SR training entry point.

    Parses the option JSON, resumes from the last 'G1' checkpoint if one
    exists, builds train/val dataloaders, then runs the training loop with
    periodic checkpoint saving and validation-PSNR logging.

    # ----------------------------------------
    # Step--1 (prepare opt)
    # ----------------------------------------
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, default=json_path,
                        help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)
    # Create every configured directory except pretrained-model paths
    # (those point at files, not directories to create).
    util.mkdirs(
        (path for key, path in opt['path'].items() if 'pretrained' not in key))

    # ----------------------------------------
    # update opt: resume from the newest saved 'G1' checkpoint, if any;
    # current_step picks up where the last run stopped (0 when fresh).
    # ----------------------------------------
    init_iter, init_path_G = option.find_last_checkpoint(opt['path']['models'],
                                                         net_type='G1')
    opt['path']['pretrained_netG1'] = init_path_G
    current_step = init_iter
    # Border pixels excluded from PSNR (conventionally the SR scale factor).
    border = opt['scale']

    # ----------------------------------------
    # save opt to a '../option.json' file
    # ----------------------------------------
    option.save(opt)

    # ----------------------------------------
    # return None for missing key
    # ----------------------------------------
    opt = option.dict_to_nonedict(opt)

    # ----------------------------------------
    # configure logger
    # ----------------------------------------
    logger_name = 'train'
    utils_logger.logger_info(
        logger_name, os.path.join(opt['path']['log'], logger_name + '.log'))
    logger = logging.getLogger(logger_name)
    logger.info(option.dict2str(opt))

    # ----------------------------------------
    # seed: fall back to a random seed when the option omits one, and
    # seed python / numpy / torch CPU / all CUDA devices for repeatability.
    # ----------------------------------------
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    logger.info('Random seed: {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    '''
    # ----------------------------------------
    # Step--2 (creat dataloader)
    # ----------------------------------------
    '''

    # ----------------------------------------
    # 1) create_dataset
    # 2) creat_dataloader for train and test
    # ----------------------------------------
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = define_Dataset(phase, dataset_opt)
            # Iterations per epoch, rounding up for the final partial batch.
            train_size = int(
                math.ceil(
                    len(train_set) / dataset_opt['dataloader_batch_size']))
            logger.info('Number of train images: {:,d}, iters: {:,d}'.format(
                len(train_set), train_size))
            train_loader = DataLoader(
                train_set,
                batch_size=dataset_opt['dataloader_batch_size'],
                shuffle=dataset_opt['dataloader_shuffle'],
                num_workers=dataset_opt['dataloader_num_workers'],
                drop_last=True,
                pin_memory=True)
        elif phase == 'val':
            # Validation runs one image at a time, in a fixed order.
            val_set = define_Dataset(phase, dataset_opt)
            val_loader = DataLoader(val_set,
                                    batch_size=1,
                                    shuffle=False,
                                    num_workers=1,
                                    drop_last=False,
                                    pin_memory=True)
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)

    '''
    # ----------------------------------------
    # Step--3 (model_1)
    # ----------------------------------------
    '''
    # stage1=True selects the first-stage (SR) model variant.
    model_1 = define_Model(opt, stage1=True)
    #logger.info(model_1.info_network())
    model_1.init_train()
    #logger.info(model_1.info_params())

    # Effectively "train until killed"; checkpoints gate on current_step.
    for epoch in range(100000):
        for i, train_data in enumerate(train_loader):
            current_step += 1
            # Scheduler step happens before the optimizer step each iteration.
            model_1.update_learning_rate(current_step)
            model_1.feed_data(train_data)
            model_1.optimize_parameters(current_step)

            if current_step % opt['train']['checkpoint_save'] == 0:
                # logger.info('Saving the model.')
                model_1.save(current_step)

            # -------------------------------
            # model_1 testing
            # -------------------------------
            if current_step % opt['train']['checkpoint_test'] == 0:
                # training info
                logs = model_1.current_log()  # such as loss
                message_tr = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
                    epoch, current_step, model_1.current_learning_rate())
                for k, v in logs.items():  # merge log information into message
                    message_tr += '\t{:s}: {:.3e}'.format(k, v)

                avg_psnr = 0.0
                idx = 0
                # NOTE(review): assumes the val set is non-empty — idx == 0
                # would divide by zero below; confirm against the options file.
                for val_data in val_loader:
                    idx += 1
                    model_1.feed_data(val_data)
                    model_1.test()
                    visuals = model_1.current_visuals()
                    # 'E'/'H' presumably estimate and ground-truth high-quality
                    # image (KAIR naming convention) — confirm in the model.
                    E_img = util.tensor2uint(visuals['E'])
                    H_img = util.tensor2uint(visuals['H'])

                    # -----------------------
                    # calculate PSNR
                    # -----------------------
                    current_psnr = util.calculate_psnr(E_img, H_img,
                                                       border=border)
                    avg_psnr += current_psnr

                avg_psnr = avg_psnr / idx
                # testing log: training stats and validation PSNR on one line.
                message_val = '\tStage SR Val_PSNR_avg: {:<.2f}dB'.format(
                    avg_psnr)
                message = message_tr + message_val
                logger.info(message)

    logger.info('End of Stage SR training.')
def main(json_path='options/train_msrresnet_psnr.json'):
    '''
    PSNR-oriented MSRResNet training entry point.

    Parses options, resumes from the last 'G' checkpoint when present,
    builds train/test dataloaders, then trains with periodic loss printing,
    model saving, and per-image test PSNR evaluation (saving each estimate).

    # ----------------------------------------
    # Step--1 (prepare opt)
    # ----------------------------------------
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, default=json_path,
                        help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)
    # Create configured directories; pretrained-model entries are file paths.
    util.mkdirs(
        (path for key, path in opt['path'].items() if 'pretrained' not in key))

    # ----------------------------------------
    # update opt
    # ----------------------------------------
    # -->-->-->-->-->-->-->-->-->-->-->-->-->-
    # Resume: locate the newest 'G' checkpoint and continue its step count.
    init_iter, init_path_G = option.find_last_checkpoint(opt['path']['models'],
                                                         net_type='G')
    opt['path']['pretrained_netG'] = init_path_G
    current_step = init_iter
    # PSNR border crop, conventionally equal to the SR scale factor.
    border = opt['scale']
    # --<--<--<--<--<--<--<--<--<--<--<--<--<-

    # ----------------------------------------
    # save opt to a '../option.json' file
    # ----------------------------------------
    option.save(opt)

    # ----------------------------------------
    # return None for missing key
    # ----------------------------------------
    opt = option.dict_to_nonedict(opt)

    # ----------------------------------------
    # configure logger
    # ----------------------------------------
    logger_name = 'train'
    utils_logger.logger_info(
        logger_name, os.path.join(opt['path']['log'], logger_name + '.log'))
    logger = logging.getLogger(logger_name)
    logger.info(option.dict2str(opt))

    # ----------------------------------------
    # seed: random fallback when unset; seed python/numpy/torch/CUDA.
    # ----------------------------------------
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    logger.info('Random seed: {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    '''
    # ----------------------------------------
    # Step--2 (creat dataloader)
    # ----------------------------------------
    '''

    # ----------------------------------------
    # 1) create_dataset
    # 2) creat_dataloader for train and test
    # ----------------------------------------
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = define_Dataset(dataset_opt)
            # Iterations per epoch, rounded up for the last partial batch.
            train_size = int(
                math.ceil(
                    len(train_set) / dataset_opt['dataloader_batch_size']))
            logger.info('Number of train images: {:,d}, iters: {:,d}'.format(
                len(train_set), train_size))
            train_loader = DataLoader(
                train_set,
                batch_size=dataset_opt['dataloader_batch_size'],
                shuffle=dataset_opt['dataloader_shuffle'],
                num_workers=dataset_opt['dataloader_num_workers'],
                drop_last=True,
                pin_memory=True)
        elif phase == 'test':
            # Deterministic one-image-at-a-time evaluation loader.
            test_set = define_Dataset(dataset_opt)
            test_loader = DataLoader(test_set,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=1,
                                     drop_last=False,
                                     pin_memory=True)
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)

    '''
    # ----------------------------------------
    # Step--3 (initialize model)
    # ----------------------------------------
    '''
    model = define_Model(opt)
    model.init_train()
    logger.info(model.info_network())
    logger.info(model.info_params())

    '''
    # ----------------------------------------
    # Step--4 (main training)
    # ----------------------------------------
    '''
    for epoch in range(100):  # keep running
        for i, train_data in enumerate(train_loader):

            current_step += 1

            # -------------------------------
            # 1) update learning rate
            # -------------------------------
            model.update_learning_rate(current_step)

            # -------------------------------
            # 2) feed patch pairs
            # -------------------------------
            model.feed_data(train_data)

            # -------------------------------
            # 3) optimize parameters
            # -------------------------------
            model.optimize_parameters(current_step)

            # -------------------------------
            # 4) training information
            # -------------------------------
            if current_step % opt['train']['checkpoint_print'] == 0:
                logs = model.current_log()  # such as loss
                message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
                    epoch, current_step, model.current_learning_rate())
                for k, v in logs.items():  # merge log information into message
                    message += '{:s}: {:.3e} '.format(k, v)
                logger.info(message)

            # -------------------------------
            # 5) save model
            # -------------------------------
            if current_step % opt['train']['checkpoint_save'] == 0:
                logger.info('Saving the model.')
                model.save(current_step)

            # -------------------------------
            # 6) testing
            # -------------------------------
            if current_step % opt['train']['checkpoint_test'] == 0:

                avg_psnr = 0.0
                idx = 0

                # NOTE(review): assumes a non-empty test set — idx == 0 would
                # divide by zero after this loop; confirm in the options file.
                for test_data in test_loader:
                    idx += 1
                    image_name_ext = os.path.basename(test_data['L_path'][0])
                    img_name, ext = os.path.splitext(image_name_ext)

                    # Each test image gets its own output directory.
                    img_dir = os.path.join(opt['path']['images'], img_name)
                    util.mkdir(img_dir)

                    model.feed_data(test_data)
                    model.test()

                    visuals = model.current_visuals()
                    # 'E'/'H' presumably estimate and ground-truth (KAIR
                    # convention) — confirm against the model implementation.
                    E_img = util.tensor2uint(visuals['E'])
                    H_img = util.tensor2uint(visuals['H'])

                    # -----------------------
                    # save estimated image E
                    # -----------------------
                    save_img_path = os.path.join(
                        img_dir, '{:s}_{:d}.png'.format(img_name,
                                                        current_step))
                    util.imsave(E_img, save_img_path)

                    # -----------------------
                    # calculate PSNR
                    # -----------------------
                    current_psnr = util.calculate_psnr(E_img, H_img,
                                                       border=border)

                    logger.info('{:->4d}--> {:>10s} | {:<4.2f}dB'.format(
                        idx, image_name_ext, current_psnr))

                    avg_psnr += current_psnr

                avg_psnr = avg_psnr / idx

                # testing log
                logger.info(
                    '<epoch:{:3d}, iter:{:8,d}, Average PSNR : {:<.2f}dB\n'.
                    format(epoch, current_step, avg_psnr))

    logger.info('Saving the final model.')
    model.save('latest')
    logger.info('End of training.')
def main(json_path: str = 'options/train_denoising.json'):
    """Train a denoising model with periodic PSNR/SSIM evaluation.

    Parses the option JSON, seeds all RNGs, builds the train loader and one
    test loader per test set, then trains indefinitely, logging training
    stats and tabulated per-dataset test metrics at configured intervals.

    Args:
        json_path: Default path to the option JSON file; overridable
            via the ``-opt`` command-line argument.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, default=json_path,
                        help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)
    # Create configured directories; pretrained entries are file paths.
    util.makedirs(
        [path for key, path in opt['path'].items() if 'pretrained' not in key])

    current_step = 0

    option.save(opt)

    # logger
    logger_name = 'train'
    utils_logger.logger_info(
        logger_name, os.path.join(opt['path']['log'], logger_name + '.log'))
    logger = logging.getLogger(logger_name)
    logger.info(option.dict2str(opt))

    # seed
    # FIX: a missing 'manual_seed' (None) previously reached
    # torch.manual_seed(None), which raises TypeError. Fall back to a random
    # seed and log it, matching the other training entry points in this file.
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    logger.info('Random seed: {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Fully-qualified call for consistency with the sibling scripts (avoids
    # relying on a bare `cuda` name being imported at module level).
    torch.cuda.manual_seed_all(seed)

    # data
    opt_data_train: Dict[str, Any] = opt["data"]["train"]
    train_set: DatasetDenoising = select_dataset(opt_data_train, "train")
    train_loader: DataLoader[DatasetDenoising] = DataLoader(
        train_set,
        batch_size=opt_data_train['batch_size'],
        shuffle=True,
        num_workers=opt_data_train['num_workers'],
        drop_last=True,
        pin_memory=True)

    opt_data_test = opt["data"]["test"]
    test_sets: List[DatasetDenoising] = select_dataset(opt_data_test, "test")
    test_loaders: List[DataLoader[DatasetDenoising]] = []
    for test_set in test_sets:
        # drop_last=False for clarity (at batch_size=1 nothing is ever
        # dropped either way); matches the standalone test script.
        test_loaders.append(
            DataLoader(test_set,
                       batch_size=1,
                       shuffle=False,
                       num_workers=1,
                       drop_last=False,
                       pin_memory=True))

    # model
    model = Model(opt)
    model.init()

    # train
    start = time.time()
    for epoch in range(1000000):  # keep running
        for train_data in tqdm(train_loader):
            current_step += 1
            model.feed_data(train_data)
            model.train()
            model.update_learning_rate(current_step)

            if current_step % opt['train']['checkpoint_log'] == 0:
                model.log_train(current_step, epoch, logger)

            if current_step % opt['train']['checkpoint_test'] == 0:
                # Per-dataset metric columns: avg_psnrs[name][i] is the
                # average PSNR of dataset `name` under the i-th tag (e.g.
                # noise level), in the same order tags were first seen.
                avg_psnrs: Dict[str, List[float]] = {}
                avg_ssims: Dict[str, List[float]] = {}
                tags: List[str] = []
                test_index = 0
                for test_loader in tqdm(test_loaders):
                    test_set: DatasetDenoising = test_loader.dataset
                    avg_psnr = 0.
                    avg_ssim = 0.
                    for test_data in tqdm(test_loader):
                        test_index += 1
                        model.feed_data(test_data)
                        model.test()
                        psnr, ssim = model.cal_metrics()
                        avg_psnr += psnr
                        avg_ssim += ssim
                        if current_step % opt['train'][
                                'checkpoint_saveimage'] == 0:
                            model.save_visuals(test_set.tag)
                    # SSIM reported as a percentage (x100), PSNR in dB.
                    avg_psnr = round(avg_psnr / len(test_loader), 2)
                    avg_ssim = round(avg_ssim * 100 / len(test_loader), 2)
                    name = test_set.name
                    if name in avg_psnrs:
                        avg_psnrs[name].append(avg_psnr)
                        avg_ssims[name].append(avg_ssim)
                    else:
                        avg_psnrs[name] = [avg_psnr]
                        avg_ssims[name] = [avg_ssim]
                    # De-duplicate while preserving first-seen order so the
                    # table header aligns with the appended value columns.
                    if test_set.tag not in tags:
                        tags.append(test_set.tag)
                header = ['Dataset'] + tags
                t = PrettyTable(header)
                for key, value in avg_psnrs.items():
                    t.add_row([key] + value)
                logger.info(f"Test PSNR:\n{t}")
                t = PrettyTable(header)
                for key, value in avg_ssims.items():
                    t.add_row([key] + value)
                logger.info(f"Test SSIM:\n{t}")
                logger.info(f"Time elapsed: {time.time() - start:.2f}")
                start = time.time()
                model.save(logger)
def main(config_path: str = 'options/test_denoising.json'):
    """Evaluate a trained denoising model on every configured test set.

    Parses the option JSON, builds one single-image loader per test set,
    runs the model over each, saves visual results, and logs per-dataset
    average PSNR/SSIM as PrettyTable tables.

    Args:
        config_path: Default path to the option JSON file; overridable
            via the ``-opt`` command-line argument.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, default=config_path,
                        help='Path to option JSON file.')
    # NOTE(review): is_train=True in a test script looks suspicious but the
    # option parser may rely on it for path layout — confirm before changing.
    opt = option.parse(parser.parse_args().opt, is_train=True)
    util.makedirs(
        [path for key, path in opt['path'].items() if 'pretrained' not in key])

    option.save(opt)

    logger_name = 'test'
    utils_logger.logger_info(
        logger_name, os.path.join(opt['path']['log'], logger_name + '.log'))
    logger = logging.getLogger(logger_name)
    logger.info(option.dict2str(opt))

    opt_data_test = opt["data"]["test"]
    test_sets: List[DatasetDenoising] = select_dataset(opt_data_test, "test")
    test_loaders: List[DataLoader[DatasetDenoising]] = []
    for test_set in test_sets:
        test_loaders.append(
            DataLoader(test_set,
                       batch_size=1,
                       shuffle=False,
                       num_workers=1,
                       drop_last=False,
                       pin_memory=True))

    model = Model(opt)
    model.init()

    # Per-dataset metric columns: avg_psnrs[name][i] corresponds to the
    # i-th tag in first-seen order; SSIM is reported x100, PSNR in dB.
    avg_psnrs: Dict[str, List[float]] = {}
    avg_ssims: Dict[str, List[float]] = {}
    tags = []
    for test_loader in test_loaders:
        test_set: DatasetDenoising = test_loader.dataset
        avg_psnr = 0.
        avg_ssim = 0.
        for test_data in test_loader:
            model.feed_data(test_data)
            model.test()
            psnr, ssim = model.cal_metrics()
            avg_psnr += psnr
            avg_ssim += ssim
            model.save_visuals(test_set.tag)
        avg_psnr = round(avg_psnr / len(test_loader), 2)
        avg_ssim = round(avg_ssim * 100 / len(test_loader), 2)
        name = test_set.name
        if name in avg_psnrs:
            avg_psnrs[name].append(avg_psnr)
            avg_ssims[name].append(avg_ssim)
        else:
            avg_psnrs[name] = [avg_psnr]
            avg_ssims[name] = [avg_ssim]
        tags.append(test_set.tag)

    # FIX: list(set(tags)) discarded insertion order (set ordering is
    # arbitrary), so the header columns could misalign with the value lists
    # appended in loader order. dict.fromkeys de-duplicates while keeping
    # first-seen order, matching how the training script builds its header.
    header = ['Dataset'] + list(dict.fromkeys(tags))
    t = PrettyTable(header)
    for key, value in avg_psnrs.items():
        t.add_row([key] + value)
    logger.info(f"Test PSNR:\n{t}")
    t = PrettyTable(header)
    for key, value in avg_ssims.items():
        t.add_row([key] + value)
    logger.info(f"Test SSIM:\n{t}")
def main(json_path='options/train_drunet.json'): ''' # ---------------------------------------- # Step--1 (prepare opt) # ---------------------------------------- ''' parser = argparse.ArgumentParser() parser.add_argument('-opt', type=str, default=json_path, help='Path to option JSON file.') parser.add_argument('--launcher', default='pytorch', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--dist', default=False) opt = option.parse(parser.parse_args().opt, is_train=True) util.mkdirs((path for key, path in opt['path'].items() if 'pretrained' not in key)) # ---------------------------------------- # update opt # ---------------------------------------- # -->-->-->-->-->-->-->-->-->-->-->-->-->- init_iter, init_path_G = option.find_last_checkpoint(opt['path']['models'], net_type='G') opt['path']['pretrained_netG'] = init_path_G current_step = init_iter border = 0 # --<--<--<--<--<--<--<--<--<--<--<--<--<- # ---------------------------------------- # save opt to a '../option.json' file # ---------------------------------------- option.save(opt) # ---------------------------------------- # return None for missing key # ---------------------------------------- opt = option.dict_to_nonedict(opt) opt['dist'] = parser.parse_args().dist # ---------------------------------------- # configure logger # ---------------------------------------- logger_name = 'train' utils_logger.logger_info(logger_name, os.path.join(opt['path']['log'], logger_name+'.log')) logger = logging.getLogger(logger_name) logger.info(option.dict2str(opt)) # ---------------------------------------- # distributed settings # ---------------------------------------- if opt['dist']: init_dist('pytorch') opt['rank'], opt['world_size'] = get_dist_info() print(str(opt['rank']) + '----' + str(opt['world_size'])) # ---------------------------------------- # seed # ---------------------------------------- seed = opt['train']['manual_seed'] if seed is None: seed = 
random.randint(1, 10000) logger.info('Random seed: {}'.format(seed)) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) ''' # ---------------------------------------- # Step--2 (creat dataloader) # ---------------------------------------- ''' # ---------------------------------------- # 1) create_dataset # 2) creat_dataloader for train and test # ---------------------------------------- for phase, dataset_opt in opt['datasets'].items(): if phase == 'train': train_set = define_Dataset(dataset_opt) train_size = int(math.ceil(len(train_set) / dataset_opt['dataloader_batch_size'])) logger.info('Number of train images: {:,d}, iters: {:,d}'.format(len(train_set), train_size)) if opt['dist']: train_sampler = DistributedSampler(train_set, shuffle=dataset_opt['dataloader_shuffle'], drop_last=True, seed=seed+opt['rank']) train_loader = DataLoader(train_set, batch_size=dataset_opt['dataloader_batch_size']//opt['num_gpu'], shuffle=False, num_workers=dataset_opt['dataloader_num_workers']//opt['num_gpu'], drop_last=True, pin_memory=True, sampler=train_sampler) else: train_loader = DataLoader(train_set, batch_size=dataset_opt['dataloader_batch_size'], shuffle=dataset_opt['dataloader_shuffle'], num_workers=dataset_opt['dataloader_num_workers'], drop_last=True, pin_memory=True) elif phase == 'test': test_set = define_Dataset(dataset_opt) test_loader = DataLoader(test_set, batch_size=1, shuffle=False, num_workers=1, drop_last=False, pin_memory=True) else: raise NotImplementedError("Phase [%s] is not recognized." % phase) ''' # ---------------------------------------- # Step--3 (initialize model) # ---------------------------------------- ''' model = define_Model(opt) model.init_train() if opt['rank'] == 0: logger.info(model.info_params())