Example #1
class SISR():
    def __init__(self):
        parser = argparse.ArgumentParser(description='Test Super Resolution Models')
        parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
        opt = option.parse(parser.parse_args().opt)
        opt = option.dict_to_nonedict(opt)

        # initial configure
        scale = opt['scale']
        degrad = opt['degradation']
        network_opt = opt['networks']
        model_name = network_opt['which_model'].upper()
        if opt['self_ensemble']: model_name += 'plus'

        # create test dataloaders (the lists must exist before the loop appends to them)
        bm_names = []
        test_loaders = []
        for _, dataset_opt in sorted(opt['datasets'].items()):
            test_set = create_dataset(dataset_opt)
            test_loader = create_dataloader(test_set, dataset_opt)
            test_loaders.append(test_loader)
            print('===> Test Dataset: [%s]   Number of images: [%d]' % (test_set.name(), len(test_set)))
            bm_names.append(test_set.name())

        # create solver (and load model)
        solver = create_solver(opt)

        print("==================================================")
        print("Method: %s || Scale: %d || Degradation: %s" % (model_name, scale, degrad))
Example #2
def downloadModel():
    # json parse
    parser = argparse.ArgumentParser(
        description='Test Super Resolution Models')
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    opt = option.dict_to_nonedict(opt)

    # initialize settings from the parsed JSON
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()
    if opt['self_ensemble']: model_name += 'plus'

    # load the model from the JSON options file
    solver = create_solver(opt)

    # run SR once on the test set as a warm-up, then do the real SR
    shutil.copy('./results/LR/Test/!.png', './results/LR/MyImage/!.png')
    shutil.copy('./results/LR/Test/!.png', './results/LR/MyImage/!!.png')
    SR(solver, opt, model_name)
    os.remove('./results/LR/MyImage/!!.png')
    return solver, opt, model_name


def predictscore(listLRs, listSRs, LRpath_list):
    optpath = './options/test/testScorenet_load.json'
    opt = option.parse_imagetest(optpath)
    opt = option.dict_to_nonedict(opt)

    # initial configure
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()
    if opt['self_ensemble']: model_name += 'plus'

    tmpsolver = create_solver(opt)
    #print('===> Start Test')
    #print("==================================================")
    listscore = []
    
    for i in range(len(listLRs)):
        LR = listLRs[i]
        SR = listSRs[i]
        LR, SR = np2Tensor([LR,SR], opt['rgb_range'])
        tmpsolver.feed_imgs(LR,SR)
        tmpsolver.test_scorenet()
        print(LRpath_list[i])
        visuals = tmpsolver.get_current_visual_scorenet(need_HR=None)
        score = visuals['predQS']#.to(torch.double) 
        listscore.append(score)

    return listscore
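
# predictscore above assumes an np2Tensor helper. A minimal EDSR-style sketch
# (an assumption; the original helper is not shown): convert HWC uint8 numpy
# images to CHW float tensors scaled to rgb_range.
import numpy as np
import torch

def np2Tensor_sketch(imgs, rgb_range=255):
    def _to_tensor(img):
        chw = np.ascontiguousarray(img.transpose((2, 0, 1)))  # HWC -> CHW
        tensor = torch.from_numpy(chw).float()
        tensor.mul_(rgb_range / 255.0)  # rescale pixel values to rgb_range
        return tensor
    return [_to_tensor(img) for img in imgs]
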
def setup(opts):
    global opt
    model_scale = opts["scale"]
    model = model_scale + "/" + "model.pth"
    config = model_scale + "/" + "config.json"

    opt = option.parse(config)
    opt = option.dict_to_nonedict(opt)

    solver = create_solver(opt, model)

    return solver
Example #5
def main():
    #### options
    parser = argparse.ArgumentParser()
    parser.add_argument('-config', type=str, help='Path to YAML config')
    args = parser.parse_args()
    cfg = util.parse(args.config)

    # generate random problem data (marginals and cost matrix)
    sampler = create_sampler(cfg)
    cfg["mu"], cfg['nu'] = sampler.sample_weight(cfg['dim_n'], cfg['dim_m'])
    cfg["C"] = sampler.sample_cost(cfg['dim_n'], cfg['dim_m'])

    solver = create_solver(cfg)
    solver.solve()
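
Example #5 fills in marginals cfg['mu'], cfg['nu'] and a cost matrix cfg['C'] from a sampler before solving, which reads like an optimal-transport setup. A minimal sampler sketch (an assumption; create_sampler is not shown), with uniform marginals and a random nonnegative cost:

import numpy as np

class UniformSamplerSketch:
    def sample_weight(self, n, m):
        # uniform marginals, each summing to 1
        return np.full(n, 1.0 / n), np.full(m, 1.0 / m)

    def sample_cost(self, n, m):
        # random nonnegative n x m cost matrix
        return np.abs(np.random.randn(n, m))
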
Example #6
def main():
    parser = argparse.ArgumentParser(
        description='Train Super Resolution Models')
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    print('Inside Train.py')
    print(opt['datasets']['train']['data_path'])

    # random seed
    seed = opt['solver']['manual_seed']
    if seed is None: seed = random.randint(1, 10000)
    print("===> Random Seed: [%d]" % seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_loader = create_dataloader(train_set, dataset_opt)
            print('===> Train Dataset: %s   Number of images: [%d]' %
                  (train_set.name(), len(train_set)))
            if train_loader is None:
                raise ValueError("[Error] The training data does not exist")

        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('===> Val Dataset: %s   Number of images: [%d]' %
                  (val_set.name(), len(val_set)))

        else:
            raise NotImplementedError(
                "[Error] Dataset phase [%s] in *.json is not recognized." %
                phase)

    solver = create_solver(opt)

    scale = opt['scale']
    model_name = opt['networks']['which_model'].upper()

    print('===> Start Train')
    print("==================================================")

    solver_log = solver.get_current_log()

    NUM_EPOCH = int(opt['solver']['num_epochs'])
    start_epoch = solver_log['epoch']

    print("Method: %s || Scale: %d || Epoch Range: (%d ~ %d)" %
          (model_name, scale, start_epoch, NUM_EPOCH))

    for epoch in range(start_epoch, NUM_EPOCH + 1):
        print('\n===> Training Epoch: [%d/%d]...  Learning Rate: %f' %
              (epoch, NUM_EPOCH, solver.get_current_learning_rate()))

        # Initialization
        solver_log['epoch'] = epoch

        # Train model
        train_loss_list = []
        with tqdm(total=len(train_loader),
                  desc='Epoch: [%d/%d]' % (epoch, NUM_EPOCH),
                  miniters=1) as t:
            for iter, batch in enumerate(train_loader):
                solver.feed_data(batch)
                iter_loss = solver.train_step()
                batch_size = batch['LR'].size(0)
                train_loss_list.append(iter_loss * batch_size)
                t.set_postfix_str("Batch Loss: %.4f" % iter_loss)
                t.update()

        solver_log['records']['train_loss'].append(
            sum(train_loss_list) / len(train_set))
        solver_log['records']['lr'].append(solver.get_current_learning_rate())

        print('\nEpoch: [%d/%d]   Avg Train Loss: %.6f' %
              (epoch, NUM_EPOCH, sum(train_loss_list) / len(train_set)))

        print('===> Validating...')

        psnr_list = []
        ssim_list = []
        val_loss_list = []

        for iter, batch in enumerate(val_loader):
            solver.feed_data(batch)
            iter_loss = solver.test()
            val_loss_list.append(iter_loss)

            # calculate evaluation metrics
            visuals = solver.get_current_visual()
            # print(visuals['SR'].shape)
            psnr, ssim = util.calc_metrics(visuals['SR'],
                                           visuals['HR'],
                                           crop_border=scale)
            psnr_list.append(psnr)
            ssim_list.append(ssim)
            # if opt["save_image"]:
            #     solver.save_current_visual(epoch, iter)
        solver_log['records']['val_loss'].append(
            sum(val_loss_list) / len(val_loss_list))
        solver_log['records']['psnr'].append(sum(psnr_list) / len(psnr_list))
        solver_log['records']['ssim'].append(sum(ssim_list) / len(ssim_list))

        # record the best epoch
        epoch_is_best = False
        if solver_log['best_pred'] < (sum(psnr_list) / len(psnr_list)):
            solver_log['best_pred'] = (sum(psnr_list) / len(psnr_list))
            epoch_is_best = True
            solver_log['best_epoch'] = epoch

        print(
            "[%s] PSNR: %.2f   SSIM: %.4f   Loss: %.6f   Best PSNR: %.2f in Epoch: [%d]"
            %
            (val_set.name(), sum(psnr_list) / len(psnr_list), sum(ssim_list) /
             len(ssim_list), sum(val_loss_list) / len(val_loss_list),
             solver_log['best_pred'], solver_log['best_epoch']))

        solver.set_current_log(solver_log)
        solver.save_checkpoint(epoch, epoch_is_best)
        solver.save_current_log()

        # update lr
        solver.update_learning_rate(epoch)

    print('===> Finished !')
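
# The training loop above relies on util.calc_metrics for PSNR/SSIM. A rough
# sketch of the PSNR half (an assumption; the real helper also returns SSIM
# and may convert RGB to the Y channel first):
import numpy as np

def calc_psnr_sketch(sr, hr, crop_border=0):
    # sr, hr: uint8 numpy images of identical shape
    if crop_border > 0:
        sr = sr[crop_border:-crop_border, crop_border:-crop_border, ...]
        hr = hr[crop_border:-crop_border, crop_border:-crop_border, ...]
    mse = np.mean((sr.astype(np.float64) - hr.astype(np.float64)) ** 2)
    return float('inf') if mse == 0 else 10 * np.log10(255.0 ** 2 / mse)
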
def main():
    parser = argparse.ArgumentParser(description='Test Super Resolution Models')
    parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    opt = option.dict_to_nonedict(opt)

    # initial configure
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()
    if opt['self_ensemble']: model_name += 'plus'

    # create test dataloader
    bm_names =[]
    test_loaders = []
    for _, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s]   Number of images: [%d]' % (test_set.name(), len(test_set)))
        bm_names.append(test_set.name())

    # create solver (and load model)
    solver = create_solver(opt)
    # Test phase
    print('===> Start Test')
    print("==================================================")
    print("Method: %s || Scale: %d || Degradation: %s"%(model_name, scale, degrad))

    f = open(opt['savefile'], "w")


    sr_list = []
    lr_list = []
    LRpath_list = []
    #ORpath_list = []

    # NOTE: uses the loader left over from the dataset loop above,
    # i.e. this test phase assumes a single test dataset
    need_HR = False if test_loader.dataset.__class__.__name__.find('LRHR') < 0 else True

    # save a txt file for the score net, containing:
    #   LR, SR, score   for LRHR datasets
    #   LR, SR          for LR-only datasets
    save_img_path = opt['dir']

    for iter, batch in enumerate(test_loader):
        solver.feed_data(batch, need_HR=need_HR)
        solver.test()

        visuals = solver.get_current_visual(need_HR=need_HR)
        sr_list.append(visuals['SR'])
        lr_list.append(visuals['LR'])
        print(batch['LR_path'])
        LRpath_list.append(batch['LR_path'])
        #ORpath_list.append(batch['HR_path'])
        
    listscores = predictscore(lr_list, sr_list, LRpath_list)
    for i in range(len(listscores)):
        # write name and score; strip the tensor repr down to the bare number
        tmpscore = str(listscores[i])
        tmpscore = tmpscore.replace('tensor', '')
        tmpscore = tmpscore.replace('[', '')
        tmpscore = tmpscore.replace(']', '')
        tmpscore = tmpscore.replace('(', '')
        tmpscore = tmpscore.replace(')', '')

        tmppath = str(LRpath_list[i])
        tmppath = tmppath.replace('\'', '')
        tmppath = tmppath.replace('[', '')
        tmppath = tmppath.replace(']', '')
        writetofile = tmppath + '\t' + tmpscore + '\n'
        f.write(writetofile)

    f.close()
    print("==================================================")
    print("===> Finished !")
Example #8
# NOTE: this snippet begins mid-script; the loop header and train branch below
# are reconstructed (an assumption) so the fragment parses like the other examples.
for phase, dataset_opt in sorted(opt['datasets'].items()):
    if phase == 'train':
        train_set = create_dataset(dataset_opt)
        # one dataloader per federated client
        train_loaders = [create_dataloader(train_set, dataset_opt)
                         for _ in range(num_clients)]
        print('======> Train Dataset: %s, Number of images: [%d]' % (train_set.name(), len(train_set)))
        if train_loaders is None:
            raise ValueError("[Error] The training data does not exist")

    elif phase == 'val':
        val_set = create_dataset(dataset_opt)
        val_loader = create_dataloader(val_set, dataset_opt)
        print('======> Val Dataset: %s, Number of images: [%d]' % (val_set.name(), len(val_set)))

    else:
        raise NotImplementedError("[Error] Dataset phase [%s] in *.json is not recognized." % phase)


##### Create model and solver #####
scale = opt['scale']
client_solvers = [create_solver(opt) for _ in range(num_clients)]
global_solver = create_solver(opt)
model_name = opt['networks']['which_model'].upper()


print('===> Start Train')
print("==================================================")
print("Method: %s || Scale: %d || Total epoch: %d " %(model_name, scale, num_rounds))


##### Create solver log for saving #####
solver_log = global_solver.get_current_log()
start_epoch = solver_log['round']


##### Helper functions for federated training #####
Example #9
"""
Created on Apr 12, 2019
@author: Yuedong Chen
"""

from options import Options
from solvers import create_solver

if __name__ == '__main__':
    opt = Options().parse()

    solver = create_solver(opt)
    solver.run_solver()

    print('[THE END]')
from options import Options
from solvers import create_solver

if __name__ == '__main__':
    print('[Starting]')
    opt = Options().parse()  # read command-line arguments into an Options config
    solver = create_solver(opt)  # build the solver from the config
    solver.run_solver()
    print('[THE END]')

def main():
    torch.backends.cudnn.benchmark = True

    args = option.add_args()
    opt = option.parse(args.opt,
                       nblocks=args.nblocks,
                       nlayers=args.nlayers,
                       iterations=args.iterations,
                       trained_model=args.trained_path,
                       lr_path=args.lr_path)

    # fix random seed
    # seed_torch(opt['solver']['manual_seed'])

    # create train and val dataloader
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_loader = create_dataloader(train_set, dataset_opt)
            print('===> Train Dataset: %s   Number of images: [%d]' %
                  (train_set.name(), len(train_set)))
            if train_loader is None:
                raise ValueError("[Error] The training data does not exist")

        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('===> Val Dataset: %s   Number of images: [%d]' %
                  (val_set.name(), len(val_set)))

        else:
            raise NotImplementedError(
                "[Error] Dataset phase [%s] in *.json is not recognized." %
                phase)

    solver = create_solver(opt)

    scale = opt['scale']
    model_name = opt['networks']['which_model'].upper()

    print('===> Start Train')
    print("==================================================")

    solver_log = solver.get_current_log()

    NUM_EPOCH = int(opt['solver']['num_epochs'])
    start_epoch = solver_log['epoch']

    print("Method: %s || Scale: %d || Epoch Range: (%d ~ %d)" %
          (model_name, scale, start_epoch, NUM_EPOCH))

    for epoch in range(start_epoch, NUM_EPOCH + 1):
        print('\n===> Training Epoch: [%d/%d]...  Learning Rate: %f' %
              (epoch, NUM_EPOCH, solver.get_current_learning_rate()))

        # Initialization
        solver_log['epoch'] = epoch

        # Train model
        train_loss_list = []
        with tqdm(total=len(train_loader),
                  desc='Epoch: [%d/%d]' % (epoch, NUM_EPOCH),
                  miniters=1) as t:
            for iter, batch in enumerate(train_loader):
                solver.feed_data(batch)
                iter_loss = solver.train_step()
                batch_size = batch['LR'].size(0)
                train_loss_list.append(iter_loss * batch_size)
                t.set_postfix_str("Batch Loss: %.4f" % iter_loss)
                t.update()

        solver_log['records']['train_loss'].append(
            sum(train_loss_list) / len(train_set))
        solver_log['records']['lr'].append(solver.get_current_learning_rate())

        print('\nEpoch: [%d/%d]   Avg Train Loss: %.6f' %
              (epoch, NUM_EPOCH, sum(train_loss_list) / len(train_set)))

        print('===> Validating...')

        psnr_list = []
        ssim_list = []
        val_loss_list = []

        for iter, batch in enumerate(val_loader):
            solver.feed_data(batch)
            iter_loss = solver.test()
            val_loss_list.append(iter_loss)

            # calculate evaluation metrics
            visuals = solver.get_current_visual()
            psnr, ssim = util.calc_metrics(visuals['SR'],
                                           visuals['HR'],
                                           crop_border=scale)
            psnr_list.append(psnr)
            ssim_list.append(ssim)

            if opt["save_image"]:
                solver.save_current_visual(epoch, iter)

        solver_log['records']['val_loss'].append(
            sum(val_loss_list) / len(val_loss_list))
        solver_log['records']['psnr'].append(sum(psnr_list) / len(psnr_list))
        solver_log['records']['ssim'].append(sum(ssim_list) / len(ssim_list))

        # record the best epoch
        epoch_is_best = False
        if solver_log['best_pred'] < (sum(psnr_list) / len(psnr_list)):
            solver_log['best_pred'] = (sum(psnr_list) / len(psnr_list))
            epoch_is_best = True
            solver_log['best_epoch'] = epoch

        print(
            "[%s] PSNR: %.2f   SSIM: %.4f   Loss: %.6f   Best PSNR: %.2f in Epoch: [%d]"
            %
            (val_set.name(), sum(psnr_list) / len(psnr_list), sum(ssim_list) /
             len(ssim_list), sum(val_loss_list) / len(val_loss_list),
             solver_log['best_pred'], solver_log['best_epoch']))

        solver.set_current_log(solver_log)
        solver.save_checkpoint(epoch, epoch_is_best)
        solver.save_current_log()

        # update lr
        # solver.update_learning_rate()
        solver.scheduler.step()

    print('===> Finished !')
Example #12
def main():
    args = option.add_args()
    opt = option.parse(args)

    # random seed
    seed = opt['solver']['manual_seed']
    if seed is None: seed = random.randint(1, 10000)
    print("===> Random Seed: [%d]" % seed)
    random.seed(seed)
    torch.manual_seed(seed)
    pytorch_seed(seed)

    # create train and val dataloader
    train_loader_list = []
    bm_names = []
    collate_fn = None
    if opt['collate_fn'] == 'my_collate':
        collate_fn = my_collate.collate_fn
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        if 'train' in phase:
            train_set = create_dataset(dataset_opt)
            train_loader = create_dataloader(train_set, dataset_opt,
                                             collate_fn)
            train_loader_list.append(train_loader)
            print('===> Train Dataset: %s   Number of images: [%d]' %
                  (train_set.name(), len(train_set)))
            if train_loader is None:
                raise ValueError("[Error] The training data does not exist")
            bm_names.append(train_set.name())

        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('===> Val Dataset: %s   Number of images: [%d]' %
                  (val_set.name(), len(val_set)))

        else:
            raise NotImplementedError(
                "[Error] Dataset phase [%s] in *.json is not recognized." %
                phase)

    solver = create_solver(opt)

    scale = opt['scale']
    model_name = opt['networks']['which_model'].upper()

    print('===> Start Train')
    print("==================================================")

    solver_log = solver.get_current_log()

    NUM_EPOCH = int(opt['solver']['num_epochs'])
    start_epoch = solver_log['epoch']

    print("Method: %s || Scale: %d || Epoch Range: (%d ~ %d)" %
          (model_name, scale, start_epoch, NUM_EPOCH))
    # writer = SummaryWriter('runs/')
    lamda = 1
    for epoch in range(start_epoch, NUM_EPOCH + 1):
        print('\n===> Training Epoch: [%d/%d]...  Learning Rate: %f' %
              (epoch, NUM_EPOCH, solver.get_current_learning_rate()))

        # Initialization
        solver_log['epoch'] = epoch
        if epoch == 30:
            print('this is the %dth epoch.' % epoch)

        # Train model
        train_loss_list = []

        total_len = [len(in_set) for in_set in train_loader_list]
        total_len = sum(total_len)

        with tqdm(total=total_len,
                  desc='Epoch: [%d/%d]' % (epoch, NUM_EPOCH),
                  miniters=1) as t:
            for bm, train_loader in zip(bm_names, train_loader_list):
                print('==========> Start Train: %s <==========' % (bm))
                # for iter, (batch, batch_len, mask) in enumerate(train_loader):
                #     solver.feed_data(batch)
                #     iter_loss = solver.train_step(mask, lamda)
                #     batch_size = batch['LR'].size(0)
                #     train_loss_list.append(iter_loss*batch_size)
                #     t.set_postfix_str("Batch Loss: %.4f" % iter_loss)
                #     t.update()
                for iter, batch in enumerate(train_loader):
                    solver.feed_data(batch)
                    iter_loss = solver.train_step()
                    batch_size = batch['LR'].size(0)
                    train_loss_list.append(iter_loss * batch_size)
                    t.set_postfix_str("Batch Loss: %.4f" % iter_loss)
                    t.update()

        solver_log['records']['train_loss'].append(
            sum(train_loss_list) / len(train_set))
        solver_log['records']['lr'].append(solver.get_current_learning_rate())

        print('\nEpoch: [%d/%d]   Avg Train Loss: %.6f' %
              (epoch, NUM_EPOCH, sum(train_loss_list) / len(train_set)))

        print('===> Validating...')

        psnr_list = []
        ssim_list = []
        val_loss_list = []

        for iter, batch in enumerate(val_loader):
            solver.feed_data(batch)
            iter_loss = solver.test()
            val_loss_list.append(iter_loss)

            # calculate evaluation metrics
            visuals = solver.get_current_visual()
            psnr, ssim = util.pan_calc_metrics(visuals['SR'],
                                               visuals['HR'],
                                               crop_border=scale,
                                               img_range=opt['img_range'])
            psnr_list.append(psnr)
            ssim_list.append(ssim)

            if opt["save_image"]:
                solver.save_current_visual(epoch, iter)

        solver_log['records']['val_loss'].append(
            sum(val_loss_list) / len(val_loss_list))
        solver_log['records']['psnr'].append(sum(psnr_list) / len(psnr_list))
        solver_log['records']['ssim'].append(sum(ssim_list) / len(ssim_list))

        # record the best epoch
        epoch_is_best = False
        if solver_log['best_pred'] < (sum(psnr_list) / len(psnr_list)):
            solver_log['best_pred'] = (sum(psnr_list) / len(psnr_list))
            epoch_is_best = True
            solver_log['best_epoch'] = epoch

        print(
            "[%s] CC: %.4f   RMSE: %.4f   Loss: %.6f   Best CC: %.2f in Epoch: [%d]"
            %
            (val_set.name(), sum(psnr_list) / len(psnr_list), sum(ssim_list) /
             len(ssim_list), sum(val_loss_list) / len(val_loss_list),
             solver_log['best_pred'], solver_log['best_epoch']))
        # writer.add_scalar('train_loss', sum(train_loss_list) / len(train_set))
        # writer.add_scalar('val_loss', sum(val_loss_list)/len(val_loss_list))
        # writer.add_scalar('CC', sum(psnr_list)/len(psnr_list))
        # writer.add_scalar('RMSE', sum(ssim_list)/len(ssim_list))

        solver.set_current_log(solver_log)
        solver.save_checkpoint(epoch, epoch_is_best)
        solver.save_current_log()

        # update lr
        solver.update_learning_rate(epoch)

        # update lamda (decays by 0.01 every 5 epochs, floored at 0)
        lamda = max(1 - 0.01 * (epoch // 5), 0)

    # writer.close()
    print('===> Finished !')
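
Example #12 calls a pytorch_seed helper on top of random.seed and torch.manual_seed. A possible implementation (an assumption; the original helper is not shown) that also pins numpy, CUDA, and cuDNN for reproducibility:

import os
import random
import numpy as np
import torch

def pytorch_seed(seed=0):
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
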
Example #13
def main():

    # setting arguments and logger
    parser = argparse.ArgumentParser(description='Arguments')
    parser.add_argument('--opt',
                        type=str,
                        required=True,
                        help='path to json or yaml file')
    parser.add_argument('--name',
                        type=str,
                        required=True,
                        help='save_dir prefix name')
    parser.add_argument('--scale',
                        type=int,
                        required=True,
                        help='scale factor')
    parser.add_argument('--ps', type=int, default=None, help='patch size')
    parser.add_argument('--bs', type=int, default=None, help='batch_size')
    parser.add_argument('--lr', type=float, default=None, help='learning rate')
    parser.add_argument('--train_Y',
                        action='store_true',
                        default=False,
                        help='convert rgb to yuv and only train on Y channel')
    parser.add_argument('--gpu_ids',
                        type=str,
                        default=None,
                        help='which gpu to use')
    parser.add_argument('--use_chop',
                        action='store_true',
                        default=False,
                        help='whether to use split_forward in test phase')
    parser.add_argument('--pretrained',
                        default=None,
                        help='checkpoint path to resume from')

    args = parser.parse_args()
    args, lg = parse(args)

    # Tensorboard curve
    pretrained = args['solver']['pretrained']
    train_path = '../Tensorboard/train_{}'.format(args['name'])
    val_path = '../Tensorboard/val_{}'.format(args['name'])
    psnr_path = '../Tensorboard/psnr_{}'.format(args['name'])
    ssim_path = '../Tensorboard/ssim_{}'.format(args['name'])

    if pretrained is None:
        # starting fresh: clear any stale Tensorboard logs
        for path in (train_path, val_path, psnr_path, ssim_path):
            if osp.exists(path):
                lg.info('Remove dir: [{}]'.format(path))
                shutil.rmtree(path, True)

    train_writer = SummaryWriter(train_path)
    val_writer = SummaryWriter(val_path)
    psnr_writer = SummaryWriter(psnr_path)
    ssim_writer = SummaryWriter(ssim_path)

    # random seed
    seed = args['solver']['manual_seed']
    random.seed(seed)
    torch.manual_seed(seed)

    # create train and val dataloader
    for phase, dataset_opt in args['datasets'].items():
        if phase == 'train':
            train_dataset = create_dataset(dataset_opt)
            train_loader = create_loader(train_dataset, dataset_opt)
            length = len(train_dataset)
            lg.info(
                'Number of train images: [{}], iters each epoch: [{}]'.format(
                    length, len(train_loader)))
        elif phase == 'val':
            val_dataset = create_dataset(dataset_opt)
            val_loader = create_loader(val_dataset, dataset_opt)
            length = len(val_dataset)
            lg.info(
                'Number of val images: [{}], iters each epoch: [{}]'.format(
                    length, len(val_loader)))
        elif phase == 'test':
            test_dataset = create_dataset(dataset_opt)
            test_loader = create_loader(test_dataset, dataset_opt)
            length = len(test_dataset)
            lg.info(
                'Number of test images: [{}], iters each epoch: [{}]'.format(
                    length, len(test_loader)))

    # create solver
    solver = create_solver(args)

    # training prepare
    solver_log = solver.get_current_log()
    NUM_EPOCH = args['solver']['num_epochs']
    cur_iter = -1
    start_epoch = solver_log['epoch']
    scale = args['scale']
    lg.info('Start Training from [{}] Epoch'.format(start_epoch))
    print_freq = args['print']['print_freq']
    val_step = args['solver']['val_step']

    # training
    for epoch in range(start_epoch, NUM_EPOCH + 1):
        solver_log['epoch'] = epoch

        train_loss_list = []
        for iter, data in enumerate(train_loader):
            cur_iter += 1
            solver.feed_data(data)
            iter_loss = solver.optimize_step()
            train_loss_list.append(iter_loss)

            # show on screen
            if (cur_iter % print_freq) == 0:
                lg.info(
                    'Epoch: {:4} | iter: {:3} | train_loss: {:.4f} | lr: {}'.
                    format(epoch, iter, iter_loss,
                           solver.get_current_learning_rate()))

        train_loss = round(sum(train_loss_list) / len(train_loss_list), 4)
        train_writer.add_scalar('loss', train_loss, epoch)
        solver_log['train_records']['train_loss'].append(train_loss)
        solver_log['train_records']['lr'].append(
            solver.get_current_learning_rate())

        epoch_is_best = False

        if (epoch % val_step) == 0:
            # Validation
            lg.info('Start Validation...')
            pbar = ProgressBar(len(val_loader))
            psnr_list = []
            ssim_list = []
            val_loss_list = []

            for iter, data in enumerate(val_loader):
                solver.feed_data(data)
                loss = solver.test()
                val_loss_list.append(loss)

                # calculate evaluation metrics
                visuals = solver.get_current_visual(need_np=True)
                psnr, ssim = calc_metrics(visuals['SR'],
                                          visuals['HR'],
                                          crop_border=scale,
                                          test_Y=True)
                psnr_list.append(psnr)
                ssim_list.append(ssim)
                pbar.update('')

            # save image
            solver.save_current_visual(epoch)

            avg_psnr = round(sum(psnr_list) / len(psnr_list), 2)
            avg_ssim = round(sum(ssim_list) / len(ssim_list), 4)
            val_loss = round(sum(val_loss_list) / len(val_loss_list), 4)
            val_writer.add_scalar('loss', val_loss, epoch)
            psnr_writer.add_scalar('psnr', avg_psnr, epoch)
            ssim_writer.add_scalar('ssim', avg_ssim, epoch)

            solver_log['val_records']['val_loss'].append(val_loss)
            solver_log['val_records']['psnr'].append(avg_psnr)
            solver_log['val_records']['ssim'].append(avg_ssim)

            # record the best epoch
            if solver_log['best_pred'] < avg_psnr:
                solver_log['best_pred'] = avg_psnr
                epoch_is_best = True
                solver_log['best_epoch'] = epoch
            lg.info(
                'PSNR: {:.2f} | SSIM: {:.4f} | Loss: {:.4f} | Best_PSNR: {:.2f} in Epoch: [{}]'
                .format(avg_psnr, avg_ssim, val_loss, solver_log['best_pred'],
                        solver_log['best_epoch']))

        solver.set_current_log(solver_log)
        solver.save_checkpoint(epoch, epoch_is_best)
        solver.save_current_log()

        # update lr
        solver.update_learning_rate(epoch)

    lg.info('===> Finished !')
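
A design note on the logging in Example #13: the four separate SummaryWriter directories (train/val/psnr/ssim) could be a single writer with tag namespaces, which keeps all curves of a run under one TensorBoard entry. A sketch, assuming torch.utils.tensorboard (the example's own import is not shown):

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('../Tensorboard/run_name')
writer.add_scalar('loss/train', 0.1234, 1)   # tag namespaces replace
writer.add_scalar('loss/val', 0.2345, 1)     # the separate log dirs
writer.add_scalar('metrics/psnr', 28.51, 1)
writer.add_scalar('metrics/ssim', 0.8123, 1)
writer.close()
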
Example #14
def main():
    parser = argparse.ArgumentParser(
        description='Test Super Resolution Models')
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    opt = option.dict_to_nonedict(opt)

    # initial configure
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()
    if opt['self_ensemble']: model_name += 'plus'

    # create test dataloader
    bm_names = []
    test_loaders = []
    for _, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s]   Number of images: [%d]' %
              (test_set.name(), len(test_set)))
        bm_names.append(test_set.name())

    # create solver (and load model)
    solver = create_solver(opt)
    # Test phase
    print('===> Start Test')
    print("==================================================")
    print("Method: %s || Scale: %d || Degradation: %s" %
          (model_name, scale, degrad))

    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]" % bm)

        sr_list = []
        path_list = []

        total_psnr = []
        total_ssim = []
        total_time = []

        need_HR = False if test_loader.dataset.__class__.__name__.find(
            'LRHR') < 0 else True

        for iter, batch in enumerate(test_loader):

            solver.feed_data(batch, need_HR=need_HR)

            # calculate forward time
            t0 = time.time()
            solver.test()
            t1 = time.time()
            total_time.append((t1 - t0))

            visuals = solver.get_current_visual(need_HR=need_HR)
            sr_list.append(visuals['SR'])

            # calculate PSNR/SSIM metrics on Python
            if need_HR:
                psnr, ssim = util.calc_metrics(visuals['SR'],
                                               visuals['HR'],
                                               crop_border=scale)
                total_psnr.append(psnr)
                total_ssim.append(ssim)
                path_list.append(
                    os.path.basename(batch['HR_path'][0]).replace(
                        'HR', model_name))
                print(
                    "[%d/%d] %s || PSNR(dB)/SSIM: %.2f/%.4f || Timer: %.4f sec ."
                    % (iter + 1, len(test_loader),
                       os.path.basename(batch['LR_path'][0]), psnr, ssim,
                       (t1 - t0)))
            else:
                path_list.append(os.path.basename(batch['LR_path'][0]))
                print("[%d/%d] %s || Timer: %.4f sec ." %
                      (iter + 1, len(test_loader),
                       os.path.basename(batch['LR_path'][0]), (t1 - t0)))

        if need_HR:
            print("---- Average PSNR(dB) /SSIM /Speed(s) for [%s] ----" % bm)
            print("PSNR: %.2f      SSIM: %.4f      Speed: %.4f" %
                  (sum(total_psnr) / len(total_psnr), sum(total_ssim) /
                   len(total_ssim), sum(total_time) / len(total_time)))
        else:
            print("---- Average Speed(s) for [%s] is %.4f sec ----" %
                  (bm, sum(total_time) / len(total_time)))

        # save SR results for further evaluation on MATLAB
        if need_HR:
            save_img_path = os.path.join('./results/SR/' + degrad, model_name,
                                         bm, "x%d" % scale)
        else:
            save_img_path = os.path.join('./results/SR/' + bm, model_name,
                                         "x%d" % scale)

        print("===> Saving SR images of [%s]... Save Path: [%s]\n" %
              (bm, save_img_path))

        if not os.path.exists(save_img_path): os.makedirs(save_img_path)
        for img, name in zip(sr_list, path_list):
            imageio.imwrite(os.path.join(save_img_path, name), img)

    print("==================================================")
    print("===> Finished !")
Example #15
def main():
    parser = argparse.ArgumentParser(description='Test Super Resolution Models')
    parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    opt = option.dict_to_nonedict(opt)

    # initial configure
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()
    if opt['self_ensemble']: model_name += 'plus'

    # create test dataloader
    bm_names =[]
    test_loaders = []
    for _, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s]   Number of images: [%d]' % (test_set.name(), len(test_set)))
        bm_names.append(test_set.name())

    # create solver (and load model)
    solver = create_solver(opt)
    # Test phase
    print('===> Start Test')
    print("==================================================")
    print("Method: %s || Scale: %d || Degradation: %s"%(model_name, scale, degrad))
    f = open(opt['savefile'], "w")
    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]"%bm)

        sr_list = []
        rec_list = []
        path_list = []

        total_psnr = []
        total_ssim = []
        total_psnr_input = []
        total_ssim_input = []
        total_time = []

        need_HR = False if test_loader.dataset.__class__.__name__.find('LRHR') < 0 else True
        save_img_path = opt['dir']
        print("===> Saving SR images of [%s]... Save Path: [%s]\n" % (bm, save_img_path))
        if not os.path.exists(save_img_path): os.makedirs(save_img_path)
        
        for iter, batch in enumerate(test_loader):
            solver.feed_data(batch, need_HR=need_HR)
            
            # calculate forward time
            t0 = time.time()
            solver.test()
            t1 = time.time()
            total_time.append((t1 - t0))

            visuals = solver.get_current_visual(need_HR=need_HR)
            #visuals_rec = solver.get_current_visual(need_HR=need_HR)
            sr_list.append(visuals['SR'])
            
            
            # calculate PSNR/SSIM metrics on Python
            if need_HR:
                path_list.append(os.path.basename(batch['HR_path'][0]).replace('HR', model_name))
            else:
                path_list.append(os.path.basename(batch['LR_path1'][0]))
            m = os.path.basename(batch['LR_path1'][0])+"\t"+str((t1 - t0))+"\n"
            f.write(m)    
            
            imageio.imwrite(os.path.join(save_img_path, os.path.basename(batch['LR_path1'][0])), visuals['SR'])
        # save SR results for further evaluation on MATLAB
        
        #for img, name in zip(sr_list, path_list):
            #imageio.imwrite(os.path.join(save_img_path, name), img)

    print("==================================================")
    print("===> Finished !")
Example #16
def main():
    parser = argparse.ArgumentParser(
        description='Test Super Resolution Models')
    parser.add_argument(
        '-opt', type=str, required=True, help='Path to options JSON file.')
    parser.add_argument(
        '-save_folder', type=str, required=True, help='Folder to save output images')
    args = parser.parse_args()
    opt = option.parse(args.opt)
    opt = option.dict_to_nonedict(opt)
    save_folder = args.save_folder

    # initial configure
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()

    # create folders
    # util.mkdir_and_rename(opt['path']['res_root'])
    # option.save(opt)

    # create test dataloader
    bm_names = []
    test_loaders = []
    for ds_name, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        # print('===> Test Dataset: [%s]   Number of images: [%d]' %
        #       (dataset_opt['name'], len(test_set)))
        bm_names.append(dataset_opt['name'])

    # create solver (and load model)
    solver = create_solver(opt)
    # Test phase
    # print('===> Start Test')
    # print("==================================================")
    # print("Method: %s || Scale: %d || Degradation: %s" % (model_name, scale,
    #                                                       degrad))

    for bm, test_loader in zip(bm_names, test_loaders):
        # print("Test set : [%s]" % bm)

        sr_list = []
        path_list = []


        need_HR = False if test_loader.dataset.__class__.__name__.find(
            'HR') < 0 else True

        for iter, batch in tqdm(enumerate(test_loader), total=len(test_loader)):
            solver.feed_data(batch, need_HR=need_HR, need_landmark=False)
            solver.test()

            visuals = solver.get_current_visual(need_HR=need_HR)
            sr_list.append(visuals['SR'][-1])

            path_list.append(os.path.basename(batch['LR_path'][0]))

        # # save SR results for further evaluation on MATLAB
        # save_img_path = os.path.join(opt['path']['res_root'], bm)
        #
        # print("===> Saving SR images of [%s]... Save Path: [%s]\n" %
        #       (bm, save_img_path))

        # if not os.path.exists(save_img_path): os.makedirs(save_img_path)
        for img, name in zip(sr_list, path_list):
            file_name, file_ext = os.path.splitext(name)
            name = file_name+"_FaceEnhancement" + file_ext
            imageio.imwrite(os.path.join(save_folder, name), img)

    print("==================================================")
    print("===> Finished !")
def main():
    parser = argparse.ArgumentParser(description='Test Super Resolution Models')
    parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    opt = option.dict_to_nonedict(opt)

    # make sure the CUDA_VISIBLE_DEVICES is set before import torch.
    from utils import util
    from solvers import create_solver
    from datasets import create_dataloader
    from datasets import create_dataset

    # initial configure
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()
    if opt['self_ensemble']: model_name += 'plus'

    # create test dataloader
    bm_names =[]
    test_loaders = []
    percent10 = True
    for _, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s]   Number of images: [%d]' % (test_set.name(), len(test_set)))
        bm_names.append(test_set.name())

    # create solver (and load model)
    solver = create_solver(opt)
    # Test phase
    print('===> Start Test')
    print("==================================================")
    print("Method: %s || Scale: %d || Degradation: %s"%(model_name, scale, degrad))

    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]"%bm)

        sr_list = []
        path_list = []

        total_psnr = []
        total_ssim = []
        total_time = []

        need_HR = False if test_loader.dataset.__class__.__name__.find('LRHR') < 0 else True

        for iter, batch in enumerate(test_loader):
            solver.feed_data(batch, need_HR=need_HR)

            # calculate forward time
            t0 = time.time()
            solver.test()
            t1 = time.time()
            total_time.append((t1 - t0))

            visuals = solver.get_current_visual(need_HR=need_HR)
            sr_list.append(visuals['SR'])

            # calculate PSNR/SSIM metrics on Python
            # NOTE: only batch size = 1 is supported here!
            if need_HR:
                psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
                total_psnr.append(psnr)
                total_ssim.append(ssim)
                path_list.append(os.path.basename(batch['HR_path'][0]).replace('HR', model_name))
                print("[%d/%d] %s || PSNR(dB)/SSIM: %.2f/%.4f || Timer: %.4f sec ." % (iter+1, len(test_loader),
                                                                                       os.path.basename(batch['LR_path'][0]),
                                                                                       psnr, ssim,
                                                                                       (t1 - t0)))
            else:
                file_dir = batch['LR_path'][0].split('/')[-2]
                path_list.append(os.path.join(file_dir, os.path.basename(batch['LR_path'][0])))
                print("[%d/%d] %s || Timer: %.4f sec ." % (iter + 1, len(test_loader),
                                                           os.path.join(file_dir, os.path.basename(batch['LR_path'][0])),
                                                           (t1 - t0)))

        if need_HR:
            print("---- Average PSNR(dB) /SSIM /Speed(s) for [%s] ----" % bm)
            print("PSNR: %.2f      SSIM: %.4f      Speed: %.4f" % (sum(total_psnr)/len(total_psnr),
                                                                  sum(total_ssim)/len(total_ssim),
                                                                  sum(total_time)/len(total_time)))
        else:
            print("---- Average Speed(s) for [%s] is %.4f sec ----" % (bm,
                                                                      sum(total_time)/len(total_time)))

        if need_HR:
            save_img_path = os.path.join('../submit/SR/'+degrad, model_name, bm, "x%d"%scale)
        else:
            save_img_path = os.path.join('../submit/')

        print("===> Saving SR images of [%s]... Save Path: [%s]\n" % (bm, save_img_path))

        middle_name = 'h_Res' if percent10 else 'h_Sub25_Res'
        filter_idx = -1 if percent10 else -7

        if not os.path.exists(save_img_path): os.makedirs(save_img_path)
        for img, name in zip(sr_list, path_list):
            store_path = os.path.join(save_img_path, name)
            base_dir = os.path.dirname(store_path)[:filter_idx] + middle_name
            if not os.path.exists(base_dir): os.makedirs(base_dir)
            store_path = os.path.join(base_dir, os.path.basename(name))
            print('write into {}.'.format(store_path))
            imageio.imwrite(store_path, img)

        percent10 = False

    print("==================================================")
    print("===> Finished !")
Example #18
def main():
    args = option.add_args()
    opt = option.parse(args.opt,
                       nblocks=args.nblocks,
                       nlayers=args.nlayers,
                       iterations=args.iterations,
                       trained_model=args.trained_model,
                       lr_path=args.lr_path
                       )
    opt = option.dict_to_nonedict(opt)

    # initial configure
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()
    if opt['self_ensemble']:
        model_name += 'plus'

    # create test dataloader
    bm_names =[]
    test_loaders = []
    for _, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s]   Number of images: [%d]' % (test_set.name(), len(test_set)))
        bm_names.append(test_set.name())

    # create solver (and load model)
    solver = create_solver(opt)
    # Test phase
    print('===> Start Test')
    print("==================================================")
    print("Method: %s || Scale: %d || Degradation: %s"%(model_name, scale, degrad))

    # whether to save the SR images
    if opt['save_image']:
        para_save = Paralle_save_img()
        para_save.begin_background()
    # with para_save.begin_background() as para_save_imag

    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]" % bm)

        total_psnr = []
        total_ssim = []
        total_time = []

        need_HR = False if test_loader.dataset.__class__.__name__.find('LRHR') < 0 else True

        if need_HR:
            save_img_path = os.path.join('./results/SR/' + degrad, model_name, bm, "x%d" % scale)
        else:
            save_img_path = os.path.join('./results/SR/' + bm, model_name, "x%d" % scale)

        if not os.path.exists(save_img_path):
            os.makedirs(save_img_path)

        for iter, batch in enumerate(test_loader):
            solver.feed_data(batch, need_HR=need_HR)

            # calculate forward time
            t0 = time.time()
            solver.test()
            t1 = time.time()
            total_time.append((t1 - t0))

            visuals = solver.get_current_visual(need_HR=need_HR)

            # calculate PSNR/SSIM metrics on Python
            if need_HR:
                psnr, ssim = util.calc_metrics(visuals['SR'], visuals['HR'], crop_border=scale)
                total_psnr.append(psnr)
                total_ssim.append(ssim)
                name = os.path.basename(batch['HR_path'][0]).replace('.', ('_x{}_' + model_name + '.').format(scale))

                print("[%d/%d] %s || PSNR(dB)/SSIM: %.2f/%.4f || Timer: %.4f sec ." % (iter + 1, len(test_loader),
                                                                                       os.path.basename(
                                                                                           batch['LR_path'][0]),
                                                                                       psnr, ssim,
                                                                                       (t1 - t0)))
            else:
                print("[%d/%d] %s || Timer: %.4f sec ." % (iter + 1, len(test_loader),
                                                           os.path.basename(batch['LR_path'][0]),
                                                           (t1 - t0)))
            if opt['save_image']:
                name = os.path.basename(batch['LR_path'][0]).replace('.', ('_x{}_' + model_name + '.').format(scale))
                para_save.put_image_path(filename=os.path.join(save_img_path, name), img=visuals['SR'])

        total_psnr, total_ssim = np.array(total_psnr), np.array(total_ssim)
        if need_HR:
            print("---- Average PSNR(dB) /SSIM /Speed(s) for [%s] ----" % bm)
            print("PSNR: %.2f(+/-%.2f)      SSIM: %.4f      Speed: %.4f" % (total_psnr.mean(), total_psnr.std(),
                                                                   total_ssim.mean(),
                                                                   sum(total_time) / len(total_time)))
        else:
            print("---- Average Speed(s) for [%s] is %.4f sec ----" %
                  (bm, sum(total_time) / len(total_time)))
    if opt['save_image']:
        para_save.end_background()

    print("==================================================")
    print("===> Finished !")
# NOTE: this snippet also begins mid-script; the loop header and train branch
# below are reconstructed (an assumption) so the fragment parses like the others.
for phase, dataset_opt in sorted(opt['datasets'].items()):
    if phase == 'train':
        train_set = create_dataset(dataset_opt)
        # one dataloader per federated client
        train_loaders = [create_dataloader(train_set, dataset_opt)
                         for _ in range(num_clients)]
        print('======> Train Dataset: %s, Number of images: [%d]' % (train_set.name(), len(train_set)))
        if train_loaders is None:
            raise ValueError("[Error] The training data does not exist")

    elif phase == 'val':
        val_set = create_dataset(dataset_opt)
        val_loader = create_dataloader(val_set, dataset_opt)
        print('======> Val Dataset: %s, Number of images: [%d]' % (val_set.name(), len(val_set)))

    else:
        raise NotImplementedError("[Error] Dataset phase [%s] in *.json is not recognized." % phase)


##### Create model and solver #####
scale = opt['scale']
client_solvers = [create_solver(opt) for _ in range(num_clients)]
server_solver = create_solver(opt)
model_name = opt['networks']['which_model'].upper()

print('===> Start Train')
print("==================================================")
print("Method: %s || Scale: %d || Total epoch: %d " %(model_name, scale, num_rounds))


##### Create solver log for saving #####
dir_path = server_solver.exp_root
results_save_path = os.path.join(dir_path, 'results.csv')
solver_log = server_solver.get_current_log()
start_epoch = solver_log['round']

def main():
    parser = argparse.ArgumentParser(
        description='Test Super Resolution Models')
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to options JSON file.')
    opt = option.parse(parser.parse_args().opt)
    opt = option.dict_to_nonedict(opt)

    # initial configure
    scale = opt['scale']
    degrad = opt['degradation']
    network_opt = opt['networks']
    model_name = network_opt['which_model'].upper()

    # create folders
    util.mkdir_and_rename(opt['path']['res_root'])
    option.save(opt)

    # create test dataloader
    bm_names = []
    test_loaders = []
    for ds_name, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)
        print('===> Test Dataset: [%s]   Number of images: [%d]' %
              (dataset_opt['name'], len(test_set)))
        bm_names.append(dataset_opt['name'])

    # create solver (and load model)
    solver = create_solver(opt)
    # Test phase
    print('===> Start Test')
    print("==================================================")
    print("Method: %s || Scale: %d || Degradation: %s" %
          (model_name, scale, degrad))

    for bm, test_loader in zip(bm_names, test_loaders):
        print("Test set : [%s]" % bm)

        sr_list = []
        path_list = []

        total_psnr = []
        total_ssim = []
        total_time = []
        res_dict = OrderedDict()

        need_HR = False if test_loader.dataset.__class__.__name__.find(
            'HR') < 0 else True

        for iter, batch in tqdm(enumerate(test_loader),
                                total=len(test_loader)):
            solver.feed_data(batch, need_HR=need_HR, need_landmark=False)

            # calculate forward time
            t0 = time.time()
            solver.test()
            t1 = time.time()
            total_time.append((t1 - t0))

            visuals = solver.get_current_visual(need_HR=need_HR)
            sr_list.append(visuals['SR'][-1])

            # calculate PSNR/SSIM metrics on Python
            if need_HR:
                psnr, ssim = util.calc_metrics(visuals['SR'][-1],
                                               visuals['HR'],
                                               crop_border=scale)
                total_psnr.append(psnr)
                total_ssim.append(ssim)
                path_list.append(
                    os.path.basename(batch['HR_path'][0]).replace(
                        'HR', model_name))
                # print(
                #     "[%d/%d] %s || PSNR(dB)/SSIM: %.2f/%.4f || Timer: %.4f sec ."
                #     % (iter + 1, len(test_loader),
                #        os.path.basename(batch['HR_path'][0]), psnr, ssim,
                #        (t1 - t0)))
                res_dict[path_list[-1]] = {
                    'psnr': psnr,
                    'ssim': ssim,
                    'time': t1 - t0
                }

            else:
                path_list.append(os.path.basename(batch['LR_path'][0]))
                # print("[%d/%d] %s || Timer: %.4f sec ." %
                #       (iter + 1, len(test_loader),
                #        os.path.basename(batch['LR_path'][0]), (t1 - t0)))

        if need_HR:
            print("---- Average PSNR(dB) /SSIM /Speed(s) for [%s] ----" % bm)
            average_res_str = "PSNR: %.2f      SSIM: %.4f      Speed: %.4f" % \
                  (sum(total_psnr) / len(total_psnr), sum(total_ssim) /
                   len(total_ssim), sum(total_time) / len(total_time))
            print(average_res_str)
        else:
            print("---- Average Speed(s) for [%s] is %.4f sec ----" %
                  (bm, sum(total_time) / len(total_time)))

        # save SR results for further evaluation on MATLAB
        save_img_path = os.path.join(opt['path']['res_root'], bm)

        print("===> Saving SR images of [%s]... Save Path: [%s]\n" %
              (bm, save_img_path))

        if not os.path.exists(save_img_path): os.makedirs(save_img_path)
        for img, name in zip(sr_list, path_list):
            imageio.imwrite(os.path.join(save_img_path, name), img)
        if need_HR:
            with open(os.path.join(save_img_path, 'result.json'), 'w') as f:
                json.dump(res_dict, f, indent=2)
            with open(os.path.join(save_img_path, 'average_result.txt'),
                      'w') as f:
                f.write(average_res_str + '\n')

    print("==================================================")
    print("===> Finished !")