Example #1
def get_sim(root_path, ori_path, n_samples):
    # average PSNR/SSIM between each original image and its n_samples generated variants
    img_list = sorted(glob.glob(ori_path))
    total_psnr = 0.
    total_ssim = 0.
    count = 0
    for v in img_list:
        img_name = v.split("/")[-1].split(".")[0]
        img0_np = load_image(v)
        for j in range(n_samples):
            img1_np = load_image(root_path + img_name + "_" + str(j) + ".png")
            total_psnr += util.psnr(img0_np, img1_np)
            total_ssim += util.ssim(img0_np, img1_np, multichannel=True)
            count += 1

    return total_psnr / count, total_ssim / count
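A minimal usage sketch for get_sim, assuming load_image, util.psnr and util.ssim come from the snippet's own project; the paths below are placeholders.

# hypothetical call: 10 generated variants per ground-truth image
mean_psnr, mean_ssim = get_sim('./results/', './data/gt/*.png', n_samples=10)
print('PSNR: {:.2f} dB, SSIM: {:.4f}'.format(mean_psnr, mean_ssim))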
Example #2
def align(folder):
    print('process', folder)
    xs = os.listdir(folder)
    # sort numerically by the frame index that follows a 5-character filename prefix
    xs.sort(key=lambda x: int(x.split('.')[0][5:]))

    last = None
    ssim_list = []  # per-frame SSIM scores (avoids shadowing the built-in sum)
    total = 0
    for i, x in enumerate(xs):
        x = osp.join(folder, x)
        # print(x)
        img = Image.open(x)
        w, h = img.size  # PIL gives (width, height)
        scale = 10
        w //= scale
        h //= scale
        img = img.resize((w, h))

        # img.save('./tmp.jpg')
        # import ipdb; ipdb.set_trace()

        img = np.array(img)
        if i == 0:
            last = img
            continue

        # check diff:
        # PSNR = calculate_psnr(img, last)
        PSNR = None
        SSIM = ssim(img, last)
        ssim_list.append(SSIM)
        total += SSIM
        if total / i - SSIM > 0.008:
            print(
                f'{folder} - res: frame-{i}, SSIM: {SSIM}, mean-SSIM: {total/i}'
            )
            break
        # print(f'{i}, {i-1}: SSIM: {SSIM}, mean-SSIM: {total/i}, PSNR: {PSNR}')
        # last = img
    return i
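A minimal usage sketch for align, assuming the folder holds frames named with a 5-character prefix followed by the frame index (e.g. frame0001.jpg), as the sort key expects; the path is a placeholder.

# hypothetical call: returns the index of the first frame whose SSIM against
# frame 0 falls more than 0.008 below the running mean
cut_frame = align('./frames/scene_001')
print('content change detected around frame', cut_frame)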
Example #3
        model.test()  # test
        visuals = model.get_current_visuals(need_HR=need_HR)

        sr_img = util.tensor2img(visuals['SR'])  # uint8

        if need_HR:  # load GT image and calculate psnr
            gt_img = util.tensor2img(visuals['HR'])

            crop_border = test_loader.dataset.opt['scale']
            cropped_sr_img = sr_img[crop_border:-crop_border,
                                    crop_border:-crop_border, :]
            cropped_gt_img = gt_img[crop_border:-crop_border,
                                    crop_border:-crop_border, :]
            psnr = util.psnr(cropped_sr_img, cropped_gt_img)
            ssim = util.ssim(cropped_sr_img, cropped_gt_img, multichannel=True)
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            if gt_img.shape[2] == 3:  # RGB image
                cropped_sr_img_y = rgb2ycbcr(cropped_sr_img, only_y=True)
                cropped_gt_img_y = rgb2ycbcr(cropped_gt_img, only_y=True)
                psnr_y = util.psnr(cropped_sr_img_y, cropped_gt_img_y)
                ssim_y = util.ssim(cropped_sr_img_y,
                                   cropped_gt_img_y,
                                   multichannel=False)
                test_results['psnr_y'].append(psnr_y)
                test_results['ssim_y'].append(ssim_y)
                print('{:20s} - PSNR: {:.4f} dB; SSIM: {:.4f}; PSNR_Y: {:.4f} dB; SSIM_Y: {:.4f}.'\
                    .format(img_name, psnr, ssim, psnr_y, ssim_y))
            else:
                print('{:20s} - PSNR: {:.4f} dB; SSIM: {:.4f}.'.format(
                    img_name, psnr, ssim))
Example #4
        for run_index in range(multiple):
            code = model.gen_code(data['LR'].shape[0], data['LR'].shape[2],
                                  data['LR'].shape[3])
            model.feed_data(data, code=code, need_HR=need_HR)
            model.test()

            img_path = data['LR_path'][0]
            img_name = os.path.splitext(os.path.basename(img_path))[0]

            visuals = model.get_current_visuals(need_HR=need_HR)
            sr_img = util.tensor2img(visuals['HR_pred'])  # uint8

            if need_HR:  # load target image and calculate metric scores
                gt_img = util.tensor2img(visuals['HR'])
                psnr = util.psnr(sr_img, gt_img)
                ssim = util.ssim(sr_img, gt_img, multichannel=True)
                lpips = torch.sum(model.get_loss(level=-1))
                test_results['psnr'].append(psnr)
                test_results['ssim'].append(ssim)
                test_results['lpips'].append(lpips)
                print('{:20s} - LPIPS: {:.4f}; PSNR: {:.4f} dB; SSIM: {:.4f}.'.
                      format(img_name, lpips, psnr, ssim))
            else:
                print(img_name)

            save_img_path = os.path.join(dataset_dir,
                                         img_name + '_%d.png' % run_index)
            util.save_img(sr_img, save_img_path)

    if need_HR:  # metrics
        # Average LPIPS/PSNR/SSIM results
Example #5
def main():
    # Settings
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, required=True, help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt)  # load and initialize settings

    util.mkdir_and_rename(opt['path']['experiments_root'])  # rename the old experiments folder if it exists
    util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root' and \
        not key == 'saved_model'))
    option.save(opt)
    opt = option.dict_to_nonedict(opt)  # Convert to NoneDict, which returns None for missing keys.

    # Redirect all stdout writes to the log text file
    sys.stdout = PrintLogger(opt['path']['log'])

    # create train and val dataloader
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size']))
            print('Number of train images: {:,d}, iters: {:,d}'.format(len(train_set), train_size))
            total_iters = int(opt['train']['niter'])
            total_epochs = int(math.ceil(total_iters / train_size))
            print('Total epochs needed: {:d} for iters {:,d}'.format(total_epochs, total_iters))
            train_loader = create_dataloader(train_set, dataset_opt)
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            print('Number of val images in [{:s}]: {:d}'.format(dataset_opt['name'], len(val_set)))
        else:
            raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
    assert train_loader is not None

    # Create model
    model = create_model(opt)
    # Create logger
    logger = Logger(opt)

    current_step = 0
    start_time = time.time()
    print('---------- Start training -------------')
    for epoch in range(total_epochs):
        for i, train_data in enumerate(train_loader):
            current_step += 1
            if current_step > total_iters:
                break

            # training
            model.feed_data(train_data)
            model.optimize_parameters(current_step)

            time_elapsed = time.time() - start_time
            start_time = time.time()

            # log
            if current_step % opt['logger']['print_freq'] == 0:
                logs = model.get_current_log()
                print_rlt = OrderedDict()
                print_rlt['model'] = opt['model']
                print_rlt['epoch'] = epoch
                print_rlt['iters'] = current_step
                print_rlt['time'] = time_elapsed
                for k, v in logs.items():
                    print_rlt[k] = v
                print_rlt['lr'] = model.get_current_learning_rate()
                logger.print_format_results('train', print_rlt)

            # save models
            if current_step % opt['logger']['save_checkpoint_freq'] == 0:
                print('Saving the model at the end of iter {:d}.'.format(current_step))
                model.save(current_step)

            # validation
            if current_step % opt['train']['val_freq'] == 0:
                print('---------- validation -------------')
                start_time = time.time()

                avg_psnr = 0.0
                avg_ssim = 0.0
                idx = 0
                for val_data in val_loader:
                    idx += 1
                    img_name = os.path.splitext(os.path.basename(val_data['GT_path'][0]))[0]
                    img_dir = os.path.join(opt['path']['val_images'], img_name)
                    util.mkdir(img_dir)

                    model.feed_data(val_data)
                    model.test()

                    visuals = model.get_current_visuals()
                    out_img = util.tensor2img(visuals['Output'])
                    gt_img = util.tensor2img(visuals['ground_truth'])  # uint8

                    # Save output images for reference
                    save_img_path = os.path.join(img_dir, '{:s}_{:d}.png'.format(\
                        img_name, current_step))
                    util.save_img(out_img, save_img_path)

                    # calculate PSNR
                    if len(gt_img.shape) == 2:
                        gt_img = np.expand_dims(gt_img, axis=2)
                        out_img = np.expand_dims(out_img, axis=2)
                    crop_border = opt['scale']
                    cropped_out_img = out_img[crop_border:-crop_border, crop_border:-crop_border, :]
                    cropped_gt_img = gt_img[crop_border:-crop_border, crop_border:-crop_border, :]
                    if gt_img.shape[2] == 3:  # RGB image
                        cropped_out_img_y = bgr2ycbcr(cropped_out_img, only_y=True)
                        cropped_gt_img_y = bgr2ycbcr(cropped_gt_img, only_y=True)
                        avg_psnr += util.psnr(cropped_out_img_y, cropped_gt_img_y)
                        avg_ssim += util.ssim(cropped_out_img_y, cropped_gt_img_y, multichannel=False)
                    else:
                        avg_psnr += util.psnr(cropped_out_img, cropped_gt_img)
                        avg_ssim += util.ssim(cropped_out_img, cropped_gt_img, multichannel=True)

                avg_psnr = avg_psnr / idx
                avg_ssim = avg_ssim / idx
                time_elapsed = time.time() - start_time
                # Save to log
                print_rlt = OrderedDict()
                print_rlt['model'] = opt['model']
                print_rlt['epoch'] = epoch
                print_rlt['iters'] = current_step
                print_rlt['time'] = time_elapsed
                print_rlt['psnr'] = avg_psnr
                print_rlt['ssim'] = avg_ssim
                logger.print_format_results('val', print_rlt)
                print('-----------------------------------')

            # update learning rate
            model.update_learning_rate()

    print('Saving the final model.')
    model.save('latest')
    print('End of training.')
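The script above is configured entirely through its -opt JSON file; assuming it were saved as train.py (the filename is an assumption), a typical launch would be: python train.py -opt path/to/options.json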
Example #6
def main(name_flag, input_path, gt_path, model_path, save_path, save_imgs,
         flip_test):
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # set before any CUDA work so it takes effect
    device = torch.device('cuda')

    save_path = os.path.join(save_path, name_flag)

    #### model
    model = CNLRN_arch.CNLRN(n_colors=3,
                             n_deblur_blocks=20,
                             n_nlrgs_body=6,
                             n_nlrgs_up1=2,
                             n_nlrgs_up2=2,
                             n_subgroups=2,
                             n_rcabs=4,
                             n_feats=64,
                             nonlocal_psize=(4, 4, 4, 4),
                             scale=4)
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    model = model.to(device)

    #### logger
    util.mkdirs(save_path)
    util.setup_logger('base',
                      save_path,
                      name_flag,
                      level=logging.INFO,
                      screen=True,
                      tofile=True)
    logger = logging.getLogger('base')

    logger.info('Evaluate: {}'.format(name_flag))
    logger.info('Input images path: {}'.format(input_path))
    logger.info('GT images path: {}'.format(gt_path))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Results save path: {}'.format(save_path))
    logger.info('Flip test: {}'.format(flip_test))
    logger.info('Save images: {}'.format(save_imgs))

    #### Evaluation
    total_psnr_l = []
    total_ssim_l = []

    img_path_l = sorted(glob.glob(osp.join(input_path, '*')))

    #### read LQ and GT images
    imgs_LQ = data_util.read_img_seq(input_path)
    img_GT_l = []
    for img_GT_path in sorted(glob.glob(osp.join(gt_path, '*'))):
        img_GT_l.append(data_util.read_img(None, img_GT_path))

    # process each image
    for img_idx, img_path in enumerate(img_path_l):
        img_name = osp.splitext(osp.basename(img_path))[0]
        imgs_in = imgs_LQ[img_idx:img_idx + 1].to(device)

        if flip_test:
            output = util.flipx8_forward(model, imgs_in)
        else:
            output = util.single_forward(model, imgs_in)

        output = util.tensor2img(output.squeeze(0))

        if save_imgs:
            cv2.imwrite(osp.join(save_path, '{}.png'.format(img_name)), output)

        # calculate PSNR
        output = output / 255.
        GT = np.copy(img_GT_l[img_idx])

        output, GT = util.crop_border([output, GT], crop_border=4)
        crt_psnr = util.calculate_psnr(output * 255, GT * 255)
        crt_ssim = util.ssim(output * 255, GT * 255)
        total_psnr_l.append(crt_psnr)
        total_ssim_l.append(crt_ssim)

        logger.info('{} \tPSNR: {:.3f} \tSSIM: {:.4f}'.format(
            img_name, crt_psnr, crt_ssim))

    logger.info('################ Final Results ################')
    logger.info('Evaluate: {}'.format(name_flag))
    logger.info('Input images path: {}'.format(input_path))
    logger.info('GT images path: {}'.format(gt_path))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Results save path: {}'.format(save_path))
    logger.info('Flip test: {}'.format(flip_test))
    logger.info('Save images: {}'.format(save_imgs))
    logger.info(
        'Total Average PSNR: {:.3f} SSIM: {:.4f} for {} images.'.format(
            sum(total_psnr_l) / len(total_psnr_l),
            sum(total_ssim_l) / len(total_ssim_l), len(img_path_l)))
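A minimal invocation sketch for the evaluation entry point above; every path and the name_flag are placeholders.

# hypothetical call; all paths are placeholders
main(name_flag='cnlrn_x4_eval',
     input_path='./datasets/LQ',
     gt_path='./datasets/GT',
     model_path='./pretrained/CNLRN.pth',
     save_path='./results',
     save_imgs=True,
     flip_test=False)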