Example No. 1
def test_psnr_supports_different_data_ranges(input_tensors: Tuple[
    torch.Tensor, torch.Tensor], data_range, device: str) -> None:
    x, y = input_tensors
    x_scaled = (x * data_range).type(torch.uint8)
    y_scaled = (y * data_range).type(torch.uint8)

    measure_scaled = psnr(x_scaled.to(device),
                          y_scaled.to(device),
                          data_range=data_range)
    measure = psnr(x_scaled.to(device) / float(data_range),
                   y_scaled.to(device) / float(data_range),
                   data_range=1.0)
    diff = torch.abs(measure_scaled - measure)
    assert diff <= 1e-6, f'Result for same tensor with different data_range should be the same, got {diff}'
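The invariance this test checks can be reproduced standalone. A minimal sketch, assuming piq is installed; the tensor shapes are illustrative:

import torch
from piq import psnr

x = torch.rand(4, 3, 64, 64)
y = torch.rand(4, 3, 64, 64)

# Quantize once, then express the same images on two data ranges.
x_255, y_255 = (x * 255).round(), (y * 255).round()
score_255 = psnr(x_255, y_255, data_range=255)
score_1 = psnr(x_255 / 255.0, y_255 / 255.0, data_range=1.0)
assert torch.isclose(score_255, score_1, atol=1e-5)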
Example No. 2
def test_psnr_reduction(prediction: torch.Tensor, target: torch.Tensor):
    measure = psnr(prediction, target, reduction='mean')
    assert measure.dim() == 0, \
        f'PSNR with `mean` reduction must return 1 number, got {len(measure)}'

    measure = psnr(prediction, target, reduction='sum')
    assert measure.dim() == 0, \
        f'PSNR with `sum` reduction must return 1 number, got {len(measure)}'

    measure = psnr(prediction, target, reduction='none')
    assert len(measure) == prediction.size(0), \
        f'PSNR with `none` reduction must have length equal to number of images, got {len(measure)}'

    with pytest.raises(KeyError):
        psnr(prediction, target, reduction='random string')
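A self-contained sketch of the three reduction modes exercised by tests like the one above (shapes are illustrative; piq assumed installed):

import torch
from piq import psnr

x, y = torch.rand(4, 3, 64, 64), torch.rand(4, 3, 64, 64)
per_image = psnr(x, y, data_range=1.0, reduction='none')   # one score per image, shape (4,)
mean_score = psnr(x, y, data_range=1.0, reduction='mean')  # scalar
sum_score = psnr(x, y, data_range=1.0, reduction='sum')    # scalar
assert torch.isclose(per_image.mean(), mean_score)
assert torch.isclose(per_image.sum(), sum_score)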
Example No. 3
def test_psnr_reduction(x, y):
    measure = psnr(x, y, reduction='mean')
    assert measure.dim() == 0, \
        f'PSNR with `mean` reduction must return 1 number, got {len(measure)}'

    measure = psnr(x, y, reduction='sum')
    assert measure.dim() == 0, \
        f'PSNR with `sum` reduction must return 1 number, got {len(measure)}'

    measure = psnr(x, y, reduction='none')
    assert len(measure) == x.size(0), \
        f'PSNR with `none` reduction must have length equal to number of images, got {len(measure)}'

    with pytest.raises(ValueError):
        psnr(x, y, reduction='random string')
Example No. 4
    def update(self, output):
        # Take absolute values so negative density predictions cannot break PSNR.
        y_pred = torch.abs(output[0])
        y = torch.abs(output[1])

        # Upsample the predicted density map back to input resolution and
        # rescale so the total count is preserved (8 * 8 = 64).
        y_pred = F.interpolate(y_pred, scale_factor=8) / 64
        # Note: the zero-padding below assumes batch size 1.
        pad_density_map_tensor = torch.zeros((1, 1, y.shape[2], y.shape[3])).cuda()
        pad_density_map_tensor[:, 0, :y_pred.shape[2], :y_pred.shape[3]] = y_pred
        y_pred = pad_density_map_tensor

        # Normalize both maps to [0, 255] before computing PSNR.
        y = y / torch.max(y) * 255
        y_pred = y_pred / torch.max(y_pred) * 255
        # reduction="sum" accumulates per-image scores; dividing the running
        # sum by _num_examples later yields the mean over the dataset.
        psnr_metric = piq.psnr(y, y_pred, data_range=255, reduction="sum")
        self._sum += psnr_metric.item()

        self._num_examples += y.shape[0]
Example No. 5
    def update(self, output):
        # Clamp negative density values to zero before computing PSNR.
        y_pred = torch.clamp_min(output[0], min=0.0)
        y = torch.clamp_min(output[1], min=0.0)

        # Upsample the predicted density map back to input resolution and
        # rescale so the total count is preserved (8 * 8 = 64).
        y_pred = F.interpolate(y_pred, scale_factor=8) / 64
        # Note: the zero-padding below assumes batch size 1.
        pad_density_map_tensor = torch.zeros((1, 1, y.shape[2], y.shape[3])).cuda()
        pad_density_map_tensor[:, 0, :y_pred.shape[2], :y_pred.shape[3]] = y_pred
        y_pred = pad_density_map_tensor

        # Normalize both maps to [0, 255] before computing PSNR.
        y = y / torch.max(y) * 255
        y_pred = y_pred / torch.max(y_pred) * 255
        # reduction="sum" accumulates per-image scores; dividing the running
        # sum by _num_examples later yields the mean over the dataset.
        psnr_metric = piq.psnr(y, y_pred, reduction="sum", data_range=255)

        self._sum += psnr_metric.item()

        self._num_examples += y.shape[0]
Example No. 6
    def val_iter(self, final=True):
        with torch.no_grad():
            self.model.eval()
            t = tqdm(self.loader_val)
            if final:
                t.set_description("Validation")
            else:
                t.set_description(f"Epoch {self.epoch} val   ")
            psnr_avg = AverageMeter()
            ssim_avg = AverageMeter()
            l1_avg = AverageMeter()
            l2_avg = AverageMeter()
            for hr, lr in t:
                hr, lr = hr.to(self.dtype).to(self.device), lr.to(self.dtype).to(self.device)
                sr = self.model(lr).clamp(0, 1)
                l1_loss = torch.nn.functional.l1_loss(sr, hr).item()
                l2_loss = torch.sqrt(torch.nn.functional.mse_loss(sr, hr)).item()
                psnr = piq.psnr(hr, sr)
                ssim = piq.ssim(hr, sr)
                l1_avg.update(l1_loss)
                l2_avg.update(l2_loss)
                psnr_avg.update(psnr)
                ssim_avg.update(ssim)
                t.set_postfix(PSNR=f'{psnr_avg.get():.2f}', SSIM=f'{ssim_avg.get():.4f}')
            if self.writer is not None:
                self.writer.add_scalar('PSNR', psnr_avg.get(), self.epoch)
                self.writer.add_scalar('SSIM', ssim_avg.get(), self.epoch)
                self.writer.add_scalar('L1', l1_avg.get(), self.epoch)
                self.writer.add_scalar('L2', l2_avg.get(), self.epoch)
            return psnr_avg.get(), ssim_avg.get()
Example No. 7
def main():
    psnr_tensor = 0
    ssim_tensor = 0
    l1_tensor = 0
    l2_tensor = 0
    # lpips_tensor = 0
    count = 0
    t0 = time.time()
    print("Calculating image quality metrics ...")
    for batch in data_loader:
        img_batch, gt_batch = batch
        if torch.cuda.is_available():
            # Move to GPU to make computations faster
            img_batch = img_batch.cuda()
            gt_batch = gt_batch.cuda()

        for i in range(gt_batch.shape[0]):
            gt, img = gt_batch[i], img_batch[i]

            # MS-SIM
            ms_ssim_index: torch.Tensor = piq.multi_scale_ssim(gt,
                                                               img,
                                                               data_range=1.)
            # PSNR
            psnr_index: torch.Tensor = piq.psnr(gt,
                                                img,
                                                data_range=1.,
                                                reduction='mean')
            # L1 Error
            l1_index = nn.L1Loss(reduction='mean')(gt, img)
            # L2 Error
            l2_index = nn.MSELoss(reduction='mean')(gt, img)
            # LPIPS
            # lpips_loss: torch.Tensor = piq.LPIPS(reduction='mean')(gt, img)

            # Accumulate running totals for averaging
            ssim_tensor += ms_ssim_index
            psnr_tensor += psnr_index
            l1_tensor += l1_index
            l2_tensor += l2_index
            # lpips_tensor += lpips_loss.item()

            count += 1

    t1 = time.time()

    # print(
    #     "Avg. LPIPS: {} \nAvg. SSIM: {} \nAvg. PSNR: {} \nAvg. L1: {} \nAvg. L2: {} \n".format(lpips_tensor / count,
    #                                                                                            ssim_tensor / count,
    #                                                                                            psnr_tensor / count,
    #                                                                                            l1_tensor / count,
    #                                                                                            l2_tensor / count))

    print(
        "Avg. SSIM: {} \nAvg. PSNR: {} \nAvg. L1: {} \nAvg. L2: {} \n".format(
            ssim_tensor / count, psnr_tensor / count, l1_tensor / count,
            l2_tensor / count))
    print(count)
    print("Average processing time for each image (of total {} images): {} s".
          format(count, (t1 - t0) / count))
Example No. 8
    def eval(self, gt, pred):
        with torch.no_grad():
            gt_tensor = torch.Tensor(gt).clamp(0, 1).permute(0, 3, 1,
                                                             2).to('cuda:0')
            pred_tensor = torch.Tensor(pred).clamp(0,
                                                   1).permute(0, 3, 1,
                                                              2).to('cuda:0')
            psnr_index = piq.psnr(pred_tensor,
                                  gt_tensor,
                                  data_range=1.,
                                  reduction='none').item()
            _, _, h, w = gt_tensor.shape

            lpipsAlex = 0
            lpipsVGG = 0
            msssim_index = 0
            ssim_index = 0
            n = 1
            for i in range(n):
                for j in range(n):
                    xstart = w // n * j
                    ystart = h // n * i
                    xend = w // n * (j + 1)
                    yend = h // n * (i + 1)
                    ssim_index += piq.ssim(pred_tensor[:, :, ystart:yend,
                                                       xstart:xend],
                                           gt_tensor[:, :, ystart:yend,
                                                     xstart:xend],
                                           data_range=1.,
                                           reduction='none').item()
                    msssim_index += piq.multi_scale_ssim(
                        pred_tensor[:, :, ystart:yend, xstart:xend],
                        gt_tensor[:, :, ystart:yend, xstart:xend],
                        data_range=1.,
                        reduction='none').item()
                    lpipsVGG += self.lpipsVGG(
                        pred_tensor[:, :, ystart:yend, xstart:xend],
                        gt_tensor[:, :, ystart:yend, xstart:xend]).item()
                    lpipsAlex += self.lpipsAlex(
                        pred_tensor[:, :, ystart:yend, xstart:xend],
                        gt_tensor[:, :, ystart:yend, xstart:xend]).item()
            msssim_index /= n * n
            ssim_index /= n * n
            lpipsVGG /= n * n
            lpipsAlex /= n * n
            # dists = piq.DISTS(reduction='none')(pred_tensor, gt_tensor).item()

            # with torch.no_grad():
            #     lpips_index = piq.LPIPS(reduction='none')(pred_tensor, gt_tensor).item()
            rmse = ((gt - pred)**2).mean()**0.5
            # relmse = (((gt - pred) ** 2).mean() / (gt ** 2).mean() + 1e-5) ** 0.5
            # return {'rmse':rmse,'relmse':relmse,'psnr':psnr_index,'ssim':ssim_index,'msssim':msssim_index,'lpips':lpips_index}
        return {
            'rmse': rmse,
            'psnr': psnr_index,
            'ssim': ssim_index,
            'msssim': msssim_index,
            'lpipsVGG': lpipsVGG,
            'lpipsAlex': lpipsAlex
        }
Example No. 9
def test_psnr_big_for_identical_images(x) -> None:
    # Max value depends on EPS constant. It's 80 for EPS=1e-8, 100 for EPS=1e-10 etc.
    max_val = torch.tensor(80.)

    y = x.clone()
    measure = psnr(x, y, data_range=1.0)
    assert torch.isclose(
        measure,
        max_val), f"PSNR for identical images should be 80, got {measure}"
Example No. 10
def test_psnr_matches_skimage_greyscale():
    x = torch.rand(1, 1, 256, 256)
    y = torch.rand(1, 1, 256, 256)
    pm_measure = psnr(x, y, reduction='mean')
    sk_measure = peak_signal_noise_ratio(x.squeeze().numpy(),
                                         y.squeeze().numpy(),
                                         data_range=1.0)

    assert torch.isclose(pm_measure, torch.tensor(sk_measure, dtype=pm_measure.dtype)), \
        f"Must match skimage version. Got piq: {pm_measure} and skimage: {sk_measure}"
Example No. 11
def test_psnr_matches_skimage_rgb():
    prediction = torch.rand(1, 3, 256, 256)
    target = torch.rand(1, 3, 256, 256)
    pm_measure = psnr(prediction, target, reduction='mean')
    sk_measure = peak_signal_noise_ratio(prediction.squeeze().numpy(),
                                         target.squeeze().numpy(),
                                         data_range=1.0)

    assert torch.isclose(pm_measure, torch.tensor(sk_measure, dtype=pm_measure.dtype)), \
        f"Must match skimage version. Got piq: {pm_measure} and skimage: {sk_measure}"
Example No. 12
    def training_step(self, train_batch, batch_idx):
        img = train_batch[0]

        # generate noisy image
        noise = torch.empty_like(img)
        noise.normal_(0, 0.1)
        noise_img = img + noise

        # model datafeed
        output = self.model(noise_img)
        # PSNR

        psnr = piq.psnr(output, img, data_range=255, reduction='none')

        loss = F.mse_loss(output, img)
        self.log('PSNR', psnr.mean().item())
        self.log('train_loss', loss)
        return loss
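Note that data_range=255 above assumes the batch is already scaled to [0, 255]. For inputs normalized to [0, 1], a hedged variant (the clamp guards against model outputs drifting outside the declared range) would be:

        psnr = piq.psnr(output.clamp(0, 1), img.clamp(0, 1), data_range=1.0, reduction='none')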
Example No. 13
def compute_psnr(args, unnorm_recons, gt_exp, data_range):
    # Reshape to (batch * trajectories, 1, res, res) and back to (batch, trajectories)
    # afterwards, because of the psnr implementation.
    psnr_recons = torch.clamp(unnorm_recons, 0., 10.).reshape(
        gt_exp.size(0) * gt_exp.size(1), 1, args.resolution,
        args.resolution).to('cpu')
    psnr_gt = gt_exp.reshape(
        gt_exp.size(0) * gt_exp.size(1), 1, args.resolution,
        args.resolution).to('cpu')
    # First duplicate the data range over trajectories, then reshape, to ensure alignment with recon and gt.
    psnr_data_range = data_range.expand(-1, gt_exp.size(1), -1, -1)
    psnr_data_range = psnr_data_range.reshape(
        gt_exp.size(0) * gt_exp.size(1), 1, 1, 1).to('cpu')
    psnr_scores = psnr(psnr_recons,
                       psnr_gt,
                       reduction='none',
                       data_range=psnr_data_range)
    psnr_scores = psnr_scores.reshape(gt_exp.size(0), gt_exp.size(1))
    return psnr_scores
Example No. 14
def image_metrics_from_dataset(dataset, output_addr='/tmp/psnr.csv'):

    cols = ['file_name', 'psnr', 'ssim']
    cols_str = ','.join(cols)

    with open(output_addr, 'wt') as f:
        f.write(f'{cols_str}\n')

    dl = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
    for i, data in tqdm(enumerate(dl), total=int(len(dataset))):
        x, y, file_name = data
        psnr = piq.psnr(x, y).item()
        ssim = piq.ssim(x, y).item()
        vals = [file_name[0], str(psnr), str(ssim)]
        val_str = ','.join(vals)
        with open(output_addr, 'at') as f:
            f.write(f'{val_str}\n')

    return pd.read_csv(output_addr)
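A behavior-preserving variant that keeps the CSV handle open across the whole loop instead of reopening the file once per image (DataLoader, piq, tqdm and pandas as imported by the surrounding code):

def image_metrics_from_dataset(dataset, output_addr='/tmp/psnr.csv'):
    dl = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
    with open(output_addr, 'wt') as f:
        f.write('file_name,psnr,ssim\n')
        for x, y, file_name in tqdm(dl, total=len(dataset)):
            # batch_size=1, so file_name is a 1-element sequence of strings
            f.write(f'{file_name[0]},{piq.psnr(x, y).item()},{piq.ssim(x, y).item()}\n')
    return pd.read_csv(output_addr)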
Example No. 15
    def validation_epoch_end(
            self, outputs: List[Tuple[torch.Tensor, torch.Tensor]]) -> None:
        if isinstance(self.val_dataloader().dataset, ImageLoader):
            self.val_dataloader().dataset.val = False
        else:
            self.val_dataloader().dataset.dataset.val = False

        fid_score = fid(self.forged_images, self.reference_images,
                        self.hparams.feature_dimensionality_fid, self.device)
        ssim_score = ssim(self.forged_images,
                          self.reference_images,
                          data_range=255)
        psnr_score = psnr(self.forged_images,
                          self.reference_images,
                          data_range=255)

        self.log('FID_score', fid_score, on_step=False, on_epoch=True)
        self.log('SSIM', ssim_score, on_step=False, on_epoch=True)
        self.log('PSNR', psnr_score, on_step=False, on_epoch=True)
Example No. 16
def grid_search(x, y, rec_func, grid):
    """ Grid search utility for tuning hyper-parameters. """
    err_min = np.inf
    grid_param = None

    grid_shape = [len(val) for val in grid.values()]
    err = torch.zeros(grid_shape)
    err_psnr = torch.zeros(grid_shape)
    err_ssim = torch.zeros(grid_shape)

    for grid_val, nidx in zip(itertools.product(*grid.values()),
                              np.ndindex(*grid_shape)):
        grid_param_cur = dict(zip(grid.keys(), grid_val))
        print(
            "Current grid parameters (" + str(list(nidx)) + " / " +
            str(grid_shape) + "): " + str(grid_param_cur),
            flush=True,
        )
        x_rec = rec_func(y, **grid_param_cur)
        err[nidx], _ = l2_error(x_rec, x, relative=True, squared=False)
        err_psnr[nidx] = psnr(
            rotate_real(x_rec)[:, 0:1, ...],
            rotate_real(x)[:, 0:1, ...],
            data_range=rotate_real(x)[:, 0:1, ...].max(),
            reduction="mean",
        )
        err_ssim[nidx] = ssim(
            rotate_real(x_rec)[:, 0:1, ...],
            rotate_real(x)[:, 0:1, ...],
            data_range=rotate_real(x)[:, 0:1, ...].max(),
            size_average=True,
        )
        print("Rel. recovery error: {:1.2e}".format(err[nidx]), flush=True)
        print("PSNR: {:.2f}".format(err_psnr[nidx]), flush=True)
        print("SSIM: {:.2f}".format(err_ssim[nidx]), flush=True)
        if err[nidx] < err_min:
            grid_param = grid_param_cur
            err_min = err[nidx]

    return grid_param, err_min, err, err_psnr, err_ssim
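A hedged usage sketch; the grid keys and rec_func below are placeholders, not names from the original code:

grid = {'step_size': [0.1, 0.5, 1.0], 'num_iter': [50, 100]}
best_param, best_err, err, err_psnr, err_ssim = grid_search(x, y, rec_func, grid)
print('Best parameters:', best_param)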
Example No. 17
    def forward(self, predict, target):
        # Collect only the metrics that are enabled; building the dict
        # incrementally avoids a NameError when a flag is disabled.
        metric_summary = {}
        if self.l1_norm:
            metric_summary['l1_norm'] = nn.functional.l1_loss(predict, target)
        if self.mse:
            metric_summary['mse'] = nn.functional.mse_loss(predict, target)
        if self.pearsonr:
            metric_summary['pearsonr_metric'] = audtorch.metrics.functional.pearsonr(predict, target).mean()
        if self.cc:
            metric_summary['cc'] = audtorch.metrics.functional.concordance_cc(predict, target).mean()
        if self.psnr:
            metric_summary['psnr'] = piq.psnr(predict, target, data_range=1., reduction='none').mean()
        if self.ssim:
            metric_summary['ssim'] = piq.ssim(predict, target, data_range=1.)
        if self.mssim:
            metric_summary['mssim'] = piq.multi_scale_ssim(predict, target, data_range=1.)
        return metric_summary
Example No. 18
def main(args):

    input_shape = (3, 380, 380)
    if not os.path.exists(args.checkpoints_output):
        os.makedirs(args.checkpoints_output)

    if not os.path.exists(args.logs):
        os.makedirs(args.logs)

    images_output = os.path.join(args.logs, 'images')
    if not os.path.exists(images_output):
        os.makedirs(images_output)

    if args.model not in models:
        print(f"Model name {args.model} must be one of: {model_names}")
        return 1

    print(f"Seting up training for model: {args.model}")
    print(f"Train X Root: {args.train_x_root}")
    print(f"Train Y Root: {args.train_y_root}")

    if args.test_x is not None and args.test_y is not None:
        print(f"Test X Root: {args.test_x}")
        print(f"Test Y Root: {args.test_y}")

    normalize_transform = transforms.Normalize((0.5, 0.5, 0.5),
                                               (0.5, 0.5, 0.5))
    if args.test_x is None or args.test_y is None:
        dataset = EnumPairedDataset(args.train_x_root,
                                    args.train_y_root,
                                    transform=normalize_transform)
        train_d, test_d = train_val_dataset(dataset)
    else:
        train_d = EnumPairedDataset(args.train_x_root,
                                    args.train_y_root,
                                    transform=normalize_transform)
        test_d = EnumPairedDataset(args.test_x,
                                   args.test_y,
                                   transform=normalize_transform)

    train_batch_size = args.train_batch_size
    test_batch_size = args.test_batch_size
    train_dl = DataLoader(train_d,
                          batch_size=train_batch_size,
                          shuffle=True,
                          num_workers=0)
    test_dl = DataLoader(test_d,
                         batch_size=test_batch_size,
                         shuffle=True,
                         num_workers=0)

    if args.show_dataset:
        x_batch, y_batch, names = next(iter(train_dl))
        plt.subplot(2, 1, 1)
        plt.imshow(torchvision.utils.make_grid(x_batch).permute(1, 2, 0))
        plt.subplot(2, 1, 2)
        plt.imshow(torchvision.utils.make_grid(y_batch).permute(1, 2, 0))
        plt.show()

    model = models[args.model]

    model = torch.nn.DataParallel(model,
                                  device_ids=range(torch.cuda.device_count()))
    device = f"cuda:{model.device_ids[0]}"
    #device = 'cpu'
    model.to(device)
    summary(model, input_shape)

    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    #criterion = loss_fun()

    if args.load is not None:
        try:
            pretrained_dict = torch.load(args.load)
            model_dict = model.state_dict()
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)
            print(f"Weights loaded from {args.load}")
        except Exception as e:
            print(f"Couldn't load weights from {args.load}")
            print(e)

    best_training_loss = math.inf
    test_loss_for_best_training_loss = math.inf

    cols = [
        'epoch', 'training_loss', 'test_loss', 'train_psnr', 'test_psnr',
        'train_ssim', 'test_ssim'
    ]
    logs_addr = os.path.join(args.logs, 'logs.csv')
    add_line_to_csv(logs_addr, cols)

    print(f"Logs to: {args.logs}")
    epochs = args.epochs
    if epochs <= args.from_epoch:
        epochs += args.from_epoch

    for epoch in range(args.from_epoch, epochs + 1):

        print(f"Epoch {epoch}/{epochs}")

        training_loss = 0.0
        test_loss = 0.0

        train_psnr = 0.0
        test_psnr = 0.0
        train_ssim = 0.0
        test_ssim = 0.0

        if args.load is not None:
            try:
                fn, ext = os.path.splitext(os.path.basename(args.load))
                loss_vals = fn.split('_')
                best_training_loss = float(loss_vals[2])
                test_loss_for_best_training_loss = float(loss_vals[3])
            except Exception as e:
                print(f"Couldn't load best training loss from {args.load}")
                print(e)

        print("Training:")
        for i, data in tqdm(enumerate(train_dl),
                            total=int(len(train_d) / train_batch_size)):
            w, m, file_name = data
            x = w.to(device)
            y = m.to(device)
            del w
            del m

            optimizer.zero_grad()
            y_hat = model(x)

            loss = loss_fun(y_hat, y)
            loss.backward()
            optimizer.step()

            training_loss += float(loss.item())
            del x
            '''
            train_psnr = piq.psnr(y_hat[0], y[0],data_range=1.,
                                   reduction='none')
            train_ssim = piq.ssim(y_hat[0], y[0], data_range=1.,
                                   reduction='none')
            '''

            del y
            del y_hat

        training_loss /= (i + 1)
        #train_psnr /= (i+1)
        #train_ssim /= (i+1)

        with torch.no_grad():
            print("Testing:")
            for i, data in tqdm(enumerate(test_dl),
                                total=int(len(test_d) / test_batch_size)):
                w, m, file_name = data
                x = w.to(device)
                y = m.to(device)
                y_hat = model(x)
                loss = loss_fun(y_hat, y)
                test_loss += float(loss.item())
                del x

                try:
                    test_psnr += piq.psnr(y_hat, y)
                    test_ssim += piq.ssim(y_hat, y)
                except Exception:
                    # piq validates input ranges; skip batches whose outputs fall outside [0, 1]
                    pass

                if args.show_output_images and i < 5:
                    imgs_dir = os.path.join(images_output, f"epoch_{epoch}")
                    if not os.path.exists(imgs_dir):
                        os.makedirs(imgs_dir)
                    for j, y_hat_i in enumerate(y_hat):
                        fn = os.path.splitext(os.path.basename(
                            file_name[j]))[0]
                        y_gt = y[j]
                        img_i_addr = os.path.join(
                            imgs_dir,
                            f'{epoch}_{fn}_{i}_{j}.{dataset.images_extension}')
                        img_i_gt_addr = os.path.join(
                            imgs_dir,
                            f'{epoch}_{fn}_{i}_{j}_gt.{dataset.images_extension}'
                        )
                        torchvision.utils.save_image(y_hat_i, img_i_addr)
                        torchvision.utils.save_image(y_gt, img_i_gt_addr)
                        del y_gt
                del y
                del y_hat

        test_loss /= (i + 1)
        test_psnr /= (i + 1)
        test_ssim /= (i + 1)

        print(f"Completed Epoch: {epoch}/{args.epochs}")
        print(f"\tTrain loss: {training_loss}")
        print(f"\tTest loss: {test_loss}")
        print(f"\tTrain PSNR: {train_psnr}")
        print(f"\tTest PSNR: {test_psnr}")
        print(f"\tTrain SSIM: {train_ssim}")
        print(f"\tTest SSIM: {test_ssim}")
        print(f"\tBest training loss so far: {best_training_loss}")
        print(f"\tTest loss for: {test_loss_for_best_training_loss}")

        add_line_to_csv(logs_addr, [
            str(epoch),
            str(training_loss),
            str(test_loss),
            str(train_psnr),
            str(test_psnr),
            str(train_ssim),
            str(test_ssim)
        ])

        if best_training_loss > training_loss:
            best_training_loss = training_loss
            test_loss_for_best_training_loss = test_loss
            save_file_name = f"{args.model}_epoch_{epoch}_{best_training_loss:.3f}_{test_loss_for_best_training_loss:.3f}.pth"
            checkpoint_path = os.path.join(args.checkpoints_output,
                                           save_file_name)
            torch.save(model.state_dict(), checkpoint_path)
Example No. 19
                store_data=True,
                keep_init=keep_init,
                err_measure=err_measure,
            )

            (
                results.loc[idx].X_adv_err[:, s],
                idx_max_adv_err,
            ) = X_adv_err_cur.max(dim=1)
            results.loc[idx].X_ref_err[:, s] = X_ref_err_cur.mean(dim=1)

            for idx_noise in range(len(noise_rel)):
                idx_max = idx_max_adv_err[idx_noise]
                results.loc[idx].X_adv_psnr[idx_noise, s] = psnr(
                    rotate_real(X_adv_cur[idx_noise, ...])[idx_max, 0:1, ...],
                    rotate_real(X_0_s.cpu())[0, 0:1, ...],
                    data_range=4.5,
                    reduction="none",
                )  # normalization as in example-script
                results.loc[idx].X_ref_psnr[idx_noise, s] = psnr(
                    rotate_real(X_ref_cur[idx_noise, ...])[:, 0:1, ...],
                    rotate_real(X_0_s.cpu())[:, 0:1, ...],
                    data_range=4.5,
                    reduction="mean",
                )  # normalization as in example-script
                results.loc[idx].X_adv_ssim[idx_noise, s] = ssim(
                    rotate_real(X_adv_cur[idx_noise, ...])[idx_max, 0:1, ...],
                    rotate_real(X_0_s.cpu())[0, 0:1, ...],
                    data_range=4.5,
                    size_average=False,
                )  # normalization as in example-script
                results.loc[idx].X_ref_ssim[idx_noise, s] = ssim(
Example No. 20
def test_psnr_works_for_colour_images_on_gpu(prediction: torch.Tensor,
                                             target: torch.Tensor) -> None:
    prediction = prediction.cuda()
    target = target.cuda()
    psnr(prediction, target, data_range=1.0)
Example No. 21
def test_psnr_works_for_greyscale_images_on_gpu() -> None:
    prediction = torch.rand(4, 1, 256, 256).cuda()
    target = torch.rand(4, 1, 256, 256).cuda()
    psnr(prediction, target, data_range=1.0)
Example No. 22
            keep_init=keep_init,
            err_measure=err_measure,
        )

        results.loc[idx].X_adv_psnr = torch.zeros(len(noise_rel), X_0.shape[0])
        results.loc[idx].X_ref_psnr = torch.zeros(len(noise_rel), X_0.shape[0])
        results.loc[idx].X_adv_ssim = torch.zeros(len(noise_rel), X_0.shape[0])
        results.loc[idx].X_ref_ssim = torch.zeros(len(noise_rel), X_0.shape[0])

        for idx_noise in range(len(noise_rel)):
            results.loc[idx].X_adv_psnr[idx_noise, ...] = psnr(
                torch.clamp(
                    rotate_real(results.loc[idx].X_adv[idx_noise, ...])[:, 0:1,
                                                                        ...],
                    v_min,
                    v_max,
                ),
                torch.clamp(rotate_real(X_0.cpu())[:, 0:1, ...], v_min, v_max),
                data_range=v_max - v_min,
                reduction="none",
            )
            results.loc[idx].X_ref_psnr[idx_noise, ...] = psnr(
                torch.clamp(
                    rotate_real(results.loc[idx].X_ref[idx_noise, ...])[:, 0:1,
                                                                        ...],
                    v_min,
                    v_max,
                ),
                torch.clamp(rotate_real(X_0.cpu())[:, 0:1, ...], v_min, v_max),
                data_range=v_max - v_min,
                reduction="none",
Example No. 23
def test_psnr(input_tensors: Tuple[torch.Tensor, torch.Tensor],
              device: str) -> None:
    x, y = input_tensors
    psnr(x.to(device), y.to(device), data_range=1.0)
Example No. 24
def test_psnr_works_for_zero_tensors() -> None:
    prediction = torch.zeros(4, 3, 256, 256)
    target = torch.zeros(4, 3, 256, 256)
    measure = psnr(prediction, target, data_range=1.0)
    assert torch.isclose(measure, torch.tensor(80.))
Example No. 25
def test_psnr_works_for_2d_tensors() -> None:
    prediction = torch.rand(256, 256)
    target = torch.rand(256, 256)
    psnr(prediction, target, data_range=1.0)
Example No. 26
def test_psnr_works_for_zero_tensors() -> None:
    x = torch.zeros(4, 3, 256, 256)
    y = torch.zeros(4, 3, 256, 256)
    measure = psnr(x, y, data_range=1.0)
    assert torch.isclose(measure, torch.tensor(80.))
Example No. 27
    ssims_forged = {"value": [], "dataset": []}
    psnrs_forged = {"value": [], "dataset": []}
    ssims_input = {"value": [], "dataset": []}
    psnrs_input = {"value": [], "dataset": []}

    for i, j, k in zip(forged, reference, inputs):
        ssims_forged["value"].append(ssim(i, j, data_range=1.).item())
        psnrs_forged["value"].append(psnr(i, j, data_range=1.).item())
        ssims_input["value"].append(ssim(j, k, data_range=1.).item())
        psnrs_input["value"].append(psnr(j, k, data_range=1.).item())
        ssims_forged["dataset"].append("forged")
        psnrs_forged["dataset"].append("forged")
        ssims_input["dataset"].append("input")
        psnrs_input["dataset"].append("input")

    ssims_input_df = pd.DataFrame(ssims_input)
    ssims_forged_df = pd.DataFrame(ssims_forged)
    psnrs_input_df = pd.DataFrame(psnrs_input)
    psnrs_forged_df = pd.DataFrame(psnrs_forged)

    plot_comparison(ssims_input_df, psnrs_input_df, ssims_forged_df,
                    psnrs_forged_df, args.title)
    plot_individual(ssims_input_df, psnrs_input_df, ssims_forged_df,
Example No. 28
def main():
    # Read an RGB image and its noisy version
    x = torch.tensor(imread('tests/assets/i01_01_5.bmp')).permute(2, 0,
                                                                  1) / 255.
    y = torch.tensor(imread('tests/assets/I01.BMP')).permute(2, 0, 1) / 255.

    if torch.cuda.is_available():
        # Move to GPU to make computations faster
        x = x.cuda()
        y = y.cuda()

    # To compute BRISQUE score as a measure, use lower case function from the library
    brisque_index: torch.Tensor = piq.brisque(x,
                                              data_range=1.,
                                              reduction='none')
    # In order to use BRISQUE as a loss function, use corresponding PyTorch module.
    # Note: the back propagation is not available using torch==1.5.0.
    # Update the environment with latest torch and torchvision.
    brisque_loss: torch.Tensor = piq.BRISQUELoss(data_range=1.,
                                                 reduction='none')(x)
    print(
        f"BRISQUE index: {brisque_index.item():0.4f}, loss: {brisque_loss.item():0.4f}"
    )

    # To compute Content score as a loss function, use corresponding PyTorch module
    # By default VGG16 model is used, but any feature extractor model is supported.
    # Don't forget to adjust layers names accordingly. Features from different layers can be weighted differently.
    # Use weights parameter. See other options in class docstring.
    content_loss = piq.ContentLoss(feature_extractor="vgg16",
                                   layers=("relu3_3", ),
                                   reduction='none')(x, y)
    print(f"ContentLoss: {content_loss.item():0.4f}")

    # To compute DISTS as a loss function, use corresponding PyTorch module
    # By default input images are normalized with ImageNet statistics before forwarding through VGG16 model.
    # If there is no need to normalize the data, use mean=[0.0, 0.0, 0.0] and std=[1.0, 1.0, 1.0].
    dists_loss = piq.DISTS(reduction='none')(x, y)
    print(f"DISTS: {dists_loss.item():0.4f}")

    # To compute FSIM as a measure, use lower case function from the library
    fsim_index: torch.Tensor = piq.fsim(x, y, data_range=1., reduction='none')
    # In order to use FSIM as a loss function, use corresponding PyTorch module
    fsim_loss = piq.FSIMLoss(data_range=1., reduction='none')(x, y)
    print(
        f"FSIM index: {fsim_index.item():0.4f}, loss: {fsim_loss.item():0.4f}")

    # To compute GMSD as a measure, use lower case function from the library
    # This is port of MATLAB version from the authors of original paper.
    # In any case it should be minimized. Usually values of GMSD lie in the [0, 0.35] interval.
    gmsd_index: torch.Tensor = piq.gmsd(x, y, data_range=1., reduction='none')
    # In order to use GMSD as a loss function, use corresponding PyTorch module:
    gmsd_loss: torch.Tensor = piq.GMSDLoss(data_range=1., reduction='none')(x,
                                                                            y)
    print(
        f"GMSD index: {gmsd_index.item():0.4f}, loss: {gmsd_loss.item():0.4f}")

    # To compute HaarPSI as a measure, use lower case function from the library
    # This is port of MATLAB version from the authors of original paper.
    haarpsi_index: torch.Tensor = piq.haarpsi(x,
                                              y,
                                              data_range=1.,
                                              reduction='none')
    # In order to use HaarPSI as a loss function, use corresponding PyTorch module
    haarpsi_loss: torch.Tensor = piq.HaarPSILoss(data_range=1.,
                                                 reduction='none')(x, y)
    print(
        f"HaarPSI index: {haarpsi_index.item():0.4f}, loss: {haarpsi_loss.item():0.4f}"
    )

    # To compute LPIPS as a loss function, use corresponding PyTorch module
    lpips_loss: torch.Tensor = piq.LPIPS(reduction='none')(x, y)
    print(f"LPIPS: {lpips_loss.item():0.4f}")

    # To compute MDSI as a measure, use lower case function from the library
    mdsi_index: torch.Tensor = piq.mdsi(x, y, data_range=1., reduction='none')
    # In order to use MDSI as a loss function, use corresponding PyTorch module
    mdsi_loss: torch.Tensor = piq.MDSILoss(data_range=1., reduction='none')(x,
                                                                            y)
    print(
        f"MDSI index: {mdsi_index.item():0.4f}, loss: {mdsi_loss.item():0.4f}")

    # To compute MS-SSIM index as a measure, use lower case function from the library:
    ms_ssim_index: torch.Tensor = piq.multi_scale_ssim(x, y, data_range=1.)
    # In order to use MS-SSIM as a loss function, use corresponding PyTorch module:
    ms_ssim_loss = piq.MultiScaleSSIMLoss(data_range=1., reduction='none')(x,
                                                                           y)
    print(
        f"MS-SSIM index: {ms_ssim_index.item():0.4f}, loss: {ms_ssim_loss.item():0.4f}"
    )

    # To compute Multi-Scale GMSD as a measure, use lower case function from the library
    # It can be used both as a measure and as a loss function. In any case it should be minimized.
    # By default scale weights are initialized with values from the paper.
    # You can change them by passing a list of 4 variables to scale_weights argument during initialization
    # Note that input tensors should contain images with height and width equal 2 ** number_of_scales + 1 at least.
    ms_gmsd_index: torch.Tensor = piq.multi_scale_gmsd(x,
                                                       y,
                                                       data_range=1.,
                                                       chromatic=True,
                                                       reduction='none')
    # In order to use Multi-Scale GMSD as a loss function, use corresponding PyTorch module
    ms_gmsd_loss: torch.Tensor = piq.MultiScaleGMSDLoss(chromatic=True,
                                                        data_range=1.,
                                                        reduction='none')(x, y)
    print(
        f"MS-GMSDc index: {ms_gmsd_index.item():0.4f}, loss: {ms_gmsd_loss.item():0.4f}"
    )

    # To compute PSNR as a measure, use lower case function from the library.
    psnr_index = piq.psnr(x, y, data_range=1., reduction='none')
    print(f"PSNR index: {psnr_index.item():0.4f}")

    # To compute PieAPP as a loss function, use corresponding PyTorch module:
    pieapp_loss: torch.Tensor = piq.PieAPP(reduction='none', stride=32)(x, y)
    print(f"PieAPP loss: {pieapp_loss.item():0.4f}")

    # To compute SSIM index as a measure, use lower case function from the library:
    ssim_index = piq.ssim(x, y, data_range=1.)
    # In order to use SSIM as a loss function, use corresponding PyTorch module:
    ssim_loss: torch.Tensor = piq.SSIMLoss(data_range=1.)(x, y)
    print(
        f"SSIM index: {ssim_index.item():0.4f}, loss: {ssim_loss.item():0.4f}")

    # To compute Style score as a loss function, use corresponding PyTorch module:
    # By default VGG16 model is used, but any feature extractor model is supported.
    # Don't forget to adjust layers names accordingly. Features from different layers can be weighted differently.
    # Use weights parameter. See other options in class docstring.
    style_loss = piq.StyleLoss(feature_extractor="vgg16",
                               layers=("relu3_3", ))(x, y)
    print(f"Style: {style_loss.item():0.4f}")

    # To compute TV as a measure, use lower case function from the library:
    tv_index: torch.Tensor = piq.total_variation(x)
    # In order to use TV as a loss function, use corresponding PyTorch module:
    tv_loss: torch.Tensor = piq.TVLoss(reduction='none')(x)
    print(f"TV index: {tv_index.item():0.4f}, loss: {tv_loss.item():0.4f}")

    # To compute VIF as a measure, use lower case function from the library:
    vif_index: torch.Tensor = piq.vif_p(x, y, data_range=1.)
    # In order to use VIF as a loss function, use corresponding PyTorch class:
    vif_loss: torch.Tensor = piq.VIFLoss(sigma_n_sq=2.0, data_range=1.)(x, y)
    print(f"VIFp index: {vif_index.item():0.4f}, loss: {vif_loss.item():0.4f}")

    # To compute VSI score as a measure, use lower case function from the library:
    vsi_index: torch.Tensor = piq.vsi(x, y, data_range=1.)
    # In order to use VSI as a loss function, use corresponding PyTorch module:
    vsi_loss: torch.Tensor = piq.VSILoss(data_range=1.)(x, y)
    print(f"VSI index: {vsi_index.item():0.4f}, loss: {vsi_loss.item():0.4f}")
Example No. 29
def test_psnr_fails_for_incorrect_data_range(x, y, device: str) -> None:
    # Scale to [0, 255]
    x_scaled = (x * 255).type(torch.uint8)
    y_scaled = (y * 255).type(torch.uint8)
    with pytest.raises(AssertionError):
        psnr(x_scaled.to(device), y_scaled.to(device), data_range=1.0)
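For contrast, a hedged sketch of the passing case: the assertion disappears when data_range agrees with the actual value range (exact validation behavior may vary between piq versions):

import torch
from piq import psnr

x = (torch.rand(1, 3, 64, 64) * 255).type(torch.uint8)
y = (torch.rand(1, 3, 64, 64) * 255).type(torch.uint8)
score = psnr(x.float(), y.float(), data_range=255)  # range and data agree: no AssertionError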
Example No. 30
def test_psnr_loss_backward():
    x = torch.rand(1, 3, 256, 256, requires_grad=True)
    y = torch.rand(1, 3, 256, 256)
    loss = 80 - psnr(x, y, reduction='mean')
    loss.backward()
    assert x.grad is not None, 'Expected non None gradient of leaf variable'