Example #1
def test_psnr_batch():
    # 4D case: [batch, channel, height, width].
    shape4d = [4, 6, 32, 32]
    input, input2 = create_input_pair(shape4d, sigma=0.1)
    input_numpy = utils.torch_to_numpy(input)
    input2_numpy = utils.torch_to_numpy(input2)

    # Batched torch PSNR should equal the mean of per-sample numpy PSNRs.
    err = loss.psnr(input, input2, batch=True).item()
    err_numpy = 0
    for i in range(shape4d[0]):
        err_curr = psnr(input_numpy[i], input2_numpy[i])
        err_numpy += err_curr
    err_numpy /= shape4d[0]
    assert np.allclose(err, err_numpy)

    # 5D case: [batch, channel, depth, height, width] with a singleton depth
    # axis; the numpy side drops it via [:, 0] before comparing. This call
    # relies on loss.psnr's default `batch` behaviour.
    shape5d = [4, 6, 1, 32, 32]
    input, input2 = create_input_pair(shape5d, sigma=0.1)
    input_numpy = utils.torch_to_numpy(input)
    input2_numpy = utils.torch_to_numpy(input2)

    err = loss.psnr(input, input2)
    err_numpy = 0
    for i in range(shape5d[0]):
        err_numpy += psnr(input_numpy[i][:, 0], input2_numpy[i][:, 0])
    err_numpy /= shape5d[0]

    assert np.allclose(err, err_numpy)
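
For reference, the test relies on two helpers that are not shown here: `create_input_pair` and the numpy-side `psnr`. Below is a minimal sketch of plausible implementations, assuming the reference metric is skimage's `peak_signal_noise_ratio` scored against the ground truth's own range; the bodies are assumptions, not the project's actual code.

import numpy as np
import torch
from skimage.metrics import peak_signal_noise_ratio

def create_input_pair(shape, sigma=0.1):
    # A clean random tensor plus a copy corrupted with Gaussian noise.
    clean = torch.rand(shape)
    noisy = clean + sigma * torch.randn(shape)
    return clean, noisy

def psnr(gt, pred):
    # Numpy reference: PSNR measured against the ground truth's dynamic range.
    return peak_signal_noise_ratio(gt, pred, data_range=gt.max())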
Example #2
def _evaluate(self, val_logs):
    losses = []
    outputs = defaultdict(list)
    targets = defaultdict(list)
    # Group per-slice outputs/targets by file name so each volume can be
    # reassembled in slice order.
    for log in val_logs:
        losses.append(log['val_loss'].cpu().numpy())
        for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
            outputs[fname].append((slice, log['output'][i]))
            targets[fname].append((slice, log['target'][i]))
    metrics = dict(val_loss=losses, nmse=[], ssim=[], psnr=[])
    for fname in outputs:
        # Sort by slice index, stack into a volume, and score per volume.
        output = np.stack([out for _, out in sorted(outputs[fname])])
        target = np.stack([tgt for _, tgt in sorted(targets[fname])])
        metrics['nmse'].append(evaluate.nmse(target, output))
        metrics['ssim'].append(evaluate.ssim(target, output))
        metrics['psnr'].append(evaluate.psnr(target, output))
    metrics = {metric: np.mean(values) for metric, values in metrics.items()}
    print(metrics, '\n')
    # Append this run's metrics to a CSV so validation passes accumulate.
    metric_file_path = Path(self.hparams.exp_dir) / self.hparams.exp / "validation_metrics"
    metric_file_path.mkdir(parents=True, exist_ok=True)
    metric_file_path = metric_file_path / "metrics.csv"
    df = pd.DataFrame([metrics])
    if metric_file_path.exists():
        df.to_csv(metric_file_path, mode="a", header=False, index=False)
    else:
        df.to_csv(metric_file_path, mode="w", header=True, index=False)
    return dict(log=metrics, **metrics)
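
Because `_evaluate` appends one row per call, metrics.csv accumulates a validation history across epochs. A hedged readback sketch; the path mirrors the construction above (`exp_dir/exp/validation_metrics/metrics.csv`), but the concrete directory names are placeholders.

import pandas as pd
from pathlib import Path

# Hypothetical experiment directory; substitute the real exp_dir / exp.
csv_path = Path("experiments") / "unet_baseline" / "validation_metrics" / "metrics.csv"
history = pd.read_csv(csv_path)
# One row per validation pass, columns matching the metrics dict above.
print(history[["val_loss", "nmse", "ssim", "psnr"]].tail())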
Example #3
def test_psnr(shape):
    # Non-batched torch PSNR must match the numpy reference on the same pair.
    input, input2 = create_input_pair(shape, sigma=0.1)
    input_numpy = utils.torch_to_numpy(input)
    input2_numpy = utils.torch_to_numpy(input2)

    err = loss.psnr(input, input2, batch=False).item()
    err_numpy = psnr(input_numpy, input2_numpy)
    assert np.allclose(err, err_numpy)
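
Here `shape` arrives as a pytest parameter; a minimal sketch of how the test might be parametrized (the shapes listed are illustrative assumptions, not the project's actual test matrix):

import pytest

@pytest.mark.parametrize("shape", [[6, 32, 32], [4, 6, 32, 32]])
def test_psnr(shape):
    ...  # body as above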
Example #4
def eval(args, model, data_loader):
    model.eval()
    psnrl = []
    with torch.no_grad():
        # mean and std come from the loader but are not used here.
        for (input, target, mean, std) in data_loader:
            input = input.to(args.device)
            recons = model(input).to('cpu').squeeze(1)
            target = target.cpu()
            psnrl.append(psnr(target.numpy(), recons.numpy()))

    print(f'PSNR: {np.mean(psnrl):.2f}')
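
The loader also yields `mean` and `std`, which the loop above never applies. If the model consumes normalized inputs, reconstructions are usually un-normalized before scoring; a hedged sketch, assuming per-sample scalars of shape `[batch]`:

# Inside the loop, before computing PSNR (illustrative, not the source's code):
mean = mean.unsqueeze(1).unsqueeze(2)   # [batch] -> [batch, 1, 1]
std = std.unsqueeze(1).unsqueeze(2)
recons = recons * std + mean
target = target * std + mean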
Example #5
def eval(args, model, data_loader):
    model.eval()
    psnr_l = []
    ssim_l = []
    with torch.no_grad():
        for (input, target, mean, std, norm) in data_loader:
            input = input.to(args.device)
            recons = model(input).to('cpu').squeeze(1)
            # recons = transforms.complex_abs(recons)  # complex to real
            recons = recons.squeeze()
            target = target.to('cpu')

            psnr_l.append(psnr(target.numpy(), recons.numpy()))
            ssim_l.append(ssim(target.numpy(), recons.numpy()))

    print(f'PSNR: {np.mean(psnr_l):.2f} +- {np.std(psnr_l):.2f}, '
          f'SSIM: {np.mean(ssim_l):.4f} +- {np.std(ssim_l):.4f}')
Example #6
def _evaluate(self, val_logs):
    # Same volume-wise aggregation as Example #2, without the CSV export.
    losses = []
    outputs = defaultdict(list)
    targets = defaultdict(list)
    for log in val_logs:
        losses.append(log['val_loss'].cpu().numpy())
        for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
            outputs[fname].append((slice, log['output'][i]))
            targets[fname].append((slice, log['target'][i]))
    metrics = dict(val_loss=losses, nmse=[], ssim=[], psnr=[])
    for fname in outputs:
        output = np.stack([out for _, out in sorted(outputs[fname])])
        target = np.stack([tgt for _, tgt in sorted(targets[fname])])
        metrics['nmse'].append(evaluate.nmse(target, output))
        metrics['ssim'].append(evaluate.ssim(target, output))
        metrics['psnr'].append(evaluate.psnr(target, output))
    metrics = {metric: np.mean(values) for metric, values in metrics.items()}
    print(metrics, '\n')
    return dict(log=metrics, **metrics)