Example #1
def test_nmse_batch():
    # Batched NMSE from the torch loss should match the per-sample numpy
    # reference averaged over the batch dimension.
    shape4d = [4, 6, 32, 32]
    input, input2 = create_input_pair(shape4d, sigma=0.1)
    input_numpy = utils.torch_to_numpy(input)
    input2_numpy = utils.torch_to_numpy(input2)

    err = loss.nmse(input, input2).item()
    err_numpy = 0
    for i in range(shape4d[0]):
        err_curr = nmse(input_numpy[i], input2_numpy[i])
        err_numpy += err_curr
    err_numpy /= shape4d[0]
    assert np.allclose(err, err_numpy)

    # 5D input with a singleton third dimension; the numpy reference drops it
    # with the [:, 0] slice below.
    shape5d = [4, 6, 1, 32, 32]
    input, input2 = create_input_pair(shape5d, sigma=0.1)
    input_numpy = utils.torch_to_numpy(input)
    input2_numpy = utils.torch_to_numpy(input2)

    err = loss.nmse(input, input2).item()
    err_numpy = 0
    for i in range(shape5d[0]):
        err_numpy += nmse(input_numpy[i][:,0], input2_numpy[i][:,0])
    err_numpy /= shape5d[0]

    assert np.allclose(err, err_numpy)
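A minimal sketch of the helpers the test above assumes; the real create_input_pair, utils.torch_to_numpy and numpy reference nmse live in the project's test utilities, so the bodies below are only guesses at their behaviour.

import numpy as np
import torch

def create_input_pair(shape, sigma=0.1):
    # A random tensor and a copy of it perturbed by Gaussian noise.
    input = torch.randn(shape)
    input2 = input + sigma * torch.randn(shape)
    return input, input2

def nmse(gt, pred):
    # Reference normalized mean squared error: ||gt - pred||^2 / ||gt||^2.
    return np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2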
Example #2
 def _evaluate(self, val_logs):
     # Group per-batch outputs and targets by file name so that full volumes
     # can be reassembled and scored.
     losses = []
     outputs = defaultdict(list)
     targets = defaultdict(list)
     for log in val_logs:
         losses.append(log['val_loss'].cpu().numpy())
         for i, (fname, slice) in enumerate(zip(log['fname'],
                                                log['slice'])):
             outputs[fname].append((slice, log['output'][i]))
             targets[fname].append((slice, log['target'][i]))
     metrics = dict(val_loss=losses, nmse=[], ssim=[], psnr=[])
     for fname in outputs:
         # Sort by slice index and stack the slices back into a volume.
         output = np.stack([out for _, out in sorted(outputs[fname])])
         target = np.stack([tgt for _, tgt in sorted(targets[fname])])
         metrics['nmse'].append(evaluate.nmse(target, output))
         metrics['ssim'].append(evaluate.ssim(target, output))
         metrics['psnr'].append(evaluate.psnr(target, output))
     metrics = {
         metric: np.mean(values)
         for metric, values in metrics.items()
     }
     print(metrics, '\n')
     # Save this epoch's mean metrics as one row of a CSV file, appending to
     # the file if it already exists.
     metric_file_path = Path(
         self.hparams.exp_dir) / self.hparams.exp / "validation_metrics"
     metric_file_path.mkdir(parents=True, exist_ok=True)
     metric_file_path = metric_file_path / "metrics.csv"
     df = pd.DataFrame([metrics])
     if metric_file_path.exists():
         df.to_csv(metric_file_path, mode="a", header=False, index=False)
     else:
         df.to_csv(metric_file_path, mode="w", header=True, index=False)
     return dict(log=metrics, **metrics)
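For reference, a rough sketch of volume-level metrics in the style of the evaluate module used above; the exact implementations may differ, and the scikit-image calls here are an assumption.

import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def nmse(gt, pred):
    # Normalized mean squared error over the whole volume.
    return np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2

def psnr(gt, pred):
    # Peak signal-to-noise ratio relative to the target's peak value.
    return peak_signal_noise_ratio(gt, pred, data_range=gt.max())

def ssim(gt, pred):
    # Mean structural similarity over the slices of a (num_slices, H, W) volume.
    return np.mean([
        structural_similarity(gt[i], pred[i], data_range=gt.max())
        for i in range(gt.shape[0])
    ])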
Example #3
def test_nmse(shape):
    # Unbatched NMSE from the torch loss should match the numpy reference
    # computed on the full tensor.
    input, input2 = create_input_pair(shape, sigma=0.1)
    input_numpy = utils.torch_to_numpy(input)
    input2_numpy = utils.torch_to_numpy(input2)

    err = loss.nmse(input, input2, batch=False).item()
    err_numpy = nmse(input_numpy, input2_numpy)
    assert np.allclose(err, err_numpy)
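test_nmse takes shape as an argument, which suggests a parametrized pytest test; one plausible wiring, with purely illustrative shape values:

import pytest

@pytest.mark.parametrize('shape', [[3, 32, 32], [4, 6, 32, 32]])
def test_nmse(shape):
    ...  # body as in the example above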
 def _evaluate(self, val_logs):
     # Same per-volume aggregation as above, but without writing the metrics
     # to a CSV file.
     losses = []
     outputs = defaultdict(list)
     targets = defaultdict(list)
     for log in val_logs:
         losses.append(log['val_loss'].cpu().numpy())
         for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
             outputs[fname].append((slice, log['output'][i]))
             targets[fname].append((slice, log['target'][i]))
     metrics = dict(val_loss=losses, nmse=[], ssim=[], psnr=[])
     for fname in outputs:
         output = np.stack([out for _, out in sorted(outputs[fname])])
         target = np.stack([tgt for _, tgt in sorted(targets[fname])])
         metrics['nmse'].append(evaluate.nmse(target, output))
         metrics['ssim'].append(evaluate.ssim(target, output))
         metrics['psnr'].append(evaluate.psnr(target, output))
     metrics = {metric: np.mean(values) for metric, values in metrics.items()}
     print(metrics, '\n')
     return dict(log=metrics, **metrics)
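The returned dict (a log key plus the flattened metrics) follows the older PyTorch Lightning logging convention; a minimal sketch of how such a helper is typically invoked, assuming a pre-2.0 Lightning validation hook on the same module:

def validation_epoch_end(self, val_logs):
    # Aggregate per-batch validation logs into per-volume metrics and return
    # them so Lightning can log val_loss, nmse, ssim and psnr.
    return self._evaluate(val_logs)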