-
Notifications
You must be signed in to change notification settings - Fork 3
/
evaluate.py
144 lines (106 loc) · 3.92 KB
/
evaluate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
import argparse
import pathlib
from argparse import ArgumentParser
import h5py
import numpy as np
from runstats import Statistics
from skimage.measure import compare_psnr, compare_ssim
from skimage.filters import laplace
from tqdm import tqdm
# adding hfn metric
def hfn(gt, pred):
    """Compute the High Frequency Error Norm (HFN), averaged over slices.

    For each slice along the last axis, the prediction is clipped to [0, 1],
    both slices are high-pass filtered with a Laplacian, and the energy of
    the Laplacian difference is normalized by the ground-truth Laplacian
    energy. Assumes arrays are shaped (H, W, num_slices) — confirm at caller.

    Args:
        gt: ground-truth volume, last axis indexes slices.
        pred: predicted volume, same shape as ``gt``; NOT modified.

    Returns:
        Mean HFN over all slices (numpy float).
    """
    hfn_total = []
    for ii in range(gt.shape[-1]):
        gt_slice = gt[:, :, ii]
        # Clip on a copy: the original code assigned into a view of `pred`,
        # silently mutating the caller's array as a side effect.
        pred_slice = np.clip(pred[:, :, ii], 0, 1)
        gt_slice_laplace = laplace(gt_slice)
        pred_slice_laplace = laplace(pred_slice)
        hfn_slice = np.sum((gt_slice_laplace - pred_slice_laplace) ** 2) \
            / np.sum(gt_slice_laplace ** 2)
        hfn_total.append(hfn_slice)
    return np.mean(hfn_total)
def mse(gt, pred):
    """Mean Squared Error (MSE) between ground truth and prediction."""
    diff = gt - pred
    return np.mean(diff * diff)
def nmse(gt, pred):
    """Normalized Mean Squared Error: ||gt - pred||^2 / ||gt||^2."""
    error_energy = np.linalg.norm(gt - pred) ** 2
    reference_energy = np.linalg.norm(gt) ** 2
    return error_energy / reference_energy
def psnr(gt, pred):
    """Peak Signal to Noise Ratio (PSNR), using the ground-truth max as range."""
    peak = gt.max()
    return compare_psnr(gt, pred, data_range=peak)
def ssim(gt, pred):
    """Structural Similarity Index Metric (SSIM).

    Treats the last axis as channels (multichannel=True) and uses the
    ground-truth maximum as the data range.
    """
    data_range = gt.max()
    return compare_ssim(gt, pred, multichannel=True, data_range=data_range)
# Registry mapping metric name -> scoring function; consumed by Metrics.
METRIC_FUNCS = {
    'MSE': mse,
    'NMSE': nmse,
    'PSNR': psnr,
    'SSIM': ssim,
    'HFN': hfn,
}
class Metrics:
    """
    Maintains running statistics for a given collection of metrics.
    """

    def __init__(self, metric_funcs):
        """
        Args:
            metric_funcs: mapping of metric name -> function(target, recons).
        """
        # Keep the mapping so push() evaluates exactly the metrics this
        # instance was constructed with. The original push() iterated the
        # global METRIC_FUNCS, which raises KeyError (or silently computes
        # the wrong set) whenever a caller passes a different collection.
        self.metric_funcs = dict(metric_funcs)
        self.metrics = {
            metric: Statistics() for metric in self.metric_funcs
        }

    def push(self, target, recons):
        """Evaluate every configured metric on (target, recons) and record it."""
        for metric, func in self.metric_funcs.items():
            self.metrics[metric].push(func(target, recons))

    def means(self):
        """Return {metric name: running mean}."""
        return {
            metric: stat.mean() for metric, stat in self.metrics.items()
        }

    def stddevs(self):
        """Return {metric name: running standard deviation}."""
        return {
            metric: stat.stddev() for metric, stat in self.metrics.items()
        }

    def get_report(self):
        """Format 'NAME = mean +/- 2*stddev' for every metric, sorted by name."""
        means = self.means()
        stddevs = self.stddevs()
        metric_names = sorted(means)
        return ' '.join(
            f'{name} = {means[name]:.4g} +/- {2 * stddevs[name]:.4g}'
            for name in metric_names
        )

    # Reinstates the previously commented-out __repr__ by aliasing the
    # identical report formatting.
    __repr__ = get_report
def evaluate(args, recons_key):
    """Compute all metrics over every target/reconstruction file pair.

    Iterates the files in ``args.target_path`` and opens the same-named file
    under ``args.predictions_path``, reading the ground truth from dataset
    ``recons_key`` and the prediction from dataset 'reconstruction'.

    Args:
        args: parsed arguments providing ``target_path`` and
            ``predictions_path`` (pathlib.Path).
        recons_key: HDF5 dataset key for the ground-truth volume.

    Returns:
        A populated Metrics instance.
    """
    metrics = Metrics(METRIC_FUNCS)
    for tgt_file in args.target_path.iterdir():
        # Open read-only: the original relied on h5py's default mode, which
        # is deprecated and may attempt writable access.
        with h5py.File(tgt_file, 'r') as target_f, \
                h5py.File(args.predictions_path / tgt_file.name, 'r') as recons_f:
            # Dataset.value was removed in h5py 3.x; [()] reads the full
            # array and works on both old and new h5py.
            target = target_f[recons_key][()]
            recons = recons_f['reconstruction'][()]
            # Move the slice axis last, (slices, H, W) -> (H, W, slices),
            # to match the per-slice metrics (hfn indexes the last axis).
            # Presumes target is already stored (H, W, slices) — verify data.
            recons = np.transpose(recons, [1, 2, 0])
            metrics.push(target, recons)
    return metrics
if __name__ == '__main__':
    # Command-line entry point: evaluate reconstructions against ground truth
    # and write a one-line metrics report.
    parser = ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--target-path', type=pathlib.Path, required=True,
                        help='Path to the ground truth data')
    parser.add_argument('--predictions-path', type=pathlib.Path, required=True,
                        help='Path to reconstructions')
    parser.add_argument('--report-path', type=pathlib.Path, required=True,
                        help='Path to save metrics')
    cli_args = parser.parse_args()

    # 'volfs' is the HDF5 dataset key holding the ground-truth volume.
    report_text = evaluate(cli_args, 'volfs').get_report()
    with open(cli_args.report_path / 'report.txt', 'w') as report_file:
        report_file.write(report_text)