Example 1
	def __call__(self):
		if not self.models.is_dir():
			raise ValueError(f"{self.models} is not a directory.")

		self.eval_dir = self.save/'Evaluations'
		self.fig_dir = self.save/'Figures'
		self.eval_dir.mkdir(parents=True, exist_ok=True)
		self.fig_dir.mkdir(parents=True, exist_ok=True)
		self.bin_only = set(map(str.lower, ensure_iterable(self.extra.get('no_roc', ('ScleraMaskRCNN',)), True)))

		plt.rcParams['font.family'] = 'Times New Roman'
		plt.rcParams['font.weight'] = 'normal'
		plt.rcParams['font.size'] = 24

		print("Sorting models by their binary F1-Score")
		self._sorted_models = sorted(os.listdir(self.models), key=lambda model: self._load(model, 'Overall')[1].f1score.mean, reverse=True)

		self._experiment1()
		for attrs in ATTR_EXP:
			try:
				self._experiment2(attrs)
			except ValueError:
				print(f"Non-existent record for split across {attrs}s.")
		self._experiment3()

		if self.plot:
			plt.show()
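The helper ensure_iterable is used throughout these examples to normalise a scalar-or-collection argument into a tuple. Its definition is not included in the snippets; a minimal sketch consistent with the call sites (the second argument apparently tells it to treat strings as scalars) might look like this:

def ensure_iterable(value, treat_str_as_scalar=False):
	# Hypothetical reconstruction; only the call sites in these examples are known.
	if isinstance(value, str) and treat_str_as_scalar:
		return (value,)  # a string is one value, not a sequence of characters
	try:
		return tuple(value)
	except TypeError:
		return (value,)  # non-iterable scalar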
Example 2
    def _experiment2(self, pred_bin_gt, attrs):
        attrs = ensure_iterable(attrs, True)
        print(
            f"Experiment 2: Performance across different {', '.join(attr + 's' for attr in attrs)}"
        )
        values = {
            attr: {getattr(sample, attr)
                   for sample in pred_bin_gt}
            for attr in attrs
        }

        for current_values in dict_product(values):
            current_name = ", ".join(attr.title() + "=" + val.name.title()
                                     for attr, val in current_values.items())
            data = (pbg for sample, pbg in pred_bin_gt.items() if all(
                getattr(sample, attr) == val
                for attr, val in current_values.items()))
            self._compute(data, current_name)
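dict_product (defined in the auxiliary section of Example 6) expands a dict of per-attribute value sets into every combination of attribute assignments, i.e. their Cartesian product. A standalone demo of the generator:

import itertools

dict_product = lambda d: (dict(zip(d, x)) for x in itertools.product(*d.values()))

values = {'light': ['natural', 'indoor'], 'phone': ['iPhone', 'Xperia']}
for combo in dict_product(values):
    print(combo)
# {'light': 'natural', 'phone': 'iPhone'}
# {'light': 'natural', 'phone': 'Xperia'}
# ...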
Example 3
	def _experiment2(self, attrs):
		attrs = ensure_iterable(attrs, True)
		print(f"Experiment 2: Performance across different {', '.join(attr + 's' for attr in attrs)}")
		value_re = re.compile(', '.join(attr.title() + r"=([^,]*)" for attr in attrs))

		info = defaultdict(dict)
		for model in self._sorted_models:
			for f in os.listdir(self.models/model/'Pickles'):
				bname = os.path.splitext(f)[0]
				if value_re.fullmatch(bname):
					info[model][bname] = self._load(model, bname)

		bar_name = ','.join(attr.title() for attr in attrs)
		with Bar(bar_name, self.fig_dir, self._sorted_models, max(len(d) for d in info.values())) as bar:
			for i, (model, model_info) in enumerate(info.items()):
				# ROC plotting for this experiment is currently disabled:
				# with ROC(f"{model} - {bar_name}", self.fig_dir) as roc:
				colours = iter(CMAP(np.linspace(0, 1, len(model_info))))
				for j, (bname, (pred_eval, bin_eval, plots, mean_plot, lower_std, upper_std)) in enumerate(model_info.items()):
					self._save_evals(pred_eval, bin_eval, f'{model} - {bname}')

					label = ",".join(value_re.fullmatch(bname).groups())
					colour = next(colours)
					# roc.plot(mean_plot, lower_std, upper_std, label=label, colour=colour, bin_only=model.lower() in self.bin_only)
					bar.plot(bin_eval, i, j, label=label, colour=colour)
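The pickle basenames matched by value_re follow the "Attr=Value" naming produced in Example 4 (e.g. "Gaze=Left" or "Light=Natural, Phone=Iphone"). A quick check of the pattern, assuming a hypothetical attribute pair:

import re

attrs = ('light', 'phone')  # hypothetical entry of ATTR_EXP
value_re = re.compile(', '.join(attr.title() + r"=([^,]*)" for attr in attrs))

match = value_re.fullmatch('Light=Natural, Phone=Iphone')
print(match.groups())  # ('Natural', 'Iphone')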
Example 4
    def __call__(self):
        if not self.models.is_dir():
            raise ValueError(f"{self.models} is not a directory.")
        if not self.gt.is_dir():
            raise ValueError(f"{self.gt} is not a directory.")

        self.threshold = np.linspace(
            0, 1,
            self.extra.get('interp', self.extra.get('interp_points', 1000)))

        if self.dataset.lower() == 'mobius':
            from datasets import MOBIUS
            dataset = MOBIUS
        elif self.dataset.lower() == 'sbvpi':
            from datasets import SBVPI
            dataset = SBVPI
        else:
            from datasets import Dataset
            dataset = Dataset

        dataset = dataset.from_dir(self.gt, mask_dir=None)
        dataset.shuffle()

        with tqdm_joblib(tqdm(desc="Reading GT", total=len(dataset))):
            gt = dict(
                Parallel(n_jobs=-1)(delayed(self._load_gt)(gt_sample)
                                    for gt_sample in dataset))

        for self._model in self.models.iterdir():
            self._predictions = self._model / 'Predictions'
            self._binarised = self._model / 'Binarised'
            if not self._predictions.is_dir():
                raise ValueError(f"{self._predictions} is not a directory.")
            if not self._binarised.is_dir():
                raise ValueError(f"{self._binarised} is not a directory.")

            # Check if all pickles already exist
            # Flatten ATTR_EXP, whose entries are either single attribute
            # names or tuples of names
            flat_attrs = tuple()
            for attr in ATTR_EXP:
                try:
                    flat_attrs += attr  # attr is a tuple of names
                except TypeError:
                    flat_attrs += attr,  # attr is a single name; wrap it
            unique_attr_values = {
                attr: {getattr(sample, attr)
                       for sample in dataset}
                for attr in set(flat_attrs)
            }
            exp_attr_values = [{
                attr: unique_attr_values[attr]
                for attr in ensure_iterable(attrs, True)
            } for attrs in ATTR_EXP]
            attr_experiments = {
                ', '.join(f'{attr.title()}={val.name.title()}'
                          for attr, val in current_values.items()):
                current_values
                for current_exp in exp_attr_values
                for current_values in dict_product(current_exp)
            }
            all_names = ['Overall'] + list(attr_experiments)
            if not self.extra.get('overwrite', False) and all(
                (self._model / f'Pickles/{name}.pkl').is_file()
                    for name in all_names):
                print(
                    f"All pickles already exist, skipping {self._model.name}")
                continue

            #TODO: Move folds here and only load one fold's predictions at a time
            # We can't do this because experiment2 needs to have different splits. If we absolutely need this, we'll have to reread the images for each sub-experiment anew.
            # We can cache the images for each split until the end of the split - that way we'll only need to read some of the images anew.

            print(f"Evaluating model {self._model.name}")
            with tqdm_joblib(
                    tqdm(desc="Reading predictions", total=len(dataset))):
                pred_bin = dict(
                    Parallel(n_jobs=-1)(delayed(self._process_image)(gt_sample)
                                        for gt_sample in dataset))
            # This will filter out non-existing predictions, so the code will still work, but missing predictions should be addressed (otherwise evaluation is unfair)
            pred_bin_gt = {
                gt_sample: (*pred_bin[gt_sample], gt[gt_sample])
                for gt_sample in dataset if pred_bin[gt_sample] is not None
            }

            # Overall
            self._experiment1(pred_bin_gt)

            # Split by lighting, phones, and gaze
            for attrs in ATTR_EXP:
                self._experiment2(pred_bin_gt, attrs)
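tqdm_joblib bridges joblib's Parallel with a tqdm progress bar. Its definition isn't included in the snippets; the widely used recipe, which temporarily patches joblib's batch-completion callback, looks like this (an assumption about the actual helper):

import contextlib
import joblib

@contextlib.contextmanager
def tqdm_joblib(tqdm_object):
    """Patch joblib so each completed batch advances the given tqdm bar."""
    class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack):
        def __call__(self, *args, **kwargs):
            tqdm_object.update(n=self.batch_size)
            return super().__call__(*args, **kwargs)

    old_callback = joblib.parallel.BatchCompletionCallBack
    joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback
    try:
        yield tqdm_object
    finally:
        joblib.parallel.BatchCompletionCallBack = old_callback
        tqdm_object.close()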
Example 5
    def __init__(self, metrics, *args):
        super().__init__((metric.name, metric)
                         for metric in ensure_iterable(metrics) + args)
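This constructor suggests the enclosing class subclasses dict, mapping each metric's name to the metric object and accepting either an iterable of metrics or several positional ones. A hypothetical, self-contained illustration:

from types import SimpleNamespace

# One-line stand-in for the repo's ensure_iterable helper (see Example 1).
ensure_iterable = lambda v: v if isinstance(v, tuple) else (v,)

class Metrics(dict):
    def __init__(self, metrics, *args):
        super().__init__((metric.name, metric)
                         for metric in ensure_iterable(metrics) + args)

f1 = SimpleNamespace(name='f1score')  # stand-ins for real metric objects
iou = SimpleNamespace(name='iou')
metrics = Metrics(f1, iou)
print(metrics['iou'] is iou)  # True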
Example 6
	'sclerau-net': (2.16e6, 4.32e6),
	'multi-deeplab': (41e6, 117e9),
	'multi-fcn': (134e6, 112e9),
	'rgb-ss-eye-ms': (22.7e6, None),
	'y-ss-eye-ms': (22.6e6, None),
	'color ritnet': (250e3, None),
	's-net': (1.18e6, None),
	'unet-p': (1.94e6, 3.88e6),
	'fcn8': (138e6, 15e9),
	'mask2020cl': (64e6, None),
	'mu-net': (409e3, 180e9)
}

# Auxiliary stuff
dict_product = lambda d: (dict(zip(d, x)) for x in itertools.product(*d.values()))
FIG_EXTS = ensure_iterable(FIG_EXTS, True)


class Main:
	def __init__(self, *args, **kw):
		# Default values
		root = Path('/path/to/Segmentation/Results/Sclera/2020 SSBC')
		self.models = Path(args[0] if len(args) > 0 else kw.get('models', root/'Models'))
		self.save = Path(args[1] if len(args) > 1 else kw.get('save', root))
		self.plot = kw.get('plot', False)

		# Extra keyword arguments
		self.extra = DotDict(**kw)

	def __str__(self):
		return str(vars(self))
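DotDict (used for self.extra) is presumably a dict with attribute-style access; a common minimal implementation, offered here as an assumption rather than the repo's actual class:

class DotDict(dict):
	# Assumed implementation: plain dict plus attribute access.
	__getattr__ = dict.get          # missing keys read as None
	__setattr__ = dict.__setitem__
	__delattr__ = dict.__delitem__

extra = DotDict(interp=500, overwrite=True)
print(extra.interp, extra.get('no_roc', ()))  # 500 ()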