Code example #1
    def reset(self):
        self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
        self._temp_dir = self._working_dir.name
        # All workers will write to the same results directory
        # TODO this does not work in distributed training
        self._temp_dir = comm.all_gather(self._temp_dir)[0]
        if self._temp_dir != self._working_dir.name:
            self._working_dir.cleanup()
        self._logger.info(
            "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir)
        )
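The key pattern here is that comm.all_gather returns each rank's value in rank order, so indexing with [0] makes every worker adopt rank 0's temporary directory, and workers whose own directory was not chosen clean it up. Below is a minimal single-process sketch of that pattern; fake_all_gather is a hypothetical stand-in for detectron2.utils.comm.all_gather so the snippet runs without a distributed setup.

import tempfile

def fake_all_gather(obj):
    # Hypothetical single-process stand-in for comm.all_gather:
    # in a real distributed run the list holds one entry per rank, rank 0 first.
    return [obj]

working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
temp_dir = fake_all_gather(working_dir.name)[0]  # every worker adopts rank 0's directory
if temp_dir != working_dir.name:
    # Workers other than rank 0 discard their own directory and write into rank 0's.
    working_dir.cleanup()
print("Writing results to", temp_dir)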
Code example #2
    def evaluate(self):
        """
        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):

        * Mean intersection-over-union averaged across classes (mIoU)
        * Frequency Weighted IoU (fwIoU)
        * Mean pixel accuracy averaged across classes (mACC)
        * Pixel Accuracy (pACC)
        """
        if self._distributed:
            comm.synchronize()
            conf_matrix_list = comm.all_gather(self._conf_matrix)
            self._predictions = comm.all_gather(self._predictions)
            self._predictions = list(itertools.chain(*self._predictions))
            if not comm.is_main_process():
                return

            self._conf_matrix = np.zeros_like(self._conf_matrix)
            for conf_matrix in conf_matrix_list:
                self._conf_matrix += conf_matrix

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir,
                                     "sem_seg_predictions.json")
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(self._predictions))

        # NOTE: np.float was removed in NumPy 1.24; use the builtin float instead.
        acc = np.zeros(self._num_classes, dtype=float)
        iou = np.zeros(self._num_classes, dtype=float)
        tp = self._conf_matrix.diagonal()[:-1].astype(float)  # true positives per class
        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)  # pixels per GT class
        class_weights = pos_gt / np.sum(pos_gt)
        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)  # pixels per predicted class
        acc_valid = pos_gt > 0
        acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
        iou_valid = (pos_gt + pos_pred) > 0
        union = pos_gt + pos_pred - tp
        # Fill iou with the same mask that is used in the mIoU denominator below.
        iou[iou_valid] = tp[iou_valid] / union[iou_valid]
        macc = np.sum(acc) / np.sum(acc_valid)
        miou = np.sum(iou) / np.sum(iou_valid)
        fiou = np.sum(iou * class_weights)
        pacc = np.sum(tp) / np.sum(pos_gt)

        res = {}
        res["mIoU"] = 100 * miou
        res["fwIoU"] = 100 * fiou
        res["mACC"] = 100 * macc
        res["pACC"] = 100 * pacc

        if self._output_dir:
            file_path = os.path.join(self._output_dir,
                                     "sem_seg_evaluation.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(res, f)
        results = OrderedDict({"sem_seg": res})

        small_table = create_small_table(res)
        self._logger.info("Evaluation results for sem_seg: \n" + small_table)

        if self._dump:
            dump_info_one_task = {
                "task": "sem_seg",
                "tables": [small_table],
            }
            _dump_to_markdown([dump_info_one_task])

        return results
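As a sanity check on the metric arithmetic above, here is a small hand-made confusion matrix with the four metrics computed the same way. Ground-truth classes run along the columns so that the axis=0 / axis=1 sums match the code above, and the last row and column stand for the ignore label that the [:-1] slicing drops. The numbers are illustrative assumptions, not output of the evaluator.

import numpy as np

conf = np.array([
    [8, 2, 0],   # predicted class 0: 8 correct, 2 were actually class 1
    [1, 5, 0],   # predicted class 1: 1 was actually class 0, 5 correct
    [0, 0, 0],   # ignore label (dropped by the [:-1] slicing)
], dtype=float)

tp = conf.diagonal()[:-1]                 # [8, 5]
pos_gt = conf[:-1, :-1].sum(axis=0)       # pixels per GT class: [9, 7]
pos_pred = conf[:-1, :-1].sum(axis=1)     # pixels per predicted class: [10, 6]
union = pos_gt + pos_pred - tp            # [11, 8]

acc = tp / pos_gt                         # [0.889, 0.714]
iou = tp / union                          # [0.727, 0.625]
print("mACC  %.2f" % (100 * acc.mean()))                            # 80.16
print("mIoU  %.2f" % (100 * iou.mean()))                            # 67.61
print("fwIoU %.2f" % (100 * (iou * pos_gt / pos_gt.sum()).sum()))   # 68.25
print("pACC  %.2f" % (100 * tp.sum() / pos_gt.sum()))               # 81.25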