def evaluate_semanticsegmentation(prediction_directory, groundtruth_directory):
    """Run the official Cityscapes pixel-level evaluation on two directories.

    Args:
        prediction_directory: Directory containing prediction images.
        groundtruth_directory: Directory containing ground-truth images;
            only files whose name contains ``_labelIds.png`` are used.

    Both lists are sorted so that predictions and ground truths pair up
    deterministically: ``os.listdir`` returns entries in arbitrary,
    OS-dependent order, and ``evaluateImgLists`` matches the two lists
    positionally, so unsorted lists could silently compare the wrong pairs.
    """
    # NOTE(review): predictions are not filtered by extension — assumes the
    # prediction directory contains only prediction images; TODO confirm.
    prediction_files = sorted(
        os.path.join(prediction_directory, f)
        for f in os.listdir(prediction_directory))
    groundtruth_files = sorted(
        os.path.join(groundtruth_directory, f)
        for f in os.listdir(groundtruth_directory)
        if "_labelIds.png" in f)
    print(prediction_files)
    print(groundtruth_files)
    evalPixelLevelSemanticLabeling.evaluateImgLists(
        prediction_files, groundtruth_files,
        evalPixelLevelSemanticLabeling.args)
def _evaluate_cityscapes(self, results, logger, imgfile_prefix):
    """Evaluation in Cityscapes protocol.

    Args:
        results (list): Testing results of the dataset.
        logger (logging.Logger | str | None): Logger used for printing
            related information during evaluation. Default: None.
        imgfile_prefix (str | None): The prefix of output image file

    Returns:
        dict[str: float]: Cityscapes evaluation results.
    """
    try:
        import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval  # noqa
    except ImportError:
        raise ImportError('Please run "pip install cityscapesscripts" to '
                          'install cityscapesscripts first.')
    msg = 'Evaluating in Cityscapes style'
    if logger is None:
        msg = '\n' + msg
    print_log(msg, logger=logger)

    # format_results also materializes the prediction images on disk
    # (possibly in a temporary directory); the returned file list itself is
    # not needed below.
    result_files, tmp_dir = self.format_results(results, imgfile_prefix)

    if tmp_dir is None:
        result_dir = imgfile_prefix
    else:
        result_dir = tmp_dir.name

    eval_results = dict()
    print_log(f'Evaluating results under {result_dir} ...', logger=logger)

    CSEval.args.evalInstLevelScore = True
    CSEval.args.predictionPath = osp.abspath(result_dir)
    CSEval.args.evalPixelAccuracy = True
    CSEval.args.JSONOutput = False

    seg_map_list = []
    pred_list = []
    # When evaluating with the official cityscapesscripts, annotations
    # matching **gtCoarse_labelIds.png are collected here (the stock
    # mmsegmentation version matches gtFine; this variant was pointed at
    # the coarse annotations — NOTE(review): confirm this is intentional).
    for seg_map in mmcv.scandir(
            self.ann_dir, 'gtCoarse_labelIds.png', recursive=True):
        seg_map_list.append(osp.join(self.ann_dir, seg_map))
        pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))

    eval_results.update(
        CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))
    if tmp_dir is not None:
        tmp_dir.cleanup()
    return eval_results
def run(self, prediction_filenames, ground_truth_filenames):
    """Run the official Cityscapes pixel-level evaluation and save the result.

    Args:
        prediction_filenames: List of prediction image paths.
        ground_truth_filenames: List of ground-truth image paths, matched
            positionally against the predictions.

    Writes the returned matrix to ``<self._log_dir>/confusion_matrix.csv``.
    """
    # finally run evaluation
    from cityscapesscripts.evaluation import \
        evalPixelLevelSemanticLabeling as CSEval

    # `evaluateImgLists` reads its options via attribute access
    # (args.quiet, args.evalPixelAccuracy, ...), so passing a plain dict
    # such as {'quiet': True} raises AttributeError. Use the module's own
    # argument namespace, as the rest of this codebase does.
    CSEval.args.quiet = True
    confusion_matrix = CSEval.evaluateImgLists(
        prediction_filenames, ground_truth_filenames, CSEval.args)

    import numpy as np
    # NOTE(review): assumes the return value is array-like (a confusion
    # matrix); some cityscapesscripts versions return a result dict —
    # verify against the pinned version.
    np.savetxt("{}/confusion_matrix.csv".format(self._log_dir),
               confusion_matrix)
# Append the ground-truth path prepared above (src comes from the
# preceding loop iteration outside this chunk).
groundTruthImgList.append(src)

# Collect the four prediction images written by the sampling step.
predictionImgList = [
    'logs/MODEL2001tmp_gta25k20_segment_LinkNet_csEval_256x512_bs4_flip_crop_bright_lr2e-4/sample/predictionImg%02d.png' % i
    for i in range(4)
]

# Run the official Cityscapes pixel-level evaluation; both lists are
# matched positionally. (Removed a large block of commented-out
# scipy.misc.imsave / trainId2Id experimentation code that was dead.)
cs_seg_eval.evaluateImgLists(predictionImgList, groundTruthImgList,
                             cs_seg_eval.args)