def evaluate():
    """Convert raw two-channel predictions to panoptic format, then score them."""
    # Stage 1: produce panoptic-format pictures.
    print("Convert to panoptic format!!!")
    converter(cfg.two_channel_pngs, cfg.img_info, cfg.panoptic_cate,
              cfg.generated_panoptic, cfg.panoptic_res)
    print("Finished!!!")
    # Stage 2: run the PQ evaluation on the generated results.
    print("Start to evaluate!!!")
    pq_compute(cfg.panoptic_info, cfg.panoptic_res, cfg.panoptic_gt,
               cfg.generated_panoptic)
def evaluate(self):
    """Write gathered predictions into the GT JSON layout and compute PQ.

    Returns an OrderedDict {"panoptic_seg": {...}} of PQ/SQ/RQ percentages
    for All / Things / Stuff.
    """
    from panopticapi.evaluation import pq_compute

    gt_json_file = self._gt_json_file
    gt_folder = self._gt_folder
    pred_json_file = self._pred_json_file
    pred_folder = self._pred_folder

    # Reuse the ground-truth JSON as a template, swapping in our predictions.
    with open(gt_json_file, "r") as src:
        json_data = json.load(src)
    json_data["annotations"] = self._predictions
    with PathManager.open(self._predictions_json, "w") as dst:
        dst.write(json.dumps(json_data, cls=MyEncoder))

    pq_res = pq_compute(gt_json_file, pred_json_file, gt_folder, pred_folder)

    # Flatten the three buckets into percentage metrics (PQ, SQ, RQ each).
    res = {
        metric.upper() + suffix: 100 * pq_res[bucket][metric]
        for suffix, bucket in (("", "All"), ("_th", "Things"), ("_st", "Stuff"))
        for metric in ("pq", "sq", "rq")
    }
    results = OrderedDict({"panoptic_seg": res})
    self._logger.info(results)
    _print_panoptic_results(pq_res)
    return results
def summarize(self):
    """On the main process, dump predictions to JSON and run PQ evaluation.

    Returns the pq_compute result on the main process, None elsewhere.
    """
    if not utils.is_main_process():
        return None
    predictions_json = os.path.join(self.output_dir, "predictions.json")
    with open(predictions_json, "w") as out:
        out.write(json.dumps({"annotations": self.predictions}))
    return pq_compute(self.gt_json, predictions_json,
                      gt_folder=self.gt_folder, pred_folder=self.output_dir)
def evaluate(self):
    """Gather predictions across workers and compute panoptic quality (PQ).

    Only the main process evaluates; other workers return None after the
    gather. Results are an OrderedDict {"panoptic_seg": {...}}.
    """
    comm.synchronize()
    gathered = comm.gather(self._predictions)
    self._predictions = list(itertools.chain(*gathered))
    if not comm.is_main_process():
        return

    gt_json = self._metadata.panoptic_json
    gt_folder = self._metadata.panoptic_root

    with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
        logger.info(
            "Writing all panoptic predictions to {} ...".format(pred_dir))
        # Dump each prediction's PNG payload; drop the bytes from the record
        # so the JSON below stays small.
        for pred in self._predictions:
            with open(os.path.join(pred_dir, pred["file_name"]), "wb") as fh:
                fh.write(pred.pop("png_string"))

        with open(gt_json, "r") as fh:
            json_data = json.load(fh)
        json_data["annotations"] = self._predictions
        with megfile.smart_open(self._predictions_json, "w") as fh:
            fh.write(json.dumps(json_data))

        from panopticapi.evaluation import pq_compute

        # Silence panopticapi's verbose stdout during scoring.
        with contextlib.redirect_stdout(io.StringIO()):
            pq_res = pq_compute(
                gt_json,
                self._predictions_json,
                gt_folder=gt_folder,
                pred_folder=pred_dir,
            )

    res = {}
    for suffix, bucket in (("", "All"), ("_th", "Things"), ("_st", "Stuff")):
        for metric in ("pq", "sq", "rq"):
            res[metric.upper() + suffix] = 100 * pq_res[bucket][metric]
    results = OrderedDict({"panoptic_seg": res})
    table = _print_panoptic_results(pq_res)

    if self._dump:
        dump_info_one_task = {
            "task": "panoptic_seg",
            "tables": [table],
        }
        _dump_to_markdown([dump_info_one_task])
    return results
def evaluate(self):
    """Collect predictions from all workers and evaluate panoptic quality.

    Only the main process runs the evaluation; others return None.
    """
    comm.synchronize()
    self._predictions = comm.gather(self._predictions)
    self._predictions = list(itertools.chain(*self._predictions))
    if not comm.is_main_process():
        return

    # PanopticApi requires local files.
    gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
    gt_folder = PathManager.get_local_path(self._metadata.panoptic_root)

    with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
        logger.info(
            "Writing all panoptic predictions to {} ...".format(pred_dir))
        for entry in self._predictions:
            png_path = os.path.join(pred_dir, entry["file_name"])
            with open(png_path, "wb") as out:
                out.write(entry.pop("png_string"))

        with open(gt_json, "r") as src:
            json_data = json.load(src)
        json_data["annotations"] = self._predictions

        # Persist the predictions JSON; fall back to the temp dir when no
        # output directory was configured.
        output_dir = self._output_dir or pred_dir
        predictions_json = os.path.join(output_dir, "predictions.json")
        with PathManager.open(predictions_json, "w") as dst:
            dst.write(json.dumps(json_data))

        from panopticapi.evaluation import pq_compute

        # Suppress panopticapi's progress prints.
        with contextlib.redirect_stdout(io.StringIO()):
            pq_res = pq_compute(
                gt_json,
                PathManager.get_local_path(predictions_json),
                gt_folder=gt_folder,
                pred_folder=pred_dir,
            )

    metrics = {}
    for suffix, bucket in (("", "All"), ("_th", "Things"), ("_st", "Stuff")):
        for key in ("pq", "sq", "rq"):
            metrics[key.upper() + suffix] = 100 * pq_res[bucket][key]
    results = OrderedDict({"panoptic_seg": metrics})
    _print_panoptic_results(pq_res)
    return results
def validation_epoch_end(self, outputs):
    """Write prediction files for the epoch, then log panoptic-quality metrics."""
    # Create and save all prediction files.
    generate_pred_panoptic(self.cfg, outputs)
    root = self.cfg.DATASET_PATH
    # Compute the PQ metric with panopticapi.
    pq_res = pq_compute(
        gt_json_file=os.path.join(root, self.cfg.VALID_JSON),
        pred_json_file=os.path.join(root, self.cfg.PRED_JSON),
        gt_folder=os.path.join(root, "gtFine/cityscapes_panoptic_val/"),
        pred_folder=os.path.join(root, self.cfg.PRED_DIR)
    )
    # Log PQ/SQ/RQ for All, Things, and Stuff as percentages.
    for suffix, bucket in (("", "All"), ("_th", "Things"), ("_st", "Stuff")):
        for metric in ("pq", "sq", "rq"):
            self.log(metric.upper() + suffix, 100 * pq_res[bucket][metric])
# NOTE(review): the lines up to the logger.info call appear to be the tail of a
# table-printing helper (`headers` and `pq_res` are defined above this chunk —
# not visible here); verify indentation against the enclosing definition.
data = []
for name in ["All", "Things", "Stuff"]:
    row = [name]
    row.extend(pq_res[name][k] * 100 for k in ["pq", "sq", "rq"])
    row.append(pq_res[name]["n"])
    data.append(row)
table = tabulate(
    data,
    headers=headers,
    tablefmt="pipe",
    floatfmt=".3f",
    stralign="center",
    numalign="center",
)
logger.info("Panoptic Evaluation Results:\n" + table)


if __name__ == "__main__":
    from mydl.utils.logger import setup_logger

    logger = setup_logger()

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--gt-json")
    parser.add_argument("--gt-dir")
    parser.add_argument("--pred-json")
    parser.add_argument("--pred-dir")
    args = parser.parse_args()

    from panopticapi.evaluation import pq_compute

    # Silence panopticapi's stdout chatter; results go through the logger.
    with contextlib.redirect_stdout(io.StringIO()):
        pq_res = pq_compute(
            args.gt_json,
            args.pred_json,
            gt_folder=args.gt_dir,
            pred_folder=args.pred_dir,
        )
    _print_panoptic_results(pq_res)
def evaluate(self):
    """Gather predictions, run panoptic evaluation, and report PQ metrics.

    Returns an OrderedDict {"panoptic_seg": {...}} on the main process and
    None on all other workers. Also prints overall, per-class, and per-image
    panoptic results.

    Fix: removed a large block of commented-out copytree/results_dir code and
    the unused locals/imports it needed (`rand_str`, `results_dir`, `string`,
    `random`, `shutil`).
    """
    comm.synchronize()
    self._predictions = comm.gather(self._predictions)
    self._predictions = list(itertools.chain(*self._predictions))
    if not comm.is_main_process():
        return

    # PanopticApi requires local files.
    gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
    gt_folder = PathManager.get_local_path(self._metadata.panoptic_root)

    with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
        logger.info("Writing all panoptic predictions to {} ...".format(pred_dir))
        # Write each prediction's PNG and strip the raw bytes from the record
        # so the JSON dump below stays small.
        for p in self._predictions:
            with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
                f.write(p.pop("png_string"))

        with open(gt_json, "r") as f:
            json_data = json.load(f)
        json_data["annotations"] = self._predictions
        with PathManager.open(self._predictions_json, "w") as f:
            f.write(json.dumps(json_data))

        from panopticapi.evaluation import pq_compute

        # NOTE(review): this expects a patched panopticapi whose pq_compute
        # also returns per-image results (stock pq_compute returns one value).
        with contextlib.redirect_stdout(io.StringIO()):
            pq_res, pq_per_image_res = pq_compute(
                gt_json,
                PathManager.get_local_path(self._predictions_json),
                gt_folder=gt_folder,
                pred_folder=pred_dir,
            )

    res = {}
    res["PQ"] = 100 * pq_res["All"]["pq"]
    res["SQ"] = 100 * pq_res["All"]["sq"]
    res["RQ"] = 100 * pq_res["All"]["rq"]
    res["PQ_th"] = 100 * pq_res["Things"]["pq"]
    res["SQ_th"] = 100 * pq_res["Things"]["sq"]
    res["RQ_th"] = 100 * pq_res["Things"]["rq"]
    res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
    res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
    res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]
    results = OrderedDict({"panoptic_seg": res})

    # Convert per-class dataset ids to human-readable class names.
    per_class = pq_res['per_class']
    new_per_class = {}
    for label in per_class.keys():
        isthing = label in self._metadata.thing_dataset_id_to_contiguous_id.keys()
        if isthing:
            class_name = self._metadata.thing_classes[
                self._metadata.thing_dataset_id_to_contiguous_id[label]]
        else:
            class_name = self._metadata.stuff_classes[
                self._metadata.stuff_dataset_id_to_contiguous_id[label]]
        new_per_class[class_name] = per_class[label]
    pq_res['per_class'] = new_per_class

    _print_panoptic_results(pq_res)
    _print_panoptic_results_per_image(pq_per_image_res)
    return results