def __init__(self, dataset_name, meta, cfg, distributed, output_dir=None, dump=False):
    """
    Args:
        dataset_name (str): name of the dataset to be evaluated.
            It must have either the following corresponding metadata:
            "json_file": the path to the COCO format annotation.
            Or it must be in cvpods's standard dataset format
            so it can be converted to COCO format automatically.
        meta (SimpleNamespace): dataset metadata.
        cfg (CfgNode): cvpods config instance.
        distributed (bool): if True, will collect results from all ranks for
            evaluation. Otherwise, will evaluate the results in the current process.
        output_dir (str): optional, an output directory to dump all
            results predicted on the dataset. The dump contains two files:
            1. "instance_predictions.pth" a file in torch serialization
               format that contains all the raw original predictions.
            2. "coco_instances_results.json" a json file in COCO's result
               format.
        dump (bool): If True, after the evaluation is completed, a Markdown file
            that records the model evaluation metrics and corresponding scores
            will be generated in the working directory.
    """
    self._dump = dump
    self.cfg = cfg
    self._tasks = self._tasks_from_config(cfg)
    self._distributed = distributed
    self._output_dir = output_dir
    self._cpu_device = torch.device("cpu")
    self._logger = logging.getLogger(__name__)

    self._metadata = meta
    if not hasattr(self._metadata, "json_file"):
        self._logger.warning(
            f"json_file was not found in MetaDataCatalog for '{dataset_name}'."
            " Trying to convert it to COCO format ..."
        )
        cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
        self._metadata.json_file = cache_path
        convert_to_coco_json(dataset_name, cache_path)

    json_file = PathManager.get_local_path(self._metadata.json_file)
    with contextlib.redirect_stdout(io.StringIO()):
        self._coco_api = COCO(json_file)

    self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS
    # Test set json files do not contain annotations (evaluation must be
    # performed using the COCO evaluation server).
    self._do_evaluation = "annotations" in self._coco_api.dataset
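# Usage sketch (assumption, not from the source): how this constructor is
# typically invoked. The class name `COCOEvaluator`, the `cfg` object, and the
# dataset/path names are hypothetical; only the signature above is confirmed.
from types import SimpleNamespace

meta = SimpleNamespace(
    json_file="datasets/coco/annotations/instances_val2017.json"
)
evaluator = COCOEvaluator(  # hypothetical class exposing the __init__ above
    dataset_name="coco_2017_val",
    meta=meta,
    cfg=cfg,  # cvpods config; must provide TEST.KEYPOINT_OKS_SIGMAS
    distributed=True,   # gather predictions from all ranks before evaluating
    output_dir="./output/inference",
    dump=True,          # also emit a Markdown metrics report
)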
def __init__(self, dataset_name, meta, cfg, distributed, output_dir=None, dump=False):
    """
    Args:
        dataset_name (str): name of the dataset to be evaluated.
            It must have either the following corresponding metadata:
            "json_file": the path to the COCO format annotation.
            Or it must be in cvpods's standard dataset format
            so it can be converted to COCO format automatically.
        meta (SimpleNamespace): dataset metadata.
        cfg (CfgNode): cvpods config instance.
        distributed (bool): if True, will collect results from all ranks for
            evaluation. Otherwise, will evaluate the results in the current process.
        output_dir (str): optional, an output directory to dump results.
        dump (bool): If True, after the evaluation is completed, a Markdown file
            that records the model evaluation metrics and corresponding scores
            will be generated in the working directory.
    """
    self._dump = dump
    self._tasks = self._tasks_from_config(cfg)
    self._distributed = distributed
    self._output_dir = output_dir
    self._cpu_device = torch.device("cpu")
    self._logger = logging.getLogger(__name__)

    self._metadata = meta
    if not hasattr(self._metadata, "json_file"):
        self._logger.warning(
            f"json_file was not found in MetaDataCatalog for '{dataset_name}'"
        )
        cache_path = convert_to_coco_json(dataset_name, output_dir)
        self._metadata.json_file = cache_path

    json_file = PathManager.get_local_path(self._metadata.json_file)
    with contextlib.redirect_stdout(io.StringIO()):
        self._coco_api = COCO(json_file)

    self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS
    # Test set json files do not contain annotations (evaluation must be
    # performed using the COCO evaluation server).
    self._do_evaluation = "annotations" in self._coco_api.dataset
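# Sketch (assumption): how the `_do_evaluation` flag set above is usually
# consumed at evaluation time. The method name `evaluate` and the empty-dict
# return convention are assumptions modeled on detectron2-style evaluators,
# not confirmed by this snippet.
def evaluate(self):
    if not self._do_evaluation:
        # No ground-truth annotations in the json: predictions can only be
        # dumped to output_dir for submission to the COCO evaluation server.
        self._logger.warning(
            "Annotations are not available for this dataset; "
            "skipping metric computation."
        )
        return {}
    # ... otherwise run COCOeval on the accumulated predictions ...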
def __init__(self, dataset_name, meta, cfg, distributed, output_dir=None, dump=False):
    """
    Args:
        dataset_name (str): name of the dataset to be evaluated.
            It must have either the following corresponding metadata:
            "json_file": the path to the COCO format annotation.
            Or it must be in cvpods's standard dataset format
            so it can be converted to COCO format automatically.
        meta (SimpleNamespace): dataset metadata.
        cfg (CfgNode): config instance.
        distributed (bool): if True, will collect results from all ranks for
            evaluation. Otherwise, will evaluate the results in the current process.
        output_dir (str): optional, an output directory to dump results.
        dump (bool): If True, after the evaluation is completed, a Markdown file
            that records the model evaluation metrics and corresponding scores
            will be generated in the working directory.
    """
    self._dump = dump
    self._tasks = self._tasks_from_config(cfg)
    self._distributed = distributed
    self._output_dir = output_dir
    self._cpu_device = torch.device("cpu")
    self._logger = logging.getLogger(__name__)

    self._metadata = meta
    if not hasattr(self._metadata, "json_file"):
        self._logger.warning(
            f"json_file was not found in MetaDataCatalog for '{dataset_name}'"
        )
        cache_path = convert_to_coco_json(dataset_name, output_dir)
        self._metadata.json_file = cache_path

    self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS
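# Sketch (assumption): a plausible implementation of the `_tasks_from_config`
# helper referenced by all three constructors, modeled on the detectron2
# convention that cvpods derives from. The flag names MODEL.MASK_ON and
# MODEL.KEYPOINT_ON are assumptions, not confirmed by this snippet.
def _tasks_from_config(self, cfg):
    """Infer the COCO tasks ("bbox", "segm", "keypoints") to evaluate."""
    tasks = ("bbox",)
    if cfg.MODEL.MASK_ON:
        tasks = tasks + ("segm",)
    if cfg.MODEL.KEYPOINT_ON:
        tasks = tasks + ("keypoints",)
    return tasks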