def test_bad_args(self) -> None:
    with self.assertRaises(NotImplementedError):
        PathManager.copy(
            self._remote_uri, self._remote_uri, foo="foo"  # type: ignore
        )
    with self.assertRaises(NotImplementedError):
        PathManager.exists(self._remote_uri, foo="foo")  # type: ignore
    with self.assertRaises(ValueError):
        PathManager.get_local_path(
            self._remote_uri, foo="foo"  # type: ignore
        )
    with self.assertRaises(NotImplementedError):
        PathManager.isdir(self._remote_uri, foo="foo")  # type: ignore
    with self.assertRaises(NotImplementedError):
        PathManager.isfile(self._remote_uri, foo="foo")  # type: ignore
    with self.assertRaises(NotImplementedError):
        PathManager.ls(self._remote_uri, foo="foo")  # type: ignore
    with self.assertRaises(NotImplementedError):
        PathManager.mkdirs(self._remote_uri, foo="foo")  # type: ignore
    with self.assertRaises(ValueError):
        PathManager.open(self._remote_uri, foo="foo")  # type: ignore
    with self.assertRaises(NotImplementedError):
        PathManager.rm(self._remote_uri, foo="foo")  # type: ignore

    PathManager.set_strict_kwargs_checking(False)

    PathManager.get_local_path(self._remote_uri, foo="foo")  # type: ignore
    f = PathManager.open(self._remote_uri, foo="foo")  # type: ignore
    f.close()
    PathManager.set_strict_kwargs_checking(True)

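# Hedged sketch (assuming the fvcore-style PathHandler API) of why the test above sees two
# different exceptions for the remote URI: operations the URL handler implements
# (get_local_path, open) reject unknown kwargs with ValueError via _check_kwargs, while
# operations it does not implement fall through to the PathHandler base class, which raises
# NotImplementedError. The handler below is illustrative, not part of the original tests.
from fvcore.common.file_io import PathHandler, PathManager


class _ExampleReadOnlyHandler(PathHandler):
    PREFIX = "example://"

    def _get_supported_prefixes(self):
        return [self.PREFIX]

    def _get_local_path(self, path, **kwargs):
        self._check_kwargs(kwargs)  # unknown kwargs -> ValueError when strict checking is on
        return "/tmp/" + path[len(self.PREFIX):]

    # _isdir, _ls, _rm, _copy, ... are not overridden, so they raise NotImplementedError.


PathManager.register_handler(_ExampleReadOnlyHandler())
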
def _loadGEval(self):
    smpl_subdiv_fpath = PathManager.get_local_path(
        "https://dl.fbaipublicfiles.com/densepose/data/SMPL_subdiv.mat"
    )
    pdist_transform_fpath = PathManager.get_local_path(
        "https://dl.fbaipublicfiles.com/densepose/data/SMPL_SUBDIV_TRANSFORM.mat"
    )
    pdist_matrix_fpath = PathManager.get_local_path(
        "https://dl.fbaipublicfiles.com/densepose/data/Pdist_matrix.pkl"
    )
    SMPL_subdiv = loadmat(smpl_subdiv_fpath)
    self.PDIST_transform = loadmat(pdist_transform_fpath)
    self.PDIST_transform = self.PDIST_transform["index"].squeeze()
    UV = np.array([SMPL_subdiv["U_subdiv"], SMPL_subdiv["V_subdiv"]]).squeeze()
    ClosestVertInds = np.arange(UV.shape[1]) + 1
    self.Part_UVs = []
    self.Part_ClosestVertInds = []
    for i in np.arange(24):
        self.Part_UVs.append(UV[:, SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)])
        self.Part_ClosestVertInds.append(
            ClosestVertInds[SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)]
        )

    with open(pdist_matrix_fpath, "rb") as hFile:
        arrays = pickle.load(hFile, encoding="latin1")
    self.Pdist_matrix = arrays["Pdist_matrix"]
    self.Part_ids = np.array(SMPL_subdiv["Part_ID_subdiv"].squeeze())
    # Mean geodesic distances for parts.
    self.Mean_Distances = np.array([0, 0.351, 0.107, 0.126, 0.237, 0.173, 0.142, 0.128, 0.150])
    # Coarse Part labels.
    self.CoarseParts = np.array(
        [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8]
    )

def evaluate(self):
    comm.synchronize()

    self._predictions = comm.gather(self._predictions)
    self._predictions = list(itertools.chain(*self._predictions))
    if not comm.is_main_process():
        return

    gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
    gt_folder = self._metadata.panoptic_root

    with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
        if "://" not in self._predictions_dir:
            pred_dir = self._predictions_dir
            os.makedirs(pred_dir, exist_ok=True)
        logger.info("Writing all panoptic predictions to {} ...".format(pred_dir))
        for p in self._predictions:
            with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
                f.write(p.pop("png_string"))

        with open(gt_json, "r") as f:
            json_data = json.load(f)
        json_data["annotations"] = self._predictions
        with PathManager.open(self._predictions_json, "w") as f:
            f.write(json.dumps(json_data))

        from panopticapi.evaluation import pq_compute

        with contextlib.redirect_stdout(io.StringIO()):
            pq_res = pq_compute(
                gt_json,
                PathManager.get_local_path(self._predictions_json),
                gt_folder=gt_folder,
                pred_folder=pred_dir,
            )

    res = {}
    res["PQ"] = 100 * pq_res["All"]["pq"]
    res["SQ"] = 100 * pq_res["All"]["sq"]
    res["RQ"] = 100 * pq_res["All"]["rq"]
    res["PQ_th"] = 100 * pq_res["Things"]["pq"]
    res["SQ_th"] = 100 * pq_res["Things"]["sq"]
    res["RQ_th"] = 100 * pq_res["Things"]["rq"]
    res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
    res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
    res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]

    results = OrderedDict({"panoptic_seg": res})
    _print_panoptic_results(pq_res)

    return results

def evaluate(self):
    comm.synchronize()

    self._predictions = comm.gather(self._predictions)
    self._predictions = list(itertools.chain(*self._predictions))
    if not comm.is_main_process():
        return

    # PanopticApi requires local files
    gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
    gt_folder = PathManager.get_local_path(self._metadata.panoptic_root)

    with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
        logger.info("Writing all panoptic predictions to {} ...".format(pred_dir))
        for p in self._predictions:
            with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
                f.write(p.pop("png_string"))

        with open(gt_json, "r") as f:
            json_data = json.load(f)
        json_data["annotations"] = self._predictions
        with PathManager.open(self._predictions_json, "w") as f:
            f.write(json.dumps(json_data))

        with contextlib.redirect_stdout(io.StringIO()):
            pq_res = pq_compute(
                gt_json,
                PathManager.get_local_path(self._predictions_json),
                gt_folder=gt_folder,
                pred_folder=pred_dir,
                unknown_label_list=self.unknown_label_list,
            )

    res = {}
    res["PQ"] = 100 * pq_res["All"]["pq"]
    res["SQ"] = 100 * pq_res["All"]["sq"]
    res["RQ"] = 100 * pq_res["All"]["rq"]
    res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
    res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
    res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]
    if "Known Things" in pq_res:
        res["PQ_known"] = 100 * pq_res["Known Things"]["pq"]
        res["SQ_known"] = 100 * pq_res["Known Things"]["sq"]
        res["RQ_known"] = 100 * pq_res["Known Things"]["rq"]
    if "Unknown Things" in pq_res:
        res["PQ_unknown"] = 100 * pq_res["Unknown Things"]["pq"]
        res["SQ_unknown"] = 100 * pq_res["Unknown Things"]["sq"]
        res["RQ_unknown"] = 100 * pq_res["Unknown Things"]["rq"]

    results = OrderedDict({"panoptic_seg": res})
    _print_panoptic_results(pq_res)

    return results

def __init__(self, dataset_name, cfg, distributed, output_dir=None): self._tasks = ("polygon", "recognition") self._distributed = distributed self._output_dir = output_dir self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): raise AttributeError( f"json_file was not found in MetaDataCatalog for '{dataset_name}'." ) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._coco_api = COCO(json_file) # use dataset_name to decide eval_gt_path if "totaltext" in dataset_name: self._text_eval_gt_path = "datasets/evaluation/gt_totaltext.zip" self._word_spotting = True elif "ctw1500" in dataset_name: self._text_eval_gt_path = "datasets/evaluation/gt_ctw1500.zip" self._word_spotting = False self._text_eval_confidence = cfg.MODEL.FCOS.INFERENCE_TH_TEST
def setUp(self):
    json_file = MetadataCatalog.get("coco_2017_val_100").json_file
    if not PathManager.isfile(json_file):
        raise unittest.SkipTest("{} not found".format(json_file))
    with contextlib.redirect_stdout(io.StringIO()):
        json_file = PathManager.get_local_path(json_file)
        self.coco = COCO(json_file)

def main_worker_eval(worker_id, args):
    device = torch.device("cuda:%d" % worker_id)
    cfg = setup(args)

    # build test set
    test_loader = build_data_loader(
        cfg, get_dataset_name(cfg), "test", multigpu=False
    )
    test_loader.dataset.set_depth_only(True)
    test_loader.collate_fn = default_collate
    logger.info("test - %d" % len(test_loader))

    # load checkpoint and build model
    if cfg.MODEL.CHECKPOINT == "":
        raise ValueError("Invalid checkpoint provided")
    logger.info("Loading model from checkpoint: %s" % (cfg.MODEL.CHECKPOINT))
    cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT))
    state_dict = clean_state_dict(cp["best_states"]["model"])
    model = MVSNet(cfg.MODEL.MVSNET)
    model.load_state_dict(state_dict)
    logger.info("Model loaded")
    model.to(device)

    eval_split = "test"
    test_metrics, test_preds = evaluate_split_depth(
        model, test_loader, prefix="%s_" % eval_split
    )
    str_out = "Results on %s: " % eval_split
    for k, v in test_metrics.items():
        str_out += "%s %.4f " % (k, v)
    logger.info(str_out)

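# Hedged, hypothetical sketch (not from the original repo): `clean_state_dict` above is
# assumed to strip a DataParallel/DistributedDataParallel "module." prefix from checkpoint
# keys before load_state_dict(). The real helper may do more; this only illustrates the idea.
def clean_state_dict_sketch(state_dict):
    return {
        (k[len("module."):] if k.startswith("module.") else k): v
        for k, v in state_dict.items()
    }
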
def load_dacon_test_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
    timer = Timer()
    json_file = PathManager.get_local_path(json_file)
    with contextlib.redirect_stdout(io.StringIO()):
        dacon_api = DaconAPI(json_file)
    anns = dacon_api.features
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))

    meta = MetadataCatalog.get(dataset_name)
    meta.thing_classes = dacon_api.thing_classes

    logger.info("Loaded {} images in dacon format from {}".format(len(anns), json_file))

    dataset_dicts = []
    for ann in anns:
        record = {}
        record["file_name"] = os.path.join(image_root, ann["image_id"])
        dataset_dicts.append(record)
    return dataset_dicts

def __init__(self, dataset_name, cfg, distributed, output_dir=None):
    self._tasks = self._tasks_from_config(cfg)
    self._distributed = distributed
    self._output_dir = output_dir

    self._cpu_device = torch.device("cpu")
    # this is hard-coded so that logging is normal
    self._logger = logging.getLogger("detectron2.evaluation.coco_evaluation")

    self._metadata = MetadataCatalog.get(dataset_name)
    if not hasattr(self._metadata, "json_file"):
        self._logger.warning(
            f"json_file was not found in MetaDataCatalog for '{dataset_name}'"
        )
        cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
        self._metadata.json_file = cache_path
        convert_to_coco_json(dataset_name, cache_path)

    json_file = PathManager.get_local_path(self._metadata.json_file)
    with contextlib.redirect_stdout(io.StringIO()):
        self._coco_api = COCO(json_file)

    self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS
    # Test set json files do not contain annotations (evaluation must be
    # performed using the COCO evaluation server).
    self._do_evaluation = "annotations" in self._coco_api.dataset

    if "BIT" in cfg.MODEL.DANCE.MASK_IN:
        self.input_format = "bit"
    else:
        self.input_format = "rle"

def __init__(self, dataset_name, cfg, distributed, output_dir=None):
    self._distributed = distributed
    self._output_dir = output_dir

    self._cpu_device = torch.device("cpu")
    self._logger = logging.getLogger("detectron2")

    self._metadata = MetadataCatalog.get(dataset_name)
    self.internal_dataset_mapping = copy.deepcopy(
        self._metadata.thing_dataset_id_to_contiguous_id
    )
    if -1 in self.internal_dataset_mapping.keys():
        self.internal_dataset_mapping = dict(
            [(k, v - 1) for k, v in self.internal_dataset_mapping.items()]
        )

    if not hasattr(self._metadata, "json_file"):
        self._logger.warning(
            f"json_file was not found in MetaDataCatalog for '{dataset_name}'."
            " Trying to convert it to COCO format ..."
        )
        cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
        self._metadata.json_file = cache_path
        convert_to_coco_json(dataset_name, cache_path)

    json_file = PathManager.get_local_path(self._metadata.json_file)
    with contextlib.redirect_stdout(io.StringIO()):
        self._coco_api = COCO(json_file)

def evaluate(self):
    comm.synchronize()
    if comm.get_rank() > 0:
        return

    import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval

    self._logger.info(f"Evaluating results under {self._temp_dir} ...")

    cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
    cityscapes_eval.args.predictionWalk = None
    cityscapes_eval.args.JSONOutput = False
    cityscapes_eval.args.colorized = False
    cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json")

    gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
    groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png"))
    assert len(groundTruthImgList), (
        "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
            cityscapes_eval.args.groundTruthSearch
        )
    )
    predictionImgList = []
    for gt in groundTruthImgList:
        predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args))
    results = cityscapes_eval.evaluateImgLists(
        predictionImgList, groundTruthImgList, cityscapes_eval.args
    )["averages"]

    ret = OrderedDict()
    ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100}
    self._working_dir.cleanup()
    return ret

def load_coco_unlabel_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
    from pycocotools.coco import COCO

    timer = Timer()
    json_file = PathManager.get_local_path(json_file)
    with contextlib.redirect_stdout(io.StringIO()):
        coco_api = COCO(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))

    id_map = None

    # sort indices for reproducible results
    img_ids = sorted(coco_api.imgs.keys())
    imgs = coco_api.loadImgs(img_ids)

    logger.info("Loaded {} images in COCO format from {}".format(len(imgs), json_file))

    dataset_dicts = []
    for img_dict in imgs:
        record = {}
        record["file_name"] = os.path.join(image_root, img_dict["file_name"])
        record["height"] = img_dict["height"]
        record["width"] = img_dict["width"]
        image_id = record["image_id"] = img_dict["id"]
        dataset_dicts.append(record)

    return dataset_dicts

def __init__(self, dataset_name, cfg, distributed, output_dir=None): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. cfg (CfgNode): config instance distributed (True): if True, will collect results from all ranks for evaluation. Otherwise, will evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instance_predictions.pth" a file in torch serialization format that contains all the raw original predictions. 2. "coco_instances_results.json" a json file in COCO's result format. """ self._tasks = self._tasks_from_config(cfg) self._distributed = distributed self._output_dir = output_dir self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): self._logger.warning( f"json_file was not found in MetaDataCatalog for '{dataset_name}'" ) cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") self._metadata.json_file = cache_path convert_to_coco_json(dataset_name, cache_path) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._fashion_api = Fashion(json_file) self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = ("annotations" in self._fashion_api.dataset \ and "annotations2" in self._fashion_api.dataset) self._view_error = cfg.TEST.VIEW_ERROR if self._view_error: self._error_output_path = self._output_dir + '/error_result' os.makedirs(self._error_output_path, exist_ok=True) with open(os.path.join(self._error_output_path, 'info.txt'), 'w') as f: s = "time: " + time.strftime("%Y-%m-%d-%H:%M", time.localtime()) + "\n" s += f"weight: {cfg.MODEL.WEIGHTS}\n" s += f"dataset: {cfg.DATASETS.TEST}\n" f.write(s)
def __init__(self, dataset_name, cfg, distributed, output_dir=None): self._tasks = self._tasks_from_config(cfg) self._distributed = distributed self._output_dir = output_dir self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): self._logger.warning( f"json_file was not found in MetaDataCatalog for '{dataset_name}'." " Trying to convert it to COCO format ..." ) cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") self._metadata.json_file = cache_path convert_to_coco_json(dataset_name, cache_path) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._coco_api = COCO(json_file) self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS self._do_evaluation = "annotations" in self._coco_api.dataset
def load_voc_instances(dirname, split="train"):
    with PathManager.open(
        os.path.join(dirname, "ImageSets", "Main", split + ".txt")
    ) as f:
        fileids = np.loadtxt(f, dtype=str)

    # Needs to read many small annotation files. Makes sense to fetch them to local first.
    annotation_dirname = PathManager.get_local_path(
        os.path.join(dirname, "Annotations/")
    )
    dicts = []
    class_names = {}
    for fileid in fileids:
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")

        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)

        r = {}
        instances = []
        for obj in tree.findall("object"):
            cls = obj.find("name").text
            if cls not in class_names:
                class_names[cls] = len(class_names)
            instances.append({"category_id": class_names[cls]})
        r["annotations"] = instances
        dicts.append(r)
    return dicts, list(class_names.keys())

def main_worker_eval(worker_id, args):
    device = torch.device("cuda:%d" % worker_id)
    cfg = setup(args)

    # build test set
    test_loader = build_data_loader(cfg, dataset, "test", multigpu=False, num_workers=8)
    logger.info("test - %d" % len(test_loader))

    # load checkpoint and build model
    if cfg.MODEL.CHECKPOINT == "":
        raise ValueError("Invalid checkpoint provided")
    logger.info("Loading model from checkpoint: %s" % (cfg.MODEL.CHECKPOINT))
    cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT))
    state_dict = clean_state_dict(cp["best_states"]["model"])
    model = build_model(cfg)
    model.load_state_dict(state_dict)
    logger.info("Model loaded")
    model.to(device)

    wandb.init(project="MeshRCNN", config=cfg, name="meshrcnn-eval")

    if args.eval_p2m:
        evaluate_test_p2m(model, test_loader)
    else:
        evaluate_test(model, test_loader)

def __init__(self, dataset_name, cfg, distributed, output_dir=None, *, use_fast_impl=True):
    self._predictions = []
    self._tasks = self._tasks_from_config(cfg)
    self._distributed = distributed
    self._output_dir = output_dir
    self._use_fast_impl = use_fast_impl

    self._cpu_device = torch.device("cpu")
    self._logger = logging.getLogger("MyCocoEvaluator")

    self._metadata = MetadataCatalog.get(dataset_name)
    if not hasattr(self._metadata, "json_file"):
        cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
        self._metadata.json_file = cache_path
        convert_to_coco_json(dataset_name, cache_path)

    json_file = PathManager.get_local_path(self._metadata.json_file)
    with contextlib.redirect_stdout(io.StringIO()):
        self._coco_api = COCO(json_file)

    self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS
    # Test set json files do not contain annotations (evaluation must be
    # performed using the COCO evaluation server).
    self._do_evaluation = "annotations" in self._coco_api.dataset

def load_cityscapes_semantic(image_dir, gt_dir):
    """
    Args:
        image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
        gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".

    Returns:
        list[dict]: a list of dict, each has "file_name" and "sem_seg_file_name".
    """
    ret = []
    # gt_dir is small and contains many small files. It makes sense to fetch it to local first.
    gt_dir = PathManager.get_local_path(gt_dir)
    for image_file, _, label_file, json_file in get_cityscapes_files(image_dir, gt_dir):
        label_file = label_file.replace("labelIds", "labelTrainIds")

        with PathManager.open(json_file, "r") as f:
            jsonobj = json.load(f)
        ret.append({
            "file_name": image_file,
            "sem_seg_file_name": label_file,
            "height": jsonobj["imgHeight"],
            "width": jsonobj["imgWidth"],
        })
    assert len(ret), f"No images found in {image_dir}!"
    assert PathManager.isfile(
        ret[0]["sem_seg_file_name"]
    ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"  # noqa
    return ret

def __init__(self, dataset_name, cfg, distributed, output_dir=None): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have the following corresponding metadata: "json_file": the path to the LVIS format annotation cfg (CfgNode): config instance distributed (True): if True, will collect results from all ranks for evaluation. Otherwise, will evaluate the results in the current process. output_dir (str): optional, an output directory to dump results. """ from lvis import LVIS self._tasks = self._tasks_from_config(cfg) self._distributed = distributed self._output_dir = output_dir self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) self._metadata = MetadataCatalog.get(dataset_name) json_file = PathManager.get_local_path(self._metadata.json_file) self._lvis_api = LVIS(json_file) # Test set json files do not contain annotations (evaluation must be # performed using the LVIS evaluation server). self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0
def __init__(self, dataset_name, cfg, distributed, output_dir=None): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. cfg (CfgNode): config instance distributed (True): if True, will collect results from all ranks for evaluation. Otherwise, will evaluate the results in the current process. output_dir (str): optional, an output directory to dump results. """ self._tasks = self._tasks_from_config(cfg) self._distributed = distributed self._output_dir = output_dir self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): self._logger.warning(f"json_file was not found in MetaDataCatalog for '{dataset_name}'") cache_path = convert_to_coco_json(dataset_name, output_dir) self._metadata.json_file = cache_path json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._coco_api = COCO(json_file) self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = "annotations" in self._coco_api.dataset
def __init__(self, dataset_name, cfg, distributed, output_dir=None): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have the following corresponding metadata: "json_file": the path to the COCO format annotation cfg (CfgNode): config instance distributed (True): if True, will collect results from all ranks for evaluation. Otherwise, will evaluate the results in the current process. output_dir (str): optional, an output directory to dump results. """ self._tasks = self._tasks_from_config(cfg) self._distributed = distributed self._output_dir = output_dir self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) self._metadata = MetadataCatalog.get(dataset_name) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._coco_api = COCO(json_file) self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = len(self._coco_api.getAnnIds()) > 0
def load(self, path: str):
    """
    Load from the given checkpoint. When the path points to a network file, this
    function has to be called on all ranks.

    Args:
        path (str): path or url to the checkpoint. If empty, will not load anything.

    Returns:
        dict: extra data loaded from the checkpoint that has not been
            processed. For example, those saved with :meth:`.save(**extra_data)`.
    """
    if not path:
        # no checkpoint provided
        self.logger.info("No checkpoint found. Initializing model from scratch")
        return {}
    self.logger.info("Loading checkpoint from {}".format(path))
    if not os.path.isfile(path):
        path = PathManager.get_local_path(path)
        assert os.path.isfile(path), "Checkpoint {} not found!".format(path)

    checkpoint = self._load_file(path)
    self._load_model(checkpoint)
    for key, obj in self.checkpointables.items():
        if key in checkpoint:
            self.logger.info("Loading {} from {}".format(key, path))
            obj.load_state_dict(checkpoint.pop(key))

    # return any further checkpoint data
    return checkpoint

def __init__(self, cfg, is_train=True):
    self.tfm_gens = utils.build_transform_gen(cfg, is_train)

    # fmt: off
    self.img_format = cfg.INPUT.FORMAT
    self.mask_on = cfg.MODEL.MASK_ON
    self.keypoint_on = cfg.MODEL.KEYPOINT_ON
    self.densepose_on = cfg.MODEL.DENSEPOSE_ON
    assert not cfg.MODEL.LOAD_PROPOSALS, "not supported yet"
    # fmt: on

    if self.keypoint_on and is_train:
        # Flip only makes sense in training
        self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
    else:
        self.keypoint_hflip_indices = None

    if self.densepose_on:
        densepose_transform_srcs = [
            MetadataCatalog.get(ds).densepose_transform_src
            for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST
        ]
        assert len(densepose_transform_srcs) > 0
        # TODO: check that DensePose transformation data is the same for
        # all the datasets. Otherwise one would have to pass DB ID with
        # each entry to select proper transformation data. For now, since
        # all DensePose annotated data uses the same data semantics, we
        # omit this check.
        densepose_transform_data_fpath = PathManager.get_local_path(densepose_transform_srcs[0])
        self.densepose_transform_data = DensePoseTransformData.load(
            densepose_transform_data_fpath
        )

    self.is_train = is_train

def load(self, path: str, checkpointables: Optional[List[str]] = None) -> object:
    """
    Load from the given checkpoint. When the path points to a network file, this
    function has to be called on all ranks.

    Args:
        path (str): path or url to the checkpoint. If empty, will not load anything.
        checkpointables (list): List of checkpointable names to load. If not
            specified (None), will load all the possible checkpointables.

    Returns:
        dict: extra data loaded from the checkpoint that has not been
            processed. For example, those saved with :meth:`.save(**extra_data)`.
    """
    if not path:
        # no checkpoint provided
        self.logger.info("No checkpoint found. Initializing model from scratch")
        return {}
    self.logger.info("Loading checkpoint from {}".format(path))
    if not os.path.isfile(path):
        path = PathManager.get_local_path(path)
        assert os.path.isfile(path), "Checkpoint {} not found!".format(path)

    checkpoint = self._load_file(path)
    self._load_model(checkpoint)
    for key in self.checkpointables if checkpointables is None else checkpointables:
        if key in checkpoint:  # pyre-ignore
            self.logger.info("Loading {} from {}".format(key, path))
            obj = self.checkpointables[key]
            obj.load_state_dict(checkpoint.pop(key))  # pyre-ignore

    # return any further checkpoint data
    return checkpoint

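# Hedged usage sketch (not part of the original file): how a training script might drive
# load() above. `DetectionCheckpointer`, `build_model` and the config keys used here are
# illustrative assumptions, not taken from this repo.
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model


def _example_load_checkpoint(cfg):
    model = build_model(cfg)
    checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
    # load() returns any extra data stored alongside the weights (e.g. the iteration count)
    extra_data = checkpointer.load(cfg.MODEL.WEIGHTS)
    return model, extra_data
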
def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
    """
    Load Pascal VOC detection annotations to Detectron2 format.

    Args:
        dirname: Contains "Annotations", "ImageSets", "JPEGImages"
        split (str): one of "train", "test", "val", "trainval"
        class_names: list or tuple of class names
    """
    print(os.path.join(dirname, "ImageSets", "Main", split + ".txt"))
    with PathManager.open(
        os.path.join(dirname, "ImageSets", "Main", split + ".txt")
    ) as f:
        fileids = np.loadtxt(f, dtype=str)

    # Needs to read many small annotation files. Makes sense to fetch them to local first.
    annotation_dirname = PathManager.get_local_path(
        os.path.join(dirname, "Annotations/")
    )
    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")

        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)

        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []

        for obj in tree.findall("object"):
            cls = obj.find("name").text
            # We include "difficult" samples in training.
            # Based on limited experiments, they don't hurt accuracy.
            # difficult = int(obj.find("difficult").text)
            # if difficult == 1:
            #     continue
            bbox = obj.find("bndbox")
            bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
            # Original annotations are integers in the range [1, W or H]
            # Assuming they mean 1-based pixel indices (inclusive),
            # a box with annotation (xmin=1, xmax=W) covers the whole image.
            # In coordinate space this is represented by (xmin=0, xmax=W)
            bbox[0] -= 1.0
            bbox[1] -= 1.0
            instances.append({
                "category_id": class_names.index(cls),
                "bbox": bbox,
                "bbox_mode": BoxMode.XYXY_ABS,
            })
        r["annotations"] = instances
        dicts.append(r)
    return dicts

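# Hedged usage sketch (not from the original file): registering the VOC loader above so the
# dataset can be referred to by name. The dataset name, directory, and the truncated class
# list are illustrative assumptions.
from detectron2.data import DatasetCatalog, MetadataCatalog

VOC_CLASS_NAMES_EXAMPLE = ("aeroplane", "bicycle", "bird")  # truncated, illustrative

DatasetCatalog.register(
    "voc_2007_trainval_example",
    lambda: load_voc_instances("datasets/VOC2007", "trainval", VOC_CLASS_NAMES_EXAMPLE),
)
MetadataCatalog.get("voc_2007_trainval_example").set(
    thing_classes=list(VOC_CLASS_NAMES_EXAMPLE), dirname="datasets/VOC2007", split="trainval"
)
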
def __init__(self, dataset_name, cfg, distributed, output_dir=None, result_df=None,
             output_file=None, *, use_fast_impl=True):
    """
    Args:
        dataset_name (str): name of the dataset to be evaluated.
            It must have either the following corresponding metadata:
                "json_file": the path to the COCO format annotation
            Or it must be in detectron2's standard dataset format
            so it can be converted to COCO format automatically.
        cfg (CfgNode): config instance
        distributed (True): if True, will collect results from all ranks and run evaluation
            in the main process.
            Otherwise, will only evaluate the results in the current process.
        output_dir (str): optional, an output directory to dump all
            results predicted on the dataset. The dump contains two files:
            1. "instance_predictions.pth" a file in torch serialization
               format that contains all the raw original predictions.
            2. "coco_instances_results.json" a json file in COCO's result format.
        use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
            Although the results should be very close to the official implementation in
            COCO API, it is still recommended to compute results with the official API for
            use in papers.
    """
    self._tasks = self._tasks_from_config(cfg)
    self._distributed = distributed
    self._output_dir = output_dir
    self._use_fast_impl = use_fast_impl

    self._cpu_device = torch.device("cpu")
    self._logger = logging.getLogger(__name__)

    self._metadata = MetadataCatalog.get(dataset_name)
    if not hasattr(self._metadata, "json_file"):
        self._logger.info(
            f"'{dataset_name}' is not registered by `register_coco_instances`."
            " Therefore trying to convert it to COCO format ..."
        )

        cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
        self._metadata.json_file = cache_path
        convert_to_coco_json(dataset_name, cache_path)

    json_file = PathManager.get_local_path(self._metadata.json_file)
    with contextlib.redirect_stdout(io.StringIO()):
        self._coco_api = COCO(json_file)

    self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS
    # Test set json files do not contain annotations (evaluation must be
    # performed using the COCO evaluation server).
    self._do_evaluation = "annotations" in self._coco_api.dataset

    # Change made by Johan to store all results
    self.result_df = None
    if result_df is not None and len(result_df) == 0:
        self.result_df = result_df
    self._output_file = output_file

def test_bad_args(self) -> None:
    # TODO (T58240718): Replace with dynamic checks
    with self.assertRaises(ValueError):
        PathManager.copy(
            self._tmpfile, self._tmpfile, foo="foo"  # type: ignore
        )
    with self.assertRaises(ValueError):
        PathManager.exists(self._tmpfile, foo="foo")  # type: ignore
    with self.assertRaises(ValueError):
        PathManager.get_local_path(self._tmpfile, foo="foo")  # type: ignore
    with self.assertRaises(ValueError):
        PathManager.isdir(self._tmpfile, foo="foo")  # type: ignore
    with self.assertRaises(ValueError):
        PathManager.isfile(self._tmpfile, foo="foo")  # type: ignore
    with self.assertRaises(ValueError):
        PathManager.ls(self._tmpfile, foo="foo")  # type: ignore
    with self.assertRaises(ValueError):
        PathManager.mkdirs(self._tmpfile, foo="foo")  # type: ignore
    with self.assertRaises(ValueError):
        PathManager.open(self._tmpfile, foo="foo")  # type: ignore
    with self.assertRaises(ValueError):
        PathManager.rm(self._tmpfile, foo="foo")  # type: ignore

    PathManager.set_strict_kwargs_checking(False)

    PathManager.copy(
        self._tmpfile, self._tmpfile, foo="foo"  # type: ignore
    )
    PathManager.exists(self._tmpfile, foo="foo")  # type: ignore
    PathManager.get_local_path(self._tmpfile, foo="foo")  # type: ignore
    PathManager.isdir(self._tmpfile, foo="foo")  # type: ignore
    PathManager.isfile(self._tmpfile, foo="foo")  # type: ignore
    PathManager.ls(self._tmpdir, foo="foo")  # type: ignore
    PathManager.mkdirs(self._tmpdir, foo="foo")  # type: ignore
    f = PathManager.open(self._tmpfile, foo="foo")  # type: ignore
    f.close()  # pyre-ignore
    with open(os.path.join(self._tmpdir, "test_rm.txt"), "w") as f:
        rm_file = f.name
        f.write(self._tmpfile_contents)
        f.flush()
    PathManager.rm(rm_file, foo="foo")  # type: ignore

def __init__(self, dataset_name, cfg, distributed, output_dir=None): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. cfg (CfgNode): config instance distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instance_predictions.pth" a file in torch serialization format that contains all the raw original predictions. 2. "coco_instances_results.json" a json file in COCO's result format. """ self._tasks = self._tasks_from_config(cfg) self._distributed = distributed self._output_dir = output_dir self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) self._metadata = MetadataCatalog.get(dataset_name) has_json = True if not hasattr(self._metadata, "json_file"): self._logger.warning( f"json_file was not found in MetaDataCatalog for '{dataset_name}'." " Trying to convert it to COCO format ...") cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") self._metadata.json_file = cache_path try: convert_to_coco_json(dataset_name, cache_path) except KeyError: self._logger.warning( f"Failed to convert {dataset_name} to COCO format. Do inference only." ) has_json = False if has_json: json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._coco_api = COCO(json_file) # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = "annotations" in self._coco_api.dataset else: self._coco_api = None self._do_evaluation = False self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS
def load_voc_rotated_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
    """
    Load Pascal VOC rotated detection annotations to Detectron2 format.
    annotation tool: https://github.com/chinakook/labelimg2

    Args:
        dirname: Contains "Annotations", "ImageSets", "JPEGImages"
        split (str): one of "train", "test", "val", "trainval"
        class_names: list or tuple of class names
    """
    with PathManager.open(
        os.path.join(dirname, "ImageSets", "Main", split + ".txt")
    ) as f:
        fileids = np.loadtxt(f, dtype=str)

    # Needs to read many small annotation files. Makes sense to fetch them to local first.
    annotation_dirname = PathManager.get_local_path(
        os.path.join(dirname, "Annotations/")
    )
    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")

        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)

        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []

        for obj in tree.findall("object"):
            cls = obj.find("name").text
            rbbox = obj.find("robndbox")
            rbbox = [float(rbbox.find(x).text) for x in ["cx", "cy", "w", "h", "angle"]]
            # see https://detectron2.readthedocs.io/modules/structures.html#detectron2.structures.RotatedBoxes
            rbbox[-1] = rbbox[-1] * 180.0 / math.pi
            if 270.0 >= rbbox[-1] > 90.0:
                rbbox[-1] = -(180.0 - rbbox[-1])
            elif 360.0 >= rbbox[-1] > 270.0:
                rbbox[-1] = rbbox[-1] - 360.0
            rbbox[-1] = -rbbox[-1]
            instances.append({
                "category_id": class_names.index(cls),
                "bbox": rbbox,
                "bbox_mode": BoxMode.XYWHA_ABS,
            })
        r["annotations"] = instances
        dicts.append(r)
    return dicts

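# Hedged worked example (values assumed for illustration) of the angle remapping used in
# load_voc_rotated_instances above: labelimg2 stores the angle in radians, which is converted
# to degrees and then mapped into detectron2's XYWHA angle convention.
import math


def _to_detectron2_angle_example(angle_rad):
    a = angle_rad * 180.0 / math.pi
    if 270.0 >= a > 90.0:
        a = -(180.0 - a)
    elif 360.0 >= a > 270.0:
        a = a - 360.0
    return -a


assert abs(_to_detectron2_angle_example(math.pi) - 0.0) < 1e-6          # 180 deg -> 0
assert abs(_to_detectron2_angle_example(1.75 * math.pi) - 45.0) < 1e-6  # 315 deg -> 45
assert abs(_to_detectron2_angle_example(0.25 * math.pi) + 45.0) < 1e-6  # 45 deg -> -45
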
def load_lvis_json(json_file, image_root, dataset_name=None):
    from lvis import LVIS

    json_file = PathManager.get_local_path(json_file)

    timer = Timer()
    lvis_api = LVIS(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))

    if dataset_name is not None:
        meta = get_lvis_instances_meta(dataset_name)
        MetadataCatalog.get(dataset_name).set(**meta)

    img_ids = sorted(lvis_api.imgs.keys())
    imgs = lvis_api.load_imgs(img_ids)
    anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]

    ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
    assert len(set(ann_ids)) == len(ann_ids), \
        f"Annotation ids in '{json_file}' are not unique"

    imgs_anns = list(zip(imgs, anns))

    logger.info(f"Loaded {len(imgs_anns)} images in the LVIS format from {json_file}")

    dataset_dicts = []

    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        file_name = img_dict["file_name"]
        if img_dict["file_name"].startswith("COCO"):
            file_name = file_name[-16:]
        record["file_name"] = os.path.join(image_root, file_name)
        record["height"] = img_dict["height"]
        record["width"] = img_dict["width"]
        record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
        record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
        image_id = record["image_id"] = img_dict["id"]

        objs = []
        for anno in anno_dict_list:
            assert anno["image_id"] == image_id
            obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
            obj["category_id"] = anno["category_id"] - 1
            segm = anno["segmentation"]
            valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
            assert len(segm) == len(valid_segm), \
                "Annotation contains an invalid polygon with < 3 points"
            assert len(segm) > 0
            obj["segmentation"] = segm
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)

    return dataset_dicts