def register_lvis_instances(name, metadata, json_file, image_root):
    """
    Register a dataset in LVIS's json annotation format for instance detection.

    Args:
        name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train".
        metadata (dict): extra metadata associated with this dataset. It can be
            an empty dict.
        json_file (str): path to the json instance annotation file.
        image_root (str): directory which contains all the images.
    """
    DatasetCatalog.register(
        name, lambda: load_lvis_json(json_file, image_root, name))
    MetadataCatalog.get(name).set(
        json_file=json_file, image_root=image_root,
        evaluator_type="lvis", **metadata)
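# Usage sketch (not part of the library code above): how register_lvis_instances
# might be called. The dataset name and paths are hypothetical placeholders;
# point them at your own LVIS-format annotations and images.
register_lvis_instances(
    "my_lvis_dataset_train",                    # hypothetical dataset name
    {},                                         # no extra metadata
    "datasets/my_lvis/annotations/train.json",  # hypothetical json_file
    "datasets/my_lvis/images",                  # hypothetical image_root
)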
def register_meta_pascal_voc(
        name, metadata, dirname, split, year, keepclasses, sid):
    if keepclasses.startswith('base_novel'):
        thing_classes = metadata["thing_classes"][sid]
    elif keepclasses.startswith('base'):
        thing_classes = metadata["base_classes"][sid]
    elif keepclasses.startswith('novel'):
        thing_classes = metadata["novel_classes"][sid]
    else:
        # Fail early with a clear message instead of an UnboundLocalError below.
        raise ValueError("Unknown keepclasses: {}".format(keepclasses))

    DatasetCatalog.register(
        name, lambda: load_filtered_voc_instances(
            name, dirname, split, thing_classes)
    )

    MetadataCatalog.get(name).set(
        thing_classes=thing_classes, dirname=dirname, year=year, split=split,
        base_classes=metadata["base_classes"][sid],
        novel_classes=metadata["novel_classes"][sid]
    )
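# Usage sketch, not taken from the repo: the dataset name, directory and
# metadata layout below are assumptions for illustration. `metadata` is
# expected to map "thing_classes", "base_classes" and "novel_classes" to
# per-split class lists indexed by `sid`.
example_metadata = {
    "thing_classes": {1: ["aeroplane", "bicycle", "bird"]},  # hypothetical
    "base_classes": {1: ["aeroplane", "bicycle"]},           # hypothetical
    "novel_classes": {1: ["bird"]},                          # hypothetical
}
register_meta_pascal_voc(
    "voc_2007_trainval_base1",  # hypothetical dataset name
    example_metadata,
    "datasets/VOC2007",         # hypothetical dirname
    "trainval",                 # split
    2007,                       # year
    "base",                     # keepclasses: keep only base classes
    1,                          # sid: split id used to index the metadata
)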
def register_meta_coco(name, metadata, imgdir, annofile):
    DatasetCatalog.register(
        name,
        lambda: load_coco_json(annofile, imgdir, metadata, name),
    )

    if '_base' in name or '_novel' in name:
        split = 'base' if '_base' in name else 'novel'
        metadata['thing_dataset_id_to_contiguous_id'] = \
            metadata['{}_dataset_id_to_contiguous_id'.format(split)]
        metadata['thing_classes'] = metadata['{}_classes'.format(split)]

    MetadataCatalog.get(name).set(
        json_file=annofile,
        image_root=imgdir,
        evaluator_type="coco",
        dirname="datasets/coco",
        **metadata,
    )
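# Usage sketch with hypothetical names and paths: registering a novel-split
# COCO dataset. The metadata dict is assumed to carry the per-split keys that
# register_meta_coco reads ("novel_classes",
# "novel_dataset_id_to_contiguous_id", and so on).
example_coco_metadata = {
    "novel_classes": ["bird", "cat"],                     # hypothetical
    "novel_dataset_id_to_contiguous_id": {16: 0, 17: 1},  # hypothetical
}
register_meta_coco(
    "coco_trainval_novel",                             # hypothetical name
    example_coco_metadata,
    "datasets/coco/trainval2014",                      # hypothetical imgdir
    "datasets/cocosplit/datasplit/trainvalno5k.json",  # hypothetical annofile
)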
def register_coco_instances(name, metadata, json_file, image_root):
    """
    Register a dataset in COCO's json annotation format for instance detection.

    This is an example of how to register a new dataset. You can do something
    similar to this function, to register new datasets.

    Args:
        name (str): the name that identifies a dataset, e.g. "coco_2014_train".
        metadata (dict): extra metadata associated with this dataset. You can
            leave it as an empty dict.
        json_file (str): path to the json instance annotation file.
        image_root (str): directory which contains all the images.
    """
    # 1. register a function which returns dicts
    DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))

    # 2. Optionally, add metadata about this dataset,
    # since they might be useful in evaluation, visualization or logging
    MetadataCatalog.get(name).set(
        json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
    )
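# Example call, following the docstring's suggestion for registering a new
# COCO-format dataset. The name and paths are hypothetical; the loader is only
# invoked lazily when the dataset dicts are requested from DatasetCatalog.
register_coco_instances(
    "my_coco_dataset_train",
    {},
    "datasets/my_coco/annotations/instances_train.json",
    "datasets/my_coco/images/train",
)
dataset_dicts = DatasetCatalog.get("my_coco_dataset_train")  # calls load_coco_json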
parser.add_argument("--input", required=True, help="JSON file produced by the model") parser.add_argument("--output", required=True, help="output directory") parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val") parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold") args = parser.parse_args() logger = setup_logger() with PathManager.open(args.input, "r") as f: predictions = json.load(f) pred_by_image = defaultdict(list) for p in predictions: pred_by_image[p["image_id"]].append(p) dicts = list(DatasetCatalog.get(args.dataset)) metadata = MetadataCatalog.get(args.dataset) if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): def dataset_id_map(ds_id): return metadata.thing_dataset_id_to_contiguous_id[ds_id] elif "lvis" in args.dataset: # LVIS results are in the same format as COCO results, but have a different # mapping from dataset category id to contiguous category id in [0, #categories - 1] def dataset_id_map(ds_id): return ds_id - 1 else: raise ValueError("Unsupported dataset: {}".format(args.dataset))
def register_pascal_voc(name, dirname, split, year):
    DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split))
    MetadataCatalog.get(name).set(
        thing_classes=CLASS_NAMES, dirname=dirname, year=year, split=split)
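# Sketch (hypothetical names and paths): registering several VOC splits in a
# loop. Because each call to register_pascal_voc creates its own function
# scope, the lambda captures the correct dirname/split for each dataset.
SPLITS = [
    ("voc_2007_trainval", "datasets/VOC2007", "trainval", 2007),
    ("voc_2007_test", "datasets/VOC2007", "test", 2007),
    ("voc_2012_trainval", "datasets/VOC2012", "trainval", 2012),
]
for name, dirname, split, year in SPLITS:
    register_pascal_voc(name, dirname, split, year)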
            for per_image in batch:
                # Pytorch tensor is in (C, H, W) format
                img = per_image["image"].permute(1, 2, 0)
                if cfg.INPUT.FORMAT == "BGR":
                    img = img[:, :, [2, 1, 0]]
                else:
                    img = np.asarray(
                        Image.fromarray(img, mode=cfg.INPUT.FORMAT).convert("RGB"))

                visualizer = Visualizer(img, metadata=metadata, scale=scale)
                target_fields = per_image["instances"].get_fields()
                labels = [
                    metadata.thing_classes[i] for i in target_fields["gt_classes"]
                ]
                vis = visualizer.overlay_instances(
                    labels=labels,
                    boxes=target_fields.get("gt_boxes", None),
                )
                output(vis, str(per_image["image_id"]) + ".jpg")
    else:
        dicts = list(
            chain.from_iterable(
                [DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN]))
        for dic in dicts:
            img = utils.read_image(dic["file_name"], "RGB")
            visualizer = Visualizer(img, metadata=metadata, scale=scale)
            vis = visualizer.draw_dataset_dict(dic)
            output(vis, os.path.basename(dic["file_name"]))
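# Both branches above rely on an `output(vis, fname)` helper defined elsewhere
# in the script. A minimal sketch of such a helper, assuming an output
# directory `dirname` and a visualization object `vis` with a save() method
# (it would be defined before the loops in the actual script):
def output(vis, fname):
    filepath = os.path.join(dirname, fname)
    print("Saving to {} ...".format(filepath))
    vis.save(filepath)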