Example #1
    def test(self):
        import json
        import os
        import tempfile

        import numpy as np
        import pycocotools.mask as mask_util

        from cvpods.data import DatasetCatalog, MetadataCatalog  # noqa
        from cvpods.data.datasets.coco import convert_to_coco_dict, load_coco_json  # noqa

        # Make a dummy dataset (make_mask/make_dataset_dicts are test helpers
        # defined elsewhere in the same module).
        mask = make_mask()
        DatasetCatalog.register("test_dataset", lambda: make_dataset_dicts(mask))
        MetadataCatalog.get("test_dataset").set(thing_classes=["test_label"])

        # Dump to json.
        json_dict = convert_to_coco_dict("test_dataset")
        with tempfile.TemporaryDirectory() as tmpdir:
            json_file_name = os.path.join(tmpdir, "test.json")
            with open(json_file_name, "w") as f:
                json.dump(json_dict, f)
            # Load from json.
            dicts = load_coco_json(json_file_name, "")

        # Check the loaded mask matches the original.
        anno = dicts[0]["annotations"][0]
        loaded_mask = mask_util.decode(anno["segmentation"])
        self.assertTrue(np.array_equal(loaded_mask, mask))
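The helpers make_mask() and make_dataset_dicts() are defined elsewhere in the test module and are not part of the snippet. A minimal sketch of what they could look like, assuming a detectron2/cvpods-style dataset dict with an RLE segmentation (the exact fields and the BoxMode value are assumptions, not taken from the source):

import numpy as np
import pycocotools.mask as mask_util


def make_mask():
    # Binary 100x100 mask with a filled rectangle (rows 20:60, cols 30:70).
    mask = np.zeros((100, 100), dtype=np.uint8)
    mask[20:60, 30:70] = 1
    return mask


def make_dataset_dicts(mask):
    # One image with one RLE-encoded instance, in the dataset-dict format
    # that convert_to_coco_dict() expects.
    rle = mask_util.encode(np.asarray(mask, order="F"))
    rle["counts"] = rle["counts"].decode("utf-8")  # keep the dict JSON-serializable
    return [{
        "file_name": "dummy.jpg",
        "image_id": 0,
        "height": mask.shape[0],
        "width": mask.shape[1],
        "annotations": [{
            "bbox": [30, 20, 70, 60],   # XYXY absolute coordinates
            "bbox_mode": 0,             # assumed to be BoxMode.XYXY_ABS
            "category_id": 0,
            "iscrowd": 0,
            "segmentation": rle,
        }],
    }]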
Example #2
def get_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script, without the hacky if-else logic here.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                output_dir=output_folder,
            ))
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        evaluator_list.append(
            COCOEvaluator(dataset_name, cfg, True, output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(
            COCOPanopticEvaluator(dataset_name, output_folder))
    if evaluator_type == "cityscapes":
        assert (
            torch.cuda.device_count() >= comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesEvaluator(dataset_name)
    if evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    if evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, cfg, True, output_folder)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(
                dataset_name, evaluator_type))
    if len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
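A minimal usage sketch for get_evaluator(), assuming a detectron2-style config and the detectron2 helpers build_detection_test_loader and inference_on_dataset (swap in the cvpods equivalents if that is the framework in use); the evaluate_all wrapper is a placeholder, not part of the snippet above:

import os

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import inference_on_dataset


def evaluate_all(cfg, model):
    # Run each test dataset through whichever evaluator(s) get_evaluator()
    # picks based on the dataset's "evaluator_type" metadata.
    results = {}
    for dataset_name in cfg.DATASETS.TEST:
        evaluator = get_evaluator(
            cfg,
            dataset_name,
            output_folder=os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name),
        )
        data_loader = build_detection_test_loader(cfg, dataset_name)
        results[dataset_name] = inference_on_dataset(model, data_loader, evaluator)
    return results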
Example #3
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser.parse_args(in_args)


if __name__ == "__main__":
    args = parse_args()
    logger = setup_logger()
    logger.info("Arguments: " + str(args))
    cfg = setup(args)

    dirname = args.output_dir
    os.makedirs(dirname, exist_ok=True)
    metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])

    def output(vis, fname):
        if args.show:
            print(fname)
            cv2.imshow("window", vis.get_image()[:, :, ::-1])
            cv2.waitKey()
        else:
            filepath = os.path.join(dirname, fname)
            print("Saving to {} ...".format(filepath))
            vis.save(filepath)

    scale = 2.0 if args.show else 1.0
    if args.source == "dataloader":
        train_data_loader = build_train_loader(cfg)
        for batch in train_data_loader: