Example 1
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        metadata = MetadataCatalog.get(dataset_name)
        # NOTE: check the original meta classes; this also tells us whether we
        # have a meta-dataset. Ugly, but easier to support than the alternatives.
        classes_to_eval = []
        if hasattr(metadata, 'classes_to_eval'):
            classes_to_eval = metadata.classes_to_eval
            print(f'Using meta-dataset with classes {classes_to_eval}')

        evaluator_type = metadata.evaluator_type
        if evaluator_type == "coco":
            evaluator_list.append(
                COCOEvaluator(dataset_name,
                              cfg,
                              True,
                              output_folder,
                              classes_to_eval=classes_to_eval))
        if evaluator_type == "pascal_voc":
            return PascalVOCDetectionEvaluator(dataset_name)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type))
        if len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)
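As the docstring says, for your own dataset you can skip this dispatch and build the evaluator directly in your script. A minimal sketch, assuming a recent detectron2 and a dataset already registered under the hypothetical name "my_dataset_val" (the `cfg` and `trainer` objects are also assumed to exist):

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset

# "my_dataset_val" is a placeholder for a dataset registered beforehand.
evaluator = COCOEvaluator("my_dataset_val", output_dir="./output/inference")
val_loader = build_detection_test_loader(cfg, "my_dataset_val")
results = inference_on_dataset(trainer.model, val_loader, evaluator)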
Example 2
 @classmethod
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type == "coco":
         evaluator_list.append(
             COCOEvaluator(dataset_name, cfg, True, output_folder))
     if evaluator_type == "pascal_voc":
         return PascalVOCDetectionEvaluator(dataset_name)
     if evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, cfg, True, output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     if len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
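For context: each of these `build_evaluator` variants is a classmethod on a subclass of detectron2's `DefaultTrainer`, whose `test()` method calls `cls.build_evaluator(cfg, dataset_name)` for every dataset in `cfg.DATASETS.TEST`. The surrounding class typically looks like this:

from detectron2.engine import DefaultTrainer

class Trainer(DefaultTrainer):

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        ...  # any of the implementations shown on this page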
Example 3
 @classmethod
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     if "voc" in dataset_name:
         return PascalVOCDetectionEvaluator(dataset_name)
     else:
         return COCOEvaluator(dataset_name, cfg, True, output_folder)
Example 4
 @classmethod
 def build_evaluator(cls,
                     cfg,
                     dataset_name,
                     output_folder=None,
                     AP_method='norm'):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
         evaluator_list.append(
             SemSegEvaluator(
                 dataset_name,
                 distributed=True,
                 num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                 ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                 output_dir=output_folder,
             ))
     if evaluator_type in ["coco", "coco_panoptic_seg"]:
         evaluator_list.append(
             COCOEvaluator(dataset_name,
                           cfg,
                           True,
                           output_folder,
                           AP_method=AP_method))
     if evaluator_type == "coco_panoptic_seg":
         evaluator_list.append(
             COCOPanopticEvaluator(dataset_name, output_folder))
     if evaluator_type == "cityscapes_instance":
         assert (
             torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
         return CityscapesInstanceEvaluator(dataset_name)
     if evaluator_type == "cityscapes_sem_seg":
         assert (
             torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
         return CityscapesSemSegEvaluator(dataset_name)
     elif evaluator_type == "pascal_voc":
         return PascalVOCDetectionEvaluator(dataset_name)
     elif evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, cfg, True, output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     elif len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
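For the "coco_panoptic_seg" type, all three branches above fire, and `DatasetEvaluators` wraps the list so that a single inference pass feeds every evaluator and their result dicts are merged. A minimal hand-rolled equivalent (dataset name hypothetical, recent detectron2 assumed):

from detectron2.evaluation import (COCOEvaluator, COCOPanopticEvaluator,
                                   DatasetEvaluators)

combined = DatasetEvaluators([
    COCOEvaluator("my_panoptic_val", output_dir="./output"),
    COCOPanopticEvaluator("my_panoptic_val", "./output"),
])
# combined.process()/evaluate() fan out to each wrapped evaluator.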
Example 5
 @classmethod
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = d2.data.MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type == "pascal_voc":
         return PascalVOCDetectionEvaluator(dataset_name)
     elif evaluator_type == "coco":
         return COCOEvaluator(dataset_name, cfg, True, output_folder)
     elif evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, cfg, True, output_folder)
Example 6
 @classmethod
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     if "coco" in dataset_name:
         return COCOEvaluator(dataset_name, cfg, True, output_folder)
     elif "cityscapes" in dataset_name:
         # assert (
         #     torch.cuda.device_count() >= comm.get_rank()
         # ), "CityscapesEvaluator currently do not work with multiple machines."
         return CityscapesInstanceEvaluator(dataset_name)
     else:
         assert "voc" in dataset_name
         return PascalVOCDetectionEvaluator(dataset_name)
Example 7
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """Create evaluator(s) for a given dataset.

        Modified from D2's example `tools/train_net.py`"""
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")

        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type

        if evaluator_type == "coco":
            return COCOEvaluator(dataset_name, output_dir=output_folder)
        if evaluator_type == "pascal_voc":
            return PascalVOCDetectionEvaluator(dataset_name)

        raise NotImplementedError(
            "No evaluator implementation for the dataset {} with the type {}".format(dataset_name, evaluator_type)
        )
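Note that this example uses the newer `COCOEvaluator` signature, in which the deprecated `cfg` argument is gone; the older examples on this page pass `(dataset_name, cfg, True, output_folder)` positionally, the `True` being the `distributed` flag. Roughly equivalent, assuming a recent detectron2:

# Older positional form (cfg-based, since deprecated):
COCOEvaluator(dataset_name, cfg, True, output_folder)
# Newer keyword form:
COCOEvaluator(dataset_name, distributed=True, output_dir=output_folder)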
Example 8
 @classmethod
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         model_name = osp.basename(cfg.MODEL.WEIGHTS).split(".")[0]
         output_folder = osp.join(
             cfg.OUTPUT_DIR,
             "inference_{}/{}".format(model_name, dataset_name))
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
         evaluator_list.append(
             SemSegEvaluator(
                 dataset_name,
                 distributed=True,
                 num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                 ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                 output_dir=output_folder,
             ))
     if evaluator_type in ["coco", "coco_panoptic_seg", "coco_bop"]:
         evaluator_list.append(
             MyCOCOEvaluator(dataset_name, cfg, True, output_folder))
         # evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
     if evaluator_type == "coco_panoptic_seg":
         evaluator_list.append(
             COCOPanopticEvaluator(dataset_name, output_folder))
     if evaluator_type == "pascal_voc":
         return PascalVOCDetectionEvaluator(dataset_name)
     if evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, cfg, True, output_folder)
     if evaluator_type == "text":
         return TextEvaluator(dataset_name, cfg, True, output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     if len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
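Unlike the other examples, this variant derives the default output folder from the checkpoint name, which keeps evaluations of different model snapshots apart. For example (values hypothetical):

# cfg.MODEL.WEIGHTS = "output/model_0099999.pth", dataset_name = "my_val"
# -> output_folder = "<cfg.OUTPUT_DIR>/inference_model_0099999/my_val"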
Example 9
 @classmethod
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
         evaluator_list.append(
             SemSegEvaluator(
                 dataset_name,
                 distributed=True,
                 num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                 ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                 output_dir=output_folder,
             ))
     if evaluator_type in ["coco", "coco_panoptic_seg"]:
         evaluator_list.append(
             COCOEvaluator(dataset_name, cfg, True, output_folder))
     if evaluator_type == "coco_panoptic_seg":
         evaluator_list.append(
             COCOPanopticEvaluator(dataset_name, output_folder))
     if evaluator_type == "cityscapes_instance":
         assert (
             torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
         return CityscapesInstanceEvaluator(dataset_name)
     if evaluator_type == "cityscapes_sem_seg":
         assert (
             torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
         return CityscapesSemSegEvaluator(dataset_name)
     elif evaluator_type == "pascal_voc":
         return PascalVOCDetectionEvaluator(dataset_name)
     elif evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, cfg, True, output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     elif len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example 10
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type

        if evaluator_type == "coco":
            evaluator_list.append(COCOEvaluator(
                dataset_name, output_dir=output_folder))
        elif evaluator_type == "pascal_voc":
            return PascalVOCDetectionEvaluator(dataset_name)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        elif len(evaluator_list) == 1:
            return evaluator_list[0]

        return DatasetEvaluators(evaluator_list)
Example 11
import json
import os

from detectron2.evaluation import COCOEvaluator, PascalVOCDetectionEvaluator

# Fixing the image IDs for PASCAL evaluation:
for image in images:
    image["image_id"] = os.path.splitext(os.path.split(image["image_id"])[-1])[0]

# Evaluating the prediction:

# Setting up the COCO evaluation:
coco_evaluator = COCOEvaluator(dataset_name, cfg, distributed=True, output_dir=output_folder_path)

# Setting up the PASCAL VOC evaluation:
my_dataset.dirname = os.path.join(output_folder_path, "PascalVOCAnnotations")
my_dataset.split = 'test'
my_dataset.year = 2012
dataset.to_pascal(my_dataset.dirname) # Converting the dataset to PASCAL VOC
pascal_evaluator = PascalVOCDetectionEvaluator(dataset_name)

# COCO evaluating:
coco_evaluator.reset()
coco_evaluator.process(images, outputs_list)
coco_results = coco_evaluator.evaluate()

# Dumping the COCO evaluation results:
coco_results_file_path = os.path.join(output_folder_path, "coco_eval_results.json")
with open(coco_results_file_path, "w") as coco_results_file:
    json.dump(coco_results, coco_results_file, indent=2)

print("COCO EVALUATION FINISHED")

# PASCAL VOC evaluating:
pascal_evaluator.reset()
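The snippet breaks off after `pascal_evaluator.reset()`. A plausible continuation, mirroring the COCO block above and reusing the same `images` and `outputs_list`:

pascal_evaluator.process(images, outputs_list)
pascal_results = pascal_evaluator.evaluate()

# Dumping the PASCAL VOC evaluation results:
pascal_results_file_path = os.path.join(output_folder_path, "pascal_eval_results.json")
with open(pascal_results_file_path, "w") as pascal_results_file:
    json.dump(pascal_results, pascal_results_file, indent=2)

print("PASCAL VOC EVALUATION FINISHED")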
Example 12
 @classmethod
 def build_evaluator(cls, cfg, dataset_name):
     output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluators = [PascalVOCDetectionEvaluator(dataset_name)]  # FIXME: the evaluator still needs to be revised
     return DatasetEvaluators(evaluators)
Example 13
import os

from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.modeling import build_model

cfg_source = get_cfg()
cfg_source.merge_from_file(
    model_zoo.get_config_file("COCO-Detection/retinanet_R_101_FPN_3x.yaml"))
cfg_source.DATASETS.TRAIN = ("city_trainS", )
cfg_source.DATALOADER.NUM_WORKERS = 0
cfg_source.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-Detection/retinanet_R_101_FPN_3x.yaml")
cfg_source.SOLVER.IMS_PER_BATCH = 2
cfg_source.SOLVER.BASE_LR = 0.001
cfg_source.SOLVER.MAX_ITER = 70000
cfg_source.SOLVER.STEPS = (50000, )
cfg_source.INPUT.MIN_SIZE_TRAIN = (600, )
cfg_source.INPUT.MIN_SIZE_TEST = 0
os.makedirs(cfg_source.OUTPUT_DIR, exist_ok=True)
cfg_source.MODEL.RETINANET.NUM_CLASSES = 8
model = build_model(cfg_source)

cfg_target = get_cfg()
cfg_target.DATASETS.TRAIN = ("city_trainT", )
cfg_target.INPUT.MIN_SIZE_TRAIN = (600, )
cfg_target.DATALOADER.NUM_WORKERS = 0
cfg_target.SOLVER.IMS_PER_BATCH = 2

# NOTE: `do_train` is a custom training loop defined elsewhere in the original script.
do_train(cfg_source, cfg_target, model, False)

from detectron2.evaluation import COCOEvaluator, inference_on_dataset, PascalVOCDetectionEvaluator
from detectron2.data import build_detection_test_loader

evaluator = PascalVOCDetectionEvaluator("city_testT")
val_loader = build_detection_test_loader(cfg_source, "city_testT")
res = inference_on_dataset(model, val_loader, evaluator)
print(res)
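One caveat: `PascalVOCDetectionEvaluator` reads `dirname`, `split`, and `year` from the dataset's metadata rather than from `cfg`, so a custom dataset such as "city_testT" must have those fields set at registration time. A minimal sketch (the VOC-style root path is hypothetical):

from detectron2.data import MetadataCatalog

MetadataCatalog.get("city_testT").set(
    dirname="datasets/city_voc",  # VOC layout: Annotations/, ImageSets/, JPEGImages/
    split="test",
    year=2012,  # the evaluator only accepts 2007 or 2012
)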
Example 14
 @classmethod
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create VOC evaluator(s) for a given dataset.
     """
     return PascalVOCDetectionEvaluator(dataset_name)
Example 15
    def get_evaluator(self, cfg, dataset_name, output_folder=None):
        """Create evaluator(s) for a given dataset.

        This uses the special metadata "evaluator_type" associated with
        each builtin dataset. For your own dataset, you can simply
        create an evaluator manually in your script and do not have to
        worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = osp.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                    ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                    output_dir=output_folder,
                ))
        if evaluator_type in ["coco", "coco_panoptic_seg"]:
            evaluator_list.append(
                COCOEvaluator(dataset_name, cfg, True, output_folder))
        if evaluator_type == "coco_panoptic_seg":
            evaluator_list.append(
                COCOPanopticEvaluator(dataset_name, output_folder))
        if evaluator_type == "cityscapes_instance":
            assert (
                torch.cuda.device_count() >= self.global_rank
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == "cityscapes_sem_seg":
            assert (
                torch.cuda.device_count() >= self.global_rank
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesSemSegEvaluator(dataset_name)
        if evaluator_type == "pascal_voc":
            return PascalVOCDetectionEvaluator(dataset_name)
        if evaluator_type == "lvis":
            return LVISEvaluator(dataset_name, cfg, True, output_folder)

        _distributed = self.world_size > 1
        dataset_meta = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
        train_obj_names = dataset_meta.objs
        if evaluator_type == "bop":
            if cfg.VAL.get("USE_BOP", False):
                return GDRN_Evaluator(cfg,
                                      dataset_name,
                                      distributed=_distributed,
                                      output_dir=output_folder,
                                      train_objs=train_obj_names)
            else:
                return GDRN_EvaluatorCustom(cfg,
                                            dataset_name,
                                            distributed=_distributed,
                                            output_dir=output_folder,
                                            train_objs=train_obj_names)

        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type))
        if len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)