def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
         evaluator_list.append(
             SemSegEvaluator(
                 dataset_name,
                 distributed=True,
                 num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                 ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                 output_dir=output_folder,
             ))
     if evaluator_type in ["coco", "coco_panoptic_seg"]:
         evaluator_list.append(
             COCOEvaluator(dataset_name, cfg, True, output_folder))
     if evaluator_type == "coco_panoptic_seg":
         evaluator_list.append(
             COCOPanopticEvaluator(dataset_name, output_folder))
     if evaluator_type == "cityscapes":
          assert (
              torch.cuda.device_count() > comm.get_rank()
          ), "CityscapesEvaluator currently does not work with multiple machines."
         return CityscapesEvaluator(dataset_name)
     if evaluator_type == "pascal_voc":
         return PascalVOCDetectionEvaluator(dataset_name)
     if evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, cfg, True, output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     if len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
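
To drive the "evaluator_type" lookup above for your own data, you register the dataset and attach that metadata key yourself. A minimal sketch using detectron2's catalog APIs; the dataset name "my_dataset_val" and the loader function are hypothetical:

from detectron2.data import DatasetCatalog, MetadataCatalog

def load_my_dataset():
    # Return a list of dicts in detectron2's standard dataset format.
    return []

DatasetCatalog.register("my_dataset_val", load_my_dataset)
# "evaluator_type" is the metadata key that build_evaluator switches on.
MetadataCatalog.get("my_dataset_val").set(evaluator_type="coco")

# build_evaluator(cfg, "my_dataset_val") would now return a COCOEvaluator.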
Example 2
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
         evaluator_list.append(
             SemSegEvaluator(
                 dataset_name,
                 distributed=True,
                 num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                 ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                 output_dir=output_folder,
             ))
     if evaluator_type in ["coco", "coco_panoptic_seg"]:
         evaluator_list.append(
             COCOEvaluator(dataset_name, cfg, True, output_folder))
     if evaluator_type == "coco_panoptic_seg":
         evaluator_list.append(
             COCOPanopticEvaluator(dataset_name, output_folder))
     if evaluator_type == "cityscapes_instance":
          assert (
              torch.cuda.device_count() > comm.get_rank()
          ), "CityscapesEvaluator currently does not work with multiple machines."
         return CityscapesInstanceEvaluator(dataset_name)
     if evaluator_type == "cityscapes_sem_seg":
          assert (
              torch.cuda.device_count() > comm.get_rank()
          ), "CityscapesEvaluator currently does not work with multiple machines."
         return CityscapesSemSegEvaluator(dataset_name)
      if evaluator_type == "pascal_voc":
          return PascalVOCDetectionEvaluator(dataset_name)
      if evaluator_type == "lvis":
          return LVISEvaluator(dataset_name, cfg, True, output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
      if len(evaluator_list) == 1:
          return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
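
For "coco_panoptic_seg" this variant returns a DatasetEvaluators wrapper that runs the semantic, instance, and panoptic evaluators over the same model outputs. A toy illustration of the wrapper's contract, assuming only detectron2's DatasetEvaluator interface; CountingEvaluator is hypothetical:

from detectron2.evaluation import DatasetEvaluator, DatasetEvaluators

class CountingEvaluator(DatasetEvaluator):
    """Toy evaluator that counts processed inputs (illustrative only)."""

    def __init__(self, name):
        self.name = name

    def reset(self):
        self.count = 0

    def process(self, inputs, outputs):
        self.count += len(inputs)

    def evaluate(self):
        # Result keys must be unique across the combined evaluators.
        return {self.name: {"num_inputs": self.count}}

# inference_on_dataset calls reset/process/evaluate on every child and
# merges the per-evaluator result dicts into one.
combined = DatasetEvaluators([CountingEvaluator("pass_a"), CountingEvaluator("pass_b")])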
Example 3
def do_test(cfg, model):
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        if cfg.MULTI_DATASET.ENABLED:
            # TODO: refactor. The model may be wrapped (e.g. by
            # DistributedDataParallel), so look for set_eval_dataset on the
            # model itself or on its known inner attributes.
            for candidate in (model, getattr(model, "module", None),
                              getattr(model, "model", None)):
                if candidate is not None and hasattr(candidate, "set_eval_dataset"):
                    candidate.set_eval_dataset(dataset_name)
                    break
            else:
                logger.warning("set eval dataset failed.")
        data_loader = build_detection_test_loader(cfg, dataset_name)
        output_folder = os.path.join(
            cfg.OUTPUT_DIR, "inference_{}".format(dataset_name))
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        
        if cfg.MULTI_DATASET.UNIFIED_EVAL:
            evaluator = get_unified_evaluator(
                evaluator_type, dataset_name, cfg, True, output_folder)
            # print('evaluator', evaluator)
        else:
            if evaluator_type == "lvis":
                evaluator = LVISEvaluator(dataset_name, cfg, True, output_folder)
            elif evaluator_type == 'oid':
                evaluator = OIDEvaluator(dataset_name, cfg, True, output_folder)
            else:
                evaluator = COCOEvaluator(dataset_name, cfg, True, output_folder)
            
        results[dataset_name] = inference_on_dataset(
            model, data_loader, evaluator)
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results[dataset_name])
    if len(results) == 1:
        results = list(results.values())[0]
    return results
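
Probing model, model.module, and model.model, as the loop above does, is a recurring need once models are wrapped for distributed training. A general-purpose helper, sketched under the assumption that wrappers expose the inner model as .module (DistributedDataParallel) or .model; unwrap_model is a hypothetical name:

def unwrap_model(model):
    # DistributedDataParallel exposes the wrapped model as .module;
    # some meta-architectures nest the real model under .model.
    for attr in ("module", "model"):
        inner = getattr(model, attr, None)
        if inner is not None and hasattr(inner, "set_eval_dataset"):
            return inner
    return model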
Example 4
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        metadata = MetadataCatalog.get(dataset_name)
        # Check original meta classes, also tells us if we have a meta dataset
        # Ugly but easier to support than other options
        classes_to_eval = None
        if hasattr(metadata, 'classes_to_eval'):
            classes_to_eval = metadata.classes_to_eval
            print(f'Using meta-dataset with classes {classes_to_eval}')

        # TODO: temporary hack; force COCO evaluation on all datasets for the moment.
        evaluator_list.append(
            COCOEvaluator(dataset_name, cfg, True, output_folder, classes_to_eval=classes_to_eval))
        return evaluator_list[0]

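        # NOTE: the early return above makes everything below unreachable
        # while the temporary COCO-only hack is in place; it is kept for
        # when the hack is removed.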
        evaluator_type = metadata.evaluator_type
        if evaluator_type == "coco":
            evaluator_list.append(
                COCOEvaluator(dataset_name, cfg, True, output_folder, classes_to_eval=classes_to_eval))
        if evaluator_type == "pascal_voc":
            return PascalVOCDetectionEvaluator(dataset_name)
        if evaluator_type == "lvis":
            return LVISEvaluator(dataset_name, cfg, True, output_folder)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        if len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)
Example 5
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, output_dir=output_folder)
     if evaluator_type == "coco":
         return COCOEvaluator(dataset_name, output_dir=output_folder)
     if evaluator_type == "sem_seg":
         return SemSegEvaluator(
             dataset_name,
             distributed=True,
             output_dir=output_folder,
         )
     if evaluator_type == "cityscapes_instance":
         assert (
             torch.cuda.device_count() > comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
         return CityscapesInstanceEvaluator(dataset_name)
     if evaluator_type == "cityscapes_sem_seg":
         assert (
             torch.cuda.device_count() > comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
         return CityscapesSemSegEvaluator(dataset_name)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type
             )
         )
     if len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example 6
def build_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                output_dir=output_folder,
            ))
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        evaluator_list.append(
            COCOEvaluator(dataset_name, output_dir=output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(
            COCOPanopticEvaluator(dataset_name, output_folder))
    if evaluator_type == "cityscapes_instance":
        return CityscapesInstanceEvaluator(dataset_name)
    if evaluator_type == "cityscapes_sem_seg":
        return CityscapesSemSegEvaluator(dataset_name)
    if evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    if evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, output_dir=output_folder)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(
                dataset_name, evaluator_type))
    if len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
Example 7
def build_evaluator(
     cls,
     cfg: CfgNode,
     dataset_name: str,
     output_folder: Optional[str] = None,
      embedder: Optional[Embedder] = None,
 ) -> DatasetEvaluators:
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluators = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type == "coco":
         evaluators.append(
             COCOEvaluator(dataset_name, output_dir=output_folder))
     elif evaluator_type == "lvis":
         evaluators.append(
             LVISEvaluator(dataset_name, output_dir=output_folder))
     if cfg.MODEL.DENSEPOSE_ON:
         evaluators.append(
             DensePoseCOCOEvaluator(dataset_name,
                                    True,
                                    output_folder,
                                    embedder=embedder))
     return DatasetEvaluators(evaluators)
Example 8
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     return LVISEvaluator(dataset_name, cfg, True, output_folder)
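
This is the usual way to specialize detectron2's DefaultTrainer for a single benchmark: override the build_evaluator classmethod and return the one evaluator you need. A sketch of the surrounding class, mirroring the snippet's call signature; the class name LVISTrainer is hypothetical:

import os

from detectron2.engine import DefaultTrainer
from detectron2.evaluation import LVISEvaluator

class LVISTrainer(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        return LVISEvaluator(dataset_name, cfg, True, output_folder)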
Example 9
    def get_evaluator(self, cfg, dataset_name, output_folder=None):
        """Create evaluator(s) for a given dataset.

        This uses the special metadata "evaluator_type" associated with
        each builtin dataset. For your own dataset, you can simply
        create an evaluator manually in your script and do not have to
        worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = osp.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                    ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                    output_dir=output_folder,
                ))
        if evaluator_type in ["coco", "coco_panoptic_seg"]:
            evaluator_list.append(
                COCOEvaluator(dataset_name, cfg, True, output_folder))
        if evaluator_type == "coco_panoptic_seg":
            evaluator_list.append(
                COCOPanopticEvaluator(dataset_name, output_folder))
        if evaluator_type == "cityscapes_instance":
            assert (
                torch.cuda.device_count() > self.global_rank
            ), "CityscapesEvaluator currently does not work with multiple machines."
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == "cityscapes_sem_seg":
            assert (
                torch.cuda.device_count() > self.global_rank
            ), "CityscapesEvaluator currently does not work with multiple machines."
            return CityscapesSemSegEvaluator(dataset_name)
        if evaluator_type == "pascal_voc":
            return PascalVOCDetectionEvaluator(dataset_name)
        if evaluator_type == "lvis":
            return LVISEvaluator(dataset_name, cfg, True, output_folder)

        _distributed = self.world_size > 1
        dataset_meta = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
        train_obj_names = dataset_meta.objs
        if evaluator_type == "bop":
            if cfg.VAL.get("USE_BOP", False):
                return GDRN_Evaluator(cfg,
                                      dataset_name,
                                      distributed=_distributed,
                                      output_dir=output_folder,
                                      train_objs=train_obj_names)
            else:
                return GDRN_EvaluatorCustom(cfg,
                                            dataset_name,
                                            distributed=_distributed,
                                            output_dir=output_folder,
                                            train_objs=train_obj_names)

        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type))
        if len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)