Example #1
 def get_evaluator(cfg, dataset_name, output_folder):
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type in ["coco", "coco_panoptic_seg"]:
         # D2 is in the process of reducing the use of cfg.
         dataset_evaluators = COCOEvaluator(
             dataset_name,
             output_dir=output_folder,
             kpt_oks_sigmas=cfg.TEST.KEYPOINT_OKS_SIGMAS,
             max_dets_per_image=cfg.TEST.DETECTIONS_PER_IMAGE,
         )
     elif evaluator_type in ["rotated_coco"]:
         dataset_evaluators = DatasetEvaluators(
             [RotatedCOCOEvaluator(dataset_name, cfg, True, output_folder)])
     elif evaluator_type in ["lvis"]:
         dataset_evaluators = LVISEvaluator(
             dataset_name,
             output_dir=output_folder,
             max_dets_per_image=cfg.TEST.DETECTIONS_PER_IMAGE,
         )
     else:
         dataset_evaluators = D2Trainer.build_evaluator(
             cfg, dataset_name, output_folder)
     if not isinstance(dataset_evaluators, DatasetEvaluators):
         dataset_evaluators = DatasetEvaluators([dataset_evaluators])
     return dataset_evaluators
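All of these factories feed Detectron2's standard evaluation loop. Below is a minimal sketch of how the evaluator returned above is typically consumed; `cfg`, `model`, and the dataset name are assumed to already exist and are not part of the original example.

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import inference_on_dataset

# Assumed context (placeholders): `cfg` is a detectron2 CfgNode, `model` is a
# trained model, and "my_dataset_val" is a registered dataset.
dataset_name = "my_dataset_val"
evaluator = get_evaluator(cfg, dataset_name, output_folder="./output/inference")
data_loader = build_detection_test_loader(cfg, dataset_name)
# Runs the model over the loader, feeds each batch to the evaluator,
# and returns the merged metrics dict.
results = inference_on_dataset(model, data_loader, evaluator)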
Example #2
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):  # It is not implemented by default.
     # output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     # output_folder = os.path.join(cfg.OUTPUT_DIR)
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
     return DatasetEvaluators(evaluators)
Example #3
 def build_evaluator(
     cls,
     cfg: CfgNode,
     dataset_name: str,
     output_folder: Optional[str] = None,
     embedder: Embedder = None,
 ) -> DatasetEvaluators:
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluators = []
     # Note: we currently use COCO evaluator for both COCO and LVIS datasets
     # to have compatible metrics. LVIS bbox evaluator could also be used
     # with an adapter to properly handle filtered / mapped categories
     # evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     # if evaluator_type == "coco":
     #     evaluators.append(COCOEvaluator(dataset_name, output_dir=output_folder))
     # elif evaluator_type == "lvis":
     #     evaluators.append(LVISEvaluator(dataset_name, output_dir=output_folder))
     evaluators.append(
         Detectron2COCOEvaluatorAdapter(dataset_name,
                                        output_dir=output_folder))
     if cfg.MODEL.DENSEPOSE_ON:
         storage = build_densepose_evaluator_storage(cfg, output_folder)
         evaluators.append(
             DensePoseCOCOEvaluator(
                 dataset_name,
                 True,
                 output_folder,
                 evaluator_type=cfg.DENSEPOSE_EVALUATION.TYPE,
                  min_iou_threshold=cfg.DENSEPOSE_EVALUATION.MIN_IOU_THRESHOLD,
                 storage=storage,
                 embedder=embedder,
             ))
     return DatasetEvaluators(evaluators)
Example #4
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type

        if cfg.DATASETS.NAME == 'flickr30k':
            evaluator_list.append(
                FLICKR30KEvaluatorKAC(
                    dataset_name,
                    distributed=True,
                    output_dir=output_folder,
                ))
        else:
            raise NotImplementedError

        ## TODO
        ## the result can be passed out as dict
        return DatasetEvaluators(evaluator_list)
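The docstring's "create an evaluator manually" route amounts to registering your dataset and either setting the evaluator_type metadata these factories dispatch on, or constructing the evaluator directly. A minimal sketch follows; the dataset name and loader function are hypothetical, and it assumes the newer COCOEvaluator signature that takes output_dir as a keyword.

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.evaluation import COCOEvaluator

def get_my_dicts():
    # Return your dataset as a list of detectron2-format dicts.
    return []

DatasetCatalog.register("my_dataset_val", get_my_dicts)
# Setting evaluator_type is what lets factories like the one above dispatch...
MetadataCatalog.get("my_dataset_val").set(evaluator_type="coco")
# ...but you can also bypass the factory and build the evaluator directly:
evaluator = COCOEvaluator("my_dataset_val", output_dir="./output/inference")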
Example #5
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        metadata = MetadataCatalog.get(dataset_name)
        # Note: checking the original meta classes also tells us if we have a meta dataset.
        # Ugly, but easier to support than the other options.
        classes_to_eval = []
        if hasattr(metadata, 'classes_to_eval'):
            classes_to_eval = metadata.classes_to_eval
            print(f'Using meta-dataset with classes {classes_to_eval}')

        evaluator_type = metadata.evaluator_type
        if evaluator_type == "coco":
            evaluator_list.append(
                COCOEvaluator(dataset_name,
                              cfg,
                              True,
                              output_folder,
                              classes_to_eval=classes_to_eval))
        if evaluator_type == "pascal_voc":
            return PascalVOCDetectionEvaluator(dataset_name)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type))
        if len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)
Example #6
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
         evaluator_list.append(
             SemSegEvaluator(
                 dataset_name,
                 distributed=True,
                 num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                 ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                 output_dir=output_folder,
             ))
     if evaluator_type in ["coco", "coco_panoptic_seg"]:
         evaluator_list.append(
             COCOEvaluator(dataset_name, cfg, True, output_folder))
     if evaluator_type == "coco_panoptic_seg":
         evaluator_list.append(
             COCOPanopticEvaluator(dataset_name, output_folder))
     elif evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, cfg, True, output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     elif len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example #7
 def build_evaluator(cls, cfg: CfgNode, dataset_name):
     output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
     if cfg.MODEL.DENSEPOSE_ON:
         evaluators.append(
             DensePoseCOCOEvaluator(dataset_name, True, output_folder))
     return DatasetEvaluators(evaluators)
Example #8
def evaluate(cfg=None,
             trainer=None,
             pdmClasses=None,
             split="_test",
             threshold=0.7):
    if cfg is None:
        cfg = get_cfg(find_outputn())
    dataset = CustomConfig.dataset
    if pdmClasses is None:
        pdmClasses = CustomConfig.pdmClasses
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = threshold
    if trainer is None:
        trainer = CustomTrainer(cfg)
        trainer.resume_or_load(resume=False)

    evaluator = DatasetEvaluators([
        COCOEvaluator(
            dataset + split,
            ("bbox", "segm"),
            False,
            output_dir=cfg.OUTPUT_DIR + "/coco_eval_test",
        ),
        PDM_Evaluator(dataset + split, pdmClasses),
    ])
    test_loader = build_detection_test_loader(cfg, dataset + split)
    print(
        f"{cfg.OUTPUT_DIR.split('/')[-1]}={inference_on_dataset(trainer.model, test_loader, evaluator)}"
    )
Example #9
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type == "coco":
         evaluator_list.append(
             COCOEvaluator(dataset_name, cfg, True, output_folder))
     if evaluator_type == "pascal_voc":
         return PascalVOCDetectionEvaluator(dataset_name)
     if evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, cfg, True, output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     if len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example #10
def do_test(cfg, model):
    dataset_name = cfg.DATASETS.TEST[0]
    evaluator = DatasetEvaluators(
        [COCOEvaluator(dataset_name, cfg, True, cfg.OUTPUT_DIR)])
    data_loader = build_detection_test_loader(cfg, dataset_name)
    results = inference_on_dataset(model, data_loader, evaluator)
    return results
Example #11
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, cfg, True, output_folder)
     if evaluator_type == "coco":
         return COCOEvaluator(dataset_name, cfg, True, output_folder)
     if evaluator_type == "cityscapes":
         assert (
             torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
         return CityscapesEvaluator(dataset_name)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     if len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example #12
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type in ["cityscapes_panoptic_seg", "coco_panoptic_seg"]:
         evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
     if evaluator_type == "cityscapes_panoptic_seg":
         assert (
             torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
         evaluator_list.append(CityscapesSemSegEvaluator(dataset_name))
         evaluator_list.append(CityscapesInstanceEvaluator(dataset_name))
     if evaluator_type == "coco_panoptic_seg":
         # Evaluate bbox and segm.
         cfg.defrost()
         cfg.MODEL.MASK_ON = True
         cfg.MODEL.KEYPOINT_ON = False
         cfg.freeze()
         evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type
             )
         )
     elif len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
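Example #12 flips MASK_ON/KEYPOINT_ON before constructing COCOEvaluator, which only works because a CfgNode allows defrost/freeze cycles. A minimal standalone sketch of that pattern:

from detectron2.config import get_cfg

cfg = get_cfg()
cfg.freeze()                   # frozen configs raise on attribute assignment
cfg.defrost()                  # make the config mutable again
cfg.MODEL.MASK_ON = True       # flip the flags the evaluator needs
cfg.MODEL.KEYPOINT_ON = False
cfg.freeze()                   # re-freeze so later code cannot mutate it

Since the cfg object is shared, the flags stay flipped for the caller after build_evaluator returns; that side effect is easy to miss.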
Example #13
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type == "sem_seg":
         return SemSegEvaluator(
             dataset_name,
             distributed=True,
             num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
             ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
             output_dir=output_folder,
         )
     if evaluator_type == "cityscapes_sem_seg":
         assert (
             torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
         return CityscapesSemSegEvaluator(dataset_name)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     if len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example #14
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluators_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type in ["sem_seg", "isprs_panoptic_seg"]:
            evaluators_list.append(
                ISPRSSemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    output_dir=output_folder,
                    num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES))
        if evaluator_type in [
                "isprs_instance", "isprs_panoptic_seg", "isprs_rpn"
        ]:
            if cfg.ISPRS.LABEL.BOXMODE == "ROTATED":
                evaluators_list.append(
                    RotatedCOCOEvaluatorWithMask(dataset_name, cfg, True,
                                                 output_folder))
            else:
                evaluators_list.append(
                    COCOEvaluator(dataset_name, cfg, True, output_folder))
        if evaluator_type == "isprs_panoptic_seg":
            evaluators_list.append(
                COCOPanopticEvaluator(dataset_name, output_folder))

        return DatasetEvaluators(evaluators_list)
Example #15
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if "dota_val_mini" in dataset_name:
         evaluator_type = 'rot'
     if "dota_test" in dataset_name:
         evaluator_type = 'hw'
     if 'hw' in evaluator_type:
         evaluator_list.append(DotaVOCDetectionEvaluator(
             dataset_name, 'hw'))
     if 'rot' in evaluator_type:
         evaluator_list.append(
             DotaVOCDetectionEvaluator(dataset_name, 'rot'))
     if 'hbb' in evaluator_type:
         evaluator_list.append(
             DotaVOCDetectionEvaluator(dataset_name, 'hbb'))
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     if len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example #16
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                    ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                    output_dir=output_folder))
        if evaluator_type in ["coco", "coco_panoptic_seg"]:
            evaluator_list.append(
                COCOEvaluator(dataset_name, cfg, True, output_folder))
        if evaluator_type == "coco_panoptic_seg":
            evaluator_list.append(
                COCOPanopticEvaluatorWith2ChPNG(
                    dataset_name,
                    output_folder,
                    gen_png=cfg.MODEL.SOGNET.GEN_PNG))
        elif evaluator_type == "cityscapes":
            assert (
                torch.cuda.device_count() >= comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesEvaluator(dataset_name)

        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type))
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)
Example #17
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type == "sem_seg":
         return SemSegEvaluator(
             dataset_name,
             distributed=True,
             output_dir=output_folder,
         )
     if evaluator_type == "cityscapes_sem_seg":
         return CityscapesSemSegEvaluator(dataset_name)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     if len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example #18
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        if 'sku110' in dataset_name:
            return VOCDetectionEvaluator(cfg, dataset_name)

        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                    ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                    output_dir=output_folder,
                )
            )
        if evaluator_type in ["coco", "coco_panoptic_seg"]:
            evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
        if evaluator_type == "coco_panoptic_seg":
            evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))

        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        elif len(evaluator_list) == 1:
            return evaluator_list[0]

        return DatasetEvaluators(evaluator_list)
Example #19
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     pdmClasses = CustomConfig.pdmClasses
     return DatasetEvaluators([
         COCOEvaluator(dataset_name, ("bbox", "segm"), True, output_folder),
         PDM_Evaluator(dataset_name, pdmClasses),
     ])
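Example #19 mixes a stock COCOEvaluator with a project-specific PDM_Evaluator inside one DatasetEvaluators wrapper; DatasetEvaluators calls every member's reset/process/evaluate and merges the result dicts. A minimal sketch of the DatasetEvaluator interface such a custom evaluator has to implement (the metric below is a hypothetical stand-in, not PDM_Evaluator's actual logic):

from detectron2.evaluation import DatasetEvaluator

class CountingEvaluator(DatasetEvaluator):
    """Hypothetical stand-in for a custom evaluator like PDM_Evaluator."""

    def reset(self):
        self.num_instances = 0

    def process(self, inputs, outputs):
        # Called once per batch with the model's inputs and outputs.
        for output in outputs:
            self.num_instances += len(output["instances"])

    def evaluate(self):
        # Each evaluator returns a dict of results; DatasetEvaluators
        # merges the dicts from all of its members.
        return {"counting": {"num_instances": self.num_instances}}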
Example #20
 def build_evaluator(cls, cfg, dataset_name):
     output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if cfg.MODEL.DENSEPOSE_ON and cfg.MODEL.ROI_DENSEPOSE_HEAD.RCNN_HEAD_ON:
         evaluators.append(
             DensePoseCOCOEvaluator(dataset_name, True, output_folder))
     # if evaluator_type in ["coco", "coco_panoptic_seg"]:
     #     evaluators.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
     return DatasetEvaluators(evaluators)
Example #21
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_list.append(
         COCOEvaluator(dataset_name, cfg, True, output_folder))
     if len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example #22
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type in ["coco"]:
         evaluator_list.append(
             PartiallySupervisedEvaluator(dataset_name, cfg, True,
                                          output_folder))
      if len(evaluator_list) == 1:
          return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example #23
File: train_net.py  Project: unsky/YOLOF
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each
     builtin dataset.
     For your own dataset, you can simply create an evaluator manually in
     your script and do not have to worry about the hacky if-else logic
     here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
         evaluator_list.append(
             SemSegEvaluator(
                 dataset_name,
                 distributed=True,
                 output_dir=output_folder,
             )
         )
     if evaluator_type in ["coco", "coco_panoptic_seg"]:
         evaluator_list.append(
             COCOEvaluator(dataset_name, output_dir=output_folder))
     if evaluator_type == "coco_panoptic_seg":
         evaluator_list.append(
             COCOPanopticEvaluator(dataset_name, output_folder))
     if evaluator_type == "cityscapes_instance":
         assert (
                 torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently " \
            "do not work with multiple machines."
         return CityscapesInstanceEvaluator(dataset_name)
     if evaluator_type == "cityscapes_sem_seg":
         assert (
                 torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently " \
            "do not work with multiple machines."
         return CityscapesSemSegEvaluator(dataset_name)
     elif evaluator_type == "pascal_voc":
         return PascalVOCDetectionEvaluator(dataset_name)
     elif evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, output_dir=output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type
             )
         )
     elif len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example #24
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            os.makedirs(cfg.OUTPUT_DIR + f"/eval/{dataset_name}",
                        exist_ok=True)

        return DatasetEvaluators([
            COCOEvaluatorWithRecall(dataset_name,
                                    cfg,
                                    False,
                                    output_dir=cfg.OUTPUT_DIR +
                                    f"/eval/{dataset_name}"),
            SickTreesEvaluator()
        ])
Example #25
 def build_evaluator(cls, cfg_arg: CfgNode, dataset_name):
      output_folder = os.path.join(cfg_arg.OUTPUT_DIR, "inference")
      eval_img = cfg_arg.TEST.TEST_IMAGES[0]
     if dataset_name != 'kodak_test':
         evaluators = [COCOEvaluator(dataset_name, cfg_arg, True)]
     else:
         evaluators = []
      if cfg_arg.MODEL.RECONSTRUCT_HEADS_ON and dataset_name == 'kodak_test':
         evaluators.append(
             ReconstructionEvaluator(dataset_name,
                                     output_folder,
                                     eval_img=eval_img))
     return DatasetEvaluators(evaluators)
Example #26
    def build_evaluator(cls, cfg, dataset_name):
        """
        Builds evaluators for post-training mAP report.
        Args:
            cfg(CfgNode): a detectron2 CfgNode
            dataset_name(str): registered dataset name

        Returns:
            detectron2 DatasetEvaluators object
        """
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
        return DatasetEvaluators(evaluators)
Example #27
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.

        This uses the special metadata "evaluator_type" associated with
        each builtin dataset. For your own dataset, you can simply
        create an evaluator manually in your script and do not have to
        worry about the hacky if-else logic here.

        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type in ['sem_seg', 'coco_panoptic_seg']:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                    ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                    output_dir=output_folder,
                ))
        if evaluator_type in ['coco', 'coco_panoptic_seg', 'indiscapes']:
            evaluator_list.append(
                COCOEvaluator(dataset_name, cfg, True, output_folder))
            evaluator_list.append(HDEvaluator(dataset_name))
        if evaluator_type == 'coco_panoptic_seg':
            evaluator_list.append(
                COCOPanopticEvaluator(dataset_name, output_folder))
        if evaluator_type == 'cityscapes_instance':
            assert (
                torch.cuda.device_count() >= comm.get_rank()
            ), 'CityscapesEvaluator currently do not work with multiple machines.'
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == 'cityscapes_sem_seg':
            assert (
                torch.cuda.device_count() >= comm.get_rank()
            ), 'CityscapesEvaluator currently do not work with multiple machines.'
            return CityscapesSemSegEvaluator(dataset_name)
        elif evaluator_type == 'pascal_voc':
            return PascalVOCDetectionEvaluator(dataset_name)
        elif evaluator_type == 'lvis':
            return LVISEvaluator(dataset_name, cfg, True, output_folder)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                'no Evaluator for the dataset {} with the type {}'.format(
                    dataset_name, evaluator_type))
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)
Example #28
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type == "coco":
         return COCOEvaluator(dataset_name, cfg, True, output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type))
     if len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example #29
 def build_evaluator(cls, cfg, dataset_name, output_folder=None):
     """
     Create evaluator(s) for a given dataset.
     This uses the special metadata "evaluator_type" associated with each builtin dataset.
     For your own dataset, you can simply create an evaluator manually in your
     script and do not have to worry about the hacky if-else logic here.
     """
     if output_folder is None:
         output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
     evaluator_list = []
     evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
     if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
         evaluator_list.append(
             SemSegEvaluator(
                 dataset_name,
                 distributed=True,
                 num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                 ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                 output_dir=output_folder,
             )
         )
     if evaluator_type in ["coco", "coco_panoptic_seg", "d2sa", "coco_amodal_cls", "kins", "sailvos", "cocoa"] \
             and cfg.TEST.EVAL_AMODAL_TYPE == "NORMAL":
         evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
     if evaluator_type in ["d2sa", "coco_amodal_cls", "kins", "sailvos", "cocoa"] \
             and cfg.TEST.EVAL_AMODAL_TYPE == "AMODAL_VISIBLE":
         evaluator_list.append(AmodalVisibleEvaluator(dataset_name, cfg, True, output_folder))
     # if evaluator_type == "coco_amodal":
     #     evaluator_list.append(COCOAmodalEvaluator(dataset_name, cfg, True, output_folder))
     if evaluator_type == "coco_panoptic_seg":
         evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
     elif evaluator_type == "cityscapes":
         assert (
             torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
         return CityscapesEvaluator(dataset_name)
     elif evaluator_type == "pascal_voc":
         return PascalVOCDetectionEvaluator(dataset_name)
     elif evaluator_type == "lvis":
         return LVISEvaluator(dataset_name, cfg, True, output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
             "no Evaluator for the dataset {} with the type {}".format(
                 dataset_name, evaluator_type
             )
         )
     elif len(evaluator_list) == 1:
         return evaluator_list[0]
     return DatasetEvaluators(evaluator_list)
Example #30
def get_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))

    if len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
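Nearly all of the snippets above take `cls` as their first argument: they are `@classmethod` overrides on a DefaultTrainer subclass (the decorators were stripped when the examples were extracted). A minimal sketch of how such an override is wired up, reusing the `get_evaluator` helper from Example #30:

from detectron2.engine import DefaultTrainer

class Trainer(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        # DefaultTrainer.test() calls this for every dataset in cfg.DATASETS.TEST.
        return get_evaluator(cfg, dataset_name, output_folder)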