Example #1
from detectron2.data import MetadataCatalog
from detectron2.evaluation import (
    COCOEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    RotatedCOCOEvaluator,
)
# D2Trainer is assumed here to be an alias for detectron2's DefaultTrainer.
from detectron2.engine import DefaultTrainer as D2Trainer


def get_evaluator(cfg, dataset_name, output_folder):
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        # D2 is in the process of reducing the use of cfg.
        dataset_evaluators = COCOEvaluator(
            dataset_name,
            output_dir=output_folder,
            kpt_oks_sigmas=cfg.TEST.KEYPOINT_OKS_SIGMAS,
            max_dets_per_image=cfg.TEST.DETECTIONS_PER_IMAGE,
        )
    elif evaluator_type in ["rotated_coco"]:
        dataset_evaluators = DatasetEvaluators(
            [RotatedCOCOEvaluator(dataset_name, cfg, True, output_folder)])
    elif evaluator_type in ["lvis"]:
        dataset_evaluators = LVISEvaluator(
            dataset_name,
            output_dir=output_folder,
            max_dets_per_image=cfg.TEST.DETECTIONS_PER_IMAGE,
        )
    else:
        dataset_evaluators = D2Trainer.build_evaluator(
            cfg, dataset_name, output_folder)
    # Wrap a bare evaluator so callers always get a DatasetEvaluators back.
    if not isinstance(dataset_evaluators, DatasetEvaluators):
        dataset_evaluators = DatasetEvaluators([dataset_evaluators])
    return dataset_evaluators
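The dispatch above hinges on the "evaluator_type" metadata. For reference, a minimal sketch of how that metadata gets attached when registering a custom dataset (the dataset name and loader stub are hypothetical; DatasetCatalog/MetadataCatalog are detectron2 APIs):

from detectron2.data import DatasetCatalog, MetadataCatalog

# Register a (stub) loader, then tag the dataset with an evaluator_type so
# helpers like get_evaluator above can pick the right evaluator for it.
DatasetCatalog.register("my_rotated_dataset", lambda: [])
MetadataCatalog.get("my_rotated_dataset").evaluator_type = "rotated_coco"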
Example #2
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type in ["coco", "coco_panoptic_seg"]:
            evaluator_list.append(
                COCOEvaluator(dataset_name, cfg, True, output_folder))
        if evaluator_type in ["rcoco"]:
            evaluator_list.append(
                RotatedCOCOEvaluator(dataset_name, cfg, True, output_folder))
            evaluator_list.append(
                COCOEvaluator(dataset_name, cfg, True, output_folder))

        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type))
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)
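Overrides like this are written as classmethods because detectron2's DefaultTrainer calls cls.build_evaluator from its test() hook, once per dataset in cfg.DATASETS.TEST. A minimal sketch of the intended wiring (the Trainer subclass name is hypothetical):

from detectron2.engine import DefaultTrainer

class Trainer(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        # Body as in Example #2 above.
        ...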
Example #3
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                    ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                    output_dir=output_folder,
                ))
        if evaluator_type in ["coco", "coco_panoptic_seg"]:
            evaluator_list.append(
                RotatedCOCOEvaluator(dataset_name, cfg, True, output_folder))
        if evaluator_type == "coco_panoptic_seg":
            evaluator_list.append(
                COCOPanopticEvaluator(dataset_name, output_folder))
        if evaluator_type == "cityscapes_instance":
            assert (
                torch.cuda.device_count() >= comm.get_rank()
            ), "CityscapesEvaluator currently does not work with multiple machines."
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == "cityscapes_sem_seg":
            assert (
                torch.cuda.device_count() >= comm.get_rank()
            ), "CityscapesEvaluator currently does not work with multiple machines."
            return CityscapesSemSegEvaluator(dataset_name)
        elif evaluator_type == "pascal_voc":
            return PascalVOCDetectionEvaluator(dataset_name)
        elif evaluator_type == "lvis":
            return LVISEvaluator(dataset_name, cfg, True, output_folder)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type))
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)
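For "coco_panoptic_seg" this version accumulates up to three evaluators; DatasetEvaluators then fans process()/evaluate() calls out to each one and merges their metric dictionaries. A minimal sketch (the three evaluator instances are assumed to already exist):

from detectron2.evaluation import DatasetEvaluators

# Each wrapped evaluator sees the same model outputs; evaluate() returns
# the union of their result dicts (duplicate metric keys are rejected).
combined = DatasetEvaluators([sem_seg_ev, box_ev, panoptic_ev])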
Example #4
import logging
import os
from collections import OrderedDict

import detectron2.utils.comm as comm
from detectron2.data import build_detection_test_loader
from detectron2.evaluation import (
    RotatedCOCOEvaluator,
    inference_on_dataset,
    print_csv_format,
)

# PathwayDatasetMapper is project-specific and assumed to be importable
# from the surrounding project.

logger = logging.getLogger(__name__)


def do_test(cfg, model):
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg=cfg,
                                                  dataset_name=dataset_name,
                                                  mapper=PathwayDatasetMapper(
                                                      cfg, False))
        evaluator = RotatedCOCOEvaluator(dataset_name=dataset_name,
                                         cfg=cfg,
                                         distributed=False,
                                         output_dir=os.path.join(
                                             cfg.OUTPUT_DIR, "inference",
                                             dataset_name))
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
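A hedged sketch of the setup do_test expects: a fully populated config plus an already-built model with trained weights loaded (build_model and DetectionCheckpointer are detectron2 APIs):

from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model

model = build_model(cfg)                              # architecture from cfg
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)  # trained weights
results = do_test(cfg, model)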
Example #5
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluators = [
            RotatedCOCOEvaluator(dataset_name, cfg, True, output_folder)
        ]
        return DatasetEvaluators(evaluators)
Example #6
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        return RotatedCOCOEvaluator(dataset_name,
                                    cfg,
                                    True,
                                    output_dir=output_folder)
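RotatedCOCOEvaluator only produces meaningful numbers for a model that actually predicts rotated (XYWHA) boxes. A hedged sketch of the config pieces usually switched over for that (the component names are detectron2's rotated variants; the regression weights are illustrative):

from detectron2.config import get_cfg

cfg = get_cfg()
cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"
cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)  # 5 values: dx, dy, dw, dh, da
cfg.MODEL.ROI_HEADS.NAME = "RROIHeads"
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignRotated"
cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1)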