Example #1
import os

from detectron2.evaluation import COCOEvaluator, DatasetEvaluators
from densepose import DensePoseCOCOEvaluator  # DensePose project evaluator

# Classmethod of a DefaultTrainer subclass.
def build_evaluator(cls, cfg, dataset_name):
    output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    # COCO box/mask metrics always; DensePose metrics when the head is on.
    evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
    if cfg.MODEL.DENSEPOSE_ON:
        evaluators.append(
            DensePoseCOCOEvaluator(dataset_name, True, output_folder))
    return DatasetEvaluators(evaluators)
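This builder is written as a classmethod (note the `cls` argument) because detectron2's `DefaultTrainer.test` calls `cls.build_evaluator(cfg, dataset_name)` for each dataset in `cfg.DATASETS.TEST` when no evaluators are passed in. A minimal wiring sketch (the `Trainer` name is illustrative, not from the example):

from detectron2.engine import DefaultTrainer

class Trainer(DefaultTrainer):  # "Trainer" is an assumed name
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        ...  # body as in the example above

# DefaultTrainer.test(cfg, model) will then call cls.build_evaluator
# once per dataset listed in cfg.DATASETS.TEST.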
Example #2
import os

import torch

from detectron2.data import MetadataCatalog
from detectron2.evaluation import CityscapesEvaluator, COCOEvaluator, LVISEvaluator
from detectron2.utils import comm

# Classmethod of a DefaultTrainer subclass.
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, cfg, True, output_folder)
    if evaluator_type == "coco":
        return COCOEvaluator(dataset_name, cfg, True, output_folder)
    if evaluator_type == "cityscapes":
        # CityscapesEvaluator runs per-GPU and supports only a single machine.
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently does not work with multiple machines."
        return CityscapesEvaluator(dataset_name)
    # Every supported type returns above; anything else is unsupported.
    raise NotImplementedError(
        "no Evaluator for the dataset {} with the type {}".format(
            dataset_name, evaluator_type))
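The "evaluator_type" metadata mentioned in the docstring is what drives this dispatch. A short sketch of tagging your own dataset so it routes to COCOEvaluator (the dataset name and loader below are hypothetical):

from detectron2.data import DatasetCatalog, MetadataCatalog

def get_my_dicts():  # hypothetical loader returning detectron2-format dicts
    return []

DatasetCatalog.register("my_dataset", get_my_dicts)        # assumed dataset name
MetadataCatalog.get("my_dataset").evaluator_type = "coco"  # dispatches to COCOEvaluator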
Example #3
import os

import torch

from detectron2.data import MetadataCatalog
from detectron2.evaluation import (
    CityscapesEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    PascalVOCDetectionEvaluator,
    SemSegEvaluator,
)
from detectron2.utils import comm

# Classmethod of a DefaultTrainer subclass.
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                output_dir=output_folder,
            ))
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        evaluator_list.append(
            COCOEvaluator(dataset_name, cfg, True, output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(
            COCOPanopticEvaluator(dataset_name, output_folder))
    elif evaluator_type == "cityscapes":
        # CityscapesEvaluator runs per-GPU and supports only a single machine.
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently does not work with multiple machines."
        return CityscapesEvaluator(dataset_name)
    elif evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    elif evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, cfg, True, output_folder)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(
                dataset_name, evaluator_type))
    elif len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
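For "coco_panoptic_seg" the list accumulates three evaluators (semantic, instance, and panoptic metrics), which DatasetEvaluators wraps so they behave as a single evaluator. A sketch of calling the builder directly, assuming cfg and model already exist and the method lives on a trainer class as in the earlier sketch:

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import inference_on_dataset

dataset_name = cfg.DATASETS.TEST[0]                     # assumes cfg exists
evaluator = Trainer.build_evaluator(cfg, dataset_name)  # "Trainer" as sketched above
data_loader = build_detection_test_loader(cfg, dataset_name)
results = inference_on_dataset(model, data_loader, evaluator)  # assumes model exists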
Example #4
import os
from detectron2.evaluation import COCOEvaluator

# Classmethod of a DefaultTrainer subclass.
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    return COCOEvaluator(dataset_name, cfg, True, output_folder)
Example #5
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    logger = setup_logger()
    logger.info("Command line arguments: " + str(args))

    cfg = setup_cfg(args)

    # create a torch model
    torch_model = build_model(cfg)
    DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)

    # get a sample input batch from the test data loader
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    first_batch = next(iter(data_loader))

    # convert and save caffe2 model
    caffe2_model = export_caffe2_model(cfg, torch_model, first_batch)
    caffe2_model.save_protobuf(args.output)
    # draw the caffe2 graph
    caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=first_batch)

    # run evaluation with the converted model
    if args.run_eval:
        dataset = cfg.DATASETS.TEST[0]
        data_loader = build_detection_test_loader(cfg, dataset)
        # NOTE: hard-coded evaluator. change to the evaluator for your dataset
        evaluator = COCOEvaluator(dataset, cfg, True, args.output)
        metrics = inference_on_dataset(caffe2_model, data_loader, evaluator)
        print_csv_format(metrics)
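As the NOTE in the snippet says, the evaluator is hard-coded for COCO. A sketch of swapping in an evaluator matching a different dataset type, e.g. Pascal VOC (assumes the test dataset is registered with VOC-style annotations):

from detectron2.evaluation import PascalVOCDetectionEvaluator

# Replace the hard-coded COCOEvaluator when the test dataset is VOC-style;
# PascalVOCDetectionEvaluator only needs the dataset name.
evaluator = PascalVOCDetectionEvaluator(dataset)
metrics = inference_on_dataset(caffe2_model, data_loader, evaluator)
print_csv_format(metrics)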