def do_test(cfg, model):
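    # LazyConfig-style evaluation: the test loader and evaluator are described in
    # the config and built with instantiate().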
    if "evaluator" in cfg.dataloader:
        ret = inference_on_dataset(
            model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
        )
        print_csv_format(ret)
        return ret
Example #2
def do_test(cfg, model):
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        mapper = None if cfg.INPUT.TEST_INPUT_TYPE == 'default' else \
            DatasetMapper(
                cfg, False, augmentations=build_custom_augmentation(cfg, False))
        data_loader = build_detection_test_loader(cfg,
                                                  dataset_name,
                                                  mapper=mapper)
        output_folder = os.path.join(cfg.OUTPUT_DIR,
                                     "inference_{}".format(dataset_name))
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
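        # Choose the evaluator based on the evaluator_type registered in the
        # dataset's metadata.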

        if evaluator_type == "lvis":
            evaluator = LVISEvaluator(dataset_name, cfg, True, output_folder)
        elif evaluator_type == 'coco':
            evaluator = COCOEvaluator(dataset_name, cfg, True, output_folder)
        else:
            assert 0, evaluator_type

        results[dataset_name] = inference_on_dataset(model, data_loader,
                                                     evaluator)
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results[dataset_name])
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #3
    def test(cls, cfg, model, evaluators=None):
        """
        Args:
            cfg (CfgNode):
            model (nn.Module):
            evaluators (list[DatasetEvaluator] or None): if None, will call
                :meth:`build_evaluator`. Otherwise, must have the same length as
                ``cfg.DATASETS.TEST``.

        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger(__name__)
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]
        # If there are multiple test sets, there must be one DatasetEvaluator per set
        if evaluators is not None:
            assert len(
                cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
                    len(cfg.DATASETS.TEST), len(evaluators))

        results = OrderedDict()
        # With multiple datasets, iterate over each one (e.g. coco_2017_val)
        for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
            data_loader = cls.build_test_loader(cfg, dataset_name)
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            if evaluators is not None:
                evaluator = evaluators[idx]
            else:
                try:
                    # The evaluator is created here via build_evaluator
                    evaluator = cls.build_evaluator(cfg, dataset_name)
                except NotImplementedError:
                    logger.warn(
                        "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
                        "or implement its `build_evaluator` method.")
                    results[dataset_name] = {}
                    continue
            # Run inference. results_i is a dict whose keys include
            # "AP", "AP50", "AP75", "APs", "APm", "APl", "AP-{class_name}", ...
            results_i = inference_on_dataset(model, data_loader, evaluator)
            results[dataset_name] = results_i
            if comm.is_main_process():
                assert isinstance(
                    results_i, dict
                ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                    results_i)
                logger.info("Evaluation results for {} in csv format:".format(
                    dataset_name))
                print_csv_format(results_i)

        if len(results) == 1:
            results = list(results.values())[0]
        # With a single dataset, return just its result dict
        return results
Example #4
    def test(
        cls,
        cfg: CfgNode,
        model: nn.Module,
        evaluators: Optional[Union[DatasetEvaluator,
                                   List[DatasetEvaluator]]] = None,
    ):
        """
        Args:
            cfg (CfgNode):
            model (nn.Module):
            evaluators (DatasetEvaluator, list[DatasetEvaluator] or None): if None, will call
                :meth:`build_evaluator`. Otherwise, must have the same length as
                ``cfg.DATASETS.TEST``.

        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger(__name__)
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]
        if evaluators is not None:
            assert len(
                cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
                    len(cfg.DATASETS.TEST), len(evaluators))

        results = OrderedDict()
        for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
            data_loader = cls.build_test_loader(cfg, dataset_name)
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            if evaluators is not None:
                evaluator = evaluators[idx]
            else:
                try:
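                    # This variant also extracts an embedder from the model and
                    # passes it to build_evaluator.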
                    embedder = cls.extract_embedder_from_model(model)
                    evaluator = cls.build_evaluator(cfg,
                                                    dataset_name,
                                                    embedder=embedder)
                except NotImplementedError:
                    logger.warn(
                        "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
                        "or implement its `build_evaluator` method.")
                    results[dataset_name] = {}
                    continue
            results_i = inference_on_dataset(model, data_loader, evaluator)
            results[dataset_name] = results_i
            if comm.is_main_process():
                assert isinstance(
                    results_i, dict
                ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                    results_i)
                logger.info("Evaluation results for {} in csv format:".format(
                    dataset_name))
                print_csv_format(results_i)

        if len(results) == 1:
            results = list(results.values())[0]
        return results
Example #5
    def test(cls, cfg, model, evaluators=None):
        """
        Args:
            cfg (CfgNode):
            model (nn.Module):
            evaluators (list[DatasetEvaluator] or None): if None, will call
                :meth:`build_evaluator`. Otherwise, must have the same length as
                `cfg.DATASETS.TEST`.

        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger(__name__)
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]
        if evaluators is not None:
            assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
                len(cfg.DATASETS.TEST), len(evaluators)
            )

        results = OrderedDict()
        for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
            data_loader = cls.build_test_loader(cfg, dataset_name)
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            if evaluators is not None:
                evaluator = evaluators[idx]
            else:
                try:
                    evaluator = cls.build_evaluator(cfg, dataset_name)
                except NotImplementedError:
                    logger.warn(
                        "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
                        "or implement its `build_evaluator` method."
                    )
                    results[dataset_name] = {}
                    continue
            results_i = inference_on_dataset(model, data_loader, evaluator)
            results[dataset_name] = results_i
            if comm.is_main_process():
                assert isinstance(
                    results_i, dict
                ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                    results_i
                )
                logger.info("Evaluation results for {} in csv format:".format(dataset_name))

                output_file = 'res_final.json'
                with open(os.path.join(cfg.OUTPUT_DIR, 'inference', output_file), 'w') as fp:
                    json.dump(results[dataset_name], fp, indent=4)

                print_csv_format(results_i)

        if len(results) == 1:
            results = list(results.values())[0]
        return results
Example #6
    def _process_dataset_evaluation_results(self) -> OrderedDict:
        results = OrderedDict()
        for idx, dataset_name in enumerate(self.cfg.DATASETS.TEST):
            results[dataset_name] = self._evaluators[idx].evaluate()
            if comm.is_main_process():
                print_csv_format(results[dataset_name])

        if len(results) == 1:
            results = list(results.values())[0]
        return results
Example #7
def do_test(cfg, model):
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)
        evaluator = get_evaluator(cfg, dataset_name)
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        logger.info("Evaluation results for {} in csv format:".format(dataset_name))
        print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #8
def do_test(cfg, model):
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)
        evaluator = get_evaluator(
            cfg, dataset_name,
            os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name))
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #9
    def test(cls, cfg, model, evaluators=None):
        """
        Args:
            cfg (CfgNode):
            model (nn.Module):
            evaluators (list[DatasetEvaluator] or None): if None, will call
                :meth:`build_evaluator`. Otherwise, must have the same length as
                `cfg.DATASETS.TEST`.

        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger(__name__)
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]
        if evaluators is not None:
            assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
                len(cfg.DATASETS.TEST), len(evaluators)
            )
        # print(evaluators)

        results = OrderedDict()
        relations = OrderedDict()
        for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
            data_loader = cls.build_test_loader(cfg, dataset_name)
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            evaluator = (
                evaluators[idx]
                if evaluators is not None
                else cls.build_evaluator(cfg, dataset_name)
            )
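            # inference_on_dataset in this project returns a (results, relations)
            # pair, so both are collected per dataset.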
            results_i, relations_i = inference_on_dataset(model, data_loader, evaluator)
            results[dataset_name] = results_i
            relations[dataset_name] = relations_i
            if comm.is_main_process():
                assert isinstance(
                    results_i, dict
                ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                    results_i
                )
                logger.info("Evaluation results for {} in csv format:".format(dataset_name))
                print_csv_format(results_i)
                # print_csv_format(relations_i)

        if len(results) == 1 and len(relations) == 1:
            results = list(results.values())[0]
            relations = list(relations.values())[0]
        return results, relations
Example #10
def do_test(cfg, model):
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name, mapper=TaobaoMapper(cfg, False))
        evaluator = get_evaluator(
            cfg, dataset_name, pjoin(cfg.OUTPUT_DIR, "inference", dataset_name)
        )
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #11
def do_evaluate(cfg, model):
    """
    Evaluate on test set using coco evaluate
    """
    results = OrderedDict()
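    # Note: only the second entry of cfg.DATASETS.TEST is evaluated here.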
    dataset_name = cfg.DATASETS.TEST[1]
    data_loader = build_detection_test_loader(cfg, dataset_name)
    evaluator = COCOEvaluator(dataset_name, cfg, False, output_dir= cfg.OUTPUT_DIR)
    results_i = inference_on_dataset(model, data_loader, evaluator)
    results[dataset_name] = results_i
    if comm.is_main_process():
        logger.info("Evaluation results for {} in csv format:".format(dataset_name))
        print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #12
def do_test(cfg, model):
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)
        evaluator = get_evaluator(
            cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        )
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        wandb.log({'AP': results_i['bbox']['AP'], 'AP50': results_i['bbox']['AP50']})
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #13
def main():
    parser = default_argument_parser()
    parser.add_argument("--prediction", help="predictions_file_path")
    args = parser.parse_args()
    cfg = setup(args)
    with smart_path(args.prediction).open("rb") as fp:
        buf = io.BytesIO(fp.read())
        predictions = torch.load(buf)
    pred_by_image = defaultdict(list)
    for p in predictions:
        pred_by_image[p["image_id"]].append(p)

    dataset = cfg.DATASETS.TEST[0]

    metadata = MetadataCatalog.get(dataset)

    if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):

        def dataset_id_map(ds_id):
            return metadata.thing_dataset_id_to_contiguous_id[ds_id]

    output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator = COCOEvaluator(dataset, cfg, False, output_folder)
    evaluator.reset()

    dicts = list(DatasetCatalog.get(dataset))

    count = 0
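    # Replay the saved predictions through the evaluator image by image,
    # instead of running a model.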
    
    for dic in tqdm.tqdm(dicts):
        #assert len(pred_by_image[dic["image_id"]]) == 1, str(dic["image_id"])
        if len(pred_by_image[dic["image_id"]]) == 0:
            continue
        prediction = pred_by_image[dic["image_id"]][0]
        prediction = create_instances(prediction, (dic["height"], dic["width"]))
        # Push an image
        dic["annotations"] = reconstruct_ann(dic["annotations"])
        evaluator.process([dic], [{"instances": prediction}])
        count += 1

    result = evaluator.evaluate()
    prediction_path = smart_path(args.prediction)
    save_path = prediction_path.parent.joinpath(prediction_path.stem + ".pkl")
    with save_path.open("wb") as writer:
        pickle.dump(result, writer)
    print_csv_format(result)
Example #14
def do_test(cfg, model):
    results = OrderedDict()
    for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
        data_loader = build_detection_test_loader(cfg, dataset_name)
        evaluator = get_evaluator(
            cfg, dataset_name,
            os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name))
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]

    # Store JSON of results somewhere, for visualization.
    return results
Example #15
def do_test(cfg1, model1, model2):

    results = OrderedDict()
    for dataset_name in cfg1.DATASETS.TEST:
        data_loader = custom_test_loader(cfg1, dataset_name)
        evaluator = get_evaluator(
            cfg1, dataset_name,
            os.path.join(cfg1.OUTPUT_DIR, "inference", dataset_name))
        results_i = inference_first_stream(model1, model2, data_loader,
                                           evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #16
def do_test(cfg, model, cat_heatmap_file):
    """
    Run the model on the test sets and output the results
    """
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)
        evaluator = get_evaluator(
            cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name),
            cat_heatmap_file
        )
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #17
def do_test(cfg, model):
    """

    # TODO: Write docstring
    """
    # Initialise results dictionary
    results = OrderedDict()

    # Loop through the datasets in the config file
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)

        # Generate the evaluator
        evaluator = get_evaluator(
            cfg,
            dataset_name,
            os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        )

        # Run inference and add to results dictionary
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i

        # Log the result set to weights and biases
        result_log = {}
        result_dict = results_i
        for iou_type in result_dict:
            for metric, result in result_dict[iou_type].items():
                metric_log = f"{iou_type}_{metric}"
                result_log[metric_log] = result

        logger.debug('Log the eval results on Weights & Biases')
        wandb.log(result_log)

        # Print to terminal
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]

    return results
Example #18
def do_test(cfg, model):
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        print('--', dataset_name)
        try:
            data_loader = build_detection_test_loader(cfg,
                                                      dataset_name,
                                                      mapper=TblDatasetMapper(
                                                          cfg, is_train=False))
        except Exception:
            logging.exception("Something awful happened!")
            continue  # skip this dataset if its data loader cannot be built
        evaluator = get_evaluator(
            cfg, dataset_name,
            os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name))
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #19
def do_test(cfg, model):

    img_folder = "../data/HICO_DET/images/test2015"
    json_folder = "../data/HICO_DET/hico_det_json"
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = custom_test_loader(cfg, dataset_name)
        evaluator = get_evaluator(
            cfg, dataset_name,
            os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name),
            img_folder, json_folder)
        results_i = inference_custom(model, data_loader, evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #20
def do_test(cfg, model):
    import random
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)
        # evaluator = get_evaluator(
        #     cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        # )
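        # Before the COCO evaluation below, visualize predictions on a few random
        # images from the hard-coded "car_val" split.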
        from detectron2.utils.visualizer import ColorMode
        predictor = DefaultPredictor(cfg)
        dataset_dicts = get_car_dicts("./data/dataset/car_val")
        for d in random.sample(dataset_dicts, 10):
            im = cv2.imread(d["file_name"])
            # Output format: https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
            outputs = predictor(im)
            v = Visualizer(
                im[:, :, ::-1],
                metadata=MetadataCatalog.get("car_val"),
                scale=0.7,
                # IMAGE_BW removes the colors of unsegmented pixels; only
                # available for segmentation models.
                instance_mode=ColorMode.IMAGE_BW,
            )
            out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
            cv2.imshow('hello', out.get_image()[:, :, ::-1])
            cv2.waitKey(0)

        cv2.destroyAllWindows()
        evaluator = COCOEvaluator("car_val",
                                  None,
                                  False,
                                  output_dir="./output/2021_03_12")
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #21
def do_test(cfg, model):
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        if cfg.MULTI_DATASET.ENABLED:
            # TODO: refactor
            try:
                model.set_eval_dataset(dataset_name)
            except:
                try:
                    model.module.set_eval_dataset(dataset_name)
                except:
                    try:
                        model.model.set_eval_dataset(dataset_name)
                    except:
                        print('set eval dataset failed.')
        data_loader = build_detection_test_loader(cfg, dataset_name)
        output_folder = os.path.join(
            cfg.OUTPUT_DIR, "inference_{}".format(dataset_name))
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        
        if cfg.MULTI_DATASET.UNIFIED_EVAL:
            evaluator = get_unified_evaluator(
                evaluator_type, dataset_name, cfg, True, output_folder)
            # print('evaluator', evaluator)
        else:
            if evaluator_type == "lvis":
                evaluator = LVISEvaluator(dataset_name, cfg, True, output_folder)
            elif evaluator_type == 'oid':
                evaluator = OIDEvaluator(dataset_name, cfg, True, output_folder)
            else:
                evaluator = COCOEvaluator(dataset_name, cfg, True, output_folder)
            
        results[dataset_name] = inference_on_dataset(
            model, data_loader, evaluator)
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results[dataset_name])
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #22
    def test_multi_models(cls, cfg, models, evaluators=None):
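        # Same flow as DefaultTrainer.test, but evaluates an ensemble of models
        # via inference_ensemble_on_dataset.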
        logger = logging.getLogger(__name__)
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]
        if evaluators is not None:
            assert len(
                cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
                    len(cfg.DATASETS.TEST), len(evaluators))

        results = OrderedDict()
        for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
            data_loader = cls.build_test_loader(cfg, dataset_name)
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            if evaluators is not None:
                evaluator = evaluators[idx]
            else:
                try:
                    evaluator = cls.build_evaluator(cfg, dataset_name)
                except NotImplementedError:
                    logger.warn(
                        "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
                        "or implement its `build_evaluator` method.")
                    results[dataset_name] = {}
                    continue
            results_i = inference_ensemble_on_dataset(models, data_loader,
                                                      evaluator)
            results[dataset_name] = results_i
            if comm.is_main_process():
                assert isinstance(
                    results_i, dict
                ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                    results_i)
                logger.info("Evaluation results for {} in csv format:".format(
                    dataset_name))
                print_csv_format(results_i)

        if len(results) == 1:
            results = list(results.values())[0]
        return results
Example #23
def main(args):
    cfg = setup(args)
    assert cfg.TEST.TOPK_CAT.ENABLED
    topk = cfg.TEST.TOPK_CAT.K

    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=args.resume
    )
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)
        evaluator = get_evaluator(
            cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        )
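        # This project's customized inference_on_dataset takes top-k category
        # parameters (cfg.TEST.TOPK_CAT) and returns results keyed by max_dets.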
        results_i = inference_on_dataset(
            model,
            data_loader,
            evaluator,
            num_classes=cfg.MODEL.ROI_HEADS.NUM_CLASSES,
            topk=topk,
            num_estimate=cfg.TEST.TOPK_CAT.NUM_ESTIMATE,
            min_score=cfg.TEST.TOPK_CAT.MIN_SCORE,
        )
        results[dataset_name] = results_i
        if comm.is_main_process():
            for max_dets, max_dets_results in results_i.items():
                logger.info(
                    f"Evaluation results for {dataset_name},max_dets={max_dets} in "
                    f"csv format:"
                )
                print_csv_format(max_dets_results)
    if len(results) == 1:
        results = list(results.values())[0]

    with open(Path(cfg.OUTPUT_DIR) / "metrics_infer_topk.json", "w") as f:
        json.dump(results, f)

    return results
Example #24
def do_test(cfg, model, iteration):
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)
        evaluator = get_evaluator(
            cfg, dataset_name,
            os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name))
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results_i)

            # log inference result
            # write csv head if file not exists
            if not os.path.exists('inference0.csv'):
                with open('inference0.csv', 'w') as f:
                    for task, res in results_i.items():
                        important_res = [(k, v) for k, v in res.items()
                                         if "-" not in k]
                        f.write("Time,Task,Iteration,")
                        f.write(",".join([k[0] for k in important_res]))
                        f.write("\n")
                        break
            # write result
            localtime = time.asctime(time.localtime(time.time()))
            with open('inference0.csv', 'a') as f:
                for task, res in results_i.items():
                    important_res = [(k, v) for k, v in res.items()
                                     if "-" not in k]
                    f.write(localtime + ",")
                    f.write(task + ",")
                    f.write(str(iteration) + ",")
                    f.write(",".join(
                        ["{0:.4f}".format(k[1]) for k in important_res]))
                    f.write("\n")
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #25
def do_test(cfg, model):
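    # Rotated-box variant: a custom dataset mapper feeds the test loader and
    # RotatedCOCOEvaluator computes the metrics.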
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg=cfg,
                                                  dataset_name=dataset_name,
                                                  mapper=PathwayDatasetMapper(
                                                      cfg, False))
        evaluator = RotatedCOCOEvaluator(dataset_name=dataset_name,
                                         cfg=cfg,
                                         distributed=False,
                                         output_dir=os.path.join(
                                             cfg.OUTPUT_DIR, "inference",
                                             dataset_name))
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
Example #26
def do_test(cfg, model):
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)
        # Create the evaluator
        evaluator = get_evaluator(cfg,
                                  dataset_name,
                                  output_folder=os.path.join(
                                      cfg.OUTPUT_DIR, "inference",
                                      dataset_name))
        # Make inference on dataset
        results_i = inference_on_dataset(model, data_loader, evaluator)
        # Update results dictionary
        results[dataset_name] = results_i

        print("### Returning results_i...")
        #print(results_i)
        #print(f"### Average Precision: {results_i['AP']}")
        # Let's get some communication happening
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            ## wandb.log()? TODO/NOTE: This may be something Weights & Biases can track
            #print("### Calculating results...")
            print_csv_format(results_i)

        # TODO: log results_i dict with different parameters
        print("### Saving results to Weights & Biases...")
        wandb.log(results_i)

    # Unwrap the results dict when there is only a single test dataset
    if len(results) == 1:
        results = list(results.values())[0]

    return results
Example #27
    torch_model = build_model(cfg)
    DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)
    torch_model.eval()

    # get a sample data
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    first_batch = next(iter(data_loader))

    # convert and save model
    if args.export_method == "caffe2_tracing":
        exported_model = export_caffe2_tracing(cfg, torch_model, first_batch)
    elif args.export_method == "scripting":
        exported_model = export_scripting(torch_model)
    elif args.export_method == "tracing":
        exported_model = export_tracing(torch_model, first_batch)

    # run evaluation with the converted model
    if args.run_eval:
        assert exported_model is not None, (
            "Python inference is not yet implemented for "
            f"export_method={args.export_method}, format={args.format}.")
        logger.info(
            "Running evaluation ... this takes a long time if you export to CPU."
        )
        dataset = cfg.DATASETS.TEST[0]
        data_loader = build_detection_test_loader(cfg, dataset)
        # NOTE: hard-coded evaluator. change to the evaluator for your dataset
        evaluator = COCOEvaluator(dataset, output_dir=args.output)
        metrics = inference_on_dataset(exported_model, data_loader, evaluator)
        print_csv_format(metrics)
Example #28
    def _do_test(self, cfg, model, train_iter=None, model_tag="default"):
        """train_iter: Current iteration of the model, None means final iteration"""
        assert len(cfg.DATASETS.TEST)
        assert cfg.OUTPUT_DIR

        is_final = (train_iter is None) or (train_iter
                                            == cfg.SOLVER.MAX_ITER - 1)

        logger.info(
            f"Running evaluation for model tag {model_tag} at iter {train_iter}..."
        )

        def _get_inference_dir_name(base_dir, inference_type, dataset_name):
            return os.path.join(
                base_dir,
                inference_type,
                model_tag,
                str(train_iter) if train_iter is not None else "final",
                dataset_name,
            )

        add_print_flops_callback(cfg, model, disable_after_callback=True)

        results = OrderedDict()
        results[model_tag] = OrderedDict()
        for dataset_name in cfg.DATASETS.TEST:
            # Evaluator will create output folder, no need to create here
            output_folder = _get_inference_dir_name(cfg.OUTPUT_DIR,
                                                    "inference", dataset_name)

            # NOTE: creating evaluator after dataset is loaded as there might be dependency.  # noqa
            data_loader = self.build_detection_test_loader(cfg, dataset_name)
            evaluator = self.get_evaluator(cfg,
                                           dataset_name,
                                           output_folder=output_folder)

            if not isinstance(evaluator, DatasetEvaluators):
                evaluator = DatasetEvaluators([evaluator])
            if comm.is_main_process():
                tbx_writer = _get_tbx_writer(
                    get_tensorboard_log_dir(cfg.OUTPUT_DIR))
                logger.info("Adding visualization evaluator ...")
                mapper = self.get_mapper(cfg, is_train=False)
                evaluator._evaluators.append(
                    self.get_visualization_evaluator()(
                        cfg,
                        tbx_writer,
                        mapper,
                        dataset_name,
                        train_iter=train_iter,
                        tag_postfix=model_tag,
                    ))

            results_per_dataset = inference_on_dataset(model, data_loader,
                                                       evaluator)

            if comm.is_main_process():
                results[model_tag][dataset_name] = results_per_dataset
                if is_final:
                    print_csv_format(results_per_dataset)

            if is_final and cfg.TEST.AUG.ENABLED:
                # In the end of training, run an evaluation with TTA
                # Only support some R-CNN models.
                output_folder = _get_inference_dir_name(
                    cfg.OUTPUT_DIR, "inference_TTA", dataset_name)

                logger.info(
                    "Running inference with test-time augmentation ...")
                data_loader = self.build_detection_test_loader(
                    cfg, dataset_name, mapper=lambda x: x)
                evaluator = self.get_evaluator(cfg,
                                               dataset_name,
                                               output_folder=output_folder)
                inference_on_dataset(GeneralizedRCNNWithTTA(cfg, model),
                                     data_loader, evaluator)

        if is_final and cfg.TEST.EXPECTED_RESULTS and comm.is_main_process():
            assert len(
                results
            ) == 1, "Results verification only supports one dataset!"
            verify_results(cfg, results[model_tag][cfg.DATASETS.TEST[0]])

        # write results to tensorboard
        if comm.is_main_process() and results:
            from detectron2.evaluation.testing import flatten_results_dict

            flattened_results = flatten_results_dict(results)
            for k, v in flattened_results.items():
                tbx_writer = _get_tbx_writer(
                    get_tensorboard_log_dir(cfg.OUTPUT_DIR))
                tbx_writer._writer.add_scalar("eval_{}".format(k), v,
                                              train_iter)

        if comm.is_main_process():
            tbx_writer = _get_tbx_writer(
                get_tensorboard_log_dir(cfg.OUTPUT_DIR))
            tbx_writer._writer.flush()
        return results
Example #29
    def test(cls, cfg, model, evaluators=None):
        """
        Args:
            cfg (CfgNode):
            model (nn.Module):
            evaluators (list[DatasetEvaluator] or None): if None, will call
                :meth:`build_evaluator`. Otherwise, must have the same length as
                `cfg.DATASETS.TEST`.

        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger(__name__)
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]
        if evaluators is not None:
            assert len(
                cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
                    len(cfg.DATASETS.TEST), len(evaluators))

        logger.info("Version:{}".format(cfg.OUTPUT.TRAIN_VERSION))
        results = OrderedDict()

        dataset = cfg.DATASETS.TEST[0].split("_")[0]
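        # If no codebook is available yet, run embedding inference over the training
        # sets, cluster with KMeans, and save the reconstruction net and codebook
        # before evaluating.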
        if ((not cfg.MODEL.ROI_MASK_HEAD.RECON_NET.LOAD_CODEBOOK
             or not os.path.exists("{}_codebook.npy".format(dataset)))
                and cfg.MODEL.ROI_MASK_HEAD.RECON_NET.NAME != ""):
            for idx, dataset_name in enumerate(cfg.DATASETS.TRAIN):
                embedding_dataloader = cls.build_test_loader(cfg, dataset_name)
                model = embedding_inference_on_train_dataset(
                    model, embedding_dataloader)
            logger.info("Start KMEANS clustering")
            model.roi_heads.recon_net.cluster()
            logger.info("KMEANS clustering has finished")
            torch.save(
                model.roi_heads.recon_net.state_dict(),
                "{}/{}_recon_net.pth".format(model._cfg.OUTPUT_DIR, dataset))
            logger.info("Recon net saved")
            np.save(
                '{}/{}_codebook.npy'.format(model._cfg.OUTPUT_DIR, dataset),
                model.roi_heads.recon_net.vector_dict)

        # if cfg.MODEL.ROI_MASK_HEAD.RECON_NET.NAME == "General_Recon_Net":
        #     # torch.save(model.roi_heads.recon_net.state_dict(), "{}/recon_net.pth".format(model._cfg.OUTPUT_DIR))
        #     # logger.info("Recon net saved")
        #     dataset = "d2sa"
        #     if os.path.exists("{}_codebook.npy".format(dataset)) and :
        #         model.roi_heads.recon_net.load_state_dict(torch.load("{}_recon_net.pth".format(dataset)))
        #         model.roi_heads.recon_net.vector_dict = np.load("{}_codebook.npy".format(dataset))[()]
        #         logger.info("codebook loaded")
        #
        #     else:
        #         for idx, dataset_name in enumerate(cfg.DATASETS.TRAIN):
        #             # if idx == 0:
        #             #     continue
        #             embedding_dataloader = cls.build_test_loader(cfg, dataset_name)
        #             model = embedding_inference_on_train_dataset(model, embedding_dataloader)
        #         logger.info("Start KMEANS clustering")
        #         model.roi_heads.recon_net.cluster()
        #         logger.info("KMEANS clustering has finished")
        #         torch.save(model.roi_heads.recon_net.state_dict(), "{}/recon_net.pth".format(model._cfg.OUTPUT_DIR))
        #         logger.info("Recon net saved")
        #         np.save('{}/codebook.npy'.format(model._cfg.OUTPUT_DIR), model.roi_heads.recon_net.vector_dict)

        #     codebook = model.roi_heads.recon_net.vector_dict
        #     # space = []
        #     # label = []
        #     # name = []
        #     # for i in range(10):
        #     #     name.append(i)
        #     #     space.append(codebook[i + 1][: 256])
        #     #     label.append(torch.ones(256) * i)
        #     # space = torch.cat(space, dim=0)
        #     # label = torch.cat(label, dim=0)
        #     for cls_id in range(1, 21):
        #         num_groups = 6
        #         space = codebook[cls_id]
        #         kmeans = KMeans(n_clusters=num_groups)
        #         kmeans.fit(space.cpu())
        #
        #         coord = TSNE(random_state=20200605).fit_transform(np.array(space.cpu().data))
        #         vis.viz.scatter(coord, win="memory{}".format(cls_id +20), opts=dict(legend=[0,1,2,3,4,5], markersize=5), Y=kmeans.labels_ + 1, name="{}".format(cls_id))
        #
        #         tensor = torch.FloatTensor(kmeans.cluster_centers_).cuda().view(num_groups, 8, 4, 4)
        #         mask = model.roi_heads.recon_net.decode(tensor)
        #         vis.images(mask, win_name='mask{}'.format(cls_id))
        #         ipdb.set_trace()
        # ipdb.set_trace()

        for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
            data_loader = cls.build_test_loader(cfg, dataset_name)
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            if evaluators is not None:
                evaluator = evaluators[idx]
            else:
                try:
                    evaluator = cls.build_evaluator(cfg, dataset_name)
                except NotImplementedError:
                    logger.warn(
                        "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
                        "or implement its `build_evaluator` method.")
                    results[dataset_name] = {}
                    continue

            results_i = inference_on_dataset(model, data_loader, evaluator)
            results[dataset_name] = results_i
            if comm.is_main_process():
                assert isinstance(
                    results_i, dict
                ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                    results_i)
                logger.info("Evaluation results for {} in csv format:".format(
                    dataset_name))

                print_csv_format(results_i)

        if len(results) == 1:
            results = list(results.values())[0]

        return results
Example #30
    def test(cls, cfg, model, mapper_object, evaluators=None):
        """
        Args:
            cfg (CfgNode):
            model (nn.Module):
            evaluators (list[DatasetEvaluator] or None): if None, will call
                :meth:`build_evaluator`. Otherwise, must have the same length as
                `cfg.DATASETS.TEST`.

        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger(__name__)
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]
        if evaluators is not None:
            assert len(
                cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
                    len(cfg.DATASETS.TEST), len(evaluators))

        results = OrderedDict()
        for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
            # if(not isTrackAccuracy):
            # break
            data_loader = cls.build_test_loader(cfg, dataset_name,
                                                mapper_object)
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            if evaluators is not None:
                evaluator = evaluators[idx]
            else:
                try:
                    evaluator = cls.build_evaluator(cfg, dataset_name)
                except NotImplementedError:
                    logger.warn(
                        "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
                        "or implement its `build_evaluator` method.")
                    results[dataset_name] = {}
                    continue

            # if(True): return results
            # return results
            results_i = evaluate.inference_on_dataset(model, data_loader,
                                                      evaluator)
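            # Record the classification accuracy in the trainer's event storage so
            # it is tracked alongside the other training metrics.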

            accuracy_test = round((results_i["accuracy"] * 100), 2)

            storage = get_event_storage()
            storage.put_scalar("accuracy_" + dataset_name,
                               accuracy_test,
                               smoothing_hint=False)

            results[dataset_name] = results_i
            if comm.is_main_process():
                assert isinstance(
                    results_i, dict
                ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                    results_i)
                logger.info("Evaluation results for {} in csv format:".format(
                    dataset_name))
                print_csv_format(results_i)

        if len(results) == 1:
            results = list(results.values())[0]
        return results