Example #1
    def test(cls, cfg, model):
        """
        Args:
            cfg (CfgNode):
            model (nn.Module):
        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger(__name__)

        results = OrderedDict()
        for idx, dataset_name in enumerate(cfg.DATASETS.TESTS):
            logger.info("Prepare testing set")
            try:
                data_loader, evaluator = cls.build_evaluator(cfg, dataset_name)
            except NotImplementedError:
                logger.warning(
                    "No evaluator found. Implement its `build_evaluator` method."
                )
                results[dataset_name] = {}
                continue
            results_i = inference_on_dataset(model, data_loader, evaluator)
            results[dataset_name] = results_i

        if comm.is_main_process():
            assert isinstance(
                results, dict
            ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                results)
            print_csv_format(results)

        if len(results) == 1:
            results = list(results.values())[0]

        return results
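Example #1 assumes the trainer subclass implements a `build_evaluator` classmethod returning a `(data_loader, evaluator)` pair; otherwise the warning branch above is taken. A minimal sketch of such a hook, assuming a fastreid-style project where `DefaultTrainer`, `build_reid_test_loader`, and `ReidEvaluator` are available (as used in Example #7), might look like:

from fastreid.data import build_reid_test_loader
from fastreid.engine import DefaultTrainer
from fastreid.evaluation import ReidEvaluator


class Trainer(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        # Build the test loader for this dataset and an evaluator sized to
        # its query set, returning both as Example #1 expects.
        data_loader, num_query = build_reid_test_loader(cfg, dataset_name)
        return data_loader, ReidEvaluator(cfg, num_query)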
Example #2
def do_test(cfg, model):
    logger = logging.getLogger(__name__)
    results = OrderedDict()
    for idx, dataset_name in enumerate(cfg.DATASETS.TESTS):
        logger.info("Prepare testing set")
        try:
            data_loader, evaluator = get_evaluator(cfg, dataset_name)
        except NotImplementedError:
            logger.warning(
                "No evaluator found. Implement its `build_evaluator` method.")
            results[dataset_name] = {}
            continue
        results_i = inference_on_dataset(model,
                                         data_loader,
                                         evaluator,
                                         flip_test=cfg.TEST.FLIP_ENABLED)
        results[dataset_name] = results_i

    if comm.is_main_process():
        assert isinstance(
            results, dict
        ), "Evaluator must return a dict on the main process. Got {} instead.".format(
            results)
        print_csv_format(results)

    if len(results) == 1:
        results = list(results.values())[0]

    return results
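Example #2 forwards `flip_test=cfg.TEST.FLIP_ENABLED` to `inference_on_dataset`, while Example #6 reads the nested key `cfg.TEST.FLIP.ENABLED`, so check your project's config defaults. A hedged sketch of toggling the flag before evaluation:

# Sketch only: enable flip-test augmentation, then run evaluation.
# The exact config key varies between the examples on this page.
cfg.defrost()
cfg.TEST.FLIP_ENABLED = True
cfg.freeze()
results = do_test(cfg, model)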
Example #3
def main(args):

    cfg = setup(args)
    logger = logging.getLogger("fastreid.trainer")
    cfg.defrost()
    cfg.MODEL.BACKBONE.PRETRAIN = False
    model = H_Trainer.build_model(cfg)

    Checkpointer(model).load(cfg.MODEL.WEIGHTS)  # load trained model

    res = H_Trainer.test(cfg, model)
    print_csv_format(res)
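Example #3 only defines `main`; a typical entry point, following the detectron2/fastreid launcher pattern (the `fastreid.engine` import path and the argument names are assumptions for your checkout), might be:

from fastreid.engine import default_argument_parser, launch

if __name__ == "__main__":
    # Parse the standard arguments (--config-file, --num-gpus, ...) and
    # spawn one worker per GPU, each running main(args).
    args = default_argument_parser().parse_args()
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )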
Example #4
    def test(cls, cfg, model, evaluators=None):
        """
        Args:
            cfg (CfgNode):
            model (nn.Module):
            evaluators (list[DatasetEvaluator] or None): if None, will call
                :meth:`build_evaluator`. Otherwise, must have the same length as
                `cfg.DATASETS.TESTS`.
        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger(__name__)
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]

        if evaluators is not None:
            assert len(cfg.DATASETS.TESTS) == len(evaluators), "{} != {}".format(
                len(cfg.DATASETS.TESTS), len(evaluators)
            )

        results = OrderedDict()
        for idx, dataset_name in enumerate(cfg.DATASETS.TESTS):
            logger.info("Prepare testing set")
            data_loader, num_query = cls.build_test_loader(cfg, dataset_name)
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            if evaluators is not None:
                evaluator = evaluators[idx]
            else:
                try:
                    evaluator = cls.build_evaluator(cfg, num_query)
                except NotImplementedError:
                    logger.warning(
                        "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
                        "or implement its `build_evaluator` method."
                    )
                    results[dataset_name] = {}
                    continue
            results_i = inference_on_dataset(model, data_loader, evaluator)
            results[dataset_name] = results_i

        if comm.is_main_process():
            assert isinstance(
                results, dict
            ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                results
            )
            print_csv_format(results)

        if len(results) == 1:
            results = list(results.values())[0]

        return results
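Example #4 also accepts pre-built evaluators, bypassing `build_evaluator` entirely. A sketch of that call, where `Trainer` stands for the class defining the method above and `MyEvaluator` is a hypothetical `DatasetEvaluator` subclass (one instance per entry in `cfg.DATASETS.TESTS`):

# Sketch only: pass explicit evaluators instead of relying on the
# trainer's `build_evaluator` hook. MyEvaluator and its constructor
# signature are hypothetical.
evaluators = [MyEvaluator(cfg, name) for name in cfg.DATASETS.TESTS]
results = Trainer.test(cfg, model, evaluators=evaluators)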
Example #5
    def test(self, cfg, model):
        results = OrderedDict()
        for idx, dataset_name in enumerate(cfg.DATASETS.TESTS):
            results_i = inference_on_dataset(
                model, self.test_data_loader[dataset_name],
                self.evaluator[dataset_name])
            results[dataset_name] = results_i
        self.eval_results = results
        if comm.is_main_process():
            assert isinstance(
                results, dict
            ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                results)
            print_csv_format(results)
        if len(results) == 1:
            results = list(results.values())[0]
        return results
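Example #5 reads per-dataset loaders and evaluators that the trainer has cached in `self.test_data_loader` and `self.evaluator`. A sketch of how those dictionaries could be built ahead of time, reusing the `build_reid_test_loader` / `ReidEvaluator` pattern from Example #7 (treat the import paths as assumptions):

from fastreid.data import build_reid_test_loader
from fastreid.evaluation import ReidEvaluator


def build_test_artifacts(cfg):
    # One (loader, evaluator) pair per test dataset, keyed by name, so a
    # trainer can assign them to self.test_data_loader / self.evaluator.
    loaders, evaluators = {}, {}
    for dataset_name in cfg.DATASETS.TESTS:
        loader, num_query = build_reid_test_loader(cfg, dataset_name)
        loaders[dataset_name] = loader
        evaluators[dataset_name] = ReidEvaluator(cfg, num_query)
    return loaders, evaluators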
Example #6
    def test(cls, cfg, model):
        """
        Args:
            cfg (CfgNode):
            model (nn.Module):
        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger('fastreid')

        results = OrderedDict()
        dataset_name = cfg.DATASETS.TGT

        logger.info("Prepare testing set")
        try:
            data_loader, evaluator = cls.build_evaluator(cfg, dataset_name)
        except NotImplementedError:
            logger.warning(
                "No evaluator found. Implement its `build_evaluator` method.")
            results[dataset_name] = {}
            # No evaluator means nothing to run; return early instead of
            # falling through to inference with an undefined data_loader.
            return results

        results_i = inference_on_dataset(model,
                                         data_loader,
                                         evaluator,
                                         flip_test=cfg.TEST.FLIP.ENABLED)
        results[dataset_name] = results_i

        if comm.is_main_process():
            assert isinstance(
                results, dict
            ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                results)
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            results_i['dataset'] = dataset_name
            print_csv_format(results_i)

        # if len(results) == 1:
        #     results = list(results.values())[0]

        return results
Example #7
class FeatureExtraction:
    def __init__(self, cfgs):
        # Build one DefaultPredictor per config in the ensemble.
        self.predictors = []
        for cfg in cfgs:
            self.predictors.append(DefaultPredictor(cfg))

    def run_on_loader(self, data_loader):
        for batch in data_loader:
            predictions = []
            for predictor in self.predictors:
                predictions.append(predictor(batch["images"]))
            yield torch.cat(predictions, dim=-1), batch


if __name__ == "__main__":
    args = get_parser().parse_args()
    logger = setup_logger()
    cfgs = []
    for config_file in args.config_file:
        cfg = setup_cfg(config_file, args.opts)
        cfgs.append(cfg)
    results = OrderedDict()
    for dataset_name in cfgs[0].DATASETS.TESTS:
        test_loader, num_query = build_reid_test_loader(cfgs[0], dataset_name)
        evaluator = ReidEvaluator(cfgs[0], num_query)
        feat_extract = FeatureExtraction(cfgs)
        for (feat, batch) in tqdm.tqdm(feat_extract.run_on_loader(test_loader),
                                       total=len(test_loader)):
            evaluator.process(batch, feat)
        result = evaluator.evaluate()
        results[dataset_name] = result
    print_csv_format(results)
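The `get_parser` and `setup_cfg` helpers used above are not shown in this excerpt. A sketch following the usual detectron2/fastreid demo boilerplate (names, defaults, and the `fastreid.config` import path are assumptions):

import argparse

from fastreid.config import get_cfg


def setup_cfg(config_file, opts):
    # Load one config, apply command-line overrides, and freeze it.
    cfg = get_cfg()
    cfg.merge_from_file(config_file)
    cfg.merge_from_list(opts)
    cfg.freeze()
    return cfg


def get_parser():
    parser = argparse.ArgumentParser(description="Feature extraction with a model ensemble")
    # One config file per ensemble member, matching the loop in __main__.
    parser.add_argument("--config-file", nargs="+", help="paths to config files")
    parser.add_argument("--opts", default=[], nargs=argparse.REMAINDER,
                        help="config overrides applied to every config")
    return parser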