Example 1
    def _reset_dataset_evaluators(self):
        """reset validation dataset evaluator to be run in EVAL_PERIOD steps"""
        assert (not self.trainer.distributed_backend
                or self.trainer.distributed_backend.lower() in [
                    "ddp",
                    "ddp_cpu",
                ]), ("Only DDP and DDP_CPU distributed backend are supported")

        def _get_inference_dir_name(base_dir, inference_type, dataset_name,
                                    model_tag: ModelTag):
            next_eval_iter = self.trainer.global_step + self.cfg.TEST.EVAL_PERIOD
            if self.trainer.global_step == 0:
                next_eval_iter -= 1
            return os.path.join(
                base_dir,
                inference_type,
                model_tag,
                str(next_eval_iter),
                dataset_name,
            )

        for tag, dataset_evaluators in self.dataset_evaluators.items():
            dataset_evaluators.clear()
            assert self.cfg.OUTPUT_DIR, "Expect output_dir to be specified in config"
            for dataset_name in self.cfg.DATASETS.TEST:
                # setup evaluator for each dataset
                output_folder = _get_inference_dir_name(
                    self.cfg.OUTPUT_DIR, "inference", dataset_name, tag)
                evaluator = Detectron2GoRunner.get_evaluator(
                    self.cfg, dataset_name, output_folder=output_folder)
                evaluator.reset()
                dataset_evaluators.append(evaluator)
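
For reference, the path produced by the nested _get_inference_dir_name helper above can be sketched as a standalone function. The values used below (base directory, dataset name, model tag, global step, and eval period) are hypothetical stand-ins for what the method actually reads from self.cfg and self.trainer:

import os

def inference_dir_name(base_dir, inference_type, dataset_name, model_tag,
                       global_step, eval_period):
    # Results are written under a folder named for the iteration at which the
    # next evaluation will run; at step 0 that folder is eval_period - 1.
    next_eval_iter = global_step + eval_period
    if global_step == 0:
        next_eval_iter -= 1
    return os.path.join(base_dir, inference_type, model_tag,
                        str(next_eval_iter), dataset_name)

# e.g. inference_dir_name("./output", "inference", "coco_2017_val", "default", 0, 5000)
# returns "./output/inference/default/4999/coco_2017_val"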
Example 2
def get_evaluator(cfg: CfgNode, dataset_name: str, output_folder: str):
    # Thin wrapper that delegates evaluator construction to Detectron2GoRunner.
    return Detectron2GoRunner.get_evaluator(
        cfg=cfg, dataset_name=dataset_name, output_folder=output_folder
    )
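
A hypothetical call site, assuming cfg is an already-loaded D2Go CfgNode, "coco_2017_val" is a registered dataset, and the output path follows the layout from Example 1 (all of these names are illustrative, not taken from the snippets above):

evaluator = get_evaluator(cfg, "coco_2017_val",
                          output_folder="./output/inference/default/4999/coco_2017_val")
evaluator.reset()
# feed predictions via evaluator.process(inputs, outputs) during inference,
# then call evaluator.evaluate() to compute the metrics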