def do_evaluate(pred_config, output_file):
    num_gpu = cfg.TRAIN.NUM_GPUS
    # Build one predictor per GPU tower so the evaluation shards can run in parallel.
    graph_funcs = MultiTowerOfflinePredictor(
        pred_config, list(range(num_gpu))).get_predictors()

    # Evaluate every validation split, sharding each dataflow across the GPUs.
    for dataset in cfg.DATA.VAL:
        logger.info("Evaluating {} ...".format(dataset))
        dataflows = [
            get_eval_dataflow(dataset, shard=k, num_shards=num_gpu)
            for k in range(num_gpu)
        ]
        all_results = multithread_predict_dataflow(dataflows, graph_funcs)
        output = output_file + '-' + dataset
        DetectionDataset().eval_or_save_inference_results(
            all_results, dataset, output)
    # Repeat the same procedure for the test splits.
    for dataset in cfg.DATA.TEST:
        logger.info("Evaluating {} ...".format(dataset))
        dataflows = [
            get_eval_dataflow(dataset, shard=k, num_shards=num_gpu)
            for k in range(num_gpu)
        ]
        all_results = multithread_predict_dataflow(dataflows, graph_funcs)
        output = output_file + '-' + dataset
        DetectionDataset().eval_or_save_inference_results(
            all_results, dataset, output)
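
The example above assumes a tensorpack PredictConfig built elsewhere. As a hedged illustration only, here is a minimal sketch of how do_evaluate might be invoked; MyDetectionModel, the checkpoint path, and the tensor names are placeholders, not taken from this example:

from tensorpack.predict import PredictConfig
from tensorpack.tfutils import SmartInit

# Hypothetical wiring: MyDetectionModel is an assumed ModelDesc subclass.
pred_config = PredictConfig(
    model=MyDetectionModel(),
    session_init=SmartInit("/path/to/checkpoint"),   # placeholder checkpoint
    input_names=["image"],                           # placeholder tensor names
    output_names=["output/boxes", "output/scores", "output/labels"],
)
do_evaluate(pred_config, output_file="eval")   # writes one result file per dataset
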
Example #2

def do_evaluate(pred_config, output_file):
    # NUM_GPUS may be 0 when running on CPU, so build at least one predictor tower.
    num_tower = max(cfg.TRAIN.NUM_GPUS, 1)
    graph_funcs = MultiTowerOfflinePredictor(
        pred_config, list(range(num_tower))).get_predictors()

    for dataset in cfg.DATA.VAL:
        logger.info("Evaluating {} ...".format(dataset))
        dataflows = [
            get_eval_dataflow(dataset, shard=k, num_shards=num_tower)
            for k in range(num_tower)]
        all_results = multithread_predict_dataflow(dataflows, graph_funcs)
        output = output_file + '-' + dataset
        DatasetRegistry.get(dataset).eval_inference_results(all_results, output)
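
This variant looks the dataset up through DatasetRegistry instead of a hard-coded DetectionDataset. A hedged sketch of what DatasetRegistry.get(dataset) relies on, assuming the registry API of tensorpack's FasterRCNN example; the split class and dataset name below are illustrative:

import json

class MyValSplit(DatasetSplit):
    """Illustrative dataset split; only the evaluation-side methods are sketched."""

    def inference_roidbs(self):
        # Return a list of dicts with at least "file_name" and "image_id".
        return []

    def eval_inference_results(self, results, output=None):
        # Optionally dump the raw detections, then return metric name -> value.
        if output is not None:
            with open(output, "w") as f:
                json.dump(results, f)
        return {}

DatasetRegistry.register("my_val", lambda: MyValSplit())
# Setting cfg.DATA.VAL = ("my_val",) would then make do_evaluate pick it up.
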
Example #3
    def compute_validation_metrics(self) -> Any:
        if self.trainer_type == "replicated":
            all_results = multithread_predict_dataflow(
                self.dataflows, self.predictors
            )  # type: ignore
        else:
            # Under horovod, each evaluating worker dumps its shard of results to a
            # per-rank JSON part file; global rank 0 merges the parts further below.
            filenames = [
                os.path.join(
                    self._output_dir,
                    "outputs{}-part{}.json".format(self.trainer.global_step, rank),
                )
                for rank in range(hvd.local_size())
            ]

            if self.machine_rank == 0:
                local_results = predict_dataflow(self.dataflow, self.predictor)
                fname = filenames[hvd.local_rank()]
                with open(fname, "w") as f:
                    json.dump(local_results, f)
            # Synchronization barrier: wait until every worker has written its part file.
            self.barrier.eval()
            if hvd.rank() > 0:
                return
            all_results = []
            for fname in filenames:
                with open(fname, "r") as f:
                    obj = json.load(f)
                all_results.extend(obj)

        output_file = os.path.join(
            self._output_dir,
            "{}-outputs{}-{}.json".format(
                self._eval_dataset, self.trainer.global_step, time.time()
            ),
        )

        metrics = DatasetRegistry.get(self._eval_dataset).eval_inference_results(  # type: ignore
            all_results, output_file
        )

        # If there are no detections, the metrics result is totally empty, instead of containing
        # zeroes. Ensure that the main evaluation metric has some value.
        metrics.setdefault("mAP(bbox)/IoU=0.5:0.95", 0)

        return metrics
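
The self.barrier.eval() call above is what lets rank 0 read the part files safely. A hedged sketch of the underlying trick, assuming the barrier is a Horovod allreduce op (as in tensorpack's FasterRCNN example) and TF1-style graph mode; the names are illustrative:

import horovod.tensorflow as hvd
import tensorflow as tf

hvd.init()
# Allreducing a dummy tensor acts as a barrier: running the op blocks each
# worker until every other worker has reached the same point.
barrier = hvd.allreduce(tf.random_normal(shape=[1]))

with tf.Session() as sess:
    # ... each rank writes its outputs{step}-part{rank}.json file here ...
    sess.run(barrier)    # returns only once every rank has run it
    if hvd.rank() == 0:
        pass             # rank 0 can now merge the part files
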
Example #4
    def _eval(self):
        logdir = args.logdir
        if cfg.TRAINER == 'replicated':
            all_results = multithread_predict_dataflow(self.dataflows,
                                                       self.predictors)
        else:
            # Under horovod, each evaluating worker dumps its shard of results to a
            # per-rank JSON part file; global rank 0 merges and removes them below.
            filenames = [
                os.path.join(
                    logdir,
                    'outputs{}-part{}.json'.format(self.global_step, rank))
                for rank in range(hvd.local_size())
            ]

            if self._horovod_run_eval:
                local_results = predict_dataflow(self.dataflow, self.predictor)
                fname = filenames[hvd.local_rank()]
                with open(fname, 'w') as f:
                    json.dump(local_results, f)
            # Synchronization barrier: wait until every worker has written its file.
            self.barrier.eval()
            if hvd.rank() > 0:
                return
            all_results = []
            for fname in filenames:
                with open(fname, 'r') as f:
                    obj = json.load(f)
                all_results.extend(obj)
                os.unlink(fname)  # clean up the part file once merged

        output_file = os.path.join(
            logdir, '{}-outputs{}.json'.format(self._eval_dataset,
                                               self.global_step))

        scores = DetectionDataset().eval_or_save_inference_results(
            all_results, self._eval_dataset, output_file)
        # Report each score to the training monitors (e.g. the TensorBoard writer).
        for k, v in scores.items():
            self.trainer.monitors.put_scalar(k, v)
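
A hedged sketch of how an _eval method like this is typically driven, assuming a tensorpack Callback that triggers it every few epochs; the schedule and constructor arguments are illustrative, not taken from Example #4:

from tensorpack.callbacks import Callback

class EvalCallback(Callback):
    # With horovod, every worker must enter _eval() to participate in the barrier.
    _chief_only = False

    def __init__(self, eval_dataset, eval_period=10):
        self._eval_dataset = eval_dataset
        self._eval_period = eval_period

    def _trigger_epoch(self):
        if self.epoch_num % self._eval_period == 0:
            self._eval()  # the method shown in Example #4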