Example 1
    def _do_eval(self):
        results = self._func()

        if results:
            assert isinstance(
                results, dict
            ), "Eval function must return a dict. Got {} instead.".format(
                results)

            print('Before flatten: ', results)
            flattened_results = flatten_results_dict(results)
            print('After flatten: ', flattened_results)

            for k, v in flattened_results.items():
                try:
                    v = float(v)
                except Exception:
                    raise ValueError(
                        "[EvalHook] eval_function should return a nested dict of float. "
                        "Got '{}: {}' instead.".format(k, v))
            self.trainer.storage.put_scalars(**flattened_results,
                                             smoothing_hint=False)

        # Evaluation may take different amounts of time on different workers.
        # A barrier makes them start the next iteration together.
        comm.synchronize()
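
For reference, here is a minimal sketch of the flattening step that all of these hooks rely on: detectron2's flatten_results_dict collapses a nested results dict into a flat dict whose keys join the nesting levels with "/", which is the form put_scalars expects. The metric values below are made up for illustration.

from detectron2.evaluation.testing import flatten_results_dict

# Hypothetical nested results, shaped like what an evaluator returns.
results = {"bbox": {"AP": 40.1, "AP50": 60.3}, "segm": {"AP": 35.7}}

flattened = flatten_results_dict(results)
print(flattened)
# {'bbox/AP': 40.1, 'bbox/AP50': 60.3, 'segm/AP': 35.7}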
Example 2
    def after_step(self):
        next_iter = self.trainer.iter + 1
        is_final = next_iter == self.trainer.max_iter
        if is_final or (self._period > 0 and next_iter % self._period == 0):
            results = self._func()

            if results:
                assert isinstance(
                    results, dict
                ), "Eval function must return a dict. Got {} instead.".format(
                    results)

                flattened_results = flatten_results_dict(results)
                for k, v in flattened_results.items():
                    try:
                        v = float(v)
                    except Exception:
                        raise ValueError(
                            "[EvalHook] eval_function should return a nested dict of float. "
                            "Got '{}: {}' instead.".format(k, v))
                self.trainer.storage.put_scalars(**flattened_results,
                                                 smoothing_hint=False)

            # Evaluation may take different amounts of time on different workers.
            # A barrier makes them start the next iteration together.
            comm.synchronize()
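
The scheduling check in after_step can be exercised on its own. A small standalone sketch (the period and max_iter values are made up) showing which iterations trigger an evaluation:

# Standalone illustration of the trigger condition used above.
period = 5000      # corresponds to self._period
max_iter = 90000   # corresponds to self.trainer.max_iter

def should_eval(cur_iter):
    next_iter = cur_iter + 1
    is_final = next_iter == max_iter
    return is_final or (period > 0 and next_iter % period == 0)

# Fires every `period` iterations and once more on the final iteration.
print([it for it in (4999, 5000, 9999, 89999) if should_eval(it)])
# [4999, 9999, 89999]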
Example 3
    def _do_eval(self):
        results = self._func()

        if results:
            assert isinstance(
                results, dict
            ), "Eval function must return a dict. Got {} instead.".format(
                results)

            flattened_results = flatten_results_dict(results)
            for k, v in flattened_results.items():
                try:
                    v = float(v)
                except Exception as e:
                    raise ValueError(
                        "[EvalHook] eval_function should return a nested dict of float. "
                        "Got '{}: {}' instead.".format(k, v)) from e
            self.trainer.storage.put_scalars(**flattened_results,
                                             smoothing_hint=False)

        if comm.is_main_process() and results:
            # save evaluation results in json
            is_final = self.trainer.iter + 1 >= self.trainer.max_iter
            os.makedirs(os.path.join(self.cfg.OUTPUT_DIR, 'inference'),
                        exist_ok=True)
            output_file = 'res_final.json' if is_final else \
                'iter_{:07d}.json'.format(self.trainer.iter)
            with PathManager.open(
                    os.path.join(self.cfg.OUTPUT_DIR, 'inference',
                                 output_file), 'w') as fp:
                json.dump(results, fp)

        # Evaluation may take different amounts of time on different workers.
        # A barrier makes them start the next iteration together.
        comm.synchronize()
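
The naming convention for the dumped JSON files can be previewed in isolation. A short sketch assuming an illustrative OUTPUT_DIR of "./output":

import os

# Illustrative values; in the hook they come from self.cfg.OUTPUT_DIR and the trainer.
output_dir = "./output"
cur_iter = 7499
is_final = False

output_file = "res_final.json" if is_final else "iter_{:07d}.json".format(cur_iter)
print(os.path.join(output_dir, "inference", output_file))
# ./output/inference/iter_0007499.json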
Example 4
    def _do_eval(self):
        results = self._func()
        logger = logging.getLogger(__name__)

        if results:
            assert isinstance(
                results, dict
            ), "Eval function must return a dict. Got {} instead.".format(
                results)

            flattened_results = flatten_results_dict(results)
            valid = dict()
            for k, v in flattened_results.items():
                try:
                    valid[k] = float(v)
                # Currently only supports skipping list, Tensor, and numpy.ndarray values.
                # TODO: other exception types may need to be handled as well.
                except (ValueError, TypeError):
                    logger.info("Skip put {}: {} to tensorboard".format(
                        k, type(v)))

            self.trainer.storage.put_scalars(**valid, smoothing_hint=False)

        # Evaluation may take different amounts of time on different workers.
        # A barrier makes them start the next iteration together.
        comm.synchronize()
Example 5
    def validation_epoch_end(self, _outputs):
        results = self._process_dataset_evaluation_results(_outputs)

        flattened_results = flatten_results_dict(results)
        for k, v in flattened_results.items():
            try:
                v = float(v)
            except Exception as e:
                raise ValueError(
                    "[EvalHook] eval_function should return a nested dict of float. "
                    "Got '{}: {}' instead.".format(k, v)) from e
        self.storage.put_scalars(**flattened_results, smoothing_hint=False)
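
Outside a Lightning module or trainer, the same scalars can be written through detectron2's EventStorage directly. A minimal sketch, assuming a detectron2 version where EventStorage works as a context manager; the metric values are made up:

from detectron2.utils.events import EventStorage

# Illustrative flattened metrics, as produced by flatten_results_dict.
flattened_results = {"bbox/AP": 40.1, "bbox/AP50": 60.3}

with EventStorage(start_iter=0) as storage:
    storage.put_scalars(**flattened_results, smoothing_hint=False)
    # Inspect what was logged; the exact return format of latest()
    # differs slightly between detectron2 versions.
    print(storage.latest())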
Example 6
    def _do_eval(self):
        results = self._func()

        if results:
            assert isinstance(
                results, dict
            ), "Eval function must return a dict. Got {} instead.".format(
                results)

            print('Before flatten: ', results)
            flattened_results = flatten_results_dict(results)
            print('After flatten: ', flattened_results)

            for k, v in flattened_results.items():
                try:
                    v = float(v)
                except Exception:
                    raise ValueError(
                        "[EvalHook] eval_function should return a nested dict of float. "
                        "Got '{}: {}' instead.".format(k, v))
            self.trainer.storage.put_scalars(**flattened_results,
                                             smoothing_hint=False)

        # Evaluation may take different amounts of time on different workers.
        # A barrier makes them start the next iteration together.
        comm.synchronize()
        if results:
            if flattened_results['bbox/AP'] > self._best_AP:
                self._best_AP = flattened_results['bbox/AP']
                print('self._best_AP', self._best_AP)
                print('Improving! Should save new checkpoint')
                self.save('best_AP_model')

            if np.isfinite(flattened_results['bbox/AP-Car']) and \
                np.isfinite(flattened_results['bbox/AP-Pedestrian']) and \
                np.isfinite(flattened_results['bbox/AP-Cyclist']) and \
                (flattened_results['bbox/AP-Car'] + flattened_results['bbox/AP-Pedestrian'] + flattened_results['bbox/AP-Cyclist'])/3.0 > self._best_AP_carpedcyc:
                self._best_AP_carpedcyc = (
                    flattened_results['bbox/AP-Car'] +
                    flattened_results['bbox/AP-Pedestrian'] +
                    flattened_results['bbox/AP-Cyclist']) / 3.0
                print('self._best_AP_carpedcyc', self._best_AP_carpedcyc)
                print('Improving! Should save new checkpoint')
                self.save('best_AP_carpedcyc_model')
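
The best-checkpoint bookkeeping above can be factored into a small helper. A sketch of the same idea, detached from the hook; the class and method names here are illustrative, not detectron2 API:

import math

class BestMetricTracker:
    """Tracks the best value of a metric seen so far (illustrative helper)."""

    def __init__(self):
        self.best = -math.inf

    def update(self, value):
        # Ignore NaN/inf results, mirroring the np.isfinite checks above.
        if math.isfinite(value) and value > self.best:
            self.best = value
            return True   # caller should save a checkpoint
        return False

tracker = BestMetricTracker()
print(tracker.update(40.1))  # True  -> save 'best_AP_model'
print(tracker.update(39.5))  # False -> keep the previous best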
Example 7
    def _do_eval(self, is_final):
        results = self._func()

        if results:
            assert isinstance(
                results, dict
            ), "Eval function must return a dict. Got {} instead.".format(
                results)

            flattened_results = flatten_results_dict(results)
            for k, v in flattened_results.items():
                try:
                    v = float(v)
                except Exception:
                    raise ValueError(
                        "[EvalHook] eval_function should return a nested dict of float. "
                        "Got '{}: {}' instead.".format(k, v))
            self.trainer.storage.put_scalars(**flattened_results,
                                             smoothing_hint=False)

        if comm.is_main_process() and results:
            # TODO(): This is an ugly hack that introduces global state in this hook function.
            logg = logging.getLogger(__name__)
            logg.info(
                'Creating directory {} and placing evaluation results in it'.
                format(global_cfg.OUTPUT_DIR))
            # save evaluation results in json
            os.makedirs(os.path.join(global_cfg.OUTPUT_DIR, 'inference'),
                        exist_ok=True)
            output_file = 'res_final.json' if is_final else \
                'iter_{:07d}.json'.format(self.trainer.iter)
            with open(
                    os.path.join(global_cfg.OUTPUT_DIR, 'inference',
                                 output_file), 'w') as fp:
                json.dump(results, fp, indent=4)

        # Evaluation may take different amounts of time on different workers.
        # A barrier makes them start the next iteration together.
        comm.synchronize()
Example 8
    def _do_test(self, cfg, model, train_iter=None, model_tag="default"):
        """train_iter: Current iteration of the model, None means final iteration"""
        assert len(cfg.DATASETS.TEST)
        assert cfg.OUTPUT_DIR

        is_final = (train_iter is None) or (train_iter == cfg.SOLVER.MAX_ITER - 1)

        logger.info(
            f"Running evaluation for model tag {model_tag} at iter {train_iter}..."
        )

        def _get_inference_dir_name(base_dir, inference_type, dataset_name):
            return os.path.join(
                base_dir,
                inference_type,
                model_tag,
                str(train_iter) if train_iter is not None else "final",
                dataset_name,
            )

        add_print_flops_callback(cfg, model, disable_after_callback=True)

        results = OrderedDict()
        results[model_tag] = OrderedDict()
        for dataset_name in cfg.DATASETS.TEST:
            # Evaluator will create output folder, no need to create here
            output_folder = _get_inference_dir_name(cfg.OUTPUT_DIR,
                                                    "inference", dataset_name)

            # NOTE: creating evaluator after dataset is loaded as there might be dependency.  # noqa
            data_loader = self.build_detection_test_loader(cfg, dataset_name)
            evaluator = self.get_evaluator(cfg,
                                           dataset_name,
                                           output_folder=output_folder)

            if not isinstance(evaluator, DatasetEvaluators):
                evaluator = DatasetEvaluators([evaluator])
            if comm.is_main_process():
                tbx_writer = _get_tbx_writer(
                    get_tensorboard_log_dir(cfg.OUTPUT_DIR))
                logger.info("Adding visualization evaluator ...")
                mapper = self.get_mapper(cfg, is_train=False)
                evaluator._evaluators.append(
                    self.get_visualization_evaluator()(
                        cfg,
                        tbx_writer,
                        mapper,
                        dataset_name,
                        train_iter=train_iter,
                        tag_postfix=model_tag,
                    ))

            results_per_dataset = inference_on_dataset(model, data_loader,
                                                       evaluator)

            if comm.is_main_process():
                results[model_tag][dataset_name] = results_per_dataset
                if is_final:
                    print_csv_format(results_per_dataset)

            if is_final and cfg.TEST.AUG.ENABLED:
                # In the end of training, run an evaluation with TTA
                # Only support some R-CNN models.
                output_folder = _get_inference_dir_name(
                    cfg.OUTPUT_DIR, "inference_TTA", dataset_name)

                logger.info(
                    "Running inference with test-time augmentation ...")
                data_loader = self.build_detection_test_loader(
                    cfg, dataset_name, mapper=lambda x: x)
                evaluator = self.get_evaluator(cfg,
                                               dataset_name,
                                               output_folder=output_folder)
                inference_on_dataset(GeneralizedRCNNWithTTA(cfg, model),
                                     data_loader, evaluator)

        if is_final and cfg.TEST.EXPECTED_RESULTS and comm.is_main_process():
            assert len(
                results
            ) == 1, "Results verification only supports one dataset!"
            verify_results(cfg, results[model_tag][cfg.DATASETS.TEST[0]])

        # write results to tensorboard
        if comm.is_main_process() and results:
            from detectron2.evaluation.testing import flatten_results_dict

            flattened_results = flatten_results_dict(results)
            for k, v in flattened_results.items():
                tbx_writer = _get_tbx_writer(
                    get_tensorboard_log_dir(cfg.OUTPUT_DIR))
                tbx_writer._writer.add_scalar("eval_{}".format(k), v,
                                              train_iter)

        if comm.is_main_process():
            tbx_writer = _get_tbx_writer(
                get_tensorboard_log_dir(cfg.OUTPUT_DIR))
            tbx_writer._writer.flush()
        return results
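
The final TensorBoard step here goes through a project-specific _get_tbx_writer helper; the same effect can be sketched with torch.utils.tensorboard.SummaryWriter directly. The log directory, dataset name, and metric value below are made up:

from torch.utils.tensorboard import SummaryWriter

from detectron2.evaluation.testing import flatten_results_dict

# Illustrative nested results keyed by model tag, as built in the example above.
results = {"default": {"coco_2017_val": {"bbox": {"AP": 40.1}}}}
train_iter = 9999

writer = SummaryWriter(log_dir="./output/tb_logs")
for k, v in flatten_results_dict(results).items():
    writer.add_scalar("eval_{}".format(k), v, train_iter)
writer.flush()
writer.close()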
Example 9
from detectron2.evaluation.testing import flatten_results_dict

import logging
import torch
import numpy as np

results = dict(list=[1, 2, 3, 4],
               tensor=torch.zeros((2, 3)),
               numpy=np.zeros((2, 3)),
               valid=int(8))

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('detectron2')
flattened_results = flatten_results_dict(results)
valid = dict()
for k, v in flattened_results.items():
    try:
        valid[k] = float(v)
    except (ValueError, TypeError):
        logger.info("Skip put {}: {} to tensorboard".format(k, type(v)))

print(valid)
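
Running this script prints {'valid': 8.0}: the list, tensor, and numpy entries fail the float() conversion and are reported as skipped via logger.info, so only the plain scalar is kept.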