Example #1
    def on_train_start(self, trainer, pl_module):
        # Only evaluate MOT metrics if there is at least one non-empty validation dataloader
        self.available_data = (len(trainer.val_dataloaders) > 0
                               and len(trainer.val_dataloaders[0]) > 0)
        if self.available_data:
            self.dataset = trainer.val_dataloaders[0].dataset
            # Determine the path in which MOT results will be stored
            if trainer.logger is not None:
                # logger.version may be an int, so cast it for os.path.join
                save_dir = osp.join(trainer.logger.save_dir,
                                    trainer.logger.name,
                                    str(trainer.logger.version))
            else:
                save_dir = trainer.default_save_path

            self.output_files_dir = osp.join(save_dir, 'mot_files')
            self.output_metrics_dir = osp.join(save_dir, 'mot_metrics')
            os.makedirs(self.output_metrics_dir, exist_ok=True)

        # Compute oracle results if needed
        if self.available_data and self.compute_oracle_results:
            mot_metrics_summary = self._compute_mot_metrics(
                trainer.current_epoch, pl_module, oracle_results=True)
            print(mot_metrics_summary)
            oracle_path = osp.join(self.output_metrics_dir, 'oracle.npy')
            save_pickle(mot_metrics_summary.to_dict(), oracle_path)
            trainer.oracle_metrics = mot_metrics_summary
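
Both hooks call a project-level save_pickle helper that is not shown in these examples. A minimal sketch of such a helper, assuming it simply pickles an object to the given path:

import pickle

def save_pickle(obj, path):
    # Assumed helper (not shown in the examples): serialize `obj` to `path`
    # with pickle; the callers above pass paths with a '.npy' suffix.
    with open(path, 'wb') as f:
        pickle.dump(obj, f)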
Example #2
    def on_epoch_end(self, trainer, pl_module):
        # Compute MOT metrics on validation data, save them and log them
        if self.available_data:
            mot_metrics_summary = self._compute_mot_metrics(
                trainer.current_epoch, pl_module, oracle_results=False)
            metrics_path = osp.join(
                self.output_metrics_dir,
                f'epoch_{trainer.current_epoch + 1:03}.npy')
            save_pickle(mot_metrics_summary.to_dict(), metrics_path)

            if self.compute_oracle_results:
                for metric in pl_module.hparams['eval_params'][
                        'mot_metrics_to_norm']:
                    mot_metrics_summary['norm_' + metric] = (
                        mot_metrics_summary[metric] /
                        trainer.oracle_metrics[metric])

            if pl_module.logger is not None and hasattr(
                    pl_module.logger, 'experiment'):
                metric_names = pl_module.hparams['eval_params'][
                    'mot_metrics_to_log']
                if pl_module.hparams['eval_params']['log_per_seq_metrics']:
                    metrics_log = {
                        f'{metric}/val/{seq}': met_dict[seq]
                        for metric, met_dict in mot_metrics_summary.items()
                        for seq in list(self.dataset.seq_names) + ['OVERALL']
                        if metric in metric_names
                    }

                else:
                    metrics_log = {
                        f'{metric}/val': met_dict['OVERALL']
                        for metric, met_dict in mot_metrics_summary.items()
                        if metric in metric_names
                    }

                # Log whichever metrics dictionary was built above
                pl_module.logger.log_metrics(metrics_log,
                                             step=trainer.global_step)
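
These hooks belong to a PyTorch Lightning Callback subclass. A minimal sketch of how such a callback might be attached to a Trainer; the class name MOTMetricsCallback and its constructor are assumptions, not taken from the examples:

import pytorch_lightning as pl

class MOTMetricsCallback(pl.Callback):
    # Skeleton only: the real class also implements on_train_start,
    # on_epoch_end and _compute_mot_metrics as shown in the examples.
    def __init__(self, compute_oracle_results=False):
        self.compute_oracle_results = compute_oracle_results
        self.available_data = False

trainer = pl.Trainer(max_epochs=10,
                     callbacks=[MOTMetricsCallback(compute_oracle_results=True)])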
Example #3
    def on_epoch_end(self, trainer, pl_module):
        # Compute MOT metrics on validation data, save them and log them
        if self.available_data:
            mot_metrics_summary = self._compute_mot_metrics(
                trainer.current_epoch, pl_module, oracle_results=False)
            metrics_path = osp.join(
                self.output_metrics_dir,
                f"epoch_{trainer.current_epoch + 1:03}.npy")
            save_pickle(mot_metrics_summary.to_dict(), metrics_path)

            if self.compute_oracle_results:
                for metric in pl_module.hparams["eval_params"][
                        "mot_metrics_to_norm"]:
                    mot_metrics_summary["norm_" + metric] = (
                        mot_metrics_summary[metric] /
                        trainer.oracle_metrics[metric])

            if pl_module.logger is not None and hasattr(
                    pl_module.logger, "experiment"):
                metric_names = pl_module.hparams["eval_params"][
                    "mot_metrics_to_log"]
                if pl_module.hparams["eval_params"]["log_per_seq_metrics"]:
                    metrics_log = {
                        f"{metric}/val/{seq}": met_dict[seq]
                        for metric, met_dict in mot_metrics_summary.items()
                        for seq in list(self.dataset.seq_names) + ["OVERALL"]
                        if metric in metric_names
                    }

                else:
                    metrics_log = {
                        f"{metric}/val": met_dict["OVERALL"]
                        for metric, met_dict in mot_metrics_summary.items()
                        if metric in metric_names
                    }

                # Log whichever metrics dictionary was built above
                pl_module.logger.log_metrics(metrics_log,
                                             step=trainer.global_step)
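
The oracle normalization in both versions is a plain element-wise ratio between the validation metrics and the oracle metrics stored in on_train_start. A small illustrative computation with made-up values:

# Illustrative only: the metric names and values are invented.
mot_metrics = {'MOTA': 54.0, 'IDF1': 58.5}
oracle_metrics = {'MOTA': 60.0, 'IDF1': 65.0}

for metric in ['MOTA', 'IDF1']:
    mot_metrics['norm_' + metric] = mot_metrics[metric] / oracle_metrics[metric]

print(mot_metrics)  # {'MOTA': 54.0, 'IDF1': 58.5, 'norm_MOTA': 0.9, 'norm_IDF1': 0.9}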