    def run_task(self, db_client):
        import logging
        import traceback
        import util.database_helpers as dh
        import dataset.image_collection_builder as collection_builder

        # Load the controller and simulator objects from the database
        controller = dh.load_object(db_client,
                                    db_client.image_source_collection,
                                    self.controller_id)
        simulator = dh.load_object(db_client,
                                   db_client.image_source_collection,
                                   self.simulator_id,
                                   config=self.simulator_config)

        if simulator is None:
            logging.getLogger(__name__).error(
                "Could not deserialize simulator {0}".format(
                    self.simulator_id))
            self.mark_job_failed()
        elif controller is None:
            logging.getLogger(__name__).error(
                "Could not deserialize controller {0}".format(
                    self.controller_id))
            self.mark_job_failed()
        elif not controller.can_control_simulator(simulator):
            logging.getLogger(__name__).error(
                "Controller {0} can not control simulator {1}".format(
                    self.controller_id, self.simulator_id))
            self.mark_job_failed()
        else:
            controller.set_simulator(simulator)
            logging.getLogger(__name__).info(
                "Generating dataset from {0} using controller {1}".format(
                    self.simulator_id, self.controller_id))
            builder = collection_builder.ImageCollectionBuilder(db_client)
            try:
                builder.add_from_image_source(controller)
                dataset_id = builder.save()
            except Exception:
                dataset_id = None
                logging.getLogger(__name__).error(
                    "Exception occurred while generating dataset from simulator {0} with controller {1}:\n{2}"
                    .format(self.simulator_id, self.controller_id,
                            traceback.format_exc()))
            if dataset_id is None:
                logging.getLogger(__name__).error(
                    "Failed to generate dataset from simulator {0} with controller {1}: Dataset was null"
                    .format(self.simulator_id, self.controller_id))
                self.mark_job_failed()
            else:
                self.mark_job_complete(dataset_id)
                logging.getLogger(__name__).info(
                    "Successfully generated dataset {0}".format(dataset_id))
    def run_task(self, db_client):
        import logging
        import traceback
        import util.database_helpers as dh

        trial_result = dh.load_object(db_client, db_client.trials_collection,
                                      self.trial_result)
        benchmark = dh.load_object(db_client, db_client.benchmarks_collection,
                                   self.benchmark)

        if trial_result is None:
            logging.getLogger(__name__).error(
                "Could not deserialize trial result {0}".format(
                    self.trial_result))
            self.mark_job_failed()
        elif benchmark is None:
            logging.getLogger(__name__).error(
                "Could not deserialize benchmark {0}".format(self.benchmark))
            self.mark_job_failed()
        elif not benchmark.is_trial_appropriate(trial_result):
            logging.getLogger(__name__).error(
                "Benchmark {0} cannot assess trial {1}".format(
                    self.benchmark, self.trial_result))
            self.mark_job_failed()
        else:
            logging.getLogger(__name__).info(
                "Benchmarking result {0} with benchmark {1}".format(
                    self.trial_result, self.benchmark))
            try:
                benchmark_result = benchmark.benchmark_results(trial_result)
            except Exception:
                logging.getLogger(__name__).error(
                    "Exception while benchmarking {0} with benchmark {1}:\n{2}"
                    .format(self.trial_result, self.benchmark,
                            traceback.format_exc()))
                benchmark_result = None
            if benchmark_result is None:
                logging.getLogger(__name__).error(
                    "Failed to benchmark {0} with {1}".format(
                        self.trial_result, self.benchmark))
                self.mark_job_failed()
            else:
                benchmark_result_id = db_client.results_collection.insert(
                    benchmark_result.serialize())
                logging.getLogger(__name__).info(
                    "Successfully benchmarked trial {0} with benchmark {1},"
                    "producing result {2}".format(self.trial_result,
                                                  self.benchmark,
                                                  benchmark_result_id))
                self.mark_job_complete(benchmark_result_id)
    def run_task(self, db_client):
        import logging
        import traceback
        import util.database_helpers as dh

        trainer = dh.load_object(db_client, db_client.trainer_collection,
                                 self.trainer)
        trainee = dh.load_object(db_client, db_client.trainee_collection,
                                 self.trainee)

        if trainer is None:
            logging.getLogger(__name__).error(
                "Could not deserialize trainer {0}".format(self.trainer))
            self.mark_job_failed()
        elif trainee is None:
            logging.getLogger(__name__).error(
                "Could not deserialize trainee {0}".format(self.trainee))
            self.mark_job_failed()
        elif not trainer.can_train_trainee(trainee):
            logging.getLogger(__name__).error(
                "Trainer {0} cannot train trainee {1}".format(
                    self.trainer, self.trainee))
            self.mark_job_failed()
        else:
            logging.getLogger(__name__).info(
                "Start training trainee {0} ({1}) with trainer {2} (3)".format(
                    self.trainee,
                    trainee.__module__ + '.' + trainee.__class__.__name__,
                    self.trainer,
                    trainer.__module__ + '.' + trainer.__class__.__name__))
            try:
                system = trainer.train_vision_system(trainee)
            except Exception:
                logging.getLogger(__name__).error(
                    "Error occurred while trainer {0} trains trainee {1}:\n{2}"
                    .format(self.trainer, self.trainee,
                            traceback.format_exc()))
                system = None
            if system is None:
                logging.getLogger(__name__).error(
                    "Failed to train trainee {0} with trainer {1}".format(
                        self.trainer, self.trainee))
                self.mark_job_failed()
            else:
                system_id = db_client.system_collection.insert(
                    system.serialize())
                logging.getLogger(__name__).info(
                    "Successfully trained system {0}".format(system_id))
                self.mark_job_complete(system_id)
def main(*args):
    """
    Run a particular task.
    :param args: The only argument is the id of the task to run
    :return:
    """
    if len(args) >= 1:
        task_id = bson.objectid.ObjectId(args[0])

        config = global_conf.load_global_config('config.yml')
        if __name__ == '__main__':
            # Only configure logging when run as the main module, so we don't reconfigure it
            logging.config.dictConfig(config['logging'])
        db_client = database.client.DatabaseClient(config=config)

        task = dh.load_object(db_client, db_client.tasks_collection, task_id)
        if task is not None:
            try:
                task.run_task(db_client)
            except Exception:
                logging.getLogger(__name__).error(
                    "Exception occurred while running {0}: {1}".format(
                        type(task).__name__, traceback.format_exc()))
                task.mark_job_failed()
            task.save_updates(db_client.tasks_collection)
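
A plausible command-line entry point for the function above, assuming the task id arrives as the first process argument; this wiring is a sketch, not taken from the original module.

import sys

if __name__ == '__main__':
    main(*sys.argv[1:])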
def main(do_imports: bool = True,
         schedule_tasks: bool = True,
         run_tasks: bool = True,
         experiment_ids: typing.List[str] = None):
    """
    Schedule tasks for all experiments.
    We need to find a way of running this repeatedly as a daemon
    :param do_imports: Whether to do imports for all the experiments. Default True.
    :param schedule_tasks: Whether to schedule execution tasks for the experiments. Default True.
    :param run_tasks: Whether to actually use the job system to execute the scheduled tasks. Default True.
    :param experiment_ids: A limited set of experiments to schedule for. Default None, which means all experiments.
    """
    config = global_conf.load_global_config('config.yml')
    if __name__ == '__main__':
        logging.config.dictConfig(config['logging'])
    db_client = database.client.DatabaseClient(config=config)
    task_manager = batch_analysis.task_manager.TaskManager(
        db_client.tasks_collection, db_client, config)

    if do_imports or schedule_tasks:
        query = {'enabled': {'$ne': False}}
        if experiment_ids is not None and len(experiment_ids) > 0:
            query['_id'] = {
                '$in': [bson.ObjectId(id_) for id_ in experiment_ids]
            }
        experiment_ids = db_client.experiments_collection.find(
            query, {'_id': True})

        logging.getLogger(__name__).info("Scheduling experiments...")
        for experiment_id in experiment_ids:
            experiment = dh.load_object(db_client,
                                        db_client.experiments_collection,
                                        experiment_id['_id'])
            if experiment is not None and experiment.enabled:
                logging.getLogger(__name__).info(" ... experiment {0}".format(
                    experiment.identifier))
                try:
                    if do_imports:
                        experiment.do_imports(task_manager, db_client)
                    if schedule_tasks:
                        experiment.schedule_tasks(task_manager, db_client)
                    experiment.save_updates(db_client)
                except Exception:
                    logging.getLogger(__name__).error(
                        "Exception occurred during scheduling:\n{0}".format(
                            traceback.format_exc()))

    if run_tasks:
        logging.getLogger(__name__).info("Running tasks...")
        job_system = job_system_factory.create_job_system(config=config)
        task_manager.schedule_tasks(job_system)

        # Actually run the queued jobs.
        job_system.run_queued_jobs()
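
As a hedged usage example, scheduling (but not running) tasks for a single experiment might look like the following; the id string is a placeholder, not a real experiment.

if __name__ == '__main__':
    # Placeholder id; substitute a real experiment ObjectId string
    main(do_imports=True, schedule_tasks=True, run_tasks=False,
         experiment_ids=['0123456789abcdef01234567'])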
    def get(self, index):
        """
        A getter for random access, since we're storing a list
        :param index: The index of the image to get
        :return: The image at that index, or None if the index is not in the collection
        """
        if index in self._images:
            return dh.load_object(self._db_client,
                                  self._db_client.image_collection,
                                  self._images[index])
        return None
    def run_task(self, db_client):
        import logging
        import traceback
        import util.database_helpers as dbhelp

        system = dbhelp.load_object(db_client, db_client.system_collection, self.system)
        image_source = dbhelp.load_object(db_client, db_client.image_source_collection, self.image_source)

        if system is None:
            logging.getLogger(__name__).error("Could not deserialize system {0}".format(self.system))
            self.mark_job_failed()
        elif image_source is None:
            logging.getLogger(__name__).error("Could not deserialize image source {0}".format(self.image_source))
            self.mark_job_failed()
        elif not system.is_image_source_appropriate(image_source):
            logging.getLogger(__name__).error("Image source {0} is inappropriate for system {1}".format(
                self.image_source, self.system))
            self.mark_job_failed()
        else:
            logging.getLogger(__name__).info("Start running system {0} ({1}) with image source {2}".format(
                self.system,
                system.__module__ + '.' + system.__class__.__name__,
                self.image_source))
            try:
                trial_result = run_system_with_source(system, image_source)
            except Exception:
                logging.getLogger(__name__).error("Error occurred while running system {0} "
                                                  "with image source {1}:\n{2}".format(
                    self.system, self.image_source, traceback.format_exc()))
                trial_result = None
            if trial_result is None:
                logging.getLogger(__name__).error("Failed to system {0} with image source {1}.".format(
                    self.system, self.image_source))
                self.mark_job_failed()
            else:
                trial_result.save_data(db_client)
                trial_result_id = db_client.trials_collection.insert(trial_result.serialize())
                logging.getLogger(__name__).info(("Successfully ran system {0} with image source {1},"
                                                  "producing trial result {2}").format(
                    self.system, self.image_source, trial_result_id))
                self.mark_job_complete(trial_result_id)
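
run_system_with_source is not shown in this listing. Below is a minimal sketch of what such a helper might do; the method names on system and image_source are assumptions inferred from how those objects are used elsewhere here, not the project's actual API.

def run_system_with_source_sketch(system, image_source):
    # Hypothetical protocol: begin the sequence, feed every image through
    # the system in order, then collect the trial result.
    system.start_trial(image_source.sequence_type)   # assumed method
    image_source.begin()                             # assumed method
    while not image_source.is_complete():
        image, timestamp = image_source.get_next_image()
        system.process_image(image, timestamp)
    return system.finish_trial()                     # assumed to return a trial result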
def main(*args):
    config = global_conf.load_global_config("config.yml")
    if __name__ == '__main__':
        logging.config.dictConfig(config['logging'])
    db_client = database.client.DatabaseClient(config)
    experiment_ids = db_client.experiments_collection.find(
        {'enabled': {
            '$ne': False
        }}, {'_id': True})
    for ex_id in experiment_ids:
        experiment = dh.load_object(db_client,
                                    db_client.experiments_collection,
                                    ex_id['_id'])
        if experiment is not None and experiment.enabled:
            experiment.plot_results(db_client)
def main():
    """
    Allow experiments to dump some data to file. This might be aggregate statistics;
    I'm currently using it for camera trajectories.
    :return:
    """
    config = global_conf.load_global_config('config.yml')
    if __name__ == '__main__':
        logging.config.dictConfig(config['logging'])
    db_client = database.client.DatabaseClient(config=config)
    experiment_ids = db_client.experiments_collection.find(
        {'enabled': {
            '$ne': False
        }}, {'_id': True})
    for ex_id in experiment_ids:
        experiment = dh.load_object(db_client,
                                    db_client.experiments_collection,
                                    ex_id['_id'])
        if experiment is not None and experiment.enabled:
            experiment.export_data(db_client)
    def do_imports(self, task_manager: batch_analysis.task_manager.TaskManager,
                   db_client: database.client.DatabaseClient) -> bool:
        """
        Do imports and dataset generation for this trajectory group.
        Will create a controller, and then generate reduced quality synthetic datasets.
        :param task_manager:
        :param db_client:
        :return: True if part of the group has changed, and it needs to be re-saved
        """
        changed = False
        # First, make a follow controller for the base dataset if we don't have one.
        # This will be used to generate reduced-quality datasets following the same trajectory
        # as the root dataset
        if self.follow_controller_id is None:
            self.follow_controller_id = follow_cont.create_follow_controller(
                db_client,
                self.reference_dataset,
                sequence_type=core.sequence_type.ImageSequenceType.SEQUENTIAL)
            changed = True
        # Next, if we haven't already, compute baseline configuration from the reference dataset
        if self.baseline_configuration is None or len(
                self.baseline_configuration) == 0:
            reference_dataset = dh.load_object(
                db_client, db_client.image_source_collection,
                self.reference_dataset)
            if isinstance(reference_dataset,
                          core.image_collection.ImageCollection):
                intrinsics = reference_dataset.get_camera_intrinsics()
                self.baseline_configuration = {
                        # Simulation execution config
                        'stereo_offset': reference_dataset.get_stereo_baseline() \
                        if reference_dataset.is_stereo_available else 0,
                        'provide_rgb': True,
                        'provide_depth': reference_dataset.is_depth_available,
                        'provide_labels': reference_dataset.is_labels_available,
                        'provide_world_normals': reference_dataset.is_normals_available,

                        # Simulator camera settings, chosen to be similar to the reference dataset
                        'resolution': {'width': intrinsics.width, 'height': intrinsics.height},
                        'fov': max(intrinsics.horizontal_fov, intrinsics.vertical_fov),
                        'depth_of_field_enabled': False,
                        'focus_distance': None,
                        'aperture': 2.2,

                        # Quality settings - Maximum quality
                        'lit_mode': True,
                        'texture_mipmap_bias': 0,
                        'normal_maps_enabled': True,
                        'roughness_enabled': True,
                        'geometry_decimation': 0,
                        'depth_noise_quality': 1,

                        # Simulation server config
                        'host': 'localhost',
                        'port': 9000,
                    }
                changed = True

        # Then, for each combination of simulator id and config, generate a dataset
        for sim_name, (simulator_id, simulator_config) in self.simulators.items():
            # Schedule generation of quality variations that don't exist yet
            if sim_name not in self.generated_datasets:
                generate_dataset_task = task_manager.get_generate_dataset_task(
                    controller_id=self.follow_controller_id,
                    simulator_id=simulator_id,
                    simulator_config=du.defaults({}, simulator_config,
                                                 self.baseline_configuration),
                    num_cpus=1,
                    num_gpus=0,
                    memory_requirements='3GB',
                    expected_duration='4:00:00')
                if generate_dataset_task.is_finished:
                    self.generated_datasets[sim_name] = generate_dataset_task.result
                    changed = True
                else:
                    task_manager.do_task(generate_dataset_task)
        return changed
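
du.defaults above appears to merge several configuration dicts with left-most priority, so the per-simulator config overrides the baseline. A minimal stand-in with that behaviour (an assumption about util.dict_utils, not its actual implementation):

def defaults_sketch(*config_dicts):
    # Earlier dicts win: later dicts only fill in keys that are still missing
    merged = {}
    for config in config_dicts:
        for key, value in config.items():
            merged.setdefault(key, value)
    return merged

# e.g. defaults_sketch({}, {'fov': 90}, {'fov': 60, 'port': 9000})
# -> {'fov': 90, 'port': 9000}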
    def _plot_relative_pose_error(self,
                                  db_client: database.client.DatabaseClient):
        import matplotlib.pyplot as pyplot

        logging.getLogger(__name__).info("Plotting relative pose error...")
        # Map system ids and simulator ids to printable names
        simulator_names = {v: k for k, v in self._simulators.items()}
        systems = du.defaults({'LIBVISO 2': self._libviso_system},
                              self._orbslam_systems)

        for trajectory_group in self._trajectory_groups.values():
            # Collect all the image sources for this trajectory group
            image_sources = {
                'reference dataset': trajectory_group.reference_dataset
            }
            for simulator_id, dataset_id in trajectory_group.generated_datasets.items():
                if simulator_id in simulator_names:
                    image_sources[simulator_names[simulator_id]] = dataset_id
                else:
                    image_sources[simulator_id] = dataset_id

            if len(image_sources) <= 1:
                # Skip where we've only got one image source, it's not interesting.
                continue

            # Collect the results for each image source in this group
            results = {}
            styles = {}
            for system_name, system_id in systems.items():
                for dataset_name, dataset_id in image_sources.items():
                    trial_result_id = self.get_trial_result(
                        system_id, dataset_id)
                    if trial_result_id is not None:
                        result_id = self.get_benchmark_result(
                            trial_result_id, self._benchmark_rpe)
                        if result_id is not None:
                            label = "{0} on {1}".format(
                                system_name, dataset_name)
                            results[label] = result_id
                            styles[label] = '-' if dataset_name == 'reference dataset' else '--'

            if len(results) > 1:
                figure = pyplot.figure(figsize=(14, 10), dpi=80)
                figure.suptitle("Relative pose error for {0}".format(
                    trajectory_group.name))
                ax = figure.add_subplot(111)
                ax.set_xlabel('time')
                ax.set_ylabel('relative pose error')

                # For each trial result
                for label, result_id in results.items():
                    result = dh.load_object(db_client,
                                            db_client.results_collection,
                                            result_id)
                    if result is not None:
                        if result.success:
                            x = []
                            y = []
                            times = sorted(result.translational_error.keys())
                            for time in times:
                                error = result.translational_error[time]
                                if error < 100:
                                    x.append(time - times[0])
                                    y.append(error)
                            ax.plot(x,
                                    y,
                                    styles[label],
                                    label=label,
                                    alpha=0.7)
                        else:
                            print("Got failed result: {0}".format(
                                result.reason))

                logging.getLogger(__name__).info(
                    "... plotted rpe for {0}".format(trajectory_group.name))
                ax.legend()
                pyplot.tight_layout()
                pyplot.subplots_adjust(top=0.95, right=0.99)
        pyplot.show()
    def _plot_trajectories(self, db_client: database.client.DatabaseClient):
        """
        Plot the ground-truth and computed trajectories for each system for each trajectory.
        This is important for validation
        :param db_client:
        :return:
        """
        import matplotlib.pyplot as pyplot
        # noinspection PyUnresolvedReferences
        from mpl_toolkits.mplot3d import Axes3D

        logging.getLogger(__name__).info("Plotting trajectories...")
        # Map system ids and simulator ids to printable names
        simulator_names = {v: k for k, v in self._simulators.items()}
        systems = du.defaults({'LIBVISO 2': self._libviso_system},
                              self._orbslam_systems)

        for trajectory_group in self._trajectory_groups.values():
            # Collect all the image sources for this trajectory group
            image_sources = {
                'reference dataset': trajectory_group.reference_dataset
            }
            for simulator_id, dataset_id in trajectory_group.generated_datasets.items():
                if simulator_id in simulator_names:
                    image_sources[simulator_names[simulator_id]] = dataset_id
                else:
                    image_sources[simulator_id] = dataset_id
            if len(image_sources) <= 1:
                continue

            # Collect the trial results for each image source in this group
            trial_results = {}
            style = {}
            for system_name, system_id in systems.items():
                for dataset_name, dataset_id in image_sources.items():
                    trial_result_id = self.get_trial_result(
                        system_id, dataset_id)
                    if trial_result_id is not None:
                        label = "{0} on {1}".format(system_name, dataset_name)
                        trial_results[label] = trial_result_id
                        style[label] = '--' if dataset_name == 'reference dataset' else '-'

            # Make sure we have at least one result to plot
            if len(trial_results) > 1:
                figure = pyplot.figure(figsize=(14, 10), dpi=80)
                figure.suptitle("Computed trajectories for {0}".format(
                    trajectory_group.name))
                ax = figure.add_subplot(111, projection='3d')
                ax.set_xlabel('x-location')
                ax.set_ylabel('y-location')
                ax.set_zlabel('z-location')
                ax.plot([0], [0], [0], 'ko', label='origin')
                added_ground_truth = False

                # For each trial result
                for label, trial_result_id in trial_results.items():
                    trial_result = dh.load_object(db_client,
                                                  db_client.trials_collection,
                                                  trial_result_id)
                    if trial_result is not None:
                        if trial_result.success:
                            if not added_ground_truth:
                                lower, upper = plot_trajectory(
                                    ax,
                                    trial_result.get_ground_truth_camera_poses(),
                                    'ground truth trajectory')
                                mean = (upper + lower) / 2
                                lower = 1.2 * lower - mean
                                upper = 1.2 * upper - mean
                                ax.set_xlim(lower, upper)
                                ax.set_ylim(lower, upper)
                                ax.set_zlim(lower, upper)
                                added_ground_truth = True
                            plot_trajectory(
                                ax,
                                trial_result.get_computed_camera_poses(),
                                label=label,
                                style=style[label])
                        else:
                            print("Got failed trial: {0}".format(
                                trial_result.reason))

                logging.getLogger(__name__).info(
                    "... plotted trajectories for {0}".format(
                        trajectory_group.name))
                ax.legend()
                pyplot.tight_layout()
                pyplot.subplots_adjust(top=0.95, right=0.99)
        pyplot.show()
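
plot_trajectory is used here as if it draws a pose sequence on a 3D axis and returns lower and upper bounds for axis scaling. A hypothetical sketch consistent with that usage, assuming a trajectory maps timestamps to poses exposing a .location triple:

def plot_trajectory_sketch(ax, trajectory, label, style='-'):
    # Assumed structure: trajectory is {timestamp: pose} with pose.location
    x, y, z = [], [], []
    for _, pose in sorted(trajectory.items()):
        x.append(pose.location[0])
        y.append(pose.location[1])
        z.append(pose.location[2])
    ax.plot(x, y, z, style, label=label)
    points = x + y + z
    if not points:
        return -1, 1
    return min(points), max(points)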
    def run_task(self, db_client):
        import logging
        import traceback
        import util.database_helpers as dh

        trial_result_1 = dh.load_object(db_client, db_client.trials_collection,
                                        self.trial_result1)
        trial_result_2 = dh.load_object(db_client, db_client.trials_collection,
                                        self.trial_result2)
        comparison_benchmark = dh.load_object(db_client,
                                              db_client.benchmarks_collection,
                                              self.comparison)

        if trial_result_1 is None:
            logging.getLogger(__name__).error(
                "Could not deserialize trial result {0}".format(
                    self.trial_result1))
            self.mark_job_failed()
        elif trial_result_2 is None:
            logging.getLogger(__name__).error(
                "Could not deserialize trial result {0}".format(
                    self.trial_result2))
            self.mark_job_failed()
        elif comparison_benchmark is None:
            logging.getLogger(__name__).error(
                "Could not deserialize comparison benchmark {0}".format(
                    self.comparison))
            self.mark_job_failed()
        elif (not comparison_benchmark.is_trial_appropriate(trial_result_1) or
              not comparison_benchmark.is_trial_appropriate(trial_result_2)):
            logging.getLogger(__name__).error(
                "Benchmark {0} is not appropriate for trial {1} or {2}".format(
                    self.comparison, self.trial_result1, self.trial_result2))
            self.mark_job_failed()
        else:
            logging.getLogger(__name__).info(
                "Comparing trial results {0} and {1} with comparison benchmark {2}"
                .format(self.trial_result1, self.trial_result2,
                        self.comparison))
            try:
                comparison_result = comparison_benchmark.compare_trial_results(
                    trial_result_1, trial_result_2)
            except Exception:
                logging.getLogger(__name__).error(
                    "Error occurred while comparing trials {0} and {1}"
                    "with benchmark {2}:\n{3}".format(self.trial_result1,
                                                      self.trial_result2,
                                                      self.comparison,
                                                      traceback.format_exc()))
                comparison_result = None
            if comparison_result is None:
                logging.getLogger(__name__).error(
                    "Failed to compare trials {0} and {1}"
                    "with benchmark {2}".format(self.trial_result1,
                                                self.trial_result2,
                                                self.comparison))
                self.mark_job_failed()
            else:
                comparison_result.save_data(db_client)
                result_id = db_client.results_collection.insert(
                    comparison_result.serialize())
                logging.getLogger(__name__).info(
                    "Successfully compared trials {0} and {1},"
                    "producing result {2}".format(self.trial_result1,
                                                  self.trial_result2,
                                                  result_id))
                self.mark_job_complete(result_id)
    def plot_results(self, db_client):

        for result_id in self.results_ids:
            result = dh.load_object(db_client, db_client.results_collection, result_id)
            if result is not None:
                precision, recall, f1score, log_gt_area, iou = result.list_results(
                    overlap_result.precision,
                    overlap_result.recall,
                    overlap_result.f1_score,
                    lambda x: np.log(x['ground_truth_area']) if x['ground_truth_area'] > 0 else 0,
                    lambda x: x['overlap'] / (x['bounding_box_area'] + x['ground_truth_area'] - x['overlap'])
                )

        figure = pyplot.figure(figsize=(14, 10), dpi=80)
        ax_pr = figure.add_subplot(111)
        ax_pr.set_xlabel('precision')
        ax_pr.set_ylabel('recall')
        pyplot.tight_layout()
        pyplot.subplots_adjust(top=0.95, right=0.99)

        figure = pyplot.figure(figsize=(14, 10), dpi=80)
        ax_p_area = figure.add_subplot(121)
        ax_p_area.set_xlabel('bounding box area')
        ax_p_area.set_ylabel('precision')

        ax_r_area = figure.add_subplot(122)
        ax_r_area.set_xlabel('bounding box area')
        ax_r_area.set_ylabel('recall')
        pyplot.tight_layout()
        pyplot.subplots_adjust(top=0.95, right=0.99)

        fp_values = []
        fn_values = []
        false_labels = []

        boxplot_stuff = []
        boxplot_labels = []
        for result_id in self.results_ids:
            s_result = db_client.results_collection.find_one({'_id': result_id})
            result = db_client.deserialize_entity(s_result)
            name = self.get_name(result, db_client)
            precision, recall, f1score, gt_area, iou = result.list_results(
                overlap_result.precision, overlap_result.recall, overlap_result.f1_score,
                lambda x: np.log(x['ground_truth_area']) if x['ground_truth_area'] > 0 else 0,
                lambda x: x['overlap'] / (x['bounding_box_area'] + x['ground_truth_area'] - x['overlap']))

            boxplot_stuff.append(iou)
            boxplot_labels.append(name)

            ax_pr.scatter(precision, recall, label=name)
            x, y = prune_zero_points(gt_area, precision)
            ax_p_area.scatter(x, y, label=name)
            x, y = prune_zero_points(gt_area, recall)
            ax_r_area.scatter(x, y, label=name)

            # Count false positives
            false_labels.append(name)
            false_positives = 0
            false_negatives = 0
            for bbox_results in result.overlaps.values():
                for bbox_result in bbox_results:
                    if bbox_result['ground_truth_area'] == 0:
                        false_positives += 1
                    if bbox_result['bounding_box_area'] == 0:
                        false_negatives += 1
            fp_values.append(false_positives)
            fn_values.append(false_negatives)

        ax_pr.legend()
        ax_p_area.legend()

        # False positives
        x = np.arange(len(fp_values))
        figure = pyplot.figure(figsize=(14, 10), dpi=80)
        ax_fp = figure.add_subplot(121)
        ax_fp.set_ylabel('false positives')
        ax_fp.set_xticks(x)
        ax_fp.set_xticklabels(false_labels)
        ax_fp.bar(x, fp_values, align='center')
        ax_fn = figure.add_subplot(122)
        ax_fn.set_ylabel('false negatives')
        ax_fn.set_xticks(x)
        ax_fn.set_xticklabels(false_labels)
        ax_fn.bar(x, fn_values, align='center')
        pyplot.tight_layout()
        pyplot.subplots_adjust(top=0.95, right=0.99)

        figure = pyplot.figure(figsize=(14, 10), dpi=80)
        ax = figure.add_subplot(111)
        ax.set_xlabel('precision')
        ax.set_ylabel('Intersection over Union')
        ax.boxplot(boxplot_stuff, positions=list(range(len(self.results_ids))))
        ax.set_xticks(list(range(len(self.results_ids))))
        ax.set_xticklabels(boxplot_labels)

        pyplot.tight_layout()
        pyplot.subplots_adjust(top=0.95, right=0.99)
        pyplot.show()
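
prune_zero_points is used above to thin the scatter data before plotting. A plausible stand-in that drops any (x, y) pair where either value is zero; this is an assumption about the helper, not its original definition.

def prune_zero_points_sketch(xs, ys):
    # Keep only the pairs where both coordinates are non-zero
    pairs = [(x, y) for x, y in zip(xs, ys) if x != 0 and y != 0]
    if not pairs:
        return [], []
    pruned_x, pruned_y = zip(*pairs)
    return list(pruned_x), list(pruned_y)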
    def schedule_all(self,
                     task_manager: batch_analysis.task_manager.TaskManager,
                     db_client: database.client.DatabaseClient,
                     systems: typing.List[bson.ObjectId],
                     image_sources: typing.List[bson.ObjectId],
                     benchmarks: typing.List[bson.ObjectId]):
        """
        Schedule all combinations of running some list of systems with some list of image sources,
        and then benchmarking the results with some list of benchmarks.
        Uses is_image_source_appropriate and is_benchmark_appropriate to filter.
        Created results can be retrieved with get_trial_result and get_benchmark_result.

        :param task_manager: The task manager to perform scheduling
        :param db_client: The database client, to load the systems, image sources, etc.
        :param systems: The list of system ids to test
        :param image_sources: The list of image source ids to use
        :param benchmarks: The list of benchmark ids to measure the results
        :return: void
        """
        # Trial results will be collected as we go
        trial_results = set()

        # For each image dataset, run each system with that dataset, and store the result in the trial map
        for image_source_id in image_sources:
            image_source = dh.load_object(db_client,
                                          db_client.image_source_collection,
                                          image_source_id)
            if image_source is None:
                continue
            for system_id in systems:
                system = dh.load_object(db_client, db_client.system_collection,
                                        system_id)
                if system is not None and system.is_image_source_appropriate(
                        image_source):
                    task = task_manager.get_run_system_task(
                        system_id=system.identifier,
                        image_source_id=image_source.identifier,
                        expected_duration='8:00:00',
                        memory_requirements='12GB')
                    if not task.is_finished:
                        task_manager.do_task(task)
                    else:
                        trial_results.add(task.result)
                        self.store_trial_result(system_id, image_source_id,
                                                task.result)

        # Benchmark trial results
        for trial_result_id in trial_results:
            trial_result = dh.load_object(db_client,
                                          db_client.trials_collection,
                                          trial_result_id)
            if trial_result is None:
                continue
            for benchmark_id in benchmarks:
                benchmark = dh.load_object(db_client,
                                           db_client.benchmarks_collection,
                                           benchmark_id)
                if benchmark is not None and benchmark.is_trial_appropriate(
                        trial_result):
                    task = task_manager.get_benchmark_task(
                        trial_result_id=trial_result.identifier,
                        benchmark_id=benchmark.identifier,
                        expected_duration='6:00:00',
                        memory_requirements='6GB')
                    if not task.is_finished:
                        task_manager.do_task(task)
                    else:
                        self.store_benchmark_result(trial_result_id,
                                                    benchmark_id, task.result)
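
A sketch of how an experiment might drive schedule_all from its schedule_tasks hook, reusing attribute names that appear elsewhere in this listing; the exact attributes an experiment keeps are assumptions.

    def schedule_tasks(self, task_manager, db_client):
        # Hypothetical wiring: test every system on every reference dataset,
        # measuring each trial with the relative pose error benchmark.
        self.schedule_all(
            task_manager, db_client,
            systems=[self._libviso_system] + list(self._orbslam_systems.values()),
            image_sources=[group.reference_dataset
                           for group in self._trajectory_groups.values()],
            benchmarks=[self._benchmark_rpe])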
    def _plot_trajectories(self, db_client: database.client.DatabaseClient):
        """
        Plot the ground-truth and computed trajectories for each system for each trajectory.
        This is important for validation
        :param db_client:
        :return:
        """
        import matplotlib.pyplot as pyplot
        # noinspection PyUnresolvedReferences
        from mpl_toolkits.mplot3d import Axes3D

        logging.getLogger(__name__).info("Plotting trajectories...")
        # Map system ids and simulator ids to printable names
        simulator_names = {v: k for k, v in self._simulators.items()}
        systems = du.defaults({'LIBVISO 2': self._libviso_system},
                              self._orbslam_systems)

        for trajectory_group in self._trajectory_groups.values():

            # Collect all the image sources for this trajectory group
            image_sources = {}
            for simulator_id, dataset_id in trajectory_group.generated_datasets.items():
                if simulator_id in simulator_names:
                    image_sources[simulator_names[simulator_id]] = dataset_id
                else:
                    image_sources[simulator_id] = dataset_id

            # Collect the trial results for each image source in this group
            trial_results = {}
            for system_name, system_id in systems.items():
                for dataset_name, dataset_id in image_sources.items():
                    trial_result_id = self.get_trial_result(
                        system_id, dataset_id)
                    if trial_result_id is not None:
                        label = "{0} on {1}".format(system_name, dataset_name)
                        trial_results[label] = trial_result_id

            # Make sure we have at least one result to plot
            if len(trial_results) >= 1:
                figure = pyplot.figure(figsize=(14, 10), dpi=80)
                figure.suptitle("Computed trajectories for {0}".format(
                    trajectory_group.name))
                ax = figure.add_subplot(111, projection='3d')
                ax.set_xlabel('x-location')
                ax.set_ylabel('y-location')
                ax.set_zlabel('z-location')

                figure = pyplot.figure(figsize=(14, 10), dpi=80)
                figure.suptitle("Computed orientation for {0}".format(
                    trajectory_group.name))
                oax = figure.add_subplot(111, projection='3d')
                oax.set_xlabel('x-location')
                oax.set_ylabel('y-location')
                oax.set_zlabel('z-location')

                added_ground_truth = False
                lowest = -0.001
                highest = 0.001
                cmap = pyplot.get_cmap('Set1')
                colour_index = 0

                # For each trial result
                for label, trial_result_id in trial_results.items():
                    trial_result = dh.load_object(db_client,
                                                  db_client.trials_collection,
                                                  trial_result_id)
                    if trial_result is not None:
                        if trial_result.success:
                            if not added_ground_truth:
                                trajectory = trial_result.get_ground_truth_camera_poses()
                                lower, upper = plot_trajectory(
                                    ax,
                                    trajectory,
                                    'ground truth trajectory',
                                    style='k--')
                                lowest = min(lowest, lower)
                                highest = max(highest, upper)
                                added_ground_truth = True
                            trajectory = trial_result.get_computed_camera_poses()
                            lower, upper = plot_trajectory(ax,
                                                           trajectory,
                                                           label=label,
                                                           style='-')
                            plot_forward(
                                oax,
                                trajectory,
                                label=label,
                                colors=[cmap(colour_index / 9, alpha=0.5)])
                            lowest = min(lowest, lower)
                            highest = max(highest, upper)
                            colour_index += 1
                        else:
                            print("Got failed trial: {0}".format(
                                trial_result.reason))

                logging.getLogger(__name__).info(
                    "... plotted trajectories for {0}".format(
                        trajectory_group.name))
                ax.legend()
                ax.set_xlim(lowest, highest)
                ax.set_ylim(lowest, highest)
                ax.set_zlim(lowest, highest)

                oax.legend()
                oax.set_xlim(lowest, highest)
                oax.set_ylim(lowest, highest)
                oax.set_zlim(lowest, highest)

                pyplot.tight_layout()
                pyplot.subplots_adjust(top=0.95, right=0.99)
        pyplot.show()
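
plot_forward is called above with a 3D axis, a trajectory, and a colour list. A hypothetical sketch that draws each pose's forward direction as a quiver, assuming poses expose .location and a unit .forward vector:

def plot_forward_sketch(ax, trajectory, label, colors=None):
    # Assumed structure: trajectory is {timestamp: pose} with pose.location
    # and pose.forward both being 3-element vectors
    x, y, z, u, v, w = [], [], [], [], [], []
    for _, pose in sorted(trajectory.items()):
        x.append(pose.location[0])
        y.append(pose.location[1])
        z.append(pose.location[2])
        u.append(pose.forward[0])
        v.append(pose.forward[1])
        w.append(pose.forward[2])
    ax.quiver(x, y, z, u, v, w, length=0.5, colors=colors, label=label)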