def test_load_global_config_saves_config_file_if_not_available(
         self, mock_isfile, mock_yaml_load, mock_save):
     mock_isfile.return_value = False
     mock_yaml_load.return_value = {}
     filename = 'test_config_file_3'
     global_conf.load_global_config(filename)
     self.assertTrue(mock_save.called)
     self.assertEqual(filename, mock_save.call_args[0][0])
 def test_load_global_config_waits_and_retries_three_times_if_load_failed(
         self, mock_isfile, mock_yaml_load, mock_sleep):
     mock_isfile.return_value = True
     mock_yaml_load.return_value = None
     with mock.patch('config.global_configuration.open',
                     mock.mock_open(),
                     create=True):
         global_conf.load_global_config('test_config_file_5')
     self.assertEqual(3, mock_yaml_load.call_count)
     self.assertEqual(3, mock_sleep.call_count)
 def test_load_global_config_does_not_save_config_file_if_available(
         self, mock_isfile, mock_yaml_load, mock_save):
     mock_isfile.return_value = True
     mock_yaml_load.return_value = {}
     filename = 'test_config_file_4'
     with mock.patch('config.global_configuration.open',
                     mock.mock_open(),
                     create=True):
         global_conf.load_global_config(filename)
     self.assertFalse(mock_save.called)
 def test_load_global_config_reads_config_file_if_available(
         self, mock_isfile, mock_yaml_load):
     mock_isfile.return_value = True
     mock_yaml_load.return_value = {}
     mock_open = mock.mock_open()
     filename = 'test_config_file_2'
     with mock.patch('config.global_configuration.open',
                     mock_open,
                     create=True):
         global_conf.load_global_config(filename)
     self.assertTrue(mock_open.called)
     self.assertEqual(filename, mock_open.call_args[0][0])
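
# The test methods above are unittest.TestCase methods shown out of context: the
# mock_isfile / mock_yaml_load / mock_save / mock_sleep arguments are injected by
# stacked @mock.patch decorators that this excerpt omits. Below is a minimal,
# self-contained sketch of how one of them is presumably wired up; the patch targets
# are assumptions for illustration, not the project's confirmed module paths.
import unittest
import unittest.mock as mock

import config.global_configuration as global_conf


class TestLoadGlobalConfigSketch(unittest.TestCase):

    # Decorators apply bottom-up, so the innermost patch (os.path.isfile)
    # becomes the first mock argument after self.
    @mock.patch('config.global_configuration.save_global_config', autospec=True)
    @mock.patch('config.global_configuration.yaml.load', autospec=True)
    @mock.patch('config.global_configuration.os.path.isfile', autospec=True)
    def test_saves_config_file_if_not_available(self, mock_isfile, mock_yaml_load, mock_save):
        mock_isfile.return_value = False   # pretend the config file does not exist on disk
        mock_yaml_load.return_value = {}
        global_conf.load_global_config('test_config_file')
        self.assertTrue(mock_save.called)  # a default config should have been written out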
def main():
    """
    Run a particular task.
    :args: Only argument is the id of the task to run
    :return:
    """
    config = global_conf.load_global_config('config.yml')
    if __name__ == '__main__':
        # Only configure the logging if this is the main function, don't reconfigure
        logging.config.dictConfig(config['logging'])
    db_client = database.client.DatabaseClient(config=config)

    orbslam_ids = db_client.system_collection.find(
        {'_type': 'systems.slam.orbslam2.ORBSLAM2'}, {'_id': True})
    for system_id in orbslam_ids:
        logging.getLogger(__name__).info("Invalidating system {0}".format(
            system_id['_id']))
        batch_analysis.invalidate.invalidate_system(db_client,
                                                    system_id['_id'])

    failed_trials = db_client.trials_collection.find({'success': False},
                                                     {'_id': True})
    for trial_id in failed_trials:
        logging.getLogger(__name__).info(
            "Invalidating failed trial {0}".format(trial_id['_id']))
        batch_analysis.invalidate.invalidate_trial_result(
            db_client, trial_id['_id'])
def main(*args):
    """
    Run a particular task.
    :args: Only argument is the id of the task to run
    :return:
    """
    if len(args) >= 1:
        task_id = bson.objectid.ObjectId(args[0])

        config = global_conf.load_global_config('config.yml')
        if __name__ == '__main__':
            # Only configure the logging if this is the main function, don't reconfigure
            logging.config.dictConfig(config['logging'])
        db_client = database.client.DatabaseClient(config=config)

        task = dh.load_object(db_client, db_client.tasks_collection, task_id)
        if task is not None:
            try:
                task.run_task(db_client)
            except Exception:
                logging.getLogger(__name__).error(
                    "Exception occurred while running {0}: {1}".format(
                        type(task).__name__, traceback.format_exc()))
                task.mark_job_failed()
            task.save_updates(db_client.tasks_collection)
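
# A possible command-line entry point for the task runner above (an illustrative
# sketch, not necessarily how the autogenerated job scripts invoke it): the task id
# is taken from the first command-line argument.
if __name__ == '__main__':
    import sys
    main(*sys.argv[1:])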
def main():
    """
    Visualize a randomly generated dataset, to make sure we're generating them correctly.
    :return:
    """
    config = global_conf.load_global_config('config.yml')
    db_client = database.client.DatabaseClient(config=config)

    visualize_dataset(db_client, bson.ObjectId("5a00854936ed1e1fa9a4ae19"))
def main(*args):
    """
    Visualize the images from a given image source, drawing the bounding box and
    class name of each labelled object over the image data.
    This represents a basic task.
    Scripts to run this will be autogenerated by the job system.
    The only argument is the id of the image source to use.
    :return:
    """
    if len(args) >= 1:
        image_source_id = bson.objectid.ObjectId(args[0])

        config = global_conf.load_global_config('config.yml')
        db_client = database.client.DatabaseClient(config=config)

        image_source = None
        s_image_source = db_client.image_source_collection.find_one(
            {'_id': image_source_id})
        if s_image_source is not None:
            image_source = db_client.deserialize_entity(s_image_source)
        del s_image_source

        if image_source is not None:
            image_source.begin()
            while not image_source.is_complete():
                image, _ = image_source.get_next_image()
                debug_img = image.data[:, :, ::-1].copy()  # RGB -> BGR for OpenCV drawing
                for obj in image.metadata.labelled_objects:
                    x, y, w, h = obj.bounding_box
                    cv2.rectangle(debug_img, (x, y), (x + w, y + h),
                                  (0, 0, 255), 2)

                    text_label = str(obj.class_names[0])
                    # cv2.getTextSize returns ((text_width, text_height), baseline)
                    retval, baseLine = cv2.getTextSize(
                        text_label, cv2.FONT_HERSHEY_COMPLEX, 1, 1)
                    text_org = (x, y)

                    cv2.rectangle(
                        debug_img,
                        (text_org[0] - 5, text_org[1] + baseLine - 5),
                        (text_org[0] + retval[0] + 5,
                         text_org[1] - retval[1] - 5), (0, 0, 0), 2)
                    cv2.rectangle(
                        debug_img,
                        (text_org[0] - 5, text_org[1] + baseLine - 5),
                        (text_org[0] + retval[0] + 5,
                         text_org[1] - retval[1] - 5), (255, 255, 255), -1)
                    cv2.putText(debug_img, text_label, text_org,
                                cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)

                cv2.imshow('debug', debug_img)
                cv2.waitKey(0)
def main(do_imports: bool = True,
         schedule_tasks: bool = True,
         run_tasks: bool = True,
         experiment_ids: typing.List[str] = None):
    """
    Schedule tasks for all experiments.
    We need to find a way of running this repeatedly as a daemon.
    :param do_imports: Whether to do imports for all the experiments. Default true.
    :param schedule_tasks: Whether to schedule execution tasks for the experiments. Default true.
    :param run_tasks: Whether to actually use the job system to execute the scheduled tasks. Default true.
    :param experiment_ids: A limited set of experiments to schedule for. Default None, which means all experiments.
    """
    config = global_conf.load_global_config('config.yml')
    if __name__ == '__main__':
        logging.config.dictConfig(config['logging'])
    db_client = database.client.DatabaseClient(config=config)
    task_manager = batch_analysis.task_manager.TaskManager(
        db_client.tasks_collection, db_client, config)

    if do_imports or schedule_tasks:
        query = {'enabled': {'$ne': False}}
        if experiment_ids is not None and len(experiment_ids) > 0:
            query['_id'] = {
                '$in': [bson.ObjectId(id_) for id_ in experiment_ids]
            }
        experiment_ids = db_client.experiments_collection.find(
            query, {'_id': True})

        logging.getLogger(__name__).info("Scheduling experiments...")
        for experiment_id in experiment_ids:
            experiment = dh.load_object(db_client,
                                        db_client.experiments_collection,
                                        experiment_id['_id'])
            if experiment is not None and experiment.enabled:
                logging.getLogger(__name__).info(" ... experiment {0}".format(
                    experiment.identifier))
                try:
                    if do_imports:
                        experiment.do_imports(task_manager, db_client)
                    if schedule_tasks:
                        experiment.schedule_tasks(task_manager, db_client)
                    experiment.save_updates(db_client)
                except Exception:
                    logging.getLogger(__name__).error(
                        "Exception occurred during scheduling:\n{0}".format(
                            traceback.format_exc()))

    if run_tasks:
        logging.getLogger(__name__).info("Running tasks...")
        job_system = job_system_factory.create_job_system(config=config)
        task_manager.schedule_tasks(job_system)

        # Actually run the queued jobs.
        job_system.run_queued_jobs()
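
# The main functions above all expect 'config.yml' to yield at least a 'logging'
# section (fed to logging.config.dictConfig), a 'database_config' section (consumed
# by database.client.DatabaseClient), and a 'job_system_config' section (consumed by
# job_system_factory.create_job_system). A rough sketch of that shape, with purely
# illustrative values; the real defaults come from config.global_configuration.
EXAMPLE_GLOBAL_CONFIG = {
    'logging': {
        'version': 1    # minimal schema accepted by logging.config.dictConfig
    },
    'database_config': {
        'database_name': 'benchmark_database',   # hypothetical database name
        'gridfs_bucket': 'fs',
        'collections': {
            'system_collection': 'systems',      # hypothetical collection names
            'trainer_collection': 'trainers',
            'benchmarks_collection': 'benchmarks'
        }
    },
    'job_system_config': {}
}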
def main(*args):
    """
    Plot the results for all enabled experiments.
    :return:
    """
    config = global_conf.load_global_config("config.yml")
    if __name__ == '__main__':
        logging.config.dictConfig(config['logging'])
    db_client = database.client.DatabaseClient(config)
    experiment_ids = db_client.experiments_collection.find(
        {'enabled': {
            '$ne': False
        }}, {'_id': True})
    for ex_id in experiment_ids:
        experiment = dh.load_object(db_client,
                                    db_client.experiments_collection,
                                    ex_id['_id'])
        if experiment is not None and experiment.enabled:
            experiment.plot_results(db_client)
def main():
    """
    Add hard-coded entities to the database.
    Eventually this should add only experiments; when that happens, rename it to 'create_experiments.py'.
    :return:
    """
    config = global_conf.load_global_config('config.yml')
    db_client = database.client.DatabaseClient(config=config)

    # Create the experiments
    c = db_client.experiments_collection
    # db_help.add_unique(c, experiments.visual_slam.visual_slam_experiment.VisualSlamExperiment())
    #db_help.add_unique(c, experiments.visual_slam.visual_odometry_experiment.VisualOdometryExperiment())
    db_help.add_unique(
        c,
        experiments.visual_slam.simple_motion_experiment.
        SimpleMotionExperiment())
def main():
    """
    Allow experiments to dump some data to file. This might be aggregate statistics;
    I'm currently using it for camera trajectories.
    :return:
    """
    config = global_conf.load_global_config('config.yml')
    if __name__ == '__main__':
        logging.config.dictConfig(config['logging'])
    db_client = database.client.DatabaseClient(config=config)
    experiment_ids = db_client.experiments_collection.find(
        {'enabled': {
            '$ne': False
        }}, {'_id': True})
    for ex_id in experiment_ids:
        experiment = dh.load_object(db_client,
                                    db_client.experiments_collection,
                                    ex_id['_id'])
        if experiment is not None and experiment.enabled:
            experiment.export_data(db_client)
 def test_load_global_config_returns_read_config_merged_with_defaults(
         self, mock_isfile, mock_yaml_load):
     mock_isfile.return_value = True
     config = {
         'test': 12.35,
         'database_config': {
             'database_name': 'a_different_database',
             'gridfs_bucket': 'file_system_fs',
             'collections': {
                 'trainer_collection': 'these_are_the_trainers_yo',
                 'system_collection': 'deze_sysTems',
                 'benchmarks_collection': 'mark_those_benches',
             }
         },
         'job_system_config': {
             'a': 1
         },
         'logging': {
             'demo': 'ATestProperty'
         }
     }
     mock_yaml_load.return_value = config
     with mock.patch('config.global_configuration.open',
                     mock.mock_open(),
                     create=True):
         result = global_conf.load_global_config('test_config_file_6')
     for key, val in config.items():
         self.assertIn(key, result)
         if isinstance(val, dict):
             for inner1_key, inner1_val in val.items():
                 self.assertIn(inner1_key, result[key])
                 if isinstance(inner1_val, dict):
                     for inner2_key, inner2_val in inner1_val.items():
                         self.assertIn(inner2_key, result[key][inner1_key])
                         self.assertEqual(
                             inner2_val,
                             result[key][inner1_key][inner2_key])
                 else:
                     self.assertEqual(inner1_val, result[key][inner1_key])
         else:
             self.assertEqual(val, result[key])
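
# The test above expects values read from the config file to override the built-in
# defaults at every nesting level, per the behaviour its name describes. A minimal
# sketch of the kind of recursive merge that implies (illustrative only; the
# project's actual implementation may differ):
def merge_config_sketch(defaults: dict, loaded: dict) -> dict:
    merged = dict(defaults)
    for key, value in loaded.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_config_sketch(merged[key], value)   # recurse into nested sections
        else:
            merged[key] = value                                     # the loaded value wins
    return merged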
def main(check_collections: bool = True,
         remove_orphans: bool = False,
         recalculate_metadata: bool = False):
    """
    Verify the state of the database.
    :param check_collections: Check each of the database collections for problems. Default true.
    :param remove_orphans: Remove orphaned images from the database. Default false.
    :param recalculate_metadata: Recalculate derivative image metadata. Default false.
    :return:
    """
    config = global_conf.load_global_config('config.yml')
    if __name__ == '__main__':
        logging.config.dictConfig(config['logging'])
    db_client = database.client.DatabaseClient(config=config)

    if remove_orphans:
        remove_orphan_images(db_client)

    if recalculate_metadata:
        recalculate_derivative_metadata(db_client)

    # Patch saved entity types to fully-qualified names
    if check_collections:
        logging.getLogger(__name__).info('Checking experiments...')
        check_collection(db_client.experiments_collection, db_client)
        logging.getLogger(__name__).info('Checking trainers...')
        check_collection(db_client.trainer_collection, db_client)
        logging.getLogger(__name__).info('Checking trainees...')
        check_collection(db_client.trainee_collection, db_client)
        logging.getLogger(__name__).info('Checking systems...')
        check_collection(db_client.system_collection, db_client)
        logging.getLogger(__name__).info('Checking benchmarks...')
        check_collection(db_client.benchmarks_collection, db_client)
        logging.getLogger(__name__).info('Checking image sources...')
        check_collection(db_client.image_source_collection, db_client)
        # check_collection(db_client.image_collection, db_client)    # This is covered by image sources
        logging.getLogger(__name__).info('Checking trials...')
        check_collection(db_client.trials_collection, db_client)
        logging.getLogger(__name__).info('Checking results...')
        check_collection(db_client.results_collection, db_client)
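
# check_collection, remove_orphan_images and recalculate_derivative_metadata are
# helpers defined elsewhere in the same module as the main above. Judging from the
# "patch saved entity types" comment, check_collection presumably walks a collection
# and flags documents that no longer deserialize cleanly. A rough, hypothetical
# sketch of that idea (not the project's implementation):
def check_collection_sketch(collection, db_client):
    """Return the ids of documents in the collection that can no longer be deserialized."""
    bad_ids = []
    for s_entity in collection.find():
        try:
            db_client.deserialize_entity(s_entity)   # fails if the stored '_type' is stale
        except Exception:
            bad_ids.append(s_entity['_id'])
    return bad_ids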