Example 1
def purge_instance(instance_id, nb_to_keep):
    instance = models.Instance.query.get(instance_id)
    logger = get_instance_logger(instance)
    logger.info('purge of backup directories for %s', instance.name)
    instance_config = load_instance_config(instance.name)
    backups = set(glob.glob('{}/*'.format(instance_config.backup_directory)))
    logger.info('backups are: %s', backups)
    # use realpath to avoid problems with doubled slashes and the like
    loaded = set(
        os.path.realpath(os.path.dirname(dataset.name)) for dataset in instance.last_datasets(nb_to_keep)
    )
    logger.info('loaded data are: %s', loaded)

    running = set(os.path.realpath(os.path.dirname(dataset.name)) for dataset in instance.running_datasets())
    logger.info('running bina are: %s', running)
    to_remove = [os.path.join(instance_config.backup_directory, f) for f in backups - loaded - running]

    missing = [l for l in loaded if l not in backups]
    if missing:
        logger.error(
            "MISSING backup files! impossible to find %s in the backup dir, "
            "we skip the purge, repair ASAP to fix the purge",
            missing,
        )
        return

    logger.info('we remove: %s', to_remove)
    for path in to_remove:
        shutil.rmtree(path)
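
The function above boils down to set arithmetic on directory paths: everything in the backup directory that is neither loaded nor running gets removed, unless a loaded path is unexpectedly absent. A minimal standalone sketch of that idea (the function name and arguments here are illustrative, not part of the project):

import glob
import os
import shutil

def purge_backups(backup_dir, keep_dirs):
    # realpath normalises doubled slashes and symlinks so the sets compare like with like
    existing = {os.path.realpath(p) for p in glob.glob(os.path.join(backup_dir, '*'))}
    kept = {os.path.realpath(p) for p in keep_dirs}
    if kept - existing:
        # a directory we are supposed to keep is missing: skip the purge rather than guess
        return
    for path in existing - kept:
        shutil.rmtree(path)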
Example 2
def heartbeat():
    """
    send a heartbeat to all krakens
    """
    logging.info('ping krakens!!')
    with kombu.Connection(
            current_app.config['CELERY_BROKER_URL']) as connection:
        instances = models.Instance.query_existing().all()
        task = task_pb2.Task()
        task.action = task_pb2.HEARTBEAT

        for instance in instances:
            try:
                config = load_instance_config(instance.name)
                exchange = kombu.Exchange(config.exchange,
                                          'topic',
                                          durable=True)
                producer = connection.Producer(exchange=exchange)
                producer.publish(task.SerializeToString(),
                                 routing_key='{}.task.heartbeat'.format(
                                     instance.name))
            except Exception as e:
                logging.error(
                    "Could not ping krakens for instance {i}: {e}".format(
                        i=instance, e=e))
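
heartbeat() publishes the serialized protobuf task on one topic exchange per instance. The kombu calls involved are just the Exchange/Producer pair; a reduced sketch of the publish side, with a placeholder broker URL, exchange name and payload:

import kombu

# placeholder broker URL and exchange name, for illustration only
with kombu.Connection('amqp://guest:guest@localhost//') as connection:
    exchange = kombu.Exchange('my_instance_exchange', 'topic', durable=True)
    producer = connection.Producer(exchange=exchange)
    # the routing key follows the '<instance>.task.heartbeat' convention used above
    producer.publish(b'heartbeat', routing_key='my_instance.task.heartbeat')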
Example 3
    def run(self, name_=""):
        if name_:
            instances = models.Instance.query.filter_by(name=name_).all()
        else:
            instances = models.Instance.query.all()

        if not instances:
            logging.getLogger(__name__).\
                error("Unable to find any instance for name '{name}'"
                      .format(name=name_))
            return

        #TODO: create a real job
        job_id = 1
        instances_name = [instance.name for instance in instances]
        for instance_name in instances_name:
            instance_config = None
            try:
                instance_config = load_instance_config(instance_name)
            except ValueError:
                logging.getLogger(__name__).\
                    info("Unable to find instance " + instance_name)
                continue
            aggregate_places(instance_config, job_id)
            job_id += 1
Example 4
def update_data():
    for instance in models.Instance.query_existing().all():
        current_app.logger.debug("Update data of : {}".format(instance.name))
        instance_config = load_instance_config(instance.name)
        files = glob.glob(instance_config.source_directory + "/*")
        if files:
            import_data(files, instance, backup_file=True)
Example 5
def build_data(instance):
    job = models.Job()
    job.instance = instance
    job.state = 'pending'
    instance_config = load_instance_config(instance.name)
    models.db.session.add(job)
    models.db.session.commit()
    chain(ed2nav.si(instance_config, job.id, None), finish_job.si(job.id)).delay()
    current_app.logger.info("Job build data of : %s queued"%instance.name)
Example 6
def reload_kraken(instance_id):
    instance = models.Instance.query.get(instance_id)
    job = models.Job()
    job.instance = instance
    job.state = 'pending'
    instance_config = load_instance_config(instance.name)
    models.db.session.add(job)
    models.db.session.commit()
    chain(reload_data.si(instance_config, job.id), finish_job.si(job.id)).delay()
    logging.info("Task reload kraken for instance {} queued".format(instance.name))
Example 7
def reload_at(instance_id):
    instance = models.Instance.query.get(instance_id)
    job = models.Job()
    job.instance = instance
    job.state = 'pending'
    instance_config = load_instance_config(instance.name)
    models.db.session.add(job)
    models.db.session.commit()
    chain(nav2rt.si(instance_config, job.id),
          reload_data.si(instance_config, job.id),
          finish_job.si(job.id)).delay()
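
Examples 5 to 7 all rely on the same Celery canvas pattern: .si() builds an immutable signature, so each task only receives the arguments it was given (the previous task's result is not forwarded), and chain(...).delay() queues the steps to run one after the other. A minimal sketch with made-up tasks and a placeholder broker:

from celery import Celery, chain

app = Celery('example', broker='amqp://localhost//')  # placeholder broker

@app.task
def build(job_id):
    print('building for job', job_id)

@app.task
def finish(job_id):
    print('finishing job', job_id)

# both signatures are immutable, so 'finish' ignores whatever 'build' returns
chain(build.si(42), finish.si(42)).delay()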
Example 8
def scan_instances():
    for instance_file in glob.glob(current_app.config['INSTANCES_DIR'] + '/*.ini'):
        instance_name = os.path.basename(instance_file).replace('.ini', '')
        instance = models.Instance.query.filter_by(name=instance_name).first()
        if not instance:
            current_app.logger.info('new instance detected: %s', instance_name)
            instance = models.Instance(name=instance_name)
            instance_config = load_instance_config(instance.name)
            instance.is_free = instance_config.is_free

            models.db.session.add(instance)
            models.db.session.commit()
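
scan_instances() derives the instance name from the .ini file name. The same derivation in isolation; os.path.splitext only strips the extension, which avoids the corner case where '.ini' appears in the middle of a name (the directory below is a placeholder):

import glob
import os

for instance_file in glob.glob('/etc/tyr/instances/*.ini'):  # placeholder directory
    instance_name = os.path.splitext(os.path.basename(instance_file))[0]
    print(instance_name)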
Example 9
def build_all_data():
    for instance in models.Instance.query.all():
        job = models.Job()
        job.instance = instance
        job.state = 'pending'
        instance_config = load_instance_config(instance.name)
        models.db.session.add(job)
        models.db.session.commit()
        chain(ed2nav.si(instance_config, job.id),
              nav2rt.si(instance_config, job.id)).delay()
        current_app.logger.info("Job  build data of : %s queued" %
                                instance.name)
Example 10
def update_data():
    for instance in models.Instance.query_existing().all():
        current_app.logger.debug("Update data of : {}".format(instance.name))
        instance_config = None
        try:
            instance_config = load_instance_config(instance.name)
        except Exception:
            current_app.logger.exception("impossible to load instance configuration for %s", instance.name)
            # Do not stop the task if only one instance is missing
            continue
        files = glob.glob(instance_config.source_directory + "/*")
        if files:
            import_data(files, instance, backup_file=True)
Example 11
def send_to_mimir(instance, filename):
    """
    :param instance: instance to receive the data
    :param filename: file to inject towards mimir

    - create a job with a data_set
    - data injection towards mimir (stops2mimir, ntfs2mimir)

    returns the list of actions
    """
    # This test is to avoid creating a new job if there is no action on mimir.
    if not (instance.import_ntfs_in_mimir or instance.import_stops_in_mimir):
        return []

    actions = []
    job = models.Job()
    instance_config = load_instance_config(instance.name)
    job.instance = instance
    job.state = 'running'

    dataset = models.DataSet()
    dataset.family_type = 'mimir'
    dataset.type = 'fusio'

    # currently the name of a dataset is the path to it
    dataset.name = filename
    models.db.session.add(dataset)
    job.data_sets.append(dataset)

    models.db.session.add(job)
    models.db.session.commit()

    # Import ntfs in Mimir
    if instance.import_ntfs_in_mimir:
        actions.append(
            ntfs2mimir.si(instance_config,
                          filename,
                          job.id,
                          dataset_uid=dataset.uid))

    # Import stops in Mimir
    # if we are loading pt data we might want to load the stops to autocomplete
    if instance.import_stops_in_mimir and not instance.import_ntfs_in_mimir:
        actions.append(
            stops2mimir.si(instance_config,
                           filename,
                           job.id,
                           dataset_uid=dataset.uid))

    actions.append(finish_job.si(job.id))
    return actions
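
Unlike the other helpers, send_to_mimir() does not launch anything: it returns a list of task signatures so the caller can splice them into a larger workflow (import_data in Examples 14 and 15 does exactly that with actions.extend). A hedged sketch of how a caller might consume the returned list on its own (the file path is a placeholder):

from celery import chain

# 'instance' stands for a models.Instance row already loaded from the database
actions = send_to_mimir(instance, '/srv/ed/backup/ntfs.zip')  # placeholder path
if actions:
    chain(*actions).delay()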
Example 12
    def _init(self):
        instances = models.Instance.query.all()
        self.connection = kombu.Connection(
            current_app.config['CELERY_BROKER_URL'])
        for instance in instances:
            # initialize the last reload at the minimum date possible
            self.last_reload[instance.id] = datetime(1, 1, 1)
            config = load_instance_config(instance.name)
            exchange = kombu.Exchange(config.exchange, 'topic', durable=True)
            for topic in config.rt_topics:
                self.topics_to_instances[topic].append(instance)
                queue = kombu.Queue(exchange=exchange,
                                    durable=True,
                                    routing_key=topic)
                self.queues.append(queue)
Example 13
def import_data(files, instance, backup_file, async=True, reload=True):
    """
    import the data contained in the list of 'files' into the 'instance'

    :param files: files to import
    :param instance: instance to receive the data
    :param backup_file: If True the files are moved to a backup directory, else they are not moved
    :param async: If True all jobs are run in the background; otherwise the jobs run in sequence and the function only returns when all of them are finished
    :param reload: If True kraken will be reloaded at the end of the processing

    run the whole data import process:

    - data import in bdd (fusio2ed, gtfs2ed, poi2ed, ...)
    - export bdd to nav file
    - update the jormungandr db with the new data for the instance
    - reload the krakens
    """
    actions = []
    job = models.Job()
    instance_config = load_instance_config(instance.name)
    job.instance = instance
    job.state = 'pending'
    task = {
        'gtfs': gtfs2ed,
        'fusio': fusio2ed,
        'osm': osm2ed,
        'geopal': geopal2ed,
        'fare': fare2ed,
        'poi': poi2ed,
        'synonym': synonym2ed,
        'shape': shape2ed,
    }

    for _file in files:
        filename = None
Example 14
def import_data(
    files, instance, backup_file, asynchronous=True, reload=True, custom_output_dir=None, skip_mimir=False
):
    """
    import the data contained in the list of 'files' into the 'instance'

    :param files: files to import
    :param instance: instance to receive the data
    :param backup_file: If True the files are moved to a backup directory, else they are not moved
    :param asynchronous: If True all jobs are run in the background; otherwise the jobs run in sequence and the
     function only returns when all of them are finished
    :param reload: If True kraken will be reloaded at the end of the processing
    :param custom_output_dir: subdirectory for the nav file created. If not given, the instance default one is taken
    :param skip_mimir: skip importing data into mimir

    run the whole data import process:

    - data import in bdd (fusio2ed, gtfs2ed, poi2ed, ...)
    - export bdd to nav file
    - update the jormungandr db with the new data for the instance
    - reload the krakens
    """
    actions = []
    job = models.Job()
    instance_config = load_instance_config(instance.name)
    job.instance = instance
    job.state = 'running'
    task = {
        'gtfs': gtfs2ed,
        'fusio': fusio2ed,
        'osm': osm2ed,
        'geopal': geopal2ed,
        'fare': fare2ed,
        'poi': poi2ed,
        'synonym': synonym2ed,
        'shape': shape2ed,
    }

    for _file in files:
        filename = None

        dataset = models.DataSet()
        # NOTE: for the moment we do not use the path to load the data here
        # but we'll need to refactor this to take it into account
        try:
            dataset.type, _ = utils.type_of_data(_file)
            dataset.family_type = utils.family_of_data(dataset.type)
        except Exception:
            if backup_file:
                move_to_backupdirectory(_file, instance_config.backup_directory)
            current_app.logger.debug(
                "Corrupted source file : {} moved to {}".format(_file, instance_config.backup_directory)
            )
            continue

        if dataset.type in task:
            if backup_file:
                filename = move_to_backupdirectory(_file, instance_config.backup_directory)
            else:
                filename = _file
            actions.append(task[dataset.type].si(instance_config, filename, dataset_uid=dataset.uid))
        else:
            # unknown type, we skip it
            current_app.logger.debug("unknown file type: {} for file {}".format(dataset.type, _file))
            continue

        # currently the name of a dataset is the path to it
        dataset.name = filename
        models.db.session.add(dataset)
        job.data_sets.append(dataset)

    if actions:
        models.db.session.add(job)
        models.db.session.commit()
        # We pass the job id to each task, but the job needs to be committed to get an id
        for action in actions:
            action.kwargs['job_id'] = job.id
        # Create binary file (New .nav.lz4)
        binarisation = [ed2nav.si(instance_config, job.id, custom_output_dir)]
        actions.append(chain(*binarisation))
        # Reload kraken with new data after binarisation (New .nav.lz4)
        if reload:
            actions.append(reload_data.si(instance_config, job.id))

        if not skip_mimir:
            for dataset in job.data_sets:
                actions.extend(send_to_mimir(instance, dataset.name, dataset.family_type))
        else:
            current_app.logger.info("skipping mimir import")

        actions.append(finish_job.si(job.id))
        if asynchronous:
            return chain(*actions).delay()
        else:
            # all jobs run in sequence and import_data only returns when all of them are finished
            return chain(*actions).apply()
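
The loop that sets action.kwargs['job_id'] works because a Celery signature keeps its keyword arguments in a plain dict that can still be edited after the signature is created; the job id only exists once the session is committed, so it is patched in afterwards. A minimal sketch of that pattern with a made-up task and a placeholder broker:

from celery import Celery, chain

app = Celery('example', broker='amqp://localhost//')  # placeholder broker

@app.task
def load(filename, job_id=None):
    print('loading', filename, 'for job', job_id)

actions = [load.si('a.zip'), load.si('b.zip')]
# the id is only known after the commit, so it is injected into each signature here
for action in actions:
    action.kwargs['job_id'] = 7
chain(*actions).delay()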
Example 15
def import_data(
    files,
    instance,
    backup_file,
    asynchronous=True,
    reload=True,
    custom_output_dir=None,
    skip_mimir=False,
    skip_2ed=False,
):
    """
    import the data contained in the list of 'files' into the 'instance'

    :param files: files to import
    :param instance: instance to receive the data
    :param backup_file: If True the files are moved to a backup directory, else they are not moved
    :param asynchronous: If True all jobs are run in the background; otherwise the jobs run in sequence and the
     function only returns when all of them are finished
    :param reload: If True kraken will be reloaded at the end of the processing
    :param custom_output_dir: subdirectory for the nav file created. If not given, the instance default one is taken
    :param skip_mimir: skip importing data into mimir
    :param skip_2ed: skip inserting last_load_dataset files into ed database
    run the whole data import process:

    - data import in bdd (fusio2ed, gtfs2ed, poi2ed, ...)
    - export bdd to nav file
    - update the jormungandr db with the new data for the instance
    - reload the krakens
    """
    actions = []
    job = models.Job()
    instance_config = load_instance_config(instance.name)
    job.instance = instance
    job.state = 'running'
    task = {
        'gtfs': gtfs2ed,
        'fusio': fusio2ed,
        'osm': osm2ed,
        'geopal': geopal2ed,
        'fare': fare2ed,
        'poi': poi2ed,
        'synonym': synonym2ed,
        'shape': shape2ed,
    }

    def process_ed2nav():
        models.db.session.add(job)
        models.db.session.commit()
        # We pass the job id to each task, but the job needs to be committed to get an id
        for action in actions:
            action.kwargs['job_id'] = job.id
        # Create binary file (New .nav.lz4)
        binarisation = [ed2nav.si(instance_config, job.id, custom_output_dir)]
        actions.append(chain(*binarisation))
        # Reload kraken with new data after binarisation (New .nav.lz4)
        if reload:
            actions.append(reload_data.si(instance_config, job.id))

        if not skip_mimir:
            for dataset in job.data_sets:
                actions.extend(
                    send_to_mimir(instance, dataset.name, dataset.family_type))
        else:
            current_app.logger.info("skipping mimir import")

        actions.append(finish_job.si(job.id))

        # We should delete old backup directories related to this instance
        actions.append(
            purge_instance.si(
                instance.id,
                current_app.config['DATASET_MAX_BACKUPS_TO_KEEP']))
        if asynchronous:
            return chain(*actions).delay()
        else:
            # all jobs run in sequence and import_data only returns when all of them are finished
            return chain(*actions).apply()

    if skip_2ed:
        # For skip_2ed, skip inserting last_load_dataset files into ed database
        return process_ed2nav()
    for _file in files:
        filename = None

        dataset = models.DataSet()
        # NOTE: for the moment we do not use the path to load the data here
        # but we'll need to refactor this to take it into account
        try:
            dataset.type, _ = utils.type_of_data(_file)
            dataset.family_type = utils.family_of_data(dataset.type)
        except Exception:
            if backup_file:
                move_to_backupdirectory(_file,
                                        instance_config.backup_directory)
            current_app.logger.debug(
                "Corrupted source file : {} moved to {}".format(
                    _file, instance_config.backup_directory))
            continue

        if dataset.type in task:
            if backup_file:
                filename = move_to_backupdirectory(
                    _file,
                    instance_config.backup_directory,
                    manage_sp_char=True)
            else:
                filename = _file

            has_pt_planner_loki = (
                hasattr(instance, 'pt_planners_configurations')
                and "loki" in instance.pt_planners_configurations)
            if has_pt_planner_loki:
                loki_data_source = instance.pt_planners_configurations.get(
                    'loki', {}).get('data_source')
                if loki_data_source is not None:
                    if loki_data_source == "minio":
                        if dataset.type == "fusio":
                            actions.append(
                                fusio2s3.si(instance_config,
                                            filename,
                                            dataset_uid=dataset.uid))
                        if dataset.type == "gtfs":
                            actions.append(
                                gtfs2s3.si(instance_config,
                                           filename,
                                           dataset_uid=dataset.uid))
                    elif loki_data_source == "local" and dataset.type in [
                            "fusio", "gtfs"
                    ]:
                        zip_file = zip_if_needed(filename)
                        dest = os.path.join(
                            os.path.dirname(instance_config.target_file),
                            "ntfs.zip")
                        shutil.copy(zip_file, dest)
                    else:
                        current_app.logger.debug(
                            "unknown loki data_source '{}' for coverage '{}'".
                            format(loki_data_source, instance.name))

            actions.append(task[dataset.type].si(instance_config,
                                                 filename,
                                                 dataset_uid=dataset.uid))
        else:
            # unknown type, we skip it
            current_app.logger.debug(
                "unknown file type: {} for file {}".format(
                    dataset.type, _file))
            continue

        # currently the name of a dataset is the path to it
        dataset.name = filename
        dataset.state = "pending"
        models.db.session.add(dataset)
        job.data_sets.append(dataset)

    if actions:
        return process_ed2nav()
Example 16
def update_data():
    for instance in models.Instance.query.all():
        current_app.logger.debug("Update data of : %s" % instance.name)
        instance_config = load_instance_config(instance.name)
        files = glob.glob(instance_config.source_directory + "/*")
        actions = []
        job = models.Job()
        job.instance = instance
        job.state = 'pending'
        for _file in files:
            dataset = models.DataSet()
            filename = None

            dataset.type = type_of_data(_file)
            if dataset.type == 'gtfs':
                filename = move_to_backupdirectory(
                    _file, instance_config.backup_directory)
                actions.append(gtfs2ed.si(instance_config, filename))
            elif dataset.type == 'fusio':
                filename = move_to_backupdirectory(
                    _file, instance_config.backup_directory)
                actions.append(fusio2ed.si(instance_config, filename))
            elif dataset.type == 'osm':
                filename = move_to_backupdirectory(
                    _file, instance_config.backup_directory)
                actions.append(osm2ed.si(instance_config, filename))
            elif dataset.type == 'geopal':
                filename = move_to_backupdirectory(
                    _file, instance_config.backup_directory)
                actions.append(geopal2ed.si(instance_config, filename))
            elif dataset.type == 'fare':
                filename = move_to_backupdirectory(
                    _file, instance_config.backup_directory)
                actions.append(fare2ed.si(instance_config, filename))
            elif dataset.type == 'poi':
                filename = move_to_backupdirectory(
                    _file, instance_config.backup_directory)
                actions.append(poi2ed.si(instance_config, filename))
            elif dataset.type == 'synonym':
                filename = move_to_backupdirectory(
                    _file, instance_config.backup_directory)
                actions.append(synonym2ed.si(instance_config, filename))
            else:
                #unknown type, we skip it
                continue

            #currently the name of a dataset is the path to it
            dataset.name = filename
            models.db.session.add(dataset)
            job.data_sets.append(dataset)

        if actions:
            models.db.session.add(job)
            models.db.session.commit()
            # We pass the job id to each task, but the job needs to be
            # committed to get an id
            for action in actions:
                action.kwargs['job_id'] = job.id
            binarisation = [
                ed2nav.si(instance_config, job.id),
                nav2rt.si(instance_config, job.id)
            ]
            aggregate = aggregate_places.si(instance_config, job.id)
            actions.append(group(chain(*binarisation), aggregate))
            actions.append(reload_data.si(instance_config, job.id))
            actions.append(finish_job.si(job.id))
            chain(*actions).delay()
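
This version also shows the two canvas primitives combined: chain runs its members in sequence, group runs them in parallel, and a group followed by another task inside a chain is upgraded by Celery to a chord, which needs a result backend. A reduced sketch with placeholder tasks and a placeholder broker/backend:

from celery import Celery, chain, group

# placeholder broker and backend; a result backend is required for the implicit chord
app = Celery('example', broker='amqp://localhost//', backend='rpc://')

@app.task
def ed2nav(job_id):
    pass

@app.task
def nav2rt(job_id):
    pass

@app.task
def aggregate_places(job_id):
    pass

@app.task
def reload_data(job_id):
    pass

# binarisation (ed2nav then nav2rt) and aggregation run in parallel,
# and the krakens are only reloaded once both branches are done
chain(group(chain(ed2nav.si(1), nav2rt.si(1)), aggregate_places.si(1)),
      reload_data.si(1)).delay()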
Example 17
def bounding_shape(instance_name, shape_path):
    """ Set the bounding shape to a custom value """

    instance_conf = load_instance_config(instance_name)

    load_bounding_shape(instance_name, instance_conf, shape_path)
Example 18
def import_data(files, instance, backup_file):
    """
    import the data contained in the list of 'files' into the 'instance'

    :param files: files to import
    :param instance: instance to receive the data
    :param backup_file: If True the files are moved to a backup directory, else they are not moved

    run the whole data import process:

    - data import in bdd (fusio2ed, gtfs2ed, poi2ed, ...)
    - export bdd to nav file
    - update the jormungandr db with the new data for the instance
    - reload the krakens
    """
    actions = []
    job = models.Job()
    instance_config = load_instance_config(instance.name)
    job.instance = instance
    job.state = 'pending'
    task = {
        'gtfs': gtfs2ed,
        'fusio': fusio2ed,
        'osm': osm2ed,
        'geopal': geopal2ed,
        'fare': fare2ed,
        'poi': poi2ed,
        'synonym': synonym2ed,
    }

    for _file in files:
        filename = None

        dataset = models.DataSet()
        dataset.type = type_of_data(_file)
        if dataset.type in task:
            if backup_file:
                filename = move_to_backupdirectory(
                    _file, instance_config.backup_directory)
            else:
                filename = _file
            actions.append(task[dataset.type].si(instance_config, filename))
        else:
            # unknown type, we skip it
            current_app.logger.debug("unknown file type: {} for file {}".format(
                dataset.type, _file))
            continue

        #currently the name of a dataset is the path to it
        dataset.name = filename
        models.db.session.add(dataset)
        job.data_sets.append(dataset)

    if actions:
        models.db.session.add(job)
        models.db.session.commit()
        # We pass the job id to each task, but the job needs to be
        # committed to get an id
        for action in actions:
            action.kwargs['job_id'] = job.id
        binarisation = [
            ed2nav.si(instance_config, job.id),
            nav2rt.si(instance_config, job.id)
        ]
        aggregate = aggregate_places.si(instance_config, job.id)
        actions.append(group(chain(*binarisation), aggregate))
        actions.append(reload_data.si(instance_config, job.id))
        actions.append(finish_job.si(job.id))
        chain(*actions).delay()
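
Examples 14, 15 and 18 all route each input file to its import task through a plain dict keyed by the detected data type, which keeps the per-type branching out of the loop body. The same dispatch shape in isolation (the detector and task below are stand-ins for the real ones):

def type_of_data(path):
    # stand-in: the real detector inspects the file contents, not just the name
    return 'gtfs' if path.endswith('.zip') else 'unknown'

def gtfs2ed(path):
    print('importing', path)

task = {'gtfs': gtfs2ed}

for _file in ['stops.zip', 'notes.txt']:
    data_type = type_of_data(_file)
    if data_type in task:
        task[data_type](_file)
    else:
        print('unknown file type:', data_type)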