Example #1
def purge_rt_update(self, config):
    func_name = "purge_rt_update"
    connector = config["connector"]

    logger = logging.LoggerAdapter(logging.getLogger(__name__),
                                   extra={"connector": connector})
    logger.debug("purge realtime update for %s", connector)

    lock_name = make_kirin_lock_name(func_name, connector)
    with get_lock(logger, lock_name,
                  app.config["REDIS_LOCK_TIMEOUT_PURGE"]) as locked:
        if not locked:
            logger.warning("%s for %s is already in progress", func_name,
                           connector)
            return

        until = datetime.date.today() - datetime.timedelta(
            days=int(config["nb_days_to_keep"]))
        logger.info("purge realtime update for {} until {}".format(
            connector, until))

        # TODO:  we want to purge on "contributor" later, not "connector".
        RealTimeUpdate.remove_by_connectors_until(connectors=[connector],
                                                  until=until)
        logger.info("%s for %s is finished", func_name, connector)
Example #2
def _purge(process_name: str, contributor: Contributor,
           delete_rows_func: Callable[[Contributor], Dict]) -> Dict:
    logger = logging.LoggerAdapter(logging.getLogger(process_name),
                                   extra={"contributor": contributor.id})
    logger.debug("preparing '{}' for contributor '{}'".format(
        process_name, contributor.id))

    lock_name = make_kirin_lock_name(process_name, contributor.id)
    with get_lock(logger, lock_name,
                  app.config["REDIS_LOCK_TIMEOUT_PURGE"]) as locked:
        if not locked:
            logger.warning(
                "operation '{}' for contributor '{}' is already in progress".
                format(process_name, contributor.id))
            return {}

        start_datetime = datetime.datetime.utcnow()
        logger.info("executing '{}' for contributor '{}'".format(
            process_name, contributor.id))

        purge_count = delete_rows_func(contributor)
        duration = (datetime.datetime.utcnow() -
                    start_datetime).total_seconds()
        logger.warning(
            "operation '{}' for contributor '{}' is finished: purged {} row(s) in {} s"
            .format(process_name, contributor.id, purge_count, duration))
        new_relic.record_custom_parameter(
            "{}:{}_count".format(contributor.id, process_name), purge_count)
        new_relic.record_custom_parameter(
            "{}:{}_duration".format(contributor.id, process_name), duration)
        return purge_count
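A hypothetical caller for `_purge`, assuming a deletion helper that returns the per-table row counts promised by the `Dict` return type; `delete_trip_update_rows`, the 30-day cutoff, and the assumption that `remove_by_contributors_and_period` returns a count are all illustrative:

def delete_trip_update_rows(contributor: Contributor) -> Dict:
    # hypothetical callback: purge rows older than 30 days and report counts
    until = datetime.date.today() - datetime.timedelta(days=30)
    removed = TripUpdate.remove_by_contributors_and_period(
        contributors=[contributor.id], start_date=None, end_date=until)
    return {"trip_update": removed}

_purge("purge_trip_update", contributor, delete_trip_update_rows)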
Example #3
def test_redis_lock_update(
    test_client,
    pg_docker_fixture,
    rabbitmq_docker_fixture,
    broker_connection,
    mq_handler,
):
    xml_feed = '<?xml version="1.0" encoding="UTF-8"?><something></something>'
    logger = logging.getLogger(
        "kirin.queue_workers.siri_et_xml_tn.siri_et_xml_tn_worker")
    with patch.object(logger, "debug") as mock_debug:
        with SiriEtXmlTnWorkerTest(test_client, rabbitmq_docker_fixture.url,
                                   broker_connection, pg_docker_fixture):
            # Check that SiriEtXmlTnWorker is creating the queue
            wait_until(lambda: is_queue_created(broker_connection,
                                                SIRI_ET_XML_TN_QUEUE_NAME))

            time.sleep((WORKER_REDIS_TIMEOUT_LOCK.total_seconds() // 5) + 1)
            mq_handler.publish(xml_feed, SIRI_ET_XML_TN_CONTRIBUTOR_ID)
            wait_until(lambda: RealTimeUpdate.query.count() == 1)
            # Check lock refreshed
            mock_debug.assert_any_call(
                "lock {%s} updated",
                make_kirin_lock_name("{}_worker".format(
                    ConnectorType.siri_et_xml_tn.value)))
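The test sleeps just past a fifth of `WORKER_REDIS_TIMEOUT_LOCK` and then asserts the `"lock {%s} updated"` debug line, which implies the worker refreshes its lock on roughly that period. A minimal sketch of such a refresh, assuming the worker holds a redis-py `Lock` in `self.lock` (the method name and attributes are illustrative):

def _maybe_refresh_lock(self):
    # hypothetical: extend the lock's TTL once a fifth of the timeout has
    # elapsed, and emit the debug line the test asserts on
    if datetime.now() - self.last_lock_update > WORKER_REDIS_TIMEOUT_LOCK / 5:
        self.lock.extend(WORKER_REDIS_TIMEOUT_LOCK.total_seconds(), replace_ttl=True)
        self.last_lock_update = datetime.now()
        self.logger.debug("lock {%s} updated", self.lock_name)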
Example #4
    def __init__(self, test_client, broker_url, broker_connection,
                 pg_docker_fixture):
        self.test_client = test_client
        self.broker_url = broker_url
        self.broker_connection = broker_connection
        self.pg_docker_fixture = pg_docker_fixture
        self.last_lock_update = datetime.now()
        self.lock_name = make_kirin_lock_name("{}_worker".format(
            ConnectorType.piv.value))
Example #5
def gtfs_poller(self, config):
    func_name = "gtfs_poller"
    contributor = (
        model.Contributor.query_existing()
        .filter_by(id=config.get("contributor"), connector_type=ConnectorType.gtfs_rt.value)
        .first()
    )

    logger = logging.LoggerAdapter(logging.getLogger(__name__), extra={"contributor": contributor.id})

    lock_name = make_kirin_lock_name(func_name, contributor.id)
    with get_lock(logger, lock_name, app.config["REDIS_LOCK_TIMEOUT_POLLER"]) as locked:
        if not locked or not config.get("feed_url"):
            new_relic.ignore_transaction()
            return

        retrieval_interval = config.get("retrieval_interval", 10)
        if _is_last_call_too_recent(func_name, contributor.id, retrieval_interval):
            # do nothing if the last call is too recent
            new_relic.ignore_transaction()
            return

        logger.debug("polling of %s", config.get("feed_url"))

        # We do a HEAD request at the very beginning of polling and compare it with the previous one to check
        # whether the gtfs-rt feed has changed.
        # If the HEAD request or the Redis get/set fails, we ignore this check and do the polling anyway.
        if not _is_newer(config):
            new_relic.ignore_transaction()
            manage_db_no_new(connector_type=ConnectorType.gtfs_rt.value, contributor_id=contributor.id)
            return

        try:
            response = _retrieve_gtfsrt(config)
            response.raise_for_status()
        except Exception as e:
            manage_db_error(
                data="",
                connector_type=ConnectorType.gtfs_rt.value,
                contributor_id=contributor.id,
                error="Http Error",
                is_reprocess_same_data_allowed=True,
            )
            logger.debug(six.text_type(e))
            return

        wrap_build(KirinModelBuilder(contributor), response.content)
        logger.info("%s for %s is finished", func_name, contributor.id)
Example #6
def gtfs_poller(self, config):
    func_name = 'gtfs_poller'
    logger = logging.LoggerAdapter(logging.getLogger(__name__), extra={'contributor': config['contributor']})
    logger.debug('polling of %s', config['feed_url'])

    contributor = config['contributor']
    lock_name = make_kirin_lock_name(func_name, contributor)
    with get_lock(logger, lock_name, app.config['REDIS_LOCK_TIMEOUT_POLLER']) as locked:
        if not locked:
            new_relic.ignore_transaction()
            return

        # We do a HEAD request at the very beginning of polling and compare it with the previous one to check
        # whether the gtfs-rt feed has changed.
        # If the HEAD request or the Redis get/set fails, we ignore this check and do the polling anyway.
        if not _is_newer(config):
            new_relic.ignore_transaction()
            manage_db_no_new(connector='gtfs-rt', contributor=contributor)
            return

        try:
            response = requests.get(config['feed_url'], timeout=config.get('timeout', 1))
            response.raise_for_status()

        except Exception as e:
            manage_db_error(data='', connector='gtfs-rt', contributor=contributor,
                            status='KO', error='Http Error')
            logger.debug(str(e))
            return

        nav = navitia_wrapper.Navitia(url=config['navitia_url'],
                                      token=config['token'],
                                      timeout=5,
                                      cache=redis,
                                      query_timeout=app.config.get('NAVITIA_QUERY_CACHE_TIMEOUT', 600),
                                      pubdate_timeout=app.config.get('NAVITIA_PUBDATE_CACHE_TIMEOUT', 600))\
            .instance(config['coverage'])

        proto = gtfs_realtime_pb2.FeedMessage()
        try:
            proto.ParseFromString(response.content)
        except DecodeError:
            manage_db_error(proto, 'gtfs-rt', contributor=contributor, status='KO', error='Decode Error')
            logger.debug('invalid protobuf')
        else:
            model_maker.handle(proto, nav, contributor)
            logger.info('%s for %s is finished', func_name, contributor)
Example #7
def purge_trip_update(self, config):
    func_name = "purge_trip_update"
    contributor = config["contributor"]
    logger = logging.LoggerAdapter(logging.getLogger(__name__),
                                   extra={"contributor": contributor})
    logger.debug("purge trip update for %s", contributor)

    lock_name = make_kirin_lock_name(func_name, contributor)
    with get_lock(logger, lock_name,
                  app.config["REDIS_LOCK_TIMEOUT_PURGE"]) as locked:
        if not locked:
            logger.warning("%s for %s is already in progress", func_name,
                           contributor)
            return
        until = datetime.date.today() - datetime.timedelta(
            days=int(config["nb_days_to_keep"]))
        logger.info("purge trip update for {} until {}".format(
            contributor, until))

        TripUpdate.remove_by_contributors_and_period(
            contributors=[contributor], start_date=None, end_date=until)
        logger.info("%s for %s is finished", func_name, contributor)
Example #8
def test_redis_lock_update(
    test_client,
    pg_docker_fixture,
    rabbitmq_docker_fixture,
    broker_connection,
    mq_handler,
):
    logger = logging.getLogger("kirin.queue_workers.piv.piv_worker")
    with patch.object(logger, "debug") as mock_debug:
        with PivWorkerTest(test_client, rabbitmq_docker_fixture.url,
                           broker_connection, pg_docker_fixture):
            # Check that PivWorker is creating the queue
            wait_until(
                lambda: is_queue_created(broker_connection, PIV_QUEUE_NAME))

            time.sleep((WORKER_REDIS_TIMEOUT_LOCK.total_seconds() // 5) + 1)
            mq_handler.publish('{"key": "Some valid JSON"}',
                               PIV_CONTRIBUTOR_ID)
            wait_until(lambda: RealTimeUpdate.query.count() == 1)
            # Check lock refreshed
            mock_debug.assert_any_call(
                "lock {%s} updated",
                make_kirin_lock_name("{}_worker".format(
                    ConnectorType.piv.value)))
Example #9
    def poll(self, model_builder):
        contributor_id = self.config.get("contributor")

        logger = logging.LoggerAdapter(logging.getLogger(__name__), extra={"contributor": contributor_id})

        logger.debug("start polling")
        lock_name = make_kirin_lock_name(self.name, contributor_id)
        logger.debug("try getting lock")
        with get_lock(logger, lock_name, app.config["REDIS_LOCK_TIMEOUT_POLLER"]) as locked:
            logger.debug("locked acquired")
            if not locked or not self.config.get("feed_url"):
                logger.debug("lock is not aquired, ignoring the transaction")
                new_relic.ignore_transaction()
                return

            if self._is_last_call_too_recent():
                # do nothing if the last call is too recent
                logger.debug("last call was too recent, ignoring the transaction")
                new_relic.ignore_transaction()
                return

            logger.debug("polling of %s", self.config.get("feed_url"))
            self._poll_feed(model_builder)
            logger.debug("%s for %s is finished", self.name, contributor_id)
Example #10
    @classmethod
    def get_lock_name(cls) -> str:
        # only 1 contributor and 1 worker at a time
        return make_kirin_lock_name("{}_worker".format(cls.get_connector_type().value))
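Every example above leans on the same two helpers. A minimal sketch of what they might look like, assuming redis-py and a non-blocking acquire that yields whether the lock was taken; Kirin's actual implementations may differ:

from contextlib import contextmanager

import redis

redis_client = redis.Redis()  # hypothetical client; Kirin wires its own

def make_kirin_lock_name(*parts):
    # hypothetical: build a namespaced lock key so tasks and contributors
    # never collide
    return "|".join(("kirin",) + parts)

@contextmanager
def get_lock(logger, lock_name, timeout):
    # hypothetical: try a non-blocking Redis lock, yield whether it was
    # acquired, and release it on the way out if we own it
    lock = redis_client.lock(lock_name, timeout=timeout)
    acquired = lock.acquire(blocking=False)
    try:
        yield acquired
    finally:
        if acquired:
            lock.release()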