Example #1
def snapshot_single_service(service_id, search_role):

    start = time.time()
    service = uec_dos.get_service_by_service_id(service_id, search_role)
    round_trip_time = time.time() - start

    logger.info(
        f"Ran probe for {service_id} as {search_role} (Took {round_trip_time:.2f}s)"
    )
    database.add_metric({
        "eventTime": datetime.datetime.utcnow(),
        "type": "single_service",
        "service_id": service_id,
        "total_time": float("{0:.2f}".format(round_trip_time)),
    })

    if service:
        store_snapshot(service)

        try:
            logger.debug(f"{service_id} - {service['name']}")

            # Only queue a status check if the status has a value
            if service["capacity"]["status"]["human"] != "":
                logger.debug(f"Queueing capacity check for {service_id}")

                q.enqueue(has_status_changed, service["id"])
            else:
                logger.debug("Empty capacity - skipping status check")

        except KeyError:
            logger.exception("Service record is missing expected fields")
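
None of these examples show where q comes from. Judging by the enqueue/ttl/at_front usage it is an RQ queue; a minimal sketch of the setup the snippets appear to assume (the host and port are placeholders, not taken from the source project):

from redis import Redis
from rq import Queue

# An RQ queue backed by a local Redis instance; connection details are
# assumptions, not taken from the source project.
q = Queue(connection=Redis(host="localhost", port=6379))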
Example #2
def add_service_status_job():

    if config.SLACK_ENABLED:

        q.enqueue(
            slack.send_slack_status_update, ttl=f"{config.STATUS_UPDATE_RATE_MINUTES}m"
        )

        logger.info(
            "Slack Status Summary will run every "
            f"{config.STATUS_UPDATE_RATE_MINUTES} minute(s)."
        )

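
String TTLs such as f"{config.STATUS_UPDATE_RATE_MINUTES}m" rely on RQ accepting h/m/s-suffixed timeout strings; an equivalent call can pass plain seconds instead:

# Equivalent to the enqueue above, using an integer TTL in seconds rather
# than the "Nm" string form.
q.enqueue(
    slack.send_slack_status_update,
    ttl=config.STATUS_UPDATE_RATE_MINUTES * 60,
)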
Example #3
def add_search_jobs():

    probe_list = probes.get_watched_search_list()

    search_job_count = 0

    for probe in probe_list:
        q.enqueue(
            monitor.snapshot_service_search, probe, ttl=f"{config.CHECK_RATE_MINUTES}m"
        )
        search_job_count += 1

    logger.info(
        f"{search_job_count} search probes configured to "
        f"run every {config.CHECK_RATE_MINUTES} minute(s)."
    )
Example #4
def add_service_jobs():

    service_list = probes.get_watched_service_list()

    service_job_count = 0

    for service in service_list:

        q.enqueue(
            monitor.snapshot_single_service,
            service["id"],
            service["search_role"],
            ttl=f"{config.CHECK_RATE_MINUTES}m",
        )

        service_job_count += 1

    logger.info(
        f"{service_job_count} service probes configured to "
        f"run every {config.CHECK_RATE_MINUTES} minute(s)."
    )
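
probes.get_watched_service_list is not shown; from the fields the loop reads, each entry presumably carries at least an id and a search_role. A hypothetical record, with invented values:

service_list = [
    {"id": "123456789", "search_role": "Test Role"},  # values are illustrative only
]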
Example #5
def has_status_changed(service_id):

    logger.debug(f"Checking status for {service_id}")

    status = database.get_status_for_single_service(service_id)

    if status:
        old_status = status["capacity"]
        old_rag = status["rag"]

    else:
        service_data = uec_dos.get_service_by_service_id(service_id)
        logger.warning("No status for this service - adding a status entry")
        update_status_from_service_data(service_data)
        return

    service_snapshot = database.get_most_recent_snapshot_for_service(
        service_id)

    if service_snapshot:

        new_status = service_snapshot["capacity"]["status"]

        logger.debug(f"Previous Status: {old_status}, "
                     f"New Status: {new_status}")

        if old_status == new_status:
            logger.debug(f"Status for {service_id} hasn't changed")
            update_status_from_latest_snapshot(service_id)
            return

        else:

            if is_robot_change(old_status, new_status):
                logger.info("Skipping change as it's just the 24h ROBOT")
                return

            # Retrieve the entire service record from the DoS as this
            # contains details of the person who changed the status
            service_data = uec_dos.get_service_by_service_id(service_id)

            if not service_data:
                logger.error("No service information retrieved from DoS")
                return

            service_updated_by = service_data["capacity"]["updated"]["by"]
            service_status = service_data["capacity"]["status"]["human"]
            service_rag = service_data["capacity"]["status"]["rag"]
            service_name = service_data["name"]
            service_postcode = service_data["postcode"]
            service_updated_date = service_data["capacity"]["updated"]["date"]
            service_updated_time = service_data["capacity"]["updated"]["time"]
            service_region = service_data["region"]["name"]
            service_type = service_data["type"]["name"]

            logger.info(f"Status has changed for {service_id} - "
                        f"{service_name} - {service_status} "
                        f"({service_rag})")

            # Work around the API returning an incorrect time by subtracting
            # an hour from the supplied timestamp; only needed while in BST.
            # TODO: Remove this fix when the API returns the correct local time
            service_updated_time, service_updated_date = utils.adjust_timestamp_for_api_bst_bug(
                service_updated_time, service_updated_date)

            document = {
                "id": service_id,
                "name": service_name,
                "type": service_type,
                "postCode": service_postcode,
                "region": service_region,
                "eventTime": datetime.datetime.utcnow(),
                "capacity": {
                    "newStatus": service_status,
                    "newRag": service_rag,
                    "previousStatus": old_status,
                    "previousRag": old_rag,
                    "changedBy": service_updated_by,
                    "changedDate": service_updated_date,
                    "changedTime": service_updated_time,
                },
                "source": config.APP_NAME,
            }

            database.add_change(document)

            update_status_from_service_data(service_data)

            if config.SMS_ENABLED:
                q.enqueue(
                    sms.send_sms,
                    config.MOBILE_NUMBER,
                    f"{service_name} ({service_id}) in {service_region} "
                    f"changed to {service_status} ({service_rag}) by "
                    f"{service_updated_by} at {service_updated_time}.",
                    at_front=True,
                )

            if config.SLACK_ENABLED:
                q.enqueue(
                    slack.send_slack_notification,
                    service_name,
                    service_region,
                    service_status,
                    old_status,
                    service_type,
                    service_updated_time,
                    service_updated_by,
                    at_front=True,
                )

            return
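
utils.adjust_timestamp_for_api_bst_bug is referenced above but not shown. A minimal sketch of what it plausibly does, assuming HH:MM times and DD-MM-YYYY dates (the formats and the body are guesses, not the project's actual helper):

import datetime


def adjust_timestamp_for_api_bst_bug(time_str, date_str):
    # Parse the supplied timestamp, subtract the one-hour BST offset, and
    # re-split it so a change that crosses midnight also rolls the date back.
    stamp = datetime.datetime.strptime(f"{date_str} {time_str}", "%d-%m-%Y %H:%M")
    stamp -= datetime.timedelta(hours=1)
    return stamp.strftime("%H:%M"), stamp.strftime("%d-%m-%Y")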
Example #6
def snapshot_service_search(probe):
    postcode = probe["postcode"]
    search_distance = probe["search_distance"]
    service_types = probe["service_types"]
    number_per_type = probe["number_per_type"]
    gp = probe["gp"]
    search_role = probe["search_role"]

    try:
        start = time.time()
        services = uec_dos.get_services_by_service_search(
            postcode, search_distance, service_types, number_per_type, gp,
            search_role)
        round_trip_time = time.time() - start

        logger.info(
            f"Ran probe for {postcode} as {search_role}, at {search_distance} "
            f"miles, for {number_per_type} each of service types: "
            f"{service_types} - {len(services)} services (Took {round_trip_time:.2f}s)"
        )
        database.add_metric({
            "eventTime": datetime.datetime.utcnow(),
            "type": "service_search",
            "postcode": postcode,
            "search_distance": int(search_distance),
            "service_types": service_types,
            "number_per_type": int(number_per_type),
            "gp": gp,
            "search_role": search_role,
            "total_time": round(round_trip_time, 2),
        })

        for service in services:

            store_snapshot(service)

            service_id = service["id"]

            # Only queue a status check if the status has a value
            if service["capacity"]["status"]["human"] != "":
                logger.debug(f"Queueing capacity check for {service_id}")

                q.enqueue(has_status_changed, service_id)
            else:
                logger.debug("Empty capacity - skipping status check")
                update_status_from_latest_snapshot(service_id)

    except Exception:
        logger.exception(
            f"Error whilst running probe for {postcode}, at {search_distance} "
            f"miles, for {number_per_type} each of service types: "
            f"{service_types}")
Example #7
def get_coverage_data():
    statuses = database.get_all_postcodes()
    count = len(statuses)
    logger.info(f"REQUEST /coverage_data {count} records")
    return jsonify(statuses)
Example #8
def get_statuses():
    statuses = database.get_low_statuses()
    count = len(statuses)
    logger.info(f"REQUEST /statuses {count} records")
    return jsonify(statuses)
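
get_coverage_data and get_statuses read like Flask view functions: they return jsonify(...) and log a REQUEST path. Presumably they are registered along these lines, with the paths inferred from the log messages:

from flask import Flask

app = Flask(__name__)

# Hypothetical registration; the decorator form would work equally well.
app.add_url_rule("/coverage_data", view_func=get_coverage_data)
app.add_url_rule("/statuses", view_func=get_statuses)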
Example #9
def add_housekeeping_jobs():

    q.enqueue(housekeeping.get_old_snapshots)
    logger.info(f"Housekeeping jobs configured to run every 5 minutes.")
Example #10
add_search_jobs()
add_service_jobs()
add_service_status_job()
add_housekeeping_jobs()

# Set up the scheduled jobs
schedule.every(config.CHECK_RATE_MINUTES).minutes.do(add_search_jobs)
schedule.every(config.CHECK_RATE_MINUTES).minutes.do(add_service_jobs)
schedule.every(config.STATUS_UPDATE_RATE_MINUTES).minutes.do(add_service_status_job)
schedule.every(5).minutes.do(add_housekeeping_jobs)

while True:
    logger.info(f"Tick!")
    schedule.run_pending()
    time.sleep(60)