def get_service_by_service_id(service_id: str,
                              search_role: str = "PATHWAYS_REFERRAL") -> dict:

    # Throttle so we don't hit the DoS API with more than 2 requests per second.
    time.sleep(0.5)

    # Query with the requested search role first, falling back to the
    # Digital Referral role if no service is returned.
    primary_session = get_session(search_role)
    digital_session = get_session("DIGITAL_REFERRAL")

    url = (f"{config.UEC_DOS_BASE_URL}/app/controllers/api/v1.0/"
           f"services/byServiceId/{service_id}")
    result = primary_session.get(url)
    service_json = result.json()

    if service_json["success"]["serviceCount"] == 0:
        logger.debug("Trying Digital Referral search role")
        result = digital_session.get(url)
        service_json = result.json()

    try:
        service = service_json["success"]["services"][0]
        return service

    except (IndexError, KeyError):
        logger.exception("Service not found in JSON response")
        return None
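# A minimal usage sketch, not part of the original module. The service ID below
# is illustrative, and logger is assumed to be the module-level logger used
# throughout these examples.
example_service_id = "2000012345"
service = get_service_by_service_id(example_service_id,
                                    search_role="PATHWAYS_REFERRAL")

if service is None:
    # None is returned when neither search role can find the service.
    logger.warning(f"Service {example_service_id} not found in the DoS")
else:
    logger.info(f"{service['id']} - {service['name']}")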
def get_most_recent_snapshot_for_service(service_id):
    logger.debug("Getting latest snapshot from database")
    query = {"id": service_id, "source": config.APP_NAME}
    # find_one with a sort returns the newest snapshot, or None if there are
    # no snapshots for this service, so an empty result no longer raises.
    result = snapshots.find_one(query,
                                sort=[("snapshotTime", pymongo.DESCENDING)])
    return result
def get_all_postcodes():
    logger.debug("Getting all service postcodes")

    projection = {"_id": False}
    results = statuses.find(projection=projection)

    return list(results)
def update_status(document):
    query = {"id": document["id"]}
    update = {"$set": document}
    try:
        r = statuses.update_one(query, update, upsert=True)
        logger.debug(f"Updated status for {document['id']}")
        return r
    except Exception:
        logger.exception(f"Failed to update status for {document['id']}")
        raise
def get_all_statuses():
    logger.debug("Getting all service statuses")

    projection = {"_id": False}
    results = statuses.find(projection=projection).sort([("capacity",
                                                          pymongo.DESCENDING)])

    result_list = [result for result in results if result["capacity"] != ""]

    return result_list
def add_snapshot(document):
    try:
        snapshots.insert_one(document)
        logger.debug(f"Added snapshot for {document['id']}")
    except errors.WriteError:
        logger.error("Error writing snapshot to database")
    except errors.OperationFailure as e:
        if "over your space quota" in str(e):
            logger.error(
                "Error writing snapshot to database - no database quota remaining"
            )
        else:
            raise
def get_status_for_single_service(service_id):
    logger.debug(f"Getting status for single service {service_id}")
    query = {"id": service_id}
    projection = {"id": True, "capacity": True, "rag": True}
    result = statuses.find_one(query, projection=projection)

    if result is None:
        logger.debug(f"No status found for {service_id}")

    return result
def get_old_snapshots():
    logger.debug("Getting previous snapshots")
    query = {"snapshotTime": {"$lt": datetime.now()}}
    results = list(snapshots.find(query).limit(1))

    if not results:
        logger.debug("No snapshots found")
        return None

    for result in results:
        pprint.pprint(result)
def get_session(search_role) -> requests.Session:

    s = requests.Session()

    if search_role == "DIGITAL_REFERRAL":
        s.auth = (config.UEC_DOS_USERNAME_DIGITAL,
                  config.UEC_DOS_PASSWORD_DIGITAL)
        logger.debug(f"Using account {config.UEC_DOS_USERNAME_DIGITAL}")
    else:
        s.auth = (config.UEC_DOS_USERNAME, config.UEC_DOS_PASSWORD)
        logger.debug(f"Using account {config.UEC_DOS_USERNAME}")
    return s
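# A minimal usage sketch, not part of the original module, assuming
# config.UEC_DOS_BASE_URL and the credential settings are populated as in the
# examples above; the service ID is illustrative.
session = get_session("DIGITAL_REFERRAL")
response = session.get(f"{config.UEC_DOS_BASE_URL}/app/controllers/api/v1.0/"
                       f"services/byServiceId/2000012345")
response.raise_for_status()
logger.debug(f"Service count: {response.json()['success']['serviceCount']}")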
def store_snapshot(service):
    if service["capacity"]["status"]["human"]:
        logger.debug(
            f"{service['name']} - {service['capacity']['status']['human']}")

    snapshot = {
        "id": service["id"],
        "name": service["name"],
        "type": service["type"]["name"],
        "postCode": service["postcode"],
        "easting": int(service["easting"]),
        "northing": int(service["northing"]),
        "snapshotTime": datetime.datetime.utcnow(),
        "capacity": {
            "status": service["capacity"]["status"]["human"],
            "rag": service["capacity"]["status"]["rag"],
        },
        "source": config.APP_NAME,
    }

    database.add_snapshot(snapshot)
def snapshot_single_service(service_id, search_role):

    start = time.time()
    service = uec_dos.get_service_by_service_id(service_id, search_role)
    round_trip_time = time.time() - start

    logger.info(
        f"Ran probe for {service_id} as {search_role} (Took {round_trip_time})"
    )
    database.add_metric({
        "eventTime": datetime.datetime.utcnow(),
        "type": "single_service",
        "service_id": service_id,
        "total_time": float("{0:.2f}".format(round_trip_time)),
    })

    if service:
        store_snapshot(service)

        try:
            logger.debug(f"{service_id} - {service['name']}")

            # Only store snapshots and queue status checks if the status has a value
            if service["capacity"]["status"]["human"] != "":
                logger.debug("Queueing capacity check for {service_id}")

                q.enqueue(has_status_changed, service["id"])
            else:
                logger.debug("Empty capacity - skipping status check")

        except KeyError:
            logger.exception("Service data is missing expected fields")
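# The queue object q is not defined in this section. A minimal sketch of how it
# might be created with RQ, assuming a local Redis instance (the connection
# details are assumptions, not confirmed by the source):
from redis import Redis
from rq import Queue

q = Queue(connection=Redis())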
def has_status_changed(service_id):

    logger.debug(f"Checking status for {service_id}")

    status = database.get_status_for_single_service(service_id)

    if status:
        old_status = status["capacity"]
        old_rag = status["rag"]

    else:
        service_data = uec_dos.get_service_by_service_id(service_id)
        logger.warning("No status for this service - adding a status entry")
        update_status_from_service_data(service_data)
        return

    service_snapshot = database.get_most_recent_snapshot_for_service(
        service_id)

    if service_snapshot:

        # Only read the snapshot after checking it exists, so a missing
        # snapshot doesn't raise a TypeError.
        new_status = service_snapshot["capacity"]["status"]

        logger.debug(f"Previous Status: {old_status}, "
                     f"New Status: {new_status}")

        if old_status == new_status:
            logger.debug(f"Status for {service_id} hasn't changed")
            update_status_from_latest_snapshot(service_id)
            return

        else:

            if is_robot_change(old_status, new_status):
                logger.info("Skipping change as it's just the 24h ROBOT")
                return

            # Retrieve the entire service record from the DoS as this
            # contains details of the person who changed the status
            service_data = uec_dos.get_service_by_service_id(service_id)

            if not service_data:
                logger.error("No service information retrieved from DoS")
                return

            service_updated_by = service_data["capacity"]["updated"]["by"]
            service_status = service_data["capacity"]["status"]["human"]
            service_rag = service_data["capacity"]["status"]["rag"]
            service_name = service_data["name"]
            service_postcode = service_data["postcode"]
            service_updated_date = service_data["capacity"]["updated"]["date"]
            service_updated_time = service_data["capacity"]["updated"]["time"]
            service_region = service_data["region"]["name"]
            service_type = service_data["type"]["name"]

            logger.info(f"Status has changed for {service_id} - "
                        f"{service_name} - {service_status} "
                        f"({service_rag})")

            # Fix the incorrect service_updated_time by subtracting an hour from the supplied time.
            # Below line needs to be included when in BST
            # TODO: Remove this fix when the API is fixed to return the correct local time
            service_updated_time, service_updated_date = utils.adjust_timestamp_for_api_bst_bug(
                service_updated_time, service_updated_date)

            document = {
                "id": service_id,
                "name": service_name,
                "type": service_type,
                "postCode": service_postcode,
                "region": service_region,
                "eventTime": datetime.datetime.utcnow(),
                "capacity": {
                    "newStatus": service_status,
                    "newRag": service_rag,
                    "previousStatus": old_status,
                    "previousRag": old_rag,
                    "changedBy": service_updated_by,
                    "changedDate": service_updated_date,
                    "changedTime": service_updated_time,
                },
                "source": config.APP_NAME,
            }

            database.add_change(document)

            update_status_from_service_data(service_data)

            if config.SMS_ENABLED:
                q.enqueue(
                    sms.send_sms,
                    config.MOBILE_NUMBER,
                    f"{service_name} ({service_id}) in {service_region} "
                    f"changed to {service_status} ({service_rag}) by "
                    f"{service_updated_by} at {service_updated_time}.",
                    at_front=True,
                )

            if config.SLACK_ENABLED:
                q.enqueue(
                    slack.send_slack_notification,
                    service_name,
                    service_region,
                    service_status,
                    old_status,
                    service_type,
                    service_updated_time,
                    service_updated_by,
                    at_front=True,
                )

            return
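# utils.adjust_timestamp_for_api_bst_bug is referenced above but not shown in
# this section. A minimal sketch of what it might do, based only on the comment
# above (subtract an hour from the supplied time while the UK is in BST); the
# "%d-%m-%Y" and "%H:%M" formats are assumptions, not confirmed by the source.
def adjust_timestamp_for_api_bst_bug(updated_time, updated_date):
    from datetime import datetime, timedelta

    # Combine the date and time, roll back one hour, then split again so a
    # change that crosses midnight also moves the date back a day.
    combined = datetime.strptime(f"{updated_date} {updated_time}",
                                 "%d-%m-%Y %H:%M") - timedelta(hours=1)
    return combined.strftime("%H:%M"), combined.strftime("%d-%m-%Y")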
def snapshot_service_search(probe):
    postcode = probe["postcode"]
    search_distance = probe["search_distance"]
    service_types = probe["service_types"]
    number_per_type = probe["number_per_type"]
    gp = probe["gp"]
    search_role = probe["search_role"]

    try:
        start = time.time()
        services = uec_dos.get_services_by_service_search(
            postcode, search_distance, service_types, number_per_type, gp,
            search_role)
        round_trip_time = time.time() - start

        logger.info(
            f"Ran probe for {postcode} as {search_role}, at {search_distance} "
            f"miles, for {number_per_type} each of service types: "
            f"{service_types} - {len(services)} services (Took {round_trip_time})"
        )
        database.add_metric({
            "eventTime": datetime.datetime.utcnow(),
            "type": "service_search",
            "postcode": postcode,
            "search_distance": int(search_distance),
            "service_types": service_types,
            "number_per_type": int(number_per_type),
            "gp": gp,
            "search_role": search_role,
            "total_time": float("{0:.2f}".format(round_trip_time)),
        })

        for service in services:

            store_snapshot(service)

            service_id = service["id"]

            # Only store snapshots and queue status checks if the status has a value
            if service["capacity"]["status"]["human"] != "":
                logger.debug("Queueing capacity check for {service_id}")

                q.enqueue(has_status_changed, service_id)
            else:
                logger.debug("Empty capacity - skipping status check")
                update_status_from_latest_snapshot(service_id)

    except Exception:
        logger.exception(
            f"Error whilst running probe for {postcode}, at {search_distance} "
            f"miles, for {number_per_type} each of service types: "
            f"{service_types}")
def remove_watched_service(service_id):
    logger.debug(f"Removing watch for service {service_id}")
    query = {"id": service_id}
    r = watched_services.delete_many(query)
    return r
def add_change(document):
    changes.insert_one(document)
    logger.debug(f"Added change event for {document['id']}")
def add_watched_service(service_id):
    logger.debug(f"Adding watch for service {service_id}")
    query = {"id": service_id}
    update = {"id": service_id}
    r = watched_services.replace_one(query, update, upsert=True)
    return r
def get_watched_searches():
    logger.debug("Getting search watchlist from database")
    query = {}
    results = watched_searches.find(query)
    return list(results)