Example #1
import logging

def query_process_data(node: str, client: object, start: str, end: str,
                       interval: str, value: str, time_list: list) -> dict:
    json_data = {}
    try:
        node_data = query_node_data(node, client, start, end, interval, value)

        # print(f"{node}: ")
        # print(json.dumps(node_data, indent=4))

        if node_data:
            json_data = process_node_data(node, node_data, value, time_list)
    except Exception:
        # Log enough context to trace which query failed.
        logging.error(
            f"Failed to query and process data of node: {node}, "
            f"time range: {start} - {end}, interval: {interval}, "
            f"value: {value}")
    return json_data
Example #2
def query_process_data(node: str, influx: object, start: str, end: str,
                       interval: str, value: str, time_list: list) -> dict:
    json_data = {}
    try:
        node_data = query_node_data(node, influx, start, end, interval, value)

        # print(f"{node}: ")
        # print(json.dumps(node_data, indent=4))

        if node_data:
            json_data = process_node_data(node, node_data, value, time_list)
    except Exception as err:
        # Report the failure with its cause instead of a bare message.
        print(f"Query Data Error: {err}")
    return json_data
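Both variants delegate to project-local helpers query_node_data and process_node_data, which are not shown on this page. The following is a minimal usage sketch with hypothetical stubs standing in for those helpers and for the InfluxDB client; the names and return shapes are assumptions, not the project's real API:

# Hypothetical stubs for illustration only; the real helpers query
# InfluxDB and reshape its response.
def query_node_data(node, client, start, end, interval, value):
    # Pretend the backend returned one aggregated point for the node.
    return {"series": [{"time": start, value: 42.0}]}

def process_node_data(node, node_data, value, time_list):
    # Flatten the raw response into {timestamp: value} for this node.
    return {t: 42.0 for t in time_list}

data = query_process_data(
    "cpu-1", None,
    start="2020-02-12T14:00:00Z", end="2020-02-12T15:00:00Z",
    interval="5m", value="mean",
    time_list=["2020-02-12T14:00:00Z"])
print(data)  # {'2020-02-12T14:00:00Z': 42.0}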
def get_metrics(start, end, interval, value):  # noqa: E501
    """get_metrics

    Get **unified metrics** based on the specified start time, end time, time interval, and value type. The **start** and **end** times should follow the date-time notation defined in [RFC 3339, section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6), e.g. `2020-02-12T14:00:00Z`; the time **interval** should follow **duration literals**, which specify a length of time; the **value** type should only be `min`, `max`, `mean`, or `median`. A duration literal is an integer literal followed immediately (with no spaces) by a duration unit; the units include `s` (second), `m` (minute), `h` (hour), `d` (day), and `w` (week). # noqa: E501

    :param start: start time of time range of the monitoring metrics
    :type start: str
    :param end: end time of time range of the monitoring metrics
    :type end: str
    :param interval: time interval for aggregating the monitoring metrics
    :type interval: str
    :param value: value type of the monitoring metrics
    :type value: str

    :rtype: UnifiedMetrics
    """
    # Initialization
    config = parse_conf()
    node_list = parse_host()
    influx = QueryInfluxdb(config["influxdb"])

    # Time string used in query_data
    start_str = start
    end_str = end

    print(f"Start time str: {start_str}; End time str: {end_str}")

    start = util.deserialize_datetime(start)
    end = util.deserialize_datetime(end)

    # print(f"Start time: {start}; End time: {end}")

    # Sanity check
    if start > end:
        return ErrorMessage(
            error_code='400 INVALID_PARAMETERS',
            error_message='Start time should be no later than end time')
    else:
        unified_metrics = UnifiedMetrics()

        query_start = time.time()
        # Get time stamp
        # time_list = gen_timestamp(start, end, interval)
        # Epoch time in seconds
        epoch_time_list = gen_epoch_timestamp(start, end, interval)
        unified_metrics.TimeStamp = epoch_time_list

        # print(epoch_time_list)

        # Query Nodes and Jobs info
        all_data = query_data(node_list, influx, start_str, end_str, interval,
                              value)

        query_elapsed = round(time.time() - query_start, 2)

        print(f"Query elapsed: {query_elapsed}s")

        process_start = time.time()
        # Process Nodes and Jobs info
        unified_metrics.JobsInfo = all_data["job_data"]
        unified_metrics.NodesInfo = process_node_data(node_list,
                                                      all_data["node_data"],
                                                      value)

        process_elapsed = round(time.time() - process_start, 2)
        total_elapsed = round(query_elapsed + process_elapsed, 2)
        # In seconds
        time_range = int(end.timestamp()) - int(start.timestamp())

        with open("requests.log", "a+") as requests_log:
            print(
                f"{time_range}:{interval}:{value}:{query_elapsed}:{process_elapsed}:{total_elapsed}",
                file=requests_log)

    return unified_metrics
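gen_epoch_timestamp is not shown here, but the docstring pins down the inputs it has to handle: RFC 3339 start/end times (already deserialized to datetime objects above) and a duration-literal interval with units `s`, `m`, `h`, `d`, `w`. A minimal sketch of that step, with hypothetical names, could look like:

import re

# Seconds per duration unit, per the get_metrics docstring.
_UNIT_SECONDS = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}

def parse_interval(interval: str) -> int:
    # "5m" -> 300; reject anything that is not <int><unit>.
    match = re.fullmatch(r"(\d+)([smhdw])", interval)
    if not match:
        raise ValueError(f"Invalid duration literal: {interval}")
    count, unit = match.groups()
    return int(count) * _UNIT_SECONDS[unit]

def gen_epoch_timestamp(start, end, interval):
    # Epoch seconds from start to end (inclusive) at the given step.
    step = parse_interval(interval)
    return list(range(int(start.timestamp()),
                      int(end.timestamp()) + 1, step))

For example, parse_interval("5m") returns 300, so a one-hour range at a 5-minute interval yields 13 timestamps.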