Пример #1
0
 def _parse_job_cpu(self, job):
     """Extract host-total CPU utilization from a finished job.

     A failed job yields a zero-valued, zero-duration placeholder so
     downstream aggregation still receives an interval.
     """
     if job.passed:
         utilization = job.result["data"]["end"]["cpu_utilization_percent"]
         return PerfInterval(utilization["host_total"], 1, "cpu_percent")
     return PerfInterval(0, 0, "cpu_percent")
Пример #2
0
def get_interval(s_start: Dict, s_end: Dict, job_start: float,
                 neper_start: float) -> Tuple[PerfInterval, PerfInterval]:
    """Build one (transactions, cpu) PerfInterval pair from two samples.

    ``s_start`` and ``s_end`` are consecutive rows of neper's samples.csv.
    Neper records time with CLOCK_MONOTONIC, so the wall-clock timestamp
    is reconstructed from ``job_start`` (unix time of the job) and
    ``neper_start`` (monotonic time of the first sample).
    """
    start_time = float(s_start['time'])
    end_time = float(s_end['time'])
    duration = end_time - start_time

    # rebase the monotonic sample time onto unix time
    timestamp = job_start + (start_time - neper_start)

    tx_count = int(s_end['transactions']) - int(s_start['transactions'])

    # cpu usage percent = (user time delta + system time delta) / elapsed
    cpu_time = (
        (float(s_end['utime']) - float(s_start['utime']))
        + (float(s_end['stime']) - float(s_start['stime']))
    )
    cpu_usage = cpu_time / duration

    return (
        PerfInterval(tx_count, duration, 'transactions', timestamp),
        PerfInterval(cpu_usage, duration, 'cpu_percent', timestamp),
    )
Пример #3
0
 def _parse_job_cpu(self, job):
     """Return a PerfInterval covering host CPU utilization for the job.

     A failed job yields a zero placeholder stamped with the current
     time instead of parsed results.
     """
     if not job.passed:
         return PerfInterval(0, 0, "cpu_percent", time.time())
     start_section = job.result["data"]["start"]
     utilization = job.result["data"]["end"]["cpu_utilization_percent"]["host_total"]
     started_at = start_section["timestamp"]["timesecs"]
     length = start_section["test_start"]["duration"]
     # scale by the duration so value/duration still equals the reported
     # utilization percentage
     return PerfInterval(utilization * length, length, "cpu_percent", started_at)
Пример #4
0
    def _parse_job_samples(self, job: Job) ->\
            Tuple[ParallelPerfResult, ParallelPerfResult]:
        """Convert a neper job's samples into (flow, cpu) result trees.

        Consecutive lines of samples.csv are differenced pairwise to get
        per-interval transaction counts and durations. Sample times are
        CLOCK_MONOTONIC and are rebased onto unix time using the job
        start time. samples.csv looks like this:
        ```
        tid,flow_id,time,transactions,utime,stime,maxrss,minflt,majflt,nvcsw,nivcsw,latency_min,latency_mean,latency_max,latency_stddev
        0,0,1898645.747723502,1,0.000371,0.000000,1144,39,0,2,0,0.000000,0.000000,0.000000,-nan
        0,0,1898647.747733162,59322,0.185458,0.241758,1144,43,0,59320,0,0.000000,0.000000,0.000000,0.000000
        0,0,1898648.747757407,89210,0.281500,0.354934,1144,43,0,89207,0,0.000000,0.000000,0.000000,0.000000
        0,0,1898649.747737156,118790,0.281500,0.354934,1144,43,0,89207,0,0.000000,0.000000,0.000000,0.000000
        ```
        :param job:
        :type job:
        :return:
        :rtype:
        """
        flow_samples = SequentialPerfResult()
        cpu_samples = SequentialPerfResult()

        if not job.passed:
            flow_samples.append(PerfInterval(0, 0, "transactions", time.time()))
            cpu_samples.append(PerfInterval(0, 0, "cpu_percent", time.time()))
        elif job.what.is_crr_server():
            # Neper cannot report server-side stats for tcp_crr (memory
            # issues) and provides no time_start/time_end for it either,
            # so record zero intervals over the configured test length.
            test_len = float(job.what.params.test_length)
            flow_samples.append(
                PerfInterval(0, test_len, "transactions", time.time()))
            cpu_samples.append(
                PerfInterval(0, test_len, "cpu_percent", time.time()))
        else:
            job_start = job.result['start_time']
            samples = job.result['samples']
            if samples is not None:
                neper_start = float(samples[0]['time'])
                for first, second in pairwise(samples):
                    flow, cpu = get_interval(first, second,
                                             job_start, neper_start)
                    flow_samples.append(flow)
                    cpu_samples.append(cpu)

        # Wrap in ParallelPerfResult for now for easier graphing.
        # TODO: update this once multiple flows and threads are supported.
        wrapped_flow = ParallelPerfResult()
        wrapped_flow.append(flow_samples)
        wrapped_cpu = ParallelPerfResult()
        wrapped_cpu.append(cpu_samples)
        return wrapped_flow, wrapped_cpu
Пример #5
0
    def _parse_job_streams(self, job):
        """Collect per-stream throughput intervals from an iperf job.

        Returns a ParallelPerfResult with one SequentialPerfResult per
        stream; a failed job yields a single zero interval instead.
        """
        streams_result = ParallelPerfResult()
        if not job.passed:
            streams_result.append(PerfInterval(0, 0, "bits"))
            return streams_result

        # one sequential series per stream reported in the "end" section
        for _ in job.result["data"]["end"]["streams"]:
            streams_result.append(SequentialPerfResult())

        for interval in job.result["data"]["intervals"]:
            for idx, stream in enumerate(interval["streams"]):
                sample = PerfInterval(
                    stream["bytes"] * 8, stream["seconds"], "bits")
                streams_result[idx].append(sample)
        return streams_result
Пример #6
0
    def _parse_job_streams(self, job):
        """Collect timestamped per-stream throughput intervals.

        Interval timestamps are the job start time plus each interval's
        relative start offset. A failed job yields a single zero interval
        stamped with the current time.
        """
        streams_result = ParallelPerfResult()
        if not job.passed:
            streams_result.append(PerfInterval(0, 0, "bits", time.time()))
            return streams_result

        for _ in job.result["data"]["end"]["streams"]:
            streams_result.append(SequentialPerfResult())

        job_start = job.result["data"]["start"]["timestamp"]["timesecs"]
        for interval in job.result["data"]["intervals"]:
            # offset of this interval relative to the job start
            offset = interval["sum"]["start"]
            for idx, stream in enumerate(interval["streams"]):
                streams_result[idx].append(
                    PerfInterval(stream["bytes"] * 8,
                                 stream["seconds"],
                                 "bits", job_start + offset))
        return streams_result
Пример #7
0
    def _parse_results_by_port(self, job, port, flow):
        """Build FlowMeasurementResults for a single generator port.

        The success path differences consecutive per-port packet counter
        snapshots to produce per-interval tx/rx packet counts; the global
        cpu_util value is recorded for both generator and receiver stats.
        A failed job produces zero-valued placeholder intervals.

        :param job: finished job whose ``result`` holds ``start_time``
            and a ``data`` list of timestamped counter snapshots
        :param port: key used to index each snapshot's ``measurement``
        :param flow: flow object attached to the returned results
        :return: populated FlowMeasurementResults
        """
        results = FlowMeasurementResults(measurement=self, flow=flow)
        results.generator_results = SequentialPerfResult()
        results.generator_cpu_stats = SequentialPerfResult()

        results.receiver_results = SequentialPerfResult()
        results.receiver_cpu_stats = SequentialPerfResult()

        if not job.passed:
            # BUGFIX: the failure branch used unit "packets" while the
            # success path below uses "pkts"; unify on "pkts" so failed
            # and passed runs aggregate under the same unit.
            results.generator_results.append(PerfInterval(0, 0, "pkts"))
            results.generator_cpu_stats.append(PerfInterval(0, 0, "cpu_percent"))
            results.receiver_results.append(PerfInterval(0, 0, "pkts"))
            results.receiver_cpu_stats.append(PerfInterval(0, 0, "cpu_percent"))
        else:
            prev_time = job.result["start_time"]
            prev_tx_val = 0
            prev_rx_val = 0
            for i in job.result["data"]:
                time_delta = i["timestamp"] - prev_time
                tx_delta = i["measurement"][port]["opackets"] - prev_tx_val
                rx_delta = i["measurement"][port]["ipackets"] - prev_rx_val
                results.generator_results.append(PerfInterval(
                            tx_delta,
                            time_delta,
                            "pkts"))
                results.receiver_results.append(PerfInterval(
                            rx_delta,
                            time_delta,
                            "pkts"))

                prev_time = i["timestamp"]
                prev_tx_val = i["measurement"][port]["opackets"]
                prev_rx_val = i["measurement"][port]["ipackets"]

                # NOTE(review): cpu_util looks like an instantaneous
                # global value, not a per-port delta — confirm upstream
                cpu_delta = i["measurement"]["global"]["cpu_util"]
                results.generator_cpu_stats.append(PerfInterval(
                    cpu_delta,
                    time_delta,
                    "cpu_percent"))
                results.receiver_cpu_stats.append(PerfInterval(
                    cpu_delta,
                    time_delta,
                    "cpu_percent"))
        return results
Пример #8
0
from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
from lnst.RecipeCommon.Perf.Results import ParallelPerfResult
from lnst.RecipeCommon.Perf.Results import PerfInterval

from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import FlowMeasurementResults
from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import BaseMeasurement

nonzero_measurement = SequentialPerfResult(
    ParallelPerfResult(
        ParallelPerfResult(
            SequentialPerfResult([
                PerfInterval(value=8708503410.285599,
                             duration=1.0004048347473145,
                             unit="bits",
                             timestamp=1642059692),
                PerfInterval(value=8714238798.109116,
                             duration=1.000683069229126,
                             unit="bits",
                             timestamp=1642059693.000405),
                PerfInterval(value=8695993478.856504,
                             duration=0.9989690780639648,
                             unit="bits",
                             timestamp=1642059694.001088),
                PerfInterval(value=8719970801.200619,
                             duration=1.000675916671753,
                             unit="bits",
                             timestamp=1642059695.000057),
                PerfInterval(value=8682960442.327856,
                             duration=0.9997539520263672,
                             unit="bits",
                             timestamp=1642059696.000733),
Пример #9
0
 def _create_cpu_intervals(self, duration, cpu_intervals):
     """Map each CPU stat to a PerfInterval spanning the given duration.

     :param duration: measurement window length shared by every interval
     :param cpu_intervals: mapping of stat name -> accumulated value
     :return: dict with the same keys and PerfInterval values
     """
     # dict comprehension; no need to materialize items() into a list
     return {key: PerfInterval(value, duration, "time units")
             for key, value in cpu_intervals.items()}