Example #1
    def _parse_job_samples(
        self, job: Job
    ) -> Tuple[ParallelPerfResult, ParallelPerfResult]:
        """
        Each PerfInterval is computed from a pair of consecutive samples.csv
        lines: line #2 (l2) minus line #1 (l1) gives the number of
        transactions and the duration. The interval timestamp is the time of
        l1, converted from CLOCK_MONOTONIC time to unix time.
        samples.csv looks like this:
        ```
        tid,flow_id,time,transactions,utime,stime,maxrss,minflt,majflt,nvcsw,nivcsw,latency_min,latency_mean,latency_max,latency_stddev
        0,0,1898645.747723502,1,0.000371,0.000000,1144,39,0,2,0,0.000000,0.000000,0.000000,-nan
        0,0,1898647.747733162,59322,0.185458,0.241758,1144,43,0,59320,0,0.000000,0.000000,0.000000,0.000000
        0,0,1898648.747757407,89210,0.281500,0.354934,1144,43,0,89207,0,0.000000,0.000000,0.000000,0.000000
        0,0,1898649.747737156,118790,0.281500,0.354934,1144,43,0,89207,0,0.000000,0.000000,0.000000,0.000000
        ```
        :param job: finished neper job whose samples should be parsed
        :type job: Job
        :return: a tuple of (transaction results, cpu results)
        :rtype: Tuple[ParallelPerfResult, ParallelPerfResult]
        """

        results = SequentialPerfResult()
        cpu_results = SequentialPerfResult()

        if not job.passed:
            results.append(PerfInterval(0, 0, "transactions", time.time()))
            cpu_results.append(PerfInterval(0, 0, "cpu_percent", time.time()))
        elif job.what.is_crr_server():
            # Neper doesn't support server stats for tcp_crr due to memory
            # issues, so use a PerfInterval value of 0. Neper also has no
            # time_start/time_end for tcp_crr, so use the configured test
            # length as the duration.
            d = float(job.what.params.test_length)
            results.append(PerfInterval(0, d, "transactions", time.time()))
            cpu_results.append(PerfInterval(0, d, "cpu_percent", time.time()))
        else:
            job_start = job.result['start_time']
            samples = job.result['samples']
            if samples is not None:
                neper_start_time = float(samples[0]['time'])
                for s_start, s_end in pairwise(samples):
                    flow, cpu = get_interval(s_start, s_end,
                                             job_start, neper_start_time)
                    results.append(flow)
                    cpu_results.append(cpu)

        # Wrap in ParallelPerfResult for now for easier graphing.
        # TODO: update this when we add support for multiple flows
        # and threads.
        p_results = ParallelPerfResult()
        p_results.append(results)
        p_cpu_results = ParallelPerfResult()
        p_cpu_results.append(cpu_results)
        return p_results, p_cpu_results
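
For context, here is a minimal sketch of what the pairwise() iteration and the get_interval() helper used above might look like. The field arithmetic is an assumption reconstructed from the docstring (deltas between consecutive samples.csv rows, CLOCK_MONOTONIC converted to unix time), not the project's actual implementation:
```
from itertools import pairwise  # Python 3.10+; older versions need a backport

def get_interval(s_start, s_end, job_start, neper_start_time):
    # Hypothetical sketch: transactions and duration are the deltas
    # between two consecutive samples.csv rows.
    duration = float(s_end["time"]) - float(s_start["time"])
    transactions = int(s_end["transactions"]) - int(s_start["transactions"])
    # Convert the CLOCK_MONOTONIC sample time to unix time by offsetting
    # it against the job's unix start time (assumption).
    timestamp = job_start + (float(s_start["time"]) - neper_start_time)
    # CPU usage over the interval as a percentage (assumption).
    cpu_secs = (float(s_end["utime"]) + float(s_end["stime"])
                - float(s_start["utime"]) - float(s_start["stime"]))
    cpu_percent = 100 * cpu_secs / duration if duration else 0
    return (PerfInterval(transactions, duration, "transactions", timestamp),
            PerfInterval(cpu_percent, duration, "cpu_percent", timestamp))
```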
Example #2
    def _parse_job_streams(self, job):
        result = ParallelPerfResult()
        if not job.passed:
            result.append(PerfInterval(0, 0, "bits"))
        else:
            # one SequentialPerfResult per parallel stream
            for _ in job.result["data"]["end"]["streams"]:
                result.append(SequentialPerfResult())

            for interval in job.result["data"]["intervals"]:
                for i, stream in enumerate(interval["streams"]):
                    result[i].append(
                        PerfInterval(stream["bytes"] * 8, stream["seconds"],
                                     "bits"))
        return result
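
For reference, a minimal sketch of the job.result["data"] shape this parser walks, inferred from the key accesses above; the field names match the structure of iperf3's --json output, and the values here are made up for illustration:
```
data = {
    "end": {
        # one entry per parallel stream; only the list length is used above
        "streams": [{}, {}],
    },
    "intervals": [
        {
            "streams": [
                {"bytes": 1310720, "seconds": 1.0},  # stream 0, interval 0
                {"bytes": 655360, "seconds": 1.0},   # stream 1, interval 0
            ],
        },
    ],
}
```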
Example #3
    def _parse_job_streams(self, job):
        result = ParallelPerfResult()
        if not job.passed:
            result.append(PerfInterval(0, 0, "bits", time.time()))
        else:
            # one SequentialPerfResult per parallel stream
            for _ in job.result["data"]["end"]["streams"]:
                result.append(SequentialPerfResult())

            job_start = job.result["data"]["start"]["timestamp"]["timesecs"]
            for interval in job.result["data"]["intervals"]:
                interval_start = interval["sum"]["start"]
                for i, stream in enumerate(interval["streams"]):
                    result[i].append(PerfInterval(stream["bytes"] * 8,
                                                  stream["seconds"],
                                                  "bits", job_start + interval_start))
        return result
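
Example #3 differs from example #2 only in that each PerfInterval now carries an absolute unix timestamp: the job's start time plus the interval's offset relative to that start. A worked illustration with assumed values:
```
job_start = 1700000000      # job.result["data"]["start"]["timestamp"]["timesecs"]
interval_start = 2.0        # interval["sum"]["start"], seconds since job start
timestamp = job_start + interval_start  # 1700000002.0, passed to PerfInterval
```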