Example #1
0
ctl.wait(5)

# Run the TRex client module on h1; allow 10 extra seconds per test run
# beyond the configured traffic duration before giving up.
results = h1.run(trex_client_mod, timeout=(test_duration + 10) * test_runs)
trex_result = results.get_result()

# The TRex server runs until interrupted -- stop it now that the client
# is done.
trex_server.intr()

#============================================
# Aggregate result
#============================================

# Per-run received packet rates for each of the two ports.
port0_rates = [i["port_0"]["rx_pps"] for i in trex_result["res_data"]["results"]]
port1_rates = [i["port_1"]["rx_pps"] for i in trex_result["res_data"]["results"]]
# bugfix: materialize the aggregate as a list. On Python 3, map() returns
# a one-shot iterator, so std_deviation() below would exhaust it and the
# subsequent sum()/len() calls would fail (len() is not even defined for
# a map object). A list comprehension behaves identically on Python 2.
aggregate_rates = [sum(pair) for pair in zip(port0_rates, port1_rates)]

aggr_std_dev = std_deviation(aggregate_rates)

avg_rate = sum(aggregate_rates) / len(aggregate_rates)
avg_rate_port0 = sum(port0_rates) / len(port0_rates)
avg_rate_port1 = sum(port1_rates) / len(port1_rates)
# 2 x std deviation covers ~95% of samples (68-95-99.7 rule).
rate_deviation = 2 * aggr_std_dev

# prepare PerfRepo result for tcp
pr_result = perf_api.new_result("ovs_dpdk_pvp_2streams_id",
                                "ovs_dpdk_pvp_2streams",
                                hash_ignore=[r'kernel_release',
                                             r'redhat_release',
                                             r'trex_path',
                                             r'dpdk_version',
                                             r'test_conf.duration',
                                             r'test_conf.runs'])
Example #2
0
    def _run_client(self, cmd):
        """Run netperf client processes and evaluate the measured rates.

        Performs ``self._runs`` measurement runs, each consisting of
        ``self._num_parallel`` parallel netperf client processes whose
        parsed results are summed into one per-run result.  The per-run
        rates are averaged and the average is checked against the
        configured maximum deviation and threshold interval.

        :param cmd: netperf client command line to execute
        :returns: ``(passed, res_data)`` where ``passed`` is a bool and
            ``res_data`` a dict carrying "rate", "rate_deviation", "msg"
            and, when runs succeeded, the per-run "results" list
        """
        logging.debug("running as client...")

        res_data = {}
        res_data["testname"] = self._testname

        rv = 0          # accumulated return codes; non-zero => a run failed
        results = []    # one summed result dict per successful run
        rates = []      # the "rate" entry of each result, for averaging
        for i in range(1, self._runs + 1):
            if self._runs > 1:
                logging.info("Netperf starting run %d" % i)
            clients = []
            client_results = []
            # separate loop variable -- the original reused `i`, clobbering
            # the run counter that the log message above reports
            for _ in range(0, self._num_parallel):
                clients.append(ShellProcess(cmd))

            for client in clients:
                ret_code = None
                try:
                    ret_code = client.wait()
                    rv += ret_code
                except OSError as e:
                    # wait() interrupted -> kill the client, ret_code stays
                    # None so its output is not counted as a result
                    if e.errno == errno.EINTR:
                        client.kill()

                output = client.read_nonblocking()
                logging.debug(output)

                if ret_code is not None and ret_code == 0:
                    client_results.append(self._parse_output(output))

            if len(client_results) > 0:
                # accumulate all the parallel results into one
                result = client_results[0]
                for res in client_results[1:]:
                    result = self._sum_results(result, res)

                results.append(result)
                rates.append(results[-1]["rate"])

        # bugfix: the original tested `results > 1`, comparing a list to an
        # int -- always True on Python 2 (cross-type ordering), a TypeError
        # on Python 3.  Store the per-run results whenever any were
        # collected, matching the Python 2 effective behavior.
        if len(results) > 0:
            res_data["results"] = results

        if len(rates) > 0:
            rate = sum(rates) / len(rates)
        else:
            rate = 0.0

        if len(rates) > 1:
            # setting deviation to 2xstd_deviation because of the 68-95-99.7
            # rule this seems comparable to the -I 99 netperf setting
            res_data["std_deviation"] = std_deviation(rates)
            rate_deviation = 2 * res_data["std_deviation"]
        elif len(rates) == 1 and self._confidence is not None:
            # single run: derive the deviation from netperf's own confidence
            # interval width (a percentage of the measured rate)
            result = results[0]
            rate_deviation = rate * (result["confidence"][1] / 100)
        else:
            rate_deviation = 0.0

        res_data["rate"] = rate
        res_data["rate_deviation"] = rate_deviation

        rate_pretty = self._pretty_rate(rate)
        rate_dev_pretty = self._pretty_rate(rate_deviation,
                                            unit=rate_pretty["unit"])

        if rv != 0 and self._runs == 1:
            res_data["msg"] = "Could not get performance throughput!"
            logging.info(res_data["msg"])
            return (False, res_data)
        elif rv != 0 and self._runs > 1:
            res_data["msg"] = "At least one of the Netperf runs failed, "\
                              "check the logs and result data for more "\
                              "information."
            logging.info(res_data["msg"])
            return (False, res_data)

        res_val = False
        res_data["msg"] = "Measured rate was %.2f +-%.2f %s" %\
                                            (rate_pretty["rate"],
                                             rate_dev_pretty["rate"],
                                             rate_pretty["unit"])
        if rate > 0.0:
            res_val = True
        else:
            # nothing was measured at all -> fail early
            res_val = False
            return (res_val, res_data)

        if self._max_deviation is not None:
            if self._max_deviation["type"] == "percent":
                percentual_deviation = (rate_deviation / rate) * 100
                if percentual_deviation > self._max_deviation["value"]:
                    res_val = False
                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
                                      "deviation than allowed (+-%.2f %%)" %\
                                      (rate_pretty["rate"],
                                       rate_dev_pretty["rate"],
                                       rate_pretty["unit"],
                                       self._max_deviation["value"])
                    return (res_val, res_data)
            elif self._max_deviation["type"] == "absolute":
                if rate_deviation > self._max_deviation["value"]["rate"]:
                    pretty_deviation = self._pretty_rate(
                        self._max_deviation["value"]["rate"])
                    res_val = False
                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
                                      "deviation than allowed (+-%.2f %s)" %\
                                      (rate_pretty["rate"],
                                       rate_dev_pretty["rate"],
                                       rate_pretty["unit"],
                                       pretty_deviation["rate"],
                                       pretty_deviation["unit"])
                    return (res_val, res_data)
        if self._threshold_interval is not None:
            result_interval = (rate - rate_deviation, rate + rate_deviation)

            threshold_pretty = self._pretty_rate(self._threshold["rate"])
            threshold_dev_pretty = self._pretty_rate(
                self._threshold_deviation["rate"],
                unit=threshold_pretty["unit"])

            # fail only when the measured interval lies entirely below the
            # threshold interval; overlapping intervals count as a pass
            if self._threshold_interval[0] > result_interval[1]:
                res_val = False
                res_data["msg"] = "Measured rate %.2f +-%.2f %s is lower "\
                                  "than threshold %.2f +-%.2f %s" %\
                                  (rate_pretty["rate"],
                                   rate_dev_pretty["rate"],
                                   rate_pretty["unit"],
                                   threshold_pretty["rate"],
                                   threshold_dev_pretty["rate"],
                                   threshold_pretty["unit"])
                return (res_val, res_data)
            else:
                res_val = True
                res_data["msg"] = "Measured rate %.2f +-%.2f %s is higher "\
                                  "than threshold %.2f +-%.2f %s" %\
                                  (rate_pretty["rate"],
                                   rate_dev_pretty["rate"],
                                   rate_pretty["unit"],
                                   threshold_pretty["rate"],
                                   threshold_dev_pretty["rate"],
                                   threshold_pretty["unit"])
                return (res_val, res_data)
        return (res_val, res_data)
Example #3
0
 def std_deviation(self):
     """Return the standard deviation of the contained items' averages."""
     averages = [item.average for item in self]
     return std_deviation(averages)
Example #4
0
File: Netperf.py  Project: jpirko/lnst
    def _run_client(self, cmd):
        """Run netperf client processes and evaluate the measured rates.

        Performs ``self._runs`` measurement runs, each consisting of
        ``self._num_parallel`` parallel netperf client processes whose
        parsed results are summed into one per-run result.  The per-run
        rates are averaged and the average is checked against the
        configured maximum deviation and threshold interval.

        :param cmd: netperf client command line to execute
        :returns: ``(passed, res_data)`` where ``passed`` is a bool and
            ``res_data`` a dict carrying "rate", "rate_deviation", "msg"
            and, when runs succeeded, the per-run "results" list
        """
        logging.debug("running as client...")

        res_data = {}
        res_data["testname"] = self._testname

        rv = 0          # accumulated return codes; non-zero => a run failed
        results = []    # one summed result dict per successful run
        rates = []      # the "rate" entry of each result, for averaging
        for i in range(1, self._runs + 1):
            if self._runs > 1:
                logging.info("Netperf starting run %d" % i)
            clients = []
            client_results = []
            # separate loop variable -- the original reused `i`, clobbering
            # the run counter that the log message above reports
            for _ in range(0, self._num_parallel):
                clients.append(ShellProcess(cmd))

            for client in clients:
                ret_code = None
                try:
                    ret_code = client.wait()
                    rv += ret_code
                except OSError as e:
                    # wait() interrupted -> kill the client, ret_code stays
                    # None so its output is not counted as a result
                    if e.errno == errno.EINTR:
                        client.kill()

                output = client.read_nonblocking()
                logging.debug(output)

                if ret_code is not None and ret_code == 0:
                    client_results.append(self._parse_output(output))

            if len(client_results) > 0:
                # accumulate all the parallel results into one
                result = client_results[0]
                for res in client_results[1:]:
                    result = self._sum_results(result, res)

                results.append(result)
                rates.append(results[-1]["rate"])

        # bugfix: the original tested `results > 1`, comparing a list to an
        # int -- always True on Python 2 (cross-type ordering), a TypeError
        # on Python 3.  Store the per-run results whenever any were
        # collected, matching the Python 2 effective behavior.
        if len(results) > 0:
            res_data["results"] = results

        if len(rates) > 0:
            rate = sum(rates) / len(rates)
        else:
            rate = 0.0

        if len(rates) > 1:
            # setting deviation to 2xstd_deviation because of the 68-95-99.7
            # rule this seems comparable to the -I 99 netperf setting
            res_data["std_deviation"] = std_deviation(rates)
            rate_deviation = 2 * res_data["std_deviation"]
        elif len(rates) == 1 and self._confidence is not None:
            # single run: derive the deviation from netperf's own confidence
            # interval width (a percentage of the measured rate)
            result = results[0]
            rate_deviation = rate * (result["confidence"][1] / 100)
        else:
            rate_deviation = 0.0

        res_data["rate"] = rate
        res_data["rate_deviation"] = rate_deviation

        rate_pretty = self._pretty_rate(rate)
        rate_dev_pretty = self._pretty_rate(rate_deviation,
                                            unit=rate_pretty["unit"])

        if rv != 0 and self._runs == 1:
            res_data["msg"] = "Could not get performance throughput!"
            logging.info(res_data["msg"])
            return (False, res_data)
        elif rv != 0 and self._runs > 1:
            res_data["msg"] = "At least one of the Netperf runs failed, "\
                              "check the logs and result data for more "\
                              "information."
            logging.info(res_data["msg"])
            return (False, res_data)

        res_val = False
        res_data["msg"] = "Measured rate was %.2f +-%.2f %s" %\
                                            (rate_pretty["rate"],
                                             rate_dev_pretty["rate"],
                                             rate_pretty["unit"])
        if rate > 0.0:
            res_val = True
        else:
            # nothing was measured at all -> fail early
            res_val = False
            return (res_val, res_data)

        if self._max_deviation is not None:
            if self._max_deviation["type"] == "percent":
                percentual_deviation = (rate_deviation / rate) * 100
                if percentual_deviation > self._max_deviation["value"]:
                    res_val = False
                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
                                      "deviation than allowed (+-%.2f %%)" %\
                                      (rate_pretty["rate"],
                                       rate_dev_pretty["rate"],
                                       rate_pretty["unit"],
                                       self._max_deviation["value"])
                    return (res_val, res_data)
            elif self._max_deviation["type"] == "absolute":
                if rate_deviation > self._max_deviation["value"]["rate"]:
                    pretty_deviation = self._pretty_rate(
                        self._max_deviation["value"]["rate"])
                    res_val = False
                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
                                      "deviation than allowed (+-%.2f %s)" %\
                                      (rate_pretty["rate"],
                                       rate_dev_pretty["rate"],
                                       rate_pretty["unit"],
                                       pretty_deviation["rate"],
                                       pretty_deviation["unit"])
                    return (res_val, res_data)
        if self._threshold_interval is not None:
            result_interval = (rate - rate_deviation, rate + rate_deviation)

            threshold_pretty = self._pretty_rate(self._threshold["rate"])
            threshold_dev_pretty = self._pretty_rate(
                self._threshold_deviation["rate"],
                unit=threshold_pretty["unit"])

            # fail only when the measured interval lies entirely below the
            # threshold interval; overlapping intervals count as a pass
            if self._threshold_interval[0] > result_interval[1]:
                res_val = False
                res_data["msg"] = "Measured rate %.2f +-%.2f %s is lower "\
                                  "than threshold %.2f +-%.2f %s" %\
                                  (rate_pretty["rate"],
                                   rate_dev_pretty["rate"],
                                   rate_pretty["unit"],
                                   threshold_pretty["rate"],
                                   threshold_dev_pretty["rate"],
                                   threshold_pretty["unit"])
                return (res_val, res_data)
            else:
                res_val = True
                res_data["msg"] = "Measured rate %.2f +-%.2f %s is higher "\
                                  "than threshold %.2f +-%.2f %s" %\
                                  (rate_pretty["rate"],
                                   rate_dev_pretty["rate"],
                                   rate_pretty["unit"],
                                   threshold_pretty["rate"],
                                   threshold_dev_pretty["rate"],
                                   threshold_pretty["unit"])
                return (res_val, res_data)
        return (res_val, res_data)