Example #1
    def run(self, result):
        """execute the benchmark"""

        if "options" in self.scenario_cfg:
            options = "-s %s" % \
                self.scenario_cfg['options'].get("packetsize", '56')
        else:
            options = ""

        destination = self.context_cfg['target'].get('ipaddr', '127.0.0.1')
        dest_list = [s.strip() for s in destination.split(',')]

        rtt_result = {}
        ping_result = {"rtt": rtt_result}
        sla_max_rtt = self.scenario_cfg.get("sla", {}).get("max_rtt")

        for pos, dest in enumerate(dest_list):
            if 'targets' in self.scenario_cfg:
                target_vm = self.scenario_cfg['targets'][pos]
            else:
                target_vm = self.scenario_cfg['target']

            LOG.debug("ping %s %s", options, dest)
            with open(self.target_script, "r") as stdin_file:
                exit_status, stdout, stderr = self.connection.execute(
                    "/bin/sh -s {0} {1}".format(dest, options),
                    stdin=stdin_file)

            if exit_status != 0:
                raise RuntimeError(stderr)

            if isinstance(target_vm, dict):
                target_vm_name = target_vm.get("name")
            else:
                target_vm_name = target_vm.split('.')[0]
            if stdout:
                rtt_result[target_vm_name] = float(stdout.strip())
                # store result before potential AssertionError
                result.update(utils.flatten_dict_key(ping_result))
                if sla_max_rtt is not None:
                    sla_max_rtt = float(sla_max_rtt)
                    assert rtt_result[target_vm_name] <= sla_max_rtt,\
                        "rtt %f > sla: max_rtt(%f); " % \
                        (rtt_result[target_vm_name], sla_max_rtt)
            else:
                LOG.error("ping '%s' '%s' timeout", options, target_vm)
                # we need to specify a result to satisfy influxdb schema
                # choose a very large number to indicate timeout
                # in this case choose an order of magnitude greater than the SLA
                rtt_result[target_vm_name] = float(self.PING_ERROR_RTT)
                # store result before potential AssertionError
                result.update(utils.flatten_dict_key(ping_result))
                if sla_max_rtt is not None:
                    raise AssertionError(
                        "packet dropped rtt {:f} > sla: max_rtt({:f})".format(
                            rtt_result[target_vm_name], sla_max_rtt))
                else:
                    raise AssertionError("packet dropped rtt {:f}".format(
                        rtt_result[target_vm_name]))
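Because ping_result nests each per-target RTT under "rtt", the key that flatten_dict_key writes into result is presumably of the dotted form "rtt.<target_vm_name>" (the same '.'-joined keys seen in Example #3 below). A hypothetical flattened result for illustration:

# Hypothetical illustration only: a target_vm of "ares.demo" would yield
# the short name "ares" via target_vm.split('.')[0], so its measured RTT
# ends up under the dotted key "rtt.ares".
result = {"rtt.ares": 0.595}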
Example #2
    def test_ramspeed_successful_run_sla(self, mock_ssh):

        options = {"test_id": 1, "load": 16, "block_size": 32}
        args = {"options": options, "sla": {"min_bandwidth": 6000}}
        r = ramspeed.Ramspeed(args, self.ctx)

        sample_output = '{"Result": [{"Test_type": "INTEGER & WRITING",\
 "Block_size(kb)": 1, "Bandwidth(MBps)": 19909.18}, {"Test_type":\
 "INTEGER & WRITING", "Block_size(kb)": 2, "Bandwidth(MBps)": 19873.89},\
 {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 4, "Bandwidth(MBps)":\
 19907.56}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 8,\
 "Bandwidth(MBps)": 19906.94}, {"Test_type": "INTEGER & WRITING",\
 "Block_size(kb)": 16, "Bandwidth(MBps)": 19881.74}, {"Test_type":\
 "INTEGER & WRITING", "Block_size(kb)": 32, "Bandwidth(MBps)": 19395.65},\
 {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 64, "Bandwidth(MBps)":\
 17623.14}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 128,\
 "Bandwidth(MBps)": 17677.36}, {"Test_type": "INTEGER & WRITING",\
 "Block_size(kb)": 256, "Bandwidth(MBps)": 16113.49}, {"Test_type":\
 "INTEGER & WRITING", "Block_size(kb)": 512, "Bandwidth(MBps)": 14659.19},\
 {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 1024, "Bandwidth(MBps)":\
 14680.75}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 2048,\
 "Bandwidth(MBps)": 14756.45}, {"Test_type": "INTEGER & WRITING",\
 "Block_size(kb)": 4096, "Bandwidth(MBps)": 14604.44}, {"Test_type":\
 "INTEGER & WRITING", "Block_size(kb)": 8192, "Bandwidth(MBps)": 14159.86},\
 {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 16384,\
 "Bandwidth(MBps)": 14128.94}, {"Test_type": "INTEGER & WRITING",\
 "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'

        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
        r.run(self.result)
        expected_result = utils.flatten_dict_key(
            jsonutils.loads(sample_output))
        self.assertEqual(self.result, expected_result)
Example #3
    def test__dict_key_flatten(self):
        line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,' \
               'mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
        # need to sort for assert to work
        line = ",".join(sorted(line.split(',')))
        flattened_data = utils.flatten_dict_key(self.data['benchmark']['data'])
        result = ",".join(
            ("=".join(item) for item in sorted(flattened_data.items())))
        self.assertEqual(result, line)
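The expected string in the test above shows nested dict keys being joined with '.' into flat key=value pairs. A minimal sketch of such a flattening helper, assuming '.'-joined dict keys and index-suffixed list entries (an illustration only, not the actual yardstick utils.flatten_dict_key implementation):

def flatten_dict_key(data, parent_key='', sep='.'):
    # Minimal sketch (assumed behaviour): nested dict keys are joined with
    # `sep`, e.g. {"mpstat": {"cpu0": {"%idle": "99.00"}}} becomes
    # {"mpstat.cpu0.%idle": "99.00"}; list items get an index suffix,
    # e.g. {"Result": [{"a": 1}]} -> {"Result0.a": 1}.
    flat = {}
    for key, value in data.items():
        new_key = "%s%s%s" % (parent_key, sep, key) if parent_key else key
        if isinstance(value, dict):
            flat.update(flatten_dict_key(value, new_key, sep))
        elif isinstance(value, list):
            for index, item in enumerate(value):
                indexed_key = "%s%d" % (new_key, index)
                if isinstance(item, dict):
                    flat.update(flatten_dict_key(item, indexed_key, sep))
                else:
                    flat[indexed_key] = item
        else:
            flat[new_key] = value
    return flat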
Example #4
    def test_iperf_successful_sla(self, mock_ssh):
        options = {}
        args = {'options': options, 'sla': {'bytes_per_second': 15000000}}
        result = {}

        p = iperf3.Iperf(args, self.ctx)
        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
        p.host = mock_ssh.SSH.from_node()

        sample_output = self._read_sample_output(self.output_name_tcp)
        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
        expected_result = utils.flatten_dict_key(
            jsonutils.loads(sample_output))
        p.run(result)
        self.assertEqual(result, expected_result)
Example #5
    def test_iperf_successful_sla_jitter(self, mock_ssh):
        options = {"protocol": "udp", "bandwidth": "20m"}
        args = {'options': options, 'sla': {'jitter': 10}}
        result = {}

        p = iperf3.Iperf(args, self.ctx)
        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
        p.host = mock_ssh.SSH.from_node()

        sample_output = self._read_sample_output(self.output_name_udp)
        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
        expected_result = utils.flatten_dict_key(
            jsonutils.loads(sample_output))
        p.run(result)
        self.assertEqual(result, expected_result)
Example #6
    def test_ramspeed_mem_successful_run_sla(self, mock_ssh):
        options = {"test_id": 3, "load": 16, "block_size": 32, "iteration": 1}
        args = {"options": options, "sla": {"min_bandwidth": 6000}}
        r = ramspeed.Ramspeed(args, self.ctx)

        sample_output = '{"Result": [{"Test_type": "INTEGER Copy:",\
 "Bandwidth(MBps)": 8353.97}, {"Test_type": "INTEGER Scale:",\
 "Bandwidth(MBps)": 9078.59}, {"Test_type": "INTEGER Add:",\
 "Bandwidth(MBps)": 10057.48}, {"Test_type": "INTEGER Triad:",\
 "Bandwidth(MBps)": 10116.27}, {"Test_type": "INTEGER AVERAGE:",\
 "Bandwidth(MBps)": 9401.58}]}'

        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
        r.run(self.result)
        expected_result = utils.flatten_dict_key(
            jsonutils.loads(sample_output))
        self.assertEqual(self.result, expected_result)
Example #7
    def run(self, result):
        """execute the benchmark"""

        if "options" in self.scenario_cfg:
            options = "-s %s" % \
                self.scenario_cfg['options'].get("packetsize", '56')
        else:
            options = ""

        destination = self.context_cfg['target'].get('ipaddr', '127.0.0.1')
        dest_list = [s.strip() for s in destination.split(',')]

        rtt_result = {}
        ping_result = {"rtt": rtt_result}

        for pos, dest in enumerate(dest_list):
            if 'targets' in self.scenario_cfg:
                target_vm = self.scenario_cfg['targets'][pos]
            else:
                target_vm = self.scenario_cfg['target']

            LOG.debug("ping '%s' '%s'", options, dest)
            with open(self.target_script, "r") as stdin_file:
                exit_status, stdout, stderr = self.connection.execute(
                    "/bin/sh -s {0} {1}".format(dest, options),
                    stdin=stdin_file)

            if exit_status != 0:
                raise RuntimeError(stderr)

            if stdout:
                if isinstance(target_vm, dict):
                    target_vm_name = target_vm.get("name")
                else:
                    target_vm_name = target_vm.split('.')[0]
                rtt_result[target_vm_name] = float(stdout)
                if "sla" in self.scenario_cfg:
                    sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
                    assert rtt_result[target_vm_name] <= sla_max_rtt,\
                        "rtt %f > sla: max_rtt(%f); " % \
                        (rtt_result[target_vm_name], sla_max_rtt)
            else:
                LOG.error("ping '%s' '%s' timeout", options, target_vm)
        result.update(utils.flatten_dict_key(ping_result))
Example #8
    def run(self, result):
        """execute the benchmark"""

        if not self.setup_done:
            self.setup()

        options = self.scenario_cfg['options']
        test_id = options.get('type_id', 1)
        load = options.get('load', 8)
        block_size = options.get('block_size', 32)

        if test_id == 3 or test_id == 6:
            iteration = options.get('iteration', 1)
            cmd = "sudo bash ramspeed_mem_benchmark.sh %d %d %d %d" % \
                  (test_id, load, block_size, iteration)
        elif 0 < test_id <= 5:
            cmd = "sudo bash ramspeed_mark_benchmark.sh %d %d %d" % \
                  (test_id, load, block_size)
        # only test_id values 1-6 are valid in this scenario
        else:
            raise RuntimeError("No such type_id: %s for Ramspeed scenario" %
                               test_id)

        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)
        if status:
            raise RuntimeError(stderr)

        ramspeed_result = jsonutils.loads(stdout)
        result.update(utils.flatten_dict_key(ramspeed_result))

        if "sla" in self.scenario_cfg:
            sla_error = ""
            sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
            for i in ramspeed_result["Result"]:
                bw = i["Bandwidth(MBps)"]
                if bw < sla_min_bw:
                    sla_error += "Bandwidth %f < " \
                        "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
            assert sla_error == "", sla_error
Example #9
    def _data_to_line_protocol(self, data, case, criteria):
        msg = {}

        if not self.tags:
            self.tags = {
                'deploy_scenario': os.environ.get('DEPLOY_SCENARIO',
                                                  'unknown'),
                'installer': os.environ.get('INSTALLER_TYPE', 'unknown'),
                'pod_name': os.environ.get('NODE_NAME', 'unknown'),
                'version': os.environ.get('YARDSTICK_BRANCH', 'unknown')
            }

        point = {
            "measurement": case,
            "fields": utils.flatten_dict_key(data["data"]),
            "time": self._get_nano_timestamp(data),
            "tags": self._get_extended_tags(criteria),
        }
        msg["points"] = [point]
        msg["tags"] = self.tags

        return make_lines(msg).encode('utf-8')

    def _get_nano_timestamp(self, results):
        try:
            timestamp = results["timestamp"]
        except KeyError:
            timestamp = time.time()

        return str(int(float(timestamp) * 1000000000))
Example #10
    def run(self, result):
        """execute the benchmark"""
        if not self.setup_done:
            self.setup()

        # if run by a duration runner, get the duration and pass it as an arg
        time = self.scenario_cfg["runner"].get("duration", None) \
            if "runner" in self.scenario_cfg else None
        options = self.scenario_cfg['options']

        cmd = "iperf3 -c %s --json" % (self.context_cfg['target']['ipaddr'])

        # If there are no options specified
        if not options:
            options = {}

        use_UDP = False
        try:
            protocol = options.get("protocol")
            bandwidth = options.get('bandwidth')
            use_UDP = protocol == 'udp'
            if protocol:
                cmd += " --" + protocol
            if use_UDP and bandwidth:
                cmd += " --bandwidth " + bandwidth
            # if nodelay is in the options, the protocol may be None or 'tcp'
            if "nodelay" in options:
                cmd += " --nodelay"
        except AttributeError:
            LOG.warning("Can't parser the options in your config file!!!")

        # these options are mutually exclusive in iperf3
        if time:
            cmd += " %d" % time
        elif "bytes" in options:
            # number of bytes to transmit (instead of --time)
            cmd += " --bytes %d" % options["bytes"]
        elif "blockcount" in options:
            cmd += " --blockcount %d" % options["blockcount"]

        if "length" in options:
            cmd += " --length %s" % options["length"]

        if "window" in options:
            cmd += " --window %s" % options["window"]

        LOG.debug("Executing command: %s", cmd)

        status, stdout, stderr = self.host.execute(cmd)
        if status:
            # the error cause is reported in the JSON dict on stdout
            raise RuntimeError(stdout)

        # Note: convert all ints to floats in order to avoid
        # schema conflicts in influxdb. We probably should add
        # a format func in the future.
        iperf_result = jsonutils.loads(stdout, parse_int=float)
        result.update(utils.flatten_dict_key(iperf_result))

        if "sla" in self.scenario_cfg:
            sla_iperf = self.scenario_cfg["sla"]
            if not use_UDP:
                sla_bytes_per_second = int(sla_iperf["bytes_per_second"])

                # convert bits per second to bytes per second
                bit_per_second = \
                    int(iperf_result["end"]["sum_received"]["bits_per_second"])
                bytes_per_second = bit_per_second / 8
                assert bytes_per_second >= sla_bytes_per_second, \
                    "bytes_per_second %d < sla:bytes_per_second (%d); " % \
                    (bytes_per_second, sla_bytes_per_second)
            else:
                sla_jitter = float(sla_iperf["jitter"])

                jitter_ms = float(iperf_result["end"]["sum"]["jitter_ms"])
                assert jitter_ms <= sla_jitter, \
                    "jitter_ms  %f > sla:jitter %f; " % \
                    (jitter_ms, sla_jitter)
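For reference, with the options used in Example #5 above ({"protocol": "udp", "bandwidth": "20m"}) and a duration runner of 30 seconds, the command string assembled by this run() would look roughly as follows (a sketch; 10.0.0.2 is a placeholder for self.context_cfg['target']['ipaddr']):

# Hypothetical command produced by the option handling above for
# options = {"protocol": "udp", "bandwidth": "20m"} and duration = 30.
cmd = "iperf3 -c 10.0.0.2 --json --udp --bandwidth 20m --time 30"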
Example #11
File: lmbench.py Project: yc18/yardstick
    def run(self, result):
        """execute the benchmark"""

        if not self.setup_done:
            self.setup()

        options = self.scenario_cfg['options']
        test_type = options.get('test_type', 'latency')

        if test_type == 'latency':
            stride = options.get('stride', 128)
            stop_size = options.get('stop_size', 16.0)
            cmd = "sudo bash lmbench_latency.sh %f %d" % (stop_size, stride)
        elif test_type == 'bandwidth':
            size = options.get('size', 128)
            benchmark = options.get('benchmark', 'rd')
            warmup_repetitions = options.get('warmup', 0)
            cmd = "sudo bash lmbench_bandwidth.sh %d %s %d" % \
                  (size, benchmark, warmup_repetitions)
        elif test_type == 'latency_for_cache':
            repetition = options.get('repetition', 1)
            warmup = options.get('warmup', 0)
            cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
                  (repetition, warmup)
        else:
            raise RuntimeError("No such test_type: %s for Lmbench scenario" %
                               test_type)

        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)

        if status:
            raise RuntimeError(stderr)

        lmbench_result = {}
        if test_type == 'latency':
            lmbench_result.update({"latencies": jsonutils.loads(stdout)})
        else:
            lmbench_result.update(jsonutils.loads(stdout))
        result.update(utils.flatten_dict_key(lmbench_result))

        if "sla" in self.scenario_cfg:
            sla_error = ""
            if test_type == 'latency':
                sla_max_latency = int(self.scenario_cfg['sla']['max_latency'])
                for t_latency in lmbench_result["latencies"]:
                    latency = t_latency['latency']
                    if latency > sla_max_latency:
                        sla_error += "latency %f > sla:max_latency(%f); " \
                            % (latency, sla_max_latency)
            elif test_type == 'bandwidth':
                sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
                bw = lmbench_result["bandwidth(MBps)"]
                if bw < sla_min_bw:
                    sla_error += "bandwidth %f < " \
                                 "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
            elif test_type == 'latency_for_cache':
                sla_latency = float(self.scenario_cfg['sla']['max_latency'])
                cache_latency = float(lmbench_result['L1cache'])
                if sla_latency < cache_latency:
                    sla_error += "latency %f > sla:max_latency(%f); " \
                        % (cache_latency, sla_latency)
            self.verify_SLA(sla_error == "", sla_error)