Code Example #1
    def startup(self):
        executable = self.settings.get("interpreter", sys.executable)

        report_type = ".ldjson" if self.engine.is_functional_mode() else ".csv"
        report_tpl = self.engine.create_artifact("apiritif",
                                                 ".") + "%s" + report_type
        cmdline = [
            executable, "-m", "apiritif.loadgen", '--result-file-template',
            report_tpl
        ]

        load = self.get_load()
        if load.concurrency:
            cmdline += ['--concurrency', str(load.concurrency)]

        if load.iterations:
            cmdline += ['--iterations', str(load.iterations)]

        if load.hold:
            cmdline += ['--hold-for', str(load.hold)]

        if load.ramp_up:
            cmdline += ['--ramp-up', str(load.ramp_up)]

        if load.steps:
            cmdline += ['--steps', str(load.steps)]

        if self.__is_verbose():
            cmdline += ['--verbose']

        cmdline += [self.script]
        self.process = self.execute(cmdline)
        self._tailer = FileReader(filename=self.stdout.name,
                                  parent_logger=self.log)
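
Examples #1 and #2 show the two halves of the usual tailer lifecycle: the constructor installs a placeholder reader, and startup() swaps in a FileReader that tails the subprocess stdout. Below is a minimal sketch of the tailing side, assuming FileReader is importable from bzt.utils as in the Taurus codebase; the log file name is hypothetical.

import logging

from bzt.utils import FileReader  # assumption: Taurus import path

log = logging.getLogger("demo")
tailer = FileReader(filename="executor-stdout.log",  # hypothetical file
                    parent_logger=log)

# get_lines() is a generator: each call yields only the data appended since
# the previous call, which is why check() can poll the tailer repeatedly.
for line in tailer.get_lines():
    log.info("tail: %s", line.rstrip())

tailer.close()  # post_process() does the same, see Example #41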
Code Example #2
File: executor.py (Project: Avi-Labs/taurus)
 def __init__(self):
     super(ApiritifNoseExecutor, self).__init__()
     self._tailer = FileReader(file_opener=lambda _: None,
                               parent_logger=self.log)
     self.apiritif = None
     self.selenium = None
     self.test_mode = None
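
The file_opener=lambda _: None trick gives the executor a tailer that is safe to poll before startup() has produced any output file. A sketch of the assumption behind it: FileReader keeps calling the opener until it returns a real handle, and yields nothing in the meantime.

import logging

from bzt.utils import FileReader  # assumption: Taurus import path

log = logging.getLogger("demo")
placeholder = FileReader(file_opener=lambda _: None, parent_logger=log)

# Presumed behavior: no handle yet, so nothing is yielded and no IOError is
# raised if check() or post_process() poll the tailer early.
early_lines = list(placeholder.get_lines())  # expected: []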
Code Example #3
File: pbench.py (Project: wr-tty/taurus)
    def __init__(self, load, payload_filename, parent_logger):
        super(Scheduler, self).__init__()
        self.need_start_loop = None
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.load = load
        self.payload_file = FileReader(filename=payload_filename,
                                       parent_logger=self.log)
        if not load.duration and not load.iterations:
            self.iteration_limit = 1
        else:
            self.iteration_limit = load.iterations

        self.concurrency = load.concurrency if load.concurrency is not None else 1

        self.step_len = load.ramp_up / load.steps if load.steps and load.ramp_up else 0
        if load.throughput:
            self.ramp_up_slope = load.throughput / load.ramp_up if load.ramp_up else 0
            self.step_size = float(
                load.throughput) / load.steps if load.steps else 0
        else:
            self.ramp_up_slope = None
            self.step_size = float(
                self.concurrency) / load.steps if load.steps else 0

        self.count = 0.0
        self.time_offset = 0.0
        self.iterations = 0
Code Example #4
File: ab.py (Project: Emilnurg/Falcon-simple-API)
 def __init__(self, filename, parent_logger):
     super(TSVDataReader, self).__init__()
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.file = FileReader(filename=filename, parent_logger=self.log)
     self.skipped_header = False
     self.concurrency = None
     self.url_label = None
Code Example #5
File: pbench.py (Project: wr-tty/taurus)
 def __init__(self, filename, parent_logger):
     super(PBenchStatsReader, self).__init__()
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.file = FileReader(filename=filename, parent_logger=self.log)
     self.buffer = ''
     self.data = {}
     self.last_data = 0
Code Example #6
File: pytest.py (Project: Emilnurg/Falcon-simple-API)
 def __init__(self):
     super(PyTestExecutor, self).__init__()
     self.runner_path = os.path.join(RESOURCES_DIR, "pytest_runner.py")
     self._tailer = FileReader('',
                               file_opener=lambda _: None,
                               parent_logger=self.log)
     self._additional_args = []
Code Example #7
File: pytest.py (Project: Emilnurg/Falcon-simple-API)
    def startup(self):
        """
        run python tests
        """
        executable = self.settings.get("interpreter", sys.executable)

        cmdline = [
            executable, self.runner_path, '--report-file', self.report_file
        ]

        load = self.get_load()
        if load.iterations:
            cmdline += ['-i', str(load.iterations)]

        if load.hold:
            cmdline += ['-d', str(load.hold)]

        cmdline += self._additional_args
        cmdline += [self.script]

        self.process = self._execute(cmdline)

        if self.__is_verbose():
            self._tailer = FileReader(filename=self.stdout.name,
                                      parent_logger=self.log)
Code Example #8
File: python.py (Project: liu5269/taurus)
    def startup(self):
        """
        run python tests
        """
        executable = self.settings.get("interpreter", sys.executable)

        self.env.update({
            "PYTHONPATH":
            os.getenv("PYTHONPATH", "") + os.pathsep +
            get_full_path(__file__, step_up=3)
        })

        cmdline = [
            executable, self.runner_path, '--report-file', self.report_file
        ]
        cmdline += self._additional_args

        load = self.get_load()
        if load.iterations:
            cmdline += ['-i', str(load.iterations)]

        if load.hold:
            cmdline += ['-d', str(load.hold)]

        cmdline += [self.script]
        self._start_subprocess(cmdline)

        if self.__is_verbose():
            self._tailer = FileReader(filename=self.stdout_file,
                                      parent_logger=self.log)
Code Example #9
File: python.py (Project: liu5269/taurus)
 def __init__(self):
     super(PyTestExecutor, self).__init__()
     self.runner_path = os.path.join(get_full_path(__file__, step_up=2),
                                     "resources", "pytest_runner.py")
     self._tailer = FileReader('',
                               file_opener=lambda _: None,
                               parent_logger=self.log)
     self._additional_args = []
Code Example #11
File: tsung.py (Project: infomaven/taurus)
 def __init__(self, tsung_basedir, parent_logger):
     super(TsungStatsReader, self).__init__()
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.tsung_basedir = tsung_basedir
     self.stats_file = FileReader(parent_logger=self.log, file_opener=self.open_stats)
     self.log_file = FileReader(parent_logger=self.log, file_opener=self.open_log)
     self.delimiter = ";"
     self.partial_buffer = ""
     self.skipped_header = False
     self.concurrency = 0
Code Example #13
 def test_requests(self):
     self.configure(yaml.load(open(RESOURCES_DIR + "yaml/selenium_executor_requests.yml").read()))
     self.obj.prepare()
     self.obj.get_widget()
     self.obj.startup()
     while not self.obj.check():
         time.sleep(1)
     self.obj.shutdown()
     reader = FileReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.csv"))
     lines = reader.get_lines(last_pass=True)
     self.assertEquals(4, len(list(lines)))
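
Note the last_pass=True argument: the test creates the reader only after shutdown(), so a single final pass is expected to drain the whole apiritif-0.csv. A hedged sketch of that end-of-run usage (the path is hypothetical):

from bzt.utils import FileReader  # assumption: Taurus import path

reader = FileReader("artifacts/apiritif-0.csv")  # hypothetical artifact path
all_lines = list(reader.get_lines(last_pass=True))  # read everything at once
print(len(all_lines))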
Code Example #14
 def test_requests(self):
     self.configure(yaml.load(open(RESOURCES_DIR + "yaml/selenium_executor_requests.yml").read()))
     self.obj.prepare()
     self.obj.get_widget()
     self.obj.startup()
     while not self.obj.check():
         time.sleep(self.obj.engine.check_interval)
     self.obj.shutdown()
     reader = FileReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.csv"))
     lines = reader.get_lines(last_pass=True)
     self.assertEquals(4, len(list(lines)))
Code Example #15
    def startup(self):
        executable = self.settings.get("interpreter", sys.executable)

        report_type = ".ldjson" if self.engine.is_functional_mode() else ".csv"
        report_tpl = self.engine.create_artifact("apiritif",
                                                 ".") + "%s" + report_type
        cmdline = [
            executable, "-m", "apiritif.loadgen", '--result-file-template',
            report_tpl
        ]

        load = self.get_load()
        if load.concurrency:
            cmdline += ['--concurrency', str(load.concurrency)]

        iterations = self.get_raw_load().iterations
        if iterations is None:  # defaults:
            msg = "No iterations limit in config, choosing anything... set "
            if load.duration or self.engine.is_functional_mode() and list(
                    self.get_scenario().get_data_sources()):
                iterations = 0  # infinite for func mode and ds
                msg += "0 (infinite) as "
                if load.duration:
                    msg += "duration found (hold-for + ramp-up)"
                elif self.engine.is_functional_mode():
                    msg += "taurus works in functional mode"
                else:
                    msg += "data-sources found"

            else:
                iterations = 1  # run once otherwise
                msg += "1"

            self.log.debug(msg)

        if iterations:
            cmdline += ['--iterations', str(iterations)]

        if load.hold:
            cmdline += ['--hold-for', str(load.hold)]

        if load.ramp_up:
            cmdline += ['--ramp-up', str(load.ramp_up)]

        if load.steps:
            cmdline += ['--steps', str(load.steps)]

        if self.__is_verbose():
            cmdline += ['--verbose']

        cmdline += [self.script]
        self.process = self._execute(cmdline)
        self._tailer = FileReader(filename=self.stdout.name,
                                  parent_logger=self.log)
Code Example #16
File: locustio.py (Project: zeesattarny/taurus)
 def __init__(self, filename, num_slaves, parent_logger):
     """
     :type filename: str
     :type num_slaves: int
     :type parent_logger: logging.Logger
     """
     super(SlavesReader, self).__init__()
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.join_buffer = {}
     self.num_slaves = num_slaves
     self.file = FileReader(filename=filename, parent_logger=self.log)
     self.read_buffer = ""
Code Example #17
File: grinder.py (Project: Emilnurg/Falcon-simple-API)
 def __init__(self, filename, parent_logger):
     super(DataLogReader, self).__init__()
     self.report_by_url = False
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.file = FileReader(filename=filename, parent_logger=self.log)
     self.idx = {}
     self.partial_buffer = ""
     self.start_time = 0
     self.end_time = 0
     self.concurrency = 0
     self.test_names = {}
     self.known_threads = set()
Code Example #18
File: siege.py (Project: pyToshka/taurus)
class DataLogReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(DataLogReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.concurrency = None

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if line.count(chr(0x1b)) != 2:  # skip garbage
                continue
            l_start = line.index('m') + 1
            l_end = line.index(chr(0x1b), l_start)
            line = line[l_start:l_end]
            log_vals = [val.strip() for val in line.split(',')]

            # _mark = log_vals[0]           # 0. current test mark, defined by --mark key
            # _http = log_vals[1]           # 1. http protocol
            _rstatus = log_vals[2]  # 2. response status code
            _etime = float(
                log_vals[3])  # 3. elapsed time (total time - connection time)
            _rsize = int(log_vals[4])  # 4. size of response
            _url = log_vals[5]  # 6. long or short URL value
            # _url_id = int(log_vals[7])    # 7. url number
            _tstamp = time.strptime(log_vals[7], "%Y-%m-%d %H:%M:%S")
            _tstamp = int(time.mktime(_tstamp))  # 8. moment of request sending

            _con_time = 0
            _latency = 0
            _error = None
            _concur = self.concurrency

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _rsize
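
Every _read() in this collection follows the same driver pattern: pull up to one megabyte of fresh lines per poll with get_lines(size=1024 * 1024, last_pass=last_pass), parse each line, and yield one ten-field KPI tuple. A condensed sketch of that skeleton, with ResultsReader internals assumed rather than shown, and a deliberately trivial comma-separated format:

from bzt.utils import FileReader  # assumption: Taurus import path

class MinimalLogReader(object):
    """Sketch of the _read() skeleton shared by the readers above."""

    def __init__(self, filename, parent_logger):
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)

    def _read(self, last_pass=False):
        # At most ~1 MiB per poll; on the last pass, take whatever remains.
        for line in self.file.get_lines(size=1024 * 1024, last_pass=last_pass):
            fields = [val.strip() for val in line.split(',')]
            if len(fields) < 2:
                continue  # skip partial or garbage lines, as the readers above do
            # tuple: tstamp, label, concurrency, elapsed, con_time, latency,
            #        rcode, error, source_id, bytes
            yield int(fields[0]), fields[1], 1, 0.0, 0, 0, '200', None, '', None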
Code Example #19
File: executors.py (Project: andy7i/taurus)
    def startup(self):
        executable = self.settings.get("interpreter", sys.executable)

        report_type = ".ldjson" if self.engine.is_functional_mode() else ".csv"
        report_tpl = self.engine.create_artifact("apiritif", ".") + "%s" + report_type
        cmdline = [executable, "-m", "apiritif.loadgen", '--result-file-template', report_tpl]

        load = self.get_load()
        if load.concurrency:
            cmdline += ['--concurrency', str(load.concurrency)]

        if load.iterations:
            cmdline += ['--iterations', str(load.iterations)]

        if load.hold:
            cmdline += ['--hold-for', str(load.hold)]

        if load.ramp_up:
            cmdline += ['--ramp-up', str(load.ramp_up)]

        if load.steps:
            cmdline += ['--steps', str(load.steps)]

        if self.__is_verbose():
            cmdline += ['--verbose']

        cmdline += [self.script]
        self.process = self._execute(cmdline)
        self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log)
Code Example #20
File: pbench.py (Project: infomaven/taurus)
 def __init__(self, filename, parent_logger):
     super(PBenchStatsReader, self).__init__()
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.file = FileReader(filename=filename, parent_logger=self.log)
     self.buffer = ''
     self.data = {}
     self.last_data = 0
Code Example #21
    def test_subscribe_to_transactions(self):
        dummy = DummyListener()

        self.configure({
            'execution': {
                "iterations": 5,
                'scenario': {
                    'script':
                    RESOURCES_DIR +
                    'selenium/python/test_selenium_transactions.py'
                },
                'executor': 'selenium'
            },
        })
        self.obj_prepare_runner()
        self.obj.subscribe_to_transactions(dummy)
        try:
            self.obj.engine.start_subprocess = self.start_subprocess
            self.obj.startup()
            fake_out = os.path.join(RESOURCES_DIR, 'apiritif/dummy-output.out')
            self.obj.runner._tailer = FileReader(filename=fake_out,
                                                 parent_logger=self.log)
        finally:
            self.obj.shutdown()
        self.obj.post_process()
        self.assertEqual(10, dummy.transactions['hello there'])
Code Example #22
File: siege.py (Project: andy7i/taurus)
class DataLogReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(DataLogReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.concurrency = None

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if line.count(chr(0x1b)) != 2:  # skip garbage
                continue
            l_start = line.index('m') + 1
            l_end = line.index(chr(0x1b), l_start)
            line = line[l_start:l_end]
            log_vals = [val.strip() for val in line.split(',')]

            # _mark = log_vals[0]           # 0. current test mark, defined by --mark key
            # _http = log_vals[1]           # 1. http protocol
            _rstatus = log_vals[2]  # 2. response status code
            _etime = float(log_vals[3])  # 3. elapsed time (total time - connection time)
            _rsize = int(log_vals[4])  # 4. size of response
            _url = log_vals[5]  # 6. long or short URL value
            # _url_id = int(log_vals[7])    # 7. url number
            _tstamp = time.strptime(log_vals[7], "%Y-%m-%d %H:%M:%S")
            _tstamp = int(time.mktime(_tstamp))  # 8. moment of request sending

            _con_time = 0
            _latency = 0
            _error = None
            _concur = self.concurrency

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _rsize
Code Example #23
 def __init__(self, filename, parent_logger):
     super(K6LogReader, self).__init__()
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.file = FileReader(filename=filename, parent_logger=self.log)
     self.data = {
         'timestamp': [],
         'label': [],
         'r_code': [],
         'error_msg': [],
         'http_req_duration': [],
         'http_req_connecting': [],
         'http_req_tls_handshaking': [],
         'http_req_waiting': [],
         'vus': [],
         'data_received': []
     }
Code Example #24
File: ab.py (Project: andy7i/taurus)
 def __init__(self, filename, parent_logger):
     super(TSVDataReader, self).__init__()
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.file = FileReader(filename=filename, parent_logger=self.log)
     self.skipped_header = False
     self.concurrency = None
     self.url_label = None
Code Example #25
    def test_jsr223(self):
        self.configure(RESOURCES_DIR + "jmeter/jmx/jsr223.jmx")
        try:
            self.obj.process()
            lines = FileReader(self.obj.dst_file).get_lines(last_pass=True)
            yml = yaml.full_load(''.join(lines))
            scenarios = yml.get("scenarios")
            scenario = scenarios["Thread Group"]
            requests = scenario["requests"]
            self.assertEqual(len(requests), 1)
            request = requests[0]
            self.assertIn("jsr223", request)
            parsed_jsrs = request["jsr223"]
            self.assertTrue(isinstance(parsed_jsrs, list))
            self.assertEqual(len(parsed_jsrs), 5)

            target = [{
                'script-text': 'scripty',
                'execute': 'before',
                'compile-cache': 'false',
                'language': 'beanshell',
                'parameters': 'paramssss'
            }, {
                'script-text':
                u'console.log("\u041f\u0420\u0418\u0412\u0415\u0422");\nline("2");',
                'execute': 'after',
                'compile-cache': 'true',
                'language': 'javascript',
                'parameters': 'a b c'
            }, {
                'execute': 'after',
                'compile-cache': 'true',
                'language': 'javascript',
                'parameters': None,
                'script-file': 'script.js'
            }, {
                'script-text': 'console.log("beanshell aka jsr223");',
                'execute': 'before',
                'compile-cache': True,
                'language': 'beanshell',
                'parameters': None
            }, {
                'execute': 'before',
                'compile-cache': 'true',
                'language': 'java',
                'parameters': None,
                'script-file': 'tests/resources/BlazeDemo.java'
            }]

            self.assertEqual(parsed_jsrs, target)

            js_script = os.path.join(
                get_full_path(self.obj.dst_file, step_up=1), 'script.js')
            self.assertTrue(os.path.exists(js_script))
        finally:
            os.remove(
                os.path.join(get_full_path(self.obj.dst_file, step_up=1),
                             'script.js'))
Code Example #26
File: python.py (Project: liu5269/taurus)
    def startup(self):
        executable = self.settings.get("interpreter", sys.executable)

        py_path = os.getenv("PYTHONPATH")
        taurus_dir = get_full_path(__file__, step_up=3)
        if py_path:
            py_path = os.pathsep.join((py_path, taurus_dir))
        else:
            py_path = taurus_dir

        self.env["PYTHONPATH"] = py_path

        report_type = ".ldjson" if self.engine.is_functional_mode() else ".csv"
        report_tpl = self.engine.create_artifact("apiritif-",
                                                 "") + "%s" + report_type
        cmdline = [
            executable, "-m", "apiritif.loadgen", '--result-file-template',
            report_tpl
        ]

        load = self.get_load()
        if load.concurrency:
            cmdline += ['--concurrency', str(load.concurrency)]

        if load.iterations:
            cmdline += ['--iterations', str(load.iterations)]

        if load.hold:
            cmdline += ['--hold-for', str(load.hold)]

        if load.ramp_up:
            cmdline += ['--ramp-up', str(load.ramp_up)]

        if load.steps:
            cmdline += ['--steps', str(load.steps)]

        if self.__is_verbose():
            cmdline += ['--verbose']

        cmdline += [self.script]
        self.start_time = time.time()
        self._start_subprocess(cmdline)
        self._tailer = FileReader(filename=self.stdout_file,
                                  parent_logger=self.log)
Code Example #27
File: gatling.py (Project: keithmork/taurus)
 def __init__(self, basedir, parent_logger, dir_prefix):
     super(DataLogReader, self).__init__()
     self.concurrency = 0
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.basedir = basedir
     self.file = FileReader(file_opener=self.open_fds, parent_logger=self.log)
     self.partial_buffer = ""
     self.delimiter = "\t"
     self.dir_prefix = dir_prefix
     self.guessed_gatling_version = None
Code Example #28
File: grinder.py (Project: keithmork/taurus)
 def __init__(self, filename, parent_logger):
     super(DataLogReader, self).__init__()
     self.report_by_url = False
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.file = FileReader(filename=filename, parent_logger=self.log)
     self.idx = {}
     self.partial_buffer = ""
     self.start_time = 0
     self.end_time = 0
     self.concurrency = 0
     self.test_names = {}
     self.known_threads = set()
Code Example #29
File: locustio.py (Project: andy7i/taurus)
 def __init__(self, filename, num_slaves, parent_logger):
     """
     :type filename: str
     :type num_slaves: int
     :type parent_logger: logging.Logger
     """
     super(SlavesReader, self).__init__()
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.join_buffer = {}
     self.num_slaves = num_slaves
     self.file = FileReader(filename=filename, parent_logger=self.log)
     self.read_buffer = ""
Code Example #30
File: pbench.py (Project: wr-tty/taurus)
class PBenchStatsReader(object):
    MARKER = "\n},"

    def __init__(self, filename, parent_logger):
        super(PBenchStatsReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.buffer = ''
        self.data = {}
        self.last_data = 0

    def read_file(self):
        _bytes = self.file.get_bytes()
        if _bytes:
            self.buffer += _bytes

        while self.MARKER in self.buffer:
            idx = self.buffer.find(self.MARKER) + len(self.MARKER)
            chunk_str = self.buffer[:idx - 1]
            self.buffer = self.buffer[idx + 1:]
            chunk = json.loads("{%s}" % chunk_str)

            for date_str in chunk.keys():
                statistics = chunk[date_str]

                date_obj = datetime.datetime.strptime(
                    date_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
                date = int(time.mktime(date_obj.timetuple()))
                self.data[date] = 0

                for benchmark_name in statistics.keys():
                    if not benchmark_name.startswith("benchmark_io"):
                        continue
                    benchmark = statistics[benchmark_name]
                    for method in benchmark:
                        meth_obj = benchmark[method]
                        if "mmtasks" in meth_obj:
                            self.data[date] += meth_obj["mmtasks"][2]

                self.log.debug("Active instances stats for %s: %s", date,
                               self.data[date])

    def get_data(self, tstmp):
        if tstmp in self.data:
            self.last_data = self.data[tstmp]
            return self.data[tstmp]
        else:
            self.log.debug("No active instances info for %s", tstmp)
            return self.last_data
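
PBenchStatsReader shows the byte-oriented side of FileReader: get_bytes() returns whatever was appended since the last call, and the reader buffers it until a complete "\n},"-terminated JSON chunk can be cut off and parsed. A tiny driver sketch, assuming the class above with its json/datetime/time imports is in scope; file name and timestamp are hypothetical:

import logging

stats = PBenchStatsReader("pbench-stats.log", logging.getLogger("demo"))

# Each poll consumes newly flushed chunks; get_data() then answers per-second
# concurrency queries, falling back to the last known value for unseen stamps.
stats.read_file()
print(stats.get_data(1500000000))  # hypothetical epoch timestamp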
Code Example #31
File: test_jmx2yaml.py (Project: Nami-mp/taurus)
 def test_jsr223(self):
     self.configure(RESOURCES_DIR + "jmeter/jmx/jsr223.jmx")
     try:
         self.obj.process()
         lines = FileReader(self.obj.dst_file).get_lines(last_pass=True)
         yml = yaml.load(''.join(lines))
         scenarios = yml.get("scenarios")
         scenario = scenarios["Thread Group"]
         requests = scenario["requests"]
         self.assertEqual(len(requests), 1)
         request = requests[0]
         self.assertIn("jsr223", request)
         jsrs = request["jsr223"]
         self.assertTrue(isinstance(jsrs, list))
         self.assertEqual(len(jsrs), 5)
         self.assertEqual(jsrs[0]["language"], "beanshell")
         self.assertEqual(jsrs[0]["script-text"], "scripty")
         self.assertEqual(jsrs[0]["parameters"], "parames")
         self.assertNotIn('script-file', jsrs[0])
         self.assertEqual(jsrs[1]["language"], "javascript")
         self.assertEqual(jsrs[1]["script-text"],
                          u'console.log("ПРИВЕТ");\nline("2");')
         self.assertEqual(jsrs[1]["parameters"], "a b c")
         self.assertNotIn('script-file', jsrs[1])
         self.assertEqual(jsrs[2]["language"], "javascript")
         self.assertEqual(jsrs[2]["script-file"], "script.js")
         self.assertEqual(jsrs[2]["parameters"], None)
         self.assertNotIn('script-text', jsrs[2])
         self.assertEqual(jsrs[3]["language"], "beanshell")
         self.assertEqual(jsrs[3]["execute"], "before")
         self.assertEqual(jsrs[3]["parameters"], None)
         self.assertEqual(jsrs[3]['script-text'],
                          'console.log("beanshell aka jsr223");')
         self.assertNotIn('script-file', jsrs[3])
         self.assertEqual(jsrs[4]["language"], "java")
         self.assertEqual(jsrs[4]["execute"], "before")
         self.assertEqual(jsrs[4]["parameters"], None)
         self.assertIn('BlazeDemo.java', jsrs[4]['script-file'])
         self.assertNotIn('script-text', jsrs[4])
         self.assertTrue(
             os.path.exists(
                 os.path.join(get_full_path(self.obj.dst_file, step_up=1),
                              'script.js')))
     finally:
         os.remove(
             os.path.join(get_full_path(self.obj.dst_file, step_up=1),
                          'script.js'))
Code Example #32
File: pbench.py (Project: infomaven/taurus)
class PBenchStatsReader(object):
    MARKER = "\n},"

    def __init__(self, filename, parent_logger):
        super(PBenchStatsReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.buffer = ''
        self.data = {}
        self.last_data = 0

    def read_file(self):
        _bytes = self.file.get_bytes()
        if _bytes:
            self.buffer += _bytes

        while self.MARKER in self.buffer:
            idx = self.buffer.find(self.MARKER) + len(self.MARKER)
            chunk_str = self.buffer[:idx - 1]
            self.buffer = self.buffer[idx + 1:]
            chunk = json.loads("{%s}" % chunk_str)

            for date_str in chunk.keys():
                statistics = chunk[date_str]

                date_obj = datetime.datetime.strptime(date_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
                date = int(time.mktime(date_obj.timetuple()))
                self.data[date] = 0

                for benchmark_name in statistics.keys():
                    if not benchmark_name.startswith("benchmark_io"):
                        continue
                    benchmark = statistics[benchmark_name]
                    for method in benchmark:
                        meth_obj = benchmark[method]
                        if "mmtasks" in meth_obj:
                            self.data[date] += meth_obj["mmtasks"][2]

                self.log.debug("Active instances stats for %s: %s", date, self.data[date])

    def get_data(self, tstmp):
        if tstmp in self.data:
            self.last_data = self.data[tstmp]
            return self.data[tstmp]
        else:
            self.log.debug("No active instances info for %s", tstmp)
            return self.last_data
Code Example #33
File: executors.py (Project: andy7i/taurus)
    def startup(self):
        """
        run python tests
        """
        executable = self.settings.get("interpreter", sys.executable)

        cmdline = [executable, self.runner_path, '--report-file', self.report_file]

        load = self.get_load()
        if load.iterations:
            cmdline += ['-i', str(load.iterations)]

        if load.hold:
            cmdline += ['-d', str(load.hold)]

        cmdline += self._additional_args
        cmdline += [self.script]

        self.process = self._execute(cmdline)

        if self.__is_verbose():
            self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log)
Code Example #34
class VegetaLogReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(VegetaLogReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            log_vals = [val.strip() for val in line.split(',')]

            _tstamp = int(log_vals[0][:10])
            _url = log_vals[10]
            _concur = 1
            _etime = float(log_vals[2]) / 1000000000.0
            _con_time = 0
            _latency = 0
            _rstatus = log_vals[1]
            _error = log_vals[5] or None
            _bytes = int(log_vals[4])

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _bytes
Code Example #35
File: pbench.py (Project: infomaven/taurus)
    def __init__(self, load, payload_filename, parent_logger):
        super(Scheduler, self).__init__()
        self.need_start_loop = None
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.load = load
        self.payload_file = FileReader(filename=payload_filename, parent_logger=self.log)
        if not load.duration and not load.iterations:
            self.iteration_limit = 1
        else:
            self.iteration_limit = load.iterations

        self.concurrency = load.concurrency if load.concurrency is not None else 1

        self.step_len = load.ramp_up / load.steps if load.steps and load.ramp_up else 0
        if load.throughput:
            self.ramp_up_slope = load.throughput / load.ramp_up if load.ramp_up else 0
            self.step_size = float(load.throughput) / load.steps if load.steps else 0
        else:
            self.ramp_up_slope = None
            self.step_size = float(self.concurrency) / load.steps if load.steps else 0

        self.count = 0.0
        self.time_offset = 0.0
        self.iterations = 0
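
The ramp-up arithmetic above is easier to check with concrete numbers; the load values below are hypothetical.

# Worked example of the slope/step math in Scheduler.__init__:
throughput, ramp_up, steps = 100.0, 50.0, 5
ramp_up_slope = throughput / ramp_up   # 2.0 requests/sec gained per second
step_size = float(throughput) / steps  # 20.0 requests/sec added per step
step_len = ramp_up / steps             # 10.0 seconds spent on each step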
Code Example #36
File: ab.py (Project: Emilnurg/Falcon-simple-API)
class TSVDataReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(TSVDataReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.skipped_header = False
        self.concurrency = None
        self.url_label = None

    def setup(self, concurrency, url_label):
        self.concurrency = concurrency
        self.url_label = url_label

        return True

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not self.skipped_header:
                self.skipped_header = True
                continue
            log_vals = [val.strip() for val in line.split('\t')]

            _error = None
            _rstatus = None

            _url = self.url_label
            _concur = self.concurrency
            _tstamp = int(log_vals[1])  # timestamp - moment of request sending
            _con_time = float(log_vals[2]) / 1000.0  # connection time
            _etime = float(log_vals[4]) / 1000.0  # elapsed time
            _latency = float(log_vals[5]) / 1000.0  # latency (aka waittime)
            _bytes = None

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _bytes
Code Example #37
File: ab.py (Project: andy7i/taurus)
class TSVDataReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(TSVDataReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.skipped_header = False
        self.concurrency = None
        self.url_label = None

    def setup(self, concurrency, url_label):
        self.concurrency = concurrency
        self.url_label = url_label

        return True

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not self.skipped_header:
                self.skipped_header = True
                continue
            log_vals = [val.strip() for val in line.split('\t')]

            _error = None
            _rstatus = None

            _url = self.url_label
            _concur = self.concurrency
            _tstamp = int(log_vals[1])  # timestamp - moment of request sending
            _con_time = float(log_vals[2]) / 1000  # connection time
            _etime = float(log_vals[4]) / 1000  # elapsed time
            _latency = float(log_vals[5]) / 1000  # latency (aka waittime)
            _bytes = None

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _bytes
Code Example #38
File: pytest.py (Project: Emilnurg/Falcon-simple-API)
class PyTestExecutor(SubprocessedExecutor, HavingInstallableTools):
    def __init__(self):
        super(PyTestExecutor, self).__init__()
        self.runner_path = os.path.join(RESOURCES_DIR, "pytest_runner.py")
        self._tailer = FileReader('',
                                  file_opener=lambda _: None,
                                  parent_logger=self.log)
        self._additional_args = []

    def prepare(self):
        super(PyTestExecutor, self).prepare()
        self.install_required_tools()
        self.script = self.get_script_path()
        if not self.script:
            raise TaurusConfigError(
                "'script' should be present for pytest executor")

        scenario = self.get_scenario()
        if "additional-args" in scenario:
            argv = scenario.get("additional-args")
            self._additional_args = shlex.split(argv)

        self.reporting_setup(suffix=".ldjson")

    def __is_verbose(self):
        engine_verbose = self.engine.config.get(SETTINGS).get("verbose", False)
        executor_verbose = self.settings.get("verbose", engine_verbose)
        return executor_verbose

    def install_required_tools(self):
        """
        we need installed nose plugin
        """
        if sys.version >= '3':
            self.log.warning(
                "You are using Python 3, make sure that your scripts are able to run in Python 3"
            )

        self._check_tools(
            [self._get_tool(TaurusPytestRunner, tool_path=self.runner_path)])

    def startup(self):
        """
        run python tests
        """
        executable = self.settings.get("interpreter", sys.executable)

        cmdline = [
            executable, self.runner_path, '--report-file', self.report_file
        ]

        load = self.get_load()
        if load.iterations:
            cmdline += ['-i', str(load.iterations)]

        if load.hold:
            cmdline += ['-d', str(load.hold)]

        cmdline += self._additional_args
        cmdline += [self.script]

        self.process = self._execute(cmdline)

        if self.__is_verbose():
            self._tailer = FileReader(filename=self.stdout.name,
                                      parent_logger=self.log)

    def check(self):
        self.__log_lines()
        return super(PyTestExecutor, self).check()

    def post_process(self):
        super(PyTestExecutor, self).post_process()
        self.__log_lines()

    def __log_lines(self):
        lines = []
        for line in self._tailer.get_lines():
            if not IGNORED_LINE.match(line):
                lines.append(line)

        if lines:
            self.log.info("\n".join(lines))
Code Example #39
class DataLogReader(ResultsReader):
    """ Class to read KPI from data log """
    def __init__(self, basedir, parent_logger, dir_prefix):
        super(DataLogReader, self).__init__()
        self.concurrency = 0
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.basedir = basedir
        self.file = FileReader(file_opener=self.open_fds,
                               parent_logger=self.log)
        self.partial_buffer = ""
        self.delimiter = "\t"
        self.dir_prefix = dir_prefix
        self.guessed_gatling_version = None
        self._group_errors = defaultdict(lambda: defaultdict(set))

    def _extract_log_gatling_21(self, fields):
        """
        Extract stats from Gatling 2.1 format.

        :param fields:
        :return:
        """
        # $scenario  $userId  ${RequestRecordHeader.value}
        # ${serializeGroups(groupHierarchy)}  $name
        # 5requestStartDate  6requestEndDate
        # 7responseStartDate  8responseEndDate
        # 9status
        # ${serializeMessage(message)}${serializeExtraInfo(extraInfo)}$Eol"

        if fields[2].strip() == "USER":
            if fields[3].strip() == "START":
                self.concurrency += 1
            elif fields[3].strip() == "END":
                self.concurrency -= 1

        if fields[2].strip() != "REQUEST":
            return None

        label = fields[4]
        t_stamp = int(fields[8]) / 1000.0

        r_time = (int(fields[8]) - int(fields[5])) / 1000.0
        latency = (int(fields[7]) - int(fields[6])) / 1000.0
        con_time = (int(fields[6]) - int(fields[5])) / 1000.0

        if fields[-1] == 'OK':
            r_code = '200'
        else:
            _tmp_rc = fields[-1].split(" ")[-1]
            r_code = _tmp_rc if _tmp_rc.isdigit() else 'No RC'

        if len(fields) >= 11 and fields[10]:
            error = fields[10]
        else:
            error = None
        return int(t_stamp), label, r_time, con_time, latency, r_code, error

    def _extract_log_gatling_22(self, fields):
        """
        Extract stats from Gatling 2.2 format
        :param fields:
        :return:
        """
        # 0 ${RequestRecordHeader.value}
        # 1 $scenario
        # 2 $userId
        # 3 ${serializeGroups(groupHierarchy)}
        # 4 $label
        # 5 $startTimestamp
        # 6 $endTimestamp
        # 7 $status
        # [8] ${serializeMessage(message)}${serializeExtraInfo(extraInfo)}

        if fields[0].strip() == "USER":
            user_id = fields[2]
            if fields[3].strip() == "START":
                self.concurrency += 1
                self._group_errors[user_id].clear()
            elif fields[3].strip() == "END":
                self.concurrency -= 1
                self._group_errors.pop(user_id)

        if fields[0].strip() == "GROUP":
            return self.__parse_group(fields)
        elif fields[0].strip() == "REQUEST":
            return self.__parse_request(fields)
        else:
            return None

    def __parse_group(self, fields):
        user_id = fields[2]
        label = fields[3]
        if ',' in label:
            return None  # skip nested groups for now
        t_stamp = int(fields[5]) / 1000.0
        r_time = int(fields[6]) / 1000.0
        latency = 0.0
        con_time = 0.0

        if label in self._group_errors[user_id]:
            error = ';'.join(self._group_errors[user_id].pop(label))
        else:
            error = None

        if fields[7] == 'OK':
            r_code = '200'
        else:
            _tmp_rc = fields[-1].split(" ")[-1]
            r_code = _tmp_rc if _tmp_rc.isdigit() else 'N/A'
            assert error, label

        return int(t_stamp), label, r_time, con_time, latency, r_code, error

    def __parse_request(self, fields):
        # see LogFileDataWriter.ResponseMessageSerializer in gatling-core

        if len(fields) >= 9 and fields[8]:
            error = fields[8]
        else:
            error = None

        req_hierarchy = fields[3].split(',')[0]
        if req_hierarchy:
            user_id = fields[2]
            if error:
                self._group_errors[user_id][req_hierarchy].add(error)
            return None

        label = fields[4]
        t_stamp = int(fields[6]) / 1000.0
        r_time = (int(fields[6]) - int(fields[5])) / 1000.0
        latency = 0.0
        con_time = 0.0
        if fields[7] == 'OK':
            r_code = '200'
        else:
            _tmp_rc = fields[-1].split(" ")[-1]
            r_code = _tmp_rc if _tmp_rc.isdigit() else 'No RC'

        return int(t_stamp), label, r_time, con_time, latency, r_code, error

    def _guess_gatling_version(self, fields):
        if fields[0].strip() in ["USER", "REQUEST", "RUN"]:
            self.log.debug("Parsing Gatling 2.2+ stats")
            return "2.2+"
        elif len(fields) >= 3 and fields[2].strip() in [
                "USER", "REQUEST", "RUN"
        ]:
            self.log.debug("Parsing Gatling 2.1 stats")
            return "2.1"
        else:
            return None

    def _extract_log_data(self, fields):
        if self.guessed_gatling_version is None:
            self.guessed_gatling_version = self._guess_gatling_version(fields)

        if self.guessed_gatling_version == "2.1":
            return self._extract_log_gatling_21(fields)
        elif self.guessed_gatling_version == "2.2+":
            return self._extract_log_gatling_22(fields)
        else:
            return None

    def _read(self, last_pass=False):
        """
        Generator method that returns next portion of data

        :param last_pass:
        """
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not line.endswith("\n"):
                self.partial_buffer += line
                continue

            line = "%s%s" % (self.partial_buffer, line)
            self.partial_buffer = ""

            line = line.strip()
            fields = line.split(self.delimiter)

            data = self._extract_log_data(fields)
            if data is None:
                continue

            t_stamp, label, r_time, con_time, latency, r_code, error = data
            bytes_count = None
            yield t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error, '', bytes_count

    def open_fds(self, filename):
        """
        open gatling simulation.log
        """
        if os.path.isdir(self.basedir):
            prog = re.compile("^%s-[0-9]+$" % self.dir_prefix)

            for fname in os.listdir(self.basedir):
                if prog.match(fname):
                    filename = os.path.join(self.basedir, fname,
                                            "simulation.log")
                    break

            if not filename or not os.path.isfile(filename):
                self.log.debug('simulation.log not found')
                return
        elif os.path.isfile(self.basedir):
            filename = self.basedir
        else:
            self.log.debug('Path not found: %s', self.basedir)
            return

        if not os.path.getsize(filename):
            self.log.debug('simulation.log is empty')
        else:
            return open(filename, 'rb')
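
open_fds is the lazy counterpart to filename=: FileReader keeps calling the opener back until it returns a binary handle, which lets the reader wait for Gatling to create its results directory and write the first bytes of simulation.log. A reduced sketch of such an opener; the directory layout is hypothetical:

import os

def open_simulation_log(_):
    # Return a handle once the log exists and is non-empty; returning None
    # makes FileReader retry on the next poll (presumed retry behavior).
    path = os.path.join("gatling-results", "simulation.log")  # hypothetical
    if os.path.isfile(path) and os.path.getsize(path):
        return open(path, 'rb')
    return None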
Code Example #40
File: pbench.py (Project: infomaven/taurus)
class PBenchKPIReader(ResultsReader):
    """
    Class to read KPI
    :type stats_reader: PBenchStatsReader
    """

    def __init__(self, filename, parent_logger, stats_filename):
        super(PBenchKPIReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.stats_reader = PBenchStatsReader(stats_filename, parent_logger)

    def _read(self, last_pass=False):
        """
        Generator method that returns next portion of data

        :type last_pass: bool
        """

        def mcs2sec(val):
            return int(val) / 1000000.0

        self.stats_reader.read_file()

        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        fields = ("timeStamp", "label", "elapsed",
                  "Connect", "Send", "Latency", "Receive",
                  "internal",
                  "bsent", "brecv",
                  "opretcode", "responseCode")
        dialect = csv.excel_tab()

        rows = csv.DictReader(lines, fields, dialect=dialect)

        for row in rows:
            label = row["label"]

            try:
                rtm = mcs2sec(row["elapsed"])
                ltc = mcs2sec(row["Latency"])
                cnn = mcs2sec(row["Connect"])
                # NOTE: actually we have precise send and receive time here...
            except BaseException:
                raise ToolError("PBench reader: failed record: %s" % row)

            if row["opretcode"] != "0":
                error = strerror(int(row["opretcode"]))
                rcd = error
            else:
                error = None
                rcd = row["responseCode"]

            tstmp = int(float(row["timeStamp"]) + rtm)
            byte_count = int(row["brecv"])
            concur = 0
            yield tstmp, label, concur, rtm, cnn, ltc, rcd, error, '', byte_count

    def _calculate_datapoints(self, final_pass=False):
        for point in super(PBenchKPIReader, self)._calculate_datapoints(final_pass):
            concurrency = self.stats_reader.get_data(point[DataPoint.TIMESTAMP])

            for label_data in viewvalues(point[DataPoint.CURRENT]):
                label_data[KPISet.CONCURRENCY] = concurrency

            yield point
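
PBenchKPIReader layers csv.DictReader directly on the get_lines() generator, so tab-separated KPI rows are parsed lazily as the file grows. A self-contained sketch of that stdlib combination with made-up sample rows:

import csv
import io

# csv.DictReader accepts any iterable of lines, so a FileReader generator
# (or this in-memory stand-in) plugs in directly.
lines = io.StringIO("1000000\tindex\t2500\n1000001\tlogin\t4100\n")
fields = ("timeStamp", "label", "elapsed")
for row in csv.DictReader(lines, fields, dialect=csv.excel_tab):
    print(row["label"], int(row["elapsed"]) / 1000000.0)  # microseconds -> sec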
Code Example #41
File: executors.py (Project: andy7i/taurus)
class ApiritifNoseExecutor(SubprocessedExecutor):
    """
    :type _tailer: FileReader
    """

    def __init__(self):
        super(ApiritifNoseExecutor, self).__init__()
        self._tailer = FileReader(file_opener=lambda _: None, parent_logger=self.log)

    def resource_files(self):
        files = super(ApiritifNoseExecutor, self).resource_files()
        for source in self.get_scenario().get_data_sources():
            files.append(source['path'])

        return files

    def create_func_reader(self, report_file):
        del report_file
        return ApiritifFuncReader(self.engine, self.log)

    def create_load_reader(self, report_file):
        del report_file
        reader = ApiritifLoadReader(self.log)
        reader.engine = self.engine
        return reader

    def prepare(self):
        super(ApiritifNoseExecutor, self).prepare()
        self.script = self.get_script_path()
        if not self.script:
            if "requests" in self.get_scenario():
                self.script = self.__tests_from_requests()
            else:
                raise TaurusConfigError("Nothing to test, no requests were provided in scenario")

        # todo: requred tools?

        # path to taurus dir. It's necessary for bzt usage inside tools/helpers
        self.env.add_path({"PYTHONPATH": get_full_path(BZT_DIR, step_up=1)})

        self.reporting_setup()  # no prefix/suffix because we don't fully control report file names

    def __tests_from_requests(self):
        filename = self.engine.create_artifact("test_requests", ".py")
        test_mode = self.execution.get("test-mode", "apiritif")
        scenario = self.get_scenario()

        if test_mode == "apiritif":
            builder = ApiritifScriptGenerator(self.engine, scenario, self.label, self.log, test_mode=test_mode)
            builder.verbose = self.__is_verbose()
        else:
            wdlog = self.engine.create_artifact('webdriver', '.log')

            generate_markers = self.settings.get('generate-flow-markers', None)
            generate_markers = scenario.get('generate-flow-markers', generate_markers)

            capabilities = copy.deepcopy(self.settings.get("capabilities"))
            capabilities.merge(copy.deepcopy(self.execution.get("capabilities")))

            scenario_caps = copy.deepcopy(scenario.get("capabilities"))

            # todo: just for legacy support, remove it later
            if isinstance(scenario_caps, list):
                self.log.warning("Obsolete format of capabilities found (list), should be dict")
                scenario_caps = {item.keys()[0]: item.values()[0] for item in scenario_caps}

            capabilities.merge(scenario_caps)

            remote = self.settings.get("remote", None)
            remote = self.execution.get("remote", remote)
            remote = scenario.get("remote", remote)

            builder = ApiritifScriptGenerator(
                self.engine, scenario, self.label, self.log, wdlog,
                utils_file=os.path.join(RESOURCES_DIR, "selenium_taurus_extras.py"),
                ignore_unknown_actions=self.settings.get("ignore-unknown-actions", False),
                generate_markers=generate_markers,
                capabilities=capabilities,
                wd_addr=remote, test_mode=test_mode)

        builder.build_source_code()
        builder.save(filename)
        if isinstance(self.engine.aggregator, ConsolidatingAggregator) and isinstance(builder, ApiritifScriptGenerator):
            self.engine.aggregator.ignored_labels.extend(builder.service_methods)
        return filename

    def startup(self):
        executable = self.settings.get("interpreter", sys.executable)

        report_type = ".ldjson" if self.engine.is_functional_mode() else ".csv"
        report_tpl = self.engine.create_artifact("apiritif", ".") + "%s" + report_type
        cmdline = [executable, "-m", "apiritif.loadgen", '--result-file-template', report_tpl]

        load = self.get_load()
        if load.concurrency:
            cmdline += ['--concurrency', str(load.concurrency)]

        if load.iterations:
            cmdline += ['--iterations', str(load.iterations)]

        if load.hold:
            cmdline += ['--hold-for', str(load.hold)]

        if load.ramp_up:
            cmdline += ['--ramp-up', str(load.ramp_up)]

        if load.steps:
            cmdline += ['--steps', str(load.steps)]

        if self.__is_verbose():
            cmdline += ['--verbose']

        cmdline += [self.script]
        self.process = self._execute(cmdline)
        self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log)

    def has_results(self):
        if not self.reader:
            return False
        return self.reader.read_records

    @staticmethod
    def _normalize_label(label):
        for char in ":/":
            if char in label:
                label = label.replace(char, '_')
        return label

    def _check_stdout(self):
        for line in self._tailer.get_lines():
            if "Adding worker" in line:
                marker = "results="
                pos = line.index(marker)
                fname = line[pos + len(marker):].strip()
                self.log.debug("Adding result reader for %s", fname)
                self.reader.register_file(fname)
            elif "Transaction started" in line:
                colon = line.index('::')
                values = {
                    part.split('=')[0]: part.split('=')[1]
                    for part in line[colon + 2:].strip().split(',')
                }
                label = self._normalize_label(values['name'])
                start_time = float(values['start_time'])
                self.transaction_started(label, start_time)
            elif "Transaction ended" in line:
                colon = line.index('::')
                values = {
                    part.split('=')[0]: part.split('=')[1]
                    for part in line[colon + 2:].strip().split(',')
                }
                label = self._normalize_label(values['name'])
                duration = float(values['duration'])
                self.transacion_ended(label, duration)

    def check(self):
        self._check_stdout()
        return super(ApiritifNoseExecutor, self).check()

    def __log_lines(self):
        lines = []
        for line in self._tailer.get_lines():
            if not IGNORED_LINE.match(line):
                lines.append(line)

        if lines:
            self.log.info("\n".join(lines))

    def post_process(self):
        self._check_stdout()
        self.__log_lines()
        self._tailer.close()
        super(ApiritifNoseExecutor, self).post_process()

    def __is_verbose(self):
        engine_verbose = self.engine.config.get(SETTINGS).get("verbose", False)
        executor_verbose = self.settings.get("verbose", engine_verbose)
        return executor_verbose
Code Example #42
File: grinder.py (Project: keithmork/taurus)
class DataLogReader(ResultsReader):
    """ Class to read KPI from data log """
    DELIMITER = ","
    DETAILS_REGEX = re.compile(r"worker\.(\S+) (.+) -> (\S+) (.+), (\d+) bytes")

    def __init__(self, filename, parent_logger):
        super(DataLogReader, self).__init__()
        self.report_by_url = False
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.idx = {}
        self.partial_buffer = ""
        self.start_time = 0
        self.end_time = 0
        self.concurrency = 0
        self.test_names = {}
        self.known_threads = set()

    def _read(self, last_pass=False):
        """
        Generator method that returns next portion of data

        :param last_pass:
        """
        self.log.debug("Reading grinder results...")

        self.lines = list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass))

        lnum = None
        start = time.time()

        for lnum, line in enumerate(self.lines):
            if not self.idx:
                if not line.startswith('data.'):
                    self.__split(line)  # to capture early test name records
                    continue

                line = line[line.find(' '):]

                header_list = line.strip().split(self.DELIMITER)
                for _ix, field in enumerate(header_list):
                    self.idx[field.strip()] = _ix

            data_fields, worker_id = self.__split(line)
            if not data_fields:
                self.log.debug("Skipping line: %s", line.strip())
                continue

            yield self.parse_line(data_fields, worker_id, lnum)

        if lnum is not None:
            duration = time.time() - start
            if duration < 0.001:
                duration = 0.001

            self.log.debug("Log reading speed: %s lines/s", (lnum + 1) / duration)

    def parse_line(self, data_fields, worker_id, lnum):
        worker_id = worker_id.split('.')[1]
        t_stamp = int(int(data_fields[self.idx["Start time (ms since Epoch)"]]) / 1000.0)
        r_time = int(data_fields[self.idx["Test time"]]) / 1000.0
        latency = int(data_fields[self.idx["Time to first byte"]]) / 1000.0
        r_code = data_fields[self.idx["HTTP response code"]].strip()
        con_time = int(data_fields[self.idx["Time to resolve host"]]) / 1000.0
        con_time += int(data_fields[self.idx["Time to establish connection"]]) / 1000.0
        bytes_count = int(data_fields[self.idx["HTTP response length"]].strip())
        test_id = data_fields[self.idx["Test"]].strip()
        thread_id = worker_id + '/' + data_fields[self.idx["Thread"]].strip()
        if thread_id not in self.known_threads:
            self.known_threads.add(thread_id)
            self.concurrency += 1

        url, error_msg = self.__parse_prev_lines(worker_id, lnum, r_code, bytes_count)
        if int(data_fields[self.idx["Errors"]]) or int(data_fields[self.idx['HTTP response errors']]):
            if not error_msg:
                if r_code != '0':
                    error_msg = "HTTP %s" % r_code
                else:
                    error_msg = "Java exception calling TestRunner"
        else:
            error_msg = None  # suppress errors

        if self.report_by_url:
            label = url
        elif test_id in self.test_names:
            label = self.test_names[test_id]
        else:
            label = "Test #%s" % test_id

        source_id = ''  # maybe use worker_id somehow?
        return t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error_msg, source_id, bytes_count

    def __split(self, line):
        if not line.endswith("\n"):
            self.partial_buffer += line
            return None, None

        line = "%s%s" % (self.partial_buffer, line)
        self.partial_buffer = ""

        line = line.strip()
        if not line.startswith('data.'):
            line_parts = line.split(' ')
            if len(line_parts) > 1:
                if line_parts[1] == 'starting,':
                    # self.concurrency += 1
                    pass
                elif line_parts[1] == 'finished':
                    if self.concurrency > 0:
                        self.concurrency -= 1
                elif set(line_parts[1:5]) == {'Test', 'name', 'for', 'ID'}:
                    test_id = line_parts[5][:-1]
                    test_name = ' '.join(line_parts[6:])
                    self.test_names[test_id] = test_name
                    self.log.debug("Recognized test id %s => %s", test_id, test_name)
            return None, None

        worker_id = line[:line.find(' ')]
        line = line[line.find(' '):]
        data_fields = line.split(self.DELIMITER)
        if not data_fields[1].strip().isdigit():
            return None, None

        if len(data_fields) < max(self.idx.values()):
            return None, None

        return data_fields, worker_id

    def __parse_prev_lines(self, worker_id, lnum, r_code, bytes_count):
        url = ''
        error_msg = None
        for lineNo in reversed(range(max(lnum - 100, 0), lnum)):  # looking max 100 lines back. TODO: parameterize?
            line = self.lines[lineNo].strip()
            matched = self.DETAILS_REGEX.match(line)
            if not matched:
                continue

            if worker_id == matched.group(1) and r_code == matched.group(3) and str(bytes_count) == matched.group(5):
                return matched.group(2), matched.group(4)

        return url, error_msg
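
To illustrate what __parse_prev_lines() recovers, here is DETAILS_REGEX applied to a hypothetical worker detail line; only the group layout is taken from the reader above, the log line itself is invented:

import re

DETAILS_REGEX = re.compile(r"worker\.(\S+) (.+) -> (\S+) (.+), (\d+) bytes")

line = "worker.host-0 GET http://example.com/ -> 200 OK, 1234 bytes"  # invented sample
matched = DETAILS_REGEX.match(line)
if matched:
    # group(2) becomes the URL label, group(4) the error message
    worker_id, url, r_code, message, size = matched.groups()
    print(worker_id, url, r_code, message, size)  # host-0 GET http://example.com/ 200 OK 1234
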
Code example #43
File: pbench.py  Project: wr-tty/taurus
class PBenchKPIReader(ResultsReader):
    """
    Class to read KPI
    :type stats_reader: PBenchStatsReader
    """
    def __init__(self, filename, parent_logger, stats_filename):
        super(PBenchKPIReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.stats_reader = PBenchStatsReader(stats_filename, parent_logger)

    def _read(self, last_pass=False):
        """
        Generator method that returns next portion of data

        :type last_pass: bool
        """
        def mcs2sec(val):
            return int(val) / 1000000.0

        self.stats_reader.read_file()

        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        fields = ("timeStamp", "label", "elapsed", "Connect", "Send",
                  "Latency", "Receive", "internal", "bsent", "brecv",
                  "opretcode", "responseCode")
        dialect = csv.excel_tab()

        rows = csv.DictReader(lines, fields, dialect=dialect)

        for row in rows:
            label = row["label"]

            try:
                rtm = mcs2sec(row["elapsed"])
                ltc = mcs2sec(row["Latency"])
                cnn = mcs2sec(row["Connect"])
                # NOTE: actually we have precise send and receive time here...
            except BaseException:
                raise ToolError("PBench reader: failed record: %s" % row)

            if row["opretcode"] != "0":
                error = strerror(int(row["opretcode"]))
                rcd = error
            else:
                error = None
                rcd = row["responseCode"]

            tstmp = int(float(row["timeStamp"]) + rtm)
            byte_count = int(row["brecv"])
            concur = 0
            yield tstmp, label, concur, rtm, cnn, ltc, rcd, error, '', byte_count

    def _calculate_datapoints(self, final_pass=False):
        for point in super(PBenchKPIReader, self)._calculate_datapoints(final_pass):
            concurrency = self.stats_reader.get_data(point[DataPoint.TIMESTAMP])

            for label_data in viewvalues(point[DataPoint.CURRENT]):
                label_data[KPISet.CONCURRENCY] = concurrency

            yield point
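
The KPI file is tab-separated with microsecond timings. A minimal sketch of one record passing through the same csv.DictReader setup as _read(); the record values are invented:

import csv

fields = ("timeStamp", "label", "elapsed", "Connect", "Send",
          "Latency", "Receive", "internal", "bsent", "brecv",
          "opretcode", "responseCode")

line = "1526478882\t/home\t1500000\t10000\t100\t200000\t300\t0\t512\t2048\t0\t200\n"  # invented
row = next(csv.DictReader([line], fields, dialect=csv.excel_tab))

rtm = int(row["elapsed"]) / 1000000.0       # 1.5 s response time
tstmp = int(float(row["timeStamp"]) + rtm)  # timestamp of the sample's end
print(tstmp, row["label"], rtm, row["responseCode"])
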
Code example #44
File: executors.py  Project: andy7i/taurus
class PyTestExecutor(SubprocessedExecutor, HavingInstallableTools):
    def __init__(self):
        super(PyTestExecutor, self).__init__()
        self.runner_path = os.path.join(RESOURCES_DIR, "pytest_runner.py")
        self._tailer = FileReader('', file_opener=lambda _: None, parent_logger=self.log)
        self._additional_args = []

    def prepare(self):
        super(PyTestExecutor, self).prepare()
        self.install_required_tools()
        self.script = self.get_script_path()
        if not self.script:
            raise TaurusConfigError("'script' should be present for pytest executor")

        scenario = self.get_scenario()
        if "additional-args" in scenario:
            argv = scenario.get("additional-args")
            self._additional_args = shlex.split(argv)

        self.reporting_setup(suffix=".ldjson")

    def __is_verbose(self):
        engine_verbose = self.engine.config.get(SETTINGS).get("verbose", False)
        executor_verbose = self.settings.get("verbose", engine_verbose)
        return executor_verbose

    def install_required_tools(self):
        """
        the Taurus pytest runner must be installed
        """
        if sys.version_info[0] >= 3:
            self.log.warning("You are using Python 3, make sure that your scripts are able to run in Python 3")

        self._check_tools([self._get_tool(TaurusPytestRunner, tool_path=self.runner_path)])

    def startup(self):
        """
        run python tests
        """
        executable = self.settings.get("interpreter", sys.executable)

        cmdline = [executable, self.runner_path, '--report-file', self.report_file]

        load = self.get_load()
        if load.iterations:
            cmdline += ['-i', str(load.iterations)]

        if load.hold:
            cmdline += ['-d', str(load.hold)]

        cmdline += self._additional_args
        cmdline += [self.script]

        self.process = self._execute(cmdline)

        if self.__is_verbose():
            self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log)

    def check(self):
        self.__log_lines()
        return super(PyTestExecutor, self).check()

    def post_process(self):
        super(PyTestExecutor, self).post_process()
        self.__log_lines()

    def __log_lines(self):
        lines = []
        for line in self._tailer.get_lines():
            if not IGNORED_LINE.match(line):
                lines.append(line)

        if lines:
            self.log.info("\n".join(lines))
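
For reference, with two iterations and a 30-second hold configured, startup() would assemble a command line roughly like the one below; the interpreter, runner and script paths are hypothetical:

cmdline = [
    "/usr/bin/python3", "/path/to/pytest_runner.py",
    "--report-file", "report.ldjson",
    "-i", "2",    # from load.iterations
    "-d", "30",   # from load.hold
    "tests/test_api.py",
]
print(" ".join(cmdline))
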
Code example #45
File: pbench.py  Project: wr-tty/taurus
class Scheduler(object):
    REC_TYPE_SCHEDULE = 0
    REC_TYPE_LOOP_START = 1
    REC_TYPE_STOP = 2

    def __init__(self, load, payload_filename, parent_logger):
        super(Scheduler, self).__init__()
        self.need_start_loop = None
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.load = load
        self.payload_file = FileReader(filename=payload_filename,
                                       parent_logger=self.log)
        if not load.duration and not load.iterations:
            self.iteration_limit = 1
        else:
            self.iteration_limit = load.iterations

        self.concurrency = load.concurrency if load.concurrency is not None else 1

        self.step_len = load.ramp_up / load.steps if load.steps and load.ramp_up else 0
        if load.throughput:
            self.ramp_up_slope = load.throughput / load.ramp_up if load.ramp_up else 0
            self.step_size = float(load.throughput) / load.steps if load.steps else 0
        else:
            self.ramp_up_slope = None
            self.step_size = float(self.concurrency) / load.steps if load.steps else 0

        self.count = 0.0
        self.time_offset = 0.0
        self.iterations = 0

    def _payload_reader(self):
        self.iterations = 1
        rec_type = self.REC_TYPE_SCHEDULE
        while True:
            payload_offset = self.payload_file.offset
            line = self.payload_file.get_line()
            if not line:  # rewind
                self.payload_file.offset = 0
                self.iterations += 1

                if self.need_start_loop and not self.iteration_limit:
                    self.need_start_loop = False
                    self.iteration_limit = self.iterations
                    rec_type = self.REC_TYPE_LOOP_START

                if self.iteration_limit and self.iterations > self.iteration_limit:
                    self.log.debug("Schedule iterations limit reached: %s",
                                   self.iteration_limit)
                    break

            if not line.strip():  # we're fine to skip empty lines between records
                continue

            parts = line.split(' ')
            if len(parts) < 2:
                raise TaurusInternalException(
                    "Wrong format for meta-info line: %s" % line)

            payload_len, marker = parts
            payload_len = int(payload_len)
            payload = self.payload_file.get_bytes(payload_len)
            yield payload_len, payload_offset, payload, marker.strip(), len(line), rec_type
            rec_type = self.REC_TYPE_SCHEDULE

    def generate(self):
        for payload_len, payload_offset, payload, marker, meta_len, record_type in self._payload_reader():
            if self.load.throughput:
                self.time_offset += self.__get_time_offset_rps()
                if self.load.duration and self.time_offset > self.load.duration:
                    self.log.debug("Duration limit reached: %s",
                                   self.time_offset)
                    break
            else:  # concurrency schedule
                self.time_offset = self.__get_time_offset_concurrency()

            overall_len = payload_len + meta_len
            yield self.time_offset, payload_len, payload_offset, payload, marker, record_type, overall_len
            self.count += 1

    def __get_time_offset_concurrency(self):
        if not self.load.ramp_up or self.count >= self.concurrency:
            if self.need_start_loop is None:
                self.need_start_loop = True
            return -1  # special case, means no delay
        elif self.load.steps:
            step = math.floor(self.count / self.step_size)
            return step * self.step_len
        else:  # ramp-up case
            return self.count * self.load.ramp_up / self.concurrency

    def __get_time_offset_rps(self):
        if not self.load.ramp_up or self.time_offset > self.load.ramp_up:
            # limit iterations
            rps = self.load.throughput
            if self.need_start_loop is None:
                self.need_start_loop = True
        elif self.load.steps:
            rps = self.step_size * (
                math.floor(self.time_offset / self.step_len) + 1)
        else:  # ramp-up case
            xpos = math.sqrt(2 * self.count / self.ramp_up_slope)
            rps = xpos * self.ramp_up_slope

        return 1.0 / rps if rps else 0
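
The square root in __get_time_offset_rps() inverts the cumulative request count under a linear ramp-up: with rps(t) = slope * t, the count after t seconds is slope * t**2 / 2, so request n fires at t = sqrt(2 * n / slope). A standalone check with made-up load figures:

import math

ramp_up = 60.0      # seconds, hypothetical
throughput = 10.0   # target RPS at the end of ramp-up, hypothetical
slope = throughput / ramp_up

for n in (1, 10, 100):
    t = math.sqrt(2 * n / slope)
    print("request #%d at t=%.1fs, instantaneous rps=%.2f" % (n, t, slope * t))
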
Code example #46
File: siege.py  Project: andy7i/taurus
 def __init__(self, filename, parent_logger):
     super(DataLogReader, self).__init__()
     self.log = parent_logger.getChild(self.__class__.__name__)
     self.file = FileReader(filename=filename, parent_logger=self.log)
     self.concurrency = None
Code example #47
File: locustio.py  Project: andy7i/taurus
class SlavesReader(ResultsProvider):
    def __init__(self, filename, num_slaves, parent_logger):
        """
        :type filename: str
        :type num_slaves: int
        :type parent_logger: logging.Logger
        """
        super(SlavesReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.join_buffer = {}
        self.num_slaves = num_slaves
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.read_buffer = ""

    def _calculate_datapoints(self, final_pass=False):
        read = self.file.get_bytes(size=1024 * 1024, last_pass=final_pass)
        if not read or not read.strip():
            return
        self.read_buffer += read
        while "\n" in self.read_buffer:
            _line = self.read_buffer[:self.read_buffer.index("\n") + 1]
            self.read_buffer = self.read_buffer[len(_line):]
            self.fill_join_buffer(json.loads(_line))

        max_full_ts = self.get_max_full_ts()

        if max_full_ts is not None:
            for point in self.merge_datapoints(max_full_ts):
                yield point

    def merge_datapoints(self, max_full_ts):
        reader_id = self.file.name + "@" + str(id(self))
        for key in sorted(self.join_buffer.keys(), key=int):
            if int(key) <= max_full_ts:
                sec_data = self.join_buffer.pop(key)
                self.log.debug("Processing complete second: %s", key)
                point = DataPoint(int(key))
                point[DataPoint.SOURCE_ID] = reader_id
                for sid, item in iteritems(sec_data):
                    point.merge_point(self.point_from_locust(key, sid, item))
                point.recalculate()
                yield point

    def get_max_full_ts(self):
        max_full_ts = None
        for key in sorted(self.join_buffer.keys(), key=int):
            if len(self.join_buffer[key]) >= self.num_slaves:  # every slave reported for this second
                max_full_ts = int(key)
        return max_full_ts

    def fill_join_buffer(self, data):
        self.log.debug("Got slave data: %s", data)
        for stats_item in data['stats']:
            for timestamp in stats_item['num_reqs_per_sec'].keys():
                if timestamp not in self.join_buffer:
                    self.join_buffer[timestamp] = {}
                self.join_buffer[timestamp][data['client_id']] = data

    @staticmethod
    def point_from_locust(timestamp, sid, data):
        """
        :type timestamp: str
        :type sid: str
        :type data: dict
        :rtype: DataPoint
        """
        point = DataPoint(int(timestamp))
        point[DataPoint.SOURCE_ID] = sid
        overall = KPISet()
        for item in data['stats']:
            if timestamp not in item['num_reqs_per_sec']:
                continue

            kpiset = KPISet()
            kpiset[KPISet.SAMPLE_COUNT] = item['num_reqs_per_sec'][timestamp]
            kpiset[KPISet.CONCURRENCY] = data['user_count']
            kpiset[KPISet.BYTE_COUNT] = item['total_content_length']
            if item['num_requests']:
                avg_rt = (item['total_response_time'] / 1000.0) / item['num_requests']
                kpiset.sum_rt = item['num_reqs_per_sec'][timestamp] * avg_rt

            for err in data['errors'].values():
                if err['name'] == item['name']:
                    new_err = KPISet.error_item_skel(err['error'], None, err['occurences'], KPISet.ERRTYPE_ERROR,
                                                     Counter(), None)
                    KPISet.inc_list(kpiset[KPISet.ERRORS], ("msg", err['error']), new_err)
                    kpiset[KPISet.FAILURES] += err['occurences']

            kpiset[KPISet.SUCCESSES] = kpiset[KPISet.SAMPLE_COUNT] - kpiset[KPISet.FAILURES]
            point[DataPoint.CURRENT][item['name']] = kpiset
            overall.merge_kpis(kpiset, sid)

        point[DataPoint.CURRENT][''] = overall
        point.recalculate()
        return point
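
The response-time bookkeeping in point_from_locust() can be checked in isolation: the average comes from the totals, and sum_rt scales it by that second's sample count. The stats item below is invented but shaped like the slave report fields used above:

item = {
    "num_requests": 50,
    "total_response_time": 12500,            # ms, summed over all requests
    "num_reqs_per_sec": {"1526478882": 5},   # samples within this second
}
timestamp = "1526478882"

avg_rt = (item["total_response_time"] / 1000.0) / item["num_requests"]  # 0.25 s
sum_rt = item["num_reqs_per_sec"][timestamp] * avg_rt                   # 1.25 s
print(avg_rt, sum_rt)
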
Code example #49
File: gatling.py  Project: keithmork/taurus
class DataLogReader(ResultsReader):
    """ Class to read KPI from data log """

    def __init__(self, basedir, parent_logger, dir_prefix):
        super(DataLogReader, self).__init__()
        self.concurrency = 0
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.basedir = basedir
        self.file = FileReader(file_opener=self.open_fds, parent_logger=self.log)
        self.partial_buffer = ""
        self.delimiter = "\t"
        self.dir_prefix = dir_prefix
        self.guessed_gatling_version = None

    def _extract_log_gatling_21(self, fields):
        """
        Extract stats from Gatling 2.1 format.

        :param fields:
        :return:
        """
        # $scenario  $userId  ${RequestRecordHeader.value}
        # ${serializeGroups(groupHierarchy)}  $name
        # 5requestStartDate  6requestEndDate
        # 7responseStartDate  8responseEndDate
        # 9status
        # ${serializeMessage(message)}${serializeExtraInfo(extraInfo)}$Eol"

        if fields[2].strip() == "USER":
            if fields[3].strip() == "START":
                self.concurrency += 1
            elif fields[3].strip() == "END":
                self.concurrency -= 1

        if fields[2].strip() != "REQUEST":
            return None

        label = fields[4]
        t_stamp = int(fields[8]) / 1000.0

        r_time = (int(fields[8]) - int(fields[5])) / 1000.0
        latency = (int(fields[7]) - int(fields[6])) / 1000.0
        con_time = (int(fields[6]) - int(fields[5])) / 1000.0

        if fields[-1] == 'OK':
            r_code = '200'
        else:
            _tmp_rc = fields[-1].split(" ")[-1]
            r_code = _tmp_rc if _tmp_rc.isdigit() else 'No RC'

        if len(fields) >= 11 and fields[10]:
            error = fields[10]
        else:
            error = None
        return int(t_stamp), label, r_time, con_time, latency, r_code, error

    def _extract_log_gatling_22(self, fields):
        """
        Extract stats from Gatling 2.2 format
        :param fields:
        :return:
        """
        # 0 ${RequestRecordHeader.value}
        # 1 $scenario
        # 2 $userId
        # 3 ${serializeGroups(groupHierarchy)}
        # 4 $label
        # 5 $startTimestamp
        # 6 $endTimestamp
        # 7 $status
        # [8] ${serializeMessage(message)}${serializeExtraInfo(extraInfo)}

        if fields[0].strip() == "USER":
            if fields[3].strip() == "START":
                self.concurrency += 1
            elif fields[3].strip() == "END":
                self.concurrency -= 1

        if fields[0].strip() != "REQUEST":
            return None

        label = fields[4]
        t_stamp = int(fields[6]) / 1000.0

        r_time = (int(fields[6]) - int(fields[5])) / 1000.0
        latency = 0.0
        con_time = 0.0

        if fields[7] == 'OK':
            r_code = '200'
        else:
            _tmp_rc = fields[-1].split(" ")[-1]
            r_code = _tmp_rc if _tmp_rc.isdigit() else 'No RC'

        if len(fields) >= 9 and fields[8]:
            error = fields[8]
        else:
            error = None
        return int(t_stamp), label, r_time, con_time, latency, r_code, error

    def _guess_gatling_version(self, fields):
        if fields[0].strip() in ["USER", "REQUEST", "RUN"]:
            self.log.debug("Parsing Gatling 2.2+ stats")
            return "2.2+"
        elif len(fields) >= 3 and fields[2].strip() in ["USER", "REQUEST", "RUN"]:
            self.log.debug("Parsing Gatling 2.1 stats")
            return "2.1"
        else:
            return None

    def _extract_log_data(self, fields):
        if self.guessed_gatling_version is None:
            self.guessed_gatling_version = self._guess_gatling_version(fields)

        if self.guessed_gatling_version == "2.1":
            return self._extract_log_gatling_21(fields)
        elif self.guessed_gatling_version == "2.2+":
            return self._extract_log_gatling_22(fields)
        else:
            return None

    def _read(self, last_pass=False):
        """
        Generator method that returns next portion of data

        :param last_pass:
        """
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not line.endswith("\n"):
                self.partial_buffer += line
                continue

            line = "%s%s" % (self.partial_buffer, line)
            self.partial_buffer = ""

            line = line.strip()
            fields = line.split(self.delimiter)

            data = self._extract_log_data(fields)
            if data is None:
                continue

            t_stamp, label, r_time, con_time, latency, r_code, error = data
            bytes_count = None
            yield t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error, '', bytes_count

    def open_fds(self, filename):
        """
        open gatling simulation.log
        """
        if os.path.isdir(self.basedir):
            prog = re.compile("^%s-[0-9]+$" % self.dir_prefix)

            for fname in os.listdir(self.basedir):
                if prog.match(fname):
                    filename = os.path.join(self.basedir, fname, "simulation.log")
                    break

            if not filename or not os.path.isfile(filename):
                self.log.debug('simulation.log not found')
                return
        elif os.path.isfile(self.basedir):
            filename = self.basedir
        else:
            self.log.debug('Path not found: %s', self.basedir)
            return

        if not os.path.getsize(filename):
            self.log.debug('simulation.log is empty')
        else:
            return open(filename, 'rb')
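
The version guess in _guess_gatling_version() hinges on where the record type sits: Gatling 2.2+ writes it in the first column, 2.1 in the third. A sketch over two invented simulation.log lines:

lines = [
    "REQUEST\tscenario1\t1\t\tGET /\t1526478882000\t1526478882100\tOK",  # invented 2.2+ record
    "scenario1\t1\tREQUEST\t\tGET /\t0\t0\t0\t0\tOK",                    # invented 2.1 record
]
for line in lines:
    fields = line.split("\t")
    if fields[0].strip() in ["USER", "REQUEST", "RUN"]:
        print("Gatling 2.2+")
    elif len(fields) >= 3 and fields[2].strip() in ["USER", "REQUEST", "RUN"]:
        print("Gatling 2.1")
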
Code example #55
File: tsung.py  Project: infomaven/taurus
class TsungStatsReader(ResultsReader):
    def __init__(self, tsung_basedir, parent_logger):
        super(TsungStatsReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.tsung_basedir = tsung_basedir
        self.stats_file = FileReader(parent_logger=self.log, file_opener=self.open_stats)
        self.log_file = FileReader(parent_logger=self.log, file_opener=self.open_log)
        self.delimiter = ";"
        self.partial_buffer = ""
        self.skipped_header = False
        self.concurrency = 0

    def open_stats(self, filename):
        return self.open_file(ext='dump')

    def open_log(self, filename):
        return self.open_file(ext='log')

    def open_file(self, ext):
        basedir_contents = os.listdir(self.tsung_basedir)

        if not basedir_contents:
            self.log.debug("Tsung artifacts not appeared yet")
            return

        if len(basedir_contents) != 1:
            self.log.warning("Multiple files in Tsung basedir %s, this shouldn't happen", self.tsung_basedir)
            return

        filename = os.path.join(self.tsung_basedir, basedir_contents[0], "tsung." + ext)

        if not os.path.isfile(filename):
            self.log.debug("File not appeared yet: %s", filename)
            return
        if not os.path.getsize(filename):
            self.log.debug("File is empty: %s", filename)
            return

        self.log.debug('Opening file: %s', filename)
        return open(filename, mode='rb')

    def _read_concurrency(self, last_pass):
        lines = self.log_file.get_lines(size=1024 * 1024, last_pass=last_pass)
        extractor = re.compile(r'^stats: users (\d+) (\d+)$')

        for line in lines:
            match = extractor.match(line.strip())
            if not match:
                continue
            self.concurrency = int(match.group(2))
            self.log.debug("Actual Tsung concurrency: %s", self.concurrency)

    def _read(self, last_pass=False):
        self.log.debug("Reading Tsung results")

        self._read_concurrency(last_pass)
        lines = self.stats_file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not line.endswith("\n"):
                self.partial_buffer += line
                continue

            if not self.skipped_header and line.startswith("#"):
                self.skipped_header = True
                continue

            line = "%s%s" % (self.partial_buffer, line)
            self.partial_buffer = ""

            line = line.strip()
            fields = line.split(self.delimiter)

            tstamp = int(float(fields[0]))
            url = fields[4] + fields[5]
            rstatus = fields[6]
            rsize = int(fields[7])
            etime = float(fields[8]) / 1000
            trname = fields[9]
            error = fields[10] or None

            con_time = 0
            latency = 0

            yield tstamp, url, self.concurrency, etime, con_time, latency, rstatus, error, trname, rsize
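
A minimal sketch of the column layout _read() expects from one tsung.dump record; the record itself is invented, only the field positions follow the reader above:

line = "1526478882.004;1;2;GET;example.com;/index.html;200;5120;42.0;tx_home;\n"  # invented
fields = line.strip().split(";")

tstamp = int(float(fields[0]))
url = fields[4] + fields[5]      # "example.com/index.html"
rstatus = fields[6]
rsize = int(fields[7])
etime = float(fields[8]) / 1000  # 0.042 s
error = fields[10] or None
print(tstamp, url, rstatus, rsize, etime, fields[9], error)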