class DataLogReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(DataLogReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.concurrency = None

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if line.count(chr(0x1b)) != 2:  # skip garbage
                continue
            l_start = line.index('m') + 1
            l_end = line.index(chr(0x1b), l_start)
            line = line[l_start:l_end]
            log_vals = [val.strip() for val in line.split(',')]

            # _mark = log_vals[0]          # 0. current test mark, defined by --mark key
            # _http = log_vals[1]          # 1. http protocol
            _rstatus = log_vals[2]         # 2. response status code
            _etime = float(log_vals[3])    # 3. elapsed time (total time - connection time)
            _rsize = int(log_vals[4])      # 4. size of response
            _url = log_vals[5]             # 6. long or short URL value
            # _url_id = int(log_vals[7])   # 7. url number
            _tstamp = time.strptime(log_vals[7], "%Y-%m-%d %H:%M:%S")
            _tstamp = int(time.mktime(_tstamp))  # 8. moment of request sending

            _con_time = 0
            _latency = 0
            _error = None
            _concur = self.concurrency

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _rsize
class DataLogReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(DataLogReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.concurrency = None

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if line.count(chr(0x1b)) != 2:  # skip garbage
                continue
            l_start = line.index('m') + 1
            l_end = line.index(chr(0x1b), l_start)
            line = line[l_start:l_end]
            log_vals = [val.strip() for val in line.split(',')]

            # _mark = log_vals[0]          # 0. current test mark, defined by --mark key
            # _http = log_vals[1]          # 1. http protocol
            _rstatus = log_vals[2]         # 2. response status code
            _etime = float(log_vals[3])    # 3. elapsed time (total time - connection time)
            _rsize = int(log_vals[4])      # 4. size of response
            _url = log_vals[5]             # 6. long or short URL value
            # _url_id = int(log_vals[7])   # 7. url number
            _tstamp = time.strptime(log_vals[7], "%Y-%m-%d %H:%M:%S")
            _tstamp = int(time.mktime(_tstamp))  # 8. moment of request sending

            _con_time = 0
            _latency = 0
            _error = None
            _concur = self.concurrency

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _rsize
def test_requests(self):
    self.configure(yaml.load(open(RESOURCES_DIR + "yaml/selenium_executor_requests.yml").read()))
    self.obj.prepare()
    self.obj.get_widget()
    self.obj.startup()
    while not self.obj.check():
        time.sleep(self.obj.engine.check_interval)
    self.obj.shutdown()

    reader = FileReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.csv"))
    lines = reader.get_lines(last_pass=True)
    self.assertEquals(4, len(list(lines)))
def test_requests(self):
    self.configure(yaml.load(open(RESOURCES_DIR + "yaml/selenium_executor_requests.yml").read()))
    self.obj.prepare()
    self.obj.get_widget()
    self.obj.startup()
    while not self.obj.check():
        time.sleep(1)
    self.obj.shutdown()

    reader = FileReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.csv"))
    lines = reader.get_lines(last_pass=True)
    self.assertEquals(4, len(list(lines)))
class VegetaLogReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(VegetaLogReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            log_vals = [val.strip() for val in line.split(',')]
            _tstamp = int(log_vals[0][:10])
            _url = log_vals[10]
            _concur = 1
            _etime = float(log_vals[2]) / 1000000000.0
            _con_time = 0
            _latency = 0
            _rstatus = log_vals[1]
            _error = log_vals[5] or None
            _bytes = int(log_vals[4])

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _bytes
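The reader classes above and below all follow the same contract: _read() is a generator yielding 10-field KPI samples (timestamp, label, concurrency, elapsed time, connect time, latency, response code, error, transaction name, byte count). A minimal smoke-test sketch for the Vegeta reader, assuming a hypothetical local "vegeta.csv" results file and poking the internal _read() generator directly, which is fine for a quick check:

import logging

# Hypothetical sanity check: drain a few samples from the generator and print them.
# Assumes VegetaLogReader (and the FileReader it uses) from the snippet above are in scope;
# the field order below mirrors the yield statement of _read().
def dump_first_samples(path="vegeta.csv", limit=5):
    reader = VegetaLogReader(path, logging.getLogger("sanity"))
    for num, sample in enumerate(reader._read(last_pass=True)):
        tstamp, url, concur, etime, con_time, latency, rstatus, error, trname, size = sample
        print("%s %s rc=%s rt=%.3fs bytes=%s err=%s" % (tstamp, url, rstatus, etime, size, error))
        if num + 1 >= limit:
            break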
class TSVDataReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(TSVDataReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.skipped_header = False
        self.concurrency = None
        self.url_label = None

    def setup(self, concurrency, url_label):
        self.concurrency = concurrency
        self.url_label = url_label

        return True

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not self.skipped_header:
                self.skipped_header = True
                continue
            log_vals = [val.strip() for val in line.split('\t')]

            _error = None
            _rstatus = None

            _url = self.url_label
            _concur = self.concurrency
            _tstamp = int(log_vals[1])               # timestamp - moment of request sending
            _con_time = float(log_vals[2]) / 1000.0  # connection time
            _etime = float(log_vals[4]) / 1000.0     # elapsed time
            _latency = float(log_vals[5]) / 1000.0   # latency (aka waittime)
            _bytes = None

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _bytes
class TSVDataReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(TSVDataReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.skipped_header = False
        self.concurrency = None
        self.url_label = None

    def setup(self, concurrency, url_label):
        self.concurrency = concurrency
        self.url_label = url_label

        return True

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not self.skipped_header:
                self.skipped_header = True
                continue
            log_vals = [val.strip() for val in line.split('\t')]

            _error = None
            _rstatus = None

            _url = self.url_label
            _concur = self.concurrency
            _tstamp = int(log_vals[1])             # timestamp - moment of request sending
            _con_time = float(log_vals[2]) / 1000  # connection time
            _etime = float(log_vals[4]) / 1000     # elapsed time
            _latency = float(log_vals[5]) / 1000   # latency (aka waittime)
            _bytes = None

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _bytes
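The TSV reader above takes concurrency and URL label from outside rather than from the file itself, so setup() has to run before the first read. A hedged usage sketch with made-up values (file name, concurrency and label are illustrative only):

import logging

# Hypothetical wiring for the TSV reader above; assumes the class definition is in scope
# and "results.tsv" is a tab-separated results file in the format the reader expects.
reader = TSVDataReader("results.tsv", logging.getLogger("tsv"))
reader.setup(concurrency=10, url_label="http://example.com/")
for sample in reader._read(last_pass=True):
    _tstamp, _url, _concur, _etime = sample[0], sample[1], sample[2], sample[3]
    print(_tstamp, _url, _concur, _etime)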
class PBenchKPIReader(ResultsReader):
    """
    Class to read KPI

    :type stats_reader: PBenchStatsReader
    """

    def __init__(self, filename, parent_logger, stats_filename):
        super(PBenchKPIReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.stats_reader = PBenchStatsReader(stats_filename, parent_logger)

    def _read(self, last_pass=False):
        """
        Generator method that returns next portion of data

        :type last_pass: bool
        """

        def mcs2sec(val):
            return int(val) / 1000000.0

        self.stats_reader.read_file()
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        fields = ("timeStamp", "label", "elapsed",
                  "Connect", "Send", "Latency", "Receive",
                  "internal", "bsent", "brecv",
                  "opretcode", "responseCode")
        dialect = csv.excel_tab()
        rows = csv.DictReader(lines, fields, dialect=dialect)

        for row in rows:
            label = row["label"]

            try:
                rtm = mcs2sec(row["elapsed"])
                ltc = mcs2sec(row["Latency"])
                cnn = mcs2sec(row["Connect"])
                # NOTE: actually we have precise send and receive time here...
            except BaseException:
                raise ToolError("PBench reader: failed record: %s" % row)

            if row["opretcode"] != "0":
                error = strerror(int(row["opretcode"]))
                rcd = error
            else:
                error = None
                rcd = row["responseCode"]

            tstmp = int(float(row["timeStamp"]) + rtm)
            byte_count = int(row["brecv"])
            concur = 0
            yield tstmp, label, concur, rtm, cnn, ltc, rcd, error, '', byte_count

    def _calculate_datapoints(self, final_pass=False):
        for point in super(PBenchKPIReader, self)._calculate_datapoints(final_pass):
            concurrency = self.stats_reader.get_data(point[DataPoint.TIMESTAMP])

            for label_data in viewvalues(point[DataPoint.CURRENT]):
                label_data[KPISet.CONCURRENCY] = concurrency

            yield point
class PyTestExecutor(SubprocessedExecutor, HavingInstallableTools):
    def __init__(self):
        super(PyTestExecutor, self).__init__()
        self.runner_path = os.path.join(RESOURCES_DIR, "pytest_runner.py")
        self._tailer = FileReader('', file_opener=lambda _: None, parent_logger=self.log)
        self._additional_args = []

    def prepare(self):
        super(PyTestExecutor, self).prepare()
        self.install_required_tools()
        self.script = self.get_script_path()
        if not self.script:
            raise TaurusConfigError("'script' should be present for pytest executor")

        scenario = self.get_scenario()
        if "additional-args" in scenario:
            argv = scenario.get("additional-args")
            self._additional_args = shlex.split(argv)

        self.reporting_setup(suffix=".ldjson")

    def __is_verbose(self):
        engine_verbose = self.engine.config.get(SETTINGS).get("verbose", False)
        executor_verbose = self.settings.get("verbose", engine_verbose)
        return executor_verbose

    def install_required_tools(self):
        """
        we need installed nose plugin
        """
        if sys.version >= '3':
            self.log.warning("You are using Python 3, make sure that your scripts are able to run in Python 3")

        self._check_tools([self._get_tool(TaurusPytestRunner, tool_path=self.runner_path)])

    def startup(self):
        """
        run python tests
        """
        executable = self.settings.get("interpreter", sys.executable)

        cmdline = [executable, self.runner_path, '--report-file', self.report_file]

        load = self.get_load()
        if load.iterations:
            cmdline += ['-i', str(load.iterations)]

        if load.hold:
            cmdline += ['-d', str(load.hold)]

        cmdline += self._additional_args
        cmdline += [self.script]
        self.process = self._execute(cmdline)

        if self.__is_verbose():
            self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log)

    def check(self):
        self.__log_lines()
        return super(PyTestExecutor, self).check()

    def post_process(self):
        super(PyTestExecutor, self).post_process()
        self.__log_lines()

    def __log_lines(self):
        lines = []
        for line in self._tailer.get_lines():
            if not IGNORED_LINE.match(line):
                lines.append(line)

        if lines:
            self.log.info("\n".join(lines))
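For reference, a sketch of the command line that startup() above assembles, assuming iterations=5, hold-for=30s, additional-args="-k smoke" and a script named test_sample.py; every concrete value and path here is hypothetical:

# Roughly what self._execute(cmdline) receives under those assumptions:
cmdline = [
    "/usr/bin/python3",                      # settings "interpreter", defaults to sys.executable
    "/path/to/resources/pytest_runner.py",   # self.runner_path
    "--report-file", "pytest.ldjson",        # self.report_file (set up with the .ldjson suffix)
    "-i", "5",                               # load.iterations
    "-d", "30",                              # load.hold
    "-k", "smoke",                           # shlex.split() of the scenario additional-args
    "test_sample.py",                        # self.script
]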
class DataLogReader(ResultsReader): """ Class to read KPI from data log """ DELIMITER = "," DETAILS_REGEX = re.compile(r"worker\.(\S+) (.+) -> (\S+) (.+), (\d+) bytes") def __init__(self, filename, parent_logger): super(DataLogReader, self).__init__() self.report_by_url = False self.log = parent_logger.getChild(self.__class__.__name__) self.file = FileReader(filename=filename, parent_logger=self.log) self.idx = {} self.partial_buffer = "" self.start_time = 0 self.end_time = 0 self.concurrency = 0 self.test_names = {} self.known_threads = set() def _read(self, last_pass=False): """ Generator method that returns next portion of data :param last_pass: """ self.log.debug("Reading grinder results...") self.lines = list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass)) lnum = None start = time.time() for lnum, line in enumerate(self.lines): if not self.idx: if not line.startswith('data.'): self.__split(line) # to capture early test name records continue line = line[line.find(' '):] header_list = line.strip().split(self.DELIMITER) for _ix, field in enumerate(header_list): self.idx[field.strip()] = _ix data_fields, worker_id = self.__split(line) if not data_fields: self.log.debug("Skipping line: %s", line.strip()) continue yield self.parse_line(data_fields, worker_id, lnum) if lnum is not None: duration = time.time() - start if duration < 0.001: duration = 0.001 self.log.debug("Log reading speed: %s lines/s", (lnum + 1) / duration) def parse_line(self, data_fields, worker_id, lnum): worker_id = worker_id.split('.')[1] t_stamp = int(int(data_fields[self.idx["Start time (ms since Epoch)"]]) / 1000.0) r_time = int(data_fields[self.idx["Test time"]]) / 1000.0 latency = int(data_fields[self.idx["Time to first byte"]]) / 1000.0 r_code = data_fields[self.idx["HTTP response code"]].strip() con_time = int(data_fields[self.idx["Time to resolve host"]]) / 1000.0 con_time += int(data_fields[self.idx["Time to establish connection"]]) / 1000.0 bytes_count = int(data_fields[self.idx["HTTP response length"]].strip()) test_id = data_fields[self.idx["Test"]].strip() thread_id = worker_id + '/' + data_fields[self.idx["Thread"]].strip() if thread_id not in self.known_threads: self.known_threads.add(thread_id) self.concurrency += 1 url, error_msg = self.__parse_prev_lines(worker_id, lnum, r_code, bytes_count) if int(data_fields[self.idx["Errors"]]) or int(data_fields[self.idx['HTTP response errors']]): if not error_msg: if r_code != '0': error_msg = "HTTP %s" % r_code else: error_msg = "Java exception calling TestRunner" else: error_msg = None # suppress errors if self.report_by_url: label = url elif test_id in self.test_names: label = self.test_names[test_id] else: label = "Test #%s" % test_id source_id = '' # maybe use worker_id somehow? 
return t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error_msg, source_id, bytes_count def __split(self, line): if not line.endswith("\n"): self.partial_buffer += line return None, None line = "%s%s" % (self.partial_buffer, line) self.partial_buffer = "" line = line.strip() if not line.startswith('data.'): line_parts = line.split(' ') if len(line_parts) > 1: if line_parts[1] == 'starting,': # self.concurrency += 1 pass elif line_parts[1] == 'finished': if self.concurrency > 0: self.concurrency -= 1 elif set(line_parts[1:5]) == {'Test', 'name', 'for', 'ID'}: test_id = line_parts[5][:-1] test_name = ' '.join(line_parts[6:]) self.test_names[test_id] = test_name self.log.debug("Recognized test id %s => %s", test_id, test_name) return None, None worker_id = line[:line.find(' ')] line = line[line.find(' '):] data_fields = line.split(self.DELIMITER) if not data_fields[1].strip().isdigit(): return None, None if len(data_fields) < max(self.idx.values()): return None, None return data_fields, worker_id def __parse_prev_lines(self, worker_id, lnum, r_code, bytes_count): url = '' error_msg = None for lineNo in reversed(range(max(lnum - 100, 0), lnum)): # looking max 100 lines back. TODO: parameterize? line = self.lines[lineNo].strip() matched = self.DETAILS_REGEX.match(line) if not matched: continue if worker_id == matched.group(1) and r_code == matched.group(3) and str(bytes_count) == matched.group(5): return matched.group(2), matched.group(4) return url, error_msg
class ApiritifNoseExecutor(SubprocessedExecutor): """ :type _tailer: FileReader """ def __init__(self): super(ApiritifNoseExecutor, self).__init__() self._tailer = FileReader(file_opener=lambda _: None, parent_logger=self.log) def resource_files(self): files = super(ApiritifNoseExecutor, self).resource_files() for source in self.get_scenario().get_data_sources(): files.append(source['path']) return files def create_func_reader(self, report_file): del report_file return ApiritifFuncReader(self.engine, self.log) def create_load_reader(self, report_file): del report_file reader = ApiritifLoadReader(self.log) reader.engine = self.engine return reader def prepare(self): super(ApiritifNoseExecutor, self).prepare() self.script = self.get_script_path() if not self.script: if "requests" in self.get_scenario(): self.script = self.__tests_from_requests() else: raise TaurusConfigError( "Nothing to test, no requests were provided in scenario") # todo: requred tools? # path to taurus dir. It's necessary for bzt usage inside tools/helpers self.env.add_path({"PYTHONPATH": get_full_path(BZT_DIR, step_up=1)}) self.reporting_setup( ) # no prefix/suffix because we don't fully control report file names def __tests_from_requests(self): filename = self.engine.create_artifact("test_requests", ".py") test_mode = self.execution.get("test-mode", "apiritif") scenario = self.get_scenario() if test_mode == "apiritif": builder = ApiritifScriptGenerator(self.engine, scenario, self.label, self.log) builder.verbose = self.__is_verbose() else: wdlog = self.engine.create_artifact('webdriver', '.log') ignore_unknown_actions = self.settings.get( "ignore-unknown-actions", False) generate_markers = scenario.get( 'generate-flow-markers', self.settings.get('generate-flow-markers', None)) extra_utilities = os.path.join(RESOURCES_DIR, "selenium_taurus_extras.py") builder = SeleniumScriptBuilder(scenario, self.log, wdlog, extra_utilities, ignore_unknown_actions, generate_markers) builder.label = self.label builder.webdriver_address = self.settings.get( "remote", builder.webdriver_address) builder.webdriver_address = self.execution.get( "remote", builder.webdriver_address) builder.capabilities_from_outside = self.settings.get( "capabilities") builder.capabilities_from_outside.merge( self.execution.get("capabilities")) builder.build_source_code() builder.save(filename) if isinstance(self.engine.aggregator, ConsolidatingAggregator) and isinstance( builder, ApiritifScriptGenerator): self.engine.aggregator.ignored_labels.extend( builder.service_methods) return filename def startup(self): executable = self.settings.get("interpreter", sys.executable) report_type = ".ldjson" if self.engine.is_functional_mode() else ".csv" report_tpl = self.engine.create_artifact("apiritif", ".") + "%s" + report_type cmdline = [ executable, "-m", "apiritif.loadgen", '--result-file-template', report_tpl ] load = self.get_load() if load.concurrency: cmdline += ['--concurrency', str(load.concurrency)] if load.iterations: cmdline += ['--iterations', str(load.iterations)] if load.hold: cmdline += ['--hold-for', str(load.hold)] if load.ramp_up: cmdline += ['--ramp-up', str(load.ramp_up)] if load.steps: cmdline += ['--steps', str(load.steps)] if self.__is_verbose(): cmdline += ['--verbose'] cmdline += [self.script] self.process = self.execute(cmdline) self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log) def has_results(self): if not self.reader: return False return self.reader.read_records @staticmethod def _normalize_label(label): for char 
in ":/": if char in label: label = label.replace(char, '_') return label def _check_stdout(self): for line in self._tailer.get_lines(): if "Adding worker" in line: marker = "results=" pos = line.index(marker) fname = line[pos + len(marker):].strip() self.log.debug("Adding result reader for %s", fname) self.reader.register_file(fname) elif "Transaction started" in line: colon = line.index('::') values = { part.split('=')[0]: part.split('=')[1] for part in line[colon + 2:].strip().split(',') } label = self._normalize_label(values['name']) start_time = float(values['start_time']) self.transaction_started(label, start_time) elif "Transaction ended" in line: colon = line.index('::') values = { part.split('=')[0]: part.split('=')[1] for part in line[colon + 2:].strip().split(',') } label = self._normalize_label(values['name']) duration = float(values['duration']) self.transacion_ended(label, duration) def check(self): self._check_stdout() return super(ApiritifNoseExecutor, self).check() def __log_lines(self): lines = [] for line in self._tailer.get_lines(): if not IGNORED_LINE.match(line): lines.append(line) if lines: self.log.info("\n".join(lines)) def post_process(self): self._check_stdout() self.__log_lines() self._tailer.close() super(ApiritifNoseExecutor, self).post_process() def __is_verbose(self): engine_verbose = self.engine.config.get(SETTINGS).get("verbose", False) executor_verbose = self.settings.get("verbose", engine_verbose) return executor_verbose
class TsungStatsReader(ResultsReader):
    def __init__(self, tsung_basedir, parent_logger):
        super(TsungStatsReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.tsung_basedir = tsung_basedir
        self.stats_file = FileReader(parent_logger=self.log, file_opener=self.open_stats)
        self.log_file = FileReader(parent_logger=self.log, file_opener=self.open_log)
        self.delimiter = ";"
        self.partial_buffer = ""
        self.skipped_header = False
        self.concurrency = 0

    def open_stats(self, filename):
        return self.open_file(ext='dump')

    def open_log(self, filename):
        return self.open_file(ext='log')

    def open_file(self, ext):
        basedir_contents = os.listdir(self.tsung_basedir)

        if not basedir_contents:
            self.log.debug("Tsung artifacts not appeared yet")
            return

        if len(basedir_contents) != 1:
            self.log.warning("Multiple files in Tsung basedir %s, this shouldn't happen", self.tsung_basedir)
            return

        filename = os.path.join(self.tsung_basedir, basedir_contents[0], "tsung." + ext)

        if not os.path.isfile(filename):
            self.log.debug("File not appeared yet: %s", filename)
            return

        if not os.path.getsize(filename):
            self.log.debug("File is empty: %s", filename)
            return

        self.log.debug('Opening file: %s', filename)
        return open(filename, mode='rb')

    def _read_concurrency(self, last_pass):
        lines = self.log_file.get_lines(size=1024 * 1024, last_pass=last_pass)
        extractor = re.compile(r'^stats: users (\d+) (\d+)$')

        for line in lines:
            match = extractor.match(line.strip())
            if not match:
                continue
            self.concurrency = int(match.group(2))
            self.log.debug("Actual Tsung concurrency: %s", self.concurrency)

    def _read(self, last_pass=False):
        self.log.debug("Reading Tsung results")
        self._read_concurrency(last_pass)
        lines = self.stats_file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not line.endswith("\n"):
                self.partial_buffer += line
                continue

            if not self.skipped_header and line.startswith("#"):
                self.skipped_header = True
                continue

            line = "%s%s" % (self.partial_buffer, line)
            self.partial_buffer = ""
            line = line.strip()
            fields = line.split(self.delimiter)

            tstamp = int(float(fields[0]))
            url = fields[4] + fields[5]
            rstatus = fields[6]
            rsize = int(fields[7])
            etime = float(fields[8]) / 1000
            trname = fields[9]
            error = fields[10] or None

            con_time = 0
            latency = 0

            yield tstamp, url, self.concurrency, etime, con_time, latency, rstatus, error, trname, rsize
class ApiritifNoseExecutor(SubprocessedExecutor): """ :type _tailer: FileReader """ def __init__(self): super(ApiritifNoseExecutor, self).__init__() self._tailer = FileReader(file_opener=lambda _: None, parent_logger=self.log) def resource_files(self): files = super(ApiritifNoseExecutor, self).resource_files() for source in self.get_scenario().get_data_sources(): files.append(source['path']) return files def create_func_reader(self, report_file): del report_file return ApiritifFuncReader(self.engine, self.log) def create_load_reader(self, report_file): del report_file reader = ApiritifLoadReader(self.log) reader.engine = self.engine return reader def prepare(self): super(ApiritifNoseExecutor, self).prepare() self.script = self.get_script_path() if not self.script: if "requests" in self.get_scenario(): self.script = self.__tests_from_requests() else: raise TaurusConfigError( "Nothing to test, no requests were provided in scenario") # todo: requred tools? # path to taurus dir. It's necessary for bzt usage inside tools/helpers self.env.add_path({"PYTHONPATH": get_full_path(BZT_DIR, step_up=1)}) self.reporting_setup( ) # no prefix/suffix because we don't fully control report file names def __tests_from_requests(self): filename = self.engine.create_artifact("test_requests", ".py") test_mode = self.execution.get("test-mode", "apiritif") scenario = self.get_scenario() if test_mode == "apiritif": builder = ApiritifScriptGenerator( scenario, self.label, executor=self, test_mode=test_mode, ignore_unknown_actions=self.settings.get( "ignore-unknown-actions", False)) builder.verbose = self.__is_verbose() else: wdlog = self.engine.create_artifact('webdriver', '.log') generate_markers = self.settings.get('generate-flow-markers', None) generate_markers = scenario.get('generate-flow-markers', generate_markers) scenario_caps = scenario.get("capabilities") # todo: just for legacy support, remove it later if isinstance(scenario_caps, list): self.log.warning( "Obsolete format of capabilities found (list), should be dict" ) scenario["capabilities"] = { item.keys()[0]: item.values()[0] for item in scenario_caps } configs = (self.settings, scenario, self.execution) capabilities = get_assembled_value(configs, "capabilities") remote = get_assembled_value(configs, "remote") builder = ApiritifScriptGenerator( scenario, self.label, wdlog, executor=self, ignore_unknown_actions=self.settings.get( "ignore-unknown-actions", False), generate_markers=generate_markers, capabilities=capabilities, wd_addr=remote, test_mode=test_mode) builder.build_source_code() builder.save(filename) if isinstance(self.engine.aggregator, ConsolidatingAggregator) and isinstance( builder, ApiritifScriptGenerator): self.engine.aggregator.ignored_labels.extend( builder.service_methods) return filename def startup(self): executable = self.settings.get("interpreter", sys.executable) report_type = ".ldjson" if self.engine.is_functional_mode() else ".csv" report_tpl = self.engine.create_artifact("apiritif", ".") + "%s" + report_type cmdline = [ executable, "-m", "apiritif.loadgen", '--result-file-template', report_tpl ] load = self.get_load() if load.concurrency: cmdline += ['--concurrency', str(load.concurrency)] iterations = self.get_raw_load().iterations if iterations is None: # defaults: msg = "No iterations limit in config, choosing anything... 
set " if load.duration or self.engine.is_functional_mode() and list( self.get_scenario().get_data_sources()): iterations = 0 # infinite for func mode and ds msg += "0 (infinite) as " if load.duration: msg += "duration found (hold-for + ramp-up)" elif self.engine.is_functional_mode(): msg += "taurus works in functional mode" else: msg += "data-sources found" else: iterations = 1 # run once otherwise msg += "1" self.log.debug(msg) if iterations: cmdline += ['--iterations', str(iterations)] if load.hold: cmdline += ['--hold-for', str(load.hold)] if load.ramp_up: cmdline += ['--ramp-up', str(load.ramp_up)] if load.steps: cmdline += ['--steps', str(load.steps)] if self.__is_verbose(): cmdline += ['--verbose'] cmdline += [self.script] self.process = self._execute(cmdline) self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log) def has_results(self): if not self.reader: return False return self.reader.read_records @staticmethod def _normalize_label(label): for char in ":/": if char in label: label = label.replace(char, '_') return label def _check_stdout(self): for line in self._tailer.get_lines(): if "Adding worker" in line: marker = "results=" pos = line.index(marker) fname = line[pos + len(marker):].strip() self.log.debug("Adding result reader for %s", fname) self.reader.register_file(fname) elif "Transaction started" in line: colon = line.index('::') values = { part.split('=')[0]: part.split('=')[1] for part in line[colon + 2:].strip().split(',') } label = self._normalize_label(values['name']) start_time = float(values['start_time']) self.transaction_started(label, start_time) elif "Transaction ended" in line: colon = line.index('::') values = { part.split('=')[0]: part.split('=')[1] for part in line[colon + 2:].strip().split(',') } label = self._normalize_label(values['name']) duration = float(values['duration']) self.transacion_ended(label, duration) def check(self): self._check_stdout() return super(ApiritifNoseExecutor, self).check() def __log_lines(self): lines = [] for line in self._tailer.get_lines(): if not IGNORED_LINE.match(line): lines.append(line) if lines: self.log.info("\n".join(lines)) def post_process(self): self._check_stdout() self.__log_lines() self._tailer.close() super(ApiritifNoseExecutor, self).post_process() def __is_verbose(self): engine_verbose = self.engine.config.get(SETTINGS).get("verbose", False) executor_verbose = self.settings.get("verbose", engine_verbose) return executor_verbose
class DataLogReader(ResultsReader): """ Class to read KPI from data log """ def __init__(self, basedir, parent_logger, dir_prefix): super(DataLogReader, self).__init__() self.concurrency = 0 self.log = parent_logger.getChild(self.__class__.__name__) self.basedir = basedir self.file = FileReader(file_opener=self.open_fds, parent_logger=self.log) self.partial_buffer = "" self.delimiter = "\t" self.dir_prefix = dir_prefix self.guessed_gatling_version = None self._group_errors = defaultdict(lambda: defaultdict(set)) def _extract_log_gatling_21(self, fields): """ Extract stats from Gatling 2.1 format. :param fields: :return: """ # $scenario $userId ${RequestRecordHeader.value} # ${serializeGroups(groupHierarchy)} $name # 5requestStartDate 6requestEndDate # 7responseStartDate 8responseEndDate # 9status # ${serializeMessage(message)}${serializeExtraInfo(extraInfo)}$Eol" if fields[2].strip() == "USER": if fields[3].strip() == "START": self.concurrency += 1 elif fields[3].strip() == "END": self.concurrency -= 1 if fields[2].strip() != "REQUEST": return None label = fields[4] t_stamp = int(fields[8]) / 1000.0 r_time = (int(fields[8]) - int(fields[5])) / 1000.0 latency = (int(fields[7]) - int(fields[6])) / 1000.0 con_time = (int(fields[6]) - int(fields[5])) / 1000.0 if fields[-1] == 'OK': r_code = '200' else: _tmp_rc = fields[-1].split(" ")[-1] r_code = _tmp_rc if _tmp_rc.isdigit() else 'No RC' if len(fields) >= 11 and fields[10]: error = fields[10] else: error = None return int(t_stamp), label, r_time, con_time, latency, r_code, error def _extract_log_gatling_22(self, fields): """ Extract stats from Gatling 2.2 format :param fields: :return: """ # 0 ${RequestRecordHeader.value} # 1 $scenario # 2 $userId # 3 ${serializeGroups(groupHierarchy)} # 4 $label # 5 $startTimestamp # 6 $endTimestamp # 7 $status # [8] ${serializeMessage(message)}${serializeExtraInfo(extraInfo)} if fields[0].strip() == "USER": user_id = fields[2] if fields[3].strip() == "START": self.concurrency += 1 self._group_errors[user_id].clear() elif fields[3].strip() == "END": self.concurrency -= 1 self._group_errors.pop(user_id) if fields[0].strip() == "GROUP": return self.__parse_group(fields) elif fields[0].strip() == "REQUEST": return self.__parse_request(fields) else: return None def __parse_group(self, fields): user_id = fields[2] label = fields[3] if ',' in label: return None # skip nested groups for now t_stamp = int(fields[5]) / 1000.0 r_time = int(fields[6]) / 1000.0 latency = 0.0 con_time = 0.0 if label in self._group_errors[user_id]: error = ';'.join(self._group_errors[user_id].pop(label)) else: error = None if fields[7] == 'OK': r_code = '200' else: _tmp_rc = fields[-1].split(" ")[-1] r_code = _tmp_rc if _tmp_rc.isdigit() else 'N/A' assert error, label return int(t_stamp), label, r_time, con_time, latency, r_code, error def __parse_request(self, fields): # see LogFileDataWriter.ResponseMessageSerializer in gatling-core if len(fields) >= 9 and fields[8]: error = fields[8] else: error = None req_hierarchy = fields[3].split(',')[0] if req_hierarchy: user_id = fields[2] if error: self._group_errors[user_id][req_hierarchy].add(error) return None label = fields[4] t_stamp = int(fields[6]) / 1000.0 r_time = (int(fields[6]) - int(fields[5])) / 1000.0 latency = 0.0 con_time = 0.0 if fields[7] == 'OK': r_code = '200' else: _tmp_rc = fields[-1].split(" ")[-1] r_code = _tmp_rc if _tmp_rc.isdigit() else 'No RC' return int(t_stamp), label, r_time, con_time, latency, r_code, error def _guess_gatling_version(self, fields): if 
fields[0].strip() in ["USER", "REQUEST", "RUN"]: self.log.debug("Parsing Gatling 2.2+ stats") return "2.2+" elif len(fields) >= 3 and fields[2].strip() in [ "USER", "REQUEST", "RUN" ]: self.log.debug("Parsing Gatling 2.1 stats") return "2.1" else: return None def _extract_log_data(self, fields): if self.guessed_gatling_version is None: self.guessed_gatling_version = self._guess_gatling_version(fields) if self.guessed_gatling_version == "2.1": return self._extract_log_gatling_21(fields) elif self.guessed_gatling_version == "2.2+": return self._extract_log_gatling_22(fields) else: return None def _read(self, last_pass=False): """ Generator method that returns next portion of data :param last_pass: """ lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass) for line in lines: if not line.endswith("\n"): self.partial_buffer += line continue line = "%s%s" % (self.partial_buffer, line) self.partial_buffer = "" line = line.strip() fields = line.split(self.delimiter) data = self._extract_log_data(fields) if data is None: continue t_stamp, label, r_time, con_time, latency, r_code, error = data bytes_count = None yield t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error, '', bytes_count def open_fds(self, filename): """ open gatling simulation.log """ if os.path.isdir(self.basedir): prog = re.compile("^%s-[0-9]+$" % self.dir_prefix) for fname in os.listdir(self.basedir): if prog.match(fname): filename = os.path.join(self.basedir, fname, "simulation.log") break if not filename or not os.path.isfile(filename): self.log.debug('simulation.log not found') return elif os.path.isfile(self.basedir): filename = self.basedir else: self.log.debug('Path not found: %s', self.basedir) return if not os.path.getsize(filename): self.log.debug('simulation.log is empty') else: return open(filename, 'rb')
class PyTestExecutor(SubprocessedExecutor, HavingInstallableTools): def __init__(self): super(PyTestExecutor, self).__init__() self.runner_path = os.path.join(RESOURCES_DIR, "pytest_runner.py") self._tailer = FileReader('', file_opener=lambda _: None, parent_logger=self.log) self._additional_args = [] def prepare(self): super(PyTestExecutor, self).prepare() self.install_required_tools() self.script = self.get_script_path() if not self.script: raise TaurusConfigError( "'script' should be present for pytest executor") scenario = self.get_scenario() if "additional-args" in scenario: argv = scenario.get("additional-args") self._additional_args = shlex.split(argv) self.reporting_setup(suffix=".ldjson") def __is_verbose(self): engine_verbose = self.engine.config.get(SETTINGS).get("verbose", False) executor_verbose = self.settings.get("verbose", engine_verbose) return executor_verbose def install_required_tools(self): """ we need installed nose plugin """ if sys.version >= '3': self.log.warning( "You are using Python 3, make sure that your scripts are able to run in Python 3" ) self._check_tools( [self._get_tool(TaurusPytestRunner, tool_path=self.runner_path)]) def startup(self): """ run python tests """ executable = self.settings.get("interpreter", sys.executable) cmdline = [ executable, self.runner_path, '--report-file', self.report_file ] load = self.get_load() if load.iterations: cmdline += ['-i', str(load.iterations)] if load.hold: cmdline += ['-d', str(load.hold)] cmdline += self._additional_args cmdline += [self.script] self.process = self._execute(cmdline) if self.__is_verbose(): self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log) def check(self): self.__log_lines() return super(PyTestExecutor, self).check() def post_process(self): super(PyTestExecutor, self).post_process() self.__log_lines() def __log_lines(self): lines = [] for line in self._tailer.get_lines(): if not IGNORED_LINE.match(line): lines.append(line) if lines: self.log.info("\n".join(lines))
class DataLogReader(ResultsReader): """ Class to read KPI from data log """ def __init__(self, basedir, parent_logger, dir_prefix): super(DataLogReader, self).__init__() self.concurrency = 0 self.log = parent_logger.getChild(self.__class__.__name__) self.basedir = basedir self.file = FileReader(file_opener=self.open_fds, parent_logger=self.log) self.partial_buffer = "" self.delimiter = "\t" self.dir_prefix = dir_prefix self.guessed_gatling_version = None self._group_errors = defaultdict(set) def _extract_log(self, fields): """ Extract stats from Gatling format of version 3.1 and after :param fields: :return: """ # 0 ${RequestRecordHeader.value} # 1 $scenario # -|2 $userId, absent in Gatling 3.4+ # 2 ${serializeGroups(groupHierarchy)} # 3 $label # 4 $startTimestamp # 5 $endTimestamp # 6 $status # [7] ${serializeMessage(message)}${serializeExtraInfo(extraInfo)} if fields[0].strip() == "USER": if self.guessed_gatling_version < "3.4+": del fields[2] # ignore obsolete $userId if fields[2].strip() == "START": self.concurrency += 1 elif fields[2].strip() == "END": self.concurrency -= 1 elif fields[0].strip() == "GROUP": del fields[0] return self.__parse_group(fields) elif fields[0].strip() == "REQUEST": del fields[0] return self.__parse_request(fields) else: return None def __parse_group(self, fields): latency = 0.0 con_time = 0.0 if len(fields) < 3: label = "" t_stamp = int(fields[1]) / 1000.0 r_time = 0 error = fields[0] r_code = "N/A" else: if self.guessed_gatling_version < "3.4+": del fields[0] # ignore obsolete $userId label = fields[0] if ',' in label: return None # skip nested groups for now t_stamp = int(fields[2]) / 1000.0 r_time = int(fields[3]) / 1000.0 if label in self._group_errors: error = ';'.join(self._group_errors.pop(label)) else: error = None if fields[4] == 'OK': r_code = '200' else: r_code = self.__rc_from_msg(fields[-1]) return int(t_stamp), label, r_time, con_time, latency, r_code, error def __parse_request(self, fields): # see LogFileDataWriter.ResponseMessageSerializer in gatling-core if self.guessed_gatling_version < "3.4+": del fields[0] # ignore obsolete $userId if len(fields) >= 6 and fields[5]: error = fields[5] else: error = None req_hierarchy = fields[0].split(',')[0] if req_hierarchy: if error: self._group_errors[req_hierarchy].add(error) return None label = fields[1] t_stamp = int(fields[3]) / 1000.0 r_time = (int(fields[3]) - int(fields[2])) / 1000.0 latency = 0.0 con_time = 0.0 if fields[4] == 'OK': r_code = '200' else: r_code = self.__rc_from_msg(fields[-1]) return int(t_stamp), label, r_time, con_time, latency, r_code, error def __rc_from_msg(self, msg): _tmp_rc = msg.split( "but actually " )[-1] # gatling-core/src/main/scala/io/gatling/core/check/Validator.scala if _tmp_rc.startswith("unexpectedly "): _tmp_rc = _tmp_rc[len("unexpectedly "):] if _tmp_rc.startswith("found "): _tmp_rc = _tmp_rc[len("found "):] parts = _tmp_rc.split(' ') if len(parts) > 1 and parts[1] == 'is': _tmp_rc = parts[0] return _tmp_rc if _tmp_rc.isdigit() else 'N/A' def _guess_gatling_version(self, fields): if fields and fields[-1].strip() < "3.4": return "3.3.X" elif fields[-1].strip() >= "3.4": return "3.4+" else: return "" def _extract_log_data(self, fields): if self.guessed_gatling_version is None: self.guessed_gatling_version = self._guess_gatling_version(fields) return self._extract_log( fields) if self.guessed_gatling_version else None def _read(self, last_pass=False): """ Generator method that returns next portion of data :param last_pass: """ lines = 
self.file.get_lines(size=1024 * 1024, last_pass=last_pass) for line in lines: if not line.endswith("\n"): self.partial_buffer += line continue line = "%s%s" % (self.partial_buffer, line) self.partial_buffer = "" line = line.strip() fields = line.split(self.delimiter) data = self._extract_log_data(fields) if data is None: continue t_stamp, label, r_time, con_time, latency, r_code, error = data bytes_count = None yield t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error, '', bytes_count def open_fds(self, filename): """ open gatling simulation.log """ if os.path.isdir(self.basedir): prog = re.compile("^%s-[0-9]+$" % self.dir_prefix) for fname in os.listdir(self.basedir): if prog.match(fname): filename = os.path.join(self.basedir, fname, "simulation.log") break if not filename or not os.path.isfile(filename): self.log.debug('simulation.log not found') return elif os.path.isfile(self.basedir): filename = self.basedir else: self.log.debug('Path not found: %s', self.basedir) return if not os.path.getsize(filename): self.log.debug('simulation.log is empty') else: return open(filename, 'rb')
class ApiritifNoseExecutor(SubprocessedExecutor): """ :type _readers: list[JTLReader] """ def __init__(self): super(ApiritifNoseExecutor, self).__init__() self._tailer = FileReader(file_opener=lambda _: None, parent_logger=self.log) def reporting_setup(self, prefix=None, suffix=None): if not self.reported: self.log.debug("Skipping reporting setup for executor %s", self) return if self.engine.is_functional_mode(): self.reader = ApiritifFuncReader(self.engine, self.log) else: self.reader = ApiritifLoadReader(self.log) if not self.register_reader: self.log.debug("Skipping reader registration for executor %s", self) return if isinstance(self.engine.aggregator, (ConsolidatingAggregator, FunctionalAggregator)): self.engine.aggregator.add_underling(self.reader) def prepare(self): self.script = self.get_script_path() if not self.script: if "requests" in self.get_scenario(): self.script = self.__tests_from_requests() else: raise TaurusConfigError("Nothing to test, no requests were provided in scenario") self.reporting_setup() # no prefix/suffix because we don't fully control report file names def __tests_from_requests(self): filename = self.engine.create_artifact("test_requests", ".py") test_mode = self.execution.get("test-mode", None) or "apiritif" if test_mode == "apiritif": builder = ApiritifScriptGenerator(self.get_scenario(), self.log) builder.verbose = self.__is_verbose() else: wdlog = self.engine.create_artifact('webdriver', '.log') builder = SeleniumScriptBuilder(self.get_scenario(), self.log, wdlog) builder.build_source_code() builder.save(filename) return filename def startup(self): executable = self.settings.get("interpreter", sys.executable) self.env.add_path({"PYTHONPATH": get_full_path(__file__, step_up=3)}) report_type = ".ldjson" if self.engine.is_functional_mode() else ".csv" report_tpl = self.engine.create_artifact("apiritif-", "") + "%s" + report_type cmdline = [executable, "-m", "apiritif.loadgen", '--result-file-template', report_tpl] load = self.get_load() if load.concurrency: cmdline += ['--concurrency', str(load.concurrency)] if load.iterations: cmdline += ['--iterations', str(load.iterations)] if load.hold: cmdline += ['--hold-for', str(load.hold)] if load.ramp_up: cmdline += ['--ramp-up', str(load.ramp_up)] if load.steps: cmdline += ['--steps', str(load.steps)] if self.__is_verbose(): cmdline += ['--verbose'] cmdline += [self.script] self.start_time = time.time() self._start_subprocess(cmdline) self._tailer = FileReader(filename=self.stdout_file, parent_logger=self.log) def has_results(self): if not self.reader: return False return self.reader.read_records > 0 def check(self): for line in self._tailer.get_lines(): if "Adding worker" in line: marker = "results=" pos = line.index(marker) fname = line[pos + len(marker):].strip() self.log.debug("Adding result reader for %s", fname) self.reader.register_file(fname) return super(ApiritifNoseExecutor, self).check() def __log_lines(self): lines = [] for line in self._tailer.get_lines(): if not IGNORED_LINE.match(line): lines.append(line) if lines: self.log.info("\n".join(lines)) def post_process(self): super(ApiritifNoseExecutor, self).post_process() self.__log_lines() def __is_verbose(self): engine_verbose = self.engine.config.get(SETTINGS).get("verbose", False) executor_verbose = self.settings.get("verbose", engine_verbose) return executor_verbose
class PBenchKPIReader(ResultsReader): """ Class to read KPI :type stats_reader: PBenchStatsReader """ def __init__(self, filename, parent_logger, stats_filename): super(PBenchKPIReader, self).__init__() self.log = parent_logger.getChild(self.__class__.__name__) self.file = FileReader(filename=filename, parent_logger=self.log) self.stats_reader = PBenchStatsReader(stats_filename, parent_logger) def _read(self, last_pass=False): """ Generator method that returns next portion of data :type last_pass: bool """ def mcs2sec(val): return int(val) / 1000000.0 self.stats_reader.read_file() lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass) fields = ("timeStamp", "label", "elapsed", "Connect", "Send", "Latency", "Receive", "internal", "bsent", "brecv", "opretcode", "responseCode") dialect = csv.excel_tab() rows = csv.DictReader(lines, fields, dialect=dialect) for row in rows: label = row["label"] try: rtm = mcs2sec(row["elapsed"]) ltc = mcs2sec(row["Latency"]) cnn = mcs2sec(row["Connect"]) # NOTE: actually we have precise send and receive time here... except BaseException: raise ToolError("PBench reader: failed record: %s" % row) if row["opretcode"] != "0": error = strerror(int(row["opretcode"])) rcd = error else: error = None rcd = row["responseCode"] tstmp = int(float(row["timeStamp"]) + rtm) byte_count = int(row["brecv"]) concur = 0 yield tstmp, label, concur, rtm, cnn, ltc, rcd, error, '', byte_count def _calculate_datapoints(self, final_pass=False): for point in super(PBenchKPIReader, self)._calculate_datapoints(final_pass): concurrency = self.stats_reader.get_data(point[DataPoint.TIMESTAMP]) for label_data in viewvalues(point[DataPoint.CURRENT]): label_data[KPISet.CONCURRENCY] = concurrency yield point
class K6LogReader(ResultsReader): def __init__(self, filename, parent_logger): super(K6LogReader, self).__init__() self.log = parent_logger.getChild(self.__class__.__name__) self.file = FileReader(filename=filename, parent_logger=self.log) self.data = { 'timestamp': [], 'label': [], 'r_code': [], 'error_msg': [], 'http_req_duration': [], 'http_req_connecting': [], 'http_req_tls_handshaking': [], 'http_req_waiting': [], 'vus': [], 'data_received': [] } self.position = { 'timestamp': None, 'metric_value': None, 'error': None, 'expected_response': None, 'name': None, 'status': None } def _read(self, last_pass=False): self.lines = list( self.file.get_lines(size=1024 * 1024, last_pass=last_pass)) for line in self.lines: if line.startswith("metric_name"): parts = line[:-1].split(",") self.position['timestamp'] = parts.index('timestamp') self.position['metric_value'] = parts.index('metric_value') self.position['error'] = parts.index('error') self.position['expected_response'] = parts.index( 'expected_response') self.position['name'] = parts.index('name') self.position['status'] = parts.index('status') elif line.startswith("http_reqs"): self.data['timestamp'].append( int(line.split(',')[self.position['timestamp']])) self.data['label'].append( line.split(',')[self.position['name']]) self.data['r_code'].append( line.split(',')[self.position['status']]) error = line.split(',')[self.position['error']] if not error and line.split(',')[ self.position['expected_response']] == 'false': error = f"Response code: {line.split(',')[self.position['status']]}" self.data['error_msg'].append(error) elif line.startswith("http_req_duration"): self.data['http_req_duration'].append( float(line.split(',')[self.position['metric_value']])) elif line.startswith("http_req_connecting"): self.data['http_req_connecting'].append( float(line.split(',')[self.position['metric_value']])) elif line.startswith("http_req_tls_handshaking"): self.data['http_req_tls_handshaking'].append( float(line.split(',')[self.position['metric_value']])) elif line.startswith("http_req_waiting"): self.data['http_req_waiting'].append( float(line.split(',')[self.position['metric_value']])) elif line.startswith("vus") and not line.startswith("vus_max"): self.data['vus'].append( int(float(line.split(',')[self.position['metric_value']]))) elif line.startswith("data_received"): self.data['data_received'].append( float(line.split(',')[self.position['metric_value']])) if self.data['vus'] and len(self.data['data_received']) >= self.data['vus'][0] and \ len(self.data['http_req_waiting']) >= self.data['vus'][0]: for i in range(self.data['vus'][0]): kpi_set = (self.data['timestamp'][0], self.data['label'][0], self.data['vus'][0], self.data['http_req_duration'][0] / 1000, (self.data['http_req_connecting'][0] + self.data['http_req_tls_handshaking'][0]) / 1000, self.data['http_req_waiting'][0] / 1000, self.data['r_code'][0], None if not self.data['error_msg'][0] else self.data['error_msg'][0], '', self.data['data_received'][0]) for key in self.data.keys(): if key != 'vus': self.data[key].pop(0) yield kpi_set self.data['vus'].pop(0)
class DataLogReader(ResultsReader): """ Class to read KPI from data log """ def __init__(self, basedir, parent_logger, dir_prefix): super(DataLogReader, self).__init__() self.concurrency = 0 self.log = parent_logger.getChild(self.__class__.__name__) self.basedir = basedir self.file = FileReader(file_opener=self.open_fds, parent_logger=self.log) self.partial_buffer = "" self.delimiter = "\t" self.dir_prefix = dir_prefix self.guessed_gatling_version = None def _extract_log_gatling_21(self, fields): """ Extract stats from Gatling 2.1 format. :param fields: :return: """ # $scenario $userId ${RequestRecordHeader.value} # ${serializeGroups(groupHierarchy)} $name # 5requestStartDate 6requestEndDate # 7responseStartDate 8responseEndDate # 9status # ${serializeMessage(message)}${serializeExtraInfo(extraInfo)}$Eol" if fields[2].strip() == "USER": if fields[3].strip() == "START": self.concurrency += 1 elif fields[3].strip() == "END": self.concurrency -= 1 if fields[2].strip() != "REQUEST": return None label = fields[4] t_stamp = int(fields[8]) / 1000.0 r_time = (int(fields[8]) - int(fields[5])) / 1000.0 latency = (int(fields[7]) - int(fields[6])) / 1000.0 con_time = (int(fields[6]) - int(fields[5])) / 1000.0 if fields[-1] == 'OK': r_code = '200' else: _tmp_rc = fields[-1].split(" ")[-1] r_code = _tmp_rc if _tmp_rc.isdigit() else 'No RC' if len(fields) >= 11 and fields[10]: error = fields[10] else: error = None return int(t_stamp), label, r_time, con_time, latency, r_code, error def _extract_log_gatling_22(self, fields): """ Extract stats from Gatling 2.2 format :param fields: :return: """ # 0 ${RequestRecordHeader.value} # 1 $scenario # 2 $userId # 3 ${serializeGroups(groupHierarchy)} # 4 $label # 5 $startTimestamp # 6 $endTimestamp # 7 $status # [8] ${serializeMessage(message)}${serializeExtraInfo(extraInfo)} if fields[0].strip() == "USER": if fields[3].strip() == "START": self.concurrency += 1 elif fields[3].strip() == "END": self.concurrency -= 1 if fields[0].strip() != "REQUEST": return None label = fields[4] t_stamp = int(fields[6]) / 1000.0 r_time = (int(fields[6]) - int(fields[5])) / 1000.0 latency = 0.0 con_time = 0.0 if fields[7] == 'OK': r_code = '200' else: _tmp_rc = fields[-1].split(" ")[-1] r_code = _tmp_rc if _tmp_rc.isdigit() else 'No RC' if len(fields) >= 9 and fields[8]: error = fields[8] else: error = None return int(t_stamp), label, r_time, con_time, latency, r_code, error def _guess_gatling_version(self, fields): if fields[0].strip() in ["USER", "REQUEST", "RUN"]: self.log.debug("Parsing Gatling 2.2+ stats") return "2.2+" elif len(fields) >= 3 and fields[2].strip() in ["USER", "REQUEST", "RUN"]: self.log.debug("Parsing Gatling 2.1 stats") return "2.1" else: return None def _extract_log_data(self, fields): if self.guessed_gatling_version is None: self.guessed_gatling_version = self._guess_gatling_version(fields) if self.guessed_gatling_version == "2.1": return self._extract_log_gatling_21(fields) elif self.guessed_gatling_version == "2.2+": return self._extract_log_gatling_22(fields) else: return None def _read(self, last_pass=False): """ Generator method that returns next portion of data :param last_pass: """ lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass) for line in lines: if not line.endswith("\n"): self.partial_buffer += line continue line = "%s%s" % (self.partial_buffer, line) self.partial_buffer = "" line = line.strip() fields = line.split(self.delimiter) data = self._extract_log_data(fields) if data is None: continue t_stamp, label, r_time, con_time, 
latency, r_code, error = data bytes_count = None yield t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error, '', bytes_count def open_fds(self, filename): """ open gatling simulation.log """ if os.path.isdir(self.basedir): prog = re.compile("^%s-[0-9]+$" % self.dir_prefix) for fname in os.listdir(self.basedir): if prog.match(fname): filename = os.path.join(self.basedir, fname, "simulation.log") break if not filename or not os.path.isfile(filename): self.log.debug('simulation.log not found') return elif os.path.isfile(self.basedir): filename = self.basedir else: self.log.debug('Path not found: %s', self.basedir) return if not os.path.getsize(filename): self.log.debug('simulation.log is empty') else: return open(filename, 'rb')
class K6LogReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(K6LogReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.data = {'timestamp': [], 'label': [], 'r_code': [], 'error_msg': [],
                     'http_req_duration': [], 'http_req_connecting': [], 'http_req_tls_handshaking': [],
                     'http_req_waiting': [], 'vus': [], 'data_received': []}

    def _read(self, last_pass=False):
        self.lines = list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass))

        for line in self.lines:
            if line.startswith("http_reqs"):
                self.data['timestamp'].append(int(line.split(',')[1]))
                self.data['label'].append(line.split(',')[8])
                self.data['r_code'].append(line.split(',')[12])
                self.data['error_msg'].append(line.split(',')[4])
            elif line.startswith("http_req_duration"):
                self.data['http_req_duration'].append(float(line.split(',')[2]))
            elif line.startswith("http_req_connecting"):
                self.data['http_req_connecting'].append(float(line.split(',')[2]))
            elif line.startswith("http_req_tls_handshaking"):
                self.data['http_req_tls_handshaking'].append(float(line.split(',')[2]))
            elif line.startswith("http_req_waiting"):
                self.data['http_req_waiting'].append(float(line.split(',')[2]))
            elif line.startswith("vus") and not line.startswith("vus_max"):
                self.data['vus'].append(int(float(line.split(',')[2])))
            elif line.startswith("data_received"):
                self.data['data_received'].append(float(line.split(',')[2]))

            if self.data['vus'] and len(self.data['data_received']) >= self.data['vus'][0] and \
                    len(self.data['http_req_waiting']) >= self.data['vus'][0]:
                for i in range(self.data['vus'][0]):
                    kpi_set = (self.data['timestamp'][0], self.data['label'][0], self.data['vus'][0],
                               self.data['http_req_duration'][0] / 1000,
                               (self.data['http_req_connecting'][0] + self.data['http_req_tls_handshaking'][0]) / 1000,
                               self.data['http_req_waiting'][0] / 1000, self.data['r_code'][0],
                               None if not self.data['error_msg'][0] else self.data['error_msg'][0],
                               '', self.data['data_received'][0])

                    for key in self.data.keys():
                        if key != 'vus':
                            self.data[key].pop(0)

                    yield kpi_set

                self.data['vus'].pop(0)
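A note on units for the reader above: k6 reports its duration metrics in milliseconds, while the yielded KPI tuple carries seconds, hence the divisions by 1000. A small illustrative check of that conversion with made-up metric values:

# Made-up raw k6 metric values (milliseconds), matching the fields the reader collects.
http_req_duration = 250.0        # full request time, ms
http_req_connecting = 12.0       # TCP connect, ms
http_req_tls_handshaking = 35.0  # TLS handshake, ms
http_req_waiting = 180.0         # time to first byte, ms

elapsed = http_req_duration / 1000                                  # 0.25 s
connect = (http_req_connecting + http_req_tls_handshaking) / 1000   # 0.047 s
latency = http_req_waiting / 1000                                   # 0.18 s
assert (elapsed, connect, latency) == (0.25, 0.047, 0.18)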
class TsungStatsReader(ResultsReader): def __init__(self, tsung_basedir, parent_logger): super(TsungStatsReader, self).__init__() self.log = parent_logger.getChild(self.__class__.__name__) self.tsung_basedir = tsung_basedir self.stats_file = FileReader(parent_logger=self.log, file_opener=self.open_stats) self.log_file = FileReader(parent_logger=self.log, file_opener=self.open_log) self.delimiter = ";" self.partial_buffer = "" self.skipped_header = False self.concurrency = 0 def open_stats(self, filename): return self.open_file(ext='dump') def open_log(self, filename): return self.open_file(ext='log') def open_file(self, ext): basedir_contents = os.listdir(self.tsung_basedir) if not basedir_contents: self.log.debug("Tsung artifacts not appeared yet") return if len(basedir_contents) != 1: self.log.warning( "Multiple files in Tsung basedir %s, this shouldn't happen", self.tsung_basedir) return filename = os.path.join(self.tsung_basedir, basedir_contents[0], "tsung." + ext) if not os.path.isfile(filename): self.log.debug("File not appeared yet: %s", filename) return if not os.path.getsize(filename): self.log.debug("File is empty: %s", filename) return self.log.debug('Opening file: %s', filename) return open(filename, mode='rb') def _read_concurrency(self, last_pass): lines = self.log_file.get_lines(size=1024 * 1024, last_pass=last_pass) extractor = re.compile(r'^stats: users (\d+) (\d+)$') for line in lines: match = extractor.match(line.strip()) if not match: continue self.concurrency = int(match.group(2)) self.log.debug("Actual Tsung concurrency: %s", self.concurrency) def _read(self, last_pass=False): self.log.debug("Reading Tsung results") self._read_concurrency(last_pass) lines = self.stats_file.get_lines(size=1024 * 1024, last_pass=last_pass) for line in lines: if not line.endswith("\n"): self.partial_buffer += line continue if not self.skipped_header and line.startswith("#"): self.skipped_header = True continue line = "%s%s" % (self.partial_buffer, line) self.partial_buffer = "" line = line.strip() fields = line.split(self.delimiter) tstamp = int(float(fields[0])) url = fields[4] + fields[5] rstatus = fields[6] rsize = int(fields[7]) etime = float(fields[8]) / 1000.0 trname = fields[9] error = fields[10] or None con_time = 0 latency = 0 yield tstamp, url, self.concurrency, etime, con_time, latency, rstatus, error, trname, rsize
class ApiritifNoseExecutor(SubprocessedExecutor): """ :type _tailer: FileReader """ def __init__(self): super(ApiritifNoseExecutor, self).__init__() self._tailer = FileReader(file_opener=lambda _: None, parent_logger=self.log) def resource_files(self): files = super(ApiritifNoseExecutor, self).resource_files() for source in self.get_scenario().get_data_sources(): files.append(source['path']) return files def create_func_reader(self, report_file): del report_file return ApiritifFuncReader(self.engine, self.log) def create_load_reader(self, report_file): del report_file reader = ApiritifLoadReader(self.log) reader.engine = self.engine return reader def prepare(self): super(ApiritifNoseExecutor, self).prepare() self.script = self.get_script_path() if not self.script: if "requests" in self.get_scenario(): self.script = self.__tests_from_requests() else: raise TaurusConfigError("Nothing to test, no requests were provided in scenario") # todo: requred tools? # path to taurus dir. It's necessary for bzt usage inside tools/helpers self.env.add_path({"PYTHONPATH": get_full_path(BZT_DIR, step_up=1)}) self.reporting_setup() # no prefix/suffix because we don't fully control report file names def __tests_from_requests(self): filename = self.engine.create_artifact("test_requests", ".py") test_mode = self.execution.get("test-mode", "apiritif") scenario = self.get_scenario() if test_mode == "apiritif": builder = ApiritifScriptGenerator(self.engine, scenario, self.label, self.log, test_mode=test_mode) builder.verbose = self.__is_verbose() else: wdlog = self.engine.create_artifact('webdriver', '.log') generate_markers = self.settings.get('generate-flow-markers', None) generate_markers = scenario.get('generate-flow-markers', generate_markers) capabilities = copy.deepcopy(self.settings.get("capabilities")) capabilities.merge(copy.deepcopy(self.execution.get("capabilities"))) scenario_caps = copy.deepcopy(scenario.get("capabilities")) # todo: just for legacy support, remove it later if isinstance(scenario_caps, list): self.log.warning("Obsolete format of capabilities found (list), should be dict") scenario_caps = {item.keys()[0]: item.values()[0] for item in scenario_caps} capabilities.merge(scenario_caps) remote = self.settings.get("remote", None) remote = self.execution.get("remote", remote) remote = scenario.get("remote", remote) builder = ApiritifScriptGenerator( self.engine, scenario, self.label, self.log, wdlog, utils_file=os.path.join(RESOURCES_DIR, "selenium_taurus_extras.py"), ignore_unknown_actions=self.settings.get("ignore-unknown-actions", False), generate_markers=generate_markers, capabilities=capabilities, wd_addr=remote, test_mode=test_mode) builder.build_source_code() builder.save(filename) if isinstance(self.engine.aggregator, ConsolidatingAggregator) and isinstance(builder, ApiritifScriptGenerator): self.engine.aggregator.ignored_labels.extend(builder.service_methods) return filename def startup(self): executable = self.settings.get("interpreter", sys.executable) report_type = ".ldjson" if self.engine.is_functional_mode() else ".csv" report_tpl = self.engine.create_artifact("apiritif", ".") + "%s" + report_type cmdline = [executable, "-m", "apiritif.loadgen", '--result-file-template', report_tpl] load = self.get_load() if load.concurrency: cmdline += ['--concurrency', str(load.concurrency)] if load.iterations: cmdline += ['--iterations', str(load.iterations)] if load.hold: cmdline += ['--hold-for', str(load.hold)] if load.ramp_up: cmdline += ['--ramp-up', str(load.ramp_up)] if load.steps: 
cmdline += ['--steps', str(load.steps)] if self.__is_verbose(): cmdline += ['--verbose'] cmdline += [self.script] self.process = self._execute(cmdline) self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log) def has_results(self): if not self.reader: return False return self.reader.read_records @staticmethod def _normalize_label(label): for char in ":/": if char in label: label = label.replace(char, '_') return label def _check_stdout(self): for line in self._tailer.get_lines(): if "Adding worker" in line: marker = "results=" pos = line.index(marker) fname = line[pos + len(marker):].strip() self.log.debug("Adding result reader for %s", fname) self.reader.register_file(fname) elif "Transaction started" in line: colon = line.index('::') values = { part.split('=')[0]: part.split('=')[1] for part in line[colon + 2:].strip().split(',') } label = self._normalize_label(values['name']) start_time = float(values['start_time']) self.transaction_started(label, start_time) elif "Transaction ended" in line: colon = line.index('::') values = { part.split('=')[0]: part.split('=')[1] for part in line[colon + 2:].strip().split(',') } label = self._normalize_label(values['name']) duration = float(values['duration']) self.transacion_ended(label, duration) def check(self): self._check_stdout() return super(ApiritifNoseExecutor, self).check() def __log_lines(self): lines = [] for line in self._tailer.get_lines(): if not IGNORED_LINE.match(line): lines.append(line) if lines: self.log.info("\n".join(lines)) def post_process(self): self._check_stdout() self.__log_lines() self._tailer.close() super(ApiritifNoseExecutor, self).post_process() def __is_verbose(self): engine_verbose = self.engine.config.get(SETTINGS).get("verbose", False) executor_verbose = self.settings.get("verbose", engine_verbose) return executor_verbose