def __get_jtl_shorthands(self, configs):
    """
    Generate json file with execution, executor and scenario settings
    :type configs: list
    :return: list
    """
    jtls = []
    for filename in configs[:]:
        if filename.lower().endswith(".jtl"):
            jtls.append(filename)
            configs.remove(filename)

    if jtls:
        self.log.debug("Adding JTL shorthand config for: %s", jtls)
        fds = NamedTemporaryFile(prefix="jtl_", suffix=".json")
        fname = fds.name
        fds.close()

        config = Configuration()

        for jtl in jtls:
            piece = BetterDict.from_dict({"executor": "external-results-loader", "data-file": jtl})
            config.get(EXEC, [], force_set=True).append(piece)

        config.dump(fname, Configuration.JSON)

        return [fname]
    else:
        return []
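# Illustrative sketch (not bzt code): the shorthand config written above is a plain JSON file
# whose execution list points an external-results-loader at each JTL file, assuming EXEC resolves
# to the "execution" key. "results.jtl" is a hypothetical file name used only for this example.
import json

shorthand = {
    "execution": [
        {"executor": "external-results-loader", "data-file": "results.jtl"},
    ],
}
print(json.dumps(shorthand, indent=2))  # roughly what the temporary jtl_*.json contains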
def test_log_messages_percentiles(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict.from_dict({"failed-labels": False, "percentiles": True, "summary": False,
                                           "test-duration": False, "summary-labels": False})
    self.sniff_log(obj.log)
    obj.startup()
    obj.shutdown()
    obj.aggregated_second(self.__get_datapoint())
    obj.post_process()
    target_output = ("Average times: total 0.001, latency 0.000, connect 0.000\n"
                     "Percentiles:\n"
                     "+---------------+---------------+\n"
                     "| Percentile, % | Resp. Time, s |\n"
                     "+---------------+---------------+\n"
                     "| 0.0 | 0.0 |\n"
                     "| 50.0 | 0.0 |\n"
                     "| 90.0 | 0.001 |\n"
                     "| 95.0 | 0.001 |\n"
                     "| 99.0 | 0.003 |\n"
                     "| 99.9 | 0.008 |\n"
                     "| 100.0 | 0.081 |\n"
                     "+---------------+---------------+\n")
    self.assertEqual(target_output, self.log_recorder.info_buff.getvalue())
def __apply_single_override(self, dest, name, value):
    """
    Apply single override
    :type name: str
    :type value: str
    """
    self.log.debug("Applying %s=%s", name, value)
    parts = [(int(x) if is_int(x) else x) for x in name.split(".")]
    pointer = dest
    for index, part in enumerate(parts[:-1]):
        self.__ensure_list_capacity(pointer, part, parts[index + 1])

        if isinstance(part, int):
            if part < 0:
                if isinstance(parts[index + 1], int):
                    pointer.append([])
                else:
                    pointer.append(BetterDict())
                pointer = pointer[-1]
            else:
                pointer = pointer[part]
        elif isinstance(parts[index + 1], int) and isinstance(pointer, dict):
            pointer = pointer.get(part, [], force_set=True)
        else:
            pointer = pointer.get(part, force_set=True)

    self.__ensure_list_capacity(pointer, parts[-1])
    self.log.debug("Applying: [%s]=%s", parts[-1], value)

    if isinstance(parts[-1], str) and parts[-1][0] == '*':
        return self.__apply_mult_override(pointer, parts[-1][1:], value)

    if isinstance(parts[-1], str) and parts[-1][0] == '^':
        item = parts[-1][1:]

        if isinstance(pointer, list):
            item = int(item)
            if -len(pointer) <= item < len(pointer):
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        elif isinstance(pointer, dict):
            if item in pointer:
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        else:
            raise ValueError("Cannot handle override %s in non-iterable type %s" % (item, pointer))
    else:
        parsed_value = self.__parse_override_value(value)
        self.log.debug("Parsed override value: %r -> %r (%s)", value, parsed_value, type(parsed_value))
        if isinstance(parsed_value, dict):
            parsed_value = BetterDict.from_dict(parsed_value)
        if isinstance(pointer, list) and parts[-1] < 0:
            pointer.append(parsed_value)
        else:
            pointer[parts[-1]] = parsed_value
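# Illustrative sketch (not bzt code): how a dotted override path such as "execution.0.concurrency"
# addresses a nested config. Names and values are made up for this example; the real method above
# additionally handles negative indices, the '^' (delete) and '*' (multi) prefixes and BetterDict semantics.
def apply_override(dest, name, value):
    parts = [int(p) if p.lstrip("-").isdigit() else p for p in name.split(".")]
    pointer = dest
    for part in parts[:-1]:
        pointer = pointer[part]  # descend one level per path segment
    pointer[parts[-1]] = value

config = {"execution": [{"concurrency": 1}]}
apply_override(config, "execution.0.concurrency", 10)
assert config == {"execution": [{"concurrency": 10}]}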
def _get_scenario_label(self, name, scenarios):
    if name is None:  # get current scenario
        exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
        label = self.execution.get('scenario', exc)

        is_script = isinstance(label, string_types) and label not in scenarios and \
            os.path.exists(self.engine.find_file(label))
        if isinstance(label, list):
            msg = "Invalid content of scenario, list type instead of dict or string: %s"
            raise TaurusConfigError(msg % label)
        if isinstance(label, dict) or is_script:
            self.log.debug("Extract %s into scenarios" % label)
            if isinstance(label, string_types):
                scenario = BetterDict.from_dict({Scenario.SCRIPT: label})
            else:
                scenario = label

            path = self.get_script_path(scenario=Scenario(self.engine, scenario))
            if path:
                label = os.path.basename(path)
            if not path or label in scenarios:
                hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                label = 'autogenerated_' + hash_str[-10:]

            scenarios[label] = scenario
            self.execution['scenario'] = label

        self.label = label
    else:  # get scenario by name
        label = name

    return label
def __pack_dirs(self, source_list):
    result_list = []  # files for upload
    packed_list = []  # files for unpacking

    for source in source_list:
        source = get_full_path(source)
        if os.path.isfile(source):
            result_list.append(source)
        else:  # source is dir
            self.log.debug("Compress directory '%s'", source)
            base_dir_name = os.path.basename(source)
            zip_name = self.engine.create_artifact(base_dir_name, '.zip')
            relative_prefix_len = len(os.path.dirname(source))
            with zipfile.ZipFile(zip_name, 'w') as zip_file:
                for _file in get_files_recursive(source):
                    zip_file.write(_file, _file[relative_prefix_len:])
            result_list.append(zip_name)
            packed_list.append(base_dir_name + '.zip')

    if packed_list:
        services = self.engine.config.get(Service.SERV, [], force_set=True)
        unpacker = BetterDict.from_dict({'module': Unpacker.UNPACK, Unpacker.FILES: packed_list, 'run-at': 'local'})
        services.append(unpacker)

    return result_list
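# Illustrative sketch (not bzt code): packing a directory into a zip while keeping archive paths
# relative to the directory's parent, as the method above does. "test_dir" is a made-up name.
import os
import zipfile

source = os.path.abspath("test_dir")
relative_prefix_len = len(os.path.dirname(source))
with zipfile.ZipFile("test_dir.zip", 'w') as zip_file:
    for root, _, files in os.walk(source):
        for name in files:
            full_path = os.path.join(root, name)
            zip_file.write(full_path, full_path[relative_prefix_len:])  # e.g. "/test_dir/data.csv"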
def test_ramp_up_exclude(self):
    self.obj.track_percentiles = [50]
    self.obj.prepare()
    self.obj.engine.config['settings']['ramp-up-exclude'] = True
    self.obj.engine.config['execution'] = [
        {'scenario': 'first', 'ramp-up': 50},
        {'scenario': 'second', 'ramp-up': '1s'},
        {'scenario': 'third'}]
    self.obj.engine.config['scenarios'] = BetterDict.from_dict({
        'first': {'requests': [{'url': 'first'}]},
        'second': {'requests': [{'url': 'second'}]},
        'third': {'requests': [{'url': 'third'}]}})
    reader = get_success_reader()
    self.obj.add_underling(reader)
    self.obj.shutdown()
    self.obj.post_process()
    self.assertEquals(self.obj.cumulative, {})
def _extract_scenario_from_soapui(self, base_scenario, script_path):
    test_case = base_scenario.get("test-case", None)
    converter = SoapUIScriptConverter(self.log)
    conv_config = converter.convert_script(script_path)
    conv_scenarios = conv_config["scenarios"]
    scenario_name, conv_scenario = converter.find_soapui_test_case(test_case, conv_scenarios)

    new_name = scenario_name
    counter = 1
    while new_name in self.engine.config["scenarios"]:
        new_name = scenario_name + ("-%s" % counter)
        counter += 1

    if new_name != scenario_name:
        self.log.info("Scenario name '%s' is already taken, renaming to '%s'", scenario_name, new_name)
        scenario_name = new_name

    merged_scenario = BetterDict.from_dict(conv_scenario)
    merged_scenario.merge(base_scenario.data)
    for field in [Scenario.SCRIPT, "test-case"]:
        if field in merged_scenario:
            merged_scenario.pop(field)

    return scenario_name, merged_scenario
def prepare_locations(self, executors, engine_config):
    available_locations = {}
    is_taurus4 = True
    workspace = Workspace(self._project, {'id': self._project['workspaceId']})
    for loc in workspace.locations(include_private=is_taurus4):
        available_locations[loc['id']] = loc

    if LOC in engine_config and not is_taurus4:
        self.log.warning("Deprecated test API doesn't support global locations")

    for executor in executors:
        if LOC in executor.execution and isinstance(executor.execution[LOC], dict):
            exec_locations = executor.execution[LOC]
            self._check_locations(exec_locations, available_locations)
        elif LOC in engine_config and is_taurus4:
            self._check_locations(engine_config[LOC], available_locations)
        else:
            default_loc = self._get_default_location(available_locations)
            executor.execution[LOC] = BetterDict.from_dict({default_loc: 1})

        executor.get_load()  # we need it to resolve load settings into full form
def _extract_config(self, project, test_suites, target_test_case=None):
    execution = []
    scenarios = {}

    project_properties = self._extract_properties(project, key_prefix="#Project#")

    for suite in test_suites:
        suite_props = BetterDict.from_dict(project_properties)
        suite_props.merge(self._extract_properties(suite, key_prefix="#TestSuite#"))

        test_cases = suite.findall('.//con:testCase', namespaces=self.NAMESPACES)
        for case in test_cases:
            case_name = case.get("name")
            scenario_name, scenario = self._extract_test_case(case, suite, suite_props)

            load_exec = self._extract_execution(case)
            load_exec['scenario'] = scenario_name
            self.log.debug("Extracted execution for scenario %s", scenario_name)

            if not scenario["requests"]:
                self.log.warning("No requests extracted for scenario %s, skipping it" % scenario_name)
                continue

            if target_test_case is None or target_test_case == case_name:
                self.log.debug("Extracted scenario: %s", scenario_name)
                scenarios[scenario_name] = scenario
                execution.append(load_exec)

    return {
        "execution": execution,
        "scenarios": scenarios,
    }
def _build_request(self, request, scenario):
    path = self._get_request_path(request, scenario)
    http = "%s %s HTTP/1.1\r\n" % (request.method, path)
    headers = BetterDict.from_dict({"Host": self.hostname})
    if not scenario.get("keepalive", True):
        headers.merge({"Connection": 'close'})  # HTTP/1.1 implies keep-alive by default

    body = ""
    if isinstance(request.body, dict):
        if request.method != "GET":
            body = urlencode(request.body)
    elif isinstance(request.body, string_types):
        body = request.body
    elif request.body:
        msg = "Cannot handle 'body' option of type %s: %s"
        raise TaurusConfigError(msg % (type(request.body), request.body))

    if body:
        headers.merge({"Content-Length": len(body)})

    headers.merge(scenario.get_headers())
    headers.merge(request.headers)
    for header, value in iteritems(headers):
        http += "%s: %s\r\n" % (header, value)
    http += "\r\n%s" % (body,)
    return http
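# Illustrative sketch (not bzt code): the string assembled above is a raw HTTP/1.1 message.
# The method, path, host, headers and body below are made up for this example.
method, path, host = "POST", "/login", "example.com"
body = "user=admin&pass=secret"
headers = {"Host": host, "Content-Length": len(body), "Content-Type": "application/x-www-form-urlencoded"}

http = "%s %s HTTP/1.1\r\n" % (method, path)
for name, value in headers.items():
    http += "%s: %s\r\n" % (name, value)
http += "\r\n%s" % body
# -> "POST /login HTTP/1.1\r\nHost: example.com\r\n...\r\n\r\nuser=admin&pass=secret"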
def __get_jmx_shorthands(self, configs):
    """
    Generate json file with execution, executor and scenario settings
    :type configs: list
    :return: list
    """
    jmxes = []
    for filename in configs[:]:
        if filename.lower().endswith(".jmx"):
            jmxes.append(filename)
            configs.remove(filename)

    if jmxes:
        self.log.debug("Adding JMX shorthand config for: %s", jmxes)
        fds = NamedTemporaryFile(prefix="jmx_", suffix=".json")
        fname = fds.name
        fds.close()

        config = Configuration()

        for jmx_file in jmxes:
            piece = BetterDict.from_dict({"executor": "jmeter", "scenario": {"script": jmx_file}})
            config.get(EXEC, [], force_set=True).append(piece)  # Does it break single execution?

        config.dump(fname, Configuration.JSON)

        return [fname]
    else:
        return []
def install_required_tools(self):
    local_path = self.settings.get("path", None)
    config = {}
    if local_path:
        config = BetterDict.from_dict({"config": local_path})
    self._add_jar_tool(TestNG, config=config)
    super(TestNGTester, self).install_required_tools()
def setUp(self):
    super(TestTsungExecutor, self).setUp()
    self.obj = TsungExecutor()
    self.obj.engine = EngineEmul()
    self.obj.env = self.obj.engine.env
    self.obj.settings = BetterDict.from_dict({"path": get_res_path(TOOL_NAME)})
    self.obj.execution = BetterDict()
def __get_jmx_shorthands(self, configs):
    """
    Generate json file with execution, executor and scenario settings
    :type configs: list
    :return: list
    """
    jmxes = []
    for filename in configs[:]:
        if filename.lower().endswith(".jmx"):
            jmxes.append(filename)
            configs.remove(filename)

    if jmxes:
        self.log.debug("Adding JMX shorthand config for: %s", jmxes)
        fds = NamedTemporaryFile(prefix="jmx_", suffix=".json")
        fname = fds.name
        fds.close()

        config = Configuration()

        for jmx_file in jmxes:
            piece = BetterDict.from_dict({"executor": "jmeter", "scenario": {"script": jmx_file}})
            config.get(ScenarioExecutor.EXEC, [], force_set=True).append(piece)  # Does it break single execution?

        config.dump(fname, Configuration.JSON)

        return [fname]
    else:
        return []
def filter_conf(conf):
    if not any(key in conf for key in prov_configs.values()):  # if no specific provisioning is configured
        conf = BetterDict.from_dict({prov_configs['local']: conf})  # all configuration is local
    return conf.get(current_prov)
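# Illustrative sketch (not bzt code): when settings are not split per provisioning, they are
# treated as belonging to the "local" provisioning, so other provisionings see nothing.
# The key names mirror the closure above; the settings values are made up.
prov_configs = {"local": "local", "cloud": "cloud"}

def filter_conf_sketch(conf, current_prov):
    if not any(key in conf for key in prov_configs.values()):
        conf = {prov_configs["local"]: conf}  # all configuration is local
    return conf.get(current_prov, {})

settings = {"env": {"VAR": "1"}}
assert filter_conf_sketch(settings, "local") == {"env": {"VAR": "1"}}
assert filter_conf_sketch(settings, "cloud") == {}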
def __init__(self, config, **kwargs):
    if not isinstance(config, dict):
        config = BetterDict.from_dict({"path": config})

    version = config.get("version", self.VERSION)
    version = str(version).split('.')
    version.extend(['0'] * (3 - len(version)))
    short_version = '.'.join(version[:2])  # 2 elements
    full_version = '.'.join(version)  # 3+ elements

    remote_path = config.get("remote-path", self.REMOTE_PATH)
    remote_path = remote_path.format(short_version=short_version, full_version=full_version)

    tool_file = config.get("tool-file", self.TOOL_FILE)
    tool_file = tool_file.format(version=full_version)
    local_path = config.get("path", JarTool.LOCAL_PATH)
    local_path = local_path.format(tool_file=tool_file)

    download_link = config.get("download-link", JarTool.URL)
    download_link = download_link.format(remote_addr=self.REMOTE_ADDR, remote_path=remote_path)

    super(SeleniumServer, self).__init__(tool_path=local_path, download_link=download_link,
                                         version=full_version, **kwargs)
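# Illustrative sketch (not bzt code): how the version string is normalized and substituted into
# the path templates above. The template string below is a made-up example, not the real
# REMOTE_PATH constant.
version = str("3.141").split('.')           # ['3', '141']
version.extend(['0'] * (3 - len(version)))  # pad to three components: ['3', '141', '0']
short_version = '.'.join(version[:2])       # '3.141'
full_version = '.'.join(version)            # '3.141.0'

remote_path = "/dist/{short_version}/selenium-server-{full_version}.jar"
print(remote_path.format(short_version=short_version, full_version=full_version))
# /dist/3.141/selenium-server-3.141.0.jar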
def __get_jtl_shorthands(self, configs):
    """
    Generate json file with execution, executor and scenario settings
    :type configs: list
    :return: list
    """
    jtls = []
    for filename in configs[:]:
        if filename.lower().endswith(".jtl"):
            jtls.append(filename)
            configs.remove(filename)

    if jtls:
        self.log.debug("Adding JTL shorthand config for: %s", jtls)
        fds = NamedTemporaryFile(prefix="jtl_", suffix=".json")
        fname = fds.name
        fds.close()

        config = Configuration()

        for jtl in jtls:
            piece = BetterDict.from_dict({"executor": "external-results-loader", "data-file": jtl})
            config.get(ScenarioExecutor.EXEC, [], force_set=True).append(piece)

        config.dump(fname, Configuration.JSON)

        return [fname]
    else:
        return []
def test_csv_report_fieldname_order(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    csv_report = obj.engine.create_artifact("report", ".csv")
    obj.parameters = BetterDict.from_dict({
        "dump-csv": csv_report,
    })

    obj.startup()
    obj.aggregated_second(self.__get_datapoint(ts=90))
    obj.aggregated_second(self.__get_datapoint(ts=100))
    obj.shutdown()
    obj.post_process()

    self.assertTrue(os.path.exists(csv_report))
    with open(csv_report) as fds:
        fieldnames = fds.readline().strip().split(",")

    perc_fields = [float(name[5:]) for name in fieldnames if name.startswith('perc_')]
    self.assertTrue(sorted(perc_fields) == perc_fields)

    rc_fields = [float(name[3:]) for name in fieldnames if name.startswith('rc_')]
    self.assertTrue(sorted(rc_fields) == rc_fields)
def test_log_messages_summary_labels(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict.from_dict({"summary-labels": True, "percentiles": False, "summary": False,
                                           "test-duration": False})
    self.sniff_log(obj.log)

    obj.startup()
    obj.shutdown()
    obj.aggregated_second(self.__get_datapoint())
    obj.post_process()
    expected = ("Request label stats:\n"
                "+----------------------------------+--------+---------+--------+-----------+\n"
                "| label | status | succ | avg_rt | error |\n"
                "+----------------------------------+--------+---------+--------+-----------+\n"
                "| http://192.168.1.1/anotherquery | FAIL | 0.00% | 0.001 | Forbidden |\n"
                "| http://192.168.1.1/somequery | OK | 100.00% | 0.001 | |\n"
                "| http://192.168.100.100/somequery | OK | 100.00% | 0.001 | |\n"
                "+----------------------------------+--------+---------+--------+-----------+\n")
    self.assertIn(expected, self.log_recorder.info_buff.getvalue())
def _load_tasks(self, stage, container):
    if not isinstance(self.parameters.get(stage, []), list):
        self.parameters[stage] = [self.parameters[stage]]

    for index, stage_task in enumerate(self.parameters.get(stage, [])):
        stage_task = ensure_is_dict(self.parameters[stage], index, "command")
        task_config = self.parameters[stage][index]
        default_cwd = self.settings.get("default-cwd", None)
        cwd = self.engine.find_file(task_config.get("cwd", default_cwd))
        if cwd is None:
            working_dir = self.engine.default_cwd
        elif cwd == 'artifacts-dir':
            working_dir = self.engine.artifacts_dir
        else:
            working_dir = cwd

        # todo: move it to new env
        env = BetterDict.from_dict({k: os.environ.get(k) for k in os.environ.keys()})
        env.merge(self.settings.get('env'))
        env.merge(task_config.get('env'))
        env.merge({"PYTHONPATH": working_dir})
        if os.getenv("PYTHONPATH"):
            env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']
        env[ARTIFACTS_DIR_ENVVAR] = self.engine.artifacts_dir

        for name, value in iteritems(env):
            env[str(name)] = str(value)

        task = Task(task_config, self.log, working_dir, env)
        container.append(task)
        self.log.debug("Added %s task: %s", stage, stage_task)
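# Illustrative sketch (not bzt code): the task environment built above layers the OS environment,
# module-level settings and per-task settings (later merges win), then prepends any pre-existing
# PYTHONPATH to the task's working dir. Variable names and values are made up.
import os

env = dict(os.environ)                   # start from the OS environment
env.update({"VAR": "from-settings"})     # module-level 'env' settings
env.update({"VAR": "from-task"})         # per-task 'env' overrides module settings
env["PYTHONPATH"] = "/tasks/workdir"
if os.getenv("PYTHONPATH"):
    env["PYTHONPATH"] = os.getenv("PYTHONPATH") + os.pathsep + env["PYTHONPATH"]
print(env["VAR"])  # from-task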
def test_full_generation(self):
    # check mqtt protocol handling: getting request, parsing of it, generation of jmx
    engine = EngineEmul()
    jmeter = MockJMeterExecutor()
    jmeter.engine = engine
    jmeter.configure({'scenario': 'sc1'})
    scenario = BetterDict.from_dict({
        'protocol': 'mqtt',
        'requests': [
            {'cmd': 'connect', 'addr': 'server.com'},
            {'cmd': 'disconnect'}]})
    jmeter.engine.config.merge({'scenarios': {'sc1': scenario}})
    jmeter.settings.merge({'protocol-handlers': {'mqtt': 'bzt.jmx.mqtt.MQTTProtocolHandler'}})
    builder = JMeterScenarioBuilder(jmeter)

    elements = builder.compile_scenario(jmeter.get_scenario())
    self.assertEqual(4, len(elements))

    # appropriate classes have been generated
    self.assertEqual('net.xmeter.samplers.ConnectSampler', elements[0].attrib['testclass'])
    self.assertEqual('net.xmeter.samplers.DisConnectSampler', elements[2].attrib['testclass'])
def test_different_prov_context(self):
    command = "my_echo"
    var_name, var_value = "VAR_NAME", "VAR_VALUE"
    target_prov = "cloud"
    self.obj.parameters.merge({target_prov: {"startup": [{"command": command}]}})
    self.obj.settings.get("env", force_set=True).update({var_name: var_value})
    self.obj.settings = BetterDict.from_dict({target_prov: self.obj.settings})
    self.obj.engine.config.merge({"provisioning": "local"})  # the same as the setUp value, just for emphasis

    self.obj.prepare()
    commands = [task.command for task in
                self.obj.prepare_tasks + self.obj.startup_tasks +
                self.obj.check_tasks + self.obj.shutdown_tasks + self.obj.postprocess_tasks]
    env_vars = self.obj.env.get()

    # mustn't be handled because settings are specified for a different prov
    self.assertNotEqual(env_vars.get(var_name), var_value)
    self.assertNotIn(command, commands)
def test_prepare_no_filename_in_settings(self):
    obj = JUnitXMLReporter()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict.from_dict({"data-source": "sample-labels"})

    obj.prepare()
    datapoint = DataPoint(0, [])

    cumul_data = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
            0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
            0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
            0.019: 1, 0.015: 1}),
        KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                         'urls': Counter({'http://192.168.25.8/': 7373}), KPISet.RESP_CODES: '403'}],
        KPISet.STDEV_RESP_TIME: 0.04947974228872108,
        KPISet.AVG_LATENCY: 0.0002825639815220692,
        KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 59314,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005440536804127192,
        KPISet.FAILURES: 29656})

    datapoint[DataPoint.CUMULATIVE][""] = cumul_data
    obj.aggregated_second(datapoint)
    obj.post_process()

    self.assertTrue(os.path.exists(obj.report_file_path))
def test_report_criteria_without_label(self):
    obj = JUnitXMLReporter()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict()

    pass_fail = PassFailStatus()
    crit_cfg = BetterDict.from_dict({'stop': True, 'fail': True, 'timeframe': -1, 'threshold': '150ms',
                                     'condition': '<', 'subject': 'avg-rt'})
    criteria = DataCriterion(crit_cfg, pass_fail)
    pass_fail.criteria.append(criteria)
    criteria.is_triggered = True

    obj.engine.reporters.append(pass_fail)

    path_from_config = tempfile.mktemp(suffix='.xml', prefix='junit-xml_passfail', dir=obj.engine.artifacts_dir)

    obj.parameters.merge({"filename": path_from_config, "data-source": "pass-fail"})
    obj.prepare()
    obj.last_second = DataPoint(0)
    obj.post_process()
def get_header(self, name):
    def dic_lower(dic):
        return {str(k).lower(): str(dic[k]).lower() for k in dic}

    scenario_headers = dic_lower(self.scenario.get_headers())
    request_headers = dic_lower(self.headers)
    headers = BetterDict.from_dict(scenario_headers)
    headers.merge(request_headers)
    return headers.get(name.lower(), None)
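# Illustrative sketch (not bzt code): header lookup is case-insensitive and request-level headers
# override scenario-level ones. The header values are made up for this example.
scenario_headers = {"Content-Type": "text/plain", "X-Common": "scenario"}
request_headers = {"content-type": "application/json"}

merged = {k.lower(): v.lower() for k, v in scenario_headers.items()}
merged.update({k.lower(): v.lower() for k, v in request_headers.items()})  # request wins
assert merged["content-type"] == "application/json"
assert merged["x-common"] == "scenario"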
def string_to_config(crit_config):
    """
    Parse string like "avg-rt of label>100ms for 1m, continue as non-failed" into config dict

    :type crit_config: str
    :rtype: dict
    """
    res = BetterDict.from_dict({
        "subject": None,
        "condition": None,
        "threshold": None,
        "logic": "for",
        "timeframe": 0,
        "label": "",
        "stop": True,
        "fail": True,
        "message": None,
    })

    if ':' in crit_config:
        res['message'] = crit_config[:crit_config.index(':')].strip()
        crit_config = crit_config[crit_config.index(':') + 1:].strip()

    if ',' in crit_config:
        crit_str = crit_config[:crit_config.index(',')].strip()
        action_str = crit_config[crit_config.index(',') + 1:].strip()
    else:
        crit_str = crit_config
        action_str = ""

    crit_pat = re.compile(r"([\w?*.-]+)(\s*of\s*([\S ]+))?\s*([<>=]+)\s*(\S+)(\s+(for|within|over)\s+(\S+))?")
    crit_match = crit_pat.match(crit_str.strip())
    if not crit_match:
        raise TaurusConfigError("Criteria string is malformed in its condition part: %s" % crit_str)
    crit_groups = crit_match.groups()
    res["subject"] = crit_groups[0]
    res["condition"] = crit_groups[3]
    res["threshold"] = crit_groups[4]
    if crit_groups[2]:
        res["label"] = crit_groups[2]
    if crit_groups[6]:
        res["logic"] = crit_groups[6]
    if crit_groups[7]:
        res["timeframe"] = crit_groups[7]

    if action_str:
        action_pat = re.compile(r"(stop|continue)(\s+as\s+(failed|non-failed))?")
        act_match = action_pat.match(action_str.strip())
        if not act_match:
            raise TaurusConfigError("Criteria string is malformed in its action part: %s" % action_str)
        action_groups = act_match.groups()
        res["stop"] = action_groups[0] != "continue"
        res["fail"] = action_groups[2] is None or action_groups[2] == "failed"

    return res
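# Illustrative sketch (not bzt code): what the criteria regex above extracts from the example
# string given in the docstring. For that input, stop becomes False ("continue") and fail becomes
# False ("non-failed").
import re

crit_pat = re.compile(r"([\w?*.-]+)(\s*of\s*([\S ]+))?\s*([<>=]+)\s*(\S+)(\s+(for|within|over)\s+(\S+))?")
groups = crit_pat.match("avg-rt of label>100ms for 1m").groups()
assert groups[0] == "avg-rt"                    # subject
assert groups[2] == "label"                     # label
assert groups[3] == ">"                         # condition
assert groups[4] == "100ms"                     # threshold
assert (groups[6], groups[7]) == ("for", "1m")  # logic and timeframe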
def _add_jar_tool(self, req_tool_class, **kwargs):
    # todo: it's for backward compatibility only, remove it later
    if "local_path" in kwargs:
        local_path = kwargs.pop("local_path")
        if local_path:
            kwargs["config"] = BetterDict.from_dict({"config": local_path})

    req_tool = self._get_tool(req_tool_class, **kwargs)
    self._tools.append(req_tool)
    self.class_path.append(req_tool.tool_path)
def test_log_messages_failed_labels(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict.from_dict({"failed-labels": True, "percentiles": False, "summary": False,
                                           "test-duration": False})
    self.sniff_log(obj.log)

    obj.startup()
    obj.shutdown()
    obj.aggregated_second(self.__get_datapoint())
    obj.post_process()

    self.assertIn("29656 failed samples: http://192.168.1.1/anotherquery\n",
                  self.log_recorder.info_buff.getvalue())
def test_server_agent(self):
    obj = Monitoring()
    obj.engine = EngineEmul()
    obj.parameters.merge({
        "server-agent": [{
            "address": "127.0.0.1:4444",
            "logging": "True",
            "metrics": ["cpu", "disks"]
        }, {
            "address": "10.0.0.1",
            "metrics": ["something1", "something2"]
        }]
    })

    listener = LoggingMonListener()
    obj.add_listener(listener)

    widget = obj.get_widget()
    obj.add_listener(widget)

    crit_conf = BetterDict.from_dict({"condition": ">", "threshold": 5, "subject": "127.0.0.1:4444/cpu"})
    criteria = MonitoringCriteria(crit_conf, obj)
    obj.add_listener(criteria)

    obj.client_classes = {'server-agent': ServerAgentClientEmul}

    obj.prepare()
    obj.startup()

    for i in range(1, 10):
        obj.clients[0].socket.recv_data += b("%s\t%s\t\n" % (i, i * 10))
        obj.check()
        ROOT_LOGGER.debug("Criteria state: %s", criteria)
        time.sleep(obj.engine.check_interval)

    obj.shutdown()
    obj.post_process()

    self.assertEquals(b("test\ninterval:1\nmetrics:cpu\tdisks\nexit\n"), obj.clients[0].socket.sent_data)

    if PY3:
        self.assertIsNotNone(obj.clients[0].logs_file)
        with open(obj.clients[0].logs_file) as serveragent_logs:
            logs_reader = csv.reader(serveragent_logs)
            logs_reader = list(logs_reader)
        self.assertEquals(['ts', 'cpu', 'disks'], logs_reader[0])
        for i in range(1, 10):
            self.assertEquals([str(i), str(i * 10)], logs_reader[i][1:])
def test_log_messages_samples_count(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict.from_dict({"failed-labels": False, "percentiles": False, "summary": True,
                                           "test-duration": False, "summary-labels": False})
    self.sniff_log(obj.log)
    obj.aggregated_second(self.__get_datapoint())
    obj.startup()
    obj.shutdown()
    obj.post_process()
    self.assertEqual("Samples count: 59314, 50.00% failures\n", self.log_recorder.info_buff.getvalue())
def get_scenario(self, name=None, cache_scenario=True):
    """
    Returns scenario dict, extract if scenario is inlined

    :return: DictOfDicts
    """
    if name is None and self.__scenario is not None:
        return self.__scenario

    scenarios = self.engine.config.get("scenarios", force_set=True)

    if name is None:  # get current scenario
        exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
        label = self.execution.get('scenario', exc)

        is_script = isinstance(label, string_types) and label not in scenarios and \
            os.path.exists(self.engine.find_file(label))
        if isinstance(label, list):
            msg = "Invalid content of scenario, list type instead of dict or string: %s"
            raise TaurusConfigError(msg % label)
        if isinstance(label, dict) or is_script:
            self.log.debug("Extract %s into scenarios" % label)
            if isinstance(label, string_types):
                scenario = BetterDict.from_dict({Scenario.SCRIPT: label})
            else:
                scenario = label

            path = self.get_script_path(scenario=Scenario(self.engine, scenario))
            if path:
                label = os.path.basename(path)
            if not path or label in scenarios:
                hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                label = 'autogenerated_' + hash_str[-10:]

            scenarios[label] = scenario
            self.execution['scenario'] = label

        self.label = label
    else:  # get scenario by name
        label = name

    exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
    scenario = scenarios.get(label, exc)
    scenario_obj = Scenario(self.engine, scenario)

    if name is None and cache_scenario:
        self.__scenario = scenario_obj

    return scenario_obj
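# Illustrative sketch (not bzt code): how an inline scenario gets an autogenerated label when no
# script file name can serve as one. The scenario content is made up, and to_json is approximated
# here by json.dumps.
import hashlib
import json

scenario = {"requests": [{"url": "http://blazedemo.com/"}]}
hash_str = hashlib.md5(json.dumps(scenario).encode()).hexdigest()
label = 'autogenerated_' + hash_str[-10:]  # e.g. 'autogenerated_1a2b3c4d5e'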
def __apply_single_override(self, dest, name, value):
    """
    Apply single override
    :type name: str
    :type value: str
    """
    self.log.debug("Applying %s=%s", name, value)
    parts = [(int(x) if is_int(x) else x) for x in name.split(".")]
    pointer = dest
    for index, part in enumerate(parts[:-1]):
        self.__ensure_list_capacity(pointer, part, parts[index + 1])

        if isinstance(part, integer_types):
            if part < 0:
                if isinstance(parts[index + 1], integer_types):
                    pointer.append([])
                else:
                    pointer.append(BetterDict())
                pointer = pointer[-1]
            else:
                pointer = pointer[part]
        elif isinstance(parts[index + 1], integer_types) and isinstance(pointer, dict):
            pointer = pointer.get(part, [], force_set=True)
        else:
            pointer = pointer.get(part, force_set=True)

    self.__ensure_list_capacity(pointer, parts[-1])
    self.log.debug("Applying: [%s]=%s", parts[-1], value)

    if isinstance(parts[-1], string_types) and parts[-1][0] == '^':
        item = parts[-1][1:]

        if isinstance(pointer, list):
            item = int(item)
            if -len(pointer) <= item < len(pointer):
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        elif isinstance(pointer, dict):
            if item in pointer:
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        else:
            raise ValueError("Cannot handle override %s in non-iterable type %s" % (item, pointer))
    else:
        parsed_value = self.__parse_override_value(value)
        self.log.debug("Parsed override value: %r -> %r (%s)", value, parsed_value, type(parsed_value))
        if isinstance(parsed_value, dict):
            parsed_value = BetterDict.from_dict(parsed_value)
        if isinstance(pointer, list) and parts[-1] < 0:
            pointer.append(parsed_value)
        else:
            pointer[parts[-1]] = parsed_value
def _get_merged_ci_headers(self, scenario, req, header):
    def dic_lower(dic):
        return {str(k).lower(): str(dic[k]).lower() for k in dic}

    ci_scenario_headers = dic_lower(scenario.get_headers())
    ci_request_headers = dic_lower(req.headers)
    headers = BetterDict.from_dict(ci_scenario_headers)
    headers.merge(ci_request_headers)
    if header.lower() in headers:
        return headers[header.lower()]
    else:
        return None
def test_long_kpi(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict.from_dict({"dump-xml": obj.engine.create_artifact("status", ".xml")})

    datapoint = random_datapoint(time.time())
    datapoint[datapoint.CUMULATIVE][""]["stdev_rt"] = long(0)
    obj.aggregated_second(datapoint)

    obj.startup()
    obj.shutdown()
    obj.post_process()
def test_dump(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict.from_dict({
        "dump-xml": obj.engine.create_artifact("status", ".xml"),
        "dump-csv": obj.engine.create_artifact("status", ".csv")
    })
    self.sniff_log(obj.log)

    obj.aggregated_second(random_datapoint(time.time()))

    obj.startup()
    obj.shutdown()
    obj.post_process()

    self.assertIn("XML", self.log_recorder.info_buff.getvalue())
def _extract_test_case(self, test_case, test_suite, suite_level_props):
    case_name = test_case.get("name")
    scenario_name = test_suite.get("name") + "-" + case_name

    case_properties = self._extract_properties(test_case)
    case_properties = {"#TestCase#" + key: value for key, value in iteritems(case_properties)}
    case_level_props = BetterDict.from_dict(suite_level_props)
    case_level_props.merge(case_properties)

    scenario = self._extract_scenario(test_case, case_level_props)
    scenario['test-suite'] = test_suite.get("name")

    return scenario_name, scenario
def test_func_report_all_no_stacktrace(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict.from_dict({"report-tests": "all", "print-stacktrace": False})
    self.sniff_log(obj.log)
    obj.prepare()
    obj.startup()
    obj.shutdown()
    obj.aggregated_results(*self.__get_func_tree())
    obj.post_process()

    info_log = self.log_recorder.info_buff.getvalue()
    self.assertIn("Total: 3 tests", info_log)
    self.assertIn("Test TestClass.case1 - PASSED", info_log)
    self.assertIn("Test TestClass.case2 - FAILED", info_log)
    self.assertIn("Test TestClass.case3 - BROKEN", info_log)
    self.assertNotIn("stacktrace2", info_log)
    self.assertNotIn("stacktrace3", info_log)
def test_xml_report_test_duration(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    xml_report = obj.engine.create_artifact("status", ".xml")
    obj.parameters = BetterDict.from_dict({
        "dump-xml": xml_report,
    })

    obj.startup()
    obj.aggregated_second(self.__get_datapoint(ts=90))
    obj.aggregated_second(self.__get_datapoint(ts=100))
    obj.shutdown()
    obj.post_process()

    self.assertTrue(os.path.exists(xml_report))
    with open(xml_report) as fds:
        report_content = fds.read()
    self.assertIn('<TestDuration>10.0</TestDuration>', report_content)
def _extract_scenario(self, test_case, case_level_props):
    variables = BetterDict.from_dict(case_level_props)
    requests = []

    extractors = BetterDict()

    steps = test_case.findall('.//con:testStep', namespaces=self.NAMESPACES)
    for step in steps:
        request = None
        if step.get("type") == "httprequest":
            request = self._extract_http_request(step)
        elif step.get("type") == "restrequest":
            request = self._extract_rest_request(step)
        elif step.get("type") == "request":
            request = self._extract_soap_request(step)
        elif step.get("type") == "properties":
            config_block = step.find('./con:config', namespaces=self.NAMESPACES)
            if config_block is not None:
                props = self._extract_properties(config_block)
                variables.merge(props)
        elif step.get("type") == "transfer":
            extracted_extractors = self._extract_property_transfers(step)  # label -> extractor
            if extracted_extractors:
                extractors.merge(extracted_extractors)
        elif step.get("type") == "groovy":
            request = self._extract_script(step)

        if request is not None:
            requests.append(request)

    for request in requests:
        label = request["label"]
        if label in extractors:
            request.update(extractors[label])

    scenario = {
        "test-case": test_case.get("name"),
        "requests": requests
    }
    if variables:
        scenario["variables"] = variables

    return scenario
def __init__(self, config=None, **kwargs):
    if config is None:
        config = BetterDict()

    if not isinstance(config, dict):
        config = BetterDict.from_dict({"path": config})

    version = config.get("version", self.VERSION)
    tool_file = self.TOOL_FILE.format(version=version)

    local_path = config.get("path", self.LOCAL_PATH)
    local_path = local_path.format(tool_file=tool_file)

    download_link = config.get("download-link", self.URL)
    remote_path = self.REMOTE_PATH.format(version=version)
    download_link = download_link.format(remote_addr=self.REMOTE_ADDR, remote_path=remote_path)

    super(JarTool, self).__init__(tool_path=local_path, download_link=download_link, version=version, **kwargs)
def test_blazemeter_cloud_report_link(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    xml_report = obj.engine.create_artifact("status", ".xml")
    obj.parameters = BetterDict.from_dict({
        "dump-xml": xml_report,
    })

    prov = CloudProvisioning()
    prov.results_url = "http://report/link"
    obj.engine.provisioning = prov

    obj.startup()
    obj.shutdown()
    obj.aggregated_second(self.__get_datapoint())
    obj.post_process()

    self.assertTrue(os.path.exists(xml_report))
    with open(xml_report) as fds:
        report_content = fds.read()
    self.assertIn('<ReportURL>http://report/link</ReportURL>', report_content)
def test_xml_format_sample_labels(self):
    # generate xml, compare hash
    obj = JUnitXMLReporter()
    obj.engine = EngineEmul()

    rep = BlazeMeterUploader()
    rep.results_url = "http://report/123"
    obj.engine.reporters.append(rep)

    path_from_config = tempfile.mktemp(suffix='.xml', prefix='junit-xml-sample-labels',
                                       dir=obj.engine.artifacts_dir)

    # data-source: finalstats by default
    obj.parameters = BetterDict.from_dict({"filename": path_from_config})

    obj.prepare()

    datapoint = DataPoint(0, [])
    cumul_data = datapoint[DataPoint.CUMULATIVE]

    cumul_data[""] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
            0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
            0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
            0.019: 1, 0.015: 1}),
        KPISet.ERRORS: [
            {'msg': 'Forbidden', 'cnt': 7300, 'type': 0,
             'urls': Counter({'http://192.168.1.1/anotherquery': 7300}), KPISet.RESP_CODES: '403'},
            {'msg': 'Assertion failed: text /smth/ not found', 'cnt': 73, 'type': 1,
             'urls': Counter({'http://192.168.1.1/anotherquery': 73}), KPISet.RESP_CODES: '200'},
        ],
        KPISet.STDEV_RESP_TIME: 0.04947974228872108,
        KPISet.AVG_LATENCY: 0.0002825639815220692,
        KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 59314,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005440536804127192,
        KPISet.FAILURES: 29656})

    cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
            0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
            0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
            0.081: 1}),
        KPISet.ERRORS: [],
        KPISet.STDEV_RESP_TIME: 0.04073402130687656,
        KPISet.AVG_LATENCY: 1.7196034796682178e-06,
        KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.004, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 29658,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005164542450603551,
        KPISet.FAILURES: 0})

    cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 6.1707580253574335e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 14941, 0.001: 13673, 0.002: 506, 0.003: 289, 0.004: 103, 0.005: 59,
            0.006: 37, 0.008: 14, 0.007: 13, 0.009: 8, 0.01: 3, 0.011: 2, 0.016: 2,
            0.014: 2, 0.017: 1, 0.013: 1, 0.015: 1, 0.04: 1}),
        KPISet.ERRORS: [
            {'msg': 'Forbidden', 'cnt': 7300, 'type': 0,
             'urls': Counter({'http://192.168.1.1/anotherquery': 7300}), KPISet.RESP_CODES: '403'},
            {'msg': 'Assertion failed: text /smth/ not found', 'cnt': 73, 'type': 1,
             'urls': Counter({'http://192.168.1.1/anotherquery': 73}), KPISet.RESP_CODES: '200'},
        ],
        KPISet.STDEV_RESP_TIME: 0.032465137860758844,
        KPISet.AVG_LATENCY: 0.0005634272997032645,
        KPISet.RESP_CODES: Counter({'403': 29656}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                             '100.0': 0.04, '99.0': 0.003, '50.0': 0.0},
        KPISet.SUCCESSES: 0,
        KPISet.SAMPLE_COUNT: 29656,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005716549770704078,
        KPISet.FAILURES: 29656})

    cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
            0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
            0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
            0.081: 1}),
        KPISet.ERRORS: [],
        KPISet.STDEV_RESP_TIME: 0.04073402130687656,
        KPISet.AVG_LATENCY: 1.7196034796682178e-06,
        KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.004, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 29658,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005164542450603551,
        KPISet.FAILURES: 0})

    obj.aggregated_second(datapoint)
    obj.post_process()

    with open(obj.report_file_path, 'rb') as fds:
        f_contents = fds.read()

    ROOT_LOGGER.info("File: %s", f_contents)
    xml_tree = etree.fromstring(f_contents)
    self.assertEqual('testsuites', xml_tree.tag)
    suite = xml_tree.getchildren()[0]
    self.assertEqual('testsuite', suite.tag)
    self.assertListEqual(['sample_labels', "bzt"], suite.values())
    test_cases = suite.getchildren()
    self.assertEqual(3, len(test_cases))
    self.assertEqual('testcase', test_cases[0].tag)
    self.assertEqual('error', test_cases[0].getchildren()[1].tag)
    self.assertEqual('failure', test_cases[0].getchildren()[2].tag)
    self.assertEqual('system-out', test_cases[0].getchildren()[0].tag)
    self.assertIn('BlazeMeter report link: http://report/123', test_cases[0].getchildren()[0].text)
def setUp(self):
    super(TestTsungExecutor, self).setUp()
    self.obj.settings = BetterDict.from_dict({"path": get_res_path(TOOL_NAME)})