def startup(self):
    args = [self.tool_path]
    load = self.get_load()

    if load.iterations:
        args += ['--reps', str(load.iterations)]
    elif load.hold:
        hold_for = ceil(dehumanize_time(load.hold))
        args += ['--time', '%sS' % hold_for]
    else:
        raise ValueError("You must specify either 'hold-for' or 'iterations' for siege")

    if self.scenario.get('think-time'):
        think_time = dehumanize_time(self.scenario.get('think-time'))
        args += ['--delay', str(think_time)]
    else:
        args += ['--benchmark']

    load_concurrency = load.concurrency
    args += ['--concurrent', str(load_concurrency)]
    self.reader.concurrency = load_concurrency

    args += ['--file', self.__url_name]

    for key, val in iteritems(self.scenario.get_headers()):
        args += ['--header', "%s: %s" % (key, val)]

    env = BetterDict()
    env.merge(dict(environ))
    env.merge({"SIEGERC": self.__rc_name})

    self.start_time = time.time()
    self.process = shell_exec(args, stdout=self.__out, stderr=self.__err, env=env)
def _extract_request(self, path, path_obj, method, operation):
    request = {}

    if method != "get":
        request["method"] = method.upper()

    if operation.operation_id is not None:
        request["label"] = operation.operation_id

    parameters = BetterDict()
    if path_obj.parameters:
        parameters.merge(path_obj.parameters)
    if operation.parameters:
        parameters.merge(operation.parameters)

    query_params, form_data, request_body, headers = self._handle_parameters(parameters)
    if headers:
        request["headers"] = headers

    if form_data and request_body:
        self.log.warning("Both form data and request body are specified. Omitting form data")

    if request_body:
        request["body"] = request_body
    elif form_data:
        request["body"] = form_data

    if query_params:
        url = self._embed_query_in_path(path, query_params)
    else:
        url = path

    request["url"] = url

    return request
def startup(self):
    self.start_time = time.time()
    load = self.get_load()
    hatch = load.concurrency / load.ramp_up if load.ramp_up else load.concurrency
    wrapper = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir,
                           "resources", "locustio-taurus-wrapper.py")

    env = BetterDict()
    env.merge({"PYTHONPATH": self.engine.artifacts_dir + os.pathsep + os.getcwd()})
    if os.getenv("PYTHONPATH"):
        env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']

    args = [sys.executable, os.path.realpath(wrapper), '-f', os.path.realpath(self.locustfile)]
    args += ['--logfile=%s' % self.engine.create_artifact("locust", ".log")]
    args += ["--no-web", "--only-summary"]
    args += ["--clients=%d" % load.concurrency, "--hatch-rate=%d" % math.ceil(hatch)]
    if load.iterations:
        args.append("--num-request=%d" % load.iterations)

    if self.is_master:
        args.extend(["--master", '--expect-slaves=%s' % self.expected_slaves])
        env["SLAVES_LDJSON"] = self.slaves_ldjson
    else:
        env["JTL"] = self.kpi_jtl

    host = self.get_scenario().get("default-address", None)
    if host:
        args.append("--host=%s" % host)

    self.__out = open(self.engine.create_artifact("locust", ".out"), 'w')
    self.process = self.execute(args, stderr=STDOUT, stdout=self.__out, env=env)
def prepare(self):
    super(Monitoring, self).prepare()
    clients = (param for param in self.parameters if param not in ('run-at', 'module'))
    for client_name in clients:
        if client_name in self.client_classes:
            client_class = self.client_classes[client_name]
        else:
            self.log.warning('Unknown monitoring found: %s', client_name)
            continue
        for config in self.parameters.get(client_name, []):
            label = config.get('label', None)
            if client_name == 'local':
                if any([client for client in self.clients
                        if isinstance(client, self.client_classes[client_name])]):
                    break  # skip the second and following local monitoring clients
                else:
                    if len(self.parameters.get(client_name, [])) > 1:
                        self.log.warning('LocalMonitoring client found twice, configs will be joined')
                    config = BetterDict()
                    for cfg in self.parameters.get(client_name, []):
                        config.merge(cfg)
            client = client_class(self.log, label, config, self.engine)
            self.clients.append(client)
            client.connect()
def run_tests(self):
    """
    run mocha tests
    """
    mocha_cmdline = [
        self.node_tool.executable,
        self.plugin_path,
        "--report-file",
        self.settings.get("report-file"),
        "--test-suite",
        self.script
    ]
    if self.load.iterations:
        mocha_cmdline += ['--iterations', str(self.load.iterations)]

    if self.load.hold:
        mocha_cmdline += ['--hold-for', str(self.load.hold)]

    std_out = open(self.settings.get("stdout"), "wt")
    self.opened_descriptors.append(std_out)
    std_err = open(self.settings.get("stderr"), "wt")
    self.opened_descriptors.append(std_err)

    env = BetterDict()
    env.merge(dict(os.environ))
    env.merge(self.env)

    self.process = self.executor.execute(mocha_cmdline, stdout=std_out, stderr=std_err, env=env)
def __load_module(self, alias):
    """
    Load module class by alias
    :param alias: str
    :return: class
    """
    if alias in self.modules:
        return self.modules[alias]

    mod_conf = self.config.get('modules')
    if alias not in mod_conf:
        self.log.info("Possible module aliases: %s", [str(x) for x in sorted(mod_conf.keys())])
        raise ValueError("Module alias '%s' not found in module settings" % alias)

    settings = ensure_is_dict(mod_conf, alias, "class")

    acopy = copy.deepcopy(settings)
    BetterDict.traverse(acopy, Configuration.masq_sensitive)
    self.log.debug("Module config: %s %s", alias, acopy)

    clsname = settings.get('class', None)
    if clsname is None:
        raise ValueError("Class name not found in module settings: %s" % settings)

    try:
        self.modules[alias] = load_class(clsname)
        if not issubclass(self.modules[alias], EngineModule):
            raise TypeError("Module class does not inherit from EngineModule: %s" % clsname)
    except BaseException:
        self.log.debug("Failed to load class %s: %s", clsname, traceback.format_exc())
        raise ValueError("Cannot load module '%s' with class %s" % (alias, clsname))

    return self.modules[alias]
def run_tests(self):
    """
    run python tests
    """
    executable = self.settings.get("interpreter", sys.executable)
    nose_command_line = [executable, self.plugin_path, '-k', self.settings.get("report-file"),
                         '-e', self.settings.get("err-file")]

    if self.load.iterations:
        nose_command_line += ['-i', str(self.load.iterations)]

    if self.load.hold:
        nose_command_line += ['-d', str(self.load.hold)]

    nose_command_line += [self.working_dir]

    std_out = open(self.settings.get("stdout"), "wt")
    self.opened_descriptors.append(std_out)
    std_err = open(self.settings.get("stderr"), "wt")
    self.opened_descriptors.append(std_err)

    env = BetterDict()
    env.merge(dict(os.environ))
    env.merge(self.env)

    self.process = self.executor.execute(nose_command_line, cwd=self.artifacts_dir,
                                         stdout=std_out, stderr=std_err, env=env)
def update(self):
    if not self._sessions:
        self._sessions = self.prov.client.get_master_sessions(self.prov.client.active_session_id)
        if not self._sessions:
            return

    mapping = BetterDict()
    cnt = 0
    for session in self._sessions:
        try:
            cnt += 1
            name_split = session['name'].split('/')
            location = session['configuration']['location']
            count = session['configuration']['serversCount']
            mapping.get(name_split[0]).get(name_split[1])[location] = count
        except KeyError:
            self._sessions = None

    txt = "%s #%s\n" % (self.prov.test_name, self.prov.client.active_session_id)
    for executor, scenarios in iteritems(mapping):
        txt += " %s" % executor
        for scenario, locations in iteritems(scenarios):
            txt += " %s:\n" % scenario
            for location, count in iteritems(locations):
                txt += " Agents in %s: %s\n" % (location, count)

    self.text.set_text(txt)
def _extract_transfer(self, transfer):
    source_type = transfer.findtext('./con:sourceType', namespaces=self.NAMESPACES)
    source_step_name = transfer.findtext('./con:sourceStep', namespaces=self.NAMESPACES)
    query = transfer.findtext('./con:sourcePath', namespaces=self.NAMESPACES)
    transfer_type = transfer.findtext('./con:type', namespaces=self.NAMESPACES)
    target_step_name = transfer.findtext('./con:targetStep', namespaces=self.NAMESPACES)
    target_prop = transfer.findtext('./con:targetType', namespaces=self.NAMESPACES)

    if source_step_name.startswith("#") and source_step_name.endswith("#"):
        source_step_name = source_step_name[1:-1]

    if not self._validate_transfer(source_type, source_step_name, transfer_type, target_step_name):
        return None

    extractor = BetterDict()
    if transfer_type == "JSONPATH":
        extractor.merge({
            'extract-jsonpath': {
                target_prop: {
                    'jsonpath': query,
                    'default': 'NOT_FOUND',
                }
            }
        })
    elif transfer_type == "XPATH":
        extractor.merge({
            'extract-xpath': {
                target_prop: {
                    'xpath': query,
                    'default': 'NOT_FOUND',
                }
            }
        })
    return {source_step_name: extractor}
def __load_module(self, alias):
    """
    Load module class by alias
    :param alias: str
    :return: class
    """
    if alias in self.modules:
        return self.modules[alias]

    mod_conf = self.config.get('modules')
    if alias not in mod_conf:
        msg = "Module '%s' not found in list of available aliases %s" % (alias, sorted(mod_conf.keys()))
        raise TaurusConfigError(msg)

    settings = ensure_is_dict(mod_conf, alias, "class")

    acopy = copy.deepcopy(settings)
    BetterDict.traverse(acopy, Configuration.masq_sensitive)
    self.log.debug("Module config: %s %s", alias, acopy)

    err = TaurusConfigError("Class name for alias '%s' is not found in module settings: %s" % (alias, settings))
    clsname = settings.get('class', err)

    self.modules[alias] = load_class(clsname)
    if not issubclass(self.modules[alias], EngineModule):
        raise TaurusInternalException("Module class does not inherit from EngineModule: %s" % clsname)

    return self.modules[alias]
def run_tests(self):
    # java -cp junit.jar:selenium-test-small.jar:
    # selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar
    # org.junit.runner.JUnitCore TestBlazemeterPass
    jar_list = [os.path.join(self.working_dir, jar)
                for jar in os.listdir(self.working_dir) if jar.endswith(".jar")]
    self.base_class_path.extend(jar_list)

    with open(self.props_file, 'wt') as props:
        props.write("kpi_log=%s\n" % self.settings.get("report-file").replace(os.path.sep, '/'))
        props.write("error_log=%s\n" % self.settings.get("err-file").replace(os.path.sep, '/'))

        if self.load.iterations:
            props.write("iterations=%s\n" % self.load.iterations)

        if self.load.hold:
            props.write("hold_for=%s\n" % self.load.hold)

        for index, item in enumerate(jar_list):
            props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/')))

    std_out = open(self.settings.get("stdout"), "wt")
    self.opened_descriptors.append(std_out)
    std_err = open(self.settings.get("stderr"), "wt")
    self.opened_descriptors.append(std_err)

    env = BetterDict()
    env.merge(dict(os.environ))
    env.merge(self.env)

    junit_command_line = ["java", "-cp", os.pathsep.join(self.base_class_path),
                          "taurusjunit.CustomRunner", self.props_file]
    self.process = shell_exec(junit_command_line, cwd=self.artifacts_dir,
                              stdout=std_out, stderr=std_err, env=env)
def _create_runner(self, report_file):
    script_path = self.get_script_path()
    script_type = self.detect_script_type(script_path)

    runner_config = BetterDict()

    if script_type == "python-nose":
        runner_class = NoseTester
        runner_config.merge(self.settings.get("selenium-tools").get("nose"))
    elif script_type == "java-junit":
        runner_class = JUnitTester
        runner_config.merge(self.settings.get("selenium-tools").get("junit"))
        runner_config['working-dir'] = self.get_runner_working_dir()
        runner_config['props-file'] = self.engine.create_artifact("customrunner", ".properties")
    elif script_type == "ruby-rspec":
        runner_class = RSpecTester
    elif script_type == "js-mocha":
        runner_class = MochaTester
    else:
        raise ValueError("Unsupported script type: %s" % script_type)

    runner_config["script"] = script_path
    runner_config["script-type"] = script_type
    runner_config["artifacts-dir"] = self.engine.artifacts_dir
    runner_config["report-file"] = report_file
    runner_config["stdout"] = self.engine.create_artifact("selenium", ".out")
    runner_config["stderr"] = self.engine.create_artifact("selenium", ".err")
    return runner_class(runner_config, self)
def _extract_rest_request(self, test_step):
    label = test_step.get('name')
    config = test_step.find('./con:config', namespaces=self.NAMESPACES)
    method = config.get('method')

    method_name = config.get('methodName')
    method_obj = self.interface.find('.//con:method[@name="%s"]' % method_name, namespaces=self.NAMESPACES)

    params = BetterDict()
    if method_obj is not None:
        parent = method_obj.getparent()
        while parent.tag.endswith('resource'):
            for param in parent.findall('./con:parameters/con:parameter', namespaces=self.NAMESPACES):
                param_name = param.findtext('./con:name', namespaces=self.NAMESPACES)
                param_value = param.findtext('./con:value', namespaces=self.NAMESPACES)
                def_value = param.findtext('./con:default', namespaces=self.NAMESPACES)
                if param_value:
                    params[param_name] = param_value
                elif def_value:
                    params[param_name] = def_value
            parent = parent.getparent()

    url = self._calc_base_address(test_step) + config.get('resourcePath')
    headers = self._extract_headers(config)
    assertions = self._extract_assertions(config)

    params.merge({
        entry.get("key"): entry.get("value")
        for entry in config.findall('./con:restRequest/con:parameters/con:entry', namespaces=self.NAMESPACES)
    })

    for param_name in copy.copy(list(params.keys())):
        template = "{" + param_name + "}"
        if template in url:
            param_value = params.pop(param_name)
            url = url.replace(template, param_value)

    request = {"url": url, "label": label}

    if method is not None and method != "GET":
        request["method"] = method

    if headers:
        request["headers"] = headers

    if assertions:
        request["assert"] = assertions

    body = {}
    for key, value in iteritems(params):
        body[key] = value
    if body:
        request["body"] = body

    return request
def test_merge_configs(self):
    a = {"modules": {"local": "class_name"}}
    b = {"modules": {"local": {"class": "another_class"}}}

    res = BetterDict()
    res.merge(a)
    res.merge(b)

    self.assertEqual(BetterDict.__name__, type(res["modules"]["local"]).__name__)
    modules = res["modules"]
    ensure_is_dict(modules, "local", "class")
    self.assertEqual("another_class", res["modules"]["local"]["class"])
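# A minimal standalone sketch of the merge semantics exercised by the test above,
# assuming BetterDict and ensure_is_dict are the ones from bzt.utils: a later merge
# replaces a scalar leaf with a dict, and nested plain dicts become BetterDict
# instances along the way.
from bzt.utils import BetterDict, ensure_is_dict

res = BetterDict()
res.merge({"modules": {"local": "class_name"}})                 # scalar leaf
res.merge({"modules": {"local": {"class": "another_class"}}})   # dict replaces scalar

assert isinstance(res["modules"], BetterDict)  # converted during merge

# ensure_is_dict normalizes a scalar entry into {"class": <scalar>} form
ensure_is_dict(res["modules"], "local", "class")
assert res["modules"]["local"]["class"] == "another_class"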
def _load_tasks(self, stage, container):
    if not isinstance(self.parameters.get(stage, []), list):
        self.parameters[stage] = [self.parameters[stage]]

    for index, stage_task in enumerate(self.parameters.get(stage, [])):
        stage_task = ensure_is_dict(self.parameters[stage], index, "command")
        task_config = self.parameters[stage][index]
        default_cwd = self.settings.get("default-cwd", None)
        cwd = self.engine.find_file(task_config.get("cwd", default_cwd))
        if cwd is None:
            working_dir = self.engine.default_cwd
        elif cwd == 'artifacts-dir':
            working_dir = self.engine.artifacts_dir
        else:
            working_dir = cwd

        env = BetterDict()
        env.merge({k: os.environ.get(k) for k in os.environ.keys()})
        env.merge(self.settings.get('env'))
        env.merge(task_config.get('env'))
        env.merge({"PYTHONPATH": working_dir})
        if os.getenv("PYTHONPATH"):
            env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']
        env[ARTIFACTS_DIR_ENVVAR] = self.engine.artifacts_dir

        for name, value in iteritems(env):
            env[str(name)] = str(value)

        task = Task(task_config, self.log, working_dir, env)
        container.append(task)
        self.log.debug("Added %s task: %s", stage, stage_task)
def __apply_single_override(self, dest, name, value):
    """
    Apply single override
    :type name: str
    :type value: str
    """
    self.log.debug("Applying %s=%s", name, value)
    parts = [(int(x) if is_int(x) else x) for x in name.split(".")]
    pointer = dest
    for index, part in enumerate(parts[:-1]):
        self.__ensure_list_capacity(pointer, part, parts[index + 1])
        if isinstance(part, int):
            if part < 0:
                if isinstance(parts[index + 1], int):
                    pointer.append([])
                else:
                    pointer.append(BetterDict())
                pointer = pointer[-1]
            else:
                pointer = pointer[part]
        elif isinstance(parts[index + 1], int) and isinstance(pointer, dict):
            pointer = pointer.get(part, [], force_set=True)
        else:
            pointer = pointer.get(part, force_set=True)

    self.__ensure_list_capacity(pointer, parts[-1])
    self.log.debug("Applying: [%s]=%s", parts[-1], value)
    if isinstance(parts[-1], string_types) and parts[-1][0] == '^':
        item = parts[-1][1:]

        if isinstance(pointer, list):
            item = int(item)
            if -len(pointer) <= item < len(pointer):
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        elif isinstance(pointer, dict):
            if item in pointer:
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        else:
            raise ValueError("Cannot handle override %s in non-iterable type %s" % (item, pointer))
    else:
        parsed_value = self.__parse_override_value(value)
        self.log.debug("Parsed override value: %r -> %r (%s)", value, parsed_value, type(parsed_value))
        if isinstance(parsed_value, dict):
            dict_value = BetterDict()
            dict_value.merge(parsed_value)
            parsed_value = dict_value
        if isinstance(pointer, list) and parts[-1] < 0:
            pointer.append(parsed_value)
        else:
            pointer[parts[-1]] = parsed_value
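# Illustration of the dotted override syntax handled above — a sketch, assuming
# is_int comes from bzt.utils. Integer segments address list indices, a negative
# index appends, and a leading '^' on the last segment deletes the target; the
# example override names below are hypothetical.
from bzt.utils import is_int

name = "execution.0.concurrency"
parts = [(int(x) if is_int(x) else x) for x in name.split(".")]
assert parts == ["execution", 0, "concurrency"]

# other illustrative override names:
#   modules.jmeter.path=/opt/jmeter  -> config["modules"]["jmeter"]["path"]
#   execution.-1.hold-for=60s        -> appended to the "execution" list
#   scenarios.test.^timeout          -> deletes config["scenarios"]["test"]["timeout"]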
def test_configuration_smoothness(self):
    def find_ad_dict_ed(*args):
        if isinstance(args[0], dict) and not isinstance(args[0], BetterDict):
            raise BaseException("dict found in Configuration")

    configs = [
        RESOURCES_DIR + "json/get-post.json",
        self.paths]
    self.obj.configure(configs)
    self.assertTrue(isinstance(self.obj.config, Configuration))
    BetterDict.traverse(self.obj.config, find_ad_dict_ed)
def _extract_property_transfers(self, test_step):
    extractors = BetterDict()  # label -> {extract-xpath: ..., extract-jsonpath: ...}
    transfers = test_step.findall('./con:config/con:transfers', namespaces=self.NAMESPACES)
    if not transfers:
        return None

    for transfer in transfers:
        extracted_transfer = self._extract_transfer(transfer)
        if extracted_transfer is not None:
            extractors.merge(extracted_transfer)

    return extractors
def _get_merged_ci_headers(self, req, header):
    def dic_lower(dic):
        return {k.lower(): dic[k].lower() for k in dic}

    ci_scenario_headers = dic_lower(self.scenario.get_headers())
    ci_request_headers = dic_lower(req.headers)
    headers = BetterDict()
    headers.merge(ci_scenario_headers)
    headers.merge(ci_request_headers)
    if header.lower() in headers:
        return headers[header.lower()]  # keys were lowercased above, so look up the lowercased name
    else:
        return None
def test_masq_sensitive(self):
    obj = Configuration()
    obj.merge({
        "token": "my-precious",
        "my_password": "******",
        "secret": "secret",
        "secret_story": "story",
    })
    BetterDict.traverse(obj, Configuration.masq_sensitive)
    self.assertEquals(obj["token"], "*" * 8)
    self.assertEquals(obj["my_password"], "*" * 8)
    self.assertEquals(obj["secret"], "*" * 8)
    self.assertEquals(obj["secret_story"], "story")
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
    hosts_file = self.engine.create_artifact("hostaliases", "")
    aliases = self.get_hostaliases()
    with open(hosts_file, 'w') as fds:
        for key, value in iteritems(aliases):
            fds.write("%s %s\n" % (key, value))

    environ = BetterDict()
    environ.merge(dict(os.environ))
    environ["HOSTALIASES"] = hosts_file
    if env is not None:
        environ.merge(env)

    return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
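# Note on the HOSTALIASES mechanism used above: glibc resolvers read the file named
# by the HOSTALIASES environment variable, one "alias canonical-name" pair per line.
# A sketch of what execute() writes, with a hypothetical alias:
#
#   mysite blazedemo.com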
def get_scenario(self, name=None):
    """
    Returns scenario dict, extract if scenario is inlined

    :return: DictOfDicts
    """
    if name is None and self.__scenario is not None:
        return self.__scenario

    scenarios = self.engine.config.get("scenarios")

    if name is None:  # get current scenario
        label = self.execution.get('scenario', ValueError("Scenario is not configured properly"))

        is_script = isinstance(label, string_types) and label not in scenarios and \
            os.path.exists(self.engine.find_file(label))
        if isinstance(label, list):
            raise ValueError("Invalid content of scenario, list type instead of dict or string")
        if isinstance(label, dict) or is_script:
            self.log.debug("Extract %s into scenarios" % label)
            if isinstance(label, string_types):
                scenario = BetterDict()
                scenario.merge({Scenario.SCRIPT: label})
            else:
                scenario = label

            path = self.get_script_path(Scenario(self.engine, scenario))
            if path is not None:
                label = os.path.basename(path)
            if path is None or label in scenarios:
                hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                label = 'autogenerated_' + hash_str[-10:]

            scenarios[label] = scenario
            self.execution['scenario'] = label

        self.label = label
    else:  # get scenario by name
        label = name

    err = ValueError("Scenario not found in scenarios: %s" % label)
    scenario = scenarios.get(label, err)
    scenario_obj = Scenario(self.engine, scenario)

    if name is None:
        self.__scenario = scenario_obj

    return scenario_obj
def __init__(self, executor, original=None):
    super(JMeterScenarioBuilder, self).__init__(original)
    self.executor = executor
    self.scenario = executor.get_scenario()
    self.engine = executor.engine
    self.system_props = BetterDict()
    self.request_compiler = None
def __get_jmx_shorthands(self, configs):
    """
    Generate json file with execution, executor and scenario settings

    :type configs: list
    :return: list
    """
    jmxes = []
    for filename in configs[:]:
        if filename.lower().endswith(".jmx"):
            jmxes.append(filename)
            configs.remove(filename)

    if jmxes:
        self.log.debug("Adding JMX shorthand config for: %s", jmxes)
        fds = NamedTemporaryFile(prefix="jmx_", suffix=".json")
        fname = fds.name
        fds.close()

        config = Configuration()

        for jmx_file in jmxes:
            piece = BetterDict.from_dict({"executor": "jmeter", "scenario": {"script": jmx_file}})
            config.get(ScenarioExecutor.EXEC, [], force_set=True).append(piece)  # does it break single execution?

        config.dump(fname, Configuration.JSON)

        return [fname]
    else:
        return []
def __init__(self):
    Aggregator.__init__(self, is_functional=False)
    ResultsProvider.__init__(self)
    self.generalize_labels = False
    self.ignored_labels = []
    self.underlings = []
    self.buffer = BetterDict()
def __init__(self, config, **kwargs):
    if not isinstance(config, dict):
        config = BetterDict.from_dict({"path": config})

    version = config.get("version", self.VERSION)
    version = str(version).split('.')
    version.extend(['0'] * (3 - len(version)))
    short_version = '.'.join(version[:2])  # 2 elements
    full_version = '.'.join(version)  # 3+ elements

    remote_path = config.get("remote-path", self.REMOTE_PATH)
    remote_path = remote_path.format(short_version=short_version, full_version=full_version)

    tool_file = config.get("tool-file", self.TOOL_FILE)
    tool_file = tool_file.format(version=full_version)

    local_path = config.get("path", JarTool.LOCAL_PATH)
    local_path = local_path.format(tool_file=tool_file)

    download_link = config.get("download-link", JarTool.URL)
    download_link = download_link.format(remote_addr=self.REMOTE_ADDR, remote_path=remote_path)

    super(SeleniumServer, self).__init__(
        tool_path=local_path, download_link=download_link, version=full_version, **kwargs)
def __get_jtl_shorthands(self, configs):
    """
    Generate json file with execution, executor and scenario settings

    :type configs: list
    :return: list
    """
    jtls = []
    for filename in configs[:]:
        if filename.lower().endswith(".jtl"):
            jtls.append(filename)
            configs.remove(filename)

    if jtls:
        self.log.debug("Adding JTL shorthand config for: %s", jtls)
        fds = NamedTemporaryFile(prefix="jtl_", suffix=".json")
        fname = fds.name
        fds.close()

        config = Configuration()

        for jtl in jtls:
            piece = BetterDict.from_dict({"executor": "external-results-loader", "data-file": jtl})
            config.get(ScenarioExecutor.EXEC, [], force_set=True).append(piece)

        config.dump(fname, Configuration.JSON)

        return [fname]
    else:
        return []
def __init__(self):
    EngineModule.__init__(self)
    ResultsProvider.__init__(self)
    self.generalize_labels = False
    self.ignored_labels = []
    self.underlings = []
    self.buffer = BetterDict()
def test_prepare_no_filename_in_settings(self):
    obj = JUnitXMLReporter()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict.from_dict({"data-source": "sample-labels"})

    obj.prepare()

    datapoint = DataPoint(0, [])
    cumul_data = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224,
            0.005: 125, 0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20,
            0.011: 8, 0.01: 8, 0.017: 3, 0.016: 3, 0.014: 3, 0.013: 3,
            0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1, 0.019: 1, 0.015: 1
        }),
        KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                         'urls': Counter({'http://192.168.25.8/': 7373}),
                         KPISet.RESP_CODES: '403'}],
        KPISet.STDEV_RESP_TIME: 0.04947974228872108,
        KPISet.AVG_LATENCY: 0.0002825639815220692,
        KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 59314,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005440536804127192,
        KPISet.FAILURES: 29656})
    datapoint[DataPoint.CUMULATIVE][""] = cumul_data

    obj.aggregated_second(datapoint)
    obj.post_process()

    self.assertTrue(os.path.exists(obj.report_file_path))
def _build_request(self, request, scenario):
    path = self._get_request_path(request, scenario)
    http = "%s %s HTTP/1.1\r\n" % (request.method, path)
    headers = BetterDict.from_dict({"Host": self.hostname})
    if not scenario.get("keepalive", True):
        headers.merge({"Connection": 'close'})  # HTTP/1.1 implies keep-alive by default

    body = ""
    if isinstance(request.body, dict):
        if request.method != "GET":
            body = urlencode(request.body)
    elif isinstance(request.body, string_types):
        body = request.body
    elif request.body:
        msg = "Cannot handle 'body' option of type %s: %s"
        raise TaurusConfigError(msg % (type(request.body), request.body))

    if body:
        headers.merge({"Content-Length": len(body)})

    headers.merge(scenario.get_headers())
    headers.merge(request.headers)
    for header, value in iteritems(headers):
        http += "%s: %s\r\n" % (header, value)
    http += "\r\n%s" % (body,)
    return http
def test_step_shaper(self):
    obj = JMeterExecutor()
    obj.engine = EngineEmul()
    obj.engine.config = BetterDict()
    obj.engine.config.merge(yaml.load(open("tests/yaml/stepping_ramp_up.yml").read()))
    obj.engine.config.merge({"provisioning": "local"})
    obj.execution = obj.engine.config['execution']
    obj.execution['throughput'] = 100
    obj.prepare()
    load = obj.get_load()

    modified_xml_tree = etree.fromstring(open(obj.modified_jmx, "rb").read())
    timer = modified_xml_tree.findall(".//kg.apc.jmeter.timers.VariableThroughputTimer")
    self.assertEqual(len(timer), 1)

    for num, step_collection in enumerate(timer[0].findall(".//load_profile")):
        # compare element text, not the lxml elements themselves
        step_start_rps = step_collection.find(".//stringProp[@name='49']").text
        step_stop_rps = step_collection.find(".//stringProp[@name='1567']").text
        self.assertTrue(step_start_rps == step_stop_rps == str(int(round(float(load.throughput) / load.steps))))
        if num + 1 == load.steps:
            self.assertEqual(step_collection.find(".//stringProp[@name='53']").text,
                             str(int(load.hold + load.ramp_up / load.steps)))
        else:
            self.assertEqual(step_collection.find(".//stringProp[@name='53']").text,
                             str(int(load.ramp_up / load.steps)))
def test_log_messages_percentiles(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict()

    log_recorder = RecordingHandler()
    obj.log.addHandler(log_recorder)

    obj.parameters.merge({"failed-labels": False, "percentiles": True, "summary": False, "test-duration": False})
    obj.aggregated_second(self.__get_datapoint())
    obj.post_process()

    target_output = ("Average times: total 0.001, latency 0.000, connect 0.000\n"
                     "Percentile 0.0%: 0.000\n"
                     "Percentile 50.0%: 0.000\n"
                     "Percentile 90.0%: 0.001\n"
                     "Percentile 95.0%: 0.001\n"
                     "Percentile 99.0%: 0.003\n"
                     "Percentile 99.9%: 0.008\n"
                     "Percentile 100.0%: 0.081\n")
    self.assertEqual(target_output, log_recorder.info_buff.getvalue())
    obj.log.removeHandler(log_recorder)
def test_blazemeter_cloud_report_link(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    xml_report = obj.engine.create_artifact("status", ".xml")
    obj.parameters = BetterDict.from_dict({
        "dump-xml": xml_report,
    })

    prov = CloudProvisioning()
    prov.results_url = "http://report/link"
    obj.engine.provisioning = prov

    obj.startup()
    obj.shutdown()
    obj.aggregated_second(self.__get_datapoint())
    obj.post_process()

    self.assertTrue(os.path.exists(xml_report))
    with open(xml_report) as fds:
        report_content = fds.read()
    self.assertIn('<ReportURL>http://report/link</ReportURL>', report_content)
def test_log_messages_summary_labels(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict.from_dict({"summary-labels": True, "percentiles": False,
                                           "summary": False, "test-duration": False})
    self.sniff_log(obj.log)

    obj.startup()
    obj.shutdown()
    obj.aggregated_second(self.__get_datapoint())
    obj.post_process()

    # note: column padding below is reconstructed to match the separator widths;
    # the extraction had collapsed runs of spaces inside the expected table
    expected = ("Request label stats:\n"
                "+----------------------------------+--------+---------+--------+-----------+\n"
                "| label                            | status |   succ  | avg_rt |   error   |\n"
                "+----------------------------------+--------+---------+--------+-----------+\n"
                "| http://192.168.1.1/anotherquery  |  FAIL  |   0.00% |  0.001 | Forbidden |\n"
                "| http://192.168.1.1/somequery     |   OK   | 100.00% |  0.001 |           |\n"
                "| http://192.168.100.100/somequery |   OK   | 100.00% |  0.001 |           |\n"
                "+----------------------------------+--------+---------+--------+-----------+\n")
    self.assertIn(expected, self.log_recorder.info_buff.getvalue())
def test_not_junit(self):
    """
    Check that JUnit runner fails if no tests were found
    :return:
    """
    obj = SeleniumExecutor()
    obj.engine = self.engine_obj
    obj.settings = self.selenium_config
    obj.engine.config = BetterDict()
    obj.engine.config.merge(
        {"execution": {"executor": "selenium",
                       "scenario": {"script": __dir__() + "/../selenium/invalid/NotJUnittest.java"}}})
    obj.execution = obj.engine.config['execution']
    obj.prepare()
    obj.startup()
    try:
        while not obj.check():
            time.sleep(1)
        self.fail()
    except BaseException as exc:
        self.assertIn("Nothing to test", exc.args[0])
    obj.shutdown()
def test_prepare_filename_in_settings(self):
    # test path parameter from config
    obj = JUnitXMLReporter()
    obj.engine = EngineEmul()
    path_from_config = tempfile.mktemp(suffix='.xml', prefix='junit-xml-path-in-settings',
                                       dir=obj.engine.artifacts_dir)
    obj.parameters = BetterDict.from_dict({"filename": path_from_config, "data-source": "sample-labels"})

    obj.prepare()

    datapoint = DataPoint(0, [])
    cumul_data = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224,
            0.005: 125, 0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20,
            0.011: 8, 0.01: 8, 0.017: 3, 0.016: 3, 0.014: 3, 0.013: 3,
            0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1, 0.019: 1, 0.015: 1
        }),
        KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                         'urls': Counter({'http://192.168.25.8/': 7373}),
                         KPISet.RESP_CODES: '403'}],
        KPISet.STDEV_RESP_TIME: 0.04947974228872108,
        KPISet.AVG_LATENCY: 0.0002825639815220692,
        KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 59314,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005440536804127192,
        KPISet.FAILURES: 29656})
    datapoint[DataPoint.CUMULATIVE][""] = cumul_data

    obj.aggregated_second(datapoint)
    obj.post_process()

    self.assertTrue(os.path.exists(obj.report_file_path))
def _extract_test_case(self, test_case, test_suite, suite_level_props):
    case_name = test_case.get("name")
    scenario_name = test_suite.get("name") + "-" + case_name

    case_properties = self._extract_properties(test_case)
    case_properties = {
        "#TestCase#" + key: value
        for key, value in iteritems(case_properties)
    }
    case_level_props = BetterDict()
    case_level_props.merge(suite_level_props)
    case_level_props.merge(case_properties)

    scenario = self._extract_scenario(test_case, case_level_props)
    scenario['test-suite'] = test_suite.get("name")

    return scenario_name, scenario
def test_blazemeter_report_link(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict()
    xml_report = obj.engine.create_artifact("status", ".xml")
    obj.parameters.merge({
        "dump-xml": xml_report,
    })

    rep = BlazeMeterUploader()
    rep.results_url = "http://report/link"
    obj.engine.reporters.append(rep)

    obj.startup()
    obj.shutdown()
    obj.aggregated_second(self.__get_datapoint())
    obj.post_process()

    self.assertTrue(os.path.exists(xml_report))
    with open(xml_report) as fds:
        report_content = fds.read()
    self.assertIn('<ReportURL>http://report/link</ReportURL>', report_content)
def test_prepare_java_folder(self):
    """
    Check if scripts exist in working dir
    :return:
    """
    obj = self.get_selenium_executor()
    obj.execution = BetterDict()
    obj.execution.merge({"scenario": {"script": __dir__() + "/../selenium/java/"}})
    obj.prepare()

    prepared_files = os.listdir(obj.runner.working_dir)
    java_files = [fname for fname in prepared_files if fname.endswith(".java")]
    class_files = [fname for fname in prepared_files if fname.endswith(".class")]
    jars = [fname for fname in prepared_files if fname.endswith(".jar")]
    self.assertEqual(len(java_files), 2)
    self.assertEqual(len(class_files), 2)
    self.assertEqual(len(jars), 1)
def test_dns_cache_mgr_script(self):
    """
    :return:
    """
    obj = JMeterExecutor()
    obj.engine = EngineEmul()
    obj.engine.config = BetterDict()
    obj.engine.config.merge(yaml.load(open("tests/yaml/dns_mgr_script.yml").read()))
    obj.engine.config.merge({"provisioning": "local"})
    obj.execution = obj.engine.config['execution']
    obj.settings.merge(obj.engine.config.get("modules").get("jmeter"))
    obj.prepare()

    xml_tree = etree.fromstring(open(obj.modified_jmx, "rb").read())
    dns_managers = xml_tree.findall(".//DNSCacheManager")  # 0 dns_managers
    self.assertEqual(len(dns_managers), 0)

    sys_prop = open(os.path.join(obj.engine.artifacts_dir, "system.properties")).read()
    self.assertTrue("any_prop=true" in sys_prop)
    self.assertFalse("sun.net.inetaddr.ttl=0" in sys_prop)
def test_selenium_startup_shutdown_python_folder(self):
    """
    run tests from .py files
    :return:
    """
    obj = SeleniumExecutor()
    obj.engine = EngineEmul()
    obj.engine.config = BetterDict()
    obj.engine.config.merge(
        {'execution': {'scenario': {'script': __dir__() + '/../selenium/python/'},
                       'executor': 'selenium'},
         'reporting': [{'module': 'junit-xml'}]})
    obj.engine.config.merge({"provisioning": "local"})
    obj.execution = obj.engine.config['execution']
    obj.settings.merge(obj.engine.config.get("modules").get("selenium"))
    obj.prepare()
    obj.startup()
    while not obj.check():
        time.sleep(1)
    obj.shutdown()

    prepared_files = os.listdir(obj.runner.working_dir)
    python_files = [fname for fname in prepared_files if fname.endswith(".py")]
    self.assertEqual(2, len(python_files))
    self.assertTrue(os.path.exists(obj.runner.settings.get("report-file")))
def test_functional_report(self):
    engine = EngineEmul()
    aggregator = FunctionalAggregator()
    aggregator.engine = engine
    engine.aggregator = aggregator

    obj = JUnitXMLReporter()
    obj.engine = engine
    obj.parameters = BetterDict()

    reader = FuncSamplesReader(__dir__() + "/../resources/functional/nose.ldjson", engine, logging.getLogger())
    aggregator.add_underling(reader)

    aggregator.prepare()
    obj.prepare()
    aggregator.post_process()
    obj.post_process()

    self.assertFilesEqual(obj.report_file_path, __dir__() + "/../resources/functional/xunit-report.xml")
def check_schedule_size_estimate(self, obj, execution):
    obj.engine.config = BetterDict()
    obj.engine.config.merge({
        ScenarioExecutor.EXEC: execution,
        "provisioning": "local",
    })
    obj.execution = obj.engine.config['execution']
    load = obj.get_load()

    obj.pbench = TaurusPBenchTool(obj, logging.getLogger(''))
    obj.pbench.generate_payload(obj.get_scenario())
    payload_count = len(obj.get_scenario().get('requests', []))

    sch = Scheduler(load, open(obj.pbench.payload_file, 'rb'), logging.getLogger(''))
    estimated_schedule_size = obj.pbench._estimate_schedule_size(load, payload_count)
    logging.debug("Estimated schedule size: %s", estimated_schedule_size)

    items = list(sch.generate())
    actual_schedule_size = len(items)
    logging.debug("Actual schedule size: %s", actual_schedule_size)

    if actual_schedule_size != 0:
        error = abs(estimated_schedule_size - actual_schedule_size)
        error_rel = error / float(actual_schedule_size)
        logging.debug("Estimation error: %s", error)
        if error_rel >= 0.1:
            self.fail("Estimation failed (error=%s) on config %s" % (error_rel, pprint.pformat(execution)))
def test_csv_report_fieldname_order(self):
    obj = FinalStatus()
    obj.engine = EngineEmul()
    csv_report = obj.engine.create_artifact("report", ".csv")
    obj.parameters = BetterDict.from_dict({
        "dump-csv": csv_report,
    })

    obj.startup()
    obj.aggregated_second(self.__get_datapoint(ts=90))
    obj.aggregated_second(self.__get_datapoint(ts=100))
    obj.shutdown()
    obj.post_process()

    self.assertTrue(os.path.exists(csv_report))
    with open(csv_report) as fds:
        fieldnames = fds.readline().strip().split(",")

    perc_fields = [float(name[5:]) for name in fieldnames if name.startswith('perc_')]
    self.assertTrue(sorted(perc_fields) == perc_fields)

    rc_fields = [float(name[3:]) for name in fieldnames if name.startswith('rc_')]
    self.assertTrue(sorted(rc_fields) == rc_fields)
def run_tests(self):
    # java -cp junit.jar:selenium-test-small.jar:
    # selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar
    # org.junit.runner.JUnitCore TestBlazemeterPass
    jar_list = [os.path.join(self.working_dir, jar)
                for jar in os.listdir(self.working_dir) if jar.endswith(".jar")]
    self.base_class_path.extend(jar_list)

    with open(self.props_file, 'wt') as props:
        props.write("report_file=%s\n" % self.settings.get("report-file").replace(os.path.sep, '/'))

        if self.load.iterations:
            props.write("iterations=%s\n" % self.load.iterations)

        if self.load.hold:
            props.write("hold_for=%s\n" % self.load.hold)

        for index, item in enumerate(jar_list):
            props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/')))

    std_out = open(self.settings.get("stdout"), "wt")
    self.opened_descriptors.append(std_out)
    std_err = open(self.settings.get("stderr"), "wt")
    self.opened_descriptors.append(std_err)

    env = BetterDict()
    env.merge(dict(os.environ))
    env.merge(self.env)

    junit_command_line = ["java", "-cp", os.pathsep.join(self.base_class_path),
                          "taurusjunit.CustomRunner", self.props_file]
    self.process = self.executor.execute(junit_command_line, cwd=self.artifacts_dir,
                                         stdout=std_out, stderr=std_err, env=env)
def test_add_shaper_ramp_up(self):
    obj = JMeterExecutor()
    obj.engine = EngineEmul()
    obj.engine.config = BetterDict()
    obj.engine.config.merge({'execution': {'ramp-up': '1m', 'throughput': 10, 'hold-for': '2m',
                                           'concurrency': 20,
                                           'scenario': {'script': __dir__() + '/../jmx/http.jmx'}}})
    obj.engine.config.merge({"provisioning": "local"})
    obj.execution = obj.engine.config['execution']
    obj.prepare()

    xml_tree = etree.fromstring(open(obj.modified_jmx, "rb").read())
    timer_ = ".//kg.apc.jmeter.timers.VariableThroughputTimer"
    timer_ += "[@testclass='kg.apc.jmeter.timers.VariableThroughputTimer']"
    shaper_elements = xml_tree.findall(timer_)
    self.assertEqual(1, len(shaper_elements))

    shaper_coll_element = shaper_elements[0].find(".//collectionProp[@name='load_profile']")
    self.assertEqual("1", shaper_coll_element.findall(".//stringProp[@name='49']")[0].text)
    self.assertEqual("10", shaper_coll_element.findall(".//stringProp[@name='1567']")[0].text)
    self.assertEqual("60", shaper_coll_element.findall(".//stringProp[@name='53']")[0].text)
    self.assertEqual("10", shaper_coll_element.findall(".//stringProp[@name='49']")[1].text)
    self.assertEqual("10", shaper_coll_element.findall(".//stringProp[@name='1567']")[1].text)
    self.assertEqual("120", shaper_coll_element.findall(".//stringProp[@name='53']")[1].text)
def __apply_single_override(self, dest, name, value):
    """
    Apply single override
    :type name: str
    :type value: str
    """
    self.log.debug("Applying %s=%s", name, value)
    parts = [(int(x) if is_int(x) else x) for x in name.split(".")]
    pointer = dest
    for index, part in enumerate(parts[:-1]):
        self.__ensure_list_capacity(pointer, part, parts[index + 1])
        if isinstance(part, int):
            if part < 0:
                if isinstance(parts[index + 1], int):
                    pointer.append([])
                else:
                    pointer.append(BetterDict())
                pointer = pointer[-1]
            else:
                pointer = pointer[part]
        elif isinstance(parts[index + 1], int) and isinstance(pointer, dict):
            pointer = pointer.get(part, [])
        else:
            pointer = pointer.get(part)

    self.__ensure_list_capacity(pointer, parts[-1])
    self.log.debug("Applying: [%s]=%s", parts[-1], value)
    if isinstance(parts[-1], string_types) and parts[-1][0] == '^':
        del pointer[parts[-1][1:]]
    else:
        if value.isdigit():
            value = float(value)
        if isinstance(pointer, list) and parts[-1] < 0:
            pointer.append(value)
        else:
            pointer[parts[-1]] = value
def startup(self):
    """
    Should start the tool as fast as possible.
    """
    self.start_time = time.time()
    out = self.engine.create_artifact("grinder-stdout", ".log")
    err = self.engine.create_artifact("grinder-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")

    env = BetterDict()
    env.merge(dict(os.environ))
    env.merge({"T_GRINDER_PREFIX": self.exec_id})

    self.process = shell_exec(self.cmd_line, cwd=self.engine.artifacts_dir,
                              stdout=self.stdout_file, stderr=self.stderr_file, env=env)
def test_convert_tgroups_load_modifications(self):
    obj = JMeterExecutor()
    obj.engine = EngineEmul()
    obj.engine.config[Provisioning.PROV] = 'test'
    obj.execution = BetterDict()
    obj.execution.merge({
        "iterations": 20,
        "ramp-up": 10,
        "hold-for": "2m",
        "scenario": {"script": __dir__() + "/../jmx/SteppingThreadGroup.jmx"}
    })
    obj.prepare()
    modified_xml_tree = etree.fromstring(open(obj.modified_jmx, "rb").read())

    st_tg = modified_xml_tree.find(".//kg.apc.jmeter.threads.SteppingThreadGroup")
    self.assertEqual(st_tg, None)
    ul_tg = modified_xml_tree.find(".//kg.apc.jmeter.threads.UltimateThreadGroup")
    self.assertEqual(ul_tg, None)

    converted_st_tg = modified_xml_tree.find(".//ThreadGroup[@testname='stepping tg']")

    loop_ctrl = converted_st_tg.find(".//elementProp[@name='ThreadGroup.main_controller']")
    tg_loops = loop_ctrl.find(".//*[@name='LoopController.loops']")
    tg_forever = loop_ctrl.find(".//boolProp[@name='LoopController.continue_forever']")
    self.assertEqual(tg_loops.text, "20")
    self.assertEqual(tg_forever.text, "false")

    st_tg_concurrency = converted_st_tg.find(".//stringProp[@name='ThreadGroup.num_threads']")
    self.assertEqual(st_tg_concurrency.text, "123")
def __write_props_file(self):
    def write_prop(name, val):
        if val:
            if isinstance(val, list):
                val = ",".join(val)
            fds.write("{name}={val}\n".format(name=name, val=val))

    props = get_assembled_value(configs=[self.settings, self.get_scenario(), self.execution], key="properties")
    props = props or BetterDict()

    junit_version = str(self.settings.get("junit-version", "4"))
    if junit_version == "5":
        props.merge({"junit_version": 5})

    with open(self.props_file, 'wt') as fds:
        for key in sorted(props.keys()):
            write_prop(key, props[key])

        fds.write("report_file=%s\n" % self.report_file)

        load = self.get_load()
        write_prop("iterations", load.iterations)
        write_prop("hold_for", load.hold)
        write_prop("concurrency", load.concurrency)
        write_prop("ramp_up", load.ramp_up)
        write_prop("steps", load.steps)

        write_prop("run_items", self._get_items_list("run-items"))
        write_prop("include_category", self._get_items_list("include-categories"))
        write_prop("exclude_category", self._get_items_list("exclude-categories"))
def _extract_scenario_from_soapui(self, base_scenario, script_path):
    test_case = base_scenario.get("test-case", None)
    converter = SoapUIScriptConverter(self.log)
    conv_config = converter.convert_script(script_path)
    conv_scenarios = conv_config["scenarios"]
    scenario_name, conv_scenario = converter.find_soapui_test_case(test_case, conv_scenarios)

    new_name = scenario_name
    counter = 1
    while new_name in self.engine.config["scenarios"]:
        new_name = scenario_name + ("-%s" % counter)
        counter += 1

    if new_name != scenario_name:
        self.log.info("Scenario name '%s' is already taken, renaming to '%s'", scenario_name, new_name)
        scenario_name = new_name

    merged_scenario = BetterDict.from_dict(conv_scenario)
    merged_scenario.merge(base_scenario.data)
    for field in [Scenario.SCRIPT, "test-case"]:
        if field in merged_scenario:
            merged_scenario.pop(field)

    return scenario_name, merged_scenario
def test_diagnostics(self):
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.settings = BetterDict()
    obj.engine.config.merge({
        "provisioning": "test",
        ScenarioExecutor.EXEC: [{
            "throughput": 10,
            "hold-for": 30,
            "scenario": {
                "default-address": "http://blazedemo.com/",
                "requests": ["/"]}}]})
    obj.execution = obj.engine.config['execution'][0]
    obj.settings.merge({
        "path": os.path.join(os.path.dirname(__file__), "..", "resources", "pbench", "phantom.sh"),
    })
    obj.prepare()
    obj.startup()
    for _ in range(3):
        obj.check()
    obj.shutdown()
    obj.post_process()
    self.assertIsNotNone(obj.get_error_diagnostics())
def test_add_shaper_ramp_up(self):
    obj = JMeterExecutor()
    obj.engine = EngineEmul()
    obj.engine.config = BetterDict()
    obj.engine.config.merge(yaml.load(open("tests/yaml/throughput_ramp_up.yml").read()))
    obj.engine.config.merge({"provisioning": "local"})
    obj.execution = obj.engine.config['execution']
    obj.prepare()

    xml_tree = etree.fromstring(open(obj.modified_jmx, "rb").read())
    shaper_elements = xml_tree.findall(
        ".//kg.apc.jmeter.timers.VariableThroughputTimer"
        "[@testclass='kg.apc.jmeter.timers.VariableThroughputTimer']")
    self.assertEqual(1, len(shaper_elements))

    shaper_coll_element = shaper_elements[0].find(".//collectionProp[@name='load_profile']")
    self.assertEqual("1", shaper_coll_element.findall(".//stringProp[@name='49']")[0].text)
    self.assertEqual("10", shaper_coll_element.findall(".//stringProp[@name='1567']")[0].text)
    self.assertEqual("60", shaper_coll_element.findall(".//stringProp[@name='53']")[0].text)
    self.assertEqual("10", shaper_coll_element.findall(".//stringProp[@name='49']")[1].text)
    self.assertEqual("10", shaper_coll_element.findall(".//stringProp[@name='1567']")[1].text)
    self.assertEqual("120", shaper_coll_element.findall(".//stringProp[@name='53']")[1].text)
def test_selenium_startup_shutdown_python_folder(self):
    """
    run tests from .py files
    :return:
    """
    obj = SeleniumExecutor()
    obj.engine = EngineEmul()
    obj.engine.config = BetterDict()
    obj.engine.config.merge(yaml.load(open("tests/yaml/selenium_executor_python.yml").read()))
    obj.engine.config.merge({"provisioning": "local"})
    obj.execution = obj.engine.config['execution']
    obj.settings.merge(obj.engine.config.get("modules").get("selenium"))
    obj.prepare()
    obj.startup()
    while not obj.check():
        time.sleep(1)
    obj.shutdown()

    prepared_files = os.listdir(obj.runner.working_dir)
    python_files = [fname for fname in prepared_files if fname.endswith(".py")]
    self.assertEqual(2, len(python_files))
    self.assertTrue(os.path.exists(obj.runner.report_file))
def test_stepping_tg_ramp_no_proportion(self):
    """
    Tested without concurrency proportions
    :return:
    """
    obj = JMeterExecutor()
    obj.engine = EngineEmul()
    obj.engine.config = BetterDict()
    obj.engine.config.merge(yaml.load(open("tests/yaml/stepping_ramp_up.yml").read()))
    obj.engine.config.merge({"provisioning": "local"})
    obj.execution = obj.engine.config['execution']
    obj.prepare()
    load = obj.get_load()

    orig_xml_tree = etree.fromstring(open(obj.original_jmx, "rb").read())
    modified_xml_tree = etree.fromstring(open(obj.modified_jmx, "rb").read())

    mod_stepping_tgs = modified_xml_tree.findall(".//kg.apc.jmeter.threads.SteppingThreadGroup")
    orig_tgs = orig_xml_tree.findall(".//ThreadGroup")
    self.assertEqual(len(mod_stepping_tgs), len(orig_tgs))

    for orig_th, step_th in zip(orig_tgs, mod_stepping_tgs):
        orig_num_threads = int(orig_th.find(".//stringProp[@name='ThreadGroup.num_threads']").text)
        mod_num_threads = int(step_th.find(".//stringProp[@name='ThreadGroup.num_threads']").text)
        self.assertEqual(orig_num_threads, mod_num_threads)

        self.assertEqual(step_th.find(".//stringProp[@name='Start users period']").text,
                         str(int(load.ramp_up / load.steps)))
        self.assertEqual(step_th.find(".//stringProp[@name='Start users count']").text,
                         str(int(orig_num_threads / load.steps)))
def test_step_shaper(self):
    obj = JMeterExecutor()
    obj.engine = EngineEmul()
    obj.engine.config = BetterDict()
    obj.engine.config.merge({'execution': {'steps': 5, 'concurrency': 170,
                                           'scenario': {'script': 'tests/jmx/stepping_ramp_up.jmx'},
                                           'ramp-up': '1m', 'distributed': ['127.0.0.1'],
                                           'hold-for': '2m'}})
    obj.engine.config.merge({"provisioning": "local"})
    obj.execution = obj.engine.config['execution']
    obj.execution['throughput'] = 100
    obj.prepare()
    load = obj.get_load()

    modified_xml_tree = etree.fromstring(open(obj.modified_jmx, "rb").read())
    timer = modified_xml_tree.findall(".//kg.apc.jmeter.timers.VariableThroughputTimer")
    self.assertEqual(len(timer), 1)

    for num, step_collection in enumerate(timer[0].findall(".//load_profile")):
        # compare element text, not the lxml elements themselves
        step_start_rps = step_collection.find(".//stringProp[@name='49']").text
        step_stop_rps = step_collection.find(".//stringProp[@name='1567']").text
        self.assertTrue(step_start_rps == step_stop_rps == str(int(round(float(load.throughput) / load.steps))))
        if num + 1 == load.steps:
            self.assertEqual(step_collection.find(".//stringProp[@name='53']").text,
                             str(int(load.hold + load.ramp_up / load.steps)))
        else:
            self.assertEqual(step_collection.find(".//stringProp[@name='53']").text,
                             str(int(load.ramp_up / load.steps)))
def test_install_Gatling(self):
    path = os.path.abspath(__dir__() + "/../../build/tmp/gatling-taurus/bin/gatling" + EXE_SUFFIX)
    shutil.rmtree(os.path.dirname(os.path.dirname(path)), ignore_errors=True)

    # backup download link and version
    gatling_link = GatlingExecutor.DOWNLOAD_LINK
    gatling_ver = GatlingExecutor.VERSION
    GatlingExecutor.DOWNLOAD_LINK = "file:///" + __dir__() + "/../data/gatling-dist-{version}_{version}.zip"
    GatlingExecutor.VERSION = '2.1.4'

    self.assertFalse(os.path.exists(path))
    obj = GatlingExecutor()
    obj.engine = EngineEmul()
    obj.settings.merge({"path": path})
    obj.execution = BetterDict()
    obj.execution.merge({"scenario": {"script": "tests/gatling/BasicSimulation.scala",
                                      "simulation": "mytest.BasicSimulation"}})
    obj.prepare()
    self.assertTrue(os.path.exists(path))
    obj.prepare()

    GatlingExecutor.DOWNLOAD_LINK = gatling_link
    GatlingExecutor.VERSION = gatling_ver
def _set_env(self):
    props = BetterDict()
    props.merge(self.settings.get('properties'))
    props.merge(self.get_scenario().get("properties"))

    props['gatling.core.outputDirectoryBaseName'] = self.dir_prefix
    props['gatling.core.directory.resources'] = self.engine.artifacts_dir
    props['gatling.core.directory.results'] = self.engine.artifacts_dir

    self._set_simulation_props(props)
    self._set_load_props(props)
    self._set_scenario_props(props)

    self.env.set({"NO_PAUSE": "TRUE"})
    self.env.add_java_param({"JAVA_OPTS": self.settings.get("java-opts", None)})
    for key in props:
        self.env.add_java_param({"JAVA_OPTS": "-D%s=%s" % (key, props[key])})

    self.log.debug('JAVA_OPTS: "%s"', self.env.get("JAVA_OPTS"))
def _extract_config(self, project, test_suites, target_test_case=None):
    execution = []
    scenarios = {}

    project_properties = self._extract_properties(project, key_prefix="#Project#")

    for suite in test_suites:
        suite_props = BetterDict()
        suite_props.merge(project_properties)
        suite_props.merge(self._extract_properties(suite, key_prefix="#TestSuite#"))

        test_cases = suite.findall('.//con:testCase', namespaces=self.NAMESPACES)
        for case in test_cases:
            case_name = case.get("name")
            scenario_name, scenario = self._extract_test_case(case, suite, suite_props)

            load_exec = self._extract_execution(case)
            load_exec['scenario'] = scenario_name
            self.log.debug("Extracted execution for scenario %s", scenario_name)

            if not scenario["requests"]:
                self.log.warning("No requests extracted for scenario %s, skipping it" % scenario_name)
                continue

            if target_test_case is None or target_test_case == case_name:
                self.log.debug("Extracted scenario: %s", scenario_name)
                scenarios[scenario_name] = scenario
                execution.append(load_exec)

    return {
        "execution": execution,
        "scenarios": scenarios,
    }