def _create_runner(self, working_dir, report_file):
    """Build the runner (nose for .py, junit otherwise) for the detected script type."""
    script_path = self.get_script_path()
    script_type = self.detect_script_type(script_path)
    config = BetterDict()
    tools = self.settings.get("selenium-tools")

    if script_type == ".py":
        cls = NoseTester
        config.merge(tools.get("nose"))
    else:  # script_type == ".jar" or script_type == ".java":
        cls = JUnitTester
        config.merge(tools.get("junit"))
        config['props-file'] = self.engine.create_artifact("customrunner", ".properties")

    config["script-type"] = script_type
    config["working-dir"] = working_dir
    # BetterDict.get(key, default) stores the default when the key is absent
    config.get("artifacts-dir", self.engine.artifacts_dir)
    config.get("report-file", report_file)
    config.get("stdout", self.engine.create_artifact("junit", ".out"))
    config.get("stderr", self.engine.create_artifact("junit", ".err"))
    return cls(config, self)
def run_tests(self):
    """Write the runner properties file and launch the JUnit custom runner JVM."""
    # java -cp junit.jar:selenium-test-small.jar:
    # selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar
    # org.junit.runner.JUnitCore TestBlazemeterPass
    jars = [os.path.join(self.working_dir, entry)
            for entry in os.listdir(self.working_dir)
            if entry.endswith(".jar")]
    self.base_class_path.extend(jars)

    with open(self.props_file, 'wt') as props:
        # paths are normalized to forward slashes for the Java properties format
        props.write("kpi_log=%s\n" % self.settings.get("report-file").replace(os.path.sep, '/'))
        props.write("error_log=%s\n" % self.settings.get("err-file").replace(os.path.sep, '/'))
        if self.load.iterations:
            props.write("iterations=%s\n" % self.load.iterations)
        if self.load.hold:
            props.write("hold_for=%s\n" % self.load.hold)
        for idx, jar in enumerate(jars):
            props.write("target_%s=%s\n" % (idx, jar.replace(os.path.sep, '/')))

    std_out = open(self.settings.get("stdout"), "wt")
    self.opened_descriptors.append(std_out)
    std_err = open(self.settings.get("stderr"), "wt")
    self.opened_descriptors.append(std_err)

    env = BetterDict()
    env.merge(dict(os.environ))
    env.merge(self.env)

    cmdline = ["java", "-cp", os.pathsep.join(self.base_class_path),
               "taurusjunit.CustomRunner", self.props_file]
    self.process = shell_exec(cmdline, cwd=self.artifacts_dir,
                              stdout=std_out, stderr=std_err, env=env)
def _build_request(self, request, scenario):
    """Render the request spec as a raw HTTP/1.1 message string."""
    path = self._get_request_path(request, scenario)
    message = "%s %s HTTP/1.1\r\n" % (request.method, path)

    headers = BetterDict()
    headers.merge({"Host": self.hostname})
    if not scenario.get("keepalive", True):
        # HTTP/1.1 implies keep-alive by default
        headers.merge({"Connection": 'close'})

    if isinstance(request.body, dict):
        # GET requests carry dict bodies as query params elsewhere, so skip them here
        body = urlencode(request.body) if request.method != "GET" else ""
    elif isinstance(request.body, string_types):
        body = request.body
    elif request.body:
        raise ValueError("Cannot handle 'body' option of type %s: %s" % (type(request.body), request.body))
    else:
        body = ""

    if body:
        headers.merge({"Content-Length": len(body)})

    headers.merge(scenario.get("headers"))
    headers.merge(request.headers)
    for name, value in iteritems(headers):
        message += "%s: %s\r\n" % (name, value)
    message += "\r\n%s" % (body, )
    return message
def startup(self):
    """Spawn the locust wrapper subprocess with computed hatch rate and env."""
    self.start_time = time.time()
    load = self.get_load()
    hatch = load.concurrency / load.ramp_up if load.ramp_up else load.concurrency
    wrapper = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir,
                           "resources", "locustio-taurus-wrapper.py")

    env = BetterDict()
    env.merge({"PYTHONPATH": self.engine.artifacts_dir + os.pathsep + os.getcwd()})
    if os.getenv("PYTHONPATH"):
        # keep any pre-existing PYTHONPATH entries ahead of ours
        env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']

    cmdline = [sys.executable, os.path.realpath(wrapper), '-f', os.path.realpath(self.locustfile)]
    cmdline.append('--logfile=%s' % self.engine.create_artifact("locust", ".log"))
    cmdline.extend(["--no-web", "--only-summary", ])
    cmdline.extend(["--clients=%d" % load.concurrency, "--hatch-rate=%d" % math.ceil(hatch), ])
    if load.iterations:
        cmdline.append("--num-request=%d" % load.iterations)

    if self.is_master:
        cmdline.extend(["--master", '--expect-slaves=%s' % self.expected_slaves])
        env["SLAVES_LDJSON"] = self.slaves_ldjson
    else:
        env["JTL"] = self.kpi_jtl

    host = self.get_scenario().get("default-address", None)
    if host:
        cmdline.append("--host=%s" % host)

    self.__out = open(self.engine.create_artifact("locust", ".out"), 'w')
    self.process = self.execute(cmdline, stderr=STDOUT, stdout=self.__out, env=env)
def _create_runner(self, report_file):
    """Create the configured runner (nose or junit) for the detected script type.

    :param report_file: path the runner should write its report to
    :raises ValueError: if the script type is not supported
    """
    script_path = self.get_script_path()
    script_type = self.detect_script_type(script_path)
    runner_config = BetterDict()
    if script_type == "python-nose":
        runner_class = NoseTester
        runner_config.merge(self.settings.get("selenium-tools").get("nose"))
    elif script_type == "java-junit":
        runner_class = JUnitTester
        runner_config.merge(self.settings.get("selenium-tools").get("junit"))
        runner_config['working-dir'] = self.get_runner_working_dir()
        runner_config['props-file'] = self.engine.create_artifact("customrunner", ".properties")
    else:
        # fix: was ValueError("Unsupported script type: %s", script_type) —
        # passing a tuple instead of interpolating, producing a malformed message
        raise ValueError("Unsupported script type: %s" % script_type)
    runner_config["script"] = script_path
    runner_config["script-type"] = script_type
    runner_config["artifacts-dir"] = self.engine.artifacts_dir
    runner_config["report-file"] = report_file
    runner_config["stdout"] = self.engine.create_artifact("selenium", ".out")
    runner_config["stderr"] = self.engine.create_artifact("selenium", ".err")
    return runner_class(runner_config, self)
def _create_runner(self, report_file):
    """Instantiate the runner class matching the detected script type."""
    script_path = self.get_script_path()
    script_type = self.detect_script_type(script_path)
    config = BetterDict()
    tools = self.settings.get("selenium-tools")

    if script_type == "python-nose":
        runner_class = NoseTester
        config.merge(tools.get("nose"))
    elif script_type == "java-junit":
        runner_class = JUnitTester
        config.merge(tools.get("junit"))
        # junit runner compiles classes and reads its config from a props file
        config['working-dir'] = self.get_runner_working_dir()
        config['props-file'] = self.engine.create_artifact("customrunner", ".properties")
    elif script_type == "ruby-rspec":
        runner_class = RSpecTester
    elif script_type == "js-mocha":
        runner_class = MochaTester
    else:
        raise ValueError("Unsupported script type: %s" % script_type)

    config["script"] = script_path
    config["script-type"] = script_type
    config["artifacts-dir"] = self.engine.artifacts_dir
    config["report-file"] = report_file
    config["stdout"] = self.engine.create_artifact("selenium", ".out")
    config["stderr"] = self.engine.create_artifact("selenium", ".err")
    return runner_class(config, self)
def _extract_request(self, path, path_obj, method, operation):
    """Convert a Swagger operation into a Taurus request dict."""
    request = {}
    if method != "get":
        request["method"] = method.upper()
    if operation.operation_id is not None:
        request["label"] = operation.operation_id

    # path-level parameters first, operation-level ones override them
    parameters = BetterDict()
    for source in (path_obj.parameters, operation.parameters):
        if source:
            parameters.merge(source)

    query_params, form_data, request_body, headers = self._handle_parameters(parameters)
    if headers:
        request["headers"] = headers

    if form_data and request_body:
        self.log.warning("Both form data and request body are specified. Omitting form data")
    if request_body:
        request["body"] = request_body
    elif form_data:
        request["body"] = form_data

    request["url"] = self._embed_query_in_path(path, query_params) if query_params else path
    return request
def _build_request(self, request, scenario):
    """Serialize the request spec into a raw HTTP/1.1 message."""
    path = self._get_request_path(request, scenario)
    parts = ["%s %s HTTP/1.1\r\n" % (request.method, path)]

    headers = BetterDict()
    headers.merge({"Host": self.hostname})
    if not scenario.get("keepalive", True):  # HTTP/1.1 implies keep-alive by default
        headers.merge({"Connection": 'close'})

    body = ""
    if isinstance(request.body, dict):
        if request.method != "GET":
            body = urlencode(request.body)
    elif isinstance(request.body, string_types):
        body = request.body
    elif request.body:
        raise ValueError("Cannot handle 'body' option of type %s: %s" % (type(request.body), request.body))

    if body:
        headers.merge({"Content-Length": len(body)})

    headers.merge(scenario.get("headers"))
    headers.merge(request.headers)
    for key, val in iteritems(headers):
        parts.append("%s: %s\r\n" % (key, val))
    parts.append("\r\n%s" % (body,))
    return "".join(parts)
def prepare(self):
    """Set up the selenium executor: pick a runner, prepare artifacts and the results reader."""
    self.set_virtual_display()
    self.scenario = self.get_scenario()
    self._verify_script()
    self.kpi_file = self.engine.create_artifact("selenium_tests_report", ".csv")
    self.err_jtl = self.engine.create_artifact("selenium_tests_err", ".xml")
    script_type = self.detect_script_type(self.scenario.get(Scenario.SCRIPT))
    runner_config = BetterDict()
    if script_type == ".py":
        runner_class = NoseTester
        runner_config.merge(self.settings.get("selenium-tools").get("nose"))
    else:  # script_type == ".jar" or script_type == ".java":
        runner_class = JUnitTester
        runner_config.merge(self.settings.get("selenium-tools").get("junit"))
        # junit runner reads its configuration from a generated props file
        runner_config['props-file'] = self.engine.create_artifact("customrunner", ".properties")
    runner_config["script-type"] = script_type
    self.runner_working_dir = self.engine.create_artifact(runner_config.get("working-dir", "classes"), "")
    runner_config["working-dir"] = self.runner_working_dir
    # NOTE: BetterDict.get(key, default) stores the default when the key is absent,
    # so these calls populate runner_config rather than just reading it
    runner_config.get("artifacts-dir", self.engine.artifacts_dir)
    runner_config.get("working-dir", self.runner_working_dir)
    runner_config.get("report-file", self.kpi_file)
    runner_config.get("err-file", self.err_jtl)
    runner_config.get("stdout", self.engine.create_artifact("junit", ".out"))
    runner_config.get("stderr", self.engine.create_artifact("junit", ".err"))
    self._cp_resource_files(self.runner_working_dir)
    self.runner = runner_class(runner_config, self.scenario, self.get_load(), self.log)
    self.runner.prepare()
    self.reader = JTLReader(self.kpi_file, self.log, self.err_jtl)
    if isinstance(self.engine.aggregator, ConsolidatingAggregator):
        self.engine.aggregator.add_underling(self.reader)
def _set_env(self):
    """Export collected Gatling properties through JAVA_OPTS."""
    props = BetterDict()
    props.merge(self.settings.get('properties'))
    props.merge(self.get_scenario().get("properties"))

    props['gatling.core.outputDirectoryBaseName'] = self.dir_prefix
    props['gatling.core.directory.resources'] = self.engine.artifacts_dir
    props['gatling.core.directory.results'] = self.engine.artifacts_dir

    props.merge(self._get_simulation_props())
    props.merge(self._get_load_props())
    props.merge(self._get_scenario_props())

    for key in sorted(props.keys()):
        value = props[key]
        # strings get repr()-quoting so separators/quotes survive
        template = "%r" if isinstance(value, string_types) else "%s"
        self.env.add_java_param({"JAVA_OPTS": ("-D%s=" + template) % (key, value)})

    self.env.set({"NO_PAUSE": "TRUE"})
    self.env.add_java_param({"JAVA_OPTS": self.settings.get("java-opts", None)})
    self.log.debug('JAVA_OPTS: "%s"', self.env.get("JAVA_OPTS"))
def startup(self):
    """Assemble the siege command line and launch the process."""
    args = [self.tool_path]
    load = self.get_load()

    if load.iterations:
        args += ['--reps', str(load.iterations)]
    elif load.hold:
        args += ['--time', '%sS' % ceil(dehumanize_time(load.hold))]
    else:
        raise ValueError("You must specify either 'hold-for' or 'iterations' for siege")

    if self.scenario.get('think-time'):
        args += ['--delay', str(dehumanize_time(self.scenario.get('think-time')))]
    else:
        args += ['--benchmark']

    concurrency = load.concurrency
    args += ['--concurrent', str(concurrency)]
    self.reader.concurrency = concurrency

    args += ['--file', self.__url_name]
    for name, value in iteritems(self.scenario.get_headers()):
        args += ['--header', "%s: %s" % (name, value)]

    env = BetterDict()
    env.merge(dict(environ))
    env.merge({"SIEGERC": self.__rc_name})

    self.start_time = time.time()
    self.process = shell_exec(args, stdout=self.__out, stderr=self.__err, env=env)
def run_tests(self):
    """ run python tests """
    executable = self.settings.get("interpreter", sys.executable)
    cmdline = [executable, self.plugin_path,
               '-k', self.settings.get("report-file"),
               '-e', self.settings.get("err-file")]
    if self.load.iterations:
        cmdline += ['-i', str(self.load.iterations)]
    if self.load.hold:
        cmdline += ['-d', str(self.load.hold)]
    cmdline += [self.working_dir]

    std_out = open(self.settings.get("stdout"), "wt")
    self.opened_descriptors.append(std_out)
    std_err = open(self.settings.get("stderr"), "wt")
    self.opened_descriptors.append(std_err)

    env = BetterDict()
    env.merge(dict(os.environ))
    env.merge(self.env)

    self.process = self.executor.execute(cmdline, cwd=self.artifacts_dir,
                                         stdout=std_out, stderr=std_err, env=env)
def _extract_transfer(self, transfer):
    """Convert a SoapUI property transfer into a Taurus extractor mapping."""
    source_type = transfer.findtext('./con:sourceType', namespaces=self.NAMESPACES)
    source_step_name = transfer.findtext('./con:sourceStep', namespaces=self.NAMESPACES)
    query = transfer.findtext('./con:sourcePath', namespaces=self.NAMESPACES)
    transfer_type = transfer.findtext('./con:type', namespaces=self.NAMESPACES)
    target_step_name = transfer.findtext('./con:targetStep', namespaces=self.NAMESPACES)
    target_prop = transfer.findtext('./con:targetType', namespaces=self.NAMESPACES)

    # strip the '#...#' decoration from step references
    if source_step_name.startswith("#") and source_step_name.endswith("#"):
        source_step_name = source_step_name[1:-1]

    if not self._validate_transfer(source_type, source_step_name, transfer_type, target_step_name):
        return None

    extractor = BetterDict()
    if transfer_type == "JSONPATH":
        spec = {'extract-jsonpath': {target_prop: {'jsonpath': query, 'default': 'NOT_FOUND'}}}
        extractor.merge(spec)
    elif transfer_type == "XPATH":
        spec = {'extract-xpath': {target_prop: {'xpath': query, 'default': 'NOT_FOUND'}}}
        extractor.merge(spec)
    return {source_step_name: extractor}
def _set_env(self):
    """Export Gatling properties via JAVA_OPTS, handling py2/unix quoting quirks."""
    props = BetterDict()
    props.merge(self.settings.get('properties'))
    props.merge(self.get_scenario().get("properties"))

    props['gatling.core.outputDirectoryBaseName'] = self.dir_prefix
    props['gatling.core.directory.resources'] = self.engine.artifacts_dir
    props['gatling.core.directory.results'] = self.engine.artifacts_dir

    props.merge(self._get_simulation_props())
    props.merge(self._get_load_props())
    props.merge(self._get_scenario_props())

    for key in sorted(props.keys()):
        value = props[key]
        template = "%s"
        if isinstance(value, string_types):
            if not is_windows():
                # extend properties support (contained separators/quotes/etc.) on lin/mac
                template = "%r"
            if PY2:
                value = value.encode("utf-8", 'ignore')  # to convert from unicode into str
        self.env.add_java_param({"JAVA_OPTS": ("-D%s=" + template) % (key, value)})

    self.env.set({"NO_PAUSE": "TRUE"})
    self.env.add_java_param({"JAVA_OPTS": self.settings.get("java-opts", None)})
    self.log.debug('JAVA_OPTS: "%s"', self.env.get("JAVA_OPTS"))
def run_tests(self):
    """ run rspec plugin """
    cmdline = [self.node_tool.executable, self.plugin_path,
               "--report-file", self.settings.get("report-file"),
               "--test-suite", self.script]
    if self.load.iterations:
        cmdline += ['--iterations', str(self.load.iterations)]
    if self.load.hold:
        cmdline += ['--hold-for', str(self.load.hold)]

    std_out = open(self.settings.get("stdout"), "wt")
    self.opened_descriptors.append(std_out)
    std_err = open(self.settings.get("stderr"), "wt")
    self.opened_descriptors.append(std_err)

    env = BetterDict()
    env.merge(dict(os.environ))
    env.merge(self.env)

    self.process = self.executor.execute(cmdline, stdout=std_out, stderr=std_err, env=env)
def prepare(self):
    """Instantiate and connect a monitoring client for each configured section."""
    super(Monitoring, self).prepare()
    # every parameter key except the service-level options names a client type
    clients = (param for param in self.parameters if param not in ('run-at', 'module'))
    for client_name in clients:
        if client_name in self.client_classes:
            client_class = self.client_classes[client_name]
        else:
            self.log.warning('Unknown monitoring found: %s', client_name)
            continue
        for config in self.parameters.get(client_name, []):
            label = config.get('label', None)
            if client_name == 'local':
                if any([client for client in self.clients
                        if isinstance(client, self.client_classes[client_name])]):
                    break  # skip the second and following local monitoring clients
                else:
                    if len(self.parameters.get(client_name, [])) > 1:
                        self.log.warning('LocalMonitoring client found twice, configs will be joined')
                    # join all 'local' configs into a single merged config
                    config = BetterDict()
                    for cfg in self.parameters.get(client_name, []):
                        config.merge(cfg)
            client = client_class(self.log, label, config, self.engine)
            self.clients.append(client)
            client.connect()
def __apply_single_override(self, dest, name, value):
    """
    Apply single override

    :type name: str
    :type value: str
    """
    self.log.debug("Applying %s=%s", name, value)
    # dotted path; numeric components address list indices
    parts = [(int(x) if is_int(x) else x) for x in name.split(".")]
    pointer = dest
    # walk down to the container that holds the last path component
    for index, part in enumerate(parts[:-1]):
        self.__ensure_list_capacity(pointer, part, parts[index + 1])
        if isinstance(part, int):
            if part < 0:
                # negative index: append a fresh container of the type the next part needs
                if isinstance(parts[index + 1], int):
                    pointer.append([])
                else:
                    pointer.append(BetterDict())
                pointer = pointer[-1]
            else:
                pointer = pointer[part]
        elif isinstance(parts[index + 1], int) and isinstance(pointer, dict):
            pointer = pointer.get(part, [])
        else:
            pointer = pointer.get(part)
    self.__ensure_list_capacity(pointer, parts[-1])
    self.log.debug("Applying: [%s]=%s", parts[-1], value)
    if isinstance(parts[-1], string_types) and parts[-1][0] == '^':
        # a leading '^' means "delete this entry" instead of setting it
        item = parts[-1][1:]
        if isinstance(pointer, list):
            item = int(item)
            if -len(pointer) <= item < len(pointer):
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        elif isinstance(pointer, dict):
            if item in pointer:
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        else:
            raise ValueError("Cannot handle override %s in non-iterable type %s" % (item, pointer))
    else:
        parsed_value = self.__parse_override_value(value)
        self.log.debug("Parsed override value: %r -> %r (%s)", value, parsed_value, type(parsed_value))
        if isinstance(parsed_value, dict):
            # wrap in BetterDict so later merges keep working
            dict_value = BetterDict()
            dict_value.merge(parsed_value)
            parsed_value = dict_value
        if isinstance(pointer, list) and parts[-1] < 0:
            pointer.append(parsed_value)
        else:
            pointer[parts[-1]] = parsed_value
def startup(self):
    """
    Should start the tool as fast as possible.
    """
    simulation = self.get_scenario().get("simulation", "")
    if not simulation:
        # TODO: guess simulation from script file
        raise ValueError("No simulation set")

    datadir = os.path.realpath(self.engine.artifacts_dir)
    cmdline = [self.settings["path"]]
    # fix: flag was "-rf " (trailing space), which the gatling CLI parser
    # would not recognize as the results-folder option
    cmdline += ["-sf", datadir, "-df", datadir, "-rf", datadir]
    cmdline += ["-on", "gatling-bzt", "-m", "-s", simulation]

    self.start_time = time.time()
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")

    params_for_scala = {}
    load = self.get_load()
    scenario = self.get_scenario()

    # translate scenario/load options into -D system properties for the JVM
    if scenario.get('timeout', None) is not None:
        params_for_scala['gatling.http.ahc.requestTimeout'] = int(dehumanize_time(scenario.get('timeout')) * 1000)
    if scenario.get('keepalive', None) is not None:
        params_for_scala['gatling.http.ahc.keepAlive'] = scenario.get('keepalive').lower()
    if load.concurrency is not None:
        params_for_scala['concurrency'] = load.concurrency
    if load.ramp_up is not None:
        params_for_scala['ramp-up'] = int(load.ramp_up)
    if load.hold is not None:
        params_for_scala['hold-for'] = int(load.hold)
    if load.iterations is not None and load.iterations != 0:
        params_for_scala['iterations'] = int(load.iterations)

    env = BetterDict()
    env.merge(dict(os.environ))
    java_opts = "".join([" -D%s=%s" % (key, params_for_scala[key]) for key in params_for_scala])
    java_opts += " " + env.get("JAVA_OPTS", "") + " " + self.engine.config.get("java_opts", "")
    env.merge({"JAVA_OPTS": java_opts})

    self.process = shell_exec(cmdline, cwd=self.engine.artifacts_dir,
                              stdout=self.stdout_file, stderr=self.stderr_file, env=env)
def string_to_config(crit_config):
    """
    Parse string like "avg-rt of label>100ms for 1m, continue as non-failed" into config dict

    :type crit_config: str
    :rtype: dict
    """
    res = BetterDict()
    res.merge({
        "subject": None, "condition": None, "threshold": None,
        "logic": "for", "timeframe": 0, "label": "",
        "stop": True, "fail": True, "message": None,
    })

    # optional "message:" prefix before the criteria itself
    if ':' in crit_config:
        colon = crit_config.index(':')
        res['message'] = crit_config[:colon].strip()
        crit_config = crit_config[colon + 1:].strip()

    # optional ", action" suffix after the condition
    if ',' in crit_config:
        comma = crit_config.index(',')
        crit_str = crit_config[:comma].strip()
        action_str = crit_config[comma + 1:].strip()
    else:
        crit_str = crit_config
        action_str = ""

    crit_pat = re.compile(r"([\w?*.-]+)(\s*of\s*([\S ]+))?\s*([<>=]+)\s*(\S+)(\s+(for|within|over)\s+(\S+))?")
    crit_match = crit_pat.match(crit_str.strip())
    if not crit_match:
        raise TaurusConfigError("Criteria string is malformed in its condition part: %s" % crit_str)

    crit_groups = crit_match.groups()
    res["subject"] = crit_groups[0]
    res["condition"] = crit_groups[3]
    res["threshold"] = crit_groups[4]
    if crit_groups[2]:
        res["label"] = crit_groups[2]
    if crit_groups[6]:
        res["logic"] = crit_groups[6]
    if crit_groups[7]:
        res["timeframe"] = crit_groups[7]

    if action_str:
        action_pat = re.compile(r"(stop|continue)(\s+as\s+(failed|non-failed))?")
        act_match = action_pat.match(action_str.strip())
        if not act_match:
            raise TaurusConfigError("Criteria string is malformed in its action part: %s" % action_str)
        action_groups = act_match.groups()
        res["stop"] = action_groups[0] != "continue"
        res["fail"] = action_groups[2] is None or action_groups[2] == "failed"

    return res
def startup(self):
    """Start the locust wrapper subprocess with computed concurrency and env."""
    self.start_time = time.time()
    load = self.get_load()
    concurrency = load.concurrency or 1
    hatch = math.ceil(concurrency / load.ramp_up) if load.ramp_up else concurrency

    wrapper = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir,
                           "resources", "locustio-taurus-wrapper.py")

    env = BetterDict()
    env.merge({"PYTHONPATH": self.engine.artifacts_dir + os.pathsep + os.getcwd()})
    if os.getenv("PYTHONPATH"):
        # keep any pre-existing PYTHONPATH entries
        env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']

    cmdline = [sys.executable, os.path.realpath(wrapper), '-f', os.path.realpath(self.script)]
    cmdline.append('--logfile=%s' % self.engine.create_artifact("locust", ".log"))
    cmdline.extend(["--no-web", "--only-summary", ])
    cmdline.extend(["--clients=%d" % concurrency, "--hatch-rate=%d" % hatch])
    if load.iterations:
        cmdline.append("--num-request=%d" % load.iterations)

    env['LOCUST_DURATION'] = dehumanize_time(load.duration)

    if self.is_master:
        cmdline.extend(["--master", '--expect-slaves=%s' % self.expected_slaves])
        env["SLAVES_LDJSON"] = self.slaves_ldjson
    else:
        env["JTL"] = self.kpi_jtl

    host = self.get_scenario().get("default-address", None)
    if host:
        cmdline.append("--host=%s" % host)

    self.__out = open(self.engine.create_artifact("locust", ".out"), 'w')
    self.process = self.execute(cmdline, stderr=STDOUT, stdout=self.__out, env=env)
def _extract_rest_request(self, test_step):
    """Convert a SoapUI REST test step into a Taurus request dict."""
    label = test_step.get('name')
    config = test_step.find('./con:config', namespaces=self.NAMESPACES)
    method = config.get('method')
    # resolve the interface method to collect declared parameter defaults
    method_name = config.get('methodName')
    method_obj = self.interface.find('.//con:method[@name="%s"]' % method_name, namespaces=self.NAMESPACES)
    params = BetterDict()
    if method_obj is not None:
        # walk up the resource hierarchy, collecting parameters declared at each level
        parent = method_obj.getparent()
        while parent.tag.endswith('resource'):
            for param in parent.findall('./con:parameters/con:parameter', namespaces=self.NAMESPACES):
                param_name = param.findtext('./con:name', namespaces=self.NAMESPACES)
                param_value = param.findtext('./con:value', namespaces=self.NAMESPACES)
                def_value = param.findtext('./con:default', namespaces=self.NAMESPACES)
                # explicit value wins over the declared default
                if param_value:
                    params[param_name] = param_value
                elif def_value:
                    params[param_name] = def_value
            parent = parent.getparent()
    url = self._calc_base_address(test_step) + config.get('resourcePath')
    headers = self._extract_headers(config)
    assertions = self._extract_assertions(config)
    # request-level parameter entries override interface-level ones
    params.merge({
        entry.get("key"): entry.get("value")
        for entry in config.findall('./con:restRequest/con:parameters/con:entry', namespaces=self.NAMESPACES)
    })
    # substitute {name} templates in the URL; consumed params are removed
    for param_name in copy.copy(list(params.keys())):
        template = "{" + param_name + "}"
        if template in url:
            param_value = params.pop(param_name)
            url = url.replace(template, param_value)
    request = {"url": url, "label": label}
    if method is not None and method != "GET":
        request["method"] = method
    if headers:
        request["headers"] = headers
    if assertions:
        request["assert"] = assertions
    # any parameters left over become the request body
    body = {}
    for key, value in iteritems(params):
        body[key] = value
    if body:
        request["body"] = body
    return request
def test_merge_configs(self):
    """Merging a dict over a scalar value should replace it with a BetterDict."""
    first = {"modules": {"local": "class_name"}}
    second = {"modules": {"local": {"class": "another_class"}}}

    res = BetterDict()
    res.merge(first)
    res.merge(second)

    self.assertEqual(BetterDict.__name__, type(res["modules"]["local"]).__name__)
    modules = res["modules"]
    ensure_is_dict(modules, "local", "class")
    self.assertEqual("another_class", res["modules"]["local"]["class"])
def __apply_single_override(self, dest, name, value):
    """
    Apply single override

    :type name: str
    :type value: str
    """
    self.log.debug("Applying %s=%s", name, value)
    # dotted path; numeric components address list indices
    parts = [(int(x) if is_int(x) else x) for x in name.split(".")]
    pointer = dest
    # walk down to the container that holds the last path component
    for index, part in enumerate(parts[:-1]):
        self.__ensure_list_capacity(pointer, part, parts[index + 1])
        if isinstance(part, int):
            if part < 0:
                # negative index: append a fresh container of the type the next part needs
                if isinstance(parts[index + 1], int):
                    pointer.append([])
                else:
                    pointer.append(BetterDict())
                pointer = pointer[-1]
            else:
                pointer = pointer[part]
        elif isinstance(parts[index + 1], int) and isinstance(pointer, dict):
            # force_set=True makes BetterDict store the default on a miss
            pointer = pointer.get(part, [], force_set=True)
        else:
            pointer = pointer.get(part, force_set=True)
    self.__ensure_list_capacity(pointer, parts[-1])
    self.log.debug("Applying: [%s]=%s", parts[-1], value)
    if isinstance(parts[-1], string_types) and parts[-1][0] == '^':
        # a leading '^' means "delete this entry" instead of setting it
        item = parts[-1][1:]
        if isinstance(pointer, list):
            item = int(item)
            if -len(pointer) <= item < len(pointer):
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        elif isinstance(pointer, dict):
            if item in pointer:
                del pointer[item]
            else:
                self.log.debug("No value to delete: %s", item)
        else:
            raise ValueError("Cannot handle override %s in non-iterable type %s" % (item, pointer))
    else:
        parsed_value = self.__parse_override_value(value)
        self.log.debug("Parsed override value: %r -> %r (%s)", value, parsed_value, type(parsed_value))
        if isinstance(parsed_value, dict):
            # wrap in BetterDict so later merges keep working
            dict_value = BetterDict()
            dict_value.merge(parsed_value)
            parsed_value = dict_value
        if isinstance(pointer, list) and parts[-1] < 0:
            pointer.append(parsed_value)
        else:
            pointer[parts[-1]] = parsed_value
def startup(self):
    """
    Should start the tool as fast as possible.
    """
    simulation = self.get_scenario().get("simulation", "")
    if not simulation:
        # TODO: guess simulation from script file
        raise ValueError("No simulation set")

    datadir = os.path.realpath(self.engine.artifacts_dir)

    # gatling wants a simulations *folder*; derive it when a file was given
    if os.path.isfile(self.script):
        script_path = os.path.dirname(get_full_path(self.script))
    else:
        script_path = self.script

    cmdline = [self.settings["path"]]
    # fix: flag was "-rf " (trailing space), which the gatling CLI parser
    # would not recognize as the results-folder option
    cmdline += ["-sf", script_path, "-df", datadir, "-rf", datadir]
    cmdline += ["-on", self.dir_prefix, "-m", "-s", simulation]

    self.start_time = time.time()
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")

    params_for_scala = self.settings.get('properties')
    load = self.get_load()
    scenario = self.get_scenario()

    # translate scenario/load options into -D system properties for the JVM
    if scenario.get('timeout', None) is not None:
        params_for_scala['gatling.http.ahc.requestTimeout'] = int(dehumanize_time(scenario.get('timeout')) * 1000)
    if scenario.get('keepalive', None) is not None:
        params_for_scala['gatling.http.ahc.keepAlive'] = scenario.get('keepalive').lower()
    if load.concurrency is not None:
        params_for_scala['concurrency'] = load.concurrency
    if load.ramp_up is not None:
        params_for_scala['ramp-up'] = int(load.ramp_up)
    if load.hold is not None:
        params_for_scala['hold-for'] = int(load.hold)
    if load.iterations is not None and load.iterations != 0:
        params_for_scala['iterations'] = int(load.iterations)

    env = BetterDict()
    env.merge(dict(os.environ))
    java_opts = ''.join([" -D%s=%s" % (key, params_for_scala[key]) for key in params_for_scala])
    java_opts += ' ' + env.get('JAVA_OPTS', '') + ' ' + self.settings.get('java-opts', '')
    env.merge({"JAVA_OPTS": java_opts})

    self.process = self.execute(cmdline, stdout=self.stdout_file, stderr=self.stderr_file, env=env)
def _extract_property_transfers(self, test_step):
    """Collect extractor configs from a Property Transfer test step, or None if it has none."""
    transfers = test_step.findall('./con:config/con:transfers', namespaces=self.NAMESPACES)
    if not transfers:
        return None

    extractors = BetterDict()  # label -> {extract-xpath: ..., extract-jsonpath: ...}
    for transfer in transfers:
        extracted = self._extract_transfer(transfer)
        if extracted is not None:
            extractors.merge(extracted)
    return extractors
def get_scenario(self, name=None, cache_scenario=True):
    """
    Returns scenario dict, extract if scenario is inlined

    :return: DictOfDicts
    """
    if name is None and self.__scenario is not None:
        return self.__scenario  # cached result of a previous call

    scenarios = self.engine.config.get("scenarios")

    if name is None:  # get current scenario
        exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
        label = self.execution.get('scenario', exc)

        # a string label can also be a path to a script file rather than a scenario name
        is_script = isinstance(label, string_types) and label not in scenarios and \
            os.path.exists(self.engine.find_file(label))
        if isinstance(label, list):
            msg = "Invalid content of scenario, list type instead of dict or string: %s"
            raise TaurusConfigError(msg % label)
        if isinstance(label, dict) or is_script:
            self.log.debug("Extract %s into scenarios" % label)
            if isinstance(label, string_types):
                # wrap the bare script path into a scenario dict
                scenario = BetterDict()
                scenario.merge({Scenario.SCRIPT: label})
            else:
                scenario = label

            # name the extracted scenario after its script, or autogenerate a unique label
            path = self.get_script_path(Scenario(self.engine, scenario))
            if path is not None:
                label = os.path.basename(path)
            if path is None or label in scenarios:
                hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                label = 'autogenerated_' + hash_str[-10:]

            scenarios[label] = scenario
            self.execution['scenario'] = label

        self.label = label
    else:  # get scenario by name
        label = name
        exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))

    scenario = scenarios.get(label, exc)
    scenario_obj = Scenario(self.engine, scenario)
    if name is None and cache_scenario:
        self.__scenario = scenario_obj
    return scenario_obj
def _get_merged_ci_headers(self, req, header):
    """Case-insensitive lookup of *header* across scenario- and request-level headers.

    Request-level headers take precedence over scenario-level ones.
    Returns the (lowercased) header value, or None if the header is absent.
    NOTE(review): dic_lower lowercases header *values* as well as names, so the
    returned value is lowercased — presumably intended for case-insensitive
    comparisons only; confirm against callers.
    """
    def dic_lower(dic):
        return {k.lower(): dic[k].lower() for k in dic}

    ci_scenario_headers = dic_lower(self.scenario.get_headers())
    ci_request_headers = dic_lower(req.headers)
    headers = BetterDict()
    headers.merge(ci_scenario_headers)
    headers.merge(ci_request_headers)
    if header.lower() in headers:
        # fix: keys are lowercased, so index with the lowercased name —
        # the original `headers[header]` raised KeyError for mixed-case input
        return headers[header.lower()]
    else:
        return None
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
    """Run a child process with HOSTALIASES pointing at the engine's alias file."""
    hosts_file = self.engine.create_artifact("hostaliases", "")
    aliases = self.get_hostaliases()
    with open(hosts_file, 'w') as fds:
        for key, value in iteritems(aliases):
            fds.write("%s %s\n" % (key, value))

    environ = BetterDict()
    environ.merge(dict(os.environ))
    environ["HOSTALIASES"] = hosts_file
    if env is not None:
        environ.merge(env)

    return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr,
                      stdin=stdin, shell=shell, env=environ)
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
    """Start a subprocess with the merged OS/engine environment.

    On Windows, environment variable names are case-insensitive, so both the
    inherited environment and the caller-supplied one are upper-cased before
    merging; collisions caused by upper-casing are logged.
    """
    if cwd is None:
        cwd = self.engine.default_cwd

    environ = BetterDict()
    environ.merge(dict(os.environ))

    if env is not None:
        if is_windows():
            # as variables in windows are case insensitive we should provide correct merging
            cur_env = {name.upper(): environ[name] for name in environ}
            old_keys = set(env.keys())
            env = {name.upper(): env[name] for name in env}
            new_keys = set(env.keys())
            if old_keys != new_keys:
                # fix: message read "might be been lost"
                msg = 'Some taurus environment variables might have been lost: %s'
                self.log.debug(msg, list(old_keys - new_keys))
            environ = BetterDict()
            environ.merge(cur_env)
        environ.merge(env)

    environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})
    # subprocess requires all env values to be strings; drop None entries
    environ = {key: environ[key] for key in environ.keys() if environ[key] is not None}

    self.log.debug("Executing shell from %s: %s", cwd, args)
    return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
    """Start a subprocess with merged environment and optional HOSTALIASES support."""
    if cwd is None:
        cwd = self.engine.default_cwd

    # write host aliases to a file and expose it via HOSTALIASES (glibc resolver)
    aliases = self.get_hostaliases()
    hosts_file = None
    if aliases:
        hosts_file = self.engine.create_artifact("hostaliases", "")
        with open(hosts_file, 'w') as fds:
            for key, value in iteritems(aliases):
                fds.write("%s %s\n" % (key, value))

    environ = BetterDict()
    environ.merge(dict(os.environ))

    if aliases:
        environ["HOSTALIASES"] = hosts_file

    if env is not None:
        # fix: was `if is_windows:` — testing the function object, which is
        # always truthy, so the Windows-only merge path ran on every platform
        if is_windows():
            # as variables in windows are case insensitive we should provide correct merging
            cur_env = {name.upper(): environ[name] for name in environ}
            old_keys = set(env.keys())
            env = {name.upper(): env[name] for name in env}
            new_keys = set(env.keys())
            if old_keys != new_keys:
                msg = 'Some taurus environment variables has been lost: %s'
                self.log.warning(msg, list(old_keys - new_keys))
            environ = BetterDict()
            environ.merge(cur_env)
        environ.merge(env)

    environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})
    # subprocess requires all env values to be strings; drop None entries
    environ = {key: environ[key] for key in environ.keys() if environ[key] is not None}

    return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
def _load_tasks(self, stage, container):
    """
    Parse the tasks configured for one lifecycle stage and append Task
    objects to `container`.

    :param stage: config key of the stage (e.g. "prepare"/"startup")
    :param container: list to which constructed Task objects are appended
    """
    # normalize: a single task entry becomes a one-element list
    if not isinstance(self.parameters.get(stage, []), list):
        self.parameters[stage] = [self.parameters[stage]]

    for index, stage_task in enumerate(self.parameters.get(stage, [])):
        # ensure_is_dict converts a bare string entry into {"command": <str>}
        stage_task = ensure_is_dict(self.parameters[stage], index, "command")
        task_config = self.parameters[stage][index]

        # resolve working dir: task cwd > module default-cwd > engine default;
        # the literal 'artifacts-dir' is a magic value meaning the artifacts dir
        default_cwd = self.settings.get("default-cwd", None)
        cwd = self.engine.find_file(task_config.get("cwd", default_cwd))
        if cwd is None:
            working_dir = self.engine.default_cwd
        elif cwd == 'artifacts-dir':
            working_dir = self.engine.artifacts_dir
        else:
            working_dir = cwd

        # environment layering, later merges win:
        # os.environ < module-level env < task-level env < PYTHONPATH override
        env = BetterDict()
        env.merge({k: os.environ.get(k) for k in os.environ.keys()})
        env.merge(self.settings.get('env'))
        env.merge(task_config.get('env'))
        env.merge({"PYTHONPATH": working_dir})
        if os.getenv("PYTHONPATH"):
            # keep the inherited PYTHONPATH, prepending it to the working dir
            env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']
        env[ARTIFACTS_DIR_ENVVAR] = self.engine.artifacts_dir

        # subprocess env must be plain strings for both keys and values
        for name, value in iteritems(env):
            env[str(name)] = str(value)

        task = Task(task_config, self.log, working_dir, env)
        container.append(task)
        self.log.debug("Added %s task: %s", stage, stage_task)
def _set_env(self):
    """
    Assemble Gatling configuration properties and publish them via the
    environment: each property becomes a -D JVM flag in JAVA_OPTS, and for
    Gatling 3+ the 'gatling.*' ones are additionally written to a generated
    gatling.conf referenced through GATLING_CONF.
    """
    # property layering, later merges win: module settings < scenario <
    # fixed output-dir props < simulation/load/scenario-derived props
    props = BetterDict()
    props.merge(self.settings.get('properties'))
    props.merge(self.get_scenario().get("properties"))

    props['gatling.core.outputDirectoryBaseName'] = self.dir_prefix
    props['gatling.core.directory.resources'] = self.engine.artifacts_dir
    props['gatling.core.directory.results'] = self.engine.artifacts_dir

    props.merge(self._get_simulation_props())
    props.merge(self._get_load_props())
    props.merge(self._get_scenario_props())

    # sorted() keeps the resulting JAVA_OPTS deterministic between runs
    for key in sorted(props.keys()):
        prop = props[key]
        val_tpl = "%s"
        if isinstance(prop, string_types):
            if not is_windows():  # extend properties support (contained separators/quotes/etc.) on lin/mac
                # %r quotes the value so separators/quotes survive the shell
                val_tpl = "%r"
            if PY2:
                prop = prop.encode("utf-8", 'ignore')  # to convert from unicode into str
        if is_gatling2(self.tool.version) or not key.startswith('gatling.'):  # send param through java_opts
            self.env.add_java_param({"JAVA_OPTS": ("-D%s=" + val_tpl) % (key, prop)})

    self.env.set({"NO_PAUSE": "TRUE"})
    self.env.add_java_param({"JAVA_OPTS": self.settings.get("java-opts", None)})

    self.log.debug('JAVA_OPTS: "%s"', self.env.get("JAVA_OPTS"))

    if not is_gatling2(self.tool.version):  # cook prop file
        # Gatling 3 dropped many -D overrides; gatling.* keys must go
        # through a config file instead
        prop_lines = []
        for key in props:
            if key.startswith("gatling."):
                prop_lines.append("%s = %s" % (key, props[key]))

        conf_dir = self.engine.create_artifact("conf", "")
        os.mkdir(conf_dir)
        with open(os.path.join(conf_dir, "gatling.conf"), 'w') as conf_file:
            conf_file.write('\n'.join(prop_lines))
        self.env.add_path({"GATLING_CONF": conf_dir})
def get_scenario(self, name=None):
    """
    Returns scenario dict, extract if scenario is inlined

    :param name: explicit scenario label, or None for the current execution's scenario
    :return: DictOfDicts
    """
    # cached result for the current (name=None) scenario
    if name is None and self.__scenario is not None:
        return self.__scenario

    scenarios = self.engine.config.get("scenarios")

    if name is None:  # get current scenario
        label = self.execution.get('scenario', ValueError("Scenario is not configured properly"))

        # treat a string that isn't a known scenario label but points at an
        # existing file as an inline script reference
        is_script = isinstance(label, string_types) and label not in scenarios and \
                    os.path.exists(self.engine.find_file(label))
        # CONSISTENCY FIX: sibling implementation rejects list-typed values
        # explicitly instead of failing later with a confusing error
        if isinstance(label, list):
            raise ValueError("Invalid content of scenario, list type instead of dict or string")
        if isinstance(label, dict) or is_script:
            self.log.debug("Extract %s into scenarios" % label)
            if isinstance(label, string_types):
                scenario = BetterDict()
                scenario.merge({Scenario.SCRIPT: label})
            else:
                scenario = label

            # derive a stable label: script basename, or a hash suffix when
            # there is no path or the basename collides
            path = self.get_script_path(Scenario(self.engine, scenario))
            if path is not None:
                label = os.path.basename(path)
            if path is None or label in scenarios:
                hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                label = 'autogenerated_' + hash_str[-10:]

            scenarios[label] = scenario
            self.execution['scenario'] = label

        self.label = label
    else:  # get scenario by name
        label = name

    err = ValueError("Scenario not found in scenarios: %s" % label)
    scenario = scenarios.get(label, err)
    scenario_obj = Scenario(self.engine, scenario)
    if name is None:
        self.__scenario = scenario_obj
    return scenario_obj
def get_scenario(self, name=None):
    """
    Returns scenario dict, extract if scenario is inlined

    :param name: explicit scenario label, or None for the current execution's scenario
    :return: DictOfDicts
    :raises ValueError: on list-typed scenario content or misconfiguration
    """
    # cached result for the current (name=None) scenario
    if name is None and self.__scenario is not None:
        return self.__scenario

    scenarios = self.engine.config.get("scenarios")

    if name is None:  # get current scenario
        label = self.execution.get('scenario', ValueError("Scenario is not configured properly"))

        # a string that isn't a known scenario label but points at an
        # existing file is treated as an inline script reference
        is_script = isinstance(label, string_types) and label not in scenarios and \
                    os.path.exists(self.engine.find_file(label))
        if isinstance(label, list):
            raise ValueError("Invalid content of scenario, list type instead of dict or string")
        if isinstance(label, dict) or is_script:
            self.log.debug("Extract %s into scenarios" % label)
            if isinstance(label, string_types):
                scenario = BetterDict()
                scenario.merge({Scenario.SCRIPT: label})
            else:
                scenario = label

            # derive a stable label: script basename, or an md5-hash suffix
            # when there is no path or the basename collides
            path = self.get_script_path(Scenario(self.engine, scenario))
            if path is not None:
                label = os.path.basename(path)
            if path is None or label in scenarios:
                hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                label = 'autogenerated_' + hash_str[-10:]

            scenarios[label] = scenario
            self.execution['scenario'] = label

        self.label = label
    else:  # get scenario by name
        label = name

    err = ValueError("Scenario not found in scenarios: %s" % label)
    scenario = scenarios.get(label, err)
    scenario_obj = Scenario(self.engine, scenario)
    if name is None:
        self.__scenario = scenario_obj
    return scenario_obj
def test_passfail_crash(self):
    """Prepare must normalize string criteria entries into dicts without crashing."""
    config = BetterDict()
    config.merge({
        "module": "passfail",
        "criteria": ["fail>10% within 5s"],
    })

    status = PassFailStatus()
    status.engine = EngineEmul()
    status.parameters = config
    status.engine.config.merge({"services": [config]})

    status.prepare()

    # after prepare(), every criteria entry should have been parsed to a dict
    self.assertTrue(all(isinstance(crit, dict) for crit in config["criteria"]))
    self.assertTrue(all(isinstance(crit, dict) for crit in config["criterias"]))
def _extract_test_case(self, test_case, test_suite, suite_level_props):
    """
    Convert one test-case element into a (scenario name, scenario dict) pair,
    layering case-level properties on top of suite-level ones.
    """
    suite_name = test_suite.get("name")
    case_name = test_case.get("name")
    scenario_name = suite_name + "-" + case_name

    # prefix case-scoped properties so they can't clash with suite-level keys
    prefixed = {}
    for key, value in iteritems(self._extract_properties(test_case)):
        prefixed["#TestCase#" + key] = value

    merged_props = BetterDict()
    merged_props.merge(suite_level_props)
    merged_props.merge(prefixed)

    scenario = self._extract_scenario(test_case, merged_props)
    scenario['test-suite'] = suite_name
    return scenario_name, scenario
def test_passfail_crash(self):
    """Prepare must turn string criteria entries into dicts without crashing."""
    config = BetterDict()
    config.merge({
        "module": "passfail",
        "criteria": ["fail>10% within 5s"],
    })

    status = PassFailStatus()
    status.engine = EngineEmul()
    status.parameters = config
    status.engine.config.merge({"services": [config]})

    status.prepare()

    # after prepare(), each criteria entry should have been parsed to a dict
    self.assertTrue(all(isinstance(crit, dict) for crit in config["criteria"]))
def test_server_agent(self):
    """
    End-to-end check of the server-agent monitoring client: configure two
    agents, attach listeners/widget/criteria, feed fake metric data through
    an emulated socket, and verify the exact protocol sent to the agent.
    """
    obj = Monitoring()
    obj.engine = EngineEmul()
    obj.parameters.merge({
        "server-agent": [{
            "address": "127.0.0.1:4444",
            "metrics": ["cpu", "disks"]
        }, {
            "address": "10.0.0.1",
            "metrics": ["something1", "something2"]
        }]
    })

    listener = LoggingMonListener()
    obj.add_listener(listener)

    widget = obj.get_widget()
    obj.add_listener(widget)

    # criteria subscribed to one metric of the first agent
    crit_conf = BetterDict()
    crit_conf.merge({"threshold": 5, "subject": "127.0.0.1:4444/cpu"})
    criteria = MonitoringCriteria(crit_conf, obj)
    obj.add_listener(criteria)

    # substitute the real client with a socket emulator before prepare()
    obj.client_classes = {'server-agent': ServerAgentClientEmul}

    obj.prepare()
    obj.startup()

    # push fake "cpu<TAB>disks" samples into the emulated socket buffer
    for _ in range(1, 10):
        obj.clients[0].socket.recv_data += "%s\t%s\n" % (random.random(), random.random())
        obj.check()
        logging.debug("Criteria state: %s", criteria)
        time.sleep(1)

    obj.shutdown()
    obj.post_process()

    # verify the exact SA protocol handshake/commands that were sent
    self.assertEquals("test\ninterval:1\nmetrics:cpu\tdisks\nexit\n", obj.clients[0].socket.sent_data)
def startup(self):
    """
    Launch the Grinder process as quickly as possible, wiring its stdout and
    stderr into artifact log files.
    """
    self.start_time = time.time()

    stdout_path = self.engine.create_artifact("grinder-stdout", ".log")
    stderr_path = self.engine.create_artifact("grinder-stderr", ".log")
    self.stdout_file = open(stdout_path, "w")
    self.stderr_file = open(stderr_path, "w")

    # inherit the OS environment, adding the per-execution prefix marker
    environ = BetterDict()
    environ.merge(dict(os.environ))
    environ.merge({"T_GRINDER_PREFIX": self.exec_id})

    self.process = shell_exec(self.cmd_line,
                              cwd=self.engine.artifacts_dir,
                              stdout=self.stdout_file,
                              stderr=self.stderr_file,
                              env=environ)
def _extract_scenario(self, test_case, case_level_props):
    """
    Build a Taurus scenario dict from a SoapUI test-case element: collect
    requests from its steps, merge property steps into variables, and attach
    property-transfer extractors to the requests they label.
    """
    variables = BetterDict.from_dict(case_level_props)
    requests = []
    extractors = BetterDict()

    for step in test_case.findall('.//con:testStep', namespaces=self.NAMESPACES):
        step_type = step.get("type")
        request = None

        if step_type == "httprequest":
            request = self._extract_http_request(step)
        elif step_type == "restrequest":
            request = self._extract_rest_request(step)
        elif step_type == "request":
            request = self._extract_soap_request(step)
        elif step_type == "properties":
            # property steps contribute variables instead of requests
            config_block = step.find('./con:config', namespaces=self.NAMESPACES)
            if config_block is not None:
                variables.merge(self._extract_properties(config_block))
        elif step_type == "transfer":
            # label -> extractor mapping, applied to requests afterwards
            transfers = self._extract_property_transfers(step)
            if transfers:
                extractors.merge(transfers)
        elif step_type == "groovy":
            request = self._extract_script(step)

        if request is not None:
            requests.append(request)

    # attach collected extractors to the requests they belong to, by label
    for request in requests:
        label = request["label"]
        if label in extractors:
            request.update(extractors[label])

    scenario = {"test-case": test_case.get("name"), "requests": requests}
    if variables:
        scenario["variables"] = variables
    return scenario
def test_report_criteria_without_label(self):
    """JUnit XML report must be produced for a triggered criterion lacking a label."""
    reporter = JUnitXMLReporter()
    reporter.engine = EngineEmul()
    reporter.parameters = BetterDict()

    status = PassFailStatus()

    # a criterion with no 'label' key at all
    criterion_config = BetterDict()
    criterion_config.merge({
        'stop': True, 'fail': True, 'timeframe': -1,
        'threshold': '150ms', 'condition': '<', 'subject': 'avg-rt',
    })
    criterion = DataCriterion(criterion_config, status)
    status.criteria.append(criterion)
    criterion.is_triggered = True

    reporter.engine.reporters.append(status)

    report_path = tempfile.mktemp(suffix='.xml', prefix='junit-xml_passfail',
                                  dir=reporter.engine.artifacts_dir)
    reporter.parameters.merge({"filename": report_path, "data-source": "pass-fail"})

    reporter.prepare()
    reporter.last_second = DataPoint(0)
    reporter.post_process()
def _create_runner(self, working_dir, kpi_file, err_file):
    """
    Build the test-runner wrapper (Nose for .py scripts, JUnit otherwise)
    with a config merged from module settings and per-run paths.

    :param working_dir: directory the runner will execute in
    :param kpi_file: path for the KPI (results) log
    :param err_file: path for the error log
    :return: instantiated runner (NoseTester or JUnitTester)
    """
    script_path = self.get_script_path()
    script_type = self.detect_script_type(script_path)

    runner_config = BetterDict()

    if script_type == ".py":
        runner_class = NoseTester
        runner_config.merge(self.settings.get("selenium-tools").get("nose"))
    else:  # script_type == ".jar" or script_type == ".java":
        runner_class = JUnitTester
        runner_config.merge(self.settings.get("selenium-tools").get("junit"))

    runner_config['props-file'] = self.engine.create_artifact("customrunner", ".properties")
    runner_config["script-type"] = script_type
    runner_config["working-dir"] = working_dir
    # NOTE(review): these .get(key, default) calls appear to rely on
    # BetterDict.get storing the default into the dict when the key is
    # absent (user-set values win) — confirm against BetterDict semantics
    runner_config.get("artifacts-dir", self.engine.artifacts_dir)
    runner_config.get("report-file", kpi_file)
    runner_config.get("err-file", err_file)
    runner_config.get("stdout", self.engine.create_artifact("junit", ".out"))
    runner_config.get("stderr", self.engine.create_artifact("junit", ".err"))

    return runner_class(runner_config, self)
def test_xml_format_passfail(self):
    """
    Build a pass-fail report with two criteria processors (one triggered and
    one untriggered criterion each), render it to JUnit XML, and verify the
    tree structure plus the embedded BlazeMeter report link.
    """
    obj = JUnitXMLReporter()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict()

    # cloud provisioning supplies the report URL expected in system-out
    obj.engine.provisioning = CloudProvisioning()
    obj.engine.provisioning.results_url = "http://test/report/123"

    pass_fail1 = CriteriaProcessor([], None)

    crit_cfg1 = BetterDict()
    crit_cfg2 = BetterDict()
    crit_cfg3 = BetterDict()
    crit_cfg4 = BetterDict()

    crit_cfg1.merge({
        'stop': True, 'label': 'Sample 1 Triggered', 'fail': True,
        'timeframe': -1, 'threshold': '150ms', 'condition': '<', 'subject': 'avg-rt'})
    crit_cfg2.merge({
        'stop': True, 'label': 'Sample 1 Not Triggered', 'fail': True,
        'timeframe': -1, 'threshold': '300ms', 'condition': '>', 'subject': 'avg-rt'})
    crit_cfg3.merge({
        'stop': True, 'label': 'Sample 2 Triggered', 'fail': True,
        'timeframe': -1, 'threshold': '150ms', 'condition': '<=', 'subject': 'avg-rt'})
    crit_cfg4.merge({
        'stop': True, 'label': 'Sample 2 Not Triggered', 'fail': True,
        'timeframe': -1, 'threshold': '300ms', 'condition': '=', 'subject': 'avg-rt'})

    fc1_triggered = DataCriterion(crit_cfg1, pass_fail1)
    fc1_not_triggered = DataCriterion(crit_cfg2, pass_fail1)

    pass_fail2 = CriteriaProcessor([], None)

    fc2_triggered = DataCriterion(crit_cfg3, pass_fail1)
    fc2_not_triggered = DataCriterion(crit_cfg4, pass_fail1)

    pass_fail1.criteria.append(fc1_triggered)
    pass_fail1.criteria.append(fc1_not_triggered)
    pass_fail2.criteria.append(fc2_triggered)
    pass_fail2.criteria.append(fc2_not_triggered)

    # only the "Triggered" criteria fire; they should render as <error>
    fc1_triggered.is_triggered = True
    fc2_triggered.is_triggered = True

    pass_fail = PassFailStatus()
    pass_fail.processors.append(pass_fail1)
    pass_fail.processors.append(pass_fail2)
    obj.engine.reporters.append(pass_fail)
    obj.engine.reporters.append(BlazeMeterUploader())

    path_from_config = tempfile.mktemp(suffix='.xml', prefix='junit-xml_passfail', dir=obj.engine.artifacts_dir)
    obj.parameters.merge({"filename": path_from_config, "data-source": "pass-fail"})
    obj.prepare()
    obj.last_second = DataPoint(0)
    obj.post_process()

    with open(obj.report_file_path, 'rb') as fds:
        f_contents = fds.read()

    ROOT_LOGGER.info("File: %s", f_contents)
    xml_tree = etree.fromstring(f_contents)
    # expected layout: testsuites > testsuite > 4 testcases,
    # with <error> children on the two triggered ones (indexes 0 and 2)
    self.assertEqual('testsuites', xml_tree.tag)
    suite = xml_tree.getchildren()[0]
    self.assertEqual('testsuite', suite.tag)
    test_cases = suite.getchildren()
    self.assertEqual(4, len(test_cases))
    self.assertEqual('testcase', test_cases[0].tag)
    self.assertEqual('error', test_cases[0].getchildren()[1].tag)
    self.assertEqual('error', test_cases[2].getchildren()[1].tag)
    sys_out = test_cases[0].getchildren()[0]
    self.assertEqual('system-out', sys_out.tag)
    self.assertIn('BlazeMeter report link: http://test/report/123', sys_out.text)
class JMeterScenarioBuilder(JMX):
    """
    Helper to build JMeter test plan from Scenario.

    Translates Taurus scenario config (requests, extractors, assertions,
    logic blocks, data sources) into the lxml JMX tree inherited from JMX.

    :param executor: ScenarioExecutor
    :param original: inherited from JMX
    """

    def __init__(self, executor, original=None):
        super(JMeterScenarioBuilder, self).__init__(original)
        self.executor = executor
        self.scenario = executor.get_scenario()
        self.engine = executor.engine
        self.system_props = BetterDict()  # JVM system properties collected during generation
        self.request_compiler = None      # lazily created RequestCompiler

    def __gen_managers(self, scenario):
        """Cache/cookie/DNS manager elements, each followed by its hashTree."""
        elements = []
        if scenario.get("store-cache", True):
            elements.append(self._get_cache_mgr())
            elements.append(etree.Element("hashTree"))
        if scenario.get("store-cookie", True):
            elements.append(self._get_cookie_mgr(scenario))
            elements.append(etree.Element("hashTree"))
        if scenario.get("use-dns-cache-mgr", True):
            elements.append(self.get_dns_cache_mgr())
            elements.append(etree.Element("hashTree"))
            # disable JVM-level DNS caching so the DNS cache manager is effective
            self.system_props.merge({"system-properties": {"sun.net.inetaddr.ttl": 0}})
        return elements

    @staticmethod
    def smart_time(any_time):  # FIXME: bad name for the function, does not reflect what it does
        # converts human-readable durations to milliseconds; non-parseable
        # values (e.g. JMeter expressions) are passed through unchanged
        try:
            smart_time = int(1000 * dehumanize_time(any_time))
        except TaurusInternalException:
            smart_time = any_time
        return smart_time

    def __gen_defaults(self, scenario):
        """HTTP defaults element (plus hashTree) from scenario-level options."""
        default_address = scenario.get("default-address", None)
        retrieve_resources = scenario.get("retrieve-resources", True)
        resources_regex = scenario.get("retrieve-resources-regex", None)
        concurrent_pool_size = scenario.get("concurrent-pool-size", 4)
        content_encoding = scenario.get("content-encoding", None)

        timeout = scenario.get("timeout", None)
        timeout = self.smart_time(timeout)
        elements = [self._get_http_defaults(default_address, timeout, retrieve_resources,
                                            concurrent_pool_size, content_encoding, resources_regex),
                    etree.Element("hashTree")]
        return elements

    def __add_think_time(self, children, req):
        think_time = req.priority_option('think-time')
        if think_time is not None:
            children.append(JMX._get_constant_timer(self.smart_time(think_time)))
            children.append(etree.Element("hashTree"))

    def __add_extractors(self, children, req):
        """Append all configured extractor kinds for the request."""
        self.__add_regexp_ext(children, req)
        self.__add_json_ext(children, req)
        self.__add_jquery_ext(children, req)
        self.__add_xpath_ext(children, req)

    def __add_regexp_ext(self, children, req):
        extractors = req.config.get("extract-regexp")
        for varname in extractors:
            cfg = ensure_is_dict(extractors, varname, "regexp")
            extractor = JMX._get_extractor(varname, cfg.get('subject', 'body'), cfg['regexp'],
                                           cfg.get('template', 1), cfg.get('match-no', 1),
                                           cfg.get('default', 'NOT_FOUND'))
            children.append(extractor)
            children.append(etree.Element("hashTree"))

    def __add_json_ext(self, children, req):
        jextractors = req.config.get("extract-jsonpath")
        for varname in jextractors:
            cfg = ensure_is_dict(jextractors, varname, "jsonpath")
            # JMeter < 3.0 lacks the built-in JSON extractor; use the plugin one
            if LooseVersion(str(self.executor.settings.get("version"))) < LooseVersion("3.0"):
                extractor = JMX._get_json_extractor(varname, cfg["jsonpath"],
                                                    cfg.get("default", "NOT_FOUND"),
                                                    cfg.get("from-variable", None))
            else:
                extractor = JMX._get_internal_json_extractor(varname, cfg["jsonpath"],
                                                             cfg.get("default", "NOT_FOUND"),
                                                             cfg.get("scope", None),
                                                             cfg.get("from-variable", None),
                                                             cfg.get("match-no", "-1"),
                                                             cfg.get("concat", False))
            children.append(extractor)
            children.append(etree.Element("hashTree"))

    def __add_jquery_ext(self, children, req):
        css_jquery_extors = req.config.get("extract-css-jquery")
        for varname in css_jquery_extors:
            cfg = ensure_is_dict(css_jquery_extors, varname, "expression")
            extractor = self._get_jquerycss_extractor(varname, cfg['expression'],
                                                      cfg.get('attribute', ""),
                                                      cfg.get('match-no', 0),
                                                      cfg.get('default', 'NOT_FOUND'))
            children.append(extractor)
            children.append(etree.Element("hashTree"))

    def __add_xpath_ext(self, children, req):
        xpath_extractors = req.config.get("extract-xpath")
        for varname in xpath_extractors:
            cfg = ensure_is_dict(xpath_extractors, varname, "xpath")
            children.append(JMX._get_xpath_extractor(varname, cfg['xpath'],
                                                     cfg.get('default', 'NOT_FOUND'),
                                                     cfg.get('validate-xml', False),
                                                     cfg.get('ignore-whitespace', True),
                                                     cfg.get('use-tolerant-parser', False)))
            children.append(etree.Element("hashTree"))

    @staticmethod
    def __add_assertions(children, req):
        """Append response/jsonpath/xpath assertion elements for the request."""
        assertions = req.config.get("assert", [])
        for idx, assertion in enumerate(assertions):
            assertion = ensure_is_dict(assertions, idx, "contains")
            if not isinstance(assertion['contains'], list):
                assertion['contains'] = [assertion['contains']]
            children.append(JMX._get_resp_assertion(assertion.get("subject", Scenario.FIELD_BODY),
                                                    assertion['contains'],
                                                    assertion.get('regexp', True),
                                                    assertion.get('not', False),
                                                    assertion.get('assume-success', False)))
            children.append(etree.Element("hashTree"))

        jpath_assertions = req.config.get("assert-jsonpath", [])
        for idx, assertion in enumerate(jpath_assertions):
            assertion = ensure_is_dict(jpath_assertions, idx, "jsonpath")
            # exc as .get() default: missing key raises the config error
            exc = TaurusConfigError('JSON Path not found in assertion: %s' % assertion)
            component = JMX._get_json_path_assertion(assertion.get('jsonpath', exc),
                                                     assertion.get('expected-value', ''),
                                                     assertion.get('validate', False),
                                                     assertion.get('expect-null', False),
                                                     assertion.get('invert', False),
                                                     assertion.get('regexp', True))
            children.append(component)
            children.append(etree.Element("hashTree"))

        xpath_assertions = req.config.get("assert-xpath", [])
        for idx, assertion in enumerate(xpath_assertions):
            assertion = ensure_is_dict(xpath_assertions, idx, "xpath")
            exc = TaurusConfigError('XPath not found in assertion: %s' % assertion)
            component = JMX._get_xpath_assertion(assertion.get('xpath', exc),
                                                 assertion.get('validate-xml', False),
                                                 assertion.get('ignore-whitespace', True),
                                                 assertion.get('use-tolerant-parser', False),
                                                 assertion.get('invert', False))
            children.append(component)
            children.append(etree.Element("hashTree"))

    @staticmethod
    def __add_jsr_elements(children, req):
        """
        Append JSR223 pre/post processor elements configured on the request.

        :type children: etree.Element
        :type req: Request
        """
        jsrs = req.config.get("jsr223", [])
        if not isinstance(jsrs, list):
            jsrs = [jsrs]
        for idx, _ in enumerate(jsrs):
            # bare string entries become {"script-text": <str>}
            jsr = ensure_is_dict(jsrs, idx, default_key='script-text')
            lang = jsr.get("language", "groovy")
            script_file = jsr.get("script-file", None)
            script_text = jsr.get("script-text", None)
            if not script_file and not script_text:
                raise TaurusConfigError("jsr223 element must specify one of 'script-file' or 'script-text'")
            parameters = jsr.get("parameters", "")
            execute = jsr.get("execute", "after")
            children.append(JMX._get_jsr223_element(lang, script_file, parameters, execute, script_text))
            children.append(etree.Element("hashTree"))

    def _get_merged_ci_headers(self, req, header):
        """Case-insensitive lookup of a header across scenario- and request-level headers."""
        def dic_lower(dic):
            return {k.lower(): dic[k].lower() for k in dic}

        ci_scenario_headers = dic_lower(self.scenario.get_headers())
        ci_request_headers = dic_lower(req.headers)
        headers = BetterDict()
        headers.merge(ci_scenario_headers)
        headers.merge(ci_request_headers)  # request-level wins
        if header.lower() in headers:
            return headers[header]
        else:
            return None

    def __gen_requests(self, scenario):
        requests = scenario.get_requests()
        elements = []
        for compiled in self.compile_requests(requests):
            elements.extend(compiled)
        return elements

    def compile_scenario(self, scenario):
        """Full element list for a scenario: managers, defaults, data sources, requests."""
        elements = []
        elements.extend(self.__gen_managers(scenario))
        elements.extend(self.__gen_defaults(scenario))
        elements.extend(self.__gen_datasources(scenario))
        elements.extend(self.__gen_requests(scenario))
        return elements

    def compile_http_request(self, request):
        """
        :type request: HierarchicHTTPRequest
        :return: [sampler element, its hashTree of children]
        """
        timeout = request.priority_option('timeout')
        if timeout is not None:
            timeout = self.smart_time(timeout)

        # serialize dict/list bodies when content-type says JSON
        content_type = self._get_merged_ci_headers(request, 'content-type')
        if content_type == 'application/json' and isinstance(request.body, (dict, list)):
            body = json.dumps(request.body)
        else:
            body = request.body

        use_random_host_ip = request.priority_option('random-source-ip', default=False)
        host_ips = get_host_ips(filter_loopbacks=True) if use_random_host_ip else []
        http = JMX._get_http_request(request.url, request.label, request.method, timeout, body,
                                     request.priority_option('keepalive', default=True),
                                     request.upload_files, request.content_encoding,
                                     request.priority_option('follow-redirects', default=True),
                                     use_random_host_ip, host_ips)

        children = etree.Element("hashTree")

        if request.headers:
            children.append(JMX._get_header_mgr(request.headers))
            children.append(etree.Element("hashTree"))

        self.__add_think_time(children, request)

        self.__add_assertions(children, request)

        if timeout is not None:
            children.append(JMX._get_dur_assertion(timeout))
            children.append(etree.Element("hashTree"))

        self.__add_extractors(children, request)

        self.__add_jsr_elements(children, request)

        return [http, children]

    def compile_if_block(self, block):
        elements = []

        # TODO: pass jmeter IfController options
        if_controller = JMX._get_if_controller(block.condition)
        then_children = etree.Element("hashTree")
        for compiled in self.compile_requests(block.then_clause):
            for element in compiled:
                then_children.append(element)
        elements.extend([if_controller, then_children])

        if block.else_clause:
            # else branch is modeled as a second IfController with the
            # negated condition
            inverted_condition = "!(" + block.condition + ")"
            else_controller = JMX._get_if_controller(inverted_condition)
            else_children = etree.Element("hashTree")
            for compiled in self.compile_requests(block.else_clause):
                for element in compiled:
                    else_children.append(element)
            elements.extend([else_controller, else_children])
        return elements

    def compile_loop_block(self, block):
        elements = []
        loop_controller = JMX._get_loop_controller(block.loops)
        children = etree.Element("hashTree")
        for compiled in self.compile_requests(block.requests):
            for element in compiled:
                children.append(element)
        elements.extend([loop_controller, children])
        return elements

    def compile_while_block(self, block):
        elements = []
        controller = JMX._get_while_controller(block.condition)
        children = etree.Element("hashTree")
        for compiled in self.compile_requests(block.requests):
            for element in compiled:
                children.append(element)
        elements.extend([controller, children])
        return elements

    def compile_foreach_block(self, block):
        """
        :type block: ForEachBlock
        """
        elements = []
        controller = JMX._get_foreach_controller(block.input_var, block.loop_var)
        children = etree.Element("hashTree")
        for compiled in self.compile_requests(block.requests):
            for element in compiled:
                children.append(element)
        elements.extend([controller, children])
        return elements

    def compile_transaction_block(self, block):
        elements = []
        controller = JMX._get_transaction_controller(block.name,
                                                     block.priority_option('force-parent-sample', True))
        children = etree.Element("hashTree")
        for compiled in self.compile_requests(block.requests):
            for element in compiled:
                children.append(element)
        elements.extend([controller, children])
        return elements

    def compile_include_scenario_block(self, block):
        elements = []
        controller = JMX._get_simple_controller(block.scenario_name)
        children = etree.Element("hashTree")
        scenario = self.executor.get_scenario(name=block.scenario_name)
        for element in self.compile_scenario(scenario):
            children.append(element)
        elements.extend([controller, children])
        return elements

    def compile_action_block(self, block):
        """
        :type block: ActionBlock
        :return: [test-action element, its hashTree]
        """
        # JMeter TestAction numeric codes for action/target
        actions = {
            'stop': 0,
            'pause': 1,
            'stop-now': 2,
            'continue': 3,
        }
        targets = {'current-thread': 0, 'all-threads': 2}
        action = actions[block.action]
        target = targets[block.target]
        duration = 0
        if block.duration is not None:
            duration = int(block.duration * 1000)
        test_action = JMX._get_action_block(action, target, duration)
        children = etree.Element("hashTree")
        self.__add_jsr_elements(children, block)
        return [test_action, children]

    def compile_set_variables_block(self, block):
        # pause current thread for 0s
        test_action = JMX._get_action_block(action_index=1, target_index=0, duration_ms=0)
        children = etree.Element("hashTree")
        fmt = "vars.put('%s', %r);"
        # variables are set via a generated groovy JSR223 preprocessor
        block.config["jsr223"] = [{
            "language": "groovy",
            "execute": "before",
            "script-text": "\n".join(fmt % (var, expr) for var, expr in iteritems(block.mapping))
        }]
        self.__add_jsr_elements(children, block)
        return [test_action, children]

    def compile_requests(self, requests):
        if self.request_compiler is None:
            self.request_compiler = RequestCompiler(self)
        compiled = []
        for request in requests:
            compiled.append(self.request_compiler.visit(request))
            self.request_compiler.clear_path_cache()
        return compiled

    def __generate(self):
        """
        Generate the test plan
        """
        thread_group = JMX.get_thread_group(testname=self.executor.label)
        thread_group_ht = etree.Element("hashTree", type="tg")

        # NOTE: set realistic dns-cache and JVM prop by default?
        self.request_compiler = RequestCompiler(self)
        for element in self.compile_scenario(self.scenario):
            thread_group_ht.append(element)

        results_tree = self._get_results_tree()
        results_tree_ht = etree.Element("hashTree")

        self.append(self.TEST_PLAN_SEL, thread_group)
        self.append(self.TEST_PLAN_SEL, thread_group_ht)
        self.append(self.TEST_PLAN_SEL, results_tree)
        self.append(self.TEST_PLAN_SEL, results_tree_ht)

    def save(self, filename):
        """
        Generate test plan and save

        :type filename: str
        """
        # NOTE: bad design, as repetitive save will duplicate stuff
        self.__generate()
        super(JMeterScenarioBuilder, self).save(filename)

    def __gen_datasources(self, scenario):
        """CSV data-set config elements for the scenario's data-sources list."""
        sources = scenario.get("data-sources")
        if not sources:
            return []
        if not isinstance(sources, list):
            raise TaurusConfigError("data-sources '%s' is not a list" % sources)
        elements = []
        for idx, source in enumerate(sources):
            source = ensure_is_dict(sources, idx, "path")
            source_path = source["path"]
            delimiter = source.get("delimiter")

            if has_variable_pattern(source_path):
                # path contains ${...} — resolved only at JMeter runtime,
                # so existence and dialect can't be checked here
                msg = "Path to CSV contains JMeter variable/function, can't check for file existence: %s"
                self.log.warning(msg, source_path)
                if not delimiter:
                    delimiter = ','
                    self.log.warning("Can't detect CSV dialect, default delimiter will be '%s'", delimiter)
            else:
                modified_path = self.executor.engine.find_file(source_path)
                if not os.path.isfile(modified_path):
                    raise TaurusConfigError("data-sources path not found: %s" % modified_path)
                if not delimiter:
                    delimiter = self.__guess_delimiter(modified_path)
                source_path = get_full_path(modified_path)

            config = JMX._get_csv_config(source_path, delimiter, source.get("quoted", False),
                                         source.get("loop", True), source.get("variable-names", ""))
            elements.append(config)
            elements.append(etree.Element("hashTree"))
        return elements

    def __guess_delimiter(self, path):
        """Sniff the CSV delimiter from the file header, falling back to ','."""
        with open(path) as fhd:
            header = fhd.read(4096)  # 4KB is enough for header
            try:
                delimiter = guess_csv_dialect(header).delimiter
            except BaseException as exc:
                self.log.debug(traceback.format_exc())
                self.log.warning('CSV dialect detection failed (%s), default delimiter selected (",")', exc)
                delimiter = ","  # default value

        return delimiter
def startup(self):
    """
    Should start the tool as fast as possible.

    Builds the Gatling launcher command line, translates scenario/load
    settings into Scala system properties, prepares the environment
    (JAVA_OPTS, classpaths), and spawns the process.
    """
    simulation = self.get_scenario().get("simulation")
    datadir = os.path.realpath(self.engine.artifacts_dir)

    # a .jar script goes on the classpath; a file script implies its folder
    # as the simulations folder; otherwise the script path IS the folder
    if os.path.isfile(self.script):
        if self.script.endswith('.jar'):
            self.jar_list += os.pathsep + self.script
            simulation_folder = None
        else:
            simulation_folder = os.path.dirname(get_full_path(self.script))
    else:
        simulation_folder = self.script

    cmdline = [self.launcher]
    cmdline += ["-df", datadir, "-rf", datadir]
    cmdline += ["-on", self.dir_prefix, "-m"]
    if simulation_folder:
        cmdline += ["-sf", simulation_folder]
    if simulation:
        cmdline += ["-s", simulation]

    self.start_time = time.time()
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")

    # parameters consumed by the generated Scala simulation / Gatling core
    params_for_scala = self.settings.get('properties')
    load = self.get_load()
    scenario = self.get_scenario()

    if scenario.get('timeout', None) is not None:
        params_for_scala['gatling.http.ahc.requestTimeout'] = int(dehumanize_time(scenario.get('timeout')) * 1000)
    if scenario.get('keepalive', True):
        params_for_scala['gatling.http.ahc.keepAlive'] = 'true'
    else:
        params_for_scala['gatling.http.ahc.keepAlive'] = 'false'
    if load.concurrency is not None:
        params_for_scala['concurrency'] = load.concurrency
    if load.ramp_up is not None:
        params_for_scala['ramp-up'] = int(load.ramp_up)
    if load.hold is not None:
        params_for_scala['hold-for'] = int(load.hold)
    if load.iterations is not None and load.iterations != 0:
        params_for_scala['iterations'] = int(load.iterations)

    env = BetterDict()
    env.merge(dict(os.environ))

    # pass every parameter as a -D JVM flag, keeping any pre-existing
    # JAVA_OPTS plus module-level java-opts
    java_opts = ''.join([" -D%s=%s" % (key, params_for_scala[key]) for key in params_for_scala])
    java_opts += ' ' + env.get('JAVA_OPTS', '') + ' ' + self.settings.get('java-opts', '')

    env.merge({"JAVA_OPTS": java_opts, "NO_PAUSE": "TRUE"})

    if self.jar_list:
        # extend both runtime and compilation classpaths with collected jars
        java_classpath = env.get('JAVA_CLASSPATH', '')
        compilation_classpath = env.get('COMPILATION_CLASSPATH', '')
        java_classpath += self.jar_list
        compilation_classpath += self.jar_list
        env.merge({'JAVA_CLASSPATH': java_classpath, 'COMPILATION_CLASSPATH': compilation_classpath})

    self.process = self.execute(cmdline, stdout=self.stdout_file, stderr=self.stderr_file, env=env)
def _merge_and_compare(self, first, second, result):
    """Merge `second` onto `first` and assert the outcome equals `result`."""
    expected = BetterDict().merge(result)

    combined = BetterDict().merge(first)
    combined.merge(second)

    self.assertEqual(combined, expected)