def get_scenario(self):
    """
    Return the Scenario for this execution, either inlined or referenced by alias.

    The result is cached in ``self.__scenario`` after the first call, and
    ``self._label`` is filled from the alias, script basename, or a hash.

    :return: DictOfDicts
    """
    if self.__scenario is None:
        # NOTE: passing a ValueError as default makes the config dict raise it
        # when the key is missing (project-specific .get semantics — confirm)
        scenario = self.execution.get(
            'scenario', ValueError("Scenario not configured properly"))
        if isinstance(scenario, string_types):
            # scenario given by alias: resolve against top-level "scenarios"
            self._label = scenario
            scenarios = self.engine.config.get("scenarios")
            if scenario not in scenarios:
                raise ValueError("Scenario not found in scenarios: %s" % scenario)
            # a bare string entry is treated as a script path
            ensure_is_dict(scenarios, scenario, Scenario.SCRIPT)
            scenario = scenarios.get(scenario)
            self.__scenario = Scenario(self.engine, scenario)
        elif isinstance(scenario, dict):
            # inlined scenario definition
            self.__scenario = Scenario(self.engine, scenario)
        else:
            raise ValueError("Unsupported type for scenario")

    if self._label is None:
        try:
            error = ValueError("Wrong script in scenario")
            scen = self.__scenario.get(Scenario.SCRIPT, error)
            self._label = os.path.basename(scen)
        except BaseException:
            # no usable script path: fall back to a hash of the scenario body
            self._label = hashlib.md5(to_json(
                self.__scenario).encode()).hexdigest()

    return self.__scenario
def get_data_sources(self):
    """
    Return the normalized list of data sources for this scenario.

    Each entry is coerced to a dict (a bare string becomes ``{"path": value}``).

    :return: list of dicts
    :raises TaurusConfigError: if the "data-sources" option is not a list
    """
    data_sources = self.get(self.FIELD_DATA_SOURCES, [])
    if not isinstance(data_sources, list):
        raise TaurusConfigError("data-sources '%s' is not a list" % data_sources)
    for index, _ in enumerate(data_sources):
        data_sources[index:index + 1]  # no-op removed; see ensure_is_dict below
        ensure_is_dict(data_sources, index, "path")
    # return the already-normalized list instead of doing a redundant second lookup
    return data_sources
def get_load(self):
    """
    Helper method to read load specification.

    :return: namedtuple ``LoadSpec(concurrency, throughput, ramp_up, hold,
             iterations, duration, steps)``
    """
    prov_type = self.engine.config.get(Provisioning.PROV, None)

    # plain numeric values are coerced into {prov_type: value} dicts
    ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
    throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

    ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
    concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

    iterations = self.execution.get("iterations", None)
    ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
    steps = self.execution.get(ScenarioExecutor.STEPS, None)
    hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

    if ramp_up is None:
        # removed dead "ramp_up = None" self-assignment from original
        duration = hold
    else:
        ramp_up = dehumanize_time(ramp_up)
        duration = hold + ramp_up

    if duration and not iterations:
        iterations = 0  # which means infinite

    res = namedtuple("LoadSpec",
                     ('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
    return res(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput,
               hold=hold, iterations=iterations, duration=duration, steps=steps)
def get_scenario(self):
    """
    Return the Scenario for this execution, either inlined or referenced by alias.

    Caches the result in ``self.__scenario``; sets ``self._label`` from the
    alias, the script basename, or an MD5 hash of the scenario body.

    :return: DictOfDicts
    """
    if self.__scenario is None:
        # NOTE: an exception passed as default is raised by .get when the key
        # is absent (project-specific dict semantics — confirm)
        scenario = self.execution.get('scenario', ValueError("Scenario not configured properly"))
        if isinstance(scenario, string_types):
            # alias form: look up the named scenario in the global config
            self._label = scenario
            scenarios = self.engine.config.get("scenarios")
            if scenario not in scenarios:
                raise ValueError("Scenario not found in scenarios: %s" % scenario)
            # a bare string scenario entry is interpreted as a script path
            ensure_is_dict(scenarios, scenario, Scenario.SCRIPT)
            scenario = scenarios.get(scenario)
            self.__scenario = Scenario(self.engine, scenario)
        elif isinstance(scenario, dict):
            # inlined scenario definition
            self.__scenario = Scenario(self.engine, scenario)
        else:
            raise ValueError("Unsupported type for scenario")

    if self._label is None:
        try:
            error = ValueError("Wrong script in scenario")
            scen = self.__scenario.get(Scenario.SCRIPT, error)
            self._label = os.path.basename(scen)
        except BaseException:
            # no script available: derive a stable label from the scenario body
            self._label = hashlib.md5(to_json(self.__scenario).encode()).hexdigest()

    return self.__scenario
def get_raw_load(self):
    """Assemble the raw (non-evaluated) load specification for this execution."""
    prov_type = self.engine.config.get(Provisioning.PROV)

    # coerce plain throughput/concurrency values into {prov_type: value} dicts
    for field in (ScenarioExecutor.THRPT, ScenarioExecutor.CONCURR):
        ensure_is_dict(self.execution, field, prov_type)

    raw = {
        "throughput": self.execution.get(ScenarioExecutor.THRPT).get(prov_type, None),
        "concurrency": self.execution.get(ScenarioExecutor.CONCURR).get(prov_type, None),
        "iterations": self.execution.get("iterations", None),
        "steps": self.execution.get(ScenarioExecutor.STEPS, None),
        "hold": self.execution.get(ScenarioExecutor.HOLD_FOR, None),
        "ramp_up": self.execution.get(ScenarioExecutor.RAMP_UP, None),
    }
    # raw load carries no computed duration
    return self.LOAD_FMT(duration=None, **raw)
def get_load(self):
    """
    Helper method to read and validate the load specification.

    :return: namedtuple ``LoadSpec``
    :raises ValueError: when concurrency/throughput/steps/iterations
            are not numeric (or None)
    """
    prov_type = self.engine.config.get(
        Provisioning.PROV,
        ValueError("There must be provisioning type set"))

    ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
    throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

    ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
    concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

    iterations = self.execution.get("iterations", None)
    ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
    steps = self.execution.get(ScenarioExecutor.STEPS, None)
    hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

    if ramp_up is None:
        # removed dead "ramp_up = None" self-assignment
        duration = hold
    else:
        ramp_up = dehumanize_time(ramp_up)
        duration = hold + ramp_up

    if duration and not iterations:
        iterations = 0  # which means infinite

    # validate numeric fields; messages now name the actual offending field
    # (original copy-pasted "throughput" into the steps/iterations messages)
    if not isinstance(concurrency, numeric_types + (type(None),)):
        raise ValueError("Invalid concurrency value[%s]: %s" % (type(concurrency).__name__, concurrency))
    if not isinstance(throughput, numeric_types + (type(None),)):
        raise ValueError("Invalid throughput value[%s]: %s" % (type(throughput).__name__, throughput))
    if not isinstance(steps, numeric_types + (type(None),)):
        raise ValueError("Invalid steps value[%s]: %s" % (type(steps).__name__, steps))
    if not isinstance(iterations, numeric_types + (type(None),)):
        raise ValueError("Invalid iterations value[%s]: %s" % (type(iterations).__name__, iterations))

    res = namedtuple("LoadSpec",
                     ('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
    return res(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput,
               hold=hold, iterations=iterations, duration=duration, steps=steps)
def test_merge_configs(self):
    """Merging a string module alias over by a dict config must yield a BetterDict."""
    first = {"modules": {"local": "class_name"}}
    second = {"modules": {"local": {"class": "another_class"}}}

    merged = BetterDict()
    for chunk in (first, second):
        merged.merge(chunk)

    self.assertEqual(BetterDict.__name__, type(merged["modules"]["local"]).__name__)

    ensure_is_dict(merged["modules"], "local", "class")
    self.assertEqual("another_class", merged["modules"]["local"]["class"])
def get_load(self):
    """
    Helper method to read and validate the load specification.

    :return: ``self.LOAD_FMT`` namedtuple
    :raises TaurusConfigError: when any numeric field fails validation
    """
    def eval_int(value):
        # best-effort conversion; leave value as-is if it isn't an int
        try:
            return int(value)
        except (ValueError, TypeError):
            return value

    def eval_float(value):
        # best-effort conversion; leave value as-is if it isn't a number
        try:
            # fixed: original called int(), which rejected fractional throughput
            return float(value)
        except (ValueError, TypeError):
            return value

    prov_type = self.engine.config.get(Provisioning.PROV)

    ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
    throughput = eval_float(self.execution[ScenarioExecutor.THRPT].get(prov_type, 0))

    ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
    concurrency = eval_int(self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0))

    iterations = eval_int(self.execution.get("iterations", None))
    ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
    steps = eval_int(self.execution.get(ScenarioExecutor.STEPS, None))
    hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

    if ramp_up is None:
        duration = hold
    else:
        ramp_up = dehumanize_time(ramp_up)
        duration = hold + ramp_up

    if duration and not iterations:
        iterations = 0  # which means infinite

    # collect every validation problem into a single error message;
    # messages now name the actual field (original copy-pasted "throughput")
    msg = ''
    if not isinstance(concurrency, numeric_types + (type(None),)):
        msg += "Invalid concurrency value[%s]: %s " % (type(concurrency).__name__, concurrency)
    if not isinstance(throughput, numeric_types + (type(None),)):
        msg += "Invalid throughput value[%s]: %s " % (type(throughput).__name__, throughput)
    if not isinstance(steps, numeric_types + (type(None),)):
        msg += "Invalid steps value[%s]: %s " % (type(steps).__name__, steps)
    if not isinstance(iterations, numeric_types + (type(None),)):
        msg += "Invalid iterations value[%s]: %s " % (type(iterations).__name__, iterations)
    if msg:
        raise TaurusConfigError(msg)

    return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput,
                         hold=hold, iterations=iterations, duration=duration, steps=steps)
def __get_script(self):
    """Return the resolved script path from the scenario, or None when unset."""
    scenario = self.get_scenario()
    if Scenario.SCRIPT not in scenario:
        return None

    # normalize a bare string script entry into {"path": value}
    ensure_is_dict(scenario, Scenario.SCRIPT, "path")
    script_path = scenario[Scenario.SCRIPT]["path"]
    if script_path is None:
        return None
    return self.engine.find_file(script_path)
def get_load(self):
    """
    Helper method to read and validate the load specification.

    :return: namedtuple ``LoadSpec``
    :raises ValueError: when concurrency/throughput/steps/iterations
            are not numeric (or None)
    """
    prov_type = self.engine.config.get(Provisioning.PROV, None)

    ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
    throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

    ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
    concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

    iterations = self.execution.get("iterations", None)
    ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
    steps = self.execution.get(ScenarioExecutor.STEPS, None)
    hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

    if ramp_up is None:
        # removed dead "ramp_up = None" self-assignment
        duration = hold
    else:
        ramp_up = dehumanize_time(ramp_up)
        duration = hold + ramp_up

    if duration and not iterations:
        iterations = 0  # which means infinite

    # validate numeric fields; messages now name the actual offending field
    # (original copy-pasted "throughput" into the steps/iterations messages)
    if not isinstance(concurrency, numeric_types + (type(None),)):
        raise ValueError("Invalid concurrency value[%s]: %s" % (type(concurrency).__name__, concurrency))
    if not isinstance(throughput, numeric_types + (type(None),)):
        raise ValueError("Invalid throughput value[%s]: %s" % (type(throughput).__name__, throughput))
    if not isinstance(steps, numeric_types + (type(None),)):
        raise ValueError("Invalid steps value[%s]: %s" % (type(steps).__name__, steps))
    if not isinstance(iterations, numeric_types + (type(None),)):
        raise ValueError("Invalid iterations value[%s]: %s" % (type(iterations).__name__, iterations))

    res = namedtuple(
        "LoadSpec",
        ("concurrency", "throughput", "ramp_up", "hold", "iterations", "duration", "steps")
    )
    return res(
        concurrency=concurrency,
        ramp_up=ramp_up,
        throughput=throughput,
        hold=hold,
        iterations=iterations,
        duration=duration,
        steps=steps,
    )
def unify_config(self):
    """
    Normalize the raw config in place: wrap a single execution into a list,
    resolve default executors, and coerce shorthand string values into dicts.

    :raises TaurusConfigError: when an execution has no executor and no
            default executor is configured
    """
    executions = self.config.get(ScenarioExecutor.EXEC, [])
    if isinstance(executions, dict):
        # a single execution may be given as a bare dict
        executions = [executions]
    self.config[ScenarioExecutor.EXEC] = executions

    settings = self.config.get(SETTINGS)
    default_executor = settings.get("default-executor", None)

    prov_type = self.config.get(Provisioning.PROV)

    for execution in executions:
        # force_set persists the resolved default executor back into the config
        executor = execution.get("executor", default_executor, force_set=True)
        if not executor:
            msg = "Cannot determine executor type and no default executor in %s"
            raise TaurusConfigError(msg % execution)
        # plain throughput/concurrency numbers become {prov_type: value} dicts
        for param in (ScenarioExecutor.THRPT, ScenarioExecutor.CONCURR):
            ensure_is_dict(execution, param, prov_type)

    reporting = self.config.get(Reporter.REP, [])
    for index in range(len(reporting)):
        # bare module alias strings become {"module": alias}
        ensure_is_dict(reporting, index, "module")

    services = self.config.get(Service.SERV, [])
    for index in range(len(services)):
        ensure_is_dict(services, index, "module")

    modules = self.config.get("modules")
    for module in modules:
        # bare class-name strings become {"class": name}
        ensure_is_dict(modules, module, "class")
def __gen_datasources(self, scenario):
    """
    Build JMeter CSV DataSet XML elements for the scenario's data-sources.

    :param scenario: scenario config (dict-like)
    :return: list of etree elements (config element + hashTree pairs)
    :raises TaurusConfigError: on non-list data-sources or a missing file
    """
    sources = scenario.get("data-sources", [])
    if not sources:
        return []
    if not isinstance(sources, list):
        raise TaurusConfigError("data-sources '%s' is not a list" % sources)

    # hoisted out of the loop: the pattern is loop-invariant
    jmeter_var_pattern = re.compile(r"^\$\{.*\}$")

    elements = []
    for idx, source in enumerate(sources):
        # bare path strings become {"path": value}
        source = ensure_is_dict(sources, idx, "path")
        source_path = source["path"]
        delimiter = source.get('delimiter', None)

        if jmeter_var_pattern.match(source_path):
            # path is a JMeter variable: existence can't be verified here
            self.log.warning('JMeter variable "%s" found, check of file existence is impossible', source_path)
            if not delimiter:
                self.log.warning('CSV dialect detection impossible, default delimiter selected (",")')
                delimiter = ','
        else:
            modified_path = self.executor.engine.find_file(source_path)
            if not os.path.isfile(modified_path):
                raise TaurusConfigError("data-sources path not found: %s" % modified_path)
            if not delimiter:
                delimiter = self.__guess_delimiter(modified_path)
            source_path = get_full_path(modified_path)

        config = JMX._get_csv_config(source_path, delimiter, source.get("quoted", False),
                                     source.get("loop", True), source.get("variable-names", ""))
        elements.append(config)
        elements.append(etree.Element("hashTree"))
    return elements
def get_data_sources(self):
    """
    Generator yielding each data source as a normalized dict.

    Normalization: coerces bare strings to {"path": ...}, sets a default
    "loop" flag, translates tab delimiters, and resolves "quoted": "auto"
    by sniffing the file header.

    :raises TaurusConfigError: on a non-list option or an empty entry
    """
    sources = self.get(self.FIELD_DATA_SOURCES, [])
    if not isinstance(sources, list):
        raise TaurusConfigError("data-sources is not a list: '%s'" % sources)

    for idx, source in enumerate(sources):
        # bare path strings become {"path": value}
        source = ensure_is_dict(sources, idx, "path")
        if not source:
            raise TaurusConfigError("Data source must have valid file path: '%s'" % source)
        # default "loop" depends on functional mode; force_set writes it back
        source.get("loop", not self.engine.is_functional_mode(), force_set=True)

        delimiter = source.get("delimiter")
        if delimiter:
            # translate escaped "\t" and the literal word "tab" to a real tab
            source['delimiter'] = delimiter.replace('\\t', '\t')
            if delimiter.lower() == 'tab':
                source['delimiter'] = '\t'

        quoted, variables = source.get("quoted"), source.get("variable-names")
        dir_path = self.engine.find_file(source['path'])
        if quoted == "auto" and not variables:
            # sniff the header line to decide whether fields are quoted
            with open(dir_path) as csv_file:
                header = csv_file.readline()
                match = re.match(r'[[",\'][\w]+[",\'].?]*', header)
                if match is not None:
                    source['quoted'] = True
        if quoted and source['quoted'] == "auto":
            # "auto" without a quoted header resolves to False
            source['quoted'] = False

        yield source
def get_requests(self):
    """
    Generator object to read requests.

    Yields an ``HTTPReq`` namedtuple per entry of the scenario's "requests"
    list; bare string entries are coerced to ``{"url": value}``.
    """
    # the namedtuple class is loop-invariant: build it once, not per request
    res = namedtuple(
        "HTTPReq", ("url", "label", "method", "headers", "timeout", "think_time", "config", "body")
    )
    requests = self.get("requests", [])
    for key in range(len(requests)):
        req = ensure_is_dict(requests, key, "url")
        url = req["url"]
        label = req.get("label", url)
        method = req.get("method", "GET")
        headers = req.get("headers", {})
        timeout = req.get("timeout", None)
        think_time = req.get("think-time", None)

        body = None
        bodyfile = req.get("body-file", None)
        if bodyfile:
            # file content is only a default; an inline "body" still wins
            with open(bodyfile) as fhd:
                body = fhd.read()
        body = req.get("body", body)

        yield res(
            config=req,
            label=label,
            url=url,
            method=method,
            headers=headers,
            timeout=timeout,
            think_time=think_time,
            body=body,
        )
def get_requests(self):
    """
    Generator object to read requests.

    Yields an ``HTTPReq`` namedtuple per entry of the scenario's "requests"
    list; bare string entries are coerced to ``{"url": value}``.
    """
    # loop-invariant namedtuple class hoisted out of the loop; the original
    # also bound an unused "val" in its enumerate() loop
    res = namedtuple("HTTPReq",
                     ('url', 'label', 'method', 'headers', 'timeout', 'think_time', 'config', "body"))
    requests = self.get("requests", [])
    for key in range(len(requests)):
        request = ensure_is_dict(requests, key, "url")
        url = request["url"]
        label = request.get("label", url)
        method = request.get("method", "GET")
        headers = request.get("headers", {})
        timeout = request.get("timeout", None)
        think_time = request.get("think-time", None)

        body = None
        bodyfile = request.get("body-file", None)
        if bodyfile:
            # file content is only a default; an inline "body" still wins
            with open(bodyfile) as fhd:
                body = fhd.read()
        body = request.get("body", body)

        yield res(config=request, label=label, url=url, method=method, headers=headers,
                  timeout=timeout, think_time=think_time, body=body)
def __gen_check(self, method, req, task, timeout, global_headers):
    """
    Append a generated client call (plus assertion checks) to the task body.

    :param method: HTTP method name, used as ``self.client.<method>``
    :param req: request object whose config may carry "assert" items
    :param task: list of generated statements to append to
    :param timeout: request timeout forwarded to the params line
    :param global_headers: scenario-level headers, overridden by per-request ones
    """
    assertions = req.config.get("assert", [])
    first_assert = True
    if assertions:
        # catch_response lets the generated code mark success/failure itself
        statement = 'with self.client.%s(%s, catch_response=True) as response:'
    else:
        statement = "self.client.%s(%s)"

    headers = OrderedDict()
    if global_headers:
        # sort global headers for deterministic generated code
        sorted_headers = OrderedDict(sorted(global_headers.items(), key=lambda t: t[0]))
        headers.update(sorted_headers)
    if req.headers:
        # request headers take precedence over global ones
        headers.update(req.headers)

    task.append(self.gen_statement(statement % (method, self.__get_params_line(req, timeout, headers))))

    for idx, assertion in enumerate(assertions):
        # bare string assertions become {"contains": value}
        assertion = ensure_is_dict(assertions, idx, "contains")
        if not isinstance(assertion['contains'], list):
            assertion['contains'] = [assertion['contains']]
        self.__gen_assertion(task, assertion, first_assert)
        first_assert = False

    if assertions:
        # close the with-block: mark response successful when no assert fired
        task.append(self.gen_statement('else:', indent=12))
        task.append(self.gen_statement('response.success()', indent=16))
def _load_tasks(self, stage, container):
    """
    Instantiate Task objects for the given stage and append them to container.

    Only tasks whose "run-at" matches the current provisioning type are
    loaded; others are skipped with a debug message.

    :param stage: parameters key identifying the lifecycle stage
    :param container: list receiving the created Task objects
    """
    if not isinstance(self.parameters.get(stage, []), list):
        # allow a single task instead of a list
        self.parameters[stage] = [self.parameters[stage]]

    for index, stage_task in enumerate(self.parameters[stage]):
        # bare string tasks become {"command": value}
        stage_task = ensure_is_dict(self.parameters[stage], index, "command")
        task_config = self.parameters[stage][index]
        run_at = task_config.get("run-at", "local")
        default_cwd = self.settings.get("default-cwd", None)
        if run_at == self.engine.config.get(Provisioning.PROV, None):
            cwd = task_config.get("cwd", default_cwd)
            if cwd is None:
                working_dir = os.getcwd()
            elif cwd == 'artifacts-dir':
                working_dir = self.engine.artifacts_dir
            else:
                working_dir = cwd

            # build the task environment: os.environ first, then settings env,
            # then per-task env, with the working dir appended to PYTHONPATH
            env = BetterDict()
            env.merge({k: os.environ.get(k) for k in os.environ.keys()})
            env.merge(self.settings.get('env'))
            env.merge(task_config.get('env'))
            env.merge({"PYTHONPATH": working_dir})
            if os.getenv("PYTHONPATH"):
                # keep the ambient PYTHONPATH ahead of the working dir
                env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']
            # NOTE(review): constant is spelled ARTIFACTS_DIR_EVVAR here but
            # ARTIFACTS_DIR_ENVVAR in a sibling variant — confirm which exists
            env[ARTIFACTS_DIR_EVVAR] = self.engine.artifacts_dir
            # stringify keys and values for subprocess environment use
            for name, value in iteritems(env):
                env[str(name)] = str(value)

            task = Task(task_config, self.log, working_dir, env)
            container.append(task)
            self.log.debug("Added %s task: %s", stage, stage_task)
        else:
            self.log.debug("Skipped task: %s", task_config)
def __load_module(self, alias):
    """
    Load module class by alias

    :param alias: str
    :return: class
    :raises ValueError: on unknown alias, missing class name, or load failure
    """
    if alias in self.modules:
        # already resolved earlier
        return self.modules[alias]

    mod_conf = self.config.get('modules')
    if alias not in mod_conf:
        self.log.info("Possible module aliases: %s", [str(x) for x in sorted(mod_conf.keys())])
        raise ValueError("Module alias '%s' not found in module settings" % alias)

    # bare class-name strings become {"class": name}
    settings = ensure_is_dict(mod_conf, alias, "class")

    # log a deep copy with sensitive values masked out
    acopy = copy.deepcopy(settings)
    BetterDict.traverse(acopy, Configuration.masq_sensitive)
    self.log.debug("Module config: %s %s", alias, acopy)

    clsname = settings.get('class', None)
    if clsname is None:
        raise ValueError("Class name not found in module settings: %s" % settings)

    try:
        self.modules[alias] = load_class(clsname)
        if not issubclass(self.modules[alias], EngineModule):
            raise TypeError("Module class does not inherit from EngineModule: %s" % clsname)
    except BaseException:
        # re-raise everything as ValueError carrying the alias for context
        self.log.debug("Failed to load class %s: %s", clsname, traceback.format_exc())
        raise ValueError("Cannot load module '%s' with class %s" % (alias, clsname))

    return self.modules[alias]
def __prepare_services(self):
    """Instantiate configured service modules, filter them, and prepare each."""
    srv_config = self.config.get(Service.SERV, [])

    candidates = []
    for idx, conf in enumerate(srv_config):
        # bare module alias strings become {"module": alias}
        conf = ensure_is_dict(srv_config, idx, "module")
        instance = self.instantiate_module(conf.get('module', ''))
        instance.parameters = conf
        # skip duplicates of singleton services
        if not self.__singletone_exists(instance, candidates):
            assert isinstance(instance, Service)
            candidates.append(instance)

    # keep only the services that report they should run
    runnable = [svc for svc in candidates if svc.should_run()]
    self.services.extend(runnable)

    for module in self.services:
        self.prepared.append(module)
        module.prepare()
def _load_tasks(self, stage, container):
    """
    Instantiate Task objects for the given stage and append them to container.

    :param stage: parameters key identifying the lifecycle stage
    :param container: list receiving the created Task objects
    """
    if not isinstance(self.parameters.get(stage, []), list):
        # allow a single task instead of a list
        self.parameters[stage] = [self.parameters[stage]]

    for index, stage_task in enumerate(self.parameters.get(stage, [])):
        # bare string tasks become {"command": value}
        stage_task = ensure_is_dict(self.parameters[stage], index, "command")
        task_config = self.parameters[stage][index]
        default_cwd = self.settings.get("default-cwd", None)
        cwd = self.engine.find_file(task_config.get("cwd", default_cwd))
        if cwd is None:
            working_dir = self.engine.default_cwd
        elif cwd == 'artifacts-dir':
            working_dir = self.engine.artifacts_dir
        else:
            working_dir = cwd

        # make copy of env for every task
        env = Environment(self.log, self.env.get())
        env.set(task_config.get('env'))
        env.add_path({"PYTHONPATH": working_dir})

        task = Task(task_config, self.log, working_dir, env)
        container.append(task)
        self.log.debug("Added %s task: %s", stage, stage_task)
def __get_assertions(self, assertions):
    """
    Render the Gatling ``.check(...)`` clause for the given assertion items.

    :param assertions: list of assertion configs (strings or dicts)
    :return: str -- the indented check block, or '' when nothing to render
    """
    if len(assertions) == 0:
        return ''

    first_check = True
    check_result = self.indent('.check(\n', level=3)

    for idx, assertion in enumerate(assertions):
        # bare strings become {"contains": value}
        assertion = ensure_is_dict(assertions, idx, "contains")
        error_str = 'You must specify "contains" parameter for assertion item'
        a_contains = assertion.get('contains', TaurusConfigError(error_str))

        check_template = self.__get_check_template(assertion)
        if check_template == '':  # FIELD_HEADERS
            # "headers" subject is unsupported: bail out of the whole clause
            self.log.warning(
                'Sorry, but "headers" subject is not implemented for gatling asserts'
            )
            return ''

        if not isinstance(a_contains, list):
            a_contains = [a_contains]
        for sample in a_contains:
            if not first_check:
                check_result += ',\n'
            check_result += self.indent(check_template % {'sample': sample}, level=4)
            first_check = False

    check_result += '\n' + self.indent(')', level=3) + '\n'
    return check_result
def __get_assertions(self, assertions):
    """Render the Gatling ``.check(...)`` clause for the given assertion items."""
    if len(assertions) == 0:
        return ''

    opening = '\t' * 4
    sample_indent = '\t' * 5

    rendered = []
    for idx, assertion in enumerate(assertions):
        # bare strings become {"contains": value}
        assertion = ensure_is_dict(assertions, idx, "contains")
        error_str = 'You must specify some assertion argument in config file "contains" list'
        a_contains = assertion.get('contains', ValueError(error_str))

        check_template = self.__get_check_template(assertion)
        if check_template == '':  # FIELD_HEADERS
            # "headers" subject is unsupported: bail out of the whole clause
            self.log.warning('Sorry, but "headers" subject is not implemented for gatling asserts')
            return ''

        samples = a_contains if isinstance(a_contains, list) else [a_contains]
        for sample in samples:
            rendered.append(sample_indent + check_template % {'sample': sample})

    return opening + '.check(\n' + ',\n'.join(rendered) + ')\n'
def __gen_datasources(self, scenario):
    """
    Build JMeter CSV DataSet XML elements for the scenario's data-sources.

    :param scenario: scenario config (dict-like)
    :return: list of etree elements (config element + hashTree pairs)
    :raises TaurusConfigError: on non-list data-sources or a missing file
    """
    sources = scenario.get("data-sources")
    if not sources:
        return []
    if not isinstance(sources, list):
        raise TaurusConfigError("data-sources '%s' is not a list" % sources)

    elements = []
    for idx, source in enumerate(sources):
        # bare path strings become {"path": value}
        source = ensure_is_dict(sources, idx, "path")
        source_path = source["path"]
        delimiter = source.get("delimiter")

        if has_variable_pattern(source_path):
            # path is a JMeter variable/function: existence can't be verified
            msg = "Path to CSV contains JMeter variable/function, can't check for file existence: %s"
            self.log.warning(msg, source_path)
            if not delimiter:
                delimiter = ','
                self.log.warning("Can't detect CSV dialect, default delimiter will be '%s'", delimiter)
        else:
            source_path = self.executor.engine.find_file(source_path)
            if not os.path.isfile(source_path):
                raise TaurusConfigError("data-sources path not found: %s" % source_path)
            if not delimiter:
                delimiter = self.__guess_delimiter(source_path)

        config = JMX._get_csv_config(source_path, delimiter, source.get("quoted", False),
                                     source.get("loop", True), source.get("variable-names", ""))
        elements.append(config)
        elements.append(etree.Element("hashTree"))
    return elements
def __load_module(self, alias):
    """
    Load module class by alias

    :param alias: str
    :return: class
    """
    if alias in self.modules:
        return self.modules[alias]

    mod_conf = self.config.get('modules')
    if alias not in mod_conf:
        known = sorted(mod_conf.keys())
        raise TaurusConfigError("Module '%s' not found in list of available aliases %s" % (alias, known))

    # bare class-name strings become {"class": name}
    settings = ensure_is_dict(mod_conf, alias, "class")

    # log a masked deep copy so sensitive values never hit the log
    masked = copy.deepcopy(settings)
    BetterDict.traverse(masked, Configuration.masq_sensitive)
    self.log.debug("Module config: %s %s", alias, masked)

    err = TaurusConfigError("Class name for alias '%s' is not found in module settings: %s" % (alias, settings))
    clsname = settings.get('class', err)

    self.modules[alias] = load_class(clsname)
    if not issubclass(self.modules[alias], EngineModule):
        raise TaurusInternalException("Module class does not inherit from EngineModule: %s" % clsname)
    return self.modules[alias]
def _load_tasks(self, stage, container):
    """
    Instantiate Task objects for the given stage and append them to container.

    :param stage: parameters key identifying the lifecycle stage
    :param container: list receiving the created Task objects
    """
    if not isinstance(self.parameters.get(stage, []), list):
        # allow a single task instead of a list
        self.parameters[stage] = [self.parameters[stage]]

    for index, stage_task in enumerate(self.parameters.get(stage, [])):
        # bare string tasks become {"command": value}
        stage_task = ensure_is_dict(self.parameters[stage], index, "command")
        task_config = self.parameters[stage][index]
        default_cwd = self.settings.get("default-cwd", None)
        cwd = self.engine.find_file(task_config.get("cwd", default_cwd))
        if cwd is None:
            working_dir = self.engine.default_cwd
        elif cwd == 'artifacts-dir':
            working_dir = self.engine.artifacts_dir
        else:
            working_dir = cwd

        # build the task environment: os.environ first, then settings env,
        # then per-task env, with the working dir appended to PYTHONPATH
        env = BetterDict()
        env.merge({k: os.environ.get(k) for k in os.environ.keys()})
        env.merge(self.settings.get('env'))
        env.merge(task_config.get('env'))
        env.merge({"PYTHONPATH": working_dir})
        if os.getenv("PYTHONPATH"):
            # keep the ambient PYTHONPATH ahead of the working dir
            env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']
        env[ARTIFACTS_DIR_ENVVAR] = self.engine.artifacts_dir
        # stringify keys and values for subprocess environment use
        for name, value in iteritems(env):
            env[str(name)] = str(value)

        task = Task(task_config, self.log, working_dir, env)
        container.append(task)
        self.log.debug("Added %s task: %s", stage, stage_task)
def get_requests(self):
    """
    Generator object to read requests.

    Yields an ``HTTPReq`` namedtuple per entry of the scenario's "requests"
    list; bare string entries are coerced to ``{"url": value}``.
    """
    # loop-invariant namedtuple class hoisted out of the loop
    res = namedtuple("HTTPReq",
                     ('url', 'label', 'method', 'headers', 'timeout', 'think_time', 'config', "body"))
    requests = self.get("requests", [])
    for key in range(len(requests)):
        req = ensure_is_dict(requests, key, "url")
        url = req.get("url", ValueError("Option 'url' is mandatory for request"))
        label = req.get("label", url)
        method = req.get("method", "GET")
        headers = req.get("headers", {})
        timeout = req.get("timeout", None)
        think_time = req.get("think-time", None)

        body = None
        bodyfile = req.get("body-file", None)
        if bodyfile:  # fixed: don't call find_file(None) when no body-file is given
            bodyfile_path = self.engine.find_file(bodyfile)
            if bodyfile_path:
                # file content is only a default; an inline "body" still wins
                with open(bodyfile_path) as fhd:
                    body = fhd.read()
        body = req.get("body", body)

        yield res(config=req, label=label, url=url, method=method, headers=headers,
                  timeout=timeout, think_time=think_time, body=body)
def __get_assertions(self, assertions):
    """Render the Gatling ``.check(...)`` clause for the given assertion items."""
    if len(assertions) == 0:
        return ''

    pieces = []
    for idx, assertion in enumerate(assertions):
        # bare strings become {"contains": value}
        assertion = ensure_is_dict(assertions, idx, "contains")
        error_str = 'You must specify "contains" parameter for assertion item'
        a_contains = assertion.get('contains', TaurusConfigError(error_str))

        check_template = self.__get_check_template(assertion)
        if check_template == '':  # FIELD_HEADERS
            # "headers" subject is unsupported: bail out of the whole clause
            self.log.warning('Sorry, but "headers" subject is not implemented for gatling asserts')
            return ''

        samples = a_contains if isinstance(a_contains, list) else [a_contains]
        for sample in samples:
            pieces.append(self.indent(check_template % {'sample': sample}, level=4))

    opening = self.indent('.check(\n', level=3)
    closing = '\n' + self.indent(')', level=3) + '\n'
    return opening + ',\n'.join(pieces) + closing
def __load_module(self, alias):
    """
    Load module class by alias

    :param alias: str
    :return: class
    :raises TaurusConfigError: on unknown alias or missing class name
    :raises TaurusInternalException: when the class is not an EngineModule
    """
    if alias in self.modules:
        # already resolved earlier
        return self.modules[alias]

    mod_conf = self.config.get('modules')
    if alias not in mod_conf:
        msg = "Module '%s' not found in list of available aliases %s" % (
            alias, sorted(mod_conf.keys()))
        raise TaurusConfigError(msg)

    # bare class-name strings become {"class": name}
    settings = ensure_is_dict(mod_conf, alias, "class")

    # log a deep copy with sensitive values masked out
    acopy = copy.deepcopy(settings)
    BetterDict.traverse(acopy, Configuration.masq_sensitive)
    self.log.debug("Module config: %s %s", alias, acopy)

    # passing an exception as default makes .get raise it when the key
    # is missing (project-specific dict semantics — confirm)
    err = TaurusConfigError(
        "Class name for alias '%s' is not found in module settings: %s" % (alias, settings))
    clsname = settings.get('class', err)

    self.modules[alias] = load_class(clsname)
    if not issubclass(self.modules[alias], EngineModule):
        raise TaurusInternalException(
            "Module class does not inherit from EngineModule: %s" % clsname)

    return self.modules[alias]
def __load_module(self, alias):
    """
    Load module class by alias (cached in ``self.modules``).

    :param alias: str
    :return: class
    :raises ValueError: on unknown alias or when the class can't be loaded
    """
    if alias in self.modules:
        return self.modules[alias]

    mod_conf = self.config.get('modules')
    if alias not in mod_conf:
        raise ValueError("Module alias '%s' not found in module settings" % alias)

    # bare class-name strings become {"class": name}
    settings = ensure_is_dict(mod_conf, alias, "class")

    # log a masked deep copy so sensitive values never hit the log
    acopy = copy.deepcopy(settings)
    BetterDict.traverse(acopy, Configuration.masq_sensitive)
    self.log.debug("Module config: %s %s", alias, acopy)

    clsname = settings.get('class', None)
    try:
        self.modules[alias] = load_class(clsname)
        if not issubclass(self.modules[alias], EngineModule):
            raise TypeError(
                "Module class does not inherit from EngineModule: %s" % clsname)
    except BaseException:  # fixed: removed unused "as exc" binding
        # re-raise everything as ValueError carrying the alias for context
        self.log.debug("Failed to load class %s: %s", clsname, traceback.format_exc())
        raise ValueError("Cannot load module '%s' with class %s" % (alias, clsname))

    return self.modules[alias]
def _load_tasks(self, stage, container):
    """
    Instantiate Task objects for the given stage and append them to container.

    :param stage: parameters key identifying the lifecycle stage
    :param container: list receiving the created Task objects
    """
    if not isinstance(self.parameters.get(stage, []), list):
        # allow a single task instead of a list
        self.parameters[stage] = [self.parameters[stage]]

    for index, stage_task in enumerate(self.parameters.get(stage, [])):
        # bare string tasks become {"command": value}
        stage_task = ensure_is_dict(self.parameters[stage], index, "command")
        task_config = self.parameters[stage][index]
        default_cwd = self.settings.get("default-cwd", None)
        cwd = self.engine.find_file(task_config.get("cwd", default_cwd))
        if cwd is None:
            working_dir = self.engine.default_cwd
        elif cwd == 'artifacts-dir':
            working_dir = self.engine.artifacts_dir
        else:
            working_dir = cwd

        # todo: move it to new env
        # build the task environment: os.environ first, then settings env,
        # then per-task env, with the working dir appended to PYTHONPATH
        env = BetterDict.from_dict(
            {k: os.environ.get(k) for k in os.environ.keys()})
        env.merge(self.settings.get('env'))
        env.merge(task_config.get('env'))
        env.merge({"PYTHONPATH": working_dir})
        if os.getenv("PYTHONPATH"):
            # keep the ambient PYTHONPATH ahead of the working dir
            env['PYTHONPATH'] = os.getenv(
                "PYTHONPATH") + os.pathsep + env['PYTHONPATH']
        env[ARTIFACTS_DIR_ENVVAR] = self.engine.artifacts_dir
        # stringify keys and values for subprocess environment use
        for name, value in iteritems(env):
            env[str(name)] = str(value)

        task = Task(task_config, self.log, working_dir, env)
        container.append(task)
        self.log.debug("Added %s task: %s", stage, stage_task)
def __add_jsr_elements(children, req, get_from_config=True):
    """
    Append JSR223 processor elements for each "jsr223" config item.

    :type children: etree.Element
    :type req: Request
    :param get_from_config: read "jsr223" from ``req.config`` instead of ``req``
    :raises TaurusConfigError: when an item has neither script-file nor script-text
    """
    # removed dead "jsrs = []" initializer — both branches always assign it
    if get_from_config:
        jsrs = req.config.get("jsr223", [])
    else:
        jsrs = req.get("jsr223", [])
    if not isinstance(jsrs, list):
        # a single dict item is wrapped into a list
        jsrs = [jsrs]

    for idx, _ in enumerate(jsrs):
        # bare strings become {"script-text": value}
        jsr = ensure_is_dict(jsrs, idx, sub_key='script-text')
        lang = jsr.get("language", "groovy")
        script_file = jsr.get("script-file", None)
        script_text = jsr.get("script-text", None)
        if not script_file and not script_text:
            raise TaurusConfigError("jsr223 element must specify one of 'script-file' or 'script-text'")
        parameters = jsr.get("parameters", "")
        execute = jsr.get("execute", "after")
        cache_key = str(jsr.get("compile-cache", True)).lower()

        children.append(JMX._get_jsr223_element(lang, script_file, parameters, execute, script_text, cache_key))
        children.append(etree.Element("hashTree"))
def _gen_jsonpath_assertions(self, request):
    """
    Build AST statements calling ``response.assert_jsonpath`` (or the
    negated variant) for each "assert-jsonpath" item of the request.

    :param request: request object whose config may carry "assert-jsonpath"
    :return: list of ast.Expr nodes
    """
    stmts = []
    jpath_assertions = request.config.get("assert-jsonpath", [])
    for idx, assertion in enumerate(jpath_assertions):
        # bare strings become {"jsonpath": value}
        assertion = ensure_is_dict(jpath_assertions, idx, "jsonpath")
        exc = TaurusConfigError('JSON Path not found in assertion: %s' % assertion)
        query = assertion.get('jsonpath', exc)
        # empty expected value is normalized to None
        expected = assertion.get('expected-value', '') or None
        # "invert" flips the generated call to the negative assertion
        method = "assert_not_jsonpath" if assertion.get(
            'invert', False) else "assert_jsonpath"
        stmts.append(
            ast.Expr(
                ast.Call(func=ast.Attribute(value=ast.Name(id="response", ctx=ast.Load()),
                                            attr=method,
                                            ctx=ast.Load()),
                         args=[self.gen_expr(query)],
                         keywords=[
                             ast.keyword(arg="expected_value",
                                         value=self.gen_expr(expected))
                         ],
                         starargs=None,
                         kwargs=None)))
    return stmts
def __gen_datasources(self, scenario):
    """
    Build JMeter CSV DataSet XML elements for the scenario's data-sources.

    :param scenario: scenario config (dict-like)
    :return: list of etree elements (config element + hashTree pairs)
    :raises TaurusConfigError: on non-list data-sources or a missing file
    """
    sources = scenario.get("data-sources")
    if not sources:
        return []
    if not isinstance(sources, list):
        raise TaurusConfigError("data-sources '%s' is not a list" % sources)

    elements = []
    for idx, source in enumerate(sources):
        # bare path strings become {"path": value}
        source = ensure_is_dict(sources, idx, "path")
        source_path = source["path"]
        delimiter = source.get("delimiter")

        if has_variable_pattern(source_path):
            # path is a JMeter variable/function: existence can't be verified
            msg = "Path to CSV contains JMeter variable/function, can't check for file existence: %s"
            self.log.warning(msg, source_path)
            if not delimiter:
                delimiter = ','
                self.log.warning("Can't detect CSV dialect, default delimiter will be '%s'", delimiter)
        else:
            modified_path = self.executor.engine.find_file(source_path)
            if not os.path.isfile(modified_path):
                raise TaurusConfigError("data-sources path not found: %s" % modified_path)
            if not delimiter:
                delimiter = self.__guess_delimiter(modified_path)
            # normalize to an absolute path for the generated JMX
            source_path = get_full_path(modified_path)

        config = JMX._get_csv_config(source_path, delimiter, source.get("quoted", False),
                                     source.get("loop", True), source.get("variable-names", ""))
        elements.append(config)
        elements.append(etree.Element("hashTree"))
    return elements
def _gen_xpath_assertions(self, request):
    """Build AST statements asserting each "assert-xpath" entry of the request."""
    statements = []
    xpath_assertions = request.config.get("assert-xpath", [])
    for index, _ in enumerate(xpath_assertions):
        assertion = ensure_is_dict(xpath_assertions, index, "xpath")
        missing = TaurusConfigError('XPath not found in assertion: %s' % assertion)
        query = assertion.get('xpath', missing)
        # tolerant parser implies HTML parsing; strict implies XML
        if assertion.get('use-tolerant-parser', True):
            parser_type = 'html'
        else:
            parser_type = 'xml'
        validate = assertion.get('validate-xml', False)
        if assertion.get('invert', False):
            method = "assert_not_xpath"
        else:
            method = "assert_xpath"
        # emits: response.<method>(<query>, parser_type=..., validate=...)
        call = ast.Call(
            func=ast.Attribute(value=ast.Name(id="response", ctx=ast.Load()),
                               attr=method,
                               ctx=ast.Load()),
            args=[self.gen_expr(query)],
            keywords=[ast.keyword(arg="parser_type", value=self.gen_expr(parser_type)),
                      ast.keyword(arg="validate", value=self.gen_expr(validate))],
            starargs=None,
            kwargs=None)
        statements.append(ast.Expr(call))
    return statements
def get_requests(self):
    """
    Generator object to read requests

    Yields one HTTPReq namedtuple per entry of the "requests" list.
    A request without 'url' fails via the project dict's exception-default .get().
    """
    scenario = self
    requests = scenario.get("requests", [])
    # Hoisted out of the loop: the namedtuple class is loop-invariant, and the
    # original re-created it on every iteration.
    res = namedtuple("HTTPReq",
                     ('url', 'label', 'method', 'headers', 'timeout', 'think_time', 'config', "body"))
    for key in range(len(requests)):
        # ensure_is_dict normalizes a bare string entry into {"url": ...} in place
        req = ensure_is_dict(requests, key, "url")
        url = req.get("url", ValueError("Option 'url' is mandatory for request"))
        label = req.get("label", url)
        method = req.get("method", "GET")
        headers = req.get("headers", {})
        timeout = req.get("timeout", None)
        think_time = req.get("think-time", None)
        body = None
        bodyfile = req.get("body-file", None)
        if bodyfile:
            bodyfile_path = self.engine.find_file(bodyfile)
            with open(bodyfile_path) as fhd:
                body = fhd.read()
        # inline "body" overrides content read from "body-file"
        body = req.get("body", body)
        yield res(config=req, label=label, url=url, method=method, headers=headers,
                  timeout=timeout, think_time=think_time, body=body)
def __parse_requests(self, raw_requests, require_url=True):
    """Normalize each raw request entry to a dict and parse it into a request object."""
    parsed = []
    for index in range(len(raw_requests)):  # pylint: disable=consider-using-enumerate
        entry = ensure_is_dict(raw_requests, index, "url")
        if not require_url and "url" not in entry:
            entry["url"] = None
        parsed.append(self.__parse_request(entry))
    return parsed
def __add_regexp_ext(self, children, req):
    """
    Append a regexp extractor element (plus its hashTree sibling) to `children`
    for every variable declared under the request's "extract-regexp" section.
    """
    # Default to an empty dict so a request without "extract-regexp" is a
    # no-op instead of raising TypeError when iterating None.
    extractors = req.config.get("extract-regexp", {})
    for varname in extractors:
        cfg = ensure_is_dict(extractors, varname, "regexp")
        extractor = JMX._get_extractor(varname,
                                       cfg.get('subject', 'body'),
                                       cfg['regexp'],
                                       cfg.get('template', 1),
                                       cfg.get('match-no', 1),
                                       cfg.get('default', 'NOT_FOUND'))
        children.append(extractor)
        children.append(etree.Element("hashTree"))
def _load_tasks(self, stage, container):
    """
    Read task definitions for the given stage from self.parameters and
    append a Task for each into `container`.
    """
    if not isinstance(self.parameters.get(stage, []), list):
        self.parameters[stage] = [self.parameters[stage]]
    # Iterate via .get(): the original used self.parameters[stage], which
    # raised KeyError when the stage key was absent entirely, even though
    # the isinstance guard above deliberately tolerated its absence.
    for index, stage_task in enumerate(self.parameters.get(stage, [])):
        # normalizes a bare command string into {"command": ...} in place
        stage_task = ensure_is_dict(self.parameters[stage], index, "command")
        container.append(Task(self.parameters[stage][index], self.log, os.getcwd()))
        self.log.debug("Added task: %s, stage: %s", stage_task, stage)
def get_requests(self):
    """Generator object to read requests"""
    requests = self.get("requests", [])
    for index in range(len(requests)):
        request = ensure_is_dict(requests, index, "url")
        yield HTTPRequest(config=request, engine=self.engine)
def get_raw_load(self):
    """Read load-spec fields verbatim from the execution config (no time dehumanizing)."""
    prov_type = self.engine.config.get(Provisioning.PROV)
    # normalize per-provisioning shorthand values into dicts keyed by provisioning type
    for key in (ScenarioExecutor.THRPT, ScenarioExecutor.CONCURR):
        ensure_is_dict(self.execution, key, prov_type)
    raw = self.execution
    return self.LOAD_FMT(concurrency=raw.get(ScenarioExecutor.CONCURR).get(prov_type, None),
                         ramp_up=raw.get(ScenarioExecutor.RAMP_UP, None),
                         throughput=raw.get(ScenarioExecutor.THRPT).get(prov_type, None),
                         hold=raw.get(ScenarioExecutor.HOLD_FOR, None),
                         iterations=raw.get("iterations", None),
                         duration=None,
                         steps=raw.get(ScenarioExecutor.STEPS, None))
def get_requests(self):
    """
    Generator object to read requests

    :rtype: list[HTTPRequest]
    """
    requests = self.get("requests", [])
    for position, _ in enumerate(requests):
        config = ensure_is_dict(requests, position, "url")
        yield HTTPRequest(config=config, engine=self.engine)
def __add_xpath_ext(self, children, req):
    """
    Append an XPath extractor element (plus its hashTree sibling) to `children`
    for every variable declared under the request's "extract-xpath" section.
    """
    # Default to an empty dict so a request without "extract-xpath" is a
    # no-op instead of raising TypeError when iterating None.
    xpath_extractors = req.config.get("extract-xpath", {})
    for varname in xpath_extractors:
        cfg = ensure_is_dict(xpath_extractors, varname, "xpath")
        children.append(JMX._get_xpath_extractor(varname,
                                                 cfg['xpath'],
                                                 cfg.get('default', 'NOT_FOUND'),
                                                 cfg.get('validate-xml', False),
                                                 cfg.get('ignore-whitespace', True),
                                                 cfg.get('use-tolerant-parser', False)))
        children.append(etree.Element("hashTree"))
def __add_jquery_ext(self, children, req):
    """
    Append a CSS/JQuery extractor element (plus its hashTree sibling) to
    `children` for every variable declared under "extract-css-jquery".
    """
    # Default to an empty dict so a request without "extract-css-jquery" is
    # a no-op instead of raising TypeError when iterating None.
    css_jquery_extors = req.config.get("extract-css-jquery", {})
    for varname in css_jquery_extors:
        cfg = ensure_is_dict(css_jquery_extors, varname, "expression")
        extractor = self._get_jquerycss_extractor(varname,
                                                  cfg['expression'],
                                                  cfg.get('attribute', ""),
                                                  cfg.get('match-no', 0),
                                                  cfg.get('default', 'NOT_FOUND'))
        children.append(extractor)
        children.append(etree.Element("hashTree"))
def get_data_sources(self):
    """Yield each "data-sources" entry as a dict, validating list shape and non-empty entries."""
    raw = self.get(self.FIELD_DATA_SOURCES, [])
    if not isinstance(raw, list):
        raise TaurusConfigError("data-sources is not a list: '%s'" % raw)
    for position, _ in enumerate(raw):
        entry = ensure_is_dict(raw, position, "path")
        if not entry:
            raise TaurusConfigError("Data source must have valid file path: '%s'" % entry)
        yield entry
def _parse_requests(self, raw_requests, require_url=True):
    """
    Parse raw request entries into request objects.

    :param raw_requests: list of request configs; bare strings are normalized to {"url": ...}
    :param require_url: when False, an absent "url" is filled with None instead of failing later
    :raises TaurusConfigError: when an individual request fails to parse
    """
    requests = []
    for key in range(len(raw_requests)):  # pylint: disable=consider-using-enumerate
        req = ensure_is_dict(raw_requests, key, "url")
        if not require_url and "url" not in req:
            req["url"] = None
        try:
            requests.append(self._parse_request(req))
        except BaseException as exc:
            # pass lazy %-style args so formatting only happens when DEBUG is enabled
            # (original eagerly built the string with "%" before the call)
            logging.debug("%s\n%s", exc, traceback.format_exc())
            raise TaurusConfigError("Wrong request:\n %s" % req)
    return requests
def get_scenario(self):
    """
    Returns scenario dict, either inlined, or referenced by alias

    :return: DictOfDicts
    """
    if self.__scenario is not None:
        return self.__scenario

    raw = self.execution.get('scenario', ValueError("Scenario not configured properly"))
    if isinstance(raw, string_types):
        # alias: look it up in the top-level "scenarios" section
        scenarios = self.engine.config.get("scenarios")
        if raw not in scenarios:
            raise ValueError("Scenario not found in scenarios: %s" % raw)
        ensure_is_dict(scenarios, raw, Scenario.SCRIPT)
        self.__scenario = Scenario(scenarios.get(raw))
    elif isinstance(raw, dict):
        # inline scenario definition
        self.__scenario = Scenario(raw)
    else:
        raise ValueError("Unsupported type for scenario")
    return self.__scenario
def unify_config(self):
    """Normalize top-level config sections into their canonical shapes (lists of dicts)."""
    executions = self.config.get(ScenarioExecutor.EXEC, [])
    if isinstance(executions, dict):
        executions = [executions]
    # always write back so the section exists and is a list from here on
    self.config[ScenarioExecutor.EXEC] = executions

    settings = self.config.get(SETTINGS)
    default_executor = settings.get("default-executor", None)
    prov_type = self.config.get(Provisioning.PROV)
    for execution in executions:  # type: BetterDict
        # force_set stores the resolved executor back into the execution dict
        executor = execution.get("executor", default_executor, force_set=True)
        if not executor:
            msg = "Cannot determine executor type and no default executor in %s"
            raise TaurusConfigError(msg % execution)

    # reporters and services: normalize bare module-name strings into dicts
    for section in (Reporter.REP, Service.SERV):
        items = self.config.get(section, [])
        for index in range(len(items)):
            ensure_is_dict(items, index, "module")

    modules = self.config.get("modules")
    for module in modules:
        ensure_is_dict(modules, module, "class")
def __add_assertions(children, req):
    """
    Append assertion elements to `children` for the request: plain response
    assertions ("assert"), JSONPath assertions ("assert-jsonpath") and XPath
    assertions ("assert-xpath"). Every element is followed by its companion
    hashTree sibling.
    """
    assertions = req.config.get("assert", [])
    for idx, _ in enumerate(assertions):
        assertion = ensure_is_dict(assertions, idx, "contains")
        if not isinstance(assertion['contains'], list):
            # single value is shorthand for a one-element list
            assertion['contains'] = [assertion['contains']]
        element = JMX._get_resp_assertion(assertion.get("subject", Scenario.FIELD_BODY),
                                          assertion['contains'],
                                          assertion.get('regexp', True),
                                          assertion.get('not', False),
                                          assertion.get('assume-success', False))
        children.append(element)
        children.append(etree.Element("hashTree"))

    jpath_assertions = req.config.get("assert-jsonpath", [])
    for idx, _ in enumerate(jpath_assertions):
        assertion = ensure_is_dict(jpath_assertions, idx, "jsonpath")
        exc = TaurusConfigError('JSON Path not found in assertion: %s' % assertion)
        element = JMX._get_json_path_assertion(assertion.get('jsonpath', exc),
                                               assertion.get('expected-value', ''),
                                               assertion.get('validate', False),
                                               assertion.get('expect-null', False),
                                               assertion.get('invert', False),
                                               assertion.get('regexp', True))
        children.append(element)
        children.append(etree.Element("hashTree"))

    xpath_assertions = req.config.get("assert-xpath", [])
    for idx, _ in enumerate(xpath_assertions):
        assertion = ensure_is_dict(xpath_assertions, idx, "xpath")
        exc = TaurusConfigError('XPath not found in assertion: %s' % assertion)
        element = JMX._get_xpath_assertion(assertion.get('xpath', exc),
                                           assertion.get('validate-xml', False),
                                           assertion.get('ignore-whitespace', True),
                                           assertion.get('use-tolerant-parser', False),
                                           assertion.get('invert', False))
        children.append(element)
        children.append(etree.Element("hashTree"))
def _load_tasks(self, stage, container):
    """
    Read task definitions for the given stage and append a Task into
    `container` for each one whose "run-at" matches the current
    provisioning type; others are skipped.
    """
    if not isinstance(self.parameters.get(stage, []), list):
        self.parameters[stage] = [self.parameters[stage]]
    # Iterate via .get(): the original used self.parameters[stage], which
    # raised KeyError when the stage key was absent entirely, even though
    # the isinstance guard above deliberately tolerated its absence.
    for index, stage_task in enumerate(self.parameters.get(stage, [])):
        # normalizes a bare command string into {"command": ...} in place
        stage_task = ensure_is_dict(self.parameters[stage], index, "command")
        task_config = self.parameters[stage][index]
        run_at = task_config.get("run-at", "local")
        if run_at == self.engine.config.get(Provisioning.PROV, None):
            container.append(Task(task_config, self.log, os.getcwd()))
            self.log.debug("Added task: %s, stage: %s", stage_task, stage)
        else:
            self.log.debug("Skipped task: %s", task_config)
def __prepare_services(self):
    """
    Instantiate service modules, then prepare them
    """
    service_configs = self.config.get("services", [])
    for index, _ in enumerate(service_configs):
        config = ensure_is_dict(service_configs, index, "module")
        module_name = config.get('module', '')
        service = self.instantiate_module(module_name)
        service.parameters = config
        self.services.append(service)

    for service in self.services:
        service.prepare()