def startup(self):
    """
    Should start the tool as fast as possible.

    Opens stdout/stderr log artifacts, builds the child-process environment
    (JAVA_OPTS and classpaths on top of os.environ) and spawns the tool.
    """
    self.start_time = time.time()
    # redirect tool output into engine artifacts for later inspection
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")
    # start from a copy of the current process environment
    env = BetterDict()
    env.merge(dict(os.environ))
    # append module-level opts and per-simulation -D params to inherited JAVA_OPTS
    java_opts = env.get('JAVA_OPTS', '') + ' ' + self.settings.get(
        'java-opts', '')
    java_opts += ' ' + self.__get_params_for_scala()
    # NO_PAUSE: presumably stops the gatling launcher from waiting for a keypress -- confirm
    env.merge({"JAVA_OPTS": java_opts, "NO_PAUSE": "TRUE"})
    if self.jar_list:
        # extend both runtime and compilation classpaths with extra jars
        java_classpath = env.get('JAVA_CLASSPATH', '')
        compilation_classpath = env.get('COMPILATION_CLASSPATH', '')
        java_classpath += self.jar_list
        compilation_classpath += self.jar_list
        env.merge({
            'JAVA_CLASSPATH': java_classpath,
            'COMPILATION_CLASSPATH': compilation_classpath
        })
    self.process = self.execute(self.__get_cmdline(),
                                stdout=self.stdout_file,
                                stderr=self.stderr_file,
                                env=env)
def _create_runner(self, working_dir, report_file):
    """
    Build a configured runner instance for the detected script type.

    :param working_dir: directory the runner should operate in
    :param report_file: default path for the runner's report output
    """
    script_path = self.get_script_path()
    script_type = self.detect_script_type(script_path)

    if script_type == ".py":
        runner_cls, tool_key = NoseTester, "nose"
    else:
        # anything non-python (".jar" / ".java") is handled by the JUnit runner
        runner_cls, tool_key = JUnitTester, "junit"

    cfg = BetterDict()
    cfg.merge(self.settings.get("selenium-tools").get(tool_key))
    cfg['props-file'] = self.engine.create_artifact("customrunner", ".properties")
    cfg["script-type"] = script_type
    cfg["working-dir"] = working_dir

    # .get() with a default presumably seeds the key when missing (BetterDict pattern used file-wide) -- confirm
    cfg.get("artifacts-dir", self.engine.artifacts_dir)
    cfg.get("report-file", report_file)
    cfg.get("stdout", self.engine.create_artifact("junit", ".out"))
    cfg.get("stderr", self.engine.create_artifact("junit", ".err"))

    return runner_cls(cfg, self)
def update(self):
    """
    Refresh the widget text with per-location agent counts taken from the
    master's session list; re-fetches sessions lazily when none are cached.
    """
    if not self._sessions:
        self._sessions = self.prov.client.get_master_sessions(self.prov.client.active_session_id)
        if not self._sessions:
            return

    # executor -> scenario -> location -> servers count
    mapping = BetterDict()
    for session in self._sessions:
        try:
            name_split = session['name'].split('/')
            location = session['configuration']['location']
            count = session['configuration']['serversCount']
            mapping.get(name_split[0]).get(name_split[1])[location] = count
        except (KeyError, IndexError):
            # fix: a session name without '/' raised IndexError which escaped the
            # former `except KeyError` and crashed the update; treat malformed
            # session data uniformly and force a re-fetch on the next call
            self._sessions = None

    txt = "%s #%s\n" % (self.prov.test_name, self.prov.client.active_session_id)
    for executor, scenarios in iteritems(mapping):
        txt += " %s" % executor
        for scenario, locations in iteritems(scenarios):
            txt += " %s:\n" % scenario
            for location, count in iteritems(locations):
                txt += " Agents in %s: %s\n" % (location, count)
    self.text.set_text(txt)
def get_test_status_text(self):
    """
    Render a human-readable summary of the running test: per-executor,
    per-scenario agent counts in every location. Returns None while the
    master reports no sessions.
    """
    if not self._sessions:
        self._sessions = self.master.sessions()
        if not self._sessions:
            return

    # executor -> scenario -> location -> servers count
    tree = BetterDict()
    for session in self._sessions:
        try:
            parts = [chunk.strip() for chunk in session['name'].split('/')]
            location = session['configuration']['location']
            count = session['configuration']['serversCount']
            scenario_name = parts[1] if len(parts) > 1 else "N/A"
            executor_item = tree.get(parts[0], force_set=True)
            executor_item.get(scenario_name, force_set=True)[location] = count
        except KeyError:
            # malformed session entry: drop the cache so it is re-fetched
            self._sessions = None

    pieces = ["%s #%s\n" % (self._test['name'], self.master['id'])]
    for executor, scenarios in iteritems(tree):
        pieces.append(" %s" % executor)
        for scenario, locations in iteritems(scenarios):
            pieces.append(" %s:\n" % scenario)
            for location, count in iteritems(locations):
                pieces.append(" Agents in %s: %s\n" % (location, count))
    return "".join(pieces)
def __get_env(self):
    """
    Build the child-process environment: a copy of os.environ with JAVA_OPTS
    extended (inherited opts, settings' java-opts, scala simulation params)
    and, when extra jars are configured, both classpath variables extended.
    """
    env = BetterDict()
    env.merge(dict(os.environ))

    # inherited opts first, then module-level opts, then -D params for the simulation
    opts_chain = [env.get('JAVA_OPTS', ''),
                  self.settings.get('java-opts', ''),
                  self.__get_params_for_scala()]
    env.merge({"JAVA_OPTS": ' '.join(opts_chain), "NO_PAUSE": "TRUE"})

    if self.jar_list:
        extended = {}
        for var_name in ('JAVA_CLASSPATH', 'COMPILATION_CLASSPATH'):
            extended[var_name] = env.get(var_name, '') + self.jar_list
        env.merge(extended)

    return env
def update(self):
    """
    Refresh the widget text with per-location agent counts taken from the
    master's session list; re-fetches sessions lazily when none are cached.
    """
    if not self._sessions:
        self._sessions = self.prov.client.get_master_sessions(
            self.prov.client.active_session_id)
        if not self._sessions:
            return

    # executor -> scenario -> location -> servers count
    mapping = BetterDict()
    for session in self._sessions:
        try:
            name_split = session['name'].split('/')
            location = session['configuration']['location']
            count = session['configuration']['serversCount']
            mapping.get(name_split[0]).get(name_split[1])[location] = count
        except (KeyError, IndexError):
            # fix: a session name without '/' raised IndexError which escaped the
            # former `except KeyError` and crashed the update; treat malformed
            # session data uniformly and force a re-fetch on the next call
            self._sessions = None

    txt = "%s #%s\n" % (self.prov.test_name, self.prov.client.active_session_id)
    for executor, scenarios in iteritems(mapping):
        txt += " %s" % executor
        for scenario, locations in iteritems(scenarios):
            txt += " %s:\n" % scenario
            for location, count in iteritems(locations):
                txt += " Agents in %s: %s\n" % (location, count)
    self.text.set_text(txt)
def get_kpi_body(self, data_buffer, is_final):
    """
    Serialize buffered DataPoints into the JSON payload for KPI reporting.

    :param data_buffer: list of DataPoint objects to report
    :param is_final: when True, marks this payload as the last report
    :return: JSON string
    """
    # - reporting format:
    #   {labels: <data>,    # see below
    #    sourceID: <id of BlazeMeterClient object>,
    #    [is_final: True]}  # for last report
    #
    # - elements of 'data' are described in __get_label()
    #
    # - elements of 'intervals' are described in __get_interval()
    #   every interval contains info about response codes that were received on it.
    report_items = BetterDict()
    if data_buffer:
        # widen the known [first_ts, last_ts] window of the whole run
        self.owner.first_ts = min(self.owner.first_ts,
                                  data_buffer[0][DataPoint.TIMESTAMP])
        self.owner.last_ts = max(self.owner.last_ts,
                                 data_buffer[-1][DataPoint.TIMESTAMP])

        # following data is received in the cumulative way
        for label, kpi_set in iteritems(
                data_buffer[-1][DataPoint.CUMULATIVE]):
            if self.owner.extend_report:
                # extended mode: one label entry per transaction state
                report_item = {}
                for state in kpi_set:
                    report_item[state] = self.__get_label(
                        label, kpi_set[state])
                    self.__add_errors(report_item[state], kpi_set[state])
            else:
                report_item = self.__get_label(label, kpi_set)
                self.__add_errors(report_item, kpi_set)  # 'Errors' tab
            report_items[label] = report_item

        # fill 'Timeline Report' tab with intervals data
        # intervals are received in the additive way
        if report_items:
            for dpoint in data_buffer:
                time_stamp = dpoint[DataPoint.TIMESTAMP]
                for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]):
                    # every CURRENT label must already exist in CUMULATIVE;
                    # the exception default makes a missing label fatal
                    exc = TaurusInternalException(
                        'Cumulative KPISet is non-consistent')
                    report_item = report_items.get(label, exc)
                    if self.owner.extend_report:
                        for state in report_item:
                            if state in kpi_set:
                                report_item[state]['intervals'].append(
                                    self.__get_interval(
                                        kpi_set[state], time_stamp))
                    else:
                        report_item['intervals'].append(
                            self.__get_interval(kpi_set, time_stamp))

    report_items = [
        report_items[key] for key in sorted(report_items.keys())
    ]  # convert dict to list
    data = {"labels": report_items, "sourceID": id(self.owner)}
    if is_final:
        data['final'] = True

    return to_json(data)
class ResultsProvider(object):
    """
    Generic source of aggregated results with listener notification.

    :type listeners: list[AggregatorListener]
    """

    def __init__(self):
        super(ResultsProvider, self).__init__()
        self.cumulative = BetterDict()  # label -> cumulative KPISet of the whole run
        # default percentile levels to track
        self.track_percentiles = [0.0, 50.0, 90.0, 95.0, 99.0, 99.9, 100.0]
        self.listeners = []
        self.buffer_len = 2
        self.min_buffer_len = 2
        self.max_buffer_len = float('inf')
        self.buffer_multiplier = 2
        self.buffer_scale_idx = None
        # limit for stored raw response times; None presumably means unlimited -- confirm
        self.rtimes_len = None

    def add_listener(self, listener):
        """
        Add aggregate results listener

        :type listener: AggregatorListener
        """
        self.listeners.append(listener)

    def __merge_to_cumulative(self, current):
        """
        Merge current KPISet to cumulative

        :param current: KPISet
        """
        for label, data in iteritems(current):
            cumul = self.cumulative.get(label,
                                        KPISet(self.track_percentiles,
                                               self.rtimes_len),
                                        force_set=True)
            cumul.merge_kpis(data)
            cumul.recalculate()

    def datapoints(self, final_pass=False):
        """
        Generator object that returns datapoints from the reader

        :type final_pass: bool
        """
        for datapoint in self._calculate_datapoints(final_pass):
            current = datapoint[DataPoint.CURRENT]
            self.__merge_to_cumulative(current)
            # deep copy so later merges don't mutate already-emitted points
            datapoint[DataPoint.CUMULATIVE] = copy.deepcopy(self.cumulative)
            datapoint.recalculate()

            for listener in self.listeners:
                listener.aggregated_second(datapoint)

            yield datapoint

    @abstractmethod
    def _calculate_datapoints(self, final_pass=False):
        """
        :rtype : list[DataPoint]
        """
        yield
def startup(self):
    """
    Should start the tool as fast as possible.

    Builds the gatling command line and JAVA_OPTS (-D params from load
    spec and scenario), then spawns the process with output redirected
    into engine artifacts.

    :raise ValueError: when no simulation is configured
    """
    simulation = self.get_scenario().get("simulation", "")
    if not simulation:
        # TODO: guess simulation from script file
        raise ValueError("No simulation set")

    datadir = os.path.realpath(self.engine.artifacts_dir)
    cmdline = [self.settings["path"]]
    # fix: the results-folder flag was written as "-rf " (trailing space),
    # which gatling's launcher does not recognize as the -rf option
    cmdline += ["-sf", datadir, "-df", datadir, "-rf", datadir]
    cmdline += ["-on", "gatling-bzt", "-m", "-s", simulation]

    self.start_time = time.time()
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")

    params_for_scala = {}
    load = self.get_load()
    scenario = self.get_scenario()

    if scenario.get('timeout', None) is not None:
        params_for_scala['gatling.http.ahc.requestTimeout'] = int(
            dehumanize_time(scenario.get('timeout')) * 1000)
    if scenario.get('keepalive', None) is not None:
        # fix: YAML parses bare true/false into bool, which has no .lower();
        # stringify first so both "True" and True end up as "true"
        params_for_scala['gatling.http.ahc.keepAlive'] = str(
            scenario.get('keepalive')).lower()
    if load.concurrency is not None:
        params_for_scala['concurrency'] = load.concurrency
    if load.ramp_up is not None:
        params_for_scala['ramp-up'] = int(load.ramp_up)
    if load.hold is not None:
        params_for_scala['hold-for'] = int(load.hold)
    if load.iterations is not None and load.iterations != 0:
        params_for_scala['iterations'] = int(load.iterations)

    env = BetterDict()
    env.merge(dict(os.environ))
    java_opts = "".join([
        " -D%s=%s" % (key, params_for_scala[key]) for key in params_for_scala
    ])
    java_opts += " " + env.get(
        "JAVA_OPTS", "") + " " + self.engine.config.get("java_opts", "")
    env.merge({"JAVA_OPTS": java_opts})

    self.process = shell_exec(cmdline,
                              cwd=self.engine.artifacts_dir,
                              stdout=self.stdout_file,
                              stderr=self.stderr_file,
                              env=env)
class ResultsProvider(object):
    """
    Generic source of aggregated results with listener notification.

    :type listeners: list[AggregatorListener]
    """

    def __init__(self):
        super(ResultsProvider, self).__init__()
        self.cumulative = BetterDict()  # label -> cumulative KPISet of the whole run
        self.track_percentiles = []  # percentile levels to compute
        self.listeners = []
        self.buffer_len = 2
        self.min_buffer_len = 2
        self.max_buffer_len = float('inf')
        self.buffer_multiplier = 2
        self.buffer_scale_idx = None

    def add_listener(self, listener):
        """
        Add aggregate results listener

        :type listener: AggregatorListener
        """
        self.listeners.append(listener)

    def __merge_to_cumulative(self, current):
        """
        Merge current KPISet to cumulative

        :param current: KPISet
        """
        for label, data in iteritems(current):
            cumul = self.cumulative.get(label, KPISet(self.track_percentiles))
            cumul.merge_kpis(data)
            cumul.recalculate()

    def datapoints(self, final_pass=False):
        """
        Generator object that returns datapoints from the reader

        :type final_pass: bool
        """
        for datapoint in self._calculate_datapoints(final_pass):
            current = datapoint[DataPoint.CURRENT]
            self.__merge_to_cumulative(current)
            # deep copy so later merges don't mutate already-emitted points
            datapoint[DataPoint.CUMULATIVE] = copy.deepcopy(self.cumulative)
            datapoint.recalculate()

            for listener in self.listeners:
                listener.aggregated_second(datapoint)

            yield datapoint

    @abstractmethod
    def _calculate_datapoints(self, final_pass=False):
        """
        :rtype : tuple
        """
        pass
def __init__(self, config=None, **kwargs):
    """
    Resolve local path and download link for a tool jar from the given
    config (or class-level defaults) and delegate to the base tool class.

    :param config: dict-like tool config, a bare path string, or None
    """
    if config is None:
        config = BetterDict()
    if not isinstance(config, dict):
        # a bare string/path was passed instead of a config dict
        config = BetterDict.from_dict({"path": config})

    version = config.get("version", self.VERSION)
    jar_name = self.TOOL_FILE.format(version=version)

    path_template = config.get("path", self.LOCAL_PATH)
    tool_path = path_template.format(tool_file=jar_name)

    link_template = config.get("download-link", self.URL)
    link = link_template.format(remote_addr=self.REMOTE_ADDR,
                                remote_path=self.REMOTE_PATH.format(version=version))

    super(JarTool, self).__init__(tool_path=tool_path, download_link=link,
                                  version=version, **kwargs)
def startup(self):
    """
    Should start the tool as fast as possible.

    Builds the gatling command line (script/data/results folders, output
    name, simulation) and JAVA_OPTS (-D params from settings' properties,
    load spec and scenario), then spawns the process with output redirected
    into engine artifacts.

    :raise ValueError: when no simulation is configured
    """
    simulation = self.get_scenario().get("simulation", "")
    if not simulation:
        # TODO: guess simulation from script file
        raise ValueError("No simulation set")

    datadir = os.path.realpath(self.engine.artifacts_dir)

    # pass the directory of the script when a file is given, the folder itself otherwise
    if os.path.isfile(self.script):
        script_path = os.path.dirname(get_full_path(self.script))
    else:
        script_path = self.script

    cmdline = [self.settings["path"]]
    # fix: the results-folder flag was written as "-rf " (trailing space),
    # which gatling's launcher does not recognize as the -rf option
    cmdline += ["-sf", script_path, "-df", datadir, "-rf", datadir]
    cmdline += ["-on", self.dir_prefix, "-m", "-s", simulation]

    self.start_time = time.time()
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")

    params_for_scala = self.settings.get('properties')
    load = self.get_load()
    scenario = self.get_scenario()

    if scenario.get('timeout', None) is not None:
        params_for_scala['gatling.http.ahc.requestTimeout'] = int(dehumanize_time(scenario.get('timeout')) * 1000)
    if scenario.get('keepalive', None) is not None:
        # fix: YAML parses bare true/false into bool, which has no .lower();
        # stringify first so both "True" and True end up as "true"
        params_for_scala['gatling.http.ahc.keepAlive'] = str(scenario.get('keepalive')).lower()
    if load.concurrency is not None:
        params_for_scala['concurrency'] = load.concurrency
    if load.ramp_up is not None:
        params_for_scala['ramp-up'] = int(load.ramp_up)
    if load.hold is not None:
        params_for_scala['hold-for'] = int(load.hold)
    if load.iterations is not None and load.iterations != 0:
        params_for_scala['iterations'] = int(load.iterations)

    env = BetterDict()
    env.merge(dict(os.environ))
    java_opts = ''.join([" -D%s=%s" % (key, params_for_scala[key]) for key in params_for_scala])
    java_opts += ' ' + env.get('JAVA_OPTS', '') + ' ' + self.settings.get('java-opts', '')
    env.merge({"JAVA_OPTS": java_opts})

    self.process = self.execute(cmdline, stdout=self.stdout_file, stderr=self.stderr_file, env=env)
class ResultsProvider(object):
    """
    Generic source of aggregated results with listener notification.

    :type listeners: list[AggregatorListener]
    """

    def __init__(self):
        super(ResultsProvider, self).__init__()
        self.cumulative = BetterDict()  # label -> cumulative KPISet of the whole run
        self.track_percentiles = []  # percentile levels to compute
        self.listeners = []

    def add_listener(self, listener):
        """
        Add aggregate results listener

        :type listener: AggregatorListener
        """
        self.listeners.append(listener)

    def __merge_to_cumulative(self, current):
        """
        Merge current KPISet to cumulative

        :param current: KPISet
        :return:
        """
        for label, data in iteritems(current):
            cumul = self.cumulative.get(label, KPISet(self.track_percentiles))
            cumul.merge_kpis(data)
            # fix: derived stats (percentiles etc.) were never refreshed after
            # merging, so the stale cumulative got deep-copied into datapoints
            cumul.recalculate()

    def datapoints(self, final_pass=False):
        """
        Generator object that returns datapoints from the reader

        :type final_pass: bool
        """
        for datapoint in self._calculate_datapoints(final_pass):
            current = datapoint[DataPoint.CURRENT]
            self.__merge_to_cumulative(current)
            # deep copy so later merges don't mutate already-emitted points
            datapoint[DataPoint.CUMULATIVE] = copy.deepcopy(self.cumulative)
            datapoint.recalculate()

            for listener in self.listeners:
                listener.aggregated_second(datapoint)

            yield datapoint

    @abstractmethod
    def _calculate_datapoints(self, final_pass=False):
        """
        :rtype : tuple
        """
        pass
def prepare(self):
    """
    1) Locate script or folder
    2) detect script type
    3) create runner instance, prepare runner
    """
    scenario = self.get_scenario()
    self.kpi_file = self.engine.create_artifact("selenium_tests_report", ".csv")
    script_type, script_is_folder = self.detect_script_type(scenario.get("script"))
    runner_config = BetterDict()
    # pick runner implementation by script type; NOTE(review): any other type
    # falls through with an empty config and self.runner unset -- confirm intended
    if script_type == ".py":
        self.runner = NoseTester
        runner_config = self.settings.get("selenium-tools").get("nose")
    elif script_type == ".jar" or script_type == ".java":
        self.runner = JunitTester
        runner_config = self.settings.get("selenium-tools").get("junit")
    runner_config["script-type"] = script_type
    runner_working_dir = self.engine.create_artifact(runner_config.get("working-dir", "classes"), "")
    runner_config["working-dir"] = runner_working_dir
    # .get() calls with defaults presumably seed missing keys (BetterDict) -- confirm
    runner_config.get("artifacts-dir", self.engine.artifacts_dir)
    runner_config.get("working-dir", runner_working_dir)
    runner_config.get("report-file", self.kpi_file)
    runner_config.get("stdout", self.engine.create_artifact("junit", ".out"))
    runner_config.get("stderr", self.engine.create_artifact("junit", ".err"))

    if Scenario.SCRIPT in scenario:
        # copy user script(s) into the isolated working dir
        if script_is_folder:
            shutil.copytree(scenario.get("script"), runner_working_dir)
        else:
            os.makedirs(runner_working_dir)
            shutil.copy2(scenario.get("script"), runner_working_dir)

    self.runner = self.runner(runner_config, scenario, self.log)
    self.runner.prepare()
    self.reader = JTLReader(self.kpi_file, self.log, None)
    if isinstance(self.engine.aggregator, ConsolidatingAggregator):
        self.engine.aggregator.add_underling(self.reader)
class ResultsProvider(object):
    """
    Generic source of aggregated results with listener notification.

    :type listeners: list[AggregatorListener]
    """

    def __init__(self):
        super(ResultsProvider, self).__init__()
        self.cumulative = BetterDict()  # label -> cumulative KPISet of the whole run
        self.track_percentiles = []  # percentile levels to compute
        self.listeners = []

    def add_listener(self, listener):
        """
        Add aggregate results listener

        :type listener: AggregatorListener
        """
        self.listeners.append(listener)

    def __merge_to_cumulative(self, current):
        """
        Merge current KPISet to cumulative

        :param current: KPISet
        :return:
        """
        for label, data in six.iteritems(current):
            cumul = self.cumulative.get(label, KPISet(self.track_percentiles))
            cumul.merge_kpis(data)

    def datapoints(self, final_pass=False):
        """
        Generator object that returns datapoints from the reader

        :type final_pass: bool
        """
        for datapoint in self._calculate_datapoints(final_pass):
            current = datapoint[DataPoint.CURRENT]
            self.__merge_to_cumulative(current)
            # deep copy so later merges don't mutate already-emitted points
            datapoint[DataPoint.CUMULATIVE] = copy.deepcopy(self.cumulative)
            datapoint.recalculate()

            for listener in self.listeners:
                listener.aggregated_second(datapoint)

            yield datapoint

    def _calculate_datapoints(self, final_pass=False):
        # abstract hook: reader subclasses yield DataPoint objects here
        raise NotImplementedError()
def _create_runner(self, working_dir, kpi_file, err_file):
    """
    Build a configured runner instance for the detected script type.

    :param working_dir: directory the runner should operate in
    :param kpi_file: default path for the runner's KPI report
    :param err_file: default path for the runner's error report
    """
    script_path = self.get_script_path()
    script_type = self.detect_script_type(script_path)

    if script_type == ".py":
        runner_cls, tool_key = NoseTester, "nose"
    else:
        # anything non-python (".jar" / ".java") is handled by the JUnit runner
        runner_cls, tool_key = JUnitTester, "junit"

    cfg = BetterDict()
    cfg.merge(self.settings.get("selenium-tools").get(tool_key))
    cfg['props-file'] = self.engine.create_artifact("customrunner", ".properties")
    cfg["script-type"] = script_type
    cfg["working-dir"] = working_dir

    # .get() with a default presumably seeds the key when missing (BetterDict pattern used file-wide) -- confirm
    cfg.get("artifacts-dir", self.engine.artifacts_dir)
    cfg.get("report-file", kpi_file)
    cfg.get("err-file", err_file)
    cfg.get("stdout", self.engine.create_artifact("junit", ".out"))
    cfg.get("stderr", self.engine.create_artifact("junit", ".err"))

    return runner_cls(cfg, self)
def prepare(self):
    """
    1) Locate script or folder
    2) detect script type
    3) create runner instance, prepare runner
    """
    scenario = self.get_scenario()
    self.kpi_file = self.engine.create_artifact("selenium_tests_report", ".txt")
    script_type, script_is_folder = self.detect_script_type(scenario.get("script"))
    runner_config = BetterDict()
    # pick runner implementation by script type; NOTE(review): any other type
    # falls through with an empty config and self.runner unset -- confirm intended
    if script_type == ".py":
        self.runner = NoseTester
        runner_config = self.settings.get("selenium-tools").get("nose")
    elif script_type == ".jar" or script_type == ".java":
        self.runner = JunitTester
        runner_config = self.settings.get("selenium-tools").get("junit")
    runner_config["script-type"] = script_type
    runner_working_dir = self.engine.create_artifact(runner_config.get("working-dir", "classes"), "")
    runner_config["working-dir"] = runner_working_dir
    # .get() calls with defaults presumably seed missing keys (BetterDict) -- confirm
    runner_config.get("artifacts-dir", self.engine.artifacts_dir)
    runner_config.get("working-dir", runner_working_dir)
    runner_config.get("report-file", self.kpi_file)

    if Scenario.SCRIPT in scenario:
        # copy user script(s) into the isolated working dir
        if script_is_folder:
            shutil.copytree(scenario.get("script"), runner_working_dir)
        else:
            os.makedirs(runner_working_dir)
            shutil.copy2(scenario.get("script"), runner_working_dir)

    self.runner = self.runner(runner_config, scenario, self.log)
    self.runner.prepare()
    self.reader = SeleniumDataReader(self.kpi_file, self.log)
    if isinstance(self.engine.aggregator, ConsolidatingAggregator):
        self.engine.aggregator.add_underling(self.reader)
class ScenarioExecutor(EngineModule):
    """
    Base class for modules that execute a test scenario.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """

    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.provisioning = None
        self.execution = BetterDict()
        self.__scenario = None  # cached Scenario object, built lazily by get_scenario()
        self.label = None
        self.widget = None
        self.reader = None

    def has_results(self):
        """Return True when the attached reader has buffered data."""
        return bool(self.reader and self.reader.buffer)

    def get_script_path(self, scenario=None):
        """
        Resolve the scenario's script through the engine's file search.

        :type scenario: Scenario
        :return: resolved path or None when no script is configured
        """
        if scenario is None:
            scenario = self.get_scenario()
        if Scenario.SCRIPT in scenario and scenario[Scenario.SCRIPT]:
            return self.engine.find_file(scenario.get(Scenario.SCRIPT))
        else:
            return None

    def get_scenario(self, name=None):
        """
        Returns scenario dict, extract if scenario is inlined

        :return: DictOfDicts
        """
        if name is None and self.__scenario is not None:
            return self.__scenario

        scenarios = self.engine.config.get("scenarios")
        if name is None:  # get current scenario
            label = self.execution.get('scenario', ValueError("Scenario is not configured properly"))

            # a string that is not a known scenario name but points to an existing file is an inline script
            is_script = isinstance(label, string_types) and label not in scenarios and \
                os.path.exists(self.engine.find_file(label))
            if isinstance(label, list):
                raise ValueError("Invalid content of scenario, list type instead of dict or string")
            if isinstance(label, dict) or is_script:
                self.log.debug("Extract %s into scenarios" % label)
                if isinstance(label, string_types):
                    scenario = BetterDict()
                    scenario.merge({Scenario.SCRIPT: label})
                else:
                    scenario = label

                path = self.get_script_path(Scenario(self.engine, scenario))
                if path is not None:
                    label = os.path.basename(path)
                if path is None or label in scenarios:
                    # derive a unique, content-based name for the inline scenario
                    hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                    label = 'autogenerated_' + hash_str[-10:]

                scenarios[label] = scenario
                self.execution['scenario'] = label

            self.label = label
        else:  # get scenario by name
            label = name

        err = ValueError("Scenario not found in scenarios: %s" % label)
        scenario = scenarios.get(label, err)
        scenario_obj = Scenario(self.engine, scenario)
        if name is None:
            self.__scenario = scenario_obj
        return scenario_obj

    def get_load(self):
        """
        Helper method to read load specification

        :return: LoadSpec namedtuple (concurrency, throughput, ramp_up, hold,
                 iterations, duration, steps)
        :raise ValueError: when a load value has a non-numeric type
        """
        prov_type = self.engine.config.get(Provisioning.PROV, ValueError("There must be provisioning type set"))

        ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
        throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

        ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
        concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

        iterations = self.execution.get("iterations", None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # which means infinite

        # fix: the error messages for steps/iterations wrongly said "throughput"
        # (copy-paste); each message now names the offending option
        if not isinstance(concurrency, numeric_types + (type(None),)):
            raise ValueError("Invalid concurrency value[%s]: %s" % (type(concurrency).__name__, concurrency))
        if not isinstance(throughput, numeric_types + (type(None),)):
            raise ValueError("Invalid throughput value[%s]: %s" % (type(throughput).__name__, throughput))
        if not isinstance(steps, numeric_types + (type(None),)):
            raise ValueError("Invalid steps value[%s]: %s" % (type(steps).__name__, steps))
        if not isinstance(iterations, numeric_types + (type(None),)):
            raise ValueError("Invalid iterations value[%s]: %s" % (type(iterations).__name__, iterations))

        res = namedtuple("LoadSpec",
                         ('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
        return res(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                   iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        """Return files from the execution config plus whatever the executor reports itself."""
        files_list = self.execution.get("files", [])[:]
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self.label if self.label else id(self))

    def get_hostaliases(self):
        """Return the 'hostaliases' mapping from global settings."""
        settings = self.engine.config.get(SETTINGS, {})
        return settings.get("hostaliases", {})

    def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
        """
        Spawn a child process with engine conventions applied: HOSTALIASES
        support, inherited os.environ plus `env` overrides, and
        TAURUS_ARTIFACTS_DIR always set.
        """
        if cwd is None:
            cwd = self.engine.default_cwd

        aliases = self.get_hostaliases()
        hosts_file = None
        if aliases:
            hosts_file = self.engine.create_artifact("hostaliases", "")
            with open(hosts_file, 'w') as fds:
                for key, value in iteritems(aliases):
                    fds.write("%s %s\n" % (key, value))

        environ = BetterDict()
        environ.merge(dict(os.environ))
        if aliases:
            environ["HOSTALIASES"] = hosts_file
        if env is not None:
            environ.merge(env)
        environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})

        return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
class ConsolidatingAggregator(Aggregator, ResultsProvider):
    """
    Merges datapoints from multiple underling result providers into
    consolidated per-timestamp datapoints.

    :type underlings: list[bzt.modules.aggregator.ResultsProvider]
    """

    # TODO: switch to underling-count-based completeness criteria
    def __init__(self):
        Aggregator.__init__(self, is_functional=False)
        ResultsProvider.__init__(self)
        self.generalize_labels = False
        self.ignored_labels = ["ignore"]
        self.underlings = []
        self.buffer = BetterDict()  # timestamp -> list of datapoints awaiting consolidation
        self.rtimes_len = 1000

    def prepare(self):
        """
        Read aggregation options
        """
        super(ConsolidatingAggregator, self).prepare()

        # make unique & sort
        self.track_percentiles = self.settings.get("percentiles", self.track_percentiles)
        self.track_percentiles = list(set(self.track_percentiles))
        self.track_percentiles.sort()
        self.settings["percentiles"] = self.track_percentiles

        self.ignored_labels = self.settings.get("ignore-labels", self.ignored_labels)
        self.generalize_labels = self.settings.get("generalize-labels", self.generalize_labels)

        self.min_buffer_len = dehumanize_time(self.settings.get("min-buffer-len", self.min_buffer_len))

        max_buffer_len = self.settings.get("max-buffer-len", self.max_buffer_len)
        try:
            self.max_buffer_len = dehumanize_time(max_buffer_len)
        except TaurusInternalException as exc:
            # re-raise as a config error so the user sees the offending value
            self.log.debug("Exception in dehumanize_time(%s): %s", max_buffer_len, exc)
            raise TaurusConfigError("Wrong 'max-buffer-len' value: %s" % max_buffer_len)

        self.buffer_multiplier = self.settings.get("buffer-multiplier", self.buffer_multiplier)

        # choose which tracked percentile drives buffer scaling
        count = len(self.track_percentiles)
        if count == 1:
            self.buffer_scale_idx = str(float(self.track_percentiles[0]))
        if count > 1:
            percentile = self.settings.get("buffer-scale-choice", 0.5)
            percentiles = [i / (count - 1.0) for i in range(count)]
            distances = [abs(percentile - percentiles[i]) for i in range(count)]
            index_position = distances.index(min(distances))
            self.buffer_scale_idx = str(float(self.track_percentiles[index_position]))

            debug_str = 'Buffer scaling setup: percentile %s from %s selected'
            self.log.debug(debug_str, self.buffer_scale_idx, self.track_percentiles)

        self.rtimes_len = self.settings.get("rtimes-len", self.rtimes_len)

    def add_underling(self, underling):
        """
        Add source for aggregating

        :type underling: ResultsProvider
        """
        underling.track_percentiles = self.track_percentiles
        if isinstance(underling, ResultsReader):
            # propagate all buffering/labeling options down to the reader
            underling.ignored_labels = self.ignored_labels
            underling.generalize_labels = self.generalize_labels
            underling.min_buffer_len = self.min_buffer_len
            underling.max_buffer_len = self.max_buffer_len
            underling.buffer_multiplier = self.buffer_multiplier
            underling.buffer_scale_idx = self.buffer_scale_idx
            underling.rtimes_len = self.rtimes_len

        self.underlings.append(underling)

    def check(self):
        """
        Check if there is next aggregate data present

        :rtype: bool
        """
        for point in self.datapoints():
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])
        return super(ConsolidatingAggregator, self).check()

    def post_process(self):
        """
        Process all remaining aggregate data
        """
        super(ConsolidatingAggregator, self).post_process()
        for point in self.datapoints(True):
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])

    def _process_underlings(self, final_pass):
        # drain every underling's datapoints into the timestamp-keyed buffer
        for underling in self.underlings:
            for data in underling.datapoints(final_pass):
                tstamp = data[DataPoint.TIMESTAMP]
                if self.buffer:
                    mints = min(self.buffer.keys())
                    if tstamp < mints:
                        # late-arriving point: fold it into the oldest buffered second
                        self.log.debug("Putting datapoint %s into %s", tstamp, mints)
                        data[DataPoint.TIMESTAMP] = mints
                        tstamp = mints
                self.buffer.get(tstamp, [], force_set=True).append(data)

    def _calculate_datapoints(self, final_pass=False):
        """
        Override ResultsProvider._calculate_datapoints
        """
        self._process_underlings(final_pass)

        self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer), self.buffer.keys())
        if not self.buffer:
            return

        timestamps = sorted(self.buffer.keys())
        # emit seconds only once enough newer data has arrived (or on the final pass)
        while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
            tstamp = timestamps.pop(0)
            self.log.debug("Merging into %s", tstamp)
            points_to_consolidate = self.buffer.pop(tstamp)
            point = DataPoint(tstamp, self.track_percentiles)
            for subresult in points_to_consolidate:
                self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
                point.merge_point(subresult)
            point.recalculate()
            yield point
class EngineModule(object):
    """
    Base class for any BZT engine module; defines the lifecycle hooks
    (prepare -> startup -> check -> shutdown -> post_process).

    :type engine: Engine
    :type settings: BetterDict
    """

    def __init__(self):
        self.log = logging.getLogger('')
        self.engine = None  # set by the engine when the module is attached
        self.settings = BetterDict()
        self.parameters = BetterDict()

    def prepare(self):
        """
        Preparation stage: read configuration, get configs and tools ready.
        Anything slow (checks, downloads, validation) belongs here so that
        `startup` stays as fast as possible.
        """
        pass

    def startup(self):
        """
        Launch background processes and make initiating API calls.
        Must be quick -- heavy lifting should have happened in `prepare`.
        """
        pass

    def check(self):
        """
        Tell whether the work should be finished.

        :rtype: bool
        :return: True if should be finished
        """
        return False

    def shutdown(self):
        """
        Stop processes started during `startup`. Keep it fast; defer any
        long-running operations to `post_process`.
        """
        pass

    def post_process(self):
        """
        Perform potentially long analysis and processing of run results.
        """
        pass

    def _should_run(self):
        """
        Returns True if provisioning matches run-at
        """
        prov = self.engine.config.get(Provisioning.PROV)
        runat = self.parameters.get("run-at", None)
        if runat is None or prov == runat:
            return True
        self.log.debug("Should not run because of non-matching prov: %s != %s", prov, runat)
        return False
class ScenarioExecutor(EngineModule):
    """
    Base class for executors of load scenarios.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """
    EXEC = EXEC  # backward compatibility
    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    STEPS = "steps"
    LOAD_FMT = namedtuple("LoadSpec", "concurrency throughput ramp_up hold iterations duration steps")

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.env = Environment(log=self.log)
        self.provisioning = None
        self.execution = BetterDict()  # FIXME: why have this field if we have `parameters` from base class?
        self._cached_scenario = None
        self.label = None
        self.widget = None
        self.reader = None
        self.stdout = None
        self.stderr = None
        self.delay = None
        self.start_time = None
        self.preprocess_args = lambda x: None

    def _get_tool(self, tool, **kwargs):
        """
        Instantiate a RequiredTool subclass bound to this executor's
        environment, logger and the engine's HTTP client.
        """
        instance = tool(env=self.env, log=self.log, http_client=self.engine.get_http_client(), **kwargs)
        assert isinstance(instance, RequiredTool)
        return instance

    def has_results(self):
        """
        :return: True if the reader has buffered any results
        """
        if self.reader and self.reader.buffer:
            return True
        else:
            return False

    def get_script_path(self, required=False, scenario=None):
        """
        Resolve the scenario's script to an absolute path (and cache it back
        into the scenario).

        :type required: bool
        :type scenario: Scenario
        """
        if scenario is None:
            scenario = self.get_scenario()

        if required:
            exc = TaurusConfigError("You must provide script for %s" % self)
            script = scenario.get(Scenario.SCRIPT, exc)
        else:
            script = scenario.get(Scenario.SCRIPT)

        if script:
            script = self.engine.find_file(script)
            scenario[Scenario.SCRIPT] = script
        return script

    def get_scenario(self, name=None):
        """
        Returns scenario dict, extract if scenario is inlined

        :return: DictOfDicts
        """
        if name is None and self._cached_scenario is not None:
            return self._cached_scenario

        scenarios = self.engine.config.get("scenarios", force_set=True)
        label = self._get_scenario_label(name, scenarios)

        exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
        scenario_dict = scenarios.get(label, exc)
        scenario_obj = Scenario(self.engine, scenario_dict)

        if self.engine.provisioning.extend_configs:
            script = self.get_script_path(required=False, scenario=scenario_dict)
            if script and script.lower().endswith('xml'):
                script_content = ''
                try:
                    with codecs.open(script, encoding="UTF-8") as fds:
                        script_content = fds.read()
                except UnicodeDecodeError:
                    # binary/non-UTF8 file: definitely not a SoapUI project
                    pass

                if "con:soapui-project" in script_content:
                    scenario_obj = self._convert_soap_scenario(scenario_obj, script)

        if name is None:
            self._cached_scenario = scenario_obj

        return scenario_obj

    def _convert_soap_scenario(self, scenario_obj, script):
        """
        Convert a SoapUI project into a regular scenario and re-point all
        executions that referenced the old scenario name.
        """
        self.log.info("SoapUI project detected")
        new_scenario_name, scenario_dict = self._extract_scenario_from_soapui(scenario_obj, script)
        self.engine.config["scenarios"].merge({new_scenario_name: scenario_dict})
        prev_scenario_name = self.execution["scenario"]
        self.execution["scenario"] = new_scenario_name

        for execution in self.engine.config.get(EXEC):
            if execution.get(SCENARIO) == prev_scenario_name:
                execution[SCENARIO] = new_scenario_name

        return Scenario(self.engine, scenario_dict)

    def _get_scenario_label(self, name, scenarios):
        """
        Determine scenario label for the current execution, extracting any
        inlined scenario (dict or bare script path) into `scenarios`.
        """
        if name is None:  # get current scenario
            exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
            label = self.execution.get('scenario', exc)

            is_script = isinstance(label, string_types) and label not in scenarios and \
                os.path.exists(self.engine.find_file(label))
            if isinstance(label, list):
                msg = "Invalid content of scenario, list type instead of dict or string: %s"
                raise TaurusConfigError(msg % label)
            if isinstance(label, dict) or is_script:
                self.log.debug("Extract %s into scenarios" % label)
                if isinstance(label, string_types):
                    scenario = BetterDict.from_dict({Scenario.SCRIPT: label})
                else:
                    scenario = label

                path = self.get_script_path(scenario=Scenario(self.engine, scenario))
                if path:
                    label = os.path.basename(path)
                if not path or label in scenarios:
                    # avoid clashing with an existing scenario name
                    hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                    label = 'autogenerated_' + hash_str[-10:]

                scenarios[label] = scenario
                self.execution['scenario'] = label
            self.label = label
        else:  # get scenario by name
            label = name

        return label

    def _extract_scenario_from_soapui(self, base_scenario, script_path):
        """
        Convert the SoapUI project file into a scenario dict, merged with
        settings from `base_scenario`. Returns (unique_name, scenario_dict).
        """
        test_case = base_scenario.get("test-case", None)
        converter = SoapUIScriptConverter(self.log)
        conv_config = converter.convert_script(script_path)
        conv_scenarios = conv_config["scenarios"]
        scenario_name, conv_scenario = converter.find_soapui_test_case(test_case, conv_scenarios)

        new_name = scenario_name
        counter = 1
        while new_name in self.engine.config["scenarios"]:
            new_name = scenario_name + ("-%s" % counter)
            counter += 1

        if new_name != scenario_name:
            self.log.info("Scenario name '%s' is already taken, renaming to '%s'", scenario_name, new_name)
            scenario_name = new_name

        merged_scenario = BetterDict.from_dict(conv_scenario)
        merged_scenario.merge(base_scenario.data)
        for field in [Scenario.SCRIPT, "test-case"]:
            if field in merged_scenario:
                merged_scenario.pop(field)

        return scenario_name, merged_scenario

    def get_raw_load(self):
        """
        Read the raw (unvalidated, unconverted) load spec from execution config.
        """
        prov_type = self.engine.config.get(Provisioning.PROV)

        for param in (ScenarioExecutor.THRPT, ScenarioExecutor.CONCURR):
            ensure_is_dict(self.execution, param, prov_type)

        throughput = self.execution.get(ScenarioExecutor.THRPT).get(prov_type, None)
        concurrency = self.execution.get(ScenarioExecutor.CONCURR).get(prov_type, None)

        iterations = self.execution.get("iterations", None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = self.execution.get(ScenarioExecutor.HOLD_FOR, None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)

        return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                             iterations=iterations, duration=None, steps=steps)

    def get_load(self):
        """
        Helper method to read load specification
        """
        def eval_int(value):
            try:
                return int(value)
            except (ValueError, TypeError):
                return value

        def eval_float(value):
            try:
                # fixed: was int(value), which truncated fractional
                # throughput (e.g. 0.5 rps became 0)
                return float(value)
            except (ValueError, TypeError):
                return value

        raw_load = self.get_raw_load()

        iterations = eval_int(raw_load.iterations)
        ramp_up = raw_load.ramp_up

        throughput = eval_float(raw_load.throughput or 0)
        concurrency = eval_int(raw_load.concurrency or 0)

        steps = eval_int(raw_load.steps)
        hold = dehumanize_time(raw_load.hold or 0)

        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(raw_load.ramp_up)
            duration = hold + ramp_up

        if not iterations:
            if duration:
                iterations = 0  # infinite
            else:
                iterations = 1

        msg = ''
        if not isinstance(concurrency, numeric_types + (type(None),)):
            msg += "Invalid concurrency value[%s]: %s " % (type(concurrency).__name__, concurrency)
        if not isinstance(throughput, numeric_types + (type(None),)):
            msg += "Invalid throughput value[%s]: %s " % (type(throughput).__name__, throughput)
        if not isinstance(steps, numeric_types + (type(None),)):
            # fixed message: previously reported "throughput" for steps
            msg += "Invalid steps value[%s]: %s " % (type(steps).__name__, steps)
        if not isinstance(iterations, numeric_types + (type(None),)):
            # fixed message: previously reported "throughput" for iterations
            msg += "Invalid iterations value[%s]: %s " % (type(iterations).__name__, iterations)
        if msg:
            raise TaurusConfigError(msg)

        return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                             iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        """
        Collect files declared by the executor (FileLister) and in execution config.
        """
        files_list = []
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        files_list.extend(self.execution.get("files", []))
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self.label if self.label else id(self))

    def prepare(self):
        super(ScenarioExecutor, self).prepare()
        self.env.set(self.execution.get("env"))

    def _execute(self, args, **kwargs):
        """
        Start the tool subprocess with this executor's env and captured streams.

        :raise ToolError: if the process fails to start
        """
        self.preprocess_args(args)

        # for compatibility with other executors
        kwargs["stdout"] = kwargs.get("stdout", self.stdout) or PIPE
        kwargs["stderr"] = kwargs.get("stderr", self.stderr) or PIPE

        kwargs["cwd"] = kwargs.get("cwd", None)
        kwargs["env"] = self.env

        self.start_time = time.time()

        try:
            process = self.engine.start_subprocess(args=args, **kwargs)
        except OSError as exc:
            raise ToolError("Failed to start %s: %s (%s)" % (self.__class__.__name__, exc, args))
        return process

    def post_process(self):
        if self.stdout:
            self.stdout.close()
        if self.stderr:
            self.stderr.close()
        super(ScenarioExecutor, self).post_process()
def prepare(self): self.set_virtual_display() self.scenario = self.get_scenario() self._verify_script() self.kpi_file = self.engine.create_artifact("selenium_tests_report", ".csv") self.err_jtl = self.engine.create_artifact("selenium_tests_err", ".xml") script_path = self._get_script_path() script_type = self.detect_script_type(script_path) runner_config = BetterDict() if script_type == ".py": runner_class = NoseTester runner_config.merge(self.settings.get("selenium-tools").get("nose")) else: # script_type == ".jar" or script_type == ".java": runner_class = JUnitTester runner_config.merge(self.settings.get("selenium-tools").get("junit")) runner_config['props-file'] = self.engine.create_artifact("customrunner", ".properties") runner_config["script-type"] = script_type if self.runner_working_dir is None: self.runner_working_dir = self.engine.create_artifact(runner_config.get("working-dir", "classes"), "") runner_config["working-dir"] = self.runner_working_dir runner_config.get("artifacts-dir", self.engine.artifacts_dir) runner_config.get("working-dir", self.runner_working_dir) runner_config.get("report-file", self.kpi_file) runner_config.get("err-file", self.err_jtl) runner_config.get("stdout", self.engine.create_artifact("junit", ".out")) runner_config.get("stderr", self.engine.create_artifact("junit", ".err")) self._cp_resource_files(self.runner_working_dir) self.runner = runner_class(runner_config, self) self.runner.prepare() self.reader = JTLReader(self.kpi_file, self.log, self.err_jtl) if isinstance(self.engine.aggregator, ConsolidatingAggregator): self.engine.aggregator.add_underling(self.reader)
class ScenarioExecutor(EngineModule):
    """
    Base class for executors of load scenarios.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """
    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.provisioning = None
        self.execution = BetterDict()
        self.__scenario = None
        self._label = None

    def get_scenario(self):
        """
        Returns scenario dict, either inlined, or referenced by alias

        :return: DictOfDicts
        """
        if self.__scenario is None:
            scenario = self.execution.get('scenario', ValueError("Scenario not configured properly"))
            if isinstance(scenario, string_types):
                self._label = scenario
                scenarios = self.engine.config.get("scenarios")
                if scenario not in scenarios:
                    raise ValueError("Scenario not found in scenarios: %s" % scenario)
                ensure_is_dict(scenarios, scenario, Scenario.SCRIPT)
                scenario = scenarios.get(scenario)
                self.__scenario = Scenario(scenario)
            elif isinstance(scenario, dict):
                self.__scenario = Scenario(scenario)
            else:
                raise ValueError("Unsupported type for scenario")

        if self._label is None:
            if Scenario.SCRIPT in self.__scenario:
                # using script name if present
                error = ValueError("Wrong script in scenario")
                self._label = os.path.basename(self.__scenario.get(Scenario.SCRIPT, error))
            else:
                # last resort - a checksum of whole scenario
                self._label = hashlib.md5(to_json(self.__scenario).encode()).hexdigest()

        return self.__scenario

    def get_load(self):
        """
        Helper method to read load specification

        :return: namedtuple with concurrency/throughput/ramp_up/hold/iterations/duration/steps
        """
        prov_type = self.engine.config.get(Provisioning.PROV, None)

        ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
        throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

        ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
        concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

        iterations = self.execution.get("iterations", None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # which means infinite

        if not isinstance(concurrency, numeric_types + (type(None),)):
            raise ValueError("Invalid concurrency value[%s]: %s" % (type(concurrency).__name__, concurrency))
        if not isinstance(throughput, numeric_types + (type(None),)):
            raise ValueError("Invalid throughput value[%s]: %s" % (type(throughput).__name__, throughput))
        if not isinstance(steps, numeric_types + (type(None),)):
            # fixed message: previously reported "throughput" for steps
            raise ValueError("Invalid steps value[%s]: %s" % (type(steps).__name__, steps))
        if not isinstance(iterations, numeric_types + (type(None),)):
            # fixed message: previously reported "throughput" for iterations
            raise ValueError("Invalid iterations value[%s]: %s" % (type(iterations).__name__, iterations))

        res = namedtuple("LoadSpec",
                         ('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
        return res(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                   iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        """
        Return resource files list
        """
        files_list = self.execution.get("files", [])
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self._label if self._label else id(self))
class ScenarioExecutor(EngineModule):
    """
    Base class for executors of load scenarios.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """
    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.provisioning = None
        self.execution = BetterDict()
        self.__scenario = None
        self.label = None
        self.widget = None
        self.reader = None
        self.delay = None
        self.start_time = None

    def has_results(self):
        """
        :return: True if the reader has buffered any results
        """
        if self.reader and self.reader.buffer:
            return True
        else:
            return False

    def get_script_path(self, scenario=None):
        """
        Resolve the scenario's script to a path via the engine, or None.

        :type scenario: Scenario
        """
        if scenario is None:
            scenario = self.get_scenario()
        if Scenario.SCRIPT in scenario and scenario[Scenario.SCRIPT]:
            return self.engine.find_file(scenario.get(Scenario.SCRIPT))
        else:
            return None

    def get_scenario(self, name=None, cache_scenario=True):
        """
        Returns scenario dict, extract if scenario is inlined

        :return: DictOfDicts
        """
        if name is None and self.__scenario is not None:
            return self.__scenario

        scenarios = self.engine.config.get("scenarios")

        if name is None:  # get current scenario
            exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
            label = self.execution.get('scenario', exc)

            is_script = isinstance(label, string_types) and label not in scenarios and \
                os.path.exists(self.engine.find_file(label))
            if isinstance(label, list):
                msg = "Invalid content of scenario, list type instead of dict or string: %s"
                raise TaurusConfigError(msg % label)
            if isinstance(label, dict) or is_script:
                self.log.debug("Extract %s into scenarios" % label)
                if isinstance(label, string_types):
                    scenario = BetterDict()
                    scenario.merge({Scenario.SCRIPT: label})
                else:
                    scenario = label

                path = self.get_script_path(Scenario(self.engine, scenario))
                if path is not None:
                    label = os.path.basename(path)
                if path is None or label in scenarios:
                    # avoid clashing with an existing scenario name
                    hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                    label = 'autogenerated_' + hash_str[-10:]

                scenarios[label] = scenario
                self.execution['scenario'] = label
            self.label = label
        else:  # get scenario by name
            label = name

        exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
        scenario = scenarios.get(label, exc)
        scenario_obj = Scenario(self.engine, scenario)
        if name is None and cache_scenario:
            self.__scenario = scenario_obj
        return scenario_obj

    def get_load(self):
        """
        Helper method to read load specification
        """
        prov_type = self.engine.config.get(Provisioning.PROV)

        ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
        throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

        ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
        concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

        iterations = self.execution.get("iterations", None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # which means infinite

        msg = ''
        if not isinstance(concurrency, numeric_types + (type(None),)):
            msg += "Invalid concurrency value[%s]: %s " % (type(concurrency).__name__, concurrency)
        if not isinstance(throughput, numeric_types + (type(None),)):
            msg += "Invalid throughput value[%s]: %s " % (type(throughput).__name__, throughput)
        if not isinstance(steps, numeric_types + (type(None),)):
            # fixed message: previously reported "throughput" for steps
            msg += "Invalid steps value[%s]: %s " % (type(steps).__name__, steps)
        if not isinstance(iterations, numeric_types + (type(None),)):
            # fixed message: previously reported "throughput" for iterations
            msg += "Invalid iterations value[%s]: %s " % (type(iterations).__name__, iterations)
        if msg:
            raise TaurusConfigError(msg)

        res = namedtuple("LoadSpec",
                         ('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
        return res(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                   iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        """
        Collect files declared by the executor (FileLister) and in execution config.
        """
        files_list = []
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        files_list.extend(self.execution.get("files", []))
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self.label if self.label else id(self))

    def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
        """
        Run a subprocess with merged OS + custom environment.

        :raise: whatever shell_exec raises on failure to start
        """
        if cwd is None:
            cwd = self.engine.default_cwd
        environ = BetterDict()
        environ.merge(dict(os.environ))

        if env is not None:
            if is_windows():
                # as variables in windows are case insensitive we should provide correct merging
                cur_env = {name.upper(): environ[name] for name in environ}
                old_keys = set(env.keys())
                env = {name.upper(): env[name] for name in env}
                new_keys = set(env.keys())
                if old_keys != new_keys:
                    # fixed message grammar: was 'might be been lost'
                    msg = 'Some taurus environment variables might have been lost: %s'
                    self.log.debug(msg, list(old_keys - new_keys))
                environ = BetterDict()
                environ.merge(cur_env)
            environ.merge(env)

        environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})

        # drop None-valued entries: subprocess requires string env values
        environ = {key: environ[key] for key in environ.keys() if environ[key] is not None}

        self.log.debug("Executing shell from %s: %s", cwd, args)
        return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
class ScenarioExecutor(EngineModule):
    """
    Base class for executors of load scenarios.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """
    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.provisioning = None
        self.execution = BetterDict()
        self.__scenario = None
        self._label = None

    def get_scenario(self):
        """
        Returns scenario dict, either inlined, or referenced by alias

        :return: DictOfDicts
        """
        if self.__scenario is None:
            scenario = self.execution.get('scenario', ValueError("Scenario not configured properly"))
            if isinstance(scenario, string_types):
                self._label = scenario
                scenarios = self.engine.config.get("scenarios")
                if scenario not in scenarios:
                    raise ValueError("Scenario not found in scenarios: %s" % scenario)
                ensure_is_dict(scenarios, scenario, Scenario.SCRIPT)
                scenario = scenarios.get(scenario)
                self.__scenario = Scenario(self.engine, scenario)
            elif isinstance(scenario, dict):
                self.__scenario = Scenario(self.engine, scenario)
            else:
                raise ValueError("Unsupported type for scenario")

        if self._label is None:
            try:
                # prefer the script basename as a label, falling back to a
                # checksum of the whole scenario
                error = ValueError("Wrong script in scenario")
                scen = self.__scenario.get(Scenario.SCRIPT, error)
                self._label = os.path.basename(scen)
            except BaseException:
                self._label = hashlib.md5(to_json(self.__scenario).encode()).hexdigest()

        return self.__scenario

    def get_load(self):
        """
        Helper method to read load specification
        """
        prov_type = self.engine.config.get(Provisioning.PROV, ValueError("There must be provisioning type set"))

        ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
        throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

        ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
        concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

        iterations = self.execution.get("iterations", None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # which means infinite

        if not isinstance(concurrency, numeric_types + (type(None),)):
            raise ValueError("Invalid concurrency value[%s]: %s" % (type(concurrency).__name__, concurrency))
        if not isinstance(throughput, numeric_types + (type(None),)):
            raise ValueError("Invalid throughput value[%s]: %s" % (type(throughput).__name__, throughput))
        if not isinstance(steps, numeric_types + (type(None),)):
            # fixed message: previously reported "throughput" for steps
            raise ValueError("Invalid steps value[%s]: %s" % (type(steps).__name__, steps))
        if not isinstance(iterations, numeric_types + (type(None),)):
            # fixed message: previously reported "throughput" for iterations
            raise ValueError("Invalid iterations value[%s]: %s" % (type(iterations).__name__, iterations))

        res = namedtuple("LoadSpec",
                         ('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
        return res(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                   iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        """
        Return resource files list
        """
        files_list = self.execution.get("files", [])
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self._label if self._label else id(self))

    def get_hostaliases(self):
        """
        Read the `hostaliases` mapping from engine settings.
        """
        settings = self.engine.config.get(SETTINGS, {})
        return settings.get("hostaliases", {})

    def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
        """
        Run a subprocess, writing a HOSTALIASES file when aliases are configured.
        """
        aliases = self.get_hostaliases()
        hosts_file = None
        if aliases:
            hosts_file = self.engine.create_artifact("hostaliases", "")
            with open(hosts_file, 'w') as fds:
                for key, value in iteritems(aliases):
                    fds.write("%s %s\n" % (key, value))

        environ = BetterDict()
        environ.merge(dict(os.environ))
        if aliases:
            environ["HOSTALIASES"] = hosts_file
        if env is not None:
            environ.merge(env)
        environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})

        return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
class ScenarioExecutor(EngineModule):
    """
    Base class for executors of load scenarios.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """
    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"
    LOAD_FMT = namedtuple("LoadSpec", "concurrency throughput ramp_up hold iterations duration steps")

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.env = Environment(log=self.log)
        self.provisioning = None
        self.execution = BetterDict()  # FIXME: why have this field if we have `parameters` from base class?
        self.__scenario = None
        self.label = None
        self.widget = None
        self.reader = None
        self.stdout = None
        self.stderr = None
        self.delay = None
        self.start_time = None
        self.preprocess_args = lambda x: None

    def _get_tool(self, tool, **kwargs):
        """
        Instantiate a RequiredTool subclass bound to this executor's
        environment, logger and the engine's HTTP client.
        """
        instance = tool(env=self.env, log=self.log, http_client=self.engine.get_http_client(), **kwargs)
        assert isinstance(instance, RequiredTool)
        return instance

    def has_results(self):
        """
        :return: True if the reader has buffered any results
        """
        if self.reader and self.reader.buffer:
            return True
        else:
            return False

    def get_script_path(self, required=False, scenario=None):
        """
        Resolve the scenario's script to an absolute path (and cache it back
        into the scenario).

        :type required: bool
        :type scenario: Scenario
        """
        if scenario is None:
            scenario = self.get_scenario()

        if required:
            exc = TaurusConfigError("You must provide script for %s" % self)
            script = scenario.get(Scenario.SCRIPT, exc)
        else:
            script = scenario.get(Scenario.SCRIPT)

        if script:
            script = self.engine.find_file(script)
            scenario[Scenario.SCRIPT] = script
        return script

    def get_scenario(self, name=None, cache_scenario=True):
        """
        Returns scenario dict, extract if scenario is inlined

        :return: DictOfDicts
        """
        if name is None and self.__scenario is not None:
            return self.__scenario

        scenarios = self.engine.config.get("scenarios", force_set=True)

        if name is None:  # get current scenario
            exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
            label = self.execution.get('scenario', exc)

            is_script = isinstance(label, string_types) and label not in scenarios and \
                os.path.exists(self.engine.find_file(label))
            if isinstance(label, list):
                msg = "Invalid content of scenario, list type instead of dict or string: %s"
                raise TaurusConfigError(msg % label)
            if isinstance(label, dict) or is_script:
                self.log.debug("Extract %s into scenarios" % label)
                if isinstance(label, string_types):
                    scenario = BetterDict.from_dict({Scenario.SCRIPT: label})
                else:
                    scenario = label

                path = self.get_script_path(scenario=Scenario(self.engine, scenario))
                if path:
                    label = os.path.basename(path)
                if not path or label in scenarios:
                    # avoid clashing with an existing scenario name
                    hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                    label = 'autogenerated_' + hash_str[-10:]

                scenarios[label] = scenario
                self.execution['scenario'] = label
            self.label = label
        else:  # get scenario by name
            label = name

        exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
        scenario = scenarios.get(label, exc)
        scenario_obj = Scenario(self.engine, scenario)
        if name is None and cache_scenario:
            self.__scenario = scenario_obj
        return scenario_obj

    def get_load(self):
        """
        Helper method to read load specification
        """
        def eval_int(value):
            try:
                return int(value)
            except (ValueError, TypeError):
                return value

        def eval_float(value):
            try:
                # fixed: was int(value), which truncated fractional
                # throughput (e.g. 0.5 rps became 0)
                return float(value)
            except (ValueError, TypeError):
                return value

        prov_type = self.engine.config.get(Provisioning.PROV)

        throughput = eval_float(self.execution.get(ScenarioExecutor.THRPT).get(prov_type, 0))
        concurrency = eval_int(self.execution.get(ScenarioExecutor.CONCURR).get(prov_type, 0))

        iterations = eval_int(self.execution.get("iterations", None))

        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
        steps = eval_int(self.execution.get(ScenarioExecutor.STEPS, None))
        hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))
        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # which means infinite

        msg = ''
        if not isinstance(concurrency, numeric_types + (type(None),)):
            msg += "Invalid concurrency value[%s]: %s " % (type(concurrency).__name__, concurrency)
        if not isinstance(throughput, numeric_types + (type(None),)):
            msg += "Invalid throughput value[%s]: %s " % (type(throughput).__name__, throughput)
        if not isinstance(steps, numeric_types + (type(None),)):
            # fixed message: previously reported "throughput" for steps
            msg += "Invalid steps value[%s]: %s " % (type(steps).__name__, steps)
        if not isinstance(iterations, numeric_types + (type(None),)):
            # fixed message: previously reported "throughput" for iterations
            msg += "Invalid iterations value[%s]: %s " % (type(iterations).__name__, iterations)
        if msg:
            raise TaurusConfigError(msg)

        return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                             iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        """
        Collect files declared by the executor (FileLister) and in execution config.
        """
        files_list = []
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        files_list.extend(self.execution.get("files", []))
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self.label if self.label else id(self))

    def execute(self, args, **kwargs):
        """
        Start the tool subprocess with this executor's env and captured streams.

        :raise ToolError: if the process fails to start
        """
        self.preprocess_args(args)

        # for compatibility with other executors
        kwargs["stdout"] = kwargs.get("stdout", self.stdout) or PIPE
        kwargs["stderr"] = kwargs.get("stderr", self.stderr) or PIPE

        kwargs["cwd"] = kwargs.get("cwd", None)
        kwargs["env"] = self.env

        try:
            process = self.engine.start_subprocess(args=args, **kwargs)
        except OSError as exc:
            raise ToolError("Failed to start %s: %s (%s)" % (self.__class__.__name__, exc, args))
        return process

    def post_process(self):
        if self.stdout:
            self.stdout.close()
        if self.stderr:
            self.stderr.close()
        super(ScenarioExecutor, self).post_process()
class ConsolidatingAggregator(EngineModule, ResultsProvider):
    """
    Aggregator that consolidates datapoints from multiple underling
    result providers into unified per-second datapoints.

    :type underlings: list[bzt.modules.aggregator.ResultsProvider]
    """

    # TODO: switch to underling-count-based completeness criteria
    def __init__(self):
        EngineModule.__init__(self)
        ResultsProvider.__init__(self)
        self.generalize_labels = False  # whether to merge similar labels
        self.ignored_labels = []  # labels excluded from aggregation
        self.underlings = []  # data sources to consolidate
        self.buffer = BetterDict()  # timestamp -> list of pending datapoints
        self.buffer_len = 2  # seconds to hold data before merging

    def prepare(self):
        """
        Read aggregation options
        """
        super(ConsolidatingAggregator, self).prepare()
        self.track_percentiles = self.settings.get("percentiles", self.track_percentiles)
        self.buffer_len = self.settings.get("buffer-seconds", self.buffer_len)
        self.ignored_labels = self.settings.get("ignore-labels", self.ignored_labels)
        self.generalize_labels = self.settings.get("generalize-labels", self.generalize_labels)

    def add_underling(self, underling):
        """
        Add source for aggregating

        :type underling: ResultsProvider
        """
        underling.track_percentiles = self.track_percentiles
        if isinstance(underling, ResultsReader):
            underling.ignored_labels = self.ignored_labels
            underling.generalize_labels = self.generalize_labels
            # underling.buffer_len = self.buffer_len
            # NOTE: is it ok for underling to have the same buffer len?
        self.underlings.append(underling)

    def check(self):
        """
        Check if there is next aggregate data present

        :rtype: bool
        """
        # draining datapoints() is what actually drives consolidation
        for point in self.datapoints():
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])
        return super(ConsolidatingAggregator, self).check()

    def post_process(self):
        """
        Process all remaining aggregate data
        """
        super(ConsolidatingAggregator, self).post_process()
        # final_pass=True flushes everything still buffered
        for point in self.datapoints(True):
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])

    def _process_underlings(self, final_pass):
        # Pull fresh datapoints from every underling into self.buffer,
        # keyed by timestamp
        for underling in self.underlings:
            # NOTE(review): the comprehension materializes all of the
            # underling's datapoints before processing any — presumably
            # deliberate; confirm before replacing with direct iteration
            for data in [x for x in underling.datapoints(final_pass)]:
                tstamp = data[DataPoint.TIMESTAMP]
                if self.buffer:
                    mints = min(self.buffer.keys())
                    if tstamp < mints:
                        # late arrival: fold it into the earliest buffered
                        # second so emitted points stay ordered
                        self.log.warning("Putting datapoint %s into %s", tstamp, mints)
                        data[DataPoint.TIMESTAMP] = mints
                        tstamp = mints
                # NOTE(review): relies on BetterDict.get(key, default)
                # storing the default, so the append lands inside
                # self.buffer — verify against BetterDict implementation
                self.buffer.get(tstamp, []).append(data)

    def _calculate_datapoints(self, final_pass=False):
        """
        Override ResultsProvider._calculate_datapoints
        """
        self._process_underlings(final_pass)

        self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer), self.buffer.keys())
        if not self.buffer:
            return

        timestamps = sorted(self.buffer.keys())
        # emit a second only once buffer_len seconds of newer data exist,
        # unless this is the final pass (then flush everything)
        while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
            tstamp = timestamps.pop(0)
            self.log.debug("Merging into %s", tstamp)
            points_to_consolidate = self.buffer.pop(tstamp)
            point = DataPoint(tstamp, self.track_percentiles)
            for subresult in points_to_consolidate:
                self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
                point.merge_point(subresult)
            point.recalculate()
            yield point
class ScenarioExecutor(EngineModule):
    """
    Base class for modules that execute a load scenario.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """

    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.provisioning = None
        self.execution = BetterDict()
        self.__scenario = None
        self._label = None

    def get_scenario(self):
        """
        Returns scenario dict, either inlined, or referenced by alias

        :return: DictOfDicts
        """
        if self.__scenario is None:
            scenario = self.execution.get('scenario', ValueError("Scenario not configured properly"))
            if isinstance(scenario, string_types):
                self._label = scenario
                scenarios = self.engine.config.get("scenarios")
                if scenario not in scenarios:
                    raise ValueError("Scenario not found in scenarios: %s" % scenario)
                ensure_is_dict(scenarios, scenario, Scenario.SCRIPT)
                scenario = scenarios.get(scenario)
                self.__scenario = Scenario(self.engine, scenario)
            elif isinstance(scenario, dict):
                self.__scenario = Scenario(self.engine, scenario)
            else:
                raise ValueError("Unsupported type for scenario")

        if self._label is None:
            try:
                # prefer the script file name as a human-readable label
                error = ValueError("Wrong script in scenario")
                scen = self.__scenario.get(Scenario.SCRIPT, error)
                self._label = os.path.basename(scen)
            except BaseException:
                # no usable script name: fall back to a checksum of the scenario
                self._label = hashlib.md5(to_json(self.__scenario).encode()).hexdigest()

        return self.__scenario

    def get_load(self):
        """
        Helper method to read load specification.

        :return: LoadSpec namedtuple (concurrency, throughput, ramp_up, hold,
            iterations, duration, steps)
        :raises ValueError: when a load option is not numeric
        """
        prov_type = self.engine.config.get(Provisioning.PROV, ValueError("There must be provisioning type set"))

        ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
        throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

        ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
        concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

        iterations = self.execution.get("iterations", None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # which means infinite

        # each message names the actual offending option
        # (original copied "throughput" into the steps/iterations messages)
        if not isinstance(concurrency, numeric_types + (type(None),)):
            raise ValueError("Invalid concurrency value[%s]: %s" % (type(concurrency).__name__, concurrency))
        if not isinstance(throughput, numeric_types + (type(None),)):
            raise ValueError("Invalid throughput value[%s]: %s" % (type(throughput).__name__, throughput))
        if not isinstance(steps, numeric_types + (type(None),)):
            raise ValueError("Invalid steps value[%s]: %s" % (type(steps).__name__, steps))
        if not isinstance(iterations, numeric_types + (type(None),)):
            raise ValueError("Invalid iterations value[%s]: %s" % (type(iterations).__name__, iterations))

        res = namedtuple("LoadSpec",
                         ('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
        return res(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                   iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        """
        Return resource files list
        """
        # copy so extending with lister results doesn't mutate the config list
        files_list = self.execution.get("files", [])[:]
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self._label if self._label else id(self))

    def get_hostaliases(self):
        settings = self.engine.config.get(SETTINGS, {})
        return settings.get("hostaliases", {})

    def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
        """
        Start a subprocess with engine environment, optional host aliases
        and TAURUS_ARTIFACTS_DIR exported.
        """
        aliases = self.get_hostaliases()
        hosts_file = None
        if aliases:
            hosts_file = self.engine.create_artifact("hostaliases", "")
            with open(hosts_file, 'w') as fds:
                for key, value in iteritems(aliases):
                    fds.write("%s %s\n" % (key, value))

        environ = BetterDict()
        environ.merge(dict(os.environ))
        if aliases:
            environ["HOSTALIASES"] = hosts_file
        if env is not None:
            environ.merge(env)
        environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})

        return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
class ScenarioExecutor(EngineModule):
    """
    Base class for modules that execute a load scenario.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """

    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"
    LOAD_FMT = namedtuple("LoadSpec", "concurrency throughput ramp_up hold iterations duration steps")

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.env = Environment(log=self.log)
        self.provisioning = None
        self.execution = BetterDict()  # FIXME: why have this field if we have `parameters` from base class?
        self.__scenario = None
        self.label = None
        self.widget = None
        self.reader = None
        self.stdout = None
        self.stderr = None
        self.delay = None
        self.start_time = None
        self.preprocess_args = lambda x: None

    def _get_tool(self, tool, **kwargs):
        # instantiate a RequiredTool subclass bound to this executor's env/log
        instance = tool(env=self.env, log=self.log, http_client=self.engine.get_http_client(), **kwargs)
        assert isinstance(instance, RequiredTool)
        return instance

    def has_results(self):
        if self.reader and self.reader.buffer:
            return True
        else:
            return False

    def get_script_path(self, required=False, scenario=None):
        """
        Resolve the scenario's script to a full path via the engine.

        :type required: bool
        :type scenario: Scenario
        """
        if scenario is None:
            scenario = self.get_scenario()

        if required:
            exc = TaurusConfigError("You must provide script for %s" % self)
            script = scenario.get(Scenario.SCRIPT, exc)
        else:
            script = scenario.get(Scenario.SCRIPT)

        if script:
            script = self.engine.find_file(script)
            scenario[Scenario.SCRIPT] = script

        return script

    def get_scenario(self, name=None, cache_scenario=True):
        """
        Returns scenario dict, extract if scenario is inlined

        :return: DictOfDicts
        """
        if name is None and self.__scenario is not None:
            return self.__scenario

        scenarios = self.engine.config.get("scenarios", force_set=True)

        if name is None:  # get current scenario
            exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
            label = self.execution.get('scenario', exc)

            is_script = isinstance(label, string_types) and label not in scenarios and \
                os.path.exists(self.engine.find_file(label))
            if isinstance(label, list):
                msg = "Invalid content of scenario, list type instead of dict or string: %s"
                raise TaurusConfigError(msg % label)
            if isinstance(label, dict) or is_script:
                self.log.debug("Extract %s into scenarios" % label)
                if isinstance(label, string_types):
                    scenario = BetterDict.from_dict({Scenario.SCRIPT: label})
                else:
                    scenario = label

                path = self.get_script_path(scenario=Scenario(self.engine, scenario))
                if path:
                    label = os.path.basename(path)
                if not path or label in scenarios:
                    hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                    label = 'autogenerated_' + hash_str[-10:]

                scenarios[label] = scenario
                self.execution['scenario'] = label

            self.label = label
        else:  # get scenario by name
            label = name

        exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
        scenario = scenarios.get(label, exc)
        scenario_obj = Scenario(self.engine, scenario)
        if name is None and cache_scenario:
            self.__scenario = scenario_obj
        return scenario_obj

    def get_raw_load(self):
        """Read load options as configured, without type coercion or defaults."""
        prov_type = self.engine.config.get(Provisioning.PROV)

        for param in (ScenarioExecutor.THRPT, ScenarioExecutor.CONCURR):
            ensure_is_dict(self.execution, param, prov_type)

        throughput = self.execution.get(ScenarioExecutor.THRPT).get(prov_type, None)
        concurrency = self.execution.get(ScenarioExecutor.CONCURR).get(prov_type, None)

        iterations = self.execution.get("iterations", None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = self.execution.get(ScenarioExecutor.HOLD_FOR, None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)

        return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                             iterations=iterations, duration=None, steps=steps)

    def get_load(self):
        """
        Helper method to read load specification.

        :raises TaurusConfigError: when any load option is non-numeric
        """
        def eval_int(value):
            try:
                return int(value)
            except (ValueError, TypeError):
                return value

        def eval_float(value):
            try:
                # FIX: was int(value), which silently truncated fractional
                # throughput values (e.g. 0.5 RPS became 0)
                return float(value)
            except (ValueError, TypeError):
                return value

        raw_load = self.get_raw_load()

        iterations = eval_int(raw_load.iterations)
        ramp_up = raw_load.ramp_up
        throughput = eval_float(raw_load.throughput or 0)
        concurrency = eval_int(raw_load.concurrency or 0)
        steps = eval_int(raw_load.steps)
        hold = dehumanize_time(raw_load.hold or 0)

        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(raw_load.ramp_up)
            duration = hold + ramp_up

        if not iterations:
            if duration:
                iterations = 0  # infinite
            else:
                iterations = 1

        msg = ''
        # each message names the actual offending option
        # (original copied "throughput" into the steps/iterations messages)
        if not isinstance(concurrency, numeric_types + (type(None),)):
            msg += "Invalid concurrency value[%s]: %s " % (type(concurrency).__name__, concurrency)
        if not isinstance(throughput, numeric_types + (type(None),)):
            msg += "Invalid throughput value[%s]: %s " % (type(throughput).__name__, throughput)
        if not isinstance(steps, numeric_types + (type(None),)):
            msg += "Invalid steps value[%s]: %s " % (type(steps).__name__, steps)
        if not isinstance(iterations, numeric_types + (type(None),)):
            msg += "Invalid iterations value[%s]: %s " % (type(iterations).__name__, iterations)
        if msg:
            raise TaurusConfigError(msg)

        return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                             iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        files_list = []
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        files_list.extend(self.execution.get("files", []))
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self.label if self.label else id(self))

    def prepare(self):
        super(ScenarioExecutor, self).prepare()
        self.env.set(self.execution.get("env"))

    def _execute(self, args, **kwargs):
        self.preprocess_args(args)

        # for compatibility with other executors
        kwargs["stdout"] = kwargs.get("stdout", self.stdout) or PIPE
        kwargs["stderr"] = kwargs.get("stderr", self.stderr) or PIPE

        kwargs["cwd"] = kwargs.get("cwd", None)
        kwargs["env"] = self.env

        self.start_time = time.time()

        try:
            process = self.engine.start_subprocess(args=args, **kwargs)
        except OSError as exc:
            raise ToolError("Failed to start %s: %s (%s)" % (self.__class__.__name__, exc, args))
        return process

    def post_process(self):
        if self.stdout:
            self.stdout.close()
        if self.stderr:
            self.stderr.close()
        super(ScenarioExecutor, self).post_process()
class PipInstaller(Service):
    """
    Service that installs Python packages with ``pip`` into a Taurus-managed
    directory (a per-run temp dir, or the persistent user dir when temp=false).
    """

    def __init__(self, packages=None, temp_flag=True):
        super(PipInstaller, self).__init__()
        self.packages = packages or []
        # package name -> pinned version, filled from config by _convert_config_versions
        self.versions = BetterDict()
        self.engine = None
        # True: install into per-run temp pythonpath; False: into user pythonpath
        self.temp = temp_flag
        self.target_dir = None
        self.interpreter = sys.executable
        self.pip_cmd = [self.interpreter, "-m", "pip"]

    def _check_pip(self):
        # Ensure the pip module exists for this interpreter; raise if not.
        cmdline = self.pip_cmd + ["--version"]
        try:
            exec_and_communicate(cmdline)
        except TaurusCalledProcessError as exc:
            self.log.debug(exc)
            raise TaurusInternalException("pip module not found for interpreter %s" % self.interpreter)

    def _get_installed(self):
        # Parse `pip list` plain-text output into {package: version}.
        cmdline = self.pip_cmd + ["list"]
        out, _ = exec_and_communicate(cmdline)
        # skip the two header lines and the trailing empty line
        out = out.split('\n')[2:-1]
        return dict(
            zip([line.split(' ')[0] for line in out],
                [line.strip().split(' ')[-1] for line in out]))

    def _missed(self, packages):
        # Return packages that are absent, or present with a version different
        # from the one pinned in self.versions.
        installed = self._get_installed()
        missed = []
        for package in packages:
            if package not in installed or package in self.versions and installed[package] != self.versions[package]:
                missed.append(package)
        return missed

    def _convert_config_versions(self):
        """
        extract from packages config:
        packages:
        - one
        - two==0.0.0
        - name: three
          version: 0.0.0
        and add to self.packages and self.versions
        """
        packages_list = self.parameters.get("packages", None)
        if not packages_list:
            return
        for package_data in packages_list:
            package, version = None, None
            if isinstance(package_data, dict):
                package, version = package_data['name'], package_data.get("version", None)
            elif isinstance(package_data, str):
                package_params = package_data.split("==")
                package, version = package_params[0], package_params[1] if len(package_params) > 1 else None
            self.packages.append(package)
            if version:
                self.versions[package] = version

    def prepare_pip(self):
        """
        pip-installer expect follow definition:
        - service pip-install
          temp: false   # install to ~/.bzt instead of artifacts dir
          packages:
          - first_pkg
          - second_pkg
        """
        self._check_pip()
        self._convert_config_versions()
        if not self.packages:
            return

        # install into artifacts dir if temp, otherwise into .bzt
        # (parameters override settings override constructor flag)
        self.temp = self.settings.get("temp", self.temp)
        self.temp = self.parameters.get("temp", self.temp)
        self.target_dir = self.engine.temp_pythonpath if self.temp else self.engine.user_pythonpath

        if not os.path.exists(self.target_dir):
            os.makedirs(get_full_path(self.target_dir), exist_ok=True)

    def prepare(self):
        self.prepare_pip()
        if not self.all_packages_installed():
            self.install()

    def all_packages_installed(self):
        # Narrow self.packages/self.versions down to what still needs installing.
        self.packages = self._missed(self.packages)
        self.versions = {
            package: self.versions[package]
            for package in self.versions.keys() if package in self.packages
        }
        return False if self.packages else True

    def install(self):
        # Run `pip install -t <target_dir> pkg[==version] ... --upgrade`
        # and relay pip's warnings/errors into the Taurus log.
        if not self.packages:
            self.log.debug("Nothing to install")
            return
        cmdline = self.pip_cmd + ["install", "-t", self.target_dir]
        for package in self.packages:
            version = self.versions.get(package, None)
            cmdline += [f"{package}=={version}"] if version else [package]
        cmdline += ["--upgrade"]
        self.log.debug("pip-installer cmdline: '%s'" % ' '.join(cmdline))
        try:
            out, err = exec_and_communicate(cmdline)
        except TaurusCalledProcessError as exc:
            self.log.debug(exc)
            for line in exc.output.split('\n'):
                if line.startswith("ERROR"):
                    self.log.error(" ".join(line.split(" ")[1:]))
            return
        if "Successfully installed" in out:
            self.log.info(out.split("\n")[-2])
        for err_line in err.split("\n"):
            if err_line.startswith('WARNING'):
                self.log.warning(" ".join(err_line.split(" ")[1:]))
            if err_line.startswith('ERROR'):
                self.log.error(" ".join(err_line.split(" ")[1:]))
        self.log.debug("pip-installer stdout: \n%s" % out)
        if err:
            self.log.debug("pip-installer stderr:\n%s" % err)

    def get_version(self, package):
        # Raises KeyError if the package is not installed.
        installed = self._get_installed()
        return installed[package]

    def post_process(self):
        # might be forbidden on win as tool still work
        if self.packages and self.temp and not is_windows() and os.path.exists(self.target_dir):
            self.log.debug("remove packages: %s" % self.packages)
            shutil.rmtree(self.target_dir)  # it removes all content of directory in reality, not only self.packages
def startup(self):
    """
    Should start the tool as fast as possible.

    Builds the Gatling command line, opens stdout/stderr artifact logs,
    translates Taurus load/scenario options into JVM -D properties, and
    launches the Gatling process.
    """
    simulation = self.get_scenario().get("simulation")
    datadir = os.path.realpath(self.engine.artifacts_dir)

    # script may be a compiled .jar (appended to classpath), a single source
    # file (its folder becomes the simulations folder), or a folder of sources
    if os.path.isfile(self.script):
        if self.script.endswith('.jar'):
            self.jar_list += os.pathsep + self.script
            simulation_folder = None
        else:
            simulation_folder = os.path.dirname(get_full_path(self.script))
    else:
        simulation_folder = self.script

    cmdline = [self.launcher]
    cmdline += ["-df", datadir, "-rf", datadir]
    cmdline += ["-on", self.dir_prefix, "-m"]

    if simulation_folder:
        cmdline += ["-sf", simulation_folder]

    if simulation:
        cmdline += ["-s", simulation]

    self.start_time = time.time()
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")

    params_for_scala = self.settings.get('properties')
    load = self.get_load()
    scenario = self.get_scenario()

    # map Taurus scenario/load options onto Gatling's system properties
    if scenario.get('timeout', None) is not None:
        params_for_scala['gatling.http.ahc.requestTimeout'] = int(dehumanize_time(scenario.get('timeout')) * 1000)
    if scenario.get('keepalive', True):
        params_for_scala['gatling.http.ahc.keepAlive'] = 'true'
    else:
        params_for_scala['gatling.http.ahc.keepAlive'] = 'false'
    if load.concurrency is not None:
        params_for_scala['concurrency'] = load.concurrency
    if load.ramp_up is not None:
        params_for_scala['ramp-up'] = int(load.ramp_up)
    if load.hold is not None:
        params_for_scala['hold-for'] = int(load.hold)
    if load.iterations is not None and load.iterations != 0:
        params_for_scala['iterations'] = int(load.iterations)

    env = BetterDict()
    env.merge(dict(os.environ))

    # pass the collected properties as -D flags through JAVA_OPTS;
    # NO_PAUSE suppresses Gatling's interactive prompts
    java_opts = ''.join([" -D%s=%s" % (key, params_for_scala[key]) for key in params_for_scala])
    java_opts += ' ' + env.get('JAVA_OPTS', '') + ' ' + self.settings.get('java-opts', '')

    env.merge({"JAVA_OPTS": java_opts, "NO_PAUSE": "TRUE"})

    if self.jar_list:
        java_classpath = env.get('JAVA_CLASSPATH', '')
        compilation_classpath = env.get('COMPILATION_CLASSPATH', '')
        java_classpath += self.jar_list
        compilation_classpath += self.jar_list
        env.merge({'JAVA_CLASSPATH': java_classpath, 'COMPILATION_CLASSPATH': compilation_classpath})

    self.process = self.execute(cmdline, stdout=self.stdout_file, stderr=self.stderr_file, env=env)
class ScenarioExecutor(EngineModule):
    """
    Base class for modules that execute a load scenario.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """

    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.provisioning = None
        self.execution = BetterDict()
        self.__scenario = None

    def get_scenario(self):
        """
        Returns scenario dict, either inlined, or referenced by alias

        :return: DictOfDicts
        """
        if self.__scenario is None:
            scenario = self.execution.get('scenario', {})
            if isinstance(scenario, string_types):
                scenarios = self.engine.config.get("scenarios")
                if scenario not in scenarios:
                    raise ValueError("Scenario not found in scenarios: %s" % scenario)
                scenario = scenarios.get(scenario)
                self.__scenario = Scenario(scenario)
            elif isinstance(scenario, dict):
                self.__scenario = Scenario(scenario)
            else:
                raise ValueError("Scenario not configured properly: %s" % scenario)

        return self.__scenario

    def get_load(self):
        """
        Helper method to read load specification

        :return: LoadSpec namedtuple
        """
        prov_type = self.engine.config.get(Provisioning.PROV, None)

        ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
        throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

        ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
        concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

        iterations = self.execution.get("iterations", None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

        # (removed the no-op `ramp_up = None` reassignment from the None branch)
        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # which means infinite

        res = namedtuple("LoadSpec",
                         ('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
        return res(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                   iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        """
        Return resource files list
        """
        # copy so extending with lister results doesn't mutate the config list
        files_list = self.execution.get("files", [])[:]
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        return files_list
class ScenarioExecutor(EngineModule):
    """
    Base class for modules that execute a load scenario.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """

    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.provisioning = None
        self.execution = BetterDict()
        self.__scenario = None
        self.label = None
        self.widget = None
        self.reader = None

    def has_results(self):
        if self.reader and self.reader.buffer:
            return True
        else:
            return False

    def get_script_path(self, scenario=None):
        """
        Resolve the scenario's script to a full path, or None if not set.

        :type scenario: Scenario
        """
        if scenario is None:
            scenario = self.get_scenario()
        if Scenario.SCRIPT in scenario and scenario[Scenario.SCRIPT]:
            return self.engine.find_file(scenario.get(Scenario.SCRIPT))
        else:
            return None

    def get_scenario(self, name=None):
        """
        Returns scenario dict, extract if scenario is inlined

        :return: DictOfDicts
        """
        if name is None and self.__scenario is not None:
            return self.__scenario

        scenarios = self.engine.config.get("scenarios")

        if name is None:  # get current scenario
            label = self.execution.get('scenario', ValueError("Scenario is not configured properly"))

            is_script = isinstance(label, string_types) and label not in scenarios and \
                os.path.exists(self.engine.find_file(label))
            if isinstance(label, list):
                raise ValueError("Invalid content of scenario, list type instead of dict or string")
            if isinstance(label, dict) or is_script:
                self.log.debug("Extract %s into scenarios" % label)
                if isinstance(label, string_types):
                    scenario = BetterDict()
                    scenario.merge({Scenario.SCRIPT: label})
                else:
                    scenario = label

                # label becomes the script basename, unless it clashes or
                # there is no script - then an md5-based autogenerated name
                path = self.get_script_path(Scenario(self.engine, scenario))
                if path is not None:
                    label = os.path.basename(path)
                if path is None or label in scenarios:
                    hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                    label = 'autogenerated_' + hash_str[-10:]

                scenarios[label] = scenario
                self.execution['scenario'] = label

            self.label = label
        else:  # get scenario by name
            label = name

        err = ValueError("Scenario not found in scenarios: %s" % label)
        scenario = scenarios.get(label, err)
        scenario_obj = Scenario(self.engine, scenario)
        if name is None:
            self.__scenario = scenario_obj
        return scenario_obj

    def get_load(self):
        """
        Helper method to read load specification.

        :raises ValueError: when a load option is not numeric
        """
        prov_type = self.engine.config.get(Provisioning.PROV, ValueError("There must be provisioning type set"))

        ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
        throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

        ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
        concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

        iterations = self.execution.get("iterations", None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # which means infinite

        # each message names the actual offending option
        # (original copied "throughput" into the steps/iterations messages)
        if not isinstance(concurrency, numeric_types + (type(None),)):
            raise ValueError("Invalid concurrency value[%s]: %s" % (type(concurrency).__name__, concurrency))
        if not isinstance(throughput, numeric_types + (type(None),)):
            raise ValueError("Invalid throughput value[%s]: %s" % (type(throughput).__name__, throughput))
        if not isinstance(steps, numeric_types + (type(None),)):
            raise ValueError("Invalid steps value[%s]: %s" % (type(steps).__name__, steps))
        if not isinstance(iterations, numeric_types + (type(None),)):
            raise ValueError("Invalid iterations value[%s]: %s" % (type(iterations).__name__, iterations))

        res = namedtuple("LoadSpec",
                         ('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
        return res(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                   iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        files_list = self.execution.get("files", [])[:]
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self.label if self.label else id(self))

    def get_hostaliases(self):
        settings = self.engine.config.get(SETTINGS, {})
        return settings.get("hostaliases", {})

    def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
        """
        Start a subprocess with engine environment, optional host aliases
        and TAURUS_ARTIFACTS_DIR exported.
        """
        if cwd is None:
            cwd = self.engine.default_cwd

        aliases = self.get_hostaliases()
        hosts_file = None
        if aliases:
            hosts_file = self.engine.create_artifact("hostaliases", "")
            with open(hosts_file, 'w') as fds:
                for key, value in iteritems(aliases):
                    fds.write("%s %s\n" % (key, value))

        environ = BetterDict()
        environ.merge(dict(os.environ))
        if aliases:
            environ["HOSTALIASES"] = hosts_file
        if env is not None:
            environ.merge(env)
        environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})

        return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
class ConsolidatingAggregator(EngineModule, ResultsProvider):
    """
    Collects datapoints from several underling results providers, buffers them
    by timestamp, and yields consolidated datapoints. Buffer sizing is scaled
    from one of the tracked percentiles.

    :type underlings: list[bzt.modules.aggregator.ResultsProvider]
    """

    # TODO: switch to underling-count-based completeness criteria

    def __init__(self):
        EngineModule.__init__(self)
        ResultsProvider.__init__(self)
        self.generalize_labels = False
        self.ignored_labels = []
        self.underlings = []
        # maps timestamp -> list of datapoints waiting to be consolidated
        self.buffer = BetterDict()

    def prepare(self):
        """
        Read aggregation options
        """
        super(ConsolidatingAggregator, self).prepare()

        # make unique & sort
        percentiles = self.settings.get("percentiles", self.track_percentiles)
        percentiles = list(set(percentiles))
        percentiles.sort()
        self.track_percentiles = percentiles
        self.settings['percentiles'] = percentiles

        self.ignored_labels = self.settings.get("ignore-labels", self.ignored_labels)
        self.generalize_labels = self.settings.get("generalize-labels", self.generalize_labels)

        # min/max/multiplier defaults come from the base class - confirm there
        self.min_buffer_len = dehumanize_time(self.settings.get("min-buffer-len", self.min_buffer_len))

        max_buffer_len = self.settings.get("max-buffer-len", self.max_buffer_len)
        try:  # for max_buffer_len == float('inf')
            self.max_buffer_len = dehumanize_time(max_buffer_len)
        except ValueError as verr:
            if str(verr).find('inf') != -1:
                # keep the infinite value as-is; dehumanize_time can't parse it
                self.max_buffer_len = max_buffer_len
            else:
                raise

        self.buffer_multiplier = self.settings.get("buffer-multiplier", self.buffer_multiplier)

        # pick the tracked percentile whose relative position in the sorted
        # list is closest to buffer-scale-choice (default: the middle one)
        percentile = self.settings.get("buffer-scale-choice", 0.5)
        count = len(self.track_percentiles)
        if count == 1:
            self.buffer_scale_idx = str(float(self.track_percentiles[0]))
        if count > 1:
            percentiles = [i / (count - 1.0) for i in range(count)]
            distances = [abs(percentile - percentiles[i]) for i in range(count)]
            index_position = distances.index(min(distances))
            self.buffer_scale_idx = str(float(self.track_percentiles[index_position]))

        debug_str = 'Buffer scaling setup: percentile %s from %s selected'
        self.log.debug(debug_str, self.buffer_scale_idx, self.track_percentiles)

    def add_underling(self, underling):
        """
        Add source for aggregating

        :type underling: ResultsProvider
        """
        underling.track_percentiles = self.track_percentiles
        if isinstance(underling, ResultsReader):
            # propagate buffer tuning to readers so they pace consistently
            underling.ignored_labels = self.ignored_labels
            underling.generalize_labels = self.generalize_labels
            underling.min_buffer_len = self.min_buffer_len
            underling.max_buffer_len = self.max_buffer_len
            underling.buffer_multiplier = self.buffer_multiplier
            underling.buffer_scale_idx = self.buffer_scale_idx
        self.underlings.append(underling)

    def check(self):
        """
        Check if there is next aggregate data present

        :rtype: bool
        """
        # draining datapoints() triggers consolidation and listener callbacks
        for point in self.datapoints():
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])
        return super(ConsolidatingAggregator, self).check()

    def post_process(self):
        """
        Process all remaining aggregate data
        """
        super(ConsolidatingAggregator, self).post_process()
        # final_pass=True flushes everything left in the buffer
        for point in self.datapoints(True):
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])

    def _process_underlings(self, final_pass):
        # Pull fresh datapoints from every underling into self.buffer,
        # clamping timestamps that arrive older than anything buffered.
        for underling in self.underlings:
            for data in underling.datapoints(final_pass):
                tstamp = data[DataPoint.TIMESTAMP]
                if self.buffer:
                    mints = min(self.buffer.keys())
                    if tstamp < mints:
                        # late datapoint: fold it into the oldest buffered second
                        self.log.warning("Putting datapoint %s into %s", tstamp, mints)
                        data[DataPoint.TIMESTAMP] = mints
                        tstamp = mints
                self.buffer.get(tstamp, []).append(data)

    def _calculate_datapoints(self, final_pass=False):
        """
        Override ResultsProvider._calculate_datapoints
        """
        self._process_underlings(final_pass)

        self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer), self.buffer.keys())
        if not self.buffer:
            return

        timestamps = sorted(self.buffer.keys())
        # self.buffer_len is presumably maintained by the base class - verify;
        # emit a second only once the buffer spans more than buffer_len seconds,
        # unless this is the final pass (then drain everything)
        while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
            tstamp = timestamps.pop(0)
            self.log.debug("Merging into %s", tstamp)
            points_to_consolidate = self.buffer.pop(tstamp)
            point = DataPoint(tstamp, self.track_percentiles)
            for subresult in points_to_consolidate:
                self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
                point.merge_point(subresult)
            point.recalculate()
            yield point
class ScenarioExecutor(EngineModule):
    """
    Base class for modules that execute a load scenario.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """

    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.provisioning = None
        self.execution = BetterDict()
        self.__scenario = None
        self._label = None

    def get_scenario(self):
        """
        Returns scenario dict, either inlined, or referenced by alias

        :return: DictOfDicts
        """
        if self.__scenario is None:
            scenario = self.execution.get('scenario', ValueError("Scenario not configured properly"))
            if isinstance(scenario, string_types):
                self._label = scenario
                scenarios = self.engine.config.get("scenarios")
                if scenario not in scenarios:
                    raise ValueError("Scenario not found in scenarios: %s" % scenario)
                ensure_is_dict(scenarios, scenario, Scenario.SCRIPT)
                scenario = scenarios.get(scenario)
                self.__scenario = Scenario(scenario)
            elif isinstance(scenario, dict):
                self.__scenario = Scenario(scenario)
            else:
                raise ValueError("Unsupported type for scenario")

        if self._label is None:
            if Scenario.SCRIPT in self.__scenario:
                # using script name if present
                error = ValueError("Wrong script in scenario")
                self._label = os.path.basename(self.__scenario.get(Scenario.SCRIPT, error))
            else:
                # last resort - a checksum of whole scenario
                self._label = hashlib.md5(to_json(self.__scenario).encode()).hexdigest()

        return self.__scenario

    def get_load(self):
        """
        Helper method to read load specification.

        :return: LoadSpec namedtuple
        :raises ValueError: when a load option is not numeric
        """
        prov_type = self.engine.config.get(Provisioning.PROV, None)

        ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
        throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

        ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
        concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

        iterations = self.execution.get("iterations", None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

        # (removed the no-op `ramp_up = None` reassignment from the None branch)
        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # which means infinite

        # each message names the actual offending option
        # (original copied "throughput" into the steps/iterations messages)
        if not isinstance(concurrency, numeric_types + (type(None),)):
            raise ValueError("Invalid concurrency value[%s]: %s" % (type(concurrency).__name__, concurrency))
        if not isinstance(throughput, numeric_types + (type(None),)):
            raise ValueError("Invalid throughput value[%s]: %s" % (type(throughput).__name__, throughput))
        if not isinstance(steps, numeric_types + (type(None),)):
            raise ValueError("Invalid steps value[%s]: %s" % (type(steps).__name__, steps))
        if not isinstance(iterations, numeric_types + (type(None),)):
            raise ValueError("Invalid iterations value[%s]: %s" % (type(iterations).__name__, iterations))

        res = namedtuple("LoadSpec",
                         ('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
        return res(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                   iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        """
        Return resource files list
        """
        # copy so extending with lister results doesn't mutate the config list
        files_list = self.execution.get("files", [])[:]
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self._label if self._label else id(self))
class ScenarioExecutor(EngineModule):
    """
    Base class for executors that run a single scenario under a load profile.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """
    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"
    STEPS = "steps"
    LOAD_FMT = namedtuple("LoadSpec", "concurrency throughput ramp_up hold iterations duration steps")

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.provisioning = None
        self.execution = BetterDict()
        self.__scenario = None
        self.label = None
        self.widget = None
        self.reader = None
        self.delay = None
        self.start_time = None
        self.env = None
        # hook for provisioning to rewrite subprocess args in-place before execution
        self.preprocess_args = lambda x: None

    def has_results(self):
        """
        :return: True when a reader is attached and has buffered results
        """
        if self.reader and self.reader.buffer:
            return True
        else:
            return False

    def get_script_path(self, scenario=None):
        """
        Resolve scenario's script into a file path via engine's file search.

        :type scenario: Scenario
        """
        if scenario is None:
            scenario = self.get_scenario()
        script = scenario.get(Scenario.SCRIPT, None)
        return self.engine.find_file(script)

    def get_scenario(self, name=None, cache_scenario=True):
        """
        Returns scenario dict, extract if scenario is inlined

        :return: DictOfDicts
        """
        if name is None and self.__scenario is not None:
            return self.__scenario

        scenarios = self.engine.config.get("scenarios", force_set=True)

        if name is None:  # get current scenario
            exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
            label = self.execution.get('scenario', exc)

            # bare script path in place of a scenario name is also accepted
            is_script = isinstance(label, string_types) and label not in scenarios and \
                os.path.exists(self.engine.find_file(label))
            if isinstance(label, list):
                msg = "Invalid content of scenario, list type instead of dict or string: %s"
                raise TaurusConfigError(msg % label)
            if isinstance(label, dict) or is_script:
                # inlined scenario (or bare script) - extract it into top-level config
                self.log.debug("Extract %s into scenarios" % label)
                if isinstance(label, string_types):
                    scenario = BetterDict()
                    scenario.merge({Scenario.SCRIPT: label})
                else:
                    scenario = label

                path = self.get_script_path(Scenario(self.engine, scenario))
                if path is not None:
                    label = os.path.basename(path)
                if path is None or label in scenarios:
                    # avoid label collision with a checksum-based name
                    hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
                    label = 'autogenerated_' + hash_str[-10:]

                scenarios[label] = scenario
                self.execution['scenario'] = label

            self.label = label
        else:  # get scenario by name
            label = name

        exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
        scenario = scenarios.get(label, exc)
        scenario_obj = Scenario(self.engine, scenario)

        if name is None and cache_scenario:
            self.__scenario = scenario_obj

        return scenario_obj

    def get_load(self):
        """
        Helper method to read load specification.

        :return: LOAD_FMT namedtuple
        :raises TaurusConfigError: when any load value is not numeric or None
        """
        prov_type = self.engine.config.get(Provisioning.PROV)

        ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
        throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

        ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
        concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

        iterations = self.execution.get("iterations", None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
        steps = self.execution.get(ScenarioExecutor.STEPS, None)
        hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # which means infinite

        # FIX: messages previously said "Invalid throughput value" for steps/iterations
        msg = ''
        if not isinstance(concurrency, numeric_types + (type(None),)):
            msg += "Invalid concurrency value[%s]: %s " % (type(concurrency).__name__, concurrency)

        if not isinstance(throughput, numeric_types + (type(None),)):
            msg += "Invalid throughput value[%s]: %s " % (type(throughput).__name__, throughput)

        if not isinstance(steps, numeric_types + (type(None),)):
            msg += "Invalid steps value[%s]: %s " % (type(steps).__name__, steps)

        if not isinstance(iterations, numeric_types + (type(None),)):
            msg += "Invalid iterations value[%s]: %s " % (type(iterations).__name__, iterations)

        if msg:
            raise TaurusConfigError(msg)

        return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
                             iterations=iterations, duration=duration, steps=steps)

    def get_resource_files(self):
        """
        Return resource files list: executor-specific files plus user-listed 'files'
        """
        files_list = []
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        files_list.extend(self.execution.get("files", []))
        return files_list

    def __repr__(self):
        return "%s/%s" % (self.execution.get("executor", None), self.label if self.label else id(self))

    def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False):
        """
        Start a subprocess through the engine using this executor's env;
        `preprocess_args` hook may rewrite args in-place first.
        """
        self.preprocess_args(args)
        return self.engine.start_subprocess(args=args, cwd=cwd, stdout=stdout, stderr=stderr,
                                            stdin=stdin, shell=shell, env=self.env)
class EngineModule(object):
    """
    Base class for any BZT engine module

    :type engine: Engine
    :type settings: BetterDict
    """

    def __init__(self):
        self.engine = None
        self.settings = BetterDict()
        self.parameters = BetterDict()
        self.log = logging.getLogger('')

    def prepare(self):
        """
        Read configuration, prepare configs and tools. All lengthy checks and
        preparations belong here, keeping `startup` as fast as possible.
        """
        pass

    def startup(self):
        """
        Launch background processes and issue initiating API calls. Must be
        quick - heavy lifting belongs in `prepare`.
        """
        pass

    def check(self):
        """
        Tell whether the work should be finished

        :rtype: bool
        :return: True if should be finished
        """
        return False

    def shutdown(self):
        """
        Stop everything spawned during `startup`. Keep it fast; defer any
        lengthy work to `post_process`.
        """
        pass

    def post_process(self):
        """
        Perform all potentially long analysis and processing of run results
        """
        pass

    def _should_run(self):
        """
        :return: True when current provisioning matches the 'run-at' parameter
            (or no 'run-at' restriction is set)
        """
        target_prov = self.parameters.get("run-at", None)
        if target_prov is None:
            return True
        current_prov = self.engine.config.get(Provisioning.PROV)
        if current_prov == target_prov:
            return True
        self.log.debug("Should not run because of non-matching prov: %s != %s",
                       current_prov, target_prov)
        return False
def prepare(self):
    """
    1) Locate script or folder (generating a script from `requests` if needed)
    2) detect script type
    3) create runner instance, prepare runner
    """
    self.scenario = self.get_scenario()

    if "requests" in self.scenario:
        if self.scenario.get("requests"):
            # generate a test script from declarative requests
            self.scenario["script"] = self.__tests_from_requests()
        else:
            raise RuntimeError(
                "Nothing to test, no requests were provided in scenario")

    self.kpi_file = self.engine.create_artifact("selenium_tests_report", ".csv")
    self.err_jtl = self.engine.create_artifact("selenium_tests_err", ".xml")

    script_type = self.detect_script_type(self.scenario.get("script"))
    runner_config = BetterDict()  # fallback when script type matches no known runner
    if script_type == ".py":
        self.runner = NoseTester
        runner_config = self.settings.get("selenium-tools").get("nose")
    elif script_type == ".jar" or script_type == ".java":
        self.runner = JunitTester
        runner_config = self.settings.get("selenium-tools").get("junit")

    runner_config["script-type"] = script_type
    self.runner_working_dir = self.engine.create_artifact(
        runner_config.get("working-dir", "classes"), "")
    runner_config["working-dir"] = self.runner_working_dir
    # BetterDict.get(key, default) apparently stores the default when the key is
    # absent (bare calls like these populate config throughout this file) - these
    # fill remaining defaults in place.
    # FIX: removed redundant runner_config.get("working-dir", ...) - the key was
    # just set explicitly above, so that call was a no-op.
    runner_config.get("artifacts-dir", self.engine.artifacts_dir)
    runner_config.get("report-file", self.kpi_file)
    runner_config.get("err-file", self.err_jtl)
    runner_config.get("stdout", self.engine.create_artifact("junit", ".out"))
    runner_config.get("stderr", self.engine.create_artifact("junit", ".err"))

    self._cp_resource_files(self.runner_working_dir)

    # self.runner currently holds the runner class; replace it with an instance
    self.runner = self.runner(runner_config, self.scenario, self.log)
    self.runner.prepare()
    self.reader = JTLReader(self.kpi_file, self.log, self.err_jtl)
    if isinstance(self.engine.aggregator, ConsolidatingAggregator):
        self.engine.aggregator.add_underling(self.reader)
class ConsolidatingAggregator(EngineModule, ResultsProvider):
    """
    Merges datapoints from several underling result providers into
    consolidated per-timestamp datapoints.

    :type underlings: list[bzt.modules.aggregator.ResultsProvider]
    """

    # FIXME: it was oscillating with remote test of 100 servers
    def __init__(self):
        EngineModule.__init__(self)
        ResultsProvider.__init__(self)
        self.generalize_labels = True  # propagated to ResultsReader underlings in add_underling
        self.ignored_labels = []  # labels to skip; propagated to ResultsReader underlings
        self.underlings = []  # data sources drained by _process_underlings
        self.buffer = BetterDict()  # timestamp -> list of datapoints waiting to be merged
        self.buffer_len = 2  # seconds to hold a timestamp before merging ("buffer-seconds" setting)

    def prepare(self):
        """
        Read aggregation options from module settings
        """
        super(ConsolidatingAggregator, self).prepare()
        # track_percentiles presumably inherited from ResultsProvider - not visible here
        self.track_percentiles = self.settings.get("percentiles", self.track_percentiles)
        self.buffer_len = self.settings.get("buffer-seconds", self.buffer_len)
        self.ignored_labels = self.settings.get("ignore-labels", self.ignored_labels)
        self.generalize_labels = self.settings.get("generalize-labels", self.generalize_labels)

    def add_underling(self, underling):
        """
        Add source for aggregating

        :type underling: ResultsProvider
        """
        underling.track_percentiles = self.track_percentiles
        if isinstance(underling, ResultsReader):
            underling.ignored_labels = self.ignored_labels
            underling.generalize_labels = self.generalize_labels
            # underling.buffer_len = self.buffer_len  # NOTE: is it ok for underling to have the same buffer len?
        self.underlings.append(underling)

    def check(self):
        """
        Check if there is next aggregate data present

        :rtype: bool
        """
        # draining the generator performs the actual consolidation work
        for point in self.datapoints():
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP],
                           point[DataPoint.SOURCE_ID])
        return super(ConsolidatingAggregator, self).check()

    def post_process(self):
        """
        Process all remaining aggregate data
        """
        super(ConsolidatingAggregator, self).post_process()
        # final_pass=True flushes everything left in the buffer
        for point in self.datapoints(True):
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP],
                           point[DataPoint.SOURCE_ID])

    def _process_underlings(self, final_pass):
        # Pull fresh datapoints from every underling into self.buffer keyed by timestamp
        for underling in self.underlings:
            for data in [x for x in underling.datapoints(final_pass)]:
                tstamp = data[DataPoint.TIMESTAMP]
                if self.buffer:
                    mints = min(self.buffer.keys())
                    if tstamp < mints:
                        # a datapoint older than everything buffered would be lost
                        # (its window was already consolidated) - clamp it forward
                        self.log.warning("Putting datapoint %s into %s", tstamp, mints)
                        data[DataPoint.TIMESTAMP] = mints
                        tstamp = mints
                # NOTE(review): relies on BetterDict.get(key, default) storing the default,
                # otherwise the append would go to a throwaway list - confirm BetterDict semantics
                self.buffer.get(tstamp, []).append(data)

    def _calculate_datapoints(self, final_pass=False):
        """
        Override ResultsProvider._calculate_datapoints
        """
        self._process_underlings(final_pass)

        self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer), self.buffer.keys())
        if not self.buffer:
            return

        timestamps = sorted(self.buffer.keys())
        # consolidate a timestamp only once the newest buffered timestamp is at least
        # buffer_len seconds ahead of it, unless this is the final pass
        while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
            tstamp = timestamps.pop(0)
            self.log.debug("Merging into %s", tstamp)
            points_to_consolidate = self.buffer.pop(tstamp)
            point = DataPoint(tstamp, self.track_percentiles)
            for subresult in points_to_consolidate:
                self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
                point.merge_point(subresult)
            point.recalculate()
            yield point
def startup(self):
    """
    Start the Gatling tool as fast as possible; heavy preparation is
    expected to have happened earlier.
    """
    # must run before env setup: building the cmdline may extend self.jar_list
    cmdline = self._build_cmdline()

    self.start_time = time.time()
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")

    params_for_scala = self._fill_scala_params()
    env = self._build_env(params_for_scala)

    self.process = self.execute(cmdline,
                                stdout=self.stdout_file,
                                stderr=self.stderr_file,
                                env=env)

def _build_cmdline(self):
    """Compose Gatling launcher command line; side effect: appends script jar to self.jar_list."""
    simulation = self.get_scenario().get("simulation")
    datadir = os.path.realpath(self.engine.artifacts_dir)

    if os.path.isfile(self.script):
        if self.script.endswith('.jar'):
            # pre-compiled simulation: goes on the classpath, no source folder
            self.jar_list += os.pathsep + self.script
            simulation_folder = None
        else:
            simulation_folder = os.path.dirname(get_full_path(self.script))
    else:
        # script is a folder of simulations
        simulation_folder = self.script

    cmdline = [self.launcher]
    cmdline += ["-df", datadir, "-rf", datadir]
    cmdline += ["-on", self.dir_prefix, "-m"]
    if simulation_folder:
        cmdline += ["-sf", simulation_folder]
    if simulation:
        cmdline += ["-s", simulation]
    return cmdline

def _fill_scala_params(self):
    """Merge scenario/load settings into the 'properties' dict passed to Gatling via -D options."""
    params_for_scala = self.settings.get('properties')
    load = self.get_load()
    scenario = self.get_scenario()

    if scenario.get('timeout', None) is not None:
        params_for_scala['gatling.http.ahc.requestTimeout'] = int(
            dehumanize_time(scenario.get('timeout')) * 1000)
    if scenario.get('keepalive', True):
        params_for_scala['gatling.http.ahc.keepAlive'] = 'true'
    else:
        params_for_scala['gatling.http.ahc.keepAlive'] = 'false'
    if load.concurrency is not None:
        params_for_scala['concurrency'] = load.concurrency
    if load.ramp_up is not None:
        params_for_scala['ramp-up'] = int(load.ramp_up)
    if load.hold is not None:  # NOTE(review): hold comes from dehumanize_time, likely never None
        params_for_scala['hold-for'] = int(load.hold)
    if load.iterations is not None and load.iterations != 0:
        params_for_scala['iterations'] = int(load.iterations)
    return params_for_scala

def _build_env(self, params_for_scala):
    """Build subprocess environment: JAVA_OPTS with -D params, NO_PAUSE, classpath extensions."""
    env = BetterDict()
    env.merge(dict(os.environ))

    java_opts = ''.join([
        " -D%s=%s" % (key, params_for_scala[key]) for key in params_for_scala
    ])
    java_opts += ' ' + env.get('JAVA_OPTS', '') + ' ' + self.settings.get(
        'java-opts', '')
    env.merge({"JAVA_OPTS": java_opts, "NO_PAUSE": "TRUE"})

    if self.jar_list:
        java_classpath = env.get('JAVA_CLASSPATH', '')
        compilation_classpath = env.get('COMPILATION_CLASSPATH', '')
        java_classpath += self.jar_list
        compilation_classpath += self.jar_list
        env.merge({
            'JAVA_CLASSPATH': java_classpath,
            'COMPILATION_CLASSPATH': compilation_classpath
        })
    return env
class ScenarioExecutor(EngineModule):
    """
    Base class for executors that run a single scenario under a load profile.

    :type provisioning: engine.Provisioning
    :type execution: BetterDict
    """
    RAMP_UP = "ramp-up"
    HOLD_FOR = "hold-for"
    CONCURR = "concurrency"
    THRPT = "throughput"
    EXEC = "execution"

    def __init__(self):
        super(ScenarioExecutor, self).__init__()
        self.provisioning = None
        self.execution = BetterDict()
        self.__scenario = None

    def get_scenario(self):
        """
        Returns scenario dict, either inlined, or referenced by alias

        :return: DictOfDicts
        """
        if self.__scenario is None:
            # NOTE(review): a missing 'scenario' key yields {} here, silently producing
            # an empty Scenario instead of raising - confirm this is intended
            scenario = self.execution.get('scenario', {})
            if isinstance(scenario, six.string_types):
                scenarios = self.engine.config.get("scenarios")
                if scenario not in scenarios:
                    raise ValueError("Scenario not found in scenarios: %s" % scenario)
                scenario = scenarios.get(scenario)
                self.__scenario = Scenario(scenario)
            elif isinstance(scenario, dict):
                self.__scenario = Scenario(scenario)
            else:
                raise ValueError("Scenario not configured properly: %s" % scenario)
        return self.__scenario

    def get_load(self):
        """
        Helper method to read load specification from execution config.

        :return: namedtuple LoadSpec(concurrency, throughput, ramp_up, hold,
            iterations, duration)
        """
        prov_type = self.engine.config.get(Provisioning.PROV, None)

        ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
        throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)

        ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
        concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)

        iterations = self.execution.get("iterations", None)
        ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
        hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))

        # FIX: removed redundant "ramp_up = None" reassignment inside this branch
        if ramp_up is None:
            duration = hold
        else:
            ramp_up = dehumanize_time(ramp_up)
            duration = hold + ramp_up

        if duration and not iterations:
            iterations = 0  # which means infinite

        res = namedtuple("LoadSpec",
                         ('concurrency', "throughput", 'ramp_up', 'hold',
                          'iterations', 'duration'))
        return res(concurrency=concurrency, ramp_up=ramp_up,
                   throughput=throughput, hold=hold, iterations=iterations,
                   duration=duration)

    def get_resource_files(self):
        """
        Return resource files list: user-listed 'files' plus executor-specific ones
        """
        files_list = self.execution.get("files", [])
        if isinstance(self, FileLister):
            files_list.extend(self.resource_files())
        return files_list
def prepare(self):
    """
    1) Locate script or folder (generating a script from `requests` if needed)
    2) detect script type
    3) create runner instance, prepare runner
    """
    self.scenario = self.get_scenario()

    if "requests" in self.scenario:
        if self.scenario.get("requests"):
            # generate a test script from declarative requests
            self.scenario["script"] = self.__tests_from_requests()
        else:
            raise RuntimeError("Nothing to test, no requests were provided in scenario")

    self.kpi_file = self.engine.create_artifact("selenium_tests_report", ".csv")
    self.err_jtl = self.engine.create_artifact("selenium_tests_err", ".xml")

    script_type = self.detect_script_type(self.scenario.get("script"))
    runner_config = BetterDict()  # fallback when script type matches no known runner
    if script_type == ".py":
        self.runner = NoseTester
        runner_config = self.settings.get("selenium-tools").get("nose")
    elif script_type == ".jar" or script_type == ".java":
        self.runner = JunitTester
        runner_config = self.settings.get("selenium-tools").get("junit")

    runner_config["script-type"] = script_type
    self.runner_working_dir = self.engine.create_artifact(runner_config.get("working-dir", "classes"), "")
    runner_config["working-dir"] = self.runner_working_dir
    # BetterDict.get(key, default) apparently stores the default when the key is
    # absent (bare calls like these populate config throughout this file) - these
    # fill remaining defaults in place.
    # FIX: removed redundant runner_config.get("working-dir", ...) - the key was
    # just set explicitly above, so that call was a no-op.
    runner_config.get("artifacts-dir", self.engine.artifacts_dir)
    runner_config.get("report-file", self.kpi_file)
    runner_config.get("err-file", self.err_jtl)
    runner_config.get("stdout", self.engine.create_artifact("junit", ".out"))
    runner_config.get("stderr", self.engine.create_artifact("junit", ".err"))

    self._cp_resource_files(self.runner_working_dir)

    # self.runner currently holds the runner class; replace it with an instance
    self.runner = self.runner(runner_config, self.scenario, self.log)
    self.runner.prepare()
    self.reader = JTLReader(self.kpi_file, self.log, self.err_jtl)
    if isinstance(self.engine.aggregator, ConsolidatingAggregator):
        self.engine.aggregator.add_underling(self.reader)