def __get_csv_dict(self, label, kpiset):
    kpi_copy = copy.deepcopy(kpiset)
    res = OrderedDict()
    res['label'] = label

    # sort label
    for key in sorted(kpi_copy.keys()):
        res[key] = kpi_copy[key]

    del res[KPISet.ERRORS]
    del res[KPISet.RESP_TIMES]
    del res[KPISet.RESP_CODES]
    del res[KPISet.PERCENTILES]

    percentiles = list(iteritems(kpiset[KPISet.PERCENTILES]))
    for level, val in sorted(percentiles, key=lambda lv: (float(lv[0]), lv[1])):
        res['perc_%s' % level] = val

    resp_codes = list(iteritems(kpiset[KPISet.RESP_CODES]))
    for rcd, val in sorted(resp_codes):
        res['rc_%s' % rcd] = val

    for key in res:
        if isinstance(res[key], float):
            res[key] = "%.5f" % res[key]

    return res
def get_interpolated_paths(self, parameter_interpolation=INTERPOLATE_WITH_VALUES):
    paths = OrderedDict()
    replacer_regex = lambda name: r'(?<!\$)(\{' + name + r'\})'  # replace '{name}', but skip '${name}'
    for path, path_obj in iteritems(self.paths):
        new_path = path
        for method in Swagger.METHODS:
            operation = getattr(path_obj, method)
            if operation is not None:
                for _, param in iteritems(operation.parameters):
                    if param.location == "path":
                        name = param.name
                        if parameter_interpolation == Swagger.INTERPOLATE_WITH_VALUES:
                            value = str(Swagger.get_data_for_type(param.type, param.format))
                        elif parameter_interpolation == Swagger.INTERPOLATE_WITH_JMETER_VARS:
                            value = "${" + param.name + "}"
                        else:
                            value = None
                        if value is not None:
                            new_path = re.sub(replacer_regex(name), value, new_path)
        for _, param in iteritems(path_obj.parameters):
            if param.location == "path":
                name = param.name
                if parameter_interpolation == Swagger.INTERPOLATE_WITH_VALUES:
                    value = str(Swagger.get_data_for_type(param.type, param.format))
                elif parameter_interpolation == Swagger.INTERPOLATE_WITH_JMETER_VARS:
                    value = "${" + param.name + "}"
                else:
                    value = None
                if value is not None:
                    new_path = re.sub(replacer_regex(name), value, new_path)
        path_obj = copy.deepcopy(path_obj)
        paths[new_path] = path_obj
    return paths
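# A minimal, self-contained sketch of the path-interpolation regex used above:
# '(?<!\$){name}' matches a bare '{name}' placeholder but leaves a JMeter-style
# '${name}' reference untouched. The path strings and parameter name below are
# illustrative only.
import re

def interpolate_path(path, name, value):
    # negative lookbehind: do not touch '${name}', only '{name}'
    return re.sub(r'(?<!\$)(\{' + name + r'\})', value, path)

assert interpolate_path("/pets/{petId}", "petId", "123") == "/pets/123"
assert interpolate_path("/pets/${petId}", "petId", "123") == "/pets/${petId}"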
def monitoring_data(self, data):
    for item in data:
        if item['source'] not in self.host_metrics:
            self.host_metrics[item['source']] = OrderedDict()

        for key in sorted(item.keys()):
            if key not in ("source", "ts"):
                color = ''
                if key in self.host_metrics[item['source']]:
                    if self.host_metrics[item['source']][key][0] < item[key]:
                        color = 'warmer'
                    elif self.host_metrics[item['source']][key][0] > item[key]:
                        color = 'colder'

                self.host_metrics[item['source']][key] = (item[key], color)

    text = []
    for host, metrics in iteritems(self.host_metrics):
        text.append(('stat-hdr', " %s \n" % host))

        if len(metrics):
            maxwidth = max([len(key) for key in metrics.keys()])

            for metric, value in iteritems(metrics):
                values = (' ' * (maxwidth - len(metric)), metric, value[0])
                text.append((value[1], " %s%s: %.3f\n" % values))

    self.display.set_text(text)
    self._invalidate()
def update(self):
    if not self._sessions:
        self._sessions = self.prov.client.get_master_sessions(self.prov.client.active_session_id)
        if not self._sessions:
            return

    mapping = BetterDict()
    cnt = 0
    for session in self._sessions:
        try:
            cnt += 1
            name_split = session['name'].split('/')
            location = session['configuration']['location']
            count = session['configuration']['serversCount']
            mapping.get(name_split[0]).get(name_split[1])[location] = count
        except KeyError:
            self._sessions = None

    txt = "%s #%s\n" % (self.prov.test_name, self.prov.client.active_session_id)
    for executor, scenarios in iteritems(mapping):
        txt += " %s" % executor
        for scenario, locations in iteritems(scenarios):
            txt += " %s:\n" % scenario
            for location, count in iteritems(locations):
                txt += " Agents in %s: %s\n" % (location, count)

    self.text.set_text(txt)
def send_kpi_data(self, data_buffer, is_check_response=True, is_final=False):
    """
    Sends online data

    :param is_check_response:
    :type data_buffer: list[bzt.modules.aggregator.DataPoint]
    """
    data = []

    for sec in data_buffer:
        self.first_ts = min(self.first_ts, sec[DataPoint.TIMESTAMP])
        self.last_ts = max(self.last_ts, sec[DataPoint.TIMESTAMP])

        for lbl, item in iteritems(sec[DataPoint.CURRENT]):
            if lbl == '':
                label = "ALL"
            else:
                label = lbl

            json_item = None
            for lbl_item in data:
                if lbl_item["name"] == label:
                    json_item = lbl_item
                    break

            if not json_item:
                json_item = self.__label_skel(label)
                data.append(json_item)

            interval_item = self.__interval_json(item, sec)
            for r_code, cnt in iteritems(item[KPISet.RESP_CODES]):
                interval_item['rc'].append({"n": cnt, "rc": r_code})

            json_item['intervals'].append(interval_item)

            cumul = sec[DataPoint.CUMULATIVE][lbl]
            json_item['n'] = cumul[KPISet.SAMPLE_COUNT]
            json_item["summary"] = self.__summary_json(cumul)

    data = {"labels": data, "sourceID": id(self)}
    if is_final:
        data['final'] = True

    url = self.data_address + "/submit.php?session_id=%s&signature=%s&test_id=%s&user_id=%s"
    url = url % (self.active_session_id, self.data_signature, self.test_id, self.user_id)
    url += "&pq=0&target=%s&update=1" % self.kpi_target
    hdr = {"Content-Type": " application/json"}
    response = self._request(url, to_json(data), headers=hdr)

    if response and 'response_code' in response and response['response_code'] != 200:
        raise RuntimeError("Failed to feed data, response code %s" % response['response_code'])

    if response and 'result' in response and is_check_response:
        result = response['result']['session']
        self.log.debug("Result: %s", result)
        if 'statusCode' in result and result['statusCode'] > 100:
            self.log.info("Test was stopped through Web UI: %s", result['status'])
            raise ManualShutdown("The test was interrupted through Web UI")
def calc_aggregates(self):
    tab_pid = self.tracing_tab_pid
    if tab_pid not in self.memory_per_process:
        return

    memory_per_ts = self.reaggregate_by_ts(self.memory_per_process)

    tab_memory = [process_stats[tab_pid] for ts, process_stats in iteritems(memory_per_ts)]
    yield self.AVERAGE_TAB_MEMORY, average(tab_memory)

    browser_memory = [sum(process_memory for _, process_memory in iteritems(process_stats))
                      for _, process_stats in iteritems(memory_per_ts)]
    yield self.AVERAGE_BROWSER_MEMORY, average(browser_memory)
def startup(self):
    args = [self.tool_path]
    load = self.get_load()

    if load.hold:
        hold = int(ceil(dehumanize_time(load.hold)))
        args += ['-t', str(hold)]
    elif load.iterations:
        args += ['-n', str(load.iterations)]
    else:
        args += ['-n', '1']  # 1 iteration by default

    load_concurrency = load.concurrency if load.concurrency is not None else 1
    args += ['-c', str(load_concurrency)]

    args += ['-d']  # do not print 'Processed *00 requests' every 100 requests or so
    args += ['-g', str(self.__tsv_file_name)]  # dump stats to TSV file

    # add global scenario headers
    for key, val in iteritems(self.scenario.get_headers()):
        args += ['-H', "%s: %s" % (key, val)]

    requests = list(self.scenario.get_requests())
    if not requests:
        raise ValueError("You must specify at least one request for ab")
    if len(requests) > 1:
        self.log.warning("ab doesn't support multiple requests."
                         " Only first one will be used.")
    request = requests[0]

    # add request-specific headers
    for header in request.headers:
        for key, val in iteritems(header):
            args += ['-H', "%s: %s" % (key, val)]

    if request.method != 'GET':
        raise ValueError("ab supports only GET requests")

    keepalive = True
    if request.config.get('keepalive') is not None:
        keepalive = request.config.get('keepalive')
    elif self.scenario.get('keepalive') is not None:
        keepalive = self.scenario.get('keepalive')
    if keepalive:
        args += ['-k']

    args += [request.url]

    self.reader.setup(load_concurrency, request.label)

    self.start_time = time.time()
    self.process = shell_exec(args, stdout=self.__out, stderr=self.__err)
def aggregate_by_ts(self, pid_stats, aggregate_func=average):
    # TODO: configurable granularity?
    per_ts = dict()  # ts -> [measurement at ts]
    for offset, value in iteritems(pid_stats):
        base_ts = int(self.convert_ts(offset))
        if base_ts not in per_ts:
            per_ts[base_ts] = []
        per_ts[base_ts].append(value)
    return {
        ts: aggregate_func(values_at_ts)
        for ts, values_at_ts in iteritems(per_ts)
    }
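# A minimal, self-contained sketch of the bucketing pattern used by
# aggregate_by_ts(): measurements keyed by a float offset are grouped into
# whole-second buckets, then each bucket is reduced with an aggregate function.
# The sample data and the plain int() truncation stand in for convert_ts(),
# which in the real class maps trace offsets to wall-clock timestamps.
def aggregate_by_second(offset_stats, aggregate_func=lambda vals: sum(vals) / len(vals)):
    per_ts = {}
    for offset, value in offset_stats.items():
        base_ts = int(offset)  # bucket key: whole seconds
        per_ts.setdefault(base_ts, []).append(value)
    return {ts: aggregate_func(vals) for ts, vals in per_ts.items()}

print(aggregate_by_second({0.1: 10.0, 0.7: 20.0, 1.2: 30.0}))  # {0: 15.0, 1: 30.0}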
def __get_xml_summary(self, label, kpiset):
    elem = etree.Element("Group", label=label)
    for kpi_name, kpi_val in iteritems(kpiset):
        if kpi_name in (KPISet.ERRORS, KPISet.RESP_TIMES):
            continue

        if isinstance(kpi_val, dict):
            for param_name, param_val in iteritems(kpi_val):
                elem.append(self.__get_kpi_xml(kpi_name, param_val, param_name))
        else:
            elem.append(self.__get_kpi_xml(kpi_name, kpi_val))

    return elem
def startup(self):
    args = [self.tool.tool_path]
    load = self.get_load()
    load_iterations = load.iterations or 1
    load_concurrency = load.concurrency or 1

    if load.hold:
        hold = int(ceil(dehumanize_time(load.hold)))
        args += ['-t', str(hold)]
    else:
        args += ['-n', str(load_iterations * load_concurrency)]  # ab waits for total number of iterations

    args += ['-c', str(load_concurrency)]
    args += ['-d']  # do not print 'Processed *00 requests' every 100 requests or so
    args += ['-r']  # do not crash on socket level errors

    if self.tool.version and LooseVersion(self.tool.version) >= LooseVersion("2.4.7"):
        args += ['-l']  # accept variable-len responses

    args += ['-g', str(self._tsv_file)]  # dump stats to TSV file

    # add global scenario headers
    for key, val in iteritems(self.scenario.get_headers()):
        args += ['-H', "%s: %s" % (key, val)]

    requests = self.scenario.get_requests()
    if not requests:
        raise TaurusConfigError("You must specify at least one request for ab")
    if len(requests) > 1:
        self.log.warning("ab doesn't support multiple requests. Only first one will be used.")

    request = self.__first_http_request()
    if request is None:
        raise TaurusConfigError("ab supports only HTTP requests, while scenario doesn't have any")

    # add request-specific headers
    for key, val in iteritems(request.headers):
        args += ['-H', "%s: %s" % (key, val)]

    if request.method != 'GET':
        raise TaurusConfigError("ab supports only GET requests, but '%s' is found" % request.method)

    if request.priority_option('keepalive', default=True):
        args += ['-k']

    args += [request.url]

    self.reader.setup(load_concurrency, request.label)

    self.process = self._execute(args)
def _extract_toplevel_definitions(self):
    self.info = self.swagger.get("info", {})

    for name, schema in iteritems(self.swagger.get("definitions", {})):
        self.definitions[name] = Swagger.Definition(name=name, schema=schema)

    for name, response in iteritems(self.swagger.get("responses", {})):
        self.responses[name] = Swagger.Response(name=name, description=response["description"],
                                                schema=response.get("schema"),
                                                headers=response.get("headers"))

    for name, param in iteritems(self.swagger.get("parameters", {})):
        parameter = Swagger.Parameter(name=name, location=param.get("in"),
                                      description=param.get("description"),
                                      required=param.get("required"),
                                      schema=param.get("schema"),
                                      type=param.get("type"),
                                      format=param.get("format"))
        self.parameters[name] = parameter
def file_replacer(container):
    if isinstance(container, dict):
        for key, val in iteritems(container):
            if val in rfiles:
                container[key] = os.path.basename(val)
                if container[key] != val:
                    self.log.info("Replaced %s with %s in %s", val, container[key], key)
def test_aggr_metrics(self):
    obj = ChromeProfiler()
    obj.engine = EngineEmul()
    obj.settings.merge({
        "processors": {
            "trace": {
                "class": "bzt.modules.chrome.TraceProcessor",
                "extractors": ["bzt.modules.chrome.MemoryMetricsExtractor"]
            }
        }
    })

    shutil.copy(__dir__() + "/../chrome/trace.json", obj.engine.artifacts_dir)

    obj.prepare()
    obj.startup()
    obj.check()

    metrics = obj.get_aggr_metrics()
    for metric, _ in iteritems(metrics):
        self.assertIsNotNone(obj.get_metric_label(metric))

    self.assertAlmostEqual(metrics["memory-average-tab"], 97.25, delta=0.01)
    self.assertAlmostEqual(metrics["memory-average-browser"], 97.25, delta=0.01)
def __gen_sessions(self, scenario):
    sessions = etree.Element("sessions")
    session = etree.Element("session", name="taurus_requests", probability="100", type="ts_http")
    for request in scenario.get_requests():
        if not isinstance(request, HTTPRequest):
            msg = "Tsung config generator doesn't support '%s' blocks, skipping"
            self.log.warning(msg, request.NAME)
            continue

        request_elem = etree.Element("request")
        http_elem = etree.Element("http", url=request.url, method=request.method, version="1.1")
        if request.body:
            http_elem.set('contents', request.body)

        headers = copy.deepcopy(scenario.get_headers())
        headers.update(copy.deepcopy(request.headers))
        for header_name, header_value in iteritems(headers):
            http_elem.append(etree.Element("http_header", name=header_name, value=header_value))

        request_elem.append(http_elem)
        session.append(request_elem)
        if request.think_time is not None:
            think_time = int(dehumanize_time(request.think_time))
            session.append(etree.Element("thinktime", value=str(think_time), random="false"))

    sessions.append(session)
    return sessions
def _add_local_security(self, request, securities, scenario, disable_basic=False):
    if not securities:
        return

    # TODO: disable global security for request
    security = securities[0]
    for sec_name, _ in iteritems(security):
        secdef = self.swagger.security_defs.get(sec_name)
        if not secdef:
            self.log.warning("Security definition %r not found, skipping" % sec_name)
            continue

        if secdef.type == 'basic':
            if not disable_basic:
                self._insert_local_basic_auth(request, scenario)
        elif secdef.type == 'apiKey':
            if secdef.name is None:
                self.log.warning("apiKey security definition has no header name, skipping")
                continue
            if secdef.location is None:
                self.log.warning("apiKey location (`in`) is not given, assuming header")
                secdef.location = 'header'
            self._insert_local_apikey_auth(request, scenario, secdef.name, sec_name, secdef.location)
        elif secdef.type == 'oauth2':
            self.log.warning("OAuth2 security is not yet supported, skipping")
            continue
def _add_global_security(self, scenario, global_security, global_vars):
    if not global_security:
        return

    security = global_security[0]
    for sec_name, _ in iteritems(security):
        secdef = self.swagger.security_defs.get(sec_name)
        if not secdef:
            self.log.warning("Security definition %r not found, skipping" % sec_name)
            continue

        if secdef.type == 'basic':
            self._insert_global_basic_auth(scenario, global_vars)
        elif secdef.type == 'apiKey':
            if secdef.name is None:
                self.log.warning("apiKey security definition has no header name, skipping")
                continue
            if secdef.location is None:
                self.log.warning("apiKey location (`in`) is not given, assuming header")
                secdef.location = 'header'
            self._insert_global_apikey_auth(scenario, secdef.name, sec_name, secdef.location, global_vars)
        elif secdef.type == 'oauth2':
            self.log.warning("OAuth2 security is not yet supported, skipping")
            continue
def _build_request(self, request, scenario):
    path = self._get_request_path(request, scenario)
    http = "%s %s HTTP/1.1\r\n" % (request.method, path)
    headers = BetterDict()
    headers.merge({"Host": self.hostname})
    if not scenario.get("keepalive", True):
        headers.merge({"Connection": 'close'})  # HTTP/1.1 implies keep-alive by default

    body = ""
    if isinstance(request.body, dict):
        if request.method != "GET":
            body = urlencode(request.body)
    elif isinstance(request.body, string_types):
        body = request.body
    elif request.body:
        raise ValueError("Cannot handle 'body' option of type %s: %s" % (type(request.body), request.body))

    if body:
        headers.merge({"Content-Length": len(body)})

    headers.merge(scenario.get("headers"))
    headers.merge(request.headers)

    for header, value in iteritems(headers):
        http += "%s: %s\r\n" % (header, value)
    http += "\r\n%s" % (body,)
    return http
def startup(self):
    args = [self.tool_path]
    load = self.get_load()

    if load.iterations:
        args += ['--reps', str(load.iterations)]
    elif load.hold:
        hold_for = ceil(dehumanize_time(load.hold))
        args += ['--time', '%sS' % hold_for]
    else:
        raise ValueError("You must specify either 'hold-for' or 'iterations' for siege")

    if self.scenario.get('think-time'):
        think_time = dehumanize_time(self.scenario.get('think-time'))
        args += ['--delay', str(think_time)]
    else:
        args += ['--benchmark']

    load_concurrency = load.concurrency
    args += ['--concurrent', str(load_concurrency)]
    self.reader.concurrency = load_concurrency

    args += ['--file', self.__url_name]

    for key, val in iteritems(self.scenario.get_headers()):
        args += ['--header', "%s: %s" % (key, val)]

    env = BetterDict()
    env.merge(dict(environ))
    env.merge({"SIEGERC": self.__rc_name})

    self.start_time = time.time()
    self.process = shell_exec(args, stdout=self.__out, stderr=self.__err, env=env)
def __jtl_writer(filename, label, flags):
    """
    Generates JTL writer

    :param filename:
    :return:
    """
    jtl = etree.Element("stringProp", {"name": "filename"})
    jtl.text = filename

    name = etree.Element("name")
    name.text = "saveConfig"
    value = etree.Element("value")
    value.set("class", "SampleSaveConfiguration")

    for key, val in iteritems(flags):
        value.append(JMX._flag(key, val))

    obj_prop = etree.Element("objProp")
    obj_prop.append(name)
    obj_prop.append(value)

    kpi_listener = etree.Element("ResultCollector",
                                 testname=label,
                                 testclass="ResultCollector",
                                 guiclass="SimpleDataWriter")
    kpi_listener.append(jtl)
    kpi_listener.append(obj_prop)
    return kpi_listener
def shell_exec(args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
    """
    Wrapper for subprocess starting

    :param stderr:
    :param stdout:
    :param cwd:
    :param stdin:
    :type args: basestring or list
    :return:
    """
    if stdout and not isinstance(stdout, int) and not isinstance(stdout, file_type):
        logging.warning("stdout is not IOBase: %s", stdout)
        stdout = None

    if stderr and not isinstance(stderr, int) and not isinstance(stderr, file_type):
        logging.warning("stderr is not IOBase: %s", stderr)
        stderr = None

    if isinstance(args, string_types) and not shell:
        args = shlex.split(args, posix=not is_windows())
    logging.getLogger(__name__).debug("Executing shell: %s", args)

    if env:
        env = {k: str(v) for k, v in iteritems(env)}

    if is_windows():
        return Popen(args, stdout=stdout, stderr=stderr, stdin=stdin, bufsize=0,
                     cwd=cwd, shell=shell, env=env)
    else:
        return Popen(args, stdout=stdout, stderr=stderr, stdin=stdin, bufsize=0,
                     preexec_fn=os.setpgrp, close_fds=True, cwd=cwd, shell=shell, env=env)
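# A minimal usage sketch for the shell_exec() wrapper above, relying only on
# the signature shown there (in Taurus it is provided by bzt.utils). The
# command string and log file name are illustrative only; string commands are
# split with shlex unless shell=True is requested.
from subprocess import PIPE

def run_and_wait(cmd, log_path):
    with open(log_path, "wb") as out:
        process = shell_exec(cmd, stdout=out, stderr=PIPE)
        return process.wait()  # a Popen object is returned; the caller owns its lifecycle

# exit_code = run_and_wait("echo hello", "stdout.log")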
def process_functional(self, xunit):
    for suite_name, samples in iteritems(self.cumulative_results):
        duration = max(s.start_time for s in samples) - min(s.start_time for s in samples)
        duration += max(samples, key=lambda s: s.start_time).duration
        attrs = {
            "name": suite_name,
            "tests": str(len(samples)),
            "errors": str(len([sample for sample in samples if sample.status == "BROKEN"])),
            "skipped": str(len([sample for sample in samples if sample.status == "SKIPPED"])),
            "failures": str(len([sample for sample in samples if sample.status == "FAILED"])),
            "time": str(round(duration, 3)),
            # TODO: "timestamp" attribute
        }
        xunit.add_test_suite(suite_name, attributes=attrs)

        for sample in samples:
            attrs = {
                "classname": sample.test_suite,
                "name": sample.test_case,
                "time": str(round(sample.duration, 3))
            }
            children = []

            if sample.status == "BROKEN":
                error = etree.Element("error", type=sample.error_msg)
                if sample.error_trace:
                    error.text = sample.error_trace
                children.append(error)
            elif sample.status == "FAILED":
                failure = etree.Element("failure", message=sample.error_msg)
                if sample.error_trace:
                    failure.text = sample.error_trace
                children.append(failure)
            elif sample.status == "SKIPPED":
                skipped = etree.Element("skipped")
                children.append(skipped)

            xunit.add_test_case(suite_name, attributes=attrs, children=children)
def startup(self):
    args = [self.tool.tool_path]
    load = self.get_load()

    if load.iterations:
        args += ['--reps', str(load.iterations)]
    elif load.hold:
        hold_for = ceil(dehumanize_time(load.hold))
        args += ['--time', '%sS' % hold_for]
    else:
        raise TaurusConfigError("Siege: You must specify either 'hold-for' or 'iterations'")

    think_time = self.scenario.get_think_time()
    if think_time:
        args += ['--delay', str(dehumanize_time(think_time))]
    else:
        args += ['--benchmark']

    load_concurrency = load.concurrency
    args += ['--concurrent', str(load_concurrency)]
    self.reader.concurrency = load_concurrency

    args += ['--file', self.__url_name]

    for key, val in iteritems(self.scenario.get_headers()):
        args += ['--header', "%s: %s" % (key, val)]

    self.env.set({"SIEGERC": self.__rc_name})

    self.process = self._execute(args)
def _load_tasks(self, stage, container):
    if not isinstance(self.parameters.get(stage, []), list):
        self.parameters[stage] = [self.parameters[stage]]

    for index, stage_task in enumerate(self.parameters.get(stage, [])):
        stage_task = ensure_is_dict(self.parameters[stage], index, "command")
        task_config = self.parameters[stage][index]

        default_cwd = self.settings.get("default-cwd", None)
        cwd = self.engine.find_file(task_config.get("cwd", default_cwd))
        if cwd is None:
            working_dir = self.engine.default_cwd
        elif cwd == 'artifacts-dir':
            working_dir = self.engine.artifacts_dir
        else:
            working_dir = cwd

        env = BetterDict()
        env.merge({k: os.environ.get(k) for k in os.environ.keys()})
        env.merge(self.settings.get('env'))
        env.merge(task_config.get('env'))
        env.merge({"PYTHONPATH": working_dir})
        if os.getenv("PYTHONPATH"):
            env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']
        env[ARTIFACTS_DIR_ENVVAR] = self.engine.artifacts_dir

        for name, value in iteritems(env):
            env[str(name)] = str(value)

        task = Task(task_config, self.log, working_dir, env)
        container.append(task)
        self.log.debug("Added %s task: %s", stage, stage_task)
def _handle_parameters(self, parameters):
    query_params = OrderedDict()
    form_data = {}
    request_body = None
    headers = {}
    for _, param in iteritems(parameters):
        if not param.required:
            continue

        if param.location == "header":
            name = param.name
            value = Swagger.get_data_for_type(param.type, param.format)
            headers[name] = value
        elif param.location == "query":
            name = param.name
            value = Swagger.get_data_for_type(param.type, param.format)
            query_params[name] = value
        elif param.location == "formData":
            name = param.name
            value = Swagger.get_data_for_type(param.type, param.format)
            form_data[name] = value
        elif param.location == "body":
            request_body = Swagger.get_data_for_schema(param.schema)
        elif param.location == "path":
            pass  # path parameters are resolved at a different level
        else:
            self.log.warning("Unsupported parameter location (%s). Skipping", param.location)

    return query_params, form_data, request_body, headers
def prepare(self):
    super(PassFailStatus, self).prepare()

    # TODO: remove "criterias" support in three months
    criterias = self.parameters.get("criterias", [])
    if criterias:
        self.log.warning('"criterias" section name is deprecated, use "criteria" instead')

    criteria = self.parameters.get("criteria", criterias)

    if isinstance(criteria, dict):
        crit_iter = iteritems(criteria)
    else:
        crit_iter = enumerate(criteria)

    for idx, crit_config in crit_iter:
        if isinstance(crit_config, string_types):
            crit_config = DataCriterion.string_to_config(crit_config)
            self.parameters['criteria'][idx] = crit_config
        crit = load_class(crit_config.get('class', DataCriterion.__module__ + "." + DataCriterion.__name__))
        crit_instance = crit(crit_config, self)
        assert isinstance(crit_instance, FailCriterion)
        if isinstance(idx, string_types):
            crit_instance.message = idx
        self.criteria.append(crit_instance)

    if isinstance(self.engine.aggregator, ResultsProvider):
        self.engine.aggregator.add_listener(self)
def build_source_code(self):
    self.log.debug("Generating Python script for Grinder")
    self.root.append(self.gen_comment("This script was generated by Taurus", indent=0))
    self.root.append(self.add_imports())
    self.root.append(self.gen_new_line(indent=0))

    default_address = self.scenario.get("default-address", "")
    url_arg = "url=%r" % default_address if default_address else ""
    self.root.append(self.gen_statement('request = HTTPRequest(%s)' % url_arg, indent=0))
    self.root.append(self.gen_statement('test = Test(1, "BZT Requests")', indent=0))
    self.root.append(self.gen_statement('test.record(request)', indent=0))
    self.root.append(self.gen_new_line(indent=0))

    self.root.append(self.gen_statement("defaults = HTTPPluginControl.getConnectionDefaults()", indent=0))
    self.root.append(self.gen_statement("utilities = HTTPPluginControl.getHTTPUtilities()", indent=0))

    headers = self.scenario.get_headers()
    if headers:
        self.root.append(self.gen_statement("defaults.setDefaultHeaders([", indent=0))
        for header, value in iteritems(headers):
            self.root.append(self.gen_statement("NVPair(%r, %r)," % (header, value), indent=4))
        self.root.append(self.gen_statement("])", indent=0))

    global_timeout = dehumanize_time(self.scenario.get("timeout", None))
    if global_timeout:
        self.root.append(self.gen_statement("defaults.setTimeout(%s)" % int(global_timeout * 1000), indent=0))

    cookie_flag = int(self.scenario.get("store-cookie", True))
    self.root.append(self.gen_statement("defaults.setUseCookies(%s)" % cookie_flag, indent=0))

    self.root.append(self.gen_new_line(indent=0))
    self.root.append(self.gen_runner_class())
def compile_scenario(self, scenario):
    elements = []
    for _, protocol in iteritems(self.protocol_handlers):
        elements.extend(protocol.get_toplevel_elements(scenario))
    elements.extend(self.__gen_authorization(scenario))
    elements.extend(self.__gen_datasources(scenario))
    elements.extend(self.__gen_requests(scenario))
    return elements
def __deepcopy__(self, memo):
    mycopy = KPISet(self.perc_levels)
    mycopy.sum_rt = self.sum_rt
    mycopy.sum_lt = self.sum_lt
    mycopy.sum_cn = self.sum_cn
    for key, val in iteritems(self):
        mycopy[key] = copy.deepcopy(val, memo)
    return mycopy
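# A self-contained sketch of the __deepcopy__ pattern used above: a dict
# subclass carrying extra attributes must copy them explicitly, because a plain
# deep copy of the mapping alone would not preserve them. The MiniKPISet class
# and its fields are illustrative only.
import copy

class MiniKPISet(dict):
    def __init__(self, perc_levels=()):
        super(MiniKPISet, self).__init__()
        self.perc_levels = perc_levels
        self.sum_rt = 0.0

    def __deepcopy__(self, memo):
        mycopy = MiniKPISet(self.perc_levels)
        mycopy.sum_rt = self.sum_rt
        for key, val in self.items():
            mycopy[key] = copy.deepcopy(val, memo)
        return mycopy

original = MiniKPISet(perc_levels=(50.0, 90.0))
original["rc"] = {"200": 10}
original.sum_rt = 1.5
clone = copy.deepcopy(original)
assert clone["rc"] == {"200": 10} and clone.sum_rt == 1.5 and clone["rc"] is not original["rc"]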
def __dump_csv(self, filename):
    self.log.info("Dumping final status as CSV: %s", filename)
    # FIXME: what if there's no last_sec
    with open(filename, "wt") as fhd:
        writer = csv.DictWriter(fhd, self.__get_csv_dict("", self.last_sec[DataPoint.CUMULATIVE][""]).keys())
        writer.writeheader()
        for label, kpiset in iteritems(self.last_sec[DataPoint.CUMULATIVE]):
            writer.writerow(self.__get_csv_dict(label, kpiset))
def reaggregate_by_ts(self, per_pid_stats, aggregate_func=average):
    # TODO: sub-second granularity
    per_ts = dict()  # ts -> (pid -> [measurement at ts])
    for pid in per_pid_stats:
        for offset, value in iteritems(per_pid_stats[pid]):
            base_ts = int(self.convert_ts(offset))
            if base_ts not in per_ts:
                per_ts[base_ts] = {}
            if pid not in per_ts[base_ts]:
                per_ts[base_ts][pid] = []
            per_ts[base_ts][pid].append(value)
    return {
        ts: {
            pid: aggregate_func(pid_measurements)
            for pid, pid_measurements in iteritems(stats_per_ts)
        }
        for ts, stats_per_ts in iteritems(per_ts)
    }
def __gen_sessions(self, scenario):
    sessions = etree.Element("sessions")
    session = etree.Element("session", name="taurus_requests", probability="100", type="ts_http")
    for request in scenario.get_requests():
        request_elem = etree.Element("request")
        http_elem = etree.Element("http", url=request.url, method=request.method, version="1.1")
        if request.body:
            http_elem.set('contents', request.body)

        headers = {}
        headers.update(scenario.data.get('headers', {}))
        headers.update(request.headers)
        for header_name, header_value in iteritems(headers):
            http_elem.append(etree.Element("http_header", name=header_name, value=header_value))

        request_elem.append(http_elem)
        session.append(request_elem)
        if request.think_time is not None:
            think_time = int(dehumanize_time(request.think_time))
            session.append(etree.Element("thinktime", value=str(think_time), random="false"))

    sessions.append(session)
    return sessions
def default(self, obj):  # pylint: disable=method-hidden
    """
    Filters out protected and private fields

    :param obj:
    :return:
    """
    if self.__dumpable(obj):
        res = {}
        for key, val in iteritems(obj.__dict__):
            if not self.__dumpable(val):
                # logging.debug("Filtered out: %s.%s", key, val)
                pass
            elif key.startswith('_'):
                # logging.debug("Filtered out: %s", key)
                pass
            else:
                res[key] = val
        return res
    else:
        return None
def gen_runner_class(self):
    runner_classdef = self.gen_class_definition("TestRunner", ["object"])

    sleep_method = self.gen_method_definition("rampUpSleeper", ["self"])
    sleep_method.append(self.gen_statement("if grinder.runNumber != 0: return"))
    sleep_method.append(self.gen_statement("tprops = grinder.properties.getPropertySubset('taurus.')"))
    sleep_method.append(self.gen_statement("inc = tprops.getDouble('ramp_up', 0)/tprops.getInt('concurrency', 1)"))
    sleep_method.append(self.gen_statement("sleep_time = int(1000 * grinder.threadNumber * inc)"))
    sleep_method.append(self.gen_statement("grinder.sleep(sleep_time, 0)"))
    sleep_method.append(self.gen_statement("if sleep_time: grinder.logger.info('slept for %sms' % sleep_time)"))
    sleep_method.append(self.gen_statement("else: grinder.logger.info('No sleep needed')"))
    sleep_method.append(self.gen_new_line())
    runner_classdef.append(sleep_method)

    main_method = self.gen_method_definition("__call__", ["self"])
    main_method.append(self.gen_statement("self.rampUpSleeper()"))

    for req in self.scenario.get_requests():
        if not isinstance(req, HTTPRequest):
            msg = "Grinder script generator doesn't support '%s' blocks, skipping"
            self.log.warning(msg, req.NAME)
            continue

        method = req.method.upper()
        url = req.url
        local_headers = req.headers

        params = "[]"
        headers = self.__list_to_nvpair_list(iteritems(local_headers))

        main_method.append(self.gen_statement("request.%s(%r, %s, %s)" % (method, url, params, headers)))

        think_time = dehumanize_time(req.priority_option('think-time'))
        if think_time:
            main_method.append(self.gen_statement("grinder.sleep(%s)" % int(think_time * 1000)))

    runner_classdef.append(main_method)
    return runner_classdef
def _get_header_mgr(hdict):
    """
    :type hdict: dict[str,str]
    :rtype: lxml.etree.Element
    """
    mgr = etree.Element("HeaderManager", guiclass="HeaderPanel", testclass="HeaderManager", testname="Headers")

    coll_prop = etree.Element("collectionProp", name="HeaderManager.headers")
    for hname, hval in iteritems(hdict):
        header = etree.Element("elementProp", name="", elementType="Header")
        header.append(JMX._string_prop("Header.name", hname))
        header.append(JMX._string_prop("Header.value", hval))
        coll_prop.append(header)
    mgr.append(coll_prop)
    return mgr
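# A self-contained sketch of the XML shape produced by _get_header_mgr() above,
# built with plain lxml so it can be run and inspected in isolation. The nested
# <stringProp> layout is an assumption based on the JMX._string_prop helper
# name; the real helper may differ in details.
from lxml import etree

def sketch_header_mgr(hdict):
    mgr = etree.Element("HeaderManager", guiclass="HeaderPanel", testclass="HeaderManager", testname="Headers")
    coll_prop = etree.SubElement(mgr, "collectionProp", name="HeaderManager.headers")
    for hname, hval in hdict.items():
        header = etree.SubElement(coll_prop, "elementProp", name="", elementType="Header")
        etree.SubElement(header, "stringProp", name="Header.name").text = hname
        etree.SubElement(header, "stringProp", name="Header.value").text = hval
    return mgr

print(etree.tostring(sketch_header_mgr({"X-Api-Key": "token"}), pretty_print=True).decode())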
def __dump_xml(self, filename):
    self.log.info("Dumping final status as XML: %s", filename)
    root = etree.Element("FinalStatus")

    if self.first_ts < float("inf") and self.last_ts > 0:
        duration_elem = etree.Element("TestDuration")
        duration_elem.text = str(round(float(self.last_ts - self.first_ts), 3))
        root.append(duration_elem)

    report_info = get_bza_report_info(self.engine, self.log)
    if report_info:
        link, _ = report_info[0]
        report_element = etree.Element("ReportURL")
        report_element.text = link
        root.append(report_element)

    if self.last_sec:
        for label, kpiset in iteritems(self.last_sec[DataPoint.CUMULATIVE]):
            root.append(self.__get_xml_summary(label, kpiset))

    with open(get_full_path(filename), 'wb') as fhd:
        tree = etree.ElementTree(root)
        tree.write(fhd, pretty_print=True, encoding="UTF-8", xml_declaration=True)
def _extract_operation(self, operation):
    parameters = OrderedDict()
    for param in operation.get("parameters", []):
        if "$ref" in param:
            param = self._lookup_reference(param["$ref"])
        param_name = param["name"]
        parameter = Swagger.Parameter(name=param_name, location=param.get("in"),
                                      description=param.get("description"),
                                      required=param.get("required"),
                                      schema=param.get("schema"),
                                      type=param.get("type"),
                                      format=param.get("format"))
        parameters[param_name] = parameter

    responses = OrderedDict()
    for name, resp in iteritems(operation.get("responses", {})):
        response = Swagger.Response(name=name, description=resp.get("description"),
                                    schema=resp.get("schema"),
                                    headers=resp.get("headers"))
        responses[name] = response

    return Swagger.Operation(summary=operation.get("summary"),
                             description=operation.get("description"),
                             operation_id=operation.get("operationId"),
                             consumes=operation.get("consumes"),
                             produces=operation.get("produces"),
                             parameters=parameters,
                             responses=responses,
                             security=operation.get("security"))
def save_report(self, fname):
    """
    :type fname: str
    """
    try:
        if os.path.exists(fname):
            self.log.warning("File %s already exists, it will be overwritten", fname)
        else:
            dirname = os.path.dirname(fname)
            if dirname and not os.path.exists(dirname):
                os.makedirs(dirname)

        testsuites = etree.Element("testsuites")
        for _, suite in iteritems(self.test_suites):
            testsuites.append(suite)

        etree_obj = etree.ElementTree(testsuites)

        self.log.info("Writing JUnit XML report into: %s", fname)
        with open(get_full_path(fname), 'wb') as _fds:
            etree_obj.write(_fds, xml_declaration=True, encoding="UTF-8", pretty_print=True)
    except BaseException:
        raise TaurusInternalException("Cannot create file %s" % fname)
def convert(self, swagger_fd):
    self.swagger.parse(swagger_fd)
    info = self.swagger.get_info()
    title = info.get("title", "Swagger")
    host = self.swagger.get_host()
    paths = self.swagger.get_interpolated_paths()
    schemes = self.swagger.swagger.get("schemes", ["http"])
    scheme = schemes[0]
    default_address = scheme + "://" + host
    scenario_name = title.replace(' ', '-')
    if self.scenarios_from_paths:
        scenarios = self._extract_scenarios_from_paths(paths, default_address)
        return {
            "scenarios": scenarios,
            "execution": [{
                "concurrency": 1,
                "scenario": scenario_name,
                "hold-for": "1m",
            } for scenario_name, scenario in iteritems(scenarios)]
        }
    else:
        requests = self._extract_requests_from_paths(paths)
        return {
            "scenarios": {
                scenario_name: {
                    "default-address": default_address,
                    "requests": requests
                }
            },
            "execution": [{
                "concurrency": 1,
                "scenario": scenario_name,
                "hold-for": "1m",
            }]
        }
def build_source_code(self):
    self.log.debug("Generating Python script for Grinder")
    self.root.append(self.gen_comment("This script was generated by Taurus", indent=0))
    self.root.append(self.add_imports())
    self.root.append(self.gen_new_line())

    default_address = self.scenario.get("default-address")
    url_arg = "url=%r" % default_address if default_address else ""
    self.root.append(self.gen_statement('request = HTTPRequest(%s)' % url_arg, indent=0))
    self.root.append(self.gen_statement('test = Test(1, "%s")' % self.label, indent=0))
    self.root.append(self.gen_statement('test.record(request)', indent=0))
    self.root.append(self.gen_new_line())

    self.root.append(self.gen_statement("defaults = HTTPPluginControl.getConnectionDefaults()", indent=0))
    self.root.append(self.gen_statement("utilities = HTTPPluginControl.getHTTPUtilities()", indent=0))

    headers = self.scenario.get_headers()
    if not self.scenario.get("keepalive", True):
        headers['Connection'] = 'close'

    if headers:
        self.root.append(self.gen_statement("defaults.setDefaultHeaders([", indent=0))
        for header, value in iteritems(headers):
            self.root.append(self.gen_statement("NVPair(%r, %r)," % (header, value), indent=4))
        self.root.append(self.gen_statement("])", indent=0))

    global_timeout = dehumanize_time(self.scenario.get("timeout", None))
    if global_timeout:
        self.root.append(self.gen_statement("defaults.setTimeout(%s)" % int(global_timeout * 1000), indent=0))

    cookie_flag = int(self.scenario.get("store-cookie", True))
    self.root.append(self.gen_statement("defaults.setUseCookies(%s)" % cookie_flag, indent=0))

    self.root.append(self.gen_new_line())
    self.root.append(self.gen_runner_class())
def __add_boundary_ext(self, children, req):
    extractors = req.config.get("extract-boundary")
    for varname, cfg in iteritems(extractors):
        subj = cfg.get('subject', 'body')
        left = cfg.get('left', TaurusConfigError("Left boundary is missing for boundary extractor %s" % varname))
        right = cfg.get('right', TaurusConfigError("Right boundary is missing for boundary extractor %s" % varname))
        match_no = cfg.get('match-no', 1)
        defvalue = cfg.get('default', 'NOT_FOUND')
        scope = cfg.get("scope", None)
        from_var = cfg.get("from-variable", None)

        extractor = JMX._get_boundary_extractor(varname, subj, left, right, match_no, defvalue, scope, from_var)
        children.append(extractor)
        children.append(etree.Element("hashTree"))
def gen_test_method_body(self):
    var_defs = [
        ast.Assign(targets=[ast.Name(id=var, ctx=ast.Store())],
                   value=self.gen_expr(init))
        for var, init in iteritems(self.scenario.get("variables"))
    ]
    if var_defs:
        var_defs.append(self.gen_empty_line_stmt())

    init = self.gen_init()
    if init:
        init.append(self.gen_empty_line_stmt())

    requests = []
    for req in self.scenario.get_requests():
        if not isinstance(req, HTTPRequest):
            msg = "Apiritif script generator doesn't support '%s' blocks, skipping"
            self.log.warning(msg, req.NAME)
            continue
        requests.extend(self.gen_request_lines(req))
        requests.append(self.gen_empty_line_stmt())

    return var_defs + init + requests
def gen_runner_class(self):
    runner_classdef = self.gen_class_definition("TestRunner", ["object"], indent=0)

    main_method = self.gen_method_definition("__call__", ["self"], indent=4)

    global_think_time = self.scenario.get('think-time', None)

    for req in self.scenario.get_requests():
        method = req.method.upper()
        url = req.url
        think_time = dehumanize_time(req.think_time or global_think_time)

        local_headers = req.config.get("headers", {})

        params = "[]"
        headers = self.__list_to_nvpair_list(iteritems(local_headers))

        main_method.append(self.gen_statement("request.%s(%r, %s, %s)" % (method, url, params, headers), indent=8))

        if think_time:
            main_method.append(self.gen_statement("grinder.sleep(%s)" % int(think_time * 1000), indent=8))

    runner_classdef.append(main_method)

    return runner_classdef
def __write_scenario_props(self, fds, scenario):
    """
    Write scenario props and scenario file props to fds

    :param fds:
    :param scenario: dict
    :return:
    """
    script_props_file = scenario.get("properties-file", None)
    if script_props_file:
        fds.write("# Script Properties File Start: %s\n" % script_props_file)
        with open(script_props_file) as spf:
            fds.write(spf.read())
        fds.write("# Script Properties File End: %s\n\n" % script_props_file)

    # scenario props
    local_props = scenario.get("properties")
    if local_props:
        fds.write("# Scenario Properties Start\n")
        for key, val in iteritems(local_props):
            fds.write("%s=%s\n" % (key, val))
        fds.write("# Scenario Properties End\n\n")
def recalculate(self):
    """
    Recalculate averages, stdev and percentiles

    :return:
    """
    if self[self.SAMPLE_COUNT]:
        self[self.AVG_CONN_TIME] = self.sum_cn / self[self.SAMPLE_COUNT]
        self[self.AVG_LATENCY] = self.sum_lt / self[self.SAMPLE_COUNT]
        self[self.AVG_RESP_TIME] = self.sum_rt / self[self.SAMPLE_COUNT]

    if len(self._concurrencies):
        self[self.CONCURRENCY] = sum(self._concurrencies.values())

    resp_times = self[self.RESP_TIMES]
    if resp_times:
        self[self.PERCENTILES] = {
            str(float(perc)): value / 1000.0
            for perc, value in iteritems(resp_times.get_percentiles_dict(self.perc_levels))
        }

    return self
def _load_tasks(self, stage, container):
    if not isinstance(self.parameters.get(stage, []), list):
        self.parameters[stage] = [self.parameters[stage]]

    for index, stage_task in enumerate(self.parameters[stage]):
        stage_task = ensure_is_dict(self.parameters[stage], index, "command")
        task_config = self.parameters[stage][index]
        run_at = task_config.get("run-at", "local")
        default_cwd = self.settings.get("default-cwd", None)
        if run_at == self.engine.config.get(Provisioning.PROV, None):
            cwd = task_config.get("cwd", default_cwd)
            if cwd is None:
                working_dir = self.engine.default_cwd
            elif cwd == 'artifacts-dir':
                working_dir = self.engine.artifacts_dir
            else:
                working_dir = cwd

            env = BetterDict()
            env.merge({k: os.environ.get(k) for k in os.environ.keys()})
            env.merge(self.settings.get('env'))
            env.merge(task_config.get('env'))
            env.merge({"PYTHONPATH": working_dir})
            if os.getenv("PYTHONPATH"):
                env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']
            env[ARTIFACTS_DIR_ENVVAR] = self.engine.artifacts_dir

            for name, value in iteritems(env):
                env[str(name)] = str(value)

            task = Task(task_config, self.log, working_dir, env)
            container.append(task)
            self.log.debug("Added %s task: %s", stage, stage_task)
        else:
            self.log.debug("Skipped task: %s", task_config)
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
    if cwd is None:
        cwd = self.engine.default_cwd

    aliases = self.get_hostaliases()
    hosts_file = None
    if aliases:
        hosts_file = self.engine.create_artifact("hostaliases", "")
        with open(hosts_file, 'w') as fds:
            for key, value in iteritems(aliases):
                fds.write("%s %s\n" % (key, value))

    environ = BetterDict()
    environ.merge(dict(os.environ))

    if aliases:
        environ["HOSTALIASES"] = hosts_file

    if env is not None:
        environ.merge(env)

    environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})
    environ = {key: environ[key] for key in environ.keys() if environ[key] is not None}

    return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
    if cwd is None:
        cwd = self.engine.default_cwd

    aliases = self.get_hostaliases()
    hosts_file = None
    if aliases:
        hosts_file = self.engine.create_artifact("hostaliases", "")
        with open(hosts_file, 'w') as fds:
            for key, value in iteritems(aliases):
                fds.write("%s %s\n" % (key, value))

    environ = BetterDict()
    environ.merge(dict(os.environ))

    if aliases:
        environ["HOSTALIASES"] = hosts_file

    if env is not None:
        if is_windows():
            # as variables in windows are case insensitive we should provide correct merging
            cur_env = {name.upper(): environ[name] for name in environ}
            old_keys = set(env.keys())
            env = {name.upper(): env[name] for name in env}
            new_keys = set(env.keys())
            if old_keys != new_keys:
                msg = 'Some taurus environment variables might have been lost: %s'
                self.log.debug(msg, list(old_keys - new_keys))
            environ = BetterDict()
            environ.merge(cur_env)
        environ.merge(env)

    environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})

    environ = {key: environ[key] for key in environ.keys() if environ[key] is not None}

    self.log.debug("Executing shell from %s: %s", cwd, args)
    return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
def prepare(self):
    super(PassFailStatus, self).prepare()

    criteria = self.parameters.get("criterias", [])

    if isinstance(criteria, dict):
        crit_iter = iteritems(criteria)
    else:
        crit_iter = enumerate(criteria)

    for idx, crit_config in crit_iter:
        if isinstance(crit_config, string_types):
            crit_config = DataCriteria.string_to_config(crit_config)
            self.parameters['criterias'][idx] = crit_config
        crit = load_class(crit_config.get('class', DataCriteria.__module__ + "." + DataCriteria.__name__))
        crit_instance = crit(crit_config, self)
        assert isinstance(crit_instance, FailCriteria)
        if isinstance(idx, string_types):
            crit_instance.message = idx
        self.criterias.append(crit_instance)

    if isinstance(self.engine.aggregator, ResultsProvider):
        self.engine.aggregator.add_listener(self)
def _dump_vars(self, key):
    cmdline = []
    vals = self.get_scenario().get(key)
    if isinstance(vals, string_types):
        cmdline += ["--%s" % key, vals]
    else:
        data = {"values": []}
        if isinstance(vals, list):
            data['values'] = vals
        else:
            for varname, val in iteritems(vals):
                data["values"].append({
                    "key": varname,
                    "value": val,
                    "type": "any",
                    "enabled": True
                })
        fname = self.engine.create_artifact(key, ".json")
        with open(fname, "wt") as fds:
            fds.write(to_json(data))
        cmdline += ["--%s" % key, fname]
    return cmdline
def _add_url_request(self, request, test_method):
    """
    :type request: bzt.requests_model.HTTPRequest
    """
    named_args = OrderedDict()

    method = request.method.lower()
    think_time = dehumanize_time(request.priority_option('think-time', default=None))

    if request.timeout is not None:
        named_args['timeout'] = dehumanize_time(request.timeout)
    if request.follow_redirects is not None:
        named_args['allow_redirects'] = request.priority_option('follow-redirects', default=True)

    headers = {}
    scenario_headers = self.scenario.get("headers", None)
    if scenario_headers:
        headers.update(scenario_headers)
    if request.headers:
        headers.update(request.headers)
    if headers:
        named_args['headers'] = self.repr_inter(headers)

    merged_headers = dict([(key.lower(), value) for key, value in iteritems(headers)])
    content_type = merged_headers.get('content-type', None)

    if content_type == 'application/json' and isinstance(request.body, (dict, list)):  # json request body
        named_args['json'] = self.repr_inter(request.body)
    elif method == "get" and isinstance(request.body, dict):  # request URL params (?a=b&c=d)
        named_args['params'] = self.repr_inter(request.body)
    elif isinstance(request.body, dict):  # form data
        named_args['data'] = self.repr_inter(list(iteritems(request.body)))
    elif isinstance(request.body, string_types):
        named_args['data'] = self.repr_inter(request.body)
    elif request.body:
        msg = "Cannot handle 'body' option of type %s: %s"
        raise TaurusConfigError(msg % (type(request.body), request.body))

    kwargs = ", ".join([""] + ["%s=%s" % (name, value) for name, value in iteritems(named_args)])

    request_source = "self.target" if self.access_method == "target" else "apiritif.http"

    if request.label:
        label = request.label
    else:
        label = request.url

    test_method.append(self.gen_statement("with apiritif.transaction(%r):" % label, indent=8))
    request_line = "response = {source}.{method}({url}{kwargs})".format(
        source=request_source,
        method=method,
        url=self.repr_inter(request.url),
        kwargs=kwargs,
    )
    test_method.append(self.gen_statement(request_line, indent=12))

    self._add_assertions(request, test_method)
    self._add_jsonpath_assertions(request, test_method)
    self._add_xpath_assertions(request, test_method)
    self._add_extractors(request, test_method)

    if think_time:
        test_method.append(self.gen_statement('time.sleep(%s)' % think_time))
def startup(self):
    args = [self.tool.tool_path]
    load = self.get_load()
    load_iterations = load.iterations or 1
    load_concurrency = load.concurrency or 1

    if load.hold:
        hold = int(ceil(dehumanize_time(load.hold)))
        args += ['-t', str(hold)]
    else:
        args += ['-n', str(load_iterations * load_concurrency)]  # ab waits for total number of iterations

    timeout = self.get_scenario().get("timeout", None)
    if timeout:
        args += ['-s', str(ceil(dehumanize_time(timeout)))]

    args += ['-c', str(load_concurrency)]
    args += ['-d']  # do not print 'Processed *00 requests' every 100 requests or so
    args += ['-r']  # do not crash on socket level errors

    if self.tool.version and LooseVersion(self.tool.version) >= LooseVersion("2.4.7"):
        args += ['-l']  # accept variable-len responses

    args += ['-g', str(self._tsv_file)]  # dump stats to TSV file

    # add global scenario headers
    for key, val in iteritems(self.scenario.get_headers()):
        args += ['-H', "%s: %s" % (key, val)]

    requests = self.scenario.get_requests()
    if not requests:
        raise TaurusConfigError("You must specify at least one request for ab")
    if len(requests) > 1:
        self.log.warning("ab doesn't support multiple requests. Only first one will be used.")

    request = self.__first_http_request()
    if request is None:
        raise TaurusConfigError("ab supports only HTTP requests, while scenario doesn't have any")

    # add request-specific headers
    for key, val in iteritems(request.headers):
        args += ['-H', "%s: %s" % (key, val)]

    if request.method != 'GET':
        raise TaurusConfigError("ab supports only GET requests, but '%s' is found" % request.method)

    if request.priority_option('keepalive', default=True):
        args += ['-k']

    args += [request.url]

    self.reader.setup(load_concurrency, request.label)

    self.process = self._execute(args)
def __json__(self):
    return {
        rt / 1000.0: int(count)  # because hdrpy returns int64, which is unrecognized by json serializer
        for rt, count in iteritems(self.get_counts())
    }
def __prepare_tags(self):
    for name, style in iteritems(self._palette):
        # NOTE: not sure which style index to use, using [0]
        bgc = self.__translate_tcl_color(style[0].background)
        fgc = self.__translate_tcl_color(style[0].foreground)
        self.text.tag_configure(name, background=bgc, foreground=fgc)
def _extract_requests_from_paths(self, paths, scenario_name, default_address, global_security):
    base_path = self.swagger.get_base_path()
    requests = []

    scenario = {
        "default-address": "${default-address}",
        "variables": {},
    }

    global_vars = {
        "default-address": default_address,
    }
    if base_path:
        global_vars["default-path"] = base_path

    if global_security:
        self._add_global_security(scenario, global_security, global_vars)

    for path, path_obj in iteritems(paths):
        self.log.debug("Handling path %s", path)
        for method in Swagger.METHODS:
            operation = getattr(path_obj, method)
            if operation is not None:
                self.log.debug("Handling method %s", method.upper())
                if base_path:
                    route = "${default-path}" + path
                else:
                    route = path
                request = self._extract_request(route, path_obj, method, operation)
                # TODO: Swagger responses -> JMeter assertions?
                if request is not None:
                    if operation.security:
                        self._add_local_security(request, operation.security, scenario)
                    elif global_security:
                        self._add_local_security(request, global_security, scenario, disable_basic=True)
                    requests.append(request)

    if not scenario["variables"]:
        scenario.pop("variables")

    scenario["requests"] = requests

    config = {
        "scenarios": {
            scenario_name: scenario
        },
        "execution": [{
            "concurrency": 1,
            "scenario": scenario_name,
            "hold-for": "1m",
        }]
    }

    if global_vars:
        config["settings"] = {"env": global_vars}

    return config
def _extract_scenarios_from_paths(self, paths, default_address, global_security):
    base_path = self.swagger.get_base_path()
    scenarios = OrderedDict()

    global_vars = {
        "default-address": default_address
    }
    if base_path:
        global_vars["default-path"] = base_path

    for path, path_obj in iteritems(paths):
        self.log.info("Handling path %s", path)

        scenario_name = path
        scenario = {
            "default-address": "${default-address}",
            "variables": {},
        }

        if base_path:
            route = "${default-path}" + path
        else:
            route = path

        requests = []
        for method in Swagger.METHODS:
            operation = getattr(path_obj, method)
            if operation is not None:
                self.log.debug("Handling method %s", method.upper())
                request = self._extract_request(route, path_obj, method, operation)
                if operation.security:
                    self._add_local_security(request, operation.security, scenario)
                elif global_security:
                    self._add_local_security(request, global_security, scenario)
                requests.append(request)
                # TODO: Swagger responses -> assertions?

        if not requests:
            continue

        scenario["requests"] = requests

        if global_security:
            self._add_global_security(scenario, global_security, global_vars)

        if not scenario["variables"]:
            scenario.pop("variables")

        scenarios[scenario_name] = scenario

    config = {
        "scenarios": scenarios,
        "execution": [{
            "concurrency": 1,
            "scenario": scenario_name,
            "hold-for": "1m",
        } for scenario_name, scenario in iteritems(scenarios)]
    }

    if global_vars:
        config["settings"] = {"env": global_vars}

    return config
def __json__(self):
    return {
        float(rt) / 1000: count
        for rt, count in iteritems(self.histogram.get_value_counts())
    }
def _cleanup_config(config):
    for _, scenario in iteritems(config.get("scenarios")):
        if "test-case" in scenario:
            scenario.pop("test-case")
        if "test-suite" in scenario:
            scenario.pop("test-suite")