def from_dict(obj, ext_aggregation=False):
    """
    :type obj: dict
    :type ext_aggregation: bool
    :rtype: KPISet
    """
    assert KPISet.PERCENTILES in obj  # validate presence before reading percentile levels
    prc_levels = [float(x) for x in obj[KPISet.PERCENTILES].keys()]
    inst = KPISet(perc_levels=prc_levels, ext_aggregation=ext_aggregation)
    for key, val in iteritems(obj):
        if key == inst.RESP_TIMES:
            if isinstance(val, dict):
                for value, count in iteritems(val):
                    inst[inst.RESP_TIMES].add(float(value), count)
        else:
            inst[key] = val

    # restore the running sums that the stored averages were derived from
    inst.sum_cn = obj[inst.AVG_CONN_TIME] * obj[inst.SAMPLE_COUNT]
    inst.sum_lt = obj[inst.AVG_LATENCY] * obj[inst.SAMPLE_COUNT]
    inst.sum_rt = obj[inst.AVG_RESP_TIME] * obj[inst.SAMPLE_COUNT]
    for error in inst[KPISet.ERRORS]:
        error['urls'] = Counter(error['urls'])
    return inst

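# A minimal round-trip sketch for from_dict(); the key names use the KPISet
# class constants, and all literal numbers are made-up illustration values.
_serialized = {
    KPISet.PERCENTILES: {"50.0": 0.1, "90.0": 0.5},
    KPISet.RESP_TIMES: {"0.1": 9, "0.5": 1},
    KPISet.SAMPLE_COUNT: 10,
    KPISet.AVG_CONN_TIME: 0.01,
    KPISet.AVG_LATENCY: 0.05,
    KPISet.AVG_RESP_TIME: 0.14,
    KPISet.ERRORS: [],
}
_restored = from_dict(_serialized)
assert _restored[KPISet.SAMPLE_COUNT] == 10
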
def _extract_toplevel_definitions(self):
    self.info = self.swagger.get("info", {})

    for name, schema in iteritems(self.swagger.get("definitions", {})):
        self.definitions[name] = Swagger.Definition(name=name, schema=schema)

    for name, response in iteritems(self.swagger.get("responses", {})):
        self.responses[name] = Swagger.Response(name=name, description=response.get("description"),
                                                schema=response.get("schema"), headers=response.get("headers"))

    for name, param in iteritems(self.swagger.get("parameters", {})):
        parameter = Swagger.Parameter(name=name, location=param.get("in"), description=param.get("description"),
                                      required=param.get("required"), schema=param.get("schema"),
                                      type=param.get("type"), format=param.get("format"))
        self.parameters[name] = parameter

    for name, secdef in iteritems(self.swagger.get("securityDefinitions", {})):
        self.security_defs[name] = Swagger.SecurityDef(type=secdef.get('type'),
                                                       description=secdef.get('description'),
                                                       name=secdef.get('name'), location=secdef.get('in'))

def __get_csv_dict(self, label, kpiset):
    kpi_copy = copy.deepcopy(kpiset)
    res = OrderedDict()
    res['label'] = label

    # sort keys so the column order is stable between runs
    for key in sorted(kpi_copy.keys()):
        res[key] = kpi_copy[key]

    # nested structures are flattened into per-level/per-code columns below
    del res[KPISet.ERRORS]
    del res[KPISet.RESP_TIMES]
    del res[KPISet.RESP_CODES]
    del res[KPISet.PERCENTILES]

    percentiles = list(iteritems(kpiset[KPISet.PERCENTILES]))
    for level, val in sorted(percentiles, key=lambda lv: (float(lv[0]), lv[1])):
        res['perc_%s' % level] = val

    resp_codes = list(iteritems(kpiset[KPISet.RESP_CODES]))
    for rcd, val in sorted(resp_codes):
        res['rc_%s' % rcd] = val

    for key in res:
        if isinstance(res[key], float):
            res[key] = "%.5f" % res[key]

    return res

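# Illustration only: for a kpiset with 50th/90th percentiles and one response
# code, the resulting row would look roughly like this (the plain KPI columns
# depend on what the KPISet contains; floats are rendered with 5 decimals):
#   OrderedDict([('label', 'homepage'), ..., ('perc_50.0', '0.10000'),
#                ('perc_90.0', '0.50000'), ('rc_200', 10)])
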
def get_test_status_text(self):
    if not self._sessions:
        self._sessions = self.master.sessions()
        if not self._sessions:
            return

    mapping = BetterDict()  # dict(executor -> dict(scenario -> dict(location -> servers count)))
    for session in self._sessions:
        try:
            name_split = [part.strip() for part in session['name'].split('/')]
            location = session['configuration']['location']
            count = session['configuration']['serversCount']
            ex_item = mapping.get(name_split[0], force_set=True)

            if len(name_split) > 1:
                name = name_split[1]
            else:
                name = "N/A"

            ex_item.get(name, force_set=True)[location] = count
        except KeyError:
            self._sessions = None  # malformed session info; refetch on the next call

    txt = "%s #%s\n" % (self._test['name'], self.master['id'])
    for executor, scenarios in iteritems(mapping):
        txt += " %s" % executor
        for scenario, locations in iteritems(scenarios):
            txt += " %s:\n" % scenario
            for location, count in iteritems(locations):
                txt += "  Agents in %s: %s\n" % (location, count)

    return txt

def test_scenarios_from_paths(self):
    obj = SwaggerConverter(ROOT_LOGGER, scenarios_from_paths=True)
    config = obj.convert_path(RESOURCES_DIR + "/swagger/bzm-api.json")

    self.assertEqual(len(config["scenarios"]), 5)
    scenario_names = set(key for key, _ in iteritems(config["scenarios"]))
    self.assertEqual({"/reports", "/reports/1", "/tests", "/tests/1", "/tests/1/start"}, scenario_names)
    self.assertEqual(len(config["execution"]), 5)
    self.assertEqual(config["settings"]["env"]["default-address"], "https://a.blazemeter.com")

    for scenario_name, scenario in iteritems(config["scenarios"]):
        self.assertEqual(scenario["default-address"], "${default-address}")
        scenario_requests = scenario["requests"]
        self.assertGreater(len(scenario_requests), 0)
        for scenario_request in scenario_requests:
            self.assertTrue(scenario_request["url"].startswith("${default-path}/"))

    self.assertEqual(len(config["scenarios"]["/reports"]["requests"]), 1)
    self.assertEqual(len(config["scenarios"]["/reports/1"]["requests"]), 1)
    self.assertEqual(len(config["scenarios"]["/tests"]["requests"]), 2)
    self.assertEqual(len(config["scenarios"]["/tests/1"]["requests"]), 4)
    self.assertEqual(len(config["scenarios"]["/tests/1/start"]["requests"]), 1)

def get_kpi_body(self, data_buffer, is_final):
    # - reporting format:
    #   {labels: <data>,    # see below
    #    sourceID: <id of BlazeMeterClient object>,
    #    [final: True]}     # only on the last report
    #
    # - elements of 'data' are described in __get_label()
    #
    # - elements of 'intervals' are described in __get_interval();
    #   every interval contains info about response codes received within it

    report_items = BetterDict()
    if data_buffer:
        self.owner.first_ts = min(self.owner.first_ts, data_buffer[0][DataPoint.TIMESTAMP])
        self.owner.last_ts = max(self.owner.last_ts, data_buffer[-1][DataPoint.TIMESTAMP])

        # the following data is received cumulatively
        for label, kpi_set in iteritems(data_buffer[-1][DataPoint.CUMULATIVE]):
            if self.owner.extend_report:
                report_item = {}
                for state in kpi_set:
                    report_item[state] = self.__get_label(label, kpi_set[state])
                    self.__add_errors(report_item[state], kpi_set[state])
            else:
                report_item = self.__get_label(label, kpi_set)
                self.__add_errors(report_item, kpi_set)  # 'Errors' tab
            report_items[label] = report_item

    # fill the 'Timeline Report' tab with intervals data;
    # intervals are received additively
    if report_items:
        for dpoint in data_buffer:
            time_stamp = dpoint[DataPoint.TIMESTAMP]
            for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]):
                exc = TaurusInternalException('Cumulative KPISet is non-consistent')
                report_item = report_items.get(label, exc)  # BetterDict raises the default if it is an exception
                if self.owner.extend_report:
                    for state in report_item:
                        if state in kpi_set:
                            report_item[state]['intervals'].append(self.__get_interval(kpi_set[state], time_stamp))
                else:
                    report_item['intervals'].append(self.__get_interval(kpi_set, time_stamp))

    report_items = [report_items[key] for key in sorted(report_items.keys())]  # convert dict to list
    data = {"labels": report_items, "sourceID": id(self.owner)}
    if is_final:
        data['final'] = True

    return to_json(data)

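# Rough shape of the serialized KPI body (values illustrative; the exact
# per-label fields come from __get_label() and __get_interval()):
#   {"labels": [{"name": "homepage", "intervals": [...], ...}, ...],
#    "sourceID": 139712345678901,
#    "final": true}    # present only when is_final is set
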
def __get_xml_summary(self, label, kpiset):
    elem = etree.Element("Group", label=label)
    for kpi_name, kpi_val in iteritems(kpiset):
        if kpi_name in (KPISet.ERRORS, KPISet.RESP_TIMES):
            continue

        if isinstance(kpi_val, dict):
            for param_name, param_val in iteritems(kpi_val):
                elem.append(self.__get_kpi_xml(kpi_name, param_val, param_name))
        else:
            elem.append(self.__get_kpi_xml(kpi_name, kpi_val))

    return elem

def __dump_xml(self, filename):
    self.log.info("Dumping final status as XML: %s", filename)
    root = etree.Element("FinalStatus")

    if self.first_ts < float("inf") and self.last_ts > 0:
        duration_elem = etree.Element("TestDuration")
        duration_elem.text = str(round(float(self.last_ts - self.first_ts), 3))
        root.append(duration_elem)

    report_info = get_bza_report_info(self.engine, self.log)
    if report_info:
        link, _ = report_info[0]
        report_element = etree.Element("ReportURL")
        report_element.text = link
        root.append(report_element)

    if self.last_sec:
        for label, kpiset in iteritems(self.last_sec[DataPoint.CUMULATIVE]):
            root.append(self.__get_xml_summary(label, kpiset))

    with open(get_full_path(filename), 'wb') as fhd:
        tree = etree.ElementTree(root)
        tree.write(fhd, pretty_print=True, encoding="UTF-8", xml_declaration=True)

def process_functional(self, xunit):
    for suite_name, samples in iteritems(self.cumulative_results):
        # suite duration: from the earliest start to the end of the latest-started sample
        duration = max(s.start_time for s in samples) - min(s.start_time for s in samples)
        duration += max(samples, key=lambda s: s.start_time).duration
        attrs = {
            "name": suite_name,
            "tests": str(len(samples)),
            "errors": str(len([sample for sample in samples if sample.status == "BROKEN"])),
            "skipped": str(len([sample for sample in samples if sample.status == "SKIPPED"])),
            "failures": str(len([sample for sample in samples if sample.status == "FAILED"])),
            "time": str(round(duration, 3)),
            # TODO: "timestamp" attribute
        }
        xunit.add_test_suite(suite_name, attributes=attrs)

        for sample in samples:
            attrs = {
                "classname": sample.test_suite,
                "name": sample.test_case,
                "time": str(round(sample.duration, 3)),
            }
            children = []

            if sample.status == "BROKEN":
                error = etree.Element("error", type=sample.error_msg)
                if sample.error_trace:
                    error.text = sample.error_trace
                children.append(error)
            elif sample.status == "FAILED":
                failure = etree.Element("failure", message=sample.error_msg)
                if sample.error_trace:
                    failure.text = sample.error_trace
                children.append(failure)
            elif sample.status == "SKIPPED":
                skipped = etree.Element("skipped")
                children.append(skipped)

            xunit.add_test_case(suite_name, attributes=attrs, children=children)

def __json__(self):
    # hdrpy returns int64 counts, which the JSON serializer cannot handle,
    # so cast them to plain int; response-time keys are divided by 1000
    return {rt / 1000.0: int(count) for rt, count in iteritems(self.get_counts())}

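# Illustration, assuming get_counts() keys are milliseconds: a histogram of
# {100: 3, 500: 1} (int64 counts) serializes as {0.1: 3, 0.5: 1} with plain ints.
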
def startup(self):
    args = [self.tool.tool_path]
    load = self.get_load()

    if load.iterations:
        args += ['--reps', str(load.iterations)]
    elif load.hold:
        hold_for = ceil(dehumanize_time(load.hold))
        args += ['--time', '%sS' % hold_for]
    else:
        raise TaurusConfigError("Siege: You must specify either 'hold-for' or 'iterations'")

    think_time = self.scenario.get_think_time()
    if think_time:
        args += ['--delay', str(dehumanize_time(think_time))]
    else:
        args += ['--benchmark']

    load_concurrency = load.concurrency
    args += ['--concurrent', str(load_concurrency)]
    self.reader.concurrency = load_concurrency

    args += ['--file', self.__url_name]

    for key, val in iteritems(self.scenario.get_headers()):
        args += ['--header', "%s: %s" % (key, val)]

    self.env.set({"SIEGERC": self.__rc_name})
    self.process = self._execute(args)

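# For example, a scenario with 100 iterations, no think-time, concurrency 10
# and one header would produce roughly this command line (paths illustrative):
#   siege --reps 100 --benchmark --concurrent 10 --file urls.txt \
#         --header "X-Api-Key: secret"
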
def save_report(self, fname):
    """
    :type fname: str
    """
    try:
        if os.path.exists(fname):
            self.log.warning("File %s already exists, it will be overwritten", fname)
        else:
            dirname = os.path.dirname(fname)
            if dirname and not os.path.exists(dirname):
                os.makedirs(dirname)

        testsuites = etree.Element("testsuites")
        for _, suite in iteritems(self.test_suites):
            testsuites.append(suite)
        etree_obj = etree.ElementTree(testsuites)

        self.log.info("Writing JUnit XML report into: %s", fname)
        with open(get_full_path(fname), 'wb') as _fds:
            etree_obj.write(_fds, xml_declaration=True, encoding="UTF-8", pretty_print=True)
    except BaseException:
        raise TaurusInternalException("Cannot create file %s" % fname)

def _extract_operation(self, operation):
    parameters = OrderedDict()
    for param in operation.get("parameters", []):
        if "$ref" in param:
            param = self._lookup_reference(param["$ref"])
        param_name = param["name"]
        parameter = Swagger.Parameter(name=param_name, location=param.get("in"),
                                      description=param.get("description"), required=param.get("required"),
                                      schema=param.get("schema"), type=param.get("type"),
                                      format=param.get("format"))
        parameters[param_name] = parameter

    responses = OrderedDict()
    for name, resp in iteritems(operation.get("responses", {})):
        response = Swagger.Response(name=name, description=resp.get("description"),
                                    schema=resp.get("schema"), headers=resp.get("headers"))
        responses[name] = response

    return Swagger.Operation(summary=operation.get("summary"), description=operation.get("description"),
                             operation_id=operation.get("operationId"), consumes=operation.get("consumes"),
                             produces=operation.get("produces"), parameters=parameters,
                             responses=responses, security=operation.get("security"))

def __init__(self, crit_cfg_list, feeder):
    super(CriteriaProcessor, self).__init__()
    self.engine = None
    if isinstance(feeder, EngineModule):
        self.engine = feeder.engine
    self.criteria = []
    self.last_datapoint = None
    self.log = logging.getLogger(__name__)

    # criteria may come as a dict (message -> config) or as a list of configs
    if isinstance(crit_cfg_list, dict):
        crit_iter = iteritems(crit_cfg_list)
    else:
        crit_iter = enumerate(crit_cfg_list)

    for idx, crit_config in crit_iter:
        if isinstance(crit_config, str):
            crit_config = DataCriterion.string_to_config(crit_config)
            crit_cfg_list[idx] = crit_config

        crit = load_class(crit_config.get('class', DataCriterion.__module__ + "." + DataCriterion.__name__))
        crit_instance = crit(crit_config, self)
        assert isinstance(crit_instance, FailCriterion)
        if isinstance(idx, str):
            crit_instance.message = idx
        self.criteria.append(crit_instance)

    if isinstance(feeder, ResultsProvider):
        feeder.add_listener(self)

def _extract_paths(self):
    for name, path_item in iteritems(self.swagger["paths"]):
        path = {"ref": None, "get": None, "put": None, "post": None, "delete": None,
                "options": None, "head": None, "patch": None, "parameters": {}}
        for method in Swagger.METHODS:
            if method in path_item:
                operation = path_item[method]
                path[method] = self._extract_operation(operation)

        for param in path_item.get("parameters", []):
            if "$ref" in param:
                param = self._lookup_reference(param["$ref"])
            param_name = param["name"]
            parameter = Swagger.Parameter(name=param_name, location=param.get("in"),
                                          description=param.get("description"), required=param.get("required"),
                                          schema=param.get("schema"), type=param.get("type"),
                                          format=param.get("format"))
            path["parameters"][param_name] = parameter

        self.paths[name] = Swagger.Path(**path)

def __gen_sessions(self, scenario):
    sessions = etree.Element("sessions")
    session = etree.Element("session", name="taurus_requests", probability="100", type="ts_http")
    for request in scenario.get_requests():
        if not isinstance(request, HTTPRequest):
            msg = "Tsung config generator doesn't support '%s' blocks, skipping"
            self.log.warning(msg, request.NAME)
            continue

        request_elem = etree.Element("request")
        http_elem = etree.Element("http", url=request.url, method=request.method, version="1.1")
        if request.body:
            http_elem.set('contents', request.body)

        headers = copy.deepcopy(scenario.get_headers())
        headers.update(copy.deepcopy(request.headers))
        for header_name, header_value in iteritems(headers):
            http_elem.append(etree.Element("http_header", name=header_name, value=header_value))

        request_elem.append(http_elem)
        session.append(request_elem)
        if request.get_think_time():
            think_time = int(dehumanize_time(request.get_think_time()))
            session.append(etree.Element("thinktime", value=str(think_time), random="false"))
    sessions.append(session)
    return sessions

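# A single GET request with one header would come out roughly like this
# (URL and header are illustrative):
#   <sessions>
#     <session name="taurus_requests" probability="100" type="ts_http">
#       <request>
#         <http url="/index.html" method="GET" version="1.1">
#           <http_header name="Accept" value="text/html"/>
#         </http>
#       </request>
#     </session>
#   </sessions>
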
def _build_request(self, request, scenario):
    path = self._get_request_path(request, scenario)
    http = "%s %s HTTP/1.1\r\n" % (request.method, path)
    headers = BetterDict.from_dict({"Host": self.hostname})
    if not scenario.get("keepalive", True):
        headers.merge({"Connection": 'close'})  # HTTP/1.1 implies keep-alive by default

    body = ""
    if isinstance(request.body, dict):
        if request.method != "GET":
            body = urlencode(request.body)
    elif isinstance(request.body, str):
        body = request.body
    elif request.body:
        msg = "Cannot handle 'body' option of type %s: %s"
        raise TaurusConfigError(msg % (type(request.body), request.body))

    if body:
        headers.merge({"Content-Length": len(body)})

    headers.merge(scenario.get_headers())
    headers.merge(request.headers)
    for header, value in iteritems(headers):
        http += "%s: %s\r\n" % (header, value)
    http += "\r\n%s" % (body,)

    return http

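# Illustration of the raw request this produces for a body-less GET with
# keep-alive left enabled (hostname and path are made up):
#   "GET /status HTTP/1.1\r\nHost: example.local\r\n\r\n"
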
def _add_global_security(self, scenario, global_security, global_vars):
    if not global_security:
        return

    security = global_security[0]
    for sec_name, _ in iteritems(security):
        secdef = self.swagger.security_defs.get(sec_name)
        if not secdef:
            self.log.warning("Security definition %r not found, skipping", sec_name)
            continue

        if secdef.type == 'basic':
            self._insert_global_basic_auth(scenario, global_vars)
        elif secdef.type == 'apiKey':
            if secdef.name is None:
                self.log.warning("apiKey security definition has no header name, skipping")
                continue
            if secdef.location is None:
                self.log.warning("apiKey location (`in`) is not given, assuming header")
                secdef.location = 'header'
            self._insert_global_apikey_auth(scenario, secdef.name, sec_name, secdef.location, global_vars)
        elif secdef.type == 'oauth2':
            self.log.warning("OAuth2 security is not yet supported, skipping")
            continue

def _handle_parameters(self, parameters):
    query_params = OrderedDict()
    form_data = {}
    request_body = None
    headers = {}
    for _, param in iteritems(parameters):
        if not param.required:
            continue

        if param.location == "header":
            headers[param.name] = self._interpolate_parameter(param)
        elif param.location == "query":
            query_params[param.name] = self._interpolate_parameter(param)
        elif param.location == "formData":
            form_data[param.name] = self._interpolate_parameter(param)
        elif param.location == "body":
            request_body = self._interpolate_body(param)
        elif param.location == "path":
            pass  # path parameters are resolved at a different level
        else:
            self.log.warning("Unsupported parameter location (%s). Skipping", param.location)
    return query_params, form_data, request_body, headers

def __report_summary(self):
    status_counter = Counter()
    for test_suite in self.cumulative_results.test_suites():
        for case in self.cumulative_results.test_cases(test_suite):
            status_counter[case.status] += 1

    # FIXME: it's actually not tests, but test cases
    total = sum(count for _, count in iteritems(status_counter))
    self.log.info("Total: %s %s", total, self.__plural(total, 'test'))

def __merge_to_cumulative(self, current):
    """
    Merge the current KPISet into the cumulative one

    :param current: KPISet
    """
    for label, data in iteritems(current):
        cumul = self.cumulative.setdefault(label, KPISet(self.track_percentiles, data[KPISet.RESP_TIMES].high))
        cumul.merge_kpis(data)
        cumul.recalculate()

def run_subscribers(self, concrete_path, value):
    for sub_path, sub_funs in iteritems(self._subscriptions):
        if sub_path.matches(concrete_path):
            for fun in sub_funs:
                try:
                    fun(concrete_path, value)
                except BaseException:
                    # one failing subscriber must not break the others
                    self.log.warning("Checker failed: %s", traceback.format_exc())
                    continue

def compile_scenario(self, scenario):
    elements = []
    for _, protocol in iteritems(self.protocol_handlers):
        elements.extend(protocol.get_toplevel_elements(scenario))
    elements.extend(self.__gen_authorization(scenario))
    elements.extend(self.__gen_keystore_config(scenario))
    elements.extend(self.__gen_data_sources(scenario))
    elements.extend(self.__gen_requests(scenario))
    self.__add_jsr_elements(elements, scenario, False)
    return elements

def test_sources(self):
    mon_buffer = MonitoringBuffer(10, ROOT_LOGGER)
    for i in range(100):
        mon = [
            {"ts": i, "source": "local", "cpu": 1, "mem": 2, "bytes-recv": 100},
            {"ts": i, "source": "server-agent", "cpu": 10, "mem": 20},
        ]
        mon_buffer.record_data(mon)
        for source, buffer in iteritems(mon_buffer.data):
            self.assertLessEqual(len(buffer), 10)

def __dump_csv(self, filename):
    self.log.info("Dumping final status as CSV: %s", filename)
    # FIXME: what if there's no last_sec
    with open(get_full_path(filename), 'wt') as fhd:
        if '' in self.last_sec[DataPoint.CUMULATIVE]:
            fieldnames = self.__get_csv_dict('', self.last_sec[DataPoint.CUMULATIVE]['']).keys()
            writer = csv.DictWriter(fhd, fieldnames)
            writer.writeheader()
            for label, kpiset in iteritems(self.last_sec[DataPoint.CUMULATIVE]):
                writer.writerow(self.__get_csv_dict(label, kpiset))

def build_source_code(self):
    self.log.debug("Generating Python script for Grinder")
    self.root.append(self.gen_comment("This script was generated by Taurus", indent=0))
    self.root.append(self.add_imports())
    self.root.append(self.gen_new_line())

    default_address = self.scenario.get("default-address")
    url_arg = "url=%r" % default_address if default_address else ""
    self.root.append(self.gen_statement('request = HTTPRequest(%s)' % url_arg, indent=0))
    self.root.append(self.gen_statement('test = Test(1, "%s")' % self.label, indent=0))
    self.root.append(self.gen_statement('test.record(request)', indent=0))
    self.root.append(self.gen_new_line())

    self.root.append(self.gen_statement("defaults = HTTPPluginControl.getConnectionDefaults()", indent=0))
    self.root.append(self.gen_statement("utilities = HTTPPluginControl.getHTTPUtilities()", indent=0))

    headers = self.scenario.get_headers()
    if not self.scenario.get("keepalive", True):
        headers['Connection'] = 'close'

    if headers:
        self.root.append(self.gen_statement("defaults.setDefaultHeaders([", indent=0))
        for header, value in iteritems(headers):
            self.root.append(self.gen_statement("NVPair(%r, %r)," % (header, value), indent=4))
        self.root.append(self.gen_statement("])", indent=0))

    global_timeout = dehumanize_time(self.scenario.get("timeout", None))
    if global_timeout:
        self.root.append(self.gen_statement("defaults.setTimeout(%s)" % int(global_timeout * 1000), indent=0))

    cookie_flag = int(self.scenario.get("store-cookie", True))
    self.root.append(self.gen_statement("defaults.setUseCookies(%s)" % cookie_flag, indent=0))
    self.root.append(self.gen_new_line())

    self.root.append(self.gen_runner_class())

def gen_runner_class(self):
    runner_classdef = self.gen_class_definition("TestRunner", ["object"])

    sleep_method = self.gen_method_definition("rampUpSleeper", ["self"])
    sleep_method.append(self.gen_statement("if grinder.runNumber != 0: return"))
    sleep_method.append(self.gen_statement("tprops = grinder.properties.getPropertySubset('taurus.')"))
    sleep_method.append(self.gen_statement("inc = tprops.getDouble('ramp_up', 0)/tprops.getInt('concurrency', 1)"))
    sleep_method.append(self.gen_statement("sleep_time = int(1000 * grinder.threadNumber * inc)"))
    sleep_method.append(self.gen_statement("grinder.sleep(sleep_time, 0)"))
    sleep_method.append(self.gen_statement("if sleep_time: grinder.logger.info('slept for %sms' % sleep_time)"))
    sleep_method.append(self.gen_statement("else: grinder.logger.info('No sleep needed')"))
    sleep_method.append(self.gen_new_line())
    runner_classdef.append(sleep_method)

    main_method = self.gen_method_definition("__call__", ["self"])
    main_method.append(self.gen_statement("self.rampUpSleeper()"))

    for req in self.scenario.get_requests():
        if not isinstance(req, HTTPRequest):
            msg = "Grinder script generator doesn't support '%s' blocks, skipping"
            self.log.warning(msg, req.NAME)
            continue

        method = req.method.upper()
        url = req.url
        local_headers = req.headers
        params = "[]"
        headers = self.__list_to_nvpair_list(iteritems(local_headers))
        main_method.append(self.gen_statement("request.%s(%r, %s, %s)" % (method, url, params, headers)))

        think_time = dehumanize_time(req.get_think_time())
        if think_time:
            main_method.append(self.gen_statement("grinder.sleep(%s)" % int(think_time * 1000)))

    runner_classdef.append(main_method)
    return runner_classdef

def get_interpolated_paths(self, parameter_interpolation=INTERPOLATE_WITH_VALUES):
    paths = OrderedDict()
    replacer_regex = lambda name: r'(?<!\$)(\{' + name + r'\})'  # replace '{name}', but skip '${name}'

    def interpolated_value(param):
        # shared between operation-level and path-level parameters
        if parameter_interpolation == Swagger.INTERPOLATE_WITH_VALUES:
            return str(Swagger.get_data_for_type(param.type, param.format))
        elif parameter_interpolation == Swagger.INTERPOLATE_WITH_JMETER_VARS:
            return "${" + param.name + "}"
        else:
            return None

    for path, path_obj in iteritems(self.paths):
        new_path = path

        for method in Swagger.METHODS:
            operation = getattr(path_obj, method)
            if operation is not None:
                for _, param in iteritems(operation.parameters):
                    if param.location == "path":
                        value = interpolated_value(param)
                        if value is not None:
                            new_path = re.sub(replacer_regex(param.name), value, new_path)

        for _, param in iteritems(path_obj.parameters):
            if param.location == "path":
                value = interpolated_value(param)
                if value is not None:
                    new_path = re.sub(replacer_regex(param.name), value, new_path)

        path_obj = copy.deepcopy(path_obj)
        paths[new_path] = path_obj
    return paths

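# Illustration: with INTERPOLATE_WITH_JMETER_VARS, a spec path like
# '/tests/{testId}' becomes '/tests/${testId}', while a literal '${var}'
# segment is left untouched thanks to the negative lookbehind in the regex.
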
def merge_datapoints(self, max_full_ts):
    reader_id = self.file.name + "@" + str(id(self))
    for key in sorted(self.join_buffer.keys(), key=int):
        if int(key) <= max_full_ts:
            sec_data = self.join_buffer.pop(key)
            self.log.debug("Processing complete second: %s", key)
            point = DataPoint(int(key))
            point[DataPoint.SOURCE_ID] = reader_id
            for sid, item in iteritems(sec_data):
                point.merge_point(self.point_from_locust(key, sid, item))
            point.recalculate()
            yield point

def _calculate_datapoints(self, final_pass=False):
    """
    Override ResultsProvider._calculate_datapoints
    """
    self._process_underlings(final_pass)

    self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer), self.buffer.keys())
    if not self.buffer:
        return

    timestamps = sorted(self.buffer.keys())
    while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
        tstamp = timestamps.pop(0)
        self.log.debug("Merging into %s", tstamp)
        points_to_consolidate = self.buffer.pop(tstamp)

        for subresult in points_to_consolidate:
            if not subresult[DataPoint.SOURCE_ID]:
                raise ValueError("Reader must provide source ID for datapoint")
            # remember the last-known concurrency per source, so it can be
            # carried over into seconds where that source reported nothing
            self._sticky_concurrencies[subresult[DataPoint.SOURCE_ID]] = {
                label: kpiset[KPISet.CONCURRENCY]
                for label, kpiset in iteritems(subresult[DataPoint.CURRENT])
            }

        if len(points_to_consolidate) == 1:
            self.log.debug("Bypassing consolidation because of single result")
            point = points_to_consolidate[0]
            point[DataPoint.SUBRESULTS] = [points_to_consolidate[0]]
        else:
            point = DataPoint(tstamp, self.track_percentiles)
            for subresult in points_to_consolidate:
                self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
                point.merge_point(subresult, do_recalculate=False)
            point.recalculate()

        current_sids = [x[DataPoint.SOURCE_ID] for x in point[DataPoint.SUBRESULTS]]
        for sid in self._sticky_concurrencies:
            if sid not in current_sids:
                self.log.debug("Adding sticky concurrency for %s", sid)
                self._add_sticky_concurrency(point, sid)

        point[DataPoint.SOURCE_ID] = self.__class__.__name__ + '@' + str(id(self))
        yield point

def test_suites(self):
    return [key for key, _ in iteritems(self)]