def send_kpi_data(self, data_buffer, is_check_response=True):
    """
    Sends online KPI data to the service.

    :type data_buffer: list[bzt.modules.aggregator.DataPoint]
    :param is_check_response: when True, inspect the server's session status
        and abort if the test was stopped through the Web UI
    :raise RuntimeError: when the server reports a non-200 response code
    :raise ManualShutdown: when the test was interrupted through the Web UI
    """
    data = []
    label_index = {}  # label -> json_item; avoids O(n^2) rescans of `data` per interval

    for sec in data_buffer:
        self.first_ts = min(self.first_ts, sec[DataPoint.TIMESTAMP])
        self.last_ts = max(self.last_ts, sec[DataPoint.TIMESTAMP])

        for lbl, item in iteritems(sec[DataPoint.CURRENT]):
            # the empty label is the overall aggregate; the service calls it "ALL"
            label = "ALL" if lbl == '' else lbl

            json_item = label_index.get(label)
            if json_item is None:
                json_item = self.__label_skel(label)
                label_index[label] = json_item
                data.append(json_item)  # keep first-seen order in the payload

            interval_item = self.__interval_json(item, sec)
            for r_code, cnt in iteritems(item[KPISet.RESP_CODES]):
                interval_item['rc'].append({"n": cnt, "rc": r_code})
            json_item['intervals'].append(interval_item)

            # cumulative stats are re-written on every pass; only the last one matters
            cumul = sec[DataPoint.CUMULATIVE][lbl]
            json_item['n'] = cumul[KPISet.SAMPLE_COUNT]
            json_item["summary"] = self.__summary_json(cumul)

    data = {"labels": data}

    url = self.data_address + "/submit.php?session_id=%s&signature=%s&test_id=%s&user_id=%s"
    url = url % (self.active_session_id, self.data_signature, self.test_id, self.user_id)
    url += "&pq=0&target=labels_bulk&update=1"
    hdr = {"Content-Type": " application/json"}
    response = self._request(url, to_json(data), headers=hdr)

    if response and 'response_code' in response and response['response_code'] != 200:
        raise RuntimeError("Failed to feed data, response code %s" % response['response_code'])

    if response and 'result' in response and is_check_response:
        result = response['result']['session']
        self.log.debug("Result: %s", result)
        # statusCode > 100 presumably means a terminal state on the server — TODO confirm
        if 'statusCode' in result and result['statusCode'] > 100:
            self.log.info("Test was stopped through Web UI: %s", result['status'])
            raise ManualShutdown("The test was interrupted through Web UI")
def __deepcopy__(self, memo):
    """Deep-copy this KPISet, carrying over running sums and percentile levels."""
    clone = KPISet(self.perc_levels)
    clone.sum_rt = self.sum_rt
    clone.sum_lt = self.sum_lt
    clone.sum_cn = self.sum_cn
    for key in self:
        clone[key] = copy.deepcopy(self[key], memo)
    return clone
def __merge_to_cumulative(self, current):
    """
    Merge each per-label KPISet of `current` into self.cumulative.

    :param current: mapping of label -> KPISet
    """
    for lbl, kpiset in iteritems(current):
        # NOTE(review): relies on .get() with a default KPISet; presumably
        # self.cumulative is a BetterDict whose get() stores the default back —
        # plain dict.get would silently drop new labels. Verify against the class.
        self.cumulative.get(lbl, KPISet(self.track_percentiles)).merge_kpis(kpiset)
def dict_key(dictnr, value):
    """
    Reverse lookup: find the key that maps to `value`.

    :type dictnr: dict
    :param value: value to search for
    :return: the first key (in iteration order) whose value equals `value`
    :raise KeyError: when no key maps to `value`
    """
    for candidate in dictnr:
        if dictnr[candidate] == value:
            return candidate
    raise KeyError("Value not found in dict: %s" % value)
def __merge_kpis(self, src, dst, sid):
    """
    Merge each label's KPISet from src into the matching entry of dst.

    :param src: mapping of label -> KPISet (or its serialized dict form)
    :param dst: mapping of label -> KPISet
    :param sid: source id, passed through to merge_kpis
    """
    for label, kpiset in iteritems(src):
        # NOTE(review): assumes dst.get() stores the default entry (BetterDict
        # behavior); a plain dict.get would lose newly-created labels — verify
        target = dst.get(label, KPISet(self.perc_levels))
        if not isinstance(kpiset, KPISet):
            # deserialize plain-dict form and align its percentile levels with ours
            kpiset = KPISet.from_dict(kpiset)
            kpiset.perc_levels = self.perc_levels
        target.merge_kpis(kpiset, sid)
def merge(self, src):
    """
    Deep merge another dict into this one.

    Key prefixes control the merge:
      '~' - overwrite: drop any existing value for the key, then set it
      '^' - eliminate: remove the key from self; the provided value is ignored

    :type src: dict
    :raise ValueError: when src is not a dict, or when a plain dict would be
        merged over an existing non-BetterDict dict value
    """
    if not isinstance(src, dict):
        raise ValueError("Loaded object is not dict: %s" % src)

    for key, val in iteritems(src):
        if len(key) and key[0] == '~':  # overwrite flag
            if key[1:] in self:
                self.pop(key[1:])
            key = key[1:]  # continue merging under the stripped key name
            self.log.debug("Overridden key: %s", key)

        if len(key) and key[0] == '^':  # eliminate flag
            # TODO: improve logic - use val contents to see what to eliminate
            self.pop(key[1:])
            self.log.debug("Removed key: %s", key)
            continue

        if isinstance(val, dict):
            dst = self.get(key)
            if isinstance(dst, BetterDict):
                dst.merge(val)  # recurse into nested BetterDict
            elif isinstance(dst, Counter):
                self[key] += val  # Counter merges additively
            elif isinstance(dst, dict):
                raise ValueError("Mix of DictOfDict and dict is forbidden")
            else:
                # existing value is a scalar/other type; replace it wholesale
                self.log.warning("Overwritten key: %s", key)
                self[key] = val
        elif isinstance(val, list):
            self.__ensure_list_type(val)
            if key not in self:
                self[key] = []
            if isinstance(self[key], list):
                self[key].extend(val)  # lists concatenate rather than replace
            else:
                self.log.warning("Overridden key: %s", key)
                self[key] = val
        else:
            self[key] = val

    return
def from_dict(obj):
    """
    Restore a KPISet from its serialized plain-dict form.

    :type obj: dict
    :rtype: KPISet
    """
    inst = KPISet()
    for key, val in iteritems(obj):
        inst[key] = val
    # running sums are not serialized; rebuild them from average * sample count
    inst.sum_cn = obj[inst.AVG_CONN_TIME] * obj[inst.SAMPLE_COUNT]
    inst.sum_lt = obj[inst.AVG_LATENCY] * obj[inst.SAMPLE_COUNT]
    inst.sum_rt = obj[inst.AVG_RESP_TIME] * obj[inst.SAMPLE_COUNT]
    # keys come back as strings (presumably after a JSON round-trip); restore floats
    inst.perc_levels = [float(x) for x in inst[inst.PERCENTILES].keys()]
    inst[inst.RESP_TIMES] = {float(level): inst[inst.RESP_TIMES][level] for level in inst[inst.RESP_TIMES].keys()}
    for error in inst[KPISet.ERRORS]:
        error['urls'] = Counter(error['urls'])  # Counter was serialized as a plain dict
    return inst
def __dict_to_overrides(cls, obj, path=''):
    """
    Flatten a nested dict/list into properties-like OVERRIDES text.

    :type path: str or unicode
    :return: newline-terminated "dotted.path=value" lines
    """
    if isinstance(obj, dict):
        return ''.join(cls.__dict_to_overrides(item, '%s.%s' % (path, key))
                       for key, item in iteritems(obj))
    if isinstance(obj, list):
        return ''.join(cls.__dict_to_overrides(item, '%s.%s' % (path, idx))
                       for idx, item in enumerate(obj))
    # leaf value: path[1:] strips the leading dot accumulated by recursion
    return "%s=%s\n" % (path[1:], obj)
def send_error_summary(self, data_buffer):
    """
    Sends error summary file built from the most recent DataPoint.

    :type data_buffer: list[bzt.modules.aggregator.DataPoint]
    """
    if not data_buffer:
        return

    # only the latest DataPoint is used: its cumulative section already covers history
    recent = data_buffer[-1]
    if not recent[DataPoint.CUMULATIVE][''][KPISet.ERRORS]:
        return  # no errors at all — nothing to upload

    errors = self.__errors_skel(recent[DataPoint.TIMESTAMP], self.active_session_id, self.test_id, self.user_id)
    for label, label_data in iteritems(recent[DataPoint.CUMULATIVE]):
        if not label_data[KPISet.ERRORS]:
            continue
        if label == '':
            label = 'ALL'  # empty label is the overall aggregate
        error_item = self.__error_item_skel(label)
        for err_item in label_data[KPISet.ERRORS]:
            if err_item["type"] == KPISet.ERRTYPE_ASSERT:
                # assertion failures are reported separately from response errors
                error_item['assertionsCount'] += err_item['cnt']
                error_item['assertions'].append({
                    "name": "All Assertions",
                    "failureMessage": err_item['msg'],
                    "failure": True,
                    "error": False,
                    "count": err_item['cnt']
                })
            else:
                error_item['count'] += err_item['cnt']
                error_item['responseInfo'].append({
                    "description": err_item['msg'],
                    "code": err_item['rc'],
                    "count": err_item['cnt'],
                })
        # NOTE(review): 'summery' (sic) is part of the wire format / file name the
        # service expects — do not "fix" the spelling without a server-side change
        errors['summery']['labels'].append(error_item)
    self.upload_file("sample.jtl.blazemeter.summery.json", to_json(errors))
def __write_base_props(self, fds):
    """
    Write base properties (properties-file contents plus inline settings) to fds.

    :param fds: writable file-like object
    """
    props_file = self.settings.get("properties-file", "")
    if props_file:
        # "Properies" (sic) is emitted output — reproduced exactly
        fds.write("# Base Properies File Start: %s\n" % props_file)
        with open(props_file) as handle:
            fds.write(handle.read())
        fds.write("# Base Properies File End: %s\n\n" % props_file)

    inline_props = self.settings.get("properties")
    if inline_props:
        fds.write("# Base Properies Start\n")
        fds.writelines("%s=%s\n" % pair for pair in iteritems(inline_props))
        fds.write("# Base Properies End\n\n")
def default(self, obj):
    """
    JSON-encoder hook: represent a dumpable object as its public attributes.

    Protected/private attributes and non-dumpable values are filtered out.

    :param obj: object being encoded
    :return: dict of public attributes, or None when obj is not dumpable
    """
    if not self.__dumpable(obj):
        return None
    return {key: val
            for key, val in iteritems(obj.__dict__)
            if self.__dumpable(val) and not key.startswith('_')}
def __write_scenario_props(self, fds, scenario):
    """
    Write scenario-level properties (properties-file contents plus inline props) to fds.

    :param fds: writable file-like object
    :type scenario: dict
    """
    props_file = scenario.get("properties-file", "")
    if props_file:
        # "Properies" (sic) is emitted output — reproduced exactly
        fds.write("# Script Properies File Start: %s\n" % props_file)
        with open(props_file) as handle:
            fds.write(handle.read())
        fds.write("# Script Properies File End: %s\n\n" % props_file)

    inline_props = scenario.get("properties")
    if inline_props:
        fds.write("# Scenario Properies Start\n")
        fds.writelines("%s=%s\n" % pair for pair in iteritems(inline_props))
        fds.write("# Scenario Properies End\n\n")