def point_from_locust(timestamp, sid, data):
    """
    Convert one locust slave report into a DataPoint for the given second.

    :type timestamp: str
    :type sid: str
    :type data: dict
    :rtype: DataPoint
    """
    result = DataPoint(int(timestamp))
    result[DataPoint.SOURCE_ID] = sid

    totals = KPISet()
    for entry in data['stats']:
        # skip labels that recorded no samples for this particular second
        if timestamp not in entry['num_reqs_per_sec']:
            continue

        reqs = entry['num_reqs_per_sec'][timestamp]
        label_kpis = KPISet()
        label_kpis[KPISet.SAMPLE_COUNT] = reqs
        label_kpis[KPISet.CONCURRENCY] = data['user_count']
        label_kpis[KPISet.BYTE_COUNT] = entry['total_content_length']
        if entry['num_requests']:
            # mean response time in seconds over the whole run so far
            mean_rt = (entry['total_response_time'] / 1000.0) / entry['num_requests']
            label_kpis.sum_rt = reqs * mean_rt

        result[DataPoint.CURRENT][entry['name']] = label_kpis
        totals.merge_kpis(label_kpis)

    result[DataPoint.CURRENT][''] = totals
    result.recalculate()
    return result
def point_from_locust(timestamp, sid, data):
    """
    Convert one locust stats report (including error records) into a DataPoint.

    :type timestamp: str
    :type sid: str
    :type data: dict
    :rtype: DataPoint
    """
    point = DataPoint(int(timestamp))
    point[DataPoint.SOURCE_ID] = sid

    overall = KPISet()
    for item in data['stats']:
        # only labels that have samples for this second contribute
        if timestamp not in item['num_reqs_per_sec']:
            continue

        reqs = item['num_reqs_per_sec'][timestamp]
        kpiset = KPISet()
        kpiset[KPISet.SAMPLE_COUNT] = reqs
        kpiset[KPISet.CONCURRENCY] = data['user_count']
        kpiset[KPISet.BYTE_COUNT] = item['total_content_length']
        if item['num_requests']:
            # overall mean response time in seconds, scaled to this second's requests
            avg_rt = (item['total_response_time'] / 1000.0) / item['num_requests']
            kpiset.sum_rt = reqs * avg_rt

        for err in data['errors'].values():
            if err['name'] != item['name']:
                continue
            count = err['occurences']  # NOTE: 'occurences' is locust's own (misspelled) field name
            new_err = KPISet.error_item_skel(err['error'], None, count, KPISet.ERRTYPE_ERROR, Counter())
            KPISet.inc_list(kpiset[KPISet.ERRORS], ("msg", err['error']), new_err)
            kpiset[KPISet.FAILURES] += count

        point[DataPoint.CURRENT][item['name']] = kpiset
        overall.merge_kpis(kpiset)

    point[DataPoint.CURRENT][''] = overall
    point.recalculate()
    return point
def random_datapoint(n):
    """Build a DataPoint at timestamp ``n`` filled with random KPIs (test helper)."""
    point = DataPoint(n)
    overall = point[DataPoint.CURRENT].get('', KPISet())
    overall[KPISet.CONCURRENCY] = r(100)
    overall[KPISet.SAMPLE_COUNT] = int(100 * r(1000)) + 1  # +1 keeps the count non-zero
    overall[KPISet.SUCCESSES] = int(overall[KPISet.SAMPLE_COUNT] * random())
    overall[KPISet.FAILURES] = overall[KPISet.SAMPLE_COUNT] - overall[KPISet.SUCCESSES]

    # monotonically larger caps for higher percentile levels
    for level, cap in (('25.0', 10), ('50.0', 20), ('75.0', 30), ('90.0', 40), ('99.0', 50), ('100.0', 100)):
        overall[KPISet.PERCENTILES][level] = r(cap)

    overall[KPISet.RESP_CODES][rc()] = 1

    overall[KPISet.AVG_RESP_TIME] = r(100)
    overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
    overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0

    # keep the running sums consistent with the averages above
    overall.sum_rt = overall[KPISet.AVG_RESP_TIME] * overall[KPISet.SAMPLE_COUNT]
    overall.sum_cn = overall[KPISet.AVG_CONN_TIME] * overall[KPISet.SAMPLE_COUNT]
    overall.sum_lt = overall[KPISet.AVG_LATENCY] * overall[KPISet.SAMPLE_COUNT]

    cumul = point[DataPoint.CUMULATIVE].get('', KPISet())
    cumul.merge_kpis(overall)
    cumul.recalculate()
    point.recalculate()

    # re-randomize current averages after recalculation so CURRENT differs from CUMULATIVE
    overall[KPISet.AVG_RESP_TIME] = r(100)
    overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
    overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
    return point
def test_kpiset_merge_many_rtimes(self):
    """After repeated merges, compact_times must cap stored RESP_TIMES at rtimes_len."""
    samples = {}
    for _ in range(1000):
        rtime = round(random() * 20 + 0.1, int(random() * 3) + 2)
        samples[rtime] = int(random() * 3 + 1)

    source = KPISet()
    source[KPISet.RESP_TIMES].update(samples)

    target = KPISet()
    target.rtimes_len = 100
    for _ in range(100):
        target.merge_kpis(source)
        target.compact_times()

    self.assertEqual(100, len(target[KPISet.RESP_TIMES]))
def add_data(self, data):
    """
    Append data

    :type data: bzt.modules.aggregator.DataPoint
    """
    while len(self.body):
        self.body.pop(0)

    self.body.append(Text(("stat-hdr", " Average Times: "), align=RIGHT))

    overall = data.get(self.key).get('', KPISet())
    # receive time is whatever remains after connect and latency are taken out
    recv = overall[KPISet.AVG_RESP_TIME] - overall[KPISet.AVG_CONN_TIME] - overall[KPISet.AVG_LATENCY]

    rows = (
        "Full: %.3f" % overall[KPISet.AVG_RESP_TIME],
        "Connect: %.3f" % overall[KPISet.AVG_CONN_TIME],
        "Latency: %.3f" % overall[KPISet.AVG_LATENCY],
        "~Receive: %.3f" % recv,
    )
    for row in rows:
        self.body.append(Text(("stat-txt", row), align=RIGHT))
def test_merging(self):
    """
    Verify that merge_point accumulates cumulative sum_rt and sample counts,
    and that the average response time is recomputed as sum_rt / count.

    Fix: use ``assertEqual`` — ``assertEquals`` is a deprecated alias that was
    removed from unittest in Python 3.12.
    """
    dst = DataPoint(0)
    src = DataPoint(0)
    src[DataPoint.CUMULATIVE].setdefault('', KPISet())
    src[DataPoint.CUMULATIVE][''].sum_rt = 0.5
    src[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 1

    # first merge: dst simply takes src's values
    dst.merge_point(src)
    self.assertEqual(0.5, dst[DataPoint.CUMULATIVE][''].sum_rt)
    self.assertEqual(0.5, dst[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])

    # second merge: counts add up (1 + 3), sum_rt doubles (0.5 + 0.5)
    src[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 3
    dst.merge_point(src)
    self.assertEqual(4, dst[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT])
    self.assertEqual(1, dst[DataPoint.CUMULATIVE][''].sum_rt)
    self.assertEqual(0.25, dst[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])

    # third merge: 4 + 6 samples, 1.0 + 0.5 sum_rt
    src[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 6
    dst.merge_point(src)
    self.assertEqual(10, dst[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT])
    self.assertEqual(1.5, dst[DataPoint.CUMULATIVE][''].sum_rt)
    self.assertEqual(0.15, dst[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])
def _calculate_datapoints(self, final_pass=False):
    """Fetch KPIs from the service and yield one DataPoint per completed second."""
    if self.master_id is None:
        return

    data = self.client.get_kpis(self.master_id, self.min_ts)

    for label in data:
        if label['kpis']:
            # discard the most recent second: it may still be accumulating samples
            label['kpis'].pop(-1)

    timestamps = []
    for label in data:
        if label['label'] == 'ALL':
            timestamps.extend(kpi['ts'] for kpi in label['kpis'])

    for tstmp in timestamps:
        point = DataPoint(tstmp)
        for label in data:
            # 'ALL' maps to the aggregator's overall ('') label
            name = '' if label['label'] == 'ALL' else label['label']
            for kpi in label['kpis']:
                if kpi['ts'] != tstmp:
                    continue
                kpiset = KPISet()
                kpiset[KPISet.FAILURES] = kpi['ec']
                kpiset[KPISet.CONCURRENCY] = kpi['na']
                kpiset[KPISet.SAMPLE_COUNT] = kpi['n']
                # service reports times in milliseconds; KPISet keeps seconds
                kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0
                kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0
                point[DataPoint.CURRENT][name] = kpiset

        point.recalculate()
        self.min_ts = point[DataPoint.TIMESTAMP] + 1
        yield point
def new():
    """Build a KPISet that tracks only the 100th percentile, seeded with three samples."""
    kpis = KPISet(perc_levels=(100.0,))
    for rtime in (0.1, 0.01, 0.001):
        kpis[KPISet.RESP_TIMES].add(rtime)
    kpis.recalculate()
    return kpis
def __get_kpiset(self, aggr, kpi, label):
    """Translate one raw KPI record plus aggregate percentiles into a KPISet."""
    result = KPISet()
    result[KPISet.FAILURES] = kpi['ec']
    result[KPISet.CONCURRENCY] = kpi['na']
    result[KPISet.SAMPLE_COUNT] = kpi['n']
    assert kpi['n'] > 0 and kpi['n'] >= kpi['ec']
    result[KPISet.SUCCESSES] = kpi['n'] - kpi['ec']
    # incoming times are in milliseconds; KPISet sums are kept in seconds
    result.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0
    result.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0
    for field, level in iteritems({'90line': 90.0, "95line": 95.0, "99line": 99.0}):
        result[KPISet.PERCENTILES][str(level)] = aggr[label][field] / 1000.0
    return result
def add_data(self, data):
    """
    Append data

    :type data: bzt.modules.aggregator.DataPoint
    """
    while len(self.body):
        self.body.pop(0)

    self.body.append(Text(("stat-hdr", " Percentiles: "), align=RIGHT))

    overall = data.get(self.key).get('', KPISet())
    percentiles = overall.get(KPISet.PERCENTILES)
    # sort numerically, not lexically, so '100.0' follows '99.0'
    for level in sorted(percentiles.keys(), key=float):
        line = "%.1f%%: %.3f" % (float(level), percentiles[level])
        self.body.append(Text(("stat-txt", line), align=RIGHT))
def __add_err_diff(self, point, err_diff):
    """Merge freshly-diffed error records into the point's per-label KPI sets."""
    for label in err_diff:
        point_label = '' if label == 'ALL' else label
        current = point[DataPoint.CURRENT]

        if point_label in current:
            kpiset = current[point_label]
        else:
            # errors arrived for a label that has no kpi record this second
            self.log.warning("Got inconsistent kpi/error data for label: %s", point_label)
            kpiset = KPISet()
            current[point_label] = kpiset
            kpiset[KPISet.SAMPLE_COUNT] = sum(item['count'] for item in err_diff[label].values())

        kpiset[KPISet.ERRORS] = self.__get_kpi_errors(err_diff[label])
        kpiset[KPISet.FAILURES] = sum(err['cnt'] for err in kpiset[KPISet.ERRORS])
        kpiset[KPISet.SAMPLE_COUNT] = kpiset[KPISet.SUCCESSES] + kpiset[KPISet.FAILURES]
        assert kpiset[KPISet.SAMPLE_COUNT] > 0, point_label
def __get_datapoint(self, n):
    """Produce a DataPoint at timestamp ``n`` with randomized current KPIs (test helper)."""
    point = DataPoint(n)
    overall = point[DataPoint.CURRENT].get('', KPISet())
    overall[KPISet.CONCURRENCY] = r(100)
    overall[KPISet.SAMPLE_COUNT] = int(100 * r(1000))
    overall[KPISet.FAILURES] = overall[KPISet.SAMPLE_COUNT] / 2.0
    overall[KPISet.AVG_RESP_TIME] = r(100)
    overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
    overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
    # growing caps for higher percentile levels
    for level, cap in (('25', 10), ('50', 20), ('75', 30), ('90', 40), ('99', 50), ('100', 100)):
        overall[KPISet.PERCENTILES][level] = r(cap)
    overall[KPISet.RESP_CODES][rc()] = 1
    return point
def _calculate_datapoints(self, final_pass=False):
    """
    Fetch KPI data from BlazeMeter (with one retry on network failure) and
    yield one DataPoint per completed second.

    Fix: drop the unused ``as exc`` binding on the URLError handler — the
    handler logs via ``traceback.format_exc()`` and never touches ``exc``.

    :param final_pass: part of the provider interface; unused here
    """
    if self.master_id is None:
        return

    try:
        data = self.client.get_kpis(self.master_id, self.min_ts)
    except URLError:
        self.log.warning("Failed to get result KPIs, will retry in %s seconds...", self.client.timeout)
        self.log.debug("Full exception: %s", traceback.format_exc())
        time.sleep(self.client.timeout)
        # single retry; a second failure propagates to the caller
        data = self.client.get_kpis(self.master_id, self.min_ts)
        self.log.info("Succeeded with retry")

    for label in data:
        if label['kpis']:
            label['kpis'].pop(-1)  # never take last second since it could be incomplete

    timestamps = []
    for label in data:
        if label['label'] == 'ALL':
            timestamps.extend([kpi['ts'] for kpi in label['kpis']])

    for tstmp in timestamps:
        point = DataPoint(tstmp)
        for label in data:
            for kpi in label['kpis']:
                if kpi['ts'] != tstmp:
                    continue
                kpiset = KPISet()
                kpiset[KPISet.FAILURES] = kpi['ec']
                kpiset[KPISet.CONCURRENCY] = kpi['na']
                kpiset[KPISet.SAMPLE_COUNT] = kpi['n']
                # service reports times in milliseconds; KPISet keeps seconds
                kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0
                kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0
                # 'ALL' maps to the aggregator's overall ('') label
                point[DataPoint.CURRENT]['' if label['label'] == 'ALL' else label['label']] = kpiset

        point.recalculate()
        self.min_ts = point[DataPoint.TIMESTAMP] + 1
        yield point
def __extend_reported_data(dp_list):
    """Re-key flat 'label-N' entries into nested label->state dicts and rebuild the '' totals."""
    for dp in dp_list:
        for data in dp['cumulative'], dp['current']:
            del data['']
            # snapshot the keys: the dict is mutated while we walk it
            for key in list(data.keys()):
                sep = key.rindex('-')
                original_label = key[:sep]
                state_idx = int(key[sep + 1:])
                kpi_set = data.pop(key)
                data.setdefault(original_label, {})[state_idx] = kpi_set
                # rebuild overall ('') by merging every label's per-state set
                totals = data.setdefault('', dict())
                if state_idx not in totals:
                    totals[state_idx] = KPISet()
                totals[state_idx].merge_kpis(kpi_set)
def add_data(self, data):
    """
    Append data point

    :type data: bzt.modules.aggregator.DataPoint
    """
    while len(self.body):
        self.body.pop(0)

    overall = data.get(self.key).get('', KPISet())
    self.body.append(Text(("stat-hdr", " Response Codes: "), align=RIGHT))

    total = overall[KPISet.SAMPLE_COUNT]
    for code in sorted(overall.get(KPISet.RESP_CODES).keys()):
        count = overall[KPISet.RESP_CODES][code]
        share = 100 * float(count) / total if total else 0
        # color by HTTP status class; anything else renders as non-HTTP
        if code and code[0] in ('2', '3', '4', '5'):
            style = 'stat-%sxx' % code[0]
        else:
            style = 'stat-nonhttp'
        self.body.append(Text((style, "%s: %.2f%% (%s)" % (code, share, count)), align=RIGHT))

    self.body.append(Text(('stat-txt', "All: %.2f%% (%s)" % (100, total)), align=RIGHT))
def add_data(self, data):
    """
    New datapoint notification

    :type data: bzt.modules.aggregator.DataPoint
    """
    overall = data[DataPoint.CURRENT].get('', KPISet())

    # rough busy-user estimate: throughput * avg response time (presumably
    # Little's law — TODO confirm against the graphs widget's expectations)
    active = int(math.floor(overall[KPISet.SAMPLE_COUNT] * overall[KPISet.AVG_RESP_TIME]))
    planned = overall[KPISet.CONCURRENCY]

    self.graphs.append(planned,
                       min(planned, active),
                       overall[KPISet.SAMPLE_COUNT],
                       overall[KPISet.FAILURES],
                       overall[KPISet.AVG_RESP_TIME],
                       overall[KPISet.AVG_CONN_TIME],
                       overall[KPISet.AVG_LATENCY])

    self.latest_stats.add_data(data)
    self.cumulative_stats.add_data(data)