def point_from_locust(self, ts, sid, data):
    """
    Build a DataPoint from one locust stats report for a single second.

    :type ts: str
    :type sid: str
    :type data: dict
    :rtype: DataPoint
    """
    datapoint = DataPoint(int(ts))
    datapoint[DataPoint.SOURCE_ID] = sid
    totals = KPISet()
    for entry in data['stats']:
        reqs_per_sec = entry['num_reqs_per_sec']
        if ts not in reqs_per_sec:
            continue  # this label saw no samples during this second

        label_kpis = KPISet()
        label_kpis[KPISet.SAMPLE_COUNT] = reqs_per_sec[ts]
        label_kpis[KPISet.CONCURRENCY] = data['user_count']
        if entry['num_requests']:
            # total_response_time appears to be in milliseconds; convert to seconds
            mean_rt = (entry['total_response_time'] / 1000.0) / entry['num_requests']
            label_kpis.sum_rt = reqs_per_sec[ts] * mean_rt

        datapoint[DataPoint.CURRENT][entry['name']] = label_kpis
        totals.merge_kpis(label_kpis)

    datapoint[DataPoint.CURRENT][''] = totals
    datapoint.recalculate()
    return datapoint
def _calculate_datapoints(self, final_pass=False):
    """Fetch fresh KPI rows from the service and yield one DataPoint per complete second."""
    if self.master_id is None:
        return

    data = self.client.get_kpis(self.master_id, self.min_ts)

    # drop the trailing second of every label: it may still be accumulating samples
    for label in data:
        if label['kpis']:
            label['kpis'].pop(-1)

    # the 'ALL' label defines which seconds are present in this batch
    seconds = [kpi['ts'] for label in data if label['label'] == 'ALL' for kpi in label['kpis']]

    for second in seconds:
        point = DataPoint(second)
        for label in data:
            label_name = '' if label['label'] == 'ALL' else label['label']
            for kpi in label['kpis']:
                if kpi['ts'] != second:
                    continue
                kpiset = KPISet()
                kpiset[KPISet.FAILURES] = kpi['ec']
                kpiset[KPISet.CONCURRENCY] = kpi['na']
                kpiset[KPISet.SAMPLE_COUNT] = kpi['n']
                # averages arrive in milliseconds; sums are kept in seconds
                kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0
                kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0
                point[DataPoint.CURRENT][label_name] = kpiset
        point.recalculate()
        self.min_ts = point[DataPoint.TIMESTAMP] + 1
        yield point
def random_datapoint(n):
    """
    Generate a DataPoint filled with random KPI values (for console/demo use).

    :param n: timestamp for the generated point
    :rtype: DataPoint
    """
    point = DataPoint(n)
    # setdefault instead of get: plain dict.get would not store the fresh KPISet
    # into the point, leaving it empty; newer sibling code uses setdefault too
    overall = point[DataPoint.CURRENT].setdefault('', KPISet())
    overall[KPISet.CONCURRENCY] = r(100)
    overall[KPISet.SAMPLE_COUNT] = int(100 * r(1000)) + 1
    overall[KPISet.SUCCESSES] = int(overall[KPISet.SAMPLE_COUNT] * random())
    overall[KPISet.FAILURES] = overall[KPISet.SAMPLE_COUNT] - overall[KPISet.SUCCESSES]
    overall[KPISet.PERCENTILES]['25'] = r(10)
    overall[KPISet.PERCENTILES]['50'] = r(20)
    overall[KPISet.PERCENTILES]['75'] = r(30)
    overall[KPISet.PERCENTILES]['90'] = r(40)
    overall[KPISet.PERCENTILES]['99'] = r(50)
    overall[KPISet.PERCENTILES]['100'] = r(100)
    overall[KPISet.RESP_CODES][rc()] = 1
    overall[KPISet.AVG_RESP_TIME] = r(100)
    overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
    overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
    cumul = point[DataPoint.CUMULATIVE].setdefault('', KPISet())
    cumul.merge(overall)
    point.recalculate()
    # randomize averages once more after recalculate(), presumably for display variety
    overall[KPISet.AVG_RESP_TIME] = r(100)
    overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
    overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
    return point
def point_from_locust(timestamp, sid, data):
    """
    Convert a locust report for one second into a DataPoint, including errors.

    :type timestamp: str
    :type sid: str
    :type data: dict
    :rtype: DataPoint
    """
    point = DataPoint(int(timestamp))
    point[DataPoint.SOURCE_ID] = sid
    totals = KPISet()
    for stat in data['stats']:
        per_sec = stat['num_reqs_per_sec']
        if timestamp not in per_sec:
            continue  # no traffic for this label during this second

        label_set = KPISet()
        label_set[KPISet.SAMPLE_COUNT] = per_sec[timestamp]
        label_set[KPISet.CONCURRENCY] = data['user_count']
        label_set[KPISet.BYTE_COUNT] = stat['total_content_length']
        if stat['num_requests']:
            # response times come in milliseconds
            mean_rt = (stat['total_response_time'] / 1000.0) / stat['num_requests']
            label_set.sum_rt = per_sec[timestamp] * mean_rt

        for error in data['errors'].values():
            if error['name'] != stat['name']:
                continue
            # 'occurences' spelling comes from the locust payload itself
            error_item = KPISet.error_item_skel(error['error'], None, error['occurences'],
                                                KPISet.ERRTYPE_ERROR, Counter())
            KPISet.inc_list(label_set[KPISet.ERRORS], ("msg", error['error']), error_item)
            label_set[KPISet.FAILURES] += error['occurences']

        point[DataPoint.CURRENT][stat['name']] = label_set
        totals.merge_kpis(label_set)

    point[DataPoint.CURRENT][''] = totals
    point.recalculate()
    return point
def merge_datapoints(self, max_full_ts):
    """
    Merge per-slave buffered data into consolidated per-second DataPoints.

    :param max_full_ts: latest timestamp known to be fully received
    """
    # key=int replaces the redundant `lambda x: int(x)` (matches sibling readers)
    for key in sorted(self.join_buffer.keys(), key=int):
        if int(key) <= max_full_ts:
            sec_data = self.join_buffer.pop(key)
            self.log.debug("Processing complete second: %s", key)
            point = DataPoint(int(key))
            for sid, item in iteritems(sec_data):
                point.merge_point(self.point_from_locust(key, sid, item))
            point.recalculate()
            yield point
def merge_datapoints(self, max_full_ts):
    """Yield consolidated DataPoints for every fully-buffered second up to max_full_ts."""
    reader_id = self.file.name + "@" + str(id(self))
    for second in sorted(self.join_buffer.keys(), key=int):
        if int(second) > max_full_ts:
            continue  # this second may still receive data from other slaves

        per_slave = self.join_buffer.pop(second)
        self.log.debug("Processing complete second: %s", second)
        merged = DataPoint(int(second))
        merged[DataPoint.SOURCE_ID] = reader_id
        for slave_id, report in iteritems(per_slave):
            merged.merge_point(self.point_from_locust(second, slave_id, report))
        merged.recalculate()
        yield merged
def test_report_criteria_without_label(self):
    """A pass/fail criterion with no 'label' key must still produce a JUnit XML report."""
    obj = JUnitXMLReporter()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict()

    pass_fail = PassFailStatus()
    criterion_config = {
        'stop': True, 'fail': True, 'timeframe': -1,
        'threshold': '150ms', 'condition': '<', 'subject': 'avg-rt',
    }
    criterion = DataCriterion(criterion_config, pass_fail)
    pass_fail.criteria.append(criterion)
    criterion.is_triggered = True
    obj.engine.reporters.append(pass_fail)

    report_path = tempfile.mktemp(suffix='.xml', prefix='junit-xml_passfail', dir=obj.engine.artifacts_dir)
    obj.parameters.merge({"filename": report_path, "data-source": "pass-fail"})

    obj.prepare()
    obj.last_second = DataPoint(0)
    obj.post_process()
def test_log_messages_samples_count(self):
    """FinalStatus summary must log total sample count and failure percentage."""
    obj = FinalStatus()
    obj.engine = EngineEmul  # NOTE(review): class assigned, not an instance — confirm this is intended
    obj.parameters = BetterDict()
    obj.log = logger_mock()
    obj.parameters.merge({"failed-labels": False, "percentiles": False, "summary": True, "test-duration": False})
    datapoint = DataPoint(None, None)
    # overall cumulative set: 59314 samples, exactly half of them failures
    cumul_data = KPISet.from_dict(
        {KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
         KPISet.RESP_TIMES: Counter(
             {0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
              0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
              0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
              0.019: 1, 0.015: 1}),
         KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                          'urls': Counter({'http://192.168.25.8/': 7373}), KPISet.RESP_CODES: '403'}],
         KPISet.STDEV_RESP_TIME: 0.04947974228872108,
         KPISet.AVG_LATENCY: 0.0002825639815220692,
         KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
         KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008,
                                                '90.0': 0.001, '100.0': 0.081, '99.0': 0.003,
                                                '50.0': 0.0}),
         KPISet.SUCCESSES: 29658,
         KPISet.SAMPLE_COUNT: 59314,
         KPISet.CONCURRENCY: 0,
         KPISet.AVG_RESP_TIME: 0.0005440536804127192,
         KPISet.FAILURES: 29656})
    datapoint[DataPoint.CUMULATIVE][""] = cumul_data
    obj.last_sec = datapoint
    obj.post_process()
    # 29656 of 59314 — displayed rounded to 50.00%
    self.assertEqual("Samples count: 59314, 50.00% failures\n", obj.log.info_buf.getvalue())
def test_prepare_no_filename_in_settings(self):
    """Without an explicit filename, the reporter must pick a default path and write the report."""
    obj = JUnitXMLReporter()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict.from_dict({"data-source": "sample-labels"})
    obj.prepare()
    datapoint = DataPoint(0, [])
    cumul_data = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
            0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
            0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
            0.019: 1, 0.015: 1}),
        KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                         'urls': Counter({'http://192.168.25.8/': 7373}), KPISet.RESP_CODES: '403'}],
        KPISet.STDEV_RESP_TIME: 0.04947974228872108,
        KPISet.AVG_LATENCY: 0.0002825639815220692,
        KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 59314,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005440536804127192,
        KPISet.FAILURES: 29656})
    datapoint[DataPoint.CUMULATIVE][""] = cumul_data
    obj.aggregated_second(datapoint)
    obj.post_process()
    # default report path must have been chosen and the file created
    self.assertTrue(os.path.exists(obj.report_file_path))
def _calculate_datapoints(self, final_pass=False):
    """
    Fetch KPI rows from the service (one retry on network error) and yield
    one DataPoint per complete second.
    """
    if self.master_id is None:
        return
    try:
        data = self.client.get_kpis(self.master_id, self.min_ts)
    except URLError:  # fix: exception was bound to an unused name; traceback is logged below
        self.log.warning(
            "Failed to get result KPIs, will retry in %s seconds...",
            self.client.timeout)
        self.log.debug("Full exception: %s", traceback.format_exc())
        time.sleep(self.client.timeout)
        data = self.client.get_kpis(self.master_id, self.min_ts)
        self.log.info("Succeeded with retry")

    for label in data:
        if label['kpis']:
            label['kpis'].pop(
                -1)  # never take last second since it could be incomplete

    timestamps = []
    for label in data:
        if label['label'] == 'ALL':
            timestamps.extend([kpi['ts'] for kpi in label['kpis']])

    for tstmp in timestamps:
        point = DataPoint(tstmp)
        for label in data:
            for kpi in label['kpis']:
                if kpi['ts'] != tstmp:
                    continue
                kpiset = KPISet()
                kpiset[KPISet.FAILURES] = kpi['ec']
                kpiset[KPISet.CONCURRENCY] = kpi['na']
                kpiset[KPISet.SAMPLE_COUNT] = kpi['n']
                # averages arrive in milliseconds; sums are kept in seconds
                kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0
                kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0
                point[
                    DataPoint.CURRENT]['' if label['label'] == 'ALL' else label['label']] = kpiset
        point.recalculate()
        self.min_ts = point[DataPoint.TIMESTAMP] + 1
        yield point
def __init__(self):
    """Compose the three per-second stat lists into one centered columns widget."""
    self.data = DataPoint(0)
    self.percentiles = PercentilesList(DataPoint.CURRENT)
    self.avg_times = AvgTimesList(DataPoint.CURRENT)
    self.rcodes = RCodesList(DataPoint.CURRENT)
    columns = Columns([self.avg_times, self.percentiles, self.rcodes], dividechars=1)
    centered = Padding(columns, align=CENTER)
    super(LatestStats, self).__init__(centered, self.title)
def __init__(self):
    """Compose the cumulative stat lists into one centered columns widget."""
    self.data = DataPoint(0)
    self.percentiles = PercentilesList(DataPoint.CUMULATIVE)
    self.avg_times = AvgTimesList(DataPoint.CUMULATIVE)
    self.rcodes = RCodesList(DataPoint.CUMULATIVE)
    columns = Columns([self.avg_times, self.percentiles, self.rcodes], dividechars=1)
    centered = Padding(columns, align=CENTER)
    super(CumulativeStats, self).__init__(centered, "Cumulative Stats")
def point_from_locust(timestamp, sid, data):
    """
    Convert a locust report for one second into a DataPoint with errors and successes.

    :type timestamp: str
    :type sid: str
    :type data: dict
    :rtype: DataPoint
    """
    point = DataPoint(int(timestamp))
    point[DataPoint.SOURCE_ID] = sid
    totals = KPISet()
    for stat in data['stats']:
        per_sec = stat['num_reqs_per_sec']
        if timestamp not in per_sec:
            continue  # no traffic for this label during this second

        label_set = KPISet()
        label_set[KPISet.SAMPLE_COUNT] = per_sec[timestamp]
        label_set[KPISet.CONCURRENCY] = data['user_count']
        label_set[KPISet.BYTE_COUNT] = stat['total_content_length']
        if stat['num_requests']:
            # response times come in milliseconds
            mean_rt = (stat['total_response_time'] / 1000.0) / stat['num_requests']
            label_set.sum_rt = per_sec[timestamp] * mean_rt

        for error in data['errors'].values():
            if error['name'] != stat['name']:
                continue
            # 'occurences' spelling comes from the locust payload itself
            error_item = KPISet.error_item_skel(error['error'], None, error['occurences'],
                                                KPISet.ERRTYPE_ERROR, Counter(), None)
            KPISet.inc_list(label_set[KPISet.ERRORS], ("msg", error['error']), error_item)
            label_set[KPISet.FAILURES] += error['occurences']

        label_set[KPISet.SUCCESSES] = label_set[KPISet.SAMPLE_COUNT] - label_set[KPISet.FAILURES]
        point[DataPoint.CURRENT][stat['name']] = label_set
        totals.merge_kpis(label_set, sid)

    point[DataPoint.CURRENT][''] = totals
    point.recalculate()
    return point
def _calculate_datapoints(self, final_pass=False):
    """Fetch KPI rows (retrying once on network failure) and yield per-second DataPoints."""
    if self.master_id is None:
        return

    try:
        data = self.client.get_kpis(self.master_id, self.min_ts)
    except URLError:
        self.log.warning("Failed to get result KPIs, will retry in %s seconds...", self.client.timeout)
        self.log.debug("Full exception: %s", traceback.format_exc())
        time.sleep(self.client.timeout)
        data = self.client.get_kpis(self.master_id, self.min_ts)
        self.log.info("Succeeded with retry")

    # drop the possibly-incomplete trailing second of every label
    for label in data:
        if label['kpis']:
            label['kpis'].pop(-1)

    # the 'ALL' label defines which seconds are present in this batch
    seconds = [kpi['ts'] for label in data if label['label'] == 'ALL' for kpi in label['kpis']]

    for second in seconds:
        point = DataPoint(second)
        for label in data:
            name = '' if label['label'] == 'ALL' else label['label']
            for kpi in label['kpis']:
                if kpi['ts'] != second:
                    continue
                kpiset = KPISet()
                kpiset[KPISet.FAILURES] = kpi['ec']
                kpiset[KPISet.CONCURRENCY] = kpi['na']
                kpiset[KPISet.SAMPLE_COUNT] = kpi['n']
                # averages arrive in milliseconds; sums are kept in seconds
                kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0
                kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0
                point[DataPoint.CURRENT][name] = kpiset
        point.recalculate()
        self.min_ts = point[DataPoint.TIMESTAMP] + 1
        yield point
def _calculate_datapoints(self, final_pass=False):
    """
    Query the master for KPI and aggregate data and yield one DataPoint per second.

    :param final_pass: when True, the trailing (possibly incomplete) second is kept
    """
    if self.master is None:
        return

    data, aggr_raw = self.query_data()
    # index aggregate rows by label name for __generate_kpisets
    aggr = {}
    for label in aggr_raw:
        aggr[label['labelName']] = label

    for label in data:
        if label.get('kpis') and not final_pass:
            label['kpis'].pop(
                -1)  # never take last second since it could be incomplete

    # the 'ALL' label defines which seconds are present in this batch
    timestamps = []
    for label in data:
        if label.get('label') == 'ALL':
            timestamps.extend([kpi['ts'] for kpi in label.get('kpis', [])])

    self.handle_errors = True  # fetch error details only once per batch
    for tstmp in timestamps:
        point = DataPoint(tstmp)
        point[DataPoint.SOURCE_ID] = self.master['id']
        self.__generate_kpisets(aggr, data, point, tstmp)

        if self.handle_errors:
            self.handle_errors = False
            self.cur_errors = self.__get_errors_from_bza()
            # only the delta since the previous poll is attached to this point
            err_diff = self._get_err_diff()
            if err_diff:
                self.__add_err_diff(point, err_diff)
                self.prev_errors = self.cur_errors

        point.recalculate()
        self.min_ts = point[DataPoint.TIMESTAMP] + 1
        yield point
def __init__(self):
    """Build the cumulative-stats pane: avg/percentile/rcode columns above a per-label pile."""
    self.data = DataPoint(0)
    self._start_time = None
    self.percentiles = PercentilesList(DataPoint.CUMULATIVE)
    self.avg_times = AvgTimesList(DataPoint.CUMULATIVE)
    self.rcodes = RCodesList(DataPoint.CUMULATIVE)
    self.labels_pile = LabelsPile(DataPoint.CUMULATIVE)
    top_row = Columns([self.avg_times, self.percentiles, self.rcodes], dividechars=1)
    stacked = Pile([top_row, self.labels_pile])
    centered = Padding(stacked, align=CENTER)
    super(CumulativeStats, self).__init__(centered, self.title + ': waiting for data...')
def test_short_data(self):
    """A criterion fed data only after shutdown must still trigger in post_process."""
    processor = CriteriaProcessor(["failures>0%, stop as failed"], self)
    self.obj.processors.append(processor)

    point = DataPoint(0)
    # 1600 == 100 * 16; all samples failed
    point[DataPoint.CUMULATIVE] = {'': {KPISet.FAILURES: 1600, KPISet.SAMPLE_COUNT: 1600}}

    self.obj.check()
    self.obj.shutdown()
    self.obj.aggregated_second(point)
    self.assertRaises(AutomatedShutdown, self.obj.post_process)
def random_datapoint(n):
    """
    Produce a DataPoint at timestamp ``n`` filled with random yet self-consistent KPIs.

    :rtype: DataPoint
    """
    point = DataPoint(n)
    overall = point[DataPoint.CURRENT].setdefault('', KPISet())
    overall[KPISet.CONCURRENCY] = r(100)
    overall[KPISet.SAMPLE_COUNT] = int(100 * r(1000)) + 1
    overall[KPISet.SUCCESSES] = int(overall[KPISet.SAMPLE_COUNT] * random())
    overall[KPISet.FAILURES] = overall[KPISet.SAMPLE_COUNT] - overall[KPISet.SUCCESSES]
    overall[KPISet.BYTE_COUNT] = int(random() * 1000) + 1

    # percentile ladder: higher percentile gets a wider random range
    for percentile, spread in (('25.0', 10), ('50.0', 20), ('75.0', 30),
                               ('90.0', 40), ('99.0', 50), ('100.0', 100)):
        overall[KPISet.PERCENTILES][percentile] = r(spread)

    overall[KPISet.RESP_CODES][rc()] = 1

    overall[KPISet.AVG_RESP_TIME] = r(100)
    overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
    overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0

    # keep the running sums consistent with the averages above
    sample_count = overall[KPISet.SAMPLE_COUNT]
    overall.sum_rt = overall[KPISet.AVG_RESP_TIME] * sample_count
    overall.sum_cn = overall[KPISet.AVG_CONN_TIME] * sample_count
    overall.sum_lt = overall[KPISet.AVG_LATENCY] * sample_count

    cumul = point[DataPoint.CUMULATIVE].setdefault('', KPISet())
    cumul.merge_kpis(overall)
    cumul.recalculate()
    point.recalculate()

    # randomize the averages once more after recalculate(), presumably for display variety
    overall[KPISet.AVG_RESP_TIME] = r(100)
    overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
    overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
    return point
def test_short_data(self):
    """A criterion fed data only after shutdown must still trigger in post_process."""
    config = DataCriterion.string_to_config("failures>0%, stop as failed")
    self.obj.criteria.append(DataCriterion(config, self.obj))

    point = DataPoint(0)
    # 1600 == 100 * 16; all samples failed
    point[DataPoint.CUMULATIVE] = {'': {KPISet.FAILURES: 1600, KPISet.SAMPLE_COUNT: 1600}}

    self.obj.check()
    self.obj.shutdown()
    self.obj.aggregated_second(point)
    self.assertRaises(AutomatedShutdown, self.obj.post_process)
def merge_datapoints(self, max_full_ts):
    """Pop every fully-received second from the join buffer and yield its merged DataPoint."""
    complete = [key for key in sorted(self.join_buffer.keys(), key=int) if int(key) <= max_full_ts]
    for key in complete:
        per_source = self.join_buffer.pop(key)
        self.log.debug("Processing complete second: %s", key)
        point = DataPoint(int(key))
        for source_id, chunk in iteritems(per_source):
            point.merge_point(self.point_from_locust(key, source_id, chunk))
        point.recalculate()
        yield point
def test_merging(self):
    """merge_point must accumulate sum_rt and recompute the weighted average RT."""
    target = DataPoint(0)
    sample = DataPoint(0)
    sample[DataPoint.CUMULATIVE].setdefault('', KPISet())
    sample[DataPoint.CUMULATIVE][''].sum_rt = 0.5
    sample[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 1

    # first merge: 1 sample, 0.5s total RT => 0.5s average
    target.merge_point(sample)
    self.assertEqual(0.5, target[DataPoint.CUMULATIVE][''].sum_rt)
    self.assertEqual(0.5, target[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])

    # second merge: 3 more samples with the same 0.5s total => 1.0/4 = 0.25 avg
    sample[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 3
    target.merge_point(sample)
    self.assertEqual(4, target[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT])
    self.assertEqual(1, target[DataPoint.CUMULATIVE][''].sum_rt)
    self.assertEqual(0.25, target[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])

    # third merge: 6 more samples => 1.5/10 = 0.15 avg
    sample[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 6
    target.merge_point(sample)
    self.assertEqual(10, target[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT])
    self.assertEqual(1.5, target[DataPoint.CUMULATIVE][''].sum_rt)
    self.assertEqual(0.15, target[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])
def test_ashort_data(self):
    """A criterion fed data only after shutdown must still trigger in post_process."""
    obj = PassFailStatus()
    obj.engine = EngineEmul()
    config = DataCriteria.string_to_config("failures>0%, stop as failed")
    obj.criterias.append(DataCriteria(config, obj))

    point = DataPoint(0)
    # 1600 == 100 * 16; all samples failed
    point[DataPoint.CUMULATIVE] = {'': {KPISet.FAILURES: 1600, KPISet.SAMPLE_COUNT: 1600}}

    obj.check()
    obj.shutdown()
    obj.aggregated_second(point)
    self.assertRaises(AutomatedShutdown, obj.post_process)
def __get_datapoint(self, n):
    """
    Build a half-failing random DataPoint fixture for timestamp ``n``.

    :rtype: DataPoint
    """
    point = DataPoint(n)
    # setdefault instead of get: plain dict.get would not store the fresh KPISet
    # into the point, leaving it empty; newer sibling code uses setdefault too
    overall = point[DataPoint.CURRENT].setdefault('', KPISet())
    overall[KPISet.CONCURRENCY] = r(100)
    overall[KPISet.SAMPLE_COUNT] = int(100 * r(1000))
    overall[KPISet.FAILURES] = overall[KPISet.SAMPLE_COUNT] / 2.0
    overall[KPISet.AVG_RESP_TIME] = r(100)
    overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
    overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
    overall[KPISet.PERCENTILES]['25'] = r(10)
    overall[KPISet.PERCENTILES]['50'] = r(20)
    overall[KPISet.PERCENTILES]['75'] = r(30)
    overall[KPISet.PERCENTILES]['90'] = r(40)
    overall[KPISet.PERCENTILES]['99'] = r(50)
    overall[KPISet.PERCENTILES]['100'] = r(100)
    overall[KPISet.RESP_CODES][rc()] = 1
    return point
def test_xml_format_passfail(self):
    """
    JUnit XML built from pass/fail criteria: triggered criteria become <error>
    entries and the cloud report link lands in <system-out>.
    """
    obj = JUnitXMLReporter()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict()
    obj.engine.provisioning = CloudProvisioning()
    obj.engine.provisioning.results_url = "http://test/report/123"
    pass_fail1 = PassFailStatus()

    fc1_triggered = DataCriterion({'stop': True, 'label': 'Sample 1 Triggered', 'fail': True,
                                   'timeframe': -1, 'threshold': '150ms', 'condition': '<',
                                   'subject': 'avg-rt'}, pass_fail1)

    fc1_not_triggered = DataCriterion({'stop': True, 'label': 'Sample 1 Not Triggered', 'fail': True,
                                       'timeframe': -1, 'threshold': '300ms', 'condition': '>',
                                       'subject': 'avg-rt'}, pass_fail1)

    pass_fail2 = PassFailStatus()

    # NOTE(review): both fc2_* criteria are constructed with pass_fail1 as owner although
    # they are appended to pass_fail2 below — looks like a copy-paste slip; confirm intent
    fc2_triggered = DataCriterion({'stop': True, 'label': 'Sample 2 Triggered', 'fail': True,
                                   'timeframe': -1, 'threshold': '150ms', 'condition': '<=',
                                   'subject': 'avg-rt'}, pass_fail1)

    fc2_not_triggered = DataCriterion({'stop': True, 'label': 'Sample 2 Not Triggered', 'fail': True,
                                       'timeframe': -1, 'threshold': '300ms', 'condition': '=',
                                       'subject': 'avg-rt'}, pass_fail1)

    pass_fail1.criteria.append(fc1_triggered)
    pass_fail1.criteria.append(fc1_not_triggered)
    pass_fail2.criteria.append(fc2_triggered)
    pass_fail2.criteria.append(fc2_not_triggered)

    fc1_triggered.is_triggered = True
    fc2_triggered.is_triggered = True

    obj.engine.reporters.append(pass_fail1)
    obj.engine.reporters.append(pass_fail2)
    obj.engine.reporters.append(BlazeMeterUploader())

    path_from_config = tempfile.mktemp(suffix='.xml', prefix='junit-xml_passfail',
                                       dir=obj.engine.artifacts_dir)

    obj.parameters.merge({"filename": path_from_config, "data-source": "pass-fail"})
    obj.prepare()
    obj.last_second = DataPoint(0)
    obj.post_process()

    with open(obj.report_file_path, 'rb') as fds:
        f_contents = fds.read()

    logging.info("File: %s", f_contents)
    xml_tree = etree.fromstring(f_contents)
    self.assertEqual('testsuites', xml_tree.tag)
    suite = xml_tree.getchildren()[0]
    self.assertEqual('testsuite', suite.tag)
    test_cases = suite.getchildren()
    # two triggered + two non-triggered criteria => four test cases
    self.assertEqual(4, len(test_cases))
    self.assertEqual('testcase', test_cases[0].tag)
    self.assertEqual('error', test_cases[0].getchildren()[1].tag)
    self.assertEqual('error', test_cases[2].getchildren()[1].tag)

    sys_out = test_cases[0].getchildren()[0]
    self.assertEqual('system-out', sys_out.tag)
    self.assertIn('BlazeMeter report link: http://test/report/123', sys_out.text)
def test_xml_format_sample_labels(self):
    """Sample-labels JUnit XML: one testcase per label, with error/failure children and report link."""
    # generate xml, compare hash
    obj = JUnitXMLReporter()
    obj.engine = EngineEmul()
    rep = BlazeMeterUploader()
    rep.results_url = "http://report/123"
    obj.engine.reporters.append(rep)
    obj.parameters = BetterDict()

    path_from_config = tempfile.mktemp(suffix='.xml', prefix='junit-xml-sample-labels',
                                       dir=obj.engine.artifacts_dir)

    # data-source: finalstats by default
    obj.parameters.merge({"filename": path_from_config})
    obj.prepare()

    datapoint = DataPoint(0, [])
    cumul_data = datapoint[DataPoint.CUMULATIVE]

    # overall ('') label: mixed successes, HTTP errors and assertion failures
    cumul_data[""] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
            0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
            0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
            0.019: 1, 0.015: 1}),
        KPISet.ERRORS: [
            {'msg': 'Forbidden', 'cnt': 7300, 'type': 0,
             'urls': Counter({'http://192.168.1.1/anotherquery': 7300}), KPISet.RESP_CODES: '403'},
            {'msg': 'Assertion failed: text /smth/ not found', 'cnt': 73, 'type': 1,
             'urls': Counter({'http://192.168.1.1/anotherquery': 73}), KPISet.RESP_CODES: '200'},
        ],
        KPISet.STDEV_RESP_TIME: 0.04947974228872108,
        KPISet.AVG_LATENCY: 0.0002825639815220692,
        KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 59314,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005440536804127192,
        KPISet.FAILURES: 29656})

    # clean label: no errors at all
    cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
            0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
            0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
            0.081: 1}),
        KPISet.ERRORS: [],
        KPISet.STDEV_RESP_TIME: 0.04073402130687656,
        KPISet.AVG_LATENCY: 1.7196034796682178e-06,
        KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.004, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 29658,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005164542450603551,
        KPISet.FAILURES: 0})

    # failing label: both an HTTP error (type 0) and an assertion failure (type 1)
    cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 6.1707580253574335e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 14941, 0.001: 13673, 0.002: 506, 0.003: 289, 0.004: 103, 0.005: 59,
            0.006: 37, 0.008: 14, 0.007: 13, 0.009: 8, 0.01: 3, 0.011: 2, 0.016: 2,
            0.014: 2, 0.017: 1, 0.013: 1, 0.015: 1, 0.04: 1}),
        KPISet.ERRORS: [
            {'msg': 'Forbidden', 'cnt': 7300, 'type': 0,
             'urls': Counter({'http://192.168.1.1/anotherquery': 7300}), KPISet.RESP_CODES: '403'},
            {'msg': 'Assertion failed: text /smth/ not found', 'cnt': 73, 'type': 1,
             'urls': Counter({'http://192.168.1.1/anotherquery': 73}), KPISet.RESP_CODES: '200'},
        ],
        KPISet.STDEV_RESP_TIME: 0.032465137860758844,
        KPISet.AVG_LATENCY: 0.0005634272997032645,
        KPISet.RESP_CODES: Counter({'403': 29656}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                             '100.0': 0.04, '99.0': 0.003, '50.0': 0.0},
        KPISet.SUCCESSES: 0,
        KPISet.SAMPLE_COUNT: 29656,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005716549770704078,
        KPISet.FAILURES: 29656})

    # another clean label on a different host
    cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
            0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
            0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
            0.081: 1}),
        KPISet.ERRORS: [],
        KPISet.STDEV_RESP_TIME: 0.04073402130687656,
        KPISet.AVG_LATENCY: 1.7196034796682178e-06,
        KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.004, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 29658,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005164542450603551,
        KPISet.FAILURES: 0})

    obj.aggregated_second(datapoint)
    obj.post_process()

    with open(obj.report_file_path, 'rb') as fds:
        f_contents = fds.read()

    logging.info("File: %s", f_contents)
    xml_tree = etree.fromstring(f_contents)
    self.assertEqual('testsuites', xml_tree.tag)
    suite = xml_tree.getchildren()[0]
    self.assertEqual('testsuite', suite.tag)
    self.assertListEqual(['sample_labels', "bzt"], suite.values())
    test_cases = suite.getchildren()
    # three non-overall labels => three test cases
    self.assertEqual(3, len(test_cases))
    self.assertEqual('testcase', test_cases[0].tag)
    self.assertEqual('error', test_cases[0].getchildren()[1].tag)
    self.assertEqual('failure', test_cases[0].getchildren()[2].tag)
    self.assertEqual('system-out', test_cases[0].getchildren()[0].tag)
    self.assertIn('BlazeMeter report link: http://report/123', test_cases[0].getchildren()[0].text)
def test_log_messages_failed_labels(self):
    """FinalStatus with failed-labels enabled logs only the labels that had failures."""
    obj = FinalStatus()
    obj.engine = EngineEmul  # NOTE(review): class assigned, not an instance — confirm this is intended
    obj.parameters = BetterDict()
    obj.log = logger_mock()
    obj.parameters.merge({"failed-labels": True, "percentiles": False, "summary": False})
    datapoint = DataPoint(None, None)
    cumul_data = datapoint[DataPoint.CUMULATIVE]

    # overall ('') label: 59314 samples, half of them failures
    cumul_data[""] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
            0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
            0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
            0.019: 1, 0.015: 1}),
        KPISet.ERRORS: [{
            'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
            'urls': Counter({'http://192.168.1.1/anotherquery': 7373}), KPISet.RESP_CODES: '403'}],
        KPISet.STDEV_RESP_TIME: 0.04947974228872108,
        KPISet.AVG_LATENCY: 0.0002825639815220692,
        KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
        KPISet.PERCENTILES: defaultdict(None, {
            '95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001, '100.0': 0.081,
            '99.0': 0.003, '50.0': 0.0}),
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 59314,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005440536804127192,
        KPISet.FAILURES: 29656})

    # clean label: zero failures, must not be logged
    cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
            0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
            0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
            0.081: 1}),
        KPISet.ERRORS: [],
        KPISet.STDEV_RESP_TIME: 0.04073402130687656,
        KPISet.AVG_LATENCY: 1.7196034796682178e-06,
        KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
        KPISet.PERCENTILES: defaultdict(None, {
            '95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001, '100.0': 0.081,
            '99.0': 0.004, '50.0': 0.0}),
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 29658,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005164542450603551,
        KPISet.FAILURES: 0})

    # failing label: all 29656 samples are 403s — the only label expected in the log
    cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 6.1707580253574335e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 14941, 0.001: 13673, 0.002: 506, 0.003: 289, 0.004: 103, 0.005: 59,
            0.006: 37, 0.008: 14, 0.007: 13, 0.009: 8, 0.01: 3, 0.011: 2, 0.016: 2,
            0.014: 2, 0.017: 1, 0.013: 1, 0.015: 1, 0.04: 1}),
        KPISet.ERRORS: [{
            'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
            'urls': Counter({'http://192.168.1.1/anotherquery': 7373}), KPISet.RESP_CODES: '403'}],
        KPISet.STDEV_RESP_TIME: 0.032465137860758844,
        KPISet.AVG_LATENCY: 0.0005634272997032645,
        KPISet.RESP_CODES: Counter({'403': 29656}),
        KPISet.PERCENTILES: defaultdict(None, {
            '95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001, '100.0': 0.04,
            '99.0': 0.003, '50.0': 0.0}),
        KPISet.SUCCESSES: 0,
        KPISet.SAMPLE_COUNT: 29656,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005716549770704078,
        KPISet.FAILURES: 29656})

    # another clean label on a different host
    cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
            0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
            0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
            0.081: 1}),
        KPISet.ERRORS: [],
        KPISet.STDEV_RESP_TIME: 0.04073402130687656,
        KPISet.AVG_LATENCY: 1.7196034796682178e-06,
        KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
        KPISet.PERCENTILES: defaultdict(None, {
            '95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001, '100.0': 0.081,
            '99.0': 0.004, '50.0': 0.0}),
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 29658,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005164542450603551,
        KPISet.FAILURES: 0})

    obj.last_sec = datapoint
    obj.post_process()
    self.assertEqual(
        "29656 failed samples: http://192.168.1.1/anotherquery\n",
        obj.log.info_buf.getvalue())
def test_xml_format_passfail(self):
    """Older-format pass/fail JUnit XML: root is a flat <testsuite> with four cases."""
    obj = JUnitXMLReporter()
    obj.engine = EngineEmul()
    obj.parameters = BetterDict()
    pass_fail1 = PassFailStatus()

    fc1_triggered = DataCriteria({'stop': True, 'label': 'Sample 1 Triggered', 'fail': True,
                                  'timeframe': -1, 'threshold': '150ms', 'condition': '<',
                                  'subject': 'avg-rt'}, pass_fail1)

    fc1_not_triggered = DataCriteria({'stop': True, 'label': 'Sample 1 Not Triggered', 'fail': True,
                                      'timeframe': -1, 'threshold': '300ms', 'condition': '>',
                                      'subject': 'avg-rt'}, pass_fail1)

    pass_fail2 = PassFailStatus()

    # NOTE(review): both fc2_* criteria are constructed with pass_fail1 as owner although
    # they are appended to pass_fail2 below — looks like a copy-paste slip; confirm intent
    fc2_triggered = DataCriteria({'stop': True, 'label': 'Sample 2 Triggered', 'fail': True,
                                  'timeframe': -1, 'threshold': '150ms', 'condition': '<=',
                                  'subject': 'avg-rt'}, pass_fail1)

    fc2_not_triggered = DataCriteria({'stop': True, 'label': 'Sample 2 Not Triggered', 'fail': True,
                                      'timeframe': -1, 'threshold': '300ms', 'condition': '=',
                                      'subject': 'avg-rt'}, pass_fail1)

    pass_fail1.criterias.append(fc1_triggered)
    pass_fail1.criterias.append(fc1_not_triggered)
    pass_fail2.criterias.append(fc2_triggered)
    pass_fail2.criterias.append(fc2_not_triggered)

    fc1_triggered.is_triggered = True
    fc2_triggered.is_triggered = True

    obj.engine.reporters.append(pass_fail1)
    obj.engine.reporters.append(pass_fail2)
    obj.engine.reporters.append(object())  # a non-PassFail reporter must be skipped

    path_from_config = tempfile.mktemp(suffix='.xml', prefix='junit-xml_passfail',
                                       dir=obj.engine.artifacts_dir)

    obj.parameters.merge({"filename": path_from_config, "data-source": "pass-fail"})
    obj.prepare()
    obj.last_second = DataPoint(0)
    obj.post_process()

    with open(obj.report_file_path, 'rb') as fds:
        f_contents = fds.read()

    xml_tree = etree.fromstring(f_contents)
    self.assertEqual('testsuite', xml_tree.tag)
    # two triggered + two non-triggered criteria => four test cases
    self.assertEqual(4, len(xml_tree.getchildren()))
    self.assertEqual('testcase', xml_tree.getchildren()[0].tag)
    self.assertEqual('error', xml_tree.getchildren()[0].getchildren()[0].tag)
    self.assertEqual('error', xml_tree.getchildren()[2].getchildren()[0].tag)
def __get_datapoint(self):
    """Build a fixed DataPoint fixture: overall plus three per-URL cumulative KPI sets."""
    datapoint = DataPoint(None, None)
    cumul_data = datapoint[DataPoint.CUMULATIVE]

    # overall ('') label: 59314 samples, half of them failures
    cumul_data[""] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
            0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
            0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
            0.019: 1, 0.015: 1}),
        KPISet.ERRORS: [{
            'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
            'urls': Counter({'http://192.168.1.1/anotherquery': 7373}), KPISet.RESP_CODES: '403'}],
        KPISet.STDEV_RESP_TIME: 0.04947974228872108,
        KPISet.AVG_LATENCY: 0.0002825639815220692,
        KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 59314,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005440536804127192,
        KPISet.FAILURES: 29656})

    # clean label: zero failures
    cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
            0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
            0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
            0.081: 1}),
        KPISet.ERRORS: [],
        KPISet.STDEV_RESP_TIME: 0.04073402130687656,
        KPISet.AVG_LATENCY: 1.7196034796682178e-06,
        KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.004, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 29658,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005164542450603551,
        KPISet.FAILURES: 0})

    # failing label: all 29656 samples are 403s
    cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 6.1707580253574335e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 14941, 0.001: 13673, 0.002: 506, 0.003: 289, 0.004: 103, 0.005: 59,
            0.006: 37, 0.008: 14, 0.007: 13, 0.009: 8, 0.01: 3, 0.011: 2, 0.016: 2,
            0.014: 2, 0.017: 1, 0.013: 1, 0.015: 1, 0.04: 1}),
        KPISet.ERRORS: [{
            'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
            'urls': Counter({'http://192.168.1.1/anotherquery': 7373}), KPISet.RESP_CODES: '403'}],
        KPISet.STDEV_RESP_TIME: 0.032465137860758844,
        KPISet.AVG_LATENCY: 0.0005634272997032645,
        KPISet.RESP_CODES: Counter({'403': 29656}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                             '100.0': 0.04, '99.0': 0.003, '50.0': 0.0},
        KPISet.SUCCESSES: 0,
        KPISet.SAMPLE_COUNT: 29656,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005716549770704078,
        KPISet.FAILURES: 29656})

    # another clean label on a different host
    cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
            0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
            0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
            0.081: 1}),
        KPISet.ERRORS: [],
        KPISet.STDEV_RESP_TIME: 0.04073402130687656,
        KPISet.AVG_LATENCY: 1.7196034796682178e-06,
        KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
        KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001,
                             '100.0': 0.081, '99.0': 0.004, '50.0': 0.0},
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 29658,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005164542450603551,
        KPISet.FAILURES: 0})

    return datapoint