def test_log_messages_percentiles(self):
    """Percentiles-only mode must log average times followed by the percentiles table."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict.from_dict({
        "failed-labels": False,
        "percentiles": True,
        "summary": False,
        "test-duration": False,
        "summary-labels": False,
    })
    self.sniff_log(status.log)
    status.startup()
    status.shutdown()
    status.aggregated_second(self.__get_datapoint())
    status.post_process()
    # Expected report, one table row per percentile level.
    expected = "".join(line + "\n" for line in [
        "Average times: total 0.001, latency 0.000, connect 0.000",
        "Percentiles:",
        "+---------------+---------------+",
        "| Percentile, % | Resp. Time, s |",
        "+---------------+---------------+",
        "| 0.0 | 0.0 |",
        "| 50.0 | 0.0 |",
        "| 90.0 | 0.001 |",
        "| 95.0 | 0.001 |",
        "| 99.0 | 0.003 |",
        "| 99.9 | 0.008 |",
        "| 100.0 | 0.081 |",
        "+---------------+---------------+",
    ])
    self.assertEqual(expected, self.log_recorder.info_buff.getvalue())
def test_log_messages_samples_count(self):
    """With only the "summary" section enabled, post_process() must log just
    the total samples count and the failure percentage."""
    obj = FinalStatus()
    obj.engine = EngineEmul()  # fixed: was the EngineEmul class itself, an instance is expected
    obj.parameters = BetterDict()
    obj.log = logger_mock()
    obj.parameters.merge(
        {"failed-labels": False, "percentiles": False, "summary": True, "test-duration": False})
    datapoint = DataPoint(None, None)
    # Cumulative KPIs: 59314 samples, half of them failures (29656 '403' responses).
    cumul_data = KPISet.from_dict(
        {KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
         KPISet.RESP_TIMES: Counter(
             {0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
              0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
              0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
              0.019: 1, 0.015: 1}),
         KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                          'urls': Counter({'http://192.168.25.8/': 7373}),
                          KPISet.RESP_CODES: '403'}],
         KPISet.STDEV_RESP_TIME: 0.04947974228872108,
         KPISet.AVG_LATENCY: 0.0002825639815220692,
         KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
         KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008,
                                                '90.0': 0.001, '100.0': 0.081,
                                                '99.0': 0.003, '50.0': 0.0}),
         KPISet.SUCCESSES: 29658,
         KPISet.SAMPLE_COUNT: 59314,
         KPISet.CONCURRENCY: 0,
         KPISet.AVG_RESP_TIME: 0.0005440536804127192,
         KPISet.FAILURES: 29656})
    datapoint[DataPoint.CUMULATIVE][""] = cumul_data
    obj.last_sec = datapoint
    obj.post_process()
    self.assertEqual("Samples count: 59314, 50.00% failures\n", obj.log.info_buf.getvalue())
def test_log_messages_summary_labels(self):
    """summary-labels mode must render the per-label stats table into the info log."""
    status = FinalStatus()
    status.engine = EngineEmul()
    settings = {"summary-labels": True, "percentiles": False,
                "summary": False, "test-duration": False}
    status.parameters = BetterDict.from_dict(settings)
    self.sniff_log(status.log)
    status.startup()
    status.shutdown()
    status.aggregated_second(self.__get_datapoint())
    status.post_process()
    # One row per request label: the failing one first, then the two passing ones.
    table = "".join(row + "\n" for row in [
        "Request label stats:",
        "+----------------------------------+--------+---------+--------+-----------+",
        "| label | status | succ | avg_rt | error |",
        "+----------------------------------+--------+---------+--------+-----------+",
        "| http://192.168.1.1/anotherquery | FAIL | 0.00% | 0.001 | Forbidden |",
        "| http://192.168.1.1/somequery | OK | 100.00% | 0.001 | |",
        "| http://192.168.100.100/somequery | OK | 100.00% | 0.001 | |",
        "+----------------------------------+--------+---------+--------+-----------+",
    ])
    self.assertIn(table, self.log_recorder.info_buff.getvalue())
def test_log_messages_percentiles(self):
    """Percentile lines must be captured by a handler attached directly to the logger."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    recorder = RecordingHandler()
    status.log.addHandler(recorder)
    status.parameters.merge({"failed-labels": False, "percentiles": True,
                             "summary": False, "test-duration": False})
    status.aggregated_second(self.__get_datapoint())
    status.post_process()
    expected = "".join([
        "Average times: total 0.001, latency 0.000, connect 0.000\n",
        "Percentile 0.0%: 0.000\n",
        "Percentile 50.0%: 0.000\n",
        "Percentile 90.0%: 0.001\n",
        "Percentile 95.0%: 0.001\n",
        "Percentile 99.0%: 0.003\n",
        "Percentile 99.9%: 0.008\n",
        "Percentile 100.0%: 0.081\n",
    ])
    self.assertEqual(expected, recorder.info_buff.getvalue())
    status.log.removeHandler(recorder)
def test_log_messages_samples_count(self):
    """Summary-only reporting must log the samples count and failure rate line."""
    obj = FinalStatus()
    obj.engine = EngineEmul()  # fixed: was the EngineEmul class itself, an instance is expected
    obj.parameters = BetterDict()
    obj.log = logger_mock()
    obj.parameters.merge(
        {"failed-labels": False, "percentiles": False, "summary": True, "test-duration": False})
    datapoint = DataPoint(None, None)
    # Cumulative KPIs: 59314 samples, exactly 50% failed.
    cumul_data = KPISet.from_dict(
        {KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
         KPISet.RESP_TIMES: Counter(
             {0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
              0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
              0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
              0.019: 1, 0.015: 1}),
         KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                          'urls': Counter({'http://192.168.25.8/': 7373}),
                          KPISet.RESP_CODES: '403'}],
         KPISet.STDEV_RESP_TIME: 0.04947974228872108,
         KPISet.AVG_LATENCY: 0.0002825639815220692,
         KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
         KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008,
                                                '90.0': 0.001, '100.0': 0.081,
                                                '99.0': 0.003, '50.0': 0.0}),
         KPISet.SUCCESSES: 29658,
         KPISet.SAMPLE_COUNT: 59314,
         KPISet.CONCURRENCY: 0,
         KPISet.AVG_RESP_TIME: 0.0005440536804127192,
         KPISet.FAILURES: 29656})
    datapoint[DataPoint.CUMULATIVE][""] = cumul_data
    obj.last_sec = datapoint
    obj.post_process()
    self.assertEqual("Samples count: 59314, 50.00% failures\n", obj.log.info_buf.getvalue())
def test_xml_report_test_duration_failed_prepare(self):
    """post_process() must tolerate a missing start_time (prepare() never ran)."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    status.aggregated_second(self.__get_datapoint(ts=100))
    # start_time is still None here; this call must not raise ValueError
    status.post_process()
def test_csv_report_fieldname_order(self):
    """CSV header must list perc_* and rc_* columns in ascending numeric order."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    report_path = status.engine.create_artifact("report", ".csv")
    status.parameters.merge({"dump-csv": report_path})
    status.startup()
    status.aggregated_second(self.__get_datapoint(ts=90))
    status.aggregated_second(self.__get_datapoint(ts=100))
    status.shutdown()
    status.post_process()
    self.assertTrue(os.path.exists(report_path))
    with open(report_path) as report_file:
        header = report_file.readline().strip().split(",")
    # Both column families must come out already sorted by their numeric suffix.
    for prefix in ("perc_", "rc_"):
        values = [float(column[len(prefix):]) for column in header
                  if column.startswith(prefix)]
        self.assertTrue(sorted(values) == values)
def test_log_messages_failed_labels(self):
    """failed-labels mode must name each failing label with its failed-sample count."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict.from_dict({
        "failed-labels": True,
        "percentiles": False,
        "summary": False,
        "test-duration": False,
    })
    self.sniff_log(status.log)
    status.startup()
    status.shutdown()
    status.aggregated_second(self.__get_datapoint())
    status.post_process()
    self.assertIn("29656 failed samples: http://192.168.1.1/anotherquery\n",
                  self.log_recorder.info_buff.getvalue())
def test_log_messages_samples_count(self):
    """Summary section alone should produce a single samples-count line."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict.from_dict({
        "failed-labels": False,
        "percentiles": False,
        "summary": True,
        "test-duration": False,
        "summary-labels": False,
    })
    self.sniff_log(status.log)
    status.aggregated_second(self.__get_datapoint())
    status.startup()
    status.shutdown()
    status.post_process()
    self.assertEqual("Samples count: 59314, 50.00% failures\n",
                     self.log_recorder.info_buff.getvalue())
def test_long_kpi(self):
    """XML dumping must survive a KPI value of the (Python 2 compat) long type."""
    status = FinalStatus()
    status.engine = EngineEmul()
    xml_path = status.engine.create_artifact("status", ".xml")
    status.parameters = BetterDict.from_dict({"dump-xml": xml_path})
    point = random_datapoint(time.time())
    point[point.CUMULATIVE][""]["stdev_rt"] = long(0)
    status.aggregated_second(point)
    status.startup()
    status.shutdown()
    status.post_process()
def test_log_messages_failed_labels(self):
    """Failed-labels output must be visible through a directly attached handler."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    recorder = RecordingHandler()
    status.log.addHandler(recorder)
    status.parameters.merge({"failed-labels": True, "percentiles": False,
                             "summary": False, "test-duration": False})
    status.aggregated_second(self.__get_datapoint())
    status.post_process()
    self.assertIn("29656 failed samples: http://192.168.1.1/anotherquery\n",
                  recorder.info_buff.getvalue())
    status.log.removeHandler(recorder)
def test_log_messages_samples_count(self):
    """Samples-count summary line must be recorded by an attached handler."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    recorder = RecordingHandler()
    status.log.addHandler(recorder)
    status.parameters.merge({"failed-labels": False, "percentiles": False,
                             "summary": True, "test-duration": False})
    status.aggregated_second(self.__get_datapoint())
    status.post_process()
    self.assertEqual("Samples count: 59314, 50.00% failures\n",
                     recorder.info_buff.getvalue())
    status.log.removeHandler(recorder)
def test_log_messages_duration(self):
    """Test duration must be rendered as a human-readable timedelta string."""
    status = FinalStatus()
    status.parameters = BetterDict()
    status.log = logger_mock()
    status.prepare()
    # Rewind the recorded start so the elapsed time is 1 day, 9:20:05.
    status.start_time -= 120005
    status.post_process()
    self.assertEqual("Test duration: 1 day, 9:20:05\n", status.log.info_buf.getvalue())
def test_long_kpi(self):
    """A long-typed stdev KPI value must not break the XML dump pipeline."""
    reporter = FinalStatus()
    reporter.engine = EngineEmul()
    reporter.parameters = BetterDict.from_dict(
        {"dump-xml": reporter.engine.create_artifact("status", ".xml")})
    sample = random_datapoint(time.time())
    sample[sample.CUMULATIVE][""]["stdev_rt"] = long(0)
    reporter.aggregated_second(sample)
    reporter.startup()
    reporter.shutdown()
    reporter.post_process()
def test_log_messages_duration(self):
    """Duration line must appear in the sniffed info log after post_process()."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    self.sniff_log(status.log)
    status.prepare()
    # Shift start back by 120005 seconds -> "1 day, 9:20:05" elapsed.
    status.start_time -= 120005
    status.post_process()
    self.assertEqual("Test duration: 1 day, 9:20:05\n",
                     self.log_recorder.info_buff.getvalue())
def test_dump(self):
    """Dumping XML/CSV artifacts should be announced in the info log."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    recorder = RecordingHandler()
    status.log.addHandler(recorder)
    status.parameters.merge({
        "dump-xml": status.engine.create_artifact("status", ".xml"),
        "dump-csv": status.engine.create_artifact("status", ".csv"),
    })
    status.aggregated_second(random_datapoint(time.time()))
    status.post_process()
    self.assertIn("XML", recorder.info_buff.getvalue())
def test_dump(self):
    """Full lifecycle with both dump targets must mention the XML artifact in the log."""
    reporter = FinalStatus()
    reporter.engine = EngineEmul()
    artifacts = {
        "dump-xml": reporter.engine.create_artifact("status", ".xml"),
        "dump-csv": reporter.engine.create_artifact("status", ".csv"),
    }
    reporter.parameters = BetterDict.from_dict(artifacts)
    self.sniff_log(reporter.log)
    reporter.aggregated_second(random_datapoint(time.time()))
    reporter.startup()
    reporter.shutdown()
    reporter.post_process()
    self.assertIn("XML", self.log_recorder.info_buff.getvalue())
def test_log_messages_duration(self):
    """Duration message must reach a handler attached to the reporter's logger."""
    status = FinalStatus()
    status.parameters = BetterDict()
    recorder = RecordingHandler()
    status.log.addHandler(recorder)
    status.prepare()
    status.start_time -= 120005  # pretend the run started 1 day, 9:20:05 ago
    status.post_process()
    self.assertEqual("Test duration: 1 day, 9:20:05\n", recorder.info_buff.getvalue())
    status.log.removeHandler(recorder)
def test_log_messages_duration(self):
    """Duration report must survive a full startup/shutdown lifecycle."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    self.sniff_log(status.log)
    status.prepare()
    status.startup()
    status.shutdown()
    # Backdate the start so post_process() reports 1 day, 9:20:05.
    status.start_time -= 120005
    status.post_process()
    self.assertEqual("Test duration: 1 day, 9:20:05\n",
                     self.log_recorder.info_buff.getvalue())
def test_func_report(self):
    """Functional report: totals go to info, per-test failures and traces to warnings."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    self.sniff_log(status.log)
    status.prepare()
    status.aggregated_results(*self.__get_func_tree())
    status.post_process()
    info_messages = self.log_recorder.info_buff.getvalue()
    warn_messages = self.log_recorder.warn_buff.getvalue()
    self.assertIn("Total: 3 tests", info_messages)
    for fragment in ("Test TestClass.case2 failed: something broke",
                     "stacktrace2",
                     "Test TestClass.case3 failed: something is badly broken",
                     "stacktrace3"):
        self.assertIn(fragment, warn_messages)
def test_func_report(self):
    """Functional results summary and stacktraces must reach the info log."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    recorder = RecordingHandler()
    status.log.addHandler(recorder)
    status.prepare()
    status.aggregated_results(*self.__get_func_tree())
    status.post_process()
    info_messages = recorder.info_buff.getvalue()
    for fragment in ("Total: 3 tests",
                     "Test TestClass.case2", "stacktrace2",
                     "Test TestClass.case3", "stacktrace3"):
        self.assertIn(fragment, info_messages)
    status.log.removeHandler(recorder)
def test_func_report_all_no_stacktrace(self):
    """report-tests=all without stacktraces: every test listed, no traces printed."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict.from_dict({"report-tests": "all", "print-stacktrace": False})
    self.sniff_log(status.log)
    status.prepare()
    status.startup()
    status.shutdown()
    status.aggregated_results(*self.__get_func_tree())
    status.post_process()
    info_messages = self.log_recorder.info_buff.getvalue()
    self.assertIn("Total: 3 tests", info_messages)
    for fragment in ("Test TestClass.case1 - PASSED",
                     "Test TestClass.case2 - FAILED",
                     "Test TestClass.case3 - BROKEN"):
        self.assertIn(fragment, info_messages)
    self.assertNotIn("stacktrace2", info_messages)
    self.assertNotIn("stacktrace3", info_messages)
def test_func_report(self):
    """Functional report through full lifecycle: failures land in the warning log."""
    reporter = FinalStatus()
    reporter.engine = EngineEmul()
    reporter.parameters = BetterDict()
    self.sniff_log(reporter.log)
    reporter.prepare()
    reporter.startup()
    reporter.shutdown()
    reporter.aggregated_results(*self.__get_func_tree())
    reporter.post_process()
    info_messages = self.log_recorder.info_buff.getvalue()
    warnings = self.log_recorder.warn_buff.getvalue()
    self.assertIn("Total: 3 tests", info_messages)
    self.assertIn("Test TestClass.case2 failed: something broke", warnings)
    self.assertIn("stacktrace2", warnings)
    self.assertIn("Test TestClass.case3 failed: something is badly broken", warnings)
    self.assertIn("stacktrace3", warnings)
def test_func_report_all_no_stacktrace(self):
    """With print-stacktrace disabled, statuses are listed but traces are omitted."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    recorder = RecordingHandler()
    status.log.addHandler(recorder)
    status.parameters.merge({"report-tests": "all", "print-stacktrace": False})
    status.prepare()
    status.aggregated_results(*self.__get_func_tree())
    status.post_process()
    info_messages = recorder.info_buff.getvalue()
    self.assertIn("Total: 3 tests", info_messages)
    for fragment in ("Test TestClass.case1 - PASSED",
                     "Test TestClass.case2 - FAILED",
                     "Test TestClass.case3 - BROKEN"):
        self.assertIn(fragment, info_messages)
    self.assertNotIn("stacktrace2", info_messages)
    self.assertNotIn("stacktrace3", info_messages)
    status.log.removeHandler(recorder)
def test_xml_report_test_duration(self):
    """XML dump must contain the duration computed from first/last datapoint timestamps."""
    status = FinalStatus()
    status.engine = EngineEmul()
    report_path = status.engine.create_artifact("status", ".xml")
    status.parameters = BetterDict.from_dict({"dump-xml": report_path})
    status.startup()
    status.aggregated_second(self.__get_datapoint(ts=90))
    status.aggregated_second(self.__get_datapoint(ts=100))
    status.shutdown()
    status.post_process()
    self.assertTrue(os.path.exists(report_path))
    with open(report_path) as report_file:
        content = report_file.read()
    # ts=100 minus ts=90 -> a 10-second test
    self.assertIn('<TestDuration>10.0</TestDuration>', content)
def test_log_messages_duration(self):
    """Duration must be derived from executor start/end times held by provisioning."""
    executor = JMeterExecutor()
    executor.engine = EngineEmul()
    executor.engine.provisioning = Provisioning()
    executor.engine.provisioning.executors.append(executor)
    executor.start_time = time.time()
    executor.end_time = executor.start_time + 120005  # 1 day, 9:20:05 later
    status = FinalStatus()
    status.parameters = BetterDict()
    status.engine = executor.engine
    status.log = logger_mock()
    status.post_process()
    self.assertEqual("Test duration: 1 day, 9:20:05\n", status.log.info_buf.getvalue())
def test_log_messages_percentiles(self):
    """Percentiles mode prints average times followed by one line per percentile."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    self.sniff_log(status.log)
    status.parameters.merge({"failed-labels": False, "percentiles": True,
                             "summary": False, "test-duration": False})
    status.startup()
    status.shutdown()
    status.aggregated_second(self.__get_datapoint())
    status.post_process()
    expected = "".join([
        "Average times: total 0.001, latency 0.000, connect 0.000\n",
        "Percentile 0.0%: 0.000\n",
        "Percentile 50.0%: 0.000\n",
        "Percentile 90.0%: 0.001\n",
        "Percentile 95.0%: 0.001\n",
        "Percentile 99.0%: 0.003\n",
        "Percentile 99.9%: 0.008\n",
        "Percentile 100.0%: 0.081\n",
    ])
    self.assertEqual(expected, self.log_recorder.info_buff.getvalue())
def test_blazemeter_cloud_report_link(self):
    """Cloud provisioning results URL must end up in the XML report."""
    status = FinalStatus()
    status.engine = EngineEmul()
    status.parameters = BetterDict()
    report_path = status.engine.create_artifact("status", ".xml")
    status.parameters.merge({"dump-xml": report_path})
    provisioning = CloudProvisioning()
    provisioning.results_url = "http://report/link"
    status.engine.provisioning = provisioning
    status.aggregated_second(self.__get_datapoint())
    status.post_process()
    self.assertTrue(os.path.exists(report_path))
    with open(report_path) as report_file:
        content = report_file.read()
    self.assertIn('<ReportURL>http://report/link</ReportURL>', content)
def test_log_messages_summary_labels(self):
    """Per-label summary table must be present in the sniffed info log."""
    reporter = FinalStatus()
    reporter.engine = EngineEmul()
    reporter.parameters = BetterDict.from_dict({
        "summary-labels": True,
        "percentiles": False,
        "summary": False,
        "test-duration": False,
    })
    self.sniff_log(reporter.log)
    reporter.startup()
    reporter.shutdown()
    reporter.aggregated_second(self.__get_datapoint())
    reporter.post_process()
    table_rows = [
        "Request label stats:",
        "+----------------------------------+--------+---------+--------+-----------+",
        "| label | status | succ | avg_rt | error |",
        "+----------------------------------+--------+---------+--------+-----------+",
        "| http://192.168.1.1/anotherquery | FAIL | 0.00% | 0.001 | Forbidden |",
        "| http://192.168.1.1/somequery | OK | 100.00% | 0.001 | |",
        "| http://192.168.100.100/somequery | OK | 100.00% | 0.001 | |",
        "+----------------------------------+--------+---------+--------+-----------+",
    ]
    self.assertIn("".join(row + "\n" for row in table_rows),
                  self.log_recorder.info_buff.getvalue())
def test_blazemeter_cloud_report_link(self):
    """Cloud results URL must survive the full lifecycle into the XML dump."""
    reporter = FinalStatus()
    reporter.engine = EngineEmul()
    report_path = reporter.engine.create_artifact("status", ".xml")
    reporter.parameters = BetterDict.from_dict({"dump-xml": report_path})
    provisioning = CloudProvisioning()
    provisioning.results_url = "http://report/link"
    reporter.engine.provisioning = provisioning
    reporter.startup()
    reporter.shutdown()
    reporter.aggregated_second(self.__get_datapoint())
    reporter.post_process()
    self.assertTrue(os.path.exists(report_path))
    with open(report_path) as report_file:
        content = report_file.read()
    self.assertIn('<ReportURL>http://report/link</ReportURL>', content)
def test_blazemeter_report_link(self):
    """A BlazeMeter reporter's results URL must be written into the XML report."""
    status = FinalStatus()
    status.engine = EngineEmul()
    report_path = status.engine.create_artifact("status", ".xml")
    status.parameters = BetterDict.from_dict({"dump-xml": report_path})
    uploader = BlazeMeterUploader()
    uploader.results_url = "http://report/link"
    status.engine.reporters.append(uploader)
    status.startup()
    status.shutdown()
    status.aggregated_second(self.__get_datapoint())
    status.post_process()
    self.assertTrue(os.path.exists(report_path))
    with open(report_path) as report_file:
        content = report_file.read()
    self.assertIn('<ReportURL>http://report/link</ReportURL>', content)
def test_csv_report_fieldname_order(self):
    """Numeric suffixes of perc_/rc_ CSV columns must come out in ascending order."""
    reporter = FinalStatus()
    reporter.engine = EngineEmul()
    report_path = reporter.engine.create_artifact("report", ".csv")
    reporter.parameters = BetterDict.from_dict({"dump-csv": report_path})
    reporter.startup()
    reporter.aggregated_second(self.__get_datapoint(ts=90))
    reporter.aggregated_second(self.__get_datapoint(ts=100))
    reporter.shutdown()
    reporter.post_process()
    self.assertTrue(os.path.exists(report_path))
    with open(report_path) as report_file:
        columns = report_file.readline().strip().split(",")
    percentile_columns = [float(col[5:]) for col in columns if col.startswith('perc_')]
    self.assertTrue(sorted(percentile_columns) == percentile_columns)
    response_code_columns = [float(col[3:]) for col in columns if col.startswith('rc_')]
    self.assertTrue(sorted(response_code_columns) == response_code_columns)
def test_blazemeter_report_link(self):
    """BlazeMeter uploader URL must appear in the dumped XML after the lifecycle."""
    reporter = FinalStatus()
    reporter.engine = EngineEmul()
    reporter.parameters = BetterDict()
    report_path = reporter.engine.create_artifact("status", ".xml")
    reporter.parameters.merge({"dump-xml": report_path})
    uploader = BlazeMeterUploader()
    uploader.results_url = "http://report/link"
    reporter.engine.reporters.append(uploader)
    reporter.startup()
    reporter.shutdown()
    reporter.aggregated_second(self.__get_datapoint())
    reporter.post_process()
    self.assertTrue(os.path.exists(report_path))
    with open(report_path) as report_file:
        content = report_file.read()
    self.assertIn('<ReportURL>http://report/link</ReportURL>', content)
def test_xml_report_test_duration_failed_prepare(self):
    """When prepare() never ran, post_process() must not choke on start_time=None."""
    reporter = FinalStatus()
    reporter.engine = EngineEmul()
    reporter.parameters = BetterDict()
    reporter.aggregated_second(self.__get_datapoint(ts=100))
    # must not raise ValueError despite the missing start_time
    reporter.post_process()
def test_log_messages_failed_labels(self):
    """failed-labels mode must report only the failing label with its failed-sample
    count, ignoring the passing labels and the overall "" aggregate."""
    obj = FinalStatus()
    obj.engine = EngineEmul()  # fixed: was the EngineEmul class itself, an instance is expected
    obj.parameters = BetterDict()
    obj.log = logger_mock()
    obj.parameters.merge({
        "failed-labels": True,
        "percentiles": False,
        "summary": False
    })
    datapoint = DataPoint(None, None)
    cumul_data = datapoint[DataPoint.CUMULATIVE]
    # Overall aggregate: 59314 samples, 29656 failures.
    cumul_data[""] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
            0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
            0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
            0.019: 1, 0.015: 1}),
        KPISet.ERRORS: [{
            'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
            'urls': Counter({'http://192.168.1.1/anotherquery': 7373}),
            KPISet.RESP_CODES: '403'}],
        KPISet.STDEV_RESP_TIME: 0.04947974228872108,
        KPISet.AVG_LATENCY: 0.0002825639815220692,
        KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
        KPISet.PERCENTILES: defaultdict(None, {
            '95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
            '100.0': 0.081, '99.0': 0.003, '50.0': 0.0}),
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 59314,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005440536804127192,
        KPISet.FAILURES: 29656})
    # Passing label: no errors, zero failures -> must NOT be mentioned in the log.
    cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
            0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
            0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
            0.081: 1}),
        KPISet.ERRORS: [],
        KPISet.STDEV_RESP_TIME: 0.04073402130687656,
        KPISet.AVG_LATENCY: 1.7196034796682178e-06,
        KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
        KPISet.PERCENTILES: defaultdict(None, {
            '95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001,
            '100.0': 0.081, '99.0': 0.004, '50.0': 0.0}),
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 29658,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005164542450603551,
        KPISet.FAILURES: 0})
    # Failing label: every sample got a 403 -> must show up in the log.
    cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 6.1707580253574335e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 14941, 0.001: 13673, 0.002: 506, 0.003: 289, 0.004: 103, 0.005: 59,
            0.006: 37, 0.008: 14, 0.007: 13, 0.009: 8, 0.01: 3, 0.011: 2, 0.016: 2,
            0.014: 2, 0.017: 1, 0.013: 1, 0.015: 1, 0.04: 1}),
        KPISet.ERRORS: [{
            'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
            'urls': Counter({'http://192.168.1.1/anotherquery': 7373}),
            KPISet.RESP_CODES: '403'}],
        KPISet.STDEV_RESP_TIME: 0.032465137860758844,
        KPISet.AVG_LATENCY: 0.0005634272997032645,
        KPISet.RESP_CODES: Counter({'403': 29656}),
        KPISet.PERCENTILES: defaultdict(None, {
            '95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
            '100.0': 0.04, '99.0': 0.003, '50.0': 0.0}),
        KPISet.SUCCESSES: 0,
        KPISet.SAMPLE_COUNT: 29656,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005716549770704078,
        KPISet.FAILURES: 29656})
    # Another passing label (same stats as /somequery) -> also ignored.
    cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict({
        KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
        KPISet.RESP_TIMES: Counter({
            0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
            0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
            0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
            0.081: 1}),
        KPISet.ERRORS: [],
        KPISet.STDEV_RESP_TIME: 0.04073402130687656,
        KPISet.AVG_LATENCY: 1.7196034796682178e-06,
        KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
        KPISet.PERCENTILES: defaultdict(None, {
            '95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001,
            '100.0': 0.081, '99.0': 0.004, '50.0': 0.0}),
        KPISet.SUCCESSES: 29658,
        KPISet.SAMPLE_COUNT: 29658,
        KPISet.CONCURRENCY: 0,
        KPISet.AVG_RESP_TIME: 0.0005164542450603551,
        KPISet.FAILURES: 0})
    obj.last_sec = datapoint
    obj.post_process()
    self.assertEqual(
        "29656 failed samples: http://192.168.1.1/anotherquery\n",
        obj.log.info_buf.getvalue())
def test_log_messages_failed_labels(self):
    """With failed-labels enabled (and other sections off), post_process() must log
    exactly one line naming the failing label and its failed-sample count."""
    obj = FinalStatus()
    obj.engine = EngineEmul()  # fixed: was the EngineEmul class itself, an instance is expected
    obj.parameters = BetterDict()
    obj.log = logger_mock()
    obj.parameters.merge({"failed-labels": True, "percentiles": False,
                          "summary": False, "test-duration": False})
    datapoint = DataPoint(None, None)
    cumul_data = datapoint[DataPoint.CUMULATIVE]
    # Overall aggregate: 59314 samples, 29656 failures.
    cumul_data[""] = KPISet.from_dict(
        {KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
         KPISet.RESP_TIMES: Counter(
             {0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
              0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
              0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
              0.019: 1, 0.015: 1}),
         KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                          'urls': Counter({'http://192.168.1.1/anotherquery': 7373}),
                          KPISet.RESP_CODES: '403'}],
         KPISet.STDEV_RESP_TIME: 0.04947974228872108,
         KPISet.AVG_LATENCY: 0.0002825639815220692,
         KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
         KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008,
                                                '90.0': 0.001, '100.0': 0.081,
                                                '99.0': 0.003, '50.0': 0.0}),
         KPISet.SUCCESSES: 29658,
         KPISet.SAMPLE_COUNT: 59314,
         KPISet.CONCURRENCY: 0,
         KPISet.AVG_RESP_TIME: 0.0005440536804127192,
         KPISet.FAILURES: 29656})
    # Passing label: no errors, zero failures -> must NOT appear in the output.
    cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict(
        {KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
         KPISet.RESP_TIMES: Counter(
             {0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
              0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
              0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
              0.081: 1}),
         KPISet.ERRORS: [],
         KPISet.STDEV_RESP_TIME: 0.04073402130687656,
         KPISet.AVG_LATENCY: 1.7196034796682178e-06,
         KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
         KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0, '99.9': 0.009,
                                                '90.0': 0.001, '100.0': 0.081,
                                                '99.0': 0.004, '50.0': 0.0}),
         KPISet.SUCCESSES: 29658,
         KPISet.SAMPLE_COUNT: 29658,
         KPISet.CONCURRENCY: 0,
         KPISet.AVG_RESP_TIME: 0.0005164542450603551,
         KPISet.FAILURES: 0})
    # Failing label: all 29656 samples got '403' -> this is the line we expect logged.
    cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict(
        {KPISet.AVG_CONN_TIME: 6.1707580253574335e-06,
         KPISet.RESP_TIMES: Counter({0.0: 14941, 0.001: 13673, 0.002: 506, 0.003: 289,
                                     0.004: 103, 0.005: 59, 0.006: 37, 0.008: 14,
                                     0.007: 13, 0.009: 8, 0.01: 3, 0.011: 2, 0.016: 2,
                                     0.014: 2, 0.017: 1, 0.013: 1, 0.015: 1, 0.04: 1}),
         KPISet.ERRORS: [
             {'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
              'urls': Counter({'http://192.168.1.1/anotherquery': 7373}),
              KPISet.RESP_CODES: '403'}],
         KPISet.STDEV_RESP_TIME: 0.032465137860758844,
         KPISet.AVG_LATENCY: 0.0005634272997032645,
         KPISet.RESP_CODES: Counter({'403': 29656}),
         KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008,
                                                '90.0': 0.001, '100.0': 0.04,
                                                '99.0': 0.003, '50.0': 0.0}),
         KPISet.SUCCESSES: 0,
         KPISet.SAMPLE_COUNT: 29656,
         KPISet.CONCURRENCY: 0,
         KPISet.AVG_RESP_TIME: 0.0005716549770704078,
         KPISet.FAILURES: 29656})
    # Another passing label (same stats as /somequery) -> also ignored.
    cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict(
        {KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
         KPISet.RESP_TIMES: Counter(
             {0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
              0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
              0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
              0.081: 1}),
         KPISet.ERRORS: [],
         KPISet.STDEV_RESP_TIME: 0.04073402130687656,
         KPISet.AVG_LATENCY: 1.7196034796682178e-06,
         KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
         KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0, '99.9': 0.009,
                                                '90.0': 0.001, '100.0': 0.081,
                                                '99.0': 0.004, '50.0': 0.0}),
         KPISet.SUCCESSES: 29658,
         KPISet.SAMPLE_COUNT: 29658,
         KPISet.CONCURRENCY: 0,
         KPISet.AVG_RESP_TIME: 0.0005164542450603551,
         KPISet.FAILURES: 0})
    obj.last_sec = datapoint
    obj.post_process()
    self.assertEqual("29656 failed samples: http://192.168.1.1/anotherquery\n",
                     obj.log.info_buf.getvalue())