class TestStatsPrinting(LocustTestCase):
    """Tests for the console tables emitted by locust.stats print helpers.

    setUp seeds a RequestStats with two endpoints — one short name and one
    long enough to exercise name truncation in the table — plus periodic
    errors, so every table has entry rows, an aggregated row and an error
    report to check.
    """

    def setUp(self):
        super().setUp()
        self.stats = RequestStats()
        long_name = "test" * int((STATS_NAME_WIDTH - STATS_TYPE_WIDTH + 4) / len("test"))
        for i in range(100):
            for method, name, freq in [
                ("GET", "test_entry", 5),
                ("DELETE", long_name, 3),
            ]:
                self.stats.log_request(method, name, i, 2000 + i)
                # log an error on every `freq`:th iteration
                if i % freq == 0:
                    self.stats.log_error(method, name, RuntimeError(f"{method} error"))

    def test_print_percentile_stats(self):
        locust.stats.print_percentile_stats(self.stats)
        info = self.mocked_log.info
        self.assertEqual(8, len(info))
        self.assertEqual("Response time percentiles (approximated)", info[0])
        # headline must have the same column count as the value rows
        headlines = info[1].replace("# reqs", "#reqs").split()
        self.assertEqual(len(headlines), len(info[3].split()))
        # aggregated row has no "Type" column, hence one column less
        self.assertEqual(len(headlines) - 1, len(info[-2].split()))
        # table ascii separators
        self.assertEqual(info[2], info[-3])

    def test_print_stats(self):
        locust.stats.print_stats(self.stats)
        info = self.mocked_log.info
        self.assertEqual(7, len(info))
        headlines = info[0].replace("# ", "#").split()
        # number of columns in the separator row
        self.assertEqual(len(headlines), len(info[1].split("|")) + 2)
        # entry row
        self.assertEqual(len(headlines), len(info[2].split()))
        # aggregated row is missing the value in the "type" column
        self.assertEqual(len(headlines) - 1, len(info[-2].split()))
        # table ascii separators
        self.assertEqual(info[1], info[-3])

    def test_print_error_report(self):
        locust.stats.print_error_report(self.stats)
        info = self.mocked_log.info
        self.assertEqual(7, len(info))
        self.assertEqual("Error report", info[0])
        headlines = info[1].replace("# ", "#").split()
        # column count of the headline vs the table ascii separator
        self.assertEqual(len(headlines), len(info[2].split("|")))
        # table ascii separators
        self.assertEqual(info[2], info[-2])
def test_print_percentile_stats(self):
    """Percentile-table headline must have as many columns as each value row."""
    stats = RequestStats()
    for rt in range(100):
        stats.log_request("GET", "test_entry", rt, 2000 + rt)
    locust.stats.print_percentile_stats(stats)
    info = self.mocked_log.info
    self.assertEqual(7, len(info))
    # compare headline column count against the value rows
    headlines = info[1].replace("# reqs", "#reqs").split()
    self.assertEqual(len(headlines), len(info[3].split()))
    self.assertEqual(len(headlines), len(info[5].split()))
def test_master_current_response_times(self):
    """Current response time percentiles on the master must only account
    for requests reported within the recent sliding time window.

    Uses the stdlib ``unittest.mock`` instead of the third-party ``mock``
    package (the old ``import mock`` fails on a clean Python 3 install).
    """
    from unittest import mock

    class MyTestLocust(Locust):
        pass

    start_time = 1
    with mock.patch("time.time") as mocked_time:
        mocked_time.return_value = start_time
        global_stats.reset_all()
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
            master = MasterLocustRunner(MyTestLocust, self.options)
            mocked_time.return_value += 1
            server.mocked_send(Message("client_ready", None, "fake_client"))
            stats = RequestStats()
            stats.log_request("GET", "/1", 100, 3546)
            stats.log_request("GET", "/1", 800, 56743)
            server.mocked_send(Message("stats", {
                "stats": stats.serialize_stats(),
                "stats_total": stats.total.get_stripped_report(),
                "errors": stats.serialize_errors(),
                "user_count": 1,
            }, "fake_client"))
            mocked_time.return_value += 1
            stats2 = RequestStats()
            stats2.log_request("GET", "/2", 400, 2201)
            server.mocked_send(Message("stats", {
                "stats": stats2.serialize_stats(),
                "stats_total": stats2.total.get_stripped_report(),
                "errors": stats2.serialize_errors(),
                "user_count": 2,
            }, "fake_client"))
            mocked_time.return_value += 4
            self.assertEqual(400, master.stats.total.get_current_response_time_percentile(0.5))
            self.assertEqual(800, master.stats.total.get_current_response_time_percentile(0.95))

            # let 10 seconds pass, do some more requests, send it to the master and
            # make sure the current response time percentiles only account for
            # these new requests
            mocked_time.return_value += 10
            stats.log_request("GET", "/1", 20, 1)
            stats.log_request("GET", "/1", 30, 1)
            stats.log_request("GET", "/1", 3000, 1)
            server.mocked_send(Message("stats", {
                "stats": stats.serialize_stats(),
                "stats_total": stats.total.get_stripped_report(),
                "errors": stats.serialize_errors(),
                "user_count": 2,
            }, "fake_client"))
            self.assertEqual(30, master.stats.total.get_current_response_time_percentile(0.5))
            self.assertEqual(3000, master.stats.total.get_current_response_time_percentile(0.95))
def test_master_current_response_times(self):
    """Current response time percentiles on the master should reflect only
    the requests reported within the recent sliding window."""
    class MyTestLocust(Locust):
        pass

    start_time = 1
    with mock.patch("time.time") as mocked_time:
        mocked_time.return_value = start_time
        global_stats.reset_all()
        with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
            master = MasterLocustRunner(MyTestLocust, self.options)
            mocked_time.return_value += 1.0234
            server.mocked_send(Message("client_ready", None, "fake_client"))

            # first report: two requests on /1
            stats = RequestStats()
            stats.log_request("GET", "/1", 100, 3546)
            stats.log_request("GET", "/1", 800, 56743)
            payload = {
                "stats": stats.serialize_stats(),
                "stats_total": stats.total.get_stripped_report(),
                "errors": stats.serialize_errors(),
                "user_count": 1,
            }
            server.mocked_send(Message("stats", payload, "fake_client"))

            # second report, one second later: a single request on /2
            mocked_time.return_value += 1
            stats2 = RequestStats()
            stats2.log_request("GET", "/2", 400, 2201)
            payload2 = {
                "stats": stats2.serialize_stats(),
                "stats_total": stats2.total.get_stripped_report(),
                "errors": stats2.serialize_errors(),
                "user_count": 2,
            }
            server.mocked_send(Message("stats", payload2, "fake_client"))

            mocked_time.return_value += 4
            total = master.stats.total
            self.assertEqual(400, total.get_current_response_time_percentile(0.5))
            self.assertEqual(800, total.get_current_response_time_percentile(0.95))

            # let 10 seconds pass, do some more requests, send it to the master
            # and make sure the current response time percentiles only account
            # for these new requests
            mocked_time.return_value += 10.10023
            stats.log_request("GET", "/1", 20, 1)
            stats.log_request("GET", "/1", 30, 1)
            stats.log_request("GET", "/1", 3000, 1)
            payload3 = {
                "stats": stats.serialize_stats(),
                "stats_total": stats.total.get_stripped_report(),
                "errors": stats.serialize_errors(),
                "user_count": 2,
            }
            server.mocked_send(Message("stats", payload3, "fake_client"))
            self.assertEqual(30, total.get_current_response_time_percentile(0.5))
            self.assertEqual(3000, total.get_current_response_time_percentile(0.95))
def test_master_total_stats(self):
    """The master must aggregate totals across several worker stats reports."""
    class MyTestLocust(Locust):
        pass

    with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
        master = MasterLocustRunner(MyTestLocust, self.options)
        server.mocked_send(Message("client_ready", None, "fake_client"))

        stats = RequestStats()
        stats.log_request("GET", "/1", 100, 3546)
        stats.log_request("GET", "/1", 800, 56743)
        stats2 = RequestStats()
        stats2.log_request("GET", "/2", 700, 2201)

        for report, users in ((stats, 1), (stats2, 2)):
            server.mocked_send(Message("stats", {
                "stats": report.serialize_stats(),
                "stats_total": report.total.serialize(),
                "errors": report.serialize_errors(),
                "user_count": users,
            }, "fake_client"))

        # median over 100, 800 and 700 ms is 700 ms
        self.assertEqual(700, master.stats.total.median_response_time)
def test_master_total_stats(self):
    """The master must aggregate totals across several worker stats reports.

    Uses the stdlib ``unittest.mock`` instead of the third-party ``mock``
    package (the old ``import mock`` fails on a clean Python 3 install).
    """
    from unittest import mock

    class MyTestLocust(Locust):
        pass

    with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
        master = MasterLocustRunner(MyTestLocust, self.options)
        server.mocked_send(Message("client_ready", None, "fake_client"))
        stats = RequestStats()
        stats.log_request("GET", "/1", 100, 3546)
        stats.log_request("GET", "/1", 800, 56743)
        stats2 = RequestStats()
        stats2.log_request("GET", "/2", 700, 2201)
        server.mocked_send(Message("stats", {
            "stats": stats.serialize_stats(),
            "stats_total": stats.total.serialize(),
            "errors": stats.serialize_errors(),
            "user_count": 1,
        }, "fake_client"))
        server.mocked_send(Message("stats", {
            "stats": stats2.serialize_stats(),
            "stats_total": stats2.total.serialize(),
            "errors": stats2.serialize_errors(),
            "user_count": 2,
        }, "fake_client"))
        # median over 100, 800 and 700 ms is 700 ms
        self.assertEqual(700, master.stats.total.median_response_time)
class TestRequestStats(unittest.TestCase):
    """Tests for RequestStats / StatsEntry logging, aggregation and reporting.

    Fixes: ``xrange`` (Python 2 only, a NameError on Python 3) is replaced
    with ``range``; a dead, immediately-overwritten assignment in
    ``test_serialize_through_message`` is removed.
    """

    def setUp(self):
        self.stats = RequestStats()

        # small helpers so the fixture data below reads as a flat log
        def log(response_time, size):
            self.stats.log_request("GET", "test_entry", response_time, size)

        def log_error(exc):
            self.stats.log_error("GET", "test_entry", exc)

        log(45, 1)
        log(135, 1)
        log(44, 1)
        log(None, 1)
        log_error(Exception("dummy fail"))
        log_error(Exception("dummy fail"))
        log(375, 1)
        log(601, 1)
        log(35, 1)
        log(79, 1)
        log(None, 1)
        log_error(Exception("dummy fail"))
        self.s = self.stats.get("test_entry", "GET")

    def test_percentile(self):
        s = StatsEntry(self.stats, "percentile_test", "GET")
        # range() replaces the Python 2-only xrange()
        for x in range(100):
            s.log(x, 0)
        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)

    def test_median(self):
        self.assertEqual(self.s.median_response_time, 79)

    def test_median_out_of_min_max_bounds(self):
        s = StatsEntry(self.stats, "median_test", "GET")
        s.log(6034, 0)
        self.assertEqual(s.median_response_time, 6034)
        s.reset()
        s.log(6099, 0)
        self.assertEqual(s.median_response_time, 6099)

    def test_total_rps(self):
        self.assertEqual(self.s.total_rps, 9)

    def test_current_rps(self):
        self.stats.total.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_rps, 4.5)
        self.stats.total.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_rps, 0)

    def test_current_fail_per_sec(self):
        self.stats.total.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_fail_per_sec, 1.5)
        self.stats.total.last_request_timestamp = int(time.time()) + 12
        self.assertEqual(self.s.current_fail_per_sec, 0.3)
        self.stats.total.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_fail_per_sec, 0)

    def test_num_reqs_fails(self):
        self.assertEqual(self.s.num_requests, 9)
        self.assertEqual(self.s.num_failures, 3)

    def test_avg(self):
        self.assertEqual(self.s.avg_response_time, 187.71428571428572)

    def test_total_content_length(self):
        self.assertEqual(self.s.total_content_length, 9)

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)
        self.assertEqual(self.s.total_rps, 2)
        self.assertEqual(self.s.num_requests, 2)
        self.assertEqual(self.s.num_failures, 1)
        self.assertEqual(self.s.avg_response_time, 420.5)
        self.assertEqual(self.s.median_response_time, 85)
        self.assertNotEqual(None, self.s.last_request_timestamp)
        self.s.reset()
        self.assertEqual(None, self.s.last_request_timestamp)

    def test_avg_only_none(self):
        self.s.reset()
        self.s.log(None, 123)
        self.assertEqual(self.s.avg_response_time, 0)
        self.assertEqual(self.s.median_response_time, 0)
        self.assertEqual(self.s.get_response_time_percentile(0.5), 0)

    def test_reset_min_response_time(self):
        self.s.reset()
        self.s.log(756, 0)
        self.assertEqual(756, self.s.min_response_time)

    def test_aggregation(self):
        s1 = StatsEntry(self.stats, "aggregate me!", "GET")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = StatsEntry(self.stats, "aggregate me!", "GET")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

        s = StatsEntry(self.stats, "GET", "")
        s.extend(s1)
        s.extend(s2)

        self.assertEqual(s.num_requests, 10)
        self.assertEqual(s.num_failures, 3)
        self.assertEqual(s.median_response_time, 38)
        self.assertEqual(s.avg_response_time, 43.2)

    def test_aggregation_with_rounding(self):
        s1 = StatsEntry(self.stats, "round me!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        s1.log(992, 0)  # (rounded 990) max
        s1.log(142, 0)  # (rounded 140)
        s1.log(552, 0)  # (rounded 550)
        s1.log(557, 0)  # (rounded 560)
        s1.log(387, 0)  # (rounded 390)
        s1.log(557, 0)  # (rounded 560)
        s1.log(977, 0)  # (rounded 980)

        self.assertEqual(s1.num_requests, 8)
        self.assertEqual(s1.median_response_time, 550)
        self.assertEqual(s1.avg_response_time, 535.75)
        # min/max keep the exact (unrounded) response times
        self.assertEqual(s1.min_response_time, 122)
        self.assertEqual(s1.max_response_time, 992)

    def test_aggregation_min_response_time(self):
        s1 = StatsEntry(self.stats, "min", "GET")
        s1.log(10, 0)
        self.assertEqual(10, s1.min_response_time)
        # extending with an empty entry must not clobber the existing minimum
        s2 = StatsEntry(self.stats, "min", "GET")
        s1.extend(s2)
        self.assertEqual(10, s1.min_response_time)

    def test_aggregation_last_request_timestamp(self):
        # both empty -> stays None
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s1.extend(s2)
        self.assertEqual(None, s1.last_request_timestamp)

        # only the target has a timestamp -> it is kept
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s1.last_request_timestamp = 666
        s1.extend(s2)
        self.assertEqual(666, s1.last_request_timestamp)

        # only the source has a timestamp -> it is adopted
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s2.last_request_timestamp = 666
        s1.extend(s2)
        self.assertEqual(666, s1.last_request_timestamp)

        # the newer timestamp wins
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s1.last_request_timestamp = 666
        s1.last_request_timestamp = 700
        s1.extend(s2)
        self.assertEqual(700, s1.last_request_timestamp)

    def test_percentile_rounded_down(self):
        s1 = StatsEntry(self.stats, "rounding down!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        actual_percentile = s1.percentile()
        # NOTE(review): expected string reconstructed from mangled source;
        # confirm exact column padding against the percentile() output format
        self.assertEqual(
            actual_percentile,
            " GET rounding down! 1 120 120 120 120 120 120 120 120 120 120 120"
        )

    def test_percentile_rounded_up(self):
        s2 = StatsEntry(self.stats, "rounding up!", "GET")
        s2.log(127, 0)  # (rounded 130) min
        actual_percentile = s2.percentile()
        # NOTE(review): expected string reconstructed from mangled source;
        # confirm exact column padding against the percentile() output format
        self.assertEqual(
            actual_percentile,
            " GET rounding up! 1 130 130 130 130 130 130 130 130 130 130 130"
        )

    def test_error_grouping(self):
        # reset stats
        self.stats = RequestStats()

        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.assertEqual(1, len(self.stats.errors))
        self.assertEqual(2, list(self.stats.errors.values())[0].occurrences)

        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Third exception!"))
        self.assertEqual(3, len(self.stats.errors))

    def test_error_grouping_errors_with_memory_addresses(self):
        # reset stats
        self.stats = RequestStats()

        class Dummy:
            pass

        # repr() embeds a memory address; grouping must strip it
        self.stats.log_error("GET", "/", Exception("Error caused by %r" % Dummy()))
        self.assertEqual(1, len(self.stats.errors))

    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message,
        and unserialize the whole thing again. This is done "IRL" when stats
        are sent from workers to master.
        """
        s1 = StatsEntry(self.stats, "test", "GET")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)

        data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = StatsEntry.unserialize(data)

        self.assertEqual(20, u1.median_response_time)
class TestRequestStats(unittest.TestCase):
    """Tests for RequestStats / StatsEntry logging, aggregation and reporting."""

    def setUp(self):
        # restore the module-level percentile list in case a previous test
        # (e.g. test_custom_percentile_list) replaced it
        locust.stats.PERCENTILES_TO_REPORT = PERCENTILES_TO_REPORT
        self.stats = RequestStats()

        # small helpers so the fixture data below reads as a flat log
        def log(response_time, size):
            self.stats.log_request("GET", "test_entry", response_time, size)

        def log_error(exc):
            self.stats.log_error("GET", "test_entry", exc)

        log(45, 1)
        log(135, 1)
        log(44, 1)
        log(None, 1)
        log_error(Exception("dummy fail"))
        log_error(Exception("dummy fail"))
        log(375, 1)
        log(601, 1)
        log(35, 1)
        log(79, 1)
        log(None, 1)
        log_error(Exception("dummy fail"))
        self.s = self.stats.get("test_entry", "GET")

    def test_percentile(self):
        entry = StatsEntry(self.stats, "percentile_test", "GET")
        for x in range(100):
            entry.log(x, 0)
        self.assertEqual(entry.get_response_time_percentile(0.5), 50)
        self.assertEqual(entry.get_response_time_percentile(0.6), 60)
        self.assertEqual(entry.get_response_time_percentile(0.95), 95)

    def test_median(self):
        self.assertEqual(self.s.median_response_time, 79)

    def test_median_out_of_min_max_bounds(self):
        entry = StatsEntry(self.stats, "median_test", "GET")
        entry.log(6034, 0)
        self.assertEqual(entry.median_response_time, 6034)
        entry.reset()
        entry.log(6099, 0)
        self.assertEqual(entry.median_response_time, 6099)

    def test_total_rps(self):
        self.stats.log_request("GET", "other_endpoint", 1337, 1337)
        s2 = self.stats.get("other_endpoint", "GET")
        # pin start/end timestamps so the rates are deterministic
        s2.start_time = 2.0
        s2.last_request_timestamp = 6.0
        self.s.start_time = 1.0
        self.s.last_request_timestamp = 4.0
        self.stats.total.start_time = 1.0
        self.stats.total.last_request_timestamp = 6.0
        # per-entry rates are computed over the *total* run duration (5 s)
        self.assertEqual(self.s.total_rps, 9 / 5.0)
        self.assertAlmostEqual(s2.total_rps, 1 / 5.0)
        self.assertEqual(self.stats.total.total_rps, 10 / 5.0)

    def test_rps_less_than_one_second(self):
        entry = StatsEntry(self.stats, "percentile_test", "GET")
        for i in range(10):
            entry.log(i, 0)
        self.assertGreater(entry.total_rps, 10)

    def test_current_rps(self):
        self.stats.total.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_rps, 4.5)
        self.stats.total.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_rps, 0)

    def test_current_fail_per_sec(self):
        self.stats.total.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_fail_per_sec, 1.5)
        self.stats.total.last_request_timestamp = int(time.time()) + 12
        self.assertEqual(self.s.current_fail_per_sec, 0.3)
        self.stats.total.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_fail_per_sec, 0)

    def test_num_reqs_fails(self):
        self.assertEqual(self.s.num_requests, 9)
        self.assertEqual(self.s.num_failures, 3)

    def test_avg(self):
        self.assertEqual(self.s.avg_response_time, 187.71428571428572)

    def test_total_content_length(self):
        self.assertEqual(self.s.total_content_length, 9)

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)

        self.assertGreater(self.s.total_rps, 2)
        self.assertEqual(self.s.num_requests, 2)
        self.assertEqual(self.s.num_failures, 1)
        self.assertEqual(self.s.avg_response_time, 420.5)
        self.assertEqual(self.s.median_response_time, 85)
        self.assertNotEqual(None, self.s.last_request_timestamp)

        self.s.reset()
        self.assertEqual(None, self.s.last_request_timestamp)

    def test_avg_only_none(self):
        self.s.reset()
        self.s.log(None, 123)
        self.assertEqual(self.s.avg_response_time, 0)
        self.assertEqual(self.s.median_response_time, 0)
        self.assertEqual(self.s.get_response_time_percentile(0.5), 0)

    def test_reset_min_response_time(self):
        self.s.reset()
        self.s.log(756, 0)
        self.assertEqual(756, self.s.min_response_time)

    def test_aggregation(self):
        s1 = StatsEntry(self.stats, "aggregate me!", "GET")
        for rt in (12, 12, 38):
            s1.log(rt, 0)
        s1.log_error("Dummy exception")

        s2 = StatsEntry(self.stats, "aggregate me!", "GET")
        s2.log_error("Dummy exception")
        s2.log_error("Dummy exception")
        for rt in (12, 99, 14, 55, 38, 55, 97):
            s2.log(rt, 0)

        merged = StatsEntry(self.stats, "GET", "")
        merged.extend(s1)
        merged.extend(s2)

        self.assertEqual(merged.num_requests, 10)
        self.assertEqual(merged.num_failures, 3)
        self.assertEqual(merged.median_response_time, 38)
        self.assertEqual(merged.avg_response_time, 43.2)

    def test_aggregation_with_rounding(self):
        s1 = StatsEntry(self.stats, "round me!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        s1.log(992, 0)  # (rounded 990) max
        s1.log(142, 0)  # (rounded 140)
        s1.log(552, 0)  # (rounded 550)
        s1.log(557, 0)  # (rounded 560)
        s1.log(387, 0)  # (rounded 390)
        s1.log(557, 0)  # (rounded 560)
        s1.log(977, 0)  # (rounded 980)

        self.assertEqual(s1.num_requests, 8)
        self.assertEqual(s1.median_response_time, 550)
        self.assertEqual(s1.avg_response_time, 535.75)
        # min/max keep the exact (unrounded) response times
        self.assertEqual(s1.min_response_time, 122)
        self.assertEqual(s1.max_response_time, 992)

    def test_aggregation_with_decimal_rounding(self):
        s1 = StatsEntry(self.stats, "round me!", "GET")
        s1.log(1.1, 0)
        s1.log(1.99, 0)
        s1.log(3.1, 0)
        self.assertEqual(s1.num_requests, 3)
        self.assertEqual(s1.median_response_time, 2)
        self.assertEqual(s1.avg_response_time, (1.1 + 1.99 + 3.1) / 3)
        self.assertEqual(s1.min_response_time, 1.1)
        self.assertEqual(s1.max_response_time, 3.1)

    def test_aggregation_min_response_time(self):
        s1 = StatsEntry(self.stats, "min", "GET")
        s1.log(10, 0)
        self.assertEqual(10, s1.min_response_time)
        # extending with an empty entry must not clobber the existing minimum
        s1.extend(StatsEntry(self.stats, "min", "GET"))
        self.assertEqual(10, s1.min_response_time)

    def test_aggregation_last_request_timestamp(self):
        # both empty -> stays None
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s1.extend(s2)
        self.assertEqual(None, s1.last_request_timestamp)

        # only the target has a timestamp -> it is kept
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s1.last_request_timestamp = 666
        s1.extend(s2)
        self.assertEqual(666, s1.last_request_timestamp)

        # only the source has a timestamp -> it is adopted
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s2.last_request_timestamp = 666
        s1.extend(s2)
        self.assertEqual(666, s1.last_request_timestamp)

        # the newer timestamp wins
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s1.last_request_timestamp = 666
        s1.last_request_timestamp = 700
        s1.extend(s2)
        self.assertEqual(700, s1.last_request_timestamp)

    def test_percentile_rounded_down(self):
        s1 = StatsEntry(self.stats, "rounding down!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        actual_percentile = s1.percentile().split()
        expected = ["GET", "rounding", "down!"] + ["120"] * len(PERCENTILES_TO_REPORT) + ["1"]
        self.assertEqual(actual_percentile, expected)

    def test_percentile_rounded_up(self):
        s2 = StatsEntry(self.stats, "rounding up!", "GET")
        s2.log(127, 0)  # (rounded 130) min
        actual_percentile = s2.percentile().split()
        expected = ["GET", "rounding", "up!"] + ["130"] * len(PERCENTILES_TO_REPORT) + ["1"]
        self.assertEqual(actual_percentile, expected)

    def test_custom_percentile_list(self):
        entry = StatsEntry(self.stats, "custom_percentiles", "GET")
        custom_percentile_list = [0.50, 0.90, 0.95, 0.99]
        # setUp restores the default list before the next test
        locust.stats.PERCENTILES_TO_REPORT = custom_percentile_list
        entry.log(150, 0)
        actual_percentile = entry.percentile().split()
        expected = ["GET", "custom_percentiles"] + ["150"] * len(custom_percentile_list) + ["1"]
        self.assertEqual(actual_percentile, expected)

    def test_error_grouping(self):
        # reset stats
        self.stats = RequestStats()

        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.assertEqual(1, len(self.stats.errors))
        self.assertEqual(2, list(self.stats.errors.values())[0].occurrences)

        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Third exception!"))
        self.assertEqual(3, len(self.stats.errors))

    def test_error_grouping_errors_with_memory_addresses(self):
        # reset stats
        self.stats = RequestStats()

        class Dummy:
            pass

        # repr() embeds a memory address; grouping must strip it
        self.stats.log_error("GET", "/", Exception("Error caused by %r" % Dummy()))
        self.assertEqual(1, len(self.stats.errors))

    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message,
        and unserialize the whole thing again. This is done "IRL" when stats
        are sent from workers to master.
        """
        s1 = StatsEntry(self.stats, "test", "GET")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)
        u1 = StatsEntry.unserialize(s1.serialize())

        data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = StatsEntry.unserialize(data)
        self.assertEqual(20, u1.median_response_time)