def test_response_times_not_cached_if_not_enabled(self):
    s = StatsEntry(self.stats, "/", "GET")
    s.log(11, 1337)
    assert None == s.response_times_cache
    s.last_request_timestamp -= 1
    s.log(666, 1337)
    assert None == s.response_times_cache
def test_percentile(self):
    s = StatsEntry(self.stats, "percentile_test", "GET")
    for x in xrange(100):
        s.log(x, 0)

    self.assertEqual(s.get_response_time_percentile(0.5), 50)
    self.assertEqual(s.get_response_time_percentile(0.6), 60)
    self.assertEqual(s.get_response_time_percentile(0.95), 95)
def test_percentile(self):
    s = StatsEntry(self.stats, "percentile_test", "GET")
    for x in xrange(100):
        s.log(x, 0)

    assert s.get_response_time_percentile(0.5) == 50
    assert s.get_response_time_percentile(0.6) == 60
    assert s.get_response_time_percentile(0.95) == 95
def test_error_grouping_errors_with_memory_addresses(self):
    # reset stats
    self.stats = RequestStats()

    class Dummy(object):
        pass

    s = StatsEntry(self.stats, "/", "GET")
    s.log_error(Exception("Error caused by %r" % Dummy()))
    s.log_error(Exception("Error caused by %r" % Dummy()))
    self.assertEqual(1, len(self.stats.errors))
def test_response_times_cached(self):
    s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    assert 1 == len(s.response_times_cache)
    s.log(11, 1337)
    assert 1 == len(s.response_times_cache)
    s.last_request_timestamp -= 1
    s.log(666, 1337)
    assert 2 == len(s.response_times_cache)
    assert CachedResponseTimes(
        response_times={11: 1},
        num_requests=1,
    ) == s.response_times_cache[s.last_request_timestamp - 1]
def test_response_times_cached(self):
    s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    self.assertEqual(1, len(s.response_times_cache))
    s.log(11, 1337)
    self.assertEqual(1, len(s.response_times_cache))
    s.last_request_timestamp -= 1
    s.log(666, 1337)
    self.assertEqual(2, len(s.response_times_cache))
    self.assertEqual(
        CachedResponseTimes(
            response_times={11: 1},
            num_requests=1,
        ),
        s.response_times_cache[s.last_request_timestamp - 1],
    )
def test_serialize_through_message(self):
    """
    Serialize a RequestStats instance, then serialize it through a Message,
    and unserialize the whole thing again. This is done "IRL" when stats are sent
    from slaves to master.
    """
    s1 = StatsEntry(self.stats, "test", "GET")
    s1.log(10, 0)
    s1.log(20, 0)
    s1.log(40, 0)
    StatsEntry.unserialize(s1.serialize())

    data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
    u1 = StatsEntry.unserialize(data)
    self.assertEqual(20, u1.median_response_time)
def test_aggregation_with_rounding(self):
    s1 = StatsEntry(self.stats, "round me!", "GET")
    s1.log(122, 0)  # (rounded 120) min
    s1.log(992, 0)  # (rounded 990) max
    s1.log(142, 0)  # (rounded 140)
    s1.log(552, 0)  # (rounded 550)
    s1.log(557, 0)  # (rounded 560)
    s1.log(387, 0)  # (rounded 390)
    s1.log(557, 0)  # (rounded 560)
    s1.log(977, 0)  # (rounded 980)

    self.assertEqual(s1.num_requests, 8)
    self.assertEqual(s1.median_response_time, 550)
    self.assertEqual(s1.avg_response_time, 535.75)
    self.assertEqual(s1.min_response_time, 122)
    self.assertEqual(s1.max_response_time, 992)
def test_aggregation_with_rounding(self):
    s1 = StatsEntry(self.stats, "round me!", "GET")
    s1.log(122, 0)  # (rounded 120) min
    s1.log(992, 0)  # (rounded 990) max
    s1.log(142, 0)  # (rounded 140)
    s1.log(552, 0)  # (rounded 550)
    s1.log(557, 0)  # (rounded 560)
    s1.log(387, 0)  # (rounded 390)
    s1.log(557, 0)  # (rounded 560)
    s1.log(977, 0)  # (rounded 980)

    assert s1.num_requests == 8
    assert s1.median_response_time == 550
    assert s1.avg_response_time == 535.75
    assert s1.min_response_time == 122
    assert s1.max_response_time == 992
def test_response_times_not_cached_if_not_enabled(self):
    s = StatsEntry(self.stats, "/", "GET")
    s.log(11, 1337)
    self.assertEqual(None, s.response_times_cache)
    s.last_request_timestamp -= 1
    s.log(666, 1337)
    self.assertEqual(None, s.response_times_cache)
def test_error_grouping(self):
    # reset stats
    self.stats = RequestStats()

    s = StatsEntry(self.stats, "/some-path", "GET")
    s.log_error(Exception("Exception!"))
    s.log_error(Exception("Exception!"))
    self.assertEqual(1, len(self.stats.errors))
    self.assertEqual(2, list(self.stats.errors.values())[0].occurences)

    s.log_error(Exception("Another exception!"))
    s.log_error(Exception("Another exception!"))
    s.log_error(Exception("Third exception!"))
    self.assertEqual(3, len(self.stats.errors))
def setUp(self):
    self.stats = RequestStats()
    self.stats.start_time = time.time()
    self.s = StatsEntry(self.stats, "test_entry", "GET")
    self.s.log(45, 0)
    self.s.log(135, 0)
    self.s.log(44, 0)
    self.s.log_error(Exception("dummy fail"))
    self.s.log_error(Exception("dummy fail"))
    self.s.log(375, 0)
    self.s.log(601, 0)
    self.s.log(35, 0)
    self.s.log(79, 0)
    self.s.log_error(Exception("dummy fail"))
def test_get_current_response_time_percentile(self):
    s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    t = int(time.time())
    s.response_times_cache[t - 10] = CachedResponseTimes(
        response_times={i: 1 for i in xrange(100)},
        num_requests=200,
    )
    s.response_times_cache[t - 10].response_times[1] = 201

    s.response_times = {i: 2 for i in xrange(100)}
    s.response_times[1] = 202
    s.num_requests = 300

    self.assertEqual(95, s.get_current_response_time_percentile(0.95))
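# Why the expected value above is 95: the "current" percentile is computed over the
# difference between the latest total response-time counts and the cached snapshot from
# roughly ten seconds earlier, i.e. only the requests observed inside that window. Here the
# window contains exactly one request per response time 0..99, and the 95th percentile of
# that distribution is 95. A minimal sketch of the window arithmetic follows; diff_window
# and percentile_from_counts are illustrative helpers written for this example, not Locust's
# internal function names or exact implementation.
from typing import Dict


def diff_window(latest: Dict[int, int], cached: Dict[int, int]) -> Dict[int, int]:
    # Per-response-time counts seen since the cached snapshot.
    return {rt: latest[rt] - cached.get(rt, 0) for rt in latest if latest[rt] > cached.get(rt, 0)}


def percentile_from_counts(counts: Dict[int, int], percent: float) -> int:
    # Walk response times from the slowest down until the remaining count fits under the rank.
    num_requests = sum(counts.values())
    rank = int(num_requests * percent)
    processed = 0
    for rt in sorted(counts, reverse=True):
        processed += counts[rt]
        if num_requests - processed <= rank:
            return rt
    return 0


cached = {i: 1 for i in range(100)}
cached[1] = 201
latest = {i: 2 for i in range(100)}
latest[1] = 202

window = diff_window(latest, cached)          # one request per response time 0..99
print(percentile_from_counts(window, 0.95))   # -> 95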
def _get_percentiles_dict(self, stats_entry: StatsEntry) -> Dict[str, int]:
    """Returns a dictionary mapping a human-readable percentile string to its reported value

    Args:
        stats_entry (StatsEntry): The Locust StatsEntry object which encodes a particular task's statistics

    Returns:
        Dict[str, int]: A dictionary of human-readable percentile strings to the percentile response time
    """
    if not stats_entry.num_requests:
        return self.percentiles_na

    return {
        self._get_readable_percentile(percentile): int(stats_entry.get_response_time_percentile(percentile) or 0)
        for percentile in self.percentiles_to_report
    }
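# A minimal, self-contained sketch of a reporter built around _get_percentiles_dict. The
# class name (PercentileReporter), the percentiles_to_report values, the percentiles_na
# placeholder, and _get_readable_percentile are assumptions for illustration only; they are
# not part of Locust and may not match the surrounding codebase.
from typing import Dict

from locust.stats import RequestStats, StatsEntry


class PercentileReporter:
    def __init__(self):
        # Assumed configuration: which percentiles to report and what to show when no data exists.
        self.percentiles_to_report = [0.5, 0.9, 0.95, 0.99]
        self.percentiles_na = {self._get_readable_percentile(p): 0 for p in self.percentiles_to_report}

    def _get_readable_percentile(self, percentile: float) -> str:
        # e.g. 0.95 -> "95th"
        return f"{int(percentile * 100)}th"

    def _get_percentiles_dict(self, stats_entry: StatsEntry) -> Dict[str, int]:
        if not stats_entry.num_requests:
            return self.percentiles_na
        return {
            self._get_readable_percentile(p): int(stats_entry.get_response_time_percentile(p) or 0)
            for p in self.percentiles_to_report
        }


# Usage sketch: log a handful of response times and report the configured percentiles.
stats = RequestStats()
entry = StatsEntry(stats, "checkout", "GET")
for rt in (30, 50, 70, 90, 110):
    entry.log(rt, 0)
print(PercentileReporter()._get_percentiles_dict(entry))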
def test_response_times_cached(self):
    s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    self.assertEqual(1, len(s.response_times_cache))
    s.log(11, 1337)
    self.assertEqual(1, len(s.response_times_cache))
    s.last_request_timestamp -= 1
    s.log(666, 1337)
    self.assertEqual(2, len(s.response_times_cache))
    self.assertEqual(
        CachedResponseTimes(
            response_times={11: 1},
            num_requests=1,
        ),
        s.response_times_cache[int(s.last_request_timestamp) - 1],
    )
def test_latest_total_response_times_pruned(self):
    """
    Check that RequestStats.latest_total_response_times are pruned when exceeding 20 entries
    """
    s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    t = int(time.time())
    for i in reversed(range(2, 30)):
        s.response_times_cache[t - i] = CachedResponseTimes(response_times={}, num_requests=0)
    self.assertEqual(29, len(s.response_times_cache))
    s.log(17, 1337)
    s.last_request_timestamp -= 1
    s.log(1, 1)
    self.assertEqual(20, len(s.response_times_cache))
    self.assertEqual(
        CachedResponseTimes(response_times={17: 1}, num_requests=1),
        s.response_times_cache.popitem(last=True)[1],
    )
def test_get_current_response_time_percentile(self):
    s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    t = int(time.time())
    s.response_times_cache[t - 10] = CachedResponseTimes(
        response_times={i: 1 for i in xrange(100)},
        num_requests=200,
    )
    s.response_times_cache[t - 10].response_times[1] = 201

    s.response_times = {i: 2 for i in xrange(100)}
    s.response_times[1] = 202
    s.num_requests = 300

    assert 95 == s.get_current_response_time_percentile(0.95)
def test_latest_total_response_times_pruned(self):
    """
    Check that RequestStats.latest_total_response_times are pruned when exceeding 20 entries
    """
    s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    t = int(time.time())
    for i in reversed(range(2, 30)):
        s.response_times_cache[t - i] = CachedResponseTimes(response_times={}, num_requests=0)
    assert 29 == len(s.response_times_cache)
    s.log(17, 1337)
    s.last_request_timestamp -= 1
    s.log(1, 1)
    assert 20 == len(s.response_times_cache)
    assert CachedResponseTimes(response_times={17: 1}, num_requests=1) == s.response_times_cache.popitem(last=True)[1]
def test_aggregation_last_request_timestamp(self):
    s1 = StatsEntry(self.stats, "r", "GET")
    s2 = StatsEntry(self.stats, "r", "GET")
    s1.extend(s2)
    self.assertEqual(None, s1.last_request_timestamp)

    s1 = StatsEntry(self.stats, "r", "GET")
    s2 = StatsEntry(self.stats, "r", "GET")
    s1.last_request_timestamp = 666
    s1.extend(s2)
    self.assertEqual(666, s1.last_request_timestamp)

    s1 = StatsEntry(self.stats, "r", "GET")
    s2 = StatsEntry(self.stats, "r", "GET")
    s2.last_request_timestamp = 666
    s1.extend(s2)
    self.assertEqual(666, s1.last_request_timestamp)

    s1 = StatsEntry(self.stats, "r", "GET")
    s2 = StatsEntry(self.stats, "r", "GET")
    s1.last_request_timestamp = 666
    s2.last_request_timestamp = 700
    s1.extend(s2)
    self.assertEqual(700, s1.last_request_timestamp)
def test_rps_less_than_one_second(self):
    s = StatsEntry(self.stats, "percentile_test", "GET")
    for i in range(10):
        s.log(i, 0)
    self.assertGreater(s.total_rps, 10)
def test_aggregation(self):
    s1 = StatsEntry(self.stats, "aggregate me!", "GET")
    s1.log(12, 0)
    s1.log(12, 0)
    s1.log(38, 0)
    s1.log_error("Dummy exzeption")

    s2 = StatsEntry(self.stats, "aggregate me!", "GET")
    s2.log_error("Dummy exzeption")
    s2.log_error("Dummy exzeption")
    s2.log(12, 0)
    s2.log(99, 0)
    s2.log(14, 0)
    s2.log(55, 0)
    s2.log(38, 0)
    s2.log(55, 0)
    s2.log(97, 0)

    s = StatsEntry(self.stats, "GET", "")
    s.extend(s1)
    s.extend(s2)

    self.assertEqual(s.num_requests, 10)
    self.assertEqual(s.num_failures, 3)
    self.assertEqual(s.median_response_time, 38)
    self.assertEqual(s.avg_response_time, 43.2)
def test_get_current_response_time_percentile_outside_cache_window(self):
    s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    # an empty response times cache, current time will not be in this cache
    s.response_times_cache = {}
    self.assertEqual(None, s.get_current_response_time_percentile(0.95))
class TestRequestStats(unittest.TestCase):
    def setUp(self):
        self.stats = RequestStats()
        self.stats.start_time = time.time()
        self.s = StatsEntry(self.stats, "test_entry", "GET")
        self.s.log(45, 0)
        self.s.log(135, 0)
        self.s.log(44, 0)
        self.s.log_error(Exception("dummy fail"))
        self.s.log_error(Exception("dummy fail"))
        self.s.log(375, 0)
        self.s.log(601, 0)
        self.s.log(35, 0)
        self.s.log(79, 0)
        self.s.log_error(Exception("dummy fail"))

    def test_percentile(self):
        s = StatsEntry(self.stats, "percentile_test", "GET")
        for x in xrange(100):
            s.log(x, 0)

        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)

    def test_median(self):
        self.assertEqual(self.s.median_response_time, 79)

    def test_total_rps(self):
        self.assertEqual(self.s.total_rps, 7)

    def test_current_rps(self):
        self.stats.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_rps, 3.5)

        self.stats.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_rps, 0)

    def test_num_reqs_fails(self):
        self.assertEqual(self.s.num_requests, 7)
        self.assertEqual(self.s.num_failures, 3)

    def test_avg(self):
        self.assertEqual(self.s.avg_response_time, 187.71428571428571428571428571429)

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)

        self.assertEqual(self.s.total_rps, 2)
        self.assertEqual(self.s.num_requests, 2)
        self.assertEqual(self.s.num_failures, 1)
        self.assertEqual(self.s.avg_response_time, 420.5)
        self.assertEqual(self.s.median_response_time, 85)

    def test_reset_min_response_time(self):
        self.s.reset()
        self.s.log(756, 0)
        self.assertEqual(756, self.s.min_response_time)

    def test_aggregation(self):
        s1 = StatsEntry(self.stats, "aggregate me!", "GET")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = StatsEntry(self.stats, "aggregate me!", "GET")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

        s = StatsEntry(self.stats, "GET", "")
        s.extend(s1, full_request_history=True)
        s.extend(s2, full_request_history=True)

        self.assertEqual(s.num_requests, 10)
        self.assertEqual(s.num_failures, 3)
        self.assertEqual(s.median_response_time, 38)
        self.assertEqual(s.avg_response_time, 43.2)

    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message,
        and unserialize the whole thing again. This is done "IRL" when stats are sent
        from slaves to master.
        """
        s1 = StatsEntry(self.stats, "test", "GET")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)
        u1 = StatsEntry.unserialize(s1.serialize())

        data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = StatsEntry.unserialize(data)
        self.assertEqual(20, u1.median_response_time)
def test_percentile_rounded_down(self):
    s1 = StatsEntry(self.stats, "rounding down!", "GET")
    s1.log(122, 0)  # (rounded 120) min
    actual_percentile = s1.percentile()
    self.assertEqual(actual_percentile, " GET rounding down! 1 120 120 120 120 120 120 120 120 120 120 120")
def test_percentile_rounded_up(self):
    s2 = StatsEntry(self.stats, "rounding up!", "GET")
    s2.log(127, 0)  # (rounded 130) min
    actual_percentile = s2.percentile()
    self.assertEqual(actual_percentile, " GET rounding up! 1 130 130 130 130 130 130 130 130 130 130 130")
def test_aggregation(self):
    s1 = StatsEntry(self.stats, "aggregate me!", "GET")
    s1.log(12, 0)
    s1.log(12, 0)
    s1.log(38, 0)
    s1.log_error("Dummy exzeption")

    s2 = StatsEntry(self.stats, "aggregate me!", "GET")
    s2.log_error("Dummy exzeption")
    s2.log_error("Dummy exzeption")
    s2.log(12, 0)
    s2.log(99, 0)
    s2.log(14, 0)
    s2.log(55, 0)
    s2.log(38, 0)
    s2.log(55, 0)
    s2.log(97, 0)

    s = StatsEntry(self.stats, "GET", "")
    s.extend(s1)
    s.extend(s2)

    assert s.num_requests == 10
    assert s.num_failures == 3
    assert s.median_response_time == 38
    assert s.avg_response_time == 43.2
class TestRequestStats(unittest.TestCase):
    def setUp(self):
        self.stats = RequestStats()
        self.stats.start_time = time.time()
        self.s = StatsEntry(self.stats, "test_entry", "GET")
        self.s.log(45, 0)
        self.s.log(135, 0)
        self.s.log(44, 0)
        self.s.log_error(Exception("dummy fail"))
        self.s.log_error(Exception("dummy fail"))
        self.s.log(375, 0)
        self.s.log(601, 0)
        self.s.log(35, 0)
        self.s.log(79, 0)
        self.s.log_error(Exception("dummy fail"))

    def test_percentile(self):
        s = StatsEntry(self.stats, "percentile_test", "GET")
        for x in xrange(100):
            s.log(x, 0)

        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)

    def test_median(self):
        self.assertEqual(self.s.median_response_time, 79)

    def test_median_out_of_min_max_bounds(self):
        s = StatsEntry(self.stats, "median_test", "GET")
        s.log(6034, 0)
        self.assertEqual(s.median_response_time, 6034)
        s.reset()
        s.log(6099, 0)
        self.assertEqual(s.median_response_time, 6099)

    def test_total_rps(self):
        self.assertEqual(self.s.total_rps, 7)

    def test_current_rps(self):
        self.stats.total.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_rps, 3.5)

        self.stats.total.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_rps, 0)

    def test_num_reqs_fails(self):
        self.assertEqual(self.s.num_requests, 7)
        self.assertEqual(self.s.num_failures, 3)

    def test_avg(self):
        self.assertEqual(self.s.avg_response_time, 187.71428571428572)

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)

        self.assertEqual(self.s.total_rps, 2)
        self.assertEqual(self.s.num_requests, 2)
        self.assertEqual(self.s.num_failures, 1)
        self.assertEqual(self.s.avg_response_time, 420.5)
        self.assertEqual(self.s.median_response_time, 85)

    def test_reset_min_response_time(self):
        self.s.reset()
        self.s.log(756, 0)
        self.assertEqual(756, self.s.min_response_time)

    def test_aggregation(self):
        s1 = StatsEntry(self.stats, "aggregate me!", "GET")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = StatsEntry(self.stats, "aggregate me!", "GET")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

        s = StatsEntry(self.stats, "GET", "")
        s.extend(s1)
        s.extend(s2)

        self.assertEqual(s.num_requests, 10)
        self.assertEqual(s.num_failures, 3)
        self.assertEqual(s.median_response_time, 38)
        self.assertEqual(s.avg_response_time, 43.2)

    def test_aggregation_with_rounding(self):
        s1 = StatsEntry(self.stats, "round me!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        s1.log(992, 0)  # (rounded 990) max
        s1.log(142, 0)  # (rounded 140)
        s1.log(552, 0)  # (rounded 550)
        s1.log(557, 0)  # (rounded 560)
        s1.log(387, 0)  # (rounded 390)
        s1.log(557, 0)  # (rounded 560)
        s1.log(977, 0)  # (rounded 980)

        self.assertEqual(s1.num_requests, 8)
        self.assertEqual(s1.median_response_time, 550)
        self.assertEqual(s1.avg_response_time, 535.75)
        self.assertEqual(s1.min_response_time, 122)
        self.assertEqual(s1.max_response_time, 992)

    def test_aggregation_min_response_time(self):
        s1 = StatsEntry(self.stats, "min", "GET")
        s1.log(10, 0)
        self.assertEqual(10, s1.min_response_time)

        s2 = StatsEntry(self.stats, "min", "GET")
        s1.extend(s2)
        self.assertEqual(10, s1.min_response_time)

    def test_percentile_rounded_down(self):
        s1 = StatsEntry(self.stats, "rounding down!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        actual_percentile = s1.percentile()
        self.assertEqual(actual_percentile, " GET rounding down! 1 120 120 120 120 120 120 120 120 120")

    def test_percentile_rounded_up(self):
        s2 = StatsEntry(self.stats, "rounding up!", "GET")
        s2.log(127, 0)  # (rounded 130) min
        actual_percentile = s2.percentile()
        self.assertEqual(actual_percentile, " GET rounding up! 1 130 130 130 130 130 130 130 130 130")

    def test_error_grouping(self):
        # reset stats
        self.stats = RequestStats()

        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.assertEqual(1, len(self.stats.errors))
        self.assertEqual(2, list(self.stats.errors.values())[0].occurrences)

        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Third exception!"))
        self.assertEqual(3, len(self.stats.errors))

    def test_error_grouping_errors_with_memory_addresses(self):
        # reset stats
        self.stats = RequestStats()

        class Dummy(object):
            pass

        self.stats.log_error("GET", "/", Exception("Error caused by %r" % Dummy()))
        self.assertEqual(1, len(self.stats.errors))

    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message,
        and unserialize the whole thing again. This is done "IRL" when stats are sent
        from slaves to master.
        """
        s1 = StatsEntry(self.stats, "test", "GET")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)
        u1 = StatsEntry.unserialize(s1.serialize())

        data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = StatsEntry.unserialize(data)
        self.assertEqual(20, u1.median_response_time)
def test_fail_ratio_with_failures(self):
    s = StatsEntry(self.stats, "/", "GET")
    s.num_requests = 10
    s.num_failures = 5
    self.assertAlmostEqual(s.fail_ratio, 0.5)
class TestRequestStats(unittest.TestCase):
    def setUp(self):
        self.stats = RequestStats()
        self.stats.start_time = time.time()
        self.s = StatsEntry(self.stats, "test_entry", "GET")
        self.s.log(45, 0)
        self.s.log(135, 0)
        self.s.log(44, 0)
        self.s.log_error(Exception("dummy fail"))
        self.s.log_error(Exception("dummy fail"))
        self.s.log(375, 0)
        self.s.log(601, 0)
        self.s.log(35, 0)
        self.s.log(79, 0)
        self.s.log_error(Exception("dummy fail"))

    def test_percentile(self):
        s = StatsEntry(self.stats, "percentile_test", "GET")
        for x in xrange(100):
            s.log(x, 0)

        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)

    def test_median(self):
        self.assertEqual(self.s.median_response_time, 79)

    def test_total_rps(self):
        self.assertEqual(self.s.total_rps, 7)

    def test_current_rps(self):
        self.stats.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_rps, 3.5)

        self.stats.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_rps, 0)

    def test_num_reqs_fails(self):
        self.assertEqual(self.s.num_requests, 7)
        self.assertEqual(self.s.num_failures, 3)

    def test_avg(self):
        self.assertEqual(self.s.avg_response_time, 187.71428571428571428571428571429)

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)

        self.assertEqual(self.s.total_rps, 2)
        self.assertEqual(self.s.num_requests, 2)
        self.assertEqual(self.s.num_failures, 1)
        self.assertEqual(self.s.avg_response_time, 420.5)
        self.assertEqual(self.s.median_response_time, 85)

    def test_aggregation(self):
        s1 = StatsEntry(self.stats, "aggregate me!", "GET")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = StatsEntry(self.stats, "aggregate me!", "GET")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

        s = StatsEntry(self.stats, "GET", "")
        s.extend(s1, full_request_history=True)
        s.extend(s2, full_request_history=True)

        self.assertEqual(s.num_requests, 10)
        self.assertEqual(s.num_failures, 3)
        self.assertEqual(s.median_response_time, 38)
        self.assertEqual(s.avg_response_time, 43.2)

    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message,
        and unserialize the whole thing again. This is done "IRL" when stats are sent
        from slaves to master.
        """
        s1 = StatsEntry(self.stats, "test", "GET")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)
        u1 = StatsEntry.unserialize(s1.serialize())

        data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = StatsEntry.unserialize(data)
        self.assertEqual(20, u1.median_response_time)
def test_percentile_rounded_down(self):
    s1 = StatsEntry(self.stats, "rounding down!", "GET")
    s1.log(122, 0)  # (rounded 120) min
    actual_percentile = s1.percentile().split()
    self.assertEqual(actual_percentile, ['GET', 'rounding', 'down!', '1'] + ['120'] * len(PERCENTILES_TO_REPORT))
def test_percentile_rounded_up(self):
    s2 = StatsEntry(self.stats, "rounding up!", "GET")
    s2.log(127, 0)  # (rounded 130) min
    actual_percentile = s2.percentile().split()
    self.assertEqual(actual_percentile, ['GET', 'rounding', 'up!', '1'] + ['130'] * len(PERCENTILES_TO_REPORT))
def test_percentile_rounded_down(self):
    s1 = StatsEntry(self.stats, "rounding down!", "GET")
    s1.log(122, 0)  # (rounded 120) min
    actual_percentile = s1.percentile().split()
    self.assertEqual(actual_percentile, ["GET", "rounding", "down!"] + ["120"] * len(PERCENTILES_TO_REPORT) + ["1"])
def test_percentile_rounded_up(self):
    s2 = StatsEntry(self.stats, "rounding up!", "GET")
    s2.log(127, 0)  # (rounded 130) min
    actual_percentile = s2.percentile().split()
    self.assertEqual(actual_percentile, ["GET", "rounding", "up!"] + ["130"] * len(PERCENTILES_TO_REPORT) + ["1"])
def test_percentile_rounded_down(self):
    s1 = StatsEntry(self.stats, "rounding down!", "GET")
    s1.log(122, 0)  # (rounded 120) min
    actual_percentile = s1.percentile()
    assert actual_percentile == " GET rounding down! 1 120 120 120 120 120 120 120 120 120"
class TestRequestStats(unittest.TestCase):
    def setUp(self):
        self.stats = RequestStats()
        self.stats.start_time = time.time()
        self.s = StatsEntry(self.stats, "test_entry", "GET")
        self.s.log(45, 0)
        self.s.log(135, 0)
        self.s.log(44, 0)
        self.s.log_error(Exception("dummy fail"))
        self.s.log_error(Exception("dummy fail"))
        self.s.log(375, 0)
        self.s.log(601, 0)
        self.s.log(35, 0)
        self.s.log(79, 0)
        self.s.log_error(Exception("dummy fail"))

    def test_percentile(self):
        s = StatsEntry(self.stats, "percentile_test", "GET")
        for x in xrange(100):
            s.log(x, 0)

        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)

    def test_median(self):
        self.assertEqual(self.s.median_response_time, 79)

    def test_total_rps(self):
        self.assertEqual(self.s.total_rps, 7)

    def test_current_rps(self):
        self.stats.total.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_rps, 3.5)

        self.stats.total.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_rps, 0)

    def test_num_reqs_fails(self):
        self.assertEqual(self.s.num_requests, 7)
        self.assertEqual(self.s.num_failures, 3)

    def test_avg(self):
        self.assertEqual(self.s.avg_response_time, 187.71428571428572)

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)

        self.assertEqual(self.s.total_rps, 2)
        self.assertEqual(self.s.num_requests, 2)
        self.assertEqual(self.s.num_failures, 1)
        self.assertEqual(self.s.avg_response_time, 420.5)
        self.assertEqual(self.s.median_response_time, 85)

    def test_reset_min_response_time(self):
        self.s.reset()
        self.s.log(756, 0)
        self.assertEqual(756, self.s.min_response_time)

    def test_aggregation(self):
        s1 = StatsEntry(self.stats, "aggregate me!", "GET")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = StatsEntry(self.stats, "aggregate me!", "GET")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

        s = StatsEntry(self.stats, "GET", "")
        s.extend(s1)
        s.extend(s2)

        self.assertEqual(s.num_requests, 10)
        self.assertEqual(s.num_failures, 3)
        self.assertEqual(s.median_response_time, 38)
        self.assertEqual(s.avg_response_time, 43.2)

    def test_aggregation_with_rounding(self):
        s1 = StatsEntry(self.stats, "round me!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        s1.log(992, 0)  # (rounded 990) max
        s1.log(142, 0)  # (rounded 140)
        s1.log(552, 0)  # (rounded 550)
        s1.log(557, 0)  # (rounded 560)
        s1.log(387, 0)  # (rounded 390)
        s1.log(557, 0)  # (rounded 560)
        s1.log(977, 0)  # (rounded 980)

        self.assertEqual(s1.num_requests, 8)
        self.assertEqual(s1.median_response_time, 550)
        self.assertEqual(s1.avg_response_time, 535.75)
        self.assertEqual(s1.min_response_time, 122)
        self.assertEqual(s1.max_response_time, 992)

    def test_percentile_rounded_down(self):
        s1 = StatsEntry(self.stats, "rounding down!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        actual_percentile = s1.percentile()
        self.assertEqual(actual_percentile, " GET rounding down! 1 120 120 120 120 120 120 120 120 120")

    def test_percentile_rounded_up(self):
        s2 = StatsEntry(self.stats, "rounding up!", "GET")
        s2.log(127, 0)  # (rounded 130) min
        actual_percentile = s2.percentile()
        self.assertEqual(actual_percentile, " GET rounding up! 1 130 130 130 130 130 130 130 130 130")

    def test_error_grouping(self):
        # reset stats
        self.stats = RequestStats()

        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.assertEqual(1, len(self.stats.errors))
        self.assertEqual(2, list(self.stats.errors.values())[0].occurences)

        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Third exception!"))
        self.assertEqual(3, len(self.stats.errors))

    def test_error_grouping_errors_with_memory_addresses(self):
        # reset stats
        self.stats = RequestStats()

        class Dummy(object):
            pass

        self.stats.log_error("GET", "/", Exception("Error caused by %r" % Dummy()))
        self.assertEqual(1, len(self.stats.errors))

    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message,
        and unserialize the whole thing again. This is done "IRL" when stats are sent
        from slaves to master.
        """
        s1 = StatsEntry(self.stats, "test", "GET")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)
        u1 = StatsEntry.unserialize(s1.serialize())

        data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = StatsEntry.unserialize(data)
        self.assertEqual(20, u1.median_response_time)
class TestRequestStats(unittest.TestCase):
    def setUp(self):
        self.stats = RequestStats()
        self.stats.start_time = time.time()
        self.s = StatsEntry(self.stats, "test_entry", "GET")
        self.s.log(45, 0)
        self.s.log(135, 0)
        self.s.log(44, 0)
        self.s.log_error(Exception("dummy fail"))
        self.s.log_error(Exception("dummy fail"))
        self.s.log(375, 0)
        self.s.log(601, 0)
        self.s.log(35, 0)
        self.s.log(79, 0)
        self.s.log_error(Exception("dummy fail"))

    def test_percentile(self):
        s = StatsEntry(self.stats, "percentile_test", "GET")
        for x in xrange(100):
            s.log(x, 0)

        assert s.get_response_time_percentile(0.5) == 50
        assert s.get_response_time_percentile(0.6) == 60
        assert s.get_response_time_percentile(0.95) == 95

    def test_median(self):
        assert self.s.median_response_time == 79

    def test_total_rps(self):
        assert self.s.total_rps == 7

    def test_current_rps(self):
        self.stats.total.last_request_timestamp = int(time.time()) + 4
        assert self.s.current_rps == 3.5

        self.stats.total.last_request_timestamp = int(time.time()) + 25
        assert self.s.current_rps == 0

    def test_num_reqs_fails(self):
        assert self.s.num_requests == 7
        assert self.s.num_failures == 3

    def test_avg(self):
        assert self.s.avg_response_time == 187.71428571428572

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)

        assert self.s.total_rps == 2
        assert self.s.num_requests == 2
        assert self.s.num_failures == 1
        assert self.s.avg_response_time == 420.5
        assert self.s.median_response_time == 85

    def test_reset_min_response_time(self):
        self.s.reset()
        self.s.log(756, 0)
        assert 756 == self.s.min_response_time

    def test_aggregation(self):
        s1 = StatsEntry(self.stats, "aggregate me!", "GET")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = StatsEntry(self.stats, "aggregate me!", "GET")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

        s = StatsEntry(self.stats, "GET", "")
        s.extend(s1)
        s.extend(s2)

        assert s.num_requests == 10
        assert s.num_failures == 3
        assert s.median_response_time == 38
        assert s.avg_response_time == 43.2

    def test_aggregation_with_rounding(self):
        s1 = StatsEntry(self.stats, "round me!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        s1.log(992, 0)  # (rounded 990) max
        s1.log(142, 0)  # (rounded 140)
        s1.log(552, 0)  # (rounded 550)
        s1.log(557, 0)  # (rounded 560)
        s1.log(387, 0)  # (rounded 390)
        s1.log(557, 0)  # (rounded 560)
        s1.log(977, 0)  # (rounded 980)

        assert s1.num_requests == 8
        assert s1.median_response_time == 550
        assert s1.avg_response_time == 535.75
        assert s1.min_response_time == 122
        assert s1.max_response_time == 992

    def test_percentile_rounded_down(self):
        s1 = StatsEntry(self.stats, "rounding down!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        actual_percentile = s1.percentile()
        assert actual_percentile == " GET rounding down! 1 120 120 120 120 120 120 120 120 120"

    def test_percentile_rounded_up(self):
        s2 = StatsEntry(self.stats, "rounding up!", "GET")
        s2.log(127, 0)  # (rounded 130) min
        actual_percentile = s2.percentile()
        assert actual_percentile == " GET rounding up! 1 130 130 130 130 130 130 130 130 130"

    def test_error_grouping(self):
        # reset stats
        self.stats = RequestStats()

        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        assert 1 == len(self.stats.errors)
        assert 2 == list(self.stats.errors.values())[0].occurences

        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Third exception!"))
        assert 3 == len(self.stats.errors)

    def test_error_grouping_errors_with_memory_addresses(self):
        # reset stats
        self.stats = RequestStats()

        class Dummy(object):
            pass

        self.stats.log_error("GET", "/", Exception("Error caused by %r" % Dummy()))
        assert 1 == len(self.stats.errors)

    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message,
        and unserialize the whole thing again. This is done "IRL" when stats are sent
        from slaves to master.
        """
        s1 = StatsEntry(self.stats, "test", "GET")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)
        u1 = StatsEntry.unserialize(s1.serialize())

        data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = StatsEntry.unserialize(data)
        assert 20 == u1.median_response_time
def test_percentile_rounded_up(self):
    s2 = StatsEntry(self.stats, "rounding up!", "GET")
    s2.log(127, 0)  # (rounded 130) min
    actual_percentile = s2.percentile()
    assert actual_percentile == " GET rounding up! 1 130 130 130 130 130 130 130 130 130"
def test_aggregation(self):
    s1 = StatsEntry(self.stats, "aggregate me!", "GET")
    s1.log(12, 0)
    s1.log(12, 0)
    s1.log(38, 0)
    s1.log_error("Dummy exzeption")

    s2 = StatsEntry(self.stats, "aggregate me!", "GET")
    s2.log_error("Dummy exzeption")
    s2.log_error("Dummy exzeption")
    s2.log(12, 0)
    s2.log(99, 0)
    s2.log(14, 0)
    s2.log(55, 0)
    s2.log(38, 0)
    s2.log(55, 0)
    s2.log(97, 0)

    s = StatsEntry(self.stats, "GET", "")
    s.extend(s1, full_request_history=True)
    s.extend(s2, full_request_history=True)

    self.assertEqual(s.num_requests, 10)
    self.assertEqual(s.num_failures, 3)
    self.assertEqual(s.median_response_time, 38)
    self.assertEqual(s.avg_response_time, 43.2)
def test_serialize_through_message(self):
    """
    Serialize a RequestStats instance, then serialize it through a Message,
    and unserialize the whole thing again. This is done "IRL" when stats are sent
    from slaves to master.
    """
    s1 = StatsEntry(self.stats, "test", "GET")
    s1.log(10, 0)
    s1.log(20, 0)
    s1.log(40, 0)
    StatsEntry.unserialize(s1.serialize())

    data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
    u1 = StatsEntry.unserialize(data)
    self.assertEqual(20, u1.median_response_time)
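# For context, a minimal standalone sketch of the round trip the tests above exercise: a
# worker-side StatsEntry is serialized, wrapped in a Message, turned into bytes, and rebuilt
# on the master from the message payload. The import paths (locust.stats, locust.rpc.protocol),
# the "stats" message type, and the "worker-1" node id are assumptions for illustration and
# may differ between Locust versions.
from locust.rpc.protocol import Message
from locust.stats import RequestStats, StatsEntry

stats = RequestStats()
entry = StatsEntry(stats, "/login", "POST")
for rt in (10, 20, 40):
    entry.log(rt, 0)

# Worker side: pack the serialized entry into a Message and encode it for the wire.
wire_bytes = Message("stats", entry.serialize(), "worker-1").serialize()

# Master side: decode the Message and rebuild the StatsEntry from its data payload.
received = StatsEntry.unserialize(Message.unserialize(wire_bytes).data)
assert received.median_response_time == 20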