def test_median_out_of_min_max_bounds(self):
    """A lone sample larger than any tracked bucket is still reported as the median."""
    entry = StatsEntry(self.stats, "median_test", "GET")
    for i, response_time in enumerate((6034, 6099)):
        if i:
            # Clear the first sample before logging the second one.
            entry.reset()
        entry.log(response_time, 0)
        self.assertEqual(entry.median_response_time, response_time)
def test_response_times_not_cached_if_not_enabled(self):
    """Without use_response_times_cache, the cache attribute must remain None."""
    entry = StatsEntry(self.stats, "/", "GET")
    entry.log(11, 1337)
    self.assertEqual(None, entry.response_times_cache)
    # Shift the last request one second back so the next log() lands in a
    # new one-second window — still no cache should be created.
    entry.last_request_timestamp -= 1
    entry.log(666, 1337)
    self.assertEqual(None, entry.response_times_cache)
def test_custom_percentile_list(self):
    """percentile() should emit one column per entry in PERCENTILES_TO_REPORT.

    Fix: the original overwrote the module-level ``locust.stats.PERCENTILES_TO_REPORT``
    without restoring it, leaking the custom list into every test that runs
    afterwards. Save and restore it in a try/finally.
    """
    s = StatsEntry(self.stats, "custom_percentiles", "GET")
    custom_percentile_list = [0.50, 0.90, 0.95, 0.99]
    original_percentiles = locust.stats.PERCENTILES_TO_REPORT
    locust.stats.PERCENTILES_TO_REPORT = custom_percentile_list
    try:
        s.log(150, 0)
        actual_percentile = s.percentile().split()
        self.assertEqual(
            actual_percentile,
            ["GET", "custom_percentiles"] + ["150"] * len(custom_percentile_list) + ["1"],
        )
    finally:
        # Undo the global mutation so other tests see the default percentiles.
        locust.stats.PERCENTILES_TO_REPORT = original_percentiles
def test_percentile(self):
    """get_response_time_percentile() over a uniform 0..99 ms distribution.

    Fix: replaced Python-2-only ``xrange`` with ``range`` — the rest of the
    file (e.g. test_rps_less_than_one_second) already uses ``range``, and
    ``xrange`` raises NameError on Python 3.
    """
    s = StatsEntry(self.stats, "percentile_test", "GET")
    for x in range(100):
        s.log(x, 0)
    self.assertEqual(s.get_response_time_percentile(0.5), 50)
    self.assertEqual(s.get_response_time_percentile(0.6), 60)
    self.assertEqual(s.get_response_time_percentile(0.95), 95)
def test_percentile_rounded_down(self):
    """A 122 ms sample is stored rounded down to 120 ms in every percentile column."""
    # NOTE(review): this method name is redefined further down in the file, so
    # unittest only collects the last definition — confirm which variant to keep.
    entry = StatsEntry(self.stats, "rounding down!", "GET")
    entry.log(122, 0)  # (rounded 120) min
    self.assertEqual(
        entry.percentile(),
        " GET rounding down! 1 120 120 120 120 120 120 120 120 120 120 120",
    )
def test_percentile_rounded_up(self):
    """A 127 ms sample is stored rounded up to 130 ms in every percentile column."""
    # NOTE(review): this method name is redefined further down in the file, so
    # unittest only collects the last definition — confirm which variant to keep.
    entry = StatsEntry(self.stats, "rounding up!", "GET")
    entry.log(127, 0)  # (rounded 130) min
    self.assertEqual(
        entry.percentile(),
        " GET rounding up! 1 130 130 130 130 130 130 130 130 130 130 130",
    )
def test_aggregation_with_decimal_rounding(self):
    """Fractional samples: median is rounded, but min/max/avg keep raw values."""
    samples = [1.1, 1.99, 3.1]
    entry = StatsEntry(self.stats, "round me!", "GET")
    for response_time in samples:
        entry.log(response_time, 0)
    self.assertEqual(entry.num_requests, 3)
    self.assertEqual(entry.median_response_time, 2)
    # sum() adds left-to-right, matching the literal 1.1 + 1.99 + 3.1 exactly.
    self.assertEqual(entry.avg_response_time, sum(samples) / 3)
    self.assertEqual(entry.min_response_time, 1.1)
    self.assertEqual(entry.max_response_time, 3.1)
def test_response_times_cached(self):
    """With use_response_times_cache enabled, each elapsed second adds a snapshot."""
    entry = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    # The cache is primed with one entry when the StatsEntry is constructed.
    self.assertEqual(1, len(entry.response_times_cache))
    entry.log(11, 1337)
    self.assertEqual(1, len(entry.response_times_cache))
    # Pretend the previous request happened a second earlier so the next
    # log() snapshots the accumulated response times into the cache.
    entry.last_request_timestamp -= 1
    entry.log(666, 1337)
    self.assertEqual(2, len(entry.response_times_cache))
    expected = CachedResponseTimes(
        response_times={11: 1},
        num_requests=1,
    )
    self.assertEqual(expected, entry.response_times_cache[int(entry.last_request_timestamp) - 1])
def test_error_grouping_errors_with_memory_addresses(self):
    """Errors differing only by an object's memory address are grouped as one."""
    # Start from a clean slate so earlier logged errors don't interfere.
    self.stats = RequestStats()

    class Dummy(object):
        pass

    entry = StatsEntry(self.stats, "/", "GET")
    # Each repr(Dummy()) embeds a distinct 0x... address, but the grouping
    # logic should still collapse both messages into a single error entry.
    for _ in range(2):
        entry.log_error(Exception("Error caused by %r" % Dummy()))
    self.assertEqual(1, len(self.stats.errors))
def test_get_current_response_time_percentile(self):
    """Current percentile is computed from the diff between live counts and the
    cached snapshot from ~10 seconds ago.

    Fix: replaced Python-2-only ``xrange`` with ``range`` — consistent with the
    rest of the file and required on Python 3.
    """
    s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    t = int(time.time())
    # Snapshot from 10 seconds ago: one occurrence of each of 0..99,
    # except response time 1 which was seen 201 times (200 requests total).
    s.response_times_cache[t - 10] = CachedResponseTimes(
        response_times={i: 1 for i in range(100)},
        num_requests=200,
    )
    s.response_times_cache[t - 10].response_times[1] = 201
    # Live totals: two of each of 0..99, except 202 occurrences of 1.
    s.response_times = {i: 2 for i in range(100)}
    s.response_times[1] = 202
    s.num_requests = 300
    # Diff over the window is uniform 0..99 (plus one extra "1"), so the
    # 95th percentile should be 95.
    self.assertEqual(95, s.get_current_response_time_percentile(0.95))
def test_fail_ratio_with_half_failures(self):
    """fail_ratio and the string representation must agree when half the requests failed."""
    REQUEST_COUNT = 10
    FAILURE_COUNT = 5
    EXPECTED_FAIL_RATIO = 0.5
    entry = StatsEntry(self.stats, "/", "GET")
    entry.num_requests = REQUEST_COUNT
    entry.num_failures = FAILURE_COUNT
    self.assertAlmostEqual(entry.fail_ratio, EXPECTED_FAIL_RATIO)
    fields = self.parse_string_output(str(entry))
    self.assertEqual(fields["request_count"], REQUEST_COUNT)
    self.assertEqual(fields["failure_count"], FAILURE_COUNT)
    # NOTE(review): "precentage" spelling presumably matches the key produced by
    # the parse_string_output helper — fix both together or not at all.
    self.assertAlmostEqual(fields["failure_precentage"], EXPECTED_FAIL_RATIO * 100)
def setUp(self):
    """Build a StatsEntry pre-loaded with seven requests and three errors."""
    self.stats = RequestStats()
    self.stats.start_time = time.time()
    self.s = StatsEntry(self.stats, "test_entry", "GET")
    # None marks a logged error; every other value is a response time in ms.
    # Order matches the original interleaving of log() and log_error() calls.
    for response_time in (45, 135, 44, None, None, 375, 601, 35, 79, None):
        if response_time is None:
            self.s.log_error(Exception("dummy fail"))
        else:
            self.s.log(response_time, 0)
def test_error_grouping(self):
    """Identical error messages are merged; distinct messages get their own entries."""
    # Start from a clean slate so earlier logged errors don't interfere.
    self.stats = RequestStats()
    entry = StatsEntry(self.stats, "/some-path", "GET")
    entry.log_error(Exception("Exception!"))
    entry.log_error(Exception("Exception!"))
    self.assertEqual(1, len(self.stats.errors))
    self.assertEqual(2, list(self.stats.errors.values())[0].occurences)
    for message in ("Another exception!", "Another exception!", "Third exception!"):
        entry.log_error(Exception(message))
    self.assertEqual(3, len(self.stats.errors))
def test_aggregation_with_rounding(self):
    """Samples are rounded for the distribution, but min/max/avg use the raw values."""
    # Raw samples; their rounded buckets are 120, 990, 140, 550, 560, 390, 560, 980.
    samples = [122, 992, 142, 552, 557, 387, 557, 977]
    entry = StatsEntry(self.stats, "round me!", "GET")
    for response_time in samples:
        entry.log(response_time, 0)
    self.assertEqual(entry.num_requests, 8)
    self.assertEqual(entry.median_response_time, 550)
    self.assertEqual(entry.avg_response_time, 535.75)
    self.assertEqual(entry.min_response_time, 122)
    self.assertEqual(entry.max_response_time, 992)
def test_serialize_through_message(self):
    """
    Serialize a RequestStats instance, then serialize it through a Message,
    and unserialize the whole thing again. This is done "IRL" when stats
    are sent from workers to master.

    Fix: the original bound the direct round trip to ``u1`` and immediately
    overwrote it (dead store). The call is kept as a smoke check — a broken
    serialize/unserialize still raises here — but the useless binding is gone.
    """
    s1 = StatsEntry(self.stats, "test", "GET")
    s1.log(10, 0)
    s1.log(20, 0)
    s1.log(40, 0)
    # Direct round trip should not raise.
    StatsEntry.unserialize(s1.serialize())
    # Round trip through a serialized Message, as done between worker and master.
    data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
    u1 = StatsEntry.unserialize(data)
    self.assertEqual(20, u1.median_response_time)
def test_latest_total_response_times_pruned(self):
    """
    Check that the response times cache is pruned when exceeding 20 entries.
    """
    entry = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    now = int(time.time())
    # Pre-fill the cache with 28 empty snapshots (the constructor added one).
    for offset in reversed(range(2, 30)):
        entry.response_times_cache[now - offset] = CachedResponseTimes(response_times={}, num_requests=0)
    self.assertEqual(29, len(entry.response_times_cache))
    entry.log(17, 1337)
    entry.last_request_timestamp -= 1
    # Logging in a later one-second window snapshots the current response
    # times and prunes the cache down to 20 entries.
    entry.log(1, 1)
    self.assertEqual(20, len(entry.response_times_cache))
    # The newest entry is the snapshot taken just before the second log().
    self.assertEqual(
        CachedResponseTimes(response_times={17: 1}, num_requests=1),
        entry.response_times_cache.popitem(last=True)[1],
    )
def test_aggregation_last_request_timestamp(self):
    """extend() should keep the most recent last_request_timestamp of the two entries.

    Fix: in the final case the original assigned ``last_request_timestamp`` to
    ``s1`` twice (666 then 700), leaving ``s2`` at None — so the "both entries
    have a timestamp" path through extend() was never actually exercised. The
    second assignment now targets ``s2``.
    """
    # Neither entry has seen a request: the result stays None.
    s1 = StatsEntry(self.stats, "r", "GET")
    s2 = StatsEntry(self.stats, "r", "GET")
    s1.extend(s2)
    self.assertEqual(None, s1.last_request_timestamp)

    # Only the target entry has a timestamp: it is kept.
    s1 = StatsEntry(self.stats, "r", "GET")
    s2 = StatsEntry(self.stats, "r", "GET")
    s1.last_request_timestamp = 666
    s1.extend(s2)
    self.assertEqual(666, s1.last_request_timestamp)

    # Only the extending entry has a timestamp: it is adopted.
    s1 = StatsEntry(self.stats, "r", "GET")
    s2 = StatsEntry(self.stats, "r", "GET")
    s2.last_request_timestamp = 666
    s1.extend(s2)
    self.assertEqual(666, s1.last_request_timestamp)

    # Both entries have timestamps: the later one wins.
    s1 = StatsEntry(self.stats, "r", "GET")
    s2 = StatsEntry(self.stats, "r", "GET")
    s1.last_request_timestamp = 666
    s2.last_request_timestamp = 700
    s1.extend(s2)
    self.assertEqual(700, s1.last_request_timestamp)
def test_percentile_rounded_down(self):
    """Token-based variant: all percentile columns show the rounded-down 120 ms value."""
    # NOTE(review): duplicate method name — also defined earlier and later in this
    # file; unittest only collects the last definition. Confirm which one to keep.
    entry = StatsEntry(self.stats, "rounding down!", "GET")
    entry.log(122, 0)  # (rounded 120) min
    tokens = entry.percentile().split()
    self.assertEqual(tokens, ["GET", "rounding", "down!", "1"] + ["120"] * len(PERCENTILES_TO_REPORT))
def test_percentile_rounded_down(self):
    """Token-based variant with the request count as the trailing column."""
    # NOTE(review): duplicate method name — also defined earlier in this file;
    # unittest only collects the last definition. Confirm which one to keep.
    entry = StatsEntry(self.stats, "rounding down!", "GET")
    entry.log(122, 0)  # (rounded 120) min
    tokens = entry.percentile().split()
    self.assertEqual(tokens, ["GET", "rounding", "down!"] + ["120"] * len(PERCENTILES_TO_REPORT) + ["1"])
def test_percentile_rounded_up(self):
    """Token-based variant: all percentile columns show the rounded-up 130 ms value."""
    # NOTE(review): duplicate method name — also defined earlier and later in this
    # file; unittest only collects the last definition. Confirm which one to keep.
    entry = StatsEntry(self.stats, "rounding up!", "GET")
    entry.log(127, 0)  # (rounded 130) min
    tokens = entry.percentile().split()
    self.assertEqual(tokens, ["GET", "rounding", "up!"] + ["130"] * len(PERCENTILES_TO_REPORT) + ["1"])
def test_rps_less_than_one_second(self):
    """total_rps must exceed the request count when everything lands within one second."""
    entry = StatsEntry(self.stats, "percentile_test", "GET")
    for response_time in range(10):
        entry.log(response_time, 0)
    # 10 requests in well under a second → more than 10 requests per second.
    self.assertGreater(entry.total_rps, 10)
def test_percentile_rounded_up(self):
    """Token-based variant with the request count before the percentile columns."""
    # NOTE(review): duplicate method name — also defined earlier in this file;
    # unittest only collects the last definition. Confirm which one to keep.
    entry = StatsEntry(self.stats, "rounding up!", "GET")
    entry.log(127, 0)  # (rounded 130) min
    tokens = entry.percentile().split()
    self.assertEqual(tokens, ["GET", "rounding", "up!", "1"] + ["130"] * len(PERCENTILES_TO_REPORT))
def test_fail_ratio_with_failures(self):
    """fail_ratio is the failure count divided by the total request count."""
    entry = StatsEntry(self.stats, "/", "GET")
    entry.num_requests = 10
    entry.num_failures = 5
    self.assertAlmostEqual(entry.fail_ratio, 0.5)
def test_get_current_response_time_percentile_outside_cache_window(self):
    """An empty cache has no snapshot for the current window, so the result is None."""
    entry = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True)
    # Wipe the cache entry created by the constructor: the current time can
    # no longer be matched against any cached snapshot.
    entry.response_times_cache = {}
    self.assertEqual(None, entry.get_current_response_time_percentile(0.95))