def __init__(self, window_ms, bucket_ms, overload_ratio):
    """Initializes AdaptiveThrottler.

    Args:
      window_ms: int, length of history to consider, in ms, to set throttling.
      bucket_ms: int, granularity of time buckets that we store data in, in ms.
      overload_ratio: float, the target ratio between requests sent and
        successful requests. This is "K" in the formula in
        https://landing.google.com/sre/book/chapters/handling-overload.html.
    """
    self._all_requests = util.MovingSum(window_ms, bucket_ms)
    self._successful_requests = util.MovingSum(window_ms, bucket_ms)
    self._overload_ratio = float(overload_ratio)
    self._random = random.Random()
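For context, here is a hedged sketch of one way the two MovingSum counters and the K ratio could drive client-side rejection, following the formula from the SRE book chapter linked in the docstring (reject probability = max(0, (requests - K * accepts) / (requests + 1))). The method name should_attempt_request and the caller-supplied now_ms timestamp are illustrative assumptions, not part of the snippet above.

# Hedged sketch: assumes the attributes set in __init__ above and the
# MovingSum.sum(timestamp) API exercised by the tests below.
def should_attempt_request(self, now_ms):
    requests = self._all_requests.sum(now_ms)
    accepts = self._successful_requests.sum(now_ms)
    # Client-side rejection probability from the SRE book formula:
    # max(0, (requests - K * accepts) / (requests + 1)).
    reject_probability = max(
        0.0, (requests - self._overload_ratio * accepts) / (requests + 1))
    return self._random.uniform(0, 1) >= reject_probability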
def test_data_expires_from_moving_window(self):
    ms = util.MovingSum(5, 1)
    ms.add(MovingSumTest.TIMESTAMP, 5)
    ms.add(MovingSumTest.TIMESTAMP + 3, 3)
    ms.add(MovingSumTest.TIMESTAMP + 6, 7)
    # By TIMESTAMP+7 the first data point has aged out of the 5 ms window,
    # so only the later two points contribute to the sum and count.
    self.assertEqual(10, ms.sum(MovingSumTest.TIMESTAMP + 7))
    self.assertEqual(2, ms.count(MovingSumTest.TIMESTAMP + 7))
def test_aggregates_within_window(self):
    ms = util.MovingSum(10, 1)
    ms.add(MovingSumTest.TIMESTAMP, 5)
    ms.add(MovingSumTest.TIMESTAMP + 1, 3)
    ms.add(MovingSumTest.TIMESTAMP + 2, 7)
    self.assertEqual(15, ms.sum(MovingSumTest.TIMESTAMP + 3))
    self.assertEqual(3, ms.count(MovingSumTest.TIMESTAMP + 3))
def __init__(self):
    self._commit_time_per_entity_ms = util.MovingSum(window_ms=120000,
                                                     bucket_ms=10000)
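As a hedged illustration (an assumption, not shown in the snippet above), a per-entity commit-time tracker like this might be fed and queried using only the MovingSum API the tests exercise (add/sum/count/has_data). The method names record_commit and mean_commit_time_ms are hypothetical.

def record_commit(self, timestamp_ms, commit_time_ms, num_entities):
    # Store the per-entity cost of this commit in the trailing window.
    self._commit_time_per_entity_ms.add(
        timestamp_ms, commit_time_ms / max(num_entities, 1))

def mean_commit_time_ms(self, timestamp_ms):
    # Average per-entity commit time over the trailing two-minute window.
    if not self._commit_time_per_entity_ms.has_data(timestamp_ms):
        return None
    return (self._commit_time_per_entity_ms.sum(timestamp_ms) /
            self._commit_time_per_entity_ms.count(timestamp_ms))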
def test_one_data_point(self):
    ms = util.MovingSum(10, 1)
    ms.add(MovingSumTest.TIMESTAMP, 5)
    self.assertEqual(5, ms.sum(MovingSumTest.TIMESTAMP))
    self.assertEqual(1, ms.count(MovingSumTest.TIMESTAMP))
    self.assertTrue(ms.has_data(MovingSumTest.TIMESTAMP))
def test_no_data(self):
    ms = util.MovingSum(10, 1)
    self.assertEqual(0, ms.sum(MovingSumTest.TIMESTAMP))
    self.assertEqual(0, ms.count(MovingSumTest.TIMESTAMP))
    self.assertFalse(ms.has_data(MovingSumTest.TIMESTAMP))
def test_bad_window_size(self):
    with self.assertRaises(ValueError):
        _ = util.MovingSum(1, 2)
def test_bad_bucket_size(self):
    with self.assertRaises(ValueError):
        _ = util.MovingSum(1, 0)