def test_rate_no_change_if_time_too_short(self):
    """
    Test that when we have accepted more than half the maximum permitted
    requests but haven't passed the update period, we do not change the
    permitted request rate.
    """
    # Create a load monitor as we do in the main crest base
    load_monitor = base.LoadMonitor(0.1, 100, 100, 10)
    initial_rate = load_monitor.bucket.rate

    # We need this to be a float, so that the sleeps below don't round to 0
    update_period = float(load_monitor.SECONDS_BEFORE_ADJUSTMENT)

    # The number of requests we need to send to go over the adjustment
    # threshold is rate * adjustment period. Force an integer here: the
    # rate may be a float, and under Python 3 `/` always produces a float,
    # which range() rejects with a TypeError.
    threshold_requests = int(load_monitor.bucket.rate *
                             load_monitor.SECONDS_BEFORE_ADJUSTMENT)

    # To simulate load, we will add three sets of half this threshold over
    # the time period and then trigger the update_latency function at the
    # end of each set.
    for _ in range(3):
        for _ in range(threshold_requests // 2):
            load_monitor.admit_request()
            load_monitor.request_complete()
        load_monitor.update_latency(0.08)
        sleep(update_period / 4)

    # We do not sleep here, as we want to remain under the update_period.

    # Do one more request, and then test that the rate remains unchanged
    load_monitor.admit_request()
    load_monitor.request_complete()
    load_monitor.update_latency(0.08)
    final_rate = load_monitor.bucket.rate
    print("Initial rate {}, final rate {}".format(initial_rate, final_rate))
    # assertEqual gives a clearer diff than assertTrue(a == b) on failure
    self.assertEqual(final_rate, initial_rate)
def test_divide_by_zero(self):
    """
    Test that twice the LoadMonitor's REQUESTS_BEFORE_ADJUSTMENT requests
    all arriving at once don't cause a ZeroDivisionError when they all
    complete.
    """
    # Size the token bucket at double REQUESTS_BEFORE_ADJUSTMENT so the
    # whole burst can be admitted in one go.
    size = base.LoadMonitor.REQUESTS_BEFORE_ADJUSTMENT * 2
    load_monitor = base.LoadMonitor(0.1, size, size, size)

    # Admit the entire burst up front. The list comprehension is fully
    # materialized, so every admit_request() call is made even if an
    # earlier one is refused.
    admissions = [load_monitor.admit_request() for _ in range(size)]
    success = all(admissions)

    # All the requests should have been admitted
    self.assertTrue(success)

    # Drain the burst; a ZeroDivisionError here is the regression this
    # test guards against.
    try:
        for _ in range(size):
            load_monitor.request_complete()
            load_monitor.update_latency(0.1)
    except ZeroDivisionError:
        success = False

    self.assertTrue(success)
def test_rate_decrease_on_hss_overload(self, mock_penaltycounter):
    """
    Test that when we are accepting requests within target latency, but are
    getting hss overload responses, we decrease the permitted request rate.
    """
    # Create a load monitor as we do in the main crest base, and save off
    # the initial rate
    load_monitor = base.LoadMonitor(0.1, 100, 100, 10)
    initial_rate = load_monitor.bucket.rate

    # We need this to be a float, so that the sleeps below don't round to 0
    update_period = float(load_monitor.SECONDS_BEFORE_ADJUSTMENT)

    # The number of requests we need to send to go over the adjustment
    # threshold is rate * adjustment period. Force an integer here: the
    # rate may be a float, and under Python 3 `/` always produces a float,
    # which range() rejects with a TypeError.
    threshold_requests = int(load_monitor.bucket.rate *
                             load_monitor.SECONDS_BEFORE_ADJUSTMENT)

    # To simulate load, we will add three sets of half this threshold over
    # the time period and then trigger the update_latency function at the
    # end of each set. The reported latency (0.08) is *within* the 0.1
    # target, so any rate decrease must come from the HSS overload signal
    # (presumably supplied by the mocked penalty counter — confirm against
    # base.LoadMonitor), not from excessive latency.
    for _ in range(3):
        for _ in range(threshold_requests // 2):
            load_monitor.admit_request()
            load_monitor.request_complete()
        load_monitor.update_latency(0.08)
        sleep(update_period / 4)

    # Do one last sleep, so that we pass the time threshold
    sleep(update_period / 4 + 0.1)

    # Do one more request, and then test that the rate has decreased
    load_monitor.admit_request()
    load_monitor.request_complete()
    load_monitor.update_latency(0.08)
    final_rate = load_monitor.bucket.rate
    print("Initial rate {}, final rate {}".format(initial_rate, final_rate))
    self.assertTrue(final_rate < initial_rate)