Example #1
    def test_query_api_with_retries_other_error_rate_limited(self):
        with mock.patch.object(KubernetesApi, "query_api") as mock_query:
            mock_query.side_effect = K8sApiPermanentError("Permanent Error")

            k8s = KubernetesApi()
            rate_limiter = BlockingRateLimiter(
                num_agents=1,
                initial_cluster_rate=100,
                max_cluster_rate=1000,
                min_cluster_rate=1,
                consecutive_success_threshold=1,
                strategy="multiply",
            )
            options = ApiQueryOptions(rate_limiter=rate_limiter, max_retries=0)
            self.assertRaises(
                K8sApiPermanentError,
                lambda: k8s.query_api_with_retries("/foo/bar", options),
            )
            self.assertEqual(rate_limiter.current_cluster_rate, 50.0)

            mock_query.side_effect = Exception("Some other exception")
            self.assertRaises(
                Exception,
                lambda: k8s.query_api_with_retries("/foo/bar", options),
            )
            self.assertEqual(rate_limiter.current_cluster_rate, 25.0)
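
The asserted rates follow from the multiply strategy's backoff arithmetic. A minimal sketch of that arithmetic, assuming a backoff factor of 0.5 (the factor itself is not shown in the snippet; it is implied by the 50.0 and 25.0 assertions):

    initial_cluster_rate = 100.0
    backoff_factor = 0.5  # assumed; implied by the assertions above
    rate_after_first_error = initial_cluster_rate * backoff_factor       # 50.0
    rate_after_second_error = rate_after_first_error * backoff_factor    # 25.0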
Example #2
    def _create_rate_limiter():
        # Nested helper: the rate parameters and `self._fake_clock` are names
        # defined in the enclosing test scope.
        return BlockingRateLimiter(
            num_agents=1,
            initial_cluster_rate=initial_cluster_rate,
            max_cluster_rate=max_cluster_rate,
            min_cluster_rate=min_cluster_rate,
            consecutive_success_threshold=required_successes,
            strategy=BlockingRateLimiter.STRATEGY_MULTIPLY,
            increase_factor=increase_factor,
            backoff_factor=backoff_factor,
            max_concurrency=1,
            fake_clock=self._fake_clock,
        )
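
For context, a hypothetical enclosing scope for this nested helper (a sketch; the values below are illustrative assumptions, not taken from the source test):

    # Illustrative values only: the helper closes over these names at call time.
    initial_cluster_rate = 100.0
    max_cluster_rate = 1000.0
    min_cluster_rate = 1.0
    required_successes = 1
    increase_factor = 2.0
    backoff_factor = 0.5
    rate_limiter = _create_rate_limiter()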
Example #3
    def test_query_api_with_retries_not_found_not_rate_limited(self):
        with mock.patch.object(KubernetesApi, "query_api") as mock_query:
            mock_query.side_effect = K8sApiNotFoundException("/foo/bar")

            k8s = KubernetesApi()
            rate_limiter = BlockingRateLimiter(
                num_agents=1,
                initial_cluster_rate=100,
                max_cluster_rate=1000,
                min_cluster_rate=1,
                consecutive_success_threshold=1,
                strategy="multiply",
            )
            options = ApiQueryOptions(rate_limiter=rate_limiter)
            self.assertRaises(
                K8sApiNotFoundException,
                lambda: k8s.query_api_with_retries("/foo/bar", options),
            )
            self.assertEqual(rate_limiter.current_cluster_rate, 200.0)
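
Here the rate increases rather than backs off: the test shows that a not-found response is not treated as a rate-limiting failure, so with a success threshold of 1 the cluster rate is multiplied by the increase factor. A sketch of the arithmetic, assuming an increase factor of 2.0 (implied by the 200.0 assertion):

    initial_cluster_rate = 100.0
    increase_factor = 2.0  # assumed; implied by the assertion above
    rate_after_success = initial_cluster_rate * increase_factor  # 200.0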
Example #4
    def test_query_api_with_retries_success_not_rate_limited(self):
        with mock.patch.object(KubernetesApi, "query_api") as mock_query:
            mock_query.return_value = {"success": "success"}

            k8s = KubernetesApi()
            rate_limiter = BlockingRateLimiter(
                num_agents=1,
                initial_cluster_rate=100,
                max_cluster_rate=1000,
                min_cluster_rate=1,
                consecutive_success_threshold=1,
                strategy="multiply",
            )
            options = ApiQueryOptions(rate_limiter=rate_limiter)
            result = k8s.query_api_with_retries("/foo/bar", options)
            self.assertEqual(result, {"success": "success"})
            self.assertEqual(rate_limiter.current_cluster_rate, 200.0)

    def test_lazy_adjust_min_max_init_rates(self):
        """Tests the one-time lazy adjustments"""

        # Make sure proper adjustments are made if out of bounds
        BRL = BlockingRateLimiter
        rl = BRL(
            num_agents=1,
            initial_cluster_rate=BRL.HARD_LIMIT_INITIAL_CLUSTER_RATE + 1000,
            max_cluster_rate=BRL.HARD_LIMIT_INITIAL_CLUSTER_RATE + 2000000,
            min_cluster_rate=float(BRL.HARD_LIMIT_MIN_CLUSTER_RATE) / 10,
            consecutive_success_threshold=5,
        )
        rl._lazy_adjust_min_max_rates()
        self.assertEqual(rl._initial_cluster_rate,
                         BRL.HARD_LIMIT_INITIAL_CLUSTER_RATE)
        self.assertEqual(rl._max_cluster_rate,
                         BRL.HARD_LIMIT_INITIAL_CLUSTER_RATE + 2000000)
        self.assertEqual(rl._min_cluster_rate, BRL.HARD_LIMIT_MIN_CLUSTER_RATE)

        # Make sure adjustments are NOT made if within bounds
        rl = BRL(
            num_agents=1,
            initial_cluster_rate=BRL.HARD_LIMIT_INITIAL_CLUSTER_RATE - 1,
            max_cluster_rate=BRL.HARD_LIMIT_INITIAL_CLUSTER_RATE - 1,
            min_cluster_rate=float(BRL.HARD_LIMIT_MIN_CLUSTER_RATE) + 1,
            consecutive_success_threshold=5,
        )
        rl._lazy_adjust_min_max_rates()
        self.assertEqual(rl._initial_cluster_rate,
                         BRL.HARD_LIMIT_INITIAL_CLUSTER_RATE - 1)
        self.assertEqual(rl._max_cluster_rate,
                         BRL.HARD_LIMIT_INITIAL_CLUSTER_RATE - 1)
        self.assertEqual(rl._min_cluster_rate,
                         BRL.HARD_LIMIT_MIN_CLUSTER_RATE + 1)

        # Init rate cannot be lower than min rate
        rl = BlockingRateLimiter(
            num_agents=1,
            initial_cluster_rate=0.1,
            max_cluster_rate=1000,
            min_cluster_rate=0,
            consecutive_success_threshold=5,
        )
        rl._lazy_adjust_min_max_rates()
        self.assertEqual(rl._initial_cluster_rate, 1)
    def _test_rate_limiter(
        self,
        num_agents,
        consecutive_success_threshold,
        initial_cluster_rate,
        max_cluster_rate,
        min_cluster_rate,
        experiment_duration,
        max_concurrency,
        expected_requests,
        allowed_variance,
        reported_outcome_generator=always_true(),
        increase_strategy=BlockingRateLimiter.STRATEGY_MULTIPLY,
        backoff_factor=0.5,
        increase_factor=2.0,
    ):
        """Main test logic that runs max_concurrency client threads for a defined experiment duration.

        The experiment is driven off a fake_clock (so it can complete in seconds, not minutes or hours).

        Each time a successful acquire() completes, a counter is incremented.  At experiment end, this counter
        should be close enough to a calculated expected value, based on the specified rate.

        Concurrency should not affect the overall rate of allowed acquisitions.

        The outcome reported by each acquiring client is drawn from the `reported_outcome_generator` generator.

        @param num_agents: Number of agents in the cluster (used to derive the per-agent rate from the cluster rate)
        @param consecutive_success_threshold: Number of consecutive successes required before the rate is increased
        @param initial_cluster_rate: Initial cluster rate
        @param max_cluster_rate: Upper bound on cluster rate
        @param min_cluster_rate: Lower bound on cluster rate
        @param increase_strategy: Strategy for increasing the rate
        @param experiment_duration: Experiment duration in seconds
        @param max_concurrency: Number of tokens to create
        @param expected_requests: Expected number of requests at the end of the experiment
        @param allowed_variance: Tuple of (lower, upper) bounds on the ratio of observed to expected requests
            (e.g. (0.9, 1.1) allows 10% variance in either direction)
        @param reported_outcome_generator: Generator yielding the boolean outcome reported by each client
        @param backoff_factor: Multiplier applied to the rate after a reported failure
        @param increase_factor: Multiplier applied to the rate after the success threshold is met
        """
        rate_limiter = BlockingRateLimiter(
            num_agents=num_agents,
            initial_cluster_rate=initial_cluster_rate,
            max_cluster_rate=max_cluster_rate,
            min_cluster_rate=min_cluster_rate,
            consecutive_success_threshold=consecutive_success_threshold,
            strategy=increase_strategy,
            increase_factor=increase_factor,
            backoff_factor=backoff_factor,
            max_concurrency=max_concurrency,
            fake_clock=self._fake_clock,
        )

        # Create and start a list of consumer threads
        experiment_end_time = self._fake_clock.time() + experiment_duration
        threads = self.__create_consumer_threads(max_concurrency, rate_limiter,
                                                 experiment_end_time,
                                                 reported_outcome_generator)
        # Daemonize and start the consumer threads
        for t in threads:
            t.daemon = True
            t.start()

        # Create and join an advancer thread (which in turn runs until all client threads die)
        advancer = self.__create_fake_clock_advancer_thread(
            rate_limiter, threads)
        advancer.start()
        advancer.join()

        requests = self._test_state['count']
        # Assert that count is close enough to the expected count
        observed_ratio = float(requests) / expected_requests
        self.assertGreater(observed_ratio, allowed_variance[0])
        self.assertLess(observed_ratio, allowed_variance[1])
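
A hypothetical invocation of this harness (a sketch; the test name and parameter values are illustrative assumptions, not taken from the source). Pinning the initial, minimum, and maximum rates to the same value keeps the cluster rate constant, so the expected request count is simply rate * duration:

    def test_steady_rate(self):
        # 100 requests/sec over a 10-second fake-clock experiment -> ~1000 acquisitions,
        # with a (0.9, 1.1) band on the observed/expected ratio.
        self._test_rate_limiter(
            num_agents=1,
            consecutive_success_threshold=1,
            initial_cluster_rate=100,
            max_cluster_rate=100,
            min_cluster_rate=100,
            experiment_duration=10,
            max_concurrency=3,
            expected_requests=1000,
            allowed_variance=(0.9, 1.1),
        )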