Example #1
async def test_token_bucket_refills_itself():
    CAPACITY = 50
    TOKENS_PER_SECOND = 1000
    bucket = TokenBucket(TOKENS_PER_SECOND, CAPACITY)

    # consume all of the tokens
    for _ in range(CAPACITY):
        await bucket.take()

    # enough time for the bucket to fully refill
    start_at = time.perf_counter()
    time_to_refill = CAPACITY / TOKENS_PER_SECOND
    while time.perf_counter() - start_at < time_to_refill:
        # sleep only the remaining time, guarding against undersleep
        await asyncio.sleep(time_to_refill - (time.perf_counter() - start_at))

    # This should take roughly zero time
    start_at = time.perf_counter()

    for _ in range(CAPACITY):
        await bucket.take()

    end_at = time.perf_counter()

    delta = end_at - start_at

    await assert_close_to_zero(delta, CAPACITY)
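The assert_close_to_zero helper is not shown on this page. A plausible sketch, consistent with how it is awaited here and built from measure_zero (Example #2) and assert_fuzzy_equal (Example #13), might be (an assumption, not the suite's actual helper):

async def assert_close_to_zero(delta, num_tokens):
    # Hypothetical helper: compare the measured time against a
    # zero-baseline of the same number of unthrottled takes.
    expected = await measure_zero(num_tokens)
    # very small numbers vary a lot between runs, so allow wide drift
    assert_fuzzy_equal(delta, expected, allowed_drift=10)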
Example #2
async def measure_zero(iterations):
    bucket = TokenBucket(1, iterations)
    start_at = time.perf_counter()
    for _ in range(iterations):
        await bucket.take()
    end_at = time.perf_counter()
    return end_at - start_at
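Every example on this page exercises the same small interface: TokenBucket(rate, capacity) starts out full, take() awaits until tokens are available, take_nowait() raises NotEnoughTokens when they are not, and can_take() / get_num_tokens() inspect the current fill. A minimal sketch of that interface, assuming a monotonic-clock refill (illustrative only, not the library's implementation):

import asyncio
import time


class NotEnoughTokens(Exception):
    pass


class TokenBucket:
    # Minimal sketch of the interface these examples use; the real
    # implementation lives in the library the examples import from.

    def __init__(self, rate: float, capacity: float) -> None:
        self._rate = rate                # tokens added per second
        self._capacity = capacity        # maximum number of stored tokens
        self._tokens = float(capacity)   # the bucket starts out full
        self._last_refill = time.perf_counter()

    def _refill(self) -> None:
        now = time.perf_counter()
        self._tokens = min(
            self._capacity,
            self._tokens + (now - self._last_refill) * self._rate,
        )
        self._last_refill = now

    def get_num_tokens(self) -> float:
        self._refill()
        return self._tokens

    def can_take(self, num: float = 1) -> bool:
        return self.get_num_tokens() >= num

    def take_nowait(self, num: float = 1) -> None:
        if not self.can_take(num):
            raise NotEnoughTokens()
        self._tokens -= num

    async def take(self, num: float = 1) -> None:
        # wait until enough tokens have accumulated, then consume them
        while not self.can_take(num):
            await asyncio.sleep((num - self._tokens) / self._rate)
        self._tokens -= num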
Example #3
    def __init__(self,
                 chain: BaseAsyncChain,
                 db: BaseAsyncChainDB,
                 peer_pool: ETHPeerPool,
                 header_syncer: HeaderSyncerAPI,
                 block_importer: BaseBlockImporter,
                 token: CancelToken = None) -> None:
        super().__init__(chain, db, peer_pool, header_syncer, token)

        # track when block bodies are downloaded, so that blocks can be imported
        self._block_import_tracker = OrderedTaskPreparation(
            BlockImportPrereqs,
            id_extractor=attrgetter('hash'),
            # make sure that a block is not imported until the parent block is imported
            dependency_extractor=attrgetter('parent_hash'),
        )
        self._block_importer = block_importer

        # Track if any headers have been received yet
        self._got_first_header = asyncio.Event()

        # Rate limit the block import logs
        self._import_log_limiter = TokenBucket(
            0.33,  # show about one log per 3 seconds
            5,  # burst up to 5 logs after a lag
        )
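Example #3 only configures the limiter; the consuming side is not shown. A hypothetical sketch of how a caller might gate its log output on the bucket (the method name is illustrative):

    def _log_block_import(self, block) -> None:
        # Hypothetical usage sketch: emit a log line only when a token is
        # available, capping output at roughly one line per 3 seconds with
        # bursts of up to 5 after a quiet period; otherwise stay silent.
        if self._import_log_limiter.can_take(1):
            self._import_log_limiter.take_nowait(1)
            self.logger.info("Imported block %s", block)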
Example #4
    def __init__(self,
                 chain: AsyncChainAPI,
                 db: BaseAsyncChainDB,
                 peer_pool: ETHPeerPool,
                 header_syncer: HeaderSyncerAPI,
                 block_importer: BaseBlockImporter,
                 token: CancelToken = None) -> None:
        super().__init__(chain, db, peer_pool, header_syncer, token)

        # track when block bodies are downloaded, so that blocks can be imported
        self._block_import_tracker = OrderedTaskPreparation(
            BlockImportPrereqs,
            id_extractor=attrgetter('hash'),
            # make sure that a block is not imported until the parent block is imported
            dependency_extractor=attrgetter('parent_hash'),
            # Avoid problems by keeping twice as much data as the import queue size
            max_depth=BLOCK_IMPORT_QUEUE_SIZE * 2,
        )
        self._block_importer = block_importer

        # Track if any headers have been received yet
        self._got_first_header = asyncio.Event()

        # Rate limit the block import logs
        self._import_log_limiter = TokenBucket(
            0.33,  # show about one log per 3 seconds
            5,  # burst up to 5 logs after a lag
        )

        # the queue of blocks that are downloaded and ready to be imported
        self._import_queue: 'asyncio.Queue[BlockAPI]' = asyncio.Queue(BLOCK_IMPORT_QUEUE_SIZE)

        self._import_active = asyncio.Lock()
Example #5
    async def maybe_connect_more_peers(self) -> None:
        rate_limiter = TokenBucket(
            rate=1 / PEER_CONNECT_INTERVAL,
            capacity=MAX_SEQUENTIAL_PEER_CONNECT,
        )

        while self.is_operational:
            if self.is_full:
                await self.sleep(PEER_CONNECT_INTERVAL)
                continue

            await self.wait(rate_limiter.take())

            try:
                await asyncio.gather(*(self._add_peers_from_backend(backend)
                                       for backend in self.peer_backends))
            except OperationCancelled:
                break
            except asyncio.CancelledError:
                # no need to log this exception; it is expected
                raise
            except Exception:
                self.logger.exception(
                    "unexpected error during peer connection")
                # Continue trying to connect to peers, even if there was a
                # surprising failure during one of the attempts.
                continue
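PEER_CONNECT_INTERVAL and MAX_SEQUENTIAL_PEER_CONNECT are imported constants whose definitions are not on this page. With rate=1/PEER_CONNECT_INTERVAL and capacity=MAX_SEQUENTIAL_PEER_CONNECT, the loop averages one connection round per interval but can burst after sitting idle. Placeholder values (assumptions, not the project's settings) make that concrete:

# Placeholder values, for illustration only:
PEER_CONNECT_INTERVAL = 30        # seconds; ~1 connection round per 30s
MAX_SEQUENTIAL_PEER_CONNECT = 5   # burst of up to 5 rounds after idling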
Example #6
def test_token_bucket_take_nowait():
    bucket = TokenBucket(1, 10)

    assert bucket.can_take(10)
    bucket.take_nowait(10)
    assert not bucket.can_take(1)

    with pytest.raises(NotEnoughTokens):
        bucket.take_nowait(1)
Example #7
async def test_token_bucket_can_take():
    bucket = TokenBucket(1, 10)

    assert bucket.can_take() is True  # can take 1
    assert bucket.can_take(bucket.get_num_tokens()) is True  # can take full capacity

    await bucket.take(10)  # empty the bucket

    assert bucket.can_take() is False
Example #8
    def __init__(self, peer: BasePeer, response_msg_type: Type[CommandAPI],
                 token: CancelToken) -> None:
        super().__init__(token)
        self._peer = peer
        self.response_msg_type = response_msg_type
        self._lock = asyncio.Lock()

        # token bucket for limiting timeouts:
        # - refills at 1 token every 5 minutes
        # - max capacity of 3 tokens
        self.timeout_bucket = TokenBucket(TIMEOUT_BUCKET_RATE,
                                          TIMEOUT_BUCKET_CAPACITY)
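The comment above fixes what the two imported constants must be; definitions consistent with it (assumed here, not copied from the source module) would be:

# Assumed definitions, derived from the comment above:
TIMEOUT_BUCKET_RATE = 1 / 300    # one token every 5 minutes
TIMEOUT_BUCKET_CAPACITY = 3      # at most 3 stored tokens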
Example #9
async def test_token_bucket_get_num_tokens():
    bucket = TokenBucket(1, 10)

    # starts at full capacity
    assert bucket.get_num_tokens() == 10

    await bucket.take(5)
    assert 5 <= bucket.get_num_tokens() <= 5.1

    await bucket.take(bucket.get_num_tokens())

    assert 0 <= bucket.get_num_tokens() <= 0.1
Example #10
async def test_token_bucket_initial_tokens():
    CAPACITY = 10
    bucket = TokenBucket(1000, CAPACITY)

    start_at = time.perf_counter()
    for _ in range(CAPACITY):
        await bucket.take()

    end_at = time.perf_counter()
    delta = end_at - start_at

    await assert_close_to_zero(delta, CAPACITY)
Example #11
    async def maybe_connect_more_peers(self) -> None:
        rate_limiter = TokenBucket(
            rate=1 / PEER_CONNECT_INTERVAL,
            capacity=MAX_SEQUENTIAL_PEER_CONNECT,
        )

        # We set this to 0 so that upon startup (when our RoutingTable will have only a few
        # entries) we use the less restrictive filter function and get as many connection
        # candidates as possible.
        last_candidates_count = 0
        while self.manager.is_running:
            if self.is_full:
                await asyncio.sleep(PEER_CONNECT_INTERVAL)
                continue

            await rate_limiter.take()

            if last_candidates_count >= self.available_slots:
                head = await self.get_chain_head()
                genesis_hash = await self.get_genesis_hash()
                fork_blocks = extract_fork_blocks(self.vm_configuration)
                should_skip = functools.partial(
                    skip_candidate_if_on_list_or_fork_mismatch,
                    genesis_hash,
                    head.block_number,
                    fork_blocks,
                )
            else:
                self.logger.debug(
                    "Didn't get enough candidates last time, falling back to skipping "
                    "only peers that are blacklisted or already connected to")
                should_skip = skip_candidate_if_on_list  # type: ignore

            try:
                candidate_counts = await asyncio.gather(
                    *(self._add_peers_from_backend(backend, should_skip)
                      for backend in self.peer_backends))
                last_candidates_count = sum(candidate_counts)
            except OperationCancelled:
                # FIXME: We may no longer need this; need to confirm that none of the tasks we
                # create use BaseService.
                break
            except asyncio.CancelledError:
                # no need to log this exception; it is expected
                raise
            except Exception:
                self.logger.exception(
                    "unexpected error during peer connection")
                # Continue trying to connect to peers, even if there was a
                # surprising failure during one of the attempts.
                continue
Example #12
    def __init__(self, peer: BasePeer, listening_for: Type[CommandAPI],
                 cancel_token: CancelToken) -> None:
        self._peer = peer
        self._cancel_token = cancel_token
        self._response_command_type = listening_for

        # This TokenBucket allows for the occasional invalid response at a
        # maximum rate of one per 10 minutes, allowing up to two in quick
        # succession.  We *allow* invalid responses because the ETH protocol
        # doesn't have strong correlation between request/response and certain
        # networking conditions can result in us interpreting a legitimate
        # message as an invalid response if messages arrive out of order or
        # late.
        self._invalid_response_bucket = TokenBucket(1 / 600, 2)
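The constructor only creates the bucket. A hypothetical consumer (the method name and escalation are illustrative) would charge one token per invalid response and treat an empty bucket as a misbehaving peer:

    def _on_invalid_response(self) -> None:
        # Hypothetical sketch: each invalid response costs a token, so a
        # peer may send up to two in quick succession and one per 10
        # minutes thereafter before we give up on it.
        try:
            self._invalid_response_bucket.take_nowait(1)
        except NotEnoughTokens:
            # too many invalid responses in too short a window
            self._cancel_token.trigger()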
Example #13
async def test_token_bucket_initial_tokens():
    bucket = TokenBucket(1000, 10)

    start_at = time.perf_counter()
    for _ in range(10):
        await bucket.take()

    end_at = time.perf_counter()
    delta = end_at - start_at

    # since the bucket starts out full, the loop should take near-zero time
    expected = await measure_zero(10)
    # drift is allowed to be up to 1000% since we're working with very small
    # numbers.
    assert_fuzzy_equal(delta, expected, allowed_drift=10)
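assert_fuzzy_equal is not defined on this page either. Its call sites treat allowed_drift as a fraction of the expected value (0.1 here means 10%, 10 means 1000%), so a consistent sketch (an assumption, not the suite's actual helper) is:

def assert_fuzzy_equal(actual, expected, allowed_drift):
    # Assumed helper: allowed_drift is a fraction of the expected value
    # (0.1 permits 10% deviation, 3 permits 300%, 10 permits 1000%).
    assert abs(actual - expected) <= allowed_drift * expected, (
        f"{actual} not within {allowed_drift:.0%} of {expected}"
    )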
Example #14
    async def maybe_connect_more_peers(self) -> None:
        rate_limiter = TokenBucket(
            rate=1 / PEER_CONNECT_INTERVAL,
            capacity=MAX_SEQUENTIAL_PEER_CONNECT,
        )

        while self.is_operational:
            if self.is_full:
                await self.sleep(PEER_CONNECT_INTERVAL)
                continue

            await self.wait(rate_limiter.take())

            await self.wait(
                asyncio.gather(*(self._add_peers_from_backend(backend)
                                 for backend in self.peer_backends)))
Example #15
async def test_token_bucket_hits_limit():
    bucket = TokenBucket(1000, 10)

    bucket.take_nowait(10)
    start_at = time.perf_counter()
    # the first 10 tokens should be roughly instant; the next 10 should
    # take 1/1000th of a second each to generate.
    while True:
        if bucket.can_take(10):
            break
        else:
            await asyncio.sleep(0)

    end_at = time.perf_counter()

    # we use a zero-measure of 10 loops to account for the loop overhead.
    zero = await measure_zero(10)
    expected_delta = 10 / 1000 + zero
    delta = end_at - start_at

    # allow up to 10% difference in expected time
    assert_fuzzy_equal(delta, expected_delta, allowed_drift=0.1)
Example #16
    async def maybe_connect_more_peers(self) -> None:
        rate_limiter = TokenBucket(
            rate=1 / PEER_CONNECT_INTERVAL,
            capacity=MAX_SEQUENTIAL_PEER_CONNECT,
        )

        # We set this to 0 so that upon startup (when our RoutingTable will have only a few
        # entries) we use the less restrictive filter function and get as many connection
        # candidates as possible.
        last_candidates_count = 0
        while self.manager.is_running:
            if self.is_full:
                await asyncio.sleep(PEER_CONNECT_INTERVAL)
                continue

            await rate_limiter.take()

            if last_candidates_count >= self.available_slots:
                head = await self.get_chain_head()
                genesis_hash = await self.get_genesis_hash()
                fork_blocks = extract_fork_blocks(self.vm_configuration)
                should_skip = functools.partial(
                    skip_candidate_if_on_list_or_fork_mismatch,
                    genesis_hash,
                    head.block_number,
                    fork_blocks,
                )
            else:
                self.logger.debug(
                    "Didn't get enough candidates last time, falling back to skipping "
                    "only peers that are blacklisted or already connected to")
                should_skip = skip_candidate_if_on_list  # type: ignore

            candidate_counts = await asyncio.gather(*(
                self._add_peers_from_backend(backend, should_skip)
                for backend in self.peer_backends
            ))
            last_candidates_count = sum(candidate_counts)
Example #17
async def test_token_bucket_hits_limit():
    CAPACITY = 50
    TOKENS_PER_SECOND = 1000
    bucket = TokenBucket(TOKENS_PER_SECOND, CAPACITY)

    bucket.take_nowait(CAPACITY)
    start_at = time.perf_counter()
    # the first CAPACITY tokens should be roughly instant; the next CAPACITY
    # should take 1/TOKENS_PER_SECOND of a second each to generate.
    while True:
        if bucket.can_take(CAPACITY):
            break
        else:
            await asyncio.sleep(0)

    end_at = time.perf_counter()

    # we use a zero-measure of CAPACITY loops to account for the loop overhead.
    zero = await measure_zero(CAPACITY)
    expected_delta = CAPACITY / TOKENS_PER_SECOND + zero
    delta = end_at - start_at

    # allow up to 10% difference in expected time
    assert_fuzzy_equal(delta, expected_delta, allowed_drift=0.1)
Example #18
async def test_token_bucket_refills_itself():
    bucket = TokenBucket(1000, 10)

    # consume all of the tokens
    for _ in range(10):
        await bucket.take()

    # enough time for the bucket to fully refill
    await asyncio.sleep(20 / 1000)

    start_at = time.perf_counter()

    for _ in range(10):
        await bucket.take()

    end_at = time.perf_counter()

    delta = end_at - start_at
    # since the bucket should have fully refilled, the second loop
    # should take near-zero time
    expected = await measure_zero(10)
    # drift is allowed to be up to 300% since we're working with very small
    # numbers, and the performance in CI varies widely.
    assert_fuzzy_equal(delta, expected, allowed_drift=3)