Example #1
0
async def _skip_complete_headers_iterator(
    headers: Iterable[BlockHeader], logger: ExtendedDebugLogger,
    completion_check: Callable[[BlockHeader], Awaitable[bool]]
) -> AsyncIterator[BlockHeader]:
    """
    Yield only the headers that are missing locally.

    Headers are checked in order with ``completion_check``; the leading run
    of already-present headers is discarded. From the first missing header
    onward, every remaining header is yielded without further checks.

    :param headers: candidate headers, in order
    :param logger: receives a debug line summarizing how many were skipped
    :param completion_check: awaitable predicate, True if we already have it
    """
    iter_headers = iter(headers)
    # for logging:
    first_discarded = None
    last_discarded = None
    num_discarded = 0
    for header in iter_headers:
        is_present = await completion_check(header)
        if is_present:
            if first_discarded is None:
                first_discarded = header
            # Always track the most recent discard. (Previously this was only
            # set from the second discard onward, so skipping exactly one
            # header logged "header...None".)
            last_discarded = header
            num_discarded += 1
        else:
            yield header
            break

    if num_discarded:
        # Only log when something was actually skipped; otherwise the message
        # would misleadingly report "0 headers ... None...None".
        logger.debug(
            "Discarding %d headers that we already have: %s...%s",
            num_discarded,
            first_discarded,
            last_discarded,
        )

    for header in iter_headers:
        yield header
Example #2
0
def test_node_request_tracker_get_next_timeout():
    """The next scheduled timeout is driven by the oldest pending request."""
    tracker = TrieNodeRequestTracker(REPLY_TIMEOUT, ExtendedDebugLogger('name'))
    base_time = 1234

    # Two missing entries and two active requests: one of each made at
    # base_time, one of each made a second later.
    first_peer, second_peer = object(), object()
    tracker.missing[base_time] = []
    tracker.missing[base_time + 1] = []
    tracker.active_requests[first_peer] = (base_time, [])
    tracker.active_requests[second_peer] = (base_time + 1, [])

    # The next scheduled timeout must be base_time + REPLY_TIMEOUT.
    assert tracker.get_next_timeout() == base_time + REPLY_TIMEOUT

    # Dropping one of the base_time requests leaves the other behind, so the
    # next scheduled timeout is unchanged.
    tracker.missing.pop(base_time)
    assert tracker.get_next_timeout() == base_time + REPLY_TIMEOUT

    # Once the last base_time request is gone, the next scheduled timeout
    # advances to (base_time + 1) + REPLY_TIMEOUT.
    tracker.active_requests.pop(first_peer)
    assert tracker.get_next_timeout() == base_time + 1 + REPLY_TIMEOUT
Example #3
0
def test_node_request_tracker_get_timed_out():
    """Requests older than REPLY_TIMEOUT are returned and evicted."""
    tracker = TrieNodeRequestTracker(REPLY_TIMEOUT, ExtendedDebugLogger('name'))
    peers = [object() for _ in range(4)]
    peer_nodes = {peer: [os.urandom(32) for _ in range(3)] for peer in peers}
    fresh_a, stale_a, stale_b, fresh_b = peers
    now = time.time()
    # Four active requests: two made more than REPLY_TIMEOUT seconds ago
    # (stale) and two made within the timeout window (fresh).
    tracker.active_requests[fresh_a] = (now, peer_nodes[fresh_a])
    tracker.active_requests[stale_a] = (now - REPLY_TIMEOUT - 1, peer_nodes[stale_a])
    tracker.active_requests[stale_b] = (now - REPLY_TIMEOUT - 2, peer_nodes[stale_b])
    tracker.active_requests[fresh_b] = (now - REPLY_TIMEOUT + 1, peer_nodes[fresh_b])

    # Every node key from the stale requests must come back, and nothing else.
    expected = set(peer_nodes[stale_a]) | set(peer_nodes[stale_b])
    timed_out = tracker.get_timed_out()
    assert len(timed_out) == len(expected)
    assert set(timed_out) == expected

    # The stale entries must be removed from active_requests; fresh ones stay.
    assert stale_a not in tracker.active_requests
    assert stale_b not in tracker.active_requests
    assert fresh_a in tracker.active_requests
    assert fresh_b in tracker.active_requests
Example #4
0
def test_node_request_tracker_get_retriable_missing():
    """Missing entries older than REPLY_TIMEOUT are returned and evicted."""
    tracker = TrieNodeRequestTracker(REPLY_TIMEOUT, ExtendedDebugLogger('name'))
    now = time.time()

    def _random_nodes():
        # Three random 32-byte trie node keys.
        return [os.urandom(32) for _ in range(3)]

    # Four missing entries: two recorded more than REPLY_TIMEOUT seconds ago
    # (stale) and two recorded within the timeout window.
    fresh_time, fresh_nodes = now, _random_nodes()
    stale_time_a, stale_nodes_a = now - REPLY_TIMEOUT - 1, _random_nodes()
    stale_time_b, stale_nodes_b = now - REPLY_TIMEOUT - 2, _random_nodes()
    recent_time, recent_nodes = now - REPLY_TIMEOUT + 1, _random_nodes()
    for req_time, req_nodes in (
        (fresh_time, fresh_nodes),
        (stale_time_a, stale_nodes_a),
        (stale_time_b, stale_nodes_b),
        (recent_time, recent_nodes),
    ):
        tracker.missing[req_time] = req_nodes

    # Only the nodes from the stale entries are retriable.
    expected = set(stale_nodes_a) | set(stale_nodes_b)
    retriable = tracker.get_retriable_missing()
    assert len(retriable) == len(expected)
    assert set(retriable) == expected

    # The stale entries are removed from the missing dict; the rest remain.
    assert stale_time_a not in tracker.missing
    assert stale_time_b not in tracker.missing
    assert fresh_time in tracker.missing
    assert recent_time in tracker.missing
Example #5
0
class SamplePeerSubscriber(PeerSubscriber):
    """Minimal concrete PeerSubscriber (appears to be a test fixture)."""
    logger = ExtendedDebugLogger("")

    # NOTE(review): subscribing to the base ``Command`` type presumably
    # matches every incoming command — confirm against PeerSubscriber's
    # dispatch logic.
    subscription_msg_types = {Command}

    @property
    def msg_queue_maxsize(self) -> int:
        # Fixed bound on the subscriber's message queue.
        return 100
Example #6
0
File: headers.py  Project: mhchia/trinity
async def _skip_complete_headers_iterator(
    headers: Iterable[BlockHeader], logger: ExtendedDebugLogger,
    completion_check: Callable[[BlockHeader], Awaitable[bool]]
) -> AsyncIterator[BlockHeader]:
    """
    Yield only the headers we do not yet have.

    Headers are consumed in order; each is checked with ``completion_check``
    and skipped (with a debug log line) while present. From the first missing
    header onward, everything is yielded without further checks.
    """
    remaining = iter(headers)
    for header in remaining:
        if await completion_check(header):
            logger.debug("Discarding header that we already have: %s", header)
        else:
            yield header
            break
    # The leading run of already-known headers has been skipped; pass the
    # rest through untouched.
    for header in remaining:
        yield header
Example #7
0
 async def _test_trie_sync():
     """Sync a random source trie into an empty db and verify the copy."""
     source_trie, expected_contents = make_random_trie(random)
     dest_db = FakeAsyncMemoryDB()
     nodes_cache = MemoryDB()
     scheduler = HexaryTrieSync(source_trie.root_hash, dest_db, nodes_cache,
                                ExtendedDebugLogger("test"))
     batch = scheduler.next_batch()
     while batch:
         # Answer every request straight from the source trie's node db.
         replies = [
             [request.node_key, source_trie.db[request.node_key]]
             for request in batch
         ]
         await scheduler.process(replies)
         batch = scheduler.next_batch(10)
     # Every key/value pair from the source must be readable from the copy.
     synced_trie = HexaryTrie(dest_db, source_trie.root_hash)
     for key, value in expected_contents.items():
         assert synced_trie[key] == value
Example #8
0
async def test_state_sync():
    """Sync 1000 random accounts and check every account field afterwards."""
    raw_db, state_root, contents = make_random_state(1000)
    dest_db = FakeAsyncMemoryDB()
    nodes_cache = MemoryDB()
    scheduler = StateSync(state_root, dest_db, nodes_cache, ExtendedDebugLogger('test'))
    batch = scheduler.next_batch(10)
    while batch:
        # Serve each requested node directly from the source database.
        await scheduler.process(
            [[request.node_key, raw_db[request.node_key]] for request in batch]
        )
        batch = scheduler.next_batch(10)

    # The synced state must reproduce balance, nonce, storage and code for
    # every generated account.
    synced_accounts = AccountDB(dest_db, state_root)
    for address, (balance, nonce, storage, code) in contents.items():
        assert synced_accounts.get_balance(address) == balance
        assert synced_accounts.get_nonce(address) == nonce
        assert synced_accounts.get_storage(address, 0) == storage
        assert synced_accounts.get_code(address) == code