Example #1
async def test_wait_cancel_pending_tasks_on_completion(event_loop):
    token = CancelToken('token')
    token2 = CancelToken('token2')
    chain = token.chain(token2)
    event_loop.call_soon(token2.trigger)
    await chain.wait()
    await assert_only_current_task_not_done()
Example #2
async def test_token_wait(event_loop):
    token = CancelToken('token')
    event_loop.call_soon(token.trigger)
    done, pending = await asyncio.wait([token.wait()], timeout=0.1)
    assert len(done) == 1
    assert len(pending) == 0
    assert token.triggered
Example #3
 async def multiplex(self) -> AsyncIterator[None]:
     """
     API for running the background task that feeds individual protocol
     queues that allows each individual protocol to stream only its own
     messages.
     """
     # We generate a new token each time the multiplexer is used to
     # multiplex so that we can reliably cancel it without requiring the
     # multiplexer's master token to be cancelled.
     async with self._multiplex_lock:
         multiplex_token = CancelToken(
             'multiplex',
             loop=self.cancel_token.loop,
         ).chain(self.cancel_token)
         self._multiplex_token = multiplex_token
         fut = asyncio.ensure_future(self._do_multiplexing(multiplex_token))
         try:
             yield
         finally:
             multiplex_token.trigger()
             del self._multiplex_token
             if fut.done():
                 fut.result()
             else:
                 fut.cancel()
                 try:
                     await fut
                 except asyncio.CancelledError:
                     pass
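
The core pattern here, a fresh short-lived token chained onto a long-lived master token and triggered in a finally block, is reusable outside the multiplexer. Below is a minimal sketch of that pattern, assuming the standalone cancel_token package for the import; scoped_background_task and coro_fn are illustrative names, not part of the example above.

import asyncio
from contextlib import asynccontextmanager

from cancel_token import CancelToken  # import path assumed


@asynccontextmanager
async def scoped_background_task(master_token, coro_fn):
    """Run ``coro_fn(token)`` in the background for the duration of the
    ``async with`` block, cancelling it on exit without touching the
    master token."""
    # A fresh token chained onto the master: triggering it cancels only this
    # scope, while triggering the master still propagates down the chain.
    scope_token = CancelToken('scoped-task').chain(master_token)
    fut = asyncio.ensure_future(coro_fn(scope_token))
    try:
        yield scope_token
    finally:
        scope_token.trigger()
        if fut.done():
            fut.result()  # surface any error raised in the background
        else:
            fut.cancel()
            try:
                await fut
            except asyncio.CancelledError:
                pass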
Example #4
    def __init__(self,
                 chain: AsyncChain,
                 db: AsyncHeaderDB,
                 peer_pool: PeerPool,
                 token: CancelToken = None) -> None:
        self.complete_token = CancelToken(
            'trinity.sync.common.BaseHeaderChainSyncer.SyncCompleted')
        if token is None:
            master_service_token = self.complete_token
        else:
            master_service_token = token.chain(self.complete_token)
        super().__init__(master_service_token)
        self.chain = chain
        self.db = db
        self.peer_pool = peer_pool
        self._handler = PeerRequestHandler(self.db, self.logger,
                                           self.cancel_token)
        self._syncing = False
        self._sync_complete = asyncio.Event()
        self._sync_requests: asyncio.Queue[
            HeaderRequestingPeer] = asyncio.Queue()

        # pending queue size should be big enough to avoid starving the processing consumers, but
        # small enough to avoid wasteful over-requests before post-processing can happen
        max_pending_headers = ETHPeer.max_headers_fetch * 8
        self.header_queue = TaskQueue(max_pending_headers,
                                      attrgetter('block_number'))
Example #5
async def test_awaitables_are_cancelled_and_cleaned_up_on_outer_cancellation():
    token = CancelToken('token')

    ready = asyncio.Event()
    got_cancelation = asyncio.Event()

    async def _signal_then_sleep():
        ready.set()
        try:
            await asyncio.sleep(10)
        except asyncio.CancelledError:
            got_cancelation.set()
            raise

    fut = asyncio.ensure_future(_signal_then_sleep())

    task = asyncio.ensure_future(token.cancellable_wait(fut))

    # wait till we know the coro is running
    await ready.wait()

    # cancel the task
    task.cancel()

    try:
        await task
    except asyncio.CancelledError:
        pass

    # ensure that the task was indeed cancelled.
    await asyncio.wait_for(got_cancelation.wait(), timeout=0.01)
    await assert_only_current_task_not_done()
Example #6
    def __init__(self) -> None:
        self.hosts: Dict[str, 'Host'] = {}
        self.networks: Dict[str, 'Network'] = {}
        self.connections: Dict[CancelToken, asyncio.Future[None]] = {}

        self.cancel_token = CancelToken('Router')

        self._run_lock = asyncio.Lock()
        self.cleaned_up = asyncio.Event()
Example #7
 def __init__(self, privkey: datatypes.PrivateKey,
              address: kademlia.Address,
              bootstrap_nodes: Tuple[kademlia.Node, ...]) -> None:
     self.privkey = privkey
     self.address = address
     self.bootstrap_nodes = bootstrap_nodes
     self.this_node = kademlia.Node(self.pubkey, address)
     self.kademlia = kademlia.KademliaProtocol(self.this_node, wire=self)
     self.cancel_token = CancelToken('DiscoveryProtocol')
Example #8
    def __init__(self, event_bus: EndpointAPI,
                 trinity_config: TrinityConfig) -> None:
        self.trinity_config = trinity_config
        self._base_db = DBClient.connect(trinity_config.database_ipc_path)
        self._headerdb = AsyncHeaderDB(self._base_db)

        self._jsonrpc_ipc_path: Path = trinity_config.jsonrpc_ipc_path
        self._network_id = trinity_config.network_id

        self.event_bus = event_bus
        self.master_cancel_token = CancelToken(type(self).__name__)
Example #9
async def test_cancellable_wait_cancels_subtasks_when_cancelled(event_loop):
    token = CancelToken('')
    future = asyncio.ensure_future(token.cancellable_wait(asyncio.sleep(2)))
    with pytest.raises(asyncio.TimeoutError):
        # asyncio.wait_for() will timeout and then cancel our cancellable_wait() future, but
        # Task.cancel() doesn't immediately cancel the task
        # (https://docs.python.org/3/library/asyncio-task.html#asyncio.Task.cancel), so we need
        # the sleep below before we check that the task is actually cancelled.
        await asyncio.wait_for(future, timeout=0.01)
    await asyncio.sleep(0)
    assert future.cancelled()
    await assert_only_current_task_not_done()
Example #10
def test_token_chain_trigger_chain():
    token = CancelToken('token')
    token2 = CancelToken('token2')
    token3 = CancelToken('token3')
    chain = token.chain(token2).chain(token3)
    assert not chain.triggered
    chain.trigger()
    assert chain.triggered
    assert chain.triggered_token == chain
    assert not token.triggered
    assert not token2.triggered
    assert not token3.triggered
Example #11
async def test_wait_cancel_pending_tasks_on_cancellation(event_loop):
    """Test that cancelling a pending CancelToken.wait() coroutine doesn't leave .wait()
    coroutines for any chained tokens behind.
    """
    token = CancelToken('token').chain(CancelToken('token2')).chain(
        CancelToken('token3'))
    token_wait_coroutine = token.wait()
    done, pending = await asyncio.wait([token_wait_coroutine], timeout=0.1)
    assert len(done) == 0
    assert len(pending) == 1
    pending_task = pending.pop()
    assert pending_task._coro == token_wait_coroutine
    pending_task.cancel()
    await assert_only_current_task_not_done()
Example #12
async def _setup_alice_and_bob_factories(alice_chain_db, bob_chain_db):
    cancel_token = CancelToken(
        'trinity.get_directly_linked_peers_without_handshake')

    #
    # Alice
    #
    alice_context = BeaconContext(
        chain_db=alice_chain_db,
        network_id=1,
    )

    alice_factory = BCCPeerFactory(
        privkey=ecies.generate_privkey(),
        context=alice_context,
        token=cancel_token,
    )

    #
    # Bob
    #
    bob_context = BeaconContext(
        chain_db=bob_chain_db,
        network_id=1,
    )

    bob_factory = BCCPeerFactory(
        privkey=ecies.generate_privkey(),
        context=bob_context,
        token=cancel_token,
    )

    return alice_factory, bob_factory
Example #13
    def __init__(self,
                 transport: TransportAPI,
                 base_protocol: BaseP2PProtocol,
                 protocols: Sequence[ProtocolAPI],
                 token: CancelToken = None,
                 max_queue_size: int = 4096) -> None:
        self.logger = get_logger('p2p.multiplexer.Multiplexer')
        if token is None:
            loop = None
        else:
            loop = token.loop
        base_token = CancelToken(f'multiplexer[{transport.remote}]', loop=loop)

        if token is None:
            self.cancel_token = base_token
        else:
            self.cancel_token = base_token.chain(token)

        self._transport = transport
        # the base `p2p` protocol instance.
        self._base_protocol = base_protocol

        # the sub-protocol instances
        self._protocols = protocols

        # Lock to ensure that multiple call sites cannot concurrently stream
        # messages.
        self._multiplex_lock = asyncio.Lock()

        # Lock management on a per-protocol basis to ensure we only have one
        # stream consumer for each protocol.
        self._protocol_locks = {
            type(protocol): asyncio.Lock()
            for protocol
            in self.get_protocols()
        }

        # Each protocol gets its own queue where its messages are placed as
        # they are streamed from the transport.
        self._protocol_queues = {
            type(protocol): asyncio.Queue(max_queue_size)
            for protocol
            in self.get_protocols()
        }

        self._msg_counts = collections.defaultdict(int)
        self._last_msg_time = 0
Example #14
async def _setup_alice_and_bob_factories(
        alice_headerdb=None, bob_headerdb=None,
        alice_peer_class=HLSPeer, bob_peer_class=None):
    if bob_peer_class is None:
        bob_peer_class = alice_peer_class

    cancel_token = CancelToken('helios.get_directly_linked_peers_without_handshake')

    #
    # Alice
    #
    if alice_headerdb is None:
        alice_headerdb = get_fresh_mainnet_headerdb()

    alice_context = ChainContext(
        headerdb=alice_headerdb,
        network_id=1,
        vm_configuration=tuple(),
    )

    if alice_peer_class is HLSPeer:
        alice_factory_class = HLSPeerFactory
    elif alice_peer_class is LESPeer:
        alice_factory_class = LESPeerFactory
    else:
        raise TypeError(f"Unknown peer class: {alice_peer_class}")

    alice_factory = alice_factory_class(
        privkey=ecies.generate_privkey(),
        context=alice_context,
        token=cancel_token,
    )

    #
    # Bob
    #
    if bob_headerdb is None:
        bob_headerdb = get_fresh_mainnet_headerdb()

    bob_context = ChainContext(
        headerdb=bob_headerdb,
        network_id=1,
        vm_configuration=tuple(),
    )

    if bob_peer_class is HLSPeer:
        bob_factory_class = HLSPeerFactory
    elif bob_peer_class is LESPeer:
        bob_factory_class = LESPeerFactory
    else:
        raise TypeError(f"Unknown peer class: {bob_peer_class}")

    bob_factory = bob_factory_class(
        privkey=ecies.generate_privkey(),
        context=bob_context,
        token=cancel_token,
    )

    return alice_factory, bob_factory
Example #15
    def __init__(self,
                 token: CancelToken = None,
                 loop: asyncio.AbstractEventLoop = None) -> None:
        self.events = ServiceEvents()
        self._run_lock = asyncio.Lock()
        self._child_services = WeakSet()
        self._tasks = WeakSet()
        self._finished_callbacks = []

        self._loop = loop

        base_token = CancelToken(type(self).__name__, loop=loop)

        if token is None:
            self.cancel_token = base_token
        else:
            self.cancel_token = base_token.chain(token)
Example #16
 def __init__(self,
              privkey: datatypes.PrivateKey,
              address: AddressAPI,
              bootstrap_nodes: Sequence[NodeAPI],
              cancel_token: CancelToken) -> None:
     self.privkey = privkey
     self.address = address
     self.bootstrap_nodes = bootstrap_nodes
     self.this_node = Node(self.pubkey, address)
     self.routing = RoutingTable(self.this_node)
     self.topic_table = TopicTable(self.logger)
     self.pong_callbacks = CallbackManager()
     self.ping_callbacks = CallbackManager()
     self.neighbours_callbacks = CallbackManager()
     self.topic_nodes_callbacks = CallbackManager()
     self.parity_pong_tokens: Dict[Hash32, Hash32] = {}
     self.cancel_token = CancelToken('DiscoveryProtocol').chain(cancel_token)
Example #17
 def __init__(self, bootnodes, honest_nodes: Set[NodeAPI],
              malicious_nodes: Set[NodeAPI], malpg):
     privkey = keys.PrivateKey(keccak(b"seed"))
     self.messages = []
     self.honest_nodes = honest_nodes
     self.malicious_nodes = malicious_nodes
     self.malpg = malpg
     super().__init__(privkey, AddressFactory(), bootnodes,
                      CancelToken("discovery-test"))
Example #18
async def test_token_wait_finishing_cleans_up_chained_token_waits():
    parent = CancelToken('parent')
    child = parent.chain(CancelToken('child'))

    # this schedules both the child's `wait()` and the parent's `wait()`
    fut = asyncio.ensure_future(child.wait())
    # yield for a moment to let them spin up.
    await asyncio.sleep(0.01)

    # ensure that there are some not-done tasks
    with pytest.raises(AssertionError):
        await assert_only_current_task_not_done()

    child.trigger()

    await asyncio.wait_for(fut, timeout=0.01)

    await assert_only_current_task_not_done()
Example #19
def test_token_chain_trigger_last():
    token = CancelToken('token')
    token2 = CancelToken('token2')
    token3 = CancelToken('token3')
    intermediate_chain = token.chain(token2)
    chain = intermediate_chain.chain(token3)
    assert not chain.triggered
    token3.trigger()
    assert chain.triggered
    assert chain.triggered_token == token3
    assert not intermediate_chain.triggered
Example #20
async def test_finished_task_with_exceptions_is_raised_on_cancellation():
    token = CancelToken('token')

    ready = asyncio.Event()

    async def _signal_then_raise():
        ready.set()
        raise ValueError("raising from _signal_then_raise")

    # schedule in the background
    task = asyncio.ensure_future(token.cancellable_wait(_signal_then_raise()))
    # wait until the coro is running and we know it's raised an error
    await ready.wait()
    # trigger the cancel token
    token.trigger()

    with pytest.raises(ValueError, match="raising from _signal_then_raise"):
        await task
    await assert_only_current_task_not_done()
Example #21
    def __init__(self,
                 token: CancelToken = None,
                 loop: asyncio.AbstractEventLoop = None) -> None:
        if self.logger is None:
            self.logger = cast(
                TraceLogger,
                logging.getLogger(self.__module__ + '.' +
                                  self.__class__.__name__))

        self._run_lock = asyncio.Lock()
        self.cleaned_up = asyncio.Event()
        self._child_services = []

        self.loop = loop
        base_token = CancelToken(type(self).__name__, loop=loop)

        if token is None:
            self.cancel_token = base_token
        else:
            self.cancel_token = base_token.chain(token)
Example #22
 async def run() -> None:
     try:
         await discovery.bootstrap()
         while True:
             await discovery.lookup_random(CancelToken("Unused"))
             print("====================================================")
             print("Random nodes: ",
                   list(discovery.get_nodes_to_connect(10)))
             print("====================================================")
     except OperationCancelled:
         await discovery.stop()
Example #23
async def test_cancelling_token_wait_cleans_up_chained_token_waits():
    parent = CancelToken('parent')
    child = parent.chain(CancelToken('child'))

    # this schedules both the child's `wait()` and the parent's `wait()`
    fut = asyncio.ensure_future(child.wait())
    # yield for a moment to let them spin up.
    await asyncio.sleep(0.01)

    # ensure that there are some not-done tasks
    with pytest.raises(AssertionError):
        await assert_only_current_task_not_done()

    # cancel the wait (which should also properly cancel the parent wait)
    fut.cancel()
    try:
        await fut
    except asyncio.CancelledError:
        pass
    await assert_only_current_task_not_done()
Example #24
    def __init__(self,
                 remote: Node,
                 private_key: datatypes.PrivateKey,
                 reader: asyncio.StreamReader,
                 writer: asyncio.StreamWriter,
                 token: CancelToken = None) -> None:
        self.remote = remote
        self._private_key = private_key
        self._reader = reader
        self._writer = writer

        if token is None:
            token = CancelToken('MemoryTransport')
        self._token = token
Example #25
class DiscoveryProtocolFactory(factory.Factory):
    class Meta:
        model = discovery.DiscoveryProtocol

    privkey = factory.LazyFunction(generate_privkey)
    address = factory.SubFactory(AddressFactory)
    bootstrap_nodes = factory.LazyFunction(tuple)
    cancel_token = factory.LazyFunction(lambda: CancelToken('discovery-test'))

    @classmethod
    def from_seed(cls, seed: bytes, *args: Any,
                  **kwargs: Any) -> discovery.DiscoveryProtocol:
        privkey = keys.PrivateKey(keccak(seed))
        return cls(*args, privkey=privkey, **kwargs)
Example #26
    def get_connected_readers(self, address: Address) -> ReaderWriterPair:
        external_reader, internal_writer = direct_pipe()
        internal_reader, external_writer = addressed_pipe(address)

        token = CancelToken(str(address)).chain(self.cancel_token)
        connection = asyncio.ensure_future(
            _connect_streams(
                internal_reader,
                internal_writer,
                cast(AddressedTransport, external_writer.transport).queue,
                token,
            ))
        self.connections[token] = connection

        return (external_reader, external_writer)
Example #27
async def test_other_awaitables_are_cancelled_if_one_finishes():
    token = CancelToken('token')

    short_fut = asyncio.ensure_future(asyncio.sleep(0))
    long_fut = asyncio.ensure_future(asyncio.sleep(10))

    await token.cancellable_wait(short_fut, long_fut)

    # they should both be done
    assert short_fut.done()
    assert long_fut.done()

    # only the long one should be cancelled
    assert not short_fut.cancelled()
    assert long_fut.cancelled()

    await assert_only_current_task_not_done()
Example #28
async def test_server_incoming_connection(monkeypatch, server, event_loop):
    use_eip8 = False
    token = CancelToken("initiator")
    initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY,
                                   use_eip8, token)
    reader, writer = await initiator.connect()
    # Send auth init message to the server, then read and decode auth ack
    aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake(
        initiator, reader, writer, token)

    transport = Transport(
        remote=RECEIVER_REMOTE,
        private_key=initiator.privkey,
        reader=reader,
        writer=writer,
        aes_secret=aes_secret,
        mac_secret=mac_secret,
        egress_mac=egress_mac,
        ingress_mac=ingress_mac,
    )
    initiator_peer = ParagonPeer(
        transport=transport,
        context=ParagonContext(),
        token=token,
    )
    # Perform p2p/sub-proto handshake, completing the full handshake and causing a new peer to be
    # added to the server's pool.
    await initiator_peer.do_p2p_handshake()
    await initiator_peer.do_sub_proto_handshake()

    # wait for peer to be processed
    while len(server.peer_pool) == 0:
        await asyncio.sleep(0)

    assert len(server.peer_pool.connected_nodes) == 1
    receiver_peer = list(server.peer_pool.connected_nodes.values())[0]
    assert isinstance(receiver_peer, ParagonPeer)
    assert initiator_peer.sub_proto is not None
    assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name
    assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version
    # Check the public key so we don't need to access the private `_private_key` attribute.
    assert receiver_peer.transport.public_key == RECEIVER_PRIVKEY.public_key
Example #29
class WireMock():

    messages = []  # type: ignore
    cancel_token = CancelToken("WireMock")

    def __init__(self, sender):
        self.sender = sender

    def send_ping(self, node):
        echo = hex(random.randint(0, 2**256))[-32:]
        self.messages.append((node, 'ping', echo))
        return echo

    def send_pong(self, node, echo):
        self.messages.append((node, 'pong', echo))

    def send_find_node(self, node, nodeid):
        self.messages.append((node, 'find_node', nodeid))

    def send_neighbours(self, node, neighbours):
        self.messages.append((node, 'neighbours', neighbours))
Example #30
    async def wait_first(self,
                         *awaitables: Awaitable[_TReturn],
                         token: CancelToken = None,
                         timeout: float = None) -> _TReturn:
        """
        Wait for the first awaitable to complete, unless we timeout or the token chain is triggered.

        The given token is chained with this service's token, so triggering either will cancel
        this.

        Returns the result of the first one to complete.

        Raises TimeoutError if we timeout or OperationCancelled if the token chain is triggered.

        All pending futures are cancelled before returning.
        """
        if token is None:
            token_chain = self.cancel_token
        else:
            token_chain = token.chain(self.cancel_token)
        return await token_chain.cancellable_wait(*awaitables, timeout=timeout)
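
A hedged usage sketch of wait_first from inside a service method; the coroutines and handler below (_request_from_primary, _request_from_fallback, _handle_result) are hypothetical placeholders, and the timeout error is assumed to be asyncio.TimeoutError as raised by asyncio.wait_for.

    async def fetch_with_deadline(self) -> None:
        # Hypothetical call site: race two awaitables against each other,
        # bounded by a 5 second timeout and by this service's cancel token.
        try:
            result = await self.wait_first(
                self._request_from_primary(),
                self._request_from_fallback(),
                timeout=5,
            )
        except asyncio.TimeoutError:
            self.logger.warning('Neither source answered within 5s')
            return
        except OperationCancelled:
            # The token chain was triggered; the service is shutting down.
            return
        self._handle_result(result)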