Example #1
    def __init__(self, multiplexer: MultiplexerAPI,
                 devp2p_receipt: DevP2PReceipt,
                 protocol_receipts: Sequence[HandshakeReceiptAPI],
                 is_dial_out: bool) -> None:
        self.logger = get_logger('p2p.connection.Connection')
        # The multiplexer passed to us will have been started when performing the handshake, so it
        # is already reading messages from the transport and storing them in per-protocol queues.
        self._multiplexer = multiplexer
        # Stop early in case the multiplexer is no longer streaming.
        self._multiplexer.raise_if_streaming_error()
        self._devp2p_receipt = devp2p_receipt
        self.protocol_receipts = tuple(protocol_receipts)
        self.is_dial_out = is_dial_out

        self._protocol_handlers = collections.defaultdict(set)
        self._command_handlers = collections.defaultdict(set)
        self._msg_handlers = set()

        # An event that controls when the connection will start reading from
        # the individual multiplexed protocol streams and feeding handlers.
        # This ensures that the connection does not start consuming messages
        # before all necessary handlers have been added
        self._handlers_ready = asyncio.Event()

        self.behaviors_applied = asyncio.Event()

        self._logics = {}
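The gating described in the comments above is a plain asyncio.Event: consumption blocks until the owner signals that every handler has been registered. A minimal, self-contained sketch of that pattern follows; the names are illustrative, not part of the Connection API.

import asyncio
import contextlib


async def consume(queue: "asyncio.Queue[str]", handlers_ready: asyncio.Event) -> None:
    # Block until the owner signals that all handlers have been added.
    await handlers_ready.wait()
    while True:
        msg = await queue.get()
        print("handling", msg)


async def main() -> None:
    queue: "asyncio.Queue[str]" = asyncio.Queue()
    ready = asyncio.Event()
    consumer = asyncio.create_task(consume(queue, ready))
    await queue.put("hello")  # buffered while the gate is still closed
    ready.set()               # all handlers registered; consumption may start
    await asyncio.sleep(0.1)  # let the consumer drain the queue
    consumer.cancel()
    with contextlib.suppress(asyncio.CancelledError):
        await consumer


asyncio.run(main())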
Example #2
    def __init__(self,
                 transport: TransportAPI,
                 base_protocol: BaseP2PProtocol,
                 protocols: Sequence[ProtocolAPI],
                 max_queue_size: int = 4096) -> None:
        self.logger = get_logger('p2p.multiplexer.Multiplexer')
        self._transport = transport
        # the base `p2p` protocol instance.
        self._base_protocol = base_protocol

        # the sub-protocol instances
        self._protocols = protocols

        self._streaming_task: asyncio.Future[None] = None

        # Lock management on a per-protocol basis to ensure we only have one
        # stream consumer for each protocol.
        self._protocol_locks = {
            type(protocol): asyncio.Lock()
            for protocol in self.get_protocols()
        }

        # Each protocol gets a queue where messages for the individual protocol
        # are placed when streamed from the transport
        self._protocol_queues = {
            type(protocol): asyncio.Queue(max_queue_size)
            for protocol in self.get_protocols()
        }

        self._msg_counts = collections.defaultdict(int)
        self._last_msg_time = 0
        self._started_streaming = asyncio.Event()
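The lock and queue dictionaries above implement a simple discipline: every protocol type gets its own bounded queue, and a per-protocol asyncio.Lock guarantees that at most one consumer drains each queue at a time. A stripped-down sketch of the same layout; TinyMultiplexer and stream_protocol_messages are hypothetical names, not the real API.

import asyncio
from typing import AsyncIterator, Type


class PingProtocol:
    """Stand-in for a real protocol class; only its type is used as a dict key."""


class TinyMultiplexer:
    def __init__(self) -> None:
        protocols = (PingProtocol,)
        self._protocol_locks = {proto: asyncio.Lock() for proto in protocols}
        self._protocol_queues = {proto: asyncio.Queue(4096) for proto in protocols}

    async def stream_protocol_messages(
            self, protocol_type: Type[PingProtocol]) -> AsyncIterator[bytes]:
        # The per-protocol lock enforces a single consumer per queue; a
        # second caller for the same protocol blocks here until released.
        async with self._protocol_locks[protocol_type]:
            while True:
                yield await self._protocol_queues[protocol_type].get()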
Example #3
    def __init__(self,
                 privkey: keys.PrivateKey,
                 context: BasePeerContext,
                 max_peers: int = DEFAULT_MAX_PEERS,
                 event_bus: EndpointAPI = None,
                 metrics_registry: MetricsRegistry = None,
                 ) -> None:
        self.logger = get_logger(self.__module__ + '.' + self.__class__.__name__)

        self.privkey = privkey
        self.max_peers = max_peers
        self.context = context

        self.connected_nodes: Dict[SessionAPI, BasePeer] = {}

        self._subscribers: List[PeerSubscriber] = []
        self._event_bus = event_bus

        if metrics_registry is None:
            # Initialize with a MetricsRegistry from pyformance as p2p cannot depend on Trinity
            # This is so that we don't need to pass a MetricsRegistry in tests and mocked pools.
            metrics_registry = MetricsRegistry()
        self._active_peer_counter = metrics_registry.counter('trinity.p2p/peers.counter')
        self._peer_reporter_registry = self.get_peer_reporter_registry(metrics_registry)

        # Restricts the number of concurrent connection attempts that can be made
        self._connection_attempt_lock = asyncio.BoundedSemaphore(MAX_CONCURRENT_CONNECTION_ATTEMPTS)

        # Ensure we can only have a single concurrent handshake in flight per remote
        self._handshake_locks = ResourceLock()

        self.peer_backends = self.setup_peer_backends()
        self.connection_tracker = self.setup_connection_tracker()
Example #4
    def __init__(self, identity_scheme_registry: IdentitySchemeRegistry,
                 db: DatabaseAPI) -> None:
        self.db = db
        self.logger = get_logger(".".join((
            self.__module__,
            self.__class__.__name__,
        )))
        self._identity_scheme_registry = identity_scheme_registry
Example #5
    def __init__(self, remote: NodeAPI, private_key: datatypes.PrivateKey,
                 reader: asyncio.StreamReader,
                 writer: asyncio.StreamWriter) -> None:
        self.logger = get_logger('p2p.tools.memory_transport.MemoryTransport')
        self.remote = remote
        self.session = Session(remote)
        self._private_key = private_key
        self._reader = reader
        self._writer = writer
Example #6
class ParagonProtocol(BaseProtocol):
    name = 'paragon'
    version = 1
    commands = (
        BroadcastData,
        GetSum,
        Sum,
    )
    command_length = 3
    logger = get_logger("p2p.tools.paragon.proto.ParagonProtocol")
Example #7
    def __init__(
            self, remote: NodeAPI, privkey: datatypes.PrivateKey, use_eip8: bool) -> None:
        if remote is None:
            raise ValidationError("Cannot create handshake with None remote")
        elif remote.address is None:
            raise ValidationError("Cannot create handshake with remote address=None")

        self.logger = get_logger("p2p.peer.Handshake")
        self.remote = remote
        self.privkey = privkey
        self.ephemeral_privkey = ecies.generate_privkey()
        self.use_eip8 = use_eip8
Example #8
    def __init__(self, center_node_id: NodeID, bucket_size: int) -> None:
        self.logger = get_logger("p2p.kademlia.KademliaRoutingTable")
        self.center_node_id = center_node_id
        self.bucket_size = bucket_size

        self.buckets: Tuple[Deque[NodeID], ...] = tuple(
            collections.deque(maxlen=bucket_size)
            for _ in range(NUM_ROUTING_TABLE_BUCKETS))
        self.replacement_caches: Tuple[Deque[NodeID], ...] = tuple(
            collections.deque() for _ in range(NUM_ROUTING_TABLE_BUCKETS))

        self.bucket_update_order: Deque[int] = collections.deque()
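Which bucket a node belongs to is conventionally derived from the XOR distance between its ID and center_node_id: the bucket index is the bit length of the distance minus one, so IDs sharing a longer common prefix land in lower-numbered buckets. A hedged sketch of that standard Kademlia computation, assuming node IDs are raw bytes (the real NodeID type may differ):

def compute_bucket_index(center_node_id: bytes, node_id: bytes) -> int:
    # XOR distance between the two IDs, read as big-endian integers.
    distance = int.from_bytes(center_node_id, "big") ^ int.from_bytes(node_id, "big")
    if distance == 0:
        raise ValueError("The center node itself has no bucket")
    # floor(log2(distance)): bucket 0 holds the closest possible distinct IDs.
    return distance.bit_length() - 1

With 32-byte IDs this yields indices 0 through 255, which is where a NUM_ROUTING_TABLE_BUCKETS of 256 would come from.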
Example #9
    def __init__(
        self,
        connection: ConnectionAPI,
        context: BasePeerContext,
        event_bus: EndpointAPI = None,
    ) -> None:
        self.logger = get_logger('p2p.peer.BasePeer')
        self._finished_callbacks = []
        # Peer context object
        self.context = context

        # Connection instance
        self.connection = connection

        # TODO: need to remove this property but for now it is here to support
        # backwards compat
        for protocol_class in self.supported_sub_protocols:
            try:
                self.sub_proto = self.connection.get_protocol_by_type(
                    protocol_class)
            except UnknownProtocol:
                pass
            else:
                break
        else:
            raise ValidationError(
                "No supported subprotocols found in Connection")

        # Optional event bus handle
        self._event_bus = event_bus

        # Flag indicating whether the connection this peer represents was
        # established from a dial-out or dial-in (True: dial-in, False:
        # dial-out)
        # TODO: rename to `dial_in` and have a computed property for `dial_out`
        self.inbound = connection.is_dial_in
        self._subscribers: List[PeerSubscriber] = []

        # A counter of the number of messages this peer has received for each
        # message type.
        self.received_msgs: Dict[CommandAPI[Any],
                                 int] = collections.defaultdict(int)

        # Manages the boot process
        self.boot_manager = self.get_boot_manager()
        self.connection_tracker = self.setup_connection_tracker()

        self.process_handshake_receipts()
        # This API provides an awaitable so that users of the
        # `peer.connection.get_logic` APIs can wait until the logic APIs have
        # been installed to the connection.
        self.ready = asyncio.Event()
Example #10
    def __init__(self, transport: TransportAPI, command_id_offset: int,
                 snappy_support: bool) -> None:
        self.logger = get_logger('p2p.protocol.Protocol')
        self.transport = transport
        self.command_id_offset = command_id_offset
        self.snappy_support = snappy_support

        self.command_id_by_type = {
            command_type: command_id_offset + command_type.protocol_command_id
            for command_type in self.commands
        }
        self.command_type_by_id = {
            command_id: command_type
            for command_type, command_id in self.command_id_by_type.items()
        }
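The two dictionaries above are just an offset table and its inverse: each command's protocol-local id is shifted by the protocol's command_id_offset to get the wire-level id, and the reverse map is used when decoding incoming messages. A worked example with hypothetical Ping/Pong commands; the offset of 16 is an assumption, chosen because devp2p reserves the low command ids for the base protocol.

class Ping:
    protocol_command_id = 0


class Pong:
    protocol_command_id = 1


offset = 16  # assumed first id after the base `p2p` protocol's reserved range
command_id_by_type = {cmd: offset + cmd.protocol_command_id for cmd in (Ping, Pong)}
assert command_id_by_type == {Ping: 16, Pong: 17}

command_type_by_id = {v: k for k, v in command_id_by_type.items()}
assert command_type_by_id[17] is Pong  # the lookup used when decoding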
Example #11
    def __init__(self,
                 transport: TransportAPI,
                 base_protocol: BaseP2PProtocol,
                 protocols: Sequence[ProtocolAPI],
                 token: CancelToken = None,
                 max_queue_size: int = 4096) -> None:
        self.logger = get_logger('p2p.multiplexer.Multiplexer')
        if token is None:
            loop = None
        else:
            loop = token.loop
        base_token = CancelToken(f'multiplexer[{transport.remote}]', loop=loop)

        if token is None:
            self.cancel_token = base_token
        else:
            self.cancel_token = base_token.chain(token)

        self._transport = transport
        # the base `p2p` protocol instance.
        self._base_protocol = base_protocol

        # the sub-protocol instances
        self._protocols = protocols

        # Lock to ensure that multiple call sites cannot concurrently stream
        # messages.
        self._multiplex_lock = asyncio.Lock()

        # Lock management on a per-protocol basis to ensure we only have one
        # stream consumer for each protocol.
        self._protocol_locks = {
            type(protocol): asyncio.Lock()
            for protocol
            in self.get_protocols()
        }

        # Each protocol gets a queue where messages for the individual protocol
        # are placed when streamed from the transport
        self._protocol_queues = {
            type(protocol): asyncio.Queue(max_queue_size)
            for protocol
            in self.get_protocols()
        }

        self._msg_counts = collections.defaultdict(int)
        self._last_msg_time = 0
Example #12
@contextlib.asynccontextmanager
async def cancel_pending_tasks(*tasks: asyncio.Task[Any],
                               timeout: int) -> AsyncIterator[None]:
    """
    Cancel and await all of the given tasks that are still pending, in no specific order.

    If the cancelled tasks have not completed after the given timeout, a TimeoutError is raised.

    Ignores any asyncio.CancelledErrors.
    """
    try:
        yield
    finally:
        logger = get_logger('p2p.asyncio_utils.cancel_pending_tasks')
        cancelled: List[asyncio.Task[Any]] = []
        for task in tasks:
            if not task.done():
                task.cancel()
                cancelled.append(task)

        # It'd save us one indentation level on the block of code below if we had an early return
        # in case there are no cancelled tasks, but it turns out an early return inside a finally:
        # block silently cancels an active exception being raised, so we use an if/else to avoid
        # having to check if there is an active exception and re-raising it.
        if cancelled:
            logger.debug("Cancelled tasks %s, now waiting for them to return",
                         task)
            errors: List[BaseException] = []
            # Wait for all tasks in parallel so if any of them catches CancelledError and performs a
            # slow cleanup the others don't have to wait for it.
            done, pending = await asyncio.wait(cancelled, timeout=timeout)
            if pending:
                errors.append(
                    asyncio.TimeoutError(
                        f"Tasks never returned after being cancelled: {pending}"))
            # We use future as the variable name here because that's what asyncio.wait returns
            # above.
            for future in done:
                # future.exception() will raise a CancelledError if the future was cancelled by us
                # above, so we must suppress that here.
                with contextlib.suppress(asyncio.CancelledError):
                    if future.exception():
                        errors.append(future.exception())
            if errors:
                raise MultiError(errors)
        else:
            logger.debug("No pending tasks in %s, returning", task)
Example #13
async def wait_first(tasks: Sequence[asyncio.Task[Any]],
                     max_wait_after_cancellation: float) -> None:
    """
    Wait for the first of the given tasks to complete, then cancel all others.

    If the completed task raised an exception, that is re-raised.

    If the task running us is cancelled, all tasks will be cancelled, in no specific order.

    Any exceptions raised by the cancelled tasks are re-raised as a
    trio.MultiError, which will include the exception from the completed task (if any) in its
    context.

    If the cancelled tasks don't return in max_wait_after_cancellation seconds, a TimeoutError
    will be raised.
    """
    for task in tasks:
        if not isinstance(task, asyncio.Task):
            raise ValueError(f"{task} is not an asyncio.Task")

    logger = get_logger('p2p.asyncio_utils.wait_first')
    async with cancel_pending_tasks(*tasks,
                                    timeout=max_wait_after_cancellation):
        try:
            done, pending = await asyncio.wait(
                tasks, return_when=asyncio.FIRST_COMPLETED)
        except (KeyboardInterrupt, asyncio.CancelledError) as err:
            logger.debug("Got %r waiting for %s, cancelling them all", err,
                         tasks)
            raise
        except BaseException:
            logger.exception(
                "Unexpected error waiting for %s, cancelling them all", tasks)
            raise
        else:
            logger.debug("Task %s finished, cancelling pending ones: %s", done,
                         pending)
            if len(done) != 1:
                raise Exception(
                    "Invariant: asyncio.wait() returned more than one task even "
                    f"though we used return_when=asyncio.FIRST_COMPLETED: {done}")
            done_task = first(done)
            if done_task.exception():
                raise done_task.exception()
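A usage sketch for wait_first, with hypothetical fast/slow coroutines: the call returns once the first task completes, cancelling the rest and giving them max_wait_after_cancellation seconds to unwind.

import asyncio


async def fast() -> str:
    await asyncio.sleep(0.1)
    return "fast"


async def slow() -> None:
    await asyncio.sleep(3600)


async def main() -> None:
    tasks = [asyncio.create_task(fast()), asyncio.create_task(slow())]
    # Returns when fast() finishes; slow() is cancelled and must unwind
    # within 2 seconds or a TimeoutError is raised.
    await wait_first(tasks, max_wait_after_cancellation=2)


asyncio.run(main())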
Example #14
    def __init__(self, multiplexer: MultiplexerAPI,
                 devp2p_receipt: DevP2PReceipt,
                 protocol_receipts: Sequence[HandshakeReceiptAPI],
                 is_dial_out: bool) -> None:
        self.logger = get_logger('p2p.connection.Connection')
        self._multiplexer = multiplexer
        self._devp2p_receipt = devp2p_receipt
        self.protocol_receipts = tuple(protocol_receipts)
        self.is_dial_out = is_dial_out

        self._protocol_handlers = collections.defaultdict(set)
        self._command_handlers = collections.defaultdict(set)
        self._msg_handlers = set()

        # An event that controls when the connection will start reading from
        # the individual multiplexed protocol streams and feeding handlers.
        # This ensures that the connection does not start consuming messages
        # before all necessary handlers have been added
        self._handlers_ready = asyncio.Event()

        self._logics = {}
Example #15
    def add_msg(self, msg: PeerMessage) -> bool:
        """
        Add a :class:`~p2p.peer.PeerMessage` to the subscriber.
        """
        peer, cmd = msg

        if hasattr(self, 'logger'):
            logger = self.logger  # type: ignore
        else:
            logger = get_logger('p2p.peer.BasePeer')

        if not self.is_subscription_command(type(cmd)):
            logger.debug2(
                "Discarding %s msg from %s; not subscribed to msg type; "
                "subscriptions: %s",
                loggable(cmd),
                peer,
                self.subscription_msg_types,
            )
            return False

        try:
            logger.debug2(
                "Adding %s msg from %s to queue; queue_size=%d",
                loggable(cmd),
                peer,
                self.queue_size,
            )
            self.msg_queue.put_nowait(msg)
            return True
        except asyncio.queues.QueueFull:
            logger.warning(
                "%s msg queue is full; discarding %s msg from %s",
                self.__class__.__name__,
                loggable(cmd),
                peer,
            )
            return False
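The drop-on-full policy above keeps a slow subscriber from blocking the producer: put_nowait either enqueues immediately or raises QueueFull, and the boolean return value reports which happened. The same discipline in isolation, with hypothetical names:

import asyncio


def offer(queue: "asyncio.Queue[str]", msg: str) -> bool:
    try:
        queue.put_nowait(msg)  # never blocks
        return True
    except asyncio.QueueFull:
        return False  # message dropped; the producer keeps going


async def main() -> None:
    queue: "asyncio.Queue[str]" = asyncio.Queue(maxsize=1)
    assert offer(queue, "first")
    assert not offer(queue, "second")  # queue is full, so this one is dropped


asyncio.run(main())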
Example #16
    def logger(self) -> ExtendedDebugLogger:
        return get_logger('p2p.handshake.Handshaker')
Example #17
    def logger(self) -> ExtendedDebugLogger:
        return get_logger('p2p.peer.MsgBuffer')
Example #18
    def __init__(self, peer: 'BasePeer') -> None:
        self.logger = get_logger('p2p.peer.BasePeerBootManager')
        self.peer = peer
Example #19
    def logger(self) -> ExtendedDebugLogger:
        if self._logger is None:
            self._logger = get_logger(
                self.__module__ + '.' + self.__class__.__name__
            )
        return self._logger
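The property above lazily creates and caches the logger so subclasses automatically log under their own dotted name. A standalone sketch of the same pattern, using the stdlib logging.getLogger in place of the project's get_logger and ExtendedDebugLogger:

import logging
from typing import Optional


class HasLogger:
    _logger: Optional[logging.Logger] = None

    @property
    def logger(self) -> logging.Logger:
        # Created on first access and cached on the instance, so the
        # lookup runs at most once and the name reflects the subclass.
        if self._logger is None:
            self._logger = logging.getLogger(
                self.__module__ + '.' + self.__class__.__name__
            )
        return self._logger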
Example #20
    def __init__(self) -> None:
        self.logger = get_logger('p2p.p2p_api.P2PAPI')
        self.add_child_behavior(PongWhenPinged().as_behavior())
        self.add_child_behavior(
            DisconnectIfIdle(constants.CONN_IDLE_TIMEOUT).as_behavior())
Example #21
    def __init__(self) -> None:
        self.logger = get_logger('p2p.p2p_api.P2PAPI')
        self.add_child_behavior(PongWhenPinged().as_behavior())