Example #1
    def _get_blocks(self,
                    start_block: BaseBeaconBlock,
                    max_blocks: int) -> Iterable[BaseBeaconBlock]:
        if max_blocks < 0:
            raise Exception("Invariant: max blocks cannot be negative")

        if max_blocks == 0:
            return

        yield start_block

        blocks_generator = cons(start_block, (
            self.db.get_canonical_block_by_slot(slot)
            for slot in itertools.count(start_block.slot + 1)
        ))
        max_blocks_generator = take(max_blocks, blocks_generator)

        try:
            # ensure only a connected chain is returned (breaks might occur if the start block is
            # not part of the canonical chain or if the canonical chain changes during execution)
            for parent, child in sliding_window(2, max_blocks_generator):
                if child.parent_root == parent.hash:
                    yield child
                else:
                    break
        except BlockNotFound:
            return
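For reference, a minimal sketch of the same cons/take/sliding_window pipeline on plain integers (assuming the toolz implementations of those helpers): the start element is prepended, the stream is capped at max_blocks, and each window pairs a parent with its child.

from toolz import cons, take, sliding_window

canonical = iter(range(101, 110))         # stand-in for the blocks after the start slot
pipeline = take(4, cons(100, canonical))  # prepend the start "block", cap at max_blocks

# Only the child of each (parent, child) window is yielded above, so the
# start element is emitted exactly once by the separate `yield start_block`.
assert list(sliding_window(2, pipeline)) == [(100, 101), (101, 102), (102, 103)]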
Example #2
    async def _ping_oldest_routing_table_entry(self) -> None:
        await self._routing_table_ready.wait()

        while self.manager.is_running:
            # Preserve lazy iteration while still checking that the iterable
            # is not empty before passing it to `min` below, which would
            # otherwise raise an ambiguous `ValueError` for an empty iterable.
            nodes_iter = self.routing_table.iter_all_random()
            try:
                first_node_id = first(nodes_iter)
            except StopIteration:
                await trio.sleep(ROUTING_TABLE_KEEP_ALIVE)
                continue
            else:
                least_recently_ponged_node_id = min(
                    cons(first_node_id, nodes_iter),
                    key=lambda node_id: self._last_pong_at.get(node_id, 0),
                )

            too_old_at = trio.current_time() - ROUTING_TABLE_KEEP_ALIVE
            try:
                last_pong_at = self._last_pong_at[
                    least_recently_ponged_node_id]
            except KeyError:
                pass
            else:
                if last_pong_at > too_old_at:
                    await trio.sleep(last_pong_at - too_old_at)
                    continue

            did_bond = await self.bond(least_recently_ponged_node_id)
            if not did_bond:
                self.routing_table.remove(least_recently_ponged_node_id)
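The first/cons dance above generalizes to any lazy iterable: peek at one element to detect emptiness, then push it back with cons so min still sees the full sequence. A self-contained sketch (hypothetical lazy_min helper, toolz's first and cons):

from toolz import cons, first

def lazy_min(iterable, key):
    it = iter(iterable)
    try:
        head = first(it)   # raises StopIteration when the iterable is empty
    except StopIteration:
        return None        # the caller decides what "empty" means
    return min(cons(head, it), key=key)

assert lazy_min(iter([3, 1, 2]), key=abs) == 1
assert lazy_min(iter([]), key=abs) is None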
Example #3
    def _get_nodes_for_exploration(self) -> Iterator[Tuple[NodeID, int]]:
        candidates = self._get_ordered_candidates()
        candidate_triplets = sliding_window(
            3, caboose(cons(None, candidates), None))

        for left_id, node_id, right_id in candidate_triplets:
            # Filter out nodes that have already been queried
            if node_id in self.queried:
                continue
            elif node_id in self.in_flight:
                continue

            # By looking at the two closest *sibling* nodes we can determine
            # how much of their routing table we need to query.  We consider
            # the maximum logarithmic distance to either neighbor which
            # guarantees that we look up the region of the network that this
            # node knows the most about, but avoid querying buckets for which
            # other nodes are going to have a more complete view.
            if left_id is None:
                left_distance = 256
            else:
                left_distance = compute_log_distance(node_id, left_id)

            if right_id is None:
                right_distance = 256
            else:
                right_distance = compute_log_distance(node_id, right_id)

            # We use the maximum distance to ensure that we cover every part of
            # the address space.
            yield node_id, max(left_distance, right_distance)
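The None padding at both ends is what guarantees every candidate shows up in the middle slot of exactly one triplet. A sketch with a stand-in caboose (assuming the real helper appends a single element, mirroring cons's prepend):

import itertools
from toolz import cons, sliding_window

def caboose(seq, el):
    return itertools.chain(seq, (el,))

candidates = ["a", "b", "c"]
triplets = sliding_window(3, caboose(cons(None, candidates), None))
assert list(triplets) == [(None, "a", "b"), ("a", "b", "c"), ("b", "c", None)]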
Example #4
File: sparse.py Project: wschwab/trinity
def calc_merkle_tree_from_leaves(leaves: Sequence[Hash32]) -> MerkleTree:
    if len(leaves) == 0:
        raise ValueError("No leaves given")
    tree: Tuple[Sequence[Hash32], ...] = (leaves,)
    for i in range(TreeDepth):
        if len(tree[0]) % 2 == 1:
            tree = update_tuple_item(tree, 0, tuple(tree[0]) + (EmptyNodeHashes[i],))
        tree = tuple(cons(_hash_layer(tree[0]), tree))
    return MerkleTree(tree)
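Each pass conses the freshly hashed layer onto the front of the tuple, so tree[0] is always the layer closest to the root. A toy single-level version (assuming sha256 as the pairwise hash; the real _hash_layer and EmptyNodeHashes live elsewhere in sparse.py):

import hashlib
from toolz import cons, partition

def toy_hash_layer(layer):
    return tuple(
        hashlib.sha256(left + right).digest()
        for left, right in partition(2, layer)
    )

leaves = (b"\x01" * 32, b"\x02" * 32)
tree = (leaves,)
tree = tuple(cons(toy_hash_layer(tree[0]), tree))  # one level up

root_layer, leaf_layer = tree
assert len(root_layer) == 1 and leaf_layer == leaves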
Example #5
def _get_partition_indices(encoded_sizes: Sequence[int],
                           max_payload_size: int) -> Iterable[int]:
    cumulative_sizes = accumulate(operator.add, encoded_sizes)
    offset = 0
    yield 0
    for idx, (last_size,
              size) in enumerate(sliding_window(2, cons(0, cumulative_sizes))):
        if size - offset > max_payload_size:
            offset = last_size
            yield idx
    yield len(encoded_sizes)
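A worked run of the generator above (using toolz's accumulate/cons/sliding_window and operator, as the module imports them): with item sizes (2, 3, 4, 5) and a 7-byte budget, the cumulative sizes are (2, 5, 9, 14), and a cut is made whenever the running total since the last cut would exceed the budget.

indices = list(_get_partition_indices((2, 3, 4, 5), max_payload_size=7))
assert indices == [0, 2, 3, 4]
# i.e. slices [0:2], [2:3], [3:4] with payload sizes 5, 4 and 5.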
Example #6
async def stream_transport_messages(transport: TransportAPI,
                                    base_protocol: BaseP2PProtocol,
                                    *protocols: ProtocolAPI,
                                    ) -> AsyncIterator[Tuple[ProtocolAPI, CommandAPI[Any]]]:
    """
    Streams 2-tuples of (Protocol, Command) over the provided `Transport`
    """
    # A cache for looking up the proper protocol instance for a given command
    # id.
    command_id_cache: Dict[int, ProtocolAPI] = {}

    while not transport.is_closing:
        try:
            msg = await transport.recv()
        except PeerConnectionLost:
            return

        command_id = msg.command_id

        if msg.command_id not in command_id_cache:
            if command_id < base_protocol.command_length:
                command_id_cache[command_id] = base_protocol
            else:
                for protocol in protocols:
                    if command_id < protocol.command_id_offset + protocol.command_length:
                        command_id_cache[command_id] = protocol
                        break
                else:
                    protocol_infos = '  '.join(tuple(
                        (
                            f"{proto.name}@{proto.version}"
                            f"[offset={proto.command_id_offset},"
                            f"command_length={proto.command_length}]"
                        )
                        for proto in cons(base_protocol, protocols)
                    ))
                    raise UnknownProtocolCommand(
                        f"No protocol found for command_id {command_id}: Available "
                        f"protocol/offsets are: {protocol_infos}"
                    )

        msg_proto = command_id_cache[command_id]
        command_type = msg_proto.get_command_type_for_command_id(command_id)

        try:
            cmd = command_type.decode(msg, msg_proto.snappy_support)
        except (rlp.exceptions.DeserializationError, snappy_CompressedLengthError) as err:
            raise MalformedMessage(f"Failed to decode {msg} for {command_type}") from err

        yield msg_proto, cmd

        # yield to the event loop for a moment to allow `transport.is_closing`
        # a chance to update.
        await asyncio.sleep(0)
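The command-id routing above leans on Python's for/else: the else clause runs only when the loop finishes without a break, i.e. when no protocol claims the id. A stripped-down sketch with made-up numeric ranges:

def find_protocol(command_id, base_length, protocols):
    # protocols: sequence of (name, offset, length) tuples
    if command_id < base_length:
        return "base"
    for name, offset, length in protocols:
        if command_id < offset + length:
            found = name
            break
    else:  # no break: nothing claimed this command id
        raise LookupError(f"No protocol for command_id {command_id}")
    return found

assert find_protocol(3, 16, [("eth", 16, 8)]) == "base"
assert find_protocol(17, 16, [("eth", 16, 8)]) == "eth"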
Example #7
async def get_node_versions(network: NetworkAPI, graph_db: GraphDatabaseAPI,
                            key: Key,
                            cache: TraversalCache) -> Tuple[SGNodeAPI, ...]:
    content_key = graph_key_to_content_key(key)

    send_channel, receive_channel = trio.open_memory_channel[SGNodeAPI](
        2048)  # too big...

    async def do_get_graph_node(location: Node,
                                send_channel: trio.abc.SendChannel[SGNodeAPI],
                                ) -> None:
        if location.node_id == network.client.local_node_id:
            return

        if (location, key) in cache:
            async with send_channel:
                await send_channel.send(cache[(location, key)])
            return

        with trio.move_on_after(GET_GRAPH_NODE_TIMEOUT):
            try:
                node = await network.get_graph_node(location, key=key)
            except NotFound:
                pass
            else:
                cache[(location, key)] = node
                async with send_channel:
                    await send_channel.send(node)

    content_locations = await network.locations(content_key)
    async with trio.open_nursery() as nursery:
        async with send_channel:
            for location in content_locations:
                nursery.start_soon(do_get_graph_node, location,
                                   send_channel.clone())

        async with receive_channel:
            nodes = tuple([node async for node in receive_channel])

    # Include the versions from our local database as well...
    try:
        nodes = tuple(cons(graph_db.get(key), nodes))
    except KeyError:
        pass

    return nodes
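The merge at the end is a compact optimistic-lookup pattern: try to cons the local copy onto the front, and let a KeyError mean "no local version". In isolation, with a plain dict standing in for graph_db:

from toolz import cons

def with_local(graph_db, key, nodes):
    try:
        return tuple(cons(graph_db[key], nodes))
    except KeyError:
        return nodes

assert with_local({"k": "local"}, "k", ("a", "b")) == ("local", "a", "b")
assert with_local({}, "k", ("a", "b")) == ("a", "b")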
Example #8
    def _deserialize_stream_to_tuple(
            self, stream: IO[bytes]) -> Iterable[TDeserialized]:
        if self.element_sedes.is_fixed_sized:
            element_size = self.element_sedes.get_fixed_size()
            data = stream.read()
            if len(data) % element_size != 0:
                raise DeserializationError(
                    f"List is comprised of a fixed size sedes but the total "
                    f"serialized data is not an even multiple of the element "
                    f"size. data length: {len(data)}  element size: "
                    f"{element_size}")
            for start_idx in range(0, len(data), element_size):
                segment = data[start_idx:start_idx + element_size]
                yield self.element_sedes.deserialize(segment)
        else:
            stream_zero_loc = stream.tell()
            try:
                first_offset = s_decode_offset(stream)
            except DeserializationError:
                if stream.tell() == stream_zero_loc:
                    # Empty list
                    return
                else:
                    raise

            num_remaining_offset_bytes = first_offset - stream.tell()
            if num_remaining_offset_bytes % OFFSET_SIZE != 0:
                raise DeserializationError(
                    f"Offset bytes was not a multiple of {OFFSET_SIZE}.  Got "
                    f"{num_remaining_offset_bytes}")

            num_remaining_offsets = num_remaining_offset_bytes // OFFSET_SIZE
            tail_offsets = tuple(
                s_decode_offset(stream) for _ in range(num_remaining_offsets))

            offsets = tuple(cons(first_offset, tail_offsets))

            for left_offset, right_offset in sliding_window(2, offsets):
                element_length = right_offset - left_offset
                element_data = read_exact(element_length, stream)
                yield self.element_sedes.deserialize(element_data)

            # simply reading to the end of the current stream gives us all of the final element data
            final_element_data = stream.read()
            yield self.element_sedes.deserialize(final_element_data)
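The offset arithmetic in isolation (assuming toolz's cons and sliding_window, with toy offsets rather than real SSZ framing): consecutive offsets delimit each variable-size element, and the final element runs from the last offset to the end of the payload, which is why the stream is simply read out at the end.

from toolz import cons, sliding_window

payload = b"aabbbcccc"
first_offset, tail_offsets = 0, (2, 5)
offsets = tuple(cons(first_offset, tail_offsets))

elements = [payload[left:right] for left, right in sliding_window(2, offsets)]
elements.append(payload[offsets[-1]:])  # "read to the end of the stream"
assert elements == [b"aa", b"bbb", b"cccc"]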
Example #9
async def stream_transport_messages(transport: TransportAPI,
                                    base_protocol: BaseP2PProtocol,
                                    *protocols: ProtocolAPI,
                                    token: CancelToken = None,
                                    ) -> AsyncIterator[Tuple[ProtocolAPI, CommandAPI, Payload]]:
    """
    Streams 3-tuples of (Protocol, Command, Payload) over the provided `Transport`
    """
    # A cache for looking up the proper protocol instance for a given command
    # id.
    cmd_id_cache: Dict[int, ProtocolAPI] = {}

    while not transport.is_closing:
        raw_msg = await transport.recv(token)

        cmd_id = get_devp2p_cmd_id(raw_msg)

        if cmd_id not in cmd_id_cache:
            if cmd_id < base_protocol.cmd_length:
                cmd_id_cache[cmd_id] = base_protocol
            else:
                for protocol in protocols:
                    if cmd_id < protocol.cmd_id_offset + protocol.cmd_length:
                        cmd_id_cache[cmd_id] = protocol
                        break
                else:
                    protocol_infos = '  '.join(tuple(
                        f"{proto.name}@{proto.version}[offset={proto.cmd_id_offset},cmd_length={proto.cmd_length}]"  # noqa: E501
                        for proto in cons(base_protocol, protocols)
                    ))
                    raise UnknownProtocolCommand(
                        f"No protocol found for cmd_id {cmd_id}: Available "
                        f"protocol/offsets are: {protocol_infos}"
                    )

        msg_proto = cmd_id_cache[cmd_id]
        cmd = msg_proto.cmd_by_id[cmd_id]
        msg = cmd.decode(raw_msg)

        yield msg_proto, cmd, msg

        # yield to the event loop for a moment to allow `transport.is_closing`
        # a chance to update.
        await asyncio.sleep(0)
Example #10
async def stream_transport_messages(
    transport: TransportAPI,
    base_protocol: BaseP2PProtocol,
    *protocols: ProtocolAPI,
    token: CancelToken = None,
) -> AsyncIterator[Tuple[ProtocolAPI, CommandAPI[Any]]]:
    """
    Streams 2-tuples of (Protocol, Command) over the provided `Transport`
    """
    # A cache for looking up the proper protocol instance for a given command
    # id.
    command_id_cache: Dict[int, ProtocolAPI] = {}

    while not transport.is_closing:
        msg = await transport.recv(token)
        command_id = msg.command_id

        if msg.command_id not in command_id_cache:
            if command_id < base_protocol.command_length:
                command_id_cache[command_id] = base_protocol
            else:
                for protocol in protocols:
                    if command_id < protocol.command_id_offset + protocol.command_length:
                        command_id_cache[command_id] = protocol
                        break
                else:
                    protocol_infos = '  '.join(
                        tuple((f"{proto.name}@{proto.version}"
                               f"[offset={proto.command_id_offset},"
                               f"command_length={proto.command_length}]")
                              for proto in cons(base_protocol, protocols)))
                    raise UnknownProtocolCommand(
                        f"No protocol found for command_id {command_id}: Available "
                        f"protocol/offsets are: {protocol_infos}")

        msg_proto = command_id_cache[command_id]
        command_type = msg_proto.get_command_type_for_command_id(command_id)
        cmd = command_type.decode(msg, msg_proto.snappy_support)

        yield msg_proto, cmd

        # yield to the event loop for a moment to allow `transport.is_closing`
        # a chance to update.
        await asyncio.sleep(0)
Example #11
    def serialize(cls, enr: ENRAPI) -> Tuple[bytes, ...]:
        serialized_sequence_number = big_endian_int.serialize(
            enr.sequence_number)

        sorted_key_value_pairs = sorted(enr.items(),
                                        key=operator.itemgetter(0))

        serialized_keys = tuple(
            binary.serialize(key) for key, _ in sorted_key_value_pairs)
        values_and_serializers = tuple(
            (value, ENR_KEY_SEDES_MAPPING.get(key, FALLBACK_ENR_VALUE_SEDES))
            for key, value in sorted_key_value_pairs)
        serialized_values = tuple(
            value_serializer.serialize(value)
            for value, value_serializer in values_and_serializers)
        return tuple(
            cons(
                serialized_sequence_number,
                interleave((serialized_keys, serialized_values)),
            ))
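The flattening step in isolation (toolz's cons and interleave): keys and values are woven back into a flat key/value sequence with the sequence number at the front, matching the RLP content layout of an ENR. Toy values:

from toolz import cons, interleave

seq_num = b"\x01"
keys = (b"id", b"ip")
values = (b"v4", b"\x7f\x00\x00\x01")

flat = tuple(cons(seq_num, interleave((keys, values))))
assert flat == (b"\x01", b"id", b"v4", b"ip", b"\x7f\x00\x00\x01")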
Example #12
    def topics(self) -> List[HexStr]:
        arg_topics = tuple(arg.match_values for arg in self.indexed_args)
        return normalize_topic_list(cons(to_hex(self.event_topic), arg_topics))
Example #13
    def get_protocols(self) -> Tuple[ProtocolAPI, ...]:
        return tuple(cons(self._base_protocol, self._protocols))
Example #14
def serialize_paths(paths: Sequence[TreePath]) -> bytes:
    return b"".join((serialize_path(previous, path)
                     for previous, path in sliding_window(2, cons((), paths))))
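Seeding the window with an empty tuple gives the first path a predecessor to diff against, so every path is serialized as a delta. The pairing in isolation (toolz's cons and sliding_window):

from toolz import cons, sliding_window

paths = ((1,), (1, 2), (2,))
pairs = list(sliding_window(2, cons((), paths)))
assert pairs == [((), (1,)), ((1,), (1, 2)), ((1, 2), (2,))]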
Example #15
async def stream_transport_messages(transport: TransportAPI,
                                    base_protocol: BaseP2PProtocol,
                                    *protocols: ProtocolAPI,
                                    ) -> AsyncIterator[Tuple[ProtocolAPI, CommandAPI[Any]]]:
    """
    Streams 2-tuples of (Protocol, Command) over the provided `Transport`

    Raises a TimeoutError if nothing is received in constants.CONN_IDLE_TIMEOUT seconds.
    """
    # A cache for looking up the proper protocol instance for a given command
    # id.
    command_id_cache: Dict[int, ProtocolAPI] = {}
    loop = asyncio.get_event_loop()

    while not transport.is_closing:
        try:
            msg = await transport.recv()
        except PeerConnectionLost:
            transport.logger.debug(
                "Lost connection to %s, leaving stream_transport_messages()", transport.remote)
            return

        command_id = msg.command_id

        if msg.command_id not in command_id_cache:
            if command_id < base_protocol.command_length:
                command_id_cache[command_id] = base_protocol
            else:
                for protocol in protocols:
                    if command_id < protocol.command_id_offset + protocol.command_length:
                        command_id_cache[command_id] = protocol
                        break
                else:
                    protocol_infos = '  '.join(tuple(
                        (
                            f"{proto.name}@{proto.version}"
                            f"[offset={proto.command_id_offset},"
                            f"command_length={proto.command_length}]"
                        )
                        for proto in cons(base_protocol, protocols)
                    ))
                    raise UnknownProtocolCommand(
                        f"No protocol found for command_id {command_id}: Available "
                        f"protocol/offsets are: {protocol_infos}"
                    )

        msg_proto = command_id_cache[command_id]
        command_type = msg_proto.get_command_type_for_command_id(command_id)

        try:
            if len(msg.body) > MAX_IN_LOOP_DECODE_SIZE:
                cmd = await loop.run_in_executor(
                    None,
                    command_type.decode,
                    msg,
                    msg_proto.snappy_support,
                )
            else:
                cmd = command_type.decode(msg, msg_proto.snappy_support)

        except (rlp.exceptions.DeserializationError, snappy_CompressedLengthError) as err:
            raise MalformedMessage(f"Failed to decode {msg} for {command_type}") from err

        yield msg_proto, cmd

        # yield to the event loop for a moment to allow `transport.is_closing`
        # a chance to update.
        await asyncio.sleep(0)