Example #1
    def __init__(
        self,
        node: "AbstractGatewayNode",
        connection: AbstractGatewayBlockchainConnection,
    ):
        super().__init__(node, connection)
        self.node: "EthGatewayNode" = cast("EthGatewayNode", node)

        self.ordered_block_queue = deque(
            maxlen=gateway_constants.BLOCK_QUEUE_LENGTH_LIMIT)
        self.block_checking_alarms = {}
        self.block_check_repeat_count = defaultdict(int)

        self.accepted_block_hash_at_height = ExpiringDict(
            node.alarm_queue, gateway_constants.MAX_BLOCK_CACHE_TIME_S,
            f"eth_block_queue_accepted_block_by_height_{self.connection.endpoint}"
        )
        self.sent_block_at_height = ExpiringDict(
            node.alarm_queue, gateway_constants.MAX_BLOCK_CACHE_TIME_S,
            f"eth_block_queue_sent_block_at_height_{self.connection.endpoint}")
        self.best_sent_block = SentEthBlockInfo(INITIAL_BLOCK_HEIGHT,
                                                NULL_SHA256_HASH, 0)
        self.best_accepted_block = EthBlockInfo(INITIAL_BLOCK_HEIGHT,
                                                NULL_SHA256_HASH)

        self._block_hashes_by_height = ExpiringDict(
            node.alarm_queue,
            gateway_constants.MAX_BLOCK_CACHE_TIME_S,
            f"eth_block_queue_hashes_by_heights_{self.connection.endpoint}",
        )
        self._height_by_block_hash = ExpiringDict(
            node.alarm_queue, gateway_constants.MAX_BLOCK_CACHE_TIME_S,
            f"eth_block_queue_height_by_hash_{self.connection.endpoint}")
        self._recovery_alarms_by_block_hash = {}
        self._partial_chainstate = deque()
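
A note on the recurring pattern: every example in this section constructs ExpiringDict with the same three arguments (an alarm queue, an expiration interval in seconds, and a unique name used for stats). The sketch below is a minimal, self-contained illustration of that contract; FakeAlarmQueue is a hypothetical stand-in for bxcommon's real AlarmQueue, which is wired into the node's event loop.

import time
from typing import Any, Callable, Dict, Generic, List, Tuple, TypeVar

K = TypeVar("K")
V = TypeVar("V")


class FakeAlarmQueue:
    """Illustrative only: collects alarms and fires the due ones on demand."""

    def __init__(self) -> None:
        self._alarms: List[Tuple[float, Callable[..., Any], tuple]] = []

    def register_alarm(self, fire_delay_s: float, fn: Callable[..., Any], *args: Any) -> None:
        self._alarms.append((time.time() + fire_delay_s, fn, args))

    def fire_due_alarms(self) -> None:
        now = time.time()
        due = [alarm for alarm in self._alarms if alarm[0] <= now]
        self._alarms = [alarm for alarm in self._alarms if alarm[0] > now]
        for _, fn, args in due:
            fn(*args)


class ExpiringDict(Generic[K, V]):
    """Sketch: dict whose entries are evicted by an alarm after a fixed interval."""

    def __init__(self, alarm_queue: FakeAlarmQueue, expiration_time_s: float, name: str) -> None:
        self.contents: Dict[K, V] = {}
        self._alarm_queue = alarm_queue
        self._expiration_time_s = expiration_time_s
        self._name = name

    def add(self, key: K, value: V) -> None:
        self.contents[key] = value
        # schedule eviction; dict.pop(key, None) tolerates an already-removed key
        self._alarm_queue.register_alarm(self._expiration_time_s, self.contents.pop, key, None)


alarm_queue = FakeAlarmQueue()
cache: "ExpiringDict[str, int]" = ExpiringDict(alarm_queue, 0.0, "demo_cache")
cache.add("block_hash", 1)
alarm_queue.fire_due_alarms()
assert "block_hash" not in cache.contents
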
Example #2
    def __init__(self, connection: "BtcNodeConnection"):
        super(BtcNodeConnectionProtocol, self).__init__(connection)

        connection.message_handlers.update({
            BtcMessageType.VERSION: self.msg_version,
            BtcMessageType.INVENTORY: self.msg_inv,
            BtcMessageType.BLOCK: self.msg_block,
            BtcMessageType.TRANSACTIONS: self.msg_tx,
            BtcMessageType.GET_BLOCKS: self.msg_get_blocks,
            BtcMessageType.GET_HEADERS: self.msg_get_headers,
            BtcMessageType.GET_DATA: self.msg_get_data,
            BtcMessageType.REJECT: self.msg_reject,
            BtcMessageType.COMPACT_BLOCK: self.msg_compact_block,
            BtcMessageType.BLOCK_TRANSACTIONS: self.msg_block_transactions,
        })

        self.request_witness_data = False
        self._recovery_compact_blocks = ExpiringDict(
            self.node.alarm_queue,
            btc_constants.BTC_COMPACT_BLOCK_RECOVERY_TIMEOUT_S,
            f"{str(self)}_compact_btc_recoveries")
        self.ping_interval_s: int = gateway_constants.BLOCKCHAIN_PING_INTERVAL_S
        self.connection.node.alarm_queue.register_alarm(
            self.block_cleanup_poll_interval_s,
            self._request_blocks_confirmation)
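
The constructor above also shows the dispatch pattern shared by all of these protocol classes: handlers are bound methods registered in the connection's message_handlers map, keyed by message type. A stripped-down sketch of that pattern (all names here are illustrative, not the real bxgateway API):

from enum import Enum
from typing import Callable, Dict


class MessageType(Enum):
    VERSION = "version"
    INVENTORY = "inv"


class Connection:
    def __init__(self) -> None:
        self.message_handlers: Dict[MessageType, Callable[[object], None]] = {}

    def on_message(self, msg_type: MessageType, msg: object) -> None:
        # look up the handler registered by the protocol; ignore unknown types
        handler = self.message_handlers.get(msg_type)
        if handler is not None:
            handler(msg)


class Protocol:
    def __init__(self, connection: Connection) -> None:
        self.connection = connection
        connection.message_handlers.update({
            MessageType.VERSION: self.msg_version,
            MessageType.INVENTORY: self.msg_inv,
        })

    def msg_version(self, msg: object) -> None:
        print("got version:", msg)

    def msg_inv(self, msg: object) -> None:
        print("got inv:", msg)


conn = Connection()
Protocol(conn)
conn.on_message(MessageType.VERSION, {"protocol_version": 70015})
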
Example #3
    def __init__(self, sock: AbstractSocketConnectionProtocol,
                 node: Node) -> None:
        super(InternalNodeConnection, self).__init__(sock, node)

        # Enable buffering only on internal connections
        self.enable_buffered_send = node.opts.enable_buffered_send
        self.outputbuf = OutputBuffer(
            enable_buffering=self.enable_buffered_send)

        self.network_num = node.network_num
        self.version_manager = bloxroute_version_manager

        # Setting default protocol version; override when hello message received
        self.protocol_version = self.version_manager.CURRENT_PROTOCOL_VERSION

        self.pong_message = PongMessage()
        self.ack_message = AckMessage()

        self.can_send_pings = True
        self.pong_timeout_enabled = True

        self.ping_message_timestamps = ExpiringDict(
            self.node.alarm_queue, constants.REQUEST_EXPIRATION_TIME,
            f"{str(self)}_ping_timestamps")

        self.sync_ping_latencies: Dict[int, Optional[float]] = {}
        self._nonce_to_network_num: Dict[int, int] = {}
        self.message_validator = BloxrouteMessageValidator(
            None, self.protocol_version)
        self.tx_sync_service = TxSyncService(self)
        self.inbound_peer_latency: float = time.time()
Example #4
    def __init__(self, node):
        self._node: AbstractGatewayNode = node
        self._holds = ExpiringDict(node.alarm_queue,
                                   node.opts.blockchain_block_hold_timeout_s,
                                   f"block_processing_holds")

        self._block_validator: Optional[AbstractBlockValidator] = None
        self._last_confirmed_block_number: Optional[int] = None
        self._last_confirmed_block_difficulty: Optional[int] = None
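
The _holds dict above anchors a simple lifecycle: a hold is placed when another gateway announces the block, and a timeout alarm eventually releases it if the block never arrives from the BDN. A hedged sketch of that lifecycle, with BlockHold simplified to a dataclass rather than the real class:

import time
from dataclasses import dataclass
from typing import Dict, Optional


@dataclass
class BlockHold:
    hold_message_time: float
    holding_connection: str
    alarm: Optional[int] = None
    block_message: Optional[bytes] = None


holds: Dict[str, BlockHold] = {}


def place_hold(block_hash: str, connection: str) -> None:
    # first hold wins; repeated hold messages for the same hash are ignored
    if block_hash not in holds:
        holds[block_hash] = BlockHold(time.time(), connection)


def on_hold_timeout(block_hash: str) -> None:
    # the hold expired without the block arriving from the BDN: process it anyway
    hold = holds.pop(block_hash, None)
    if hold is not None and hold.block_message is not None:
        print("processing held block", block_hash)


place_hold("0xabc", "gateway_peer_1")
on_hold_timeout("0xabc")
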
Example #5
    def __init__(self, connection, is_handshake_initiator, private_key,
                 public_key):
        super(EthNodeConnectionProtocol, self).__init__(
            connection, is_handshake_initiator, private_key, public_key)

        connection.message_handlers.update({
            EthProtocolMessageType.STATUS: self.msg_status,
            EthProtocolMessageType.TRANSACTIONS: self.msg_tx,
            EthProtocolMessageType.GET_BLOCK_HEADERS: self.msg_get_block_headers,
            EthProtocolMessageType.GET_BLOCK_BODIES: self.msg_get_block_bodies,
            EthProtocolMessageType.GET_NODE_DATA: self.msg_proxy_request,
            EthProtocolMessageType.GET_RECEIPTS: self.msg_get_receipts,
            EthProtocolMessageType.BLOCK_HEADERS: self.msg_block_headers,
            EthProtocolMessageType.NEW_BLOCK: self.msg_block,
            EthProtocolMessageType.NEW_BLOCK_HASHES: self.msg_new_block_hashes,
            EthProtocolMessageType.BLOCK_BODIES: self.msg_block_bodies,
        })

        self.waiting_checkpoint_headers_request = True

        # keyed by block hash, with a NewBlockParts structure as the value
        self.pending_new_block_parts: ExpiringDict[
            Sha256Hash, NewBlockParts] = ExpiringDict(
                self.node.alarm_queue,
                eth_common_constants.NEW_BLOCK_PARTS_MAX_WAIT_S,
                f"{str(self)}_eth_pending_block_parts")

        # hashes of new blocks constructed from headers and bodies, ready to send to BDN
        self._ready_new_blocks: Deque[Sha256Hash] = deque()

        # queue of lists of hashes that are awaiting block bodies response
        self._block_bodies_requests: Deque[List[Sha256Hash]] = deque(
            maxlen=eth_common_constants.REQUESTED_NEW_BLOCK_BODIES_MAX_COUNT)

        if self.block_cleanup_poll_interval_s > 0:
            self.connection.node.alarm_queue.register_alarm(
                self.block_cleanup_poll_interval_s,
                self._request_blocks_confirmation)

        self.requested_blocks_for_confirmation: ExpiringDict[
            Sha256Hash, float] = ExpiringDict(
                self.node.alarm_queue,
                eth_common_constants.BLOCK_CONFIRMATION_REQUEST_CACHE_INTERVAL_S,
                f"{str(self)}_eth_requested_blocks")
        self._connection_established_time = 0.0
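
Example #5 buffers headers and bodies separately (pending_new_block_parts) until both halves of a new block have arrived, then queues the assembled block for the BDN. A condensed sketch of that pairing logic; NewBlockParts here is a deliberately simplified stand-in for the real structure:

from typing import Dict, Optional


class NewBlockParts:
    def __init__(self) -> None:
        self.header: Optional[bytes] = None
        self.body: Optional[bytes] = None

    def is_complete(self) -> bool:
        return self.header is not None and self.body is not None


pending: Dict[str, NewBlockParts] = {}


def on_block_header(block_hash: str, header: bytes) -> Optional[NewBlockParts]:
    parts = pending.setdefault(block_hash, NewBlockParts())
    parts.header = header
    return pending.pop(block_hash) if parts.is_complete() else None


def on_block_body(block_hash: str, body: bytes) -> Optional[NewBlockParts]:
    parts = pending.setdefault(block_hash, NewBlockParts())
    parts.body = body
    return pending.pop(block_hash) if parts.is_complete() else None


assert on_block_header("0xabc", b"header") is None
assert on_block_body("0xabc", b"body") is not None  # both halves present
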
Example #6
    def __init__(self, node):
        self._node: AbstractGatewayNode = node
        self._holds = ExpiringDict(node.alarm_queue,
                                   node.opts.blockchain_block_hold_timeout_s,
                                   f"block_processing_holds")

        self._block_validator: Optional[AbstractBlockValidator] = None
        self._last_confirmed_block_number: Optional[int] = None
        self._last_confirmed_block_difficulty: Optional[int] = None
        self._blocks_failed_validation_history: LimitedSizeSet[
            Sha256Hash] = LimitedSizeSet(
                constants.BLOCKS_FAILED_VALIDATION_HISTORY_SIZE)
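
Example #6 caps its failed-validation history with a LimitedSizeSet. A self-contained sketch of such a bounded set, assuming FIFO eviction of the oldest entry (the real bxcommon implementation may differ in detail):

from collections import OrderedDict
from typing import Generic, TypeVar

T = TypeVar("T")


class LimitedSizeSet(Generic[T]):
    def __init__(self, max_size: int) -> None:
        self._items: "OrderedDict[T, None]" = OrderedDict()
        self._max_size = max_size

    def add(self, item: T) -> None:
        self._items[item] = None
        self._items.move_to_end(item)
        while len(self._items) > self._max_size:
            self._items.popitem(last=False)  # evict the oldest entry

    def __contains__(self, item: object) -> bool:
        return item in self._items


history: "LimitedSizeSet[str]" = LimitedSizeSet(2)
for block_hash in ("a", "b", "c"):
    history.add(block_hash)
assert "a" not in history and "c" in history
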
 def __init__(self, node: "AbstractGatewayNode"):
     super().__init__(node)
     self.node: "OntGatewayNode" = cast("OntGatewayNode", node)
     self._block_hashes_by_height = ExpiringDict(
         node.alarm_queue,
         gateway_constants.MAX_BLOCK_CACHE_TIME_S,
         "ont_block_queue_hashes_by_heights",
     )
Example #8
    def setUp(self):
        self.node = MockGatewayNode(
            gateway_helpers.get_gateway_opts(8000, max_block_interval_s=0))
        self.node.block_parts_storage = ExpiringDict(
            self.node.alarm_queue,
            gateway_constants.MAX_BLOCK_CACHE_TIME_S,
            "eth_block_queue_parts",
        )

        self.node_connection = Mock()
        self.node_connection.is_active = MagicMock(return_value=True)
        self.node.set_known_total_difficulty = MagicMock()

        self.node_conn = self.node_connection

        self.block_queuing_service = EthBlockQueuingService(
            self.node, self.node_conn)
        self.node.block_queuing_service_manager.add_block_queuing_service(
            self.node_conn, self.block_queuing_service)
        self.node_conn.enqueue_msg = MagicMock()

        self.block_hashes = []
        self.block_messages = []
        self.block_headers = []
        self.block_bodies = []

        # block numbers: 1000-1019
        prev_block_hash = None
        for i in range(20):
            block_message = InternalEthBlockInfo.from_new_block_msg(
                mock_eth_messages.new_block_eth_protocol_message(
                    i, i + 1000, prev_block_hash=prev_block_hash))
            block_hash = block_message.block_hash()
            self.block_hashes.append(block_hash)
            self.block_messages.append(block_message)

            block_parts = block_message.to_new_block_parts()
            self.block_headers.append(
                BlockHeadersEthProtocolMessage.from_header_bytes(
                    block_parts.block_header_bytes).get_block_headers()[0])
            self.block_bodies.append(
                BlockBodiesEthProtocolMessage.from_body_bytes(
                    block_parts.block_body_bytes).get_blocks()[0])

            self.node.block_queuing_service_manager.push(
                block_hash, block_message)
            prev_block_hash = block_hash

        for block_hash in self.block_hashes:
            self.block_queuing_service.remove_from_queue(block_hash)

        self.block_queuing_service.best_sent_block = SentEthBlockInfo(
            1019, self.block_hashes[-1], time.time())
        self.block_queuing_service.best_accepted_block = EthBlockInfo(
            1019, self.block_hashes[-1])
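
Worth noting in the setUp above: the node connection is a plain Mock with enqueue_msg replaced by a MagicMock, so tests can later assert exactly which messages the queuing service pushed. A compact, self-contained illustration of that pattern:

import unittest
from unittest.mock import MagicMock


class EnqueueAssertionTest(unittest.TestCase):
    def test_enqueued_message_is_observable(self) -> None:
        conn = MagicMock()
        conn.is_active = MagicMock(return_value=True)

        # code under test would call this; here we call it directly
        conn.enqueue_msg("new_block_hashes_msg")

        conn.enqueue_msg.assert_called_once_with("new_block_hashes_msg")


if __name__ == "__main__":
    unittest.main()
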
Example #9
    def __init__(self, sock, address, node, from_me=False):
        super(InternalNodeConnection, self).__init__(sock, address, node, from_me)

        # Enable buffering only on internal connections
        self.enable_buffered_send = node.opts.enable_buffered_send
        self.outputbuf = OutputBuffer(enable_buffering=self.enable_buffered_send)

        self.network_num = node.network_num
        self.version_manager = bloxroute_version_manager

        # Setting default protocol version and message factory; override when hello message received
        self.message_factory = bloxroute_message_factory
        self.protocol_version = self.version_manager.CURRENT_PROTOCOL_VERSION

        self.ping_message = PingMessage()
        self.pong_message = PongMessage()
        self.ack_message = AckMessage()

        self.can_send_pings = True
        self.ping_message_timestamps = ExpiringDict(self.node.alarm_queue, constants.REQUEST_EXPIRATION_TIME)
        self.message_validator = BloxrouteMessageValidator(None, self.protocol_version)
    def __init__(self, node: "AbstractGatewayNode"):
        self.node = node

        # queue of tuple (block hash, timestamp) for blocks that need to be
        # sent to blockchain node
        self._block_queue: Deque[BlockQueueEntry] = deque(
            maxlen=gateway_constants.BLOCK_QUEUE_LENGTH_LIMIT)
        self._blocks_waiting_for_recovery: Dict[Sha256Hash, bool] = {}
        self._blocks: ExpiringDict[
            Sha256Hash, Optional[TBlockMessage]] = ExpiringDict(
                node.alarm_queue, gateway_constants.MAX_BLOCK_CACHE_TIME_S,
                "block_queuing_service_blocks")

        self._blocks_seen_by_blockchain_node: ExpiringSet[
            Sha256Hash] = ExpiringSet(
                node.alarm_queue,
                gateway_constants.GATEWAY_BLOCKS_SEEN_EXPIRATION_TIME_S,
                "block_queuing_service_blocks_seen",
            )
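
The _block_queue above is a deque with maxlen, which means the oldest queued entry is dropped silently once the limit is reached. A quick demonstration of that bounded-queue behavior, using an illustrative limit of 3 (the real value lives in gateway_constants.BLOCK_QUEUE_LENGTH_LIMIT):

import time
from collections import deque
from typing import Deque, NamedTuple


class BlockQueueEntry(NamedTuple):
    block_hash: str
    timestamp: float


queue: Deque[BlockQueueEntry] = deque(maxlen=3)
for i in range(5):
    queue.append(BlockQueueEntry(f"hash_{i}", time.time()))

# the two oldest entries were evicted automatically
assert [entry.block_hash for entry in queue] == ["hash_2", "hash_3", "hash_4"]
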
Example #11
    def setUp(self):
        self.node = MockGatewayNode(
            gateway_helpers.get_gateway_opts(
                8000, max_block_interval_s=BLOCK_INTERVAL))
        self.node.block_parts_storage = ExpiringDict(
            self.node.alarm_queue,
            gateway_constants.MAX_BLOCK_CACHE_TIME_S,
            "eth_block_queue_parts",
        )
        self.node.alarm_queue = AlarmQueue()
        self.node.set_known_total_difficulty = MagicMock()
        self.block_processing_service = EthBlockProcessingService(self.node)
        self.node.block_processing_service = self.block_processing_service

        self.node_connection = MockConnection(
            MockSocketConnection(1, self.node, ip_address=LOCALHOST,
                                 port=8002), self.node)
        self.node_connection.is_active = MagicMock(return_value=True)
        self.block_queuing_service = EthBlockQueuingService(
            self.node, self.node_connection)
        self.node.block_queuing_service_manager.add_block_queuing_service(
            self.node_connection, self.block_queuing_service)
        self.node_connection.enqueue_msg = MagicMock()

        self.node_connection_2 = MockConnection(
            MockSocketConnection(1, self.node, ip_address=LOCALHOST,
                                 port=8003), self.node)
        self.node_connection_2.is_active = MagicMock(return_value=True)
        self.block_queuing_service_2 = EthBlockQueuingService(
            self.node, self.node_connection_2)
        self.node.block_queuing_service_manager.add_block_queuing_service(
            self.node_connection_2, self.block_queuing_service_2)
        self.node_connection_2.enqueue_msg = MagicMock()

        self.blockchain_connections = [
            self.node_connection, self.node_connection_2
        ]
        self.block_queuing_services = [
            self.block_queuing_service, self.block_queuing_service_2
        ]

        time.time = MagicMock(return_value=time.time())
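
The last line of the setUp above freezes time by assigning directly to time.time, which leaks into other tests unless tearDown restores it. unittest.mock.patch gives the same effect with automatic cleanup; a short sketch:

import time
from unittest.mock import patch


def seconds_since(start: float) -> float:
    return time.time() - start


with patch("time.time", return_value=1_000_000.0):
    assert seconds_since(999_990.0) == 10.0
# outside the context manager, the real time.time is restored
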
Example #12
class BtcNodeConnectionProtocol(BtcBaseConnectionProtocol):
    def __init__(self, connection: "BtcNodeConnection"):
        super(BtcNodeConnectionProtocol, self).__init__(connection)

        # pyre-fixme[16]: Optional type has no attribute `update`.
        connection.message_handlers.update({
            BtcMessageType.VERSION: self.msg_version,
            BtcMessageType.INVENTORY: self.msg_inv,
            BtcMessageType.BLOCK: self.msg_block,
            BtcMessageType.TRANSACTIONS: self.msg_tx,
            BtcMessageType.GET_BLOCKS: self.msg_proxy_request,
            BtcMessageType.GET_HEADERS: self.msg_proxy_request,
            BtcMessageType.GET_DATA: self.msg_get_data,
            BtcMessageType.REJECT: self.msg_reject,
            BtcMessageType.COMPACT_BLOCK: self.msg_compact_block,
            BtcMessageType.BLOCK_TRANSACTIONS: self.msg_block_transactions,
        })

        self.request_witness_data = False
        self._recovery_compact_blocks = ExpiringDict(
            self.node.alarm_queue,
            btc_constants.BTC_COMPACT_BLOCK_RECOVERY_TIMEOUT_S,
            f"{str(self)}_compact_btc_recoveries")
        self.ping_interval_s: int = gateway_constants.BLOCKCHAIN_PING_INTERVAL_S
        self.connection.node.alarm_queue.register_alarm(
            self.block_cleanup_poll_interval_s,
            self._request_blocks_confirmation)

    def msg_version(self, msg: VersionBtcMessage) -> None:
        """
        Handle version message.
        Gateway initiates connection, so do not check for misbehavior. Record that we received the version message,
        send a verack, and synchronize chains if need be.
        :param msg: VERSION message
        """
        self.request_witness_data = (msg.services() & NODE_WITNESS_SERVICE_FLAG) > 0

        if self.request_witness_data:
            self.connection.log_debug(
                "Connection with Bitcoin node supports SegWit.")

        self.connection.on_connection_established()
        reply = VerAckBtcMessage(self.magic)
        self.connection.enqueue_msg(reply)

        send_compact_msg = SendCompactBtcMessage(
            self.magic, on_flag=self.node.opts.compact_block, version=1)

        self.node.alarm_queue.register_alarm(2, self.connection.enqueue_msg,
                                             send_compact_msg)

        self.node.alarm_queue.register_alarm(self.ping_interval_s,
                                             self.connection.send_ping)

        if self.connection.is_active():
            self.node.on_blockchain_connection_ready(self.connection)

    def msg_inv(self, msg: InvBtcMessage) -> None:
        """
        Handle an inventory message.

        Requests all transactions and blocks that haven't been previously seen.
        :param msg: INV message
        """
        contains_block = False
        inventory_requests = []
        block_hashes = []
        for inventory_type, item_hash in msg:
            if InventoryType.is_block(inventory_type):
                if not self.node.should_process_block_hash(item_hash):
                    continue
                block_hashes.append(item_hash)
                if item_hash not in self.node.blocks_seen.contents:
                    contains_block = True
                    inventory_requests.append((inventory_type, item_hash))
            else:
                inventory_requests.append((inventory_type, item_hash))

        self.node.block_cleanup_service.mark_blocks_and_request_cleanup(
            block_hashes)

        if inventory_requests:
            get_data = GetDataBtcMessage(
                magic=msg.magic(),
                inv_vects=inventory_requests,
                request_witness_data=self.request_witness_data)
            self.connection.enqueue_msg(get_data, prepend=contains_block)

        self.node.block_queuing_service.mark_blocks_seen_by_blockchain_node(
            block_hashes)
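
    # Note: when the inventory contains a block, the GETDATA request above is
    # enqueued with prepend=contains_block, so block fetches jump ahead of any
    # queued transaction traffic on the outbound buffer.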

    def msg_get_data(self, msg: GetDataBtcMessage) -> None:
        """
        Handle GETDATA message from Bitcoin node.
        :param msg: GETDATA message
        """

        for inv_type, object_hash in msg:
            if InventoryType.is_block(inv_type):
                block_stats.add_block_event_by_block_hash(
                    object_hash,
                    BlockStatEventType.REMOTE_BLOCK_REQUESTED_BY_GATEWAY,
                    network_num=self.connection.network_num,
                    more_info="Protocol: {}, Network: {}".format(
                        self.node.opts.blockchain_protocol,
                        self.node.opts.blockchain_network))
            inv_msg = InvBtcMessage(magic=self.magic,
                                    inv_vects=[(InventoryType.MSG_BLOCK,
                                                object_hash)])
            self.node.send_msg_to_node(inv_msg)
        return self.msg_proxy_request(msg)

    def msg_reject(self, msg):
        """
        Handle REJECT message from Bitcoin node
        :param msg: REJECT message
        """

        # Send an inv message back to the blockchain node in case of a rejected block;
        # remaining sync communication will proxy to the remote blockchain node
        if msg.message() == BtcMessageType.BLOCK:
            inv_msg = InvBtcMessage(magic=self.magic,
                                    inv_vects=[(InventoryType.MSG_BLOCK,
                                                msg.obj_hash())])
            self.node.send_msg_to_node(inv_msg)

    def msg_compact_block(self, msg: CompactBlockBtcMessage) -> None:
        """
        Handle COMPACT BLOCK message from Bitcoin node
        :param msg: COMPACT BLOCK message
        """

        block_hash = msg.block_hash()
        if not self.node.should_process_block_hash(block_hash):
            return

        short_ids_count = len(msg.short_ids())
        block_stats.add_block_event_by_block_hash(
            block_hash,
            BlockStatEventType.COMPACT_BLOCK_RECEIVED_FROM_BLOCKCHAIN_NODE,
            network_num=self.connection.network_num,
            peer=self.connection.peer_desc,
            more_info="{} short ids".format(short_ids_count))

        if block_hash in self.node.blocks_seen.contents:
            self.node.on_block_seen_by_blockchain_node(block_hash)
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.COMPACT_BLOCK_RECEIVED_FROM_BLOCKCHAIN_NODE_IGNORE_SEEN,
                network_num=self.connection.network_num,
                peer=self.connection.peer_desc)
            self.connection.log_info(
                "Discarding duplicate block {} from local blockchain node.",
                block_hash)
            return

        max_time_offset = self.node.opts.blockchain_block_interval * self.node.opts.blockchain_ignore_block_interval_count
        if time.time() - msg.timestamp() >= max_time_offset:
            self.connection.log_trace(
                "Received block {} more than {} seconds after it was created ({}). Ignoring.",
                block_hash, max_time_offset, msg.timestamp())
            return

        self.node.track_block_from_node_handling_started(block_hash)

        if short_ids_count < self.node.opts.compact_block_min_tx_count:
            self.connection.log_debug(
                "Compact block {} contains {} short transactions, less than limit {}. Requesting full block.",
                convert.bytes_to_hex(msg.block_hash().binary), short_ids_count,
                self.node.opts.compact_block_min_tx_count)
            get_data_msg = GetDataBtcMessage(magic=self.magic,
                                             inv_vects=[
                                                 (InventoryType.MSG_BLOCK,
                                                  msg.block_hash())
                                             ])
            self.node.send_msg_to_node(get_data_msg)
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.COMPACT_BLOCK_REQUEST_FULL,
                network_num=self.connection.network_num)
            return

        self.node.block_cleanup_service.on_new_block_received(
            msg.block_hash(), msg.prev_block_hash())
        self.node.on_block_seen_by_blockchain_node(block_hash)

        self.connection.log_info(
            "Processing compact block {} from local Bitcoin node.", block_hash)

        try:
            parse_result = self.node.block_processing_service.process_compact_block(
                msg, self.connection)
        except MessageConversionError as e:
            block_stats.add_block_event_by_block_hash(
                e.msg_hash,
                BlockStatEventType.BLOCK_CONVERSION_FAILED,
                network_num=self.connection.network_num,
                conversion_type=e.conversion_type.value)
            self.connection.log_warning(log_messages.PROCESS_BLOCK_FAILURE,
                                        e.msg_hash, e)
            get_data_msg = GetDataBtcMessage(magic=self.magic,
                                             inv_vects=[
                                                 (InventoryType.MSG_BLOCK,
                                                  msg.block_hash())
                                             ])
            self.node.send_msg_to_node(get_data_msg)
            return

        if not parse_result.success:
            self._recovery_compact_blocks.add(block_hash, parse_result)

            get_block_txs_msg = GetBlockTransactionsBtcMessage(
                magic=self.magic,
                block_hash=block_hash,
                indices=parse_result.missing_indices)
            self.node.send_msg_to_node(get_block_txs_msg)

    def msg_block_transactions(self, msg: BlockTransactionsBtcMessage) -> None:
        """
        Handle BLOCK TRANSACTIONS message from Bitcoin node.
        This is the message that is sent in reply to GET BLOCK TRANSACTIONS message.
        This message exchange happens if the gateway is unable to parse a compact block from the Bitcoin node.
        :param msg: BLOCK TRANSACTIONS message
        """

        if msg.block_hash() in self._recovery_compact_blocks.contents:
            recovery_result = self._recovery_compact_blocks.contents[
                msg.block_hash()]
            self.node.block_processing_service.process_compact_block_recovery(
                msg, recovery_result, self.connection)

    def _build_get_blocks_message_for_block_confirmation(
            self, hashes: List[Sha256Hash]) -> AbstractMessage:
        return GetBlocksBtcMessage(version=self.version,
                                   magic=self.magic,
                                   hashes=hashes,
                                   hash_stop=NULL_BTC_BLOCK_HASH)
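
Example #12's compact-block path mirrors BIP 152: try to reconstruct the block from short ids, and on failure ask the node for only the missing transactions, stashing the partial parse in _recovery_compact_blocks until they arrive. A condensed sketch of that control flow (ParseResult and the function names are hypothetical stand-ins):

from typing import Dict, List, NamedTuple, Optional


class ParseResult(NamedTuple):
    success: bool
    missing_indices: List[int]


recovery_cache: Dict[str, ParseResult] = {}


def on_compact_block(block_hash: str, result: ParseResult) -> Optional[List[int]]:
    if result.success:
        return None  # block fully reconstructed; nothing more to request
    recovery_cache[block_hash] = result
    return result.missing_indices  # request these via GetBlockTransactions


def on_block_transactions(block_hash: str) -> Optional[ParseResult]:
    # resume recovery only if we still remember the failed parse
    return recovery_cache.pop(block_hash, None)


assert on_compact_block("0xabc", ParseResult(False, [3, 7])) == [3, 7]
assert on_block_transactions("0xabc") is not None
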
Example #13
    def __init__(self, alarm_queue: AlarmQueue) -> None:
        self._cache = ExpiringDict(
            alarm_queue, constants.SERIALIZED_MESSAGE_CACHE_EXPIRE_TIME_S,
            "serialized_message_cache")
Example #14
class BlockProcessingService:
    """
    Service class that process blocks.
    Blocks received from blockchain node are held if gateway receives a `blockhold` message from another gateway to
    prevent duplicate message sending.
    """
    def __init__(self, node):
        self._node: AbstractGatewayNode = node
        self._holds = ExpiringDict(node.alarm_queue,
                                   node.opts.blockchain_block_hold_timeout_s,
                                   f"block_processing_holds")

        self._block_validator: Optional[AbstractBlockValidator] = None
        self._last_confirmed_block_number: Optional[int] = None
        self._last_confirmed_block_difficulty: Optional[int] = None
        self._blocks_failed_validation_history: LimitedSizeSet[
            Sha256Hash] = LimitedSizeSet(
                constants.BLOCKS_FAILED_VALIDATION_HISTORY_SIZE)

    def place_hold(self, block_hash, connection) -> None:
        """
        Places hold on block hash and propagates message.
        :param block_hash: ObjectHash
        :param connection:
        """
        block_stats.add_block_event_by_block_hash(
            block_hash,
            BlockStatEventType.BLOCK_HOLD_REQUESTED,
            network_num=connection.network_num,
            peers=[connection],
        )

        if block_hash in self._node.blocks_seen.contents:
            return

        if block_hash not in self._holds.contents:
            self._holds.add(block_hash, BlockHold(time.time(), connection))
            conns = self._node.broadcast(
                BlockHoldingMessage(block_hash, self._node.network_num),
                broadcasting_conn=connection,
                connection_types=(ConnectionType.RELAY_BLOCK,
                                  ConnectionType.GATEWAY))
            if len(conns) > 0:
                block_stats.add_block_event_by_block_hash(
                    block_hash,
                    BlockStatEventType.BLOCK_HOLD_SENT_BY_GATEWAY_TO_PEERS,
                    network_num=self._node.network_num,
                    peers=conns,
                )

    def queue_block_for_processing(self, block_message, connection) -> None:
        """
        Queues up the block for processing on a timeout if a hold message was received.
        If no hold exists, compresses and broadcasts the block immediately.
        :param block_message: block message to process
        :param connection: receiving connection (AbstractBlockchainConnection)
        """

        block_hash = block_message.block_hash()
        connection.log_info("Processing block {} from local blockchain node.",
                            block_hash)

        valid_block = self._validate_block_header_in_block_message(
            block_message)
        if not valid_block.is_valid:
            reason = valid_block.reason
            assert reason is not None
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_RECEIVED_FROM_BLOCKCHAIN_NODE_FAILED_VALIDATION,
                connection.network_num,
                more_info=reason)
            return

        if block_hash in self._holds.contents:
            hold: BlockHold = self._holds.contents[block_hash]
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_HOLD_HELD_BLOCK,
                network_num=connection.network_num,
                peers=[hold.holding_connection],
            )
            if hold.alarm is None:
                hold.alarm = self._node.alarm_queue.register_alarm(
                    self._node.opts.blockchain_block_hold_timeout_s,
                    self._holding_timeout, block_hash, hold)
                hold.block_message = block_message
                hold.connection = connection
        else:
            if self._node.opts.encrypt_blocks:
                # Broadcast holding message if gateway wants to encrypt blocks
                conns = self._node.broadcast(
                    BlockHoldingMessage(block_hash, self._node.network_num),
                    broadcasting_conn=connection,
                    prepend_to_queue=True,
                    connection_types=(ConnectionType.RELAY_BLOCK,
                                      ConnectionType.GATEWAY))
                if len(conns) > 0:
                    block_stats.add_block_event_by_block_hash(
                        block_hash,
                        BlockStatEventType.BLOCK_HOLD_SENT_BY_GATEWAY_TO_PEERS,
                        network_num=self._node.network_num,
                        peers=conns)
            self._process_and_broadcast_block(block_message, connection)

    def cancel_hold_timeout(self, block_hash, connection) -> None:
        """
        Lifts hold on block hash and cancels timeout.
        :param block_hash: ObjectHash
        :param connection: connection cancelling hold
        """
        if block_hash in self._holds.contents:
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_HOLD_LIFTED,
                network_num=connection.network_num,
                peers=[connection],
            )

            hold = self._holds.contents[block_hash]
            if hold.alarm is not None:
                self._node.alarm_queue.unregister_alarm(hold.alarm)
            del self._holds.contents[block_hash]

    def process_block_broadcast(self, msg,
                                connection: AbstractRelayConnection) -> None:
        """
        Handles a broadcast message received from bloXroute.
        This is typically an encrypted block.
        """

        # TODO handle the situation where txs that received from relays while syncing are in the blocks that were
        #  ignored while syncing, so these txs won't be cleaned for 3 days
        if not self._node.should_process_block_hash(msg.block_hash()):
            return

        block_stats.add_block_event(
            msg,
            BlockStatEventType.ENC_BLOCK_RECEIVED_BY_GATEWAY_FROM_NETWORK,
            network_num=connection.network_num,
            more_info=stats_format.connection(connection))

        block_hash = msg.block_hash()
        is_encrypted = msg.is_encrypted()
        self._node.track_block_from_bdn_handling_started(
            block_hash, connection.peer_desc)

        if not is_encrypted:
            block = msg.blob()
            self._handle_decrypted_block(block, connection)
            return

        cipherblob = msg.blob()
        expected_hash = Sha256Hash(crypto.double_sha256(cipherblob))
        if block_hash != expected_hash:
            connection.log_warning(log_messages.BLOCK_WITH_INCONSISTENT_HASHES,
                                   expected_hash, block_hash)
            return

        if self._node.in_progress_blocks.has_encryption_key_for_hash(
                block_hash):
            connection.log_trace(
                "Already had key for received block. Sending block to node.")
            decrypt_start_timestamp = time.time()
            decrypt_start_datetime = datetime.datetime.utcnow()
            block = self._node.in_progress_blocks.decrypt_ciphertext(
                block_hash, cipherblob)

            if block is not None:
                block_stats.add_block_event(
                    msg,
                    BlockStatEventType.ENC_BLOCK_DECRYPTED_SUCCESS,
                    start_date_time=decrypt_start_datetime,
                    end_date_time=datetime.datetime.utcnow(),
                    network_num=connection.network_num,
                    more_info=stats_format.timespan(decrypt_start_timestamp,
                                                    time.time()))
                self._handle_decrypted_block(
                    block,
                    connection,
                    encrypted_block_hash_hex=convert.bytes_to_hex(
                        block_hash.binary))
            else:
                block_stats.add_block_event(
                    msg,
                    BlockStatEventType.ENC_BLOCK_DECRYPTION_ERROR,
                    network_num=connection.network_num)
        else:
            connection.log_trace("Received encrypted block. Storing.")
            self._node.in_progress_blocks.add_ciphertext(
                block_hash, cipherblob)
            block_received_message = BlockReceivedMessage(block_hash)
            conns = self._node.broadcast(
                block_received_message,
                connection,
                connection_types=(ConnectionType.GATEWAY, ))
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.ENC_BLOCK_SENT_BLOCK_RECEIPT,
                network_num=connection.network_num,
                peers=conns,
            )

    def process_block_key(self, msg,
                          connection: AbstractRelayConnection) -> None:
        """
        Handles a key message received from bloXroute.
        Looks for the encrypted block and decrypts; otherwise stores for later.
        """
        key = msg.key()
        block_hash = msg.block_hash()

        if not self._node.should_process_block_hash(block_hash):
            return

        block_stats.add_block_event_by_block_hash(
            block_hash,
            BlockStatEventType.ENC_BLOCK_KEY_RECEIVED_BY_GATEWAY_FROM_NETWORK,
            network_num=connection.network_num,
            peers=[connection])

        if self._node.in_progress_blocks.has_encryption_key_for_hash(
                block_hash):
            return

        if self._node.in_progress_blocks.has_ciphertext_for_hash(block_hash):
            connection.log_trace(
                "Cipher text found. Decrypting and sending to node.")
            decrypt_start_timestamp = time.time()
            decrypt_start_datetime = datetime.datetime.utcnow()
            block = self._node.in_progress_blocks.decrypt_and_get_payload(
                block_hash, key)

            if block is not None:
                block_stats.add_block_event_by_block_hash(
                    block_hash,
                    BlockStatEventType.ENC_BLOCK_DECRYPTED_SUCCESS,
                    start_date_time=decrypt_start_datetime,
                    end_date_time=datetime.datetime.utcnow(),
                    network_num=connection.network_num,
                    more_info=stats_format.timespan(decrypt_start_timestamp,
                                                    time.time()))
                self._handle_decrypted_block(
                    block,
                    connection,
                    encrypted_block_hash_hex=convert.bytes_to_hex(
                        block_hash.binary))
            else:
                block_stats.add_block_event_by_block_hash(
                    block_hash,
                    BlockStatEventType.ENC_BLOCK_DECRYPTION_ERROR,
                    network_num=connection.network_num)
        else:
            connection.log_trace(
                "No cipher text found on key message. Storing.")
            self._node.in_progress_blocks.add_key(block_hash, key)

        conns = self._node.broadcast(
            msg, connection, connection_types=(ConnectionType.GATEWAY, ))
        if len(conns) > 0:
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.ENC_BLOCK_KEY_SENT_BY_GATEWAY_TO_PEERS,
                network_num=self._node.network_num,
                peers=conns)

    def retry_broadcast_recovered_blocks(self, connection) -> None:
        if self._node.block_recovery_service.recovered_blocks and self._node.opts.has_fully_updated_tx_service:
            for msg, recovery_source in self._node.block_recovery_service.recovered_blocks:
                self._handle_decrypted_block(
                    msg,
                    connection,
                    recovered=True,
                    recovered_txs_source=recovery_source)

            self._node.block_recovery_service.clean_up_recovered_blocks()

    def reset_last_confirmed_block_parameters(self):
        self._last_confirmed_block_number = None
        self._last_confirmed_block_difficulty = None

    def set_last_confirmed_block_parameters(
            self, last_confirmed_block_number: int,
            last_confirmed_block_difficulty: int) -> None:

        if not self._node.peer_relays:
            logger.debug(
                "Skip updating last confirmed block parameters because there is no connection to block relay"
            )
            return

        if self._last_confirmed_block_number is not None:

            old_last_confirmed_block_number = self._last_confirmed_block_number
            assert old_last_confirmed_block_number is not None

            if last_confirmed_block_number < old_last_confirmed_block_number:
                logger.trace(
                    "New last confirmed block number {} is smaller than current {}. Skipping operation.",
                    last_confirmed_block_number,
                    old_last_confirmed_block_number)
                return

        self._last_confirmed_block_number = last_confirmed_block_number
        self._last_confirmed_block_difficulty = last_confirmed_block_difficulty

        logger.trace(
            "Updated last confirmed block number to {} and difficulty to {}.",
            self._last_confirmed_block_number,
            self._last_confirmed_block_difficulty)

    def _compute_hold_timeout(self, _block_message) -> int:
        """
        Computes the timeout to wait after receiving a block message, before sending the block anyway
        if it was not received from the network.
        TODO: implement algorithm for computing hold timeout
        :param _block_message: block message to hold
        :return: time in seconds to wait
        """
        return self._node.opts.blockchain_block_hold_timeout_s

    def _holding_timeout(self, block_hash, hold):
        block_stats.add_block_event_by_block_hash(
            block_hash,
            BlockStatEventType.BLOCK_HOLD_TIMED_OUT,
            network_num=hold.connection.network_num,
            peers=[hold.connection])
        self._process_and_broadcast_block(hold.block_message, hold.connection)

    def _process_and_broadcast_block(
            self, block_message,
            connection: AbstractGatewayBlockchainConnection) -> None:
        """
        Compresses and propagates the block message if enabled, else returns.
        :param block_message: block message to propagate
        :param connection: receiving connection (AbstractBlockchainConnection)
        """
        block_hash = block_message.block_hash()
        message_converter = self._node.message_converter
        assert message_converter is not None
        try:
            bx_block, block_info = message_converter.block_to_bx_block(
                block_message, self._node.get_tx_service(),
                self._node.opts.enable_block_compression,
                self._node.network.min_tx_age_seconds)
        except MessageConversionError as e:
            block_stats.add_block_event_by_block_hash(
                e.msg_hash,
                BlockStatEventType.BLOCK_CONVERSION_FAILED,
                network_num=connection.network_num,
                conversion_type=e.conversion_type.value)
            connection.log_error(log_messages.BLOCK_COMPRESSION_FAIL,
                                 e.msg_hash, e)
            return

        if block_info.ignored_short_ids:
            assert block_info.ignored_short_ids is not None
            logger.debug("Ignoring {} new SIDs for {}: {}",
                         len(block_info.ignored_short_ids),
                         block_info.block_hash, block_info.ignored_short_ids)

        compression_rate = block_info.compression_rate
        assert compression_rate is not None
        block_stats.add_block_event_by_block_hash(
            block_hash,
            BlockStatEventType.BLOCK_COMPRESSED,
            start_date_time=block_info.start_datetime,
            end_date_time=block_info.end_datetime,
            network_num=connection.network_num,
            prev_block_hash=block_info.prev_block_hash,
            original_size=block_info.original_size,
            txs_count=block_info.txn_count,
            blockchain_network=self._node.opts.blockchain_network,
            blockchain_protocol=self._node.opts.blockchain_protocol,
            matching_block_hash=block_info.compressed_block_hash,
            matching_block_type=StatBlockType.COMPRESSED.value,
            more_info="Compression: {}->{} bytes, {}, {}; Tx count: {}".format(
                block_info.original_size, block_info.compressed_size,
                stats_format.percentage(compression_rate),
                stats_format.duration(block_info.duration_ms),
                block_info.txn_count))
        if self._node.opts.dump_short_id_mapping_compression:
            mapping = {}
            for short_id in block_info.short_ids:
                tx_hash = self._node.get_tx_service().get_transaction(
                    short_id).hash
                assert tx_hash is not None
                mapping[short_id] = convert.bytes_to_hex(tx_hash.binary)
            with open(
                    f"{self._node.opts.dump_short_id_mapping_compression_path}/"
                    f"{convert.bytes_to_hex(block_hash.binary)}", "w") as f:
                f.write(str(mapping))

        self._process_and_broadcast_compressed_block(bx_block, connection,
                                                     block_info, block_hash)

        self._node.log_blocks_network_content(self._node.network_num,
                                              block_message)

    def _process_and_broadcast_compressed_block(
            self, bx_block, connection: AbstractGatewayBlockchainConnection,
            block_info, block_hash: Sha256Hash) -> None:
        """
        Process a compressed block.
        :param bx_block: compressed block message to process
        :param connection: receiving connection (AbstractBlockchainConnection)
        :param block_info: original block info
        :param block_hash: block hash
        """
        connection.node.neutrality_service.propagate_block_to_network(
            bx_block, connection, block_info)
        self._node.get_tx_service().track_seen_short_ids_delayed(
            block_hash, block_info.short_ids)

    def _handle_decrypted_block(
            self,
            bx_block: memoryview,
            connection: AbstractRelayConnection,
            encrypted_block_hash_hex: Optional[str] = None,
            recovered: bool = False,
            recovered_txs_source: Optional[RecoveredTxsSource] = None) -> None:
        transaction_service = self._node.get_tx_service()
        message_converter = self._node.message_converter
        assert message_converter is not None

        valid_block = self._validate_compressed_block_header(bx_block)
        if not valid_block.is_valid:
            reason = valid_block.reason
            assert reason is not None
            block_stats.add_block_event_by_block_hash(
                valid_block.block_hash,
                BlockStatEventType.BLOCK_DECOMPRESSED_FAILED_VALIDATION,
                connection.network_num,
                more_info=reason)
            return

        # TODO: determine if a real block or test block. Discard if test block.
        if self._node.remote_node_conn or self._node.has_active_blockchain_peer():
            try:
                (block_message, block_info, unknown_sids,
                 unknown_hashes) = message_converter.bx_block_to_block(
                     bx_block, transaction_service)
                block_content_debug_utils.log_compressed_block_debug_info(
                    transaction_service, bx_block)
            except MessageConversionError as e:
                block_stats.add_block_event_by_block_hash(
                    e.msg_hash,
                    BlockStatEventType.BLOCK_CONVERSION_FAILED,
                    network_num=connection.network_num,
                    conversion_type=e.conversion_type.value)
                transaction_service.on_block_cleaned_up(e.msg_hash)
                connection.log_warning(log_messages.FAILED_TO_DECOMPRESS_BLOCK,
                                       e.msg_hash, e)
                return
        else:
            connection.log_warning(log_messages.LACK_BLOCKCHAIN_CONNECTION)
            return

        block_hash = block_info.block_hash
        all_sids = block_info.short_ids

        if encrypted_block_hash_hex is not None:
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_TO_ENC_BLOCK_MATCH,
                matching_block_hash=encrypted_block_hash_hex,
                matching_block_type=StatBlockType.ENCRYPTED.value,
                network_num=connection.network_num)

        self.cancel_hold_timeout(block_hash, connection)

        if recovered:
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_RECOVERY_COMPLETED,
                network_num=connection.network_num,
                more_info=str(recovered_txs_source))

        if block_hash in self._node.blocks_seen.contents:
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_DECOMPRESSED_IGNORE_SEEN,
                start_date_time=block_info.start_datetime,
                end_date_time=block_info.end_datetime,
                network_num=connection.network_num,
                prev_block_hash=block_info.prev_block_hash,
                original_size=block_info.original_size,
                compressed_size=block_info.compressed_size,
                txs_count=block_info.txn_count,
                blockchain_network=self._node.opts.blockchain_network,
                blockchain_protocol=self._node.opts.blockchain_protocol,
                matching_block_hash=block_info.compressed_block_hash,
                matching_block_type=StatBlockType.COMPRESSED.value,
                more_info=stats_format.duration(block_info.duration_ms))
            self._node.track_block_from_bdn_handling_ended(block_hash)
            transaction_service.track_seen_short_ids(block_hash, all_sids)
            connection.log_info("Discarding duplicate block {} from the BDN.",
                                block_hash)
            if block_message is not None:
                self._node.on_block_received_from_bdn(block_hash,
                                                      block_message)
                if self._node.block_queuing_service_manager.get_block_data(
                        block_hash) is None:
                    self._node.block_queuing_service_manager.store_block_data(
                        block_hash, block_message)
            return

        if not recovered:
            connection.log_info("Received block {} from the BDN.", block_hash)
        else:
            connection.log_info("Successfully recovered block {}.", block_hash)

        if block_message is not None:
            compression_rate = block_info.compression_rate
            assert compression_rate is not None
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_DECOMPRESSED_SUCCESS,
                start_date_time=block_info.start_datetime,
                end_date_time=block_info.end_datetime,
                network_num=connection.network_num,
                prev_block_hash=block_info.prev_block_hash,
                original_size=block_info.original_size,
                compressed_size=block_info.compressed_size,
                txs_count=block_info.txn_count,
                blockchain_network=self._node.opts.blockchain_network,
                blockchain_protocol=self._node.opts.blockchain_protocol,
                matching_block_hash=block_info.compressed_block_hash,
                matching_block_type=StatBlockType.COMPRESSED.value,
                more_info="Compression rate {}, Decompression time {}, "
                "Queued behind {} blocks".format(
                    stats_format.percentage(compression_rate),
                    stats_format.duration(block_info.duration_ms),
                    self._node.block_queuing_service_manager.get_length_of_each_queuing_service_stats_format()))

            self._on_block_decompressed(block_message)
            if recovered or self._node.block_queuing_service_manager.is_in_any_queuing_service(
                    block_hash):
                self._node.block_queuing_service_manager.update_recovered_block(
                    block_hash, block_message)
            else:
                self._node.block_queuing_service_manager.push(
                    block_hash, block_message)

            gateway_bdn_performance_stats_service.log_block_from_bdn()

            self._node.on_block_received_from_bdn(block_hash, block_message)
            transaction_service.track_seen_short_ids(block_hash, all_sids)

            self._node.publish_block(None, block_hash, block_message,
                                     FeedSource.BDN_SOCKET)
            self._node.log_blocks_network_content(self._node.network_num,
                                                  block_message)
        else:
            if self._node.block_queuing_service_manager.is_in_any_queuing_service(
                    block_hash) and not recovered:
                connection.log_trace(
                    "Handling already queued block again. Ignoring.")
                return

            self._node.block_recovery_service.add_block(
                bx_block, block_hash, unknown_sids, unknown_hashes)
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_DECOMPRESSED_WITH_UNKNOWN_TXS,
                start_date_time=block_info.start_datetime,
                end_date_time=block_info.end_datetime,
                network_num=connection.network_num,
                prev_block_hash=block_info.prev_block_hash,
                original_size=block_info.original_size,
                compressed_size=block_info.compressed_size,
                txs_count=block_info.txn_count,
                blockchain_network=self._node.opts.blockchain_network,
                blockchain_protocol=self._node.opts.blockchain_protocol,
                matching_block_hash=block_info.compressed_block_hash,
                matching_block_type=StatBlockType.COMPRESSED.value,
                more_info="{} sids, {} hashes, [{},...]".format(
                    len(unknown_sids), len(unknown_hashes), unknown_sids[:5]))

            connection.log_info(
                "Block {} requires short id recovery. Querying BDN...",
                block_hash)

            self.start_transaction_recovery(unknown_sids, unknown_hashes,
                                            block_hash, connection)
            if recovered:
                # should never happen: this should not be called on blocks that have not been recovered
                connection.log_error(log_messages.BLOCK_DECOMPRESSION_FAILURE,
                                     block_hash)
            else:
                self._node.block_queuing_service_manager.push(
                    block_hash, waiting_for_recovery=True)

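    # Recovery flow note: when decompression leaves unknown short ids,
    # _handle_decrypted_block pushes a placeholder into the queuing service and
    # the method below asks transaction relays for the missing content so the
    # block can eventually be released to the blockchain node.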
    def start_transaction_recovery(
            self,
            unknown_sids: Iterable[int],
            unknown_hashes: Iterable[Sha256Hash],
            block_hash: Sha256Hash,
            connection: Optional[AbstractRelayConnection] = None) -> None:
        all_unknown_sids = []
        all_unknown_sids.extend(unknown_sids)
        tx_service = self._node.get_tx_service()

        # retrieving sids of txs with unknown contents
        for tx_hash in unknown_hashes:
            transaction_key = tx_service.get_transaction_key(tx_hash)
            tx_sid = tx_service.get_short_id_by_key(transaction_key)
            all_unknown_sids.append(tx_sid)

        if not self._node.opts.request_recovery:
            if connection is not None:
                network_num = connection.network_num
            else:
                network_num = self._node.network_num
            # log recovery started to match with recovery completing
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_RECOVERY_STARTED,
                network_num=network_num,
                txs_count=len(all_unknown_sids),
                more_info="recovery from relay is disabled",
            )
            return

        get_txs_message = GetTxsMessage(short_ids=all_unknown_sids)
        self._node.broadcast(
            get_txs_message,
            connection_types=(ConnectionType.RELAY_TRANSACTION, ))

        if connection is not None:
            tx_stats.add_txs_by_short_ids_event(
                all_unknown_sids,
                TransactionStatEventType.TX_UNKNOWN_SHORT_IDS_REQUESTED_BY_GATEWAY_FROM_RELAY,
                network_num=self._node.network_num,
                peers=[connection],
                block_hash=convert.bytes_to_hex(block_hash.binary))
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_RECOVERY_STARTED,
                network_num=connection.network_num,
                txs_count=len(all_unknown_sids),
                request_hash=convert.bytes_to_hex(
                    crypto.double_sha256(get_txs_message.rawbytes())))
        else:
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_RECOVERY_REPEATED,
                network_num=self._node.network_num,
                txs_count=len(all_unknown_sids),
                request_hash=convert.bytes_to_hex(
                    crypto.double_sha256(get_txs_message.rawbytes())))

    def schedule_recovery_retry(
            self, block_awaiting_recovery: BlockRecoveryInfo) -> None:
        """
        Schedules a block recovery attempt. Repeated block recovery attempts result in longer timeouts,
        following the `gateway_constants.BLOCK_RECOVERY_RECOVERY_INTERVAL_S` pattern, until giving up.
        :param block_awaiting_recovery: info about recovering block
        :return:
        """
        block_hash = block_awaiting_recovery.block_hash
        recovery_attempts = self._node.block_recovery_service.recovery_attempts_by_block[
            block_hash]
        recovery_timed_out = (
            time.time() - block_awaiting_recovery.recovery_start_time
            >= self._node.opts.blockchain_block_recovery_timeout_s
        )
        if recovery_attempts >= gateway_constants.BLOCK_RECOVERY_MAX_RETRY_ATTEMPTS or recovery_timed_out:
            logger.error(log_messages.SHORT_ID_RECOVERY_FAIL, block_hash)
            self._node.block_recovery_service.cancel_recovery_for_block(
                block_hash)
            self._node.block_queuing_service_manager.remove(block_hash)
        else:
            delay = gateway_constants.BLOCK_RECOVERY_RECOVERY_INTERVAL_S[
                recovery_attempts]
            self._node.alarm_queue.register_approx_alarm(
                delay, delay / 2, self._trigger_recovery_retry,
                block_awaiting_recovery)
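# A minimal, self-contained sketch of the escalating-retry pattern used by
# schedule_recovery_retry above. The interval list and limits here are
# illustrative stand-ins, not the real gateway_constants values.
import time

BLOCK_RECOVERY_RECOVERY_INTERVAL_S = [0.1, 0.5, 1.0, 5.0]  # hypothetical pattern
BLOCK_RECOVERY_MAX_RETRY_ATTEMPTS = len(BLOCK_RECOVERY_RECOVERY_INTERVAL_S)
RECOVERY_TIMEOUT_S = 30.0  # hypothetical overall timeout

def next_retry_delay(attempts: int, recovery_start_time: float) -> float:
    """Return the delay before the next retry, or -1.0 to signal giving up."""
    timed_out = time.time() - recovery_start_time >= RECOVERY_TIMEOUT_S
    if attempts >= BLOCK_RECOVERY_MAX_RETRY_ATTEMPTS or timed_out:
        return -1.0
    return BLOCK_RECOVERY_RECOVERY_INTERVAL_S[attempts]

# each failed attempt waits longer than the last, until the service gives up
assert next_retry_delay(0, time.time()) == 0.1
assert next_retry_delay(3, time.time()) == 5.0
assert next_retry_delay(4, time.time()) == -1.0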

    def _trigger_recovery_retry(
            self, block_awaiting_recovery: BlockRecoveryInfo) -> None:
        block_hash = block_awaiting_recovery.block_hash
        if self._node.block_recovery_service.awaiting_recovery(block_hash):
            self._node.block_recovery_service.recovery_attempts_by_block[
                block_hash] += 1
            self.start_transaction_recovery(
                block_awaiting_recovery.unknown_short_ids,
                block_awaiting_recovery.unknown_transaction_hashes, block_hash)

    def _on_block_decompressed(self, block_msg) -> None:
        pass

    def _validate_block_header_in_block_message(
            self,
            block_message: AbstractBlockMessage) -> BlockValidationResult:
        block_header_bytes = self._get_block_header_bytes_from_block_message(
            block_message)
        validation_result = self._validate_block_header(block_header_bytes)

        if not validation_result.is_valid and validation_result.block_hash:
            block_hash = validation_result.block_hash
            assert block_hash is not None
            if block_hash in self._blocks_failed_validation_history:
                block_number = eth_common_utils.block_header_number(
                    block_header_bytes)
                block_difficulty = eth_common_utils.block_header_difficulty(
                    block_header_bytes)
                self.set_last_confirmed_block_parameters(
                    block_number, block_difficulty)

        return validation_result

    def _validate_compressed_block_header(
        self, compressed_block_bytes: Union[bytearray, memoryview]
    ) -> BlockValidationResult:
        block_header_bytes = self._get_compressed_block_header_bytes(
            compressed_block_bytes)
        validation_result = self._validate_block_header(block_header_bytes)
        if not validation_result.is_valid and validation_result.block_hash:
            block_hash = validation_result.block_hash
            assert block_hash is not None
            self._blocks_failed_validation_history.add(block_hash)
        return validation_result

    def _validate_block_header(
        self, block_header_bytes: Union[bytearray,
                                        memoryview]) -> BlockValidationResult:
        if self._block_validator and self._last_confirmed_block_number and self._last_confirmed_block_difficulty:
            block_validator = self._block_validator
            assert block_validator is not None
            return block_validator.validate_block_header(
                block_header_bytes, self._last_confirmed_block_number,
                self._last_confirmed_block_difficulty)
        logger.debug(
            "Skipping block validation. Block validator - {}, last confirmed block - {}, last confirmed block difficulty - {}",
            self._block_validator, self._last_confirmed_block_number,
            self._last_confirmed_block_difficulty)
        return BlockValidationResult(True, None, None)

    def _get_compressed_block_header_bytes(
        self, compressed_block_bytes: Union[bytearray, memoryview]
    ) -> Union[bytearray, memoryview]:
        pass

    def _get_block_header_bytes_from_block_message(
            self, block_message: AbstractBlockMessage
    ) -> Union[bytearray, memoryview]:
        pass
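# The two getters above are template-method hooks: the shared flow in
# _validate_block_header_in_block_message() stays in the base class while each
# chain-specific subclass supplies the header bytes. A minimal sketch of that
# shape; the class names and the 4-byte "header" are illustrative only.
from abc import ABC, abstractmethod

class HeaderValidatorSketch(ABC):
    def validate(self, block_message: bytes) -> bool:
        header_bytes = self._get_block_header_bytes(block_message)
        return len(header_bytes) > 0  # stand-in for real header validation

    @abstractmethod
    def _get_block_header_bytes(self, block_message: bytes) -> bytes:
        ...

class PrefixHeaderValidator(HeaderValidatorSketch):
    def _get_block_header_bytes(self, block_message: bytes) -> bytes:
        return block_message[:4]  # hypothetical fixed-size header prefix

assert PrefixHeaderValidator().validate(b"deadbeef")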
Example #15
0
class EthBlockQueuingService(
        AbstractBlockQueuingService[InternalEthBlockInfo,
                                    BlockHeadersEthProtocolMessage]):
    """
    Queues, pushes blocks to the Ethereum node, and handles get headers/bodies requests.

    If there are missing blocks in the network, this class will not function optimally.
    """
    ordered_block_queue: Deque[OrderedQueuedBlock]

    block_checking_alarms: Dict[Sha256Hash, AlarmId]
    block_check_repeat_count: Dict[Sha256Hash, int]

    accepted_block_hash_at_height: ExpiringDict[int, Sha256Hash]
    sent_block_at_height: ExpiringDict[int, Sha256Hash]

    # best block sent to the Ethereum node
    best_sent_block: SentEthBlockInfo
    # best block accepted by Ethereum node
    best_accepted_block: EthBlockInfo

    _block_hashes_by_height: ExpiringDict[int, Set[Sha256Hash]]
    _height_by_block_hash: ExpiringDict[Sha256Hash, int]
    _highest_block_number: int = 0
    _recovery_alarms_by_block_hash: Dict[Sha256Hash, AlarmId]
    _next_push_alarm_id: Optional[AlarmId] = None
    _partial_chainstate: Deque[EthBlockInfo]

    def __init__(
        self,
        node: "AbstractGatewayNode",
        connection: AbstractGatewayBlockchainConnection,
    ):
        super().__init__(node, connection)
        self.node: "EthGatewayNode" = cast("EthGatewayNode", node)

        self.ordered_block_queue = deque(
            maxlen=gateway_constants.BLOCK_QUEUE_LENGTH_LIMIT)
        self.block_checking_alarms = {}
        self.block_check_repeat_count = defaultdict(int)

        self.accepted_block_hash_at_height = ExpiringDict(
            node.alarm_queue, gateway_constants.MAX_BLOCK_CACHE_TIME_S,
            f"eth_block_queue_accepted_block_by_height_{self.connection.endpoint}"
        )
        self.sent_block_at_height = ExpiringDict(
            node.alarm_queue, gateway_constants.MAX_BLOCK_CACHE_TIME_S,
            f"eth_block_queue_sent_block_at_height_{self.connection.endpoint}")
        self.best_sent_block = SentEthBlockInfo(INITIAL_BLOCK_HEIGHT,
                                                NULL_SHA256_HASH, 0)
        self.best_accepted_block = EthBlockInfo(INITIAL_BLOCK_HEIGHT,
                                                NULL_SHA256_HASH)

        self._block_hashes_by_height = ExpiringDict(
            node.alarm_queue,
            gateway_constants.MAX_BLOCK_CACHE_TIME_S,
            f"eth_block_queue_hashes_by_heights_{self.connection.endpoint}",
        )
        self._height_by_block_hash = ExpiringDict(
            node.alarm_queue, gateway_constants.MAX_BLOCK_CACHE_TIME_S,
            f"eth_block_queue_height_by_hash_{self.connection.endpoint}")
        self._recovery_alarms_by_block_hash = {}
        self._partial_chainstate = deque()

    def build_block_header_message(
            self, block_hash: Sha256Hash, block_message: InternalEthBlockInfo
    ) -> BlockHeadersEthProtocolMessage:
        if block_hash in self.node.block_parts_storage:
            block_header_bytes = self.node.block_parts_storage[
                block_hash].block_header_bytes
        else:
            block_header_bytes = (
                block_message.to_new_block_parts().block_header_bytes)
        return BlockHeadersEthProtocolMessage.from_header_bytes(
            block_header_bytes)

    def push(
        self,
        block_hash: Sha256Hash,
        block_msg: Optional[InternalEthBlockInfo] = None,
        waiting_for_recovery: bool = False,
    ) -> None:
        if block_msg is None and not waiting_for_recovery:
            raise ValueError(
                "Block message is required if not waiting for recovery of the block."
            )

        if block_hash in self._blocks:
            raise ValueError(
                f"Block with hash {block_hash} already exists in the queue.")

        if not self.can_add_block_to_queuing_service(block_hash):
            self.connection.log_debug(
                "Skipping adding {} to queue. Block already seen.", block_hash)
            return

        if waiting_for_recovery:
            self._add_to_queue(block_hash, waiting_for_recovery, block_msg)
            self.connection.log_debug(
                "Appended recovering block {} to the end of the queue (behind {} others).",
                block_hash,
                len(self.ordered_block_queue) - 1)
            self._schedule_recovery_timeout(block_hash)
            return

        assert block_msg is not None
        block_number = block_msg.block_number()
        if self._check_for_sent_or_queued_forked_block(block_hash,
                                                       block_number):
            # TODO: this line needs testing
            self.store_block_data(block_hash, block_msg)
            return

        position = self._add_to_queue(block_hash, waiting_for_recovery,
                                      block_msg)
        self.connection.log_debug(
            "Queued up block {} for sending to the blockchain node. "
            "Block is behind {} others (total size: {}).", block_hash,
            position, len(self.ordered_block_queue))
        if position == 0:
            self._schedule_alarm_for_next_item()
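# A hedged, self-contained sketch of push()'s scheduling rule above: a send
# alarm is only (re)scheduled when the new block lands at the front of the
# ordered queue; blocks queued behind others wait their turn. The queue and
# scheduler below are illustrative stand-ins, not the real service objects.
from collections import deque
from typing import Deque, List, Tuple

scheduled: List[str] = []

def schedule_alarm_for_next_item() -> None:
    scheduled.append("alarm")  # stand-in for alarm_queue.register_alarm(...)

def push_block(queue: Deque[Tuple[int, str]], block_number: int, block_hash: str) -> None:
    # insert in ascending block-number order, as _ordered_insert does
    position = sum(1 for number, _ in queue if number < block_number)
    queue.insert(position, (block_number, block_hash))
    if position == 0:
        schedule_alarm_for_next_item()

queue: Deque[Tuple[int, str]] = deque()
push_block(queue, 10, "a")  # front of empty queue -> schedules
push_block(queue, 12, "b")  # behind block 10 -> no new alarm
push_block(queue, 9, "c")   # new front -> schedules again
assert scheduled == ["alarm", "alarm"]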

    def update_recovered_block(self, block_hash: Sha256Hash,
                               block_msg: InternalEthBlockInfo) -> None:
        if block_hash not in self._blocks or block_hash in self._blocks_seen_by_blockchain_node:
            return

        self.remove_from_queue(block_hash)
        timeout_alarm = self._recovery_alarms_by_block_hash.pop(block_hash)
        self.node.alarm_queue.unregister_alarm(timeout_alarm)

        self._blocks_waiting_for_recovery[block_hash] = False

        block_number = block_msg.block_number()
        if self._is_block_stale(block_number):
            self.connection.log_info(
                "Discarding block {} at height {} in queuing service. Block is stale.",
                block_hash, block_number)
            return

        if self._check_for_sent_or_queued_forked_block(block_hash,
                                                       block_number):
            self.connection.log_debug(
                "Discarding recovered block in queuing service.")
            return

        position = self._ordered_insert(block_hash, block_number, time.time())
        self.connection.log_debug(
            "Recovered block {}. Inserting into queue behind {} blocks (total length: {})",
            block_hash, position, len(self.ordered_block_queue))
        if position == 0:
            self._schedule_alarm_for_next_item()

    def store_block_data(self, block_hash: Sha256Hash,
                         block_msg: InternalEthBlockInfo) -> None:
        super().store_block_data(block_hash, block_msg)
        self._store_block_parts(block_hash, block_msg)

    def mark_blocks_seen_by_blockchain_node(self,
                                            block_hashes: List[Sha256Hash]):
        """
        Unused by Ethereum. Requires block number to function correctly.
        """

    def mark_block_seen_by_blockchain_node(
        self,
        block_hash: Sha256Hash,
        block_message: Optional[InternalEthBlockInfo],
        block_number: Optional[int] = None,
    ) -> None:
        """
        Stores information about the block and marks the block heights to
        track the Ethereum node's blockchain state.
        Either block message or block number must be provided.

        This function may be called multiple times with blocks at the same height,
        and each subsequent call will update the currently known chain state.
        """
        if block_message is not None:
            self.store_block_data(block_hash, block_message)
            block_number = block_message.block_number()
        if block_number is None and block_hash in self._height_by_block_hash:
            block_number = self._height_by_block_hash[block_hash]

        assert block_number is not None

        super().mark_block_seen_by_blockchain_node(block_hash, block_message)
        self.accepted_block_hash_at_height[block_number] = block_hash
        best_height, _ = self.best_accepted_block
        if block_number >= best_height:
            self.best_accepted_block = EthBlockInfo(block_number, block_hash)
            if block_message or block_hash in self.node.block_parts_storage:
                self.node.publish_block(block_number, block_hash,
                                        block_message,
                                        FeedSource.BLOCKCHAIN_SOCKET)

        if block_hash in self.block_checking_alarms:
            self.node.alarm_queue.unregister_alarm(
                self.block_checking_alarms[block_hash])
            del self.block_checking_alarms[block_hash]
            self.block_check_repeat_count.pop(block_hash, 0)

        self.remove_from_queue(block_hash)
        self._schedule_alarm_for_next_item()

    def remove(self, block_hash: Sha256Hash) -> int:
        index = super().remove(block_hash)
        if block_hash in self._height_by_block_hash:
            height = self._height_by_block_hash.contents.pop(block_hash, None)
            self.connection.log_trace(
                "Removing block {} at height {} in queuing service",
                block_hash, height)
            if height:
                self._block_hashes_by_height.contents.get(
                    height, set()).discard(block_hash)
        return index

    def remove_from_queue(self, block_hash: Sha256Hash) -> int:
        index = super().remove_from_queue(block_hash)
        for i in range(len(self.ordered_block_queue)):
            if self.ordered_block_queue[i].block_hash == block_hash:
                del self.ordered_block_queue[i]
                break
        return index

    def send_block_to_node(
        self,
        block_hash: Sha256Hash,
        block_msg: Optional[InternalEthBlockInfo] = None,
    ) -> None:
        assert block_msg is not None

        # block must always be greater than previous best
        block_number = block_msg.block_number()
        best_height, _best_hash, _ = self.best_sent_block
        assert block_number > best_height

        new_block_parts = self.node.block_parts_storage[block_hash]

        if block_msg.has_total_difficulty():
            new_block_msg = block_msg.to_new_block_msg()
            super(EthBlockQueuingService,
                  self).send_block_to_node(block_hash, new_block_msg)
            self.node.set_known_total_difficulty(
                new_block_msg.block_hash(),
                new_block_msg.get_chain_difficulty())
        else:
            calculated_total_difficulty = self.node.try_calculate_total_difficulty(
                block_hash, new_block_parts)

            if calculated_total_difficulty is None:
                # Total difficulty may be unknown after a long fork or
                # if the gateway has just started. In that case, announce the
                # new block hash to the ETH node; it will request the header
                # and body separately.
                new_block_headers_msg = NewBlockHashesEthProtocolMessage.from_block_hash_number_pair(
                    block_hash, new_block_parts.block_number)
                super(EthBlockQueuingService,
                      self).send_block_to_node(block_hash,
                                               new_block_headers_msg)
            else:
                new_block_msg = NewBlockEthProtocolMessage.from_new_block_parts(
                    new_block_parts, calculated_total_difficulty)
                super(EthBlockQueuingService,
                      self).send_block_to_node(block_hash, new_block_msg)

        self.node.log_blocks_network_content(self.node.network_num, block_msg)
        self.sent_block_at_height[block_number] = block_hash
        self.best_sent_block = SentEthBlockInfo(block_number, block_hash,
                                                time.time())
        self._schedule_confirmation_check(block_hash)

        if self.node.opts.filter_txs_factor > 0:
            self.node.on_transactions_in_block(
                block_msg.to_new_block_msg().txns())

    def partial_chainstate(self, required_length: int) -> Deque[EthBlockInfo]:
        """
        Builds the current chainstate from the current head, extending it
        backward from the head until it reaches the required length.

        :param required_length: length to extend partial chainstate to if needed
        """
        best_sent_height, best_sent_hash, _ = self.best_sent_block
        if best_sent_height == INITIAL_BLOCK_HEIGHT:
            return deque()

        if len(self._partial_chainstate) == 0:
            self._partial_chainstate.append(
                EthBlockInfo(best_sent_height, best_sent_hash))

        chain_head_height, chain_head_hash = self._partial_chainstate[-1]
        if chain_head_hash != best_sent_hash:
            height = best_sent_height
            head_hash = best_sent_hash
            missing_entries = deque()
            while height > chain_head_height:

                try:
                    head = self.node.block_storage[head_hash]
                    if head is None:
                        break
                    missing_entries.appendleft(EthBlockInfo(height, head_hash))

                    head_hash = head.prev_block_hash()
                    height -= 1
                except KeyError:
                    break

            # append to partial chain state
            if head_hash == chain_head_hash:
                self._partial_chainstate.extend(missing_entries)
            # reorganization is required, rebuild to expected length
            else:
                self._partial_chainstate = missing_entries

        tail_height, tail_hash = self._partial_chainstate[0]
        tail = self.node.block_storage[tail_hash]
        assert tail is not None

        while len(self._partial_chainstate) < required_length:
            try:
                tail_hash = tail.prev_block_hash()
                tail_height -= 1
                tail = self.node.block_storage[tail_hash]
                if tail is None:
                    break
                self._partial_chainstate.appendleft(
                    EthBlockInfo(tail_height, tail_hash))
            except KeyError:
                break

        return self._partial_chainstate
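# A self-contained sketch of the backward walk partial_chainstate() performs:
# starting from the best sent block, follow prev-hash links through block
# storage until the deque reaches the required length or storage runs out.
# The block and storage shapes below are illustrative stand-ins.
from collections import deque
from typing import Deque, Dict, NamedTuple, Optional, Tuple

class FakeBlock(NamedTuple):
    prev_hash: Optional[str]

def build_partial_chainstate(
    head_height: int,
    head_hash: str,
    storage: Dict[str, FakeBlock],
    required_length: int,
) -> Deque[Tuple[int, str]]:
    chainstate: Deque[Tuple[int, str]] = deque([(head_height, head_hash)])
    tail_height, tail_hash = chainstate[0]
    while len(chainstate) < required_length:
        prev_hash = storage[tail_hash].prev_hash
        if prev_hash is None or prev_hash not in storage:
            break  # storage exhausted; return what we have
        tail_height, tail_hash = tail_height - 1, prev_hash
        chainstate.appendleft((tail_height, tail_hash))
    return chainstate

storage = {"c": FakeBlock("b"), "b": FakeBlock("a"), "a": FakeBlock(None)}
assert list(build_partial_chainstate(10, "c", storage, 3)) == [(8, "a"), (9, "b"), (10, "c")]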

    def try_send_bodies_to_node(self, block_hashes: List[Sha256Hash]) -> bool:
        """
        Creates and sends block bodies to blockchain connection.
        """
        bodies = []
        for block_hash in block_hashes:
            if block_hash not in self._blocks:
                self.connection.log_debug(
                    "{} was not found in queuing service. Aborting attempt to send bodies.",
                    block_hash)
                return False

            if not self.node.block_queuing_service_manager.is_in_common_block_storage(
                    block_hash):
                self.connection.log_debug(
                    "{} was not in the block storage. Aborting attempt to send bodies.",
                    block_hash)
                return False
            block_message = cast(InternalEthBlockInfo,
                                 self.node.block_storage[block_hash])

            if block_hash in self.node.block_parts_storage:
                block_body_bytes = self.node.block_parts_storage[
                    block_hash].block_body_bytes
            else:
                block_body_bytes = (
                    block_message.to_new_block_parts().block_body_bytes)

            partial_message = BlockBodiesEthProtocolMessage.from_body_bytes(
                block_body_bytes)
            block_bodies = partial_message.get_blocks()
            assert len(block_bodies) == 1
            bodies.append(block_bodies[0])

            height = self._height_by_block_hash.contents.get(block_hash, None)
            self.connection.log_debug(
                "Appending {} body ({}) for sending to blockchain node.",
                block_hash, height)

        full_message = BlockBodiesEthProtocolMessage(None, bodies)

        self.connection.enqueue_msg(full_message)
        return True

    def try_send_headers_to_node(self, block_hashes: List[Sha256Hash]) -> bool:
        """
        Creates and sends a block headers message to blockchain connection.

        In most cases, this method should be called with block hashes that are
        confirmed to exist in the block queuing service, but it includes safety
        checks in case they do not, and aborts if any headers are not found.
        """
        headers = []
        for block_hash in block_hashes:
            if block_hash not in self._blocks:
                self.connection.log_debug(
                    "{} was not found in queuing service. Aborting attempt to send headers.",
                    block_hash)
                return False

            if not self.node.block_queuing_service_manager.is_in_common_block_storage(
                    block_hash):
                self.connection.log_debug(
                    "{} was not in block storage. Aborting attempt to send headers",
                    block_hash)
                return False
            block_message = cast(InternalEthBlockInfo,
                                 self.node.block_storage[block_hash])

            partial_headers_message = self.build_block_header_message(
                block_hash, block_message)
            block_headers = partial_headers_message.get_block_headers()
            assert len(block_headers) == 1
            headers.append(block_headers[0])

            height = self._height_by_block_hash.contents.get(block_hash, None)
            self.connection.log_debug(
                "Appending {} header ({}) for sending to blockchain node.",
                block_hash, height)

        full_header_message = BlockHeadersEthProtocolMessage(None, headers)

        self.connection.enqueue_msg(full_header_message)
        return True

    def get_block_hashes_starting_from_hash(
            self, block_hash: Sha256Hash, max_count: int, skip: int,
            reverse: bool) -> Tuple[bool, List[Sha256Hash]]:
        """
        Finds up to max_count block hashes in queue that we still have headers
        and block messages queued up for.

        Returns (success, [found_hashes])
        """
        if block_hash not in self._blocks or block_hash not in self._height_by_block_hash:
            return False, []

        if block_hash in self._blocks_waiting_for_recovery and self._blocks_waiting_for_recovery[
                block_hash]:
            return False, []

        if not self.node.block_queuing_service_manager.is_in_common_block_storage(
                block_hash):
            return False, []

        best_height, _, _ = self.best_sent_block
        starting_height = self._height_by_block_hash[block_hash]
        look_back_length = best_height - starting_height + 1
        partial_chainstate = self.partial_chainstate(look_back_length)

        if not any(block_hash == chain_hash
                   for _, chain_hash in partial_chainstate):
            block_too_far_back = len(partial_chainstate) != look_back_length
            self.connection.log_trace(
                "Block {} is not included in the current chainstate. "
                "Returning empty set. Chainstate missing entries: {}",
                block_hash, block_too_far_back)
            return not block_too_far_back, []

        self.connection.log_trace(
            "Found block {} had height {} in queuing service. Continuing...",
            block_hash, starting_height)
        return self.get_block_hashes_starting_from_height(
            starting_height, max_count, skip, reverse)

    def get_block_hashes_starting_from_height(
        self,
        block_height: int,
        max_count: int,
        skip: int,
        reverse: bool,
    ) -> Tuple[bool, List[Sha256Hash]]:
        """
        Performs a 'best-effort' search for block hashes.

        Gives up if a fork is detected in the requested section of the chain.
        Gives up if a block is requested below the most recent block that's
        not tracked by this service.
        Returns as many blocks as possible if some of the blocks requested
        are in the future and have not been produced yet.

        The resulting list starts at `block_height`, and is ascending if
        reverse=False, and descending if reverse=True.

        Returns (success, [found_hashes])
        """
        block_hashes: List[Sha256Hash] = []
        height = block_height
        if reverse:
            lowest_requested_height = block_height - (max_count * skip)
            multiplier = -1
        else:
            lowest_requested_height = block_height
            multiplier = 1

        chain_state: Optional[Set[Sha256Hash]] = None

        while (len(block_hashes) < max_count
               and height in self._block_hashes_by_height):
            matching_hashes = self._block_hashes_by_height[height]

            # A fork has occurred: give up, and fallback to
            # remote blockchain sync
            if len(matching_hashes) > 1:
                self.connection.log_trace(
                    "Detected fork when searching for {} "
                    "block hashes starting from height {} "
                    "in queuing service.", max_count, block_height)
                if chain_state is None:
                    best_height, _, _ = self.best_sent_block
                    partial_state = self.partial_chainstate(
                        best_height - lowest_requested_height + 1)
                    chain_state = set(block_hash
                                      for _, block_hash in partial_state)

                for candidate_hash in matching_hashes:
                    if candidate_hash in chain_state:
                        block_hashes.append(candidate_hash)
                        break
                else:
                    self.connection.log_debug(
                        "Unexpectedly, none of the blocks at height {} were part "
                        "of the chainstate.", height)
                    return False, []
            else:
                block_hashes.append(next(iter(matching_hashes)))
            height += (1 + skip) * multiplier

        # If a block is requested too far in the past, abort and fallback
        # to remote blockchain sync
        if (height < self._highest_block_number
                and height not in self._block_hashes_by_height
                and max_count != len(block_hashes)):
            return False, []

        # This is fine: Ethereum expects only as many hashes as the node contains.
        if max_count != len(block_hashes):
            self.connection.log_trace(
                "Could not find all {} requested block hashes in block queuing service. Only got {}.",
                max_count, len(block_hashes))

        return True, block_hashes
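# Worked example of the height stepping in the loop above: each iteration
# advances by (1 + skip) * multiplier, where multiplier is -1 when
# reverse=True. The helper below reproduces only that arithmetic.
from typing import List

def requested_heights(start: int, max_count: int, skip: int, reverse: bool) -> List[int]:
    multiplier = -1 if reverse else 1
    return [start + i * (1 + skip) * multiplier for i in range(max_count)]

assert requested_heights(100, 3, 0, reverse=False) == [100, 101, 102]
assert requested_heights(100, 3, 1, reverse=True) == [100, 98, 96]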

    def iterate_block_hashes_starting_from_hash(
        self,
        block_hash: Sha256Hash,
        max_count: int = gateway_constants.TRACKED_BLOCK_MAX_HASH_LOOKUP
    ) -> Iterator[Sha256Hash]:
        """
        Iterate over cached block headers in descending order.
        :param block_hash: starting block hash
        :param max_count: max number of elements to return
        :return: Iterator of block hashes in descending order
        """
        block_hash_ = block_hash
        for _ in range(max_count):
            if block_hash_ and block_hash_ in self.node.block_parts_storage:
                yield block_hash_
                block_hash_ = self.node.block_parts_storage[
                    block_hash_].get_previous_block_hash()
            else:
                break

    def iterate_recent_block_hashes(
        self,
        max_count: int = gateway_constants.TRACKED_BLOCK_MAX_HASH_LOOKUP
    ) -> Iterator[Sha256Hash]:
        """
        :param max_count: max number of elements to return
        :return: Iterator[Sha256Hash] in descending order (last -> first)
        """
        if self._highest_block_number not in self._block_hashes_by_height:
            return iter([])
        block_hashes = self._block_hashes_by_height[self._highest_block_number]
        block_hash = next(iter(block_hashes))
        if len(block_hashes) > 1:
            logger.debug(
                "iterating over queued blocks starting from a possible fork {}",
                block_hash)

        return self.iterate_block_hashes_starting_from_hash(
            block_hash, max_count=max_count)

    def get_block_parts(self,
                        block_hash: Sha256Hash) -> Optional[NewBlockParts]:
        if block_hash not in self.node.block_parts_storage:
            self.connection.log_debug(
                "requested transaction info for a block {} not in the queueing service",
                block_hash,
            )
            return None
        return self.node.block_parts_storage[block_hash]

    def get_block_body_from_message(
            self,
            block_hash: Sha256Hash) -> Optional[BlockBodiesEthProtocolMessage]:
        block_parts = self.get_block_parts(block_hash)
        if block_parts is None:
            return None
        return BlockBodiesEthProtocolMessage.from_body_bytes(
            block_parts.block_body_bytes)

    def log_memory_stats(self) -> None:
        hooks.add_obj_mem_stats(
            self.__class__.__name__,
            self.node.network_num,
            self.block_checking_alarms,
            "block_queue_block_checking_alarms",
            ObjectSize(
                size=len(self.block_checking_alarms) *
                (crypto.SHA256_HASH_LEN + constants.UL_INT_SIZE_IN_BYTES),
                flat_size=0,
                is_actual_size=False),
            object_item_count=len(self.block_checking_alarms),
            object_type=memory_utils.ObjectType.BASE,
            size_type=memory_utils.SizeType.ESTIMATE)

        hooks.add_obj_mem_stats(
            self.__class__.__name__,
            self.node.network_num,
            self.block_check_repeat_count,
            "block_queue_block_repeat_count",
            ObjectSize(
                size=len(self.block_check_repeat_count) *
                (crypto.SHA256_HASH_LEN + constants.UL_INT_SIZE_IN_BYTES),
                flat_size=0,
                is_actual_size=False),
            object_item_count=len(self.block_check_repeat_count),
            object_type=memory_utils.ObjectType.BASE,
            size_type=memory_utils.SizeType.ESTIMATE)

    def _store_block_parts(self, block_hash: Sha256Hash,
                           block_message: InternalEthBlockInfo) -> None:
        new_block_parts = block_message.to_new_block_parts()
        if block_hash not in self.node.block_parts_storage:
            self.node.block_parts_storage[block_hash] = new_block_parts
        block_number = block_message.block_number()
        if block_number > 0:
            self.connection.log_trace(
                "Adding headers for block {} at height: {} in queuing service",
                block_hash, block_number)
            if block_number in self._block_hashes_by_height:
                self._block_hashes_by_height[block_number].add(block_hash)
            else:
                self._block_hashes_by_height.add(block_number, {block_hash})
            self._height_by_block_hash[block_hash] = block_number
            if block_number > self._highest_block_number:
                self._highest_block_number = block_number
        else:
            logger.trace("No block height could be parsed for block: {}",
                         block_hash)

    def _schedule_confirmation_check(self, block_hash: Sha256Hash) -> None:
        self.block_checking_alarms[
            block_hash] = self.node.alarm_queue.register_alarm(
                eth_common_constants.CHECK_BLOCK_RECEIPT_DELAY_S,
                self._check_for_block_on_repeat,
                block_hash,
            )

    def _check_for_block_on_repeat(self, block_hash: Sha256Hash) -> float:
        get_confirmation_message = GetBlockHeadersEthProtocolMessage(
            None, block_hash.binary, 1, 0, 0)
        self.connection.enqueue_msg(get_confirmation_message)

        if self.block_check_repeat_count[
                block_hash] < eth_common_constants.CHECK_BLOCK_RECEIPT_MAX_COUNT:
            self.block_check_repeat_count[block_hash] += 1
            return eth_common_constants.CHECK_BLOCK_RECEIPT_INTERVAL_S
        else:
            del self.block_check_repeat_count[block_hash]
            del self.block_checking_alarms[block_hash]
            return constants.CANCEL_ALARMS
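# _check_for_block_on_repeat relies on the alarm-queue convention that a
# callback's return value is the delay before it fires again, with
# constants.CANCEL_ALARMS stopping it. A toy driver illustrating that
# contract; CANCEL_ALARMS = 0 here is a stand-in, not the real constant.
CANCEL_ALARMS = 0
CHECK_INTERVAL_S = 1.0  # hypothetical re-check interval
MAX_CHECKS = 3

class ConfirmationChecker:
    def __init__(self) -> None:
        self.repeat_count = 0

    def check(self) -> float:
        if self.repeat_count < MAX_CHECKS:
            self.repeat_count += 1
            return CHECK_INTERVAL_S  # fire again after this many seconds
        return CANCEL_ALARMS  # unregister the alarm

checker = ConfirmationChecker()
fired_delays = []
while True:
    next_delay = checker.check()
    if next_delay == CANCEL_ALARMS:
        break
    fired_delays.append(next_delay)

assert fired_delays == [1.0, 1.0, 1.0]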

    def _schedule_alarm_for_next_item(self) -> None:
        """
        Sends the next top block if immediately available, otherwise schedules
        an alarm to check back in. Cancels all other instances of this alarm.
        Cleans out all stale blocks at front of queue.
        """
        next_push_alarm_id = self._next_push_alarm_id
        if next_push_alarm_id is not None:
            self.node.alarm_queue.unregister_alarm(next_push_alarm_id)
            self._next_push_alarm_id = None

        if len(self.ordered_block_queue) == 0:
            return

        while self.ordered_block_queue:
            block_hash, queued_time, block_number = self.ordered_block_queue[0]
            waiting_recovery = self._blocks_waiting_for_recovery[block_hash]

            if waiting_recovery:
                return

            assert block_number is not None

            if self._is_block_stale(block_number):
                self.connection.log_info(
                    "Discarding block {} from queuing service at height {}. Block is stale.",
                    block_hash, block_number)
                self.remove_from_queue(block_hash)
                continue

            block_msg = cast(InternalEthBlockInfo,
                             self.node.block_storage[block_hash])
            assert block_msg is not None
            self._try_immediate_send(block_hash, block_number, block_msg)
            _best_height, _best_hash, sent_time = self.best_sent_block
            elapsed_time = time.time() - sent_time
            timeout = self.node.opts.max_block_interval_s - elapsed_time
            self._run_or_schedule_alarm(timeout, self._send_top_block_to_node)
            break

    def _schedule_recovery_timeout(self, block_hash: Sha256Hash) -> None:
        self._recovery_alarms_by_block_hash[
            block_hash] = self.node.alarm_queue.register_alarm(
                gateway_constants.BLOCK_RECOVERY_MAX_QUEUE_TIME,
                self._on_block_recovery_timeout, block_hash)

    def _run_or_schedule_alarm(self, timeout: float, func: Callable) -> None:
        if timeout > 0:
            self._next_push_alarm_id = self.node.alarm_queue.register_alarm(
                timeout, func)
        elif not self.node.has_active_blockchain_peer():
            self.node.alarm_queue.register_alarm(
                gateway_constants.NODE_READINESS_FOR_BLOCKS_CHECK_INTERVAL_S,
                func,
            )
        else:
            func()

    def _send_top_block_to_node(self) -> None:
        if len(self.ordered_block_queue) == 0:
            return

        if not self.node.has_active_blockchain_peer():
            self._schedule_alarm_for_next_item()
            return

        block_hash, timestamp, _ = self.ordered_block_queue[0]
        waiting_recovery = self._blocks_waiting_for_recovery[block_hash]

        if waiting_recovery:
            self.connection.log_debug(
                "Unable to send block to node, requires recovery. "
                "Block hash {}.", block_hash)
            self._schedule_alarm_for_next_item()
            return

        block_msg = cast(InternalEthBlockInfo,
                         self.node.block_storage[block_hash])
        self.remove_from_queue(block_hash)

        self.send_block_to_node(block_hash, block_msg)

        self._schedule_alarm_for_next_item()
        return

    def _on_block_recovery_timeout(self, block_hash: Sha256Hash) -> None:
        self.connection.log_debug(
            "Removing block {} from queue. Recovery period has timed out.",
            block_hash)
        self.remove_from_queue(block_hash)
        if block_hash in self._blocks and self.node.block_queuing_service_manager.get_block_data(
                block_hash) is None:
            self.remove(block_hash)

    def _check_for_sent_or_queued_forked_block(self, block_hash: Sha256Hash,
                                               block_number: int) -> bool:
        """
        Returns True if a block has already been queued or sent at the same height.
        """

        is_duplicate = False
        more_info = ""
        for queued_block_hash, timestamp in self._block_queue:
            if (not self._blocks_waiting_for_recovery[queued_block_hash]
                    and queued_block_hash in self._height_by_block_hash):
                if block_number == self._height_by_block_hash[
                        queued_block_hash]:
                    self.connection.log_info(
                        "In queuing service, fork detected at height {}. Setting aside block {} in favor of {}.",
                        block_number, block_hash, queued_block_hash)
                    is_duplicate = True
                    more_info = "already queued"

        if block_number in self.sent_block_at_height:
            self.connection.log_info(
                "In queuing service, fork detected at height {}. "
                "Setting aside block {} in favor of already sent {}.",
                block_number, block_hash,
                self.sent_block_at_height[block_number])
            is_duplicate = True
            more_info = "already sent"

        if block_number in self.accepted_block_hash_at_height:
            self.connection.log_info(
                "In queuing service, fork detected at height {}. "
                "Setting aside block {} in favor of already accepted {}.",
                block_number, block_hash,
                self.accepted_block_hash_at_height[block_number])
            is_duplicate = True
            more_info = "already accepted"

        if is_duplicate:
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_IGNORE_DUPLICATE_HEIGHT,
                self.node.network_num,
                more_info=more_info)

        return is_duplicate

    def _is_block_stale(self, block_number: int) -> bool:
        """
        Check the best sent block to ensure the current block isn't in the past.

        No need to check the ordered queue, since the best sent block is always below its smallest entry.
        """
        best_sent_height, _, _ = self.best_sent_block
        best_accepted_height, _ = self.best_accepted_block
        return block_number <= best_sent_height or block_number <= best_accepted_height

    def _add_to_queue(self, block_hash: Sha256Hash, waiting_for_recovery: bool,
                      block_msg: Optional[InternalEthBlockInfo]) -> int:
        timestamp = time.time()
        self._block_queue.append(BlockQueueEntry(block_hash, timestamp))
        self._blocks_waiting_for_recovery[block_hash] = waiting_for_recovery
        self._blocks.add(block_hash)

        if block_msg is not None:
            self.store_block_data(block_hash, block_msg)
            return self._ordered_insert(block_hash, block_msg.block_number(),
                                        timestamp)
        else:
            # blocks with no number go to the end of the queue
            self.ordered_block_queue.append(
                OrderedQueuedBlock(block_hash, timestamp, None))
            return len(self.ordered_block_queue) - 1

    def _ordered_insert(self, block_hash: Sha256Hash, block_number: int,
                        timestamp: float) -> int:
        index = 0
        while index < len(self.ordered_block_queue):
            queued_block_number = self.ordered_block_queue[index].block_number
            if (queued_block_number is not None
                    and block_number > queued_block_number):
                index += 1
            else:
                break

        if index == len(self.ordered_block_queue):
            self.ordered_block_queue.append(
                OrderedQueuedBlock(block_hash, timestamp, block_number))
            return index

        if block_number == self.ordered_block_queue[index].block_number:
            raise ValueError(
                f"Cannot insert block with duplicate number {block_number}")

        self.ordered_block_queue.insert(
            index, OrderedQueuedBlock(block_hash, timestamp, block_number))
        return index
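# A compact re-implementation of the ordered insert above, to show where a
# recovered block lands relative to queued block numbers (entries with no
# number sort to the end). Names and tuple shapes are illustrative.
from collections import deque
from typing import Deque, Optional, Tuple

def ordered_insert(queue: Deque[Tuple[str, Optional[int]]],
                   block_hash: str, block_number: int) -> int:
    index = 0
    while index < len(queue):
        queued_number = queue[index][1]
        if queued_number is not None and block_number > queued_number:
            index += 1
        else:
            break
    queue.insert(index, (block_hash, block_number))
    return index

queue: Deque[Tuple[str, Optional[int]]] = deque([("a", 5), ("b", 7), ("c", None)])
assert ordered_insert(queue, "d", 6) == 1  # lands between heights 5 and 7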

    def _try_immediate_send(self, block_hash: Sha256Hash, block_number: int,
                            block_msg: InternalEthBlockInfo) -> bool:
        best_sent_height, _, _ = self.best_sent_block
        best_height, _ = self.best_accepted_block
        if ((best_height == INITIAL_BLOCK_HEIGHT
             and best_sent_height == INITIAL_BLOCK_HEIGHT)
                or best_height + 1 == block_number):
            self.connection.log_debug(
                "Immediately propagating block {} at height {}. Block is of the next expected height.",
                block_hash, block_number)
            self.send_block_to_node(block_hash, block_msg)
            self.remove_from_queue(block_hash)
            self.node.publish_block(block_number, block_hash, block_msg,
                                    FeedSource.BDN_SOCKET)
            return True
        return False
Example #16
0
    def setUp(self):
        self.ALARM_QUEUE = AlarmQueue()
        self.e_dict = ExpiringDict(self.ALARM_QUEUE, self.EXPIRATION_TIME_S)
Example #17
0
    def __init__(self, opts, node_ssl_service: NodeSSLService) -> None:
        super(EthGatewayNode, self).__init__(
            opts, node_ssl_service,
            eth_common_constants.TRACKED_BLOCK_CLEANUP_INTERVAL_S)

        self.remote_blockchain_protocol_version = eth_common_constants.ETH_PROTOCOL_VERSION
        self.remote_blockchain_connection_established = False

        self._node_public_key = None
        self._remote_public_key = None

        if opts.node_public_key is not None:
            self._node_public_key = convert.hex_to_bytes(opts.node_public_key)
        elif opts.blockchain_peers is None:
            raise RuntimeError(
                "128 digit public key must be included with command-line specified blockchain peer."
            )
        if opts.remote_blockchain_peer is not None:
            if opts.remote_public_key is None:
                raise RuntimeError(
                    "128 digit public key must be included with command-line specified remote blockchain peer."
                )
            else:
                self._remote_public_key = convert.hex_to_bytes(
                    opts.remote_public_key)

        self.block_processing_service: EthBlockProcessingService = EthBlockProcessingService(
            self)
        self.block_parts_storage: ExpiringDict[
            Sha256Hash, NewBlockParts] = ExpiringDict(
                self.alarm_queue,
                gateway_constants.MAX_BLOCK_CACHE_TIME_S,
                "eth_block_queue_parts",
            )

        # List of known total difficulties, tuples of values (block hash, total difficulty)
        self._last_known_difficulties = deque(
            maxlen=eth_common_constants.LAST_KNOWN_TOTAL_DIFFICULTIES_MAX_COUNT
        )

        # queue of the block hashes requested from remote blockchain node during sync
        self._requested_remote_blocks_queue = deque()

        # number of remote block requests to skip in case requests and responses get out of sync
        self._skip_remote_block_requests_stats_count = 0

        self.init_eth_gateway_stat_logging()
        self.init_eth_on_block_feed_stat_logging()

        self.message_converter = converter_factory.create_eth_message_converter(
            self.opts)
        self.eth_ws_proxy_publisher = EthWsProxyPublisher(
            opts.eth_ws_uri, self.feed_manager, self._tx_service, self)
        if self.opts.ws and not self.opts.eth_ws_uri:
            logger.warning(log_messages.ETH_WS_SUBSCRIBER_NOT_STARTED)

        self.average_block_gas_price = RunningAverage(
            gateway_constants.ETH_GAS_RUNNING_AVERAGE_SIZE)
        self.min_tx_from_node_gas_price = IntervalMinimum(
            gateway_constants.ETH_MIN_GAS_INTERVAL_S, self.alarm_queue)

        logger.info("Gateway enode url: {}", self.get_enode())
Example #18
0
    def __init__(self, node):
        self._node: AbstractGatewayNode = node
        self._holds = ExpiringDict(node.alarm_queue,
                                   node.opts.blockchain_block_hold_timeout_s,
                                   "block_processing_holds")
Example #19
0
class ExpiringDictTests(unittest.TestCase):
    EXPIRATION_TIME_S = 1

    def setUp(self):
        self.ALARM_QUEUE = AlarmQueue()
        self.e_dict = ExpiringDict(self.ALARM_QUEUE, self.EXPIRATION_TIME_S)

    def test_cleanup(self):

        kv1 = (1, 2)
        kv2 = (3, 4)
        kv3 = (5, 6)
        kv4 = (7, 8)
        kv5 = ("str1", 1)
        kv6 = ("str2", 2)

        # adding first 2 items to the dict
        self.e_dict.add(kv1[0], kv1[1])
        self.e_dict.add(kv2[0], kv2[1])

        time.time = MagicMock(return_value=time.time() +
                              self.EXPIRATION_TIME_S + 1)

        self.assertEqual(len(self.e_dict.contents), 2)
        self.assertTrue(kv1[0] in self.e_dict.contents)
        self.assertTrue(kv2[0] in self.e_dict.contents)
        self.assertEqual(self.e_dict.contents[kv1[0]], kv1[1])
        self.assertEqual(self.e_dict.contents[kv2[0]], kv2[1])

        # adding last 2 items to the dict
        self.e_dict.add(kv3[0], kv3[1])
        self.e_dict.add(kv4[0], kv4[1])
        self.e_dict.add(kv5[0], kv5[1])
        self.e_dict.add(kv6[0], kv6[1])

        self.ALARM_QUEUE.fire_alarms()

        # first 2 items are expired, last two have not
        self.assertFalse(kv1[0] in self.e_dict.contents)
        self.assertFalse(kv2[0] in self.e_dict.contents)
        self.assertTrue(kv3[0] in self.e_dict.contents)
        self.assertTrue(kv4[0] in self.e_dict.contents)
        self.assertTrue(kv5[0] in self.e_dict.contents)
        self.assertTrue(kv6[0] in self.e_dict.contents)

    def test_remove_item(self):

        kv1 = (1, 2)
        self.e_dict.add(kv1[0], kv1[1])
        self.assertTrue(kv1[0] in self.e_dict.contents)
        self.e_dict.remove_item(kv1[0])
        self.assertFalse(kv1[0] in self.e_dict.contents)

    def test_cleanup__not_existing_item(self):

        kv1 = (1, 2)
        self.e_dict.add(kv1[0], kv1[1])
        self.assertTrue(kv1[0] in self.e_dict.contents)
        self.e_dict.remove_item(kv1[0])
        self.assertFalse(kv1[0] in self.e_dict.contents)

        time.time = MagicMock(return_value=time.time() +
                              self.EXPIRATION_TIME_S + 1)

        self.ALARM_QUEUE.fire_alarms()

        self.assertFalse(kv1[0] in self.e_dict.contents)
Example #20
0
    def __init__(self,
                 opts: CommonOpts,
                 node_ssl_service: NodeSSLService,
                 connection_pool: Optional[ConnectionPool] = None):
        # Event handling queue for delayed events
        self.alarm_queue = AlarmQueue()
        self.node_ssl_service = node_ssl_service
        logger.debug("Initializing node of type: {}", self.NODE_TYPE)
        self.server_endpoints = [
            IpEndpoint(constants.LISTEN_ON_IP_ADDRESS, opts.external_port),
            # TODO: remove this after v1 is no longer supported
            IpEndpoint(constants.LISTEN_ON_IP_ADDRESS, opts.non_ssl_port)
        ]

        self.set_node_config_opts_from_sdn(opts)
        self.opts: CommonOpts = opts
        self.pending_connection_requests: Set[ConnectionPeerInfo] = set()
        self.pending_connection_attempts: Set[ConnectionPeerInfo] = set()
        self.recent_connections: ExpiringDict[str, int] = ExpiringDict(
            self.alarm_queue,
            constants.THROTTLE_RECONNECT_TIME_S,
            name="recent_connections")
        self.outbound_peers: Set[OutboundPeerModel] = opts.outbound_peers.copy()

        if connection_pool is not None:
            self.connection_pool = connection_pool
        else:
            self.connection_pool = ConnectionPool()

        self.should_force_exit = False
        self.should_restart_on_high_memory = False

        self.num_retries_by_ip: Dict[Tuple[str, int], int] = defaultdict(int)

        self.init_node_status_logging()
        self.init_throughput_logging()
        self.init_node_info_logging()
        self.init_memory_stats_logging()
        self.init_block_stats_logging()
        self.init_tx_stats_logging()

        # TODO: clean this up alongside outputbuffer holding time
        # this is Nagle's algorithm and we need to implement it properly
        # flush buffers regularly because of output buffer holding time
        self.alarm_queue.register_approx_alarm(
            self.FLUSH_SEND_BUFFERS_INTERVAL,
            constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME,
            self.flush_all_send_buffers)

        self.network_num = opts.blockchain_network_num
        self.broadcast_service = self.get_broadcast_service()

        # converting setting in MB to bytes
        self.next_report_mem_usage_bytes = self.opts.dump_detailed_report_at_memory_usage * 1024 * 1024

        if opts.dump_removed_short_ids:
            os.makedirs(opts.dump_removed_short_ids_path, exist_ok=True)

        # each time a network has an update regarding txs, blocks, etc., register it in a dict;
        # this way we can verify whether the node lost connection to a requested relay.

        self.last_sync_message_received_by_network: Dict[int, float] = {}

        self.start_sync_time: Optional[float] = None
        self.sync_metrics: Dict[int, Counter] = defaultdict(Counter)
        self.sync_short_id_buckets: Dict[
            int,
            TransactionShortIdBuckets] = defaultdict(TransactionShortIdBuckets)

        opts.has_fully_updated_tx_service = False

        self.check_sync_relay_connections_alarm_id: Optional[AlarmId] = None
        self.transaction_sync_timeout_alarm_id: Optional[AlarmId] = None

        self.requester = ThreadedRequestService(
            # pyre-fixme[16]: `Optional` has no attribute `name`.
            self.NODE_TYPE.name.lower(),
            self.alarm_queue,
            constants.THREADED_HTTP_POOL_SLEEP_INTERVAL_S)

        self._last_responsiveness_check_log_time = time.time()
        self._last_responsiveness_check_details = {}
        self.gc_logging_enabled = False
        self.serialized_message_cache = SerializedMessageCache(
            self.alarm_queue)

        self.alarm_queue.register_alarm(
            constants.RESPONSIVENESS_CHECK_INTERVAL_S,
            self._responsiveness_check_log)
Example #21
0
class InternalNodeConnection(AbstractConnection[Node]):
    __metaclass__ = ABCMeta

    def __init__(self, sock, address, node, from_me=False):
        super(InternalNodeConnection, self).__init__(sock, address, node, from_me)

        # Enable buffering only on internal connections
        self.enable_buffered_send = node.opts.enable_buffered_send
        self.outputbuf = OutputBuffer(enable_buffering=self.enable_buffered_send)

        self.network_num = node.network_num
        self.version_manager = bloxroute_version_manager

        # Setting default protocol version and message factory; override when hello message received
        self.message_factory = bloxroute_message_factory
        self.protocol_version = self.version_manager.CURRENT_PROTOCOL_VERSION

        self.ping_message = PingMessage()
        self.pong_message = PongMessage()
        self.ack_message = AckMessage()

        self.can_send_pings = True
        self.ping_message_timestamps = ExpiringDict(self.node.alarm_queue, constants.REQUEST_EXPIRATION_TIME)
        self.message_validator = BloxrouteMessageValidator(None, self.protocol_version)

    def disable_buffering(self):
        """
        Disable buffering on this particular connection.
        :return:
        """
        self.enable_buffered_send = False
        self.outputbuf.flush()
        self.outputbuf.enable_buffering = False
        self.socket_connection.send()

    def set_protocol_version_and_message_factory(self):
        """
        Gets the protocol version from the first bytes of the hello message if not known.
        Sets the protocol version and creates a message factory for that protocol version.
        """

        # Outgoing connections use current version of protocol and message factory
        if self.from_me or self.state & ConnectionState.HELLO_RECVD:
            return True

        protocol_version = self.version_manager.get_connection_protocol_version(self.inputbuf)

        if protocol_version is None:
            return False

        if not self.version_manager.is_protocol_supported(protocol_version):
            self.log_debug(
                "Protocol version {} of remote node '{}' is not supported. Closing connection.",
                protocol_version,
                self.peer_desc
            )
            self.mark_for_close()
            return False

        self.protocol_version = protocol_version
        self.message_factory = self.version_manager.get_message_factory_for_version(protocol_version)

        self.log_trace("Detected incoming connection with protocol version {}".format(protocol_version))

        return True

    def pre_process_msg(self):
        success = self.set_protocol_version_and_message_factory()

        if not success:
            return False, None, None

        return super(InternalNodeConnection, self).pre_process_msg()

    def enqueue_msg(self, msg, prepend=False):
        if self.state & ConnectionState.MARK_FOR_CLOSE:
            return

        if self.protocol_version < self.version_manager.CURRENT_PROTOCOL_VERSION:
            versioned_message = self.version_manager.convert_message_to_older_version(self.protocol_version, msg)
        else:
            versioned_message = msg

        super(InternalNodeConnection, self).enqueue_msg(versioned_message, prepend)

    def pop_next_message(self, payload_len):
        msg = super(InternalNodeConnection, self).pop_next_message(payload_len)

        if msg is None or self.protocol_version >= self.version_manager.CURRENT_PROTOCOL_VERSION:
            return msg

        versioned_msg = self.version_manager.convert_message_from_older_version(self.protocol_version, msg)

        return versioned_msg

    def msg_hello(self, msg):
        super(InternalNodeConnection, self).msg_hello(msg)

        if self.state & ConnectionState.MARK_FOR_CLOSE:
            self.log_trace("Connection has been closed: {}, Ignoring: {} ", self, msg)
            return

        network_num = msg.network_num()

        if self.node.network_num != constants.ALL_NETWORK_NUM and network_num != self.node.network_num:
            self.log_warning(
                "Network number mismatch. Current network num {}, remote network num {}. Closing connection.",
                self.node.network_num, network_num)
            self.mark_for_close()
            return

        self.network_num = network_num
        self.node.alarm_queue.register_alarm(self.ping_interval_s, self.send_ping)

    def peek_broadcast_msg_network_num(self, input_buffer):

        if self.protocol_version == 1:
            return constants.DEFAULT_NETWORK_NUM

        return BroadcastMessage.peek_network_num(input_buffer)

    def send_ping(self):
        """
        Send a ping (and reschedule if called from alarm queue)
        """
        if self.can_send_pings and not self.state & ConnectionState.MARK_FOR_CLOSE:
            nonce = nonce_generator.get_nonce()
            msg = PingMessage(nonce=nonce)
            self.enqueue_msg(msg)
            self.ping_message_timestamps.add(nonce, time.time())
            return self.ping_interval_s
        return constants.CANCEL_ALARMS

    def msg_ping(self, msg):
        nonce = msg.nonce()
        self.enqueue_msg(PongMessage(nonce=nonce))

    def msg_pong(self, msg: PongMessage):
        nonce = msg.nonce()
        if nonce in self.ping_message_timestamps.contents:
            request_msg_timestamp = self.ping_message_timestamps.contents[nonce]
            request_response_time = time.time() - request_msg_timestamp
            self.log_trace("Pong for nonce {} had response time: {}", msg.nonce(), request_response_time)
            hooks.add_measurement(self.peer_desc, MeasurementType.PING, request_response_time)
        elif nonce is not None:
            self.log_debug("Pong message had no matching ping request. Nonce: {}", nonce)

    def msg_tx_service_sync_txs(self, msg: TxServiceSyncTxsMessage):
        """
        Handles a transaction service sync message carrying txs data.
        """
        network_num = msg.network_num()
        self.node.last_sync_message_received_by_network[network_num] = time.time()
        tx_service = self.node.get_tx_service(network_num)
        txs_content_short_ids = msg.txs_content_short_ids()

        for tx_content_short_ids in txs_content_short_ids:
            tx_hash = tx_content_short_ids.tx_hash
            tx_service.set_transaction_contents(tx_hash, tx_content_short_ids.tx_content)
            for short_id in tx_content_short_ids.short_ids:
                tx_service.assign_short_id(tx_hash, short_id)

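    # Shape sketch (added; field names taken from the accesses above): each
    # entry in txs_content_short_ids pairs a transaction hash and its contents
    # with every short id assigned to it, e.g. roughly
    #
    #   TxContentShortIds(tx_hash=Sha256Hash(...), tx_content=<bytes>,
    #                     short_ids=[101, 102])
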
    def _create_txs_service_msg(self, network_num: int,
                                tx_service_snap: List[Sha256Hash]
                                ) -> List[TxContentShortIds]:
        txs_content_short_ids: List[TxContentShortIds] = []
        txs_msg_len = 0
        tx_service = self.node.get_tx_service(network_num)

        while tx_service_snap:
            tx_hash = tx_service_snap.pop()
            tx_content_short_ids = TxContentShortIds(
                tx_hash,
                tx_service.get_transaction_by_hash(tx_hash),
                tx_service.get_short_ids(tx_hash)
            )

            txs_msg_len += txs_serializer.get_serialized_tx_content_short_ids_bytes_len(tx_content_short_ids)

            txs_content_short_ids.append(tx_content_short_ids)
            if txs_msg_len >= constants.TXS_MSG_SIZE:
                break

        return txs_content_short_ids

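    # Batching note (added commentary): _create_txs_service_msg pops hashes
    # off the snapshot until the serialized payload reaches
    # constants.TXS_MSG_SIZE, so each sync message stays bounded while the
    # in-place pops shrink tx_service_snap for the next call.
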
    def send_tx_service_sync_req(self, network_num: int):
        """
        Send a transaction service sync request for the given network.
        """
        self.enqueue_msg(TxServiceSyncReqMessage(network_num))

    def send_tx_service_sync_complete(self, network_num: int):
        self.enqueue_msg(TxServiceSyncCompleteMessage(network_num))

    def send_tx_service_sync_blocks_short_ids(self, network_num: int):
        blocks_short_ids: List[BlockShortIds] = []
        start_time = time.time()
        for block_hash, short_ids in self.node.get_tx_service(network_num).iter_short_ids_seen_in_block():
            blocks_short_ids.append(BlockShortIds(block_hash, short_ids))

        block_short_ids_msg = TxServiceSyncBlocksShortIdsMessage(network_num, blocks_short_ids)
        duration = time.time() - start_time
        self.log_trace("Sending {} block short ids took {:.3f} seconds.", len(blocks_short_ids), duration)
        self.enqueue_msg(block_short_ids_msg)

    def send_tx_service_sync_txs(self, network_num: int,
                                 tx_service_snap: List[Sha256Hash],
                                 duration: float = 0, msgs_count: int = 0,
                                 total_tx_count: int = 0,
                                 sending_tx_msgs_start_time: float = 0):
        # keep sending batches until the overall sync send exceeds the timeout
        if (time.time() - sending_tx_msgs_start_time) < constants.SENDING_TX_MSGS_TIMEOUT_MS:
            if tx_service_snap:
                start = time.time()
                txs_content_short_ids = self._create_txs_service_msg(network_num, tx_service_snap)
                self.enqueue_msg(TxServiceSyncTxsMessage(network_num, txs_content_short_ids))
                duration += time.time() - start
                msgs_count += 1
                total_tx_count += len(txs_content_short_ids)
            # check tx_service_snap again: if messages remain, schedule the
            # next batch; otherwise there is no need to wait
            # TX_SERVICE_SYNC_TXS_S seconds before finishing up
            if tx_service_snap:
                self.node.alarm_queue.register_alarm(
                    constants.TX_SERVICE_SYNC_TXS_S,
                    self.send_tx_service_sync_txs, network_num,
                    tx_service_snap, duration, msgs_count, total_tx_count,
                    sending_tx_msgs_start_time
                )
            else:   # if all txs were sent, send complete msg
                self.log_trace("Sending {} transactions and {} messages took {:.3f} seconds.",
                               total_tx_count, msgs_count, duration)
                self.send_tx_service_sync_complete(network_num)
        else:   # time is up: consider this peer synced and give up
            self.log_trace("Sending {} transactions and {} messages took more than {} seconds. Giving up.",
                           total_tx_count, msgs_count, constants.SENDING_TX_MSGS_TIMEOUT_MS)
            self.send_tx_service_sync_complete(network_num)

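    # Illustrative driver (added; `get_snapshot` is a hypothetical accessor,
    # not shown in this example): a sync pass would presumably start with a
    # fresh snapshot and timestamp, e.g.
    #
    #   snap = node.get_tx_service(network_num).get_snapshot()
    #   conn.send_tx_service_sync_txs(network_num, snap,
    #                                 sending_tx_msgs_start_time=time.time())
    #
    # after which the method re-registers itself on the alarm queue until the
    # snapshot drains or the timeout hits.
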
    def msg_tx_service_sync_complete(self, msg: TxServiceSyncCompleteMessage):
        network_num = msg.network_num()
        self.node.last_sync_message_received_by_network.pop(network_num, None)
        self.node.on_fully_updated_tx_service()

class InternalNodeConnection(AbstractConnection[Node]):
    __metaclass__ = ABCMeta

    def __init__(self, sock: AbstractSocketConnectionProtocol,
                 node: Node) -> None:
        super(InternalNodeConnection, self).__init__(sock, node)

        # Enable buffering only on internal connections
        self.enable_buffered_send = node.opts.enable_buffered_send
        self.outputbuf = OutputBuffer(
            enable_buffering=self.enable_buffered_send)

        self.network_num = node.network_num
        self.version_manager = bloxroute_version_manager

        # Setting default protocol version; override when hello message received
        self.protocol_version = self.version_manager.CURRENT_PROTOCOL_VERSION

        self.pong_message = PongMessage()
        self.ack_message = AckMessage()

        self.can_send_pings = True
        self.pong_timeout_enabled = True

        self.ping_message_timestamps = ExpiringDict(
            self.node.alarm_queue, constants.REQUEST_EXPIRATION_TIME,
            f"{str(self)}_ping_timestamps")

        self.sync_ping_latencies: Dict[int, Optional[float]] = {}
        self._nonce_to_network_num: Dict[int, int] = {}
        self.message_validator = BloxrouteMessageValidator(
            None, self.protocol_version)
        self.tx_sync_service = TxSyncService(self)
        self.inbound_peer_latency: float = time.time()

    def connection_message_factory(self) -> AbstractMessageFactory:
        return bloxroute_message_factory

    def ping_message(self) -> AbstractMessage:
        nonce = nonce_generator.get_nonce()
        self.ping_message_timestamps.add(nonce, time.time())
        return PingMessage(nonce)

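    # Note (added commentary): nonces from nonce_generator evidently embed a
    # timestamp (see the get_timestamp_from_nonce calls in msg_ping/msg_pong
    # below), which is what allows one-way latency estimates to be derived
    # from a nonce alone.
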
    def disable_buffering(self):
        """
        Disable buffering on this particular connection and flush any
        buffered bytes immediately.
        """
        self.enable_buffered_send = False
        self.outputbuf.flush()
        self.outputbuf.enable_buffering = False
        self.socket_connection.send()

    def set_protocol_version_and_message_factory(self) -> bool:
        """
        Gets the protocol version from the first bytes of the hello message if
        it is not yet known, then sets the protocol version and creates the
        message factory for that version.
        """

        # Outgoing connections use current version of protocol and message factory
        if ConnectionState.HELLO_RECVD in self.state:
            return True

        protocol_version = self.version_manager.get_connection_protocol_version(
            self.inputbuf)

        if protocol_version is None:
            return False

        if not self.version_manager.is_protocol_supported(protocol_version):
            self.log_debug(
                "Protocol version {} of remote node '{}' is not supported. Closing connection.",
                protocol_version, self.peer_desc)
            self.mark_for_close()
            return False

        if protocol_version > self.version_manager.CURRENT_PROTOCOL_VERSION:
            self.log_debug(
                "Got message protocol {} that is higher than the current "
                "version {}. Using current protocol version.",
                protocol_version,
                self.version_manager.CURRENT_PROTOCOL_VERSION)
            protocol_version = self.version_manager.CURRENT_PROTOCOL_VERSION

        self.protocol_version = protocol_version
        self.message_factory = self.version_manager.get_message_factory_for_version(
            protocol_version)

        self.log_trace("Setting connection protocol version to {}".format(
            protocol_version))

        return True

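    # Negotiation summary (added commentary): for inbound connections the
    # first bytes of the peer's hello are sniffed to pick a protocol version;
    # unsupported versions close the connection, and versions newer than ours
    # are clamped. Roughly, with made-up numbers:
    #
    #   peer 3, CURRENT_PROTOCOL_VERSION 5  ->  speak 3
    #   peer 7, CURRENT_PROTOCOL_VERSION 5  ->  clamp to 5
    #   peer unsupported                    ->  mark_for_close()
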
    def pre_process_msg(self) -> ConnectionMessagePreview:
        success = self.set_protocol_version_and_message_factory()

        if not success:
            return ConnectionMessagePreview(False, True, None, None)

        return super(InternalNodeConnection, self).pre_process_msg()

    def enqueue_msg(self, msg, prepend=False):
        if not self.is_alive():
            return

        if self.protocol_version < self.version_manager.CURRENT_PROTOCOL_VERSION:
            versioned_message = self.version_manager.convert_message_to_older_version(
                self.protocol_version, msg)
        else:
            versioned_message = msg

        super(InternalNodeConnection,
              self).enqueue_msg(versioned_message, prepend)

    def pop_next_message(self, payload_len: int) -> Optional[AbstractMessage]:
        msg = super(InternalNodeConnection, self).pop_next_message(payload_len)

        if msg is None or self.protocol_version >= self.version_manager.CURRENT_PROTOCOL_VERSION:
            return msg

        versioned_msg = self.version_manager.convert_message_from_older_version(
            self.protocol_version, msg)

        return versioned_msg

    def check_ping_latency_for_network(self, network_num: int) -> None:
        ping_message = cast(PingMessage, self.ping_message())
        self.enqueue_msg(ping_message)
        self._nonce_to_network_num[ping_message.nonce()] = network_num
        self.sync_ping_latencies[network_num] = None

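    # Commentary (added): check_ping_latency_for_network tags a ping nonce
    # with a network number; when the matching pong arrives, msg_pong records
    # the round trip in sync_ping_latencies[network_num], and
    # update_tx_sync_complete (bottom of this class) clears that bookkeeping
    # once the network finishes syncing.
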
    def msg_hello(self, msg):
        super(InternalNodeConnection, self).msg_hello(msg)

        if not self.is_alive():
            self.log_trace("Connection has been closed: {}. Ignoring message: {}",
                           self, msg)
            return

        network_num = msg.network_num()

        if self.node.network_num != constants.ALL_NETWORK_NUM and network_num != self.node.network_num:
            self.log_warning(log_messages.NETWORK_NUMBER_MISMATCH,
                             self.node.network_num, network_num)
            self.mark_for_close()
            return

        self.network_num = network_num

        self.schedule_pings()

    def peek_broadcast_msg_network_num(self, input_buffer):
        if self.protocol_version == 1:
            return constants.DEFAULT_NETWORK_NUM

        return BroadcastMessage.peek_network_num(input_buffer)

    # pylint: disable=arguments-differ
    def msg_ping(self, msg: PingMessage):
        nonce = msg.nonce()
        assumed_request_time = (
            time.time() - nonce_generator.get_timestamp_from_nonce(nonce))
        self.inbound_peer_latency = assumed_request_time
        hooks.add_measurement(self.peer_desc, MeasurementType.PING_INCOMING,
                              assumed_request_time, self.peer_id)

        self.enqueue_msg(
            PongMessage(nonce=nonce, timestamp=nonce_generator.get_nonce()))

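    # Measurement note (added): assumed_request_time above subtracts the
    # timestamp the peer baked into its ping nonce from our local clock, so
    # PING_INCOMING is a rough one-way estimate that assumes reasonably
    # synchronized clocks between the two nodes.
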
    # pylint: disable=arguments-differ
    def msg_pong(self, msg: PongMessage):
        super(InternalNodeConnection, self).msg_pong(msg)

        nonce = msg.nonce()
        timestamp = msg.timestamp()
        if timestamp:
            self.inbound_peer_latency = (
                time.time() - nonce_generator.get_timestamp_from_nonce(timestamp))
        if nonce in self.ping_message_timestamps.contents:
            request_msg_timestamp = self.ping_message_timestamps.contents[nonce]
            request_response_time = time.time() - request_msg_timestamp

            if nonce in self._nonce_to_network_num:
                network_num = self._nonce_to_network_num[nonce]
                self.sync_ping_latencies[network_num] = request_response_time

            if request_response_time > constants.PING_PONG_TRESHOLD:
                self.log_debug(
                    "Ping/pong exchange nonce {} took {:.2f} seconds to complete.",
                    msg.nonce(), request_response_time)
            else:
                self.log_trace(
                    "Ping/pong exchange nonce {} took {:.2f} seconds to complete.",
                    msg.nonce(), request_response_time)

            hooks.add_measurement(self.peer_desc, MeasurementType.PING,
                                  request_response_time, self.peer_id)
            if timestamp:
                assumed_peer_response_time = nonce_generator.get_timestamp_from_nonce(
                    timestamp) - request_msg_timestamp
                hooks.add_measurement(self.peer_desc,
                                      MeasurementType.PING_OUTGOING,
                                      assumed_peer_response_time, self.peer_id)

        elif nonce is not None:
            self.log_debug(
                "Pong message had no matching ping request. Nonce: {}", nonce)

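    # Commentary (added): one ping/pong exchange can therefore yield several
    # measurements: PING (full round trip, local clock only) and
    # PING_OUTGOING here, plus PING_INCOMING on the peer's side via msg_ping;
    # all but PING depend on cross-node clock skew.
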
    def mark_for_close(self, should_retry: Optional[bool] = None):
        super(InternalNodeConnection, self).mark_for_close(should_retry)
        self.cancel_pong_timeout()

    def is_gateway_connection(self):
        return self.CONNECTION_TYPE in ConnectionType.GATEWAY

    def is_external_gateway_connection(self):
        # self.CONNECTION_TYPE == ConnectionType.GATEWAY evaluates to True only for V1 gateways
        return (self.CONNECTION_TYPE in ConnectionType.EXTERNAL_GATEWAY
                or self.CONNECTION_TYPE == ConnectionType.GATEWAY)

    def is_internal_gateway_connection(self) -> bool:
        return self.CONNECTION_TYPE in ConnectionType.INTERNAL_GATEWAY

    def is_relay_connection(self):
        return self.CONNECTION_TYPE in ConnectionType.RELAY_ALL

    def is_proxy_connection(self) -> bool:
        return self.CONNECTION_TYPE in ConnectionType.RELAY_PROXY

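    # Commentary (added): the rebuild below avoids deleting entries from
    # _nonce_to_network_num while iterating it, which would raise a
    # RuntimeError on a plain dict; only nonces for networks still syncing
    # survive.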
    def update_tx_sync_complete(self, network_num: int):
        if network_num in self.sync_ping_latencies:
            del self.sync_ping_latencies[network_num]
        self._nonce_to_network_num = {
            nonce: other_network_num
            for nonce, other_network_num in self._nonce_to_network_num.items()
            if other_network_num != network_num
        }