Example #1
0
    def send_block_to_node(self,
                           block_hash: Sha256Hash,
                           block_msg: Optional[TBlockMessage] = None) -> None:
        """Forward a block to the blockchain node.

        If no message is supplied, the block is looked up in the node's
        block storage by its hash. Emits a BLOCK_SENT_TO_BLOCKCHAIN_NODE
        stat event after enqueueing.
        """
        if not self.node.should_process_block_hash(block_hash):
            return

        msg_to_send = block_msg
        if msg_to_send is None:
            msg_to_send = self.node.block_storage[block_hash]

        self.connection.log_info("Forwarding block {} to node", block_hash)

        assert msg_to_send is not None
        self.connection.enqueue_msg(msg_to_send)

        # TODO: revisit this metric for multi-node gateway
        handling_time, relay_desc = \
            self.node.track_block_from_bdn_handling_ended(block_hash)

        more_info = "{} bytes; Handled in {}; R - {}; {}".format(
            len(msg_to_send.rawbytes()),
            stats_format.duration(handling_time),
            relay_desc,
            msg_to_send.extra_stats_data(),
        )
        block_stats.add_block_event_by_block_hash(
            block_hash,
            BlockStatEventType.BLOCK_SENT_TO_BLOCKCHAIN_NODE,
            network_num=self.node.network_num,
            more_info=more_info,
        )
    def send_block_to_node(self,
                           block_hash: Sha256Hash,
                           block_msg: Optional[TBlockMessage] = None) -> None:
        """Send a queued block to the blockchain node and record stats.

        When detailed sent-message tracking is enabled, the stat event is
        deferred until all bytes are written to the socket (handled by the
        socket-write path), so it is skipped here.
        """
        if not self.node.should_process_block_hash(block_hash):
            return

        if block_msg is None:
            block_msg = self._blocks[block_hash]

        logger.info("Forwarding block {} to blockchain node.", block_hash)

        assert block_msg is not None
        self.node.send_msg_to_node(block_msg)
        handling_time, relay_desc = \
            self.node.track_block_from_bdn_handling_ended(block_hash)

        # if tracking detailed send info, log this event only after all
        # bytes written to sockets
        if self.node.opts.track_detailed_sent_messages:
            return

        block_stats.add_block_event_by_block_hash(
            block_hash,
            BlockStatEventType.BLOCK_SENT_TO_BLOCKCHAIN_NODE,
            network_num=self.node.network_num,
            more_info="{} bytes; Handled in {}; R - {}; {}".format(
                len(block_msg.rawbytes()),
                stats_format.duration(handling_time),
                relay_desc,
                block_msg.extra_stats_data(),
            ),
        )
    def advance_bytes_written_to_socket(self, bytes_sent):
        # If the message currently draining to the socket is a block, defer the
        # BLOCK_SENT_TO_BLOCKCHAIN_NODE stat event until the final byte of that
        # block has been written; otherwise just delegate to the base class.
        if self.message_tracker and self.message_tracker.is_sending_block_message():
            assert self.node.opts.track_detailed_sent_messages

            # Capture the in-flight entry BEFORE advancing: the tracker pops it
            # once its bytes are fully written, so it may be gone afterwards.
            entry = self.message_tracker.messages[0]
            super(AbstractGatewayBlockchainConnection, self).advance_bytes_written_to_socket(bytes_sent)

            # The block finished sending on this advance; emit the stat event
            # with the queue time, handling duration and relay description.
            if not self.message_tracker.is_sending_block_message():
                block_message = typing.cast(AbstractBlockMessage, entry.message)
                block_message_queue_time = entry.queued_time
                block_message_length = entry.length
                block_hash = block_message.block_hash()
                handling_time, relay_desc = self.node.track_block_from_bdn_handling_ended(block_hash)
                block_stats.add_block_event_by_block_hash(block_hash,
                                                          BlockStatEventType.BLOCK_SENT_TO_BLOCKCHAIN_NODE,
                                                          network_num=self.network_num,
                                                          more_info="{} in {}; Handled in {}; R - {}; {}".format(
                                                              stats_format.byte_count(
                                                                  block_message_length
                                                              ),
                                                              stats_format.timespan(
                                                                  block_message_queue_time,
                                                                  time.time()
                                                              ),
                                                              stats_format.duration(handling_time),
                                                              relay_desc,
                                                              block_message.extra_stats_data()
                                                          ))
        else:
            super(AbstractGatewayBlockchainConnection, self).advance_bytes_written_to_socket(bytes_sent)
Example #4
0
    def _format_block_info_stats(self, block_info):
        """Render compression duration and rate for *block_info*.

        Returns an empty string when no block info is available.
        """
        if block_info is None:
            return ""

        duration = stats_format.duration(block_info.duration_ms)
        rate = stats_format.percentage(block_info.compression_rate)
        return "Compression: {}, {}".format(duration, rate)
Example #5
0
    def _propagate_unencrypted_block_to_network(self, bx_block, connection,
                                                block_info):
        """Broadcast an unencrypted compressed block to relay-block peers.

        Raises ValueError when block_info is missing, since its hash keys
        both the broadcast message and the stat events. Returns the
        broadcast message that was sent.
        """
        if block_info is None:
            raise ValueError(
                "Block info is required to propagate unencrypted block")

        broadcast_message = BroadcastMessage(
            block_info.block_hash,
            self._node.network_num,
            is_encrypted=False,
            blob=bx_block,
        )
        conns = self._node.broadcast(
            broadcast_message,
            connection,
            connection_types=[ConnectionType.RELAY_BLOCK],
        )
        handling_duration = self._node.track_block_from_node_handling_ended(
            block_info.block_hash)
        more_info = "Peers: {}; Unencrypted; {}; Handled in {}".format(
            stats_format.connections(conns),
            self._format_block_info_stats(block_info),
            stats_format.duration(handling_duration),
        )
        block_stats.add_block_event_by_block_hash(
            block_info.block_hash,
            BlockStatEventType.ENC_BLOCK_SENT_FROM_GATEWAY_TO_NETWORK,
            network_num=self._node.network_num,
            requested_by_peer=False,
            peers=conns,
            more_info=more_info)
        logger.info("Propagating block {} to the BDN.", block_info.block_hash)
        return broadcast_message
Example #6
0
    def _propagate_encrypted_block_to_network(self, bx_block, connection,
                                              block_info):
        """Encrypt *bx_block*, broadcast it to relay-block peers, and register
        for block receipts keyed by the cipher hash.

        When block_info (or its hash) is missing, a placeholder hash is used
        and the propagation is recorded as peer-requested. Returns the
        broadcast message that was sent.
        """
        if block_info is None or block_info.block_hash is None:
            block_hash = b"Unknown"
            requested_by_peer = True
        else:
            block_hash = block_info.block_hash
            requested_by_peer = False

        encrypt_start_datetime = datetime.datetime.utcnow()
        encrypt_start_timestamp = time.time()
        encrypted_block, raw_cipher_hash = self._node.in_progress_blocks.encrypt_and_add_payload(
            bx_block)

        compressed_size = len(bx_block)
        encrypted_size = len(encrypted_block)

        # Human-readable summary of encryption cost and payload size overhead,
        # reused in both stat events below.
        encryption_details = "Encryption: {}; Size change: {}->{}bytes, {}".format(
            stats_format.timespan(encrypt_start_timestamp, time.time()),
            compressed_size, encrypted_size,
            stats_format.ratio(encrypted_size, compressed_size))

        block_stats.add_block_event_by_block_hash(
            block_hash,
            BlockStatEventType.BLOCK_ENCRYPTED,
            start_date_time=encrypt_start_datetime,
            end_date_time=datetime.datetime.utcnow(),
            network_num=self._node.network_num,
            matching_block_hash=convert.bytes_to_hex(raw_cipher_hash),
            matching_block_type=StatBlockType.ENCRYPTED.value,
            more_info=encryption_details)

        # The encrypted payload is broadcast under its cipher hash, not the
        # original block hash.
        cipher_hash = Sha256Hash(raw_cipher_hash)
        broadcast_message = BroadcastMessage(cipher_hash,
                                             self._node.network_num,
                                             is_encrypted=True,
                                             blob=encrypted_block)

        conns = self._node.broadcast(
            broadcast_message,
            connection,
            connection_types=[ConnectionType.RELAY_BLOCK])

        handling_duration = self._node.track_block_from_node_handling_ended(
            block_hash)
        block_stats.add_block_event_by_block_hash(
            cipher_hash,
            BlockStatEventType.ENC_BLOCK_SENT_FROM_GATEWAY_TO_NETWORK,
            network_num=self._node.network_num,
            requested_by_peer=requested_by_peer,
            peers=conns,
            more_info="Peers: {}; {}; {}; Requested by peer: {}; Handled in {}"
            .format(stats_format.connections(conns), encryption_details,
                    self._format_block_info_stats(block_info),
                    requested_by_peer,
                    stats_format.duration(handling_duration)))
        self.register_for_block_receipts(cipher_hash, bx_block)
        return broadcast_message
Example #7
0
    def get_info(self) -> Dict[str, Any]:
        """Return encryption/decryption/serialization stats for the interval.

        All timings in interval_data are in seconds; they are multiplied by
        1000 and rendered via stats_format.duration (milliseconds).
        """
        interval_data = self.interval_data
        assert interval_data is not None

        def _average(total_time: float, msgs_count: int) -> float:
            # Guard on the message count rather than the accumulated time so a
            # zero count can never raise ZeroDivisionError (the original code
            # divided by the count while only checking the time was positive).
            if msgs_count > 0:
                return total_time / msgs_count
            return 0

        average_encryption_time = _average(
            interval_data.total_encryption_time,
            interval_data.total_encrypted_msgs_count,
        )
        average_decryption_time = _average(
            interval_data.total_decryption_time,
            interval_data.total_decrypted_msgs_count,
        )
        average_serialization_time = _average(
            interval_data.total_serialization_time,
            interval_data.total_serialized_msgs_count,
        )

        return {
            "total_encrypted_msgs_count": interval_data.total_encrypted_msgs_count,
            "average_encryption_time": stats_format.duration(average_encryption_time * 1000),
            "max_encryption_time": stats_format.duration(
                interval_data.max_encryption_time * 1000
            ),
            "total_decrypted_msgs_count": interval_data.total_decrypted_msgs_count,
            "average_decryption_time": stats_format.duration(average_decryption_time * 1000),
            "max_decryption_time": stats_format.duration(
                interval_data.max_decryption_time * 1000
            ),
            "total_serialized_msgs_count": interval_data.total_serialized_msgs_count,
            "average_serialization_time": stats_format.duration(average_serialization_time * 1000),
            "max_serialization_time": stats_format.duration(
                interval_data.max_serialization_time * 1000
            ),
        }
    def get_info(self) -> Dict[str, Any]:
        """Return encryption/decryption/serialization stats for the interval.

        Timings are in seconds in interval_data and rendered in milliseconds
        via stats_format.duration.
        """

        def _average(total_time, msgs_count):
            # Guard on the message count rather than the accumulated time so a
            # zero count can never raise ZeroDivisionError (the original code
            # divided by the count while only checking the time was positive).
            return total_time / msgs_count if msgs_count > 0 else 0

        average_encryption_time = _average(
            self.interval_data.total_encryption_time,
            self.interval_data.total_encrypted_msgs_count)
        average_decryption_time = _average(
            self.interval_data.total_decryption_time,
            self.interval_data.total_decrypted_msgs_count)
        average_serialization_time = _average(
            self.interval_data.total_serialization_time,
            self.interval_data.total_serialized_msgs_count)

        return {
            "total_encrypted_msgs_count":
            self.interval_data.total_encrypted_msgs_count,
            "average_encryption_time":
            stats_format.duration(average_encryption_time * 1000),
            "max_encryption_time":
            stats_format.duration(self.interval_data.max_encryption_time *
                                  1000),
            "total_decrypted_msgs_count":
            self.interval_data.total_decrypted_msgs_count,
            "average_decryption_time":
            stats_format.duration(average_decryption_time * 1000),
            "max_decryption_time":
            stats_format.duration(self.interval_data.max_decryption_time *
                                  1000),
            "total_serialized_msgs_count":
            self.interval_data.total_serialized_msgs_count,
            "average_serialization_time":
            stats_format.duration(average_serialization_time * 1000),
            "max_serialization_time":
            stats_format.duration(self.interval_data.max_serialization_time *
                                  1000),
        }
    def _propagate_unencrypted_block_to_network(self, bx_block, connection, block_info):
        """Broadcast an unencrypted block or consensus message to relay peers.

        The broadcast type is derived from the consensus flag embedded in the
        compressed payload. Returns the broadcast message that was sent.
        """
        if block_info is None:
            raise ValueError("Block info is required to propagate unencrypted block")

        # Byte 8 of the compressed payload flags a consensus message.
        is_consensus_msg, = struct.unpack_from("?", bx_block[8:9])
        if is_consensus_msg:
            broadcast_type = BroadcastMessageType.CONSENSUS
        else:
            broadcast_type = BroadcastMessageType.BLOCK

        broadcast_message = BroadcastMessage(
            block_info.block_hash,
            self._node.network_num,
            broadcast_type=broadcast_type,
            is_encrypted=False,
            blob=bx_block,
        )

        conns = self._node.broadcast(
            broadcast_message,
            connection,
            connection_types=[ConnectionType.RELAY_BLOCK],
        )
        handling_duration = self._node.track_block_from_node_handling_ended(
            block_info.block_hash)
        block_stats.add_block_event_by_block_hash(
            block_info.block_hash,
            BlockStatEventType.ENC_BLOCK_SENT_FROM_GATEWAY_TO_NETWORK,
            network_num=self._node.network_num,
            broadcast_type=broadcast_type,
            requested_by_peer=False,
            peers=map(lambda conn: (conn.peer_desc, conn.CONNECTION_TYPE), conns),
            more_info="Peers: {}; Unencrypted; {}; Handled in {}".format(
                stats_format.connections(conns),
                self._format_block_info_stats(block_info),
                stats_format.duration(handling_duration),
            ),
        )
        logger.info("Propagating block {} to the BDN.", block_info.block_hash)
        return broadcast_message
    def push(
        self,
        block_hash: Sha256Hash,
        # pyre-fixme[9]: block_msg is declared to have type `Union[BlockOntMessage, OntConsensusMessage]`
        #  but is used as type `None`.
        block_msg: Union[BlockOntMessage, OntConsensusMessage] = None,
        waiting_for_recovery: bool = False,
    ):
        """Queue an Ontology block or consensus message for the node.

        Extends the base push with Ontology-specific handling: payload
        checksum validation, height update plus inventory announcement for
        regular blocks, and direct forwarding with stat tracking for
        consensus messages.
        """
        # Consensus-mode gateways ignore regular block messages entirely.
        if self.node.opts.is_consensus and isinstance(block_msg,
                                                      BlockOntMessage):
            return
        super().push(block_hash, block_msg, waiting_for_recovery)
        self.connection.log_debug(
            "Added block {} to queuing service (waiting for recovery: {})",
            block_hash, waiting_for_recovery)
        self._clean_block_queue()

        # Without the message body there is nothing to validate or forward.
        if block_msg is None:
            return

        try:
            block_msg.validate_payload(block_msg.buf,
                                       block_msg.unpack(block_msg.buf))
        except ChecksumError:
            logger.debug(
                "Encountered checksum error, which can be caused by duplicate transaction. "
                "Stop processing block {}", block_hash)
            return
        block_hash = cast(OntObjectHash, block_hash)

        if isinstance(block_msg, BlockOntMessage):
            # Announce the block to the node via an inventory message rather
            # than re-sending the full block.
            if block_hash in self._blocks and not waiting_for_recovery:
                self.node.update_current_block_height(block_msg.height(),
                                                      block_hash)
                inv_msg = InvOntMessage(
                    magic=block_msg.magic(),
                    inv_type=InventoryOntType.MSG_BLOCK,
                    blocks=[block_hash],
                )
                self.connection.enqueue_msg(inv_msg)
        # pyre-fixme[25]: `block_msg` has type `OntConsensusMessage`,
        #  assertion `not isinstance(block_msg, bxgateway.messages.ont.consensus_ont_message.OntConsensusMessage)`
        #  will always fail.
        elif isinstance(block_msg, OntConsensusMessage):
            # Consensus messages are forwarded directly and tracked with a
            # BLOCK_SENT_TO_BLOCKCHAIN_NODE stat event.
            if block_hash in self._blocks and not waiting_for_recovery:
                self.connection.log_info(
                    "Sending consensus message with block hash {} to blockchain node",
                    block_hash,
                )
                self.connection.enqueue_msg(block_msg)
                handling_time, relay_desc = self.node.track_block_from_bdn_handling_ended(
                    block_hash)
                block_stats.add_block_event_by_block_hash(
                    block_hash,
                    BlockStatEventType.BLOCK_SENT_TO_BLOCKCHAIN_NODE,
                    network_num=self.node.network_num,
                    broadcast_type=BroadcastMessageType.CONSENSUS,
                    more_info="{} bytes; Handled in {}; R - {}; {}".format(
                        len(block_msg.rawbytes()),
                        stats_format.duration(handling_time),
                        relay_desc,
                        block_msg.extra_stats_data(),
                    ),
                )
Example #11
0
    def _handle_decrypted_block(
            self,
            bx_block: memoryview,
            connection: AbstractRelayConnection,
            encrypted_block_hash_hex: Optional[str] = None,
            recovered: bool = False,
            recovered_txs_source: Optional[RecoveredTxsSource] = None) -> None:
        """Validate, decompress and dispatch a decrypted bx_block from the BDN.

        Depending on the decompression result the block is forwarded to the
        queuing services, discarded as a duplicate, or parked for short-id
        recovery. Every path is recorded via block_stats events.

        :param bx_block: compressed block payload
        :param connection: relay connection the block arrived on
        :param encrypted_block_hash_hex: hash of the matching encrypted block,
            if this block was previously received encrypted
        :param recovered: True when this call completes a prior recovery
        :param recovered_txs_source: where the recovered transactions came from
        """
        transaction_service = self._node.get_tx_service()
        message_converter = self._node.message_converter
        assert message_converter is not None

        # Reject blocks whose compressed header fails validation.
        valid_block = self._validate_compressed_block_header(bx_block)
        if not valid_block.is_valid:
            reason = valid_block.reason
            assert reason is not None
            block_stats.add_block_event_by_block_hash(
                valid_block.block_hash,
                BlockStatEventType.BLOCK_DECOMPRESSED_FAILED_VALIDATION,
                connection.network_num,
                more_info=reason)
            return

        # TODO: determine if a real block or test block. Discard if test block.
        # Only decompress when there is somewhere to forward the block to.
        if self._node.remote_node_conn or self._node.has_active_blockchain_peer(
        ):
            try:
                (block_message, block_info, unknown_sids,
                 unknown_hashes) = message_converter.bx_block_to_block(
                     bx_block, transaction_service)
                block_content_debug_utils.log_compressed_block_debug_info(
                    transaction_service, bx_block)
            except MessageConversionError as e:
                block_stats.add_block_event_by_block_hash(
                    e.msg_hash,
                    BlockStatEventType.BLOCK_CONVERSION_FAILED,
                    network_num=connection.network_num,
                    conversion_type=e.conversion_type.value)
                transaction_service.on_block_cleaned_up(e.msg_hash)
                connection.log_warning(log_messages.FAILED_TO_DECOMPRESS_BLOCK,
                                       e.msg_hash, e)
                return
        else:
            connection.log_warning(log_messages.LACK_BLOCKCHAIN_CONNECTION)
            return

        block_hash = block_info.block_hash
        all_sids = block_info.short_ids

        # Link this decompressed block to its previously-seen encrypted form.
        if encrypted_block_hash_hex is not None:
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_TO_ENC_BLOCK_MATCH,
                matching_block_hash=encrypted_block_hash_hex,
                matching_block_type=StatBlockType.ENCRYPTED.value,
                network_num=connection.network_num)

        self.cancel_hold_timeout(block_hash, connection)

        if recovered:
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_RECOVERY_COMPLETED,
                network_num=connection.network_num,
                more_info=str(recovered_txs_source))

        # Duplicate path: record the event, keep the seen short ids, and make
        # sure the queuing service still has the block data, but do not re-push.
        if block_hash in self._node.blocks_seen.contents:
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_DECOMPRESSED_IGNORE_SEEN,
                start_date_time=block_info.start_datetime,
                end_date_time=block_info.end_datetime,
                network_num=connection.network_num,
                prev_block_hash=block_info.prev_block_hash,
                original_size=block_info.original_size,
                compressed_size=block_info.compressed_size,
                txs_count=block_info.txn_count,
                blockchain_network=self._node.opts.blockchain_network,
                blockchain_protocol=self._node.opts.blockchain_protocol,
                matching_block_hash=block_info.compressed_block_hash,
                matching_block_type=StatBlockType.COMPRESSED.value,
                more_info=stats_format.duration(block_info.duration_ms))
            self._node.track_block_from_bdn_handling_ended(block_hash)
            transaction_service.track_seen_short_ids(block_hash, all_sids)
            connection.log_info("Discarding duplicate block {} from the BDN.",
                                block_hash)
            if block_message is not None:
                self._node.on_block_received_from_bdn(block_hash,
                                                      block_message)
                if self._node.block_queuing_service_manager.get_block_data(
                        block_hash) is None:
                    self._node.block_queuing_service_manager.store_block_data(
                        block_hash, block_message)
            return

        if not recovered:
            connection.log_info("Received block {} from the BDN.", block_hash)
        else:
            connection.log_info("Successfully recovered block {}.", block_hash)

        # Success path: the block fully decompressed; push it to queuing
        # services, publish it to feeds, and record seen short ids.
        if block_message is not None:
            compression_rate = block_info.compression_rate
            assert compression_rate is not None
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_DECOMPRESSED_SUCCESS,
                start_date_time=block_info.start_datetime,
                end_date_time=block_info.end_datetime,
                network_num=connection.network_num,
                prev_block_hash=block_info.prev_block_hash,
                original_size=block_info.original_size,
                compressed_size=block_info.compressed_size,
                txs_count=block_info.txn_count,
                blockchain_network=self._node.opts.blockchain_network,
                blockchain_protocol=self._node.opts.blockchain_protocol,
                matching_block_hash=block_info.compressed_block_hash,
                matching_block_type=StatBlockType.COMPRESSED.value,
                more_info="Compression rate {}, Decompression time {}, "
                "Queued behind {} blocks".format(
                    stats_format.percentage(compression_rate),
                    stats_format.duration(block_info.duration_ms),
                    self._node.block_queuing_service_manager.
                    get_length_of_each_queuing_service_stats_format()))

            self._on_block_decompressed(block_message)
            # Recovered or already-queued blocks update the existing entry
            # instead of being pushed again.
            if recovered or self._node.block_queuing_service_manager.is_in_any_queuing_service(
                    block_hash):
                self._node.block_queuing_service_manager.update_recovered_block(
                    block_hash, block_message)
            else:
                self._node.block_queuing_service_manager.push(
                    block_hash, block_message)

            gateway_bdn_performance_stats_service.log_block_from_bdn()

            self._node.on_block_received_from_bdn(block_hash, block_message)
            transaction_service.track_seen_short_ids(block_hash, all_sids)

            self._node.publish_block(None, block_hash, block_message,
                                     FeedSource.BDN_SOCKET)
            self._node.log_blocks_network_content(self._node.network_num,
                                                  block_message)
        # Recovery path: unknown short ids/hashes remain; park the compressed
        # block and query the BDN for the missing transactions.
        else:
            if self._node.block_queuing_service_manager.is_in_any_queuing_service(
                    block_hash) and not recovered:
                connection.log_trace(
                    "Handling already queued block again. Ignoring.")
                return

            self._node.block_recovery_service.add_block(
                bx_block, block_hash, unknown_sids, unknown_hashes)
            block_stats.add_block_event_by_block_hash(
                block_hash,
                BlockStatEventType.BLOCK_DECOMPRESSED_WITH_UNKNOWN_TXS,
                start_date_time=block_info.start_datetime,
                end_date_time=block_info.end_datetime,
                network_num=connection.network_num,
                prev_block_hash=block_info.prev_block_hash,
                original_size=block_info.original_size,
                compressed_size=block_info.compressed_size,
                txs_count=block_info.txn_count,
                blockchain_network=self._node.opts.blockchain_network,
                blockchain_protocol=self._node.opts.blockchain_protocol,
                matching_block_hash=block_info.compressed_block_hash,
                matching_block_type=StatBlockType.COMPRESSED.value,
                more_info="{} sids, {} hashes, [{},...]".format(
                    len(unknown_sids), len(unknown_hashes), unknown_sids[:5]))

            connection.log_info(
                "Block {} requires short id recovery. Querying BDN...",
                block_hash)

            self.start_transaction_recovery(unknown_sids, unknown_hashes,
                                            block_hash, connection)
            if recovered:
                # should never happen –– this should not be called on blocks that have not recovered
                connection.log_error(log_messages.BLOCK_DECOMPRESSION_FAILURE,
                                     block_hash)
            else:
                self._node.block_queuing_service_manager.push(
                    block_hash, waiting_for_recovery=True)
Example #12
0
    def _process_and_broadcast_block(
            self, block_message,
            connection: AbstractGatewayBlockchainConnection) -> None:
        """
        Compresses and propagates block message if enabled, else return.

        Emits BLOCK_COMPRESSED (or BLOCK_CONVERSION_FAILED) stat events and
        optionally dumps the short-id mapping to disk for debugging.
        :param block_message: block message to propagate
        :param connection: receiving connection (AbstractBlockchainConnection)
        """
        block_hash = block_message.block_hash()
        message_converter = self._node.message_converter
        assert message_converter is not None
        try:
            bx_block, block_info = message_converter.block_to_bx_block(
                block_message, self._node.get_tx_service(),
                self._node.opts.enable_block_compression,
                self._node.network.min_tx_age_seconds)
        except MessageConversionError as e:
            # Compression failed: record the failure and stop; the block is
            # not propagated.
            block_stats.add_block_event_by_block_hash(
                e.msg_hash,
                BlockStatEventType.BLOCK_CONVERSION_FAILED,
                network_num=connection.network_num,
                conversion_type=e.conversion_type.value)
            connection.log_error(log_messages.BLOCK_COMPRESSION_FAIL,
                                 e.msg_hash, e)
            return

        # Short ids too young to be safely used were skipped by the converter.
        if block_info.ignored_short_ids:
            assert block_info.ignored_short_ids is not None
            logger.debug("Ignoring {} new SIDs for {}: {}",
                         len(block_info.ignored_short_ids),
                         block_info.block_hash, block_info.ignored_short_ids)

        compression_rate = block_info.compression_rate
        assert compression_rate is not None
        block_stats.add_block_event_by_block_hash(
            block_hash,
            BlockStatEventType.BLOCK_COMPRESSED,
            start_date_time=block_info.start_datetime,
            end_date_time=block_info.end_datetime,
            network_num=connection.network_num,
            prev_block_hash=block_info.prev_block_hash,
            original_size=block_info.original_size,
            txs_count=block_info.txn_count,
            blockchain_network=self._node.opts.blockchain_network,
            blockchain_protocol=self._node.opts.blockchain_protocol,
            matching_block_hash=block_info.compressed_block_hash,
            matching_block_type=StatBlockType.COMPRESSED.value,
            more_info="Compression: {}->{} bytes, {}, {}; Tx count: {}".format(
                block_info.original_size, block_info.compressed_size,
                stats_format.percentage(compression_rate),
                stats_format.duration(block_info.duration_ms),
                block_info.txn_count))
        # Debug option: persist the short id -> tx hash mapping used for this
        # block to a file named after the block hash.
        if self._node.opts.dump_short_id_mapping_compression:
            mapping = {}
            for short_id in block_info.short_ids:
                tx_hash = self._node.get_tx_service().get_transaction(
                    short_id).hash
                assert tx_hash is not None
                mapping[short_id] = convert.bytes_to_hex(tx_hash.binary)
            with open(
                    f"{self._node.opts.dump_short_id_mapping_compression_path}/"
                    f"{convert.bytes_to_hex(block_hash.binary)}", "w") as f:
                f.write(str(mapping))

        self._process_and_broadcast_compressed_block(bx_block, connection,
                                                     block_info, block_hash)

        self._node.log_blocks_network_content(self._node.network_num,
                                              block_message)
    def process_message(self):
        """
        Processes the next bytes on the socket's inputbuffer.
        Returns 0 in order to avoid being rescheduled if this was an alarm.

        Loops popping full messages off the input buffer, validating them,
        dispatching to the registered handler for each message type, and
        tracking per-type counts and throughput. Recoverable per-message
        errors clean up the current message and continue; unrecoverable
        ones mark the connection for close and return.
        """
        # pylint: disable=too-many-return-statements, too-many-branches, too-many-statements

        logger.trace("START PROCESSING from {}", self)

        start_time = time.time()
        messages_processed = defaultdict(int)
        total_bytes_processed = 0

        self.processing_message_index = 0

        while True:
            input_buffer_len_before = self.inputbuf.length
            is_full_msg = False
            payload_len = None
            msg = None
            msg_type = None

            try:
                # abort message processing if connection has been closed
                if not self.socket_connection.alive:
                    return

                is_full_msg, should_process, msg_type, payload_len = self.pre_process_msg()

                if not should_process and is_full_msg:
                    self.pop_next_bytes(payload_len)
                    continue

                self.message_validator.validate(
                    is_full_msg,
                    msg_type,
                    self.header_size,
                    payload_len,
                    self.inputbuf
                )

                self.process_msg_type(msg_type, is_full_msg, payload_len)

                if not is_full_msg:
                    break

                msg = self.pop_next_message(payload_len)

                # If there was some error in parsing this message, then continue the loop.
                # BUGFIX: the None check must come before any use of msg. Previously
                # len(msg.rawbytes()) ran first, raising AttributeError on a failed
                # parse and diverting into the generic exception handler instead of
                # the _report_bad_message() path.
                if msg is None:
                    if self._report_bad_message():
                        return
                    continue

                # Compute the serialized length once; used for both the running
                # byte total and the optional throughput event below.
                msg_len = len(msg.rawbytes())
                total_bytes_processed += msg_len

                # Full messages must be one of the handshake messages if the connection isn't established yet.
                if (
                    not self.established and msg_type not in self.hello_messages
                ):
                    self.log_warning(log_messages.UNEXPECTED_MESSAGE, msg_type)
                    self.mark_for_close()
                    return

                if self.log_throughput:
                    hooks.add_throughput_event(
                        NetworkDirection.INBOUND,
                        msg_type,
                        msg_len,
                        self.peer_desc,
                        self.peer_id
                    )

                # When the message's own log level is suppressed but INFO is on,
                # just count it; otherwise flush the accumulated counts.
                if not logger.isEnabledFor(msg.log_level()) and logger.isEnabledFor(LogLevel.INFO):
                    self._debug_message_tracker[msg_type] += 1
                elif len(self._debug_message_tracker) > 0:
                    self.log_debug(
                        "Processed the following messages types: {} over {:.2f} seconds.",
                        self._debug_message_tracker,
                        time.time() - self._last_debug_message_log_time
                    )
                    self._debug_message_tracker.clear()
                    self._last_debug_message_log_time = time.time()

                self._log_message(msg.log_level(), "Processing message: {}", msg)

                if msg_type in self.message_handlers:
                    msg_handler = self.message_handlers[msg_type]

                    handler_start = time.time()
                    msg_handler(msg)
                    performance_utils.log_operation_duration(
                        msg_handling_logger,
                        "Single message handler",
                        handler_start,
                        constants.MSG_HANDLERS_CYCLE_DURATION_WARN_THRESHOLD_S,
                        connection=self,
                        handler=msg_handler,
                        message=msg
                    )
                messages_processed[msg_type] += 1

            # TODO: Investigate possible solutions to recover from PayloadLenError errors
            except PayloadLenError as e:
                self.log_error(log_messages.COULD_NOT_PARSE_MESSAGE, e.msg)
                self.mark_for_close()
                return

            except MemoryError as e:
                self.log_error(log_messages.OUT_OF_MEMORY, e, exc_info=True)
                self.log_debug(
                    "Failed message bytes: {}",
                    self._get_last_msg_bytes(msg, input_buffer_len_before, payload_len)
                )
                raise

            except UnauthorizedMessageError as e:
                self.log_error(log_messages.UNAUTHORIZED_MESSAGE, e.msg.MESSAGE_TYPE, self.peer_desc)
                self.log_debug(
                    "Failed message bytes: {}",
                    self._get_last_msg_bytes(msg, input_buffer_len_before, payload_len)
                )

                # give connection a chance to restore its state and get ready to process next message
                self.clean_up_current_msg(payload_len, input_buffer_len_before == self.inputbuf.length)

                if self._report_bad_message():
                    return

            except MessageValidationError as e:
                if self.node.NODE_TYPE not in NodeType.GATEWAY_TYPE:
                    if isinstance(e, ControlFlagValidationError):
                        if e.is_cancelled_cut_through:
                            self.log_debug(
                                "Message validation failed for {} message: {}. Probably cut-through cancellation",
                                msg_type, e.msg)
                        else:
                            self.log_warning(log_messages.MESSAGE_VALIDATION_FAILED, msg_type, e.msg)
                    else:
                        self.log_warning(log_messages.MESSAGE_VALIDATION_FAILED, msg_type, e.msg)
                else:
                    self.log_debug("Message validation failed for {} message: {}.", msg_type, e.msg)
                self.log_debug("Failed message bytes: {}",
                               self._get_last_msg_bytes(msg, input_buffer_len_before, payload_len))

                if is_full_msg:
                    self.clean_up_current_msg(payload_len, input_buffer_len_before == self.inputbuf.length)
                else:
                    self.log_error(log_messages.UNABLE_TO_RECOVER_PARTIAL_MESSAGE)
                    self.mark_for_close()
                    return

                if self._report_bad_message():
                    return

            except NonVersionMessageError as e:
                if e.is_known:
                    self.log_debug("Received invalid handshake request on {}:{}, {}", self.peer_ip, self.peer_port,
                                   e.msg)
                else:
                    self.log_warning(log_messages.INVALID_HANDSHAKE, self.peer_ip, self.peer_port, e.msg)
                self.log_debug("Failed message bytes: {}",
                               self._get_last_msg_bytes(msg, input_buffer_len_before, payload_len))

                self.mark_for_close()
                return

            # TODO: Throw custom exception for any errors that come from input that has not been
            # validated and only catch that subclass of exceptions
            # pylint: disable=broad-except
            except Exception as e:

                # Attempt to recover connection by removing bad full message
                if is_full_msg:
                    self.log_error(log_messages.TRYING_TO_RECOVER_MESSAGE, e, exc_info=True)
                    self.log_debug("Failed message bytes: {}",
                                   self._get_last_msg_bytes(msg, input_buffer_len_before, payload_len))

                    # give connection a chance to restore its state and get ready to process next message
                    self.clean_up_current_msg(payload_len, input_buffer_len_before == self.inputbuf.length)

                # Connection is unable to recover from message processing error if incomplete message is received
                else:
                    self.log_error(log_messages.UNABLE_TO_RECOVER_FULL_MESSAGE, e, exc_info=True)
                    self.log_debug("Failed message bytes: {}",
                                   self._get_last_msg_bytes(msg, input_buffer_len_before, payload_len))
                    self.mark_for_close()
                    return

                if self._report_bad_message():
                    return
            else:
                # Successful iteration: reset the consecutive bad-message counter.
                self.num_bad_messages = 0

            self.processing_message_index += 1

        performance_utils.log_operation_duration(msg_handling_logger,
                                                 "Message handlers",
                                                 start_time,
                                                 constants.MSG_HANDLERS_DURATION_WARN_THRESHOLD_S,
                                                 connection=self, count=messages_processed)
        duration_ms = (time.time() - start_time) * 1000
        logger.trace("DONE PROCESSING from {}. Bytes processed: {}. Messages processed: {}. Duration: {}",
                     self, total_bytes_processed, messages_processed, stats_format.duration(duration_ms))
 def process_compact_block(
         self, block_message: CompactBlockBtcMessage, connection: BtcNodeConnection
 ) -> CompactBlockCompressionResult:
     """
     Compress a compact block received from the blockchain node.

     On successful conversion, records a compression-success stat event,
     notifies the block cleanup service, and broadcasts the compressed
     block. On failure, records a compression-failed stat event with the
     count of missing short id indices and logs a warning.

     :param block_message: compact block message to process
     :param connection: receiving connection (AbstractBlockchainConnection)
     :return: the conversion result, including success flag and block info
     """
     compact_hash = block_message.block_hash()
     result = self._node.message_converter.compact_block_to_bx_block(  # pyre-ignore
         block_message,
         self._node.get_tx_service()
     )
     info = result.block_info

     if result.success:
         block_stats.add_block_event_by_block_hash(
             compact_hash,
             BlockStatEventType.COMPACT_BLOCK_COMPRESSED_SUCCESS,
             network_num=connection.network_num,
             start_date_time=info.start_datetime,
             end_date_time=info.end_datetime,
             duration=info.duration_ms / 1000,
             success=result.success,
             txs_count=result.block_info.txn_count,
             prev_block_hash=result.block_info.prev_block_hash,
             original_size=info.original_size,
             more_info="Compression: {}->{} bytes, {}, {}; Tx count: {}".format(
                 info.original_size,
                 info.compressed_size,
                 stats_format.percentage(info.compression_rate),
                 stats_format.duration(info.duration_ms),
                 info.txn_count
             )
         )
         self._node.block_cleanup_service.on_new_block_received(
             compact_hash, block_message.prev_block_hash()
         )
         self._process_and_broadcast_compressed_block(
             result.bx_block, connection, result.block_info, compact_hash
         )
         return result

     # Conversion failed: some transactions were unknown to the tx service.
     missing = result.missing_indices
     missing_count = len(missing) if missing is not None else 0
     started = info.start_datetime
     ended = datetime.utcnow()
     elapsed = (ended - started).total_seconds()
     block_stats.add_block_event_by_block_hash(
         compact_hash,
         BlockStatEventType.COMPACT_BLOCK_COMPRESSED_FAILED,
         network_num=connection.network_num,
         start_date_time=started,
         end_date_time=ended,
         duration=elapsed,
         success=result.success,
         missing_short_id_count=missing_count,
         more_info="{:.2f}ms".format(elapsed * 1000)
     )
     logger.warning(log_messages.UNKNOWN_SHORT_IDS, missing_count)
     return result
    def msg_consensus(self, msg: OntConsensusMessage):
        """
        Handle an Ontology consensus message from the local blockchain node.

        Only processed when the gateway runs with consensus enabled and the
        payload is a block-proposal consensus message; duplicates already seen
        are discarded. Otherwise the message is compressed into a bx_block and
        broadcast to the BDN, with block-hold and stat events emitted along
        the way.

        :param msg: consensus message received from the blockchain node
        """
        # Guard clauses: skip entirely unless consensus mode is on and this is
        # a block-proposal payload that this node should process.
        if not self.node.opts.is_consensus:
            return
        if msg.consensus_data_type() != ont_constants.BLOCK_PROPOSAL_CONSENSUS_MESSAGE_TYPE:
            return

        block_hash = msg.block_hash()
        node = self.connection.node
        if not node.should_process_block_hash(block_hash):
            return

        node.block_cleanup_service.on_new_block_received(block_hash, msg.prev_block_hash())
        block_stats.add_block_event_by_block_hash(block_hash,
                                                  BlockStatEventType.BLOCK_RECEIVED_FROM_BLOCKCHAIN_NODE,
                                                  network_num=self.connection.network_num,
                                                  broadcast_type=BroadcastMessageType.CONSENSUS,
                                                  more_info="Protocol: {}, Network: {}".format(
                                                      node.opts.blockchain_protocol,
                                                      node.opts.blockchain_network
                                                  ),
                                                  msg_size=len(msg.rawbytes())
                                                  )

        # Drop blocks this gateway has already seen; still record the ignore
        # event and notify the node the block was seen by the blockchain node.
        if block_hash in self.connection.node.blocks_seen.contents:
            self.node.on_block_seen_by_blockchain_node(block_hash, self.connection)
            block_stats.add_block_event_by_block_hash(block_hash,
                                                      BlockStatEventType.BLOCK_RECEIVED_FROM_BLOCKCHAIN_NODE_IGNORE_SEEN,
                                                      network_num=self.connection.network_num,
                                                      broadcast_type=BroadcastMessageType.CONSENSUS)
            self.connection.log_info(
                "Discarding duplicate consensus block {} from local blockchain node.",
                block_hash
            )
            return

        node.track_block_from_node_handling_started(block_hash)

        self.connection.log_info(
            "Processing consensus block {} from local blockchain node.",
            block_hash
        )

        # Broadcast BlockHoldingMessage through relays and gateways
        # (sent before compression so peers hold off on re-propagating).
        conns = self.node.broadcast(BlockHoldingMessage(block_hash, self.node.network_num),
                                    broadcasting_conn=self.connection, prepend_to_queue=True,
                                    connection_types=[ConnectionType.RELAY_BLOCK, ConnectionType.GATEWAY])
        if len(conns) > 0:
            block_stats.add_block_event_by_block_hash(block_hash,
                                                      BlockStatEventType.BLOCK_HOLD_SENT_BY_GATEWAY_TO_PEERS,
                                                      network_num=self.node.network_num,
                                                      broadcast_type=BroadcastMessageType.CONSENSUS,
                                                      peers=conns
                                                      )

        # Compress the consensus message into a bx_block; conversion failures
        # are logged and abort processing of this block.
        try:
            bx_block, block_info = self.node.consensus_message_converter.block_to_bx_block(
                msg,
                self.node.get_tx_service(),
                self.node.opts.enable_block_compression,
                self.node.network.min_tx_age_seconds
            )
        except MessageConversionError as e:
            block_stats.add_block_event_by_block_hash(
                e.msg_hash,
                BlockStatEventType.BLOCK_CONVERSION_FAILED,
                network_num=self.connection.network_num,
                broadcast_type=BroadcastMessageType.CONSENSUS,
                conversion_type=e.conversion_type.value
            )
            self.connection.log_error(log_messages.BLOCK_COMPRESSION_FAIL_ONT_CONSENSUS, e.msg_hash, e)
            return

        if block_info.ignored_short_ids:
            self.connection.log_debug(
                "Ignoring {} new SIDs for {}: {}",
                len(block_info.ignored_short_ids), block_info.block_hash, block_info.ignored_short_ids
            )

        block_stats.add_block_event_by_block_hash(block_hash,
                                                  BlockStatEventType.BLOCK_COMPRESSED,
                                                  start_date_time=block_info.start_datetime,
                                                  end_date_time=block_info.end_datetime,
                                                  network_num=self.connection.network_num,
                                                  broadcast_type=BroadcastMessageType.CONSENSUS,
                                                  prev_block_hash=block_info.prev_block_hash,
                                                  original_size=block_info.original_size,
                                                  txs_count=block_info.txn_count,
                                                  blockchain_network=self.node.opts.blockchain_network,
                                                  blockchain_protocol=self.node.opts.blockchain_protocol,
                                                  matching_block_hash=block_info.compressed_block_hash,
                                                  matching_block_type=StatBlockType.COMPRESSED.value,
                                                  more_info="Consensus compression: {}->{} bytes, {}, {}; "
                                                            "Tx count: {}".format(
                                                      block_info.original_size,
                                                      block_info.compressed_size,
                                                      stats_format.percentage(block_info.compression_rate),
                                                      stats_format.duration(block_info.duration_ms),
                                                      block_info.txn_count
                                                  )
                                                  )

        # NOTE(review): reaches into the block processing service's private
        # helper directly — presumably no public equivalent exists; confirm.
        self.node.block_processing_service._process_and_broadcast_compressed_block(
            bx_block, self.connection, block_info, block_hash
        )
    def push(
        self,
        block_hash: Sha256Hash,
        # pyre-fixme[9]: block_msg is declared to have type `Union[BlockOntMessage, OntConsensusMessage]`
        #  but is used as type `None`.
        block_msg: Union[BlockOntMessage, OntConsensusMessage] = None,
        waiting_for_recovery: bool = False,
    ):
        """
        Queue a block (or consensus message) for sending to the blockchain node.

        In consensus mode, regular Ont blocks are ignored. After queuing via
        the parent class, the payload is checksum-validated; on failure the
        block is dropped. If the block is already stored and not awaiting
        recovery: for a regular block, an inv message is sent to the node;
        for a consensus message, the message itself is sent and a send stat
        event may be logged.

        :param block_hash: hash of the block being queued
        :param block_msg: the block or consensus message, if available
        :param waiting_for_recovery: True if the block still needs short id recovery
        """
        # In consensus mode only consensus messages are forwarded; drop
        # regular blocks before they ever enter the queue.
        if self.node.opts.is_consensus and isinstance(block_msg,
                                                      BlockOntMessage):
            return
        # pyre-fixme[6]: Expected `Optional[Variable[bxgateway.services.abstract_block_queuing_service.TBlockMessage
        #  (bound to bxcommon.messages.abstract_block_message.AbstractBlockMessage)]]` for 2nd positional only
        #  parameter to call `AbstractBlockQueuingService.push` but got `Union[BlockOntMessage, OntConsensusMessage]`.
        super().push(block_hash, block_msg, waiting_for_recovery)
        logger.debug("Added block {} to queuing service", block_hash)
        self._clean_block_queue()

        if block_msg is None:
            return

        # Validate the payload checksum; duplicate transactions can produce
        # checksum errors, in which case processing of this block stops here.
        try:
            block_msg.validate_payload(block_msg.buf,
                                       block_msg.unpack(block_msg.buf))
        except ChecksumError:
            logger.debug(
                "Encountered checksum error, which can be caused by duplicate transaction. "
                "Stop processing block {}", block_hash)
            return
        block_hash = cast(OntObjectHash, block_hash)

        if isinstance(block_msg, BlockOntMessage):
            # Already-stored block: update chain height and announce the block
            # to the node with an inv message rather than re-sending it.
            if block_hash in self._blocks and not waiting_for_recovery:
                self.node.update_current_block_height(block_msg.height(),
                                                      block_hash)
                inv_msg = InvOntMessage(
                    magic=block_msg.magic(),
                    inv_type=InventoryOntType.MSG_BLOCK,
                    blocks=[block_hash],
                )
                self.node.send_msg_to_node(inv_msg)
        # pyre-fixme[25]: `block_msg` has type `OntConsensusMessage`,
        #  assertion `not isinstance(block_msg, bxgateway.messages.ont.consensus_ont_message.OntConsensusMessage)`
        #  will always fail.
        elif isinstance(block_msg, OntConsensusMessage):
            if block_hash in self._blocks and not waiting_for_recovery:
                logger.info(
                    "Sending consensus message with block hash {} to blockchain node",
                    block_hash)
                # self.node.block_queuing_service.send_block_to_node(block_hash)
                # the above one line is good, except for the wrong broadcast type in BLOCK_SENT_TO_BLOCKCHAIN_NODE
                self.node.send_msg_to_node(block_msg)
                handling_time, relay_desc = self.node.track_block_from_bdn_handling_ended(
                    block_hash)
                # Skip the stat event here when detailed send tracking is on;
                # presumably it is then logged after socket writes complete.
                if not self.node.opts.track_detailed_sent_messages:
                    block_stats.add_block_event_by_block_hash(
                        block_hash,
                        BlockStatEventType.BLOCK_SENT_TO_BLOCKCHAIN_NODE,
                        network_num=self.node.network_num,
                        broadcast_type=BroadcastMessageType.CONSENSUS,
                        more_info="{} bytes; Handled in {}; R - {}; {}".format(
                            len(block_msg.rawbytes()),
                            stats_format.duration(handling_time),
                            relay_desc,
                            block_msg.extra_stats_data(),
                        ),
                    )
    def _handle_decrypted_consensus_block(
        self,
        bx_block: memoryview,
        connection: AbstractRelayConnection,
        encrypted_block_hash_hex: Optional[str] = None,
        recovered: bool = False,
        recovered_txs_source: Optional[RecoveredTxsSource] = None
    ):
        """
        Handle a decrypted consensus bx_block received from the BDN.

        Decompresses the bx_block; on full decompression the block is pushed
        to (or updated in) the queuing service, otherwise short id recovery
        is started. Duplicate blocks already seen are discarded. Stat events
        are recorded for each outcome.

        BUGFIX: the `blockchain_network` / `blockchain_protocol` keyword
        values were swapped in the three stat events below (protocol passed as
        network and vice versa); they now match the correct pairing used by
        the compression stat events elsewhere in this codebase.

        :param bx_block: compressed block payload from the BDN
        :param connection: relay connection the block arrived on
        :param encrypted_block_hash_hex: hex hash of the matching encrypted
            block, if the block was delivered encrypted
        :param recovered: True when this call follows transaction recovery
        :param recovered_txs_source: where recovered transactions came from
        """
        transaction_service = self._node.get_tx_service()

        # Only decompress when there is a node to forward the block to.
        if self._node.has_active_blockchain_peer() or self._node.remote_node_conn:
            try:
                block_message, block_info, unknown_sids, unknown_hashes = \
                    self._node.consensus_message_converter.bx_block_to_block(bx_block, transaction_service)
            except MessageConversionError as e:
                block_stats.add_block_event_by_block_hash(
                    e.msg_hash,
                    BlockStatEventType.BLOCK_CONVERSION_FAILED,
                    network_num=connection.network_num,
                    broadcast_type=BroadcastMessageType.CONSENSUS,
                    conversion_type=e.conversion_type.value
                )
                transaction_service.on_block_cleaned_up(e.msg_hash)
                connection.log_warning(log_messages.FAILED_TO_DECOMPRESS_BLOCK_ONT_CONSENSUS, e.msg_hash, e)
                return
        else:
            connection.log_warning(log_messages.LACK_BLOCKCHAIN_CONNECTION_ONT_CONSENSUS)
            return

        block_hash = block_info.block_hash
        all_sids = block_info.short_ids

        if encrypted_block_hash_hex is not None:
            block_stats.add_block_event_by_block_hash(block_hash,
                                                      BlockStatEventType.BLOCK_TO_ENC_BLOCK_MATCH,
                                                      matching_block_hash=encrypted_block_hash_hex,
                                                      matching_block_type=StatBlockType.ENCRYPTED.value,
                                                      network_num=connection.network_num,
                                                      broadcast_type=BroadcastMessageType.CONSENSUS)

        self.cancel_hold_timeout(block_hash, connection)

        if recovered:
            block_stats.add_block_event_by_block_hash(block_hash,
                                                      BlockStatEventType.BLOCK_RECOVERY_COMPLETED,
                                                      network_num=connection.network_num,
                                                      broadcast_type=BroadcastMessageType.CONSENSUS,
                                                      more_info=str(recovered_txs_source))

        # Duplicate of a block already seen: record the event, close out BDN
        # handling, and track the short ids as seen.
        if block_hash in self._node.blocks_seen.contents:
            block_stats.add_block_event_by_block_hash(block_hash, BlockStatEventType.BLOCK_DECOMPRESSED_IGNORE_SEEN,
                                                      start_date_time=block_info.start_datetime,
                                                      end_date_time=block_info.end_datetime,
                                                      network_num=connection.network_num,
                                                      broadcast_type=BroadcastMessageType.CONSENSUS,
                                                      prev_block_hash=block_info.prev_block_hash,
                                                      original_size=block_info.original_size,
                                                      compressed_size=block_info.compressed_size,
                                                      txs_count=block_info.txn_count,
                                                      blockchain_network=self._node.opts.blockchain_network,
                                                      blockchain_protocol=self._node.opts.blockchain_protocol,
                                                      matching_block_hash=block_info.compressed_block_hash,
                                                      matching_block_type=StatBlockType.COMPRESSED.value,
                                                      more_info=stats_format.duration(block_info.duration_ms))
            self._node.track_block_from_bdn_handling_ended(block_hash)
            transaction_service.track_seen_short_ids(block_hash, all_sids)
            connection.log_info(
                "Discarding duplicate consensus block {} from the BDN.",
                block_hash
            )
            return

        if not recovered:
            connection.log_info("Received consensus block {} from the BDN.", block_hash)
        else:
            connection.log_info("Successfully recovered consensus block {}.", block_hash)

        if block_message is not None:
            # Full decompression succeeded: queue (or update) the block.
            block_stats.add_block_event_by_block_hash(block_hash, BlockStatEventType.BLOCK_DECOMPRESSED_SUCCESS,
                                                      start_date_time=block_info.start_datetime,
                                                      end_date_time=block_info.end_datetime,
                                                      network_num=connection.network_num,
                                                      broadcast_type=BroadcastMessageType.CONSENSUS,
                                                      prev_block_hash=block_info.prev_block_hash,
                                                      original_size=block_info.original_size,
                                                      compressed_size=block_info.compressed_size,
                                                      txs_count=block_info.txn_count,
                                                      blockchain_network=self._node.opts.blockchain_network,
                                                      blockchain_protocol=self._node.opts.blockchain_protocol,
                                                      matching_block_hash=block_info.compressed_block_hash,
                                                      matching_block_type=StatBlockType.COMPRESSED.value,
                                                      peer=connection.peer_desc,
                                                      more_info="Consensus compression rate {}, Decompression time {}, "
                                                                "Queued behind {} blocks".format(
                                                          stats_format.percentage(block_info.compression_rate),
                                                          stats_format.duration(block_info.duration_ms),
                                                          self._node.block_queueing_service_manager.get_length_of_each_queuing_service_stats_format()))

            if recovered or block_hash in self._node.block_queueing_service_manager:
                self._node.block_queueing_service_manager.update_recovered_block(block_hash, block_message)
            else:
                self._node.block_queueing_service_manager.push(block_hash, block_message)

            self._node.block_recovery_service.cancel_recovery_for_block(block_hash)
            # self._node.blocks_seen.add(block_hash)
            transaction_service.track_seen_short_ids(block_hash, all_sids)
        else:
            # Decompression incomplete: some short ids / hashes are unknown.
            if block_hash in self._node.block_queueing_service_manager and not recovered:
                connection.log_trace("Handling already queued consensus block again. Ignoring.")
                return

            self._node.block_recovery_service.add_block(bx_block, block_hash, unknown_sids, unknown_hashes)
            block_stats.add_block_event_by_block_hash(block_hash,
                                                      BlockStatEventType.BLOCK_DECOMPRESSED_WITH_UNKNOWN_TXS,
                                                      start_date_time=block_info.start_datetime,
                                                      end_date_time=block_info.end_datetime,
                                                      network_num=connection.network_num,
                                                      broadcast_type=BroadcastMessageType.CONSENSUS,
                                                      prev_block_hash=block_info.prev_block_hash,
                                                      original_size=block_info.original_size,
                                                      compressed_size=block_info.compressed_size,
                                                      txs_count=block_info.txn_count,
                                                      blockchain_network=self._node.opts.blockchain_network,
                                                      blockchain_protocol=self._node.opts.blockchain_protocol,
                                                      matching_block_hash=block_info.compressed_block_hash,
                                                      matching_block_type=StatBlockType.COMPRESSED.value,
                                                      more_info="{} sids, {} hashes".format(
                                                          len(unknown_sids), len(unknown_hashes)))

            connection.log_info("Consensus block {} requires short id recovery. Querying BDN...", block_hash)

            self.start_transaction_recovery(unknown_sids, unknown_hashes, block_hash, connection)
            if recovered:
                # should never happen –– this should not be called on blocks that have not recovered
                connection.log_error(log_messages.BLOCK_DECOMPRESSION_FAILURE_ONT_CONSENSUS,
                                     block_hash)
            else:
                self._node.block_queueing_service_manager.push(block_hash, waiting_for_recovery=True)