    def setUp(self):
        self.conn_pool1 = ConnectionPool()

        self.fileno1 = 1
        self.ip1 = "123.123.123.123"
        self.port1 = 1000
        self.node1 = MockNode(
            helpers.get_common_opts(1001, external_ip="128.128.128.128"))
        self.node_id1 = str(uuid.uuid1())
        self.conn1 = MockConnection(
            MockSocketConnection(self.fileno1,
                                 ip_address=self.ip1,
                                 port=self.port1), self.node1)

        self.fileno2 = 5
        self.ip2 = "234.234.234.234"
        self.port2 = 2000
        self.node2 = MockNode(
            helpers.get_common_opts(1003, external_ip="321.321.321.321"))
        self.node_id2 = str(uuid.uuid1())
        self.conn2 = MockConnection(
            MockSocketConnection(self.fileno2,
                                 ip_address=self.ip2,
                                 port=self.port2), self.node2)

        self.fileno3 = 6
        self.ip3 = "234.234.234.234"
        self.port3 = 3000
        self.node3 = MockNode(
            helpers.get_common_opts(1003, external_ip="213.213.213.213"))
        self.node_id3 = str(uuid.uuid1())
        self.conn3 = MockConnection(
            MockSocketConnection(self.fileno3,
                                 ip_address=self.ip3,
                                 port=self.port3), self.node3)
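
The fixtures above only construct the connections; a test then registers them with the pool before exercising lookups. A minimal sketch of that pattern, assuming the ConnectionPool API used throughout these examples (add, has_connection, get_by_fileno):

    def test_add_and_lookup(self):
        # Register each mock connection under its fileno, ip, and port.
        self.conn_pool1.add(self.fileno1, self.ip1, self.port1, self.conn1)
        self.conn_pool1.add(self.fileno2, self.ip2, self.port2, self.conn2)

        # Lookups by (ip, port) and by fileno should resolve to the same object.
        self.assertTrue(self.conn_pool1.has_connection(self.ip1, self.port1))
        self.assertEqual(self.conn1, self.conn_pool1.get_by_fileno(self.fileno1))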
Example #2
    def __init__(self, opts: Namespace):
        logger.debug("Initializing node of type: {}", self.NODE_TYPE)

        self.set_node_config_opts_from_sdn(opts)
        self.opts = opts
        self.connection_queue: Deque[Tuple[str, int]] = deque()
        self.disconnect_queue: Deque[DisconnectRequest] = deque()
        self.outbound_peers = opts.outbound_peers[:]

        self.connection_pool = ConnectionPool()

        self.should_force_exit = False

        self.num_retries_by_ip: Dict[Tuple[str, int], int] = defaultdict(int)

        # Handle termination gracefully
        signal.signal(signal.SIGTERM, self._kill_node)
        signal.signal(signal.SIGINT, self._kill_node)
        signal.signal(signal.SIGSEGV, self._kill_node)

        # Event handling queue for delayed events
        self.alarm_queue = AlarmQueue()

        self.init_throughput_logging()
        self.init_node_info_logging()
        self.init_memory_stats_logging()
        self.init_block_stats_logging()
        self.init_tx_stats_logging()

        # TODO: clean this up alongside outputbuffer holding time
        # this is Nagle's algorithm and we need to implement it properly
        # flush buffers regularly because of output buffer holding time
        self.alarm_queue.register_approx_alarm(
            self.FLUSH_SEND_BUFFERS_INTERVAL,
            constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME,
            self.flush_all_send_buffers)

        self.network_num = opts.blockchain_network_num
        self.broadcast_service = self.get_broadcast_service()

        # convert the setting from MB to bytes
        self.next_report_mem_usage_bytes = self.opts.dump_detailed_report_at_memory_usage * 1024 * 1024

        if opts.dump_removed_short_ids:
            os.makedirs(opts.dump_removed_short_ids_path, exist_ok=True)

        # Each time a network has an update regarding txs, blocks, etc., record it in a dict
        # so we can verify whether the node lost its connection to the requested relay.

        self.last_sync_message_received_by_network: Dict[int, float] = {}

        opts.has_fully_updated_tx_service = False
        self.alarm_queue.register_alarm(constants.TX_SERVICE_SYNC_PROGRESS_S,
                                        self._sync_tx_services)
        self._check_sync_relay_connections_alarm_id = self.alarm_queue.register_alarm(
            constants.LAST_MSG_FROM_RELAY_THRESHOLD_S,
            self._check_sync_relay_connections)
        self._transaction_sync_timeout_alarm_id = self.alarm_queue.register_alarm(
            constants.TX_SERVICE_CHECK_NETWORKS_SYNCED_S,
            self._transaction_sync_timeout)
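
A convention worth noting in this constructor: AlarmQueue callbacks appear to control rescheduling through their return value. Returning a positive number of seconds re-registers the alarm after that interval, while returning constants.CANCEL_ALARMS unschedules it, as the flush_all_send_buffers and _connection_timeout implementations further down suggest. A minimal sketch under that assumption (both methods are hypothetical):

    def _heartbeat(self) -> int:
        # A positive return value asks the queue to fire again that many seconds out.
        logger.debug("Node heartbeat.")
        return 10

    def _one_shot_check(self) -> int:
        # CANCEL_ALARMS tells the queue not to reschedule this alarm.
        return constants.CANCEL_ALARMS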
Example #3
def update(conn_pool: ConnectionPool, use_ext: bool, src_ver: str,
           ip_address: str, continent: str, country: str,
           update_required: bool, blockchain_peers: Set[BlockchainPeerInfo],
           account_id: Optional[str],
           quota_level: Optional[int]) -> Diagnostics:
    path = config.get_data_file(STATUS_FILE_NAME)
    if not os.path.exists(path):
        initialize(use_ext, src_ver, ip_address, continent, country,
                   update_required, account_id, quota_level)
    diagnostics = _load_status_from_file(use_ext, src_ver, ip_address,
                                         continent, country, update_required,
                                         account_id, quota_level)
    analysis = diagnostics.analysis
    network = analysis.network

    for network_type, individual_network in network.iter_network_type_pairs():
        for conn in individual_network:
            if conn.get_connection_state() == ConnectionState.DISCONNECTED or \
                    not conn_pool.has_connection(conn.ip_address, int(conn.port)):
                network.remove_connection(conn, network_type)

    for conn_type in CONN_TYPES:
        for conn in conn_pool.get_by_connection_types([conn_type]):
            network.add_connection(conn.CONNECTION_TYPE, conn.peer_desc,
                                   str(conn.file_no), conn.peer_id)

    for blockchain_peer in blockchain_peers:
        blockchain_ip_endpoint = IpEndpoint(blockchain_peer.ip,
                                            blockchain_peer.port)
        blockchain_conn = None
        if conn_pool.has_connection(blockchain_peer.ip, blockchain_peer.port):
            blockchain_conn = conn_pool.get_by_ipport(blockchain_peer.ip,
                                                      blockchain_peer.port)
        if blockchain_conn:
            network.add_connection(ConnectionType.BLOCKCHAIN_NODE,
                                   str(blockchain_ip_endpoint),
                                   str(blockchain_conn.file_no),
                                   blockchain_conn.peer_id)
        else:
            network.add_connection(ConnectionType.BLOCKCHAIN_NODE,
                                   str(blockchain_ip_endpoint), None, None)

    summary = network.get_summary(ip_address, continent, country,
                                  update_required, account_id, quota_level)
    assert summary.gateway_status is not None
    # pyre-fixme[16]: `Optional` has no attribute `value`.
    gateway_status.state(summary.gateway_status.value)
    diagnostics = Diagnostics(summary, analysis)

    _save_status_to_file(diagnostics)
    return diagnostics
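
For context, a caller would typically invoke update periodically with the node's live connection pool so that the status file tracks connection churn. A hypothetical invocation, assuming Diagnostics exposes its summary as an attribute; every argument value below is a placeholder:

diagnostics = update(
    conn_pool=node.connection_pool,
    use_ext=False,
    src_ver="1.0.0",
    ip_address="127.0.0.1",
    continent="NA",
    country="United States",
    update_required=False,
    blockchain_peers=set(),
    account_id=None,
    quota_level=None,
)
print(diagnostics.summary.gateway_status)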
    def setUp(self):
        self.node = MockNode(helpers.get_common_opts(8888))
        # noinspection PyTypeChecker
        connection = helpers.create_connection(MockConnection,
                                               self.node,
                                               port=9999)
        self.node.connection_pool = ConnectionPool()
        self.node.connection_pool.add(1, connection.peer_ip,
                                      connection.peer_port, connection)
        node_info_statistics.set_node(self.node)
    def setUp(self):
        self.node = MockGatewayNode(gateway_helpers.get_gateway_opts(8000))
        self.node.connection_pool = ConnectionPool()
        self.blockchain_sync_service = BlockchainSyncService(
            self.node, {BtcMessageType.GET_HEADERS: BtcMessageType.HEADERS})
        self.blockchain_connection = MockConnection(1, (LOCALHOST, 8001),
                                                    self.node)
        self.blockchain_connection.state |= ConnectionState.ESTABLISHED
        self.node.node_conn = self.blockchain_connection
        self.alarms = []
    def test_get_by_connection_types_performance(self):
        log_config.set_level([
            "bxcommon.connections.abstract_node",
            "bxcommon.services.transaction_service"
        ], LogLevel.INFO)
        conn_pool = ConnectionPool()
        self.conn1.CONNECTION_TYPE = ConnectionType.EXTERNAL_GATEWAY
        self.conn2.CONNECTION_TYPE = ConnectionType.RELAY_BLOCK
        self.conn3.CONNECTION_TYPE = ConnectionType.RELAY_ALL
        number_of_iteration = 100
        for i in range(40):
            ip = f"{i}.{i}.{i}.{i}"
            node = MockNode(helpers.get_common_opts(i, external_ip=ip))
            conn = MockConnection(
                MockSocketConnection(i, ip_address=ip, port=i), node)
            if i % 7 == 0:
                conn.CONNECTION_TYPE = ConnectionType.RELAY_BLOCK
            elif i % 5 == 0:
                conn.CONNECTION_TYPE = ConnectionType.RELAY_TRANSACTION
            elif i % 3 == 0:
                conn.CONNECTION_TYPE = ConnectionType.INTERNAL_GATEWAY
            else:
                conn.CONNECTION_TYPE = ConnectionType.EXTERNAL_GATEWAY
            conn_pool.add(i, ip, i, conn)

        timeit_get_by_connections_types_one_type = timeit.timeit(
            lambda: conn_pool.get_by_connection_types([ConnectionType.GATEWAY]),
            number=number_of_iteration)
        timeit_get_by_connections_types_two_types = timeit.timeit(
            lambda: conn_pool.get_by_connection_types(
                [ConnectionType.GATEWAY, ConnectionType.RELAY_TRANSACTION]),
            number=number_of_iteration)
        print(
            f"\ntimeit_get_by_connections_types_one_type # 2:  {timeit_get_by_connections_types_one_type * 1000 / number_of_iteration:.4f}ms, "
            f"#connections: {len(list(conn_pool.get_by_connection_types([ConnectionType.GATEWAY])))}"
            f"\ntimeit_get_by_connections_types_two_types # 2: {timeit_get_by_connections_types_two_types * 1000 / number_of_iteration:.4f}ms, "
            f"#connections: {len(list(conn_pool.get_by_connection_types([ConnectionType.GATEWAY, ConnectionType.RELAY_TRANSACTION])))}"
        )

        print("*****")
        for c in conn_pool.get_by_connection_types(
            [ConnectionType.GATEWAY, ConnectionType.RELAY_TRANSACTION]):
            print(f"connection: {c}, connection type: {c.CONNECTION_TYPE}")
Example #7
def update(conn_pool: ConnectionPool, use_ext: bool, src_ver: str, ip_address: str, continent: str, country: str,
           update_required: bool, account_id: Optional[str], quota_level: Optional[int]) -> Diagnostics:
    path = config.get_data_file(STATUS_FILE_NAME)
    if not os.path.exists(path):
        initialize(use_ext, src_ver, ip_address, continent, country, update_required, account_id, quota_level)
    diagnostics = _load_status_from_file(use_ext, src_ver, ip_address, continent, country, update_required, account_id, quota_level)
    analysis = diagnostics.analysis
    network = analysis.network

    network.clear()

    for conn_type in CONN_TYPES:
        for conn in conn_pool.get_by_connection_types([conn_type]):
            network.add_connection(conn.CONNECTION_TYPE, conn.peer_desc, conn.file_no, conn.peer_id)

    summary = network.get_summary(ip_address, continent, country, update_required, account_id, quota_level)
    assert summary.gateway_status is not None
    # pyre-fixme[16]: `Optional` has no attribute `value`.
    gateway_status.state(summary.gateway_status.value)
    diagnostics = Diagnostics(summary, analysis)

    _save_status_to_file(diagnostics)
    return diagnostics
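
Note the design difference from the variant in Example #3: instead of pruning disconnected entries individually, this version calls network.clear() and rebuilds the whole connection list from the pool on every invocation, trading a little extra work for simpler bookkeeping.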
class AbstractNode:
    __meta__ = ABCMeta
    FLUSH_SEND_BUFFERS_INTERVAL = constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME * 2
    NODE_TYPE: Optional[NodeType] = None

    def __init__(self,
                 opts: CommonOpts,
                 node_ssl_service: NodeSSLService,
                 connection_pool: Optional[ConnectionPool] = None):
        self.node_ssl_service = node_ssl_service
        logger.debug("Initializing node of type: {}", self.NODE_TYPE)
        self.server_endpoints = [
            IpEndpoint(constants.LISTEN_ON_IP_ADDRESS, opts.external_port),
            # TODO: remove this after v1 is no longer supported
            IpEndpoint(constants.LISTEN_ON_IP_ADDRESS, opts.non_ssl_port)
        ]

        self.set_node_config_opts_from_sdn(opts)
        self.opts: CommonOpts = opts
        self.pending_connection_requests: Set[ConnectionPeerInfo] = set()
        self.pending_connection_attempts: Set[ConnectionPeerInfo] = set()
        self.outbound_peers: Set[OutboundPeerModel] = opts.outbound_peers.copy()

        if connection_pool is not None:
            self.connection_pool = connection_pool
        else:
            self.connection_pool = ConnectionPool()

        self.should_force_exit = False
        self.should_restart_on_high_memory = False

        self.num_retries_by_ip: Dict[Tuple[str, int], int] = defaultdict(int)

        # Event handling queue for delayed events
        self.alarm_queue = AlarmQueue()

        self.init_node_status_logging()
        self.init_throughput_logging()
        self.init_node_info_logging()
        self.init_memory_stats_logging()
        self.init_block_stats_logging()
        self.init_tx_stats_logging()

        # TODO: clean this up alongside outputbuffer holding time
        # this is Nagle's algorithm and we need to implement it properly
        # flush buffers regularly because of output buffer holding time
        self.alarm_queue.register_approx_alarm(
            self.FLUSH_SEND_BUFFERS_INTERVAL,
            constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME,
            self.flush_all_send_buffers)

        self.network_num = opts.blockchain_network_num
        self.broadcast_service = self.get_broadcast_service()

        # convert the setting from MB to bytes
        self.next_report_mem_usage_bytes = self.opts.dump_detailed_report_at_memory_usage * 1024 * 1024

        if opts.dump_removed_short_ids:
            os.makedirs(opts.dump_removed_short_ids_path, exist_ok=True)

        # Each time a network has an update regarding txs, blocks, etc., record it in a dict
        # so we can verify whether the node lost its connection to the requested relay.

        self.last_sync_message_received_by_network: Dict[int, float] = {}

        self.start_sync_time: Optional[float] = None
        self.sync_metrics: Dict[int, Counter] = defaultdict(Counter)
        self.sync_short_id_buckets: Dict[int, TransactionShortIdBuckets] = defaultdict(
            TransactionShortIdBuckets)

        opts.has_fully_updated_tx_service = False

        self.check_sync_relay_connections_alarm_id: Optional[AlarmId] = None
        self.transaction_sync_timeout_alarm_id: Optional[AlarmId] = None

        self.requester = ThreadedRequestService(
            # pyre-fixme[16]: `Optional` has no attribute `name`.
            self.NODE_TYPE.name.lower(),
            self.alarm_queue,
            constants.THREADED_HTTP_POOL_SLEEP_INTERVAL_S)

        self._last_responsiveness_check_log_time = time.time()
        self._last_responsiveness_check_details = {}
        self.gc_logging_enabled = False
        self.serialized_message_cache = SerializedMessageCache(
            self.alarm_queue)

        self.alarm_queue.register_alarm(
            constants.RESPONSIVENESS_CHECK_INTERVAL_S,
            self._responsiveness_check_log)

    def get_sdn_address(self):
        """
        Placeholder for the net event loop to get the SDN address (relay only).
        """
        return

    @abstractmethod
    def get_tx_service(self,
                       network_num: Optional[int] = None
                       ) -> TransactionService:
        pass

    @abstractmethod
    def get_outbound_peer_info(self) -> List[ConnectionPeerInfo]:
        pass

    @abstractmethod
    def get_broadcast_service(self) -> BroadcastService:
        pass

    def sync_and_send_request_for_relay_peers(self, network_num: int) -> int:
        """
        Requests potential relay peers from the SDN and merges the list with relays provided on the command line.

        This function retrieves potential_relay_peers_by_network from the SDN,
        then pings each relay in parallel (with a 2-second timeout). Once the
        ping results are in, it picks the best relay and decides whether to
        switch relays.

        The above can take time, so the work is split into several internal
        functions that run on the thread pool to avoid blocking the main thread.
        """

        self.requester.send_threaded_request(
            sdn_http_service.fetch_potential_relay_peers_by_network,
            self.opts.node_id,
            network_num,
            # pyre-fixme[6]: Expected `Optional[Callable[[Future[Any]], Any]]` for 4th parameter `done_callback`
            #  to call `send_threaded_request` but got `BoundMethod[Callable(_process_blockchain_network_from_sdn)
            #  [[Named(self, AbstractRelayConnection), Named(get_blockchain_network_future, Future[Any])], Any],
            #  AbstractRelayConnection]`.
            done_callback=self.process_potential_relays_from_sdn)

        return constants.CANCEL_ALARMS

    def process_potential_relays_from_sdn(self,
                                          get_potential_relays_future: Future):
        pass

    @abstractmethod
    def build_connection(
        self, socket_connection: AbstractSocketConnectionProtocol
    ) -> Optional[AbstractConnection]:
        pass

    @abstractmethod
    def on_failed_connection_retry(self, ip: str, port: int,
                                   connection_type: ConnectionType,
                                   connection_state: ConnectionState) -> None:
        pass

    def connection_exists(self,
                          ip: str,
                          port: int,
                          peer_id: Optional[str] = None) -> bool:
        return self.connection_pool.has_connection(ip, port, peer_id)

    def on_connection_added(
        self, socket_connection: AbstractSocketConnectionProtocol
    ) -> Optional[AbstractConnection]:
        """
        Notifies the node that a connection is coming in.
        """
        # If we're already connected to the remote peer, log the event and request disconnect.
        self.pending_connection_attempts.discard(
            ConnectionPeerInfo(socket_connection.endpoint,
                               AbstractConnection.CONNECTION_TYPE))
        ip, port = socket_connection.endpoint
        peer_info = None
        if socket_connection.is_ssl:
            try:
                peer_info = self._get_socket_peer_info(socket_connection)
            except ConnectionAuthenticationError as e:
                logger.warning(log_messages.FAILED_TO_AUTHENTICATE_CONNECTION,
                               ip, port, e)
                socket_connection.mark_for_close(should_retry=False)
                return None

            if self.connection_exists(ip, port, peer_info.peer_id):
                logger.debug(
                    "Duplicate connection attempted to: {}:{} (peer id: {}). "
                    "Dropping.", ip, port, peer_info.peer_id)
                socket_connection.mark_for_close(should_retry=False)
                return None
        elif self.connection_exists(ip, port):
            logger.debug(
                "Duplicate connection attempt to {}:{}. Dropping.",
                ip,
                port,
            )
            socket_connection.mark_for_close(should_retry=False)
            return None

        connection = self._initialize_connection(socket_connection)
        if connection is None:
            return None

        if peer_info is not None:
            connection.on_connection_authenticated(peer_info)
            self.connection_pool.index_conn_node_id(peer_info.peer_id,
                                                    connection)

        connection.state |= ConnectionState.INITIALIZED
        logger.debug("Successfully initialized connection: {}", connection)
        return connection

    def on_connection_closed(self,
                             file_no: int,
                             mark_connection_for_close: bool = False):
        conn = self.connection_pool.get_by_fileno(file_no)

        if conn is None:
            logger.debug(
                "Unexpectedly closed connection not in pool. file_no: {}",
                file_no)
            return

        if mark_connection_for_close:
            conn.mark_for_close()

        self._destroy_conn(conn)

    def log_refused_connection(self, peer_info: ConnectionPeerInfo,
                               error: str):
        logger.info("Failed to connect to: {}, {}.", peer_info, error)

    def log_closed_connection(self, connection: AbstractConnection):
        if ConnectionState.ESTABLISHED not in connection.state:
            logger.info("Failed to connect to: {}.", connection)
        else:
            logger.info("Closed connection: {}", connection)

    def on_updated_peers(self,
                         outbound_peer_models: Set[OutboundPeerModel]) -> None:
        if not outbound_peer_models:
            logger.debug("Got peer update with no peers.")
            return

        logger.debug("Processing updated outbound peers: {}.",
                     outbound_peer_models)

        # Remove peers not in updated list or from command-line args.
        old_peers = self.outbound_peers

        # TODO: remove casting to set once the type of outbound peer model is verified globally
        remove_peers = set(old_peers) - set(outbound_peer_models) - set(
            self.opts.outbound_peers)

        for rem_peer in remove_peers:
            if self.connection_pool.has_connection(rem_peer.ip, rem_peer.port,
                                                   rem_peer.node_id):
                rem_conn = self.connection_pool.get_by_ipport(
                    rem_peer.ip, rem_peer.port, rem_peer.node_id)
                if rem_conn:
                    rem_conn.mark_for_close(False)

        # Connect to peers not in our known pool
        for peer in outbound_peer_models:
            peer_ip = peer.ip
            peer_port = peer.port
            if self.should_connect_to_new_outbound_peer(peer):
                self.enqueue_connection(
                    peer_ip,
                    peer_port,
                    convert.peer_node_to_connection_type(
                        # pyre-fixme[6]: Expected `NodeType` for 1st param but got
                        #  `Optional[NodeType]`.
                        self.NODE_TYPE,
                        peer.node_type))
        self.outbound_peers = outbound_peer_models

    def on_updated_node_model(self, new_node_model: NodeModel):
        """
        Updates `opts` according to a newly updated `NodeModel`.
        This is currently unused on gateways.
        """
        logger.debug("Updating node attributes with new model: {}",
                     new_node_model)
        for key, val in new_node_model.__dict__.items():
            logger.trace("Updating attribute '{}': {} => {}", key,
                         self.opts.__dict__.get(key, 'None'), val)
            self.opts.__dict__[key] = val

    def should_connect_to_new_outbound_peer(
            self, outbound_peer: OutboundPeerModel) -> bool:
        return not self.connection_pool.has_connection(
            outbound_peer.ip, outbound_peer.port, outbound_peer.node_id)

    def on_bytes_received(self, file_no: int,
                          bytes_received: Union[bytearray, bytes]) -> None:
        """
        Hands received bytes to the connection identified by file_no.

        :param file_no: file descriptor of the receiving connection
        :param bytes_received: the bytes read off the socket
        """
        conn = self.connection_pool.get_by_fileno(file_no)

        if conn is None:
            logger.debug(
                "Received bytes for connection not in pool. file_no: {0}",
                file_no)
            return

        if not conn.is_alive():
            conn.log_trace("Skipping receiving bytes for closed connection.")
            return

        conn.add_received_bytes(bytes_received)
        conn.process_message()

    def get_bytes_to_send(self, file_no: int) -> Optional[memoryview]:
        conn = self.connection_pool.get_by_fileno(file_no)

        if conn is None:
            logger.debug(
                "Request to get bytes for connection not in pool. file_no: {}",
                file_no)
            return None

        if not conn.is_alive():
            conn.log_trace("Skipping sending bytes for closed connection.")
            return None

        return conn.get_bytes_to_send()

    def on_bytes_sent(self, file_no: int, bytes_sent: int):
        conn = self.connection_pool.get_by_fileno(file_no)

        if conn is None:
            logger.debug(
                "Bytes sent call for connection not in pool. file_no: {0}",
                file_no)
            return

        conn.advance_sent_bytes(bytes_sent)

    def on_bytes_written_to_socket(self, file_no: int,
                                   bytes_written: int) -> None:
        conn = self.connection_pool.get_by_fileno(file_no)

        if conn is None:
            logger.debug("Bytes written call for connection not in pool: {}",
                         file_no)
            return

        conn.advance_bytes_written_to_socket(bytes_written)

    def fire_alarms(self) -> float:
        time_to_next = self.alarm_queue.fire_ready_alarms()
        if time_to_next is not None:
            return time_to_next
        else:
            return constants.MAX_EVENT_LOOP_TIMEOUT

    def force_exit(self):
        """
        Indicates if node should trigger exit in event loop. Primarily used for testing.

        Typically requires one additional socket call (e.g. connecting to this node via a socket)
        to finish terminating the event loop.
        """
        return self.should_force_exit

    async def close(self):
        logger.info("Node is closing! Closing everything.")

        shutdown_task = asyncio.ensure_future(self.close_all_connections())
        try:
            await asyncio.wait_for(shutdown_task,
                                   constants.NODE_SHUTDOWN_TIMEOUT_S)
        except Exception as e:  # pylint: disable=broad-except
            logger.exception(
                "Node shutdown failed due to an error: {}, force closing!", e)
        self.requester.close()
        self.cleanup_memory_stats_logging()

    async def close_all_connections(self):
        """
        Closes all connections from the node
        """
        for _, conn in self.connection_pool.items():
            conn.mark_for_close(should_retry=False)

    def broadcast(
        self,
        msg: AbstractMessage,
        broadcasting_conn: Optional[AbstractConnection] = None,
        prepend_to_queue: bool = False,
        connection_types: Optional[List[ConnectionType]] = None
    ) -> List[AbstractConnection]:
        """
        Broadcasts message msg to connections of the specified type except requester.
        """
        if connection_types is None:
            connection_types = [ConnectionType.RELAY_ALL]
        options = BroadcastOptions(broadcasting_conn, prepend_to_queue,
                                   connection_types)
        connections = self.broadcast_service.broadcast(msg, options)
        return connections

    def enqueue_connection(self, ip: str, port: int,
                           connection_type: ConnectionType):
        """
        Queues a connection up for the event loop to open a socket for.
        """
        peer_info = ConnectionPeerInfo(IpEndpoint(ip, port), connection_type)
        if peer_info in self.pending_connection_attempts:
            logger.debug(
                "Not adding {}, waiting until the pending connection attempt completes",
                peer_info)
        else:
            logger.trace("Enqueuing connection: {}.", peer_info)
            self.pending_connection_requests.add(peer_info)

    def dequeue_connection_requests(self) -> Optional[Set[ConnectionPeerInfo]]:
        """
        Returns the pending connection requests for the event loop to initiate a socket connection to.
        """
        if self.pending_connection_requests:
            pending_connection_requests = self.pending_connection_requests
            self.pending_connection_requests = set()
            self.pending_connection_attempts.update(
                pending_connection_requests)
            return pending_connection_requests
        else:
            return None

    def continue_retrying_connection(self, ip: str, port: int,
                                     connection_type: ConnectionType) -> bool:
        """
        Indicates whether to continue retrying connection. For most connections, this will will stop
        at the maximum of constants.MAX_CONNECT_RETRIES, but some connections should be always retried
        unless there's some fatal socket error.
        """
        is_sdn = ConnectionType.SDN in connection_type
        return is_sdn or self.num_retries_by_ip[
            (ip, port)] < constants.MAX_CONNECT_RETRIES

    def init_node_status_logging(self):
        node_stats_service.set_node(self)
        self.alarm_queue.register_alarm(constants.FIRST_STATS_INTERVAL_S,
                                        node_stats_service.flush_info)

    def init_throughput_logging(self):
        throughput_statistics.set_node(self)
        self.alarm_queue.register_alarm(constants.FIRST_STATS_INTERVAL_S,
                                        throughput_statistics.flush_info)

    def init_node_info_logging(self):
        node_info_statistics.set_node(self)
        self.alarm_queue.register_alarm(constants.FIRST_STATS_INTERVAL_S,
                                        node_info_statistics.flush_info)

    def cleanup_memory_stats_logging(self):
        memory_statistics.stop_recording()

    def init_block_stats_logging(self):
        block_stats.set_node(self)

    def init_tx_stats_logging(self):
        tx_stats.set_node(self)

    def flush_all_send_buffers(self):
        for conn in self.connection_pool:
            if conn.socket_connection.can_send:
                conn.socket_connection.send()
        return self.FLUSH_SEND_BUFFERS_INTERVAL

    def record_mem_stats(self, low_threshold: int, medium_threshold: int,
                         high_threshold: int):
        """
        When overridden, records identified memory stats and flushes them to std out
        :returns memory stats flush interval
        """
        total_memory = memory_utils.get_app_memory_usage()
        if total_memory > low_threshold:
            gc.collect()
            total_memory = memory_utils.get_app_memory_usage()
        self._record_mem_stats(total_memory > medium_threshold)

        return memory_statistics.flush_info(high_threshold)

    def _record_mem_stats(self, include_data_structure_memory: bool = False):
        if include_data_structure_memory:
            self.connection_pool.log_connection_pool_mem_stats()

    def set_node_config_opts_from_sdn(self, opts: CommonOpts) -> None:
        blockchain_networks: Dict[
            int, BlockchainNetworkModel] = opts.blockchain_networks
        for blockchain_network in blockchain_networks.values():
            tx_stats.configure_network(
                blockchain_network.network_num,
                blockchain_network.tx_percent_to_log_by_hash,
                blockchain_network.tx_percent_to_log_by_sid)
        bdn_tx_to_bx_tx.init(blockchain_networks)

    def dump_memory_usage(self, total_memory: int, threshold: int):
        if total_memory > threshold and logger.isEnabledFor(LogLevel.DEBUG):
            node_size = self.get_node_memory_size()
            memory_logger.debug(
                "Application consumed {} bytes which is over set limit {} bytes. Detailed memory report: {}",
                total_memory, threshold, node_size)

    def get_node_memory_size(self):
        return memory_utils.get_detailed_object_size(self)

    def on_input_received(self, file_no: int) -> bool:
        """handles an input event from the event loop

        :param file_no: the socket connection file_no
        :return: True if the connection is receivable, otherwise False
        """
        connection = self.connection_pool.get_by_fileno(file_no)
        if connection is None:
            return False
        return connection.on_input_received()

    async def init(self) -> None:
        self.requester.start()

    def handle_connection_closed(self, should_retry: bool,
                                 peer_info: ConnectionPeerInfo,
                                 connection_state: ConnectionState) -> None:
        self.pending_connection_attempts.discard(peer_info)
        peer_ip, peer_port = peer_info.endpoint
        connection_type = peer_info.connection_type
        if should_retry and self.continue_retrying_connection(
                peer_ip, peer_port, connection_type):
            self.alarm_queue.register_alarm(
                self._get_next_retry_timeout(peer_ip, peer_port),
                self._retry_init_client_socket, peer_ip, peer_port,
                connection_type)
        else:
            self.on_failed_connection_retry(peer_ip, peer_port,
                                            connection_type, connection_state)

    def get_server_ssl_ctx(self) -> SSLContext:
        return self.node_ssl_service.create_ssl_context(
            SSLCertificateType.PRIVATE)

    def get_target_ssl_ctx(self, endpoint: IpEndpoint,
                           connection_type: ConnectionType) -> SSLContext:
        logger.trace("Fetching SSL certificate for: {} ({}).", endpoint,
                     connection_type)
        return self.node_ssl_service.create_ssl_context(
            SSLCertificateType.PRIVATE)

    @abstractmethod
    def reevaluate_transaction_streamer_connection(self) -> None:
        raise NotImplementedError

    @abstractmethod
    def on_new_subscriber_request(self) -> None:
        raise NotImplementedError

    @abstractmethod
    def init_memory_stats_logging(self):
        raise NotImplementedError

    @abstractmethod
    def sync_tx_services(self):
        self.start_sync_time = time.time()
        self.sync_metrics = defaultdict(Counter)

    @abstractmethod
    def _transaction_sync_timeout(self) -> int:
        pass

    @abstractmethod
    def check_sync_relay_connections(self, conn: AbstractConnection) -> int:
        pass

    def _get_socket_peer_info(
            self,
            sock: AbstractSocketConnectionProtocol) -> AuthenticatedPeerInfo:
        assert sock.is_ssl
        assert self.NODE_TYPE is not None

        cert = sock.get_peer_certificate()
        node_type = extensions_factory.get_node_type(cert)
        try:
            connection_type = convert.peer_node_to_connection_type(
                # pyre-fixme[6]: Expected `NodeType` for 1st param but got
                #  `Optional[NodeType]`.
                self.NODE_TYPE,
                node_type)
        except (KeyError, ValueError):
            raise ConnectionAuthenticationError(
                f"Peer ssl certificate ({cert}) has an invalid node type: {node_type}!"
            )
        peer_id = extensions_factory.get_node_id(cert)
        if peer_id is None:
            raise ConnectionAuthenticationError(
                f"Peer ssl certificate ({cert}) does not contain a node id!")

        account_id = extensions_factory.get_account_id(cert)
        node_privileges = extensions_factory.get_node_privileges(cert)
        return AuthenticatedPeerInfo(connection_type, peer_id, account_id,
                                     node_privileges)

    def _should_log_closed_connection(self,
                                      _connection: AbstractConnection) -> bool:
        return True

    def _destroy_conn(self, conn: AbstractConnection):
        """
        Clean up the associated connection and update all data structures tracking it.

        Do not call this function directly to close a connection, unless circumstances do not allow cleanly
        shutting down the node via event loop lifecycle hooks (e.g. immediate shutdown).

        In connection handlers, use `AbstractConnection#mark_for_close`, and the connection will be cleaned up as part
        of event handling.
        In other node lifecycle events, use `enqueue_disconnect` to allow the event loop to trigger connection cleanup.

        :param conn connection to destroy
        """

        if self._should_log_closed_connection(conn):
            self.log_closed_connection(conn)

        should_retry = SocketConnectionStates.DO_NOT_RETRY not in conn.socket_connection.state

        logger.debug("Breaking connection to {}. Attempting retry: {}", conn,
                     should_retry)
        conn.dispose()
        self.connection_pool.delete(conn)
        self.handle_connection_closed(
            should_retry,
            ConnectionPeerInfo(conn.endpoint, conn.CONNECTION_TYPE),
            conn.state)

    def _initialize_connection(
        self, socket_connection: AbstractSocketConnectionProtocol
    ) -> Optional[AbstractConnection]:
        conn_obj = self.build_connection(socket_connection)
        ip, port = socket_connection.endpoint
        if conn_obj is not None:
            logger.debug("Connecting to: {}...", conn_obj)

            self.alarm_queue.register_alarm(constants.CONNECTION_TIMEOUT,
                                            self._connection_timeout, conn_obj)
            self.connection_pool.add(socket_connection.file_no, ip, port,
                                     conn_obj)

            if conn_obj.CONNECTION_TYPE == ConnectionType.SDN:
                # pyre-fixme[16]: `AbstractNode` has no attribute `sdn_connection`.
                self.sdn_connection = conn_obj
        else:
            logger.warning(log_messages.UNABLE_TO_DETERMINE_CONNECTION_TYPE,
                           ip, port)
            socket_connection.mark_for_close(should_retry=False)

        return conn_obj

    def on_network_synced(self, network_num: int) -> None:
        if network_num in self.last_sync_message_received_by_network:
            del self.last_sync_message_received_by_network[network_num]

    def on_fully_updated_tx_service(self):
        logger.debug(
            "Synced transaction state with BDN, last_sync_message_received_by_network: {}",
            self.last_sync_message_received_by_network)
        self.opts.has_fully_updated_tx_service = True

    def _connection_timeout(self, conn: AbstractConnection) -> int:
        """
        Check if the connection is established.
        If it is not established, we give up for untrusted connections and try again for trusted connections.
        """

        logger.trace("Checking connection status: {}", conn)

        if ConnectionState.ESTABLISHED in conn.state:
            logger.trace("Connection is still established: {}", conn)
            self.num_retries_by_ip[(conn.peer_ip, conn.peer_port)] = 0
            return constants.CANCEL_ALARMS

        if not conn.is_alive():
            logger.trace("Connection has already been marked for close: {}",
                         conn)
            return constants.CANCEL_ALARMS

        # Clean up the old connection and retry it if it is trusted
        logger.trace("Connection has timed out: {}", conn)
        conn.mark_for_close()

        # It is connect_to_address's job to schedule this function.
        return constants.CANCEL_ALARMS

    def _kill_node(self, _signum, _stack):
        """
        Kills the node immediately
        """
        self.should_force_exit = True
        raise TerminationError("Node killed.")

    def _get_next_retry_timeout(self, ip: str, port: int) -> int:
        """
        Returns Fibonacci(n), where n is the number of retry attempts + 1, up to a max of Fibonacci(8) == 13.
        """
        golden_ratio = (1 + 5**.5) / 2
        sequence_number = min(self.num_retries_by_ip[(ip, port)] + 1,
                              constants.MAX_CONNECT_TIMEOUT_INCREASE)
        return int((golden_ratio**sequence_number -
                    (1 - golden_ratio)**sequence_number) / 5**.5)

    def _retry_init_client_socket(self, ip: str, port: int,
                                  connection_type: ConnectionType):
        self.num_retries_by_ip[(ip, port)] += 1

        logger.debug("Retrying {} connection to {}:{}. Attempt #{}.",
                     connection_type, ip, port,
                     self.num_retries_by_ip[(ip, port)])
        self.enqueue_connection(ip, port, connection_type)

        return 0

    def _responsiveness_check_log(self):
        details = ""
        if self.gc_logging_enabled:
            gen0_stats, gen1_stats, gen2_stats = gc.get_stats()

            last_gen0_collections = self._last_responsiveness_check_details.get(
                "gen0_collections", 0)
            last_gen1_collections = self._last_responsiveness_check_details.get(
                "gen1_collections", 0)
            last_gen2_collections = self._last_responsiveness_check_details.get(
                "gen2_collections", 0)

            gen0_diff = gen0_stats["collections"] - last_gen0_collections
            gen1_diff = gen1_stats["collections"] - last_gen1_collections
            gen2_diff = gen2_stats["collections"] - last_gen2_collections

            details = (
                f"gen0_collections: {gen0_diff}, gen1_collections: {gen1_diff}, "
                f"gen2_collections: {gen2_diff}")
            self._last_responsiveness_check_details.update({
                "gen0_collections": gen0_stats["collections"],
                "gen1_collections": gen1_stats["collections"],
                "gen2_collections": gen2_stats["collections"],
            })

        performance_utils.log_operation_duration(
            performance_troubleshooting_logger,
            "Responsiveness Check",
            self._last_responsiveness_check_log_time,
            constants.RESPONSIVENESS_CHECK_INTERVAL_S +
            constants.RESPONSIVENESS_CHECK_DELAY_WARN_THRESHOLD_S,
            details=details)
        self._last_responsiveness_check_log_time = time.time()
        return constants.RESPONSIVENESS_CHECK_INTERVAL_S
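
Because AbstractNode leaves connection construction and the sync hooks abstract, a concrete node must implement them before it can be instantiated. A skeletal, purely illustrative subclass; MyConnection, MyBroadcastService, and the _tx_service attribute are hypothetical:

class MyNode(AbstractNode):
    NODE_TYPE = NodeType.GATEWAY  # assumes GATEWAY is a NodeType member

    def get_tx_service(self, network_num: Optional[int] = None) -> TransactionService:
        return self._tx_service  # hypothetical attribute initialized elsewhere

    def get_outbound_peer_info(self) -> List[ConnectionPeerInfo]:
        return [
            ConnectionPeerInfo(IpEndpoint(peer.ip, peer.port), ConnectionType.RELAY_ALL)
            for peer in self.outbound_peers
        ]

    def get_broadcast_service(self) -> BroadcastService:
        return MyBroadcastService(self.connection_pool)

    def build_connection(
        self, socket_connection: AbstractSocketConnectionProtocol
    ) -> Optional[AbstractConnection]:
        return MyConnection(socket_connection, self)

    def on_failed_connection_retry(self, ip: str, port: int,
                                   connection_type: ConnectionType,
                                   connection_state: ConnectionState) -> None:
        logger.debug("Giving up on {} connection to {}:{}", connection_type, ip, port)

    # ...plus the remaining abstract methods: reevaluate_transaction_streamer_connection,
    # on_new_subscriber_request, init_memory_stats_logging, sync_tx_services,
    # _transaction_sync_timeout, and check_sync_relay_connections.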
Example #10
    def setUp(self) -> None:
        self.node = MockNode(helpers.get_common_opts(8000))
        self.connection_pool = ConnectionPool()
        self.sut = TestBroadcastService(self.connection_pool)
Example #11
class BroadcastServiceTest(AbstractTestCase):
    def setUp(self) -> None:
        self.node = MockNode(helpers.get_common_opts(8000))
        self.connection_pool = ConnectionPool()
        self.sut = TestBroadcastService(self.connection_pool)

    def _add_connection(self, fileno: int, port: int, network_num: int,
                        connection_type=MockConnection.CONNECTION_TYPE) -> MockConnection:
        conn = MockConnection(MockSocketConnection(fileno, self.node, ip_address=LOCALHOST, port=port), self.node)
        conn.network_num = network_num
        conn.on_connection_established()
        conn.CONNECTION_TYPE = connection_type

        self.connection_pool.add(fileno, LOCALHOST, port, conn)
        return conn

    def test_broadcast_to_network_numbers(self):
        all_matching_network_num = self._add_connection(0, 9000, ALL_NETWORK_NUM)
        matching_network_num = self._add_connection(1, 9001, 1)
        not_matching_network_num = self._add_connection(2, 9002, 2)

        message = BroadcastMessage(Sha256Hash(helpers.generate_hash()), 1, "", BroadcastMessageType.BLOCK, False,
                                   helpers.generate_bytearray(250))
        self.sut.broadcast(message, BroadcastOptions(connection_types=[MockConnection.CONNECTION_TYPE]))

        self.assertIn(message, all_matching_network_num.enqueued_messages)
        self.assertIn(message, matching_network_num.enqueued_messages)
        self.assertNotIn(message, not_matching_network_num.enqueued_messages)

    def test_broadcast_to_connection_type(self):
        relay_all_conn = self._add_connection(0, 9000, ALL_NETWORK_NUM, ConnectionType.RELAY_ALL)
        relay_block_conn = self._add_connection(1, 9001, ALL_NETWORK_NUM, ConnectionType.RELAY_BLOCK)
        relay_transaction_conn = self._add_connection(2, 9002, ALL_NETWORK_NUM, ConnectionType.RELAY_TRANSACTION)
        gateway_conn = self._add_connection(3, 9003, ALL_NETWORK_NUM, ConnectionType.EXTERNAL_GATEWAY)

        block_message = BroadcastMessage(Sha256Hash(helpers.generate_hash()), ALL_NETWORK_NUM, "",
                                         BroadcastMessageType.BLOCK, False, helpers.generate_bytearray(250))
        self.sut.broadcast(block_message, BroadcastOptions(connection_types=[ConnectionType.RELAY_BLOCK]))

        tx_message = BroadcastMessage(Sha256Hash(helpers.generate_hash()), ALL_NETWORK_NUM, "",
                                      BroadcastMessageType.BLOCK, False, helpers.generate_bytearray(250))
        self.sut.broadcast(tx_message, BroadcastOptions(connection_types=[ConnectionType.RELAY_TRANSACTION]))

        gateway_message = BroadcastMessage(Sha256Hash(helpers.generate_hash()), ALL_NETWORK_NUM, "",
                                           BroadcastMessageType.BLOCK, False, helpers.generate_bytearray(250))
        self.sut.broadcast(gateway_message, BroadcastOptions(connection_types=[ConnectionType.GATEWAY]))

        self.assertIn(block_message, relay_all_conn.enqueued_messages)
        self.assertIn(block_message, relay_block_conn.enqueued_messages)
        self.assertNotIn(block_message, relay_transaction_conn.enqueued_messages)
        self.assertNotIn(block_message, gateway_conn.enqueued_messages)

        self.assertIn(tx_message, relay_all_conn.enqueued_messages)
        self.assertNotIn(tx_message, relay_block_conn.enqueued_messages)
        self.assertIn(tx_message, relay_transaction_conn.enqueued_messages)
        self.assertNotIn(tx_message, gateway_conn.enqueued_messages)

        self.assertNotIn(gateway_message, relay_all_conn.enqueued_messages)
        self.assertNotIn(gateway_message, relay_block_conn.enqueued_messages)
        self.assertNotIn(gateway_message, relay_transaction_conn.enqueued_messages)
        self.assertIn(gateway_message, gateway_conn.enqueued_messages)
Example #12
class AbstractNode:
    __meta__ = ABCMeta
    FLUSH_SEND_BUFFERS_INTERVAL = constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME * 2
    NODE_TYPE = None

    def __init__(self, opts: Namespace):
        logger.debug("Initializing node of type: {}", self.NODE_TYPE)

        self.set_node_config_opts_from_sdn(opts)
        self.opts = opts
        self.connection_queue: Deque[Tuple[str, int]] = deque()
        self.disconnect_queue: Deque[DisconnectRequest] = deque()
        self.outbound_peers = opts.outbound_peers[:]

        self.connection_pool = ConnectionPool()

        self.should_force_exit = False

        self.num_retries_by_ip: Dict[Tuple[str, int], int] = defaultdict(int)

        # Handle termination gracefully
        signal.signal(signal.SIGTERM, self._kill_node)
        signal.signal(signal.SIGINT, self._kill_node)
        signal.signal(signal.SIGSEGV, self._kill_node)

        # Event handling queue for delayed events
        self.alarm_queue = AlarmQueue()

        self.init_throughput_logging()
        self.init_node_info_logging()
        self.init_memory_stats_logging()
        self.init_block_stats_logging()
        self.init_tx_stats_logging()

        # TODO: clean this up alongside outputbuffer holding time
        # this is Nagle's algorithm and we need to implement it properly
        # flush buffers regularly because of output buffer holding time
        self.alarm_queue.register_approx_alarm(
            self.FLUSH_SEND_BUFFERS_INTERVAL,
            constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME,
            self.flush_all_send_buffers)

        self.network_num = opts.blockchain_network_num
        self.broadcast_service = self.get_broadcast_service()

        # convert the setting from MB to bytes
        self.next_report_mem_usage_bytes = self.opts.dump_detailed_report_at_memory_usage * 1024 * 1024

        if opts.dump_removed_short_ids:
            os.makedirs(opts.dump_removed_short_ids_path, exist_ok=True)

        # Each time a network has an update regarding txs, blocks, etc., record it in a dict
        # so we can verify whether the node lost its connection to the requested relay.

        self.last_sync_message_received_by_network: Dict[int, float] = {}

        opts.has_fully_updated_tx_service = False
        self.alarm_queue.register_alarm(constants.TX_SERVICE_SYNC_PROGRESS_S,
                                        self._sync_tx_services)
        self._check_sync_relay_connections_alarm_id = self.alarm_queue.register_alarm(
            constants.LAST_MSG_FROM_RELAY_THRESHOLD_S,
            self._check_sync_relay_connections)
        self._transaction_sync_timeout_alarm_id = self.alarm_queue.register_alarm(
            constants.TX_SERVICE_CHECK_NETWORKS_SYNCED_S,
            self._transaction_sync_timeout)

    def get_sdn_address(self):
        """
        Placeholder for the net event loop to get the SDN address (relay only).
        """
        return

    @abstractmethod
    def get_tx_service(self, network_num=None):
        pass

    @abstractmethod
    def get_outbound_peer_addresses(self):
        pass

    @abstractmethod
    def get_broadcast_service(self) -> BroadcastService:
        pass

    def connection_exists(self, ip, port):
        return self.connection_pool.has_connection(ip, port)

    def on_connection_added(self, socket_connection: SocketConnection, ip: str,
                            port: int, from_me: bool):
        """
        Notifies the node that a connection is coming in.
        """
        fileno = socket_connection.fileno()

        # If we're already connected to the remote peer, log the event and request disconnect.
        if self.connection_exists(ip, port):
            logger.debug(
                "Duplicate connection attempted to: {0}:{1}. Dropping.", ip,
                port)

            # Schedule dropping the added connection and keep the old one.
            self.enqueue_disconnect(socket_connection, False)
        else:
            self._initialize_connection(socket_connection, ip, port, from_me)

    def on_connection_initialized(self, fileno: int):
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Unexpectedly initialized connection not in pool. Fileno: {0}",
                fileno)
            return

        logger.debug("Connection initialized: {}", conn)
        conn.state |= ConnectionState.INITIALIZED

        # Reset num_retries when a connection is established, to reset the Fibonacci
        # logic that determines the next retry interval
        self.num_retries_by_ip[(conn.peer_ip, conn.peer_port)] = 0

    def on_connection_closed(self, fileno: int, retry_conn: bool):
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Unexpectedly closed connection not in pool. Fileno: {0}",
                fileno)
            return

        logger.info("Closed connection: {}", conn)
        self._destroy_conn(conn, retry_connection=retry_conn)

    @abstractmethod
    def send_request_for_relay_peers(self):
        pass

    def on_updated_peers(self, outbound_peer_models):
        if not outbound_peer_models:
            logger.debug("Got peer update with no peers.")
            return

        logger.debug("Processing updated outbound peers: {}.",
                     outbound_peer_models)

        # Remove peers not in updated list or from command-line args.
        remove_peers = []
        old_peers = self.outbound_peers
        for old_peer in old_peers:
            if not (any(old_peer.ip == fixed_peer.ip
                        and old_peer.port == fixed_peer.port
                        for fixed_peer in self.opts.outbound_peers)
                    or any(new_peer.ip == old_peer.ip
                           and new_peer.port == old_peer.port
                           for new_peer in outbound_peer_models)):
                remove_peers.append(old_peer)

        for rem_peer in remove_peers:
            if self.connection_pool.has_connection(rem_peer.ip, rem_peer.port):
                rem_conn = self.connection_pool.get_by_ipport(
                    rem_peer.ip, rem_peer.port)
                if rem_conn:
                    self.mark_connection_for_close(rem_conn, False)

        # Connect to peers not in our known pool
        for peer in outbound_peer_models:
            peer_ip = peer.ip
            peer_port = peer.port
            if not self.connection_pool.has_connection(peer_ip, peer_port):
                self.enqueue_connection(peer_ip, peer_port)
        self.outbound_peers = outbound_peer_models

    def on_updated_sid_space(self, sid_start, sid_end):
        """
        Placeholder interface to receive sid updates from SDN over sockets and pass to relay node
        """

        return

    def on_bytes_received(self, fileno: int,
                          bytes_received: bytearray) -> None:
        """
        :param fileno:
        :param bytes_received:
        :return: True if the node should continue receiving bytes from the remote peer. False otherwise.
        """
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Received bytes for connection not in pool. Fileno: {0}",
                fileno)
            return

        if conn.state & ConnectionState.MARK_FOR_CLOSE:
            conn.log_trace("Skipping receiving bytes for closed connection.")
            return

        conn.add_received_bytes(bytes_received)

    def on_finished_receiving(self, fileno):
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Received bytes for connection not in pool. Fileno: {0}",
                fileno)
            return

        conn.process_message()

        if conn.state & ConnectionState.MARK_FOR_CLOSE:
            self.enqueue_disconnect(conn.socket_connection, conn.from_me)

    def get_bytes_to_send(self, fileno):
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Request to get bytes for connection not in pool. Fileno: {0}",
                fileno)
            return

        if conn.state & ConnectionState.MARK_FOR_CLOSE:
            conn.log_trace("Skipping sending bytes for closed connection.")
            return

        return conn.get_bytes_to_send()

    def on_bytes_sent(self, fileno, bytes_sent):
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Bytes sent call for connection not in pool. Fileno: {0}",
                fileno)
            return

        conn.advance_sent_bytes(bytes_sent)

    def get_sleep_timeout(self, triggered_by_timeout, first_call=False):
        # TODO: remove first_call from this function. You can just fire all of the ready alarms on every call
        # to get the timeout.
        if first_call:
            _, timeout = self.alarm_queue.time_to_next_alarm()

            # Timeout can be negative during debugging
            if timeout < 0:
                timeout = constants.DEFAULT_SLEEP_TIMEOUT

            return timeout
        else:
            time_to_next = self.alarm_queue.fire_ready_alarms(
                triggered_by_timeout)
            if self.connection_queue or self.disconnect_queue:
                # TODO: this should be constants.MIN_SLEEP_TIMEOUT, which is different for kqueues and epoll.
                # We want to process connection/disconnection requests ASAP.
                time_to_next = constants.DEFAULT_SLEEP_TIMEOUT

            return time_to_next

    def force_exit(self):
        """
        Indicates whether the node should trigger an exit in the event loop. Primarily used for testing.

        Typically requires one additional socket call (e.g. connecting to this node via a socket)
        to finish terminating the event loop.
        """
        return self.should_force_exit

    def close(self):
        logger.error("Node is closing! Closing everything.")

        # Copy the items: _destroy_conn removes connections from the pool.
        for _fileno, conn in list(self.connection_pool.items()):
            self._destroy_conn(conn, force_destroy=True)
        self.cleanup_memory_stats_logging()

    def broadcast(self, msg: AbstractMessage, broadcasting_conn: Optional[AbstractConnection] = None,
                  prepend_to_queue: bool = False, connection_types: Optional[List[ConnectionType]] = None) \
            -> List[AbstractConnection]:
        """
        Broadcasts message msg to connections of the specified types, excluding the broadcasting connection.
        """
        if connection_types is None:
            connection_types = [ConnectionType.RELAY_ALL]
        options = BroadcastOptions(broadcasting_conn, prepend_to_queue,
                                   connection_types)
        return self.broadcast_service.broadcast(msg, options)
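
    # Hedged usage sketch for broadcast() (msg and source_conn are
    # illustrative names, not from the source):
    #
    #     node.broadcast(msg, broadcasting_conn=source_conn,
    #                    prepend_to_queue=True,
    #                    connection_types=[ConnectionType.RELAY_BLOCK])
    #
    # sends msg to every block relay except source_conn, jumping each peer's
    # send queue; omitting connection_types falls back to RELAY_ALL.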

    @abstractmethod
    def build_connection(self, socket_connection: SocketConnection, ip: str, port: int, from_me: bool = False) \
            -> Optional[AbstractConnection]:
        pass

    def enqueue_connection(self, ip: str, port: int):
        """
        Queues a connection up for the event loop to open a socket for.
        """
        logger.trace("Enqueuing connection to {}:{}", ip, port)
        self.connection_queue.append((ip, port))

    def enqueue_disconnect(self,
                           socket_connection: SocketConnection,
                           should_retry: Optional[bool] = None):
        """
        Queues up a disconnect for the event loop, which closes the socket and destroys the connection object.

        This should always be called with a value provided for `should_retry`, unless the connection object is unknown
        (e.g. from the event loop or SocketConnection classes).
        """
        fileno = socket_connection.fileno()
        logger.trace("Enqueuing disconnect from {}", fileno)

        if should_retry is None:
            conn = self.connection_pool.get_by_fileno(fileno)

            if conn is None:
                logger.debug(
                    "Unexpectedly tried to enqueue a disconnect without a connection object on fileno: {}",
                    fileno)
                should_retry = False
            else:
                conn.log_debug(
                    "Connection close triggered by socket layer or event loop."
                )
                conn.mark_for_close()
                should_retry = conn.from_me

        socket_connection.mark_for_close()
        self.disconnect_queue.append(DisconnectRequest(fileno, should_retry))
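
    # Example of the inference above: a close triggered by the socket layer
    # arrives with should_retry=None; an outbound pooled connection
    # (conn.from_me is True) is then queued with retry enabled, while an
    # unknown fileno falls back to should_retry=False.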

    def mark_connection_for_close(self,
                                  connection: AbstractConnection,
                                  should_retry: Optional[bool] = None):
        if should_retry is None:
            should_retry = connection.from_me
        connection.mark_for_close()
        self.enqueue_disconnect(connection.socket_connection, should_retry)

    def pop_next_connection_address(self) -> Optional[Tuple[str, int]]:
        """
        Returns the next connection address for the event loop to initiate a socket connection to.
        """
        if self.connection_queue:
            return self.connection_queue.popleft()

        return

    def pop_next_disconnect_connection(self) -> Optional[DisconnectRequest]:
        """
        Returns the next disconnect request whose socket connection the event loop should destroy.

        The event loop is expected to call `on_connection_closed` afterward.
        """
        if self.disconnect_queue:
            return self.disconnect_queue.popleft()

        return
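
    # Hedged sketch of how an event loop is expected to drain the two queues
    # above (event_loop and its methods are hypothetical; DisconnectRequest
    # field names follow the enqueue_disconnect call above):
    #
    #     while (address := node.pop_next_connection_address()) is not None:
    #         event_loop.open_socket(*address)       # -> on_connection_added
    #     while (request := node.pop_next_disconnect_connection()) is not None:
    #         event_loop.close_socket(request.fileno)
    #         node.on_connection_closed(request.fileno, request.should_retry)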

    def _destroy_conn(self,
                      conn,
                      retry_connection: bool = False,
                      force_destroy: bool = False):
        """
        Clean up the associated connection and update all data structures tracking it.

        Do not call this function directly to close a connection, unless circumstances do not allow cleanly shutting
        down the node via event loop lifecycle hooks (e.g. immediate shutdown).

        In connection handlers, use `AbstractConnection#mark_for_close`, and the connection will be cleaned up as part
        of event handling.
        In other node lifecycle events, use `enqueue_disconnect` to allow the event loop to trigger connection cleanup.

        :param conn: connection to destroy
        :param retry_connection: whether the connection should be retried
        :param force_destroy: ignore connection state and force close. Avoid setting this except for fatal errors or
                              socket errors.
        """
        if force_destroy:
            conn.mark_for_close()

        if not conn.state & ConnectionState.MARK_FOR_CLOSE:
            raise ValueError(
                "Attempted to close connection that was not MARK_FOR_CLOSE.")

        logger.debug("Breaking connection to {}. Attempting retry: {}", conn,
                     retry_connection)
        conn.close()
        self.connection_pool.delete(conn)

        peer_ip, peer_port = conn.peer_ip, conn.peer_port
        if retry_connection:
            self.alarm_queue.register_alarm(
                self._get_next_retry_timeout(peer_ip, peer_port),
                self._retry_init_client_socket, peer_ip, peer_port,
                conn.CONNECTION_TYPE)
        else:
            self.on_failed_connection_retry(peer_ip, peer_port,
                                            conn.CONNECTION_TYPE)

    def should_retry_connection(self, ip: str, port: int,
                                connection_type: ConnectionType) -> bool:
        is_sdn = bool(connection_type & ConnectionType.SDN)
        return is_sdn or self.num_retries_by_ip[
            (ip, port)] < constants.MAX_CONNECT_RETRIES

    @abstractmethod
    def on_failed_connection_retry(self, ip: str, port: int,
                                   connection_type: ConnectionType) -> None:
        pass

    def init_throughput_logging(self):
        throughput_statistics.set_node(self)
        self.alarm_queue.register_alarm(constants.FIRST_STATS_INTERVAL_S,
                                        throughput_statistics.flush_info)

    def init_node_info_logging(self):
        node_info_statistics.set_node(self)
        self.alarm_queue.register_alarm(constants.FIRST_STATS_INTERVAL_S,
                                        node_info_statistics.flush_info)

    def init_memory_stats_logging(self):
        memory_statistics.set_node(self)
        memory_statistics.start_recording(self.record_mem_stats)

    def cleanup_memory_stats_logging(self):
        memory_statistics.stop_recording()

    def init_block_stats_logging(self):
        block_stats.set_node(self)

    def init_tx_stats_logging(self):
        tx_stats.set_node(self)

    def flush_all_send_buffers(self):
        for conn in self.connection_pool:
            if conn.socket_connection.can_send:
                conn.socket_connection.send()
        return self.FLUSH_SEND_BUFFERS_INTERVAL
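
    # The return value above follows the AlarmQueue convention used in this
    # class: returning a positive number of seconds reschedules the alarm,
    # while returning 0 (constants.CANCEL_ALARMS appears to be its alias)
    # cancels it; compare _connection_timeout and _retry_init_client_socket
    # below.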

    def record_mem_stats(self):
        """
        When overridden, records identified memory stats and flushes them to std out
        :returns memory stats flush interval
        """
        self.connection_pool.log_connection_pool_mem_stats()
        return memory_statistics.flush_info()

    def set_node_config_opts_from_sdn(self, opts):

        # TODO: currently hard-coding configuration values
        opts.stats_calculate_actual_size = False
        opts.log_detailed_block_stats = False

        blockchain_networks: List[
            BlockchainNetworkModel] = opts.blockchain_networks
        for blockchain_network in blockchain_networks:
            tx_stats.configure_network(blockchain_network.network_num,
                                       blockchain_network.tx_percent_to_log)

    def dump_memory_usage(self):
        total_mem_usage = memory_utils.get_app_memory_usage()

        if total_mem_usage >= self.next_report_mem_usage_bytes:
            node_size = memory_utils.get_detailed_object_size(self)
            memory_logger.statistics(
                "Application consumed {} bytes which is over set limit {} bytes. Detailed memory report: {}",
                total_mem_usage, self.next_report_mem_usage_bytes,
                json_utils.serialize(node_size))
            self.next_report_mem_usage_bytes = total_mem_usage + constants.MEMORY_USAGE_INCREASE_FOR_NEXT_REPORT_BYTES

    def on_input_received(self, file_no: int) -> bool:
        """handles an input event from the event loop

        :param file_no: the socket connection file_no
        :return: True if the connection is receivable, otherwise False
        """
        connection = self.connection_pool.get_by_fileno(file_no)
        if connection is None:
            return False
        return connection.on_input_received()

    def _initialize_connection(self, socket_connection: SocketConnection,
                               ip: str, port: int, from_me: bool):
        conn_obj = self.build_connection(socket_connection, ip, port, from_me)
        if conn_obj is not None:
            logger.info("Connecting to: {}...", conn_obj)

            self.alarm_queue.register_alarm(constants.CONNECTION_TIMEOUT,
                                            self._connection_timeout, conn_obj)
            self.connection_pool.add(socket_connection.fileno(), ip, port,
                                     conn_obj)

            if conn_obj.CONNECTION_TYPE == ConnectionType.SDN:
                self.sdn_connection = conn_obj
        else:
            logger.warning(
                "Could not determine expected connection type for {}:{}. Disconnecting...",
                ip, port)
            self.enqueue_disconnect(socket_connection, from_me)

    def on_fully_updated_tx_service(self):
        logger.info("Synced transaction state with BDN.")
        self.opts.has_fully_updated_tx_service = True
        sdn_http_service.submit_sync_txs_event(self.opts.node_id)

    def _connection_timeout(self, conn: AbstractConnection):
        """
        Check if the connection is established.
        If it is not established, we give up for untrusted connections and try again for trusted connections.
        """

        logger.trace("Checking connection status: {}", conn)

        if conn.state & ConnectionState.ESTABLISHED:
            logger.trace("Connection is still established: {}", conn)

            return constants.CANCEL_ALARMS

        if conn.state & ConnectionState.MARK_FOR_CLOSE:
            logger.trace("Connection has already been marked for close: {}",
                         conn)
            return constants.CANCEL_ALARMS

        # Clean up the old connection and retry it if it is trusted
        logger.trace("Connection has timed out: {}", conn)
        self.mark_connection_for_close(conn)

        # It is connect_to_address's job to schedule this function.
        return constants.CANCEL_ALARMS

    def _kill_node(self, _signum, _stack):
        """
        Kills the node immediately
        """
        self.should_force_exit = True
        raise TerminationError("Node killed.")

    def _get_next_retry_timeout(self, ip: str, port: int) -> int:
        """
        Returns Fibonnaci(n), where n is the number of retry attempts + 1, up to max of Fibonacci(8) == 13.
        """
        golden_ratio = (1 + 5**.5) / 2
        sequence_number = min(self.num_retries_by_ip[(ip, port)] + 1,
                              constants.MAX_CONNECT_TIMEOUT_INCREASE)
        return int((golden_ratio**sequence_number -
                    (1 - golden_ratio)**sequence_number) / 5**.5)

    def _retry_init_client_socket(self, ip, port, connection_type):
        self.num_retries_by_ip[(ip, port)] += 1

        if self.should_retry_connection(ip, port, connection_type):
            logger.debug("Retrying {} connection to {}:{}. Attempt #{}.",
                         connection_type, ip, port,
                         self.num_retries_by_ip[(ip, port)])
            self.enqueue_connection(ip, port)
            # When retrying the SDN connection there is no need to resync transactions on this node; just update
            # the 'has_fully_updated_tx_service' attribute on the SDN, since it was set to False when the
            # connection was lost.
            if connection_type == ConnectionType.SDN:
                self.on_fully_updated_tx_service()
        else:
            del self.num_retries_by_ip[(ip, port)]
            logger.debug(
                "Maximum retry attempts exceeded. Dropping {} connection to {}:{}.",
                connection_type, ip, port)
            self.on_failed_connection_retry(ip, port, connection_type)

        return 0

    @abstractmethod
    def _sync_tx_services(self):
        pass

    @abstractmethod
    def _transaction_sync_timeout(self):
        pass

    @abstractmethod
    def _check_sync_relay_connections(self):
        pass
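
# A runnable, dependency-free sketch of the Fibonacci backoff implemented by
# _get_next_retry_timeout above. The cap of 7 is an assumption standing in for
# constants.MAX_CONNECT_TIMEOUT_INCREASE, and round() replaces the original's
# int() truncation for floating-point safety.
def fibonacci_backoff(num_retries: int, cap: int = 7) -> int:
    golden_ratio = (1 + 5 ** 0.5) / 2
    sequence_number = min(num_retries + 1, cap)
    # Binet's formula yields exact Fibonacci numbers at these magnitudes.
    return round((golden_ratio ** sequence_number -
                  (1 - golden_ratio) ** sequence_number) / 5 ** 0.5)

# Successive retries back off along the Fibonacci sequence, then plateau.
assert [fibonacci_backoff(i) for i in range(8)] == [1, 1, 2, 3, 5, 8, 13, 13]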
Example #14
0
class StatusLogTest(AbstractTestCase):
    def setUp(self):
        self.conn_pool = ConnectionPool()
        self.source_version = "v1.0.0"
        self.ip_address = "0.0.0.0"
        self.continent = "NA"
        self.country = "United States"
        self.account_id = None

        self.fileno1 = 1
        self.ip1 = "123.123.123.123"
        self.port1 = 1000
        self.node1 = MockNode(
            helpers.get_common_opts(1001, external_ip="128.128.128.128"))
        self.node_id1 = str(uuid.uuid1())
        self.conn1 = MockConnection(
            MockSocketConnection(self.fileno1,
                                 self.node1,
                                 ip_address=self.ip1,
                                 port=self.port1), self.node1)
        self.conn1.CONNECTION_TYPE = ConnectionType.RELAY_BLOCK

        self.fileno2 = 5
        self.ip2 = "234.234.234.234"
        self.port2 = 2000
        self.node2 = MockNode(
            helpers.get_common_opts(1003, external_ip="321.321.321.321"))
        self.node_id2 = str(uuid.uuid1())
        self.conn2 = MockConnection(
            MockSocketConnection(self.fileno2,
                                 self.node2,
                                 ip_address=self.ip2,
                                 port=self.port2), self.node2)
        self.conn2.CONNECTION_TYPE = ConnectionType.RELAY_TRANSACTION

        self.fileno3 = 6
        self.ip3 = "234.234.234.234"
        self.port3 = 3000
        self.node3 = MockNode(
            helpers.get_common_opts(1003, external_ip="213.213.213.213"))
        self.node_id3 = str(uuid.uuid1())
        self.conn3 = MockConnection(
            MockSocketConnection(self.fileno3,
                                 self.node3,
                                 ip_address=self.ip3,
                                 port=self.port3), self.node3)
        self.conn3.CONNECTION_TYPE = ConnectionType.BLOCKCHAIN_NODE

        self.fileno4 = 8
        self.ip4 = "111.222.111.222"
        self.port4 = 3000
        self.node4 = MockNode(
            helpers.get_common_opts(1003, external_ip="101.101.101.101"))
        self.node_id4 = str(uuid.uuid1())
        self.conn4 = MockConnection(
            MockSocketConnection(self.fileno4,
                                 self.node4,
                                 ip_address=self.ip4,
                                 port=self.port4), self.node4)
        self.conn4.CONNECTION_TYPE = ConnectionType.REMOTE_BLOCKCHAIN_NODE
        self.quota_level = 0
        initialize(False, self.source_version, self.ip_address, self.continent,
                   self.country, False, self.account_id, self.quota_level)

        path = config.get_data_file(STATUS_FILE_NAME)
        self.addCleanup(os.remove, path)

    def test_on_initialize_logger(self):
        summary_loaded, analysis_loaded, environment_loaded, network_loaded = self._load_status_file()
        self.assertEqual(0, len(network_loaded.block_relays))
        self.assertEqual(0, len(network_loaded.transaction_relays))
        self.assertEqual(0, len(network_loaded.blockchain_nodes))
        self.assertEqual(0, len(network_loaded.remote_blockchain_nodes))
        self.assertEqual(summary_loaded.gateway_status, GatewayStatus.OFFLINE)
        self.assertEqual(summary_loaded.account_info,
                         summary.gateway_status_get_account_info(None))
        self.assertEqual(summary_loaded.block_relay_connection_state, None)
        self.assertEqual(summary_loaded.transaction_relay_connection_state,
                         None)
        self.assertEqual(summary_loaded.blockchain_node_connection_state, None)
        self.assertEqual(
            summary_loaded.remote_blockchain_node_connection_state, None)
        self.assertEqual(summary_loaded.update_required, False)
        self.assertEqual(analysis_loaded.gateway_version, self.source_version)
        self.assertEqual(type(analysis_loaded.extensions_check),
                         ExtensionModulesState)
        self.assertEqual(type(environment_loaded.installation_type),
                         InstallationType)

    def test_on_get_connection_state_initialization(self):
        summary_loaded, analysis_loaded, environment_loaded, network_loaded = self._load_status_file()
        self.assertEqual(0, len(network_loaded.block_relays))
        self.assertEqual(0, len(network_loaded.transaction_relays))
        self.assertEqual(0, len(network_loaded.blockchain_nodes))
        self.assertEqual(0, len(network_loaded.remote_blockchain_nodes))

    def test_on_update_one_connection(self):
        self.conn_pool.add(self.fileno1, self.ip1, self.port1, self.conn1)
        update(self.conn_pool, False, self.source_version, self.ip_address,
               self.continent, self.country, True, self.account_id,
               self.quota_level)
        summary_loaded, analysis_loaded, environment_loaded, network_loaded = self._load_status_file()
        block_relay_loaded = network_loaded.block_relays[0]
        self.assertEqual(0, len(network_loaded.transaction_relays))
        self.assertEqual(0, len(network_loaded.blockchain_nodes))
        self.assertEqual(0, len(network_loaded.remote_blockchain_nodes))
        self.assertEqual(
            summary_loaded,
            network_loaded.get_summary(self.ip_address, self.continent,
                                       self.country, True, self.account_id,
                                       self.quota_level))
        self.assertEqual(summary_loaded.gateway_status,
                         GatewayStatus.WITH_ERRORS)
        self.assertEqual(summary_loaded.account_info,
                         summary.gateway_status_get_account_info(None))
        self.assertEqual(summary_loaded.block_relay_connection_state,
                         ConnectionState.ESTABLISHED)
        self.assertEqual(summary_loaded.transaction_relay_connection_state,
                         ConnectionState.DISCONNECTED)
        self.assertEqual(summary_loaded.blockchain_node_connection_state,
                         ConnectionState.DISCONNECTED)
        self.assertEqual(
            summary_loaded.remote_blockchain_node_connection_state,
            ConnectionState.DISCONNECTED)
        self.assertEqual(
            summary_loaded.quota_level,
            summary.gateway_status_get_quota_level(self.quota_level))
        self.assertEqual(summary_loaded.update_required, True)
        self.assertEqual(block_relay_loaded.ip_address, self.ip1)
        self.assertEqual(block_relay_loaded.port, str(self.port1))
        self.assertEqual(block_relay_loaded.fileno, str(self.fileno1))

    def test_on_update_all_connections(self):
        self._add_connections()
        update(self.conn_pool, False, self.source_version, self.ip_address,
               self.continent, self.country, False, self.account_id,
               self.quota_level)
        summary_loaded, analysis_loaded, environment_loaded, network_loaded = self._load_status_file()
        block_relay_loaded = network_loaded.block_relays[0]
        transaction_relay_loaded = network_loaded.transaction_relays[0]
        blockchain_node_loaded = network_loaded.blockchain_nodes[0]
        remote_blockchain_node_loaded = network_loaded.remote_blockchain_nodes[0]
        self.assertEqual(
            summary_loaded,
            network_loaded.get_summary(self.ip_address, self.continent,
                                       self.country, False, self.account_id,
                                       self.quota_level))
        self.assertEqual(summary_loaded.gateway_status, GatewayStatus.ONLINE)
        self.assertEqual(summary_loaded.account_info,
                         summary.gateway_status_get_account_info(None))
        self.assertEqual(summary_loaded.block_relay_connection_state,
                         ConnectionState.ESTABLISHED)
        self.assertEqual(summary_loaded.transaction_relay_connection_state,
                         ConnectionState.ESTABLISHED)
        self.assertEqual(summary_loaded.blockchain_node_connection_state,
                         ConnectionState.ESTABLISHED)
        self.assertEqual(
            summary_loaded.remote_blockchain_node_connection_state,
            ConnectionState.ESTABLISHED)
        self.assertEqual(
            summary_loaded.quota_level,
            summary.gateway_status_get_quota_level(self.quota_level))
        self.assertEqual(block_relay_loaded.ip_address, self.ip1)
        self.assertEqual(block_relay_loaded.port, str(self.port1))
        self.assertEqual(block_relay_loaded.fileno, str(self.fileno1))
        self.assertEqual(transaction_relay_loaded.ip_address, self.ip2)
        self.assertEqual(transaction_relay_loaded.port, str(self.port2))
        self.assertEqual(transaction_relay_loaded.fileno, str(self.fileno2))
        self.assertEqual(blockchain_node_loaded.ip_address, self.ip3)
        self.assertEqual(blockchain_node_loaded.port, str(self.port3))
        self.assertEqual(blockchain_node_loaded.fileno, str(self.fileno3))
        self.assertEqual(remote_blockchain_node_loaded.ip_address, self.ip4)
        self.assertEqual(remote_blockchain_node_loaded.port, str(self.port4))
        self.assertEqual(remote_blockchain_node_loaded.fileno,
                         str(self.fileno4))

    def test_on_check_extensions(self):
        initialize(True, self.source_version, self.ip_address, self.continent,
                   self.country, False, self.account_id, self.quota_level)
        _, analysis_loaded, _, _ = self._load_status_file()
        self.assertNotEqual(analysis_loaded.extensions_check,
                            ExtensionModulesState.UNAVAILABLE)
        self.assertEqual(type(analysis_loaded.extensions_check),
                         ExtensionModulesState)

    def test_on_network_update_connection(self):
        new_desc3 = "1.1.1.1 80"
        new_ip3 = new_desc3.split()[0]
        new_port3 = new_desc3.split()[1]
        new_fileno3 = "10"
        self._add_connections()
        update(self.conn_pool, True, self.source_version, self.ip_address,
               self.continent, self.country, False, self.account_id,
               self.quota_level)
        summary_loaded, _, _, network_loaded = self._load_status_file()
        network_loaded.blockchain_nodes.clear()
        network_loaded.add_connection(ConnectionType.BLOCKCHAIN_NODE,
                                      new_desc3, new_fileno3)
        blockchain_node_loaded = network_loaded.blockchain_nodes[0]
        self.assertEqual(
            summary_loaded,
            network_loaded.get_summary(self.ip_address, self.continent,
                                       self.country, False, self.account_id,
                                       self.quota_level))
        self.assertEqual(summary_loaded.gateway_status, GatewayStatus.ONLINE)
        self.assertEqual(summary_loaded.account_info,
                         summary.gateway_status_get_account_info(None))
        self.assertEqual(
            summary_loaded.quota_level,
            summary.gateway_status_get_quota_level(self.quota_level))
        self.assertEqual(summary_loaded.blockchain_node_connection_state,
                         ConnectionState.ESTABLISHED)
        self.assertEqual(blockchain_node_loaded.ip_address, new_ip3)
        self.assertEqual(blockchain_node_loaded.port, new_port3)
        self.assertEqual(blockchain_node_loaded.fileno, new_fileno3)

    def _add_connections(self):
        self.conn_pool.add(self.fileno1, self.ip1, self.port1, self.conn1)
        self.conn_pool.add(self.fileno2, self.ip2, self.port2, self.conn2)
        self.conn_pool.add(self.fileno3, self.ip3, self.port3, self.conn3)
        self.conn_pool.add(self.fileno4, self.ip4, self.port4, self.conn4)

    def _load_status_file(self) -> Tuple[Summary, Analysis, Environment, Network]:
        path = config.get_data_file(STATUS_FILE_NAME)
        self.assertTrue(os.path.exists(path))
        with open(path, "r", encoding="utf-8") as json_file:
            status_file = json_file.read()
        diagnostics_loaded = model_loader.load_model_from_json(
            Diagnostics, status_file)
        summary_loaded = diagnostics_loaded.summary
        analysis_loaded = diagnostics_loaded.analysis
        network_loaded = analysis_loaded.network
        environment_loaded = analysis_loaded.environment
        return summary_loaded, analysis_loaded, environment_loaded, network_loaded

class ConnectionPoolTest(AbstractTestCase):

    def setUp(self):
        self.conn_pool1 = ConnectionPool()

        self.fileno1 = 1
        self.ip1 = "123.123.123.123"
        self.port1 = 1000
        self.node1 = MockNode(helpers.get_common_opts(1001, external_ip="128.128.128.128"))
        self.node_id1 = str(uuid.uuid1())
        self.conn1 = MockConnection(MockSocketConnection(self.fileno1), (self.ip1, self.port1), self.node1)

        self.fileno2 = 5
        self.ip2 = "234.234.234.234"
        self.port2 = 2000
        self.node2 = MockNode(helpers.get_common_opts(1003, external_ip="321.321.321.321"))
        self.node_id2 = str(uuid.uuid1())
        self.conn2 = MockConnection(MockSocketConnection(self.fileno2), (self.ip2, self.port2), self.node2)

        self.fileno3 = 6
        self.ip3 = "234.234.234.234"
        self.port3 = 3000
        self.node3 = MockNode(helpers.get_common_opts(1003, external_ip="213.213.213.213"))
        self.node_id3 = str(uuid.uuid1())
        self.conn3 = MockConnection(MockSocketConnection(self.fileno3), (self.ip3, self.port3), self.node3)

    def test_add(self):
        self.conn_pool1.add(self.fileno1, self.ip1, self.port1, self.conn1)
        self.assertEqual(self.conn1, self.conn_pool1.by_fileno[self.fileno1])
        self.assertEqual(self.conn1, self.conn_pool1.by_ipport[(self.ip1, self.port1)])
        self.assertEqual(1, self.conn_pool1.count_conn_by_ip[self.ip1])

        with self.assertRaises(AssertionError):
            self.conn_pool1.add(self.fileno1, self.ip1, self.port1, self.conn1)

        self.conn_pool1.add(ConnectionPool.INITIAL_FILENO + 1, "0.0.0.0", self.port1, self.conn1)
        self.assertEqual(ConnectionPool.INITIAL_FILENO * 2, self.conn_pool1.len_fileno)

    def test_update(self):
        self.conn_pool1.add(self.fileno1, self.ip1, self.port1, self.conn1)
        self.conn_pool1.add(self.fileno2, self.ip2, self.port2, self.conn2)
        self.conn_pool1.update_port(self.port1, self.port2, self.conn1)
        self.assertEqual(self.conn1, self.conn_pool1.get_by_ipport(self.ip1, self.port2))
        self.assertFalse(self.conn_pool1.has_connection(self.ip1, self.port1))

    def test_update_connection_type(self):
        self.conn_pool1.add(self.fileno1, self.ip1, self.port1, self.conn1)
        self.conn_pool1.add(self.fileno2, self.ip2, self.port2, self.conn2)

        mock_connections = self.conn_pool1.get_by_connection_type(self.conn1.CONNECTION_TYPE)
        self.assertIn(self.conn1, mock_connections)
        self.assertIn(self.conn2, mock_connections)

        self.conn_pool1.update_connection_type(self.conn1, ConnectionType.RELAY_TRANSACTION)

        mock_connections = self.conn_pool1.get_by_connection_type(self.conn2.CONNECTION_TYPE)
        self.assertNotIn(self.conn1, mock_connections)
        self.assertIn(self.conn2, mock_connections)

        relay_connections = self.conn_pool1.get_by_connection_type(self.conn1.CONNECTION_TYPE)
        self.assertIn(self.conn1, relay_connections)
        self.assertNotIn(self.conn2, relay_connections)

    def test_has_connection(self):
        self._add_connections()
        self.assertTrue(self.conn_pool1.has_connection(self.ip1, self.port1))
        self.assertFalse(self.conn_pool1.has_connection("111.111.111.111", self.port1))
        self.assertFalse(self.conn_pool1.has_connection("111.111.111.111", 1))
        self.assertFalse(self.conn_pool1.has_connection(self.ip1, 1))

    def test_get_byipport(self):
        self._add_connections()
        self.assertEqual(self.conn1, self.conn_pool1.get_by_ipport(self.ip1, self.port1))
        with self.assertRaises(KeyError):
            self.conn_pool1.get_by_ipport(self.ip1, 1)

    def test_get_by_connection_type(self):
        self.conn1.CONNECTION_TYPE = ConnectionType.GATEWAY
        self.conn2.CONNECTION_TYPE = ConnectionType.RELAY_BLOCK
        self.conn3.CONNECTION_TYPE = ConnectionType.RELAY_ALL
        self._add_connections()

        gateway_connections = self.conn_pool1.get_by_connection_type(ConnectionType.GATEWAY)
        self.assertEqual(1, len(gateway_connections))
        self.assertIn(self.conn1, gateway_connections)

        relay_connections = self.conn_pool1.get_by_connection_type(ConnectionType.RELAY_BLOCK)
        self.assertEqual(2, len(relay_connections))
        self.assertIn(self.conn2, relay_connections)
        self.assertIn(self.conn3, relay_connections)

    def test_get_by_connection_types(self):
        self.conn1.CONNECTION_TYPE = ConnectionType.GATEWAY
        self.conn2.CONNECTION_TYPE = ConnectionType.RELAY_BLOCK
        self.conn3.CONNECTION_TYPE = ConnectionType.RELAY_ALL
        self._add_connections()

        gateway_and_relay_block_connections = self.conn_pool1.get_by_connection_types([
            ConnectionType.GATEWAY, ConnectionType.RELAY_TRANSACTION
        ])
        self.assertEqual(2, len(gateway_and_relay_block_connections))
        self.assertIn(self.conn1, gateway_and_relay_block_connections)
        self.assertIn(self.conn3, gateway_and_relay_block_connections)

    def test_get_by_fileno(self):
        self._add_connections()
        self.assertEqual(self.conn1, self.conn_pool1.get_by_fileno(self.fileno1))
        self.assertEqual(self.conn2, self.conn_pool1.get_by_fileno(self.fileno2))
        self.conn_pool1.add(6000, "0.0.0.0", 4000, self.conn3)
        self.assertIsNone(self.conn_pool1.get_by_fileno(7000))
        self.assertIsNone(self.conn_pool1.get_by_fileno(2))

    def test_get_num_conn_by_ip(self):
        self._add_connections()
        self.assertEqual(1, self.conn_pool1.get_num_conn_by_ip(self.ip1))
        self.assertEqual(2, self.conn_pool1.get_num_conn_by_ip(self.ip2))
        self.assertEqual(0, self.conn_pool1.get_num_conn_by_ip("222.222.222.222"))

    def test_delete(self):
        self._add_connections()
        self.conn_pool1.delete(self.conn1)
        self.assertIsNone(self.conn_pool1.get_by_fileno(self.fileno1))
        with self.assertRaises(KeyError):
            self.conn_pool1.get_by_ipport(self.ip1, self.port1)

        self.conn_pool1.delete(self.conn2)
        self.assertIsNone(self.conn_pool1.get_by_fileno(self.fileno2))
        self.assertEqual(1, self.conn_pool1.count_conn_by_ip[self.ip2])

    # noinspection PyTypeChecker
    def test_delete_removes_multiple_types(self):
        class TestConnectionType(Flag):
            A = auto()
            B = auto()
            AB = A | B

        conn = MockConnection(MockSocketConnection(), (LOCALHOST, 8000), self.node1)
        conn.CONNECTION_TYPE = TestConnectionType.AB
        self.conn_pool1.add(self.fileno1, LOCALHOST, 8000, conn)
        self.assertIn(conn, self.conn_pool1.get_by_connection_type(TestConnectionType.A))
        self.assertIn(conn, self.conn_pool1.get_by_connection_type(TestConnectionType.B))

        self.conn_pool1.delete(conn)
        self.assertNotIn(conn, self.conn_pool1.get_by_connection_type(TestConnectionType.A))
        self.assertNotIn(conn, self.conn_pool1.get_by_connection_type(TestConnectionType.B))

    def test_delete_by_fileno(self):
        self._add_connections()
        self.conn_pool1.delete_by_fileno(self.fileno1)
        with self.assertRaises(KeyError):
            self.conn_pool1.get_by_ipport(self.ip1, self.port1)

    def test_iter(self):
        self._add_connections()
        pool_connections = list(iter(self.conn_pool1))
        self.assertEqual(3, len(pool_connections))
        self.assertTrue(self.conn1 in pool_connections)
        self.assertTrue(self.conn2 in pool_connections)
        self.assertTrue(self.conn3 in pool_connections)

    def test_len(self):
        self.assertEqual(0, len(self.conn_pool1))
        self._add_connections()
        self.assertEqual(3, len(self.conn_pool1))
        self.conn_pool1.delete(self.conn2)
        self.assertEqual(2, len(self.conn_pool1))

    def test_get_by_node_id(self):
        self._add_connections()
        self.conn1.peer_id = self.node_id2
        self.conn_pool1.index_conn_node_id(self.node_id2, self.conn1)
        connections_for_node_id = self.conn_pool1.get_by_node_id(self.node_id2)
        self.assertEqual(1, len(connections_for_node_id))
        self.conn_pool1.add(4000, "0.0.0.0", 6000, self.conn2)
        self.conn_pool1.index_conn_node_id(self.node_id2, self.conn2)
        connections_for_node_id = self.conn_pool1.get_by_node_id(self.node_id2)
        self.assertEqual(2, len(connections_for_node_id))
        self.assertEqual(0, len(self.conn_pool1.get_by_node_id("node_id")))

    def _add_connections(self):
        self.conn_pool1.add(self.fileno1, self.ip1, self.port1, self.conn1)
        self.conn_pool1.add(self.fileno2, self.ip2, self.port2, self.conn2)
        self.conn_pool1.add(self.fileno3, self.ip3, self.port3, self.conn3)

class ConnectionPoolTest(AbstractTestCase):
    def setUp(self):
        self.conn_pool1 = ConnectionPool()

        self.fileno1 = 1
        self.ip1 = "123.123.123.123"
        self.port1 = 1000
        self.node1 = MockNode(
            helpers.get_common_opts(1001, external_ip="128.128.128.128"))
        self.node_id1 = str(uuid.uuid1())
        self.conn1 = MockConnection(
            MockSocketConnection(self.fileno1,
                                 ip_address=self.ip1,
                                 port=self.port1), self.node1)

        self.fileno2 = 5
        self.ip2 = "234.234.234.234"
        self.port2 = 2000
        self.node2 = MockNode(
            helpers.get_common_opts(1003, external_ip="321.321.321.321"))
        self.node_id2 = str(uuid.uuid1())
        self.conn2 = MockConnection(
            MockSocketConnection(self.fileno2,
                                 ip_address=self.ip2,
                                 port=self.port2), self.node2)

        self.fileno3 = 6
        self.ip3 = "234.234.234.234"
        self.port3 = 3000
        self.node3 = MockNode(
            helpers.get_common_opts(1003, external_ip="213.213.213.213"))
        self.node_id3 = str(uuid.uuid1())
        self.conn3 = MockConnection(
            MockSocketConnection(self.fileno3,
                                 ip_address=self.ip3,
                                 port=self.port3), self.node3)

    def test_add(self):
        self.conn_pool1.add(self.fileno1, self.ip1, self.port1, self.conn1)
        self.assertEqual(self.conn1, self.conn_pool1.by_fileno[self.fileno1])
        self.assertEqual(self.conn1,
                         self.conn_pool1.by_ipport[(self.ip1, self.port1)])
        self.assertEqual(1, self.conn_pool1.count_conn_by_ip[self.ip1])

        with self.assertRaises(AssertionError):
            self.conn_pool1.add(self.fileno1, self.ip1, self.port1, self.conn1)

        self.conn_pool1.add(ConnectionPool.INITIAL_FILENO + 1, "0.0.0.0",
                            self.port1, self.conn1)
        self.assertEqual(ConnectionPool.INITIAL_FILENO * 2,
                         self.conn_pool1.len_fileno)

    def test_update(self):
        self.conn_pool1.add(self.fileno1, self.ip1, self.port1, self.conn1)
        self.conn_pool1.add(self.fileno2, self.ip2, self.port2, self.conn2)
        self.conn_pool1.update_port(self.port1, self.port2, self.conn1)
        self.assertEqual(self.conn1,
                         self.conn_pool1.get_by_ipport(self.ip1, self.port2))
        self.assertFalse(self.conn_pool1.has_connection(self.ip1, self.port1))

    def test_update_connection_type(self):
        self.conn1.CONNECTION_TYPE = ConnectionType.RELAY_ALL
        self.conn1.peer_id = "1234"
        self.conn2.CONNECTION_TYPE = ConnectionType.INTERNAL_GATEWAY
        self.conn2.peer_id = "2345"
        self.conn_pool1.add(self.fileno1, self.ip1, self.port1, self.conn1)
        self.conn_pool1.add(self.fileno2, self.ip2, self.port2, self.conn2)

        mock_connections = self.conn_pool1.get_by_connection_types(
            [self.conn1.CONNECTION_TYPE])
        self.assertIn(self.conn1, mock_connections)
        self.assertNotIn(self.conn2, mock_connections)

        self.conn_pool1.update_connection_type(self.conn1,
                                               ConnectionType.EXTERNAL_GATEWAY)

        mock_connections = self.conn_pool1.get_by_connection_types(
            [self.conn2.CONNECTION_TYPE])
        self.assertNotIn(self.conn1, mock_connections)

        mock_connections = self.conn_pool1.get_by_connection_types(
            [self.conn2.CONNECTION_TYPE])
        self.assertIn(self.conn2, mock_connections)

        mock_connections = self.conn_pool1.get_by_connection_types(
            [self.conn1.CONNECTION_TYPE])
        self.assertIn(self.conn1, mock_connections)

        mock_connections = self.conn_pool1.get_by_connection_types(
            [self.conn1.CONNECTION_TYPE])
        self.assertNotIn(self.conn2, mock_connections)

        mock_connections = self.conn_pool1.get_by_connection_types(
            [ConnectionType.RELAY_TRANSACTION])
        self.assertNotIn(self.conn1, mock_connections)
        self.assertNotIn(self.conn2, mock_connections)

    def test_has_connection(self):
        self._add_connections()
        self.assertTrue(self.conn_pool1.has_connection(self.ip1, self.port1))
        self.assertFalse(
            self.conn_pool1.has_connection("111.111.111.111", self.port1))
        self.assertFalse(self.conn_pool1.has_connection("111.111.111.111", 1))
        self.assertFalse(self.conn_pool1.has_connection(self.ip1, 1))

    def test_get_byipport(self):
        self._add_connections()
        self.assertEqual(self.conn1,
                         self.conn_pool1.get_by_ipport(self.ip1, self.port1))
        with self.assertRaises(KeyError):
            self.conn_pool1.get_by_ipport(self.ip1, 1)

    def test_get_by_connection_type(self):
        self.conn1.CONNECTION_TYPE = ConnectionType.EXTERNAL_GATEWAY
        self.conn2.CONNECTION_TYPE = ConnectionType.RELAY_BLOCK
        self.conn3.CONNECTION_TYPE = ConnectionType.RELAY_ALL
        self._add_connections()

        gateway_connections = list(
            self.conn_pool1.get_by_connection_types(
                [ConnectionType.EXTERNAL_GATEWAY]))
        self.assertEqual(1, len(gateway_connections))
        self.assertIn(self.conn1, gateway_connections)

        relay_connections = list(
            self.conn_pool1.get_by_connection_types(
                [ConnectionType.RELAY_BLOCK]))
        self.assertEqual(2, len(relay_connections))
        self.assertIn(self.conn2, relay_connections)
        self.assertIn(self.conn3, relay_connections)

    def test_get_by_connection_types(self):
        self.conn1.CONNECTION_TYPE = ConnectionType.EXTERNAL_GATEWAY
        self.conn2.CONNECTION_TYPE = ConnectionType.RELAY_BLOCK
        self.conn3.CONNECTION_TYPE = ConnectionType.RELAY_ALL
        self._add_connections()

        gateway_and_relay_block_connections = self.conn_pool1.get_by_connection_types(
            [
                ConnectionType.EXTERNAL_GATEWAY,
                ConnectionType.RELAY_TRANSACTION
            ])
        self.assertEqual(2, len(list(gateway_and_relay_block_connections)))

        gateway_and_relay_block_connections = self.conn_pool1.get_by_connection_types(
            [
                ConnectionType.EXTERNAL_GATEWAY,
                ConnectionType.RELAY_TRANSACTION
            ])
        self.assertIn(self.conn1, gateway_and_relay_block_connections)

        gateway_and_relay_block_connections = self.conn_pool1.get_by_connection_types(
            [
                ConnectionType.EXTERNAL_GATEWAY,
                ConnectionType.RELAY_TRANSACTION
            ])
        self.assertIn(self.conn3, gateway_and_relay_block_connections)

        relay_transaction_connections = self.conn_pool1.get_by_connection_types(
            [ConnectionType.RELAY_TRANSACTION])
        self.assertEqual(1, len(list(relay_transaction_connections)))

        relay_transaction_connections = self.conn_pool1.get_by_connection_types(
            [ConnectionType.RELAY_ALL])
        self.assertIn(self.conn3, relay_transaction_connections)

        relay_transaction_connections = self.conn_pool1.get_by_connection_types(
            [ConnectionType.RELAY_ALL])
        self.assertNotIn(self.conn1, relay_transaction_connections)

    def test_get_by_fileno(self):
        self._add_connections()
        self.assertEqual(self.conn1,
                         self.conn_pool1.get_by_fileno(self.fileno1))
        self.assertEqual(self.conn2,
                         self.conn_pool1.get_by_fileno(self.fileno2))
        self.conn_pool1.add(6000, "0.0.0.0", 4000, self.conn3)
        self.assertIsNone(
            self.conn_pool1.get_by_fileno(self.conn_pool1.len_fileno))
        self.assertIsNone(self.conn_pool1.get_by_fileno(7000))
        self.assertIsNone(self.conn_pool1.get_by_fileno(2))

    def test_delete(self):
        self.conn1.CONNECTION_TYPE = ConnectionType.RELAY_TRANSACTION
        self.conn2.CONNECTION_TYPE = ConnectionType.RELAY_ALL
        self.conn3.CONNECTION_TYPE = ConnectionType.EXTERNAL_GATEWAY

        self._add_connections()
        self.conn_pool1.delete(self.conn1)
        self.assertIsNone(self.conn_pool1.get_by_fileno(self.fileno1))
        self.assertEqual(
            0,
            len(self.conn_pool1.by_connection_type[
                self.conn1.CONNECTION_TYPE]))
        with self.assertRaises(KeyError):
            self.conn_pool1.get_by_ipport(self.ip1, self.port1)

        self.conn_pool1.delete(self.conn2)
        self.assertIsNone(self.conn_pool1.get_by_fileno(self.fileno2))
        self.assertEqual(1, self.conn_pool1.count_conn_by_ip[self.ip2])
        self.assertEqual(
            0,
            len(self.conn_pool1.by_connection_type[
                self.conn2.CONNECTION_TYPE]))

    # noinspection PyTypeChecker
    def test_delete_removes_multiple_types(self):
        class TestConnectionType(SerializableFlag):
            A = auto()
            B = auto()
            AB = A | B

        conn = MockConnection(
            MockSocketConnection(ip_address=LOCALHOST, port=8000), self.node1)
        conn.CONNECTION_TYPE = TestConnectionType.AB
        self.conn_pool1.add(self.fileno1, LOCALHOST, 8000, conn)
        self.assertIn(
            conn,
            self.conn_pool1.get_by_connection_types([TestConnectionType.A]))
        self.assertIn(
            conn,
            self.conn_pool1.get_by_connection_types([TestConnectionType.B]))

        self.conn_pool1.delete(conn)
        self.assertNotIn(
            conn,
            self.conn_pool1.get_by_connection_types([TestConnectionType.A]))
        self.assertNotIn(
            conn,
            self.conn_pool1.get_by_connection_types([TestConnectionType.B]))

    def test_iter(self):
        self._add_connections()
        pool_connections = list(iter(self.conn_pool1))
        self.assertEqual(3, len(pool_connections))
        self.assertTrue(self.conn1 in pool_connections)
        self.assertTrue(self.conn2 in pool_connections)
        self.assertTrue(self.conn3 in pool_connections)

    def test_len(self):
        self.assertEqual(0, len(self.conn_pool1))
        self._add_connections()
        self.assertEqual(3, len(self.conn_pool1))
        self.conn_pool1.delete(self.conn2)
        self.assertEqual(2, len(self.conn_pool1))

    @skip("Run this test only for local debugging")
    def test_get_by_connection_types_performance(self):
        log_config.set_level([
            "bxcommon.connections.abstract_node",
            "bxcommon.services.transaction_service"
        ], LogLevel.INFO)
        conn_pool = ConnectionPool()
        self.conn1.CONNECTION_TYPE = ConnectionType.EXTERNAL_GATEWAY
        self.conn2.CONNECTION_TYPE = ConnectionType.RELAY_BLOCK
        self.conn3.CONNECTION_TYPE = ConnectionType.RELAY_ALL
        number_of_iteration = 100
        for i in range(40):
            ip = f"{i}.{i}.{i}.{i}"
            node = MockNode(helpers.get_common_opts(i, external_ip=ip))
            conn = MockConnection(
                MockSocketConnection(i, ip_address=ip, port=i), node)
            if i % 7 == 0:
                conn.CONNECTION_TYPE = ConnectionType.RELAY_BLOCK
            elif i % 5 == 0:
                conn.CONNECTION_TYPE = ConnectionType.RELAY_TRANSACTION
            elif i % 3 == 0:
                conn.CONNECTION_TYPE = ConnectionType.INTERNAL_GATEWAY
            else:
                conn.CONNECTION_TYPE = ConnectionType.EXTERNAL_GATEWAY
            conn_pool.add(i, ip, i, conn)

        timeit_get_by_connections_types_one_type = timeit.timeit(
            lambda: conn_pool.get_by_connection_types(
                [ConnectionType.GATEWAY]),
            number=number_of_iteration)
        timeit_get_by_connections_types_two_types = timeit.timeit(
            lambda: conn_pool.get_by_connection_types(
                [ConnectionType.GATEWAY, ConnectionType.RELAY_TRANSACTION]),
            number=number_of_iteration)
        print(
            f"\ntimeit_get_by_connections_types_one_type # 2:  {timeit_get_by_connections_types_one_type * 1000 / number_of_iteration:.4f}ms, "
            f"#connections: {len(list(conn_pool.get_by_connection_types([ConnectionType.GATEWAY])))}"
            f"\ntimeit_get_by_connections_types_two_types # 2: {timeit_get_by_connections_types_two_types * 1000 / number_of_iteration:.4f}ms, "
            f"#connections: {len(list(conn_pool.get_by_connection_types([ConnectionType.GATEWAY, ConnectionType.RELAY_TRANSACTION])))}"
        )

        print("*****")
        for c in conn_pool.get_by_connection_types(
            [ConnectionType.GATEWAY, ConnectionType.RELAY_TRANSACTION]):
            print(f"connection: {c}, connection type: {c.CONNECTION_TYPE}")

    def _add_connections(self):
        self.conn_pool1.add(self.fileno1, self.ip1, self.port1, self.conn1)
        self.conn_pool1.add(self.fileno2, self.ip2, self.port2, self.conn2)
        self.conn_pool1.add(self.fileno3, self.ip3, self.port3, self.conn3)
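
# Hedged recap of the ConnectionPool surface exercised by the tests above,
# reusing the same mock helpers (a sketch; the literal values are made up):
pool = ConnectionPool()
node = MockNode(helpers.get_common_opts(1001, external_ip="128.128.128.128"))
conn = MockConnection(
    MockSocketConnection(1, ip_address="10.0.0.1", port=9000), node)
pool.add(1, "10.0.0.1", 9000, conn)
assert pool.has_connection("10.0.0.1", 9000)
assert pool.get_by_ipport("10.0.0.1", 9000) is conn
assert pool.get_by_fileno(1) is conn
pool.delete(conn)
assert pool.get_by_fileno(1) is None   # deleted filenos resolve to None
assert not pool.has_connection("10.0.0.1", 9000)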