Example #1
    def __init__(self, opts: Namespace):
        logger.debug("Initializing node of type: {}", self.NODE_TYPE)

        self.set_node_config_opts_from_sdn(opts)
        self.opts = opts
        self.connection_queue: Deque[Tuple[str, int]] = deque()
        self.disconnect_queue: Deque[DisconnectRequest] = deque()
        self.outbound_peers = opts.outbound_peers[:]

        self.connection_pool = ConnectionPool()

        self.should_force_exit = False

        self.num_retries_by_ip: Dict[Tuple[str, int], int] = defaultdict(int)

        # Handle termination gracefully
        signal.signal(signal.SIGTERM, self._kill_node)
        signal.signal(signal.SIGINT, self._kill_node)
        signal.signal(signal.SIGSEGV, self._kill_node)

        # Event handling queue for delayed events
        self.alarm_queue = AlarmQueue()

        self.init_throughput_logging()
        self.init_node_info_logging()
        self.init_memory_stats_logging()
        self.init_block_stats_logging()
        self.init_tx_stats_logging()

        # TODO: clean this up alongside outputbuffer holding time
        # this is Nagle's algorithm and we need to implement it properly
        # flush buffers regularly because of output buffer holding time
        self.alarm_queue.register_approx_alarm(
            self.FLUSH_SEND_BUFFERS_INTERVAL,
            constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME,
            self.flush_all_send_buffers)

        self.network_num = opts.blockchain_network_num
        self.broadcast_service = self.get_broadcast_service()

        # converting setting in MB to bytes
        self.next_report_mem_usage_bytes = self.opts.dump_detailed_report_at_memory_usage * 1024 * 1024

        if opts.dump_removed_short_ids:
            os.makedirs(opts.dump_removed_short_ids_path, exist_ok=True)

        # Each time a network has an update regarding txs, blocks, etc., record it in this dict
        # so we can verify whether the node lost its connection to the requested relay.

        self.last_sync_message_received_by_network: Dict[int, float] = {}

        opts.has_fully_updated_tx_service = False
        self.alarm_queue.register_alarm(constants.TX_SERVICE_SYNC_PROGRESS_S,
                                        self._sync_tx_services)
        self._check_sync_relay_connections_alarm_id = self.alarm_queue.register_alarm(
            constants.LAST_MSG_FROM_RELAY_THRESHOLD_S,
            self._check_sync_relay_connections)
        self._transaction_sync_timeout_alarm_id = self.alarm_queue.register_alarm(
            constants.TX_SERVICE_CHECK_NETWORKS_SYNCED_S,
            self._transaction_sync_timeout)
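A pattern worth noting across these examples: an alarm callback's return value drives rescheduling. In Example #15 below, handlers return an interval in seconds to fire again (e.g. FLUSH_SEND_BUFFERS_INTERVAL, RESPONSIVENESS_CHECK_INTERVAL_S) or constants.CANCEL_ALARMS to stop; _retry_init_client_socket returns a literal 0 for the same purpose, which suggests CANCEL_ALARMS == 0. A minimal sketch of that contract, using only the AlarmQueue API these examples exercise (the import path is an assumption):

# Minimal sketch of the AlarmQueue callback contract; the import path is an
# assumption, and CANCEL_ALARMS == 0 is inferred from Example #15 below.
from bxcommon.utils.alarm_queue import AlarmQueue

alarm_queue = AlarmQueue()

def heartbeat() -> int:
    print("tick")
    return 10  # fire again in ~10 seconds

def one_shot() -> int:
    print("done once")
    return 0  # 0 (constants.CANCEL_ALARMS in the examples) cancels the alarm

alarm_queue.register_alarm(10, heartbeat)
alarm_queue.register_alarm(1, one_shot)
alarm_queue.fire_alarms()  # fires anything whose deadline has passed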
Example #2
    def setUp(self):
        super().setUp()

        self.node1.alarm_queue = AlarmQueue()
        self.node2.alarm_queue = AlarmQueue()

        self.network_num = 1
        self.magic = 12345
        self.version = 23456
        self.prev_block_hash = bytearray(crypto.double_sha256(b"123"))
        self.prev_block = BtcObjectHash(self.prev_block_hash,
                                        length=crypto.SHA256_HASH_LEN)
        self.merkle_root_hash = bytearray(crypto.double_sha256(b"234"))
        self.merkle_root = BtcObjectHash(self.merkle_root_hash,
                                         length=crypto.SHA256_HASH_LEN)
        self.bits = 2
        self.nonce = 3

        opts = self.gateway_1_opts()
        if opts.use_extensions:
            helpers.set_extensions_parallelism()
        self.btc_message_converter = btc_message_converter_factory.create_btc_message_converter(
            self.magic, opts)

        self.btc_transactions = [
            TxBtcMessage(self.magic, self.version, [], [], i)
            for i in range(self.TRANSACTIONS_COUNT)
        ]
        self.btc_transactions_for_block = [
            tx_btc_message.rawbytes()[btc_constants.BTC_HDR_COMMON_OFF:]
            for tx_btc_message in self.btc_transactions
        ]
        self.transactions = [
            self.btc_message_converter.tx_to_bx_txs(tx_btc_message,
                                                    self.network_num)[0][0]
            for tx_btc_message in self.btc_transactions
        ]
        self.transactions_with_short_ids = [
            TxMessage(tx_message.tx_hash(), tx_message.network_num(), "",
                      i + 1, tx_message.tx_val())
            for i, tx_message in enumerate(self.transactions)
        ]
        self.transactions_with_no_content = [
            TxMessage(tx_message.tx_hash(), tx_message.network_num(), "",
                      i + 1) for i, tx_message in enumerate(self.transactions)
        ]
        self.transactions_by_short_id = {
            tx_message.short_id(): tx_message
            for tx_message in self.transactions_with_short_ids
        }
        self.block = BlockBtcMessage(self.magic, self.version,
                                     self.prev_block, self.merkle_root,
                                     int(time.time()), self.bits, self.nonce,
                                     self.btc_transactions_for_block)
Example #3
def initialize_split_relay_node():
    relay_connections = [
        OutboundPeerModel(LOCALHOST, 8001, node_type=NodeType.RELAY_BLOCK)
    ]
    network_latency.get_best_relays_by_ping_latency_one_per_country = MagicMock(
        return_value=[relay_connections[0]])
    opts = gateway_helpers.get_gateway_opts(8000,
                                            split_relays=True,
                                            include_default_btc_args=True)
    if opts.use_extensions:
        helpers.set_extensions_parallelism()
    node = GatewayNode(opts)
    node.enqueue_connection = MagicMock()

    node._register_potential_relay_peers(
        node._find_best_relay_peers(
            network_latency.get_best_relays_by_ping_latency_one_per_country()))
    node.enqueue_connection.assert_has_calls(
        [
            call(LOCALHOST, 8001, ConnectionType.RELAY_BLOCK),
            call(LOCALHOST, 8002, ConnectionType.RELAY_TRANSACTION),
        ],
        any_order=True,
    )
    node.on_connection_added(
        MockSocketConnection(1, node, ip_address=LOCALHOST, port=8001))
    node.on_connection_added(
        MockSocketConnection(2, node, ip_address=LOCALHOST, port=8002))

    node.alarm_queue = AlarmQueue()
    node.enqueue_connection.reset_mock()
    return node
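Note what this helper asserts: a single relay peer is registered on port 8001, yet the gateway is expected to enqueue both a RELAY_BLOCK connection on 8001 and a RELAY_TRANSACTION connection on 8002. With split_relays=True, the transaction-relay endpoint is evidently derived as the block relay's port + 1. A standalone restatement of that inference (the port+1 rule is read off the assertions above, not a confirmed API guarantee):

# Inference from Example #3's assertions: with split_relays=True, one relay
# peer on port P yields a block-relay connection on P and a transaction-relay
# connection on P + 1.
def expected_split_relay_endpoints(ip: str, block_port: int):
    return [
        (ip, block_port),      # ConnectionType.RELAY_BLOCK
        (ip, block_port + 1),  # ConnectionType.RELAY_TRANSACTION
    ]

assert expected_split_relay_endpoints("127.0.0.1", 8001) == [
    ("127.0.0.1", 8001),
    ("127.0.0.1", 8002),
]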
Example #4
    def test_get_object_size(self):
        mock_node = MockNode(get_common_opts(1234))
        object_size = memory_utils.get_object_size(mock_node)

        self.assertIsInstance(object_size, ObjectSize)
        self.assertTrue(object_size.size > 0)
        self.assertTrue(object_size.flat_size > 0)
        self.assertTrue(object_size.is_actual_size)
        self.assertEqual(0, len(object_size.references))

        ex_set = ExpiringSet(AlarmQueue(), 10, "testset")
        s = set()
        h1 = Sha256Hash(b"1" * 32)
        h2 = Sha256Hash(b"0" * 32)
        print(memory_utils.get_object_size(ex_set).size)
        print(memory_utils.get_object_size(s).size)
        print(memory_utils.get_object_size(h1).size)
        print(memory_utils.get_object_size(h2).size)
        print(memory_utils.get_special_size(ex_set).size)
        print(memory_utils.get_special_size(s).size)
        print(memory_utils.get_special_size(h1).size)
        print(memory_utils.get_special_size(h2).size)
        ex_set.add(h1)
        ex_set.add(h2)
        s.add(h1)
        s.add(h2)
        print(memory_utils.get_object_size(ex_set).size)
        print(memory_utils.get_special_size(ex_set).size)
        print(memory_utils.get_object_size(s).size)
Example #5
class ExpiringSetTest(AbstractTestCase):
    ALARM_QUEUE = AlarmQueue()
    EXPIRATION_TIME_S = 1

    def setUp(self):
        self.sut = ExpiringSet(self.ALARM_QUEUE, self.EXPIRATION_TIME_S)

    def test_cleanup(self):
        test_item = "dummy_text"
        self.sut.add(test_item)
        self.assertTrue(test_item in self.sut.contents)

        time.time = MagicMock(return_value=time.time() + self.EXPIRATION_TIME_S + 1)
        self.ALARM_QUEUE.fire_alarms()
        self.assertFalse(test_item in self.sut.contents)

    def test_cleanup__not_existing_item(self):
        test_item = "dummy_text"
        self.sut.add(test_item)
        self.assertTrue(test_item in self.sut.contents)

        self.sut.contents.remove(test_item)
        self.assertFalse(test_item in self.sut.contents)

        time.time = MagicMock(return_value=time.time() + self.EXPIRATION_TIME_S + 1)
        self.ALARM_QUEUE.fire_alarms()
        self.assertFalse(test_item in self.sut.contents)

    def test_get_recent(self):
        for i in range(5):
            self.sut.add(i)

        self.assertEqual([4, 3, 2], self.sut.get_recent_items(3))
        self.assertEqual([4, 3, 2, 1, 0], self.sut.get_recent_items(6))
        self.assertEqual([4], self.sut.get_recent_items(1))
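The expiration tests above never sleep; they monkey-patch time.time forward and then fire alarms synchronously. The same idiom recurs in Examples #13, #17, and #18. A condensed sketch, where alarm_queue and EXPIRATION_TIME_S stand in for the fixtures above (restoring the real clock afterwards is a precaution these tests omit):

# Advance the clock the AlarmQueue sees instead of sleeping, then fire alarms.
import time
from unittest.mock import MagicMock

EXPIRATION_TIME_S = 1
real_time_fn = time.time
time.time = MagicMock(return_value=real_time_fn() + EXPIRATION_TIME_S + 1)
alarm_queue.fire_alarms()  # expired entries are cleaned up here
time.time = real_time_fn  # restore, so the mock does not leak into other tests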
Example #6
File: mock_node.py Project: aspin/bxcommon
    def __init__(self, opts: Namespace):
        super(MockNode, self).__init__(opts)
        self.alarm_queue = AlarmQueue()
        self.network_num = DEFAULT_NETWORK_NUM

        self.broadcast_messages = []

        self._tx_service = TransactionService(self, self.network_num)
        self._tx_services = {}
Example #7
    def __init__(self,
                 opts: CommonOpts,
                 node_ssl_service: Optional[NodeSSLService] = None) -> None:
        if node_ssl_service is None:
            node_ssl_service = MockNodeSSLService(self.NODE_TYPE, MagicMock())
        super(MockNode, self).__init__(opts, node_ssl_service)
        self.alarm_queue = AlarmQueue()
        self.network_num = DEFAULT_NETWORK_NUM

        self.broadcast_messages = []

        self._tx_service = TransactionService(self, self.network_num)
        self._tx_services = {}
Example #8
    def setUp(self):
        self.node = MockGatewayNode(
            gateway_helpers.get_gateway_opts(
                8000, max_block_interval_s=BLOCK_INTERVAL))
        self.node.block_parts_storage = ExpiringDict(
            self.node.alarm_queue,
            gateway_constants.MAX_BLOCK_CACHE_TIME_S,
            "eth_block_queue_parts",
        )
        self.node.alarm_queue = AlarmQueue()
        self.node.set_known_total_difficulty = MagicMock()
        self.block_processing_service = EthBlockProcessingService(self.node)
        self.node.block_processing_service = self.block_processing_service

        self.node_connection = MockConnection(
            MockSocketConnection(1, self.node, ip_address=LOCALHOST,
                                 port=8002), self.node)
        self.node_connection.is_active = MagicMock(return_value=True)
        self.block_queuing_service = EthBlockQueuingService(
            self.node, self.node_connection)
        self.node.block_queuing_service_manager.add_block_queuing_service(
            self.node_connection, self.block_queuing_service)
        self.node_connection.enqueue_msg = MagicMock()

        self.node_connection_2 = MockConnection(
            MockSocketConnection(1, self.node, ip_address=LOCALHOST,
                                 port=8003), self.node)
        self.node_connection_2.is_active = MagicMock(return_value=True)
        self.block_queuing_service_2 = EthBlockQueuingService(
            self.node, self.node_connection_2)
        self.node.block_queuing_service_manager.add_block_queuing_service(
            self.node_connection_2, self.block_queuing_service_2)
        self.node_connection_2.enqueue_msg = MagicMock()

        self.blockchain_connections = [
            self.node_connection, self.node_connection_2
        ]
        self.block_queuing_services = [
            self.block_queuing_service, self.block_queuing_service_2
        ]

        time.time = MagicMock(return_value=time.time())
Example #9
    def setUp(self):
        self.node = MockGatewayNode(
            gateway_helpers.get_gateway_opts(
                8000, max_block_interval=BLOCK_INTERVAL))
        self.node.alarm_queue = AlarmQueue()

        self.node_connection = MagicMock()
        self.node_connection.is_active = MagicMock(return_value=True)
        self.node.set_known_total_difficulty = MagicMock()

        self.node.node_conn = self.node_connection

        self.block_queuing_service = EthBlockQueuingService(self.node)
        self.block_processing_service = EthBlockProcessingService(self.node)

        self.node.block_queuing_service = self.block_queuing_service
        self.node.block_processing_service = self.block_processing_service

        self.node.send_msg_to_node = MagicMock()

        time.time = MagicMock(return_value=time.time())
Example #10
    def setUp(self) -> None:
        self.gateway_node = MockGatewayNode(
            gateway_helpers.get_gateway_opts(8000, split_relays=True))
        self.gateway_node.alarm_queue = AlarmQueue()
        self.gateway_node.requester = ThreadedRequestService(
            "mock_thread_service", self.gateway_node.alarm_queue,
            constants.THREADED_HTTP_POOL_SLEEP_INTERVAL_S)
        self.gateway_node.requester.start()

        self.outbound_peer_models = [
            OutboundPeerModel("1.1.1.1", 1609, node_type=NodeType.RELAY_BLOCK),
            OutboundPeerModel("1.1.1.2", 1609, node_type=NodeType.RELAY_BLOCK),
            OutboundPeerModel("1.1.1.3", 1609, node_type=NodeType.RELAY_BLOCK),
        ]
        sdn_http_service.fetch_potential_relay_peers_by_network = MagicMock(
            side_effect=lambda *args: self.outbound_peer_models)
        sdn_http_service.submit_peer_connection_error_event = MagicMock()
        self.latencies = [10, 20, 30]
        ping_latency.get_ping_latencies = MagicMock(side_effect=lambda *args: [
            NodeLatencyInfo(self.outbound_peer_models[0], self.latencies[0]),
            NodeLatencyInfo(self.outbound_peer_models[1], self.latencies[1]),
            NodeLatencyInfo(self.outbound_peer_models[2], self.latencies[2]),
        ])
        self.gateway_node.enqueue_connection = MagicMock()
Example #11
class BlockEncryptedCacheTest(AbstractTestCase):
    ALARM_QUEUE = AlarmQueue()

    def setUp(self):
        payload = bytearray(i for i in range(100))
        self.sut = BlockEncryptedCache(self.ALARM_QUEUE)
        _, self.block_hash = self.sut.encrypt_and_add_payload(payload)

    def test_remove_item__bytes(self):
        self.assertTrue(self.sut.has_ciphertext_for_hash(self.block_hash))
        self.sut.remove_item(self.block_hash)
        self.assertFalse(self.sut.has_ciphertext_for_hash(self.block_hash))

    def test_remove_item__object_hash(self):
        self.assertTrue(self.sut.has_ciphertext_for_hash(self.block_hash))
        object_hash = Sha256Hash(self.block_hash)
        self.sut.remove_item(object_hash)
        self.assertFalse(self.sut.has_ciphertext_for_hash(self.block_hash))

    def test_remove_item__memoryview(self):
        self.assertTrue(self.sut.has_ciphertext_for_hash(self.block_hash))
        mem_view = memoryview(self.block_hash)
        self.sut.remove_item(mem_view)
        self.assertFalse(self.sut.has_ciphertext_for_hash(self.block_hash))
Example #12
 def setUp(self):
     self.connection = helpers.create_connection(InternalNodeConnection)
     self.connection.state = self.connection.state | ConnectionState.ESTABLISHED
     self.alarm_queue = AlarmQueue()
     self.connection.node.alarm_queue = self.alarm_queue
Example #13
class AlarmQueueTest(AbstractTestCase):

    def setUp(self):
        self.alarm_queue = AlarmQueue()

    def function_to_pass(self, first, second):
        return first + second

    def test_register_alarm(self):
        alarm_id = self.alarm_queue.register_alarm(1, self.function_to_pass, 1, 5)
        self.assertEqual(1, len(self.alarm_queue.alarms))
        self.assertEqual(1, self.alarm_queue.uniq_count)
        self.assertEqual(0, self.alarm_queue.alarms[0].count)
        self.assertEqual(0, alarm_id.count)

    def test_register_approx_alarm(self):
        self.alarm_queue.register_approx_alarm(1, 3, self.function_to_pass, 1, 5)
        self.assertEqual(1, len(self.alarm_queue.approx_alarms_scheduled[self.function_to_pass]))
        self.assertEqual(self.function_to_pass,
                         self.alarm_queue.approx_alarms_scheduled[self.function_to_pass][0].alarm.fn)

    def test_unregister_alarm(self):
        alarm_id1 = self.alarm_queue.register_alarm(1, self.function_to_pass, 1, 5)
        self.assertEqual(1, len(self.alarm_queue.alarms))
        alarm_id2 = self.alarm_queue.register_alarm(1, self.function_to_pass, 2, 9)
        self.assertEqual(2, len(self.alarm_queue.alarms))
        self.alarm_queue.unregister_alarm(alarm_id1)
        self.assertEqual(1, len(self.alarm_queue.alarms))
        self.alarm_queue.unregister_alarm(alarm_id2)
        self.assertEqual(0, len(self.alarm_queue.alarms))

    def test_fire_alarms(self):
        self.alarm_queue.register_alarm(1, self.function_to_pass, 0, 0)
        self.alarm_queue.register_alarm(5, self.function_to_pass, 0, 0)
        time.time = MagicMock(return_value=time.time() + 2)
        self.alarm_queue.fire_alarms()
        self.assertEqual(1, len(self.alarm_queue.alarms))

    def test_time_to_next_alarm(self):
        self.alarm_queue.register_alarm(1, self.function_to_pass, 1, 5)
        self.assertEqual(1, len(self.alarm_queue.alarms))
        self.assertLess(0, self.alarm_queue.time_to_next_alarm()[1])
        time.time = MagicMock(return_value=time.time() + 2)
        self.assertGreater(0, self.alarm_queue.time_to_next_alarm()[1])

    def test_fire_ready_alarms(self):
        self.alarm_queue.register_alarm(1, self.function_to_pass, 0, 0)
        self.alarm_queue.register_alarm(5, self.function_to_pass, 0, 0)
        time.time = MagicMock(return_value=time.time() + 2)
        time_to_next_alarm = self.alarm_queue.fire_ready_alarms(False)
        self.assertEqual(1, len(self.alarm_queue.alarms))
        self.assertLess(0, time_to_next_alarm)
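test_register_approx_alarm above peeks at approx_alarms_scheduled, a dict keyed by the callback function; this suggests approximate alarms exist to coalesce repeat registrations of the same function whose firing windows overlap within the slop argument. A hedged sketch of that reading, with alarm_queue as in setUp above (the single-entry assertion is an inference from the data structure, not a documented guarantee):

# register_approx_alarm(fire_delay, slop, fn, *args): two registrations of the
# same fn with overlapping windows should share one scheduled alarm.
def flush_buffers() -> int:
    return 0  # one-shot

alarm_queue.register_approx_alarm(1, 3, flush_buffers)
alarm_queue.register_approx_alarm(2, 3, flush_buffers)
assert len(alarm_queue.approx_alarms_scheduled[flush_buffers]) == 1  # inferred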
Example #14
    def __init__(self,
                 opts: CommonOpts,
                 node_ssl_service: NodeSSLService,
                 connection_pool: Optional[ConnectionPool] = None):
        self.node_ssl_service = node_ssl_service
        logger.debug("Initializing node of type: {}", self.NODE_TYPE)
        self.server_endpoints = [
            IpEndpoint(constants.LISTEN_ON_IP_ADDRESS, opts.external_port),
            # TODO: remove this after v1 is no longer supported
            IpEndpoint(constants.LISTEN_ON_IP_ADDRESS, opts.non_ssl_port)
        ]

        self.set_node_config_opts_from_sdn(opts)
        self.opts: CommonOpts = opts
        self.pending_connection_requests: Set[ConnectionPeerInfo] = set()
        self.pending_connection_attempts: Set[ConnectionPeerInfo] = set()
        self.outbound_peers: Set[OutboundPeerModel] = opts.outbound_peers.copy()

        if connection_pool is not None:
            self.connection_pool = connection_pool
        else:
            self.connection_pool = ConnectionPool()

        self.should_force_exit = False
        self.should_restart_on_high_memory = False

        self.num_retries_by_ip: Dict[Tuple[str, int], int] = defaultdict(int)

        # Event handling queue for delayed events
        self.alarm_queue = AlarmQueue()

        self.init_node_status_logging()
        self.init_throughput_logging()
        self.init_node_info_logging()
        self.init_memory_stats_logging()
        self.init_block_stats_logging()
        self.init_tx_stats_logging()

        # TODO: clean this up alongside outputbuffer holding time
        # this is Nagle's algorithm and we need to implement it properly
        # flush buffers regularly because of output buffer holding time
        self.alarm_queue.register_approx_alarm(
            self.FLUSH_SEND_BUFFERS_INTERVAL,
            constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME,
            self.flush_all_send_buffers)

        self.network_num = opts.blockchain_network_num
        self.broadcast_service = self.get_broadcast_service()

        # converting setting in MB to bytes
        self.next_report_mem_usage_bytes = self.opts.dump_detailed_report_at_memory_usage * 1024 * 1024

        if opts.dump_removed_short_ids:
            os.makedirs(opts.dump_removed_short_ids_path, exist_ok=True)

        # Each time a network has an update regarding txs, blocks, etc., record it in this dict
        # so we can verify whether the node lost its connection to the requested relay.

        self.last_sync_message_received_by_network: Dict[int, float] = {}

        self.start_sync_time: Optional[float] = None
        self.sync_metrics: Dict[int, Counter] = defaultdict(Counter)
        self.sync_short_id_buckets: Dict[
            int,
            TransactionShortIdBuckets] = defaultdict(TransactionShortIdBuckets)

        opts.has_fully_updated_tx_service = False

        self.check_sync_relay_connections_alarm_id: Optional[AlarmId] = None
        self.transaction_sync_timeout_alarm_id: Optional[AlarmId] = None

        self.requester = ThreadedRequestService(
            # pyre-fixme[16]: `Optional` has no attribute `name`.
            self.NODE_TYPE.name.lower(),
            self.alarm_queue,
            constants.THREADED_HTTP_POOL_SLEEP_INTERVAL_S)

        self._last_responsiveness_check_log_time = time.time()
        self._last_responsiveness_check_details = {}
        self.gc_logging_enabled = False
        self.serialized_message_cache = SerializedMessageCache(
            self.alarm_queue)

        self.alarm_queue.register_alarm(
            constants.RESPONSIVENESS_CHECK_INTERVAL_S,
            self._responsiveness_check_log)
Example #15
class AbstractNode:
    __meta__ = ABCMeta
    FLUSH_SEND_BUFFERS_INTERVAL = constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME * 2
    NODE_TYPE: Optional[NodeType] = None

    def __init__(self,
                 opts: CommonOpts,
                 node_ssl_service: NodeSSLService,
                 connection_pool: Optional[ConnectionPool] = None):
        self.node_ssl_service = node_ssl_service
        logger.debug("Initializing node of type: {}", self.NODE_TYPE)
        self.server_endpoints = [
            IpEndpoint(constants.LISTEN_ON_IP_ADDRESS, opts.external_port),
            # TODO: remove this after v1 is no longer supported
            IpEndpoint(constants.LISTEN_ON_IP_ADDRESS, opts.non_ssl_port)
        ]

        self.set_node_config_opts_from_sdn(opts)
        self.opts: CommonOpts = opts
        self.pending_connection_requests: Set[ConnectionPeerInfo] = set()
        self.pending_connection_attempts: Set[ConnectionPeerInfo] = set()
        self.outbound_peers: Set[OutboundPeerModel] = opts.outbound_peers.copy()

        if connection_pool is not None:
            self.connection_pool = connection_pool
        else:
            self.connection_pool = ConnectionPool()

        self.should_force_exit = False
        self.should_restart_on_high_memory = False

        self.num_retries_by_ip: Dict[Tuple[str, int], int] = defaultdict(int)

        # Event handling queue for delayed events
        self.alarm_queue = AlarmQueue()

        self.init_node_status_logging()
        self.init_throughput_logging()
        self.init_node_info_logging()
        self.init_memory_stats_logging()
        self.init_block_stats_logging()
        self.init_tx_stats_logging()

        # TODO: clean this up alongside outputbuffer holding time
        # this is Nagle's algorithm and we need to implement it properly
        # flush buffers regularly because of output buffer holding time
        self.alarm_queue.register_approx_alarm(
            self.FLUSH_SEND_BUFFERS_INTERVAL,
            constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME,
            self.flush_all_send_buffers)

        self.network_num = opts.blockchain_network_num
        self.broadcast_service = self.get_broadcast_service()

        # converting setting in MB to bytes
        self.next_report_mem_usage_bytes = self.opts.dump_detailed_report_at_memory_usage * 1024 * 1024

        if opts.dump_removed_short_ids:
            os.makedirs(opts.dump_removed_short_ids_path, exist_ok=True)

        # Each time a network has an update regarding txs, blocks, etc., record it in this dict
        # so we can verify whether the node lost its connection to the requested relay.

        self.last_sync_message_received_by_network: Dict[int, float] = {}

        self.start_sync_time: Optional[float] = None
        self.sync_metrics: Dict[int, Counter] = defaultdict(Counter)
        self.sync_short_id_buckets: Dict[
            int,
            TransactionShortIdBuckets] = defaultdict(TransactionShortIdBuckets)

        opts.has_fully_updated_tx_service = False

        self.check_sync_relay_connections_alarm_id: Optional[AlarmId] = None
        self.transaction_sync_timeout_alarm_id: Optional[AlarmId] = None

        self.requester = ThreadedRequestService(
            # pyre-fixme[16]: `Optional` has no attribute `name`.
            self.NODE_TYPE.name.lower(),
            self.alarm_queue,
            constants.THREADED_HTTP_POOL_SLEEP_INTERVAL_S)

        self._last_responsiveness_check_log_time = time.time()
        self._last_responsiveness_check_details = {}
        self.gc_logging_enabled = False
        self.serialized_message_cache = SerializedMessageCache(
            self.alarm_queue)

        self.alarm_queue.register_alarm(
            constants.RESPONSIVENESS_CHECK_INTERVAL_S,
            self._responsiveness_check_log)

    def get_sdn_address(self):
        """
        Placeholder for net event loop to get the sdn address (relay only).
        :return:
        """
        return

    @abstractmethod
    def get_tx_service(self,
                       network_num: Optional[int] = None
                       ) -> TransactionService:
        pass

    @abstractmethod
    def get_outbound_peer_info(self) -> List[ConnectionPeerInfo]:
        pass

    @abstractmethod
    def get_broadcast_service(self) -> BroadcastService:
        pass

    def sync_and_send_request_for_relay_peers(self, network_num: int) -> int:
        """
        Requests potential relay peers from the SDN and merges the list with
        relays provided on the command line.

        This function retrieves potential_relay_peers_by_network from the SDN,
        then pings each relay in parallel (with a 2-second timeout). Once the
        ping results are in, it calculates the best relay and decides whether
        to switch relays.

        The above can take time, so the work is split into several internal
        functions and runs on the thread pool so as not to block the main thread.
        """

        self.requester.send_threaded_request(
            sdn_http_service.fetch_potential_relay_peers_by_network,
            self.opts.node_id,
            network_num,
            # pyre-fixme[6]: Expected `Optional[Callable[[Future[Any]], Any]]` for 4th parameter `done_callback`
            #  to call `send_threaded_request` but got `BoundMethod[Callable(_process_blockchain_network_from_sdn)
            #  [[Named(self, AbstractRelayConnection), Named(get_blockchain_network_future, Future[Any])], Any],
            #  AbstractRelayConnection]`.
            done_callback=self.process_potential_relays_from_sdn)

        return constants.CANCEL_ALARMS

    def process_potential_relays_from_sdn(self,
                                          get_potential_relays_future: Future):
        pass

    @abstractmethod
    def build_connection(
        self, socket_connection: AbstractSocketConnectionProtocol
    ) -> Optional[AbstractConnection]:
        pass

    @abstractmethod
    def on_failed_connection_retry(self, ip: str, port: int,
                                   connection_type: ConnectionType,
                                   connection_state: ConnectionState) -> None:
        pass

    def connection_exists(self,
                          ip: str,
                          port: int,
                          peer_id: Optional[str] = None) -> bool:
        return self.connection_pool.has_connection(ip, port, peer_id)

    def on_connection_added(
        self, socket_connection: AbstractSocketConnectionProtocol
    ) -> Optional[AbstractConnection]:
        """
        Notifies the node that a connection is coming in.
        """
        # If we're already connected to the remote peer, log the event and request disconnect.
        self.pending_connection_attempts.discard(
            ConnectionPeerInfo(socket_connection.endpoint,
                               AbstractConnection.CONNECTION_TYPE))
        ip, port = socket_connection.endpoint
        peer_info = None
        if socket_connection.is_ssl:
            try:
                peer_info = self._get_socket_peer_info(socket_connection)
            except ConnectionAuthenticationError as e:
                logger.warning(log_messages.FAILED_TO_AUTHENTICATE_CONNECTION,
                               ip, port, e)
                socket_connection.mark_for_close(should_retry=False)
                return None

            if self.connection_exists(ip, port, peer_info.peer_id):
                logger.debug(
                    "Duplicate connection attempted to: {}:{} (peer id: {}). "
                    "Dropping.", ip, port, peer_info.peer_id)
                socket_connection.mark_for_close(should_retry=False)
                return None
        elif self.connection_exists(ip, port):
            logger.debug(
                "Duplicate connection attempt to {}:{}. Dropping.",
                ip,
                port,
            )
            socket_connection.mark_for_close(should_retry=False)
            return None

        connection = self._initialize_connection(socket_connection)
        if connection is None:
            return None

        if peer_info is not None:
            connection.on_connection_authenticated(peer_info)
            self.connection_pool.index_conn_node_id(peer_info.peer_id,
                                                    connection)

        connection.state |= ConnectionState.INITIALIZED
        logger.debug("Successfully initialized connection: {}", connection)
        return connection

    def on_connection_closed(self,
                             file_no: int,
                             mark_connection_for_close: bool = False):
        conn = self.connection_pool.get_by_fileno(file_no)

        if conn is None:
            logger.debug(
                "Unexpectedly closed connection not in pool. file_no: {}",
                file_no)
            return

        if mark_connection_for_close:
            conn.mark_for_close()

        self._destroy_conn(conn)

    def log_refused_connection(self, peer_info: ConnectionPeerInfo,
                               error: str):
        logger.info("Failed to connect to: {}, {}.", peer_info, error)

    def log_closed_connection(self, connection: AbstractConnection):
        if ConnectionState.ESTABLISHED not in connection.state:
            logger.info("Failed to connect to: {}.", connection)
        else:
            logger.info("Closed connection: {}", connection)

    def on_updated_peers(self,
                         outbound_peer_models: Set[OutboundPeerModel]) -> None:
        if not outbound_peer_models:
            logger.debug("Got peer update with no peers.")
            return

        logger.debug("Processing updated outbound peers: {}.",
                     outbound_peer_models)

        # Remove peers not in updated list or from command-line args.
        old_peers = self.outbound_peers

        # TODO: remove casting to set once the type of outbound peer model is verified globally
        remove_peers = set(old_peers) - set(outbound_peer_models) - set(
            self.opts.outbound_peers)

        for rem_peer in remove_peers:
            if self.connection_pool.has_connection(rem_peer.ip, rem_peer.port,
                                                   rem_peer.node_id):
                rem_conn = self.connection_pool.get_by_ipport(
                    rem_peer.ip, rem_peer.port, rem_peer.node_id)
                if rem_conn:
                    rem_conn.mark_for_close(False)

        # Connect to peers not in our known pool
        for peer in outbound_peer_models:
            peer_ip = peer.ip
            peer_port = peer.port
            if self.should_connect_to_new_outbound_peer(peer):
                self.enqueue_connection(
                    peer_ip,
                    peer_port,
                    convert.peer_node_to_connection_type(
                        # pyre-fixme[6]: Expected `NodeType` for 1st param but got
                        #  `Optional[NodeType]`.
                        self.NODE_TYPE,
                        peer.node_type))
        self.outbound_peers = outbound_peer_models

    def on_updated_node_model(self, new_node_model: NodeModel):
        """
        Updates `opts` according to a newly updated `NodeModel`.
        This is currently unused on gateways.
        """
        logger.debug("Updating node attributes with new model: {}",
                     new_node_model)
        for key, val in new_node_model.__dict__.items():
            logger.trace("Updating attribute '{}': {} => {}", key,
                         self.opts.__dict__.get(key, 'None'), val)
            self.opts.__dict__[key] = val

    def should_connect_to_new_outbound_peer(
            self, outbound_peer: OutboundPeerModel) -> bool:
        return not self.connection_pool.has_connection(
            outbound_peer.ip, outbound_peer.port, outbound_peer.node_id)

    def on_bytes_received(self, file_no: int,
                          bytes_received: Union[bytearray, bytes]) -> None:
        """
        :param file_no: the socket connection file_no
        :param bytes_received: the bytes received from the socket
        """
        conn = self.connection_pool.get_by_fileno(file_no)

        if conn is None:
            logger.debug(
                "Received bytes for connection not in pool. file_no: {0}",
                file_no)
            return

        if not conn.is_alive():
            conn.log_trace("Skipping receiving bytes for closed connection.")
            return

        conn.add_received_bytes(bytes_received)
        conn.process_message()

    def get_bytes_to_send(self, file_no: int) -> Optional[memoryview]:
        conn = self.connection_pool.get_by_fileno(file_no)

        if conn is None:
            logger.debug(
                "Request to get bytes for connection not in pool. file_no: {}",
                file_no)
            return None

        if not conn.is_alive():
            conn.log_trace("Skipping sending bytes for closed connection.")
            return None

        return conn.get_bytes_to_send()

    def on_bytes_sent(self, file_no: int, bytes_sent: int):
        conn = self.connection_pool.get_by_fileno(file_no)

        if conn is None:
            logger.debug(
                "Bytes sent call for connection not in pool. file_no: {0}",
                file_no)
            return

        conn.advance_sent_bytes(bytes_sent)

    def on_bytes_written_to_socket(self, file_no: int,
                                   bytes_written: int) -> None:
        conn = self.connection_pool.get_by_fileno(file_no)

        if conn is None:
            logger.debug("Bytes written call for connection not in pool: {}",
                         file_no)
            return

        conn.advance_bytes_written_to_socket(bytes_written)

    def fire_alarms(self) -> float:
        time_to_next = self.alarm_queue.fire_ready_alarms()
        if time_to_next is not None:
            return time_to_next
        else:
            return constants.MAX_EVENT_LOOP_TIMEOUT

    def force_exit(self):
        """
        Indicates whether the node should trigger an exit in the event loop. Primarily used for testing.

        Typically requires one additional socket call (e.g. connecting to this node via a socket)
        to finish terminating the event loop.
        """
        return self.should_force_exit

    async def close(self):
        logger.info("Node is closing! Closing everything.")

        shutdown_task = asyncio.ensure_future(self.close_all_connections())
        try:
            await asyncio.wait_for(shutdown_task,
                                   constants.NODE_SHUTDOWN_TIMEOUT_S)
        except Exception as e:  # pylint: disable=broad-except
            logger.exception(
                "Node shutdown failed due to an error: {}, force closing!", e)
        self.requester.close()
        self.cleanup_memory_stats_logging()

    async def close_all_connections(self):
        """
        Closes all connections from the node
        """
        for _, conn in self.connection_pool.items():
            conn.mark_for_close(should_retry=False)

    def broadcast(
        self,
        msg: AbstractMessage,
        broadcasting_conn: Optional[AbstractConnection] = None,
        prepend_to_queue: bool = False,
        connection_types: Optional[List[ConnectionType]] = None
    ) -> List[AbstractConnection]:
        """
        Broadcasts message msg to connections of the specified types, excluding the requesting connection.
        """
        if connection_types is None:
            connection_types = [ConnectionType.RELAY_ALL]
        options = BroadcastOptions(broadcasting_conn, prepend_to_queue,
                                   connection_types)
        connections = self.broadcast_service.broadcast(msg, options)
        return connections

    def enqueue_connection(self, ip: str, port: int,
                           connection_type: ConnectionType):
        """
        Queues a connection up for the event loop to open a socket for.
        """
        peer_info = ConnectionPeerInfo(IpEndpoint(ip, port), connection_type)
        if peer_info in self.pending_connection_attempts:
            logger.debug(
                "Not adding {}, waiting until connection attempt to complete",
                peer_info)
        else:
            logger.trace("Enqueuing connection: {}.", peer_info)
            self.pending_connection_requests.add(peer_info)

    def dequeue_connection_requests(self) -> Optional[Set[ConnectionPeerInfo]]:
        """
        Returns the pending connection requests for the event loop to initiate a socket connection to.
        """
        if self.pending_connection_requests:
            pending_connection_requests = self.pending_connection_requests
            self.pending_connection_requests = set()
            self.pending_connection_attempts.update(
                pending_connection_requests)
            return pending_connection_requests
        else:
            return None

    def continue_retrying_connection(self, ip: str, port: int,
                                     connection_type: ConnectionType) -> bool:
        """
        Indicates whether to continue retrying the connection. Most connections stop retrying
        after a maximum of constants.MAX_CONNECT_RETRIES attempts, but some connections should
        always be retried unless there is a fatal socket error.
        """
        is_sdn = ConnectionType.SDN in connection_type
        return is_sdn or self.num_retries_by_ip[
            (ip, port)] < constants.MAX_CONNECT_RETRIES

    def init_node_status_logging(self):
        node_stats_service.set_node(self)
        self.alarm_queue.register_alarm(constants.FIRST_STATS_INTERVAL_S,
                                        node_stats_service.flush_info)

    def init_throughput_logging(self):
        throughput_statistics.set_node(self)
        self.alarm_queue.register_alarm(constants.FIRST_STATS_INTERVAL_S,
                                        throughput_statistics.flush_info)

    def init_node_info_logging(self):
        node_info_statistics.set_node(self)
        self.alarm_queue.register_alarm(constants.FIRST_STATS_INTERVAL_S,
                                        node_info_statistics.flush_info)

    def cleanup_memory_stats_logging(self):
        memory_statistics.stop_recording()

    def init_block_stats_logging(self):
        block_stats.set_node(self)

    def init_tx_stats_logging(self):
        tx_stats.set_node(self)

    def flush_all_send_buffers(self):
        for conn in self.connection_pool:
            if conn.socket_connection.can_send:
                conn.socket_connection.send()
        return self.FLUSH_SEND_BUFFERS_INTERVAL

    def record_mem_stats(self, low_threshold: int, medium_threshold: int,
                         high_threshold: int):
        """
        When overridden, records identified memory stats and flushes them to std out
        :returns memory stats flush interval
        """
        total_memory = memory_utils.get_app_memory_usage()
        if total_memory > low_threshold:
            gc.collect()
            total_memory = memory_utils.get_app_memory_usage()
        self._record_mem_stats(total_memory > medium_threshold)

        return memory_statistics.flush_info(high_threshold)

    def _record_mem_stats(self, include_data_structure_memory: bool = False):
        if include_data_structure_memory:
            self.connection_pool.log_connection_pool_mem_stats()

    def set_node_config_opts_from_sdn(self, opts: CommonOpts) -> None:
        blockchain_networks: Dict[
            int, BlockchainNetworkModel] = opts.blockchain_networks
        for blockchain_network in blockchain_networks.values():
            tx_stats.configure_network(
                blockchain_network.network_num,
                blockchain_network.tx_percent_to_log_by_hash,
                blockchain_network.tx_percent_to_log_by_sid)
        bdn_tx_to_bx_tx.init(blockchain_networks)

    def dump_memory_usage(self, total_memory: int, threshold: int):
        if total_memory > threshold and logger.isEnabledFor(LogLevel.DEBUG):
            node_size = self.get_node_memory_size()
            memory_logger.debug(
                "Application consumed {} bytes which is over set limit {} bytes. Detailed memory report: {}",
                total_memory, threshold, node_size)

    def get_node_memory_size(self):
        return memory_utils.get_detailed_object_size(self)

    def on_input_received(self, file_no: int) -> bool:
        """handles an input event from the event loop

        :param file_no: the socket connection file_no
        :return: True if the connection is receivable, otherwise False
        """
        connection = self.connection_pool.get_by_fileno(file_no)
        if connection is None:
            return False
        return connection.on_input_received()

    async def init(self) -> None:
        self.requester.start()

    def handle_connection_closed(self, should_retry: bool,
                                 peer_info: ConnectionPeerInfo,
                                 connection_state: ConnectionState) -> None:
        self.pending_connection_attempts.discard(peer_info)
        peer_ip, peer_port = peer_info.endpoint
        connection_type = peer_info.connection_type
        if should_retry and self.continue_retrying_connection(
                peer_ip, peer_port, connection_type):
            self.alarm_queue.register_alarm(
                self._get_next_retry_timeout(peer_ip, peer_port),
                self._retry_init_client_socket, peer_ip, peer_port,
                connection_type)
        else:
            self.on_failed_connection_retry(peer_ip, peer_port,
                                            connection_type, connection_state)

    def get_server_ssl_ctx(self) -> SSLContext:
        return self.node_ssl_service.create_ssl_context(
            SSLCertificateType.PRIVATE)

    def get_target_ssl_ctx(self, endpoint: IpEndpoint,
                           connection_type: ConnectionType) -> SSLContext:
        logger.trace("Fetching SSL certificate for: {} ({}).", endpoint,
                     connection_type)
        return self.node_ssl_service.create_ssl_context(
            SSLCertificateType.PRIVATE)

    @abstractmethod
    def reevaluate_transaction_streamer_connection(self) -> None:
        raise NotImplementedError

    @abstractmethod
    def on_new_subscriber_request(self) -> None:
        raise NotImplementedError

    @abstractmethod
    def init_memory_stats_logging(self):
        raise NotImplementedError

    @abstractmethod
    def sync_tx_services(self):
        self.start_sync_time = time.time()
        self.sync_metrics = defaultdict(Counter)

    @abstractmethod
    def _transaction_sync_timeout(self) -> int:
        pass

    @abstractmethod
    def check_sync_relay_connections(self, conn: AbstractConnection) -> int:
        pass

    def _get_socket_peer_info(
            self,
            sock: AbstractSocketConnectionProtocol) -> AuthenticatedPeerInfo:
        assert sock.is_ssl
        assert self.NODE_TYPE is not None

        cert = sock.get_peer_certificate()
        node_type = extensions_factory.get_node_type(cert)
        try:
            connection_type = convert.peer_node_to_connection_type(
                # pyre-fixme[6]: Expected `NodeType` for 1st param but got
                #  `Optional[NodeType]`.
                self.NODE_TYPE,
                node_type)
        except (KeyError, ValueError):
            raise ConnectionAuthenticationError(
                f"Peer ssl certificate ({cert}) has an invalid node type: {node_type}!"
            )
        peer_id = extensions_factory.get_node_id(cert)
        if peer_id is None:
            raise ConnectionAuthenticationError(
                f"Peer ssl certificate ({cert}) does not contain a node id!")

        account_id = extensions_factory.get_account_id(cert)
        node_privileges = extensions_factory.get_node_privileges(cert)
        return AuthenticatedPeerInfo(connection_type, peer_id, account_id,
                                     node_privileges)

    def _should_log_closed_connection(self,
                                      _connection: AbstractConnection) -> bool:
        return True

    def _destroy_conn(self, conn: AbstractConnection):
        """
        Clean up the associated connection and update all data structures tracking it.

        Do not call this function directly to close a connection, unless circumstances do not allow cleanly shutting
        down the node via event loop lifecycle hooks (e.g. immediate shutdown).

        In connection handlers, use `AbstractConnection#mark_for_close`, and the connection will be cleaned up as part
        of event handling.
        In other node lifecycle events, use `enqueue_disconnect` to allow the event loop to trigger connection cleanup.

        :param conn connection to destroy
        """

        if self._should_log_closed_connection(conn):
            self.log_closed_connection(conn)

        should_retry = SocketConnectionStates.DO_NOT_RETRY not in conn.socket_connection.state

        logger.debug("Breaking connection to {}. Attempting retry: {}", conn,
                     should_retry)
        conn.dispose()
        self.connection_pool.delete(conn)
        self.handle_connection_closed(
            should_retry,
            ConnectionPeerInfo(conn.endpoint, conn.CONNECTION_TYPE),
            conn.state)

    def _initialize_connection(
        self, socket_connection: AbstractSocketConnectionProtocol
    ) -> Optional[AbstractConnection]:
        conn_obj = self.build_connection(socket_connection)
        ip, port = socket_connection.endpoint
        if conn_obj is not None:
            logger.debug("Connecting to: {}...", conn_obj)

            self.alarm_queue.register_alarm(constants.CONNECTION_TIMEOUT,
                                            self._connection_timeout, conn_obj)
            self.connection_pool.add(socket_connection.file_no, ip, port,
                                     conn_obj)

            if conn_obj.CONNECTION_TYPE == ConnectionType.SDN:
                # pyre-fixme[16]: `AbstractNode` has no attribute `sdn_connection`.
                self.sdn_connection = conn_obj
        else:
            logger.warning(log_messages.UNABLE_TO_DETERMINE_CONNECTION_TYPE,
                           ip, port)
            socket_connection.mark_for_close(should_retry=False)

        return conn_obj

    def on_network_synced(self, network_num: int) -> None:
        if network_num in self.last_sync_message_received_by_network:
            del self.last_sync_message_received_by_network[network_num]

    def on_fully_updated_tx_service(self):
        logger.debug(
            "Synced transaction state with BDN, last_sync_message_received_by_network: {}",
            self.last_sync_message_received_by_network)
        self.opts.has_fully_updated_tx_service = True

    def _connection_timeout(self, conn: AbstractConnection) -> int:
        """
        Check if the connection is established.
        If it is not established, we give up for untrusted connections and try again for trusted connections.
        """

        logger.trace("Checking connection status: {}", conn)

        if ConnectionState.ESTABLISHED in conn.state:
            logger.trace("Connection is still established: {}", conn)
            self.num_retries_by_ip[(conn.peer_ip, conn.peer_port)] = 0
            return constants.CANCEL_ALARMS

        if not conn.is_alive():
            logger.trace("Connection has already been marked for close: {}",
                         conn)
            return constants.CANCEL_ALARMS

        # Clean up the old connection and retry it if it is trusted
        logger.trace("Connection has timed out: {}", conn)
        conn.mark_for_close()

        # It is connect_to_address's job to schedule this function.
        return constants.CANCEL_ALARMS

    def _kill_node(self, _signum, _stack):
        """
        Kills the node immediately
        """
        self.should_force_exit = True
        raise TerminationError("Node killed.")

    def _get_next_retry_timeout(self, ip: str, port: int) -> int:
        """
        Returns Fibonacci(n), where n is the number of retry attempts + 1, up to max of Fibonacci(8) == 13.
        """
        golden_ratio = (1 + 5**.5) / 2
        sequence_number = min(self.num_retries_by_ip[(ip, port)] + 1,
                              constants.MAX_CONNECT_TIMEOUT_INCREASE)
        return int((golden_ratio**sequence_number -
                    (1 - golden_ratio)**sequence_number) / 5**.5)

    def _retry_init_client_socket(self, ip: str, port: int,
                                  connection_type: ConnectionType):
        self.num_retries_by_ip[(ip, port)] += 1

        logger.debug("Retrying {} connection to {}:{}. Attempt #{}.",
                     connection_type, ip, port,
                     self.num_retries_by_ip[(ip, port)])
        self.enqueue_connection(ip, port, connection_type)

        return 0

    def _responsiveness_check_log(self):
        details = ""
        if self.gc_logging_enabled:
            gen0_stats, gen1_stats, gen2_stats = gc.get_stats()

            last_gen0_collections = self._last_responsiveness_check_details.get(
                "gen0_collections", 0)
            last_gen1_collections = self._last_responsiveness_check_details.get(
                "gen1_collections", 0)
            last_gen2_collections = self._last_responsiveness_check_details.get(
                "gen2_collections", 0)

            gen0_diff = gen0_stats["collections"] - last_gen0_collections
            gen1_diff = gen1_stats["collections"] - last_gen1_collections
            gen2_diff = gen2_stats["collections"] - last_gen2_collections

            details = (
                f"gen0_collections: {gen0_diff}, gen1_collections: {gen1_diff}, "
                f"gen2_collections: {gen2_diff}")
            self._last_responsiveness_check_details.update({
                "gen0_collections": gen0_stats["collections"],
                "gen1_collections": gen1_stats["collections"],
                "gen2_collections": gen2_stats["collections"],
            })

        performance_utils.log_operation_duration(
            performance_troubleshooting_logger,
            "Responsiveness Check",
            self._last_responsiveness_check_log_time,
            constants.RESPONSIVENESS_CHECK_INTERVAL_S +
            constants.RESPONSIVENESS_CHECK_DELAY_WARN_THRESHOLD_S,
            details=details)
        self._last_responsiveness_check_log_time = time.time()
        return constants.RESPONSIVENESS_CHECK_INTERVAL_S
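_get_next_retry_timeout above computes Fibonacci numbers via Binet's closed form: with the golden ratio phi = (1 + sqrt(5)) / 2, F(n) = (phi^n - (1 - phi)^n) / sqrt(5). A standalone check of the backoff values it produces; the cap, constants.MAX_CONNECT_TIMEOUT_INCREASE, is not shown in this listing, so it is a parameter here:

# Standalone restatement of the Binet-formula backoff in _get_next_retry_timeout.
def fib_backoff(retries: int, cap: int) -> int:
    golden_ratio = (1 + 5 ** 0.5) / 2
    n = min(retries + 1, cap)
    return int((golden_ratio ** n - (1 - golden_ratio) ** n) / 5 ** 0.5)

# With cap=7 the retry timeouts grow 1, 1, 2, 3, 5, 8, 13 seconds and then
# plateau at 13, matching the docstring's stated maximum of 13. (int() of the
# float Binet value can in principle be off by one at the margins, a rounding
# hazard the original shares.)
print([fib_backoff(r, cap=7) for r in range(9)])  # expected: [1, 1, 2, 3, 5, 8, 13, 13, 13]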
Example #16
 def setUp(self):
     self.alarm_queue = AlarmQueue()
Example #17
class AlarmQueueTest(AbstractTestCase):
    def setUp(self):
        self.alarm_queue = AlarmQueue()

    def function_to_pass(self, first, second):
        return first + second

    def test_register_alarm(self):
        alarm_id = self.alarm_queue.register_alarm(1, self.function_to_pass, 1,
                                                   5)
        self.assertEqual(1, len(self.alarm_queue.alarms))
        self.assertEqual(1, self.alarm_queue.uniq_count)
        self.assertEqual(0, self.alarm_queue.alarms[0].count)
        self.assertEqual(0, alarm_id.count)

    def test_register_approx_alarm(self):
        self.alarm_queue.register_approx_alarm(1, 3, self.function_to_pass, 1,
                                               5)
        self.assertEqual(
            1,
            len(self.alarm_queue.approx_alarms_scheduled[
                self.function_to_pass]))
        self.assertEqual(
            self.function_to_pass, self.alarm_queue.approx_alarms_scheduled[
                self.function_to_pass][0].alarm.fn)

    def test_unregister_alarm(self):
        alarm_id1 = self.alarm_queue.register_alarm(1, self.function_to_pass,
                                                    1, 5)
        self.assertEqual(1, len(self.alarm_queue.alarms))
        alarm_id2 = self.alarm_queue.register_alarm(1, self.function_to_pass,
                                                    2, 9)
        self.assertEqual(2, len(self.alarm_queue.alarms))
        self.alarm_queue.unregister_alarm(alarm_id1)
        self.assertEqual(1, len(self.alarm_queue.alarms))
        self.alarm_queue.unregister_alarm(alarm_id2)
        self.assertEqual(0, len(self.alarm_queue.alarms))

    def test_fire_alarms(self):
        self.alarm_queue.register_alarm(1, self.function_to_pass, 0, 0)
        self.alarm_queue.register_alarm(5, self.function_to_pass, 0, 0)
        time.time = MagicMock(return_value=time.time() + 2)
        self.alarm_queue.fire_alarms()
        self.assertEqual(1, len(self.alarm_queue.alarms))

    def test_time_to_next_alarm(self):
        self.assertIsNone(self.alarm_queue.time_to_next_alarm())
        self.alarm_queue.register_alarm(1, self.function_to_pass, 1, 5)
        self.assertEqual(1, len(self.alarm_queue.alarms))
        self.assertLess(0, self.alarm_queue.time_to_next_alarm())
        time.time = MagicMock(return_value=time.time() + 2)
        self.assertGreater(0, self.alarm_queue.time_to_next_alarm())

    def test_fire_ready_alarms(self):
        self.alarm_queue.register_alarm(1, self.function_to_pass, 0, 0)
        self.alarm_queue.register_alarm(5, self.function_to_pass, 0, 0)
        time.time = MagicMock(return_value=time.time() + 2)
        time_to_next_alarm = self.alarm_queue.fire_ready_alarms()
        self.assertEqual(1, len(self.alarm_queue.alarms))
        self.assertLess(0, time_to_next_alarm)

    def test_approx_alarm_cleans_up_even_with_exceptions(self):
        def raise_exception(should_raise: bool):
            if should_raise:
                raise Exception()
            else:
                pass

        self.alarm_queue.register_approx_alarm(0, 1, raise_exception, True)
        self.alarm_queue.register_approx_alarm(2, 1, raise_exception, False)
        self.alarm_queue.fire_alarms()

        time.time = MagicMock(return_value=time.time() + 2)
        self.alarm_queue.fire_alarms()
Example #18
class EncryptedCacheTest(AbstractTestCase):
    ALARM_QUEUE = AlarmQueue()

    def test_encrypt_and_store(self):
        payload = bytearray(i for i in range(100))
        sut = EncryptedCache(10, self.ALARM_QUEUE)
        ciphertext, block_hash = sut.encrypt_and_add_payload(payload)

        self.assertEqual(1, len(sut))
        cache_item = sut._cache.get(block_hash)

        self.assertEqual(ciphertext, cache_item.ciphertext)
        self.assertEqual(
            payload, symmetric_decrypt(cache_item.key, cache_item.ciphertext))

    def test_decrypt_and_get(self):
        payload = bytearray(i for i in range(100))
        sut1 = EncryptedCache(10, self.ALARM_QUEUE)
        ciphertext, block_hash = sut1.encrypt_and_add_payload(payload)
        key = sut1.get_encryption_key(block_hash)

        sut2 = EncryptedCache(10, self.ALARM_QUEUE)
        sut2.add_ciphertext(block_hash, ciphertext)
        decrypted = sut2.decrypt_and_get_payload(block_hash, key)

        self.assertEqual(payload, decrypted)

    def test_decrypt_ciphertext(self):
        payload = bytearray(i for i in range(100))
        sut1 = EncryptedCache(10, self.ALARM_QUEUE)
        ciphertext, block_hash = sut1.encrypt_and_add_payload(payload)
        key = sut1.get_encryption_key(block_hash)

        sut2 = EncryptedCache(10, self.ALARM_QUEUE)
        sut2.add_key(block_hash, key)
        decrypted = sut2.decrypt_ciphertext(block_hash, ciphertext)

        self.assertEqual(payload, decrypted)

    def test_cant_decrypt_incomplete_content(self):
        ciphertext = b"foobar"
        hash_key = b"baz"

        sut1 = EncryptedCache(10, self.ALARM_QUEUE)
        sut1.add_ciphertext(hash_key, ciphertext)

        self.assertIsNone(sut1.decrypt_ciphertext(hash_key, ciphertext))

    def test_cant_decrypt_wrong_keys(self):
        ciphertext = b"foobar" * 50  # ciphertext needs to be long enough to contain a nonce
        hash_key = b"bbaz"
        bad_encryption_key = b"q" * KEY_SIZE

        sut1 = EncryptedCache(10, self.ALARM_QUEUE)
        sut1.add_ciphertext(hash_key, ciphertext)
        sut1.add_key(hash_key, bad_encryption_key)

        self.assertIsNone(sut1.decrypt_ciphertext(hash_key, ciphertext))

    def test_cache_cleanup(self):
        ciphertext = b"foobar"
        hash_key = b"baz"

        sut = EncryptedCache(10, self.ALARM_QUEUE)
        sut.add_ciphertext(hash_key, ciphertext)
        self.assertEqual(1, len(sut))

        time.time = MagicMock(return_value=time.time() + 20)
        self.ALARM_QUEUE.fire_alarms()
        self.assertEqual(0, len(sut))
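
Both this cache and the ExpiringDict tests at the end of this document follow the same pattern: the container registers an alarm whose callback evicts the entry once its lifetime elapses. A minimal sketch of that pattern, with hypothetical names (ExpiringStore is not part of the library):

class ExpiringStore:
    """Hypothetical sketch of alarm-driven expiration; not library code."""

    def __init__(self, expiration_time_s: int, alarm_queue: AlarmQueue):
        self._contents = {}
        self._expiration_time_s = expiration_time_s
        self._alarm_queue = alarm_queue

    def add(self, key, value):
        self._contents[key] = value
        # Evict this entry once its lifetime elapses.
        self._alarm_queue.register_alarm(self._expiration_time_s,
                                         self._expire, key)

    def _expire(self, key):
        self._contents.pop(key, None)
        return 0  # one-shot alarm: do not reschedule

Fast-forwarding time.time and calling fire_alarms(), as test_cache_cleanup does above, would then empty the store.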
Example #19
    def setUp(self) -> None:
        self.alarm_queue = AlarmQueue()
        self.sut = EthPendingTransactionFeed(self.alarm_queue)
Example #20
    def __init__(self, interval_len_s: int, alarm_queue: AlarmQueue):
        self.current_minimum: int = 0

        self._interval_len_s = interval_len_s
        self._next_interval_minimum: Optional[int] = None
        alarm_queue.register_alarm(self._interval_len_s, self._on_interval_end)
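
Example #20 shows only the constructor. A plausible completion is sketched below; it is inferred from the fields above and hypothetical beyond what the snippet shows. The idea is to track a running minimum for the current interval and roll it over when the alarm fires:

    def set(self, value: int) -> None:
        # Record the smallest value observed during the current interval.
        if self._next_interval_minimum is None or value < self._next_interval_minimum:
            self._next_interval_minimum = value

    def _on_interval_end(self) -> int:
        # Promote this interval's minimum and start tracking a new interval.
        if self._next_interval_minimum is not None:
            self.current_minimum = self._next_interval_minimum
        self._next_interval_minimum = None
        return self._interval_len_s  # reschedule for the next interval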
Example #21
    def setUp(self):
        self.connection = helpers.create_connection(InternalNodeConnection)
        self.connection.on_connection_established()
        self.alarm_queue = AlarmQueue()
        self.connection.node.alarm_queue = self.alarm_queue
Example #22
    def setUp(self) -> None:
        self.alarm_queue = AlarmQueue()
        self.sut = EthPendingTransactionFeed(self.alarm_queue)
        self.message_converter = EthNormalMessageConverter()
Example #23
class AbstractNode(metaclass=ABCMeta):
    FLUSH_SEND_BUFFERS_INTERVAL = constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME * 2
    NODE_TYPE = None

    def __init__(self, opts: Namespace):
        logger.debug("Initializing node of type: {}", self.NODE_TYPE)

        self.set_node_config_opts_from_sdn(opts)
        self.opts = opts
        self.connection_queue: Deque[Tuple[str, int]] = deque()
        self.disconnect_queue: Deque[DisconnectRequest] = deque()
        self.outbound_peers = opts.outbound_peers[:]

        self.connection_pool = ConnectionPool()

        self.should_force_exit = False

        self.num_retries_by_ip: Dict[Tuple[str, int], int] = defaultdict(int)

        # Handle termination gracefully
        signal.signal(signal.SIGTERM, self._kill_node)
        signal.signal(signal.SIGINT, self._kill_node)
        signal.signal(signal.SIGSEGV, self._kill_node)

        # Event handling queue for delayed events
        self.alarm_queue = AlarmQueue()

        self.init_throughput_logging()
        self.init_node_info_logging()
        self.init_memory_stats_logging()
        self.init_block_stats_logging()
        self.init_tx_stats_logging()

        # TODO: clean this up alongside outputbuffer holding time
        # this is Nagle's algorithm and we need to implement it properly
        # flush buffers regularly because of output buffer holding time
        self.alarm_queue.register_approx_alarm(
            self.FLUSH_SEND_BUFFERS_INTERVAL,
            constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME,
            self.flush_all_send_buffers)

        self.network_num = opts.blockchain_network_num
        self.broadcast_service = self.get_broadcast_service()

        # converting setting in MB to bytes
        self.next_report_mem_usage_bytes = self.opts.dump_detailed_report_at_memory_usage * 1024 * 1024

        if opts.dump_removed_short_ids:
            os.makedirs(opts.dump_removed_short_ids_path, exist_ok=True)

        # Each time a network has an update regarding txs, blocks, etc., record it in this dict
        # so we can verify whether the node has lost its connection to the requested relay.

        self.last_sync_message_received_by_network: Dict[int, float] = {}

        opts.has_fully_updated_tx_service = False
        self.alarm_queue.register_alarm(constants.TX_SERVICE_SYNC_PROGRESS_S,
                                        self._sync_tx_services)
        self._check_sync_relay_connections_alarm_id = self.alarm_queue.register_alarm(
            constants.LAST_MSG_FROM_RELAY_THRESHOLD_S,
            self._check_sync_relay_connections)
        self._transaction_sync_timeout_alarm_id = self.alarm_queue.register_alarm(
            constants.TX_SERVICE_CHECK_NETWORKS_SYNCED_S,
            self._transaction_sync_timeout)

    def get_sdn_address(self):
        """
        Placeholder for the networking event loop to get the SDN address (relay only).
        """
        return

    @abstractmethod
    def get_tx_service(self, network_num=None):
        pass

    @abstractmethod
    def get_outbound_peer_addresses(self):
        pass

    @abstractmethod
    def get_broadcast_service(self) -> BroadcastService:
        pass

    def connection_exists(self, ip, port):
        return self.connection_pool.has_connection(ip, port)

    def on_connection_added(self, socket_connection: SocketConnection, ip: str,
                            port: int, from_me: bool):
        """
        Notifies the node that a connection is coming in.
        """

        # If we're already connected to the remote peer, log the event and request disconnect.
        if self.connection_exists(ip, port):
            logger.debug(
                "Duplicate connection attempted to: {0}:{1}. Dropping.", ip,
                port)

            # Schedule dropping the added connection and keep the old one.
            self.enqueue_disconnect(socket_connection, False)
        else:
            self._initialize_connection(socket_connection, ip, port, from_me)

    def on_connection_initialized(self, fileno: int):
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Unexpectedly initialized connection not in pool. Fileno: {0}",
                fileno)
            return

        logger.debug("Connection initialized: {}", conn)
        conn.state |= ConnectionState.INITIALIZED

        # Reset num_retries when a connection is established, so the Fibonacci backoff
        # used to compute the next retry starts over
        self.num_retries_by_ip[(conn.peer_ip, conn.peer_port)] = 0

    def on_connection_closed(self, fileno: int, retry_conn: bool):
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Unexpectedly closed connection not in pool. Fileno: {0}",
                fileno)
            return

        logger.info("Closed connection: {}", conn)
        self._destroy_conn(conn, retry_connection=retry_conn)

    @abstractmethod
    def send_request_for_relay_peers(self):
        pass

    def on_updated_peers(self, outbound_peer_models):
        if not outbound_peer_models:
            logger.debug("Got peer update with no peers.")
            return

        logger.debug("Processing updated outbound peers: {}.",
                     outbound_peer_models)

        # Remove peers not in updated list or from command-line args.
        remove_peers = []
        old_peers = self.outbound_peers
        for old_peer in old_peers:
            if not (any(old_peer.ip == fixed_peer.ip
                        and old_peer.port == fixed_peer.port
                        for fixed_peer in self.opts.outbound_peers)
                    or any(new_peer.ip == old_peer.ip
                           and new_peer.port == old_peer.port
                           for new_peer in outbound_peer_models)):
                remove_peers.append(old_peer)

        for rem_peer in remove_peers:
            if self.connection_pool.has_connection(rem_peer.ip, rem_peer.port):
                rem_conn = self.connection_pool.get_by_ipport(
                    rem_peer.ip, rem_peer.port)
                if rem_conn:
                    self.mark_connection_for_close(rem_conn, False)

        # Connect to peers not in our known pool
        for peer in outbound_peer_models:
            peer_ip = peer.ip
            peer_port = peer.port
            if not self.connection_pool.has_connection(peer_ip, peer_port):
                self.enqueue_connection(peer_ip, peer_port)
        self.outbound_peers = outbound_peer_models

    def on_updated_sid_space(self, sid_start, sid_end):
        """
        Placeholder interface to receive sid updates from SDN over sockets and pass to relay node
        """

        return

    def on_bytes_received(self, fileno: int,
                          bytes_received: bytearray) -> None:
        """
        :param fileno:
        :param bytes_received:
        :return: True if the node should continue receiving bytes from the remote peer. False otherwise.
        """
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Received bytes for connection not in pool. Fileno: {0}",
                fileno)
            return

        if conn.state & ConnectionState.MARK_FOR_CLOSE:
            conn.log_trace("Skipping receiving bytes for closed connection.")
            return

        conn.add_received_bytes(bytes_received)

    def on_finished_receiving(self, fileno):
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Received bytes for connection not in pool. Fileno: {0}",
                fileno)
            return

        conn.process_message()

        if conn.state & ConnectionState.MARK_FOR_CLOSE:
            self.enqueue_disconnect(conn.socket_connection, conn.from_me)

    def get_bytes_to_send(self, fileno):
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Request to get bytes for connection not in pool. Fileno: {0}",
                fileno)
            return

        if conn.state & ConnectionState.MARK_FOR_CLOSE:
            conn.log_trace("Skipping sending bytes for closed connection.")
            return

        return conn.get_bytes_to_send()

    def on_bytes_sent(self, fileno, bytes_sent):
        conn = self.connection_pool.get_by_fileno(fileno)

        if conn is None:
            logger.debug(
                "Bytes sent call for connection not in pool. Fileno: {0}",
                fileno)
            return

        conn.advance_sent_bytes(bytes_sent)

    def get_sleep_timeout(self, triggered_by_timeout, first_call=False):
        # TODO: remove first_call from this function. You can just fire all of the ready alarms on every call
        # to get the timeout.
        if first_call:
            _, timeout = self.alarm_queue.time_to_next_alarm()

            # The timeout can be negative during debugging
            if timeout < 0:
                timeout = constants.DEFAULT_SLEEP_TIMEOUT

            return timeout
        else:
            time_to_next = self.alarm_queue.fire_ready_alarms(
                triggered_by_timeout)
            if self.connection_queue or self.disconnect_queue:
                # TODO: this should be constants.MIN_SLEEP_TIMEOUT, which is different for kqueues and epoll.
                # We want to process connection/disconnection requests ASAP.
                time_to_next = constants.DEFAULT_SLEEP_TIMEOUT

            return time_to_next

    def force_exit(self):
        """
        Indicates if node should trigger exit in event loop. Primarily used for testing.

        Typically requires one additional socket call (e.g. connecting to this node via a socket)
        to finish terminating the event loop.
        """
        return self.should_force_exit

    def close(self):
        logger.error("Node is closing! Closing everything.")

        for _fileno, conn in self.connection_pool.items():
            self._destroy_conn(conn, force_destroy=True)
        self.cleanup_memory_stats_logging()

    def broadcast(self, msg: AbstractMessage, broadcasting_conn: Optional[AbstractConnection] = None,
                  prepend_to_queue: bool = False, connection_types: Optional[List[ConnectionType]] = None) \
            -> List[AbstractConnection]:
        """
        Broadcasts message msg to connections of the specified type except requester.
        """
        if connection_types is None:
            connection_types = [ConnectionType.RELAY_ALL]
        options = BroadcastOptions(broadcasting_conn, prepend_to_queue,
                                   connection_types)
        return self.broadcast_service.broadcast(msg, options)

    @abstractmethod
    def build_connection(self, socket_connection: SocketConnection, ip: str, port: int, from_me: bool = False) \
            -> Optional[AbstractConnection]:
        pass

    def enqueue_connection(self, ip: str, port: int):
        """
        Queues a connection up for the event loop to open a socket for.
        """
        logger.trace("Enqueuing connection to {}:{}", ip, port)
        self.connection_queue.append((ip, port))

    def enqueue_disconnect(self,
                           socket_connection: SocketConnection,
                           should_retry: Optional[bool] = None):
        """
        Queues up a disconnect for the event loop to close the socket and destroy the connection object for.

        This should always be called with a value provided for `should_retry`, unless the connection object is unknown
        (e.g. from the event loop or SocketConnection classes).
        """
        fileno = socket_connection.fileno()
        logger.trace("Enqueuing disconnect from {}", fileno)

        if should_retry is None:
            conn = self.connection_pool.get_by_fileno(fileno)

            if conn is None:
                logger.debug(
                    "Unexpectedly tried to enqueue a disconnect without a connection object on fileno: {}",
                    fileno)
                should_retry = False
            else:
                conn.log_debug(
                    "Connection close triggered by socket layer or event loop."
                )
                conn.mark_for_close()
                should_retry = conn.from_me

        socket_connection.mark_for_close()
        self.disconnect_queue.append(DisconnectRequest(fileno, should_retry))

    def mark_connection_for_close(self,
                                  connection: AbstractConnection,
                                  should_retry: Optional[bool] = None):
        if should_retry is None:
            should_retry = connection.from_me
        connection.mark_for_close()
        self.enqueue_disconnect(connection.socket_connection, should_retry)

    def pop_next_connection_address(self) -> Optional[Tuple[str, int]]:
        """
        Returns the next connection address for the event loop to initiate a socket connection to.
        """
        if self.connection_queue:
            return self.connection_queue.popleft()

        return

    def pop_next_disconnect_connection(self) -> Optional[DisconnectRequest]:
        """
        Returns the next disconnect request for the event loop to destroy the socket connection for.

        The event loop is expected to call `on_connection_closed` afterward.
        """
        if self.disconnect_queue:
            return self.disconnect_queue.popleft()

        return

    def _destroy_conn(self,
                      conn,
                      retry_connection: bool = False,
                      force_destroy: bool = False):
        """
        Clean up the associated connection and update all data structures tracking it.

        Do not call this function directly to close a connection, unless circumstances do not allow cleanly shutting
        down the node via event loop lifecycle hooks (e.g. immediate shutdown).

        In connection handlers, use `AbstractConnection#mark_for_close`, and the connection will be cleaned up as part
        of event handling.
        In other node lifecycle events, use `enqueue_disconnect` to allow the event loop to trigger connection cleanup.

        :param conn connection to destroy
        :param retry_connection if connection should be retried
        :param force_destroy ignore connection state and force close. Avoid setting this except for fatal errors or
                             socket errors.
        """
        if force_destroy:
            conn.mark_for_close()

        if not conn.state & ConnectionState.MARK_FOR_CLOSE:
            raise ValueError(
                "Attempted to close connection that was not MARK_FOR_CLOSE.")

        logger.debug("Breaking connection to {}. Attempting retry: {}", conn,
                     retry_connection)
        conn.close()
        self.connection_pool.delete(conn)

        peer_ip, peer_port = conn.peer_ip, conn.peer_port
        if retry_connection:
            self.alarm_queue.register_alarm(
                self._get_next_retry_timeout(peer_ip, peer_port),
                self._retry_init_client_socket, peer_ip, peer_port,
                conn.CONNECTION_TYPE)
        else:
            self.on_failed_connection_retry(peer_ip, peer_port,
                                            conn.CONNECTION_TYPE)

    def should_retry_connection(self, ip: str, port: int,
                                connection_type: ConnectionType) -> bool:
        is_sdn = bool(connection_type & ConnectionType.SDN)
        return is_sdn or self.num_retries_by_ip[
            (ip, port)] < constants.MAX_CONNECT_RETRIES

    @abstractmethod
    def on_failed_connection_retry(self, ip: str, port: int,
                                   connection_type: ConnectionType) -> None:
        pass

    def init_throughput_logging(self):
        throughput_statistics.set_node(self)
        self.alarm_queue.register_alarm(constants.FIRST_STATS_INTERVAL_S,
                                        throughput_statistics.flush_info)

    def init_node_info_logging(self):
        node_info_statistics.set_node(self)
        self.alarm_queue.register_alarm(constants.FIRST_STATS_INTERVAL_S,
                                        node_info_statistics.flush_info)

    def init_memory_stats_logging(self):
        memory_statistics.set_node(self)
        memory_statistics.start_recording(self.record_mem_stats)

    def cleanup_memory_stats_logging(self):
        memory_statistics.stop_recording()

    def init_block_stats_logging(self):
        block_stats.set_node(self)

    def init_tx_stats_logging(self):
        tx_stats.set_node(self)

    def flush_all_send_buffers(self):
        for conn in self.connection_pool:
            if conn.socket_connection.can_send:
                conn.socket_connection.send()
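        # Returning the interval re-registers this approx alarm (see register_approx_alarm in __init__).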
        return self.FLUSH_SEND_BUFFERS_INTERVAL

    def record_mem_stats(self):
        """
        When overridden, records identified memory stats and flushes them to stdout.
        :return: memory stats flush interval
        """
        self.connection_pool.log_connection_pool_mem_stats()
        return memory_statistics.flush_info()

    def set_node_config_opts_from_sdn(self, opts):

        # TODO: currently hard-coding configuration values
        opts.stats_calculate_actual_size = False
        opts.log_detailed_block_stats = False

        blockchain_networks: List[
            BlockchainNetworkModel] = opts.blockchain_networks
        for blockchain_network in blockchain_networks:
            tx_stats.configure_network(blockchain_network.network_num,
                                       blockchain_network.tx_percent_to_log)

    def dump_memory_usage(self):
        total_mem_usage = memory_utils.get_app_memory_usage()

        if total_mem_usage >= self.next_report_mem_usage_bytes:
            node_size = memory_utils.get_detailed_object_size(self)
            memory_logger.statistics(
                "Application consumed {} bytes which is over set limit {} bytes. Detailed memory report: {}",
                total_mem_usage, self.next_report_mem_usage_bytes,
                json_utils.serialize(node_size))
            self.next_report_mem_usage_bytes = total_mem_usage + constants.MEMORY_USAGE_INCREASE_FOR_NEXT_REPORT_BYTES

    def on_input_received(self, file_no: int) -> bool:
        """handles an input event from the event loop

        :param file_no: the socket connection file_no
        :return: True if the connection is receivable, otherwise False
        """
        connection = self.connection_pool.get_by_fileno(file_no)
        if connection is None:
            return False
        return connection.on_input_received()

    def _initialize_connection(self, socket_connection: SocketConnection,
                               ip: str, port: int, from_me: bool):
        conn_obj = self.build_connection(socket_connection, ip, port, from_me)
        if conn_obj is not None:
            logger.info("Connecting to: {}...", conn_obj)

            self.alarm_queue.register_alarm(constants.CONNECTION_TIMEOUT,
                                            self._connection_timeout, conn_obj)
            self.connection_pool.add(socket_connection.fileno(), ip, port,
                                     conn_obj)

            if conn_obj.CONNECTION_TYPE == ConnectionType.SDN:
                self.sdn_connection = conn_obj
        else:
            logger.warning(
                "Could not determine expected connection type for {}:{}. Disconnecting...",
                ip, port)
            self.enqueue_disconnect(socket_connection, from_me)

    def on_fully_updated_tx_service(self):
        logger.info("Synced transaction state with BDN.")
        self.opts.has_fully_updated_tx_service = True
        sdn_http_service.submit_sync_txs_event(self.opts.node_id)

    def _connection_timeout(self, conn: AbstractConnection):
        """
        Check if the connection is established.
        If it is not established, we give up for untrusted connections and try again for trusted connections.
        """

        logger.trace("Checking connection status: {}", conn)

        if conn.state & ConnectionState.ESTABLISHED:
            logger.trace("Connection is still established: {}", conn)

            return constants.CANCEL_ALARMS

        if conn.state & ConnectionState.MARK_FOR_CLOSE:
            logger.trace("Connection has already been marked for close: {}",
                         conn)
            return constants.CANCEL_ALARMS

        # Clean up the old connection and retry it if it is trusted
        logger.trace("Connection has timed out: {}", conn)
        self.mark_connection_for_close(conn)

        # It is connect_to_address's job to schedule this function.
        return constants.CANCEL_ALARMS

    def _kill_node(self, _signum, _stack):
        """
        Kills the node immediately
        """
        self.should_force_exit = True
        raise TerminationError("Node killed.")

    def _get_next_retry_timeout(self, ip: str, port: int) -> int:
        """
        Returns Fibonacci(n), where n is the number of retry attempts + 1, capped at
        constants.MAX_CONNECT_TIMEOUT_INCREASE (note that Fibonacci(7) == 13).
        """
        golden_ratio = (1 + 5**.5) / 2
        sequence_number = min(self.num_retries_by_ip[(ip, port)] + 1,
                              constants.MAX_CONNECT_TIMEOUT_INCREASE)
        return int((golden_ratio**sequence_number -
                    (1 - golden_ratio)**sequence_number) / 5**.5)

    def _retry_init_client_socket(self, ip, port, connection_type):
        self.num_retries_by_ip[(ip, port)] += 1

        if self.should_retry_connection(ip, port, connection_type):
            logger.debug("Retrying {} connection to {}:{}. Attempt #{}.",
                         connection_type, ip, port,
                         self.num_retries_by_ip[(ip, port)])
            self.enqueue_connection(ip, port)
            # When retrying the connection to the SDN there is no need to resync transactions on this
            # node; just update the 'has_fully_updated_tx_service' attribute on the SDN, since it was
            # set to False when the connection was lost.
            if connection_type == ConnectionType.SDN:
                self.on_fully_updated_tx_service()
        else:
            del self.num_retries_by_ip[(ip, port)]
            logger.debug(
                "Maximum retry attempts exceeded. Dropping {} connection to {}:{}.",
                connection_type, ip, port)
            self.on_failed_connection_retry(ip, port, connection_type)

        return 0

    @abstractmethod
    def _sync_tx_services(self):
        pass

    @abstractmethod
    def _transaction_sync_timeout(self):
        pass

    @abstractmethod
    def _check_sync_relay_connections(self):
        pass
Example #24
    def setUp(self):
        self.ALARM_QUEUE = AlarmQueue()
        self.e_dict = ExpiringDict(self.ALARM_QUEUE, self.EXPIRATION_TIME_S)
Example #25
class ExpiringDictTests(unittest.TestCase):
    EXPIRATION_TIME_S = 1

    def setUp(self):
        self.ALARM_QUEUE = AlarmQueue()
        self.e_dict = ExpiringDict(self.ALARM_QUEUE, self.EXPIRATION_TIME_S)

    def test_cleanup(self):

        kv1 = (1, 2)
        kv2 = (3, 4)
        kv3 = (5, 6)
        kv4 = (7, 8)
        kv5 = ("str1", 1)
        kv6 = ("str2", 2)

        # adding first 2 items to the dict
        self.e_dict.add(kv1[0], kv1[1])
        self.e_dict.add(kv2[0], kv2[1])

        time.time = MagicMock(return_value=time.time() +
                              self.EXPIRATION_TIME_S + 1)

        self.assertEqual(len(self.e_dict.contents), 2)
        self.assertTrue(kv1[0] in self.e_dict.contents)
        self.assertTrue(kv2[0] in self.e_dict.contents)
        self.assertEqual(self.e_dict.contents[kv1[0]], kv1[1])
        self.assertEqual(self.e_dict.contents[kv2[0]], kv2[1])

        # adding the remaining 4 items to the dict
        self.e_dict.add(kv3[0], kv3[1])
        self.e_dict.add(kv4[0], kv4[1])
        self.e_dict.add(kv5[0], kv5[1])
        self.e_dict.add(kv6[0], kv6[1])

        self.ALARM_QUEUE.fire_alarms()

        # the first 2 items have expired; the last 4 have not
        self.assertFalse(kv1[0] in self.e_dict.contents)
        self.assertFalse(kv2[0] in self.e_dict.contents)
        self.assertTrue(kv3[0] in self.e_dict.contents)
        self.assertTrue(kv4[0] in self.e_dict.contents)
        self.assertTrue(kv5[0] in self.e_dict.contents)
        self.assertTrue(kv6[0] in self.e_dict.contents)

    def test_remove_item(self):

        kv1 = (1, 2)
        self.e_dict.add(kv1[0], kv1[1])
        self.assertTrue(kv1[0] in self.e_dict.contents)
        self.e_dict.remove_item(kv1[0])
        self.assertFalse(kv1[0] in self.e_dict.contents)

    def test_cleanup__not_existing_item(self):

        kv1 = (1, 2)
        self.e_dict.add(kv1[0], kv1[1])
        self.assertTrue(kv1[0] in self.e_dict.contents)
        self.e_dict.remove_item(kv1[0])
        self.assertFalse(kv1[0] in self.e_dict.contents)

        time.time = MagicMock(return_value=time.time() +
                              self.EXPIRATION_TIME_S + 1)

        self.ALARM_QUEUE.fire_alarms()

        self.assertFalse(kv1[0] in self.e_dict.contents)