def action(self, opts: CommonOpts, node_ssl_service: NodeSSLService) -> None:
    """Merge account-level options from the SDN into ``opts``.

    Reads the account id from the node's SSL service; when one is present,
    fetches the corresponding account model from the SDN and applies it.
    Does nothing when no account id or no account model is available.
    """
    account_id = node_ssl_service.get_account_id()
    if not account_id:
        return

    # TODO: use local cache for account_model
    account_model = sdn_http_service.fetch_account_model(account_id)
    if not account_model:
        return

    opts.set_account_options(account_model)
def set_node_model(opts: CommonOpts, node_ssl_service) -> None:
    """
    Fetch (or register) this node's model from the SDN and merge its
    attributes into ``opts`` without overwriting CLI-provided values.

    Must be executed after set_network_info.

    :param opts: parsed common options; updated in place
    :param node_ssl_service: SSL service used for node registration and for
        storing the private certificate returned by the SDN
    :return: None
    """
    node_model = None
    if opts.node_id:
        # Test network, get pre-configured peers from the SDN.
        node_model = sdn_http_service.fetch_node_attributes(opts.node_id)
    if not node_model:
        # No existing model (or no node_id): register as a new node.
        node_model = _register_node(opts, node_ssl_service)

    if node_model.cert is not None:
        # Store the private cert first, then rebuild the SDN HTTP pool so
        # subsequent requests authenticate with the private certificate.
        private_cert = ssl_serializer.deserialize_cert(node_model.cert)
        node_ssl_service.blocking_store_node_certificate(private_cert)
        ssl_context = node_ssl_service.create_ssl_context(SSLCertificateType.PRIVATE)
        sdn_http_service.reset_pool(ssl_context)

    # Add opts from SDN, but don't overwrite CLI args
    default_values_to_update = [None, -1]
    for key, val in node_model.__dict__.items():
        if opts.__dict__.get(key) in default_values_to_update:
            opts.__dict__[key] = val
def __init__(
    self,
    opts: CommonOpts,
    node_ssl_service: NodeSSLService,
    connection_pool: Optional[ConnectionPool] = None,
):
    """Initialize the abstract node: connection bookkeeping, alarm queue,
    stats/logging subsystems, and the threaded SDN request service.

    :param opts: parsed common options for this node
    :param node_ssl_service: SSL service providing node certificates
    :param connection_pool: optional pre-built pool (a fresh one is created
        when omitted)
    """
    self.node_ssl_service = node_ssl_service
    logger.debug("Initializing node of type: {}", self.NODE_TYPE)

    self.server_endpoints = [
        IpEndpoint(constants.LISTEN_ON_IP_ADDRESS, opts.external_port),
        # TODO: remove this after v1 is no longer supported
        IpEndpoint(constants.LISTEN_ON_IP_ADDRESS, opts.non_ssl_port)
    ]

    self.set_node_config_opts_from_sdn(opts)
    self.opts: CommonOpts = opts
    self.pending_connection_requests: Set[ConnectionPeerInfo] = set()
    self.pending_connection_attempts: Set[ConnectionPeerInfo] = set()
    self.outbound_peers: Set[OutboundPeerModel] = opts.outbound_peers.copy()

    if connection_pool is not None:
        self.connection_pool = connection_pool
    else:
        self.connection_pool = ConnectionPool()

    self.should_force_exit = False
    self.should_restart_on_high_memory = False

    # Per-(ip, port) count of reconnect attempts.
    self.num_retries_by_ip: Dict[Tuple[str, int], int] = defaultdict(int)

    # Event handling queue for delayed events
    self.alarm_queue = AlarmQueue()

    self.init_node_status_logging()
    self.init_throughput_logging()
    self.init_node_info_logging()
    self.init_memory_stats_logging()
    self.init_block_stats_logging()
    self.init_tx_stats_logging()

    # TODO: clean this up alongside outputbuffer holding time
    # this is Nagle's algorithm and we need to implement it properly
    # flush buffers regularly because of output buffer holding time
    self.alarm_queue.register_approx_alarm(
        self.FLUSH_SEND_BUFFERS_INTERVAL,
        constants.OUTPUT_BUFFER_BATCH_MAX_HOLD_TIME,
        self.flush_all_send_buffers
    )

    self.network_num = opts.blockchain_network_num
    self.broadcast_service = self.get_broadcast_service()

    # converting setting in MB to bytes
    self.next_report_mem_usage_bytes = self.opts.dump_detailed_report_at_memory_usage * 1024 * 1024

    if opts.dump_removed_short_ids:
        os.makedirs(opts.dump_removed_short_ids_path, exist_ok=True)

    # each time a network has an update regarding txs, blocks, etc. register in a dict,
    # this way can verify if node lost connection to requested relay.
    self.last_sync_message_received_by_network: Dict[int, float] = {}

    self.start_sync_time: Optional[float] = None
    self.sync_metrics: Dict[int, Counter] = defaultdict(Counter)
    self.sync_short_id_buckets: Dict[
        int, TransactionShortIdBuckets
    ] = defaultdict(TransactionShortIdBuckets)

    opts.has_fully_updated_tx_service = False

    self.check_sync_relay_connections_alarm_id: Optional[AlarmId] = None
    self.transaction_sync_timeout_alarm_id: Optional[AlarmId] = None

    self.requester = ThreadedRequestService(
        # pyre-fixme[16]: `Optional` has no attribute `name`.
        self.NODE_TYPE.name.lower(),
        self.alarm_queue,
        constants.THREADED_HTTP_POOL_SLEEP_INTERVAL_S
    )

    self._last_responsiveness_check_log_time = time.time()
    self._last_responsiveness_check_details = {}
    self.gc_logging_enabled = False
    self.serialized_message_cache = SerializedMessageCache(self.alarm_queue)

    self.alarm_queue.register_alarm(
        constants.RESPONSIVENESS_CHECK_INTERVAL_S, self._responsiveness_check_log
    )
def get_common_opts(
    port: int,
    external_ip: str = constants.LOCALHOST,
    node_id: Optional[str] = None,
    outbound_peers: Optional[List[OutboundPeerModel]] = None,
    blockchain_network_num: int = constants.ALL_NETWORK_NUM,
    block_confirmations_count: int = 2,
    final_tx_confirmations_count: int = 4,
    rpc_port: int = 28332,
    continent: str = "NA",
    country: str = "United States",
    region: str = "us-east-1",
    parallelism_degree: int = 1,
    split_relays: bool = False,
    sid_expire_time: int = 30,
    rpc: bool = False,
    transaction_validation: bool = True,
    rpc_use_ssl: bool = False,
    **kwargs,
) -> CommonOpts:
    """Build a ``CommonOpts`` instance for tests/local runs.

    Starts from the logging-only argument parser defaults, layers on a
    standard set of option values, then any ``kwargs`` overrides, and
    finally fills in the attributes usually set by the node runner
    (node type, peers, and the blockchain network table).

    :param port: external port for the node; also used in the default node id
    :param kwargs: extra attributes written verbatim onto the opts namespace
    :return: fully populated ``CommonOpts``
    """
    if node_id is None:
        node_id = f"Node at {port}"
    if outbound_peers is None:
        outbound_peers = []

    parser = argparse.ArgumentParser(add_help=False)
    cli.add_argument_parser_logging(parser, default_log_level=LogLevel.DEBUG)
    parsed = parser.parse_args([])
    parsed.__dict__.update(
        {
            "external_ip": external_ip,
            "external_port": port,
            "node_id": node_id,
            "memory_stats_interval": 3600,
            "dump_detailed_report_at_memory_usage": 100,
            "dump_removed_short_ids": False,
            "dump_removed_short_ids_path": "",
            "transaction_pool_memory_limit": 200000000,
            "use_extensions": constants.USE_EXTENSION_MODULES,
            "import_extensions": constants.USE_EXTENSION_MODULES,
            "tx_mem_pool_bucket_size": constants.DEFAULT_TX_MEM_POOL_BUCKET_SIZE,
            "throughput_stats_interval": constants.THROUGHPUT_STATS_INTERVAL_S,
            "info_stats_interval": constants.INFO_STATS_INTERVAL_S,
            "sync_tx_service": True,
            "source_version": "v1.0.0",
            "non_ssl_port": 3000,
            "enable_node_cache": True,
            "rpc_port": rpc_port,
            "rpc_host": constants.LOCALHOST,
            "rpc_user": "",
            "rpc_password": "",
            "rpc_use_ssl": rpc_use_ssl,
            "rpc_ssl_base_url": "",
            "continent": continent,
            "country": country,
            "region": region,
            "hostname": "bxlocal",
            "sdn_url": f"{constants.LOCALHOST}:8080",
            "enable_buffered_send": False,
            "block_compression_debug": False,
            "enable_tcp_quickack": True,
            "thread_pool_parallelism_degree": config.get_thread_pool_parallelism_degree(
                str(parallelism_degree)
            ),
            "data_dir": config.get_default_data_path(),
            "ca_cert_url": "https://certificates.blxrbdn.com/ca",
            "private_ssl_base_url": "https://certificates.blxrbdn.com",
            "rpc": rpc,
            "transaction_validation": transaction_validation,
            "using_private_ip_connection": False,
        }
    )
    # Caller-supplied overrides win over the defaults above.
    parsed.__dict__.update(kwargs)

    common_opts = CommonOpts.from_opts(parsed)

    # (network_num, protocol, network_name, p4, p5, final_confirmations);
    # p4/p5 are passed through to blockchain_network positionally — see its
    # signature for their meaning. Order matters: it fixes dict insertion order.
    network_specs = [
        (0, "Bitcoin", "Mainnet", 15, 15, final_tx_confirmations_count),
        (1, "Bitcoin", "Testnet", 15, 15, final_tx_confirmations_count),
        (4, "BitcoinCash", "Testnet", 15, 15, 24),
        (5, "Ethereum", "Mainnet", 5, 5, 24),
        (3, "Ethereum", "Testnet", 5, 5, final_tx_confirmations_count),
        (33, "Ontology", "Mainnet", 5, 5, final_tx_confirmations_count),
        (10, "Ethereum", "BSC-Mainnet", 5, 5, 24),
    ]
    blockchain_networks = {
        num: blockchain_network(
            protocol,
            name,
            num,
            p4,
            p5,
            final_confirmations,
            block_confirmations_count,
        )
        for num, protocol, name, p4, p5, final_confirmations in network_specs
    }

    # some attributes are usually set by the node runner
    common_opts.__dict__.update(
        {
            "node_type": AbstractNode.NODE_TYPE,
            "outbound_peers": outbound_peers,
            "sid_expire_time": sid_expire_time,
            "split_relays": split_relays,
            "blockchain_networks": blockchain_networks,
            "blockchain_network_num": blockchain_network_num,
        }
    )
    return common_opts
def setUp(self):
    """Build a ``CommonOpts`` fixture wired to a local SSL folder.

    Any ``CommonOpts`` dataclass field not explicitly set here is
    defaulted to ``None`` so ``from_opts`` can construct the instance.
    """
    self.blockchain_network = helpers.blockchain_network(
        protocol="Bitcoin",
        network_name="Mainnet",
        network_num=1,
        block_interval=600,
        final_tx_confirmations_count=6,
    )
    self.set_ssl_folder()

    opts_dict = {
        "log_path": "",
        "to_stdout": True,
        "external_port": 0,
        "external_ip": "1.1.1.1",
        "node_id": None,
        "blockchain_network": self.blockchain_network.network,
        "network_num": self.blockchain_network.network_num,
        "blockchain_protocol": self.blockchain_network.protocol,
        "blockchain_networks": {
            self.blockchain_network.network_num: self.blockchain_network
        },
        "log_level": LogLevel.INFO,
        "log_format": LogFormat.PLAIN,
        "log_flush_immediately": True,
        "log_fluentd_enable": False,
        "log_fluentd_host": None,
        "use_extensions": True,
        "log_fluentd_queue_size": 1000,
        "thread_pool_parallelism_degree": config.get_thread_pool_parallelism_degree(
            str(constants.DEFAULT_THREAD_POOL_PARALLELISM_DEGREE),
        ),
        "log_level_overrides": {},
        "source_version": "v1.0.0",
        "ca_cert_url": self.ssl_folder_url,
        "private_ssl_base_url": self.ssl_folder_url,
        "data_dir": config.get_default_data_path(),
        "log_level_fluentd": LogLevel.DEBUG,
        "log_level_stdout": LogLevel.TRACE,
        "sdn_url": "https://localhost:8080",
    }
    # Fill every remaining dataclass field with None.
    for field_name in CommonOpts.__dataclass_fields__:
        opts_dict.setdefault(field_name, None)

    self.opts = CommonOpts.from_opts(Namespace(**opts_dict))
    log_config.create_logger(None, LogLevel.WARNING)
    self.event_loop_mock = EventLoopMock()
def action(self, opts: CommonOpts, node_ssl_service: NodeSSLService) -> None:
    """Validate network-related options during startup.

    Delegates entirely to ``opts.validate_network_opts``; the SSL service
    parameter is unused but required by the action interface.
    """
    opts.validate_network_opts()
def validate_network_opts(opts: CommonOpts, _node_ssl_service: NodeSSLService) -> None:
    """Run ``CommonOpts``' own network validation.

    The SSL service argument is accepted only to satisfy the common
    callback signature and is deliberately ignored.
    """
    opts.validate_network_opts()