Example #1
def _setup_standalone_component(
    component_type: Union[Type['TrioIsolatedComponent'],
                          Type['AsyncioIsolatedComponent']],
    app_identifier: str,
) -> Tuple[Union['TrioIsolatedComponent', 'AsyncioIsolatedComponent'], Tuple[
        str, ...]]:
    if app_identifier == APP_IDENTIFIER_ETH1:
        app_cfg: Type[BaseAppConfig] = Eth1AppConfig
    elif app_identifier == APP_IDENTIFIER_BEACON:
        app_cfg = BeaconAppConfig
    else:
        raise ValueError(f"Unknown app identifier: {app_identifier}")

    # Require a root dir to be specified as we don't want to mess with the default one.
    for action in parser._actions:
        if action.dest == 'trinity_root_dir':
            action.required = True
            break

    component_type.configure_parser(parser, subparser)
    parser.add_argument(
        '--connect-to-endpoints',
        help="A list of event bus IPC files for components we should connect to",
        nargs='+',
        default=tuple(),
    )
    args = parser.parse_args()
    # FIXME: Figure out a way to avoid having to set this.
    args.sync_mode = SYNC_FULL
    args.enable_metrics = False

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    if args.log_levels is not None:
        for name, level in args.log_levels.items():
            if name is None:
                name = ''
            get_logger(name).setLevel(level)

    trinity_config = TrinityConfig.from_parser_args(args, app_identifier,
                                                    (app_cfg, ))
    trinity_config.trinity_root_dir.mkdir(exist_ok=True)
    if not is_data_dir_initialized(trinity_config):
        initialize_data_dir(trinity_config)
    boot_info = BootInfo(
        args=args,
        trinity_config=trinity_config,
        min_log_level=None,
        logger_levels=None,
        profile=False,
    )
    return component_type(boot_info), args.connect_to_endpoints
Example #2
class NoopMetricsService(Service, MetricsServiceAPI):
    """
    A ``MetricsServiceAPI`` implementation that provides a stub registry where every action results
    in a noop. Intended to be used when metrics collection is disabled. Every collected metric
    will only incur the cost of a noop.
    """

    logger = get_logger(
        'trinity.components.builtin.metrics.NoopMetricsService')

    def __init__(self,
                 influx_server: str = '',
                 influx_user: str = '',
                 influx_password: str = '',
                 influx_database: str = '',
                 host: str = '',
                 reporting_frequency: int = 10):

        self._registry = NoopMetricsRegistry()

    @property
    def registry(self) -> NoopMetricsRegistry:
        """
        Return the :class:`trinity.components.builtin.metrics.registry.NoopMetricsRegistry` at which
        metrics instruments can be registered and retrieved.
        """
        return self._registry

    async def run(self) -> None:
        self.logger.info("Running NoopMetricsService")
        await self.manager.wait_finished()

    async def continuously_report(self) -> None:
        pass
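The stub registry that this service hands out is not shown above. A minimal sketch of what such a no-op registry could look like follows; the class and method names (counter, timer) are illustrative assumptions, not the actual Trinity NoopMetricsRegistry API.

class _NoopInstrument:
    # Any method called on a no-op instrument (inc(), update(), clear(), ...)
    # resolves to a do-nothing callable, so recording a metric costs almost nothing.
    def __getattr__(self, name: str):
        return lambda *args, **kwargs: None


class NoopMetricsRegistrySketch:
    # Hypothetical stand-in for a registry whose instruments all swallow their input.
    def counter(self, key: str) -> _NoopInstrument:
        return _NoopInstrument()

    def timer(self, key: str) -> _NoopInstrument:
        return _NoopInstrument()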
Example #3
def test_get_logger():
    logger = get_logger("foo.bar.baz")
    assert isinstance(logger, eth_utils.ExtendedDebugLogger)
    assert logger.parent.name == "foo.bar"
    assert isinstance(logger.parent, eth_utils.ExtendedDebugLogger)
    assert logger.parent.parent.name == "foo"
    assert isinstance(logger.parent.parent, eth_utils.ExtendedDebugLogger)
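The assertions above rely on get_logger producing an ExtendedDebugLogger not only for the requested name but for every dotted ancestor as well. A rough sketch of how that behaviour can be obtained from the standard logging machinery follows; it is an illustrative approximation, not the eth_utils implementation.

import logging

import eth_utils


def get_logger_sketch(name: str) -> logging.Logger:
    # Temporarily swap the logger class so that the requested logger and every
    # missing ancestor ("foo", "foo.bar", "foo.bar.baz", ...) is created as an
    # ExtendedDebugLogger rather than a plain logging.Logger. Already-existing
    # ancestors keep whatever class they were created with.
    original_class = logging.getLoggerClass()
    logging.setLoggerClass(eth_utils.ExtendedDebugLogger)
    try:
        parts = name.split('.')
        for depth in range(1, len(parts) + 1):
            logger = logging.getLogger('.'.join(parts[:depth]))
    finally:
        logging.setLoggerClass(original_class)
    return logger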
Example #4
class TxComponent(AsyncioIsolatedComponent):
    name = "TxComponent"

    logger = get_logger('trinity.components.tx_pool.TxPool')

    @classmethod
    def configure_parser(cls, arg_parser: ArgumentParser,
                         subparser: _SubParsersAction) -> None:
        arg_parser.add_argument(
            "--disable-tx-pool",
            action="store_true",
            help="Disables the Transaction Pool",
        )

    @classmethod
    def validate_cli(cls, boot_info: BootInfo) -> None:
        network_id = boot_info.trinity_config.network_id
        if network_id not in {
                MAINNET_NETWORK_ID, ROPSTEN_NETWORK_ID, GOERLI_NETWORK_ID
        }:
            if not boot_info.args.disable_tx_pool:
                raise ValidationError(
                    "The TxPool component only supports Mainnet, Ropsten and Goerli."
                    "You can run with the transaction pool disabled using "
                    "--disable-tx-pool")

    @property
    def is_enabled(self) -> bool:
        light_mode = self._boot_info.args.sync_mode == SYNC_LIGHT
        is_disable = self._boot_info.args.disable_tx_pool
        is_supported = not light_mode
        is_enabled = not is_disable and is_supported

        if not is_disable and not is_supported:
            self.logger.warning(
                "Transaction pool disabled.  Not supported in light mode.")

        return is_enabled

    async def do_run(self, event_bus: EndpointAPI) -> None:
        boot_info = self._boot_info
        trinity_config = boot_info.trinity_config
        db = DBClient.connect(trinity_config.database_ipc_path)
        with db:
            app_config = trinity_config.get_app_config(Eth1AppConfig)
            chain_config = app_config.get_chain_config()

            chain = chain_config.full_chain_class(db)

            validator = DefaultTransactionValidator.from_network_id(
                chain,
                boot_info.trinity_config.network_id,
            )

            proxy_peer_pool = ETHProxyPeerPool(event_bus,
                                               TO_NETWORKING_BROADCAST_CONFIG)
            async with background_asyncio_service(proxy_peer_pool):
                tx_pool = TxPool(event_bus, proxy_peer_pool, validator)
                async with background_asyncio_service(tx_pool) as manager:
                    await manager.wait_finished()
Example #5
 def __init__(self, chain: AsyncChainAPI, chaindb: BaseAsyncChainDB,
              base_db: AtomicDatabaseAPI, peer_pool: ETHPeerPool) -> None:
     self.logger = get_logger('trinity.sync.full.FullChainSyncer')
     self.chain = chain
     self.chaindb = chaindb
     self.base_db = base_db
     self.peer_pool = peer_pool
Example #6
class AsyncioComponentForTest(AsyncioIsolatedComponent):
    name = "component-test-asyncio"
    endpoint_name = 'component-test-asyncio'
    logger = get_logger('trinity.testing.AsyncioComponentForTest')

    def get_subprocess_kwargs(self) -> SubprocessKwargs:
        # This is needed so that pytest can capture the subproc's output. Otherwise it will crash
        # with a "io.UnsupportedOperation: redirected stdin is pseudofile, has no fileno()" error.
        return merge(
            super().get_subprocess_kwargs(), {
                'stdin': subprocess.PIPE,
                'stdout': subprocess.PIPE,
                'stderr': subprocess.PIPE,
            })

    @property
    def is_enabled(self) -> bool:
        return True

    async def do_run(self, event_bus: EndpointAPI) -> None:
        self.logger.info('Entered `do_run`')
        service = ComponentTestService(event_bus)
        try:
            async with background_asyncio_service(service) as manager:
                self.logger.info('Running service')
                try:
                    await manager.wait_finished()
                finally:
                    self.logger.info('Exiting `do_run`')
        finally:
            # XXX: We never reach this line, so if you run test_isolated_component.py by itself it
            # will pass but hang forever after pytest reports success.
            # Figuring this out is probably the key to fixing our shutdown.
            self.logger.info('Finished: `do_run`')
Example #7
    class PausingVM(original_vm_class):  # type: ignore
        logger = get_logger(f'eth.vm.base.VM.{original_vm_class.__name__}')

        last_log_time = 0.0

        @classmethod
        def get_state_class(cls) -> Type[StateAPI]:
            return PausingVMState

        def get_beam_stats(self) -> BeamStats:
            return self.state.stats_counter

        def transaction_applied_hook(
                self, transaction_index: int,
                transactions: Sequence[SignedTransactionAPI],
                base_header: BlockHeaderAPI, partial_header: BlockHeaderAPI,
                computation: ComputationAPI, receipt: ReceiptAPI) -> None:

            num_transactions = len(transactions)

            now = time.monotonic()
            if urgent:
                # The currently-importing block
                if transaction_index == num_transactions - 1:
                    logger = self.logger.info
                    log_header = "Beamed"
                elif now - self.last_log_time > MIN_GAS_LOG_WAIT:
                    logger = self.logger.info
                    log_header = "Beaming"
                else:
                    # Logged an update too recently, skipping...
                    return
            else:
                # Don't log anything for preview executions, for now
                return

            beam_stats = self.get_beam_stats()
            fraction_complete = partial_header.gas_used / base_header.gas_used
            if fraction_complete:
                total_est = beam_stats.data_pause_time / fraction_complete
                est_time = humanize_seconds(total_est -
                                            beam_stats.data_pause_time)
            else:
                est_time = "?"

            logger(
                "%s: #%d txn %d/%d, rtt: %.3fs, wait: %s, nodes: %d, gas: %s/%s (%.1f%%) ETA %s",
                log_header,
                base_header.block_number,
                transaction_index + 1,
                num_transactions,
                beam_stats.avg_rtt,
                humanize_seconds(beam_stats.data_pause_time),
                beam_stats.num_nodes,
                f"{partial_header.gas_used:,d}",
                f"{base_header.gas_used:,d}",
                100 * fraction_complete,
                est_time,
            )
            self.last_log_time = now
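The ETA at the end of the log line is a straightforward extrapolation: if fraction_complete of the block's gas has been executed while data_pause_time seconds were spent waiting on state data, the projected total pause time is data_pause_time / fraction_complete, and the remaining wait is that total minus the time already spent. With illustrative numbers:

# Illustrative numbers only: 6s spent waiting for state data while 40% of the
# block's gas has been executed.
data_pause_time = 6.0        # seconds paused so far (beam_stats.data_pause_time)
fraction_complete = 0.4      # partial_header.gas_used / base_header.gas_used

total_est = data_pause_time / fraction_complete  # 15.0s projected total pause time
remaining = total_est - data_pause_time          # 9.0s left -> humanized into the ETA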
Example #8
 def __init__(self, headerdb: BaseAsyncHeaderDB,
              peer_pool: LESPeerPool) -> None:
     PeerSubscriber.__init__(self)
     self.logger = get_logger('trinity.sync.light.LightPeerChain')
     self.headerdb = headerdb
     self.peer_pool = peer_pool
     self._pending_replies = weakref.WeakValueDictionary()
Example #9
    def __init__(self, db: AtomicDatabaseAPI, peer_pool: ETHPeerPool,
                 queen_tracker: QueenTrackerAPI,
                 event_bus: EndpointAPI) -> None:
        self.logger = get_logger('trinity.sync.beam.BeamDownloader')
        self._db = db
        self._trie_db = HexaryTrie(db)
        self._event_bus = event_bus

        # Track the needed node data that is urgent and important:
        buffer_size = MAX_STATE_FETCH * REQUEST_BUFFER_MULTIPLIER
        self._node_tasks = TaskQueue[Hash32](buffer_size, lambda task: 0)

        # list of events waiting on new data
        self._new_data_event: asyncio.Event = asyncio.Event()
        self._preview_events = {}

        self._peer_pool = peer_pool

        # Track node data for upcoming blocks
        self._block_number_lookup = defaultdict(lambda: BlockNumber(0))
        self._maybe_useful_nodes = TaskQueue[Hash32](
            buffer_size,
            # Prefer trie nodes from earliest blocks
            lambda node_hash: self._block_number_lookup[node_hash],
        )

        self._num_urgent_requests_by_peer = Counter()
        self._num_predictive_requests_by_peer = Counter()

        self._queen_tracker = queen_tracker
        self._threadpool = ThreadPoolExecutor()
        asyncio.get_event_loop().set_default_executor(self._threadpool)
Example #10
 def __init__(self, state_downloader: BeamDownloader,
              event_bus: EndpointAPI) -> None:
     self.logger = get_logger(
         'trinity.sync.beam.chain.MissingDataEventHandler')
     self._state_downloader = state_downloader
     self._event_bus = event_bus
     self._minimum_beam_block_number = 0
Example #11
    async def run(self) -> None:
        """
        Call chain is:

        - multiprocessing.Process -> _run_process
            * isolates to a new process
        - _run_process -> run_process
            * sets up subprocess logging
        - run_process -> _do_run
            * runs the event loop and transitions into async context
        - _do_run -> do_run
            * sets up event bus and then enters user function.
        """
        process = ctx.Process(
            target=self.run_process,
            args=(self._boot_info, ),
        )
        loop = asyncio.get_event_loop()
        await loop.run_in_executor(None, process.start)
        try:
            await loop.run_in_executor(None, process.join)
        finally:
            kill_process_gracefully(
                process,
                get_logger('trinity.extensibility.TrioIsolatedComponent'),
            )
Example #12
    def __init__(self, db: AtomicDatabaseAPI, peer_pool: ETHPeerPool,
                 queen_tracker: QueenTrackerAPI,
                 event_bus: EndpointAPI) -> None:
        self.logger = get_logger('trinity.sync.beam.BeamDownloader')
        self._db = db
        self._trie_db = HexaryTrie(db)
        self._event_bus = event_bus

        # Track the needed node data that is urgent and important:
        buffer_size = MAX_STATE_FETCH * REQUEST_BUFFER_MULTIPLIER
        self._node_tasks = TaskQueue[Hash32](buffer_size, lambda task: 0)

        # list of events waiting on new data
        self._new_data_events: Set[asyncio.Event] = set()

        self._peer_pool = peer_pool

        # Track node data for upcoming blocks
        self._maybe_useful_nodes = TaskQueue[Hash32](
            buffer_size,
            # Everything is the same priority, for now
            lambda node_hash: 0,
        )

        self._num_urgent_requests_by_peer = Counter()
        self._num_predictive_requests_by_peer = Counter()

        self._queen_tracker = queen_tracker
Example #13
    def __init__(self,
                 chain: AsyncChainAPI,
                 db: BaseAsyncChainDB,
                 peer_pool: ETHPeerPool,
                 enable_backfill: bool = True,
                 checkpoint: Checkpoint = None) -> None:
        self.logger = get_logger('trinity.sync.header.chain.HeaderChainSyncer')
        self._db = db
        self._checkpoint = checkpoint
        self._enable_backfill = enable_backfill
        self._chain = chain
        self._peer_pool = peer_pool

        if checkpoint is None:
            self._launch_strategy: SyncLaunchStrategyAPI = FromGenesisLaunchStrategy(
                db)
        else:
            self._launch_strategy = FromCheckpointLaunchStrategy(
                db,
                chain,
                checkpoint,
                peer_pool,
            )

        self._header_syncer = ETHHeaderChainSyncer(chain, db, peer_pool,
                                                   self._launch_strategy)
Example #14
    def __init__(self, db: AtomicDatabaseAPI, peer_pool: ETHPeerPool) -> None:
        self.logger = get_logger(
            'trinity.sync.beam.backfill.BeamStateBackfill')
        self._db = db

        self._peer_pool = peer_pool

        self._is_missing: Set[Hash32] = set()

        self._num_requests_by_peer = Counter()

        self._queening_queue = QueeningQueue(peer_pool)

        # Track the nodes that we are requesting in the account trie
        self._account_tracker = TrieNodeRequestTracker()

        self._storage_trackers: Dict[Hash32, TrieNodeRequestTracker] = {}
        self._bytecode_trackers: Dict[Hash32, TrieNodeRequestTracker] = {}

        # The most recent root hash to use to navigate the trie
        self._next_trie_root_hash: Optional[Hash32] = None
        self._begin_backfill = asyncio.Event()

        # Only acquire peasant peers for backfill if there are no other coros
        #   waiting for a peasant. Any other waiter is assumed to be higher priority.
        self._external_peasant_usage = SilenceObserver(
            minimum_silence_duration=GAP_BETWEEN_TESTS)
Example #15
 def __init__(self, peer_pool: ETHPeerPool) -> None:
     self.logger = get_logger('trinity.sync.beam.queen.QueeningQueue')
     self._peer_pool = peer_pool
     self._knights = WaitingPeers(NodeDataV65)
     self._peasants = WaitingPeers(NodeDataV65)
     self._queen_updated = asyncio.Event()
     self._desired_knights = 0
     self._num_peers = 0
Example #16
    def __init__(self, chain: AsyncChainAPI, db: BaseAsyncChainDB,
                 peer_pool: ETHPeerPool) -> None:

        self.logger = get_logger(
            'trinity.sync.header.chain.SequentialHeaderChainGapSyncer')
        self._chain = chain
        self._db = db
        self._peer_pool = peer_pool
Example #17
 def __init__(self, event_bus: EndpointAPI, preferred_nodes: List[NodeAPI],
              max_peers: int) -> None:
     self.event_bus = event_bus
     self.max_peers = max_peers
     self.preferred_nodes = preferred_nodes
     self._preferred_node_tracker = {}
     self.logger = get_logger(
         'trinity.components.preferred_node.PreferredNodeService')
Example #18
 def __init__(self, chain: AsyncChainAPI, db: BaseAsyncHeaderDB,
              peer: BaseChainPeer) -> None:
     self.logger = get_logger('trinity.sync.common.chain.PeerHeaderSyncer')
     self.chain = chain
     self.db = db
     self.sync_progress: SyncProgress = None
     self._peer = peer
     self._target_header_hash = peer.head_info.head_hash
Example #19
    class PausingVM(original_vm_class):  # type: ignore
        logger = get_logger(f'eth.vm.base.VM.{original_vm_class.__name__}')

        @classmethod
        def get_state_class(cls) -> Type[StateAPI]:
            return PausingVMState

        def get_beam_stats(self) -> BeamStats:
            return self.state.stats_counter
Example #20
class BlockImportServer(Service):
    logger = get_logger('trinity.sync.beam.BlockImportServer')

    def __init__(
            self,
            event_bus: EndpointAPI,
            beam_chain: BeamChain) -> None:
        self._event_bus = event_bus
        self._beam_chain = beam_chain

    async def run(self) -> None:
        self.manager.run_daemon_task(self.serve, self._event_bus, self._beam_chain)
        await self.manager.wait_finished()

    async def serve(
            self,
            event_bus: EndpointAPI,
            beam_chain: BeamChain) -> None:
        """
        Listen to DoStatelessBlockImport events, and import block when received.
        Reply with StatelessBlockImportDone when import is complete.
        """

        loop = asyncio.get_event_loop()
        async for event in event_bus.stream(DoStatelessBlockImport):
            # launch in new thread, so we don't block the event loop!
            import_completion = loop.run_in_executor(
                # Maybe build the pausing chain inside the new process?
                None,
                partial_import_block(beam_chain, event.block),
            )

            # Wrapped in `asyncio.shield` because we want to hang the service from
            #   shutting down until block import is complete.
            # In the tests, for example, we await cancel() this service, so that we know
            #   that the in-progress block is complete. Then below, we do not send back
            #   the import completion (so the import server won't get triggered again).
            try:
                await asyncio.shield(import_completion)
            except StateUnretrievable as exc:
                self.logger.debug(
                    "Not broadcasting about %s Beam import. Listening for next request, because %r",
                    event.block,
                    exc
                )
            else:
                if self.manager.is_running:
                    _broadcast_import_complete(
                        event_bus,
                        event.block,
                        event.broadcast_config(),
                        import_completion,  # type: ignore
                    )

            if not self.manager.is_running:
                break
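A minimal sketch of how this server could be wired up, mirroring the background_asyncio_service pattern used in the other examples; the surrounding creation of event_bus and beam_chain is assumed to happen elsewhere.

async def run_block_import_server(event_bus: EndpointAPI, beam_chain: BeamChain) -> None:
    # Hypothetical wiring: run the import server as a background service and
    # block until it finishes, just like the TxPool example above.
    import_server = BlockImportServer(event_bus, beam_chain)
    async with background_asyncio_service(import_server) as manager:
        await manager.wait_finished()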
Example #21
    def __init__(self, chain: AsyncChainAPI, db: BaseAsyncChainDB,
                 peer_pool: ETHPeerPool) -> None:

        self.logger = get_logger(
            'trinity.sync.header.chain.SequentialHeaderChainGapSyncer')
        self._chain = chain
        self._db = db
        self._peer_pool = peer_pool
        self._pauser = Pauser()
        self._max_backfill_header_at_once = MAX_BACKFILL_HEADERS_AT_ONCE
Example #22
 def __init__(self,
              header_syncer: ETHHeaderChainSyncer,
              db: BaseAsyncHeaderDB,
              force_end_block_number: int = None,
              launch_strategy: SyncLaunchStrategyAPI = None) -> None:
     self.logger = get_logger('trinity.sync.beam.chain.HeaderOnlyPersist')
     self._db = db
     self._header_syncer = header_syncer
     self._final_headers: Tuple[BlockHeaderAPI, ...] = None
     self._force_end_block_number = force_end_block_number
     self._launch_strategy = launch_strategy
Example #23
    def __init__(self,
                 chain: AsyncChainAPI,
                 db: BaseAsyncChainDB,
                 peer_pool: ETHPeerPool,
                 max_headers: int = None) -> None:

        self.logger = get_logger(
            'trinity.sync.header.chain.HeaderChainGapSyncer')
        self._chain = chain
        self._db = db
        self._peer_pool = peer_pool
        self._max_headers = max_headers
Example #24
    def __init__(self,
                 handler: Callable[..., Any],
                 host: str = '127.0.0.1',
                 port: int = 8545) -> None:
        self.host = host
        self.port = port
        self.server = web.Server(handler)
        self.logger = get_logger('trinity.http.HTTPServer')

        # aiohttp logs every HTTP request as INFO so we want to reduce the general log level for
        # this particular logger to WARNING, unless Trinity is configured to write DEBUG2 logs.
        if logging.getLogger().level != DEBUG2_LEVEL_NUM:
            logging.getLogger('aiohttp.access').setLevel(logging.WARNING)
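Only the constructor is shown here; actually binding the web.Server to the configured host and port would typically go through aiohttp's low-level runner API. A hedged sketch of what that could look like follows (the run method itself is an assumption, not part of the original class):

    async def run(self) -> None:
        # Sketch using aiohttp's ServerRunner/TCPSite helpers; not necessarily
        # how the original HTTPServer starts listening.
        runner = web.ServerRunner(self.server)
        await runner.setup()
        site = web.TCPSite(runner, host=self.host, port=self.port)
        await site.start()
        self.logger.info("HTTP server listening on %s:%d", self.host, self.port)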
Example #25
    def __init__(self, node_hashes: Sequence[Hash32]) -> None:
        self.logger = get_logger(
            "trinity.protocol.eth.validators.GetNodeDataValidator")
        self.node_hashes = node_hashes

        # Check for uniqueness
        num_requested = len(node_hashes)
        num_unique = len(set(node_hashes))
        if num_requested != num_unique:
            self.logger.warning(
                "GetNodeData: Asked peer for %d trie nodes, but %d were duplicates",
                num_requested,
                num_requested - num_unique,
            )
Example #26
    def __init__(self, db: AtomicDatabaseAPI, peer_pool: ETHPeerPool) -> None:
        self.logger = get_logger('trinity.sync.beam.backfill.BeamStateBackfill')
        self._db = db

        # Pending nodes to download
        self._node_hashes: List[Hash32] = []

        self._peer_pool = peer_pool

        self._is_missing: Set[Hash32] = set()

        self._num_requests_by_peer = Counter()

        self._queening_queue = QueeningQueue(peer_pool)
Example #27
    def __init__(self,
                 chain: AsyncChainAPI,
                 db: BaseAsyncHeaderDB,
                 peer: TChainPeer,
                 launch_strategy: SyncLaunchStrategyAPI = None) -> None:
        self.logger = get_logger('trinity.sync.common.headers.SkeletonSyncer')
        self._chain = chain
        self._db = db
        if launch_strategy is None:
            launch_strategy = FromGenesisLaunchStrategy(db, chain)

        self._launch_strategy = launch_strategy
        self.peer = peer
        max_pending_headers = peer.max_headers_fetch * 8
        self._fetched_headers = asyncio.Queue(max_pending_headers)
Example #28
 def __init__(self,
              chain: AsyncChainAPI,
              chaindb: BaseAsyncChainDB,
              base_db: AtomicDatabaseAPI,
              peer_pool: ETHPeerPool,
              event_bus: EndpointAPI,
              checkpoint: Checkpoint = None,
              force_beam_block_number: BlockNumber = None) -> None:
     self.logger = get_logger('trinity.sync.beam.service.BeamSyncService')
     self.chain = chain
     self.chaindb = chaindb
     self.base_db = base_db
     self.peer_pool = peer_pool
     self.event_bus = event_bus
     self.checkpoint = checkpoint
     self.force_beam_block_number = force_beam_block_number
Example #29
class AsyncioIsolatedComponent(BaseIsolatedComponent):
    logger = get_logger('trinity.extensibility.asyncio.AsyncioIsolatedComponent')

    async def _run_in_process(
            self,
            async_fn: Callable[..., TReturn],
            *args: Any,
            subprocess_kwargs: 'SubprocessKwargs' = None,
    ) -> TReturn:
        return await run_in_process(async_fn, *args, subprocess_kwargs=subprocess_kwargs)

    async def _do_run(self) -> None:
        with child_process_logging(self._boot_info):
            endpoint_name = self.get_endpoint_name()
            event_bus_service = AsyncioEventBusService(
                self._boot_info.trinity_config,
                endpoint_name,
            )
            async with background_asyncio_service(event_bus_service):
                event_bus = await event_bus_service.get_event_bus()

                try:
                    if self._boot_info.profile:
                        with profiler(f'profile_{self.get_endpoint_name()}'):
                            await self.do_run(event_bus)
                    else:
                        # XXX: When open_in_process() injects a KeyboardInterrupt into us (via
                        # coro.throw()), we hang forever here, until open_in_process() times out
                        # and sends us a SIGTERM, at which point we exit without executing either
                        # the except or the finally blocks below.
                        # See https://github.com/ethereum/trinity/issues/1711 for more.
                        await self.do_run(event_bus)
                except KeyboardInterrupt:
                    # Currently we never reach this code path, but when we fix the issue above it
                    # will be needed.
                    return
                finally:
                    # Once we start seeing this in the logs after a Ctrl-C, we'll likely have
                    # figured out the issue above.
                    self.logger.debug("%s: do_run() finished", self)

    @abstractmethod
    async def do_run(self, event_bus: EndpointAPI) -> None:
        """
        Define the entry point of the component. Should be overwritten in subclasses.
        """
        ...
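The profiler helper used in _do_run is not shown in this example. A minimal sketch of such a context manager based on cProfile follows; it is an assumption about how the helper might work, not the actual Trinity implementation.

import cProfile
from contextlib import contextmanager
from typing import Iterator


@contextmanager
def profiler(filename: str) -> Iterator[None]:
    # Hypothetical sketch: profile everything that runs inside the block and
    # dump the collected stats to `filename` on exit.
    pr = cProfile.Profile()
    pr.enable()
    try:
        yield
    finally:
        pr.disable()
        pr.dump_stats(filename)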
Example #30
class ConnectionTrackerServer(Service):
    """
    Server to handle the event bus communication for BlacklistEvent and
    GetBlacklistedPeersRequest/Response events
    """
    logger = get_logger(
        'trinity.components.network_db.ConnectionTrackerServer')

    def __init__(self, event_bus: EndpointAPI,
                 tracker: BaseConnectionTracker) -> None:
        self.tracker = tracker
        self.event_bus = event_bus

    async def handle_get_blacklisted_requests(self) -> None:
        async for req in self.event_bus.stream(GetBlacklistedPeersRequest):
            self.logger.debug2('Received get_blacklisted request')
            blacklisted = await self.tracker.get_blacklisted()
            await self.event_bus.broadcast(
                GetBlacklistedPeersResponse(blacklisted),
                req.broadcast_config())

    async def handle_blacklist_command(self) -> None:
        async for command in self.event_bus.stream(BlacklistEvent):
            self.logger.debug2(
                'Received blacklist command: remote: %s | timeout: %s | reason: %s',
                command.remote,
                humanize_seconds(command.timeout_seconds),
                command.reason,
            )
            self.tracker.record_blacklist(command.remote,
                                          command.timeout_seconds,
                                          command.reason)

    async def run(self) -> None:
        self.logger.debug("Running ConnectionTrackerServer")

        self.manager.run_daemon_task(
            self.handle_blacklist_command,
            name='ConnectionTrackerServer.handle_blacklist_command',
        )
        self.manager.run_daemon_task(
            self.handle_get_blacklisted_requests,
            name='ConnectionTrackerServer.handle_get_blacklisted_requests',
        )

        await self.manager.wait_finished()