async def run(self) -> None:
    """Run an infinite loop refreshing our NAT port mapping.

    On every iteration we configure the port mapping with a lifetime of 30
    minutes and then sleep for that long as well.
    """
    while self.manager.is_running:
        async for _ in every(UPNP_PORTMAP_DURATION):
            # Bound each attempt so a misbehaving gateway cannot hang the
            # discovery step indefinitely.
            with trio.move_on_after(UPNP_DISCOVER_TIMEOUT_SECONDS) as scope:
                try:
                    # setup_port_map is blocking, so run it on a worker thread.
                    internal_ip, external_ip = await trio.to_thread.run_sync(
                        setup_port_map,
                        self.port,
                        UPNP_PORTMAP_DURATION,
                    )
                    event = UPnPMapping(str(external_ip))
                    self.logger.debug(
                        "NAT portmap created, broadcasting UPnPMapping event: %s", event)
                    await self.event_bus.broadcast(event, FIRE_AND_FORGET_BROADCASTING)
                except PortMapFailed as err:
                    self.logger.error("Failed to setup NAT portmap: %s", err)
                except Exception:
                    # Fixed typo in log message: "setuping" -> "setting up".
                    self.logger.exception("Error setting up NAT portmap")
            if scope.cancelled_caught:
                self.logger.error("Timeout attempting to setup UPnP port map")
async def run(self) -> None:
    """Periodically ping the oldest entry in the routing table."""
    async for _ in every(ROUTING_TABLE_PING_INTERVAL):  # noqa: F841
        if len(self.routing_table) == 0:
            self.logger.warning("Routing table is empty, no one to ping")
            continue
        oldest = self.routing_table.get_oldest_entry()
        self.logger.debug("Pinging %s", encode_hex(oldest))
        await self.ping(oldest)
async def collect_blockchain_metrics(manager: ManagerAPI,
                                     boot_info: BootInfo,
                                     event_bus: EndpointAPI,
                                     registry: HostMetricsRegistry,
                                     frequency_seconds: int) -> None:
    """Sample the chain head every ``frequency_seconds`` and report the
    latest block number to the head-block gauge.
    """
    head_block_gauge = registry.gauge('trinity.blockchain/head/block.gauge')
    async for _ in trio_utils.every(frequency_seconds):
        stats = read_blockchain_stats(boot_info, event_bus)
        head_block_gauge.set_value(stats.latest_block)
async def run(self) -> None:
    """Periodically ping a node from the most stale region of the routing table."""
    async for _ in every(ROUTING_TABLE_PING_INTERVAL):  # noqa: F841
        if self.routing_table.is_empty:
            self.logger.warning("Routing table is empty, no one to ping")
            continue
        distance = self.routing_table.get_least_recently_updated_log_distance()
        nodes_at_distance = self.routing_table.get_nodes_at_log_distance(distance)
        # Pick the last candidate at that distance (presumably the stalest
        # entry in the bucket — confirm against routing table ordering).
        target = nodes_at_distance[-1]
        self.logger.debug("Pinging %s", encode_hex(target))
        await self.ping(target)
async def collect_process_metrics(manager: ManagerAPI,
                                  registry: HostMetricsRegistry,
                                  frequency_seconds: int) -> None:
    """Sample CPU, disk, network and process statistics on every tick.

    Gauges report instantaneous values; meters are marked with the delta
    between the current and the previous sample, so rate reporting only
    starts from the second iteration onwards.
    """
    last_sample: SystemStats = None
    cpu_sysload_gauge = registry.gauge('trinity.system/cpu/sysload.gauge')
    cpu_syswait_gauge = registry.gauge('trinity.system/cpu/syswait.gauge')
    disk_readdata_meter = registry.meter('trinity.system/disk/readdata.meter')
    disk_writedata_meter = registry.meter(
        'trinity.system/disk/writedata.meter')
    network_in_packets_meter = registry.meter(
        'trinity.network/in/packets/total.meter')
    network_out_packets_meter = registry.meter(
        'trinity.network/out/packets/total.meter')
    process_count_gauge = registry.gauge(
        'trinity.system/processes/count.gauge')
    thread_count_gauge = registry.gauge('trinity.system/threads/count.gauge')
    async for _ in trio_utils.every(frequency_seconds):
        sample = SystemStats(
            cpu_stats=read_cpu_stats(),
            disk_stats=read_disk_stats(),
            network_stats=read_network_stats(),
            process_stats=read_process_stats(),
        )
        # Deltas require two samples; skip rate metrics on the first pass.
        if last_sample is not None:
            cpu_sysload_gauge.set_value(
                (sample.cpu_stats.global_time - last_sample.cpu_stats.global_time)
                / frequency_seconds
            )
            cpu_syswait_gauge.set_value(
                (sample.cpu_stats.global_wait_io - last_sample.cpu_stats.global_wait_io)
                / frequency_seconds
            )
            disk_readdata_meter.mark(
                sample.disk_stats.read_bytes - last_sample.disk_stats.read_bytes)
            disk_writedata_meter.mark(
                sample.disk_stats.write_bytes - last_sample.disk_stats.write_bytes)
            network_in_packets_meter.mark(
                sample.network_stats.in_packets - last_sample.network_stats.in_packets)
            network_out_packets_meter.mark(
                sample.network_stats.out_packets - last_sample.network_stats.out_packets)

        process_count_gauge.set_value(sample.process_stats.process_count)
        thread_count_gauge.set_value(sample.process_stats.thread_count)
        last_sample = sample
async def test_every_send(autojump_clock):
    """A value passed via ``asend`` extends the next tick by that much."""
    t0 = trio.current_time()
    ticker = every(2, initial_delay=1)

    tick1 = await ticker.__anext__()
    assert tick1 == pytest.approx(t0 + 1)

    # asend(3) -> next tick arrives interval (2) plus extra (3) later.
    tick2 = await ticker.asend(3)
    assert tick2 == pytest.approx(tick1 + 2 + 3)

    tick3 = await ticker.asend(1)
    assert tick3 == pytest.approx(tick2 + 2 + 1)
async def test_every_late(autojump_clock):
    """A late consumer still receives scheduled tick times, not wall times."""
    t0 = trio.current_time()
    ticker = every(2, initial_delay=1)

    tick1 = await ticker.__anext__()
    # Oversleep past the next scheduled tick.
    await trio.sleep(3)

    tick2 = await ticker.__anext__()
    assert tick2 == pytest.approx(tick1 + 2)
    # We resumed late, so the clock is past the scheduled tick time.
    assert trio.current_time() == pytest.approx(t0 + 1 + 3)

    # The generator catches back up to its schedule on the next tick.
    tick3 = await ticker.__anext__()
    assert tick3 == pytest.approx(tick2 + 2)
    assert trio.current_time() == pytest.approx(tick3)
async def report_stats(self) -> None:
    """Periodically log a summary of routing table and ENR DB occupancy."""
    async for _ in trio_utils.every(self._refresh_interval):
        self.logger.debug(
            "============================= Stats =======================")
        full_buckets = [
            bucket for bucket in self.routing.buckets if bucket.is_full
        ]
        # Generator expressions: no need to materialize throwaway lists
        # just to feed sum().
        total_nodes = sum(len(bucket) for bucket in self.routing.buckets)
        nodes_in_replacement_cache = sum(
            len(bucket.replacement_cache) for bucket in self.routing.buckets
        )
        self.logger.debug(
            "Routing table has %s nodes in %s buckets (%s of which are full), and %s nodes "
            "are in the replacement cache", total_nodes,
            len(self.routing.buckets), len(full_buckets),
            nodes_in_replacement_cache)
        self.logger.debug("ENR DB has a total of %s entries",
                          len(self._enr_db))
        self.logger.debug(
            "===========================================================")
async def periodically_try_to_fill_pool_with_preferred_nodes(self) -> None: """ Check pool for available peer slots, and brodacast command to connect to preferred nodes if there's room in the pool. """ # seed _preferred_node_tracker so all preferred nodes are considered eligible initially for node in self.preferred_nodes: self._preferred_node_tracker[node] = time.monotonic( ) - self.preferred_node_recycle_time await self.event_bus.wait_until_any_endpoint_subscribed_to( GetConnectedPeersRequest) async for _ in trio_utils.every(self.pool_availability_check_period): response = await self.event_bus.request(GetConnectedPeersRequest()) available_slots = self.max_peers - len(response.peers) if available_slots > 0: connected_peers = [ peer.session.remote for peer in response.peers ] eligible_nodes = self._sample_eligible_preferred_nodes( connected_peers, available_slots) for node in eligible_nodes: await self._connect_to_preferred_node(node)
async def periodically_refresh(self) -> None:
    """Kick off a random lookup once per refresh interval."""
    refresh_ticks = trio_utils.every(self._refresh_interval)
    async for _ in refresh_ticks:
        await self.lookup_random()
async def run(self) -> None:
    """Periodically look up a randomly generated node ID."""
    async for _ in every(ROUTING_TABLE_LOOKUP_INTERVAL):
        # 32 random bytes -> a uniformly distributed lookup target.
        random_target = NodeID(secrets.token_bytes(32))
        await self.lookup(random_target)
async def continuously_report(self) -> None:
    """Emit a report on every reporting-frequency tick."""
    reporting_ticks = trio_utils.every(self._reporting_frequency)
    async for _ in reporting_ticks:
        await self.report_now()