Example 1
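Bootstrapping a routing table: load the bootnode ENRs into the database, repeatedly attempt to bond with each bootnode until the table is ready, then look up a random node ID every 30 seconds to discover and bond with new peers.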
    async def _manage_routing_table(self) -> None:
        # First load all the bootnode ENRs into our database
        for enr in self._bootnodes:
            try:
                self.enr_db.set_enr(enr)
            except OldSequenceNumber:
                pass

        # Now repeatedly try to bond with each bootnode until one succeeds.
        async with trio.open_nursery() as nursery:
            while self.manager.is_running:
                for enr in self._bootnodes:
                    if enr.node_id == self.local_node_id:
                        continue
                    endpoint = self._endpoint_for_enr(enr)
                    nursery.start_soon(self._bond, enr.node_id, endpoint)

                with trio.move_on_after(10):
                    await self._routing_table_ready.wait()
                    break

        # TODO: Need better logic here for more quickly populating the
        # routing table.  Should start off aggressively filling in the
        # table, only backing off once the table contains some minimum
        # number of records **or** searching for new records fails to find
        # new nodes.  Maybe use a TokenBucket
        async for _ in every(30):
            async with trio.open_nursery() as nursery:
                target_node_id = NodeID(secrets.token_bytes(32))
                found_enrs = await self.recursive_find_nodes(target_node_id)
                for enr in found_enrs:
                    endpoint = self._endpoint_for_enr(enr)
                    nursery.start_soon(self._bond, enr.node_id, endpoint)
Example 2
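Report crawl statistics every 5 seconds, after an initial 5-second delay. The citizen count is the number of seen nodes minus those that are unresponsive, invalid, unreachable, or still pending.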
async def _periodically_report_crawl_stats(self) -> None:
    async for _ in every(5, 5):
        stats = self._explorer.get_stats()
        citizen_count = (
            stats.seen
            - stats.unresponsive
            - stats.invalid
            - stats.unreachable
            - stats.pending
        )
        self.logger.info("crawl-stats:  citizens=%d  %s", citizen_count, stats)
Example 3
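Keep routing-table entries fresh: on every tick, ping a node drawn from the bucket at the least-recently-updated log distance.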
async def run(self) -> None:
    async for _ in every(ROUTING_TABLE_PING_INTERVAL):  # noqa: F841
        if not self.routing_table.is_empty:
            log_distance = (
                self.routing_table.get_least_recently_updated_log_distance()
            )
            candidates = self.routing_table.get_nodes_at_log_distance(log_distance)
            node_id = candidates[-1]
            self.logger.debug("Pinging %s", encode_hex(node_id))
            await self.ping(node_id)
        else:
            self.logger.debug("Routing table is empty, no one to ping")
Example 4
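A test of the send protocol: a value passed to asend is treated as an extra delay, pushing the next tick out by the interval plus that value.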
async def test_every_send(autojump_clock):
    start_time = trio.current_time()

    every_generator = every(2, initial_delay=1)

    first_time = await every_generator.__anext__()
    assert first_time == pytest.approx(start_time + 1)

    second_time = await every_generator.asend(3)
    assert second_time == pytest.approx(first_time + 2 + 3)

    third_time = await every_generator.asend(1)
    assert third_time == pytest.approx(second_time + 2 + 1)
Example 5
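Log a routing-table summary every 30 seconds: the total number of entries plus an index:size breakdown of the non-empty buckets.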
async def _periodically_report_routing_table(self) -> None:
    async for _ in every(30, initial_delay=30):
        non_empty_buckets = tuple(
            (idx, bucket)
            for idx, bucket in enumerate(reversed(self.routing_table.buckets))
            if bucket
        )
        total_size = sum(len(bucket) for idx, bucket in non_empty_buckets)
        bucket_info = "|".join(
            tuple(f"{idx}:{len(bucket)}" for idx, bucket in non_empty_buckets)
        )
        self.logger.debug(
            "routing-table-info: size=%d  buckets=%s", total_size, bucket_info,
        )
Example 6
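A test of late consumption: when the consumer falls behind the schedule, the next tick fires immediately but still reports its originally scheduled time, and subsequent ticks return to the fixed cadence.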
async def test_every_late(autojump_clock):
    start_time = trio.current_time()

    every_generator = every(2, initial_delay=1)

    first_time = await every_generator.__anext__()
    await trio.sleep(3)

    second_time = await every_generator.__anext__()
    assert second_time == pytest.approx(first_time + 2)
    assert trio.current_time() == pytest.approx(start_time + 1 + 3)

    third_time = await every_generator.__anext__()
    assert third_time == pytest.approx(second_time + 2)
    assert trio.current_time() == pytest.approx(third_time)
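
Together, these two tests pin down the generator's contract: the first tick is scheduled at start_time + initial_delay and subsequent ticks every interval seconds after that; a value passed in via asend adds an extra delay before the next tick; and a consumer that falls behind is woken immediately with the originally scheduled tick time, so the schedule never drifts. A minimal sketch that satisfies both tests (an illustration only, not necessarily the project's actual implementation) could look like this:

from typing import AsyncGenerator, Optional

import trio


async def every(
    interval: float, initial_delay: float = 0
) -> AsyncGenerator[float, Optional[float]]:
    # Yield the scheduled time of each tick.  A value passed in via asend
    # is added as an extra delay before the following tick.  A consumer
    # that shows up late is woken immediately, but the tick times keep to
    # the original schedule.
    next_at = trio.current_time() + initial_delay
    while True:
        await trio.sleep_until(next_at)  # returns at once if already past due
        extra_delay = yield next_at
        next_at += interval
        if extra_delay is not None:
            next_at += extra_delay

Under this sketch, the second tick in test_every_late fires immediately at first_time + 2 even though the consumer overslept, and the third tick waits until second_time + 2, matching the assertions above.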
Example 7
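Refresh a UPnP NAT port mapping on every tick: re-create the mapping with a lifetime of UPNP_PORTMAP_DURATION, record the discovered internal and external IP addresses, and log failures and timeouts.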
    async def run(self) -> None:
        """
        Run an infinite loop refreshing our NAT port mapping.

        On every iteration we configure the port mapping with a lifetime of 30 minutes and then
        sleep for that long as well.
        """
        while self.manager.is_running:
            async for _ in every(UPNP_PORTMAP_DURATION):
                with trio.move_on_after(
                        UPNP_DISCOVER_TIMEOUT_SECONDS) as scope:
                    self._ready.set()
                    try:
                        internal_ip, external_ip = await trio.to_thread.run_sync(
                            setup_port_map,
                            self.port,
                            UPNP_PORTMAP_DURATION,
                        )
                        self._external_ip = external_ip
                        self._internal_ip = internal_ip
                        self._has_ip_addresses.set()
                        async with self._ip_changed:
                            self._ip_changed.notify_all()

                        self.logger.debug(
                            "NAT portmap created: internal=%s  external=%s",
                            internal_ip,
                            external_ip,
                        )
                    except PortMapFailed as err:
                        self.logger.error("Failed to setup NAT portmap: %s",
                                          err)
                    except Exception:
                        self.logger.exception("Error setuping NAT portmap")

                if scope.cancelled_caught:
                    self.logger.error(
                        "Timeout attempting to setup UPnP port map")
Example 8
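Periodically look up a random target node ID to keep the routing table populated.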
async def run(self) -> None:
    async for _ in every(ROUTING_TABLE_LOOKUP_INTERVAL):
        target = NodeID(secrets.token_bytes(32))
        await self.lookup(target)
Example 9
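Log network statistics every 5 seconds, after an initial 5-second delay.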
async def _periodically_log_stats(self) -> None:
    async for _ in every(5, 5):
        self._network.logger.debug("%s[stats]: %s", self, self.get_stats())
Example 10
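Advertise stored content every 30 minutes: starting from a random point in the key space, enumerate all content keys in order (wrapping around once), feed them to a pool of broadcast workers over a memory channel, and log progress as the sweep proceeds.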
    async def _periodically_advertise_content(self) -> None:
        await self._network.routing_table_ready()

        send_channel, receive_channel = trio.open_memory_channel[ContentKey](
            self._concurrency)

        for _ in range(self._concurrency):
            self.manager.run_daemon_task(self._broadcast_worker,
                                         receive_channel)

        async for _ in every(30 * 60):
            start_at = trio.current_time()

            total_keys = len(self.content_storage)
            if not total_keys:
                continue

            first_key = first(
                self.content_storage.iter_closest(
                    NodeID(secrets.token_bytes(32))))

            self.logger.info(
                "content-processing-starting: total=%d  start=%s",
                total_keys,
                first_key.hex(),
            )

            processed_keys = 0

            last_key = first_key
            has_wrapped_around = False

            while self.manager.is_running:
                elapsed = trio.current_time() - start_at
                content_keys = tuple(
                    take(
                        self._concurrency * 2,
                        self.content_storage.enumerate_keys(
                            start_key=last_key),
                    ))

                # TODO: We need to adjust the
                # `ContentStorageAPI.enumerate_keys` to allow a
                # non-inclusive left bound so we can query all the keys
                # **after** the last key we processed.
                if content_keys and content_keys[0] == last_key:
                    content_keys = content_keys[1:]

                if not content_keys:
                    last_key = None
                    has_wrapped_around = True
                    continue

                for content_key in content_keys:
                    await send_channel.send(content_key)

                last_key = content_keys[-1]
                if has_wrapped_around and last_key >= first_key:
                    break

                processed_keys += len(content_keys)
                progress = processed_keys * 100 / total_keys

                self.logger.debug(
                    "content-processing: progress=%0.1f  processed=%d  "
                    "total=%d  at=%s  elapsed=%s",
                    progress,
                    processed_keys,
                    total_keys,
                    "None" if last_key is None else last_key.hex(),
                    humanize_seconds(int(elapsed)),
                )

            self.logger.info(
                "content-processing-finished: processed=%d/%d  elapsed=%s",
                processed_keys,
                total_keys,
                humanize_seconds(int(elapsed)),
            )