Example #1
    async def connect_to_nodes(self, nodes: Iterator[NodeAPI]) -> None:
        # create an iterator over the known nodes
        nodes_iter = iter(nodes)

        while True:
            if self.is_full or not self.is_operational:
                return

            # Only attempt as many connections as there are open peer slots.
            available_peer_slots = self.max_peers - len(self)
            batch_size = clamp(1, 10, available_peer_slots)
            batch = tuple(take(batch_size, nodes_iter))

            # There are no more *known* nodes to connect to.
            if not batch:
                return

            self.logger.debug(
                'Initiating %d peer connection attempts with %d open peer slots',
                len(batch),
                available_peer_slots,
            )
            # Try to connect to the peers concurrently.
            await asyncio.gather(*(self.connect_to_node(node) for node in batch))
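
The batching above relies on two helpers: clamp (with the (lower, upper, value) argument order, presumably from eth_utils) and take (from toolz/cytoolz). A minimal standalone sketch of the same batching pattern, with hypothetical stand-ins for both helpers:

from itertools import islice
from typing import Iterator, Tuple, TypeVar

T = TypeVar("T")

def clamp(lower: int, upper: int, value: int) -> int:
    # Hypothetical stand-in: bound value to the closed interval [lower, upper].
    return max(lower, min(upper, value))

def take(n: int, iterable: Iterator[T]) -> Tuple[T, ...]:
    # Hypothetical stand-in for toolz.take, materialized to a tuple.
    return tuple(islice(iterable, n))

def batches(candidates: Iterator[T], open_slots: int) -> Iterator[Tuple[T, ...]]:
    # Mirrors the loop in connect_to_nodes: each batch has at least 1 node,
    # at most 10, and never more than the remaining open peer slots.
    nodes_iter = iter(candidates)
    while open_slots > 0:
        batch = take(clamp(1, 10, open_slots), nodes_iter)
        if not batch:
            return  # no more known nodes to connect to
        yield batch
        open_slots -= len(batch)

print([len(b) for b in batches(iter(range(25)), open_slots=23)])  # [10, 10, 3]
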
Example #2
    async def ensure_nodes_present(self,
                                   node_hashes: Collection[Hash32],
                                   urgent: bool = True) -> int:
        """
        Wait until the nodes that are the preimages of `node_hashes` are available in the database.
        If one is not available in the first check, request it from peers.

        :param urgent: Should these nodes be downloaded urgently? If False, download as backfill

        Note that if your ultimate goal is account or storage data, it's probably better to use
        download_account or download_storage. This method is useful for other
        scenarios, such as bytecode or intermediate-node lookups.

        :return: how many nodes had to be downloaded
        """
        if urgent:
            t = Timer()
            num_nodes_found = await self._wait_for_nodes(
                node_hashes,
                self._node_tasks,
                BLOCK_IMPORT_MISSING_STATE_TIMEOUT,
            )
            # If it took too long to get a single urgent node, then increase the "spread" factor
            if len(node_hashes) == 1 and t.elapsed > MAX_ACCEPTABLE_WAIT_FOR_URGENT_NODE:
                new_spread_factor = clamp(
                    0,
                    self._max_spread_beam_factor(),
                    self._spread_factor + 1,
                )
                if new_spread_factor != self._spread_factor:
                    self.logger.debug(
                        "spread-beam-update: Urgent node latency=%.3fs, update factor %d to %d",
                        t.elapsed,
                        self._spread_factor,
                        new_spread_factor,
                    )
                    self._queen_tracker.set_desired_knight_count(new_spread_factor)
                    self._spread_factor = new_spread_factor
        else:
            num_nodes_found = await self._wait_for_nodes(
                node_hashes,
                self._maybe_useful_nodes,
                BLOCK_IMPORT_MISSING_STATE_TIMEOUT,
            )

        return num_nodes_found
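
The escalation at the end of ensure_nodes_present is a clamped increment: each time a single urgent node is slow to arrive, the "spread" factor grows by one, but never beyond its ceiling. A standalone sketch of that policy (the timeout value and ceiling below are hypothetical):

MAX_ACCEPTABLE_WAIT_FOR_URGENT_NODE = 0.5  # seconds; hypothetical value

def clamp(lower: int, upper: int, value: int) -> int:
    return max(lower, min(upper, value))

def next_spread_factor(current: int, ceiling: int, num_hashes: int, elapsed: float) -> int:
    # Only escalate when a *single* urgent node was slow; larger batches
    # are expected to take longer, so they never trigger an increase.
    if num_hashes == 1 and elapsed > MAX_ACCEPTABLE_WAIT_FOR_URGENT_NODE:
        return clamp(0, ceiling, current + 1)
    return current

assert next_spread_factor(2, ceiling=4, num_hashes=1, elapsed=1.2) == 3
assert next_spread_factor(4, ceiling=4, num_hashes=1, elapsed=1.2) == 4  # capped at the ceiling
assert next_spread_factor(2, ceiling=4, num_hashes=3, elapsed=1.2) == 2  # a batch never escalates
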
Example #3
    async def _find_urgent_nodes(self, queen: ETHPeer,
                                 urgent_hashes: Tuple[Hash32, ...],
                                 batch_id: int) -> None:

        # Generate and schedule the tasks to request the urgent node(s) from multiple peers
        knights = tuple(self._queen_tracker.pop_knights())
        urgent_requests = [
            create_task(
                self._get_nodes(peer, urgent_hashes, urgent=True),
                name=f"BeamDownloader._get_nodes({peer.remote}, ...)",
            ) for peer in (queen, ) + knights
        ]

        # Process the returned nodes, in the order they complete
        urgent_timer = Timer()
        async with cleanup_tasks(*urgent_requests):
            for result_coro in asyncio.as_completed(urgent_requests):
                nodes_returned, new_nodes, peer = await result_coro
                time_on_urgent = urgent_timer.elapsed

                # After the first peer returns something, stop waiting; the
                # cleanup_tasks context cancels the remaining requests on exit.
                if nodes_returned:
                    break
                elif peer == queen:
                    self.logger.debug("queen %s returned 0 urgent nodes of %r",
                                      peer, urgent_hashes)
                    # Wait for the next peer response

        # Log the received urgent nodes
        if peer == queen:
            log_header = "beam-queen-urgent-rtt"
        else:
            log_header = "spread-beam-urgent-rtt"
        self.logger.debug(
            "%s: got %d/%d +%d nodes in %.3fs from %s (%s)",
            log_header,
            len(nodes_returned),
            len(urgent_hashes),
            len(new_nodes),
            time_on_urgent,
            peer.remote,
            urgent_hashes[0][:2].hex(),
        )

        # Stat updates
        self._total_processed_nodes += len(new_nodes)
        self._urgent_processed_nodes += len(new_nodes)
        self._time_on_urgent += time_on_urgent

        # If it took too long to get a single urgent node, then increase the "spread" factor
        if len(urgent_hashes) == 1 and time_on_urgent > MAX_ACCEPTABLE_WAIT_FOR_URGENT_NODE:
            new_spread_factor = clamp(
                0,
                self._max_spread_beam_factor(),
                self._spread_factor + 1,
            )
            if new_spread_factor != self._spread_factor:
                self.logger.debug(
                    "spread-beam-update: Urgent node latency=%.3fs, update factor %d to %d",
                    time_on_urgent,
                    self._spread_factor,
                    new_spread_factor,
                )
                self._queen_tracker.set_desired_knight_count(new_spread_factor)
                self._spread_factor = new_spread_factor

        # Complete the task in the TaskQueue
        task_hashes = tuple(node_hash for node_hash, _ in nodes_returned)
        await self._node_tasks.complete(batch_id, task_hashes)

        # Re-insert the peers for the next request
        for knight in knights:
            self._queen_tracker.insert_peer(knight)
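
_find_urgent_nodes races one request per peer: fire the tasks with create_task, consume results in completion order via asyncio.as_completed, and break on the first non-empty answer while the surrounding cleanup context cancels the stragglers. A minimal self-contained sketch of that pattern (peer names, delays, and payloads are made up):

import asyncio
from typing import Tuple

async def fake_request(peer: str, delay: float, payload: Tuple[str, ...]) -> Tuple[str, Tuple[str, ...]]:
    # Stand-in for _get_nodes: each peer answers after its own latency.
    await asyncio.sleep(delay)
    return peer, payload

async def first_useful_response() -> None:
    requests = [
        asyncio.create_task(fake_request("queen", 0.30, ("node-a",))),
        asyncio.create_task(fake_request("knight-1", 0.10, ())),         # fastest, but empty
        asyncio.create_task(fake_request("knight-2", 0.20, ("node-a",))),
    ]
    try:
        for result_coro in asyncio.as_completed(requests):
            peer, nodes = await result_coro
            if nodes:
                print(f"first useful answer from {peer}: {nodes}")
                break  # stop waiting for the remaining peers
            print(f"{peer} returned nothing, waiting for the next response")
    finally:
        # Plays the role of cleanup_tasks: cancel whatever is still pending.
        for task in requests:
            task.cancel()

asyncio.run(first_useful_response())
# knight-1 returned nothing, waiting for the next response
# first useful answer from knight-2: ('node-a',)
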
Example #4
def return_value_decimal(lower: decimal.Decimal, upper: decimal.Decimal,
                         value: decimal.Decimal) -> decimal.Decimal:
    return clamp(lower, upper, value)
Example #5
def return_value_float(lower: float, upper: float, value: float) -> float:
    return clamp(lower, upper, value)
Example #6
def return_value_int(lower: int, upper: int, value: int) -> int:
    return clamp(lower, upper, value)
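
Examples #4 through #6 are thin type-specific wrappers (they read like property-test harnesses) around the same three-argument clamp(lower, upper, value) seen in the examples above. A hypothetical re-implementation, sketched to show the shared behavior across all three numeric types:

import decimal
from typing import TypeVar

TNum = TypeVar("TNum", int, float, decimal.Decimal)

def clamp(lower: TNum, upper: TNum, value: TNum) -> TNum:
    # Hypothetical sketch matching the (lower, upper, value) argument order
    # used throughout the examples above; not the library implementation.
    if lower > upper:
        raise ValueError("lower bound must not exceed upper bound")
    return max(lower, min(upper, value))

assert clamp(0, 10, 15) == 10        # int: clipped to the upper bound
assert clamp(0.0, 1.0, -0.5) == 0.0  # float: clipped to the lower bound
assert clamp(decimal.Decimal("0"), decimal.Decimal("1"),
             decimal.Decimal("0.25")) == decimal.Decimal("0.25")  # already in range
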