async def test_random_pruning(ignore_duplicates, recomplete_idx, batch_size,
                              task_series, prune_depth):

    ti = OrderedTaskPreparation(
        NoPrerequisites,
        identity,
        # each integer task depends on its immediate predecessor
        lambda x: x - 1,
        accept_dangling_tasks=True,
        max_depth=prune_depth,
    )
    ti.set_finished_dependency(task_series[0])

    for idx, task_batch in enumerate(partition_all(batch_size, task_series)):
        if ignore_duplicates:
            registerable_tasks = task_batch
        else:
            registerable_tasks = set(task_batch)

        if idx == recomplete_idx:
            # re-complete the first task's dependency, unless it is still
            # being tracked (re-adding it would register a duplicate)
            task_to_mark_finished = task_batch[0] - 1
            if task_to_mark_finished not in ti._tasks:
                ti.set_finished_dependency(task_to_mark_finished)

        try:
            ti.register_tasks(registerable_tasks,
                              ignore_duplicates=ignore_duplicates)
        except DuplicateTasks:
            if ignore_duplicates:
                # duplicates were supposed to be tolerated; surface the failure
                raise
            else:
                continue
        if ti.has_ready_tasks():
            await wait(ti.ready_tasks())
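All of the examples on this page lean on partition_all (from toolz/cytoolz), which chunks an iterable into tuples of at most n items, with only the final tuple allowed to be shorter. A minimal sketch of the batching behavior the test above depends on:

from toolz import partition_all

# partition_all(n, seq) yields tuples of at most n items; only the
# final tuple may be shorter, and no items are ever dropped.
batches = list(partition_all(3, range(8)))
assert batches == [(0, 1, 2), (3, 4, 5), (6, 7)]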
Example no. 2
    async def _process_transactions(self) -> None:
        while self.is_operational:
            buffer: List[SignedTransactionAPI] = []

            # wait for there to be items available on the queue.
            buffer.extend(await self._internal_queue.get())

            # continue to pull items from the queue synchronously until the
            # queue is either empty or we hit a sufficient size to justify
            # sending to our peers.
            while not self._internal_queue.empty():
                if len(buffer) > BATCH_LOW_WATER:
                    break
                buffer.extend(self._internal_queue.get_nowait())

            # Now that the queue is either empty or we have an adequate number
            # to send to our peers, broadcast them to the appropriate peers.
            for batch in partition_all(BATCH_HIGH_WATER, buffer):
                for receiving_peer in await self._peer_pool.get_peers():
                    filtered_tx = self._filter_tx_for_peer(
                        receiving_peer, batch)
                    if len(filtered_tx) == 0:
                        continue

                    self.logger.debug2(
                        'Relaying %d transactions to %s',
                        len(filtered_tx),
                        receiving_peer,
                    )
                    receiving_peer.sub_proto.send_transactions(filtered_tx)
                    self._add_txs_to_bloom(receiving_peer.session, filtered_tx)
                    # yield control to the event loop, since this loop
                    # processes a lot of data and queues up a lot of
                    # outbound messages.
                    await asyncio.sleep(0)
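The low/high water-mark drain pattern above is independent of the p2p machinery. A standalone sketch with a plain asyncio.Queue (the BATCH_LOW_WATER value is an illustrative stand-in, not trinity's actual constant):

import asyncio
from typing import List

BATCH_LOW_WATER = 100  # illustrative stand-in, not the real constant

async def drain(queue: asyncio.Queue) -> List[int]:
    # block until at least one item arrives, then greedily pull whatever
    # else is already queued, stopping once the low-water mark is hit
    buffer: List[int] = [await queue.get()]
    while not queue.empty():
        if len(buffer) > BATCH_LOW_WATER:
            break
        buffer.append(queue.get_nowait())
    return buffer

async def demo() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    for i in range(5):
        queue.put_nowait(i)
    print(await drain(queue))  # [0, 1, 2, 3, 4]

asyncio.run(demo())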
Example no. 3
    async def send_locations(self,
                             node: Node,
                             *,
                             request_id: int,
                             locations: Collection[Node]) -> int:
        if node.node_id == self.local_node_id:
            raise ValueError("Cannot send to self")
        batches = tuple(partition_all(NODES_PER_PAYLOAD, locations))
        self.logger.debug("Sending Locations with %d nodes to %s", len(locations), node)
        if batches:
            total_batches = len(batches)
            for batch in batches:
                payload = tuple(
                    location.to_payload()
                    for location in batch
                )
                response = Message(
                    Locations(request_id, total_batches, payload),
                    node,
                )
                await self.message_dispatcher.send_message(response)
                await self.events.sent_locations.trigger(response)
            return total_batches
        else:
            # no locations: still send a single empty payload so the
            # requester gets a definitive response
            response = Message(
                Locations(request_id, 1, ()),
                node,
            )
            await self.message_dispatcher.send_message(response)
            await self.events.sent_locations.trigger(response)
            return 1
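Stripped of the messaging plumbing, the pagination logic above reduces to: split the collection into fixed-size pages, tag every page with the total page count, and treat an empty result as a single empty page. A sketch under those assumptions (NODES_PER_PAYLOAD is an illustrative stand-in, not the protocol's real constant):

from typing import Sequence, Tuple
from toolz import partition_all

NODES_PER_PAYLOAD = 4  # illustrative page size, not the protocol's constant

def paginate(locations: Sequence[int]) -> Tuple[Tuple[int, Tuple[int, ...]], ...]:
    # an empty result still produces one (empty) page, so the requester
    # always receives a definitive response
    batches = tuple(partition_all(NODES_PER_PAYLOAD, locations)) or ((),)
    total = len(batches)
    return tuple((total, batch) for batch in batches)

assert paginate(range(6)) == ((2, (0, 1, 2, 3)), (2, (4, 5)))
assert paginate(()) == ((1, ()),)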
Example no. 4
async def get_linked_validators(event_loop,
                                event_bus) -> Tuple[Validator, Validator]:
    # `keymap` is defined elsewhere in the test module (not shown here);
    # an even key count is assumed so the two-way unpacking below succeeds
    all_indices = tuple(range(len(keymap)))
    global_peer_count = 2
    alice_indices, bob_indices = partition_all(
        len(all_indices) // global_peer_count, all_indices)
    alice = await get_validator(event_loop, event_bus, alice_indices)
    bob = await get_validator(event_loop, event_bus, bob_indices)
    return alice, bob
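The two-way unpacking works because partition_all with a chunk size of len(seq) // 2 yields exactly two halves when the length is even:

from toolz import partition_all

all_indices = tuple(range(8))
alice_indices, bob_indices = partition_all(len(all_indices) // 2, all_indices)
assert alice_indices == (0, 1, 2, 3)
assert bob_indices == (4, 5, 6, 7)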
Example no. 5
    async def _explore(
        self,
        node_id: NodeID,
        max_distance: int,
    ) -> None:
        """
        Explore the neighborhood around the given `node_id` out to the
        specified `max_distance`.
        """
        async with trio.open_nursery() as nursery:
            for distances in partition_all(2, range(max_distance, 0, -1)):
                try:
                    found_enrs = await self._network.find_nodes(
                        node_id, *distances)
                except trio.TooSlowError:
                    self.unresponsive.add(node_id)
                    return
                except MissingEndpointFields:
                    self.unreachable.add(node_id)
                    return
                except ValidationError:
                    self.invalid.add(node_id)
                    return
                else:
                    # once we encounter a pair of buckets that elicits an empty
                    # response we assume that all subsequent buckets will also
                    # be empty.
                    if not found_enrs:
                        self.logger.debug(
                            "explore-finish: node_id=%s  covered=%d-%d",
                            node_id.hex(),
                            max_distance,
                            distances[0],
                        )
                        break

                for enr in found_enrs:
                    try:
                        self._network.enr_db.set_enr(enr)
                    except OldSequenceNumber:
                        pass

                # check whether we have found any new records.  If so, queue
                # them and wake any waiting workers.  This is guarded by the
                # `condition` object to ensure we maintain a consistent view
                # of the `seen` nodes.
                async with self._condition:
                    new_enrs = tuple(enr for enr in reduce_enrs(found_enrs)
                                     if enr.node_id not in self.seen)

                    if new_enrs:
                        self.seen.update(enr.node_id for enr in new_enrs)
                        self._condition.notify_all()

                # use the `NetworkProtocol.bond` to perform a liveness check
                for enr in new_enrs:
                    nursery.start_soon(self._bond_then_send, enr)
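The bucket pairing above is just partition_all over a descending range; with an odd max_distance the innermost query covers a single bucket:

from toolz import partition_all

# distances are scanned outside-in, two buckets per find_nodes call
assert list(partition_all(2, range(5, 0, -1))) == [(5, 4), (3, 2), (1,)]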
Example no. 6
def decode_from_bin(input_bin):
    """
    Decode a sequence of 0/1 integer bits into the bytes they encode,
    yielding one integer (0-255) per 8-bit chunk, e.g.
    0100000101010111010000110100100101001001 -> ASCII
    """
    for chunk in partition_all(8, input_bin):
        # most-significant bit first within each 8-bit chunk
        yield sum(
            2**exp * bit
            for exp, bit
            in enumerate(reversed(chunk))
        )
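A quick round-trip check. encode_to_bin is a hypothetical inverse written here for illustration; it is not part of the source above:

def encode_to_bin(data: bytes) -> tuple:
    # hypothetical helper: flatten each byte into its 8 constituent bits,
    # most significant first, matching what decode_from_bin expects
    return tuple(
        (byte >> shift) & 1
        for byte in data
        for shift in range(7, -1, -1)
    )

bits = encode_to_bin(b"Hi")
assert bytes(decode_from_bin(bits)) == b"Hi"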
Example no. 7
async def get_linked_validators(event_loop, event_bus,
                                monkeypatch) -> Tuple[Validator, Validator]:
    keymap = mk_keymap_of_size(NUM_VALIDATORS)
    all_indices = tuple(range(len(keymap)))
    global_peer_count = 2
    alice_indices, bob_indices = partition_all(
        len(all_indices) // global_peer_count, all_indices)
    alice = await get_validator(event_loop, event_bus, monkeypatch,
                                alice_indices)
    bob = await get_validator(event_loop, event_bus, monkeypatch, bob_indices)
    return alice, bob
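One caveat shared by both get_linked_validators variants: the two-way unpacking assumes an even number of indices. With an odd count, partition_all yields a third, single-element batch and the unpacking raises ValueError:

from toolz import partition_all

odd = tuple(range(5))
assert list(partition_all(len(odd) // 2, odd)) == [(0, 1), (2, 3), (4,)]
# so `alice_indices, bob_indices = partition_all(...)` would fail here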
Example no. 8
    async def announce(self, key: bytes, who: Node) -> None:
        self.logger.debug("Starting announce for: %s", encode_hex(key))
        content_id = content_key_to_node_id(key)
        found_nodes = await self.iterative_lookup(content_id)

        async def do_advertise(node: Node) -> None:
            # give up on any single peer after ADVERTISE_TIMEOUT seconds
            with trio.move_on_after(ADVERTISE_TIMEOUT):
                await self.client.advertise(node, key=key, who=who)

        # advertise in batches so that at most KADEMLIA_ANNOUNCE_CONCURRENCY
        # advertisements are in flight at once
        for batch in partition_all(KADEMLIA_ANNOUNCE_CONCURRENCY, found_nodes):
            async with trio.open_nursery() as nursery:
                for node in batch:
                    nursery.start_soon(do_advertise, node)
        self.logger.debug(
            "Finished announce to %d peers for: %s",
            len(found_nodes),
            encode_hex(key),
        )
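The batch-per-nursery shape generalizes to any "at most K tasks in flight" fan-out. A minimal trio sketch (KADEMLIA_ANNOUNCE_CONCURRENCY is reused as an illustrative bound, not the real constant; process is a stand-in task):

import trio
from toolz import partition_all

KADEMLIA_ANNOUNCE_CONCURRENCY = 3  # illustrative bound, not the real value

async def process(item: int) -> None:
    await trio.sleep(0.01)
    print("done", item)

async def fan_out(items) -> None:
    # each nursery runs one batch concurrently and does not exit until
    # every task in it finishes, capping concurrency at the batch size
    for batch in partition_all(KADEMLIA_ANNOUNCE_CONCURRENCY, items):
        async with trio.open_nursery() as nursery:
            for item in batch:
                nursery.start_soon(process, item)

trio.run(fan_out, range(7))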