Example 1
    def __init__(self, name: str):
        super().__init__(name)

        self._running = trio.Event()
        self._stopped = trio.Event()

        # Temporary storage for SendChannels used in the `stream` API,
        # kept as one `set` of channels per key.
        self._stream_channels = collections.defaultdict(set)
        self._pending_requests = {}
        self._sync_handlers = collections.defaultdict(list)
        self._async_handlers = collections.defaultdict(list)

        self._run_lock = trio.Lock()

        # Signal when a new remote connection is established
        self._remote_connections_changed = trio.Condition()  # type: ignore

        # Signal when at least one remote has had a subscription change.
        self._remote_subscriptions_changed = trio.Condition()  # type: ignore

        # events used to signal that the endpoint has fully booted up.
        self._message_processing_loop_running = trio.Event()
        self._connection_loop_running = trio.Event()
        self._process_broadcasts_running = trio.Event()

        # internal signal that local subscriptions have changed.
        self._subscriptions_changed = trio.Event()

        self._socket_bound = trio.Event()
        self._server_stopped = trio.Event()
Example 2
async def adaptive_timeout(
    *tasks: Tuple[Callable[..., Awaitable[None]], Sequence[Any]],
    threshold: int = 1,
    variance: float = 2,
) -> None:
    """
    Given a set of tasks, run them all concurrently.  Once at least
    `threshold` of them have completed, the average completion time is
    measured.  The remaining tasks are then given `avg_task_time * variance`
    seconds to complete, after which they are cancelled.
    """
    if threshold >= len(tasks):
        raise ValueError(
            "The `threshold` value must be less than the number of tasks")
    elif threshold < 1:
        raise ValueError("The `threshold` value must be 1 or greater")

    # a mutable list to track the average task time
    task_times: List[float] = []
    condition = trio.Condition()

    # a thin wrapper around the provided tasks which measures their execution
    # time.
    async def task_wrapper(task_fn: Callable[..., Awaitable[None]],
                           args: Sequence[Any]) -> None:
        nonlocal task_times

        start_at = trio.current_time()
        await task_fn(*args)
        elapsed = trio.current_time() - start_at
        async with condition:
            task_times.append(elapsed)
            condition.notify_all()

    async with trio.open_nursery() as nursery:
        for task_fn, task_args in tasks:
            nursery.start_soon(task_wrapper, task_fn, task_args)

        start_at = trio.current_time()

        # wait for `threshold` tasks to complete; the count is checked while
        # holding the condition so a notification cannot be missed between
        # the check and the wait.
        async with condition:
            while len(task_times) < threshold:
                await condition.wait()

        # measure the average task completion time and calculate the remaining
        # timeout for the remaining tasks.
        avg_task_time = sum(task_times) / len(task_times)
        timeout_at = start_at + (avg_task_time * variance)
        timeout_remaining = timeout_at - trio.current_time()

        # apply the calculated timeout on the remaining tasks
        if timeout_remaining > 0:
            with trio.move_on_after(timeout_remaining):
                async with condition:
                    while len(task_times) < len(tasks):
                        await condition.wait()

        # cancel any remaining tasks.
        nursery.cancel_scope.cancel()
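A minimal usage sketch for `adaptive_timeout` (the `fetch` coroutine and the delay values are hypothetical stand-ins for real network requests). With `threshold=1` and `variance=3.0`, the fastest task sets the time budget and the 5-second task is cancelled roughly 0.3 seconds in:

import trio

async def fetch(delay: float) -> None:
    # stand-in task simulating a request that takes `delay` seconds
    await trio.sleep(delay)

async def main() -> None:
    # the 0.1s task completes first; avg 0.1s * variance 3.0 gives the
    # remaining tasks until t=0.3s, so the 5.0s task gets cancelled
    await adaptive_timeout(
        (fetch, (0.1,)),
        (fetch, (0.2,)),
        (fetch, (5.0,)),
        threshold=1,
        variance=3.0,
    )

trio.run(main)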
Example 3
    def __init__(self, *, nursery):
        self.nursery = nursery
        self.broadcaster = ca.Broadcaster(our_role=ca.CLIENT)
        self.log = self.broadcaster.log
        self.command_bundle_queue = trio.Queue(capacity=1000)
        self.broadcaster_command_condition = trio.Condition()
        self._cleanup_condition = trio.Condition()
        self._cleanup_event = trio.Event()

        # UDP socket broadcasting to CA servers
        self.udp_sock = None
        self.registered = False  # refers to RepeaterRegisterRequest
        self.unanswered_searches = {}  # map search id (cid) to name
        self.search_results = {}  # map name to address
        self.new_id = ThreadsafeCounter(
            dont_clash_with=self.unanswered_searches)
Example 4
    def __init__(self, backend_pool, db, cachedir, max_size, max_entries=768):
        log.debug('Initializing')

        self.path = cachedir
        self.db = db
        self.backend_pool = backend_pool
        self.cache = CacheDict(max_size, max_entries)
        self.mlock = MultiLock()
        self.in_transit = set()
        self.upload_threads = []
        self.removal_threads = []
        self.transfer_completed = trio.Condition()

        # Will be initialized once threads are available
        self.to_upload = None
        self.to_remove = None

        if os.path.exists(self.path):
            self.load_cache()
            log.info('Loaded %d entries from cache', len(self.cache))
        else:
            os.mkdir(self.path)

        # Initialized from the outside to prevent a cyclic dependency.
        self.fs = None
Example 5
    def __init__(self,
                 network: NetworkProtocol,
                 target: NodeID,
                 concurrency: int = 3) -> None:
        self.logger = get_extended_debug_logger("ddht.Explorer")

        self._network = network
        self.target = target
        self._concurrency = concurrency

        self._condition = trio.Condition()

        self.in_flight = set()
        self.seen = set()
        self.queried = {self._network.local_node_id}
        self.unresponsive = set()
        self.unreachable = set()
        self.invalid = set()

        # Using a relatively small buffer size here ensures that we are applying
        # back-pressure against the workers.  If the consumer is only consuming a
        # few nodes, we don't need to continue issuing requests.
        self._send_channel, self._receive_channel = trio.open_memory_channel[
            ENRAPI](16)

        # signal that the initial set of nodes for exploration has been seeded.
        self._exploration_seeded = trio.Event()

        # signal that the service is up and running and ready for nodes to be streamed.
        self._ready = trio.Event()
Example 6
 def provision_condition(cls) -> Any:
     if cls.has_trio_loop():
         return trio.Condition()
     elif cls.has_asyncio_loop():
         return asyncio.Condition()
     else:
         return threading.Condition()
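The loop-detection helpers are not shown in the source; a plausible sketch, under the assumption that they probe the current thread for a running loop (the `LoopDetection` class name and method bodies are illustrative, not from the original). Both probes rely on calls that raise `RuntimeError` when no loop is active:

import asyncio

import trio

class LoopDetection:
    @classmethod
    def has_trio_loop(cls) -> bool:
        # current_trio_token() raises RuntimeError outside a trio run
        try:
            trio.lowlevel.current_trio_token()
        except RuntimeError:
            return False
        return True

    @classmethod
    def has_asyncio_loop(cls) -> bool:
        # get_running_loop() raises RuntimeError when no loop is running
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            return False
        return True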
Example 7
 def __init__(self, port: int) -> None:
     """
     :param port: The port that a server wants to bind to on this machine, and
     make publicly accessible.
     """
     self.port = port
     self._has_ip_addresses = trio.Event()
     self._ip_changed = trio.Condition()
Example 8
 def __init__(self, pool_init, _n, temperr, persistent, listen_addr):
     self.pool = pool_init
     self._n = _n
     self.temperr = {*temperr, BrokenSubprocessError}
     self.persistent = persistent
     self.listen_addr = listen_addr
     self.cond_res = trio.Condition()
     self.soacceptor = None
Example 9
 def __init__(self, circuit, client, context):
     super().__init__(circuit, client, context)
     self.nursery = context.nursery
     self.QueueFull = trio.WouldBlock
     self.command_queue = trio.Queue(ca.MAX_COMMAND_BACKLOG)
     self.new_command_condition = trio.Condition()
     self.subscription_queue = trio.Queue(ca.MAX_TOTAL_SUBSCRIPTION_BACKLOG)
     self.write_event = Event()
     self.events_on = trio.Event()
Example 10
    def __init__(self, *, nursery):
        self.nursery = nursery
        self.broadcaster = ca.Broadcaster(our_role=ca.CLIENT)
        self.log = self.broadcaster.log
        self.command_chan = open_memory_channel(1000)
        self.broadcaster_command_condition = trio.Condition()
        self._cleanup_condition = trio.Condition()
        self._cleanup_event = trio.Event()

        # UDP socket broadcasting to CA servers
        self.udp_sock = None
        self.registered = False  # refers to RepeaterRegisterRequest
        self.unanswered_searches = {}  # map search id (cid) to name
        self.search_results = {}  # map name to address
        self.new_id = ThreadsafeCounter(
            dont_clash_with=self.unanswered_searches)

        self.environ = get_environment_variables()
        self.ca_server_port = self.environ['EPICS_CA_SERVER_PORT']
Example 11
 def __init__(self, circuit, *, nursery):
     self.circuit = circuit  # a caproto.VirtualCircuit
     self.log = self.circuit.log
     self.nursery = nursery
     self.channels = {}  # map cid to Channel
     self.ioids = {}  # map ioid to Channel
     self.ioid_data = {}  # map ioid to server response
     self.subscriptionids = {}  # map subscriptionid to Channel
     self.connected = True
     self.socket = None
     self.command_chan = open_memory_channel(1000)
     self.new_command_condition = trio.Condition()
     self._socket_lock = trio.Lock()
Example 12
    def __init__(
        self,
        local_name: str,
        conn: ConnectionAPI,
        subscriptions_changed: ConditionAPI,
        new_msg_func: Callable[[Broadcast], Awaitable[Any]],
    ) -> None:
        super().__init__(local_name, conn, new_msg_func)

        self._notify_lock = trio.Lock()  # type: ignore

        self._received_response = trio.Condition()  # type: ignore
        self._received_subscription = subscriptions_changed

        self._subscriptions_initialized = trio.Event()  # type: ignore

        self._running = trio.Event()  # type: ignore
        self._stopped = trio.Event()  # type: ignore
        self._ready = trio.Event()  # type: ignore
Example 13
    def __init__(self, circuit, client, context):
        super().__init__(circuit, client, context)
        self.nursery = context.nursery
        self.QueueFull = trio.WouldBlock

        self.command_chan = open_memory_channel(ca.MAX_COMMAND_BACKLOG)

        # For compatibility with server common:
        self.command_queue = self.command_chan.send

        self.new_command_condition = trio.Condition()
        self.subscription_chan = open_memory_channel(ca.MAX_TOTAL_SUBSCRIPTION_BACKLOG)

        # For compatibility with server common:
        self.subscription_queue = self.subscription_chan.send

        self.write_event = Event()
        self.events_on = trio.Event()
Example 14
 async def start_async_works(self):
     async with trio.open_nursery() as nursery:
         cond = trio.Condition()
         for key, af in reversed(self.async_funcs.items()):
             nursery.start_soon(af, cond)
     logger.info("Work all done in {}".format(time.time() - self.t_start))
Example 15
 def __init__(self, circuit, client, context):
     super().__init__(circuit, client, context)
     self.nursery = context.nursery
     self.command_queue = trio.Queue(1000)
     self.new_command_condition = trio.Condition()
Example 16
 def __init__(self):
     self.locked_keys = set()
     self.cond = trio.Condition()
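The two attributes suggest the classic per-key lock built on a `Condition` wait loop; a minimal sketch of the matching methods (the `acquire`/`release` names are assumed, they are not shown in the source):

 async def acquire(self, key):
     # block until `key` is free, then claim it
     async with self.cond:
         while key in self.locked_keys:
             await self.cond.wait()
         self.locked_keys.add(key)

 async def release(self, key):
     # release `key` and wake all waiters so they can re-check
     async with self.cond:
         self.locked_keys.remove(key)
         self.cond.notify_all()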
Example 17
async def common_recursive_find_nodes(
    network: NetworkProtocol,
    target: NodeID,
    *,
    concurrency: int = 3,
    unresponsive_cache: Dict[NodeID, float] = UNRESPONSIVE_CACHE,
) -> AsyncIterator[trio.abc.ReceiveChannel[ENRAPI]]:
    """
    An optimized version of the recursive lookup algorithm for a kademlia
    network.

    Continually look up nodes in the target part of the network, keeping
    track of all of the nodes we have seen.

    Exit once we have queried all of the `k` closest nodes to the target.

    The concurrency structure here is optimized to minimize the effect of
    unresponsive nodes on the total time it takes to perform the recursive
    lookup.  Some requests will hang for up to 10 seconds.  The
    `adaptive_timeout`, combined with the multiple concurrent workers, helps
    mitigate the overall slowdown caused by a few unresponsive nodes, since
    the other queries can be issued concurrently.
    """
    network.logger.debug2("Recursive find nodes: %s", target.hex())
    start_at = trio.current_time()

    # The set of NodeID values we have already queried.
    queried_node_ids: Set[NodeID] = set()

    # The set of NodeID values that timed out.
    #
    # The `local_node_id` is included as a convenience so that we don't have
    # to continually filter it out below.
    unresponsive_node_ids: Set[NodeID] = {network.local_node_id}

    # Extend with the cached nodes that were deemed unresponsive within the
    # last 5 minutes (300 seconds).
    unresponsive_node_ids.update(
        node_id
        for node_id, last_unresponsive_at in unresponsive_cache.items()
        if trio.current_time() - last_unresponsive_at < 300
    )

    # Accumulator of the node_ids we have seen
    received_node_ids: Set[NodeID] = set()

    # Tracker for node_ids that are actively being requested.
    in_flight: Set[NodeID] = set()

    condition = trio.Condition()

    def get_unqueried_node_ids() -> Tuple[NodeID, ...]:
        """
        Get up to three of the closest unqueried nodes to the target, drawn
        from the closest `k` nodes that have not been deemed unresponsive.
        """
        # Construct an iterable of *all* the nodes we know about ordered by
        # closeness to the target.
        candidates = iter_closest_nodes(
            target, network.routing_table, received_node_ids
        )
        # Remove any unresponsive nodes from that iterable
        responsive_candidates = itertools.filterfalse(
            lambda node_id: node_id in unresponsive_node_ids, candidates
        )
        # Grab the closest K
        closest_k_candidates = take(
            network.routing_table.bucket_size, responsive_candidates,
        )
        # Filter out any from the closest K that we've already queried or that are in-flight
        closest_k_unqueried = itertools.filterfalse(
            lambda node_id: node_id in queried_node_ids or node_id in in_flight,
            closest_k_candidates,
        )

        return tuple(take(3, closest_k_unqueried))

    async def do_lookup(
        node_id: NodeID, send_channel: trio.abc.SendChannel[ENRAPI]
    ) -> None:
        """
        Perform an individual lookup on the target part of the network by
        querying the given `node_id`.
        """
        if node_id == target:
            distance = 0
        else:
            distance = compute_log_distance(node_id, target)

        try:
            found_enrs = await network.find_nodes(node_id, distance)
        except (trio.TooSlowError, MissingEndpointFields, ValidationError):
            unresponsive_node_ids.add(node_id)
            unresponsive_cache[node_id] = trio.current_time()
            return
        except trio.Cancelled:
            # We don't add these to the unresponsive cache since they didn't
            # necessarily exceed the full 10s request/response timeout.
            unresponsive_node_ids.add(node_id)
            raise

        for enr in found_enrs:
            try:
                network.enr_db.set_enr(enr)
            except OldSequenceNumber:
                pass

        async with condition:
            new_enrs = tuple(
                enr for enr in found_enrs if enr.node_id not in received_node_ids
            )
            received_node_ids.update(enr.node_id for enr in new_enrs)

        for enr in new_enrs:
            try:
                await send_channel.send(enr)
            except (trio.BrokenResourceError, trio.ClosedResourceError):
                # In the event that the consumer of `recursive_find_nodes`
                # exits early before the lookup has completed we can end up
                # operating on a closed channel.
                return

    async def worker(
        worker_id: NodeID, send_channel: trio.abc.SendChannel[ENRAPI]
    ) -> None:
        """
        Pulls unqueried nodes from the closest k nodes and performs a
        concurrent lookup on them.
        """
        for _round in itertools.count():
            async with condition:
                node_ids = get_unqueried_node_ids()

                if not node_ids:
                    await condition.wait()
                    continue

                # Mark the node_ids as having been queried.
                queried_node_ids.update(node_ids)
                # Mark the node_ids as being in-flight.
                in_flight.update(node_ids)

                # Some of the node ids may have come from our routing table.
                # These won't be present in the `received_node_ids` so we
                # detect this here and send them over the channel.
                try:
                    for node_id in node_ids:
                        if node_id not in received_node_ids:
                            enr = network.enr_db.get_enr(node_id)
                            received_node_ids.add(node_id)
                            await send_channel.send(enr)
                except (trio.BrokenResourceError, trio.ClosedResourceError):
                    # In the event that the consumer of `recursive_find_nodes`
                    # exits early before the lookup has completed we can end up
                    # operating on a closed channel.
                    return

            if len(node_ids) == 1:
                await do_lookup(node_ids[0], send_channel)
            else:
                tasks = tuple(
                    (do_lookup, (node_id, send_channel)) for node_id in node_ids
                )
                try:
                    await adaptive_timeout(*tasks, threshold=1, variance=2.0)
                except trio.TooSlowError:
                    pass

            async with condition:
                # Remove the `node_ids` from the in_flight set.
                in_flight.difference_update(node_ids)

                condition.notify_all()

    async def _monitor_done(send_channel: trio.abc.SendChannel[ENRAPI]) -> None:
        async with send_channel:
            async with condition:
                while True:
                    # this `move_on_after` is a failsafe to prevent deadlock
                    # situations which are possible with `Condition` objects.
                    with trio.move_on_after(60) as scope:
                        node_ids = get_unqueried_node_ids()

                        if not node_ids and not in_flight:
                            break
                        else:
                            await condition.wait()

                    if scope.cancelled_caught:
                        network.logger.error("Deadlock")

    send_channel, receive_channel = trio.open_memory_channel[ENRAPI](256)

    async with trio.open_nursery() as nursery:
        nursery.start_soon(_monitor_done, send_channel)

        for worker_id in range(concurrency):
            nursery.start_soon(worker, worker_id, send_channel)

        async with receive_channel:
            yield receive_channel

        nursery.cancel_scope.cancel()

    elapsed = trio.current_time() - start_at

    network.logger.debug(
        "Lookup for %s finished in %f seconds: seen=%d  queried=%d  unresponsive=%d",
        target.hex(),
        elapsed,
        len(received_node_ids),
        len(queried_node_ids),
        len(unresponsive_node_ids),
    )
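Given the `AsyncIterator[trio.abc.ReceiveChannel[ENRAPI]]` annotation and the `yield`, the function is presumably consumed through `contextlib.asynccontextmanager`; a hedged consumption sketch (the `collect_closest` wrapper is illustrative, and `network`/`target` are assumed to come from an already-running node):

from contextlib import asynccontextmanager

recursive_find_nodes = asynccontextmanager(common_recursive_find_nodes)

async def collect_closest(network: NetworkProtocol, target: NodeID) -> None:
    async with recursive_find_nodes(network, target) as enr_channel:
        async for enr in enr_channel:
            # ENRs arrive as soon as a worker discovers them
            network.logger.debug("found: %s", enr.node_id.hex())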
Example 18
 def condition(cls, lock=None):
     return trio.Condition(lock)
Example 19
 def __init__(self, *args):
     super().__init__(*args)
     self._log_message_queue = []
     self._measurements = {}
     self._trio_token = None
     self._condition = trio.Condition()
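Storing `_trio_token` next to `_condition` hints that notifications originate on a foreign thread; a sketch of how such a handoff could look using `trio.from_thread.run` with the stored token (the `notify_from_thread` method name is an assumption):

 def notify_from_thread(self, message):
     # called from a non-trio thread: queue the message, then re-enter
     # the trio run via the stored token to wake any waiting tasks
     self._log_message_queue.append(message)

     async def _notify():
         async with self._condition:
             self._condition.notify_all()

     trio.from_thread.run(_notify, trio_token=self._trio_token)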