Example #1
def test_resolve_future(resolve):
    future = Future()
    promise = resolve(future)
    assert promise.is_pending
    future.set_result(1)
    assert promise.get() == 1
    assert promise.is_fulfilled
Example #2
 def _init_futures(self):
     """Build futures for results and output; hook up callbacks"""
     if not self._children:
         for msg_id in self.msg_ids:
             future = self._client._futures.get(msg_id, None)
             if not future:
                 result = self._client.results.get(msg_id, _default)
                 # result resides in local cache, construct already-resolved Future
                 if result is not _default:
                     future = Future()
                     future.msg_id = msg_id
                     future.output = Future()
                     future.output.metadata = self._client.metadata[msg_id]
                     future.set_result(result)
                     future.output.set_result(None)
             if not future:
                 raise KeyError("No Future or result for msg_id: %s" % msg_id)
             self._children.append(future)
             
     self._result_future = multi_future(self._children)
     self._output_future = multi_future([self._result_future] + [
         f.output for f in self._children
     ])
     # on completion of my constituents, trigger my own resolution
     self._result_future.add_done_callback(self._resolve_result)
     self._output_future.add_done_callback(self._resolve_output)
Example #3
def ws_rpc_call(host, port, api, method, *args):
    cached = False  # TODO: decide whether to maintain cached=True
    if not cached:
        result = Future()
        try:
            loop = _event_loops[(host, port)]
        except KeyError as e:
            raise core.RPCError('Connection aborted: Websocket event loop for {}:{} not available yet'.format(host, port)) from e
        protocol = _monitoring_protocols[(host, port)]

        loop.call_soon_threadsafe(functools.partial(protocol.rpc_call, api, method, *args, result=result))
        try:
            return result.result(timeout=10)
        except TimeoutError:
            log.warning('timeout while calling {} {}({}) on {}:{}'.format(api_name(api), method, ', '.join(args), host, port))
            return None

    # else: check whether it is in the cache
    key = (api, method, args)
    try:
        result = _ws_rpc_cache[(host, port)][key]
        return result['result']
    except KeyError:
        # FIXME: distinguish when key is not in or when 'result' is not in
        #        (ie: deserialize exception if any)
        raise core.RPCError('{}: {}({}) not in websocket cache'.format(api, method, ', '.join(args)))
Example #4
 def wrapper(*args, **kwargs):
     g = func(*args, **kwargs)
     future = Future()
     subFuture = Future()
     subFuture.set_result(None)
     runSwing(_swingCoroutine, subFuture, g, future)
     return future
Example #5
def test_resolve_future_rejected(resolve):
    future = Future()
    promise = resolve(future)
    assert promise.is_pending
    future.set_exception(Exception("Future rejected"))
    assert promise.is_rejected
    assert_exception(promise.reason, Exception, "Future rejected")
Example #6
 def future_getResult(self):
   fut = Future()
   try:
     fut.set_result(self.getResult())
   except:
     fut.set_exception(sys.exc_info()[1])
   return fut
Example #7
            def process_done(future: Future):
                nonlocal self
                self._tasks_number -= 1
                if future.cancelled():
                    # process_task ended by cancel
                    self.requeue_message(basic_deliver.delivery_tag)
                else:
                    if future.exception():
                        exception = future.exception()
                        if (not isinstance(exception, RequeueMessage)
                                and not isinstance(exception, ChangeQueue)):
                            self.log.exception(exception)

                        self.requeue_message(
                            basic_deliver.delivery_tag
                        )
                        if isinstance(exception, ChangeQueue):
                            if not self.running.is_set():
                                self.running.clear()
                                self.log.info("Changing queues")
                                self.stop_consuming()
                                self._queue = self.another_queue(
                                    exception.host)
                                self.running.set()
                    else:
                        self.acknowledge_message(basic_deliver.delivery_tag)
Example #8
def run_in_background(target, *args, **kwargs):
    event = Event()
    future = Future()

    def extended_target(*args, **kwargs):
        try:
            future.set_result(target(*args, **kwargs))
        except Exception as ex:
            future.set_exception(ex)
        finally:
            event.set()

    thread = Thread(target=extended_target, args=args, kwargs=kwargs)
    thread.start()
    exception = None

    try:
        yield event
    except Exception as ex:
        exception = ex
    finally:
        if not exception:
            future.result(timeout=5)
        else:
            raise

        thread.join(timeout=5)
        assert not thread.is_alive()
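
A minimal usage sketch for the generator above, assuming the caller wraps it as a context manager (the wrapping and the work() function are illustrative, not part of the original):

from contextlib import contextmanager

background = contextmanager(run_in_background)

def work(x, y):
    return x + y

with background(work, 2, 3) as finished:
    # do other things while work() runs in its own thread
    finished.wait(timeout=5)  # the Event is set once work() has returned or raised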
Example #9
def test_add_future():
    expected_res = "EXPECTED"
    io = CocaineIO.instance()
    f = Future()
    io.post(f.set_result, expected_res)
    res = f.result(timeout=4)
    assert res == expected_res
Example #10
    def asyncRun(self, cb, *args, **kwargs):
        """Helper call to run a callback `cb` within the task's main loop.
        Returns an instance of Future() that can be waited on to obtain
        the result of the computation. The callback will be run only once.
        """
        def _future_execute(f, cb, *args, **kwargs):
            try:
                # Only execute `cb` if the future wasn't cancelled
                if f.set_running_or_notify_cancel():
                    f.set_result(cb(*args, **kwargs))
            except Exception as e:
                f.set_exception(e)
            # return False so that glib will automatically remove the
            # idle source
            return False

        def _future_cancel(handle, f):
            if f.cancelled():
                glib.source_remove(handle)

        f = Future()
        handle = glib.idle_add(partial(_future_execute, f,
                                       cb, *args, **kwargs))
        f.add_done_callback(partial(_future_cancel, handle))
        return f
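
A possible call site for asyncRun(); the task object and compute() function are placeholders for illustration:

def compute(x, y):
    return x + y

fut = task.asyncRun(compute, 2, 3)  # queued on the glib main loop via idle_add
print(fut.result(timeout=5))        # blocks the caller until the loop runs it -> 5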
Example #11
 def from_slave(self, **kwargs):
     if 'uuid' not in kwargs:
         kwargs['uuid'] = self.get_uuid()
     future = Future()
     self.futures[kwargs['uuid']] = future
     self.send(**kwargs)
     return future.result()
Example #12
    def test_task(self):
        results = []

        task = Task(function=QThread.currentThread)
        task.resultReady.connect(results.append)

        task.start()
        self.app.processEvents()

        self.assertSequenceEqual(results, [QThread.currentThread()])

        thread = QThread()
        thread.start()
        try:
            task = Task(function=QThread.currentThread)
            task.moveToThread(thread)

            self.assertIsNot(task.thread(), QThread.currentThread())
            self.assertIs(task.thread(), thread)
            results = Future()

            def record(value):
                # record the result value and the calling thread
                results.set_result((QThread.currentThread(), value))

            task.resultReady.connect(record, Qt.DirectConnection)
            task.start()
            f = task.future()
            emit_thread, thread_ = results.result(3)
            self.assertIs(f.result(3), thread)
            self.assertIs(emit_thread, thread)
            self.assertIs(thread_, thread)
        finally:
            thread.quit()
            thread.wait()
Example #13
def test_promisify_future(promisify):
    future = Future()
    promise = promisify(future)
    assert promise.is_pending
    future.set_result(1)
    assert promise.is_fulfilled
    assert promise.value == 1
Example #14
def test_promisify_future_rejected(promisify):
    future = Future()
    promise = promisify(future)
    assert promise.is_pending
    future.set_exception(Exception('Future rejected'))
    assert promise.is_rejected
    assert_exception(promise.reason, Exception, 'Future rejected')
Example #15
def declare_queue_side_effect(rabbitmq_url, queue_name):

    ftr = Future()

    ftr.set_result((queue_name, "{}_exchange".format(queue_name)))

    return ftr
Example #16
 def create_task(self, coro, cb=None):
     """
     threadsafe create_task from asyncio
     """
     future = Future()
     p = functools.partial(self._create_task, future, coro, cb)
     self.loop.call_soon_threadsafe(p)
     return future.result()
Example #17
 def run_until_complete(self, coro):
     """
     threadsafe run_until_completed from asyncio
     """
     future = Future()
     p = functools.partial(self._run_until_complete, future, coro)
     self.loop.call_soon_threadsafe(p)
     return future.result()
Example #18
 def submit(self, func, *args, **kwargs):
     from concurrent.futures import Future
     fut = Future()
     self.tasks[fut] = self.pool.apply_async(
         func, args, kwargs, fut.set_result, fut.set_exception
     )
     fut.add_done_callback(self.tasks.pop)
     return fut
Example #19
 def delete_subscriptions(self, subscriptionids):
     self.logger.info("delete_subscription")
     request = ua.DeleteSubscriptionsRequest()
     request.Parameters.SubscriptionIds = subscriptionids
     resp_fut = Future()
     mycallbak = partial(self._delete_subscriptions_callback, subscriptionids, resp_fut)
     self._uasocket.send_request(request, mycallbak)
     return resp_fut.result(self._timeout)
Example #20
 def create_subscription(self, params, callback):
     self.logger.info("create_subscription")
     request = ua.CreateSubscriptionRequest()
     request.Parameters = params
     resp_fut = Future()
     mycallbak = partial(self._create_subscription_callback, callback, resp_fut)
     self._uasocket.send_request(request, mycallbak)
     return resp_fut.result(self._timeout)
Example #21
def _await(*args, **kwargs):
    """
    Run an function from a synchronous code, in a new event loop, and await its
    result.

    Intended for debugging asyncs e.g. from ipython.

    Usage:

        result = _await(awaitable, *args, **kwargs)

    Also usable:

        result = _await(awaitable(*args, **kwargs))

    See also:
    https://github.com/django/asgiref/blob/master/asgiref/sync.py
    https://stackoverflow.com/a/48479665
    https://github.com/ipython/ipython/pull/11155
    """
    awaitable, args = args[0], args[1:]

    call_result = Future()
    if isinstance(awaitable, types.CoroutineType):
        assert not args
        assert not kwargs
        cor = awaitable
    else:
        cor = awaitable(*args, **kwargs)

    async def wrap():
        try:
            result = await cor
        except Exception as exc:
            call_result.set_exception(exc)
        else:
            call_result.set_result(result)

    try:
        main_loop = asyncio.get_event_loop()
    except RuntimeError:
        main_loop = None
    if main_loop and main_loop.is_running():
        raise RuntimeError("There's already a running loop, you probably shouldn't use this wrapper")

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(wrap())
    finally:
        try:
            if hasattr(loop, "shutdown_asyncgens"):
                loop.run_until_complete(loop.shutdown_asyncgens())
        finally:
            loop.close()
            asyncio.set_event_loop(main_loop)

    return call_result.result()
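
A small illustration of both calling styles described in the docstring, using a hypothetical coroutine:

import asyncio

async def fetch(value, delay=0.01):
    await asyncio.sleep(delay)
    return value

print(_await(fetch, 42))   # pass the coroutine function plus its arguments
print(_await(fetch(42)))   # or pass an already-created coroutine object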
Example #22
    def close(self):
        """Performs a clean shutdown of the tracer, flushing any traces that
        may have been buffered in memory.

        :return: Returns a :py:class:`futures.Future`
        """
        fut = Future()
        fut.set_result(True)
        return fut
Example #23
 def send_hello(self, url):
     hello = ua.Hello()
     hello.EndpointUrl = url
     header = ua.Header(ua.MessageType.Hello, ua.ChunkType.Single)
     future = Future()
     with self._lock:
         self._callbackmap[0] = future
     self._write_socket(header, hello)
     return ua.Acknowledge.from_binary(future.result(self.timeout))
Example #24
 def _execute_placement(self, agents, request):
     futures = []
     for agent in agents:
         child = self._children[agent.id]
         response = child.place(request)
         future = Future()
         future.set_result(response)
         futures.append(future)
     return futures
Example #25
        def dispose() -> None:
            future = Future()

            def cancel_handle() -> None:
                handle.cancel()
                future.set_result(0)

            self.loop.call_soon_threadsafe(cancel_handle)
            future.result()
Example #26
    def flush(self):
        """Flushes any trace data that may be buffered in memory, presumably
        out of the process.

        :return: Returns a :py:class:`futures.Future`
        """
        fut = Future()
        fut.set_result(True)
        return fut
Example #27
 def submit(self, fn, *args, **kwargs):
     future = Future()
     try:
         result = fn(*args, **kwargs)
     except Exception as exc:
         future.set_exception(exc)
     else:
         future.set_result(result)
     return future
Example #28
 def test_await_future(self):
     f = Future()
     def finish_later():
         time.sleep(0.1)
         f.set_result('future')
     Thread(target=finish_later).start()
     assert self.client.wait([f])
     assert f.done()
     assert f.result() == 'future'
Example #29
 def detect(self, previous=None):
   with self.__lock:
     future = Future()
     if previous != self._leader:
       future.set_result(self._leader)
       return future
     self._future_queue.append(future)
     future.set_running_or_notify_cancel()
     return future
Example #30
    def __init__(self, f_dict, *args, **kwargs):
        Future.__init__(self, *args, **kwargs)

        self.timeout = None

        self._mem_dict = f_dict
        self._lifespan = 30
        self._name = None
        self._start_time = time.time()
Example #31
class ClientContinuation(NativeResource):
    """
    A continuation of messages on a given stream-id.

    Create with :meth:`ClientConnection.new_stream()`.

    The stream will send no data until :meth:`ClientContinuation.activate()`
    is called. Call activate() when you're ready for callbacks and events to fire.

    Attributes:
        connection (ClientConnection): This stream's connection.

        closed_future (concurrent.futures.Future) : Future which completes with a result of None
            when the continuation has closed.
    """
    def __init__(self, handler, connection):
        # Do not instantiate directly, use ClientConnection.new_stream()
        super().__init__()
        self._handler = handler
        self.connection = connection  # type: ClientConnection
        self.closed_future = Future()  # type: Future
        self.closed_future.set_running_or_notify_cancel()  # prevent cancel

    def activate(self,
                 *,
                 operation: str,
                 headers: Sequence[Header] = None,
                 payload: ByteString = None,
                 message_type: MessageType,
                 flags: int = None,
                 on_flush: Callable = None):
        """
        Activate the stream by sending its first message.

        Use the returned future, or the `on_flush` callback, to be informed
        when the message is successfully written to the wire, or fails to send.

        activate() may only be called once, use send_message() to write further
        messages on this stream-id.

        Keyword Args:
            operation: Operation name for this stream.

            headers: Message headers.

            payload: Binary message payload.

            message_type: Message type.

            flags: Message flags. Values from :class:`MessageFlag` may be
                XORed together. Not all flags can be used with all message
                types, consult documentation.

            on_flush: Callback invoked when the message is successfully written
                to the wire, or fails to send. The function should take the
                following arguments and return nothing:

                    *   `error` (Optional[Exception]): None if the message was
                        successfully written to the wire, or an Exception
                        if it failed to send.

                    *   `**kwargs` (dict): Forward compatibility kwargs.

                This callback is always invoked on the connection's event-loop
                thread.

        Returns:
            A future which completes with a result of None if the
            message is successfully written to the wire,
            or an exception if the message fails to send.
        """

        flush_future = Future()
        flush_future.set_running_or_notify_cancel()  # prevent cancel

        # native code deals with simplified types
        headers, payload, message_type, flags = _to_binding_msg_args(
            headers, payload, message_type, flags)

        _awscrt.event_stream_rpc_client_continuation_activate(
            self._binding,
            # don't give binding a reference to self until activate() is called.
            # this reference is used for invoking callbacks, and its existence
            # keeps the python object alive until the closed callback fires
            self,
            operation,
            headers,
            payload,
            message_type,
            flags,
            partial(_on_message_flush, flush_future, on_flush))

        return flush_future

    def send_message(self,
                     *,
                     headers: Sequence[Header] = None,
                     payload: ByteString = None,
                     message_type: MessageType,
                     flags: int = None,
                     on_flush: Callable = None) -> 'concurrent.futures.Future':
        """
        Send a continuation message.

        Use the returned future, or the `on_flush` callback, to be informed
        when the message is successfully written to the wire, or fails to send.

        Note that the first message on a stream-id must be sent with activate(),
        send_message() is for all messages that follow.

        Keyword Args:
            headers: Message headers.

            payload: Binary message payload.

            message_type: Message type.

            flags: Message flags. Values from :class:`MessageFlag` may be
                XORed together. Not all flags can be used with all message
                types, consult documentation.

            on_flush: Callback invoked when the message is successfully written
                to the wire, or fails to send. The function should take the
                following arguments and return nothing:

                    *   `error` (Optional[Exception]): None if the message was
                        successfully written to the wire, or an Exception
                        if it failed to send.

                    *   `**kwargs` (dict): Forward compatibility kwargs.

                This callback is always invoked on the connection's event-loop
                thread.

        Returns:
            A future which completes with a result of None if the
            message is successfully written to the wire,
            or an exception if the message fails to send.
        """
        future = Future()
        future.set_running_or_notify_cancel()  # prevent cancel
        # native code deals with simplified types
        headers, payload, message_type, flags = _to_binding_msg_args(
            headers, payload, message_type, flags)

        _awscrt.event_stream_rpc_client_continuation_send_message(
            self._binding, headers, payload, message_type, flags,
            partial(_on_message_flush, future, on_flush))
        return future

    def is_closed(self):
        return _awscrt.event_stream_rpc_client_continuation_is_closed(
            self._binding)

    def _on_continuation_closed(self):
        try:
            self._handler.on_continuation_closed()
        finally:
            # ensure future completes, even if user callback had unhandled exception
            self.closed_future.set_result(None)

    def _on_continuation_message(self, headers, payload, message_type, flags):
        # transform from simple types to actual classes
        headers, payload, message_type, flags = _from_binding_msg_args(
            headers, payload, message_type, flags)
        self._handler.on_continuation_message(headers=headers,
                                              payload=payload,
                                              message_type=message_type,
                                              flags=flags)
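
A rough usage sketch of the continuation API documented above; the connection, handler, and operation name are assumed for illustration:

continuation = connection.new_stream(handler)       # handler: a ClientContinuationHandler
flush_future = continuation.activate(
    operation="ExampleOperation",                   # hypothetical operation name
    message_type=MessageType.APPLICATION_MESSAGE,   # assumed message type constant
)
flush_future.result(timeout=10)       # raises if the first message failed to send
continuation.closed_future.result()   # resolves to None once the stream has closed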
Example #32
 def on_done(future: Future) -> None:
     self.mw.progress.finish()
     hooks.media_files_did_export.remove(exported_media)
     # raises if exporter failed
     future.result()
     self.on_export_finished()
Example #33
 def __init__(self, func, args, kwargs):
     RunnableWrapper.__init__(self, func, args, kwargs)
     self.future = Future()
Example #34
 def put(self, coro):
     future = Future()
     p = fn.partial(self._add_task, future, coro)
     self.loop.call_soon_threadsafe(p)
     return future.result()
Example #35
    def Enrichment(self):
        assert self.clusterDataset is not None
        assert self.__state == State.Ready

        if not self.annotations.ontology:
            self.annotations.ontology = self.ontology

        self.error(1)
        self.warning([0, 1])

        if self.useAttrNames:
            clusterGenes = [
                v.name for v in self.clusterDataset.domain.attributes
            ]
            self.information(0)
        elif 0 <= self.geneAttrIndex < len(self.candidateGeneAttrs):
            geneAttr = self.candidateGeneAttrs[self.geneAttrIndex]
            clusterGenes = [
                str(ex[geneAttr]) for ex in self.clusterDataset
                if not numpy.isnan(ex[geneAttr])
            ]
            if any("," in gene for gene in clusterGenes):
                self.information(
                    0,
                    "Separators detected in cluster gene names. Assuming multiple genes per example."
                )
                clusterGenes = reduce(operator.iadd,
                                      (genes.split(",")
                                       for genes in clusterGenes), [])
            else:
                self.information(0)
        else:
            self.error(1, "Failed to extract gene names from input dataset!")
            return {}

        genesSetCount = len(set(clusterGenes))

        self.clusterGenes = clusterGenes = self.annotations.map_to_ncbi_id(
            clusterGenes).values()

        self.infoLabel.setText(
            "%i unique genes on input\n%i (%.1f%%) genes with known annotations"
            % (genesSetCount, len(clusterGenes), 100.0 * len(clusterGenes) /
               genesSetCount if genesSetCount else 0.0))

        referenceGenes = None
        if not self.useReferenceDataset or self.referenceDataset is None:
            self.information(2)
            self.information(1)
            referenceGenes = self.annotations.genes()

        elif self.referenceDataset is not None:
            if self.useAttrNames:
                referenceGenes = [
                    v.name for v in self.referenceDataset.domain.attributes
                ]
                self.information(1)
            elif geneAttr in (self.referenceDataset.domain.variables +
                              self.referenceDataset.domain.metas):
                referenceGenes = [
                    str(ex[geneAttr]) for ex in self.referenceDataset
                    if not numpy.isnan(ex[geneAttr])
                ]
                if any("," in gene for gene in clusterGenes):
                    self.information(
                        1,
                        "Separators detected in reference gene names. Assuming multiple genes per example."
                    )
                    referenceGenes = reduce(operator.iadd,
                                            (genes.split(",")
                                             for genes in referenceGenes), [])
                else:
                    self.information(1)
            else:
                self.information(1)
                referenceGenes = None

            if referenceGenes is None:
                referenceGenes = list(self.annotations.genes())
                self.referenceRadioBox.buttons[1].setText("Reference set")
                self.referenceRadioBox.buttons[1].setDisabled(True)
                self.information(
                    2,
                    "Unable to extract gene names from reference dataset. Using entire genome for reference"
                )
                self.useReferenceDataset = 0
            else:
                refc = len(referenceGenes)
                # referenceGenes = self.annotations.get_gene_names_translator(referenceGenes).values()
                self.referenceRadioBox.buttons[1].setText(
                    "Reference set (%i genes, %i matched)" %
                    (refc, len(referenceGenes)))
                self.referenceRadioBox.buttons[1].setDisabled(False)
                self.information(2)
        else:
            self.useReferenceDataset = 0
        if not referenceGenes:
            self.error(1, "No valid reference set")
            return {}

        self.referenceGenes = referenceGenes
        evidences = []
        for etype in go.evidenceTypesOrdered:
            if self.useEvidenceType[etype]:
                evidences.append(etype)
        aspect = ['Process', 'Component', 'Function'][self.aspectIndex]

        self.progressBarInit(processEvents=False)
        self.setBlocking(True)
        self.__state = State.Running

        if clusterGenes:
            f = self._executor.submit(self.annotations.get_enriched_terms,
                                      clusterGenes,
                                      referenceGenes,
                                      evidences,
                                      aspect=aspect,
                                      prob=self.probFunctions[self.probFunc],
                                      use_fdr=False,
                                      progress_callback=methodinvoke(
                                          self, "_progressBarSet", (float, )))
            fw = FutureWatcher(f, parent=self)
            fw.done.connect(self.__on_enrichment_done)
            fw.done.connect(fw.deleteLater)
            return
        else:
            f = Future()
            f.set_result({})
            self.__on_enrichment_done(f)
Example #36
    def setupScene(self):
        self.error()
        if self.data:
            attr = self.stringAttrs[self.imageAttr]
            titleAttr = self.allAttrs[self.titleAttr]
            assert self.thumbnailView.count() == 0
            size = QSizeF(self.imageSize, self.imageSize)

            for i, inst in enumerate(self.data):
                if not numpy.isfinite(inst[attr]):  # skip missing
                    continue
                url = self.urlFromValue(inst[attr])
                title = str(inst[titleAttr])

                thumbnail = GraphicsThumbnailWidget(QPixmap(), title=title)
                thumbnail.setThumbnailSize(size)
                thumbnail.setToolTip(url.toString())
                thumbnail.instance = inst
                self.thumbnailView.addThumbnail(thumbnail)

                if url.isValid() and url.isLocalFile():
                    reader = QImageReader(url.toLocalFile())
                    image = reader.read()
                    if image.isNull():
                        error = reader.errorString()
                        thumbnail.setToolTip(
                            thumbnail.toolTip() + "\n" + error)
                        self._errcount += 1
                    else:
                        pixmap = QPixmap.fromImage(image)
                        thumbnail.setPixmap(pixmap)
                        self._successcount += 1

                    future = Future()
                    future.set_result(image)
                    future._reply = None
                elif url.isValid():
                    future = self.loader.get(url)

                    @future.add_done_callback
                    def set_pixmap(future, thumb=thumbnail):
                        if future.cancelled():
                            return

                        assert future.done()

                        if future.exception():
                            # Should be some generic error image.
                            pixmap = QPixmap()
                            thumb.setToolTip(thumb.toolTip() + "\n" +
                                             str(future.exception()))
                        else:
                            pixmap = QPixmap.fromImage(future.result())

                        thumb.setPixmap(pixmap)

                        self._noteCompleted(future)
                else:
                    future = None

                self.items.append(_ImageItem(i, thumbnail, url, future))

            if any(it.future is not None and not it.future.done()
                   for it in self.items):
                self.info.setText("Retrieving...\n")
            else:
                self._updateStatus()
Example #37
    def activate(self,
                 *,
                 operation: str,
                 headers: Sequence[Header] = None,
                 payload: ByteString = None,
                 message_type: MessageType,
                 flags: int = None,
                 on_flush: Callable = None):
        """
        Activate the stream by sending its first message.

        Use the returned future, or the `on_flush` callback, to be informed
        when the message is successfully written to the wire, or fails to send.

        activate() may only be called once, use send_message() to write further
        messages on this stream-id.

        Keyword Args:
            operation: Operation name for this stream.

            headers: Message headers.

            payload: Binary message payload.

            message_type: Message type.

            flags: Message flags. Values from :class:`MessageFlag` may be
                XORed together. Not all flags can be used with all message
                types, consult documentation.

            on_flush: Callback invoked when the message is successfully written
                to the wire, or fails to send. The function should take the
                following arguments and return nothing:

                    *   `error` (Optional[Exception]): None if the message was
                        successfully written to the wire, or an Exception
                        if it failed to send.

                    *   `**kwargs` (dict): Forward compatibility kwargs.

                This callback is always invoked on the connection's event-loop
                thread.

        Returns:
            A future which completes with a result of None if the
            message is successfully written to the wire,
            or an exception if the message fails to send.
        """

        flush_future = Future()
        flush_future.set_running_or_notify_cancel()  # prevent cancel

        # native code deals with simplified types
        headers, payload, message_type, flags = _to_binding_msg_args(
            headers, payload, message_type, flags)

        _awscrt.event_stream_rpc_client_continuation_activate(
            self._binding,
            # don't give binding a reference to self until activate() is called.
            # this reference is used for invoking callbacks, and its existence
            # keeps the python object alive until the closed callback fires
            self,
            operation,
            headers,
            payload,
            message_type,
            flags,
            partial(_on_message_flush, flush_future, on_flush))

        return flush_future
Example #38
 def callback(f: Future) -> None:
     if f.cancelled() and self._event_loop_thread_id not in (
             None, threading.get_ident()):
         self.call(scope.cancel)
Example #39
 def __enter__(self) -> T_co:
     self._enter_future = Future()
     self._exit_future = self._portal.start_task_soon(self.run_async_cm)
     cm = self._enter_future.result()
     return cast(T_co, cm)
Example #40
def mkfuture(result):
    fut = Future()
    fut.set_result(result)
    return fut
Example #41
 def on_done(fut: Future) -> None:
     self.updateModelsList(fut.result())
     self.maybe_select_provided_notetype()
Example #42
 def __call__(self, *args, **kwargs):
     # You can't call AsyncToSync from a thread with a running event loop
     try:
         event_loop = asyncio.get_event_loop()
     except RuntimeError:
         pass
     else:
         if event_loop.is_running():
             raise RuntimeError(
                 "You cannot use AsyncToSync in the same thread as an async event loop - "
                 "just await the async function directly.")
     # Make a future for the return information
     call_result = Future()
     # Get the source thread
     source_thread = threading.current_thread()
     # Make a CurrentThreadExecutor we'll use to idle in this thread - we
     # need one for every sync frame, even if there's one above us in the
     # same thread.
     if hasattr(self.executors, "current"):
         old_current_executor = self.executors.current
     else:
         old_current_executor = None
     current_executor = CurrentThreadExecutor()
     self.executors.current = current_executor
     # Use call_soon_threadsafe to schedule a synchronous callback on the
     # main event loop's thread if it's there, otherwise make a new loop
     # in this thread.
     try:
         if not (self.main_event_loop
                 and self.main_event_loop.is_running()):
             # Make our own event loop - in a new thread - and run inside that.
             loop = asyncio.new_event_loop()
             loop_executor = ThreadPoolExecutor(max_workers=1)
             loop_future = loop_executor.submit(
                 self._run_event_loop,
                 loop,
                 self.main_wrap(args, kwargs, call_result, source_thread,
                                sys.exc_info()),
             )
             if current_executor:
                 # Run the CurrentThreadExecutor until the future is done
                 current_executor.run_until_future(loop_future)
             # Wait for future and/or allow for exception propagation
             loop_future.result()
         else:
             # Call it inside the existing loop
             self.main_event_loop.call_soon_threadsafe(
                 self.main_event_loop.create_task,
                 self.main_wrap(args, kwargs, call_result, source_thread,
                                sys.exc_info()),
             )
             if current_executor:
                 # Run the CurrentThreadExecutor until the future is done
                 current_executor.run_until_future(call_result)
     finally:
         # Clean up any executor we were running
         if hasattr(self.executors, "current"):
             del self.executors.current
         if old_current_executor:
             self.executors.current = old_current_executor
     # Wait for results from the future.
     return call_result.result()
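
For context, a minimal sketch of calling this wrapper (the pattern asgiref exposes as async_to_sync) from synchronous code:

async def get_data():
    return "data"

sync_get_data = AsyncToSync(get_data)
print(sync_get_data())   # drives the coroutine to completion and returns "data"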
Example #43
class ClientConnection(NativeResource):
    """A client connection for the event-stream RPC protocol.

    Use :meth:`ClientConnection.connect()` to establish a new
    connection.

    Note that the network connection stays alive until it is closed,
    even if no local references to the connection object remain.
    The user should store a reference to any connections, and call
    :meth:`close()` when they are done with them to avoid leaking resources.

    Attributes:
        host_name (str): Remote host name.

        port (int): Remote port.

        shutdown_future (concurrent.futures.Future[None]): Completes when this
            connection has finished shutting down. Future will contain a
            result of None, or an exception indicating why shutdown occurred.
    """

    __slots__ = [
        'host_name', 'port', 'shutdown_future', '_connect_future', '_handler'
    ]

    def __init__(self, host_name, port, handler):
        # Do not instantiate directly, use static connect method
        super().__init__()
        self.host_name = host_name  # type: str
        self.port = port  # type: int
        self.shutdown_future = Future()  # type: Future
        self.shutdown_future.set_running_or_notify_cancel()  # prevent cancel
        self._connect_future = Future()  # type: Future
        self._connect_future.set_running_or_notify_cancel()  # prevent cancel
        self._handler = handler  # type: ClientConnectionHandler

    @classmethod
    def connect(
        cls,
        *,
        handler: ClientConnectionHandler,
        host_name: str,
        port: int,
        bootstrap: ClientBootstrap = None,
        socket_options: Optional[SocketOptions] = None,
        tls_connection_options: Optional[TlsConnectionOptions] = None
    ) -> 'concurrent.futures.Future':
        """Asynchronously establish a new ClientConnection.

        Args:
            handler: Handler for connection events.

            host_name: Connect to host.

            port: Connect to port.

            bootstrap: Client bootstrap to use when initiating socket connection.
                If None is provided, the default singleton is used.

            socket_options: Optional socket options.
                If None is provided, then default options are used.

            tls_connection_options: Optional TLS
                connection options. If None is provided, then the connection will
                be attempted over plain-text.

        Returns:
            concurrent.futures.Future: A Future which completes when the connection succeeds or fails.
            If successful, the Future will contain None.
            Otherwise it will contain an exception.
            If the connection is successful, it will be made available via
            the handler's on_connection_setup callback.
            Note that this network connection stays alive until it is closed,
            even if no local references to the connection object remain.
            The user should store a reference to any connections, and call
            :meth:`close()` when they are done with them to avoid leaking resources.
        """

        if not socket_options:
            socket_options = SocketOptions()

        # Connection is not made available to user until setup callback fires
        connection = cls(host_name, port, handler)

        if not bootstrap:
            bootstrap = ClientBootstrap.get_or_create_static_default()

        # connection._binding is set within the following call
        _awscrt.event_stream_rpc_client_connection_connect(
            host_name, port, bootstrap, socket_options, tls_connection_options,
            connection)

        return connection._connect_future

    def _on_connection_setup(self, error_code):
        if error_code:
            connection = None
            error = awscrt.exceptions.from_code(error_code)
        else:
            connection = self
            error = None

        try:
            self._handler.on_connection_setup(connection=connection,
                                              error=error)
        finally:
            # ensure future completes, even if user callback had unhandled exception
            if error:
                self._connect_future.set_exception(error)
            else:
                self._connect_future.set_result(None)

    def _on_connection_shutdown(self, error_code):
        reason = awscrt.exceptions.from_code(
            error_code) if error_code else None
        try:
            self._handler.on_connection_shutdown(reason=reason)
        finally:
            # ensure future completes, even if user callback had unhandled exception
            if reason:
                self.shutdown_future.set_exception(reason)
            else:
                self.shutdown_future.set_result(None)

    def _on_protocol_message(self, headers, payload, message_type, flags):
        # transform from simple types to actual classes
        headers, payload, message_type, flags = _from_binding_msg_args(
            headers, payload, message_type, flags)
        self._handler.on_protocol_message(headers=headers,
                                          payload=payload,
                                          message_type=message_type,
                                          flags=flags)

    def close(self):
        """Close the connection.

        Shutdown is asynchronous. This call has no effect if the connection is
        already closed or closing.

        Note that, if the network connection hasn't already ended,
        `close()` MUST be called to avoid leaking resources. The network
        connection will not terminate simply because there are no references
        to the connection object.

        Returns:
            concurrent.futures.Future: This connection's :attr:`shutdown_future`,
            which completes when shutdown has finished.
        """
        # TODO: let user pass their own exception/error-code/reason for closing
        _awscrt.event_stream_rpc_client_connection_close(self._binding)
        return self.shutdown_future

    def is_open(self):
        """
        Returns:
            bool: True if this connection is open and usable, False otherwise.
            Check :attr:`shutdown_future` to know when the connection is completely
            finished shutting down.
        """
        return _awscrt.event_stream_rpc_client_connection_is_open(
            self._binding)

    def send_protocol_message(
            self,
            *,
            headers: Optional[Sequence[Header]] = None,
            payload: Optional[ByteString] = None,
            message_type: MessageType,
            flags: Optional[int] = None,
            on_flush: Callable = None) -> 'concurrent.futures.Future':
        """Send a protocol message.

        Protocol messages use stream-id 0.

        Use the returned future, or the `on_flush` callback, to be informed
        when the message is successfully written to the wire, or fails to send.

        Keyword Args:
            headers: Message headers.

            payload: Binary message payload.

            message_type: Message type.

            flags: Message flags. Values from :class:`MessageFlag` may be
                XORed together. Not all flags can be used with all message
                types, consult documentation.

            on_flush: Callback invoked when the message is successfully written
                to the wire, or fails to send. The function should take the
                following arguments and return nothing:

                    *   `error` (Optional[Exception]): None if the message was
                        successfully written to the wire, or an Exception
                        if it failed to send.

                    *   `**kwargs` (dict): Forward compatibility kwargs.

                This callback is always invoked on the connection's event-loop
                thread.

        Returns:
            A future which completes with a result of None if the
            message is successfully written to the wire,
            or an exception if the message fails to send.
        """
        future = Future()
        future.set_running_or_notify_cancel()  # prevent cancel

        # native code deals with simplified types
        headers, payload, message_type, flags = _to_binding_msg_args(
            headers, payload, message_type, flags)

        _awscrt.event_stream_rpc_client_connection_send_protocol_message(
            self._binding, headers, payload, message_type, flags,
            partial(_on_message_flush, future, on_flush))
        return future

    def new_stream(
            self,
            handler: 'ClientContinuationHandler') -> 'ClientContinuation':
        """
        Create a new stream.

        The stream will send no data until :meth:`ClientContinuation.activate()`
        is called. Call activate() when you're ready for callbacks and events to fire.

        Args:
            handler: Handler to process continuation messages and state changes.

        Returns:
            The new continuation object.
        """
        continuation = ClientContinuation(handler, self)
        continuation._binding = _awscrt.event_stream_rpc_client_connection_new_stream(
            self)
        return continuation
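
A condensed connect-and-close sketch based on the docstrings above; the handler and endpoint are placeholders:

connect_future = ClientConnection.connect(
    handler=handler,        # a ClientConnectionHandler implementation (assumed)
    host_name="127.0.0.1",  # placeholder endpoint
    port=8033,
)
connect_future.result(timeout=10)       # raises if the connection attempt failed
connection = handler.connection         # assumes the handler stored it in on_connection_setup()
connection.close()                      # returns shutdown_future
connection.shutdown_future.result()     # None on clean shutdown, otherwise raises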
Example #44
 def __init__(self,
              level: int = logging.NOTSET,
              maxlen: int | None = None) -> None:
     super().__init__(level=level)
     self.cached_records = deque([], maxlen)
     self._emit_future = Future()
Example #45
 def reset(self):
     self.future = Future()
Example #46
    def setUpClass(cls, *_):
        """
        Starts the thread which launches ryu apps

        Create a testing bridge, add a port, setup the port interfaces. Then
        launch the ryu apps for testing pipelined. Gets the references
        to apps launched by using futures, mocks the redis policy_dictionary
        of ue_mac_controller
        """
        super(CWFRestartResilienceTest, cls).setUpClass()
        warnings.simplefilter('ignore')
        cls.service_manager = create_service_manager([], ['ue_mac', 'arpd'])

        ue_mac_controller_reference = Future()
        arp_controller_reference = Future()
        testing_controller_reference = Future()

        def mock_thread_safe(cmd, body):
            cmd(body)

        loop_mock = MagicMock()
        loop_mock.call_soon_threadsafe = mock_thread_safe

        test_setup = TestSetup(
            apps=[
                PipelinedController.UEMac,
                PipelinedController.Arp,
                PipelinedController.Testing,
                PipelinedController.StartupFlows,
            ],
            references={
                PipelinedController.UEMac: ue_mac_controller_reference,
                PipelinedController.Arp: arp_controller_reference,
                PipelinedController.Testing: testing_controller_reference,
                PipelinedController.StartupFlows: Future(),
            },
            config={
                'setup_type': 'CWF',
                'bridge_name': cls.BRIDGE,
                'bridge_ip_address': cls.BRIDGE_IP_ADDRESS,
                'enforcement': {
                    'poll_interval': 5
                },
                'internal_ip_subnet': '192.168.0.0/16',
                'nat_iface': 'eth2',
                'local_ue_eth_addr': False,
                'allow_unknown_arps': False,
                'enodeb_iface': 'eth1',
                'qos': {
                    'enable': False
                },
                'clean_restart': False,
                'quota_check_ip': '1.2.3.4',
                'enable_nat': False,
                'dpi': {
                    'enabled': False,
                    'mon_port': 'mon1',
                    'mon_port_number': 32769,
                    'idle_timeout': 42,
                },
            },
            mconfig=PipelineD(ue_ip_block=cls.UE_BLOCK, ),
            loop=loop_mock,
            service_manager=cls.service_manager,
            integ_test=False,
        )

        BridgeTools.create_bridge(cls.BRIDGE, cls.IFACE)
        BridgeTools.create_internal_iface(cls.BRIDGE, cls.DPI_PORT, cls.DPI_IP)

        cls.thread = start_ryu_app_thread(test_setup)

        cls.ue_mac_controller = ue_mac_controller_reference.result()
        cls.testing_controller = testing_controller_reference.result()
        cls.arp_controller = arp_controller_reference.result()
        cls.arp_controller.add_arp_response_flow = MagicMock()
Example #47
 def __init__(self):
     self.future = Future()
Example #48
 def submit(self, func, *args, **kwds):
     future = self._result(Future(), func, *args, **kwds)
     return future
Example #49
    def get(self, url):
        future = Future()
        url = QUrl(url)
        request = QNetworkRequest(url)
        request.setRawHeader(b"User-Agent", b"OWImageViewer/1.0")
        request.setAttribute(
            QNetworkRequest.CacheLoadControlAttribute,
            QNetworkRequest.PreferCache
        )

        # Future yielding a QNetworkReply when finished.
        reply = self._netmanager.get(request)
        future._reply = reply

        @future.add_done_callback
        def abort_on_cancel(f):
            # abort the network request on future.cancel()
            if f.cancelled() and f._reply is not None:
                f._reply.abort()

        n_redir = 0

        def on_reply_ready(reply, future):
            nonlocal n_redir
            # schedule deferred delete to ensure the reply is closed
            # otherwise we will leak file/socket descriptors
            reply.deleteLater()
            future._reply = None
            if reply.error() == QNetworkReply.OperationCanceledError:
                # The network request was cancelled
                reply.close()
                future.cancel()
                return

            if reply.error() != QNetworkReply.NoError:
                # XXX Maybe convert the error into standard
                # http and urllib exceptions.
                future.set_exception(Exception(reply.errorString()))
                reply.close()
                return

            # Handle a possible redirection
            location = reply.attribute(
                QNetworkRequest.RedirectionTargetAttribute)

            if location is not None and n_redir < 1:
                n_redir += 1
                location = reply.url().resolved(location)
                # Retry the original request with a new url.
                request = QNetworkRequest(reply.request())
                request.setUrl(location)
                newreply = self._netmanager.get(request)
                future._reply = newreply
                newreply.finished.connect(
                    partial(on_reply_ready, newreply, future))
                reply.close()
                return

            reader = QImageReader(reply)
            image = reader.read()
            reply.close()

            if image.isNull():
                future.set_exception(Exception(reader.errorString()))
            else:
                future.set_result(image)

        reply.finished.connect(partial(on_reply_ready, reply, future))
        return future
Example #50
 def forward(self, input_tensor):
     res = Future()
     self._supervisor.send_command(commands.ForwardPass(res, input_tensor))
     return res
Example #51
 def result(future: Future):
     res = future.result()
     return res[risk_measure] if risk_measure and\
         isinstance(res, (MultipleRiskMeasureResult, MultipleRiskMeasureFuture)) else res
Example #52
 def on_done(fut: Future):
     self.show()
     res = fut.result()  # Required to check for errors
Example #53
 def readFrame(self):
     """Empty read frame that is never ready"""
     return Future()
Example #54
 def add_data() -> Future[dtype]:  # type: ignore
     future = Future()
     # simulate something that isn't immediately ready when function returns
     QTimer.singleShot(10, partial(future.set_result, data))
     return future
Example #55
    def submit(self, fn, *args, **kwargs):
        """Submit Tornado Coroutine to IOLoop in daemonized thread.

        :param fn: Tornado Coroutine to execute
        :param args: Args to pass to coroutine
        :param kwargs: Kwargs to pass to coroutine
        :returns concurrent.futures.Future: future result of coroutine
        """
        if not self.is_ready():
            raise ThreadNotStartedError("The thread has not been started yet, "
                                        "make sure you call start() first")

        future = Future()

        def execute():
            """Executes fn on the IOLoop."""
            try:
                result = gen.maybe_future(fn(*args, **kwargs))
            except Exception:
                # The function we ran didn't return a future and instead raised
                # an exception. Let's pretend that it returned this dummy
                # future with our stack trace.
                f = gen.Future()
                f.set_exc_info(sys.exc_info())
                on_done(f)
            else:
                result.add_done_callback(on_done)

        def on_done(f):
            """Sets tornado.Future results to the concurrent.Future."""

            if not f.exception():
                future.set_result(f.result())
                return

            # if f is a tornado future, then it has exc_info()
            if hasattr(f, 'exc_info'):
                exception, traceback = f.exc_info()[1:]

            # else it's a concurrent.future
            else:
                # python2's concurrent.future has exception_info()
                if hasattr(f, 'exception_info'):
                    exception, traceback = f.exception_info()

                # python3's concurrent.future just has exception()
                else:
                    exception = f.exception()
                    traceback = None

            # python2 needs exc_info set explicitly
            if _FUTURE_HAS_EXC_INFO:
                future.set_exception_info(exception, traceback)
                return

            # python3 just needs the exception, exc_info works fine
            future.set_exception(exception)

        self._io_loop.add_callback(execute)

        return future
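
A hedged usage sketch for submit() above, assuming runner is an instance of the surrounding class with its IOLoop thread already started:

from tornado import gen

@gen.coroutine
def double(x):
    yield gen.sleep(0.01)
    raise gen.Return(x * 2)

future = runner.submit(double, 21)
assert future.result(timeout=5) == 42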
Example #56
class RemoteTask(TaskDefinition):
    def __init__(self, taskdef: TaskDefinition, cluster):
        kwargs = taskdef.serialize()
        super().__init__(**kwargs)
        self.conn = None
        self.nonce = 0
        self.cluster = cluster
        self.future = Future()
        self.awaitable = asyncio.wrap_future(self.future)
        self.status = WAIT
        self.error = None
        self.result = None

    def __await__(self):
        return self.awaitable.__await__()

    @property
    def done(self) -> bool:
        return self.future.done()

    def destroy(self) -> None:
        self.cluster.destroy(self.id)

    def set_status(self, status: str) -> None:
        # sanity checks
        if self.status == FAIL and status == DONE:
            raise RuntimeError("Can't complete a failed task")

        if self.status == DONE and status == FAIL:
            raise RuntimeError("Can't fail a completed task")

        # update status
        self.status = status

    def set_error(self, error: str) -> None:
        self.set_status(FAIL)
        self.error = error
        if not self.future.done():
            self.future.set_exception(TaskError(error))

    def set_result(self, result: any, result_type: any = 'any') -> None:
        # unpack type & deserialize result
        result_type = type_from_description(result_type)
        result = result_type.deserialize(result)

        self.set_status(DONE)
        self.result = result
        if not self.future.done():
            self.future.set_result(result)

    async def wait_for_init(self, timeout=30) -> None:
        if self.status != WAIT:
            raise RuntimeError(f"Can't await task with status {self.status}")

        slept = 0
        interval = 0.2
        while True:
            if self.status == WORK:
                return
            if self.status == FAIL:
                raise RuntimeError(
                    f'Awaited task failed with error: {self.error}')

            if slept > timeout:
                raise TimeoutError('Task took too long to initialize')

            await asyncio.sleep(interval)
            slept += interval

    async def call(self, method, args={}):
        if self.status != WORK:
            await self.wait_for_init()
            # raise RuntimeError(
            #     f'RPC is only available when status = WORK, was {self.status}. '
            #     f'Attempted to call {method}')

        return await self.conn.rpc.call(method, args)

    async def stop(self):
        # special case RPC - it always causes a send exception
        await self.call('stop')

    def __getattr__(self, method):
        async def magic_rpc(**kwargs):
            return await self.call(method, kwargs)

        return magic_rpc
Example #57
0
    def subscribe(self, topic, qos, callback=None):
        """Subscribe to a topic filter (async).

        The client sends a SUBSCRIBE packet and the server responds with a SUBACK.

        subscribe() may be called while the device is offline, though the async
        operation cannot complete successfully until the connection resumes.

        Once subscribed, `callback` is invoked each time a message matching
        the `topic` is received. It is possible for such messages to arrive before
        the SUBACK is received.

        Args:
            topic (str): Subscribe to this topic filter, which may include wildcards.
            qos (QoS): Maximum requested QoS that server may use when sending messages to the client.
                The server may grant a lower QoS in the SUBACK (see returned Future)
            callback: Optional callback invoked when message received.
                Function should take the following arguments and return nothing:

                    *   `topic` (str): Topic receiving message.

                    *   `payload` (bytes): Payload of message.

                    *   `dup` (bool): DUP flag. If True, this might be re-delivery
                        of an earlier attempt to send the message.

                    *   `qos` (:class:`QoS`): Quality of Service used to deliver the message.

                    *   `retain` (bool): Retain flag. If True, the message was sent
                        as a result of a new subscription being made by the client.

                    *   `**kwargs` (dict): Forward-compatibility kwargs.

        Returns:
            Tuple[concurrent.futures.Future, int]: Tuple containing a Future and
            the ID of the SUBSCRIBE packet. The Future completes when a
            SUBACK is received from the server. If successful, the Future will
            contain a dict with the following members:

                *   ['packet_id'] (int): ID of the SUBSCRIBE packet being acknowledged.

                *   ['topic'] (str): Topic filter of the SUBSCRIBE packet being acknowledged.

                *   ['qos'] (:class:`QoS`): Maximum QoS that was granted by the server.
                    This may be lower than the requested QoS.

            If unsuccessful, the Future contains an exception. The exception
            will be a :class:`SubscribeError` if a SUBACK was received
            in which the server rejected the subscription. Other exception
            types indicate other errors with the operation.
        """

        future = Future()
        packet_id = 0

        if callback:
            uses_old_signature = self._check_uses_old_message_callback_signature(callback)

            def callback_wrapper(topic, payload, dup, qos, retain):
                if uses_old_signature:
                    callback(topic=topic, payload=payload)
                else:
                    callback(topic=topic, payload=payload, dup=dup, qos=QoS(qos), retain=retain)

        else:
            callback_wrapper = None

        def suback(packet_id, topic, qos, error_code):
            if error_code:
                future.set_exception(awscrt.exceptions.from_code(error_code))
            else:
                qos = _try_qos(qos)
                if qos is None:
                    future.set_exception(SubscribeError(topic))
                else:
                    future.set_result(dict(
                        packet_id=packet_id,
                        topic=topic,
                        qos=qos,
                    ))

        try:
            assert callable(callback) or callback is None
            assert isinstance(qos, QoS)
            packet_id = _awscrt.mqtt_client_connection_subscribe(
                self._binding, topic, qos.value, callback_wrapper, suback)
        except Exception as e:
            future.set_exception(e)

        return future, packet_id
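
The docstring above describes the (Future, packet_id) pair that subscribe() returns and the per-message callback it accepts. Below is a minimal usage sketch; it assumes `mqtt_connection` is an already-connected `awscrt.mqtt.Connection`, and the topic filter and callback are illustrative, not part of the original snippet.

from awscrt.mqtt import QoS

def on_message(topic, payload, dup, qos, retain, **kwargs):
    # Called for every message matching the filter; messages may arrive
    # before the SUBACK is received.
    print(f"received on {topic}: {payload!r}")

# `mqtt_connection` is assumed to exist (an already-connected Connection).
subscribe_future, packet_id = mqtt_connection.subscribe(
    topic="sensors/+/temperature",
    qos=QoS.AT_LEAST_ONCE,
    callback=on_message,
)

# Block until the SUBACK arrives; the Future raises SubscribeError if the
# server rejected the subscription, otherwise it holds a dict with
# packet_id, topic, and the granted qos.
suback = subscribe_future.result(timeout=10)
print("granted QoS:", suback["qos"])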
Example #58
0
    def start_loop(self, connection):
        """Connection open callback."""

        self.is_open.set()
        self.process(None, (DECLARE_DEAD_LETTERS, (), {}), Future())
Example #59
0
    async def _call_func(self, func: Callable, args: tuple,
                         kwargs: Dict[str, Any], future: Future) -> None:
        def callback(f: Future) -> None:
            if f.cancelled() and self._event_loop_thread_id not in (
                    None, threading.get_ident()):
                self.call(scope.cancel)

        try:
            retval = func(*args, **kwargs)
            if iscoroutine(retval):
                with CancelScope() as scope:
                    if future.cancelled():
                        scope.cancel()
                    else:
                        future.add_done_callback(callback)

                    retval = await retval
        except self._cancelled_exc_class:
            future.cancel()
        except BaseException as exc:
            if not future.cancelled():
                future.set_exception(exc)

            # Let base exceptions fall through
            if not isinstance(exc, Exception):
                raise
        else:
            if not future.cancelled():
                future.set_result(retval)
        finally:
            scope = None  # type: ignore[assignment]
Example #60
0
class CachedHandler(logging.Handler):
    """Handler which stores past records

    This is used to populate Maestral's status and error interfaces. The method
    :meth:`wait_for_emit` can be used from another thread to block until a new record is
    emitted, for instance to react to state changes.

    :param level: Initial log level. Defaults to NOTSET.
    :param maxlen: Maximum number of records to store. If ``None``, all records will be
        stored. Defaults to ``None``.
    """

    cached_records: deque[logging.LogRecord]
    _emit_future: Future

    def __init__(self,
                 level: int = logging.NOTSET,
                 maxlen: int | None = None) -> None:
        super().__init__(level=level)
        self.cached_records = deque([], maxlen)
        self._emit_future = Future()

    def emit(self, record: logging.LogRecord) -> None:
        """
        Logs the specified log record and saves it to the cache.

        :param record: Log record.
        """
        self.cached_records.append(record)

        # notify any waiting coroutines that we have a status change
        try:
            self._emit_future.set_result(True)
        except InvalidStateError:
            pass

    def wait_for_emit(self, timeout: float | None) -> bool:
        """
        Blocks until a new record is emitted. This is effectively a longpoll API.

        :param timeout: Maximum time to block before returning.
        :returns: ``True`` if there was a status change, ``False`` in case of a timeout.
        """
        try:
            self._emit_future.result(timeout=timeout)
        except concurrent.futures.TimeoutError:
            return False

        self._emit_future = Future()  # reset future
        return True

    def getLastMessage(self) -> str:
        """
        :returns: The log message of the last record or an empty string.
        """
        try:
            last_record = self.cached_records[-1]
            return last_record.getMessage()
        except IndexError:
            return ""

    def getAllMessages(self) -> list[str]:
        """
        :returns: A list of all record messages.
        """
        return [r.getMessage() for r in self.cached_records]

    def clear(self) -> None:
        """
        Clears all cached records.
        """
        self.cached_records.clear()
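
The class docstring notes that :meth:`wait_for_emit` can be called from another thread to block until a new record is emitted. A minimal, hypothetical sketch of that pattern (the logger name and the worker thread are illustrative, not part of Maestral):

import logging
import threading

logger = logging.getLogger("status-demo")  # hypothetical logger name
logger.setLevel(logging.INFO)
handler = CachedHandler(level=logging.INFO, maxlen=100)
logger.addHandler(handler)

def watch_status():
    # Long-poll from a worker thread: each call blocks until a record is
    # emitted or the 5-second timeout expires (returns False on timeout).
    while handler.wait_for_emit(timeout=5.0):
        print("status changed:", handler.getLastMessage())

threading.Thread(target=watch_status, daemon=True).start()
logger.info("sync started")  # wakes up the watcher thread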