Example 1
    def validate(self) -> Optional[Awaitable]:
        """Validate that the configuration is set up properly and the necessary
        libraries are available.

        If any configuration is amiss, raises a FilestorageConfigError.
        """
        coroutines: List[Awaitable] = []
        # Verify that any provided filters are valid.
        for filter_ in self._filters:
            if inspect.isclass(filter_):
                filter_name: str = filter_.__name__  # type: ignore
                raise FilestorageConfigError(
                    f'Filter {filter_name} is a class, not an instance. '
                    f'Did you mean to use "filters=[{filter_name}()]" instead?'
                )
            result = filter_.validate()
            if iscoroutine(result) or isfuture(result):
                coroutines.append(cast(Awaitable, result))

        result = self._validate()
        if iscoroutine(result) or isfuture(result):
            coroutines.append(cast(Awaitable, result))

        if not coroutines:
            return None
        return gather(*coroutines)
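
A minimal usage sketch for this method, assuming `handler` is an instance exposing the `validate()` shown above; the caller treats a `None` return as "nothing asynchronous to wait for" and awaits the gathered checks otherwise.

from typing import Awaitable, Optional

async def ensure_valid(handler) -> None:
    # validate() returns None when every check ran synchronously,
    # or a gather() of the pending asynchronous checks otherwise.
    pending: Optional[Awaitable] = handler.validate()
    if pending is not None:
        await pending
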
Example 2
async def test_interledger_multi_send_inquiry():

    t = TransferToMulti()
    t.payload = {}
    t.payload['nonce'] = str(uuid4().int)
    t.payload['data'] = b"dummy"

    init = MockInitiator([t])
    resp1 = MockMultiResponder()
    resp2 = MockMultiResponder()
    i = Interledger(init, [resp1, resp2], True)

    i.transfers = [t]

    # the processing to be observed
    task = asyncio.ensure_future(i.send_inquiry())
    assert not task.done()
    await task

    assert len(i.transfers_inquired) == 1

    tr = i.transfers[0]
    assert tr.status == TransferStatus.INQUIRED

    assert len(tr.inquiry_tasks) == 2
    assert asyncio.isfuture(tr.inquiry_tasks[0])
    assert asyncio.isfuture(tr.inquiry_tasks[1])

    assert tr.inquiry_results == [None] * 2

    await asyncio.wait(tr.inquiry_tasks, return_when=asyncio.ALL_COMPLETED)
    assert tr.inquiry_tasks[0].done()
    assert tr.inquiry_tasks[1].done()
Example 3
    def shutdown(self):
        if asyncio.isfuture(self.hash_future):
            if not self.hash_future.done():
                self.hash_future.cancel()

        if asyncio.isfuture(self.event_loop_future):
            if not self.event_loop_future.done():
                self.event_loop_future.cancel()
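
The same check-and-cancel step appears twice above; a small helper makes the intent explicit. This is a sketch, not part of the original class, and the helper name is illustrative.

import asyncio

def cancel_if_pending(fut) -> None:
    # Cancel only genuine asyncio futures/tasks that have not finished yet.
    if asyncio.isfuture(fut) and not fut.done():
        fut.cancel()

# shutdown() above would then reduce to:
#     cancel_if_pending(self.hash_future)
#     cancel_if_pending(self.event_loop_future)
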
Example 4
    def kill(
            self,
            msg: Union[str,
                       None] = None) -> Union[bool, plumpy.futures.Future]:
        """
        Kill the process and all the children calculations it called

        :param msg: message
        """
        self.node.logger.info(f'Request to kill Process<{self.node.pk}>')

        had_been_terminated = self.has_terminated()

        result = super().kill(msg)

        # Only kill children if we could be killed ourselves
        if result is not False and not had_been_terminated:
            killing = []
            for child in self.node.called:
                if self.runner.controller is None:
                    self.logger.info(
                        'no controller available to kill child<%s>', child.pk)
                    continue
                try:
                    result = self.runner.controller.kill_process(
                        child.pk, f'Killed by parent<{self.node.pk}>')
                    result = asyncio.wrap_future(
                        result)  # type: ignore[arg-type]
                    if asyncio.isfuture(result):
                        killing.append(result)
                except ConnectionClosed:
                    self.logger.info(
                        'no connection available to kill child<%s>', child.pk)
                except UnroutableError:
                    self.logger.info(
                        'kill signal was unable to reach child<%s>', child.pk)

            if asyncio.isfuture(result):
                # We ourselves are waiting to be killed so add it to the list
                killing.append(result)  # type: ignore[arg-type]

            if killing:
                # We are waiting for things to be killed, so return the 'gathered' future
                kill_future = plumpy.futures.gather(*killing)
                result = self.loop.create_future()

                def done(done_future: plumpy.futures.Future):
                    is_all_killed = all(done_future.result())
                    result.set_result(
                        is_all_killed)  # type: ignore[union-attr]

                kill_future.add_done_callback(done)

        return result
Example 5
    async def run(self) -> List[ResultsHolder]:
        if not asyncio.iscoroutine(self.callable) and not asyncio.isfuture(self.callable):
            coro = asyncio.coroutine(self.callable)
        else:
            coro = self.callable
        if asyncio.iscoroutine(coro) or asyncio.isfuture(coro):
            children = await coro
        else:
            children = await coro()

        return _coerce(children)
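
Note that `asyncio.coroutine()` was deprecated in Python 3.8 and removed in 3.11. A sketch of an equivalent dispatch that avoids it, assuming the target is either a plain callable, a coroutine object, or a future (`run_target` is an illustrative name, not part of the original class):

import asyncio
from typing import Any

async def run_target(target: Any) -> Any:
    # Already awaitable (coroutine object or future): await it directly.
    if asyncio.iscoroutine(target) or asyncio.isfuture(target):
        return await target
    # Otherwise treat it as a zero-argument callable and await its result
    # only if calling it produced a coroutine (i.e. it was an async function).
    result = target()
    if asyncio.iscoroutine(result):
        return await result
    return result
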
Example 6
 async def mover(getter, putter):
     while True:
         data = getter()
         if asyncio.iscoroutine(data):
             data = asyncio.ensure_future(data)
         if asyncio.isfuture(data):
             data = await data
         result = putter(data)
         if asyncio.iscoroutine(result):
             result = asyncio.ensure_future(result)
         if asyncio.isfuture(result):
             result = await result
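
A usage sketch wiring the pump above between two asyncio queues (queue and variable names are illustrative): `inbox.get()` returns a coroutine while `outbox.put_nowait()` returns a plain value, so both branches of `mover()` are exercised.

import asyncio

async def demo() -> None:
    inbox: asyncio.Queue = asyncio.Queue()
    outbox: asyncio.Queue = asyncio.Queue()

    # Schedule the pump defined above as a background task.
    pump = asyncio.ensure_future(mover(inbox.get, outbox.put_nowait))

    await inbox.put("hello")
    assert await outbox.get() == "hello"
    pump.cancel()  # mover() loops forever, so stop it explicitly

asyncio.run(demo())
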
Example 7
async def schedule_jobs():
    """Schedule jobs concurrently."""
    print(f"{_current_time()} -> Send kickoff email")

    # Create a job which also represents a coroutine
    single_job = start_job(_MILLISECOND, uuid4().hex)
    assert asyncio.iscoroutine(single_job)

    # Grab a job record from the coroutine
    single_record = await single_job
    assert _is_valid_record(single_record)

    # Task is a wrapped coroutine which also represents a future
    single_task = asyncio.create_task(start_job(_HOUR, uuid4().hex))
    assert asyncio.isfuture(single_task)

    # Futures are different from coroutines in that they can be cancelled
    single_task.cancel()
    try:
        await single_task
    except asyncio.exceptions.CancelledError:
        assert single_task.cancelled()

    # Gather coroutines for batch start
    batch_jobs = [start_job(.01, uuid4().hex) for _ in range(10)]
    batch_records = await asyncio.gather(*batch_jobs)

    # We get the same amount of records as we have coroutines
    assert len(batch_records) == len(batch_jobs)

    for batch_record in batch_records:
        assert _is_valid_record(batch_record)

    print(f"{_current_time()} -> Send confirmation email")
Example 8
    async def add(self, item) -> None:
        """ Start a new async task. If this is not possible, then block
        until we can start running the task.

        Args:
            item (coroutine, future): A task to be run.

        Raises:
            ValueError: If input is not a coroutine or future.

        Returns:
            None: None.
        """
        if not asyncio.iscoroutine(item) and not asyncio.isfuture(item):
            error = "Expecting a coroutine or a Future"
            raise ValueError(error)
        if item in self._set:
            return None
        while True:
            if len(self._set) >= self._maxsize:
                await asyncio.sleep(self._wait_time)
            else:
                break
        logging.debug("Number of tasks currently running: : " +
                      str(len(self._set)))
        item = asyncio.create_task(item)
        self._set.add(item)
        item.add_done_callback(self._remove)
Example 9
    def run_until_complete(self, future):
        self._check_closed()
        # self._check_running()

        new_task = not asyncio.isfuture(future)
        future = asyncio.ensure_future(future, loop=self)
        if new_task:
            # An exception is raised if the future didn't complete, so there
            # is no need to log the "destroy pending task" message
            future._log_destroy_pending = False
        future.add_done_callback(_run_until_complete_cb)

        try:
            self.run_forever()
        except:  # noqa: E722, B001
            if new_task and future.done() and not future.cancelled():
                # The coroutine raised a BaseException. Consume the exception
                # to not log a warning, the caller doesn't have access to the
                # local task.
                future.exception()
            raise

        finally:
            future.remove_done_callback(_run_until_complete_cb)

        if not future.done():
            raise RuntimeError("Event loop stopped before Future completed")

        return future.result()
Example 10
    async def wrapper(self, *args):
        ext = args[-1]
        ext = ext.lower()

        # Call wrapped function to get document
        ret = wrapped(self, *args)
        if asyncio.isfuture(ret) or asyncio.iscoroutine(ret):
            name, html = await ret
        else:
            name, html = ret

        # Convert using Calibre
        try:
            mimetype, contents = convert.html_to(
                html,
                ext,
                self.application.config,
            )
        except convert.UnsupportedFormat:
            self.set_status(404)
            self.set_header('Content-Type', 'text/plain')
            return await self.finish("Unsupported format: %s" % ext)

        # Return document
        self.set_header('Content-Type', mimetype)
        if name:
            self.set_header('Content-Disposition',
                            'attachment; filename="%s.%s"' % (name, ext))
        else:
            self.set_header('Content-Disposition', 'attachment')
        for chunk in await contents:
            self.write(chunk)
        return await self.finish()
Example 11
async def skein_client(principal=None, keytab=None):
    """Return a shared skein client object.

    Calls with the same principal & keytab will return the same client object
    (if one exists).
    """
    key = (principal, keytab)
    client = _skein_client_cache.get(key)
    if client is None:
        kwargs = dict(
            principal=principal,
            keytab=keytab,
            security=skein.Security.new_credentials(),
        )
        fut = get_running_loop().run_in_executor(
            None, lambda: skein.Client(**kwargs))
        # Save the future first so any concurrent calls will wait on the same
        # future for generating the client
        _skein_client_cache[key] = fut
        client = await fut
        # Replace the future now that the operation is done
        _skein_client_cache[key] = client
    elif asyncio.isfuture(client):
        client = await client
    return client
Example 12
    def add_raw_event(self, ven_id, event, callback=None):
        """
        Add a new event to the queue for a specific VEN.
        :param str ven_id: The ven_id to which this event should be distributed.
        :param dict event: The event (as a dict or as a objects.Event instance)
                           that contains the event details.
        :param callable callback: A callback that will receive the opt status for this event.
                                  This callback receives ven_id, event_id, opt_type as its arguments.
        """
        if utils.getmember(event, 'response_required') == 'always':
            if callback is None:
                logger.warning("You did not provide a 'callback', which means you won't know if the "
                               "VEN will opt in or opt out of your event. You should consider adding "
                               "a callback for this.")
            elif not asyncio.isfuture(callback):
                args = inspect.signature(callback).parameters
                if not all(['ven_id' in args, 'event_id' in args, 'opt_type' in args]):
                    raise ValueError("The 'callback' must have at least the following parameters: "
                                     "'ven_id' (str), 'event_id' (str), 'opt_type' (str). Please fix "
                                     "your 'callback' handler.")

        event_id = utils.getmember(event, 'event_descriptor.event_id')
        # Create the event queue if it does not exist yet
        if ven_id not in self.events:
            self.events[ven_id] = []

        # Add event to the queue
        self.events[ven_id].append(event)
        self.events_updated[ven_id] = True

        # Add the callback for the response to this event
        if callback is not None:
            self.event_callbacks[event_id] = (event, callback)
        return event_id
Example 13
        async def run_callback():
            with kiwipy.capture_exceptions(kiwi_future):
                result = callback(*args, **kwargs)
                while asyncio.isfuture(result):
                    result = await result

                kiwi_future.set_result(result)
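
The loop above generalizes to any result that may be a future resolving to yet another future. A standalone sketch of just that unwrapping step (the function name is illustrative):

import asyncio

async def resolve_fully(value):
    # Keep awaiting while the result is still a future, so a future that
    # resolves to another future eventually yields a concrete value.
    while asyncio.isfuture(value):
        value = await value
    return value
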
Example 14
async def test_future_task(communicator: kiwipy.rmq.RmqCommunicator):
    """
    Test a task that returns a future, meaning it will be resolved to a value later
    """
    TASK = 'The meaning?'
    RESULT = 42
    result_future = asyncio.Future()

    tasks = []

    def on_task(_comm, task):
        tasks.append(task)
        return result_future

    await communicator.add_task_subscriber(on_task)
    task_future = await communicator.task_send(TASK)

    # The task has given us a future
    future_from_task = await task_future
    assert asyncio.isfuture(future_from_task)

    # Now resolve the future which should give us a result
    result_future.set_result(42)

    result = await future_from_task

    assert tasks[0] == TASK
    assert RESULT == result
Example 15
 async def _handle(self, data):
     op = data.get('op') or ''
     # NOTE if op is empty or whitespace string, then exception raises.
     # we ensure that it is not in server.vim.
     # but op below may be empty or whitespace string. that's ok.
     idx = op.find(' ')
     if idx >= 0:
         op, args = op[:idx], op[idx:].lstrip()
     else:
         op, args = op, ''
     if op == 'response':
         resp = data['args']
         fut = _global_resp.pop(resp['id'], None)
         if asyncio.isfuture(fut) and not fut.done():
             if resp['code'] == 0:
                 fut.set_result(resp['data'])
             else:
                 fut.set_exception(VimException(resp['data']))
         return
     else:
         if hasattr(self.worker, op):
             kwargs = data['args']
             if not kwargs.get('bang'):
                 kwargs.pop('bang', None)
             if not kwargs.get('range'):
                 for i in (
                         'range',
                         'line1',
                         'line2',
                 ):
                     kwargs.pop(i, None)
             await getattr(self.worker, op)(args, **kwargs)
             return
     self._exception(f'unknown method: {op or ""}', 'raw data: %s' % data)
Example 16
 async def test_api_calls_return_a_future_when_run_in_async_mode(
         self, mock_request):
     self.client.run_async = True
     future = self.client.api_test()
     self.assertTrue(asyncio.isfuture(future))
     resp = await future
     self.assertTrue(resp["ok"])
Example 17
    async def _on_response(self, message):
        """
        Called when we get a message on our response queue

        :param message: The response message
        :type message: :class:`aio_pika.IncomingMessage`
        """
        correlation_id = message.correlation_id
        try:
            response_future = self._awaiting_response.pop(correlation_id)
        except KeyError:
            _LOGGER.error("Got a response for an unknown id '%s':\n%s",
                          correlation_id, message)
        else:
            try:
                response = self._response_decode(message.body)
            except Exception:
                _LOGGER.error('Failed to decode message body:\n%s%s',
                              message.body, traceback.format_exc())
                raise
            else:
                utils.response_to_future(response, response_future)
                try:
                    # If the response was a future it means we should get another message that
                    # resolves that future
                    if asyncio.isfuture(response_future.result()):
                        self._awaiting_response[
                            correlation_id] = response_future.result()
                except Exception:  # pylint: disable=broad-except
                    pass
Example 18
    def run_until_complete(self, future: asyncio.Future) -> Any:
        self._check_running()

        new_task = False

        if not asyncio.isfuture(future):
            future = asyncio.ensure_future(future, loop=self)

            # We wrapped `future` in a new Task since it was not a Future.
            new_task = True

            # An exception is raised if the new task doesn't complete, so there is no need to log the "destroy
            # pending task" message.
            future._log_destroy_pending = False
        else:
            if _helpers.get_future_loop(future) is not self:
                raise ValueError("Future does not belong to this loop")

        future.add_done_callback(_run_until_complete_cb)
        try:
            self._run_mainloop()
        except:
            if new_task and future.done() and not future.cancelled():
                # The coroutine raised a BaseException. Consume the exception to not log a warning (Future
                # will log a warning if its exception is not retrieved), the caller doesn't have access to the
                # task wrapper we made.
                future.exception()
            raise
        finally:
            future.remove_done_callback(_run_until_complete_cb)

        if not future.done():
            raise RuntimeError('Event loop stopped before Future completed.')

        return future.result()
Example 19
    async def get(
        self,
        key: Hashable,
        func: Callable[[Hashable], Coroutine[Any, Any, Any]],
    ) -> Any:
        '''Run async ``func`` and cache its return value by ``key``.

        The ``key`` should be hashable, and the function will be called with it as
        its sole argument. For multiple simultaneous calls with the same key, only
        one will actually be called, and others will wait and return the same
        (cached) value.
        '''
        async with self.lock:
            cached = self.cache.get(key)
            if cached is None:
                coro = func(key)
                fu = asyncio.create_task(coro)
                self.cache[key] = fu

        if asyncio.isfuture(cached):  # pending
            return await cached  # type: ignore
        elif cached is not None:  # cached
            return cached
        else:  # not cached
            r = await fu
            self.cache[key] = r
            return r
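
A usage sketch for this cache, assuming an instance that provides the `lock`, `cache`, and `get()` members shown above; `slow_lookup` and the call counter are illustrative. Concurrent `get()` calls with the same key should trigger exactly one underlying call.

import asyncio

calls = 0

async def slow_lookup(key):
    # Stand-in for an expensive asynchronous operation.
    global calls
    calls += 1
    await asyncio.sleep(0.01)
    return f"value-for-{key}"

async def demo(cache) -> None:
    # Three simultaneous requests for the same key share one pending task...
    results = await asyncio.gather(*(cache.get("k", slow_lookup) for _ in range(3)))
    # ...so the underlying function runs once and every caller sees the same value.
    assert calls == 1
    assert results == ["value-for-k"] * 3
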
Example 20
  def __init__(self, identifier, hashable_key, type_spec, target_future):
    """Creates a cached value.

    Args:
      identifier: An instance of `CachedValueIdentifier`.
      hashable_key: A hashable source value key, if any, or `None` if not
        applicable in this context, for use during cleanup.
      type_spec: The type signature of the target, an instance of `tff.Type`.
      target_future: An asyncio future that returns an instance of
        `executor_value_base.ExecutorValue` that represents a value embedded in
        the target executor.

    Raises:
      TypeError: If the arguments are of the wrong types.
    """
    py_typecheck.check_type(identifier, CachedValueIdentifier)
    py_typecheck.check_type(hashable_key, collections.Hashable)
    py_typecheck.check_type(type_spec, computation_types.Type)
    if not asyncio.isfuture(target_future):
      raise TypeError('Expected an asyncio future, got {}'.format(
          py_typecheck.type_string(type(target_future))))
    self._identifier = identifier
    self._hashable_key = hashable_key
    self._type_spec = type_spec
    self._target_future = target_future
    self._computed_result = None
Example 21
    async def _send_future_response(self, future, reply_to, correlation_id):
        """
        The RPC call returned a future which means we need to send a pending response
        and send a further message when the future resolves.  If it resolves to another future
        we should send out a further pending response and so on.

        :param future: the future from the RPC call
        :type future: :class:`asyncio.Future`
        :param reply_to: the recipient
        :param correlation_id: the correlation id
        """
        try:
            # Keep looping in case we're in a situation where a future resolves to a future etc.
            while asyncio.isfuture(future):
                # Send out a message saying that we're waiting for a future to complete
                await self._send_response(reply_to, correlation_id, utils.pending_response())
                future = await future
        except kiwipy.CancelledError as exc:
            # Send out a cancelled response
            await self._send_response(reply_to, correlation_id, utils.cancelled_response(str(exc)))
        except Exception as exc:  # pylint: disable=broad-except
            # Send out an exception response
            await self._send_response(reply_to, correlation_id, utils.exception_response(exc))
        else:
            # We have a final result so send that as the response
            await self._send_response(reply_to, correlation_id, utils.result_response(future))
Example 22
    async def asgi_http(self, scope, receive, send):
        try:
            # sets the initial body value, to be replaced by the "real"
            # body file instance once it's created
            body = None

            # creates the context dictionary so that this new "pseudo" request
            # can have its own context for futures placement
            ctx = dict(start_task=None, encoding="utf-8")

            # runs the asynchronous building of the intermediate structures
            # to get to the final WSGI compliant environment dictionary
            start_response = await self._build_start_response(ctx, send)
            sender = await self._build_sender(ctx, send, start_response)
            body = await self._build_body(receive)
            environ = await self._build_environ(scope, body, sender)

            self.prepare()
            try:
                result = self.application_l(environ,
                                            start_response,
                                            ensure_gen=False)
                self.set_request_ctx()
            finally:
                self.restore()

            # verifies if the resulting value is an awaitable and if
            # that's the case waits for its "real" result value (async)
            if inspect.isawaitable(result):
                result = await result  #@UndefinedVariable

            # waits for the start (code and headers) send operation to be
            # completed (async) so that we can proceed with body sending
            self._ensure_start(ctx, start_response)
            await ctx["start_task"]

            # iterates over the complete set of chunks in the response
            # iterator to send each of them to the client side
            for chunk in (result if result else []):
                if asyncio.iscoroutine(chunk):
                    await chunk
                elif asyncio.isfuture(chunk):
                    await chunk
                elif isinstance(chunk, int):
                    continue
                else:
                    if legacy.is_string(chunk):
                        chunk = chunk.encode(ctx["encoding"])
                    await send({
                        "type": "http.response.body",
                        "body": chunk,
                        "more_body": True
                    })

            # sends the final empty chunk indicating the end
            # of the body payload to the "owning" server
            await send({"type": "http.response.body", "body": b""})
        finally:
            if body: body.close()
            self.unset_request_ctx()
Example 23
    async def disconnect(self, event_args: DisconnectedEventArgs = None):
        did_disconnect = False

        if not self._is_disconnecting:
            self._is_disconnecting = True
            try:
                try:
                    if self._receiver:
                        await self._receiver.close()
                        # TODO: investigate if 'dispose' is necessary
                        did_disconnect = True
                except Exception:
                    traceback.print_exc()

                self._receiver = None

                if did_disconnect:
                    if callable(self.disconnected):
                        # pylint: disable=not-callable
                        if iscoroutinefunction(self.disconnected) or isfuture(
                                self.disconnected):
                            await self.disconnected(
                                self, event_args
                                or DisconnectedEventArgs.empty)
                        else:
                            self.disconnected(
                                self, event_args
                                or DisconnectedEventArgs.empty)
            finally:
                self._is_disconnecting = False
Example 24
    async def _propagate_fatal_exc(self,
                                   happy_path_awaitable: Awaitable[T]) -> T:
        happy_path: asyncio.Future[T]

        if asyncio.isfuture(happy_path_awaitable):
            happy_path = cast('asyncio.Future[T]', happy_path_awaitable)
        else:
            happy_path = self.loop.create_task(happy_path_awaitable)

        if not self._completion.done():
            futures: List[Awaitable[Any]] = [happy_path, self._completion]

            await asyncio.wait(futures,
                               timeout=None,
                               return_when=asyncio.FIRST_COMPLETED)

            if not self._completion.done():
                return happy_path.result()

        # At this point we know completion is done

        if not happy_path.done():
            happy_path.cancel()

        # Raise exception
        self._completion.result()

        # Fallback: there was already an orderly exit
        raise RuntimeError('future is pending after join')
Example 25
    def run_coroutine(self, coro: Awaitable) -> Any:
        """Runs a given awaitable on the connection thread's event loop.
        Cannot be called from within the connection thread.

        .. testcode::

            import anki_vector

            async def my_coroutine():
                print("Running on the connection thread")
                return "Finished"

            with anki_vector.Robot() as robot:
                result = robot.conn.run_coroutine(my_coroutine())

        :param coro: The coroutine, task or any other awaitable which should be executed.
        :returns: The result of the awaitable's execution.
        """
        if threading.current_thread() is self._thread:
            raise VectorAsyncException("Attempting to invoke async from same thread."
                                       "Instead you may want to use 'run_soon'")
        if asyncio.iscoroutinefunction(coro) or asyncio.iscoroutine(coro):
            return self._run_coroutine(coro)
        if asyncio.isfuture(coro):
            async def future_coro():
                return await coro
            return self._run_coroutine(future_coro())
        if callable(coro):
            async def wrapped_coro():
                return coro()
            return self._run_coroutine(wrapped_coro())
        raise VectorAsyncException("\n\nInvalid parameter to run_coroutine: {}\n"
                                   "This function expects a coroutine, task, or awaitable.".format(type(coro)))
Example 26
    async def start(self):
        """ Monitor registered background tasks, and raise their exceptions.
        """
        self._exception_q = janus.Queue()
        try:
            while True:
                exc = await self._exception_q.async_q.get()
                raise exc

        except asyncio.CancelledError:
            # Cancel any tasks that will no longer be monitored
            while self._pending_tasks:
                task = self._pending_tasks.pop()
                if not task.cancelled():
                    task.cancel()

                # We can't await a concurrent.futures future; make sure it comes from asyncio
                if asyncio.isfuture(task):
                    await task
            raise

        finally:
            try:
                exc = self._exception_q.async_q.get_nowait()
                raise exc
            except asyncio.QueueEmpty:
                pass
Example 27
async def schedule_jobs():
    """Schedule jobs concurrently."""
    # Start a job which also represents a coroutine
    single_job = start_job(_DELAY_SMALL, uuid4().hex)
    assert asyncio.iscoroutine(single_job)

    # Grab a job record from the coroutine
    single_record = await single_job
    assert _is_valid_record(single_record)

    # Task is a wrapped coroutine which also represents a future
    single_task = asyncio.create_task(start_job(_DELAY_LARGE, uuid4().hex))
    assert asyncio.isfuture(single_task)

    # Futures are different from coroutines in that they can be cancelled
    single_task.cancel()
    try:
        await single_task
    except asyncio.exceptions.CancelledError:
        assert single_task.cancelled()

    # Gather coroutines for batch start
    batch_jobs = [start_job(_DELAY_SMALL, uuid4().hex) for _ in range(10)]
    batch_records = await asyncio.gather(*batch_jobs)

    # We get the same amount of records as we have coroutines
    assert len(batch_records) == len(batch_jobs)
    assert all(_is_valid_record(record) for record in batch_records)
Example 28
 def test_start_stop(self):
     self.assertTrue(
         asyncio.isfuture(
             self.order_book_tracker._order_book_snapshot_router_task))
     self.order_book_tracker.stop()
     self.assertIsNone(
         self.order_book_tracker._order_book_snapshot_router_task)
     self.order_book_tracker.start()
Example 29
 async def test_api_calls_return_a_future_when_run_in_async_mode(self):
     self.client.token = "xoxb-api_test"
     self.client.run_async = True
     future = self.client.api_test()
     self.assertTrue(asyncio.isfuture(future))
     resp = await future
     self.assertEqual(200, resp.status_code)
     self.assertTrue(resp["ok"])
Example 30
        def callback():
            if not future.cancelled():
                with futures.capture_exceptions(future):
                    result = func(*args, **kwargs)
                    if asyncio.isfuture(result):
                        result = aio_future_to_thread(result)

                    future.set_result(result)
Example 31
    async def test_read_exception_on_wait(self, buffer, loop):
        read_task = loop.create_task(buffer.read())
        await asyncio.sleep(0)
        assert asyncio.isfuture(buffer._waiter)

        buffer.feed_eof()
        buffer.set_exception(ValueError())

        with pytest.raises(ValueError):
            await read_task
Example 32
    def test_read_exception_on_wait(self):
        read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertTrue(asyncio.isfuture(self.buffer._waiter))

        self.buffer.feed_eof()
        self.buffer.set_exception(ValueError())

        self.assertRaises(
            ValueError, self.loop.run_until_complete, read_task)
Example 33
    def test_wrap_future(self):

        def run(arg):
            return (arg, threading.get_ident())
        ex = concurrent.futures.ThreadPoolExecutor(1)
        f1 = ex.submit(run, 'oi')
        f2 = asyncio.wrap_future(f1, loop=self.loop)
        res, ident = self.loop.run_until_complete(f2)
        self.assertTrue(asyncio.isfuture(f2))
        self.assertEqual(res, 'oi')
        self.assertNotEqual(ident, threading.get_ident())
Example 34
    async def test_read_cancelled(self, buffer, loop):
        read_task = loop.create_task(buffer.read())
        await asyncio.sleep(0)
        waiter = buffer._waiter
        assert asyncio.isfuture(waiter)

        read_task.cancel()
        with pytest.raises(asyncio.CancelledError):
            await read_task
        assert waiter.cancelled()
        assert buffer._waiter is None

        buffer.feed_data(b'test', 4)
        assert buffer._waiter is None
Example 35
    def test_read_cancelled(self):
        read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        waiter = self.buffer._waiter
        self.assertTrue(asyncio.isfuture(waiter))

        read_task.cancel()
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, read_task)
        self.assertTrue(waiter.cancelled())
        self.assertIsNone(self.buffer._waiter)

        self.buffer.feed_data(b'test', 4)
        self.assertIsNone(self.buffer._waiter)
Example 36
async def test_data_file(loop, buf, conn):
    req = ClientRequest(
        'POST', URL('http://python.org/'),
        data=io.BufferedReader(io.BytesIO(b'*' * 2)),
        loop=loop)
    assert req.chunked
    assert isinstance(req.body, payload.BufferedReaderPayload)
    assert req.headers['TRANSFER-ENCODING'] == 'chunked'

    resp = await req.send(conn)
    assert asyncio.isfuture(req._writer)
    await resp.wait_for_close()
    assert req._writer is None
    assert buf.split(b'\r\n\r\n', 1)[1] == \
        b'2\r\n' + b'*' * 2 + b'\r\n0\r\n\r\n'
    await req.close()
Example 37
async def test_data_stream(loop, buf, conn):
    @aiohttp.streamer
    def gen(writer):
        writer.write(b'binary data')
        writer.write(b' result')

    req = ClientRequest(
        'POST', URL('http://python.org/'), data=gen(), loop=loop)
    assert req.chunked
    assert req.headers['TRANSFER-ENCODING'] == 'chunked'

    resp = await req.send(conn)
    assert asyncio.isfuture(req._writer)
    await resp.wait_for_close()
    assert req._writer is None
    assert buf.split(b'\r\n\r\n', 1)[1] == \
        b'b\r\nbinary data\r\n7\r\n result\r\n0\r\n\r\n'
    await req.close()
Example 38
    def test_isfuture(self):
        class MyFuture:
            _asyncio_future_blocking = None

            def __init__(self):
                self._asyncio_future_blocking = False

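        # asyncio.isfuture() duck-types on the `_asyncio_future_blocking` marker:
        # it returns True only for objects whose class defines the attribute and
        # whose value on the object is not None.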
        self.assertFalse(asyncio.isfuture(MyFuture))
        self.assertTrue(asyncio.isfuture(MyFuture()))
        self.assertFalse(asyncio.isfuture(1))

        # As `isinstance(Mock(), Future)` returns `False`
        self.assertFalse(asyncio.isfuture(mock.Mock()))

        f = self._new_future(loop=self.loop)
        self.assertTrue(asyncio.isfuture(f))
        self.assertFalse(asyncio.isfuture(type(f)))

        # As `isinstance(Mock(Future), Future)` returns `True`
        self.assertTrue(asyncio.isfuture(mock.Mock(type(f))))

        f.cancel()
Example 39
 def test_api_calls_return_a_response_when_run_in_sync_mode(self, mock_request):
     resp = self.client.api_test()
     self.assertFalse(asyncio.isfuture(resp))
     self.assertTrue(resp["ok"])