Example #1
class CloseableQueue(Queue):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__closed__ = Event()

    def is_closed(self):
        return self.__closed__.is_set()

    def close(self):
        self.__closed__.set()

    async def wait_until_closed(self):
        if self.__closed__.is_set():
            return True
        else:
            await self.__closed__.wait()
            return True

    @staticmethod
    def from_iter(itr):
        c = CloseableQueue()
        for i in itr:
            c.put_nowait(i)
        c.close()
        return c
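A minimal usage sketch, not from the original project, assuming the class above was defined with Queue and Event imported from asyncio: drain items until the queue is both closed and empty.

import asyncio

async def demo():
    q = CloseableQueue.from_iter("abc")  # pre-filled and closed by from_iter
    while not (q.is_closed() and q.empty()):
        print(q.get_nowait())            # a, b, c
    print(await q.wait_until_closed())   # True: already closed

asyncio.run(demo())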
Example #2
async def _execute(*tileables: Tuple[TileableType],
                   session: AbstractSession = None,
                   wait: bool = True,
                   show_progress: Union[bool, str] = 'auto',
                   progress_update_interval: Union[int, float] = 1,
                   cancelled: asyncio.Event = None,
                   **kwargs):
    def _attach_session(fut: asyncio.Future):
        fut.result()
        for t in tileables:
            t._attach_session(session)

    async_session = session.to_async()
    execution_info = await async_session.execute(*tileables, **kwargs)
    execution_info.add_done_callback(_attach_session)
    cancelled = cancelled or asyncio.Event()

    if wait:
        progress = None
        if show_progress:  # pragma: no cover
            try:
                progress = _new_progress()
                progress.send(None)
            except ImportError:
                if show_progress != 'auto':
                    raise
                else:
                    show_progress = False

        if show_progress:
            while not cancelled.is_set():
                try:
                    await asyncio.wait_for(asyncio.shield(execution_info),
                                           progress_update_interval)
                    # done
                    if not cancelled.is_set():
                        progress.send(100)
                    break
                except asyncio.TimeoutError:
                    # timeout
                    if not cancelled.is_set():
                        progress.send(execution_info.progress() * 100)
            if cancelled.is_set():
                # cancel execution
                execution_info.cancel()
                execution_info.remove_done_callback(_attach_session)
                await execution_info
        else:
            _, pending = await asyncio.wait(
                [execution_info, cancelled.wait()],
                return_when=asyncio.FIRST_COMPLETED)
            if cancelled.is_set():
                execution_info.remove_done_callback(_attach_session)
            for fut in pending:
                fut.cancel()
            await execution_info
    else:
        return execution_info
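The wait_for plus shield calls above form the core idiom here: poll for completion at a fixed interval without cancelling the underlying work on timeout. A standalone sketch with hypothetical names, not part of the library above:

import asyncio

async def poll_until_done(work: asyncio.Task, interval: float = 0.2):
    while True:
        try:
            # shield() keeps the timeout from cancelling the work itself
            await asyncio.wait_for(asyncio.shield(work), interval)
            print("done")
            break
        except asyncio.TimeoutError:
            print("still running...")  # report progress here

async def demo():
    task = asyncio.create_task(asyncio.sleep(0.5))
    await poll_until_done(task)

asyncio.run(demo())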
Example #3
async def peers_handler(
    *,
    event: Mapping,
    freeze: asyncio.Event,
    ourselves: Peer,
    autoclean: bool = True,
):
    """
    Handle a single update of the peers by us or by other operators.

    When an operator with a higher priority appears, switch to the freeze-mode.
    When these operators disappear or become presumably dead, resume the event handling.

    The freeze object is passed both to the peers handler to set/clear it,
    and to all the resource handlers to check its value when the events arrive
    (see `create_tasks` and `run` functions).
    """

    # Silently ignore the peering objects which are not ours to worry about.
    body = event['object']
    name = body.get('metadata', {}).get('name', None)
    namespace = body.get('metadata', {}).get('namespace', None)
    if namespace != ourselves.namespace or name != ourselves.name:
        return

    # Find if we are still the highest priority operator.
    pairs = body.get('status', {}).items()
    peers = [Peer(id=opid, name=name, **opinfo) for opid, opinfo in pairs]
    dead_peers = [peer for peer in peers if peer.is_dead]
    prio_peers = [
        peer for peer in peers
        if not peer.is_dead and peer.priority > ourselves.priority
    ]
    same_peers = [
        peer for peer in peers if not peer.is_dead
        and peer.priority == ourselves.priority and peer.id != ourselves.id
    ]

    if autoclean and dead_peers:
        # NB: sync and blocking, but this is fine.
        await apply_peers(dead_peers,
                          name=ourselves.name,
                          namespace=ourselves.namespace,
                          legacy=ourselves.legacy)

    if prio_peers:
        if not freeze.is_set():
            logger.info(f"Freezing operations in favour of {prio_peers}.")
            freeze.set()
    else:
        if same_peers:
            logger.warning(
                f"Possibly conflicting operators with the same priority: {same_peers}."
            )
        if freeze.is_set():
            logger.info(f"Resuming operations after the freeze.")
            freeze.clear()
Example #4
class Stream:

    def __init__(self):
        self._future = Future()
        self._complete = Event()
        self._listeners = [ ]

    def __aiter__(self):
        return self

    async def __anext__(self):

        if self._complete.is_set():
            raise StopAsyncIteration

        result = await self._future
        self._future = Future()
        return result

    def write(self, item, last):
        if self._complete.is_set():
            return
        if last:
            self._set_complete()
        if self._future.done():
            self._future = Future()
        self._future.set_result(item)

    def abort(self, exc):
        if self._complete.is_set():
            return
        self._set_complete()
        if self._future.done():
            self._future = Future()
        self._future.set_exception(exc)

    def cancel(self):
        self._set_complete()
        if self._future.done():
            self._future = Future()
        self._future.cancel()

    async def completed(self):
        await self._complete.wait()

    @property
    def is_complete(self):
        return self._complete.is_set()

    def add_complete_listener(self, listener):
        self._listeners.append(listener)

    def _set_complete(self):
        self._complete.set()
        for listener in self._listeners:
            listener()
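A brief sketch of driving the Stream above (assuming Future and Event come from asyncio); the writer yields between writes so the reader can consume each item before it is replaced:

import asyncio

async def demo():
    stream = Stream()

    async def producer():
        for item, last in ((1, False), (2, False), (3, True)):
            stream.write(item, last)
            await asyncio.sleep(0)  # let the reader pick up each item

    asyncio.get_running_loop().create_task(producer())
    async for item in stream:
        print(item)  # 1, 2, 3

asyncio.run(demo())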
Example #5
async def read_trace(queue: asyncio.Queue, trigger: asyncio.Event, rsc_sock: asyncio.StreamReader):
    while True:
        # throw away data while not triggered
        await rsc_sock.read(1024)

        # only record data if triggered
        if trigger.is_set():
            buf = b''
            while trigger.is_set():
                buf += await rsc_sock.read(1024)
            await queue.put(np.frombuffer(buf, dtype='uint8'))
Example #6
async def steal_emoji(self, ctx, emojis: Greedy[PartialEmoji],
                      messages: Greedy[Message]):
    """Steals an emoji"""
    if not emojis and not messages:
        last_message = [
            await ctx.history(limit=10).find(
                lambda m: self.message_contains_emoji(m))
        ]
        if None in last_message:
            last_message = []
        emojis = await self.extract_emoji_from_messages(last_message)
    elif messages:
        emojis = await self.extract_emoji_from_messages(messages)
    added_emoji = set()
    async with ctx.channel.typing():
        limit_reached = Event()
        for emoji in filter(lambda e: e.is_custom_emoji(), emojis):
            try:
                created_emoji = await self.copy_emoji_to_guild(
                    emoji, ctx.guild)
                added_emoji.add(created_emoji)
            except HTTPException:
                limit_reached.set()
                break
    if added_emoji:
        summary = Embed(
            title="New emoji added ✅",
            description="\n".join(f"\\:{emoji.name}\\: -> {emoji}"
                                  for emoji in added_emoji),
            color=Color.green(),
        )
        if limit_reached.is_set():
            summary.description += (
                "\nSome emoji were not added because you hit the limit.")
    elif not added_emoji and limit_reached.is_set():
        summary = Embed(
            title="Emoji limit reached ⛔",
            description="You have reached the max emoji for this server, "
                        "get more boosts to raise this limit!",
            color=Color.red(),
        )
    else:
        messages_given = bool(messages)
        error_message = "message(s) given" if messages_given else "last 10 messages"
        summary = Embed(
            title="No emoji found 😔",
            description=f"No emoji were found in the {error_message}",
            color=Color.red(),
        )
    await ctx.reply(embed=summary)
Example #7
async def cancellable_aiter(async_iterator: MapAsyncIterator,
                            cancellation_event: Event,
                            *,
                            cancel_pending: bool = True,
                            timeout: Optional[float] = None) -> AsyncIterator:
    """[summary]

    Args:
        async_iterator (MapAsyncIterator): The iterator to use
        cancellation_event (Event): A cancellable event
        cancel_pending (bool, optional): If True, cancel tasks still pending
            when the cancellation event fires. Defaults to True.
        timeout (Optional[float], optional): A timeout. Defaults to None.

    Returns:
        AsyncIterator: The async iterator
    """
    result_iter = async_iterator.__aiter__()
    cancellation_task = asyncio.create_task(cancellation_event.wait())
    pending: Set["Future[Any]"] = {
        cancellation_task,
        asyncio.create_task(result_iter.__anext__())
    }

    if timeout is None:
        sleep_task: "Optional[Future[Any]]" = None
    else:
        sleep_task = asyncio.create_task(asyncio.sleep(timeout))
        pending.add(sleep_task)

    while not cancellation_event.is_set():
        try:
            done, pending = await asyncio.wait(
                pending, return_when=asyncio.FIRST_COMPLETED)
        except asyncio.CancelledError:
            for pending_task in pending:
                pending_task.cancel()
            raise

        for done_task in done:
            if done_task == cancellation_task:
                for pending_task in pending:
                    if cancel_pending:
                        pending_task.cancel()
                    else:
                        await pending_task
                        yield pending_task.result()
                break
            elif done_task == sleep_task:
                yield None
            else:
                yield done_task.result()
                pending.add(asyncio.create_task(result_iter.__anext__()))
        else:
            if timeout is not None:
                if sleep_task in pending:
                    sleep_task.cancel()
                    pending.discard(sleep_task)
                sleep_task = asyncio.create_task(asyncio.sleep(timeout))
                pending.add(sleep_task)
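Although annotated for graphql's MapAsyncIterator, any async iterator works; a hypothetical demo that cancels before the source is exhausted:

import asyncio

async def numbers():
    for i in range(10):
        await asyncio.sleep(0.1)
        yield i

async def demo():
    cancel = asyncio.Event()
    asyncio.get_running_loop().call_later(0.35, cancel.set)
    async for value in cancellable_aiter(numbers(), cancel):
        print(value)  # 0, 1, 2, then the event fires and iteration stops

asyncio.run(demo())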
Example #8
async def _do_multiplexing(self, stop: asyncio.Event) -> None:
    """
    Background task that reads messages from the transport and feeds them
    into individual queues for each of the protocols.
    """
    msg_stream = stream_transport_messages(
        self._transport,
        self._base_protocol,
        *self._protocols,
    )
    try:
        await self._handle_commands(msg_stream, stop)
    except asyncio.TimeoutError as exc:
        self.logger.warning(
            "Timed out waiting for command from %s, Stop: %r, exiting...",
            self,
            stop.is_set(),
        )
        self.logger.debug("Timeout %r: %s", self, exc, exc_info=True)
    except CorruptTransport as exc:
        self.logger.error("Corrupt transport, while multiplexing %s: %r",
                          self, exc)
        self.logger.debug("Corrupt transport, multiplexing trace: %s",
                          self, exc_info=True)
Example #9
async def async_main(conf):
    async with AsyncExitStack() as stack:
        session = await stack.enter_async_context(ClientSession())
        stop_event = Event()
        get_running_loop().add_signal_handler(SIGINT, stop_event.set)
        get_running_loop().add_signal_handler(SIGTERM, stop_event.set)
        send_report_semaphore = Semaphore(2)
        check_tasks = []
        try:
            # create asyncio task for each configured check target
            for target in conf.targets:
                check_tasks.append(
                    create_task(
                        check_target(session, conf, target,
                                     send_report_semaphore)))
                await sleep(.1)
            # all set up and (hopefully) running
            done, pending = await wait(check_tasks + [stop_event.wait()],
                                       return_when=FIRST_COMPLETED)
            if not stop_event.is_set():
                raise Exception(
                    f'Some task(s) unexpectedly finished: {done!r}')
        finally:
            logger.debug('Cleanup...')
            for t in check_tasks:
                t.cancel()
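The stop_event plus add_signal_handler wiring above is a reusable shutdown idiom; a minimal Unix-only sketch in which the worker task is a hypothetical stand-in for check_target:

import asyncio
import signal

async def main():
    stop_event = asyncio.Event()
    loop = asyncio.get_running_loop()
    loop.add_signal_handler(signal.SIGINT, stop_event.set)
    loop.add_signal_handler(signal.SIGTERM, stop_event.set)
    worker = asyncio.create_task(asyncio.sleep(3600))  # stand-in workload
    await stop_event.wait()  # returns once a signal arrives
    worker.cancel()          # cleanup, as in the finally block above

asyncio.run(main())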
Example #10
async def spin_driver(set_glyph: Callable[[str], None],
                      is_complete: asyncio.Event,
                      seq: str = SPINNER_SEQ) -> None:
    spin_provider = itertools.cycle(seq)
    while not is_complete.is_set():
        set_glyph(next(spin_provider))
        await asyncio.sleep(spin_refresh_rate)
Example #11
async def custom_object_handler(
    lifecycle: Callable,
    registry: registries.BaseRegistry,
    resource: registries.Resource,
    event: dict,
    freeze: asyncio.Event,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    body = event['object']

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = ObjectLogger(
        logging.getLogger(__name__),
        extra=dict(
            namespace=body.get('metadata', {}).get('namespace', 'default'),
            name=body.get('metadata',
                          {}).get('name',
                                  body.get('metadata', {}).get('uid', None)),
        ))

    # If the global freeze is set for the processing (i.e. other operator overrides), do nothing.
    if freeze.is_set():
        logger.debug("Ignoring the events due to freeze.")
        return

    # Object patch accumulator. Populated by the methods. Applied in the end of the handler.
    # Detect the cause and handle it (or at least log this happened).
    patch = {}
    cause = causation.detect_cause(event=event,
                                   resource=resource,
                                   logger=logger,
                                   patch=patch)
    delay = await handle_cause(lifecycle=lifecycle,
                               registry=registry,
                               cause=cause)

    # Provoke a dummy change to trigger the reactor after sleep.
    # TODO: reimplement via the handler delayed statuses properly.
    if delay and not patch:
        patch.setdefault('status', {}).setdefault(
            'kopf', {})['dummy'] = datetime.datetime.utcnow().isoformat()

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    if patch:
        logger.debug("Patching with: %r", patch)
        patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    if delay:
        logger.info(f"Sleeping for {delay} seconds for the delayed handlers.")
        await asyncio.sleep(delay)
Example #12
async def cancel_wrapper(
    stream: Stream[_TSend, _TRecv], stop: asyncio.Event
) -> AsyncIterator[_TRecv]:
    async for event in stop_wrapper(stream, stop):
        yield event
    if stop.is_set():
        await stream.cancel()
Example #13
class PricesManager(Initializable):
    MARK_PRICE_TIMEOUT = 60

    def __init__(self):
        super().__init__()
        self.logger = get_logger(self.__class__.__name__)
        self.mark_price = 0

        # warning: should only be created in the async loop thread
        self.prices_initialized_event = Event()

    async def initialize_impl(self):
        self.__reset_prices()

    def set_mark_price(self, mark_price):
        self.mark_price = mark_price
        self.prices_initialized_event.set()

    async def get_mark_price(self, timeout=MARK_PRICE_TIMEOUT):
        if not self.prices_initialized_event.is_set():
            await wait_for(self.prices_initialized_event.wait(), timeout)
        return self.mark_price

    def __reset_prices(self):
        self.mark_price = 0
Example #14
async def send_task(
    s_sender: asyncio.StreamWriter,
    q: asyncio.Queue,
    e: asyncio.Event,
    delimiter: bytes,
    timeout=None,
):
    print("[SEND][INFO] Started")

    try:
        while True:
            try:
                n = await asyncio.wait_for(q.get(), timeout)
                q.task_done()
            except asyncio.TimeoutError:
                if e.is_set():
                    print(SharedData.bold("[SEND][INFO] Event set!"))
                    return
            else:
                try:
                    await tcp_send(n, s_sender, delimiter, timeout)

                except asyncio.TimeoutError:
                    # really just want to use logging and dump logs in other thread..
                    print(SharedData.red("[Send][CRIT] Connection Broken!"))
                    break
    except Exception:
        print(SharedData.bold("[SEND][CRIT] Stopping SEND!"))
        e.set()
        raise
Example #15
async def read(reader: StreamReader, e: asyncio.Event):
    while not e.is_set():
        got = await tcp_recv(reader, DELIMIT)
        print(f"recv >> {got}")

        if KILL in got:
            e.set()
Example #16
class TimeTicker:
    """An event source which supports queue based notification"""

    def __init__(self) -> None:
        self.shutdown_event = Event()
        self.listeners: List[Queue] = []

    async def start(self) -> None:
        """Start generating events"""

        while not self.shutdown_event.is_set():
            now = datetime.now()
            for listener in self.listeners:
                await listener.put(now)
            try:
                await asyncio.wait_for(self.shutdown_event.wait(), timeout=1)
            except asyncio.TimeoutError:
                pass
            except:  # pylint: disable=bare-except
                LOGGER.exception('Cancelled')

    def stop(self):
        """Stop the event source"""
        self.shutdown_event.set()

    def add_listener(self) -> Queue:
        """Add a listener to the event source"""
        LOGGER.debug('Adding a listener')
        listener: Queue = Queue()
        self.listeners.append(listener)
        return listener

    def remove_listener(self, listener: Queue) -> None:
        """Remove a listener from the event source"""
        self.listeners.remove(listener)
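A short sketch of consuming the ticker above, assuming Event and Queue come from asyncio, datetime is imported, and LOGGER is a configured logger:

import asyncio

async def demo():
    ticker = TimeTicker()
    listener = ticker.add_listener()
    task = asyncio.create_task(ticker.start())
    for _ in range(3):
        print(await listener.get())  # roughly one datetime per second
    ticker.stop()
    await task

asyncio.run(demo())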
Example #17
    async def _handle_commands(
            self,
            msg_stream: AsyncIterator[Tuple[ProtocolAPI, CommandAPI[Any]]],
            stop: asyncio.Event) -> None:

        async for protocol, cmd in msg_stream:
            self._last_msg_time = time.monotonic()
            # track total number of messages received for each command type.
            self._msg_counts[type(cmd)] += 1

            queue = self._protocol_queues[type(protocol)]
            try:
                # We must use `put_nowait` here to ensure that in the event
                # that a single protocol queue is full that we don't block
                # other protocol messages getting through.
                queue.put_nowait(cmd)
            except asyncio.QueueFull:
                self.logger.error(
                    (
                        "Multiplexing queue for protocol '%s' full. "
                        "discarding message: %s"
                    ),
                    protocol,
                    cmd,
                )

            if stop.is_set():
                break
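The put_nowait/QueueFull handling above generalizes to a small helper; a hypothetical sketch of the same drop-instead-of-block policy:

import asyncio

def offer(queue: asyncio.Queue, item) -> bool:
    """Enqueue without blocking; report a drop instead of stalling."""
    try:
        queue.put_nowait(item)
        return True
    except asyncio.QueueFull:
        return False  # the caller may log and discard, as the method above does

q: asyncio.Queue = asyncio.Queue(maxsize=1)
assert offer(q, "first")
assert not offer(q, "second")  # queue full: dropped, not blocked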
Example #18
async def recv_task(
    s_receiver: asyncio.StreamReader,
    q: asyncio.Queue,
    e: asyncio.Event,
    delimiter: bytes,
    timeout=None,
):
    print("[RECV][INFO] Started")

    try:
        while True:
            try:
                data = await tcp_recv(s_receiver, delimiter, timeout)
            except asyncio.TimeoutError:
                print('[RECV][WARN] TIMEOUT')
                if e.is_set():
                    print(SharedData.bold(f"[RECV][INFO] Event set!"))
                    return
            except asyncio.IncompleteReadError:
                print(SharedData.red(f"[RECV][CRIT] Disconnected!"))
                e.set()
                return

            else:
                await q.put(data)

    except Exception:
        print(SharedData.bold("[RECV][CRIT] Stopping SEND!"))
        e.set()
        raise
Example #19
async def watch_files(
    files: List[Path],
    cmd: List[str],
    shutdown: asyncio.Event,
    wait_time: int = 1,
):
    """Watch files for changes indefinitely.

    Args:
        files: A list of files to watch.
        cmd: A command to run when a file changes.
        shutdown: An event that can signal this coroutine to terminate.
        wait_time: The amount of time (in seconds) to wait between each scan of
            the watched files.
    """
    files = [file for file in files if file.is_file()]
    LOGGER.info("watching files %s", [str(p) for p in files])
    hashes = set(HashedFile(path) for path in files)

    loop = asyncio.get_event_loop()

    while not shutdown.is_set():
        await asyncio.sleep(wait_time)
        new_hashes = set(HashedFile(path) for path in files)
        diff = hashes - new_hashes
        if diff:
            for hf in diff:
                LOGGER.info("%s changed", hf.path.name)
                loop.create_task(execute(cmd, hf.path))
            hashes = new_hashes
Example #20
async def test_swaplink_callback():
    callback_flag = Event()
    callback_neighbors = []

    def callback(neighbors: List[Any]):
        nonlocal callback_flag, callback_neighbors
        callback_neighbors = neighbors
        callback_flag.set()

    my_num_links = 3
    others_amount = 10
    others_relative_load = [
        random.randrange(2, 20) for _ in range(others_amount)
    ]
    my_network, other_networks = await setup_network_by_relative_loads(
        my_num_links, others_relative_load)
    my_network.list_neighbours(callback)
    await asyncio.sleep(defaults.HBEAT_CHECK_FREQUENCY * 1.5)
    current_neighbors = my_network.list_neighbours(callback)

    assert callback_flag.is_set()
    assert callback_neighbors == current_neighbors

    # clean up
    await my_network.leave()
    for network in other_networks:
        await network.leave()
Example #21
    async def _do_multiplexing(self, stop: asyncio.Event, token: CancelToken) -> None:
        """
        Background task that reads messages from the transport and feeds them
        into individual queues for each of the protocols.
        """
        msg_stream = self.wait_iter(stream_transport_messages(
            self._transport,
            self._base_protocol,
            *self._protocols,
            token=token,
        ), token=token)
        async for protocol, cmd, msg in msg_stream:
            # track total number of messages received for each command type.
            self._msg_counts[type(cmd)] += 1

            queue = self._protocol_queues[type(protocol)]
            try:
                # We must use `put_nowait` here to ensure that in the event
                # that a single protocol queue is full that we don't block
                # other protocol messages getting through.
                queue.put_nowait((cmd, msg))
            except asyncio.QueueFull:
                self.logger.error(
                    (
                        "Multiplexing queue for protocol '%s' full. "
                        "discarding message: %s"
                    ),
                    protocol,
                    cmd,
                )

            if stop.is_set():
                break
Example #22
async def update_lifetime(loop: asyncio.BaseEventLoop, ev: asyncio.Event,
                          lifetime: int):
    i = 0
    while not ev.is_set():
        logger.info("update lifetime:%d %d", i, lifetime + 20)  # xxx: +20?
        await asyncio.wait([loop.run_in_executor(None, _do), asyncio.sleep(1)])
        i += 1
Example #23
async def test_worker_exception_failed_to_send_message(mock_aioredis,
                                                       monkeypatch,
                                                       mock_redis_db):

    send_mock = CoroutineMock()

    def raise_connect_error(*_, **__):
        raise FailedToSendMessage("")

    send_mock.side_effect = raise_connect_error

    stopped_event = Event()
    monkeypatch.setattr(pyuubin.connectors.smtp, "send", send_mock)

    await mock_redis_db.connect()
    [await mock_redis_db.add_mail(mail) for mail in mail_generator(4)]

    await worker("test", "redis://localhost", stopped_event=stopped_event)

    send_mock.assert_awaited()

    assert not stopped_event.is_set()

    assert await mock_aioredis.llen(f"{settings.redis_mail_queue}::failed") == 4
    failed_msg = unpack(
        await mock_aioredis.rpop(f"{settings.redis_mail_queue}::failed"))["mail"]
    assert failed_msg["parameters"]["secret_data"] == "XXXXXX"
Example #24
async def worker(
    name: str,
    to_run: asyncio.Queue,
    to_write: asyncio.Queue,
    schema: quiz.Schema,
    executor: quiz.execution.async_executor,
    shutdown: asyncio.Event,
    request_pending: asyncio.Event,
    run_graphql_with_backoff: Callable[
        [quiz.execution.async_executor, str, str], quiz.execution.RawResult
    ],
):
    """worker runs Github metadata requests until shutdown

    More specifically, until the shutdown event fires, it repeatedly:

    1. pulls a request from the to_run queue
    2. sets request pending
    3. runs the request
    4. clears request pending
    5. pushes successful request response exchanges to the to_write queue
    """
    queue_wait_timeout_seconds = 2

    while True:
        if shutdown.is_set():
            log.debug(f"{name} shutting down")
            break

        try:
            request: Request = await asyncio.wait_for(
                to_run.get(), queue_wait_timeout_seconds
            )
        except asyncio.TimeoutError:
            log.debug(f"{name} no new requests after {queue_wait_timeout_seconds}s")
            continue

        with event_in_progress(request_pending):
            # TODO: retry if request fails due to rate limit or intermittent error
            try:
                gql_query = str(schema.query[request.graphql])
                assert str(MISSING) not in gql_query
                log.info(f"{name} running {request.log_str}")
                log.debug(f"{name} {request.log_id} gql_query is: {gql_query}")
                result: quiz.execution.RawResult = await run_graphql_with_backoff(
                    executor, name, gql_query
                )
                response: Response = Response(resource=request.resource, json=result)
                log.debug(
                    f"{name} for {request.log_id} queued response {response.log_str} to write"
                )
                # write non-empty responses to stdout
                assert response
                to_write.put_nowait(RequestResponseExchange(request, response))
            except Exception as err:
                log.error(f"{name} error running {request.log_id}\n:{exc_to_str()}")

            # Notify the queue that the "work item" has been processed.
            to_run.task_done()
Example #25
async def send(writer: StreamWriter, e: asyncio.Event):
    while not e.is_set():
        msg = await ainput("send << ")
        await tcp_send(msg, writer, DELIMIT)
        await writer.drain()

        if KILL in msg:
            e.set()
Example #26
async def update_lifetime(
    loop: asyncio.BaseEventLoop, ev: asyncio.Event, lifetime: int
):
    i = 0
    while not ev.is_set():
        logger.info("update lifetime:%d %d", i, lifetime + 20)  # xxx: +20?
        await asyncio.wait([loop.run_in_executor(None, _do), asyncio.sleep(1)])
        i += 1
Example #27
class CompoundQueue(GeneratorQueue):
    stop_event = None
    ready = None
    loop = None
    queues = None

    def __init__(self, queues, loop):
        self.ready = Event(loop=loop)
        self.stop_event = Event(loop=loop)
        self.queues = queues
        self.loop = loop

    async def start(self):
        if self.stop_event.is_set():
            raise QueueError("Socket already stopped.")

        await self.do_action("start")
        self.ready.set()

    @dies_on_stop_event
    async def get(self):
        raise NotImplementedError()

    @dies_on_stop_event
    async def put(self, data):
        await self.setup()
        await self.ready.wait()
        await self.do_action("put", (data,))

    async def setup(self):
        """Setup the client."""
        if not self.ready.is_set():
            await self.start()

    async def stop(self):
        """Stop queue."""
        self.ready.clear()
        self.stop_event.set()

        await self.do_action("stop")

    async def do_action(self, name, args=()):
        coroutines = [getattr(i, name) for i in self.queues]
        tasks = [i(*args) for i in coroutines]

        await wait(tasks, loop=self.loop)
Example #28
async def periodically_report_progress(self,
                                       done: asyncio.Event,
                                       period: float = 0.5):
    while not done.is_set():
        with contextlib.suppress(asyncio.TimeoutError):
            await asyncio.wait_for(done.wait(), period)
        self.update_rates()
        self.report_progress()
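The same suppress(TimeoutError) loop works outside a class; a standalone sketch with hypothetical names:

import asyncio
import contextlib

async def run_periodically(callback, done: asyncio.Event, period: float = 0.5):
    while not done.is_set():
        with contextlib.suppress(asyncio.TimeoutError):
            await asyncio.wait_for(done.wait(), period)
        callback()  # runs every period, and once more when done fires

async def demo():
    done = asyncio.Event()
    asyncio.get_running_loop().call_later(1.6, done.set)
    await run_periodically(lambda: print("tick"), done)

asyncio.run(demo())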
Example #29
async def event_wait(event: asyncio.Event, timeout: int) -> bool:
    """Wait for event with timeout, return True if event was set, False if we timed out

    This is intended to behave like threading.Event.wait"""
    try:
        return await asyncio.wait_for(event.wait(), timeout)
    except asyncio.TimeoutError:
        return event.is_set()
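A usage sketch: the helper returns True when the event is set in time and False on timeout, mirroring threading.Event.wait:

import asyncio

async def demo():
    flag = asyncio.Event()
    asyncio.get_running_loop().call_later(0.1, flag.set)
    print(await event_wait(flag, 1))  # True: set before the timeout
    flag.clear()
    print(await event_wait(flag, 1))  # False after ~1s: timed out

asyncio.run(demo())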
Example #30
async def task_producer_run(client: aiohttp.ClientSession, task_type: str,
                            task_payload, expect_response: asyncio.Event):
    async with client.post('http://localhost:8080/task/run',
                           params={'taskType': task_type},
                           json=task_payload) as response:
        assert expect_response.is_set()
        assert response.status == 200
        return await response.json()
Example #31
async def update_lifetime(loop: asyncio.BaseEventLoop, ev: asyncio.Event,
                          lifetime: int):
    with spawn_task_scope(loop) as spawn:
        i = 0
        while not ev.is_set():
            logger.info("update lifetime:%d %d", i, lifetime + 20)  # xxx: +20?
            spawn(_do)
            await asyncio.sleep(1)
            i += 1
Example #32
class Server(tornado.web.Application):
    io_loop = None
    address = None
    port = None

    def __init__(self, address='', port=8080, loglevel=logging.WARNING, mounting_point='',
                 listener_key=None, notifier_key=None):
        self.listener_key = listener_key
        self.notifier_key = notifier_key
        # tornado.log.enable_pretty_logging(logger=logging.getLogger('tornado'))
        log.setLevel(loglevel)
        log.debug('Initializing server')
        tornado.web.Application.__init__(self, [
            (mounting_point + r'/send', SendHandler),
            (mounting_point + r'/listen', NotificationSocket),
        ], websocket_ping_interval=10)
        self.stopped = Event()
        self.address = address
        self.port = port

    def cors_headers(self, handler):
        origin = None
        try:
            origin = handler.request.headers['origin']
        except KeyError:
            pass
        handler.add_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
        handler.add_header('Access-Control-Allow-Headers', 'DNT,'
                                                           'X-CustomHeader,'
                                                           'Keep-Alive,'
                                                           'User-Agent,'
                                                           'X-Requested-With,'
                                                           'If-Modified-Since,'
                                                           'Cache-Control,'
                                                           'Content-Type,'
                                                           'Content-Range,'
                                                           'Range,'
                                                           'Origin,'
                                                           'Accept')

        if origin is not None and self.check_origin(origin):
            handler.add_header('Access-Control-Allow-Origin', origin)

    def check_origin(self, origin):
        return True

    def start(self):
        log.debug('Starting server at %s:%d' % (self.address, self.port))
        self.listen(self.port, self.address)
        self.io_loop = tornado.ioloop.IOLoop.current()
        self.io_loop.start()
        self.stopped.set()

    def stop(self):
        log.info('Stopping...')
        if not self.stopped.is_set():
            self.io_loop.stop()
Example #33
async def update_lifetime(
    loop: asyncio.BaseEventLoop, ev: asyncio.Event, lifetime: int
):
    with spawn_task_scope(loop) as spawn:
        i = 0
        while not ev.is_set():
            logger.info("update lifetime:%d %d", i, lifetime + 20)  # xxx: +20?
            spawn(_do)
            await asyncio.sleep(1)
            i += 1
Example #34
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()
        self.error_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()
        elif status.is_error():
            self.error_queue.put_nowait(status.error_data.exception)

    def message(self, pubnub, message):
        self.message_queue.put_nowait(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put_nowait(presence)

    @asyncio.coroutine
    def _wait_for(self, coro):
        scc_task = asyncio.ensure_future(coro)
        err_task = asyncio.ensure_future(self.error_queue.get())

        yield from asyncio.wait([
            scc_task,
            err_task
        ], return_when=asyncio.FIRST_COMPLETED)

        if err_task.done() and not scc_task.done():
            if not scc_task.cancelled():
                scc_task.cancel()
            raise err_task.result()
        else:
            if not err_task.cancelled():
                err_task.cancel()
            return scc_task.result()

    @asyncio.coroutine
    def wait_for_connect(self):
        if not self.connected_event.is_set():
            yield from self._wait_for(self.connected_event.wait())
        else:
            raise Exception("instance is already connected")

    @asyncio.coroutine
    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            yield from self._wait_for(self.disconnected_event.wait())
        else:
            raise Exception("instance is already disconnected")

    @asyncio.coroutine
    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:
                env = yield from self._wait_for(self.message_queue.get())
                if env.channel in channel_names:
                    return env
                else:
                    continue
            finally:
                self.message_queue.task_done()

    @asyncio.coroutine
    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:
                env = yield from self._wait_for(self.presence_queue.get())
                if env.channel in channel_names:
                    return env
                else:
                    continue
            finally:
                self.presence_queue.task_done()
Example #35
class Channel(object):
    """
        A Channel is a closable queue. A Channel is considered "finished" when
        it is closed and drained (unlike a queue which is "finished" when the queue
        is empty)
    """

    def __init__(self, maxsize=0, *, loop=None):
        if loop is None:
            self._loop = get_event_loop()
        else:
            self._loop = loop

        if not isinstance(maxsize, int) or maxsize < 0:
            raise TypeError("maxsize must be an integer >= 0 (default is 0)")
        self._maxsize = maxsize

        # Futures.
        self._getters = deque()
        self._putters = deque()

        # "finished" means channel is closed and drained
        self._finished = Event(loop=self._loop)
        self._close = Event(loop=self._loop)

        self._init()

    def _init(self):
        self._queue = deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._queue.append(item)

    def _wakeup_next(self, waiters):
        # Wake up the next waiter (if any) that isn't cancelled.
        while waiters:
            waiter = waiters.popleft()
            if not waiter.done():
                waiter.set_result(None)
                break

    def __repr__(self):
        return '<{} at {:#x} maxsize={!r} qsize={!r}>'.format(
            type(self).__name__, id(self), self._maxsize, self.qsize())

    def __str__(self):
        return '<{} maxsize={!r} qsize={!r}>'.format(
            type(self).__name__, self._maxsize, self.qsize())

    def qsize(self):
        """Number of items in the channel buffer."""
        return len(self._queue)

    @property
    def maxsize(self):
        """Number of items allowed in the channel buffer."""
        return self._maxsize

    def empty(self):
        """Return True if the channel is empty, False otherwise."""
        return not self._queue

    def full(self):
        """Return True if there are maxsize items in the channel.
        Note: if the Channel was initialized with maxsize=0 (the default),
        then full() is never True.
        """
        if self._maxsize <= 0:
            return False
        else:
            return self.qsize() >= self._maxsize

    @coroutine
    def put(self, item):
        """Put an item into the channel.
        If the channel is full, wait until a free
        slot is available before adding item.
        If the channel is closed or closing, raise ChannelClosed.
        This method is a coroutine.
        """
        while self.full() and not self._close.is_set():
            putter = Future(loop=self._loop)
            self._putters.append(putter)
            try:
                yield from putter
            except ChannelClosed:
                raise
            except:
                putter.cancel()  # Just in case putter is not done yet.
                if not self.full() and not putter.cancelled():
                    # We were woken up by get_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._putters)
                raise
        return self.put_nowait(item)

    def put_nowait(self, item):
        """Put an item into the channel without blocking.
        If no free slot is immediately available, raise ChannelFull.
        """
        if self.full():
            raise ChannelFull
        if self._close.is_set():
            raise ChannelClosed
        self._put(item)
        self._wakeup_next(self._getters)

    @coroutine
    def get(self):
        """Remove and return an item from the channel.
        If channel is empty, wait until an item is available.
        This method is a coroutine.
        """
        while self.empty() and not self._close.is_set():
            getter = Future(loop=self._loop)
            self._getters.append(getter)
            try:
                yield from getter
            except ChannelClosed:
                raise
            except:
                getter.cancel()  # Just in case getter is not done yet.
                if not self.empty() and not getter.cancelled():
                    # We were woken up by put_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._getters)
                raise
        return self.get_nowait()

    def get_nowait(self):
        """Remove and return an item from the channel.
        Return an item if one is immediately available, else raise ChannelEmpty.
        """
        if self.empty():
            if self._close.is_set():
                raise ChannelClosed
            else:
                raise ChannelEmpty
        item = self._get()
        if self.empty() and self._close.is_set():
            # if empty _after_ we retrieved an item AND marked for closing,
            # set the finished flag
            self._finished.set()
        self._wakeup_next(self._putters)
        return item

    @coroutine
    def join(self):
        """Block until channel is closed and channel is drained
        """
        yield from self._finished.wait()

    def close(self):
        """Marks the channel is closed and throw a ChannelClosed in all pending putters"""
        self._close.set()
        # cancel putters
        for putter in self._putters:
            putter.set_exception(ChannelClosed())
        # cancel getters that can't ever return (as no more items can be added)
        while len(self._getters) > self.qsize():
            getter = self._getters.pop()
            getter.set_exception(ChannelClosed())

        if self.empty():
            # already empty, mark as finished
            self._finished.set()

    def closed(self):
        """Returns True if the Channel is marked as closed"""
        return self._close.is_set()

    @coroutine
    def __aiter__(self):  # pragma: no cover
        """Returns an async iterator (self)"""
        return self

    @coroutine
    def __anext__(self):  # pragma: no cover
        try:
            data = yield from self.get()
        except ChannelClosed:
            raise StopAsyncIteration
        else:
            return data

    def __iter__(self):
        return iter(self._queue)
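A short sketch of the close-then-drain behaviour above; note the Event(loop=...) and @coroutine usage requires an older Python (3.7 or so):

import asyncio

async def demo():
    ch = Channel(maxsize=2)
    ch.put_nowait("a")
    ch.put_nowait("b")
    ch.close()              # closed, but not finished until drained
    print(ch.get_nowait())  # a
    print(ch.get_nowait())  # b: now closed *and* drained
    await ch.join()         # returns immediately, channel is finished

asyncio.get_event_loop().run_until_complete(demo())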
Example #36
async def update_lifetime(ev: asyncio.Event, lifetime: int):
    i = 0
    while not ev.is_set():
        logger.info("update lifetime:%d %d", i, lifetime + 20)  # xxx: +20?
        await asyncio.sleep(1)
        i += 1