Example 1
async def run(*, loop: asyncio.BaseEventLoop = None):
    loop = loop or asyncio.get_event_loop()

    ev = asyncio.Event()
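    # Let SIGINT set the event instead of raising KeyboardInterrupt inside the coroutine.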
    loop.add_signal_handler(signal.SIGINT, ev.set)

    atask = loop.create_task(do_task())
    done, pending = await asyncio.wait(
        [update_lifetime(loop, ev, lifetime=3), atask],
        return_when=asyncio.FIRST_COMPLETED,
    )

    result = None
    if atask in done:
        try:
            result = await atask
        except Exception as e:
            result = f"ng ({e!r})"

    interrupted = ev.is_set()
    if interrupted:
        logger.info("task is interrupted (catch SIGINT)")
    else:
        logger.info("task completed, result=%r", result)
    ev.set()
    await asyncio.wait(pending)
Example 2
def _master_watcher(pid: int, loop: asyncio.BaseEventLoop):
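    # Re-arm this watcher so the master process keeps being polled.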
    loop.call_later(MASTER_WATCHER_PERIOD, _master_watcher, pid, loop)
    try:
        os.kill(pid, 0)  # check master process
    except ProcessLookupError:
        os._exit(os.EX_OK)  # noqa: W0212 a hard exit is better than detached processes
Example 3
    def start(self, loop: asyncio.BaseEventLoop = None):
        self.lock_cooking_number()

        for instruction in self.instructions:
            if not isinstance(instruction, dict):
                print(f'pressing key {instruction}')
                self.key_press(instruction)
                time.sleep(self.sleep_tm)
            else:
                for _ in range(instruction['times']):
                    if 'press_key_down_time' not in instruction:
                        self.key_press(instruction['key'])
                        time.sleep(self.sleep_tm)
                    else:
                        self.key_down(instruction['key'], instruction['press_key_down_time'])
                        time.sleep(self.sleep_tm)

        if self.additional_cooking_instructions:
            recipe = Recipe(**self.additional_cooking_instructions)

            loop.call_later(self.cook_tm, cooking_timer,
                            self.order_num,
                            recipe)
        else:
            loop.call_later(self.cook_tm, cooking_timer, self.order_num)
Example 4
async def run(*, loop: asyncio.BaseEventLoop = None):
    loop = loop or asyncio.get_event_loop()

    ev = asyncio.Event()
    loop.add_signal_handler(signal.SIGINT, ev.set)

    update_task = loop.create_task(update_lifetime(loop, ev, lifetime=3))
    update_task.add_done_callback(lambda fut: fut.cancelled() or fut.result())
    atask = loop.create_task(do_task())
    done, pending = await asyncio.wait(
        [ev.wait(), atask], return_when=asyncio.FIRST_COMPLETED
    )

    result = None
    if atask in done:
        try:
            result = await atask
        except Exception as e:
            result = f"ng ({e!r})"
        update_task.cancel()

    interrupted = ev.is_set()
    if interrupted:
        logger.info("task is interrupted (catch SIGINT)")
    else:
        logger.info("task completed, result=%r", result)
Example 5
    def start(self, loop: asyncio.BaseEventLoop = None):
        self.lock_cooking_number()

        for instruction in self.instructions:
            if not isinstance(instruction, dict):
                print(f'pressing key {instruction}')
                self.key_press(instruction)
                time.sleep(self.sleep_tm)
            else:
                for _ in range(instruction['times']):
                    if 'press_key_down_time' not in instruction:
                        self.key_press(instruction['key'])
                        time.sleep(self.sleep_tm)
                    else:
                        self.key_down(instruction['key'],
                                      instruction['press_key_down_time'])
                        time.sleep(self.sleep_tm)

        if self.additional_cooking_instructions:
            recipe = Recipe(**self.additional_cooking_instructions)

            loop.call_later(self.cook_tm, cooking_timer, self.order_num,
                            recipe)
        else:
            loop.call_later(self.cook_tm, cooking_timer, self.order_num)
Example 6
def main(loop: asyncio.BaseEventLoop):
    executor = ThreadPoolExecutor()
    loop.set_default_executor(executor)
    root = MailGui(loop)
    root.protocol('WM_DELETE_WINDOW', loop.stop)
    accounts = dict(get_accounts())
    controller = Controller(loop, accounts, root)
    return root
Example 7
    def sig_handler(sig: enum.Enum, loop: asyncio.BaseEventLoop):
        nonlocal stopping
        if stopping:
            return

        logger.info(f"got {sig}, terminating")
        loop.create_task(server.stop(0))
        stopping = True
Example 8
    async def listen_for_user_stream(self, ev_loop: asyncio.BaseEventLoop,
                                     output: asyncio.Queue):
        """
        *required
        Subscribe to user stream via web socket, and keep the connection open for incoming messages
        :param ev_loop: ev_loop to execute this function in
        :param output: an async queue where the incoming messages are stored
        """
        while True:
            try:
                async with websockets.connect(Constants.BAEE_WS_URL) as ws:
                    ws: websockets.WebSocketClientProtocol = ws
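                    # Keep the connection alive with a background ping task.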
                    ev_loop.create_task(self.custom_ping(ws))

                    # Send an auth request first
                    auth_request: Dict[str, Any] = {
                        "event": Constants.WS_AUTH_REQUEST_EVENT,
                        "data": self._liquid_auth.get_ws_auth_data()
                    }
                    await ws.send(ujson.dumps(auth_request))

                    quoted_currencies = [
                        trading_pair.split('-')[1]
                        for trading_pair in self._trading_pairs
                    ]

                    for trading_pair, quoted_currency in zip(
                            self._trading_pairs, quoted_currencies):
                        subscribe_request: Dict[str, Any] = {
                            "event": Constants.WS_PUSHER_SUBSCRIBE_EVENT,
                            "data": {
                                "channel":
                                Constants.WS_USER_ACCOUNTS_SUBSCRIPTION.format(
                                    quoted_currency=quoted_currency.lower())
                            }
                        }
                        await ws.send(ujson.dumps(subscribe_request))
                    async for raw_msg in self._inner_messages(ws):
                        diff_msg = ujson.loads(raw_msg)

                        event_type = diff_msg.get('event', None)
                        if event_type == 'updated':
                            output.put_nowait(diff_msg)
                            self._last_recv_time = time.time()
                        elif event_type == "pusher:pong":
                            self._last_recv_time = time.time()
                        elif not event_type:
                            raise ValueError(
                                f"Liquid Websocket message does not contain an event type - {diff_msg}"
                            )
            except asyncio.CancelledError:
                raise
            except Exception:
                self.logger().error(
                    "Unexpected error with Liquid WebSocket connection. "
                    "Retrying after 30 seconds...",
                    exc_info=True)
                await asyncio.sleep(30.0)
Example 9
    def __init__(self, loop: asyncio.BaseEventLoop):
        self.static = pathlib.Path(rel_path('../frontend/build', check=False))
        self.loop = loop
        self.app = Starlette(routes=self.routes, on_shutdown=[self.exit])
        self.config = uvicorn.config.Config(self.app, log_config=None, host='0.0.0.0', port=7999)
        self.server = uvicorn.Server(config=self.config)
        self.serve_task = loop.create_task(self.server.serve())
        self.update_task = loop.create_task(self.update_loop())
        self.ws_clients = []
Example 10
def shared(loop: asyncio.BaseEventLoop):
    fut = loop.create_future()

    def callback():
        print("called")
        fut.set_result("xxx")

    loop.call_later(0.2, callback)
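    # Callers await the returned future; the loop resolves it via the callback 0.2 s later.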
    return fut
Example 11
    def start(self, loop: asyncio.BaseEventLoop):
        self.ws = websockets.serve(lambda ws, p: self.handler(ws, p),
                                   self.host,
                                   self.port,
                                   loop=loop)

        loop.run_until_complete(self.ws)
        log.debug("Started websocket server.")
        log.info(f"Started websocket bridge on {self.host}:{self.port}")
Example 12
    def run_server(self, loop: asyncio.BaseEventLoop):
        asyncio.set_event_loop(loop)
        self.loop = loop

        handler = self.app.make_handler()
        server = loop.create_server(handler, host=self.local, port=self.port)

        loop.run_until_complete(server)
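        # The server is now listening; run_forever keeps handling requests until the loop is stopped.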
        loop.run_forever()
Example 13
    def start(self, loop: asyncio.BaseEventLoop):
        app = web.Application()
        app.add_routes([web.get("/{command}", lambda r: self.handler(r))])

        runner = web.AppRunner(app)
        loop.run_until_complete(runner.setup())
        self.site = web.TCPSite(runner, self.host, self.port)

        loop.run_until_complete(self.site.start())

        log.info(f"Serving HTTP bridge on port {self.port}")
Example 14
def start_background_lock_extender(lock_manager: Aioredlock, lock: Lock,
                                   loop: asyncio.BaseEventLoop) -> None:
    """Will periodically extend the duration of the lock"""
    async def extender_worker(lock_manager: Aioredlock):
        sleep_interval = 0.9 * config.REDLOCK_REFRESH_INTERVAL_SECONDS
        while True:
            await lock_manager.extend(lock,
                                      config.REDLOCK_REFRESH_INTERVAL_SECONDS)

            await asyncio.sleep(sleep_interval)

    loop.run_until_complete(extender_worker(lock_manager))
Example 15
    def __init__(self, loop: asyncio.BaseEventLoop, pool: PubSubPool, node: int):
        self.loop = loop
        self._pool = pool
        self._node = node
        self._topics = []
        self._websocket = None
        self._timeout = asyncio.Event()

        self._last_result = None

        loop.create_task(self.handle_ping())
        self._listener = None
Example 16
def distribute_global_resources(
        loop: asyncio.BaseEventLoop,
        blob_client: azure.storage.blob.BlockBlobService,
        queue_client: azure.storage.queue.QueueService,
        table_client: azure.storage.table.TableService,
        ipaddress: str) -> None:
    """Distribute global services/resources
    :param asyncio.BaseEventLoop loop: event loop
    :param azure.storage.blob.BlockBlobService blob_client: blob client
    :param azure.storage.queue.QueueService queue_client: queue client
    :param azure.storage.table.TableService table_client: table client
    :param str ipaddress: ip address
    """
    # set torrent session port listen
    if _ENABLE_P2P:
        global _TORRENT_SESSION
        # create torrent session
        logger.info('creating torrent session on {}:{}'.format(
            ipaddress, _DEFAULT_PORT_BEGIN))
        _TORRENT_SESSION = libtorrent.session()
        _TORRENT_SESSION.listen_on(_DEFAULT_PORT_BEGIN, _DEFAULT_PORT_END)
        _TORRENT_SESSION.stop_lsd()
        _TORRENT_SESSION.stop_upnp()
        _TORRENT_SESSION.stop_natpmp()
        # bootstrap dht nodes
        bootstrap_dht_nodes(loop, table_client, ipaddress, 0)
        _TORRENT_SESSION.start_dht()
    # get globalresources from table
    try:
        entities = table_client.query_entities(
            _STORAGE_CONTAINERS['table_globalresources'],
            filter='PartitionKey eq \'{}\''.format(_PARTITION_KEY))
    except azure.common.AzureMissingResourceHttpError:
        entities = None
    nentities = 0
    # check torrent info table for resource
    if entities is not None:
        for ent in entities:
            nentities += 1
            if _ENABLE_P2P:
                _check_resource_has_torrent(blob_client, table_client,
                                            ent['Resource'])
            else:
                with _DIRECTDL_LOCK:
                    _DIRECTDL.append(ent['Resource'])
    if nentities == 0:
        logger.info('no global resources specified')
        return
    # run async func in loop
    loop.run_until_complete(
        download_monitor_async(loop, blob_client, queue_client, table_client,
                               ipaddress, nentities))
Example 17
    def start(self, loop: asyncio.BaseEventLoop):
        ws_future = websockets.serve(
            lambda ws, p: self.handler(ws, p),
            self.host,
            self.port,
            subprotocols=["com.microsoft.minecraft.wsencrypt"],
            ping_interval=None,
            loop=loop,
        )
        self.ws = loop.run_until_complete(ws_future)
        loop.run_until_complete(
            self.set_uninitiated_handler_status(ready=False))

        log.info(f"Started Minecraft connector on {self.host}:{self.port}")
Example 18
def bootstrap_dht_nodes(
        loop: asyncio.BaseEventLoop,
        table_client: azure.storage.table.TableService,
        ipaddress: str,
        num_attempts: int) -> None:
    """Bootstrap DHT router nodes
    :param asyncio.BaseEventLoop loop: event loop
    :param azure.storage.table.TableService table_client: table client
    :param str ipaddress: ip address
    :param int num_attempts: number of attempts
    """
    found_self = False
    dht_nodes = []
    try:
        entities = table_client.query_entities(
            _STORAGE_CONTAINERS['table_dht'],
            filter='PartitionKey eq \'{}\''.format(_PARTITION_KEY))
    except azure.common.AzureMissingResourceHttpError:
        pass
    else:
        for entity in entities:
            dht_nodes.append((entity['RowKey'], entity['Port']))
            if entity['RowKey'] == ipaddress:
                found_self = True
    if not found_self:
        entity = {
            'PartitionKey': _PARTITION_KEY,
            'RowKey': ipaddress,
            'Port': _DEFAULT_PORT_BEGIN,
        }
        table_client.insert_entity(_STORAGE_CONTAINERS['table_dht'], entity)
        dht_nodes.insert(0, (ipaddress, _DEFAULT_PORT_BEGIN))
    # TODO handle vm/ips no longer in pool
    for node in dht_nodes:
        if len(_DHT_ROUTERS) >= 3:
            break
        add_dht_node(node[0], node[1])
    # ensure at least 3 DHT router nodes if possible
    if len(dht_nodes) < 3:
        num_attempts += 1
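        # Back off: retry after 1 s for the first ~600 attempts, then every 10 s, then every 30 s.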
        if num_attempts < 600:
            delay = 1
        elif num_attempts < 1200:
            delay = 10
        else:
            delay = 30
        loop.call_later(
            delay, bootstrap_dht_nodes, loop, table_client, ipaddress,
            num_attempts)
Example 19
def start_child_thread_loop(
        loop: asyncio.BaseEventLoop, break_sign: BreakSign,
        main_fetch_callback: Callable[[Union[Future, SyncFuture]], None],
        child_fetch_callback: Callable[[Union[Future, SyncFuture]], None],
        request_package_list: RequestPackageList, headers: Union[dict, None],
        total_timeout: int, concurrency_number: int) -> None:
    downloader_logger.debug('Async downloader start.')

    asyncio.set_event_loop(loop)
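    # With the loop bound to this worker thread, schedule and run the main fetch task.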
    main_fetch_task = loop.create_task(
        main_fetch(request_package_list, break_sign, child_fetch_callback,
                   headers, total_timeout, concurrency_number))
    main_fetch_task.add_done_callback(main_fetch_callback)
    loop.run_until_complete(main_fetch_task)
    downloader_logger.debug('Async download thread over.')
Example 20
    def __init__(self,
                 loop: BaseEventLoop,
                 host: str,
                 port: int,
                 worker_id: str,
                 request_id: str,
                 grpc_connect_timeout: float,
                 grpc_max_msg_len: int = -1) -> None:
        self._loop = loop
        self._host = host
        self._port = port
        self._request_id = request_id
        self._worker_id = worker_id
        self._function_data_cache_enabled = False
        self._functions = functions.Registry()
        self._shmem_mgr = SharedMemoryManager()

        self._old_task_factory = None

        # We allow the customer to change synchronous thread pool max worker
        # count by setting the PYTHON_THREADPOOL_THREAD_COUNT app setting.
        #   For 3.[6|7|8] the default value is 1.
        #   For 3.9, we don't set this value by default, but we honor the
        #     incoming app setting.
        self._sync_call_tp: concurrent.futures.Executor = (
            self._create_sync_call_tp(self._get_sync_tp_max_workers()))

        self._grpc_connect_timeout: float = grpc_connect_timeout
        # This is set to -1 by default to remove the limitation on msg size
        self._grpc_max_msg_len: int = grpc_max_msg_len
        self._grpc_resp_queue: queue.Queue = queue.Queue()
        self._grpc_connected_fut = loop.create_future()
        self._grpc_thread: threading.Thread = threading.Thread(
            name='grpc-thread', target=self.__poll_grpc)
Example 21
def mock_network_loop(loop: asyncio.BaseEventLoop):
    dht_network: typing.Dict[typing.Tuple[str, int], 'KademliaProtocol'] = {}
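    # Routes datagrams between protocols in memory instead of over real UDP sockets.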

    async def create_datagram_endpoint(proto_lam: typing.Callable[[], 'KademliaProtocol'],
                                       from_addr: typing.Tuple[str, int]):
        def sendto(data, to_addr):
            rx = dht_network.get(to_addr)
            if rx and rx.external_ip:
                # print(f"{from_addr[0]}:{from_addr[1]} -{len(data)} bytes-> {rx.external_ip}:{rx.udp_port}")
                return rx.datagram_received(data, from_addr)

        protocol = proto_lam()
        transport = asyncio.DatagramTransport(extra={'socket': mock_sock})
        transport.is_closing = lambda: False
        transport.close = lambda: mock_sock.close()
        mock_sock.sendto = sendto
        transport.sendto = mock_sock.sendto
        protocol.connection_made(transport)
        dht_network[from_addr] = protocol
        return transport, protocol

    with mock.patch('socket.socket') as mock_socket:
        mock_sock = mock.Mock(spec=socket.socket)
        mock_sock.setsockopt = lambda *_: None
        mock_sock.bind = lambda *_: None
        mock_sock.setblocking = lambda *_: None
        mock_sock.getsockname = lambda: "0.0.0.0"
        mock_sock.getpeername = lambda: ""
        mock_sock.close = lambda: None
        mock_sock.type = socket.SOCK_DGRAM
        mock_sock.fileno = lambda: 7
        mock_socket.return_value = mock_sock
        loop.create_datagram_endpoint = create_datagram_endpoint
        yield
Example 22
    def __init__(self,
                 loop: BaseEventLoop,
                 host: str,
                 port: int,
                 worker_id: str,
                 request_id: str,
                 grpc_connect_timeout: float,
                 grpc_max_msg_len: int = -1) -> None:
        self._loop = loop
        self._host = host
        self._port = port
        self._request_id = request_id
        self._worker_id = worker_id
        self._functions = functions.Registry()

        self._old_task_factory = None

        # We allow the customer to change synchronous thread pool count by
        # PYTHON_THREADPOOL_THREAD_COUNT app setting. The default value is 1.
        self._sync_tp_max_workers: int = self._get_sync_tp_max_workers()
        self._sync_call_tp: concurrent.futures.Executor = (
            concurrent.futures.ThreadPoolExecutor(
                max_workers=self._sync_tp_max_workers))

        self._grpc_connect_timeout: float = grpc_connect_timeout
        # This is set to -1 by default to remove the limitation on msg size
        self._grpc_max_msg_len: int = grpc_max_msg_len
        self._grpc_resp_queue: queue.Queue = queue.Queue()
        self._grpc_connected_fut = loop.create_future()
        self._grpc_thread: threading.Thread = threading.Thread(
            name='grpc-thread', target=self.__poll_grpc)
Example 23
async def test_exectution_limit_once(coresys: CoreSys,
                                     loop: asyncio.BaseEventLoop):
    """Test the ignore conditions decorator."""
    class TestClass:
        """Test class."""
        def __init__(self, coresys: CoreSys):
            """Initialize the test class."""
            self.coresys = coresys
            self.run = asyncio.Lock()

        @Job(limit=JobExecutionLimit.ONCE, on_condition=JobException)
        async def execute(self, sleep: float):
            """Execute the class method."""
            assert not self.run.locked()
            async with self.run:
                await asyncio.sleep(sleep)

    test = TestClass(coresys)
    run_task = loop.create_task(test.execute(0.3))

    await asyncio.sleep(0.1)
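    # The first call is still running, so the ONCE limit must reject this second call.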
    with pytest.raises(JobException):
        await test.execute(0.1)

    await run_task
Example 24
async def loop_set_device(event_loop: asyncio.BaseEventLoop):
    try:
        while event_loop.is_running():
            normalize_volume()
            await asyncio.sleep(CONFIG.SLEEP_INTERVAL)
    except asyncio.CancelledError:
        await asyncio.sleep(0)
Example 25
    async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        trading_pairs: List[str] = await self.get_trading_pairs()
        tasks = [
            ev_loop.create_task(self._listen_trades_for_pair(pair, output))
            for pair in trading_pairs
        ]
        await asyncio.gather(*tasks)
Example 26
async def update_lifetime(loop: asyncio.BaseEventLoop, ev: asyncio.Event,
                          lifetime: int):
    i = 0
    while not ev.is_set():
        logger.info("update lifetime:%d %d", i, lifetime + 20)  # xxx: +20?
        await asyncio.wait([loop.run_in_executor(None, _do), asyncio.sleep(1)])
        i += 1
Example 27
    async def server_loop(self, loop: asyncio.BaseEventLoop):
        self.server = aiohttp.web.Server(self.handle_request)
        runner = aiohttp.web.ServerRunner(self.server)
        await runner.setup()
        site = aiohttp.web.TCPSite(runner, self.config.server.ip,
                                   self.config.server.port)
        await site.start()
        print("==== PyPubSub v/%s starting... ====" % PUBSUB_VERSION)
        print("==== Serving up PubSub goodness at %s:%s ====" %
              (self.config.server.ip, self.config.server.port))
        if self.config.sqs:
            for key, config in self.config.sqs.items():
                loop.create_task(plugins.sqs.get_payloads(self, config))
        self.read_backlog_storage()
        loop.create_task(self.write_backlog_storage())
        await self.poll()
Example 28
def _renew_blob_lease(loop: asyncio.BaseEventLoop,
                      blob_client: azureblob.BlockBlobService,
                      container_key: str, resource: str, blob_name: str):
    """Renew a storage blob lease
    :param asyncio.BaseEventLoop loop: event loop
    :param azureblob.BlockBlobService blob_client: blob client
    :param str container_key: blob container index into _STORAGE_CONTAINERS
    :param str resource: resource
    :param str blob_name: blob name
    """
    try:
        lease_id = blob_client.renew_blob_lease(
            container_name=_STORAGE_CONTAINERS[container_key],
            blob_name=blob_name,
            lease_id=_BLOB_LEASES[resource],
        )
    except azure.common.AzureException as e:
        logger.exception(e)
        _BLOB_LEASES.pop(resource)
        _CBHANDLES.pop(resource)
    else:
        _BLOB_LEASES[resource] = lease_id
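        # Reschedule this renewal in 15 seconds to keep the lease alive.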
        _CBHANDLES[resource] = loop.call_later(15, _renew_blob_lease, loop,
                                               blob_client, container_key,
                                               resource, blob_name)
Example 29
    def feeds_check(*feed_names,
                    data_dir: str = None,
                    loop: asyncio.BaseEventLoop = None):
        """Check feeds for name validity.

        :raises: ValueError if any feed name is invalid
        """
        loop = loop or asyncio.get_event_loop()

        # remove local feeds
        distinct_feeds = list(
            filter(
                lambda f: not FeedManager.feeds_exist(
                    f, data_dir=data_dir, loop=loop), feed_names))

        futures = [
            JSONFeedMetadata.url_exists(feed, loop=loop)
            for feed in distinct_feeds
        ]
        tasks = asyncio.gather(*futures)

        results = loop.run_until_complete(tasks)
        invalid = [
            feed for valid, feed in zip(results, distinct_feeds) if not valid
        ]

        if any(invalid):
            raise ValueError(f"Invalid feeds found: {invalid}")
Example 30
async def request_blob(loop: asyncio.BaseEventLoop, blob: 'AbstractBlob', address: str, tcp_port: int,
                       peer_connect_timeout: float, blob_download_timeout: float,
                       connected_transport: asyncio.Transport = None, connection_id: int = 0,
                       connection_manager: typing.Optional['ConnectionManager'] = None)\
        -> typing.Tuple[int, typing.Optional[asyncio.Transport]]:
    """
    Returns [<downloaded blob>, <keep connection>]
    """

    protocol = BlobExchangeClientProtocol(
        loop, blob_download_timeout, connection_manager
    )
    if connected_transport and not connected_transport.is_closing():
        connected_transport.set_protocol(protocol)
        protocol.transport = connected_transport
        log.debug("reusing connection for %s:%d", address, tcp_port)
    else:
        connected_transport = None
    try:
        if not connected_transport:
            await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
                                   peer_connect_timeout, loop=loop)
        if blob.get_is_verified() or not blob.is_writeable():
            # file exists but not verified means someone is writing right now, give it time, come back later
            return 0, connected_transport
        return await protocol.download_blob(blob)
    except (asyncio.TimeoutError, ConnectionRefusedError, ConnectionAbortedError, OSError):
        return 0, None
Example 31
def task_factory(loop: asyncio.BaseEventLoop, coro: typing.Coroutine):
    """
    Task factory for implementing context processor

    :param loop:
    :param coro:
    :return: new task
    :rtype: :obj:`asyncio.Task`
    """
    # Is not allowed when loop is closed.
    if loop.is_closed():
        raise RuntimeError('Event loop is closed.')

    task = asyncio.Task(coro, loop=loop)

    # Hide factory
    if task._source_traceback:
        del task._source_traceback[-1]

    try:
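        # Copy the calling task's context onto the new task, or seed a fresh one.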
        task.context = asyncio.Task.current_task().context.copy()
    except AttributeError:
        task.context = {CONFIGURED: True}

    return task
Example 32
async def update_lifetime(
    loop: asyncio.BaseEventLoop, ev: asyncio.Event, lifetime: int
):
    i = 0
    while not ev.is_set():
        logger.info("update lifetime:%d %d", i, lifetime + 20)  # xxx: +20?
        await asyncio.wait([loop.run_in_executor(None, _do), asyncio.sleep(1)])
        i += 1
Example 33
def asyncio_shutdown(loop: asyncio.BaseEventLoop = asyncio.get_event_loop()) -> None:
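    # Cancel outstanding tasks and close async generators (and, on 3.9+, the default executor) before closing the loop.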
    try:
        asyncio.runners._cancel_all_tasks(loop)
        loop.run_until_complete(loop.shutdown_asyncgens())

        if sys.version_info.minor > 8:
            loop.run_until_complete(loop.shutdown_default_executor())
    finally:
        asyncio.set_event_loop(None)
        loop.close()
Example 34
    async def gpus_mon(self, loop: asyncio.BaseEventLoop = None,
                       ignore=tuple()):
        subproc_exec = self.async_exec
        nv2cuda, pid2owner = await asyncio.gather(
            self.nv2cuda_coro(subproc_exec),
            self.pid2owner_coro(subproc_exec))

        p = await self.async_exec('nvidia-smi', '-l', '1',
                                  stdout=asyncio.subprocess.PIPE,
                                  stderr=FNULL)
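        # 'nvidia-smi -l 1' streams device stats every second; parse its stdout line by line.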

        loop = loop or asyncio.get_event_loop()
        gpus = dict()
        gpu_nvprocs = dict()
        do_GPUComb = GPUComb not in ignore
        do_GPUProcess = GPUProcess not in ignore
        nvdev = None
        tasks = list()
        seen_pids = list()
        while not self.terminated:
            line = await p.stdout.readline()
            line = line.decode()
            if do_GPUComb:
                nvdev = nv_line2nvdev(line, nvdev)
                nvgpu = nv_line2GPUNv(line, nvdev)

                # a gpu was found in stdout
                if nvgpu:
                    prev_gpu = gpus.get(nvgpu.nvdev, None)
                    # has anything changed?
                    if prev_gpu != nvgpu[1:]:
                        # translate to cuda dev and update gpus
                        gpu = GPUComb(nv2cuda[nvgpu.nvdev], *nvgpu[1:])
                        gpus[nvgpu.nvdev] = nvgpu[1:]

                        # put into change stream
                        await self.change_stream.put(gpu)
                    continue

            if do_GPUProcess:
                nvproc = nv_line2GPUNvProcess(line)
                if nvproc:
                    seen_pids.append(nvproc.pid)
                    tasks.append(
                        loop.create_task(self._nvproc2proc(subproc_exec,
                                                           nvproc,
                                                           pid2owner,
                                                           nv2cuda,
                                                           gpu_nvprocs)))
                    continue

            if tasks:
                await asyncio.wait(tasks)
                tasks.clear()

                dead_pids = set(gpu_nvprocs.keys()).difference(seen_pids)
                for dead_proc in (gpu_nvprocs[pid] for pid in dead_pids):
                    await self.change_stream.put(GPUProcess(dead_proc.pid,
                                                            pid2owner[
                                                                dead_proc.pid],
                                                            nv2cuda[
                                                                dead_proc.nvdev],
                                                            0))
                    gpu_nvprocs.pop(dead_proc.pid)
                seen_pids.clear()
Example 35
    def create_connection(cls, loop: asyncio.BaseEventLoop, **kwargs) -> types.coroutine:
        host = kwargs['host']
        port = kwargs['port']
        return loop.create_connection(lambda: cls(loop), host=host, port=port)