Example #1
0
    def __init__(self,
                 loop: BaseEventLoop,
                 host: str,
                 port: int,
                 worker_id: str,
                 request_id: str,
                 grpc_connect_timeout: float,
                 grpc_max_msg_len: int = -1) -> None:
        """Set up dispatcher state: connection parameters, the function
        registry, the sync-call thread pool, and gRPC bookkeeping.
        """
        self._loop = loop
        self._host = host
        self._port = port
        self._worker_id = worker_id
        self._request_id = request_id

        self._functions = functions.Registry()
        self._old_task_factory = None

        # Synchronous functions run on a dedicated thread pool whose size
        # the customer may override through the PYTHON_THREADPOOL_THREAD_COUNT
        # app setting; the default is 1.
        self._sync_tp_max_workers: int = self._get_sync_tp_max_workers()
        self._sync_call_tp: concurrent.futures.Executor = (
            concurrent.futures.ThreadPoolExecutor(
                max_workers=self._sync_tp_max_workers))

        # gRPC channel configuration; a max message length of -1 (the
        # default) lifts the size limit entirely.
        self._grpc_connect_timeout: float = grpc_connect_timeout
        self._grpc_max_msg_len: int = grpc_max_msg_len
        self._grpc_resp_queue: queue.Queue = queue.Queue()
        self._grpc_connected_fut = loop.create_future()
        self._grpc_thread: threading.Thread = threading.Thread(
            name='grpc-thread', target=self.__poll_grpc)
Example #2
0
    def __init__(self,
                 loop: BaseEventLoop,
                 host: str,
                 port: int,
                 worker_id: str,
                 request_id: str,
                 grpc_connect_timeout: float,
                 grpc_max_msg_len: int = -1) -> None:
        """Initialize dispatcher state for one worker process.

        Args:
            loop: Event loop used to create the gRPC-connected future
                (and presumably to schedule work later — confirm against
                the rest of the class).
            host: gRPC host to connect to.
            port: gRPC port to connect to.
            worker_id: Identifier of this worker instance.
            request_id: Identifier of the originating start request.
            grpc_connect_timeout: Seconds allowed for the gRPC
                connection to be established.
            grpc_max_msg_len: Maximum gRPC message size; -1 (the
                default) removes the limitation.
        """
        self._loop = loop
        self._host = host
        self._port = port
        self._request_id = request_id
        self._worker_id = worker_id
        # Feature flag toggled elsewhere; starts disabled.
        self._function_data_cache_enabled = False
        self._functions = functions.Registry()
        # Manager for shared-memory data transfer with the host.
        self._shmem_mgr = SharedMemoryManager()

        # Saved so the original task factory can be restored later.
        self._old_task_factory = None

        # We allow the customer to change synchronous thread pool max worker
        # count by setting the PYTHON_THREADPOOL_THREAD_COUNT app setting.
        #   For 3.[6|7|8] The default value is 1.
        #   For 3.9, we don't set this value by default but we honor incoming
        #     the app setting.
        self._sync_call_tp: concurrent.futures.Executor = (
            self._create_sync_call_tp(self._get_sync_tp_max_workers()))

        self._grpc_connect_timeout: float = grpc_connect_timeout
        # This is set to -1 by default to remove the limitation on msg size
        self._grpc_max_msg_len: int = grpc_max_msg_len
        # Queue of responses to be drained by the gRPC thread.
        self._grpc_resp_queue: queue.Queue = queue.Queue()
        # Resolved once the gRPC channel is up.
        self._grpc_connected_fut = loop.create_future()
        # Background thread that services the gRPC channel.
        self._grpc_thread: threading.Thread = threading.Thread(
            name='grpc-thread', target=self.__poll_grpc)
Example #3
0
def shared(loop: asyncio.BaseEventLoop):
    """Return a future that resolves to "xxx" roughly 0.2s from now.

    A callback is scheduled on *loop* via ``call_later``; when it fires
    it prints a marker and fulfils the returned future.
    """
    result_future = loop.create_future()

    def _resolve():
        print("called")
        result_future.set_result("xxx")

    loop.call_later(0.2, _resolve)
    return result_future
Example #4
0
async def test_split_rcon(event_loop: asyncio.BaseEventLoop):
    """Check reassembly of a command split across multiple RCON packets.

    Registers one future per sequence number (``seq`` and ``seq + 1``),
    feeds both the same series of split-command parts, and expects each
    future to resolve to a packet whose payload carries the concatenated
    data of all parts.
    """
    rcon_registrar = registrar.Registrar('test')
    future = event_loop.create_future()
    future2 = event_loop.create_future()
    count_packets = 3
    seq = 5
    expected_data = ''
    # One registered future per sequence number we are going to feed.
    await rcon_registrar.register(seq, future)
    await rcon_registrar.register(seq + 1, future2)
    for i in range(count_packets):
        payload_data = f'data from packet #{i};'
        expected_data += payload_data
        # Deliver split part #i to both registered sequence numbers.
        for j in range(2):
            payload = protocol.SplitCommand(seq + j, payload_data,
                                            count_packets, i)
            packet = protocol.Packet(payload)
            # Fire-and-forget: the registrar is presumably expected to
            # buffer parts and resolve the future once all have arrived.
            asyncio.create_task(
                rcon_registrar.incoming(packet.payload.sequence_number,
                                        packet))
    await future
    assert future.result().payload.data == expected_data
    await future2
    assert future2.result().payload.data == expected_data
Example #5
0
async def _stream_media(voice_client: discord.VoiceClient, loop: asyncio.BaseEventLoop, source: YTDLSource):
    """Method which starts the streaming and playback of the provided URL.

    Blocks (asynchronously) until playback completes, then logs any
    player error reported by the voice client.

    Args:
        voice_client (discord.VoiceClient): The voice client used by the bot.
        loop (asyncio.BaseEventLoop): Loop used to create the completion
            future; falls back to asyncio.get_event_loop() when falsy.
        source (YTDLSource): Audio source which contains the audio stream.
    """

    future = loop.create_future() if loop else asyncio.get_event_loop().create_future()
    # The `after` callback fires when playback finishes; it resolves the
    # future with the reported error (None on clean completion).
    # NOTE(review): discord.py invokes `after` from its player thread —
    # set_result() there is not loop-safe; consider call_soon_threadsafe.
    voice_client.play(source, after=lambda e: future.set_result(e))

    # Awaiting the future directly replaces the original
    # `asyncio.wait_for(future, timeout=None)`: with timeout=None no
    # TimeoutError can ever be raised, so that except branch was dead code.
    error = await future
    if error:
        log.error("Player error: %s", error)
Example #6
0
    def __init__(self,
                 loop: BaseEventLoop,
                 host: str,
                 port: int,
                 worker_id: str,
                 request_id: str,
                 grpc_connect_timeout: float,
                 grpc_max_msg_len: int = -1) -> None:
        """Capture connection parameters and prepare dispatcher state."""
        self._loop = loop
        self._host = host
        self._port = port
        self._worker_id = worker_id
        self._request_id = request_id

        self._functions = functions.Registry()
        self._old_task_factory = None

        # Synchronous functions are executed on a single-threaded pool on
        # purpose: synchronous Python user code is rarely written with
        # concurrency in mind, so capping at one worker prevents races
        # between two sync invocations in the same worker process.
        # Moreover, with CPython's GIL extra threads only pay off for
        # IO-bound work, which is better served by the async code path.
        self._sync_call_tp = concurrent.futures.ThreadPoolExecutor(
            max_workers=1)

        self._grpc_connect_timeout = grpc_connect_timeout
        # -1 (the default) disables gRPC's message-size limitation.
        self._grpc_max_msg_len = grpc_max_msg_len
        self._grpc_resp_queue: queue.Queue = queue.Queue()
        self._grpc_connected_fut = loop.create_future()
        self._grpc_thread = threading.Thread(name='grpc-thread',
                                             target=self.__poll_grpc)