Example #1
class ConnectionPool:

    def __init__(self):
        self._config_dict = None
        self._queue = Queue()
        self._outstanding_connections = WeakSet()

    async def get_conn(self):
        self._check_config()
        try:
            while True:
                conn = self._queue.get_nowait()
                if conn.is_open():
                    break
                try:
                    await conn.close()
                except Exception:
                    l.debug('Exception while closing RethinkDB connection', exc_info=True)
        except QueueEmpty:
            conn = await r.connect(**self._config_dict)
        self._outstanding_connections.add(conn)
        return conn

    async def put_conn(self, conn):
        self._queue.put_nowait(conn)
        self._outstanding_connections.remove(conn)

    def set_config(self, config):
        self._config_dict = config

    def get_config(self):
        self._check_config()
        return self._config_dict

    async def teardown(self):
        while True:
            try:
                conn = self._queue.get_nowait()
            except QueueEmpty:
                break
            self._outstanding_connections.add(conn)
        for conn in self._outstanding_connections:
            try:
                await conn.close()
            except Exception:
                l.debug('Exception while closing RethinkDB connection', exc_info=True)

    def _check_config(self):
        assert self._config_dict is not None, "Did you remember to run resync.setup()?"
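A minimal usage sketch for the pool above, assuming the surrounding module imports the RethinkDB driver as `r` (as `get_conn` already does) and that the config has been set first; the host/port values and the `users` table are placeholders:

import asyncio

async def fetch_users(pool):
    conn = await pool.get_conn()
    try:
        # Standard RethinkDB query shape; any query works here.
        return await r.table('users').run(conn)
    finally:
        # Hand the connection back so other callers can reuse it.
        await pool.put_conn(conn)

pool = ConnectionPool()
pool.set_config({'host': 'localhost', 'port': 28015})
users = asyncio.get_event_loop().run_until_complete(fetch_users(pool))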
Example #2
class MockConnection:
    def __init__(self):
        self.results = Queue()
        self.completed_queries = []

    async def general_query(self, query, *args, **kwargs):
        self.completed_queries.append((query, *args, kwargs))
        return self.results.get_nowait()

    def __getattr__(self, item):
        if item in ('execute', 'fetch', 'fetchval', 'fetchrow'):
            return self.general_query

        raise Exception("Sorry, {} doesn't exist yet. "
                        "Consider making a PR.".format(item))

    async def prepare(self, query, *, timeout=None):
        return MockPreparedStatement(self, query, None)

    async def close(self):
        pass
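A sketch of how such a mock is typically driven in a test; it assumes the mock stands in for an asyncpg-style connection, and the query text is a placeholder:

import asyncio

async def demo():
    conn = MockConnection()
    # Pre-load the canned result that the next query call will return.
    conn.results.put_nowait([{'id': 1}])
    rows = await conn.fetch('SELECT * FROM t WHERE id = $1', 1)
    assert rows == [{'id': 1}]
    # The mock records every call for later assertions.
    assert conn.completed_queries == [('SELECT * FROM t WHERE id = $1', 1, {})]

asyncio.get_event_loop().run_until_complete(demo())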
Example #3
class PyCyrlMultiHander(object):
    def __init__(self, max_query=10, loop=None):
        # get_event_loop() as a default argument would be evaluated once at
        # import time; resolve the loop at call time instead.
        self.loop = loop if loop is not None else get_event_loop()
        self.multi = pycurl.CurlMulti()
        self.share = pycurl.CurlShare()

        self.share.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE)
        self.share.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_DNS)
        self.share.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_SSL_SESSION)

        self.handles = set()
        self.waiting_handles = Queue(loop=self.loop)
        self.running = True
        self.max_active = max_query

    def query(self, handle, future):
        def manage_callback(status, headers, data):
            handle.close()
            future.set_result((status, headers, data))

        def failed_callback(ex):
            handle.close()
            future.set_exception(ex)

        handle.cb = manage_callback
        handle.f_cb = failed_callback

        # put the query in the waiting queue (it is launched as soon as a
        # slot is free) and wait for the processing to finish
        yield from wait((future, self.waiting_handles.put(handle)),
                        loop=self.loop)
        return future

    def close(self):
        self.multi.close()
        self.share.close()

    def _perform_loop(self):
        ret, num_handles = self.multi.perform()
        # libcurl < 7.20.0 needed this loop (see https://curl.haxx.se/libcurl/c/libcurl-errors.html#CURLMCALLMULTIPERFORM)
        while ret == pycurl.E_CALL_MULTI_PERFORM:
            ret, num_handles = self.multi.perform()
        return ret, num_handles

    def _try_load_queries(self, wait=True, timeout=1.0):
        added = 0
        while len(self.handles) < self.max_active:
            try:
                if wait:
                    handler = yield from wait_for(self.waiting_handles.get(),
                                                  timeout,
                                                  loop=self.loop)
                else:
                    handler = self.waiting_handles.get_nowait()
                # only wait once
                wait = False
                # needed to keep reference count
                self.handles.add(handler)
                self.multi.add_handle(handler)
                added += 1
            except QueueEmpty:
                break
            except TimeoutError:
                break

        if added > 0:
            ret, num_handles = self._perform_loop()
            if ret > 0:
                raise ConnectionError("pycurl failed", ret)

    def perform(self, timeout=0.1):
        """
        Loop on waiting handles to process them until they are no more waiting one and all send are finished.
        It's never finished until closed for end of all processing, don't wait for it on loop
        :param timeout: the timeout for the loop
        :return: Nothing
        """
        while self.running:
            if len(self.handles) == 0:
                # no activity, just sleep, for new queries
                yield from self._try_load_queries(True, timeout)
            else:
                yield from self._try_load_queries(False)
            if len(self.handles) == 0:
                continue
            # wait for something to happen
            selected = self.multi.select(timeout)
            if selected < 0:
                continue
            # it was not a select time out, something to do
            ret, num_handles = self._perform_loop()
            if ret > 0:
                raise ConnectionError("pycurl failed", ret)
            if len(self.handles) == 0:
                continue
            else:
                # some handles to process
                (waiting, succeeded, failed) = self.multi.info_read()
                for handle in succeeded:
                    self.handles.remove(handle)
                    self.multi.remove_handle(handle)
                    status = handle.getinfo(pycurl.RESPONSE_CODE)
                    content_type, decoded = decode_body(handle)
                    if not self.running:
                        # is stopped, just swallow content
                        continue
                    elif status >= 200 and status < 300:
                        handle.cb(status, handle.headers, decoded)
                    elif status >= 300:
                        handle.f_cb(
                            NexusException(
                                status,
                                decoded,
                                content_type,
                                http_message=handle.headers.pop('__STATUS__'),
                                url=handle.getinfo(pycurl.EFFECTIVE_URL)))
                for handle, code, message in failed:
                    self.handles.remove(handle)
                    self.multi.remove_handle(handle)
                    ex = ConnectionError(code, message)
                    ex.url = handle.getinfo(pycurl.EFFECTIVE_URL)
                    ex.code = code
                    handle.f_cb(ex)
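Because this class predates async/await (its methods are generator-based coroutines), a driver would look roughly like the sketch below; it assumes an asyncio version that still schedules generator coroutines, and `make_handle()` is a hypothetical helper that returns a configured pycurl.Curl with the `.headers` attribute the callbacks expect:

import asyncio

loop = asyncio.get_event_loop()
multi = PyCyrlMultiHander(max_query=5, loop=loop)
# perform() loops until .running is cleared, so run it as a background task.
asyncio.ensure_future(multi.perform(), loop=loop)

future = loop.create_future()
loop.run_until_complete(multi.query(make_handle(), future))
status, headers, data = future.result()
multi.running = False
multi.close()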
Example #4
class ClientConnectionPool:

    def __init__(self, loop, scheme, host, port, ssl=None, max_size=0):
        self.loop = loop
        self.scheme = scheme
        self.host = host if isinstance(host, str) else host.decode()
        self.port = int(port)
        self.ssl = self._ssl_option(ssl)
        self.max_size = max_size
        self._idle_connections = Queue(maxsize=max_size)
        self.disposed = False

    def _ssl_option(self, ssl):
        if self.scheme == b'https':
            if ssl is None:
                return SECURE_SSLCONTEXT
            if ssl is False:
                return INSECURE_SSLCONTEXT
            if isinstance(ssl, SSLContext):
                return ssl
            raise InvalidArgument('Invalid ssl argument, expected one of: '
                                  '{None, False, instance of ssl.SSLContext}')
        if ssl:
            raise InvalidArgument('SSL argument specified for non-https scheme.')
        return None

    def _get_connection(self):
        # if there are no connections, let QueueEmpty exception happen
        # if all connections are closed, remove all of them and let QueueEmpty exception happen
        while True:
            connection = self._idle_connections.get_nowait()  # type: ClientConnection

            if connection.open:
                logger.debug(f'Reusing connection {id(connection)} to: {self.host}:{self.port}')
                return connection

    def try_return_connection(self, connection):
        if self.disposed:
            return

        try:
            self._idle_connections.put_nowait(connection)
        except QueueFull:
            pass

    async def get_connection(self):
        try:
            return self._get_connection()
        except QueueEmpty:
            return await self.create_connection()

    async def create_connection(self):
        logger.debug(f'Creating connection to: {self.host}:{self.port}')
        transport, connection = await self.loop.create_connection(
            lambda: ClientConnection(self.loop, self),
            self.host,
            self.port,
            ssl=self.ssl)
        await connection.ready.wait()
        # NB: a newly created connection is going to be used by a request-response cycle;
        # so we don't put it inside the pool (since it's not immediately reusable for other requests)
        return connection

    def dispose(self):
        self.disposed = True
        while True:
            try:
                connection = self._idle_connections.get_nowait()
            except QueueEmpty:
                break
            else:
                logger.debug(f'Closing connection {id(connection)} to: {self.host}:{self.port}')
                connection.close()
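A sketch of the intended flow, assuming `ClientConnection` exposes the `open` flag and `ready` event used above and that a server is reachable at the given host/port (placeholders):

import asyncio

async def demo(loop):
    pool = ClientConnectionPool(loop, b'http', 'localhost', 80)
    connection = await pool.get_connection()   # pool empty: a new connection
    # ... drive one request-response cycle over `connection` ...
    pool.try_return_connection(connection)     # park it as idle
    reused = await pool.get_connection()       # served from the idle queue
    assert reused is connection
    pool.dispose()

loop = asyncio.get_event_loop()
loop.run_until_complete(demo(loop))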
Example #5
from sys import stdin, stdout
from asyncio import Queue
suma = 0
harrys = Queue()
monks = []
n = int(stdin.readline().strip())
for x in stdin.readline().split(' '):
    num = int(x)
    harrys.put_nowait(num)
Q, target = map(int, stdin.readline().split(' '))
res = -1
if suma == target:
    res = 0
else:
    for q in range(Q):
        op = stdin.readline().strip()
        if op == "Harry":
            num = harrys.get_nowait()
            monks.append(num)
            suma += num
        else:
            num = monks.pop()
            suma -= num
        if suma == target:
            res = len(monks)
            break
stdout.write(str(res) + '\n')
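A worked example of the I/O contract this solution assumes. Given the input below, the three "Harry" operations push 1, 2 and 3 onto the monks stack, so suma reaches the target 6 with three numbers held and the program prints 3:

3
1 2 3
3 6
Harry
Harry
Harry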
Example #6
class LiveChatAsync:
    '''LiveChatAsync object fetches chat data and stores them
    in a buffer with asyncio.

    Parameter
    ---------
    video_id : str

    seektime : int
        start position of fetching chat (seconds).
        This option is valid for archived chat only.
        If a negative value is given, chat data posted before the start of
        the broadcast will be retrieved as well.

    processor : ChatProcessor

    buffer : Buffer
        buffer of chat data fetched in the background.

    interruptable : bool
        Allows keyboard interrupts.
        Set this parameter to False if the signal handler conflicts with
        your own threading code.

    callback : func
        function called periodically from _listen().

    done_callback : func
        function called when listener ends.

    exception_handler : func

    direct_mode : bool
        If True, invoke the specified callback function without using the buffer.
        A callback is required; if it is not set, IllegalFunctionCall will be raised.

    force_replay : bool
        force fetching archived chat data, even if the specified video is live.

    topchat_only : bool
        If True, get only top chat.

    replay_continuation : str
        If this parameter is not None, the processor will attempt to fetch
        chat data starting from this continuation.
        This parameter is only allowed in archived mode.

    Attributes
    ---------
    _is_alive : bool
        Flag to stop getting chat.
    '''

    _setup_finished = False

    def __init__(self,
                 video_id,
                 seektime=-1,
                 processor=DefaultProcessor(),
                 buffer=None,
                 client=httpx.AsyncClient(http2=True),
                 interruptable=True,
                 callback=None,
                 done_callback=None,
                 exception_handler=None,
                 direct_mode=False,
                 force_replay=False,
                 topchat_only=False,
                 logger=config.logger(__name__),
                 replay_continuation=None):
        self._client: httpx.AsyncClient = client
        self._video_id = util.extract_video_id(video_id)
        self.seektime = seektime
        if isinstance(processor, tuple):
            self.processor = Combinator(processor)
        else:
            self.processor = processor
        self._buffer = buffer
        self._callback = callback
        self._done_callback = done_callback
        self._exception_handler = exception_handler
        self._direct_mode = direct_mode
        self._is_alive = True
        self._is_replay = force_replay or (replay_continuation is not None)
        self._parser = Parser(is_replay=self._is_replay)
        self._pauser = Queue()
        self._pauser.put_nowait(None)
        self._first_fetch = replay_continuation is None
        self._fetch_url = config._sml if replay_continuation is None else config._smr
        self._topchat_only = topchat_only
        self._dat = ''
        self._last_offset_ms = 0
        self._logger = logger
        self.exception = None
        self.continuation = replay_continuation
        LiveChatAsync._logger = logger

        if exception_handler:
            self._set_exception_handler(exception_handler)
        if interruptable:
            signal.signal(signal.SIGINT,
                          (lambda a, b: self._keyboard_interrupt()))
        self._setup()

    def _setup(self):
        # An exception is raised when direct mode is true and no callback is set.
        if self._direct_mode:
            if self._callback is None:
                raise exceptions.IllegalFunctionCall(
                    "When direct_mode=True, callback parameter is required.")
        else:
            # Create a default buffer if `direct_mode` is False and no buffer is set.
            if self._buffer is None:
                self._buffer = Buffer(maxsize=20)
            if self._callback is not None:
                # Create a loop task that feeds the callback.
                loop = asyncio.get_event_loop()
                loop.create_task(self._callback_loop(self._callback))
        # Start a loop task for _listen()
        loop = asyncio.get_event_loop()
        self.listen_task = loop.create_task(self._startlisten())
        # Register add_done_callback
        if self._done_callback is None:
            self.listen_task.add_done_callback(self._finish)
        else:
            self.listen_task.add_done_callback(self._done_callback)

    async def _startlisten(self):
        """Fetch first continuation parameter,
        create and start _listen loop.
        """
        if not self.continuation:
            channel_id = await util.get_channelid_async(
                self._client, self._video_id)
            self.continuation = liveparam.getparam(self._video_id,
                                                   channel_id,
                                                   past_sec=3)

        await self._listen(self.continuation)

    async def _listen(self, continuation):
        ''' Fetch chat data and store them into buffer,
        get next continuation parameter and loop.

        Parameter
        ---------
        continuation : str
            parameter for next chat data
        '''
        try:
            async with self._client as client:
                while (continuation and self._is_alive):
                    continuation = await self._check_pause(continuation)
                    contents = await self._get_contents(
                        continuation, client, headers)
                    metadata, chatdata = self._parser.parse(contents)
                    continuation = metadata.get('continuation')
                    if continuation:
                        self.continuation = continuation
                    timeout = metadata['timeoutMs'] / 1000
                    chat_component = {
                        "video_id": self._video_id,
                        "timeout": timeout,
                        "chatdata": chatdata
                    }
                    time_mark = time.time()
                    if self._direct_mode:
                        processed_chat = self.processor.process(
                            [chat_component])
                        if isinstance(processed_chat, tuple):
                            await self._callback(*processed_chat)
                        else:
                            await self._callback(processed_chat)
                    else:
                        await self._buffer.put(chat_component)
                    diff_time = timeout - (time.time() - time_mark)
                    await asyncio.sleep(diff_time)
                    self._last_offset_ms = metadata.get('last_offset_ms', 0)
        except exceptions.ChatParseException as e:
            self._logger.debug(f"[{self._video_id}]{str(e)}")
            raise
        except Exception:
            self._logger.error(f"{traceback.format_exc(limit=-1)}")
            raise

        self._logger.debug(f"[{self._video_id}] finished fetching chat.")

    async def _check_pause(self, continuation):
        if self._pauser.empty():
            '''pause'''
            await self._pauser.get()
            '''resume:
                prohibit from blocking by putting None into _pauser.
            '''
            self._pauser.put_nowait(None)
            if not self._is_replay:
                async with self._client as client:
                    channel_id = await util.get_channelid_async(
                        client, self._video_id)
                    continuation = liveparam.getparam(self._video_id,
                                                      channel_id,
                                                      past_sec=3)

        return continuation

    async def _get_contents(self, continuation, client, headers):
        '''Get 'continuationContents' from livechat json.
           If contents is None at first fetching,
           try to fetch archive chat data.

          Return:
          -------
            'continuationContents' which includes metadata & chatdata.
        '''
        livechat_json = await self._get_livechat_json(
            continuation,
            client,
            replay=self._is_replay,
            offset_ms=self._last_offset_ms)
        contents, dat = self._parser.get_contents(livechat_json)
        if self._dat == '' and dat:
            self._dat = dat
        if self._first_fetch:
            if contents is None or self._is_replay:
                '''Try to fetch archive chat data.'''
                self._parser.is_replay = True
                self._fetch_url = config._smr
                channelid = await util.get_channelid_async(
                    client, self._video_id)
                continuation = arcparam.getparam(self._video_id, self.seektime,
                                                 self._topchat_only, channelid)
                livechat_json = (await self._get_livechat_json(
                    continuation,
                    client,
                    replay=True,
                    offset_ms=self.seektime * 1000))
                reload_continuation = self._parser.reload_continuation(
                    self._parser.get_contents(livechat_json)[0])
                if reload_continuation:
                    livechat_json = (await self._get_livechat_json(
                        reload_continuation, client, headers))
                contents, _ = self._parser.get_contents(livechat_json)
                self._is_replay = True
            self._first_fetch = False
        return contents

    async def _get_livechat_json(self,
                                 continuation,
                                 client,
                                 replay: bool,
                                 offset_ms: int = 0):
        '''
        Get json which includes chat data.
        '''
        livechat_json = None
        if offset_ms < 0:
            offset_ms = 0
        param = util.get_param(continuation,
                               dat=self._dat,
                               replay=replay,
                               offsetms=offset_ms)
        for _ in range(MAX_RETRY + 1):
            try:
                resp = await client.post(self._fetch_url, json=param)
                livechat_json = resp.json()
                break
            except (json.JSONDecodeError, httpx.HTTPError):
                await asyncio.sleep(2)
                continue
        else:
            self._logger.error(f"[{self._video_id}]" f"Exceeded retry count.")
            raise exceptions.RetryExceedMaxCount()
        return livechat_json

    async def _callback_loop(self, callback):
        """ If a callback is specified in the constructor,
        it throws chat data at regular intervals to the
        function specified in the callback in the backgroun

        Parameter
        ---------
        callback : func
            function to which the processed chat data is passed.
        """
        while self.is_alive():
            items = await self._buffer.get()
            processed_chat = self.processor.process(items)
            if isinstance(processed_chat, tuple):
                await self._callback(*processed_chat)
            else:
                await self._callback(processed_chat)

    async def get(self):
        """
        Retrieves data from the buffer,
        throws it to the processor,
        and returns the processed chat data.

        Returns
             : Chat data processed by the Processor
        """
        if self._callback is None:
            if self.is_alive():
                items = await self._buffer.get()
                return self.processor.process(items)
            else:
                return []
        raise exceptions.IllegalFunctionCall(
            "Callback parameter is already set, so get() cannot be performed.")

    def is_replay(self):
        return self._is_replay

    def pause(self):
        if self._callback is None:
            return
        if not self._pauser.empty():
            self._pauser.get_nowait()

    def resume(self):
        if self._callback is None:
            return
        if self._pauser.empty():
            self._pauser.put_nowait(None)

    def is_alive(self):
        return self._is_alive

    def _finish(self, sender):
        '''Called when the _listen() task finished.'''
        try:
            self._task_finished()
        except CancelledError:
            self._logger.debug(f'[{self._video_id}] cancelled:{sender}')

    def terminate(self):
        if not self.is_alive():
            return
        if self._pauser.empty():
            self._pauser.put_nowait(None)
        self._is_alive = False
        self._buffer.put_nowait({})
        self.processor.finalize()

    def _keyboard_interrupt(self):
        self.exception = exceptions.ChatDataFinished()
        self.terminate()

    def _task_finished(self):
        if self.is_alive():
            self.terminate()
        try:
            self.listen_task.result()
        except Exception as e:
            self.exception = e
            if not isinstance(e, exceptions.ChatParseException):
                self._logger.error(f'Internal exception - {type(e)}{str(e)}')
        self._logger.info(f'[{self._video_id}] finished.')

    def raise_for_status(self):
        if self.exception is not None:
            raise self.exception

    @classmethod
    def _set_exception_handler(cls, handler):
        loop = asyncio.get_event_loop()
        loop.set_exception_handler(handler)
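A minimal usage sketch in the style this API suggests (the video id is a placeholder, and the attribute names on the processed chat follow the library's DefaultProcessor, which is an assumption here):

import asyncio

async def on_chat(chatdata):
    # Item attributes assume the DefaultProcessor's chat item shape.
    for c in chatdata.items:
        print(c.datetime, c.author.name, c.message)

async def main():
    chat = LiveChatAsync("VIDEO_ID", callback=on_chat)
    while chat.is_alive():
        await asyncio.sleep(3)
    chat.raise_for_status()   # re-raise whatever ended the listener

asyncio.get_event_loop().run_until_complete(main())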
Example #7
class Executor:
    '''
    Executor is a class for executing a set of tasks that are
    taken from a Lira store and put back there after successful
    completion. For correct operation the following members must
    be set:

    lira - the Lira object that tasks are taken from and put
           back to
    fun  - a function that executes a task and returns True on
           success and False otherwise; if asynchronous methods
           are used, this function must be asynchronous as well

    Additionally, the following members can be set:

    name    - the executor's name (used in progress messages
              when silent = False)
    silent  - flag indicating whether progress messages should
              be printed
    workerc - the number of workers (5 by default) that will
              execute tasks from the queue (used only in the
              extasks method)

    A task can be any object that has a boolean member done
    '''
    def __init__(self, lira: Lira, fun, name='Ex', silent=True, workerc=5):
        self.lira = lira
        self.fun = fun
        self.name = name
        self.silent = silent
        self.workerc = workerc
        self.stopflag = False
        self.stoplock = Lock()
        self.taskc = None  # task count
        self.compc = None  # complete task count
        self.btime = None
        self.que = Queue()
        self.workers = []

    def stop(self):
        'Stops task execution'
        with self.stoplock:
            self.stopflag = True
        for w in self.workers:
            w.cancel()
        return

    def stopped(self):
        'Checks whether task execution has been stopped'
        with self.stoplock:
            res = self.stopflag
        return res

    def ex(self, id, outcat) -> bool:
        'Executes task id and reports success'
        task = self.lira.get(id)
        if task is None:
            raise NoneTaskException(id)

        if task.done:
            return True

        result = self.fun(task)
        if not result:
            self._print_task_message(task, False)
            return False

        task.done = True
        self.lira.put(task, id, cat=outcat)
        if self.compc is not None:
            self.compc += 1
        self._print_task_message(task, True)
        return True

    async def ex_async(self, id, outcat) -> bool:
        '''
        Executes task id and reports success
        (asynchronous version)
        '''
        task = self.lira.get(id)
        if task is None:
            raise NoneTaskException(id)

        if task.done:
            return True

        try:
            result = await self.fun(task)
        except CancelledError:
            raise
        except Exception as e:
            print(e)
            raise

        if not result:
            self._print_task_message(task, False)
            return False

        task.done = True
        self.lira.put(task, id, cat=outcat)
        if self.compc is not None:
            self.compc += 1
        self._print_task_message(task, True)
        return True

    async def exque(self, outcat):
        try:
            while not self.stopped():
                try:
                    id = await self.que.get()
                    await self.ex_async(id, outcat)
                    self.que.task_done()
                except CancelledError:
                    if not self.stopped():
                        return
                    self.que.task_done()

            while not self.que.empty():
                self.que.get_nowait()
                self.que.task_done()
        except Exception as e:
            print(e)

        return

    async def extasks(self, cat, outcat):
        '''
        Builds a queue of tasks of the given category
        (all objects are taken from lira) and executes
        them asynchronously
        '''
        if not self.silent:
            print('\n(%s) BEGIN\n' % self.name, flush=True)

        # Build the queue
        self.que = Queue()

        objs = self.lira[cat]
        if len(objs) != 0:
            for id in objs:
                self.que.put_nowait(id)
            self.taskc = self.que.qsize()
            self.compc = 0
            self.btime = dt.datetime.now()

            # Create the workers
            with self.stoplock:
                self.stopflag = False

            self.workers = [
                create_task(self.exque(outcat)) for _ in range(self.workerc)
            ]

            # Finish up
            await self.que.join()
            for w in self.workers:
                w.cancel()

            await gather(*self.workers)
            self.taskc = None
            self.compc = None
            self.btime = None

        if not self.silent:
            print('\n(%s) END\n' % self.name, flush=True)

        return

    def run_que(self, outcat):
        self.workers = [
            create_task(self.exque(outcat))
            for _ in range(self.workerc)
        ]
        return

    def put(self, task):
        id = self.lira.put(task)
        self.que.put_nowait(id)
        return

    def putid(self, id):
        self.que.put_nowait(id)
        return

    async def stop_que(self):
        for w in self.workers:
            w.cancel()
        await gather(*self.workers)
        return

    async def join_que(self):
        await self.que.join()
        await self.stop_que()
        return

    def _print_task_message(self, task, done):
        if self.silent:
            return
        done = 'Done:' if done else 'Fail:'
        try:
            if self.taskc is not None and self.btime is not None:
                delta = (dt.datetime.now() - self.btime).total_seconds()
                speed = self.compc / delta
                still = (self.taskc - self.compc) / speed
                print('(%s | %6.2f%% : %5.1fs) %s %s' %
                      (self.name, 100 * self.compc / self.taskc, still, done,
                       str(task)))
            else:
                print('(%s) %s %s' % (self.name, done, str(task)))
        except Exception as e:
            print(e)
        return
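A minimal usage sketch, assuming an existing Lira store bound to `lira` with tasks filed under a 'todo' category (both category names are placeholders):

import asyncio

async def handle(task):
    # ... do the actual work here ...
    return True   # report success so the task is marked done

async def main():
    ex = Executor(lira, handle, name='Demo', silent=False)
    await ex.extasks('todo', 'done')

asyncio.run(main())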
Example #8
class Platform:
    service_running = False
    match_updates = None
    match_updates_faulty = None
    worker_count = 20
    task_queue = None
    proxy_endpoint = None

    def __init__(self, region, platforms, handler):
        self.name = region
        self.platforms = platforms
        self.handler = handler
        self.logging = logging.getLogger("%s" % region)
        self._worker = []  # Worker promises
        self.updater = None  # Updater promise
        self.retry_after = datetime.now()
        self.proxy = handler.proxy
        self.endpoint_url = (
            f"https://{self.name}.api.riotgames.com/lol/match/v5/matches/%s_%s/timeline"
        )

    async def init(self):
        """Init background runner."""
        self.task_queue = Queue()
        self.match_updates = Queue()
        self.match_updates_faulty = Queue()
        self.logging.info("Ready.")

    async def shutdown(self):
        self.logging.info("Shutdown")
        self.service_running = False
        await asyncio.gather(*self._worker, self.updater)
        await self.flush_tasks()

    async def start(self):
        """Start the service calls."""
        if not self.service_running:
            self.service_running = True
            self.logging.info("Started service calls.")
            self.proxy_endpoint = await self.handler.proxy.get_endpoint(
                server=self.name, zone="match-timeline-v5"
            )
            self.updater = asyncio.create_task(self.task_updater())
            self._worker = [
                asyncio.create_task(self.worker()) for _ in range(self.worker_count)
            ]

    async def stop(self):
        """Halt the service calls."""
        if self.service_running:
            self.service_running = False
            self.logging.info("Stopped service calls.")
            await self.updater
            for worker in self._worker:
                worker.cancel()
            try:
                await asyncio.gather(*self._worker)
            except asyncio.CancelledError:
                pass
            await self.flush_tasks()

    async def task_updater(self):
        """Pull new tasks when the list is empty."""
        self.logging.debug("Task Updater initiated.")
        while self.service_running:
            if self.match_updates.qsize() >= 200:
                await self.flush_tasks()
                continue
            if self.task_queue.qsize() > 200:
                await asyncio.sleep(5)
                continue
            async with self.handler.postgres.acquire() as connection:
                entries = await connection.fetch(
                    """UPDATE %s.match
                            SET reserved_timeline = current_date + INTERVAL '10 minute'
                            FROM (
                                SELECT  match_id,
                                        platform
                                    FROM %s.match
                                    WHERE timeline IS NULL
                                    AND details IS NOT NULL
                                    AND find_fails <= 10
                                    AND (reserved_timeline IS NULL OR reserved_timeline < current_timestamp)
                                    ORDER BY find_fails, match_id DESC
                                    LIMIT $1
                                    ) selection
                        WHERE match.match_id = selection.match_id
                           AND match.platform = selection.platform
                            RETURNING match.platform, match.match_id
                    """
                    % tuple([self.name for _ in range(2)]),
                    1000,
                )
                self.logging.debug(
                    "Refilling tasks [%s -> %s].",
                    self.task_queue.qsize(),
                    self.task_queue.qsize() + len(entries),
                )
                if len(entries) == 0:
                    await asyncio.sleep(30)
                    await self.flush_tasks()
                    continue

                for entry in entries:
                    await self.task_queue.put([entry["platform"], entry["match_id"]])

    async def worker(self):
        """Execute requests."""
        while self.service_running:
            for i in range(10):
                async with aiohttp.ClientSession(
                    headers={"X-Riot-Token": self.handler.api_key}
                ) as session:
                    task = await self.task_queue.get()
                    try:
                        url = self.endpoint_url % (task[0], task[1])
                        response = await self.proxy_endpoint.request(url, session)
                        folder = str(task[1])[:5]
                        path = os.path.join("data", "timeline", task[0], folder)
                        if not os.path.exists(path):
                            os.makedirs(path)
                        filename = os.path.join(path, "%s_%s.json" % (task[0], task[1]))
                        if not os.path.isfile(filename):
                            with open(
                                filename,
                                "w+",
                            ) as file:
                                file.write(json.dumps(response))
                        del response
                        await self.match_updates.put([task[0], task[1]])
                        self.task_queue.task_done()
                    except LimitBlocked as err:
                        self.retry_after = datetime.now() + timedelta(
                            seconds=err.retry_after
                        )
                        await self.task_queue.put(task)
                        self.task_queue.task_done()
                    except aiohttp.ServerDisconnectedError:
                        self.logging.error("Server Disconnected")
                        await self.task_queue.put(task)
                        self.task_queue.task_done()
                    except RatelimitException:
                        self.logging.error("Ratelimit")
                        await self.task_queue.put(task)
                        self.task_queue.task_done()
                    except Non200Exception:
                        self.logging.error("Others")
                        await self.task_queue.put(task)
                        self.task_queue.task_done()
                    except NotFoundException:
                        await self.match_updates_faulty.put([task[0], task[1]])
                        self.task_queue.task_done()
                    except Exception:
                        self.logging.exception("General Exception")
                        await self.task_queue.put(task)
                        self.task_queue.task_done()

    async def flush_tasks(self):
        """Insert results from requests into the db."""
        match_updates = []
        while True:
            try:
                match_updates.append(self.match_updates.get_nowait())
                self.match_updates.task_done()
            except asyncio.QueueEmpty:
                break
        match_not_found = []
        while True:
            try:
                match_not_found.append(self.match_updates_faulty.get_nowait())
                self.match_updates_faulty.task_done()
            except asyncio.QueueEmpty:
                break
        async with self.handler.postgres.acquire() as connection:
            async with connection.transaction():
                if match_updates:
                    # Insert match updates
                    query = await connection.prepare(
                        """UPDATE %s.match
                        SET timeline = TRUE,
                            reserved_timeline = NULL
                            WHERE platform = $1
                            AND match_id = $2
                        """
                        % self.name,
                    )
                    await query.executemany(match_updates)

                if match_not_found:
                    await connection.execute(
                        """UPDATE %s.match
                            SET find_fails = find_fails + 1,
                                reserved_timeline = current_date + INTERVAL '10 minute'
                            WHERE platform::varchar || '_' || match_id::varchar = any($1::varchar[])
                        """
                        % self.name,
                        ["%s_%s" % match for match in match_not_found],
                    )
        if match_updates or match_not_found:
            self.logging.info(
                "Flushing %s match_updates (%s not found).",
                len(match_updates) + len(match_not_found),
                len(match_not_found),
            )
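A sketch of the intended lifecycle, assuming a `handler` object that provides the `proxy`, `postgres` and `api_key` attributes used above:

import asyncio

async def main(handler):
    platform = Platform("europe", ["euw1"], handler)
    await platform.init()      # create the task/result queues
    await platform.start()     # spawn the updater and worker tasks
    await asyncio.sleep(600)   # let the service run for a while
    await platform.stop()      # drain the workers and flush results

asyncio.run(main(handler))     # `handler` supplied by the surrounding app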
Example #9
async def _verify(self, actual: asyncio.Queue, expected: list, items_left: bool):
    await asyncio.sleep(0.001)
    for expected_msg in expected:
        actual_msg = actual.get_nowait()
        assert actual_msg == expected_msg
    assert items_left or actual.empty()
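Driving the helper outside its test class (a sketch; `self` is unused in the body, so None stands in, and the queue contents are placeholders):

import asyncio

async def demo():
    q = asyncio.Queue()
    q.put_nowait(b'msg-1')
    q.put_nowait(b'msg-2')
    await _verify(None, q, expected=[b'msg-1', b'msg-2'], items_left=False)

asyncio.run(demo())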
Example #10
class TorrentClient:
    """
    The torrent client is the local peer that holds peer-to-peer
    connections to download and upload pieces for a given torrent.

    Once started, the client makes periodic announce calls to the tracker
    registered in the torrent meta-data. These calls result in a list of
    peers that should be tried in order to exchange pieces.

    Each received peer is kept in a queue that a pool of PeerConnection
    objects consume. There is a fixed number of PeerConnections that can have
    a connection open to a peer. Since we are not creating expensive threads
    (or worse yet processes) we can create them all at once and they will
    be waiting until there is a peer to consume in the queue.
    """
    def __init__(self, torrent):
        self.tracker = Tracker(torrent)
        # The list of potential peers is the work queue, consumed by the
        # PeerConnections
        self.available_peers = Queue()
        # The list of peers is the list of workers that *might* be connected
        # to a peer. Else they are waiting to consume new remote peers from
        # the `available_peers` queue. These are our workers!
        self.peers = []
        # The piece manager implements the strategy on which pieces to
        # request, as well as the logic to persist received pieces to disk.
        self.piece_manager = PieceManager(torrent)
        self.abort = False

    async def start(self):
        """
        Start downloading the torrent held by this client.

        This results in connecting to the tracker to retrieve the list of
        peers to communicate with. Once the torrent is fully downloaded or
        if the download is aborted this method will complete.
        """
        self.peers = [
            PeerConnection(self.available_peers,
                           self.tracker.torrent.info_hash,
                           self.tracker.peer_id, self.piece_manager,
                           self._on_block_retrieved)
            for _ in range(MAX_PEER_CONNECTIONS)
        ]

        # The time we last made an announce call (timestamp)
        previous = None
        # Default interval between announce calls (in seconds)
        interval = 30 * 60

        while True:
            if self.piece_manager.complete:
                logging.info('Torrent fully downloaded!')
                break
            if self.abort:
                logging.info('Aborting download...')
                break

            current = time.time()
            if (not previous) or (previous + interval < current):
                response = await self.tracker.connect(
                    first=previous if previous else False,
                    uploaded=self.piece_manager.bytes_uploaded,
                    downloaded=self.piece_manager.bytes_downloaded)

                if response:
                    previous = current
                    interval = response.interval
                    self._empty_queue()
                    for peer in response.peers:
                        self.available_peers.put_nowait(peer)
            else:
                await asyncio.sleep(5)
        self.stop()

    def _empty_queue(self):
        while not self.available_peers.empty():
            self.available_peers.get_nowait()

    def stop(self):
        """
        Stop the download or seeding process.
        """
        self.abort = True
        for peer in self.peers:
            peer.stop()
        self.piece_manager.close()
        self.tracker.close()

    def _on_block_retrieved(self, peer_id, piece_index, block_offset, data):
        """
        Callback function called by the `PeerConnection` when a block is
        retrieved from a peer.

        :param peer_id: The id of the peer the block was retrieved from
        :param piece_index: The piece index this block is a part of
        :param block_offset: The block offset within its piece
        :param data: The binary data retrieved
        """
        self.piece_manager.block_received(peer_id=peer_id,
                                          piece_index=piece_index,
                                          block_offset=block_offset,
                                          data=data)
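A minimal entry point, assuming a `Torrent` meta-info class as in the rest of this code base (the file name is a placeholder):

import asyncio

loop = asyncio.get_event_loop()
client = TorrentClient(Torrent('ubuntu.iso.torrent'))
task = loop.create_task(client.start())
try:
    # start() returns when the download completes or is aborted.
    loop.run_until_complete(task)
except KeyboardInterrupt:
    client.stop()
    task.cancel()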
Example #11
class LiveChatAsync:
    '''Fetches chat data of a YouTube live broadcast using asyncio (aiohttp).

    Parameter
    ---------
    video_id : str
        video ID

    seektime : int
        (ignored when fetching live chat)
        Elapsed time (seconds) in the archived chat at which to start
        fetching. If a negative value is given, chat posted before the
        start of the broadcast is retrieved as well.

    processor : ChatProcessor
        object that post-processes the chat data.

    buffer : Buffer(maxsize:20[default])
        buffer that stores chat_component chunks of chat data.
        maxsize : number of chat_components the buffer can hold.
        Defaults to 20; one component covers roughly 5 to 10 seconds.

    interruptable : bool
        whether to allow interruption via Ctrl+C.

    callback : func
        function called automatically at regular intervals from _listen().

    done_callback : func
        callback invoked when the listener ends.

    exception_handler : func
        function that handles exceptions.

    direct_mode : bool
        If True, the callback is invoked without using the buffer.
        In that case the callback is required
        (IllegalFunctionCall is raised if it is not set).

    force_replay : bool
        If True, the archived chat is fetched even when live chat
        is available.

    topchat_only : bool
        If True, only top chat is fetched.

    Attributes
    ---------
    _is_alive : bool
        flag used to stop fetching chat.
    '''

    _setup_finished = False

    def __init__(self, video_id,
                 seektime=0,
                 processor=DefaultProcessor(),
                 buffer=None,
                 interruptable=True,
                 callback=None,
                 done_callback=None,
                 exception_handler=None,
                 direct_mode=False,
                 force_replay=False,
                 topchat_only=False,
                 logger=config.logger(__name__),
                 ):
        self.video_id = video_id
        self.seektime = seektime
        if isinstance(processor, tuple):
            self.processor = Combinator(processor)
        else:
            self.processor = processor
        self._buffer = buffer
        self._callback = callback
        self._done_callback = done_callback
        self._exception_handler = exception_handler
        self._direct_mode = direct_mode
        self._is_alive = True
        self._is_replay = force_replay
        self._parser = Parser(is_replay=self._is_replay)
        self._pauser = Queue()
        self._pauser.put_nowait(None)
        self._setup()
        self._first_fetch = True
        self._fetch_url = "live_chat/get_live_chat?continuation="
        self._topchat_only = topchat_only
        self._logger = logger
        LiveChatAsync._logger = logger

        if not LiveChatAsync._setup_finished:
            LiveChatAsync._setup_finished = True
            if exception_handler:
                self._set_exception_handler(exception_handler)
            if interruptable:
                signal.signal(signal.SIGINT,
                              (lambda a, b: asyncio.create_task(
                                  LiveChatAsync.shutdown(None, signal.SIGINT, b))))
 
    def _setup(self):
        # Raise an exception when direct mode is True and no callback is set.
        if self._direct_mode:
            if self._callback is None:
                raise IllegalFunctionCall(
                    "When direct_mode=True, callback parameter is required.")
        else:
            # Create a default buffer if direct mode is False and no buffer is set.
            if self._buffer is None:
                self._buffer = Buffer(maxsize=20)
            if self._callback is not None:
                # Start a loop task that calls the callback.
                loop = asyncio.get_event_loop()
                loop.create_task(self._callback_loop(self._callback))
        # Start the _listen loop task.
        loop = asyncio.get_event_loop()
        listen_task = loop.create_task(self._startlisten())
        # Register add_done_callback.
        if self._done_callback is None:
            listen_task.add_done_callback(self.finish)
        else:
            listen_task.add_done_callback(self._done_callback)

    async def _startlisten(self):
        """Fetch first continuation parameter,
        create and start _listen loop.
        """
        initial_continuation = liveparam.getparam(self.video_id, 3)
        await self._listen(initial_continuation)

    async def _listen(self, continuation):
        ''' Fetch chat data and store them into buffer,
        get next continuation parameter and loop.

        Parameter
        ---------
        continuation : str
            parameter for next chat data
        '''
        try:
            async with aiohttp.ClientSession() as session:
                while continuation and self._is_alive:
                    continuation = await self._check_pause(continuation)
                    contents = await self._get_contents(
                        continuation, session, headers)
                    metadata, chatdata = self._parser.parse(contents)

                    timeout = metadata['timeoutMs'] / 1000
                    chat_component = {
                        "video_id": self.video_id,
                        "timeout": timeout,
                        "chatdata": chatdata
                    }
                    time_mark = time.time()
                    if self._direct_mode:
                        processed_chat = self.processor.process([chat_component])
                        if isinstance(processed_chat, tuple):
                            await self._callback(*processed_chat)
                        else:
                            await self._callback(processed_chat)
                    else:
                        await self._buffer.put(chat_component)
                    diff_time = timeout - (time.time() - time_mark)
                    await asyncio.sleep(diff_time)
                    continuation = metadata.get('continuation')
        except ChatParseException as e:
            self._logger.debug(f"[{self.video_id}] {str(e)}")
            return
        except (TypeError, json.JSONDecodeError):
            self._logger.error(f"{traceback.format_exc(limit=-1)}")
            return

        self._logger.debug(f"[{self.video_id}] finished fetching chat.")

    async def _check_pause(self, continuation):
        if self._pauser.empty():
            '''pause'''
            await self._pauser.get()
            '''resume:
                prohibit from blocking by putting None into _pauser.
            '''
            self._pauser.put_nowait(None)
            if not self._is_replay:
                continuation = liveparam.getparam(
                    self.video_id, 3, self._topchat_only)
        return continuation

    async def _get_contents(self, continuation, session, headers):
        '''Get 'continuationContents' from livechat json.
           If contents is None at first fetching, 
           try to fetch archive chat data.

          Return:
          -------
            'continuationContents' which includes metadata & chatdata.
        '''
        livechat_json = await self._get_livechat_json(
            continuation, session, headers)
        contents = self._parser.get_contents(livechat_json)
        if self._first_fetch:
            if contents is None or self._is_replay:
                '''Try to fetch archive chat data.'''
                self._parser.is_replay = True
                self._fetch_url = "live_chat_replay/get_live_chat_replay?continuation="
                continuation = arcparam.getparam(
                    self.video_id, self.seektime, self._topchat_only)
                livechat_json = await self._get_livechat_json(
                    continuation, session, headers)
                reload_continuation = self._parser.reload_continuation(
                    self._parser.get_contents(livechat_json))
                if reload_continuation:
                    livechat_json = await self._get_livechat_json(
                        reload_continuation, session, headers)
                contents = self._parser.get_contents(livechat_json)
                self._is_replay = True
            self._first_fetch = False
        return contents

    async def _get_livechat_json(self, continuation, session, headers):
        '''
        Get json which includes chat data.
        '''
        continuation = urllib.parse.quote(continuation)
        livechat_json = None
        status_code = 0
        url =f"https://www.youtube.com/{self._fetch_url}{continuation}&pbj=1"
        for _ in range(MAX_RETRY + 1):
            async with session.get(url ,headers = headers) as resp:
                try:
                    text = await resp.text()
                    livechat_json = json.loads(text)
                    break
                except (ClientConnectorError,json.JSONDecodeError) :
                    await asyncio.sleep(1)
                    continue
        else:
            self._logger.error(f"[{self.video_id}]"
                    f"Exceeded retry count. status_code={status_code}")
            return None
        return livechat_json

    async def _callback_loop(self, callback):
        """ If a callback is specified in the constructor, chat data is
        passed to the function given as the callback at regular intervals
        in the background.

        Parameter
        ---------
        callback : func
            function to which the processed chat data is passed.
        """
        while self.is_alive():
            items = await self._buffer.get()
            processed_chat = self.processor.process(items)
            if isinstance(processed_chat, tuple):
                await self._callback(*processed_chat)
            else:
                await self._callback(processed_chat)

    async def get(self):
        """ Retrieves data from the buffer, passes it to the processor,
        and returns the processed chat data.

        Returns
             : chat data processed by the Processor
        """
        if self._callback is None:
            items = await self._buffer.get()
            return self.processor.process(items)
        raise IllegalFunctionCall(
            "Callback is already registered, so get() cannot be called.")

    def is_replay(self):
        return self._is_replay

    def pause(self):
        if self._callback is None:
            return
        if not self._pauser.empty():
            self._pauser.get_nowait()

    def resume(self):
        if self._callback is None:
            return
        if self._pauser.empty():
            self._pauser.put_nowait(None)
        
    def is_alive(self):
        return self._is_alive

    def finish(self, sender):
        '''Callback invoked when the listener ends.'''
        try:
            self.terminate()
        except CancelledError:
            self._logger.debug(f'[{self.video_id}] cancelled: {sender}')

    def terminate(self):
        '''
        Terminates the listener.
        '''
        self._is_alive = False
        if not self._direct_mode:
            # Put a dummy object into the buffer so that consumers
            # re-check is_alive().
            self._buffer.put_nowait({'chatdata': '', 'timeout': 0})
        self._logger.info(f'[{self.video_id}] finished.')
 
    @classmethod
    def _set_exception_handler(cls, handler):
        loop = asyncio.get_event_loop()
        loop.set_exception_handler(handler)
    
    @classmethod
    async def shutdown(cls, event, sig=None, handler=None):
        cls._logger.debug("shutdown...")
        tasks = [t for t in asyncio.all_tasks()
                 if t is not asyncio.current_task()]
        for task in tasks:
            task.cancel()

        cls._logger.debug("complete remaining tasks...")
        await asyncio.gather(*tasks, return_exceptions=True)
        loop = asyncio.get_event_loop()
        loop.stop()
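The `_pauser` queue in both LiveChatAsync variants implements a one-token gate: pause() steals the token so the fetch loop blocks on get(), and resume() hands it back. The same idiom reduced to a standalone sketch (class and method names here are illustrative, not part of the library):

import asyncio

class PauseGate:
    def __init__(self):
        self._q = asyncio.Queue()
        self._q.put_nowait(None)      # token present -> running

    def pause(self):
        if not self._q.empty():
            self._q.get_nowait()      # steal the token

    def resume(self):
        if self._q.empty():
            self._q.put_nowait(None)  # hand the token back

    async def wait_if_paused(self):
        if self._q.empty():           # no token: block until resume()
            await self._q.get()
            self._q.put_nowait(None)  # keep the gate open afterwards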
Example #12
class WebsocketBase(Client):
    """
    A client for interacting with the rippled WebSocket API.

    :meta private:
    """
    def __init__(self: WebsocketBase, url: str) -> None:
        """
        Constructs a WebsocketBase.

        Arguments:
            url: The URL of the rippled node to submit requests to.
        """
        self.url = url
        self._open_requests: Dict[str, Future[Dict[str, Any]]] = {}
        self._websocket: Optional[WebSocketClientProtocol] = None
        self._handler_task: Optional[Task[None]] = None
        self._messages: Optional[Queue[Dict[str, Any]]] = None

    def is_open(self: WebsocketBase) -> bool:
        """
        Returns whether the client is currently open.

        Returns:
            Whether the client is currently open.
        """
        return (self._handler_task is not None and self._messages is not None
                and self._websocket is not None and self._websocket.open)

    async def _do_open(self: WebsocketBase) -> None:
        """Connects the client to the Web Socket API at the given URL."""
        if self.is_open():
            return

        # open the connection
        self._websocket = await connect(self.url)

        # make a message queue
        self._messages = Queue()

        # start the handler
        self._handler_task = create_task(self._handler())

    async def _do_close(self: WebsocketBase) -> None:
        """Closes the connection."""
        if not self.is_open():
            return
        assert self._handler_task is not None  # mypy
        assert self._websocket is not None  # mypy
        assert self._messages is not None  # mypy

        # cancel the handler
        self._handler_task.cancel()
        self._handler_task = None

        # cancel any pending request Futures
        for future in self._open_requests.values():
            future.cancel()
        self._open_requests = {}

        # clear the message queue
        for _ in range(self._messages.qsize()):
            self._messages.get_nowait()
            self._messages.task_done()
        self._messages = None

        # close the connection
        await self._websocket.close()

    async def _handler(self: WebsocketBase) -> None:
        """
        This is basically a middleware for the websocket library. For all received
        messages we check whether there is an outstanding future we need to resolve,
        and if so do so.

        Then we store the already-parsed JSON in our own queue for generic iteration.

        As long as a given client remains open, this handler will be running as a Task.
        """
        assert self._websocket is not None  # mypy
        assert self._messages is not None  # mypy
        async for response in self._websocket:
            response_dict = json.loads(response)

            # if this response corresponds to request, fulfill the Future
            if "id" in response_dict and response_dict[
                    "id"] in self._open_requests:
                self._open_requests[response_dict["id"]].set_result(
                    response_dict)

            # enqueue the response for the message queue
            self._messages.put_nowait(response_dict)

    def _set_up_future(self: WebsocketBase, request: Request) -> None:
        """
        Only to be called from the public send and request_impl functions.
        Given a request with an ID, ensure that that ID is backed by an open
        Future in self._open_requests.
        """
        if request.id is None:
            return
        request_str = str(request.id)
        if (request_str in self._open_requests
                and not self._open_requests[request_str].done()):
            raise XRPLWebsocketException(
                f"Request {request_str} is already in progress.")
        self._open_requests[request_str] = get_running_loop().create_future()

    async def _do_send_no_future(self: WebsocketBase,
                                 request: Request) -> None:
        assert self._websocket is not None  # mypy
        await self._websocket.send(json.dumps(request_to_websocket(request)))

    async def _do_send(self: WebsocketBase, request: Request) -> None:
        # we need to set up a future here, even if no one cares about it, so
        # that if a user submits a few requests with the same ID they fail.
        self._set_up_future(request)
        await self._do_send_no_future(request)

    async def _do_pop_message(self: WebsocketBase) -> Dict[str, Any]:
        assert self._messages is not None  # mypy
        msg = await self._messages.get()
        self._messages.task_done()
        return msg

    async def request_impl(self: WebsocketBase, request: Request) -> Response:
        """
        Asynchronously submits the request represented by the request to the
        rippled node specified by this client's URL and waits for a response.

        Note: if this is used for an API method that returns many responses, such as
        `subscribe`, this method only returns the first response; all subsequent
        responses will be available if you use the async iterator pattern on this
        client, i.e. `async for message in client`. You can create an async task to
        read messages from subscriptions.

        Arguments:
            request: An object representing information about a rippled request.

        Returns:
            The response from the server, as a Response object.

        Raises:
            XRPLWebsocketException: If there is already an open request by the
                request's ID, or if this WebsocketBase is not open.

        :meta private:
        """
        if not self.is_open():
            raise XRPLWebsocketException("Websocket is not open")

        # if no ID on this request, generate and inject one, and ensure it
        # is backed by a future
        request_with_id = _inject_request_id(request)
        request_str = str(request_with_id.id)
        self._set_up_future(request_with_id)

        # fire-and-forget the send, and await the Future
        create_task(self._do_send_no_future(request_with_id))
        raw_response = await self._open_requests[request_str]

        # remove the resolved Future, hopefully getting it garbage collected
        del self._open_requests[request_str]
        return websocket_to_response(raw_response)
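# A standalone sketch of the Future-correlation pattern that _handler and
# _set_up_future implement above: a response carrying a known "id" resolves
# the matching Future, and every message also lands in the generic queue.
import asyncio
import json

async def demo():
    open_requests = {}
    messages = asyncio.Queue()

    def on_message(raw):
        msg = json.loads(raw)
        if "id" in msg and msg["id"] in open_requests:
            open_requests[msg["id"]].set_result(msg)
        messages.put_nowait(msg)

    open_requests["1"] = asyncio.get_running_loop().create_future()
    on_message('{"id": "1", "result": "pong"}')
    print(await open_requests.pop("1"))  # {'id': '1', 'result': 'pong'}

asyncio.run(demo())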
Beispiel #13
class M3u8Downloader(object):
    def __init__(self, url, path, worker_num, ts_timeout=120, loop=None):
        self.path = path
        self.ts_timeout = ts_timeout
        self.cache_dir = path + ".cache"
        if not os.path.exists(self.cache_dir):
            os.mkdir(self.cache_dir)
        self.loop = loop or asyncio.get_event_loop()
        self.ts_list = M3u8Parser(url).ts
        self.worker_num = worker_num
        self.q = Queue()
        self._ts_name_list = []
        self._fill_q()

    def _fill_q(self):
        for index, ts in enumerate(self.ts_list):
            name = str(index) + ".ts"
            self.q.put_nowait((name, ts))
            self._ts_name_list.append(name)

    async def worker(self):
        async with aiohttp.ClientSession() as session:
            while not self.q.empty():
                name, ts = self.q.get_nowait()
                path = os.path.join(self.cache_dir, name)
                if os.path.exists(path):
                    print("{name} already exists".format(name=name))
                else:
                    try:
                        await self.download(session, path, ts)
                    except Exception:
                        print("retry download {name}".format(name=name))
                        self.q.put_nowait((name, ts))
                self.q.task_done()

    async def download(self, session, path, ts):
        print("start download {p} / {total}".format(p=path,
                                                    total=len(self.ts_list)))
        async with async_timeout.timeout(self.ts_timeout):
            async with session.get(ts, headers=headers) as response:
                with open(path + ".downloading", "wb") as fd:
                    while True:
                        chunk = await response.content.read(1024)
                        if not chunk:
                            break
                        fd.write(chunk)
                os.rename(path + ".downloading", path)

    async def run(self):
        # download all .ts
        print("start download...")
        workers = [
            self.loop.create_task(self.worker())
            for _ in range(self.worker_num)
        ]
        await self.q.join()
        for w in workers:
            w.cancel()
        print("download complete , start merge...")
        self.merge_all_ts()
        print("remove .ts ...")
        shutil.rmtree(self.cache_dir)

    def merge_all_ts(self):
        # Build the ffmpeg argument list directly instead of splitting a
        # formatted string, so paths containing spaces stay intact.
        path_list = "|".join(
            os.path.join(self.cache_dir, name) for name in self._ts_name_list)
        command = [
            ffmpeg_path, "-y", "-i", "concat:" + path_list,
            "-acodec", "copy", "-vcodec", "copy",
            "-absf", "aac_adtstoasc", self.path,
        ]
        p = subprocess.Popen(command)
        p.wait()
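# Hedged usage sketch: M3u8Downloader assumes module-level `headers` and
# `ffmpeg_path` globals and an M3u8Parser, none of which are shown here.
# With those in place, a download would be driven roughly like this:
loop = asyncio.get_event_loop()
downloader = M3u8Downloader("https://example.com/video.m3u8", "video.mp4",
                            worker_num=8)
loop.run_until_complete(downloader.run())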
Beispiel #14
class Writer:
    """
    """

    terminator = "\n"

    delay = 1.0

    def __init__(self, levelno=logging.DEBUG):
        self.queue = Queue()
        self.canWrite = Event()
        self.levelno = levelno

    def filterMessage(self, message) -> bool:
        if not isinstance(message, Message) or message.type != "log":
            return False  # ignore invalid

        if message.levelno < self.levelno:
            return False  # filter level

        return True

    async def start(self):
        loop = get_running_loop()

        while True:
            try:
                await self.canWrite.wait()

                if not self.check():
                    self.canWrite.clear()

                    continue

                message = await self.queue.get()

                if not self.filterMessage(message):
                    self.queue.task_done()
                    continue  # avoid acquiring the lock

                await loop.run_in_executor(None, self.acquire)

                while True:
                    if self.filterMessage(message):
                        await loop.run_in_executor(
                            None, self.emit, message.msg, message.levelno
                        )
                    self.queue.task_done()

                    try:  # handle any other records while we have the lock
                        message = self.queue.get_nowait()
                    except QueueEmpty:
                        break

                await loop.run_in_executor(None, self.release)

                await sleep(self.delay)  # rate limit

            except CancelledError:
                break  # exit the writer

            except Exception:  # catch all
                logging.warning(f"Caught exception in {self.__class__.__name__}. Stopping", exc_info=True)

                self.canWrite.clear()

    def check(self) -> bool:
        return True

    def acquire(self):
        pass

    @abstractmethod
    def emit(self, msg: str, levelno: int):
        raise NotImplementedError()

    def release(self):
        pass
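# Hedged sketch of a concrete Writer: emit() is the only abstract method, so
# a minimal stderr-backed subclass just implements it. `Message` is assumed
# to carry .type, .levelno and .msg, as filterMessage expects.
import sys

class StderrWriter(Writer):
    def emit(self, msg: str, levelno: int):
        sys.stderr.write(msg + self.terminator)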
Beispiel #15
def main():

    url = "http://isoredirect.centos.org/centos/7/isos/x86_64"
    f = urllib.request.urlopen(url)
    html = f.read()
    source = BeautifulSoup(html, 'html.parser')

    threads = []
    queue = Queue()

    # Testing each url for speed
    for link in source.find_all('a'):
        href = str(link.string)

        if href.startswith('http') and 'x86_64' in href:

            thread = Thread(target=TestSpeed, args=(href, queue))
            thread.start()
            threads.append(thread)

    for th in threads:
        th.join()

    # Only accept mirrors that responded in under 0.2 seconds
    bestspeed = 0.2
    besturl = None

    while not queue.empty():
        speedtest = queue.get_nowait()

        mirror = speedtest['url']
        speed = speedtest['speed']

        if args.verbose:
            print(str(speed) + " - Mirror: " + mirror)

        if speed < bestspeed:
            bestspeed = speed
            besturl = mirror

    print("Best speed: " + besturl + " (speed: " + str(bestspeed) + ")")

    # If verbose enable interactive
    isoAsk = "n"

    if args.verbose and not args.list:
        isoAsk = input('Download latest CentOS7? (Y/n): ')

        while isoAsk != "Y" and isoAsk != "n":
            isoAsk = input('Download latest CentOS7? (Y/n): ')

    # As before, fetch page
    f = urllib.request.urlopen(besturl)

    html_linksPage = f.read()
    linksPage = BeautifulSoup(html_linksPage, 'html.parser')

    # For each link
    for link in linksPage.find_all('a'):
        href = str(link.string)
        if ('torrent' not in href and 'Minimal' in href):
            file_name = href.rstrip()
    if args.list and not args.tree:
        print("Best download option: " + besturl + "/" + file_name)
        exit()

    if args.tree:
        dirtree = besturl.replace("isos", "os")
        print("Use directory tree: " + dirtree)
        exit()

    # Downloading the ISO
    if isoAsk == "Y" or args.verbose == False:
        with open(file_name, "wb") as f:
            print("Downloading %s" % file_name)

            response = requests.get(besturl + "/" + file_name, stream=True)
            total_length = response.headers.get('content-length')

            if total_length is None:
                f.write(response.content)
            else:
                dl = 0
                total_length = int(total_length)
                for data in response.iter_content(chunk_size=4096):
                    dl += len(data)
                    f.write(data)
                    done = int(50 * dl / total_length)
                    sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' *
                                                   (50 - done)))
                    sys.stdout.flush()
                # print the trailing newline once, after the progress bar
                print()
    else:
        print("Quitting")
Beispiel #16
class Stream:
    """
    API for working with streams, used by clients and request handlers
    """
    id = None
    __buffer__ = None
    __wrapper__ = None

    def __init__(self,
                 connection: Connection,
                 h2_connection: H2Connection,
                 transport: Transport,
                 *,
                 loop: AbstractEventLoop,
                 stream_id: Optional[int] = None,
                 wrapper: Optional[Wrapper] = None) -> None:
        self._connection = connection
        self._h2_connection = h2_connection
        self._transport = transport
        self._loop = loop
        self.__wrapper__ = wrapper

        if stream_id is not None:
            self.id = stream_id
            self.__buffer__ = Buffer(self.id,
                                     self._connection,
                                     self._h2_connection,
                                     loop=self._loop)

        self.__headers__ = Queue(loop=loop)  # type: Queue[List[Tuple[str, str]]]

        self.__window_updated__ = Event(loop=loop)

    async def recv_headers(self):
        return await self.__headers__.get()

    def recv_headers_nowait(self):
        try:
            return self.__headers__.get_nowait()
        except QueueEmpty:
            return None

    async def recv_data(self, size):
        return await self.__buffer__.read(size)

    async def send_request(self, headers, end_stream=False, *, _processor):
        assert self.id is None, self.id
        while True:
            # This is the first thing we should check before even trying to
            # create a new stream, because this wait() can be cancelled by a
            # timeout, and then we wouldn't need to create a new stream at all
            if not self._connection.write_ready.is_set():
                await self._connection.write_ready.wait()

            # `get_next_available_stream_id()` should be as close to
            # `connection.send_headers()` as possible, without any async
            # interruptions in between, see the docs on the
            # `get_next_available_stream_id()` method
            stream_id = self._h2_connection.get_next_available_stream_id()
            try:
                self._h2_connection.send_headers(stream_id,
                                                 headers,
                                                 end_stream=end_stream)
            except TooManyStreamsError:
                # we're going to wait until one of the currently open streams
                # is closed, so that we can open a new one
                # TODO: maybe implement FIFO for waiters, but this limit
                #       shouldn't be reached in a normal case, so why bother
                # TODO: maybe we should raise an exception here instead of
                #       waiting, if no timeout was set for the current request
                self._connection.stream_close_waiter.clear()
                await self._connection.stream_close_waiter.wait()
                # while we were trying to create a new stream, the write
                # buffer may have become full, so we need to start over from
                # the write-readiness check
                continue
            else:
                self.id = stream_id
                self.__buffer__ = Buffer(self.id,
                                         self._connection,
                                         self._h2_connection,
                                         loop=self._loop)
                release_stream = _processor.register(self)
                self._transport.write(self._h2_connection.data_to_send())
                return release_stream

    async def send_headers(self, headers, end_stream=False):
        assert self.id is not None
        if not self._connection.write_ready.is_set():
            await self._connection.write_ready.wait()

        # Workaround for the H2Connection.send_headers method, which will try
        # to create a new stream if it was removed earlier from the
        # H2Connection.streams, and therefore will raise StreamIDTooLowError
        if self.id not in self._h2_connection.streams:
            raise StreamClosedError(self.id)

        self._h2_connection.send_headers(self.id,
                                         headers,
                                         end_stream=end_stream)
        self._transport.write(self._h2_connection.data_to_send())

    async def send_data(self, data, end_stream=False):
        f = BytesIO(data)
        f_pos, f_last = 0, len(data)

        while True:
            if not self._connection.write_ready.is_set():
                await self._connection.write_ready.wait()

            window = self._h2_connection.local_flow_control_window(self.id)
            if not window:
                self.__window_updated__.clear()
                await self.__window_updated__.wait()
                window = self._h2_connection.local_flow_control_window(self.id)

            max_frame_size = self._h2_connection.max_outbound_frame_size
            f_chunk = f.read(min(window, max_frame_size, f_last - f_pos))
            f_pos = f.tell()

            if f_pos == f_last:
                self._h2_connection.send_data(self.id,
                                              f_chunk,
                                              end_stream=end_stream)
                self._transport.write(self._h2_connection.data_to_send())
                break
            else:
                self._h2_connection.send_data(self.id, f_chunk)
                self._transport.write(self._h2_connection.data_to_send())

    async def end(self):
        if not self._connection.write_ready.is_set():
            await self._connection.write_ready.wait()
        self._h2_connection.end_stream(self.id)
        self._transport.write(self._h2_connection.data_to_send())

    async def reset(self, error_code=ErrorCodes.NO_ERROR):
        if not self._connection.write_ready.is_set():
            await self._connection.write_ready.wait()
        self._h2_connection.reset_stream(self.id, error_code=error_code)
        self._transport.write(self._h2_connection.data_to_send())

    def reset_nowait(self, error_code=ErrorCodes.NO_ERROR):
        self._h2_connection.reset_stream(self.id, error_code=error_code)
        if self._connection.write_ready.is_set():
            self._transport.write(self._h2_connection.data_to_send())

    def __ended__(self):
        self.__buffer__.eof()

    def __terminated__(self, reason):
        if self.__wrapper__ is not None:
            self.__wrapper__.cancel(StreamTerminatedError(reason))

    @property
    def closable(self):
        if self._h2_connection.state_machine.state is ConnectionState.CLOSED:
            return False
        stream = self._h2_connection.streams.get(self.id)
        if stream is None:
            return False
        return not stream.closed
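# The chunking rule in send_data above reduces to: each DATA frame carries
# min(flow-control window, max outbound frame size, bytes remaining). A toy
# check of that arithmetic:
def next_chunk_size(window: int, max_frame: int, remaining: int) -> int:
    return min(window, max_frame, remaining)

assert next_chunk_size(65535, 16384, 100000) == 16384  # frame-size bound
assert next_chunk_size(10, 16384, 100000) == 10        # window bound
assert next_chunk_size(65535, 16384, 512) == 512       # payload tail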
Beispiel #17
async def project_runner(
    idx: int,
    config: Dict[str, Any],
    queue: asyncio.Queue,
    work_path: Path,
    results: Results,
    long_checkouts: bool = False,
    rebase: bool = False,
    keep: bool = False,
    no_diff: bool = False,
) -> None:
    """Check out project and run Black on it + record result"""
    loop = asyncio.get_event_loop()
    py_version = f"{version_info[0]}.{version_info[1]}"
    while True:
        try:
            project_name = queue.get_nowait()
        except asyncio.QueueEmpty:
            LOG.debug(f"project_runner {idx} exiting")
            return
        LOG.debug(f"worker {idx} working on {project_name}")

        project_config = config["projects"][project_name]

        # Check if disabled by config
        if "disabled" in project_config and project_config["disabled"]:
            results.stats["disabled"] += 1
            LOG.info(f"Skipping {project_name} as it's disabled via config")
            continue

        # Check if we should run on this version of Python
        if (
            "all" not in project_config["py_versions"]
            and py_version not in project_config["py_versions"]
        ):
            results.stats["wrong_py_ver"] += 1
            LOG.debug(f"Skipping {project_name} as it's not enabled for {py_version}")
            continue

        # Check if we're doing big projects / long checkouts
        if not long_checkouts and project_config["long_checkout"]:
            results.stats["skipped_long_checkout"] += 1
            LOG.debug(f"Skipping {project_name} as it's configured as a long checkout")
            continue

        # Placeholder path for the STDIN pseudo-project; real projects get a
        # checkout below.
        repo_path: Optional[Path] = Path(__file__)
        stdin_project = project_name.upper() == "STDIN"
        if not stdin_project:
            repo_path = await git_checkout_or_rebase(work_path, project_config, rebase)
            if not repo_path:
                continue
        await black_run(project_name, repo_path, project_config, results, no_diff)

        if not keep and not stdin_project:
            LOG.debug(f"Removing {repo_path}")
            rmtree_partial = partial(
                rmtree, path=repo_path, onerror=handle_PermissionError
            )
            await loop.run_in_executor(None, rmtree_partial)

        LOG.info(f"Finished {project_name}")
Beispiel #18
class TorrentClient:
    def __init__(self, torrent):
        self.tracker = Tracker(torrent)
        self.available_peers = Queue()
        self.peers = []
        self.piece_manager = PieceManager(torrent)
        self.abort = False

    async def start(self):
        self.peers = [
            PeerConnection(self.available_peers,
                           self.tracker.torrent.info_hash,
                           self.tracker.peer_id, self.piece_manager,
                           self._on_block_retrieved)
            for _ in range(MAX_PEER_CONNECTIONS)
        ]

        previous = None
        interval = 30 * 60

        while True:
            if self.piece_manager.complete:
                logging.info('Torrent fully downloaded!')
                break
            if self.abort:
                logging.info('Aborting download...')
                break

            current = time.time()
            if (not previous) or (previous + interval < current):
                response = await self.tracker.connect(
                    # announce first=True only on the initial tracker call
                    first=previous is None,
                    uploaded=self.piece_manager.bytes_uploaded,
                    downloaded=self.piece_manager.bytes_downloaded)

                if response:
                    previous = current
                    interval = response.interval
                    self._empty_queue()
                    for peer in response.peers:
                        self.available_peers.put_nowait(peer)
            else:
                await asyncio.sleep(5)
        self.stop()

    def _empty_queue(self):
        while not self.available_peers.empty():
            self.available_peers.get_nowait()

    def stop(self):
        self.abort = True
        for peer in self.peers:
            peer.stop()
        self.piece_manager.close()
        self.tracker.close()

    def _on_block_retrieved(self, peer_id, piece_index, block_offset, data):
        self.piece_manager.block_received(peer_id=peer_id,
                                          piece_index=piece_index,
                                          block_offset=block_offset,
                                          data=data)
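# Hedged usage sketch, assuming the Torrent metadata class from the same
# project: build a client from a .torrent file and run start() to completion.
async def download(path):
    client = TorrentClient(Torrent(path))
    await client.start()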
Beispiel #19
class ProxyResponse(object):
    '''Asynchronous wsgi response.
    '''
    _started = False
    _headers = None
    _done = False

    def __init__(self, environ, start_response):
        self._loop = environ['pulsar.connection']._loop
        self.environ = environ
        self.start_response = start_response
        self.queue = Queue()

    def __iter__(self):
        while True:
            if self._done:
                try:
                    yield self.queue.get_nowait()
                except QueueEmpty:
                    break
            else:
                # `async` is a reserved word in modern Python; schedule the
                # pending get() with asyncio.ensure_future instead.
                yield ensure_future(self.queue.get(), loop=self._loop)

    def pre_request(self, response, exc=None):
        self._started = True
        response.bind_event('data_processed', self.data_processed)
        return response

    def error(self, exc):
        if not self._started:
            request = wsgi.WsgiRequest(self.environ)
            content_type = request.content_types.best_match(
                ('text/html', 'text/plain'))
            uri = self.environ['RAW_URI']
            msg = 'Could not find %s' % uri
            logger.info(msg=msg)
            if content_type == 'text/html':
                html = wsgi.HtmlDocument(title=msg)
                html.body.append('<h1>%s</h1>' % msg)
                data = html.render()
                resp = wsgi.WsgiResponse(504, data, content_type='text/html')
            elif content_type == 'text/plain':
                resp = wsgi.WsgiResponse(504, msg, content_type='text/plain')
            else:
                resp = wsgi.WsgiResponse(504, '')
            self.start_response(resp.status, resp.get_headers())
            self._done = True
            self.queue.put_nowait(resp.content[0])

    def data_processed(self, response, exc=None, **kw):
        '''Receive data from the requesting HTTP client.'''
        status = response.get_status()
        if status == '100 Continue':
            stream = self.environ.get('wsgi.input') or io.BytesIO()
            body = yield stream.read()
            response.transport.write(body)
        if response.parser.is_headers_complete():
            if self._headers is None:
                headers = self.remove_hop_headers(response.headers)
                self._headers = Headers(headers, kind='server')
                # start the response
                self.start_response(status, list(self._headers))
            body = response.recv_body()
            if response.parser.is_message_complete():
                self._done = True
            self.queue.put_nowait(body)

    def remove_hop_headers(self, headers):
        for header, value in headers:
            if header.lower() not in wsgi.HOP_HEADERS:
                yield header, value
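# The __iter__ pattern above (keep draining the queue until the response is
# done) recast as a plain asyncio sketch: here a sentinel object marks the
# end of the streamed body instead of the _done flag.
import asyncio

_EOF = object()

async def iter_body(queue):
    while True:
        chunk = await queue.get()
        if chunk is _EOF:
            return
        yield chunk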
Beispiel #20
class Mailbox(Generic[T]):
    """Similar to `asyncio.Queue`_, but able to perform "selective receive".

    See `~.select`.
    """

    # ---- Queue API ----
    def __init__(
        self,
        maxsize=0,
        *,
        _queue: Optional[Queue[T]] = None,
        _pending: Optional[List[T]] = None,
    ):
        """PRIVATE ARGS: _queue and _pending are not part of the public API)."""
        self._queue = Queue(maxsize) if _queue is None else _queue
        self._pending = [] if _pending is None else _pending

    @property
    def maxsize(self):
        return self._queue.maxsize

    def empty(self):
        return self._queue.empty() and len(self._pending) < 1

    async def get(self):
        """See `asyncio.Queue.get`_."""
        if len(self._pending) > 0:
            return self._pending.pop()
        return await self._queue.get()

    def full(self):
        if self.maxsize <= 0 or self.qsize() < self.maxsize:
            return False
        return True

    def get_nowait(self, default=NO_VALUE):
        """Similar to `asyncio.Queue.get_nowait`_, but can return a default value.

        When the argument ``default`` is passed, instead of raising an exception if the
        queue is empty, the default value is passed.
        """
        if len(self._pending) > 0:
            return self._pending.pop()
        try:
            return self._queue.get_nowait()
        except asyncio.QueueEmpty:
            if default is not NO_VALUE:
                return default
            raise

    async def join(self):
        return await self._queue.join()

    async def put(self, item: T):
        await self._queue.put(item)

    def put_nowait(self, item: T):
        if self.full():
            raise asyncio.QueueFull

        self._queue.put_nowait(item)

    def qsize(self) -> int:
        return len(self._pending) + self._queue.qsize()

    def task_done(self):
        self._queue.task_done()

    # ---- Select API ----

    @overload  # noqa
    async def select(self, *args: Topic) -> Tuple[T, ...]:
        ...

    @overload  # noqa
    async def select(self, selection: Dict[Topic, Callable[[T],
                                                           S]]) -> S:  # noqa
        ...

    async def select(self, *args):  # noqa
        selectors, callbacks, value, callback = self._select_common(args)
        if value is NO_VALUE:
            value, callback = await self._select_on_queue(selectors, callbacks)

        if callback:
            return callback(value)

        return value

    def select_nowait(self, *args, default=NO_VALUE):  # noqa
        selectors, callbacks, value, callback = self._select_common(args)
        if value is NO_VALUE:
            value, callback = self._select_nowait(selectors, callbacks,
                                                  default)

        if callback:
            return callback(value)

        return value

    def _select_common(self, args):
        """This internal method avoid code duplication between get and get_nowait,
        encapsulating the common parts.
        """
        n = len(args)
        if n == 1 and isinstance(args[0], dict):
            selectors = list(args[0].keys())
            callbacks = list(args[0].values())
        else:
            selectors = args
            callbacks = []

        selectors = [pattern(s) for s in selectors]
        value, callback = self._select_on_pending(selectors, callbacks)

        return selectors, callbacks, value, callback

    def _select_on_pending(
        self, selectors: List[Pattern], callbacks: Optional[List[Callable[[T],
                                                                          S]]]
    ) -> Tuple[OptVal[T], Optional[Callable[[T], S]]]:

        for i in range(len(self._pending)):
            value, callback = _select_value(self._pending[i], selectors,
                                            callbacks)
            if value is not NO_VALUE:
                del self._pending[i]
                return value, callback

        return NO_VALUE, None

    async def _select_on_queue(
        self, selectors: List[Pattern], callbacks: Optional[List[Callable[[T],
                                                                          S]]]
    ) -> Tuple[OptVal[T], Optional[Callable[[T], S]]]:
        while True:
            item = await self._queue.get()
            value, callback = _select_value(item, selectors, callbacks)
            if value is not NO_VALUE:
                return value, callback
            else:
                self._pending.append(item)

    def _select_nowait(
        self,
        selectors: List[Pattern],
        callbacks: Optional[List[Callable[[T], S]]],
        default: OptVal[T] = NO_VALUE,
    ) -> Tuple[OptVal[T], Optional[Callable[[T], S]]]:
        try:
            while True:
                item = self._queue.get_nowait()
                value, callback = _select_value(item, selectors, callbacks)
                if value is not NO_VALUE:
                    return value, callback
                else:
                    self._pending.append(item)
        except asyncio.QueueEmpty:
            if default is not NO_VALUE:
                return default, None
            raise

    def _size(self):
        return f"[queued: {self._queue.qsize()} + pending: {len(self._pending)}]"

    def __str__(self):
        return f"<{type(self).__name__}{self._size()}>"

    def __repr__(self):
        return (f"<{type(self).__name__} at {id(self):#x} "
                f"queue={self._queue}, pending={self._pending}>")
Beispiel #21
from sys import stdin, stdout
from asyncio import Queue
suma = 0
harrys = Queue()  # FIFO of the numbers Harry hands over
monks = []        # stack of numbers currently with the monks
n = int(stdin.readline().strip())
for x in stdin.readline().split(' '):
    num = int(x)
    harrys.put_nowait(num)
Q,target = map(int,stdin.readline().split(' '))
res = -1
if suma == target:
    res = 0
else:
    for q in range(Q):
        op = stdin.readline().strip()        
        if op == "Harry":
            num = harrys.get_nowait()
            monks.append(num)
            suma += num
        else:
            num = monks.pop()
            suma -= num        
        if suma == target:
            res = len(monks)
            break
stdout.write(str(res)+'\n')
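# Note: the snippet above uses asyncio.Queue purely as a synchronous FIFO;
# outside an event loop, collections.deque provides the same operations with
# less machinery:
from collections import deque
fifo = deque([3, 1, 4])
assert fifo.popleft() == 3  # the deque analogue of get_nowait()
fifo.append(5)              # the analogue of put_nowait(5)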
Beispiel #22
  def fit(self, dataset):
    nodes_at_start = Node.totalcount
    self.Xnames = dataset.Xnames
    self.Ynames = dataset.Ynames
    self.root = Node(dataset, parent=None)
    self.lc_nodes = 0

    que = Queue()
    que.put_nowait(self.root)

    while not que.empty():
      c = que.get_nowait()

      if len(c.data.Ynames) == 1:
        # One class remaining, create an answer node
        for answername, answer in c.data.Ynames.items():
          assert(c.answer is None)
          c.answer = node_types.Answer(answer, answername, c.data.Y.size)
        continue

      if len(c.data.Ynames) == 2 and self.use_lc:
        # Possible to create a linear classifier here
        # Conjunction (penalty='l1' loss='squared_hinge' dual=True) not supported
        lc = LinearSVC(penalty='l1', tol=0.000001, C=10000.0,
                       dual=False, fit_intercept=True, random_state=42)
        # Create a mask to only include features that
        # do not contain the same value in all samples
        feature_mask = []
        for i, dom in enumerate(c.data.Xdomains):
          assert(len(dom) >= 1)
          feature_mask.append(len(dom) > 1)
        # Create a scaler to transform data before fitting
        scaler = StandardScaler()
        X_transformed = scaler.fit_transform(c.data.X[:,feature_mask])
        lc.fit(X_transformed, c.data.Y)
        sc = accuracy_score(normalize=False, y_true=c.data.Y,
                            y_pred=lc.predict(X_transformed))
        if sc == c.data.Y.size:
          # Linear classifier is correct, make a leaf node with it
          c.line = node_types.Line(lc, feature_mask, scaler,
                                   list(c.data.Ynames.keys()), c.data.Y.size)
          self.lc_nodes += 1
          continue

      # We need to split here
      c.predicate = self.split_criterion.best(c.data, c)
      mask, s_Xdomains, u_Xdomains = c.predicate.evaluate_domains(c.data.X)
      Ynames_back = {}
      for name, idx in c.data.Ynames.items():
        assert(idx not in Ynames_back)
        Ynames_back[idx] = name
      # SAT
      s_X = c.data.X[mask]
      s_Y = c.data.Y[mask]
      s_Xnames = c.data.Xnames
      s_Ynames = {}
      s_Yids = set()
      for y in s_Y:
        if y not in s_Yids:
          s_Yids.add(y)
          assert(y in Ynames_back and Ynames_back[y] not in s_Ynames)
          s_Ynames[Ynames_back[y]] = y
      c.childSAT = Node(Dataset(s_X, s_Y, s_Xnames, s_Xdomains, s_Ynames,
                                c.data.Xineqforbidden.copy(),
                                c.data.ActionIDtoName.copy(),
                                c.data.ModuleIDtoName.copy()),
                        c)
      que.put_nowait(c.childSAT)
      # UNSAT
      u_X = c.data.X[~mask]
      u_Y = c.data.Y[~mask]
      u_Xnames = c.data.Xnames
      u_Ynames = {}
      u_Yids = set()
      for y in u_Y:
        if y not in u_Yids:
          u_Yids.add(y)
          assert(y in Ynames_back and Ynames_back[y] not in u_Ynames)
          u_Ynames[Ynames_back[y]] = y
      c.childUNSAT = Node(Dataset(u_X, u_Y, u_Xnames, u_Xdomains, u_Ynames,
                                  c.data.Xineqforbidden.copy(),
                                  c.data.ActionIDtoName.copy(),
                                  c.data.ModuleIDtoName.copy()),
                          c)
      que.put_nowait(c.childUNSAT)
      # Clean up previous dataset
      c.data = None
      assert(dataset is not None) # c.data was pointer copy, not reference

    # Finished
    self.nodes = Node.totalcount - nodes_at_start
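# The traversal skeleton behind fit(): a FIFO of unfinished nodes drives
# breadth-first tree construction, each split pushing its two children.
# In isolation (split/is_leaf stand in for the logic above):
def bfs_build(root, split, is_leaf):
  que = Queue()
  que.put_nowait(root)
  while not que.empty():
    c = que.get_nowait()
    if is_leaf(c):
      continue
    c.childSAT, c.childUNSAT = split(c)
    que.put_nowait(c.childSAT)
    que.put_nowait(c.childUNSAT)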