Example 1
    def __init__(self, nethost: str, netport: int, database: str, user: str,
                 concurrency: int, protocol: str, **kwargs):

        super().__init__(**kwargs)

        if protocol != self.get_proto_name():
            raise RuntimeError(f'unknown protocol {protocol!r}')
        if concurrency <= 0 or concurrency > defines.HTTP_PORT_MAX_CONCURRENCY:
            raise RuntimeError(
                f'concurrency must be greater than 0 and '
                f'less than or equal to {defines.HTTP_PORT_MAX_CONCURRENCY}')

        self._compilers = asyncio.LifoQueue()
        self._pgcons = asyncio.LifoQueue()
        self._compilers_list = []
        self._pgcons_list = []

        self._nethost = nethost
        self._netport = netport

        self.database = database
        self.user = user
        self.concurrency = concurrency

        self._servers = []
        self._query_cache = cache.StatementsCache(
            maxsize=defines.HTTP_PORT_QUERY_CACHE_SIZE)
Example 2
def create_task_queues(app, loop):
    app.threaded_executor = ThreadPoolExecutor(config.thread_pool_size)

    app.updates_queue = asyncio.Queue(maxsize=1_000)
    app.ls_queue = asyncio.LifoQueue(maxsize=100)
    app.add_task(
        b2browser.b2.worker(f"ls_worker", app.ls_queue, app.updates_queue,
                            jinja))

    app.preview_queue = asyncio.LifoQueue(maxsize=10_000)
    for i in range(config.thread_pool_size):
        app.add_task(
            b2browser.preview.worker(f"preview_worker_{i}",
                                     app.threaded_executor, app.preview_queue,
                                     app.updates_queue))
Example 3
    def __init__(self, event_loop=None, n_worker=1):
        self.event_loop = event_loop
        self.n_worker = n_worker
        self._awaiting_worker_queue = asyncio.Queue(loop=self.event_loop,
                                                    maxsize=self.n_worker)
        self._processing_worker_queue = asyncio.LifoQueue(
            loop=self.event_loop, maxsize=self.n_worker)
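Note: the loop argument seen here (and in several later snippets) was deprecated for asyncio queues in Python 3.8 and removed in Python 3.10, where a queue binds to the running event loop on first use. A minimal sketch of the same constructor for modern Python (class name hypothetical):

import asyncio

class WorkerPool:
    def __init__(self, n_worker=1):
        self.n_worker = n_worker
        # No loop= argument: the queues attach to the event loop
        # that is running when they are first awaited.
        self._awaiting_worker_queue = asyncio.Queue(maxsize=n_worker)
        self._processing_worker_queue = asyncio.LifoQueue(maxsize=n_worker)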
Example 4
    async def crawl(self, seed=''):
        counts = {}
        visited = set()
        count_kws = keyword_counter(self.keywords)

        q = asyncio.LifoQueue()
        await q.put((0, seed or self.DEFAULT_SEED))

        while not q.empty():
            depth, url = await q.get()
            visited.add(url)
            print(url)

            content = await fetch(self.session, url)
            parser = BeautifulSoup(content, 'lxml')

            if self.is_story(url):
                story = self.get_story(parser)
                story = normalize_story(story.text) if story else ''
                counts[url] = count_kws(story)
                pprint(counts[url])

            if depth < self.MAX_DEPTH:
                for link in parser.find_all('a', href=True):
                    href = link['href']
                    if href not in visited and self.should_visit(href):
                        await q.put((depth + 1, href))

        return counts
Example 5
    def __init__(self, *args, **kwargs):
        self.pool_size = kwargs.pop('pool_size', 8)
        self.connection_args = args
        self.connection_kwargs = kwargs
        self.queue = asyncio.LifoQueue()  # important: released connections go on top and are reused first
        self.connection_list = []  # used to close in the end
        self._async__init__()  # not sure how to model this
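The final self._async__init__() call above does nothing by itself: calling a coroutine function only creates a coroutine object, it does not run it. One common way to model async initialization is to make the pool awaitable, so that pool = await Pool(...) drives the setup. A minimal sketch under that design, with connect() standing in for whatever connection factory the pool wraps:

import asyncio

class Pool:
    def __init__(self, *args, **kwargs):
        self.pool_size = kwargs.pop('pool_size', 8)
        self.connection_args = args
        self.connection_kwargs = kwargs
        self.queue = asyncio.LifoQueue()
        self.connection_list = []

    async def _async__init__(self):
        # Fill the pool; releasing puts connections back on top of the
        # LIFO queue, so recently used connections are handed out first.
        for _ in range(self.pool_size):
            conn = await connect(*self.connection_args,
                                 **self.connection_kwargs)
            self.connection_list.append(conn)
            self.queue.put_nowait(conn)
        return self

    def __await__(self):
        # Allows `pool = await Pool(...)`.
        return self._async__init__().__await__()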
Example 6
    def test_order(self):
        q = asyncio.LifoQueue(loop=self.loop)
        for i in [1, 3, 2]:
            q.put_nowait(i)

        items = [q.get_nowait() for _ in range(3)]
        self.assertEqual([2, 3, 1], items)
Example 7
async def run(loop):
    device = None

    print('looking for devices...')
    while device is None:
        devices = await discover()
        device = next((d for d in devices if d.name == 'Lyd2'), None)

    async with BleakClient(device.address, loop=loop) as client:
        queue = asyncio.LifoQueue(maxsize=1)
        print('disconnecting...')
        await client.disconnect()
        print('connecting...')
        await client.connect()

        print('starting notifications')

        def callback(sender, data):
            print(f"{sender}: {data}")

        await client.start_notify(
            characteristicUuid,
            lambda _, data: loop.call_soon(queue.put_nowait, data))

        print('waiting')
        while True:
            args = await queue.get()
            music.play()
            print(args)
Example 8
    def __init__(self, nethost: str, netport: int, database: str, user: str,
                 concurrency: int, protocol: str, **kwargs):

        super().__init__(**kwargs)
        self._compiler_pool_size = 0

        if protocol != self.get_proto_name():
            raise RuntimeError(f'unknown protocol {protocol!r}')
        if concurrency <= 0 or concurrency > defines.HTTP_PORT_MAX_CONCURRENCY:
            raise RuntimeError(
                f'concurrency must be greater than 0 and '
                f'less than or equal to {defines.HTTP_PORT_MAX_CONCURRENCY}')

        self._compilers: asyncio.LifoQueue[Any] = asyncio.LifoQueue()
        self._compilers_list: List[Any] = []

        self._nethost = nethost
        self._netport = netport

        self.database = database
        self.user = user
        self.concurrency = concurrency
        self.last_minute_requests = windowedsum.WindowedSum()

        self._http_proto_server = None
        self._http_request_logger = None
        self._query_cache = cache.StatementsCache(
            maxsize=defines.HTTP_PORT_QUERY_CACHE_SIZE)
Example 9
    def setUp(self):
        super(EntryWorkerTestCase, self).setUp()

        self.queue = asyncio.LifoQueue()
        self.loop.run_until_complete(self.init_queue())
        self.buf = set()  # Having an add() method is enough
        self.sess = SessionStub()
Example 10
    def __init__(self, factory: tp.Coroutine, size: int = POOL_SIZE):
        self._factory = factory
        self._size = size
        self._queue = asyncio.LifoQueue(maxsize=self._size)
        self._used = set()
        for _ in range(self._size):
            self._queue.put_nowait(None)
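Pre-filling the LIFO queue with None placeholders makes it behave like a semaphore: every get() consumes a slot, and a slot is lazily replaced by a real object from the factory on first use. Hypothetical acquire/release methods completing the sketch, assuming _factory is a callable returning an awaitable:

    async def acquire(self):
        # Blocks once all self._size slots are checked out.
        obj = await self._queue.get()
        if obj is None:
            obj = await self._factory()
        self._used.add(obj)
        return obj

    async def release(self, obj):
        self._used.discard(obj)
        # LIFO: the most recently released object is reused first.
        self._queue.put_nowait(obj)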
Example 11
def run():
    bsr_end = RabbitmqInputEndpoint('amz_bsr:input', **RABBITMQ_CONF)
    output_end = RabbitmqOutputEndpoint(
        ['amz_bsr:input', 'amz_bsr:output', 'amz_ip_ban:input'],
        **RABBITMQ_CONF)
    queue = asyncio.Queue()
    notify_input_end = pipeflow.QueueInputEndpoint(queue)
    notify_output_end = pipeflow.QueueOutputEndpoint(queue)
    queue = asyncio.LifoQueue()
    inner_input_end = pipeflow.QueueInputEndpoint(queue)
    inner_output_end = pipeflow.QueueOutputEndpoint(queue)

    server = pipeflow.Server()

    task_group = server.add_group('task', 1)
    task_group.set_handle(handle_task)
    task_group.add_input_endpoint('input', bsr_end)
    task_group.add_input_endpoint('notify', notify_input_end)
    task_group.add_output_endpoint('input_back', output_end, 'amz_bsr:input')
    task_group.add_output_endpoint('inner_output', inner_output_end)

    worker_group = server.add_group('work', MAX_WORKERS)
    worker_group.set_handle(handle_worker)
    worker_group.add_input_endpoint('inner_input', inner_input_end)
    worker_group.add_output_endpoint('output',
                                     output_end,
                                     'amz_bsr:output',
                                     buffer_size=MAX_WORKERS * 20)
    worker_group.add_output_endpoint('inner_output', inner_output_end)
    worker_group.add_output_endpoint('notify', notify_output_end)
    worker_group.add_output_endpoint('ban', output_end, 'amz_ip_ban:input')
    server.run()
Example 12
    def __init__(self, *connect_args,
                 min_size,
                 max_size,
                 max_queries,
                 max_inactive_connection_lifetime,
                 setup,
                 init,
                 loop,
                 connection_class,
                 **connect_kwargs):

        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop

        if max_size <= 0:
            raise ValueError('max_size is expected to be greater than zero')

        if min_size < 0:
            raise ValueError(
                'min_size is expected to be greater or equal to zero')

        if min_size > max_size:
            raise ValueError('min_size is greater than max_size')

        if max_queries <= 0:
            raise ValueError('max_queries is expected to be greater than zero')

        if max_inactive_connection_lifetime < 0:
            raise ValueError(
                'max_inactive_connection_lifetime is expected to be greater '
                'or equal to zero')

        self._minsize = min_size
        self._maxsize = max_size

        self._holders = []
        self._initialized = False
        self._queue = asyncio.LifoQueue(maxsize=self._maxsize, loop=self._loop)

        self._working_addr = None
        self._working_config = None
        self._working_params = None

        self._connection_class = connection_class

        self._closed = False

        for _ in range(max_size):
            ch = PoolConnectionHolder(
                self,
                connect_args=connect_args,
                connect_kwargs=connect_kwargs,
                max_queries=max_queries,
                max_inactive_time=max_inactive_connection_lifetime,
                setup=setup,
                init=init)

            self._holders.append(ch)
            self._queue.put_nowait(ch)
Example 13
    async def test_order(self):
        q = asyncio.LifoQueue()
        for i in [1, 3, 2]:
            await q.put(i)

        items = [await q.get() for _ in range(3)]
        self.assertEqual([2, 3, 1], items)
Example 14
    def __init__(self, root, *, max_tasks=10, loop=None):
        self._loop = loop or asyncio.get_event_loop()
        self._root = URL(root)
        self._q = asyncio.LifoQueue(loop=self._loop)
        self._max_tasks = max_tasks
        self._session = aiohttp.ClientSession(loop=self._loop)
        self._fetched_url_num = 0
Example 15
async def main(contracts, number_of_workers, now=datetime.now()):

    asyncio.get_event_loop().set_debug(True)

    log.debug('main function started')
    await ib.qualifyContractsAsync(*contracts)
    log.debug('contracts qualified')

    queue = asyncio.LifoQueue()
    producers = [
        asyncio.create_task(
            schedule_task(**initial_schedule(c, now), queue=queue))
        for c in contracts
    ]
    workers = [
        asyncio.create_task(worker(f'worker {i}', queue))
        for i in range(number_of_workers)
    ]
    await asyncio.gather(*producers, return_exceptions=True)

    # wait until the queue is fully processed (implicitly awaits workers)
    await queue.join()

    # cancel all workers
    log.debug('cancelling workers')
    for w in workers:
        w.cancel()

    # wait until all worker tasks are cancelled
    await asyncio.gather(*workers, return_exceptions=True)
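The worker coroutine itself is not shown; for queue.join() to ever return, each consumer must call task_done() exactly once per item it takes. A minimal sketch, assuming each queued item is an argument-less coroutine function:

async def worker(name, queue):
    while True:
        job = await queue.get()
        try:
            await job()
        except Exception:
            log.exception('%s failed', name)
        finally:
            queue.task_done()  # lets queue.join() complete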
Example 16
    async def _initialize(self):
        self._queue = asyncio.LifoQueue(maxsize=self._maxsize)
        for _ in range(self._maxsize):
            ch = PoolConnectionHolder(
                self,
                max_queries=self._max_queries,
                max_inactive_time=self._max_inactive_connection_lifetime,
                setup=self._setup)

            self._holders.append(ch)
            self._queue.put_nowait(ch)

        if self._minsize:
            # Since we use a LIFO queue, the first items in the queue will be
            # the last ones in `self._holders`.  We want to pre-connect the
            # first few connections in the queue, therefore we want to walk
            # `self._holders` in reverse.

            # Connect the first connection holder in the queue so that it
            # can record `_working_addr` and `_working_opts`, which will
            # speed up successive connection attempts.
            first_ch = self._holders[-1]  # type: PoolConnectionHolder
            await first_ch.connect()

            if self._minsize > 1:
                connect_tasks = []
                for i, ch in enumerate(reversed(self._holders[:-1])):
                    # `minsize - 1` because we already have first_ch
                    if i >= self._minsize - 1:
                        break
                    connect_tasks.append(ch.connect())

                await asyncio.gather(*connect_tasks)
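The reverse-walk logic above relies on a LifoQueue returning items in reverse insertion order, so the holder appended last (self._holders[-1]) is the first one a caller acquires. A self-contained check of that property:

import asyncio

async def demo():
    q = asyncio.LifoQueue()
    holders = ['h0', 'h1', 'h2']
    for h in holders:
        q.put_nowait(h)
    # The last holder put in is the first one handed out.
    assert await q.get() == holders[-1]

asyncio.run(demo())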
Example 17
    async def _async__init__(self):
        if self._initialized:
            return
        if self._initializing:
            raise errors.InterfaceError(
                'pool is being initialized in another task')
        if self._closed:
            raise errors.InterfaceError('pool is closed')

        self._initializing = True

        self._queue = asyncio.LifoQueue(maxsize=self._maxsize)
        for _ in range(self._maxsize):
            ch = PoolConnectionHolder(self,
                                      on_acquire=self._on_acquire,
                                      on_release=self._on_release)

            self._holders.append(ch)
            self._queue.put_nowait(ch)

        try:
            await self._initialize()
            return self
        finally:
            self._initializing = False
            self._initialized = True
Example 18
async def main(holder: ContractHolder):

    log.debug('inside main')
    contracts = holder()
    log.debug('past holder')
    number_of_workers = min(len(contracts), max_number_of_workers)

    log.debug(f'main function started, '
              f'retrieving data for {len(contracts)} instruments')

    queue = asyncio.LifoQueue()
    for contract in contracts:
        await queue.put(contract)
    workers = [
        asyncio.create_task(worker(f'worker {i}', queue))
        for i in range(number_of_workers)
    ]

    await queue.join()

    # cancel all workers
    log.debug('cancelling workers')
    for w in workers:
        w.cancel()

    # wait until all worker tasks are cancelled
    await asyncio.gather(*workers)
Example 19
    def __init__(self, *connect_args,
                 min_size,
                 max_size,
                 on_acquire,
                 on_release,
                 on_connect,
                 connection_class,
                 **connect_kwargs):

        loop = asyncio.get_event_loop()
        self._loop = loop

        if max_size <= 0:
            raise ValueError('max_size is expected to be greater than zero')

        if min_size < 0:
            raise ValueError(
                'min_size is expected to be greater or equal to zero')

        if min_size > max_size:
            raise ValueError('min_size is greater than max_size')

        if not issubclass(connection_class, asyncio_con.AsyncIOConnection):
            raise TypeError(
                'connection_class is expected to be a subclass of '
                'edgedb.AsyncIOConnection, got {!r}'.format(connection_class))

        self._minsize = min_size
        self._maxsize = max_size

        self._holders = []
        self._initialized = False
        self._initializing = False
        self._queue = asyncio.LifoQueue(maxsize=self._maxsize, loop=self._loop)

        self._working_addr = None
        self._working_config = None
        self._working_params = None

        self._connection_class = connection_class

        self._closing = False
        self._closed = False
        self._generation = 0
        self._on_connect = on_connect
        self._connect_args = connect_args
        self._connect_kwargs = connect_kwargs

        for _ in range(max_size):
            ch = PoolConnectionHolder(
                self,
                on_acquire=on_acquire,
                on_release=on_release)

            self._holders.append(ch)
            self._queue.put_nowait(ch)
Example 20
async def main():
    # Supply the token to the bot as an argument to the script
    assert len(sys.argv) > 1
    async with websockets.connect(GATEWAY_URI) as websocket:
        # Used to let the producer know what kind of message
        queue = asyncio.Queue()
        # Used for heartbeat messages, the first element that is added to this queue
        heartbeatqueue = asyncio.LifoQueue()
        await handler(websocket, queue, heartbeatqueue)
Example 21
    async def test_subscribe_custom_queue(self):
        sp = AsyncSubPub()
        q = asyncio.PriorityQueue()
        q_out = await sp.subscribe('', queue=q)
        self.assertIs(q_out, q)
        q_out = await sp.subscribe('', queue=q)
        self.assertIs(q_out, q)
        q_out = await sp.subscribe('')
        self.assertIsInstance(q_out, asyncio.Queue)
        q_out = await sp.subscribe('', queue=asyncio.LifoQueue())
        self.assertIsInstance(q_out, asyncio.LifoQueue)
Example 22
async def main(contracts, number_of_workers, now=datetime.now()):

    if logmode == "logging.INFO":
        FileHandler(
            f'{__file__[:-3]}_{datetime.today().strftime("%Y-%m-%d_%H-%M")}',
            format_string=
            '[{record.time:%y-%m-%d %H:%M:%S.%f%z}] {record.level_name}: {record.lineno}: {record.message}',
            bubble=True,
            delay=True).push_application()
        #logger=createLogger(__name__,args.log,logging.INFO)
    else:
        FileHandler(
            f'{__file__[:-3]}_{datetime.today().strftime("%Y-%m-%d_%H-%M")}',
            format_string=
            '[{record.time:%y-%m-%d %H:%M:%S.%f%z}] {record.level_name}: {record.lineno}: {record.message}',
            bubble=True,
            delay=True).push_application()
        #logger=createLogger(__name__,args.log,logging.DEBUG)
        logger = logging.getLogger(__name__)
        #    logger=createLogger(__name__,"scan.log",logging.DEBUG)

    asyncio.get_event_loop().set_debug(True)

    log.debug('main function started')
    for i in contracts:
        log.debug(i)
    #log.debug('main function started'+contracts)
    #await ib.qualifyContractsAsync(*contracts)
    log.debug('contracts qualified')

    queue = asyncio.LifoQueue()
    producers = [
        asyncio.create_task(
            schedule_task(**initial_scheduleBF(c, backdays), queue=queue))
        for c in contracts
    ]
    workers = [
        asyncio.create_task(worker(f'worker {i}', queue))
        for i in range(number_of_workers)
    ]
    await asyncio.gather(*producers, return_exceptions=True)

    # wait until the queue is fully processed (implicitly awaits workers)
    await queue.join()

    # cancel all workers
    log.debug('cancelling workers')
    for w in workers:
        w.cancel()

    # wait until all worker tasks are cancelled
    await asyncio.gather(*workers, return_exceptions=True)
Example 23
async def s3_dir_dir(url, depth, dst_q, s3):
    """
    Find directories certain depth from the base, push them to the `dst_q`

    ```
    s3://bucket/a
                 |- b1
                      |- c1/...
                      |- c2/...
                      |- some_file.txt
                 |- b2
                      |- c3/...
    ```

    Given a bucket structure above, calling this function with

    - url s3://bucket/a/
    - depth=1 will produce
         - s3://bucket/a/b1/
         - s3://bucket/a/b2/
    - depth=2 will produce
         - s3://bucket/a/b1/c1/
         - s3://bucket/a/b1/c2/
         - s3://bucket/a/b2/c3/

    Any files are ignored.
    """
    if not url.endswith('/'):
        url = url + '/'

    pp = s3.get_paginator('list_objects_v2')

    async def step(bucket, prefix, depth, work_q, dst_q):

        async for o in pp.paginate(Bucket=bucket, Prefix=prefix,
                                   Delimiter='/'):
            for d in o.get('CommonPrefixes', []):
                d = d.get('Prefix')
                if depth > 1:
                    await work_q.put((d, depth - 1))
                else:
                    d = 's3://{}/{}'.format(bucket, d)
                    await dst_q.put(d)

    bucket, prefix = s3_url_parse(url)
    work_q = asyncio.LifoQueue()
    work_q.put_nowait((prefix, depth))

    while work_q.qsize() > 0:
        _dir, depth = work_q.get_nowait()
        await step(bucket, _dir, depth, work_q, dst_q)
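Because work_q is a LifoQueue, the traversal is depth-first: the prefixes discovered most recently are expanded before older entries. The same stack discipline in isolation:

import asyncio

async def dfs_order():
    # Each entry is (prefix, remaining_depth), as in s3_dir_dir.
    stack = asyncio.LifoQueue()
    stack.put_nowait(('a/', 2))
    order = []
    while stack.qsize() > 0:
        prefix, depth = stack.get_nowait()
        order.append(prefix)
        if depth > 1:
            # Children are visited before the next sibling.
            for child in (prefix + 'x/', prefix + 'y/'):
                stack.put_nowait((child, depth - 1))
    return order

print(asyncio.run(dfs_order()))  # ['a/', 'a/y/', 'a/x/']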
Example 24
def _get_stack(message_type):
    if message_type in message_stacks:
        return message_stacks[message_type]
    # Double-checked locking: re-test under the lock before creating.
    with ms_lock:
        if message_type in message_stacks:
            return message_stacks[message_type]
        selected_stack = asyncio.LifoQueue()
        message_stacks[message_type] = selected_stack
        return selected_stack
Example 25
async def using_queues():
    q = asyncio.Queue()

    q.put_nowait('Hello')

    await q.get()

    await q.put('world')

    q.get_nowait()

    pq = asyncio.PriorityQueue()

    stack = asyncio.LifoQueue()
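The snippet above only instantiates the three queue flavours; a quick demonstration of how their retrieval orders differ:

import asyncio

async def compare_orders():
    fifo, lifo, prio = asyncio.Queue(), asyncio.LifoQueue(), asyncio.PriorityQueue()
    for q in (fifo, lifo, prio):
        for item in (2, 1, 3):
            q.put_nowait(item)
    print([fifo.get_nowait() for _ in range(3)])  # [2, 1, 3] insertion order
    print([lifo.get_nowait() for _ in range(3)])  # [3, 1, 2] reverse insertion
    print([prio.get_nowait() for _ in range(3)])  # [1, 2, 3] smallest first

asyncio.run(compare_orders())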
Example 26
    def _queue(self) -> asyncio.Queue[_T]:
        # Delay creation of the queue until it is first used, to avoid
        # binding it to a possibly wrong event loop.
        # By delaying the creation of the pool we accommodate the common
        # usage pattern of instantiating the engine at module level, where a
        # different event loop is present compared to when the application
        # is actually run.

        queue: asyncio.Queue[_T]

        if self.use_lifo:
            queue = asyncio.LifoQueue(maxsize=self.maxsize)
        else:
            queue = asyncio.Queue(maxsize=self.maxsize)
        return queue
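This lazy construction is typically paired with a memoizing property, so the queue is built exactly once, under the loop that actually runs the application. A minimal sketch using functools.cached_property (class and attribute names hypothetical):

import asyncio
from functools import cached_property

class LazyQueueHolder:
    def __init__(self, maxsize: int = 0, use_lifo: bool = False):
        self.maxsize = maxsize
        self.use_lifo = use_lifo

    @cached_property
    def _queue(self) -> asyncio.Queue:
        # Created on first access, not at import time where a
        # different event loop may be current.
        if self.use_lifo:
            return asyncio.LifoQueue(maxsize=self.maxsize)
        return asyncio.Queue(maxsize=self.maxsize)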
Example 27
    def __init__(self, asset_data: AssetData, **passargs):
        """Create a Downloader object.

        Parameters:
            asset_data: Asset Data
            passargs: Keyword arguments
        """
        self.asset_data = asset_data
        self.passargs = passargs

        self.finished = False

        self._task: Optional[asyncio.Task] = None
        self._queue: asyncio.Queue = asyncio.LifoQueue()
        self._progress = 0
        self._stop_event = threading.Event()
Example 28
    async def _run_push_pipeline(self, object_service_root: str, object_service_headers: dict,
                                 objects: List[PushObject], progress_update_fn: Callable,
                                 multipart_chunk_size: int, upload_chunk_size: int = 4194304,
                                 num_workers: int = 4) -> None:
        """Method to run the async upload pipeline

        Args:
            object_service_root: The root URL to use for all objects, including the namespace and dataset name
            object_service_headers: The headers to use when requesting signed urls, including auth info
            objects: A list of PushObjects to push
            progress_update_fn: A callable with arg "completed_bytes" (int) indicating how many bytes have been
                                uploaded since it was last called
            multipart_chunk_size: Size in bytes used to break a file apart for multipart uploading
            upload_chunk_size: Size in bytes for streaming IO chunks
            num_workers: the number of consumer workers to start

        Returns:

        """
        # We use a LifoQueue to ensure S3 uploads start as soon as they are ready, which helps ensure pre-signed urls
        # do not time out before they can be used when there are a lot of files.
        queue: asyncio.LifoQueue = asyncio.LifoQueue()

        async with aiohttp.ClientSession() as session:
            # Start workers
            workers = []
            for i in range(num_workers):
                task = asyncio.ensure_future(self._push_object_consumer(queue, session, progress_update_fn))
                workers.append(task)

            # Populate the work queue
            await self._push_object_producer(queue,
                                             object_service_root,
                                             object_service_headers,
                                             multipart_chunk_size,
                                             upload_chunk_size,
                                             objects)

            # wait until the consumer has processed all items
            await queue.join()

            # the workers are still waiting for work, so cancel them
            for worker in workers:
                worker.cancel()
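With a LifoQueue, the items produced most recently (whose pre-signed URLs are freshest) are uploaded first. Note also that queue.join() only unblocks if the consumer calls task_done() for every item it takes; a consumer skeleton under that assumption (_upload is a hypothetical helper):

    async def _push_object_consumer(self, queue, session, progress_update_fn):
        while True:
            push_object = await queue.get()
            try:
                await self._upload(push_object, session, progress_update_fn)
            finally:
                queue.task_done()  # required for queue.join() to return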
Example 29
    async def _run_pull_pipeline(self,
                                 object_service_root: str,
                                 object_service_headers: dict,
                                 objects: List[PullObject],
                                 status_update_fn: Callable,
                                 download_chunk_size: int = 4194304,
                                 num_workers: int = 4):
        """Method to run the async download pipeline

        Args:
            object_service_root: The root URL to use for all objects, including the namespace and dataset name
            object_service_headers: The headers to use when requesting signed urls, including auth info
            objects: A list of PullObjects to pull
            status_update_fn: the update function for providing feedback
            download_chunk_size: Size in bytes for streaming IO chunks
            num_workers: the number of consumer workers to start

        Returns:

        """
        # We use a LifoQueue to ensure S3 downloads start as soon as they are ready, which helps ensure pre-signed urls
        # do not time out before they can be used when there are a lot of files.
        queue: asyncio.LifoQueue = asyncio.LifoQueue()

        async with aiohttp.ClientSession() as session:
            workers = []
            for i in range(num_workers):
                task = asyncio.ensure_future(
                    self._pull_object_consumer(queue, session,
                                               status_update_fn))
                workers.append(task)

            # Populate the work queue
            await self._pull_object_producer(queue, object_service_root,
                                             object_service_headers,
                                             download_chunk_size, objects)

            # wait until the consumer has processed all items
            await queue.join()

            # the workers are still waiting for work, so cancel them
            for worker in workers:
                worker.cancel()
Example 30
    def __init__(self, pool_min_size: int, pool_max_size: int,
                 pool_max_queries: int, pool_loop, connect_url: str,
                 **connect_kwargs):

        if pool_loop is None:
            pool_loop = asyncio.get_event_loop()
        self._loop = pool_loop

        if pool_max_size <= 0:
            raise ValueError('max_size is expected to be greater than zero')

        if pool_min_size < 0:
            raise ValueError(
                'min_size is expected to be greater or equal to zero')

        if pool_min_size > pool_max_size:
            raise ValueError('min_size is greater than max_size')

        if pool_max_queries < 0:
            raise ValueError(
                'max_queries is expected to be greater than or equal to zero')

        self._minsize = pool_min_size
        self._maxsize = pool_max_size

        self._holders = []
        self._initialized = False
        self._queue = asyncio.LifoQueue(loop=self._loop)

        self._closing = False
        self._closed = False

        self._connect_url = connect_url
        self._connect_kwargs = connect_kwargs

        for _ in range(pool_max_size):
            ch = PoolConnectionHolder(self, max_queries=pool_max_queries)
            self._holders.append(ch)
            self._queue.put_nowait(ch)