Example #1
File: task_manager.py Project: dmwm/DAS
async def tasks(tid):
    jobs = []
    jobs.append(asyncio.Task(process_future(test_func, (tid,), {})))
    jobs.append(asyncio.Task(process_future(test_func, (tid,), {})))
    jobs.append(asyncio.Task(process_future(test_func, (tid,), {})))
    print("### jobs", jobs, time.time())
    await asyncio.gather(*jobs)  # await so task results and exceptions propagate
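On Python 3.7+ tasks are normally created with asyncio.create_task() rather than by instantiating asyncio.Task directly, and the gather future is awaited so results and exceptions come back to the caller. A minimal self-contained sketch of the same fan-out, where work() is a hypothetical stand-in for process_future(test_func, (tid,), {}):

import asyncio
import time

async def work(tid):
    # hypothetical stand-in for process_future(test_func, (tid,), {})
    await asyncio.sleep(0.1)
    return tid

async def tasks(tid):
    # create_task schedules each coroutine immediately (3.7+)
    jobs = [asyncio.create_task(work(tid)) for _ in range(3)]
    print("### jobs", jobs, time.time())
    return await asyncio.gather(*jobs)

print(asyncio.run(tasks(42)))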
Example #2
 async def __crawl__(self):
     print('Starting webcrawler on url {}'.format(self.basePath))
     
     t1 = time.time()
     # make tasks that are processing the queue
     tasks = [asyncio.ensure_future(self.process(), loop=self.loop) for _ in range(self.max_tasks)]
     
     # aggregate tasks and squash exceptions
     asyncio.gather(*tasks, return_exceptions=True)
     
     # all queue items should call task_done for each put
     await self.queue.join()
     
     # cancel tasks
     for t in tasks:
          t.cancel()
     
     self.close()
     self.loop.stop()
     
     # save JSON file for viewing
     self._save_file()
     
     print('{} pages processed in {} secs. Data saved in sitemap.json'.format(len(self.processed), time.time() - t1))
     
     # leave
     exit(1)
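This crawler shows a common shape: a fixed pool of workers drains a queue, queue.join() signals completion, and the workers are cancelled afterwards. (Note the exit(1) at the end reports failure status even on success; exit(0) would be conventional.) A self-contained sketch of the pattern, with placeholder queue items and processing:

import asyncio

async def worker(queue):
    while True:
        item = await queue.get()
        try:
            await asyncio.sleep(0.01)  # placeholder for real processing of `item`
        finally:
            queue.task_done()          # every get() must be balanced for join() to return

async def main():
    queue = asyncio.Queue()
    for i in range(20):
        queue.put_nowait(i)
    workers = [asyncio.create_task(worker(queue)) for _ in range(3)]
    await queue.join()                 # returns once task_done() matched every put
    for w in workers:
        w.cancel()                     # the workers loop forever, so cancel them
    await asyncio.gather(*workers, return_exceptions=True)  # reap the CancelledErrors

asyncio.run(main())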
Example #3
    def step(self):
        tasks = [firm.call('set_production_target', self.state) for firm in shuffle(self.firms)]
        jobs = yield from asyncio.gather(*tasks)
        jobs = list(zip(jobs, self.firms))
        yield from self.labor_market(jobs)

        yield from asyncio.gather(*[firm.call('produce', self.state) for firm in self.raw_material_firms])

        # raw material market
        sold = yield from self.market(
            self.raw_material_firms,
            self.consumer_good_firms + self.capital_equipment_firms,
            'purchase_materials')
        print('materials: ${:.2f}'.format(self.mean_sales_price(sold)))

        yield from asyncio.gather(*[firm.call('produce', self.state) for firm in self.capital_equipment_firms])

        # capital equipment market
        sold = yield from self.market(
            self.capital_equipment_firms,
            self.consumer_good_firms + self.raw_material_firms,
            'purchase_equipment')
        print('equipment: ${:.2f}'.format(self.mean_sales_price(sold)))

        yield from asyncio.gather(*[firm.call('produce', self.state) for firm in self.consumer_good_firms])

        # consumer good market
        sold = yield from self.market(
            self.consumer_good_firms,
            self.people,
            'purchase_goods')
        print('consumer goods: ${:.2f}'.format(self.mean_sales_price(sold)))

        # people consume (reset) their goods
        yield from asyncio.gather(*[p.call('consume') for p in self.people])
Example #4
 def _call(self):
     cuuid = str(uuid.uuid4())
     logger.info('Start Job: %s UUID(%s)', str(self), cuuid)
     asyncio.gather(
         self.target(cuuid, *self.args),
         loop=self.loop, return_exceptions=True
         ).add_done_callback(functools.partial(self._result, cuuid))
Example #5
File: app.py Project: tryexceptpass/sofi
    def __run_loop(self):
        # Set event loop
        if self.background:
            logging.info("Running in background")
            asyncio.set_event_loop(self.loop)

        # Create the loop server
        self.server = self.loop.run_until_complete(websockets.serve(self.handler, self.address, self.port))

        try:
            logging.info("Starting server")
            self.loop.run_forever()

        except KeyboardInterrupt:
            logging.info("Keyboard Interrupt received.")

        finally:
            # Tell any clients that we're closing
            self.server.close()
            self.loop.run_until_complete(asyncio.sleep(0.1))

            # Gather any remaining tasks so we can cancel them
            asyncio.gather(*asyncio.Task.all_tasks()).cancel()
            self.loop.stop()

            logging.info("Cancelling pending tasks...")
            self.loop.run_forever()

            logging.info("Stopping Server...")
            self.loop.close()
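asyncio.Task.all_tasks(), used in the finally block here (and again in Examples #10, #18, #20, #26 and #28), was deprecated in Python 3.7 and removed in 3.9; the module-level asyncio.all_tasks() replaces it. A sketch of the same cancel-then-drain shutdown on a modern interpreter:

import asyncio

def cancel_pending(loop):
    # asyncio.all_tasks() replaces asyncio.Task.all_tasks(), removed in Python 3.9
    pending = asyncio.all_tasks(loop)
    for task in pending:
        task.cancel()
    # run the loop once more so each CancelledError is actually delivered
    loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))

async def forever():
    await asyncio.sleep(3600)

loop = asyncio.new_event_loop()
loop.create_task(forever())
loop.run_until_complete(asyncio.sleep(0))  # let the task start
cancel_pending(loop)
loop.close()

Cancelling each task explicitly also makes the intent clearer than the example's gather(...).cancel(), which cancels the aggregate future and relies on it cancelling its children.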
Example #6
    def test_starttls_reject_incorrect_arguments(self):
        ssl_context = unittest.mock.MagicMock()
        post_handshake_callback = unittest.mock.MagicMock()

        self.xmlstream.transport = object()

        with self.assertRaisesRegex(AssertionError,
                                    "mismatched starttls argument"):
            run_coroutine(
                asyncio.gather(
                    self.xmlstream.starttls(object(), post_handshake_callback),
                    self.xmlstream.run_test(
                        [
                            XMLStreamMock.STARTTLS(
                                ssl_context,
                                post_handshake_callback)
                        ],
                    )
                )
            )

        with self.assertRaisesRegex(AssertionError,
                                    "mismatched starttls argument"):
            run_coroutine(
                asyncio.gather(
                    self.xmlstream.starttls(ssl_context, object()),
                    self.xmlstream.run_test(
                        [
                            XMLStreamMock.STARTTLS(
                                ssl_context,
                                post_handshake_callback)
                        ],
                    )
                )
            )
Example #7
async def main():
    url, headers = parse_headers_from_stdin()
    cookies = parse_cookies(headers.get('cookie', ''))
    # wrap the coroutines in tasks up front: as_completed() below re-wraps its
    # arguments, and the later gather(*futs).cancel() must target those same
    # tasks rather than re-wrapping coroutines that are already running
    futs = [asyncio.ensure_future(c) for c in (
        try_cookie_combinations(url, headers, cookies),
        try_cookie_removal(url, headers, cookies),
    )]
    all_results = []
    for result_co in asyncio.as_completed(futs):
        results = await result_co
        all_results += results
        if not results:
            break
        min_len = min(len(res) for res in results)
        if min_len < 2:
            break
    asyncio.gather(*futs).cancel()

    if not all_results:
        print("No combinations found")
        return
    min_len = min(len(res) for res in all_results)
    valid_cookie_combos = filter(lambda res: len(res) == min_len, all_results)
    for combo in valid_cookie_combos:
        print('Found valid combination with cookie names:',
              ', '.join([ck.partition('=')[0] for ck in combo]))
Example #8
File: drone.py Project: tapuu/drone
 def run(self):
     loop = asyncio.get_event_loop()
     inittasks = [
         self.communicator,
         self.telemetry,
         self.detection
     ]
     print("starting init tasks")
     loop.run_until_complete(asyncio.gather(
         *[x.initialise() for x in inittasks]
     ))
     tasks = [
         self,
         self.datastore,
         self.detection,
         self.messagedispatcher,
         self.navigator,
         self.telemetry,
         self.mesh_controller,
         self.engine,
         self.c2_reactor
     ]
     print("starting main tasks")
     loop.run_until_complete(asyncio.gather(
         *[x.startup() for x in tasks]
     ))
     loop.close()
Example #9
File: ipc.py Project: odahoda/noisicaa
    async def command_loop(self):
        cancelled_task = asyncio.ensure_future(self._command_loop_cancelled.wait())
        while not self._command_loop_cancelled.is_set():
            get_task = asyncio.ensure_future(self._command_queue.get())
            done, pending = await asyncio.wait(
                [get_task, cancelled_task],
                return_when=asyncio.FIRST_COMPLETED)
            if get_task not in done:
                get_task.cancel()
                asyncio.gather(get_task, return_exceptions=True)
                continue

            cmd, payload, response_container = get_task.result()

            if self._transport.is_closing():
                response_container.set(self.CLOSE_SENTINEL)
                continue

            self._transport.write(b'CALL %s %d\n' % (cmd, len(payload)))
            if payload:
                self._transport.write(payload)

            response = await self._protocol.response_queue.get()
            response_container.set(response)

        cancelled_task.cancel()
        asyncio.gather(cancelled_task, return_exceptions=True)
Example #10
File: app.py Project: byt3bl33d3r/sofi
    def __start_background_loop(self):
        # Set event loop
        asyncio.set_event_loop(self.loop)

        # Create the loop server
        self.server = self.loop.create_server(self.factory, self.address, self.port)
        self.loop.run_until_complete(self.server)

        try:
            logging.info("Starting background server")
            self.loop.run_forever()

        except KeyboardInterrupt:
            logging.info("Keyboard Interrupt received.")

        finally:
            # Tell any clients that we're closing
            for client in self.clients:
                client.sendClose()
                pass

            self.server.close()

            # Gather any remaining tasks so we can cancel them
            asyncio.gather(*asyncio.Task.all_tasks()).cancel()
            self.loop.stop()

            logging.info("Cancelling pending tasks...")
            self.loop.run_forever()

            logging.info("Stopping Server...")
            self.loop.close()
Example #11
 def test_constructor_heterogenous_futures(self):
     fut1 = asyncio.Future(loop=self.one_loop)
     fut2 = asyncio.Future(loop=self.other_loop)
     with self.assertRaises(ValueError):
         asyncio.gather(fut1, fut2)
     with self.assertRaises(ValueError):
         asyncio.gather(fut1, loop=self.other_loop)
Example #12
 def down():
     self.__log.info('shutdown closes all channels')
     yield from asyncio.gather(*(
             chan.close() for outs in self.outs.values() for _,chan in outs))
     yield from asyncio.gather(*(
             unit.__teardown__() for unit in self.units.values()))
     exit(0)
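This example uses the pre-3.5 generator-based coroutine style: `yield from asyncio.gather(...)` is spelled `await asyncio.gather(...)` in modern code, and generator-based coroutines were removed entirely in Python 3.11. The same teardown shape rewritten as a sketch, where Chan is a hypothetical channel with a close() coroutine:

import asyncio

class Chan:
    async def close(self):
        await asyncio.sleep(0)  # hypothetical channel shutdown

async def down(channels):
    # modern spelling: `await` replaces `yield from`
    await asyncio.gather(*(chan.close() for chan in channels))

asyncio.run(down([Chan() for _ in range(4)]))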
Example #13
    def run(self):
        args = self.make_arg_parser().parse_args()

        self.coordinator = coordinator.Coordinator(work_dir=args.work_dir,
                                                   pseudorange_filename=args.dump_pseudorange,
                                                   partition=args.partition,
                                                   tag=args.tag)

        subtasks = self.make_subtasks(args)

        # Start everything
        startup = asyncio.gather(*[x.start() for x in subtasks])
        self.loop.run_until_complete(startup)
        startup.result()  # provoke exceptions if something failed

        self.loop.add_signal_handler(signal.SIGINT, self.stop, "Halting on SIGINT")
        self.loop.add_signal_handler(signal.SIGTERM, self.stop, "Halting on SIGTERM")

        self.loop.run_forever()  # Well, until stop() is called anyway!

        logging.info("Server shutting down.")

        # Stop everything
        for t in reversed(subtasks):
            t.close()

        # Wait for completion
        shutdown = asyncio.gather(*[t.wait_closed() for t in subtasks], return_exceptions=True)
        self.loop.run_until_complete(shutdown)
        for e in shutdown.result():
            if isinstance(e, Exception) and not isinstance(e, asyncio.CancelledError):
                logging.error("Exception thrown during shutdown", exc_info=(type(e), e, e.__traceback__))

        self.loop.close()
        logging.info("Server shutdown done.")
Example #14
def test_multi_exec(redis, loop):
    yield from redis.delete('foo', 'bar')

    tr = redis.multi_exec()
    f1 = tr.incr('foo')
    f2 = tr.incr('bar')
    res = yield from tr.execute()
    assert res == [1, 1]
    res2 = yield from asyncio.gather(f1, f2, loop=loop)
    assert res == res2

    tr = redis.multi_exec()
    f1 = tr.incr('foo')
    f2 = tr.incr('bar')
    yield from tr.execute()
    assert (yield from f1) == 2
    assert (yield from f2) == 2

    tr = redis.multi_exec()
    f1 = tr.set('foo', 1.0)
    f2 = tr.incrbyfloat('foo', 1.2)
    res = yield from tr.execute()
    assert res == [True, 2.2]
    res2 = yield from asyncio.gather(f1, f2, loop=loop)
    assert res == res2

    tr = redis.multi_exec()
    f1 = tr.incrby('foo', 1.0)
    with pytest.raises_regex(MultiExecError, "increment must be .* int"):
        yield from tr.execute()
    with pytest.raises(TypeError):
        yield from f1
Example #15
File: scans2pdf.py Project: Unrud/djpdf
    def _json(self, psem):
        # Prepare everything in parallel
        def get_dpi(psem):
            if self._page["dpi"] == "auto":
                return (yield from self._input_image.dpi(psem))
            return self._page["dpi"], self._page["dpi"]
        (texts, background, foregrounds_json, (width, height),
         (dpi_x, dpi_y)) = yield from asyncio.gather(
                self._ocr.texts(psem),
                self._background.json(psem),
                asyncio.gather(*[fg.json(psem) for fg in self._foregrounds]),
                self._input_image.size(psem),
                get_dpi(psem))
        if texts is not None:
            for text in texts:
                text["x"] *= (PDF_DPI / dpi_x)
                text["y"] = ((height - text["y"] - text["height"]) *
                             (PDF_DPI / dpi_y))
                text["width"] *= (PDF_DPI / dpi_x)
                text["height"] *= (PDF_DPI / dpi_y)
        # Filter empty foregrounds
        foregrounds_json = [fg for fg in foregrounds_json if fg is not None]

        return {
            "width": width * (PDF_DPI / dpi_x),
            "height": height * (PDF_DPI / dpi_y),
            "background": background,
            "foreground": foregrounds_json,
            "color": self._page["bg_color"],
            "text": texts
        }
Example #16
File: drone.py Project: GPIG5/drone
 def run(self):
     inittasks = [
         self.communicator,
         self.telemetry,
         self.detection
     ]
     print("starting init tasks")
     yield from asyncio.gather(
         *[x.initialise() for x in inittasks]
     )
     tasks = [
         self,
         self.datastore,
         self.detection,
         self.messagedispatcher,
         self.navigator,
         self.telemetry,
         self.mesh_controller,
         self.engine,
         self.navigator.reactor.c2_reactor,
         self.navigator.reactor.battery_life_checker,
         self.navigator.reactor.pit_stop
     ]
     print("starting main tasks")
     yield from asyncio.gather(
         *[x.startup() for x in tasks]
     )
Example #17
    def test_multi_exec(self):
        yield from self.redis.delete("foo", "bar")

        tr = self.redis.multi_exec()
        f1 = tr.incr("foo")
        f2 = tr.incr("bar")
        res = yield from tr.execute()
        self.assertEqual(res, [1, 1])
        res2 = yield from asyncio.gather(f1, f2, loop=self.loop)
        self.assertEqual(res, res2)

        tr = self.redis.multi_exec()
        f1 = tr.incr("foo")
        f2 = tr.incr("bar")
        yield from tr.execute()
        self.assertEqual((yield from f1), 2)
        self.assertEqual((yield from f2), 2)

        tr = self.redis.multi_exec()
        f1 = tr.set("foo", 1.0)
        f2 = tr.incrbyfloat("foo", 1.2)
        res = yield from tr.execute()
        self.assertEqual(res, [True, 2.2])
        res2 = yield from asyncio.gather(f1, f2, loop=self.loop)
        self.assertEqual(res, res2)

        tr = self.redis.multi_exec()
        f1 = tr.incrby("foo", 1.0)
        with self.assertRaisesRegex(MultiExecError, "increment must be .* int"):
            yield from tr.execute()
        with self.assertRaises(TypeError):
            yield from f1
Example #18
File: util.py Project: oberstet/txaio
def run_once():
    '''
    A helper that takes one trip through the event-loop to process any
    pending Futures. This is a no-op for Twisted, because you don't
    need to use the event-loop to get callbacks to happen in Twisted.
    '''

    import txaio
    if txaio.using_twisted:
        return

    try:
        import asyncio
        if sys.version_info >= (3, 7):
            # https://github.com/crossbario/txaio/issues/139
            from _asyncio_test_utils import run_once as _run_once
        else:
            from asyncio.test_utils import run_once as _run_once
        return _run_once(txaio.config.loop or asyncio.get_event_loop())

    except ImportError:
        import trollius as asyncio
        # let any trollius import error out; if we're not using
        # twisted, and have no asyncio *and* no trollius, that's a
        # problem.

        # copied from asyncio.test_utils because trollius has no
        # test_utils

        # just like modern asyncio.test_utils.run_once does it...
        loop = asyncio.get_event_loop()
        loop.stop()
        loop.run_forever()
        asyncio.gather(*asyncio.Task.all_tasks())
Example #19
    def report(self, problems: List[Problem]) -> None:
        grouped_problems = Problem.group_by_path_and_line(problems)

        headers = {
            'Authorization': 'token {}'.format(self.auth_token),
        }
        with aiohttp.ClientSession(headers=headers) as client_session:
            (line_map, existing_messages) = yield from asyncio.gather(
                self.create_line_to_position_map(client_session),
                self.get_existing_messages(client_session))
            lint_errors = 0
            review_comment_awaitable = []
            pr_url = self._get_pr_url()
            for location, problems_for_line in grouped_problems:
                message_for_line = [':sparkles:Linty Fresh Says:sparkles::',
                                    '',
                                    '```']

                reported_problems_for_line = set()
                for problem in problems_for_line:
                    if problem.message not in reported_problems_for_line:
                        message_for_line.append(problem.message)
                        reported_problems_for_line.add(problem.message)
                message_for_line.append('```')

                path = location[0]
                line_number = location[1]
                position = line_map.get(path, {}).get(line_number, None)
                if position is not None:
                    message = '\n'.join(message_for_line)
                    if (path, position, message) not in existing_messages:
                        lint_errors += 1
                        if lint_errors <= MAX_LINT_ERROR_REPORTS:
                            data = json.dumps({
                                'body': message,
                                'commit_id': self.commit,
                                'path': path,
                                'position': position,
                            }, sort_keys=True)
                            review_comment_awaitable.append(
                                client_session.post(pr_url, data=data))
            if lint_errors > MAX_LINT_ERROR_REPORTS:
                message = ''':sparkles:Linty Fresh Says:sparkles::

Too many lint errors to report inline!  {0} lines have a problem.
Only reporting the first {1}.'''.format(
                    lint_errors, MAX_LINT_ERROR_REPORTS)
                data = json.dumps({
                    'body': message
                })
                review_comment_awaitable.append(
                    # asyncio.async() was renamed to ensure_future ("async" became a keyword in Python 3.7)
                    asyncio.ensure_future(client_session.post(pr_url, data=data)))

            responses = yield from asyncio.gather(
                *review_comment_awaitable
            )  # type: List[aiohttp.ClientResponse]

            for response in responses:
                response.close()
Example #20
File: agent.py Project: openstack/iotronic
    def stop(self):
        LOG.info("Stopping WAMP server...")

        # Canceling pending tasks and stopping the loop
        asyncio.gather(*asyncio.Task.all_tasks()).cancel()
        # Stopping the loop
        self.loop.stop()
        LOG.info("WAMP server stopped.")
Example #21
File: web.py Project: wwqgtxx/wwqLyParse
def run_app(app, *, host=None, port=None, path=None, sock=None,
            shutdown_timeout=60.0, ssl_context=None,
            print=print, backlog=128, access_log_format=None,
            access_log=access_logger, handle_signals=True, loop=None):
    """Run an app locally"""
    user_supplied_loop = loop is not None
    if loop is None:
        loop = asyncio.get_event_loop()

    app._set_loop(loop)
    loop.run_until_complete(app.startup())

    try:
        make_handler_kwargs = dict()
        if access_log_format is not None:
            make_handler_kwargs['access_log_format'] = access_log_format
        handler = app.make_handler(loop=loop, access_log=access_log,
                                   **make_handler_kwargs)

        server_creations, uris = _make_server_creators(
            handler,
            loop=loop, ssl_context=ssl_context,
            host=host, port=port, path=path, sock=sock,
            backlog=backlog)
        servers = loop.run_until_complete(
            asyncio.gather(*server_creations, loop=loop)
        )

        if handle_signals:
            try:
                loop.add_signal_handler(signal.SIGINT, raise_graceful_exit)
                loop.add_signal_handler(signal.SIGTERM, raise_graceful_exit)
            except NotImplementedError:  # pragma: no cover
                # add_signal_handler is not implemented on Windows
                pass

        try:
            if print:
                print("======== Running on {} ========\n"
                      "(Press CTRL+C to quit)".format(', '.join(uris)))
            loop.run_forever()
        except (GracefulExit, KeyboardInterrupt):  # pragma: no cover
            pass
        finally:
            server_closures = []
            for srv in servers:
                srv.close()
                server_closures.append(srv.wait_closed())
            loop.run_until_complete(
                asyncio.gather(*server_closures, loop=loop))
            loop.run_until_complete(app.shutdown())
            loop.run_until_complete(handler.shutdown(shutdown_timeout))
    finally:
        loop.run_until_complete(app.cleanup())
    if not user_supplied_loop:
        if hasattr(loop, 'shutdown_asyncgens'):
            loop.run_until_complete(loop.shutdown_asyncgens())
        loop.close()
Example #22
	async def _read(self):
		while True:
			msgtype, payload = await self.recvmsg()
			if msgtype & 0x80000000:
				msgtype &= 0x7FFFFFFF
				asyncio.gather(*(f(msgtype, payload) for f in self._eventhandlers))
			else:
				msgtype2, fut = await self._queue.get()
				assert msgtype2 == msgtype
				fut.set_result(payload)
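The event branch above fires the handlers with a gather whose future is never stored. Since the event loop keeps only weak references to running tasks, fire-and-forget work should hold a strong reference until it completes; a hedged sketch of the dispatch with that bookkeeping:

import asyncio

class Dispatcher:
    def __init__(self):
        self._eventhandlers = []
        self._background = set()  # strong references keep the handler tasks alive

    def dispatch(self, msgtype, payload):
        fut = asyncio.gather(*(f(msgtype, payload) for f in self._eventhandlers))
        self._background.add(fut)
        fut.add_done_callback(self._background.discard)

async def handler(msgtype, payload):
    print("event", msgtype, payload)

async def main():
    d = Dispatcher()
    d._eventhandlers.append(handler)
    d.dispatch(1, b"payload")
    await asyncio.sleep(0.1)  # give the handlers a chance to run

asyncio.run(main())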
Example #23
 def test_constructor_homogenous_futures(self):
     children = [asyncio.Future(loop=self.other_loop) for i in range(3)]
     fut = asyncio.gather(*children)
     self.assertIs(fut._loop, self.other_loop)
     self._run_loop(self.other_loop)
     self.assertFalse(fut.done())
     fut = asyncio.gather(*children, loop=self.other_loop)
     self.assertIs(fut._loop, self.other_loop)
     self._run_loop(self.other_loop)
     self.assertFalse(fut.done())
Example #24
 def test_connection_pool(self):
     '''Test the connection pool. A very important test!'''
     client = Echo(self.server_cfg.addresses[0], pool_size=2)
     self.assertEqual(client._loop, get_event_loop())
     #
     self.assertEqual(client.pool.pool_size, 2)
     self.assertEqual(client.pool.in_use, 0)
     self.assertEqual(client.pool.available, 0)
     self.assertEqual(client.sessions, 0)
     self.assertEqual(client._requests_processed, 0)
     #
     response = yield from client(b'test connection')
     self.assertEqual(response, b'test connection')
     self.assertEqual(client.pool.in_use, 0)
     self.assertEqual(client.pool.available, 1)
     self.assertEqual(client.sessions, 1)
     self.assertEqual(client._requests_processed, 1)
     #
     response = yield from client(b'test connection 2')
     self.assertEqual(response, b'test connection 2')
     self.assertEqual(client.pool.in_use, 0)
     self.assertEqual(client.pool.available, 1)
     self.assertEqual(client.sessions, 1)
     self.assertEqual(client._requests_processed, 2)
     #
     result = yield from gather(client(b'ciao'),
                                client(b'pippo'),
                                client(b'foo'))
     self.assertEqual(len(result), 3)
     self.assertTrue(b'ciao' in result)
     self.assertTrue(b'pippo' in result)
     self.assertTrue(b'foo' in result)
     self.assertEqual(client.pool.in_use, 0)
     self.assertEqual(client.pool.available, 2)
     self.assertEqual(client.sessions, 2)
     self.assertEqual(client._requests_processed, 5)
     #
     # drop a connection
     yield from run_in_loop(client._loop, self._drop_conection, client)
     #
     result = yield from gather(client(b'ciao'),
                                client(b'pippo'),
                                client(b'foo'))
     self.assertEqual(len(result), 3)
     self.assertEqual(client.pool.in_use, 0)
     self.assertEqual(client.pool.available, 2)
     self.assertEqual(client.sessions, 3)
     self.assertEqual(client._requests_processed, 8)
     #
     yield from run_in_loop(client._loop, client.pool.close)
     #
     self.assertEqual(client.pool.in_use, 0)
     self.assertEqual(client.pool.available, 0)
     self.assertEqual(client.sessions, 3)
     self.assertEqual(client._requests_processed, 8)
Example #25
 def _check_empty_sequence(self, seq_or_iter):
     asyncio.set_event_loop(self.one_loop)
     self.addCleanup(asyncio.set_event_loop, None)
     fut = asyncio.gather(*seq_or_iter)
     self.assertIsInstance(fut, asyncio.Future)
     self.assertIs(fut._loop, self.one_loop)
     self._run_loop(self.one_loop)
     self.assertTrue(fut.done())
     self.assertEqual(fut.result(), [])
     fut = asyncio.gather(*seq_or_iter, loop=self.other_loop)
     self.assertIs(fut._loop, self.other_loop)
Example #26
def shutdown_callback(code, _):
    logging.info('Shutting down. Code: %s.', str(code))
    asyncio.gather(*asyncio.Task.all_tasks()).cancel()
    context.loop.stop()
    shutdown_webhook()
    shutdown_workers()
    if context and context.watcher:
        context.watcher.close()
        context.watcher = None
    logging.shutdown()
    logging.info('Shut down complete.')
Example #27
 async def stop(self, torrent: MetaInfoFile = None):
     """
     Stops downloading the specified torrent, or all torrents if none specified.
     :param torrent: torrent to stop downloading. Default = None = ALL torrents
     """
     if torrent:
         await self.torrents[torrent].cancel()
     else:
         tasks = []
         for t, ct in self.torrents.items():
             tasks.append(ct.cancel)
          await asyncio.gather(*[x() for x in tasks])  # await so the cancellations actually run to completion
Example #28
    def exit_cleanly(self):
        loop = asyncio.get_event_loop()
        self.log.info("Shutting down charlesbot")
        self.set_running(False)

        for plug in self.plugin_list:
            self.log.info("Shutting down plugin: %s" % plug.get_plugin_name())
            plug.is_running = False

        pending = asyncio.Task.all_tasks()
        asyncio.gather(*pending).cancel()
        loop.stop()
Example #29
    def labor_market(self, jobs):
        tasks = [p.get('employer') for p in self.people]
        applicants = {f: [] for (_, __), f in jobs}
        employers = yield from asyncio.gather(*tasks)
        job_seekers = [p for p, e in zip(self.people, employers) if e is None]
        while job_seekers and jobs:
            job_dist = self.job_distribution(jobs)
            for p in shuffle(job_seekers):
                (n_vacancies, wage), firm  = random_choice(job_dist)
                applicants[firm].append(p)

            # firms select from their applicants
            _jobs = []
            for job in shuffle(jobs):
                # filter down to valid applicants
                (n_vacancies, wage), firm = job
                apps = [a for a in applicants[firm] if a in job_seekers]
                hired, n_vacancies, wage = yield from firm.call('hire', apps, wage)

                # remove hired people from the job seeker pool
                for p in hired:
                    job_seekers.remove(p)

                if not job_seekers:
                    break

                # if vacancies remain, post the new jobs with the new wage
                if n_vacancies:
                    _jobs.append(((n_vacancies, wage), firm))
            jobs = _jobs
Example #30
def run_app(app, *, host='0.0.0.0', port=None,
            shutdown_timeout=60.0, ssl_context=None,
            print=print, backlog=128):
    """Run an app locally"""
    if port is None:
        if not ssl_context:
            port = 8080
        else:
            port = 8443

    loop = app.loop

    handler = app.make_handler()
    server = loop.create_server(handler, host, port, ssl=ssl_context,
                                backlog=backlog)
    srv, startup_res = loop.run_until_complete(asyncio.gather(server,
                                                              app.startup(),
                                                              loop=loop))

    scheme = 'https' if ssl_context else 'http'
    print("======== Running on {scheme}://{host}:{port}/ ========\n"
          "(Press CTRL+C to quit)".format(
              scheme=scheme, host=host, port=port))

    try:
        loop.run_forever()
    except KeyboardInterrupt:  # pragma: no cover
        pass
    finally:
        srv.close()
        loop.run_until_complete(srv.wait_closed())
        loop.run_until_complete(app.shutdown())
        loop.run_until_complete(handler.finish_connections(shutdown_timeout))
        loop.run_until_complete(app.cleanup())
    loop.close()
Example #31
            await db.execute("DELETE FROM users WHERE proofs = $1",
                             ctx.message_id)


@bot.event
async def on_raw_message_edit(ctx):
    if ctx.channel_id == 658331377602920471:
        proofs = await db.fetchrow("SELECT * FROM users WHERE proofs = $1",
                                   ctx.message_id)
        if proofs:
            await db.execute("DELETE FROM users WHERE proofs = $1",
                             ctx.message_id)
            message = (await bot.get_channel(ctx.channel_id
                                             ).fetch_message(ctx.message_id))
            await message.delete()


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    botprocess = loop.create_task(bot.start("NO BOT TOKEN PUBLISHING FOR ME"))
    postgres = loop.create_task(run())
    web = loop.create_task(app.run(port=3000, loop=loop))
    gathered = asyncio.gather(botprocess, web)
    try:
        loop.run_until_complete(gathered)
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(bot.logout())
        loop.close()
Example #32
#! python3
# coding: utf-8
import time
import aiohttp
import asyncio

NUMBERS = range(240)
#URL = 'http://httpbin.org/get?a={}'
URL = 'http://192.168.1.22:8000/get?a={}'


async def fetch_async(a):
    async with aiohttp.request('GET', URL.format(a)) as r:
        data = await r.json()
    return data['args']['a']


start = time.time()
event_loop = asyncio.get_event_loop()
tasks = [fetch_async(num) for num in NUMBERS]
results = event_loop.run_until_complete(asyncio.gather(*tasks))

#for num,result in zip(NUMBERS, results):
#    print('fetch({}) = {}'.format(num, result))

print('Use aiohttp+asyncio  cost: {}'.format(time.time() - start))
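On Python 3.7+ the same benchmark is usually driven by asyncio.run() with one shared ClientSession (aiohttp.request() opens a fresh session per call, which is slower). A sketch of that shape:

import asyncio
import time

import aiohttp

NUMBERS = range(240)
URL = 'http://httpbin.org/get?a={}'

async def fetch_async(session, a):
    async with session.get(URL.format(a)) as r:
        data = await r.json()
    return data['args']['a']

async def main():
    # one session reuses connections across all the requests
    async with aiohttp.ClientSession() as session:
        return await asyncio.gather(*(fetch_async(session, n) for n in NUMBERS))

start = time.time()
results = asyncio.run(main())
print('Use aiohttp+asyncio  cost: {}'.format(time.time() - start))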
Example #33
    def run_sweeps(self):
        # Propagate the descriptors through the network
        self.update_descriptors()
        # Make sure we are starting from scratch... is this necessary?
        self.reset()
        # Update the progress bar if need be
        if self.progressbar is not None:
            self.progressbar.reset()

        #Make sure we have axes.
        if not any(
            [oc.descriptor.axes for oc in self.output_connectors.values()]):
            logger.warning(
                "There do not appear to be any axes defined for this experiment!"
            )

        # Go find any writers
        self.writers = [n for n in self.nodes if isinstance(n, WriteToHDF5)]
        self.buffers = [n for n in self.nodes if isinstance(n, DataBuffer)]
        if self.name:
            for w in self.writers:
                w.filename.value = os.path.join(
                    os.path.dirname(w.filename.value), self.name)
        self.filenames = [w.filename.value for w in self.writers]
        self.files = []

        # Check for redundancy in filenames, and share plot file objects
        for filename in set(self.filenames):
            wrs = [w for w in self.writers if w.filename.value == filename]

            # Let the first writer with this filename create the file...
            wrs[0].file = wrs[0].new_file()
            self.files.append(wrs[0].file)

            # Make the rest of the writers use this same file object
            for w in wrs[1:]:
                w.file = wrs[0].file
                w.filename.value = wrs[0].filename.value

        # Remove the nodes with 0 dimension
        self.nodes = [
            n for n in self.nodes
            if not (hasattr(n, 'input_connectors')
                    and n.input_connectors['sink'].descriptor.num_dims() == 0)
        ]

        # Go and find any plotters
        self.standard_plotters = [
            n for n in self.nodes
            if isinstance(n, (Plotter, MeshPlotter, XYPlotter))
        ]
        self.plotters = copy.copy(self.standard_plotters)

        # We might have some additional plotters that are separate from
        # the asyncio filter pipeline
        self.plotters.extend(self.extra_plotters)

        # These use neither streams nor the filter pipeline
        self.plotters.extend(self.manual_plotters)

        # Call any final initialization on the filter pipeline
        for n in self.nodes + self.extra_plotters:
            n.experiment = self
            n.loop = self.loop
            # n.executor   = self.executor
            if hasattr(n, 'final_init'):
                n.final_init()

        # Launch plot servers.
        if len(self.plotters) > 0:
            self.init_plot_servers()
        time.sleep(1)
        #connect all instruments
        self.connect_instruments()
        #initialize instruments
        self.init_instruments()

        def catch_ctrl_c(signum, frame):
            logger.info("Caught SIGINT. Shutting down.")
            self.shutdown()
            raise NameError("Shutting down.")
            sys.exit(0)

        signal.signal(signal.SIGINT, catch_ctrl_c)

        # We want to wait for the sweep method above,
        # not the experiment's run method, so replace this
        # in the list of tasks.
        other_nodes = self.nodes[:]
        other_nodes.extend(self.extra_plotters)
        other_nodes.remove(self)
        tasks = [n.run() for n in other_nodes]

        tasks.append(self.sweep())
        try:
            self.loop.run_until_complete(asyncio.gather(*tasks))
            self.loop.run_until_complete(asyncio.sleep(1))
        except Exception as e:
            logger.exception("message")

        for plot, callback in zip(self.manual_plotters,
                                  self.manual_plotter_callbacks):
            if callback:
                callback(plot)

        self.shutdown()
Example #34
 def _unknown_command(self, message):
     message.message = "." + message.message[len("borgcmd" +
                                                 str(self.instance_id)) +
                                             1:]
     return asyncio.gather(*[uk(message, "") for uk in self._unknowns])
Example #35
if __name__ == "__main__":

    local_node = OEFLocalProxy.LocalNode()

    client = WeatherClient("weather_client", local_node)
    server = WeatherStation("weather_station", local_node)
    client.connect()
    server.connect()

    server.register_service(0, server.weather_service_description)

    query = Query([
        Constraint(TEMPERATURE_ATTR.name, Eq(True)),
        Constraint(AIR_PRESSURE_ATTR.name, Eq(True)),
        Constraint(HUMIDITY_ATTR.name, Eq(True))
    ], WEATHER_DATA_MODEL)

    client.on_search_result(0, ["weather_station"])

    try:
        loop = asyncio.get_event_loop()
        asyncio.ensure_future(local_node.run())
        loop.run_until_complete(
            asyncio.gather(client.async_run(), server.async_run()))
        local_node.stop()
    finally:
        local_node.stop()
        client.stop()
        server.stop()
Example #36
        g.setup(proxy=proxy_for_parser.host + ':' + str(proxy_for_parser.port),
                proxy_type='http')
        print('2')
        try:

            g.go('http://www.google.com/search?q=Spam')
        except Exception:  # bare except would also swallow KeyboardInterrupt/SystemExit
            pass
        print('2.1')
        print(g.doc.url)
        print('3')
        proxy_for_parser = await proxies.get()

    #async def test_my_server():
    #proxy_server = Proxy('94.177.175.232', '3128')


proxies = asyncio.Queue()
broker = Broker(proxies)
tasks = asyncio.gather(
    broker.find(types={'HTTP': 'High'}, limit=50, countries='CA'),
    prx_srv(proxies))

loop = asyncio.get_event_loop()
loop.run_until_complete(tasks)
#loop.run_until_complete(test_my_server())

#proxy_server = Proxy('94.177.175.232','3128')
#print(proxy_server.geo)
#print(proxy_server.is_working)
#help(asyncio.gather())
Example #37
    # ),
    # parser.add_argument("--play-from", help="Read the media from a file and sent it."),
    # parser.add_argument("--record-to", help="Write received media to a file."),
    # parser.add_argument("--verbose", "-v", action="count")
    # args = parser.parse_args()

    # if args.verbose:
    #     logging.basicConfig(level=logging.DEBUG)

    # create signaling and peer connection
    session = JanusSession("http://192.168.3.101:8088/janus")

    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(run(session=session))
    except KeyboardInterrupt:
        pass
    finally:

        if recorder is not None:
            loop.run_until_complete(recorder.stop())
        loop.run_until_complete(session.destroy())

        # close peer connections
        coros = [pc.close() for pc in pcs]
        loop.run_until_complete(asyncio.gather(*coros))

    while True:
        time.sleep(1)  # asyncio.sleep() here only created un-awaited coroutines; time.sleep (assumes `import time`) actually blocks

# DTLS handshake differs between the two; it probably only happens when you do something with the track (or something that happens before you start using the track)
Example #38
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            data = await response.read()
            with open(dest, 'wb') as f:
                f.write(data)


async def setup_mask():
    await download_file(export_file_url, path / export_file_name)
    BLACK_IMAGE_PATH = path / export_file_name
    return BLACK_IMAGE_PATH


loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(setup_mask())]
BLACK_IMAGE_PATH = loop.run_until_complete(asyncio.gather(*tasks))[0]
loop.close()


@app.route('/')
async def homepage(request):
    html_file = path / 'view' / 'index.html'
    if os.path.exists("app/newmask.png"):
        os.remove("app/newmask.png")
    else:
        print("The file does not exist")
    return HTMLResponse(html_file.open().read())


@app.route('/analyze', methods=['POST', 'GET'])
async def analyze(request):
Example #39
"""Find and show 10 working HTTP(S) proxies."""

import asyncio

from proxybroker import Broker


async def show(proxies):
    while True:
        proxy = await proxies.get()
        if proxy is None:
            break
        print('Found proxy: %s' % proxy)


proxies = asyncio.Queue()
broker = Broker(proxies)
tasks = asyncio.gather(broker.find(types=['HTTP', 'HTTPS'], limit=10),
                       show(proxies))

loop = asyncio.get_event_loop()
loop.run_until_complete(tasks)
Example #40
    print("woshi disange")
    await asyncio.sleep(1)
    return "three"


def call_back(loop, futu):
    print("调用回调函数")
    # 在回调函数中,同样会将future对象传进来,因此获取到一步函数的返回值
    print(futu.result(), 111111111)
    loop.stop()


if __name__ == '__main__':
    start = time.time()
    # 获得时间循环对象
    loop = asyncio.get_event_loop()
    # 创建异步协成列表
    geater = [first(), second(), three()]
    # 可以创建一个future对象,将多个异步任务合并成一个future
    futu = asyncio.gather(*geater)
    # 使用future添加异步任务都完成后的回调函数
    # functools.partial会先将函数的部分参数传递进去,等所有参数都传递完了,再开始调用
    futu.add_done_callback(functools.partial(call_back, loop))
    # 使用ayncio.gather将事件添加进时间循环
    # loop.run_until_complete(asyncio.gather(*geater))
    loop.run_forever()
    # 循环完成后,关闭事件循环
    # loop.close()
    ends = time.time()
    print(ends - start)
Example #41
 async def start(self):
     """Start telegram queue."""
     self._consumer_task = asyncio.gather(self._telegram_consumer(),
                                          self._outgoing_rate_limiter())
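Keeping the gather future, as this queue does, leaves a single handle that can cancel both coroutines later. A sketch of a matching stop(), with hypothetical consumer/limiter coroutines standing in for the ones the class refers to (the original only shows start()):

import asyncio

class TelegramQueue:
    # hypothetical companion sketch; only start()/stop() matter here
    async def start(self):
        self._consumer_task = asyncio.gather(self._telegram_consumer(),
                                             self._outgoing_rate_limiter())

    async def stop(self):
        self._consumer_task.cancel()  # cancelling the gather cancels both children
        try:
            await self._consumer_task
        except asyncio.CancelledError:
            pass

    async def _telegram_consumer(self):
        while True:
            await asyncio.sleep(1)

    async def _outgoing_rate_limiter(self):
        while True:
            await asyncio.sleep(1)

async def main():
    q = TelegramQueue()
    await q.start()
    await asyncio.sleep(0.1)
    await q.stop()

asyncio.run(main())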
Example #42
    async def socketconnection(self):  # noqa: C901
        '''
        Main socket connection function
        May be called repeatedly when trying to open a connection
        '''
        # Make sure we retry tasks on reconnection
        self.retry_task = True

        # Setup SSL context
        self.ctx = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)

        # Handle both IPv4 and IPv6 cases
        try:
            logging.debug("Try IPv4 (may autodetect IPv6)")
            self.reader, self.writer = await asyncio.wait_for(
                asyncio.open_connection(
                    self.addr,
                    self.port,
                    ssl=self.ctx,
                ),
                timeout=1.0,
            )
        except (asyncio.TimeoutError, OSError):
            logging.debug("Try IPv6")
            try:
                self.reader, self.writer = await asyncio.wait_for(
                    asyncio.open_connection(self.addr,
                                            self.port,
                                            ssl=self.ctx,
                                            family=socket.AF_INET6),
                    timeout=1.0,
                )
            except (asyncio.TimeoutError, OSError):
                logger.debug(
                    "Retrying port connection {}:{} auth level {}".format(
                        self.addr, self.port, self.auth))
                return False

        self.overalltasks = []

        # Assemble reader and writer tasks, run in the background
        logging.debug("Backgrounding socket reader and writer functions")
        coroutines = [self.socketreader(), self.socketwriter()]
        self.overalltasks.append(
            asyncio.gather(*coroutines, return_exceptions=True))

        # Start TwoPartyClient using TwoWayPipe (takes no arguments in this mode)
        logging.debug("Starting TwoPartyClient")
        self.client = capnp.TwoPartyClient()
        logging.debug("Starting Bootstrap")
        self.cap = self.client.bootstrap().cast_as(hidio_capnp.HidIoServer)

        # Start watcher to restart socket connection if it is lost
        logging.debug("Backgrounding socketwatcher")
        watcher = [self.socketwatcher()]
        self.overalltasks.append(
            asyncio.gather(*watcher, return_exceptions=True))

        # Start hid-io-core log watcher
        logging.debug("Backgrounding corelogwatcher")
        watcher = [self.corelogwatcher()]
        self.overalltasks.append(
            asyncio.gather(*watcher, return_exceptions=True))

        # Lookup version information
        self.version_info = (await self.cap.version().a_wait()).version
        logger.info(self.version_info)

        # Lookup uid
        self.uid_info = (await self.cap.id().a_wait()).id
        logger.info("uid: %s", self.uid_info)

        # Lookup name
        self.daemon_name = (await self.cap.name().a_wait()).name
        logger.info("name: %s", self.daemon_name)

        # Lookup log files
        self.core_log_files = (await self.cap.logFiles().a_wait()).paths
        logger.info("hid-io-core log files: %s", self.core_log_files)
        try:
            self.core_current_log_file = [
                f for f in self.core_log_files if 'rCURRENT' in f
            ][0]
            self.core_current_log_file_offset = "{}.offset.{}".format(
                self.core_current_log_file, self.uid_info)
            self.reset_corelog_followposition()
        except IndexError:
            self.core_current_log_file = None
            logger.warn("Could not find current hid-io-core log file...")

        # AUTH_NONE doesn't need to go any further
        cap_auth = None
        if self.auth:
            # Lookup key information
            self.key_info = (await self.cap.key().a_wait()).key
            logger.info(self.key_info)

            # Lookup key for auth level
            key_lookup = {
                self.AUTH_BASIC: self.key_info.basicKeyPath,
                self.AUTH_ADMIN: self.key_info.authKeyPath,
            }
            key_location = key_lookup[self.auth]

            # Fail connection if authentication key cannot be read
            # This usually means that the client doesn't have permission
            self.key = None
            try:
                with open(key_location, 'r') as myfile:
                    self.key = myfile.read()
            except OSError as err:
                # XXX (HaaTa): Commented out as this info is sensitive
                #  logger.error("Could not read '%s'. This usually means insufficient permissions.", key_location)
                logger.error(
                    "Could not read keyfile. This usually means insufficient permissions."
                )
                logger.error(err)
                await self.disconnect()
                return False
            logger.info("Key: %s", self.key)

            # Connect to specified auth level
            cap_auth = await self.capability_authenticate()
            if not cap_auth:
                await self.disconnect()
                return False
            logger.debug("Authenticated with %s", self.auth)

            # Add nodes subscription
            background_tasks = [self.nodeswatcher()]
            self.overalltasks.append(
                asyncio.gather(*background_tasks, return_exceptions=True))

        # Callback
        await self.on_connect(self.cap, cap_auth)

        # Spin here until connection is broken
        while self.retry_task:
            await asyncio.sleep(1)
        logger.debug("socketconnection done.")
Example #43
 def finalizer():
     event_loop.run_until_complete(
         asyncio.gather(client.cancel(), server.cancel()))
     # Yield control so that client/server.run() returns, otherwise asyncio will complain.
     event_loop.run_until_complete(asyncio.sleep(0.1))
Example #44
def main4():
    loop = asyncio.get_event_loop()
    future = asyncio.gather(prime_filter(2, 100), square_mapper(1, 100))
    future.add_done_callback(lambda x: print(x.result()))
    loop.run_until_complete(future)
    loop.close()
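On 3.7+ the done-callback is unnecessary: asyncio.run() returns the gathered result list directly. A sketch with toy stand-ins for prime_filter and square_mapper:

import asyncio

async def prime_filter(lo, hi):
    # toy stand-in for the coroutine used above
    await asyncio.sleep(0)
    return [n for n in range(lo, hi) if n > 1 and all(n % d for d in range(2, n))]

async def square_mapper(lo, hi):
    await asyncio.sleep(0)
    return [n * n for n in range(lo, hi)]

async def main4():
    return await asyncio.gather(prime_filter(2, 20), square_mapper(1, 5))

print(asyncio.run(main4()))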
Example #45
def insert_topics(data):
    #es = Elasticsearch(['%s:%d'%(es_ip, es_port)])

    some_bulks = ''
    bulk_result = 0
    #bulk_result = None

    try:
        result = teaclient.request(data)
        #print(result)

        root = et.fromstring(result)
        status = root.findall(
            "./results/result[@name='status']")[0].text if len(
                root.findall("./results/result[@name='status']")) > 0 else ''
        #print(">>> Tea client response : %s" % status)

        if status == "success" and len(
                root.findall("./results/result[@name='keywords']")) > 0:
            result_scd = root.findall(
                "./results/result[@name='keywords']")[0].text

            terms = ""
            verbs = ""
            for line in result_scd.split("\n"):
                if line.startswith("<TERMS>"):
                    terms = line.replace("<TERMS>", "")  # term:count pairs, e.g. 하늘:387^테스트:14^도움:11
                elif line.startswith("<VERBS>"):
                    verbs = line.replace("<VERBS>", "")  # term:count pairs, e.g. 하늘:387^테스트:14^도움:11
                #print("### terms : %s" % terms)

            # <TERMS>
            #t = asyncio.ensure_future(time_log())
            terms = [('NN', term)
                     for term in terms.split(teaclient.ITEM_DELIMITER)]
            verbs = [('VV', verb)
                     for verb in verbs.split(teaclient.ITEM_DELIMITER)]

            # 2018.03.26: a topic that appears in both terms and verbs is treated as a noun (NN).
            newDict = {}
            for cl, topic in terms + verbs:
                if topic in newDict:
                    newDict[topic]['cnt'] += 1
                else:
                    newDict[topic] = {'cnt': 1, 'topic_class': cl}

            newArr = []
            for x in newDict.items():
                if x[1]['cnt'] > 1 or x[1]['topic_class'] == 'NN':
                    newArr.append(('NN', x[0]))
                else:
                    newArr.append(('VV', x[0]))

            fts = [make_bulk(t, data) for t in (newArr)]
            #t.cancel()
            some_bulks = yield from asyncio.gather(*fts)

            for bulk in some_bulks:
                print(bulk)

    except ParseError as xmlerror:
        logger.error("[insert_topics] TeaClient failed. (%s)" % str(xmlerror))
        logger.error("==============> teaclient's xml response : %s" % result)
Example #46
        #await asyncio.sleep(8)

    await ipc.disconnect()


async def call_2():
    ipc = AsyncIpcClient(klass=Hello(), resending=False)
    await ipc.connect()
    my_class = ipc.server  # Get class instance proxy
    items = 1
    while True:
        try:
            jobs = [my_class.echo(1, 2, 3, me="hello", you=3) for _ in range(items)]
            t = time.time()
            res = await asyncio.gather(*jobs)
        except Exception as e:
            print(e)
        #print(f"it took {(time.time() -t):.2f} seconds to complete {items} requests.\n"
        #      f"One request took {(time.time()-t) / items * 1000:.4f} millisecond")
        #print(res[0])
        await asyncio.sleep(8)

    await ipc.disconnect()

try:
    jobs = [call_2() for _ in range(100)]
    asyncio.get_event_loop().run_until_complete(asyncio.gather(*jobs))
except KeyboardInterrupt:
    print("Keyboard interrupt")

Example #47
 def close_rpc(self):
     if self.status != Status.closed:
         rpc.active.discard(self)
     self.status = Status.closed
     return asyncio.gather(*self.close_comms())
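Returning the gather future from a synchronous method, as close_rpc() does, lets the caller decide whether to await the shutdown or fire and forget it. A minimal sketch (close_comms() here is a hypothetical stand-in returning one close coroutine per channel):

import asyncio

class Rpc:
    def close_comms(self):
        # hypothetical: one close coroutine per open communication channel
        return [asyncio.sleep(0) for _ in range(3)]

    def close_rpc(self):
        # synchronous API; callers may await the returned future or ignore it
        return asyncio.gather(*self.close_comms())

async def main():
    await Rpc().close_rpc()

asyncio.run(main())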
Example #48
    async with aiohttp.ClientSession() as session:
        html = await get_content(session, url)
        parser_content(html)


# all the pages to crawl
urls = [
    'http://bang.dangdang.com/books/bestsellers/01.00.00.00.00.00-recent7-0-0-1-%d'
    % i for i in range(1, 3)
]

# measure the crawler's elapsed time
print('#' * 50)
t1 = time.time()  # start time

# use the asyncio module for asynchronous I/O:
'''
get the event loop
run the coroutines
'''
loop = asyncio.get_event_loop()
# loop.run_until_complete(download(urls))
tasks = [asyncio.ensure_future(download(url)) for url in urls]
tasks = asyncio.gather(*tasks)
loop.run_until_complete(tasks)
loop.close()

t2 = time.time()  # end time
print('Total elapsed time: %s' % (t2 - t1))
print('#' * 50)
Example #49
File: get.py Project: Juniper/splitcopy
    def get(self):
        """
        copies file from remote host to local host
        performs file split/transfer/join/verify functions
        :returns loop_start: time when transfers started
        :type: datetime object
        :returns loop_end: time when transfers ended
        :type: datetime object
        """
        ssh_kwargs = {
            "username": self.user,
            "hostname": self.host,
            "password": self.passwd,
            "key_filename": self.ssh_key,
            "ssh_port": self.ssh_port,
        }

        # handle sigint gracefully on *nix, WIN32 is (of course) a basket case
        signal.signal(signal.SIGINT, self.handlesigint)

        # connect to host
        self.sshshell, ssh_kwargs = self.scs.connect(SSHShell, **ssh_kwargs)

        # determine remote host os
        junos, evo, bsd_version, sshd_version = self.scs.which_os()

        # verify which protocol to use
        self.copy_proto, self.passwd = self.scs.which_proto(self.copy_proto)

        # ensure dest path is valid
        self.validate_remote_path_get()

        # check required binaries exist on remote host
        self.scs.req_binaries(junos=junos, evo=evo)

        # cleanup previous remote tmp directory if found
        self.scs.remote_cleanup(
            remote_dir=self.remote_dir, remote_file=self.remote_file, silent=True
        )

        # delete target file if it already exists
        self.delete_target_local()

        # determine remote file size
        file_size = self.remote_filesize()

        # determine optimal size for chunks
        split_size, executor = self.scs.file_split_size(
            file_size, sshd_version, bsd_version, evo, self.copy_proto
        )

        # confirm remote storage is sufficient
        self.scs.storage_check_remote(file_size, split_size)

        # confirm local storage is sufficient
        self.scs.storage_check_local(file_size)

        if not self.noverify:
            # get/create sha hash for remote file
            sha_hash = self.remote_sha_get()

        # create tmp directory on remote host
        remote_tmpdir = self.scs.mkdir_remote()

        # split file into chunks
        self.split_file_remote(SCPClient, file_size, split_size, remote_tmpdir)

        # add chunk names to a list, pass this info to Progress
        chunks = self.get_chunk_info(remote_tmpdir)
        self.progress.add_chunks(file_size, chunks)

        # begin connection/rate limit check and transfer process
        command_list = []
        if junos or evo:
            command_list = self.scs.limit_check(self.copy_proto)
        print("starting transfer...")
        self.progress.start_progress(self.use_curses)
        with self.scs.tempdir():
            # copy files from remote host
            self.hard_close = True
            loop_start = datetime.datetime.now()
            loop = asyncio.new_event_loop()
            tasks = []
            for chunk in chunks:
                task = loop.run_in_executor(
                    executor,
                    functools.partial(
                        self.get_files,
                        FTP,
                        SSHShell,
                        SCPClient,
                        chunk,
                        remote_tmpdir,
                        ssh_kwargs,
                    ),
                )
                tasks.append(task)
            try:
                loop.run_until_complete(asyncio.gather(*tasks))
            except TransferError:
                self.progress.stop_progress()
                self.scs.close(
                    err_str="an error occurred while copying the files from the remote host",
                    hard_close=self.hard_close,
                )
            finally:
                loop.close()

            self.hard_close = False
            while self.progress.totals["percent_done"] != 100:
                time.sleep(0.1)
            self.progress.stop_progress()

            print("\ntransfer complete")
            loop_end = datetime.datetime.now()

            # combine chunks
            self.join_files_local()

        # remove remote tmp dir
        self.scs.remote_cleanup()

        # rollback any config changes made
        if command_list:
            self.scs.limits_rollback()

        if self.noverify:
            print(
                f"file has been successfully copied to {self.local_dir}/{self.local_file}"
            )
        else:
            # generate a sha hash for the combined file, compare to hash of src
            self.local_sha_get(sha_hash)

        self.sshshell.close()
        return loop_start, loop_end
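The transfer loop above bridges blocking SCP/FTP work into asyncio with loop.run_in_executor() and a gather over the resulting futures; this is the standard bridge between thread-pool work and the event loop. A stripped-down sketch of that bridge:

import asyncio
import concurrent.futures
import functools
import time

def blocking_copy(chunk):
    time.sleep(0.1)  # stand-in for a blocking chunk transfer
    return chunk

async def main():
    loop = asyncio.get_running_loop()
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        tasks = [loop.run_in_executor(executor, functools.partial(blocking_copy, c))
                 for c in range(8)]
        return await asyncio.gather(*tasks)

print(asyncio.run(main()))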
Example #50
        if filetuple_or_none is None:
            break

        file_and_stats = await get_file_and_stats(*filetuple_or_none)
        file_matched_size = await match_file_size(file_and_stats, args.smaller,
                                                  args.bigger)
        file_matched_date = await match_file_date(file_and_stats, args.days)
        file_matched_suffix = await match_file_suffix(file_and_stats,
                                                      args.suffix)

        if file_matched_size and file_matched_date and file_matched_suffix:
            txt = '{:<30.30}'.format(os.path.basename(file_matched_size[0]))
            if args.output:
                if 'date' in args.output:
                    txt += ' {}'.format(
                        datetime.date.fromtimestamp(
                            file_matched_size[1].st_mtime))
                if 'size' in args.output:
                    txt += ' {:<15}'.format(file_matched_size[1].st_size)
            print(txt)


if __name__ == '__main__':
    args = get_args()

    loop = asyncio.get_event_loop()
    queue = asyncio.Queue(loop=loop)
    producer = produce_files(queue, args.path)
    consumer = main(args, queue)
    loop.run_until_complete(asyncio.gather(producer, consumer))
    loop.close()
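The same producer/consumer wiring, reduced to a runnable sketch: one coroutine feeds an asyncio.Queue, another drains it until a None sentinel, and gather runs both concurrently. The file names are placeholders, consume stands in for main(args, queue), and asyncio.run replaces the explicit loop (the loop= argument to asyncio.Queue was removed in Python 3.10):

import asyncio

async def produce_files(queue, names):
    for name in names:
        await queue.put(name)
    await queue.put(None)          # sentinel: no more files

async def consume(queue):
    while True:
        name = await queue.get()
        if name is None:
            break
        print("processing", name)

async def main():
    queue = asyncio.Queue()
    await asyncio.gather(produce_files(queue, ["a.txt", "b.log"]), consume(queue))

asyncio.run(main())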
Example #51
0
    def run(self):
        # note: if self._workers_tasks is a list of awaitables, it would need
        # unpacking: asyncio.gather(*self._workers_tasks, self.consume())
        self._loop.run_until_complete(
            asyncio.gather(self._workers_tasks, self.consume()))
The script, asyncio_sleep_gather.py, and its output:

import asyncio

async def hello_world():
    print("hello world!")

async def hello_python():
    print("hello Python!")
    await asyncio.sleep(0.1)

event_loop = asyncio.get_event_loop()
try:
    result = event_loop.run_until_complete(asyncio.gather(
        hello_world(),
        hello_python(),
    ))
    print(result)
finally:
    event_loop.close()

Output:

hello world!
hello Python!
[None, None]
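One detail the [None, None] output illustrates: gather returns the coroutines' results as a list in argument order, not completion order. A small sketch assuming two trivial coroutines:

import asyncio

async def slow():
    await asyncio.sleep(0.1)
    return "slow"

async def fast():
    return "fast"

async def main():
    # results come back in argument order, not completion order
    print(await asyncio.gather(slow(), fast()))   # ['slow', 'fast']

asyncio.run(main())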
Example #53
0
File: bot.py Project: QuinceP/Quincebot
    def process(self, event):
        """
        :type event: Event
        """
        run_before_tasks = []
        tasks = []
        command_prefix = event.conn.config.get('command_prefix', '.')

        # Raw IRC hook
        for raw_hook in self.plugin_manager.catch_all_triggers:
            # run catch-all coroutine hooks before all others - TODO: Make this a plugin argument
            if not raw_hook.threaded:
                run_before_tasks.append(
                    self.plugin_manager.launch(raw_hook, Event(hook=raw_hook, base_event=event)))
            else:
                tasks.append(self.plugin_manager.launch(raw_hook, Event(hook=raw_hook, base_event=event)))
        if event.irc_command in self.plugin_manager.raw_triggers:
            for raw_hook in self.plugin_manager.raw_triggers[event.irc_command]:
                tasks.append(self.plugin_manager.launch(raw_hook, Event(hook=raw_hook, base_event=event)))

        # Event hooks
        if event.type in self.plugin_manager.event_type_hooks:
            for event_hook in self.plugin_manager.event_type_hooks[event.type]:
                tasks.append(self.plugin_manager.launch(event_hook, Event(hook=event_hook, base_event=event)))

        if event.type is EventType.message:
            # Commands
            if event.chan.lower() == event.nick.lower():  # private message, no command prefix
                command_re = r'(?i)^(?:[{}]?|{}[,;:]+\s+)(\w+)(?:$|\s+)(.*)'.format(command_prefix, event.conn.nick)
            else:
                command_re = r'(?i)^(?:[{}]|{}[,;:]+\s+)(\w+)(?:$|\s+)(.*)'.format(command_prefix, event.conn.nick)

            cmd_match = re.match(command_re, event.content)

            if cmd_match:
                command = cmd_match.group(1).lower()
                if command in self.plugin_manager.commands:
                    command_hook = self.plugin_manager.commands[command]
                    command_event = CommandEvent(hook=command_hook, text=cmd_match.group(2).strip(),
                                                 triggered_command=command, base_event=event)
                    tasks.append(self.plugin_manager.launch(command_hook, command_event))
                else:
                    potential_matches = []
                    for potential_match, plugin in self.plugin_manager.commands.items():
                        if potential_match.startswith(command):
                            potential_matches.append((potential_match, plugin))
                    if potential_matches:
                        if len(potential_matches) == 1:
                            command_hook = potential_matches[0][1]
                            command_event = CommandEvent(hook=command_hook, text=cmd_match.group(2).strip(),
                                                         triggered_command=command, base_event=event)
                            tasks.append(self.plugin_manager.launch(command_hook, command_event))
                        else:
                            event.notice("Possible matches: {}".format(
                                formatting.get_text_list([command for command, plugin in potential_matches])))

            # Regex hooks
            for regex, regex_hook in self.plugin_manager.regex_hooks:
                # skip hooks that must not run on commands when a command matched
                if not regex_hook.run_on_cmd and cmd_match:
                    continue
                regex_match = regex.search(event.content)
                if regex_match:
                    regex_event = RegexEvent(hook=regex_hook, match=regex_match, base_event=event)
                    tasks.append(self.plugin_manager.launch(regex_hook, regex_event))

        # Run the tasks
        yield from asyncio.gather(*run_before_tasks, loop=self.loop)
        yield from asyncio.gather(*tasks, loop=self.loop)
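The two-phase launch at the end — gathering run_before_tasks to completion before gathering the rest — can be isolated like this, where the hook coroutine is a hypothetical stand-in for plugin_manager.launch:

import asyncio

async def hook(name):
    print("ran", name)

async def process():
    run_before_tasks = [hook("catch_all")]
    tasks = [hook("command"), hook("regex")]
    await asyncio.gather(*run_before_tasks)   # priority hooks complete first
    await asyncio.gather(*tasks)

asyncio.run(process())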
Example #54
0
File: main.py Project: dpdani/tBB_cli
           ('waiting', 'yellow', 'default'),
           ('navigation', 'dark gray,underline', 'default'),
           ('mainview_title', 'default,bold', 'default'),
           ('reveal focus', 'default,standout', 'default'),
           ('dialog background', 'default', 'dark gray'),
           ('dialog button', 'default', 'dark gray'),
           ('dialog button focused', 'default', 'light blue'),
           ('edit', 'default,underline', 'default')]

frame = urwid_views.MainFrame()
welcome_view = urwid_views.WelcomeView(frame)
welcome_view.app_quit = app_quit
frame.set_body(welcome_view)
evl = urwid.AsyncioEventLoop(loop=asyncio.get_event_loop())
loop = urwid.MainLoop(frame,
                      event_loop=evl,
                      unhandled_input=unhandled_input,
                      palette=palette)
urwid_views.set_ui(loop.screen)
try:
    loop.run()
except KeyboardInterrupt:
    pass
finally:
    for task in asyncio.Task.all_tasks():
        task.cancel()
    asyncio.get_event_loop().run_until_complete(
        asyncio.gather(*asyncio.Task.all_tasks()))
    asyncio.get_event_loop().close()
print("Goodbye!")
Example #55
0
File: server.py Project: yunxinan/sanic
def serve(
    host,
    port,
    request_handler,
    error_handler,
    before_start=None,
    after_start=None,
    before_stop=None,
    after_stop=None,
    debug=False,
    request_timeout=60,
    response_timeout=60,
    keep_alive_timeout=5,
    ssl=None,
    sock=None,
    request_max_size=None,
    reuse_port=False,
    loop=None,
    protocol=HttpProtocol,
    backlog=100,
    register_sys_signals=True,
    run_multiple=False,
    run_async=False,
    connections=None,
    signal=Signal(),
    request_class=None,
    access_log=True,
    keep_alive=True,
    is_request_stream=False,
    router=None,
    websocket_max_size=None,
    websocket_max_queue=None,
    websocket_read_limit=2 ** 16,
    websocket_write_limit=2 ** 16,
    state=None,
    graceful_shutdown_timeout=15.0,
):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param response_timeout: time in seconds
    :param keep_alive_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param access_log: disable/enable access log
    :param websocket_max_size: enforces the maximum size for
                               incoming messages in bytes.
    :param websocket_max_queue: sets the maximum length of the queue
                                that holds incoming messages.
    :param websocket_read_limit: sets the high-water limit of the buffer for
                                 incoming bytes, the low-water limit is half
                                 the high-water limit.
    :param websocket_write_limit: sets the high-water limit of the buffer for
                                  outgoing bytes, the low-water limit is a
                                  quarter of the high-water limit.
    :param is_request_stream: disable/enable Request.stream
    :param router: Router object
    :param graceful_shutdown_timeout: how long to wait before force-closing
                                      non-idle connections
    :return: Nothing
    """
    if not run_async:
        # create new event_loop after fork
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        response_timeout=response_timeout,
        keep_alive_timeout=keep_alive_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        access_log=access_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        websocket_read_limit=websocket_read_limit,
        websocket_write_limit=websocket_write_limit,
        state=state,
        debug=debug,
    )

    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog,
    )

    # Instead of pulling time at the end of every request,
    # pull it once per minute
    loop.call_soon(partial(update_current_time, loop))

    if run_async:
        return server_coroutine

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Ignore SIGINT when run_multiple
    if run_multiple:
        signal_func(SIGINT, SIG_IGN)

    # Register signals for graceful termination
    if register_sys_signals:
        _signals = (SIGTERM,) if run_multiple else (SIGINT, SIGTERM)
        for _signal in _signals:
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                logger.warning(
                    "Sanic tried to use loop.add_signal_handler "
                    "but it is not implemented on this platform."
                )
    pid = os.getpid()
    try:
        logger.info("Starting worker [%s]", pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Graceful shutdown timeout:
        # wait up to graceful_shutdown_timeout instead of letting
        # connections hang forever, roughly tracking elapsed time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection())
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
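The final shutdown step — collecting one close coroutine per lingering websocket connection and draining them with a single gather — in isolation. WebsocketConn is a hypothetical stand-in for Sanic's protocol objects, not its real class:

import asyncio

class WebsocketConn:
    def __init__(self, name):
        self.name = name
    async def close_connection(self):
        await asyncio.sleep(0.1)            # pretend to finish the close handshake
        print("closed", self.name)

async def shutdown(connections):
    coros = [conn.close_connection() for conn in connections]
    await asyncio.gather(*coros)

asyncio.run(shutdown([WebsocketConn("ws-1"), WebsocketConn("ws-2")]))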
Example #56
0
File: two_devices.py Project: devbis/bleak
def main(addresses):
    return asyncio.gather(*(connect_to_device(address)
                            for address in addresses))
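Driving that combined future takes one extra step; a sketch with a hypothetical connect_to_device, rewritten as an async function so it can be handed to asyncio.run:

import asyncio

async def connect_to_device(address):
    # hypothetical stand-in for the real BleakClient connection logic
    await asyncio.sleep(0.1)
    print("connected to", address)

async def main(addresses):
    await asyncio.gather(*(connect_to_device(address) for address in addresses))

asyncio.run(main(["AA:BB:CC:DD:EE:FF", "11:22:33:44:55:66"]))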
Example #57
0
def wait():
    pending = asyncio.Task.all_tasks()

    relevant_tasks = [t for t in pending if ('test_' not in t._coro.__name__)]
    yield from asyncio.gather(*relevant_tasks)
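This relies on asyncio.Task.all_tasks() (removed in Python 3.9) and the private t._coro attribute. A sketch of the same filter using the public asyncio.all_tasks and Task.get_coro, plus a small demo harness (background and the names are hypothetical):

import asyncio

async def background():
    await asyncio.sleep(0.1)
    print("background done")

async def wait_for_others():
    current = asyncio.current_task()
    relevant = [t for t in asyncio.all_tasks()
                if t is not current and "test_" not in t.get_coro().__name__]
    await asyncio.gather(*relevant)

async def main():
    asyncio.create_task(background())
    await asyncio.sleep(0)      # give the background task a chance to start
    await wait_for_others()

asyncio.run(main())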
Example #58
0
    async def forward_request(
            self) -> [wresults.AbstractWazuhResult, exception.WazuhException]:
        """Forward a request to the node who has all available information to answer it.

        This function is called when a distributed_master function is used. Only the master node calls this function.
        An API request will only be forwarded to worker nodes.

        Returns
        -------
        wresults.AbstractWazuhResult or exception.WazuhException
        """
        async def forward(
            node_name: Tuple
        ) -> [wresults.AbstractWazuhResult, exception.WazuhException]:
            """Forward a request to a node.

            Parameters
            ----------
            node_name : tuple
                Node to forward a request to.

            Returns
            -------
            wresults.AbstractWazuhResult or exception.WazuhException
            """
            node_name, agent_list = node_name
            if agent_list:
                key = 'agent_id' if 'agent_id' in self.f_kwargs else 'agent_list'
                self.f_kwargs[key] = agent_list
            if node_name in ('unknown', '') or node_name == self.node_info['node']:
                # Execute the request locally if the node to forward to is
                # unknown, empty, or the master itself
                result = await self.distribute_function()
            else:
                if 'tmp_file' in self.f_kwargs:
                    await self.send_tmp_file(node_name)
                client = self.get_client()
                try:
                    payload = "{} {}".format(
                        node_name,
                        json.dumps(self.to_dict(), cls=c_common.WazuhJSONEncoder)
                    ).encode()
                    result = json.loads(
                        await client.execute(b'dapi_forward', payload,
                                             self.wait_for_complete),
                        object_hook=c_common.as_wazuh_object)
                except WazuhClusterError as e:
                    if e.code == 3022:
                        result = e
                    else:
                        raise e
                # Convert a non-existing node into a WazuhError exception
                if isinstance(result, WazuhClusterError) and result.code == 3022:
                    common.rbac.set(self.rbac_permissions)
                    try:
                        await get_nodes_info(client, filter_node=[node_name])
                    except WazuhError as e:
                        if e.code == 4000:
                            result = e
                    dikt = result.to_dict()
                    # Add node ID to error message
                    dikt['ids'] = {node_name}
                    result = WazuhError.from_dict(dikt)

            return result if isinstance(result, (wresults.AbstractWazuhResult, exception.WazuhException)) \
                else wresults.WazuhResult(result)

        # Get the node(s) that have all the information needed to answer the request.
        nodes = await self.get_solver_node()
        self.from_cluster = True
        common.rbac.set(self.rbac_permissions)
        common.cluster_nodes.set(self.nodes)
        common.broadcast.set(self.broadcasting)
        if 'node_id' in self.f_kwargs or 'node_list' in self.f_kwargs:
            # Check cluster:read permissions for each node
            filter_node_kwarg = {'filter_node': list(nodes)} if nodes else {}
            allowed_nodes = await get_nodes_info(self.get_client(),
                                                 **filter_node_kwarg)

            if not nodes:
                nodes = {item['name']: [] for item in allowed_nodes.affected_items}
            allowed_names = {item['name'] for item in allowed_nodes.affected_items}
            valid_nodes = [node for node in nodes.items()
                           if node[0] in allowed_names or node[0] == 'unknown']
            del self.f_kwargs['node_id' if 'node_id' in self.f_kwargs else 'node_list']
        else:
            if nodes:
                valid_nodes = list(nodes.items())
            else:
                broadcasted_nodes = await get_nodes_info(self.get_client())
                valid_nodes = [(n['name'], [])
                               for n in broadcasted_nodes.affected_items]
            allowed_nodes = wresults.AffectedItemsWazuhResult()
            allowed_nodes.affected_items = list(nodes)
            allowed_nodes.total_affected_items = len(
                allowed_nodes.affected_items)
        response = await asyncio.shield(
            asyncio.gather(*[forward(node) for node in valid_nodes]))

        if allowed_nodes.total_affected_items > 1:
            response = reduce(or_, response)
            if isinstance(response, wresults.AbstractWazuhResult):
                response = response.limit(limit=self.f_kwargs.get('limit', common.database_limit),
                                          offset=self.f_kwargs.get('offset', 0)) \
                    .sort(fields=self.f_kwargs.get('fields', []),
                          order=self.f_kwargs.get('order', 'asc'))
        elif response:
            response = response[0]
        else:
            response = deepcopy(allowed_nodes)

        # It might be a WazuhError after reducing
        if isinstance(response, wresults.AffectedItemsWazuhResult):
            for failed in copy(allowed_nodes.failed_items):
                # Avoid errors coming from 'unknown' node (they are included in the forward)
                if allowed_nodes.failed_items[failed] == {'unknown'}:
                    del allowed_nodes.failed_items[failed]
            response.add_failed_items_from(allowed_nodes)

        return response
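Stripped of the Wazuh specifics, the fan-out core is: one forward coroutine per target node, run concurrently under gather, shielded so that cancelling the caller cannot abort the in-flight forwards. A minimal sketch with a hypothetical forward:

import asyncio

async def forward(node_name):
    # hypothetical stand-in for forwarding the API request to one node
    await asyncio.sleep(0.1)
    return {node_name: "ok"}

async def forward_request(valid_nodes):
    # shield the batch so cancelling the caller can't abort in-flight forwards
    return await asyncio.shield(
        asyncio.gather(*[forward(node) for node in valid_nodes]))

print(asyncio.run(forward_request(["worker1", "worker2"])))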
Example #59
0
    await download_file(export_file_url, path / export_file_name)
    try:
        learn = load_learner(path, export_file_name)
        return learn
    except RuntimeError as e:
        if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:
            print(e)
            message = "\n\nThis model was trained with an old version of fastai and will not work in a CPU environment.\n\nPlease update the fastai library in your training environment and export your model again.\n\nSee instructions for 'Returning to work' at https://course.fast.ai."
            raise RuntimeError(message)
        else:
            raise


loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(setup_learner())]
learn = loop.run_until_complete(asyncio.gather(*tasks))[0]
loop.close()


@app.route('/')
async def homepage(request):
    html_file = path / 'view' / 'index.html'
    return HTMLResponse(html_file.open().read())


@app.route('/analyze', methods=['POST'])
async def analyze(request):
    img_data = await request.form()
    img_bytes = await (img_data['file'].read())
    img = open_image(BytesIO(img_bytes))
    prediction = learn.predict(img)[0]
Example #60
0
        await asyncio.sleep(1.0)
        # time.sleep(1)

    return tuple(primes)

async def square_mapper(m, n):
    """ Square mapper co-routine """
    
    squares = []
    for i in number_generator(m, n):
        print('Square=>', i * i)
        squares.append(i * i)
        # Swap the await below for the blocking time.sleep and
        # see what happens: the two co-routines stop interleaving!
        await asyncio.sleep(1.0)
        # time.sleep(1)
        
    return squares

def print_result(future):
    print('Result=>',future.result())
        
loop = asyncio.get_event_loop()
future = asyncio.gather(prime_filter(10, 50), square_mapper(10, 50))
future.add_done_callback(print_result)
loop.run_until_complete(future)

loop.close()
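This snippet is missing its opening lines (the import, number_generator, and the start of prime_filter). A self-contained reduction of the same pattern — gather's aggregate future gets a done-callback before the loop drives it — assuming simplified coroutine bodies:

import asyncio

async def prime_filter(m, n):
    # simplified stand-in for the original co-routine
    return [i for i in range(m, n) if i > 1 and all(i % d for d in range(2, i))]

async def square_mapper(m, n):
    return [i * i for i in range(m, n)]

def print_result(future):
    print('Result=>', future.result())

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)       # so gather() below can find the loop
future = asyncio.gather(prime_filter(10, 20), square_mapper(10, 20))
future.add_done_callback(print_result)
loop.run_until_complete(future)
loop.close()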