def test_asyncio_add_watcher_SIGCHLD_nop(self):
    async def proc(loop):
        proc = await asyncio.create_subprocess_exec(
            'echo', stdout=subprocess.DEVNULL, loop=loop)
        await proc.wait()

    aio_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(aio_loop)
    try:
        aio_loop.run_until_complete(proc(aio_loop))
    finally:
        aio_loop.close()
        asyncio.set_event_loop(None)

    try:
        loop = uvloop.new_event_loop()
        with self.assertWarnsRegex(
                RuntimeWarning,
                "asyncio is trying to install its ChildWatcher"):
            asyncio.set_event_loop(loop)
    finally:
        asyncio.set_event_loop(None)
        loop.close()
def event_loop(): """ Since pulse-notify creates an event loop on initialization, try and use it for the first test after which it is closed. Then use a new loop every time. """ from pulsenotify import event_loop as pn_loop return uvloop.new_event_loop() if pn_loop.is_closed() else pn_loop
def test_uvloop(self):
    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)

    async def uvwait(x):
        assert isinstance(asyncio.get_event_loop(), uvloop.Loop)
        result = await wait(x)
        return result

    result = loop.run_until_complete(uvwait(1))
    self.assertEqual(1, result)
def loop(request, loop_type):
    old_loop = asyncio.get_event_loop()
    asyncio.set_event_loop(None)
    if loop_type == 'uvloop':
        loop = uvloop.new_event_loop()
    else:
        loop = asyncio.new_event_loop()

    def fin():
        loop.close()
        asyncio.set_event_loop(old_loop)

    request.addfinalizer(fin)
    return loop
def create_asyncio_http_server(on_request_callback=None, host='localhost',
                               port=8888, loop=None, server_protocol=None):
    if loop is None:
        loop = uvloop.new_event_loop()
    if server_protocol is None:
        server_protocol = AsyncioHttpProtocol
    return loop, loop.create_server(lambda: server_protocol(loop=loop),
                                    host=host, port=port)
def loop(request, loop_type, debug):
    # old_loop = asyncio.get_event_loop()
    asyncio.set_event_loop(None)
    if loop_type == 'uvloop':
        loop = uvloop.new_event_loop()
    else:
        loop = asyncio.new_event_loop()
    loop.set_debug(debug)
    asyncio.set_event_loop(loop)
    yield loop
    loop.close()
    asyncio.set_event_loop(None)
    gc.collect()
def setUpClass(cls):
    """Set up event loop to run async tests and run async class setup"""
    super().setUpClass()
    if uvloop_available and os.environ.get('USE_UVLOOP'):  # pragma: no cover
        cls.loop = uvloop.new_event_loop()
    else:
        cls.loop = asyncio.new_event_loop()
    asyncio.set_event_loop(cls.loop)
    try:
        # pylint: disable=no-member
        cls.loop.run_until_complete(cls.asyncSetUpClass())
    except AttributeError:
        pass
def loop(request, loop_type):
    # The original created an extra asyncio loop here and immediately
    # overwrote it below, leaking an unclosed loop; the dead assignment
    # has been dropped.
    asyncio.set_event_loop(None)
    if uvloop and loop_type == 'uvloop':
        loop = uvloop.new_event_loop()
    else:
        loop = asyncio.new_event_loop()
    yield loop
    if not loop._closed:
        loop.call_soon(loop.stop)
        loop.run_forever()
        loop.close()
    gc.collect()
    asyncio.set_event_loop(None)
def uvloop_test():
    try:
        import uvloop
    except ImportError:
        return
    pool = ThreadPoolExecutor()

    async def main(loop):
        for n in range(COUNT):
            r = await loop.run_in_executor(pool, time.sleep, 0)

    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)
    start = time.time()
    loop.run_until_complete(asyncio.ensure_future(main(loop)))
    end = time.time()
    print('uvloop:', end - start)
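For reference, the stdlib-asyncio counterpart of the benchmark above would differ only in the loop constructor; a sketch, assuming the same module-level COUNT, time, and ThreadPoolExecutor names as the snippet:

def asyncio_test():
    # Same benchmark body, but on the stock asyncio loop, so the two
    # printed timings are directly comparable (hypothetical counterpart).
    pool = ThreadPoolExecutor()

    async def main(loop):
        for _ in range(COUNT):
            await loop.run_in_executor(pool, time.sleep, 0)

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    start = time.time()
    loop.run_until_complete(main(loop))
    end = time.time()
    print('asyncio:', end - start)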
def start(self):
    loop = uvloop.new_event_loop()

    def starting_new_thread():
        asyncio.set_event_loop(loop)
        try:
            address = ("0.0.0.0", self.kademlia.node.port)
            task = asyncio.Task(loop.create_datagram_endpoint(
                self.get_self_instance(), local_addr=address))
            loop.run_until_complete(task)
            loop.run_forever()
        finally:
            loop.close()

    threading.Thread(target=starting_new_thread).start()
    time.sleep(1)
def uvloop_test(n):
    try:
        import uvloop
    except ImportError:
        return

    async def main(n):
        for x in range(n):
            proc = await asyncio.create_subprocess_exec(
                *cmd,
                stdin=asyncio.subprocess.PIPE,
                stdout=asyncio.subprocess.PIPE)
            stdout, stderr = await proc.communicate(input=input)
            await proc.wait()
            assert stdout == input

    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)
    start = time.time()
    loop.run_until_complete(asyncio.ensure_future(main(n)))
    end = time.time()
    print('uvloop:', end - start)
def test_get_event_loop_returns_running_loop(self):
    class Policy(asyncio.DefaultEventLoopPolicy):
        def get_event_loop(self):
            raise NotImplementedError

    loop = None

    old_policy = asyncio.get_event_loop_policy()
    try:
        asyncio.set_event_loop_policy(Policy())
        loop = uvloop.new_event_loop()

        self.assertIs(asyncio._get_running_loop(), None)

        async def func():
            self.assertIs(asyncio.get_event_loop(), loop)
            self.assertIs(asyncio._get_running_loop(), loop)

        loop.run_until_complete(func())
    finally:
        asyncio.set_event_loop_policy(old_policy)
        if loop is not None:
            loop.close()

    self.assertIs(asyncio._get_running_loop(), None)
def new_loop(self):
    return uvloop.new_event_loop()
def idle(self):
    self.loop = uvloop.new_event_loop()
    asyncio.set_event_loop(self.loop)
    self.loop.run_until_complete(self._idle())
def get_event_loop(debug=False):
    if not debug:
        import uvloop
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
        asyncio.set_event_loop(uvloop.new_event_loop())
    return asyncio.get_event_loop()
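The helper above leans on uvloop.EventLoopPolicy; a minimal self-contained sketch of that pattern, showing that once the policy is installed, plain asyncio constructors hand back uvloop loops:

import asyncio

import uvloop

# After this, asyncio.new_event_loop() delegates to uvloop's policy.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

loop = asyncio.new_event_loop()
assert isinstance(loop, uvloop.Loop)


async def hello():
    return 'hello'

print(loop.run_until_complete(hello()))
loop.close()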
class unsync(object):
    thread_executor = concurrent.futures.ThreadPoolExecutor()
    process_executor = None
    if os.name == "posix":
        loop = uvloop.new_event_loop()
    else:
        loop = asyncio.new_event_loop()
    thread = None
    unsync_functions = {}

    @staticmethod
    def thread_target(loop):
        asyncio.set_event_loop(loop)
        loop.run_forever()

    def __init__(self, *args, **kwargs):
        self.args = []
        self.kwargs = {}
        if len(args) == 1 and _isfunction(args[0]):
            self._set_func(args[0])
        else:
            self.args = args
            self.kwargs = kwargs
            self.func = None

    @property
    def cpu_bound(self):
        return 'cpu_bound' in self.kwargs and self.kwargs['cpu_bound']

    def _set_func(self, func):
        assert _isfunction(func)
        self.func = func
        functools.update_wrapper(self, func)
        # On Windows/Mac MP turns the main module into __mp_main__
        # in multiprocess targets
        module = "__main__" if func.__module__ == "__mp_main__" else func.__module__
        unsync.unsync_functions[(module, func.__name__)] = func

    def __call__(self, *args, **kwargs):
        if self.func is None:
            self._set_func(args[0])
            return self
        if inspect.iscoroutinefunction(self.func):
            if self.cpu_bound:
                raise TypeError(
                    'The CPU bound unsync function %s may not be async or a coroutine'
                    % self.func.__name__)
            future = self.func(*args, **kwargs)
        else:
            if self.cpu_bound:
                if unsync.process_executor is None:
                    unsync.process_executor = concurrent.futures.ProcessPoolExecutor()
                future = unsync.process_executor.submit(
                    _multiprocess_target,
                    (self.func.__module__, self.func.__name__),
                    *args, **kwargs)
            else:
                future = unsync.thread_executor.submit(self.func, *args, **kwargs)
        return Unfuture(future)

    def __get__(self, instance, owner):
        def _call(*args, **kwargs):
            return self(instance, *args, **kwargs)

        functools.update_wrapper(_call, self.func)
        return _call
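A short usage sketch for the decorator above, assuming the rest of the unsync package (notably Unfuture, whose result() blocks until the value is ready, and the background thread that thread_target runs on) is wired up as in the real library:

@unsync
async def fetch(x):
    # Coroutine: scheduled on the class-level uvloop/asyncio loop.
    await asyncio.sleep(0.1)
    return x * 2


@unsync(cpu_bound=True)
def crunch(n):
    # Plain function marked cpu_bound: submitted to the ProcessPoolExecutor.
    return sum(i * i for i in range(n))


print(fetch(21).result())     # blocks until the coroutine completes -> 42
print(crunch(10000).result())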
# Creates bartender dict app
bartender_app = dict()

# Register bartender app info
bartender_app['instance'] = cur_instance
bartender_app['nb_replica'] = nb_replica

# Registers logger
bartender_app['logger'] = setup_logger()
bartender_app['logger'].info(f'Bartender current instance : {cur_instance}')

# Creates & registers event loop
bartender_app['loop'] = uvloop.new_event_loop()
asyncio.set_event_loop(bartender_app['loop'])

bartender_app['serializer'] = AvroSerializer(
    os.path.join(os.path.dirname(os.path.abspath(__file__)),
                 'examples/coffee_bar/avro_schemas'))

# Creates & register KafkaProducer
bartender_app['transactional_producer'] = KafkaProducer(
    name=f'bartender-{cur_instance}',
    bootstrap_servers='localhost:9092',
    client_id=f'bartender-{cur_instance}',
    serializer=bartender_app['serializer'],
    loop=bartender_app['loop'],
    partitioner=KeyPartitioner(),
    acks='all',
def test_core_aiozmq_uvloop(count):
    """core aiozmq with uvloop"""
    loop = uvloop.new_event_loop()
    return _test_core_aiozmq(count, loop)
def sub_loop(number):
    import uvloop
    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(
        RepairBase(number, loop=loop, concurrency=12).run())
def serve(host, port, request_handler, error_handler, before_start=None,
          after_start=None, before_stop=None, after_stop=None, debug=False,
          request_timeout=60, ssl=None, sock=None, request_max_size=None,
          reuse_port=False, loop=None, protocol=HttpProtocol, backlog=100,
          register_sys_signals=True, run_async=False):
    """
    Starts asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: Function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: Function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: Function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: Function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: Enables debug output (slows server)
    :param request_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: Subclass of asyncio protocol class
    :return: Nothing
    """
    if not run_async:
        loop = async_loop.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    trigger_events(before_start, loop)

    connections = set()
    signal = Signal()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        request_max_size=request_max_size,
    )

    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog
    )
    # Instead of pulling time at the end of every request,
    # pull it once per minute
    loop.call_soon(partial(update_current_time, loop))

    if run_async:
        return server_coroutine

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except:
        log.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Register signals for graceful termination
    if register_sys_signals:
        for _signal in (SIGINT, SIGTERM):
            loop.add_signal_handler(_signal, loop.stop)

    pid = os.getpid()
    try:
        log.info('Starting worker [{}]'.format(pid))
        loop.run_forever()
    finally:
        log.info("Stopping worker [{}]".format(pid))

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()
        while connections:
            loop.run_until_complete(asyncio.sleep(0.1))

        trigger_events(after_stop, loop)

        loop.close()
def main(args):
    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)
    asyncio.ensure_future(run_server(loop, args))
    loop.run_forever()
from sanic import Sanic
from sanic.response import json
from multiprocessing import Event
from signal import signal, SIGINT
import asyncio
import uvloop

app = Sanic(__name__)


@app.route("/")
async def test(request):
    return json({"answer": "42"})

asyncio.set_event_loop(uvloop.new_event_loop())
server = app.create_server(host="0.0.0.0", port=8001)
loop = asyncio.get_event_loop()
task = asyncio.ensure_future(server)
signal(SIGINT, lambda s, f: loop.stop())
try:
    loop.run_forever()
except:
    loop.stop()
def write(self, task):
    self._ready(task)
    print('Written', task.result())


if __name__ == '__main__':
    import asyncio

    async def coro(sleep):
        await asyncio.sleep(sleep)
        return sleep

    from uvloop import new_event_loop
    loop = new_event_loop()
    asyncio.set_event_loop(loop)
    pipeline = Pipeline()

    def queue(x):
        t = loop.create_task(coro(x))
        pipeline.queue(t)

    loop.call_later(2, lambda: queue(2))
    loop.call_later(12, lambda: queue(2))
    queue(1)
    queue(10)
    queue(5)
    queue(1)
def get_event_loop(self, debug=False):
    if not debug:
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
        asyncio.set_event_loop(uvloop.new_event_loop())
    AsyncIOMainLoop().install()
    return asyncio.get_event_loop()
sanic = Sanic(name=f'cash-register-{cur_instance}')

# Creates cash register dict app
cash_register_app = dict()
cash_register_app['instance'] = cur_instance
cash_register_app['nb_replica'] = nb_replica

# Registers logger
cash_register_app['logger'] = setup_logger()
cash_register_app['logger'].info(
    f'Current cash register instance : {cur_instance}')

# Creates & registers event loop
cash_register_app['loop'] = uvloop.new_event_loop()
asyncio.set_event_loop(cash_register_app['loop'])

cash_register_app['serializer'] = AvroSerializer(
    os.path.join(os.path.dirname(os.path.abspath(__file__)),
                 'examples/coffee_bar/avro_schemas'))

# Creates & registers local store memory / global store memory
cash_register_app['local_store'] = LocalStoreMemory(
    name=f'cash-register-{cur_instance}-local-memory')
cash_register_app['global_store'] = GlobalStoreMemory(
    name=f'cash-register-{cur_instance}-global-memory')

cluster_admin = KafkaAdminClient(bootstrap_servers='localhost:9092',
                                 client_id=f'cash-register-{cur_instance}')
cluster_metadata = ClusterMetadata(bootstrap_servers='localhost:9092')
def main(): global logger, globopts parser = argparse.ArgumentParser( description="""Fetch weights information from Gstat provider for every job listed in customer.conf""" ) parser.add_argument('-c', dest='custconf', nargs=1, metavar='customer.conf', help='path to customer configuration file', type=str, required=False) parser.add_argument('-g', dest='gloconf', nargs=1, metavar='global.conf', help='path to global configuration file', type=str, required=False) parser.add_argument('-d', dest='date', metavar='YEAR-MONTH-DAY', help='write data for this date', type=str, required=False) args = parser.parse_args() logger = Logger(os.path.basename(sys.argv[0])) fixed_date = None if args.date and date_check(args.date): fixed_date = args.date confpath = args.gloconf[0] if args.gloconf else None cglob = Global(sys.argv[0], confpath) globopts = cglob.parse() confpath = args.custconf[0] if args.custconf else None confcust = CustomerConf(sys.argv[0], confpath) confcust.parse() confcust.make_dirstruct() confcust.make_dirstruct(globopts['InputStateSaveDir'.lower()]) feeds = confcust.get_mapfeedjobs(sys.argv[0], deffeed=VAPORPI) loop = uvloop.new_event_loop() asyncio.set_event_loop(loop) for feed, jobcust in feeds.items(): customers = set(map(lambda jc: confcust.get_custname(jc[1]), jobcust)) customers = customers.pop() if len(customers) == 1 else '({0})'.format( ','.join(customers)) sjobs = set(map(lambda jc: jc[0], jobcust)) jobs = list(sjobs)[0] if len(sjobs) == 1 else '({0})'.format( ','.join(sjobs)) logger.job = jobs logger.customer = customers try: res = loop.run_until_complete(fetch_data(feed)) weights = parse_source(res) for job, cust in jobcust: logger.customer = confcust.get_custname(cust) logger.job = job write_empty = confcust.send_empty(sys.argv[0], cust) if write_empty: weights = [] webapi_opts = get_webapi_opts(cust, job, cglob, confcust) if eval(globopts['GeneralPublishWebAPI'.lower()]): loop.run_until_complete( send_webapi(job, confcust, webapi_opts, fixed_date, weights)) if eval(globopts['GeneralWriteAvro'.lower()]): write_avro(cust, job, confcust, fixed_date, weights) loop.run_until_complete( write_state(cust, job, confcust, fixed_date, True)) if weights or write_empty: custs = set([cust for job, cust in jobcust]) for cust in custs: jobs = [job for job, lcust in jobcust if cust == lcust] logger.info( 'Customer:%s Jobs:%s Sites:%d' % (confcust.get_custname(cust), jobs[0] if len(jobs) == 1 else '({0})'.format(','.join(jobs)), len(weights))) except (ConnectorHttpError, ConnectorParseError, KeyboardInterrupt) as exc: logger.error(repr(exc)) for job, cust in jobcust: loop.run_until_complete( write_state(cust, job, confcust, fixed_date, False))
def pause(self):
    self.loop = uvloop.new_event_loop()
    asyncio.set_event_loop(self.loop)
    self.loop.run_until_complete(self.asyncPause())
def get_new_uvloop_queue():
    loop = uvloop.new_event_loop()
    return asyncio.Queue(loop=loop)
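Note that the loop= argument to asyncio.Queue was deprecated in Python 3.8 and removed in 3.10, so on current interpreters an equivalent helper would construct the queue while running on the uvloop loop; a hypothetical variant:

def get_new_uvloop_queue_modern():
    # Python 3.10+: a Queue binds to the running loop on first use,
    # so create it from a coroutine executing on the uvloop loop.
    loop = uvloop.new_event_loop()

    async def make_queue():
        return asyncio.Queue()

    return loop, loop.run_until_complete(make_queue())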
def serve(host, port, request_handler, error_handler, before_start=None,
          after_start=None, before_stop=None, after_stop=None,
          debug=False, request_timeout=60, sock=None, request_max_size=None,
          reuse_port=False, loop=None, protocol=HttpProtocol, backlog=100):
    """
    Starts asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: Function to be executed before the server starts
                         listening. Takes single argument `loop`
    :param after_start: Function to be executed after the server starts
                        listening. Takes single argument `loop`
    :param before_stop: Function to be executed when a stop signal is
                        received before it is respected. Takes single
                        argument `loop`
    :param after_stop: Function to be executed when a stop signal is
                       received after it is respected. Takes single
                       argument `loop`
    :param debug: Enables debug output (slows server)
    :param request_timeout: time in seconds
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: Subclass of asyncio protocol class
    :return: Nothing
    """
    loop = loop or async_loop.new_event_loop()
    asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    trigger_events(before_start, loop)

    connections = set()
    signal = Signal()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        request_max_size=request_max_size,
    )

    server_coroutine = loop.create_server(server, host, port,
                                          reuse_port=reuse_port,
                                          sock=sock, backlog=backlog)
    # Instead of pulling time at the end of every request,
    # pull it once per minute
    loop.call_soon(partial(update_current_time, loop))

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except Exception:
        log.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Register signals for graceful termination
    for _signal in (SIGINT, SIGTERM):
        loop.add_signal_handler(_signal, loop.stop)

    try:
        loop.run_forever()
    finally:
        log.info("Stop requested, draining connections...")

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()
        while connections:
            loop.run_until_complete(asyncio.sleep(0.1))

        trigger_events(after_stop, loop)

        loop.close()
def loop(self):
    if not self._loop:
        self._loop = uvloop.new_event_loop()
    return self._loop
from structure.config import db_config, server_config, redis_config
from base.environment import Environment
from aioredis import create_pool as create_redis_pool
from structure.route import route, err_route, middleware

try:
    import ujson as json
except ImportError:
    import json

try:
    import uvloop as async_loop
except ImportError:
    async_loop = asyncio

loop = async_loop.new_event_loop()
asyncio.set_event_loop(loop=loop)

env = None


async def init_db(*args):
    application = args[0]
    loop = args[1]
    db_host = db_config.get('host', '127.0.0.1')
    database = db_config.get('database')
    db_user = db_config.get('user')
    db_pwd = db_config.get('password')
    db_pool = await create_pool(max_size=50, host=db_host, database=database,
                                user=db_user, password=db_pwd, loop=loop)

    redis_host = redis_config['redis'].get('host')
    redis_port = redis_config['redis'].get('port')
    redis_db = redis_config['redis'].get('db')
    redis_pool = await create_redis_pool((redis_host, redis_port),
                                         db=redis_db, loop=loop)
def get_new_event_loop():
    """Construct a new event loop. Ray will use uvloop if it exists"""
    if uvloop:
        return uvloop.new_event_loop()
    else:
        return asyncio.new_event_loop()
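The truthiness check above presumes an import guard at module scope that leaves uvloop set to None when the package is missing; presumably something along these lines:

try:
    import uvloop
except ImportError:
    # uvloop is optional; fall back to the stdlib loop when absent.
    uvloop = None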
def serve(host, port, request_handler, error_handler, before_start=None,
          after_start=None, before_stop=None, after_stop=None, debug=False,
          request_timeout=60, response_timeout=60, keep_alive_timeout=5,
          ssl=None, sock=None, request_max_size=None, reuse_port=False,
          loop=None, protocol=HttpProtocol, backlog=100,
          register_sys_signals=True, run_async=False, connections=None,
          signal=Signal(), request_class=None, access_log=True,
          keep_alive=True, is_request_stream=False, router=None,
          websocket_max_size=None, websocket_max_queue=None, state=None,
          graceful_shutdown_timeout=15.0):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param response_timeout: time in seconds
    :param keep_alive_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param access_log: disable/enable access log
    :param is_request_stream: disable/enable Request.stream
    :param router: Router object
    :return: Nothing
    """
    if not run_async:
        loop = async_loop.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        response_timeout=response_timeout,
        keep_alive_timeout=keep_alive_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        access_log=access_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        state=state,
        debug=debug,
    )

    server_coroutine = loop.create_server(server, host, port, ssl=ssl,
                                          reuse_port=reuse_port, sock=sock,
                                          backlog=backlog)

    # Instead of pulling time at the end of every request,
    # pull it once per minute
    loop.call_soon(partial(update_current_time, loop))

    if run_async:
        return server_coroutine

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Register signals for graceful termination
    if register_sys_signals:
        for _signal in (SIGINT, SIGTERM):
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                logger.warning('Sanic tried to use loop.add_signal_handler '
                               'but it is not implemented on this platform.')
    pid = os.getpid()
    try:
        logger.info('Starting worker [%s]', pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Gracefully shutdown timeout.
        # We should provide graceful_shutdown_timeout,
        # instead of letting connections hang forever.
        # Let's roughly calculate time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connections after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection(force=True))
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
from sanic import response
from sanic_cors import CORS, cross_origin

from .nn import recognize


def get_root():
    if getattr(sys, 'frozen', False):
        return os.path.dirname(sys.executable)
    elif __file__:
        return os.path.abspath(os.path.dirname(__file__))

root = get_root()
db = Db(config)

asyncio.set_event_loop(uvloop.new_event_loop())
app = Sanic()
CORS(app, automatic_options=True)


@app.route('/rec', methods=['POST'])
async def recognize_image(request):
    image_file = request.files.get('image')

    # recognize
    file_parameters = {
        'body': image_file.body,
        'name': image_file.name,
        'type': image_file.type,
def main():
    # loop = ZMQEventLoop()
    # asyncio.set_event_loop(loop)
    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)

    connection = None
    try:
        opts = parse_args(sys.argv[1:])
        opts_config = RestApiConfig(
            bind=opts.bind,
            connect=opts.connect,
            timeout=opts.timeout,
            opentsdb_url=opts.opentsdb_url,
            opentsdb_db=opts.opentsdb_db)
        rest_api_config = load_rest_api_config(opts_config)
        url = None
        if "tcp://" not in rest_api_config.connect:
            url = "tcp://" + rest_api_config.connect
        else:
            url = rest_api_config.connect

        connection = Connection(url)

        log_config = get_log_config(filename="rest_api_log_config.toml")

        # If no toml, try loading yaml
        if log_config is None:
            log_config = get_log_config(filename="rest_api_log_config.yaml")

        if log_config is not None:
            log_configuration(log_config=log_config)
        else:
            log_dir = get_log_dir()
            log_configuration(log_dir=log_dir, name="rest_api")
        init_console_logging(verbose_level=opts.verbose)

        try:
            host, port = rest_api_config.bind[0].split(":")
            port = int(port)
        except ValueError as e:
            print("Unable to parse binding {}: Must be in the format"
                  " host:port".format(rest_api_config.bind[0]))
            sys.exit(1)

        wrapped_registry = None
        if rest_api_config.opentsdb_url:
            LOGGER.info("Adding metrics reporter: url=%s, db=%s",
                        rest_api_config.opentsdb_url,
                        rest_api_config.opentsdb_db)

            url = urlparse(rest_api_config.opentsdb_url)
            proto, db_server, db_port = url.scheme, url.hostname, url.port

            registry = MetricsRegistry()
            wrapped_registry = MetricsRegistryWrapper(registry)

            reporter = InfluxReporter(
                registry=registry,
                reporting_interval=10,
                database=rest_api_config.opentsdb_db,
                prefix="sawtooth_rest_api",
                port=db_port,
                protocol=proto,
                server=db_server,
                username=rest_api_config.opentsdb_username,
                password=rest_api_config.opentsdb_password)
            reporter.start()

        start_rest_api(
            host,
            port,
            connection,
            int(rest_api_config.timeout),
            wrapped_registry)
    # pylint: disable=broad-except
    except Exception as e:
        LOGGER.exception(e)
        sys.exit(1)
    finally:
        if connection is not None:
            connection.close()
def serve(host, port, request_handler, error_handler, before_start=None,
          after_start=None, before_stop=None, after_stop=None, debug=False,
          request_timeout=60, response_timeout=60, keep_alive_timeout=5,
          ssl=None, sock=None, request_max_size=None, reuse_port=False,
          loop=None, protocol=HttpProtocol, backlog=100,
          register_sys_signals=True, run_async=False, connections=None,
          signal=Signal(), request_class=None, access_log=True,
          keep_alive=True, is_request_stream=False, router=None,
          websocket_max_size=None, websocket_max_queue=None, state=None,
          graceful_shutdown_timeout=15.0):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param response_timeout: time in seconds
    :param keep_alive_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param access_log: disable/enable access log
    :param is_request_stream: disable/enable Request.stream
    :param router: Router object
    :return: Nothing
    """
    if not run_async:
        loop = async_loop.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        response_timeout=response_timeout,
        keep_alive_timeout=keep_alive_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        access_log=access_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        state=state,
        debug=debug,
    )

    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog
    )

    # Instead of pulling time at the end of every request,
    # pull it once per minute
    loop.call_soon(partial(update_current_time, loop))

    if run_async:
        return server_coroutine

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Register signals for graceful termination
    if register_sys_signals:
        for _signal in (SIGINT, SIGTERM):
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                logger.warning('Sanic tried to use loop.add_signal_handler '
                               'but it is not implemented on this platform.')
    pid = os.getpid()
    try:
        logger.info('Starting worker [%s]', pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Gracefully shutdown timeout.
        # We should provide graceful_shutdown_timeout,
        # instead of letting connections hang forever.
        # Let's roughly calculate time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connections after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(
                    conn.websocket.close_connection(after_handshake=True)
                )
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
def serve(host, port, request_handler, after_start=None, before_stop=None,
          debug=False, request_timeout=60, sock=None, request_max_size=None,
          reuse_port=False):
    # Create Event Loop
    loop = async_loop.new_event_loop()
    asyncio.set_event_loop(loop)
    # I don't think we take advantage of this
    # And it slows everything waaayyy down
    # loop.set_debug(debug)

    connections = {}
    signal = Signal()
    server_coroutine = loop.create_server(lambda: HttpProtocol(
        loop=loop,
        connections=connections,
        signal=signal,
        request_handler=request_handler,
        request_timeout=request_timeout,
        request_max_size=request_max_size,
    ), host, port, reuse_port=reuse_port, sock=sock)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except Exception as e:
        log.error("Unable to start server: {}".format(e))
        return

    # Run the on_start function if provided
    if after_start:
        result = after_start(loop)
        if isawaitable(result):
            loop.run_until_complete(result)

    # Register signals for graceful termination
    for _signal in (SIGINT, SIGTERM):
        loop.add_signal_handler(_signal, loop.stop)

    try:
        loop.run_forever()
    finally:
        log.info("Stop requested, draining connections...")

        # Run the on_stop function if provided
        if before_stop:
            result = before_stop(loop)
            if isawaitable(result):
                loop.run_until_complete(result)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections.keys():
            connection.close_if_idle()
        while connections:
            loop.run_until_complete(asyncio.sleep(0.1))

        loop.close()
def start(connection_count):
    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)
    multiple_connect(connection_count)
    asyncio.get_event_loop().run_forever()
def serve(host, port, request_handler, error_handler, before_start=None,
          after_start=None, before_stop=None, after_stop=None, debug=False,
          request_timeout=60, ssl=None, sock=None, request_max_size=None,
          reuse_port=False, loop=None, protocol=HttpProtocol, backlog=100,
          register_sys_signals=True, run_async=False, connections=None,
          signal=Signal(), request_class=None, has_log=True, keep_alive=True):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param has_log: disable/enable access log and error log
    :return: Nothing
    """
    if not run_async:
        loop = async_loop.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    trigger_events(before_start, loop)

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        has_log=has_log,
        keep_alive=keep_alive,
    )

    server_coroutine = loop.create_server(server, host, port, ssl=ssl,
                                          reuse_port=reuse_port, sock=sock,
                                          backlog=backlog)
    # Instead of pulling time at the end of every request,
    # pull it once per minute
    loop.call_soon(partial(update_current_time, loop))

    if run_async:
        return server_coroutine

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except:
        log.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Register signals for graceful termination
    if register_sys_signals:
        for _signal in (SIGINT, SIGTERM):
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                log.warn('Sanic tried to use loop.add_signal_handler but it is'
                         ' not implemented on this platform.')
    pid = os.getpid()
    try:
        log.info('Starting worker [{}]'.format(pid))
        loop.run_forever()
    finally:
        log.info("Stopping worker [{}]".format(pid))

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()
        while connections:
            loop.run_until_complete(asyncio.sleep(0.1))

        trigger_events(after_stop, loop)

        loop.close()
# Creates waiter dict app
waiter_app = dict()

# Register waiter app info
waiter_app['instance'] = cur_instance
waiter_app['nb_replica'] = nb_replica

# Registers logger
waiter_app['logger'] = setup_logger()
waiter_app['logger'].info(
    f'Hello my name is Albert ! Waiter current instance : {cur_instance}')

# Creates & registers event loop
waiter_app['loop'] = uvloop.new_event_loop()
asyncio.set_event_loop(waiter_app['loop'])

waiter_app['serializer'] = AvroSerializer(
    os.path.join(os.path.dirname(os.path.abspath(__file__)),
                 'examples/coffee_bar/avro_schemas'))

# Creates & registers local store memory / global store memory
waiter_app['local_store'] = LocalStoreMemory(
    name=f'waiter-{cur_instance}-local-memory')
waiter_app['global_store'] = GlobalStoreMemory(
    name=f'waiter-{cur_instance}-global-memory')

cluster_admin = KafkaAdminClient(bootstrap_servers='localhost:9092',
                                 client_id=f'waiter-{cur_instance}')
def main():
    global logger, globopts, confcust
    parser = argparse.ArgumentParser(
        description="""Fetch and construct entities from EOSC-PORTAL feed""")
    parser.add_argument('-c', dest='custconf', nargs=1, metavar='customer.conf',
                        help='path to customer configuration file', type=str,
                        required=False)
    parser.add_argument('-g', dest='gloconf', nargs=1, metavar='global.conf',
                        help='path to global configuration file', type=str,
                        required=False)
    parser.add_argument('-d', dest='date', metavar='YEAR-MONTH-DAY',
                        help='write data for this date', type=str,
                        required=False)
    args = parser.parse_args()
    group_endpoints, group_groups = list(), list()
    logger = Logger(os.path.basename(sys.argv[0]))

    fixed_date = None
    if args.date and date_check(args.date):
        fixed_date = args.date

    confpath = args.gloconf[0] if args.gloconf else None
    cglob = Global(sys.argv[0], confpath)
    globopts = cglob.parse()

    confpath = args.custconf[0] if args.custconf else None
    confcust = CustomerConf(sys.argv[0], confpath)
    confcust.parse()
    confcust.make_dirstruct()
    confcust.make_dirstruct(globopts['InputStateSaveDir'.lower()])

    global custname
    custname = confcust.get_custname()

    # safely assume here one customer defined in customer file
    cust = list(confcust.get_customers())[0]
    jobstatedir = confcust.get_fullstatedir(
        globopts['InputStateSaveDir'.lower()], cust)
    fetchtype = confcust.get_topofetchtype()[0]

    state = None
    logger.customer = custname
    uidservtype = confcust.get_uidserviceendpoints()
    topofeed = confcust.get_topofeed()

    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)

    try:
        if is_feed(topofeed):
            res = loop.run_until_complete(fetch_data(topofeed))
            group_groups, group_endpoints = parse_source_topo(
                res, uidservtype, fetchtype)
            contacts = ParseContacts(
                logger, res, uidservtype, is_csv=False).get_contacts()
            attach_contacts_topodata(logger, contacts, group_endpoints)
        else:
            try:
                with open(topofeed) as fp:
                    js = json.load(fp)
                    group_groups, group_endpoints = parse_source_topo(
                        js, uidservtype, fetchtype)
            except IOError as exc:
                logger.error('Customer:%s : Problem opening %s - %s' %
                             (logger.customer, topofeed, repr(exc)))

        loop.run_until_complete(
            write_state(confcust, fixed_date, True)
        )

        webapi_opts = get_webapi_opts(cglob, confcust)

        numge = len(group_endpoints)
        numgg = len(group_groups)

        # send concurrently to WEB-API in coroutines
        if eval(globopts['GeneralPublishWebAPI'.lower()]):
            loop.run_until_complete(
                asyncio.gather(
                    send_webapi(webapi_opts, group_groups, 'groups', fixed_date),
                    send_webapi(webapi_opts, group_endpoints, 'endpoints', fixed_date)
                )
            )

        if eval(globopts['GeneralWriteAvro'.lower()]):
            write_avro(confcust, group_groups, group_endpoints, fixed_date)

        logger.info('Customer:' + custname +
                    ' Fetched Endpoints:%d' % (numge) +
                    ' Groups(%s):%d' % (fetchtype, numgg))

    except (ConnectorHttpError, ConnectorParseError, KeyboardInterrupt) as exc:
        logger.error(repr(exc))
        loop.run_until_complete(
            write_state(confcust, fixed_date, False)
        )
def loop():
    return uvloop.new_event_loop()
def __init__(self, coroutine=None, **kwargs):
    self.loop = uvloop.new_event_loop()
    super().__init__(self.loop.run_forever, autostart=True, **kwargs)
    if coroutine:
        self.run_coro(coroutine)
return text("Internal service failure, please check logs.", 500) if __name__ == "__main__": parser = ArgumentParser( description="Start asynchronous Switcher web api with Sanic.") parser.add_argument( "-p", "--port", type=int, default=3698, help="port for the server to run on, default is 3698", ) set_event_loop(new_event_loop()) server_coro = sanic_app.create_server( host="0.0.0.0", # nosec port=parser.parse_args().port, return_asyncio_server=True, ) event_loop = None # type: Optional[AbstractEventLoop] try: event_loop = get_event_loop() event_loop.create_task(server_coro) event_loop.run_forever() sanic_app.stop() except RuntimeError as exc: logger.error(exc) finally: if event_loop:
#!/usr/bin/env python3
import asyncio

import uvloop
from sanic import Sanic
from sanic.response import json
from signal import signal, SIGINT

PORT = 8080

loop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)

app = Sanic('Sanc')


@app.route('/')
async def test(request):
    return json({
        'status': 'OK',
    })


if __name__ == '__main__':
    server_coro = app.create_server(
        host='0.0.0.0',
        port=PORT,
        return_asyncio_server=True,
        debug=True,
        access_log=True,
    )
    task = asyncio.ensure_future(server_coro)
def main():
    global logger, globopts
    parser = argparse.ArgumentParser(
        description='Fetch downtimes from GOCDB for given date')
    parser.add_argument('-d', dest='date', nargs=1, metavar='YEAR-MONTH-DAY',
                        required=True)
    parser.add_argument('-c', dest='custconf', nargs=1, metavar='customer.conf',
                        help='path to customer configuration file', type=str,
                        required=False)
    parser.add_argument('-g', dest='gloconf', nargs=1, metavar='global.conf',
                        help='path to global configuration file', type=str,
                        required=False)
    args = parser.parse_args()

    logger = Logger(os.path.basename(sys.argv[0]))
    confpath = args.gloconf[0] if args.gloconf else None
    cglob = Global(sys.argv[0], confpath)
    globopts = cglob.parse()

    confpath = args.custconf[0] if args.custconf else None
    confcust = CustomerConf(sys.argv[0], confpath)
    confcust.parse()
    confcust.make_dirstruct()
    confcust.make_dirstruct(globopts['InputStateSaveDir'.lower()])
    feed = confcust.get_topofeed()
    logger.customer = confcust.get_custname()

    if len(args.date) == 0:
        # the original wrapped this in print(), which also printed "None";
        # print_help() already writes to stdout
        parser.print_help()
        raise SystemExit(1)

    # calculate start and end times
    try:
        start = datetime.datetime.strptime(args.date[0], '%Y-%m-%d')
        end = datetime.datetime.strptime(args.date[0], '%Y-%m-%d')
        timestamp = start.strftime('%Y_%m_%d')
        start = start.replace(hour=0, minute=0, second=0)
        end = end.replace(hour=23, minute=59, second=59)
    except ValueError as exc:
        logger.error(exc)
        raise SystemExit(1)

    uidservtype = confcust.get_uidserviceendpoints()

    auth_custopts = confcust.get_authopts()
    auth_opts = cglob.merge_opts(auth_custopts, 'authentication')
    auth_complete, missing = cglob.is_complete(auth_opts, 'authentication')
    if not auth_complete:
        missing_err = ''.join(missing)
        logger.error(
            'Customer:{} authentication options incomplete, missing {}'.format(
                logger.customer, missing_err))
        raise SystemExit(1)

    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)

    try:
        # we don't have multiple tenant definitions in one
        # customer file so we can safely assume one tenant/customer
        write_empty = confcust.send_empty(sys.argv[0])
        if not write_empty:
            res = loop.run_until_complete(
                fetch_data(feed, auth_opts, start, end))
            dts = parse_source(res, start, end, uidservtype)
        else:
            dts = []

        loop.run_until_complete(write_state(confcust, timestamp, True))

        webapi_opts = get_webapi_opts(cglob, confcust)

        if eval(globopts['GeneralPublishWebAPI'.lower()]):
            loop.run_until_complete(send_webapi(webapi_opts, args.date[0], dts))

        if dts or write_empty:
            cust = list(confcust.get_customers())[0]
            logger.info('Customer:%s Fetched Date:%s Endpoints:%d' %
                        (confcust.get_custname(cust), args.date[0], len(dts)))

        if eval(globopts['GeneralWriteAvro'.lower()]):
            write_avro(confcust, dts, timestamp)

    except (ConnectorHttpError, ConnectorParseError, KeyboardInterrupt) as exc:
        logger.error(repr(exc))
        loop.run_until_complete(write_state(confcust, timestamp, False))

    loop.close()
# Run with python3 simple_server.py PORT
import asyncio
import logging
import sys

import ujson as json
import uvloop
from aiohttp import web

loop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)

logging.basicConfig(level=logging.DEBUG)


async def handle(request):
    return web.Response(body=json.dumps({"test": True}).encode('utf-8'),
                        content_type='application/json')

app = web.Application(loop=loop)
app.router.add_route('GET', '/', handle)

web.run_app(app, port=int(sys.argv[1]), access_log=None)
def event_loop():
    loop = uvloop.new_event_loop()
    yield loop
    loop.close()
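A fixture of this shape is the classic way to put pytest-asyncio tests on uvloop (overriding the built-in event_loop fixture is the hook supported up through the pytest-asyncio 0.21 line); a minimal sketch, assuming pytest-asyncio is installed:

import asyncio

import pytest
import uvloop


@pytest.fixture
def event_loop():
    # Shadows pytest-asyncio's default event_loop fixture.
    loop = uvloop.new_event_loop()
    yield loop
    loop.close()


@pytest.mark.asyncio
async def test_runs_on_uvloop():
    # Inside a running coroutine, get_event_loop() returns the running loop.
    assert isinstance(asyncio.get_event_loop(), uvloop.Loop)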