async def _local_request(self, method, uri, cookies=None, *args, **kwargs):
    """Perform an HTTP request against the locally running test server.

    Absolute URIs (http/https/ftp/ftps schemes or protocol-relative
    ``//``) are used as-is; anything else is treated as a path on the
    local test host/port.

    The returned aiohttp response object is augmented in place:
    ``response.text`` and ``response.json`` hold the decoded payload
    (``None`` when decoding fails) and ``response.body`` the raw bytes.

    :param method: HTTP method name, e.g. ``'get'``.
    :param uri: Absolute URL or server-relative path.
    :param cookies: Optional cookies for the client session.
    :return: The aiohttp response, augmented as described above.
    """
    import aiohttp
    # BUG FIX: the tuple previously contained `'ftps://' '//'` — adjacent
    # string literals that Python concatenates into the single prefix
    # 'ftps:////', so neither ftps URLs nor protocol-relative URLs were
    # recognised.  They must be separate elements.
    if uri.startswith(('http:', 'https:', 'ftp:', 'ftps://', '//')):
        url = uri
    else:
        url = 'http://{host}:{port}{uri}'.format(
            host=HOST, port=self.port, uri=uri)
    logger.info(url)
    # Test helper: accept self-signed certificates.
    conn = aiohttp.TCPConnector(verify_ssl=False)
    async with aiohttp.ClientSession(
            cookies=cookies, connector=conn) as session:
        async with getattr(
                session, method.lower())(url, *args, **kwargs) as response:
            try:
                response.text = await response.text()
            except UnicodeDecodeError:
                # Binary payload: no decoded text representation.
                response.text = None
            try:
                response.json = await response.json()
            except (JSONDecodeError,
                    UnicodeDecodeError,
                    aiohttp.ClientResponseError):
                response.json = None
            response.body = await response.read()
            return response
async def _local_request(self, method, url, cookies=None, *args, **kwargs):
    """Issue one HTTP request at *url* and return the aiohttp response
    with its payload eagerly materialised.

    ``response.text`` / ``response.json`` are set to ``None`` when the
    body cannot be decoded; ``response.body`` always holds raw bytes.
    """
    import aiohttp
    logger.info(url)
    # Test client: certificate verification is intentionally disabled.
    connector = aiohttp.TCPConnector(ssl=False)
    async with aiohttp.ClientSession(
        cookies=cookies, connector=connector
    ) as client:
        request_method = getattr(client, method.lower())
        async with request_method(url, *args, **kwargs) as response:
            try:
                response.text = await response.text()
            except UnicodeDecodeError:
                response.text = None
            try:
                response.json = await response.json()
            except (
                JSONDecodeError,
                UnicodeDecodeError,
                aiohttp.ClientResponseError,
            ):
                response.json = None
            response.body = await response.read()
            return response
def run(self, host=None, port=None, debug=False, ssl=None, sock=None,
        workers=1, protocol=None, backlog=100, stop_event=None,
        register_sys_signals=True, access_log=True):
    """Run the HTTP Server and listen until keyboard interrupt or term
    signal. On termination, drain connections before closing.

    :param host: Address to host on
    :param port: Port to host on
    :param debug: Enables debug output (slows server)
    :param ssl: SSLContext, or location of certificate and key for SSL
                encryption of worker(s)
    :param sock: Socket for the server to accept connections from
    :param workers: Number of worker processes to spawn
    :param backlog: Maximum number of queued, not-yet-accepted connections
    :param stop_event: Deprecated; a DeprecationWarning is emitted if set
    :param register_sys_signals: Register SIG* handlers for shutdown
    :param protocol: Subclass of asyncio protocol class
    :param access_log: Enables writing access logs (slows server)
    :return: Nothing
    """
    # Host/port defaults only apply when no pre-bound socket was given.
    if sock is None:
        host, port = host or "127.0.0.1", port or 8000
    # Choose a websocket-capable protocol if the app registered ws routes.
    if protocol is None:
        protocol = (WebSocketProtocol if self.websocket_enabled
                    else HttpProtocol)
    if stop_event is not None:
        if debug:
            # Make the DeprecationWarning visible in debug mode.
            warnings.simplefilter('default')
        warnings.warn("stop_event will be removed from future versions.",
                      DeprecationWarning)
    server_settings = self._helper(
        host=host, port=port, debug=debug, ssl=ssl, sock=sock,
        workers=workers, protocol=protocol, backlog=backlog,
        register_sys_signals=register_sys_signals, access_log=access_log)
    try:
        self.is_running = True
        if workers == 1:
            serve(**server_settings)
        else:
            serve_multiple(server_settings, workers)
    except BaseException:
        error_logger.exception(
            'Experienced exception while trying to serve')
        raise
    finally:
        # Always clear the running flag, even on error/interrupt.
        self.is_running = False
    logger.info("Server Stopped")
def keep_alive_timeout_callback(self):
    """Close the connection if the keep-alive window has expired,
    otherwise re-arm this timer for the remaining time."""
    elapsed = current_time - self._last_response_time
    if elapsed >= self.keep_alive_timeout:
        # Idle too long: drop the connection.
        logger.info('KeepAlive Timeout. Closing connection.')
        self.transport.close()
        self.transport = None
        return
    # Still within the keep-alive window — check again when it ends.
    remaining = self.keep_alive_timeout - elapsed
    self._keep_alive_timeout_handler = self.loop.call_later(
        remaining, self.keep_alive_timeout_callback)
async def fetch_data(self, url, session):
    """Fetch one block from the node and accumulate rows for batch insert.

    Parses every type-4 transaction whose fee asset matches
    ``ASSET_ID``, retrieves its IPFS attachment, decodes the CDM XML
    envelope and appends rows to the instance buffers
    (``sql_data_transactions``, ``sql_data_cdms``, ``sql_data_senders``,
    ``sql_data_proofs``).  Errors are logged and swallowed so one bad
    block does not stop the crawl; cancellation is re-raised.

    :param url: Block endpoint URL to fetch.
    :param session: Shared aiohttp client session.
    """
    try:
        async with session.get(url) as response:
            data = await response.text()
            data = pjson.loads(data)
            cnfy_id = 'cnfy-{}'.format(str(uuid.uuid4()))
            for tx in data['transactions']:
                # Only transfer (type 4) transactions paid in our asset.
                if tx['type'] in [4] and tx['feeAssetId'] == os.environ['ASSET_ID']:
                    attachment_base58 = base58.b58decode(tx['attachment']).decode('utf-8')
                    attachment = None
                    try:
                        # NOTE(review): requests.get is a blocking call inside a
                        # coroutine; it stalls the event loop for up to the 2s
                        # timeout — consider using the aiohttp session instead.
                        attachment = requests.get('{0}:{1}/ipfs/{2}'.format(
                            config['ipfs']['host'],
                            config['ipfs']['port'],
                            attachment_base58), timeout=2).text
                    except Exception as error:
                        logger.error('IPFS Error: {0}'.format(error))
                    # BUG FIX: identity comparison with None (`is None`),
                    # not equality (`== None`).
                    if attachment is None:
                        logger.warning('CONTINUE ON IPFS HASH {0}'.format(attachment_base58))
                        continue
                    attachment_hash = hashlib.sha256(attachment.encode('utf-8')).hexdigest()
                    root = ET.fromstring(attachment)
                    version = root.findall('version')[0].text if len(root.findall('version')) > 0 else None
                    blockchain = root.findall('blockchain')[0].text if len(root.findall('blockchain')) > 0 else None
                    network = root.findall('network')[0].text if len(root.findall('network')) > 0 else None
                    messages = root.findall('messages')[0] if len(root.findall('messages')) > 0 else []
                    # Skip envelopes built for a different CDM version.
                    if str(version) != str(os.environ['CDM_VERSION']):
                        continue
                    for message in messages:
                        to_public_key = None
                        cc_public_key = None
                        to = message.findall('to')[0] if len(message.findall('to')) > 0 else None
                        cc = message.findall('cc')[0] if len(message.findall('cc')) > 0 else None
                        if to:
                            to_public_key = to.findall('publickey')[0].text if len(to.findall('publickey')) > 0 else None
                        if cc:
                            cc_public_key = cc.findall('publickey')[0].text if len(cc.findall('publickey')) > 0 else None
                        subject_ciphertext = None
                        subject_sha256hash = None
                        subject = message.findall('subject')[0] if len(message.findall('subject')) > 0 else None
                        if subject:
                            subject_ciphertext = subject.findall('ciphertext')[0].text if len(subject.findall('ciphertext')) > 0 else None
                            subject_sha256hash = subject.findall('sha256')[0].text if len(subject.findall('sha256')) > 0 else None
                        body_ciphertext = None
                        body_sha256hash = None
                        body = message.findall('body')[0] if len(message.findall('body')) > 0 else None
                        if body:
                            body_ciphertext = body.findall('ciphertext')[0].text if len(body.findall('ciphertext')) > 0 else None
                            body_sha256hash = body.findall('sha256')[0].text if len(body.findall('sha256')) > 0 else None
                        # A direct ('to') recipient wins over 'cc' when both exist.
                        recipient_public_key = to_public_key if to_public_key else cc_public_key
                        recipient_type = 'to' if to_public_key else 'cc'
                        thread_hash = hashlib.sha256(''.join([subject_sha256hash or '', body_sha256hash or '']).encode('utf-8')).hexdigest()
                        re_subject_hash = None
                        re_message_hash = None
                        regarding = message.findall('regarding')[0] if len(message.findall('regarding')) > 0 else None
                        if regarding:
                            re_subject_hash = regarding.findall('subjecthash')[0].text if len(regarding.findall('subjecthash')) > 0 else None
                            re_message_hash = regarding.findall('messagehash')[0].text if len(regarding.findall('messagehash')) > 0 else None
                            # Replies thread on the hashes of the original message.
                            thread_hash = hashlib.sha256(''.join([re_subject_hash or '', re_message_hash or '']).encode('utf-8')).hexdigest()
                        fwd_subject_hash = None
                        fwd_message_hash = None
                        forwarded = message.findall('forwarded')[0] if len(message.findall('forwarded')) > 0 else None
                        if forwarded:
                            fwd_subject_hash = forwarded.findall('subjecthash')[0].text if len(forwarded.findall('subjecthash')) > 0 else None
                            fwd_message_hash = forwarded.findall('messagehash')[0].text if len(forwarded.findall('messagehash')) > 0 else None
                        cdm_id = 'cdm-' + str(uuid.uuid4())
                        self.sql_data_cdms.append((
                            cdm_id,
                            tx['id'],
                            recipient_public_key,
                            subject_ciphertext,
                            subject_sha256hash,
                            body_ciphertext,
                            body_sha256hash,
                            thread_hash,
                            blockchain,
                            network,
                            recipient_type,
                            re_subject_hash,
                            re_message_hash,
                            fwd_subject_hash,
                            fwd_message_hash,
                            datetime.fromtimestamp(tx['timestamp'] / 1e3),
                        ))
                        from_block = message.findall('from')[0] if len(message.findall('from')) > 0 else None
                        if from_block:
                            senders = from_block.findall('sender') if len(from_block.findall('sender')) > 0 else None
                            for sender in senders:
                                sender_public_key = sender.findall('publickey')[0].text if len(sender.findall('publickey')) > 0 else None
                                sender_signature = sender.findall('signature')[0].text if len(sender.findall('signature')) > 0 else None
                                sender_id = str(uuid.uuid4())
                                self.sql_data_senders.append((sender_id, cdm_id, sender_public_key, sender_signature, True))
                    tx_data = (
                        tx['id'],
                        data['height'],
                        tx['type'],
                        tx['sender'],
                        tx['senderPublicKey'],
                        tx['recipient'],
                        tx['amount'],
                        tx['assetId'],
                        tx['feeAssetId'],
                        tx['feeAsset'],
                        tx['fee'],
                        tx['attachment'],
                        tx['version'],
                        datetime.fromtimestamp(tx['timestamp'] / 1e3),
                        cnfy_id,
                        attachment_hash
                    )
                    self.sql_data_transactions.append(tx_data)
                    for proof in tx['proofs']:
                        proof_id = 'proof-' + str(uuid.uuid4())
                        self.sql_data_proofs.append((tx['id'], proof, proof_id))
    except asyncio.CancelledError:
        logger.info('Parser has been stopped')
        raise
    except Exception as error:
        # Best effort: log and keep crawling other blocks.
        logger.error('Fetching data error: {}'.format(error))
async def index(request):
    """Root handler: emit a marker log line and answer in plain text."""
    greeting = 'Hello World!'
    logger.info('Here is your log')
    return text(greeting)
async def save_data(self):
    """Flush the accumulated parser buffers into Postgres in one batch.

    Inserts transactions (upserting height on conflict), then their
    proofs, CDMs and senders.  The buffers and the inserted counter are
    always reset afterwards, whether or not the insert succeeded.
    Cancellation is re-raised; any other failure triggers
    ``emergency_stop_loop``.
    """
    conn = psycopg2.connect(**dsn)
    try:
        with conn:
            with conn.cursor() as cur:
                # Nothing to do when no transactions were collected.
                if len(self.sql_data_transactions) > 0:
                    sql = """INSERT INTO transactions (
                        id, height, type, sender, sender_public_key,
                        recipient, amount, asset_id, fee_asset_id,
                        fee_asset, fee, attachment, version, timestamp,
                        cnfy_id, attachment_hash
                    ) VALUES %s
                    ON CONFLICT (id) DO UPDATE SET height = EXCLUDED.height"""
                    execute_values(cur, sql, self.sql_data_transactions)
                    if cur.rowcount > 0:
                        self.transactions_inserted += cur.rowcount
                    sql = """INSERT INTO proofs (tx_id, proof, id)
                    VALUES %s ON CONFLICT DO NOTHING"""
                    execute_values(cur, sql, self.sql_data_proofs)
                    sql = """INSERT INTO cdms (
                        id, tx_id, recipient, subject, subject_hash,
                        message, message_hash, thread_hash, blockchain,
                        network, type, re_subject_hash, re_message_hash,
                        fwd_subject_hash, fwd_message_hash, timestamp
                    ) VALUES %s ON CONFLICT DO NOTHING"""
                    execute_values(cur, sql, self.sql_data_cdms)
                    if len(self.sql_data_senders) > 0:
                        sql = """INSERT INTO senders (id, cdm_id, sender, signature, verified)
                        VALUES %s ON CONFLICT DO NOTHING"""
                        execute_values(cur, sql, self.sql_data_senders)
                    conn.commit()
                    logger.info('Saved {0} transactions'.format(self.transactions_inserted))
    except psycopg2.IntegrityError as error:
        # BUG FIX: the extra argument was passed without a %-placeholder,
        # which makes the logging module raise an internal formatting
        # error instead of printing the exception.
        logger.info('Error: %s', error)
    except asyncio.CancelledError:
        logger.info('Parser has been stopped')
        raise
    except Exception as error:
        logger.info('Height: {}'.format(self.height))
        logger.error('Batch insert error: {}'.format(error))
        await self.emergency_stop_loop('Batch insert error', error)
    finally:
        # BUG FIX: the connection was previously leaked on every call.
        conn.close()
        # Reset buffers regardless of outcome so the next batch starts clean.
        self.transactions_inserted = 0
        self.sql_data_transactions = []
        self.sql_data_proofs = []
        self.sql_data_cdms = []
        self.sql_data_senders = []
def autostart(app, loop):
    """Server-start listener: schedule the controls background task and
    log the running CDM version."""
    parser_task = controls.start()
    loop.create_task(parser_task)
    logger.info('Autostart Success!')
    logger.info('CDM Version: {0}'.format(os.environ['CDM_VERSION']))
def log_info(request):
    """Handler that logs the module-level random string and replies."""
    logger.info(rand_string)
    reply = text("hello")
    return reply
def main_process_start(app, loop):
    """Listener invoked in the main process before workers spawn; logs a
    marker line."""
    logger.info("main_process_start")
def _helper(
    self,
    host=None,
    port=None,
    debug=False,
    ssl=None,
    sock=None,
    unix=None,
    workers=1,
    loop=None,
    protocol=HttpProtocol,
    backlog=100,
    register_sys_signals=True,
    run_async=False,
    auto_reload=False,
):
    """Helper function used by `run` and `create_server`.

    Normalizes the SSL argument, validates config, wires app listeners
    into the ``server_settings`` dict and returns it for the server
    entry points to consume.
    """
    # Ensure the app finalizer runs before any user before-start listener.
    self.listeners["before_server_start"] = [
        self.finalize
    ] + self.listeners["before_server_start"]
    if isinstance(ssl, dict):
        # try common aliases for the certificate and key entries
        cert = ssl.get("cert") or ssl.get("certificate")
        key = ssl.get("key") or ssl.get("keyfile")
        if cert is None or key is None:
            raise ValueError("SSLContext or certificate and key required.")
        context = create_default_context(purpose=Purpose.CLIENT_AUTH)
        context.load_cert_chain(cert, keyfile=key)
        ssl = context
    if self.config.PROXIES_COUNT and self.config.PROXIES_COUNT < 0:
        raise ValueError(
            "PROXIES_COUNT cannot be negative. "
            "https://sanic.readthedocs.io/en/latest/sanic/config.html"
            "#proxy-configuration")
    self.error_handler.debug = debug
    self.debug = debug
    server_settings = {
        "protocol": protocol,
        "host": host,
        "port": port,
        "sock": sock,
        "unix": unix,
        "ssl": ssl,
        "app": self,
        "signal": ServerSignal(),
        "loop": loop,
        "register_sys_signals": register_sys_signals,
        "backlog": backlog,
    }
    # Register start/stop events
    for event_name, settings_name, reverse in (
        ("before_server_start", "before_start", False),
        ("after_server_start", "after_start", False),
        ("before_server_stop", "before_stop", True),
        ("after_server_stop", "after_stop", True),
        ("main_process_start", "main_start", False),
        ("main_process_stop", "main_stop", True),
    ):
        listeners = self.listeners[event_name].copy()
        if reverse:
            # Stop-phase listeners fire in reverse registration order.
            listeners.reverse()
        # Prepend sanic to the arguments when listeners are triggered
        listeners = [partial(listener, self) for listener in listeners]
        server_settings[settings_name] = listeners
    if self.configure_logging and debug:
        logger.setLevel(logging.DEBUG)
    # Only the outermost process (not a spawned worker) prints the logo.
    if (self.config.LOGO
            and os.environ.get("SANIC_SERVER_RUNNING") != "true"):
        logger.debug(self.config.LOGO
                     if isinstance(self.config.LOGO, str) else BASE_LOGO)
    if run_async:
        server_settings["run_async"] = True
    # Serve
    if host and port:
        proto = "http"
        if ssl is not None:
            proto = "https"
        if unix:
            logger.info(f"Goin' Fast @ {unix} {proto}://...")
        else:
            logger.info(f"Goin' Fast @ {proto}://{host}:{port}")
    return server_settings
def _helper(self, host=None, port=None, debug=False, ssl=None, sock=None,
            workers=1, loop=None, protocol=HttpProtocol, backlog=100,
            stop_event=None, register_sys_signals=True, run_async=False,
            auto_reload=False):
    """Helper function used by `run` and `create_server`.

    Normalizes the SSL argument, emits the stop_event deprecation
    warning and assembles the ``server_settings`` dict handed to the
    low-level server.
    """
    if isinstance(ssl, dict):
        # try common aliases for the certificate and key entries
        cert = ssl.get('cert') or ssl.get('certificate')
        key = ssl.get('key') or ssl.get('keyfile')
        if cert is None or key is None:
            raise ValueError("SSLContext or certificate and key required.")
        context = create_default_context(purpose=Purpose.CLIENT_AUTH)
        context.load_cert_chain(cert, keyfile=key)
        ssl = context
    if stop_event is not None:
        if debug:
            # Make the DeprecationWarning visible in debug mode.
            warnings.simplefilter('default')
        warnings.warn("stop_event will be removed from future versions.",
                      DeprecationWarning)
    self.error_handler.debug = debug
    self.debug = debug
    server_settings = {
        'protocol': protocol,
        'request_class': self.request_class,
        'is_request_stream': self.is_request_stream,
        'router': self.router,
        'host': host,
        'port': port,
        'sock': sock,
        'ssl': ssl,
        'signal': Signal(),
        'debug': debug,
        'request_handler': self.handle_request,
        'error_handler': self.error_handler,
        'request_timeout': self.config.REQUEST_TIMEOUT,
        'response_timeout': self.config.RESPONSE_TIMEOUT,
        'keep_alive_timeout': self.config.KEEP_ALIVE_TIMEOUT,
        'request_max_size': self.config.REQUEST_MAX_SIZE,
        'keep_alive': self.config.KEEP_ALIVE,
        'loop': loop,
        'register_sys_signals': register_sys_signals,
        'backlog': backlog,
        'access_log': self.config.ACCESS_LOG,
        'websocket_max_size': self.config.WEBSOCKET_MAX_SIZE,
        'websocket_max_queue': self.config.WEBSOCKET_MAX_QUEUE,
        'websocket_read_limit': self.config.WEBSOCKET_READ_LIMIT,
        'websocket_write_limit': self.config.WEBSOCKET_WRITE_LIMIT,
        'graceful_shutdown_timeout': self.config.GRACEFUL_SHUTDOWN_TIMEOUT
    }
    # -------------------------------------------- #
    # Register start/stop events
    # -------------------------------------------- #
    for event_name, settings_name, reverse in (
            ("before_server_start", "before_start", False),
            ("after_server_start", "after_start", False),
            ("before_server_stop", "before_stop", True),
            ("after_server_stop", "after_stop", True),
    ):
        listeners = self.listeners[event_name].copy()
        if reverse:
            # Stop-phase listeners fire in reverse registration order.
            listeners.reverse()
        # Prepend sanic to the arguments when listeners are triggered
        listeners = [partial(listener, self) for listener in listeners]
        server_settings[settings_name] = listeners
    if self.configure_logging and debug:
        logger.setLevel(logging.DEBUG)
    # Only the outermost process (not a spawned worker) prints the logo.
    if self.config.LOGO is not None and \
            os.environ.get('SANIC_SERVER_RUNNING') != 'true':
        logger.debug(self.config.LOGO)
    if run_async:
        server_settings['run_async'] = True
    # Serve
    if host and port and os.environ.get('SANIC_SERVER_RUNNING') != 'true':
        proto = "http"
        if ssl is not None:
            proto = "https"
        logger.info('Goin\' Fast @ {}://{}:{}'.format(proto, host, port))
    return server_settings
list中每个元素为tuple,包含了返回词以及该词与目标词的cosine相关性,并按照相关性从高到低排序 例如输入"篮球"目标词返回前10个结果: [(篮球队,0.833797), (排球, 0.833721) ..., (篮球圈, 0.752021)] 如果输入目标词不在词典中,则返回None。""")
async def nearest_words(request):
    """Return the nearest words for each query word, or the words around
    each given topic id.

    Query parameters: ``word`` (list), ``topic_id`` (list) and ``n``
    (top-N count, default 10).  When neither ``word`` nor ``topic_id``
    is supplied, an error response is returned.  Exceptions are logged
    with traceback and reported via ``error_response``.
    """
    try:
        word = get_param(request, 'word', is_list=True)
        topic_id = get_param(request, 'topic_id', is_list=True)
        n = int(get_param(request, 'n', 10))
        if word:
            # One result list per requested word.
            result = {w: twe.nearest_words(w, n) for w in word}
            return response(data=result)
        if topic_id:
            result = {
                int(_id): twe.nearest_words_around_topic(int(_id), n)
                for _id in topic_id
            }
            return response(data=result)
        return error_response()
    except Exception as err:
        logger.error(err, exc_info=True)
        return error_response(str(err))


if __name__ == '__main__':
    logger.info(f"running familia api with {n_workers} workers")
    app.run(host='0.0.0.0', port=5000, workers=n_workers)
async def get_all_commits(repo_path, github_token):
    """Fetch commits on the repository's default branch via the GitHub API.

    Pages of 100 commits are fetched in batches of up to 10 concurrent
    requests; each surviving commit is passed through ``map_commit``
    together with the index ``commits_count - i - 1``.

    NOTE(review): ``await httpx.get(...)`` only works on very old httpx
    releases whose module-level API was async — confirm the pinned
    httpx version before touching this.
    """
    default_branch = await get_default_branch(repo_path, github_token)
    headers = {"Authorization": f"token {github_token}"}
    initial_page = (
        f"https://api.github.com/repos/{repo_path}/commits?sha={default_branch}&per_page=100"
    )
    logger.info(f'Retrieving commits from Github via "{initial_page}"...')
    initial_request = await httpx.get(initial_page, headers=headers)
    link_header = initial_request.headers.get("link")
    if not link_header:
        # No pagination links: the single response holds everything.
        commits = initial_request.json()
    else:
        parsed_header = parse_header_links(link_header)
        concurrent_pages = 10
        # total_pages = int(parsed_header["last"]["qs"]["page"])
        # NOTE(review): the page count is capped at 2 here, which looks
        # like a debugging leftover — verify before relying on this
        # function returning the *complete* history.
        total_pages = min(int(parsed_header["last"]["qs"]["page"]), 2)
        commits = []
        current_page = 1
        while current_page <= total_pages:
            # Build a batch of page requests to run concurrently.
            requests = []
            for i in range(0, concurrent_pages):
                request = httpx.get(
                    f"https://api.github.com/repos/{repo_path}/commits?sha={default_branch}&per_page=100&page={current_page}",
                    headers=headers,
                )
                requests.append(request)
                current_page = current_page + 1
                if current_page > total_pages:
                    break
            responses = await asyncio.gather(*requests)
            for response in responses:
                response.raise_for_status()
                request_commits = response.json()
                commits.extend(request_commits)
    commits_count = len(commits)
    mapped_commits = []
    i = 0
    for c in commits:
        # Defensive: skip malformed entries missing required keys.
        if not all(k in c for k in ("sha", "commit")):
            continue
        mapped_commits.append(map_commit(c, commits_count - i - 1))
        i += 1
    return mapped_commits
def after_stop(app, loop):
    """Listener fired after the server has fully stopped; logs a marker."""
    log.info("TRIED EVERYTHING")
def before_stop(app, loop):
    """Listener fired just before the server begins shutting down."""
    log.info("SERVER STOPPING")
def after_start(app, loop):
    """Listener fired once the server is up and accepting connections."""
    log.info("OH OH OH OH OHHHHHHHH")
async def remove_robot_distribute(robot_id):
    """Mark the robot as not available for assignment by removing it
    from today's distribution sorted set in Redis."""
    key = DISTRIBUTE_KEY_REDIS_KEY.format(today=today())
    await redis.conn.zrem(key, robot_id)
    logger.info(f'remove_robot_distribute --> [{robot_id}]')
def run(
    self,
    host: Optional[str] = None,
    port: Optional[int] = None,
    *,
    debug: bool = False,
    auto_reload: Optional[bool] = None,
    ssl: Union[dict, SSLContext, None] = None,
    sock: Optional[socket] = None,
    workers: int = 1,
    protocol: Optional[Type[Protocol]] = None,
    backlog: int = 100,
    register_sys_signals: bool = True,
    access_log: Optional[bool] = None,
    unix: Optional[str] = None,
    loop: None = None,
) -> None:
    """
    Run the HTTP Server and listen until keyboard interrupt or term
    signal. On termination, drain connections before closing.

    :param host: Address to host on
    :type host: str
    :param port: Port to host on
    :type port: int
    :param debug: Enables debug output (slows server)
    :type debug: bool
    :param auto_reload: Reload app whenever its source code is changed.
                        Enabled by default in debug mode.
    :type auto_reload: bool
    :param ssl: SSLContext, or location of certificate and key for SSL
                encryption of worker(s)
    :type ssl: SSLContext or dict
    :param sock: Socket for the server to accept connections from
    :type sock: socket
    :param workers: Number of worker processes to spawn
    :type workers: int
    :param protocol: Subclass of asyncio Protocol class
    :type protocol: type[Protocol]
    :param backlog: a number of unaccepted connections that the system
                    will allow before refusing new connections
    :type backlog: int
    :param register_sys_signals: Register SIG* events
    :type register_sys_signals: bool
    :param access_log: Enables writing access logs (slows server)
    :type access_log: bool
    :param unix: Unix socket to listen on instead of TCP port
    :type unix: str
    :return: Nothing
    """
    if loop is not None:
        raise TypeError(
            "loop is not a valid argument. To use an existing loop, "
            "change to create_server().\nSee more: "
            "https://sanic.readthedocs.io/en/latest/sanic/deploying.html"
            "#asynchronous-support")
    # Auto-reload: the outer process only runs the watchdog; the actual
    # server is started in a child process (SANIC_SERVER_RUNNING=true).
    if auto_reload or auto_reload is None and debug:
        if os.environ.get("SANIC_SERVER_RUNNING") != "true":
            return reloader_helpers.watchdog(1.0)
    # Host/port defaults only apply when no pre-bound socket was given.
    if sock is None:
        host, port = host or "127.0.0.1", port or 8000
    if protocol is None:
        protocol = (WebSocketProtocol if self.websocket_enabled
                    else HttpProtocol)
    # if access_log is passed explicitly change config.ACCESS_LOG
    if access_log is not None:
        self.config.ACCESS_LOG = access_log
    server_settings = self._helper(
        host=host,
        port=port,
        debug=debug,
        ssl=ssl,
        sock=sock,
        unix=unix,
        workers=workers,
        protocol=protocol,
        backlog=backlog,
        register_sys_signals=register_sys_signals,
        auto_reload=auto_reload,
    )
    try:
        self.is_running = True
        self.is_stopping = False
        if workers > 1 and os.name != "posix":
            # BUG FIX: Logger.warn is deprecated; use Logger.warning.
            logger.warning(
                f"Multiprocessing is currently not supported on {os.name},"
                " using workers=1 instead")
            workers = 1
        if workers == 1:
            serve_single(server_settings)
        else:
            serve_multiple(server_settings, workers)
    except BaseException:
        error_logger.exception(
            "Experienced exception while trying to serve")
        raise
    finally:
        # Always clear the running flag, even on error/interrupt.
        self.is_running = False
    logger.info("Server Stopped")
def sig_handler(signal, frame):
    """Top-level signal handler: log the received signal, then forward
    SIGTERM to every worker process."""
    logger.info("Received signal %s. Shutting down.", Signals(signal).name)
    for worker in processes:
        os.kill(worker.pid, SIGTERM)
async def http1(self): """ HTTP 1.1 connection handler """ # Handle requests while the connection stays reusable while self.keep_alive and self.stage is Stage.IDLE: self.init_for_request() # Wait for incoming bytes (in IDLE stage) if not self.recv_buffer: await self._receive_more() self.stage = Stage.REQUEST try: # Receive and handle a request self.response_func = self.http1_response_header await self.http1_request_header() self.stage = Stage.HANDLER self.request.conn_info = self.protocol.conn_info await self.protocol.request_handler(self.request) # Handler finished, response should've been sent if self.stage is Stage.HANDLER and not self.upgrade_websocket: raise ServerError("Handler produced no response") if self.stage is Stage.RESPONSE: await self.response.send(end_stream=True) except CancelledError: # Write an appropriate response before exiting if not self.protocol.transport: logger.info( f"Request: {self.request.method} {self.request.url} " "stopped. Transport is closed.") return e = self.exception or ServiceUnavailable("Cancelled") self.exception = None self.keep_alive = False await self.error_response(e) except Exception as e: # Write an error response await self.error_response(e) # Try to consume any remaining request body if self.request_body: if self.response and 200 <= self.response.status < 300: error_logger.error(f"{self.request} body not consumed.") # Limit the size because the handler may have set it infinite self.request_max_size = min(self.request_max_size, self.protocol.request_max_size) try: async for _ in self: pass except PayloadTooLarge: # We won't read the body and that may cause httpx and # tests to fail. This little delay allows clients to push # a small request into network buffers before we close the # socket, so that they are then able to read the response. await sleep(0.001) self.keep_alive = False # Clean up to free memory and for the next request if self.request: self.request.stream = None if self.response: self.response.stream = None
def before_start(app, loop):
    """Listener fired right before the server starts listening."""
    log.info("SERVER STARTING")
async def close_mysql(_app, loop):
    """Shutdown listener: close the app's MySQL pool and wait until all
    of its connections have been released."""
    pool = _app.mysql
    pool.close()
    log.info('closing mysql connection for [pid:{}]'.format(os.getpid()))
    await pool.wait_closed()
def news(req):
    """Enqueue a background job that refreshes the article list, then
    return the currently cached articles as JSON.

    :param req: Incoming request (unused).
    :return: JSON response with the cached articles.
    """
    job = newsloader.job_queue.enqueue(newsloader.retrieve_articles)
    # BUG FIX: logging args need a %-placeholder; the job id was passed
    # as a stray argument and triggered a formatting error inside the
    # logging module instead of being rendered.
    logger.info("Enqueued news job with ID %s", job.id)
    return json(list(newsloader.articles))
"""<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">""" ] for ds in await _fetch("https://api.dandiarchive.org/api/dandisets"): versions = await _fetch(f"https://api.dandiarchive.org/api/dandisets/" f"{ds['identifier']}/versions") for version in versions: url = (f"https://dandiarchive.org/dandiset/{ds['identifier']}/" f"{version['version']}") sitemap.append( f"""<url><loc>{url}</loc><lastmod>{version['modified']}</lastmod></url>""" ) sitemap.append("</urlset>") sitemap = "\n".join(sitemap) sitemapfile.write_text(sitemap) return sitemap @app.route("sitemap.xml", methods=["GET"]) async def sitemap(request): return HTTPResponse(await _sitemap(), status=200, headers=None, content_type="text/xml") if __name__ == "__main__": logger.info("Starting backend") app.run(host="0.0.0.0", port=8080)
def main_process_stop(app, loop):
    """Listener invoked in the main process during shutdown; logs a
    marker line."""
    logger.info("main_process_stop")
async def send_state(request):
    """Send a dataset state to CoMeT (the broker).

    This should only ever be called by kotekan's datasetManager.
    """
    try:
        hash = request.json["hash"]
        state = request.json["state"]
        if state:
            type = state["type"]
        else:
            type = None
        logger.info("/send-state {} {}".format(type, hash))
        reply = dict()
        archive_state = False
        # In case the shielded part of this endpoint gets cancelled, we ignore it but
        # re-raise the CancelledError in the end
        cancelled = None
        # Lock states and check if we know this state already.
        async with lock_states:
            try:
                found = await get_state(hash, wait=False)
            except StateNotFoundError:
                # New state: persist it in the redis hash.
                await redis.execute_command("hset", "states", hash,
                                            json.dumps(state))
                reply["result"] = "success"
                archive_state = True
                # Notify anything waiting for this state to arrive
                signal_created(hash, "state", lock_states, waiting_states)
            else:
                # if we know it already, does it differ?
                if found != state:
                    reply["result"] = (
                        "error: hash collision ({})\nTrying to register the following "
                        "dataset state:\n{},\nbut a different state is know to "
                        "the broker with the same hash:\n{}".format(
                            hash, state, found))
                    logger.warning("send-state: {}".format(reply["result"]))
                else:
                    reply["result"] = "success"
            # Remove it from the set of requested states (if it's in there.)
            try:
                await asyncio.shield(
                    redis.execute_command("hdel", "requested_states", hash))
            except asyncio.CancelledError as err:
                logger.info(
                    "/send-state {}: Cancelled while removing requested state. Ignoring..."
                    .format(hash))
                cancelled = err
        if archive_state:
            await asyncio.shield(archive("state", request.json))
        # Done cleaning up, re-raise if this request got cancelled.
        if cancelled:
            raise cancelled
        return response.json(reply)
    except Exception as e:
        # NOTE(review): brace-style placeholders with extra args do not
        # work with the stdlib logging %-formatting — confirm whether a
        # brace-style adapter is configured, otherwise this message never
        # renders the exception or client IP.
        logger.error(
            "send-state: threw exception {} while handling request from {}",
            str(e),
            request.ip,
        )
        traceback.print_exc()
        raise
    finally:
        logger.debug("send-state: finished")
async def start(self):
    """Main crawl loop of the parser.

    Determines the resume height from Postgres (optionally overridden by
    ``START_HEIGHT``), then repeatedly: reads the node's current chain
    height, rolls back rows past a chain reorg, fetches all missing
    blocks in concurrent batches of ``self.step`` via ``fetch_data`` and
    flushes them with ``save_data``.  Each pass rewinds
    ``blocks_to_check`` blocks to re-verify the chain tip and sleeps 2s.
    Any unrecoverable error goes through ``emergency_stop_loop``;
    cancellation is re-raised.
    """
    conn = None
    try:
        conn = psycopg2.connect(**dsn)
    except psycopg2.OperationalError as error:
        # BUG FIX: the exception was passed as a stray logging argument
        # without a %-placeholder and was never rendered.
        logger.error('Postgres Engine Error: %s', error)
        await self.emergency_stop_loop(
            'No conn error', 'Error on connection to Postgres Engine')
    try:
        with conn:
            with conn.cursor() as cur:
                cur.execute("SELECT max(height) FROM transactions")
                max_height = cur.fetchone()
                # Resume a few blocks behind the highest saved height so
                # the chain tip gets re-verified.
                if max_height and max_height[0]:
                    if max_height[0] > self.blocks_to_check:
                        self.height = max_height[0] - self.blocks_to_check
        if os.environ['START_HEIGHT']:
            start_height = int(os.environ['START_HEIGHT'])
            if self.height < start_height:
                self.height = start_height
    except Exception as error:
        logger.error('Max height request error: {}'.format(error))
        await self.emergency_stop_loop('Max height request error', error)
    while True:
        try:
            req = requests.get('{0}/node/status'.format(os.environ['NODE_URL']))
            data = req.json()
            self.last_block = int(data['blockchainHeight'])
            with conn:
                with conn.cursor() as cur:
                    # Chain shrank (reorg): drop rows past the new tip.
                    if self.height > self.last_block:
                        cur.execute("""
                            DELETE FROM transactions WHERE height > '{height}'
                        """.format(height=self.last_block))
                        self.height = self.last_block
                        conn.commit()
        except Exception as error:
            await self.emergency_stop_loop('Waves node is not responding', error)
        logger.info('Start height: {}, last block: {}'.format(self.height, self.last_block))
        logger.info('-' * 40)
        try:
            async with aiohttp.ClientSession() as session:
                try:
                    while self.height < self.last_block:
                        t0 = time()
                        # Clamp the batch end to just past the chain tip.
                        batch = self.height + self.step
                        if self.height + self.step >= self.last_block:
                            batch = self.last_block + 1
                        batch_range = (self.height, batch)
                        tasks = []
                        for i in range(batch_range[0], batch_range[1]):
                            url = '{0}/blocks/at/{1}'.format(
                                os.environ['NODE_URL'], self.height)
                            task = asyncio.create_task(self.fetch_data(url, session))
                            tasks.append(task)
                            self.height += 1
                        logger.info('Height range {0} - {1}'.format(
                            batch_range[0], batch_range[1]))
                        await asyncio.gather(*tasks)
                        await self.save_data()
                        logger.info('Parsing time: {0} sec'.format(time() - t0))
                        logger.info('-' * 40)
                except asyncio.CancelledError:
                    logger.info('Parser stopping...')
                    raise
                except Exception as error:
                    logger.error('Blocks session cycle error on height {0}: {1}'.format(
                        self.height, error))
                    await self.emergency_stop_loop('Blocks session cycle error', error)
        except asyncio.CancelledError:
            logger.info('Parser has been stopped')
            raise
        except Exception as error:
            logger.error('Request blocks cycle error: {0}'.format(error))
            await self.emergency_stop_loop('Request blocks cycle', error)
        finally:
            # Rewind so the next pass re-checks the most recent blocks.
            self.height = self.height - self.blocks_to_check
            await asyncio.sleep(2)
async def register_dataset(request):
    """Register a dataset with CoMeT (the broker).

    This should only ever be called by kotekan's datasetManager.
    """
    try:
        hash = request.json["hash"]
        logger.info("/register-dataset {}".format(hash))
        ds = request.json["ds"]
        dataset_valid = await check_dataset(ds)
        reply = dict()
        root = None
        if dataset_valid:
            root = await find_root(hash, ds)
        # Invalid dataset or no resolvable root: reject early.
        if root is None:
            reply["result"] = "Dataset {} invalid.".format(hash)
            logger.debug(
                "register-dataset: Received invalid dataset with hash {} : {}".
                format(hash, ds))
            return response.json(reply)
        archive_ds = False
        # In case the shielded part of this endpoint gets cancelled, we ignore it but
        # re-raise the CancelledError in the end
        # NOTE(review): `cancelled` is never assigned anywhere below in
        # this version, so the re-raise at the end is dead code — confirm
        # whether a shielded cancellation handler was dropped.
        cancelled = None
        # Lock datasets and check if dataset already known.
        async with lock_datasets:
            try:
                found = await get_dataset(hash, wait=False)
            except DatasetNotFoundError:
                if dataset_valid and root is not None:
                    # save the dataset
                    await redis.execute_command("hset", "datasets", hash,
                                                json.dumps(ds))
                    reply["result"] = "success"
                    archive_ds = True
                    # Notify anything waiting for this dataset to arrive
                    signal_created(hash, "dataset", lock_datasets,
                                   waiting_datasets)
            else:
                # if we know it already, does it differ?
                if found != ds:
                    reply["result"] = (
                        "error: hash collision ({})\nTrying to register the following dataset:\n{},\nbut a different one is know to "
                        "the broker with the same hash:\n{}".format(
                            hash, ds, found))
                    logger.warning("register-dataset: {}".format(
                        reply["result"]))
                else:
                    reply["result"] = "success"
        if archive_ds:
            await asyncio.shield(archive("dataset", request.json))
        # Done cleaning up, re-raise if this request got cancelled.
        if cancelled:
            raise cancelled
        return response.json(reply)
    except Exception as e:
        # NOTE(review): brace-style placeholders with extra args do not
        # work with stdlib logging %-formatting — confirm whether a
        # brace-style adapter is configured, otherwise the exception and
        # client IP are never rendered.
        logger.error(
            "register-dataset: threw exception {} while handling request from {}",
            str(e),
            request.ip,
        )
        traceback.print_exc()
        raise
    finally:
        logger.debug("register-dataset: finished")
def gentle_exit(app, loop):
    """Listener: terminate this worker process immediately via SIGKILL."""
    logger.info('Killing the process')
    pid = os.getpid()
    os.kill(pid, signal.SIGKILL)
async def connect_to_db(*args, **kwargs):
    """Startup listener: open the application's database connection.

    Failures are logged with traceback instead of crashing startup.
    """
    try:
        await main_svr_app.db.connect()
        logger.info("DB connected")
    except Exception:
        # BUG FIX: a bare `except:` also swallowed CancelledError and
        # SystemExit and discarded the traceback; catch Exception and
        # keep the stack trace in the log.
        logger.exception("DB Connection Error")
async def log(request):
    """Log the raw request body and acknowledge with a plain 'OK'."""
    payload = request.body
    logger.info("received => [%s]" % payload)
    return text('OK')
async def disconnect_from_db(*args, **kwargs):
    """Shutdown listener: close the application's database connection.

    Failures are logged with traceback instead of aborting shutdown.
    """
    try:
        await main_svr_app.db.disconnect()
        logger.info("DB disconnected")
    except Exception:
        # BUG FIX: a bare `except:` also swallowed CancelledError and
        # SystemExit and discarded the traceback; catch Exception and
        # keep the stack trace in the log.
        logger.exception("DB Disconnection Error")
def serve(
    host,
    port,
    app,
    request_handler,
    error_handler,
    before_start=None,
    after_start=None,
    before_stop=None,
    after_stop=None,
    debug=False,
    request_timeout=60,
    response_timeout=60,
    keep_alive_timeout=5,
    ssl=None,
    sock=None,
    request_max_size=None,
    request_buffer_queue_size=100,
    reuse_port=False,
    loop=None,
    protocol=HttpProtocol,
    backlog=100,
    register_sys_signals=True,
    run_multiple=False,
    run_async=False,
    connections=None,
    signal=None,
    request_class=None,
    access_log=True,
    keep_alive=True,
    is_request_stream=False,
    router=None,
    websocket_max_size=None,
    websocket_max_queue=None,
    websocket_read_limit=2 ** 16,
    websocket_write_limit=2 ** 16,
    state=None,
    graceful_shutdown_timeout=15.0,
    asyncio_server_kwargs=None,
):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param response_timeout: time in seconds
    :param keep_alive_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param access_log: disable/enable access log
    :param websocket_max_size: enforces the maximum size for
                               incoming messages in bytes.
    :param websocket_max_queue: sets the maximum length of the queue
                                that holds incoming messages.
    :param websocket_read_limit: sets the high-water limit of the buffer for
                                 incoming bytes, the low-water limit is half
                                 the high-water limit.
    :param websocket_write_limit: sets the high-water limit of the buffer for
                                  outgoing bytes, the low-water limit is a
                                  quarter of the high-water limit.
    :param is_request_stream: disable/enable Request.stream
    :param request_buffer_queue_size: streaming request buffer queue size
    :param router: Router object
    :param signal: shared Signal instance; a fresh one is created per call
                   when omitted
    :param graceful_shutdown_timeout: How long to take to force-close
                                      non-idle connections
    :param asyncio_server_kwargs: key-value args for asyncio/uvloop
                                  create_server method
    :return: Nothing
    """
    # Fix: the original default was `signal=Signal()` — a mutable default
    # shared across calls, so a second serve() in the same process started
    # with `signal.stopped` already True. Create a fresh Signal per call.
    if signal is None:
        signal = Signal()

    if not run_async:
        # create new event_loop after fork
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        app=app,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        response_timeout=response_timeout,
        keep_alive_timeout=keep_alive_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        access_log=access_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        websocket_read_limit=websocket_read_limit,
        websocket_write_limit=websocket_write_limit,
        state=state,
        debug=debug,
    )

    asyncio_server_kwargs = (
        asyncio_server_kwargs if asyncio_server_kwargs else {}
    )
    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog,
        **asyncio_server_kwargs
    )

    if run_async:
        return server_coroutine

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Ignore SIGINT when run_multiple; the main process relays it.
    if run_multiple:
        signal_func(SIGINT, SIG_IGN)

    # Register signals for graceful termination
    if register_sys_signals:
        # Fix: local was misspelled `_singals` in the original.
        _signals = (SIGTERM,) if run_multiple else (SIGINT, SIGTERM)
        for _signal in _signals:
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                logger.warning(
                    "Sanic tried to use loop.add_signal_handler "
                    "but it is not implemented on this platform."
                )
    pid = os.getpid()
    try:
        logger.info("Starting worker [%s]", pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Gracefully shutdown timeout.
        # We should provide graceful_shutdown_timeout,
        # instead of letting connection hangs forever.
        # Let's roughly calculate time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection())
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
def serve(host, port, request_handler, error_handler, before_start=None,
          after_start=None, before_stop=None, after_stop=None, debug=False,
          request_timeout=60, response_timeout=60, keep_alive_timeout=5,
          ssl=None, sock=None, request_max_size=None, reuse_port=False,
          loop=None, protocol=HttpProtocol, backlog=100,
          register_sys_signals=True, run_async=False, connections=None,
          signal=Signal(), request_class=None, access_log=True,
          keep_alive=True, is_request_stream=False, router=None,
          websocket_max_size=None, websocket_max_queue=None, state=None,
          graceful_shutdown_timeout=15.0):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param response_timeout: time in seconds
    :param keep_alive_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param access_log: disable/enable access log
    :param is_request_stream: disable/enable Request.stream
    :param router: Router object
    :return: Nothing
    """
    # NOTE(review): `signal=Signal()` is a mutable default shared across
    # calls — after one graceful shutdown sets `signal.stopped = True`,
    # later calls reuse that same instance. Confirm against callers.
    if not run_async:
        loop = async_loop.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    connections = connections if connections is not None else set()
    # Partial application: the event loop instantiates one protocol object
    # per accepted connection with these fixed settings.
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        response_timeout=response_timeout,
        keep_alive_timeout=keep_alive_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        access_log=access_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        state=state,
        debug=debug,
    )

    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog
    )

    # Instead of pulling time at the end of every request,
    # pull it once per minute
    loop.call_soon(partial(update_current_time, loop))

    if run_async:
        # Caller owns the loop and will await the coroutine itself.
        return server_coroutine

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Register signals for graceful termination
    if register_sys_signals:
        for _signal in (SIGINT, SIGTERM):
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                logger.warning('Sanic tried to use loop.add_signal_handler '
                               'but it is not implemented on this platform.')

    pid = os.getpid()
    try:
        logger.info('Starting worker [%s]', pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Gracefully shutdown timeout.
        # We should provide graceful_shutdown_timeout,
        # instead of letting connection hangs forever.
        # Let's roughly calculate time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(
                    conn.websocket.close_connection(after_handshake=True)
                )
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
def serve(
    host,
    port,
    app,
    request_handler,
    error_handler,
    before_start=None,
    after_start=None,
    before_stop=None,
    after_stop=None,
    debug=False,
    request_timeout=60,
    response_timeout=60,
    keep_alive_timeout=5,
    ssl=None,
    sock=None,
    request_max_size=None,
    request_buffer_queue_size=100,
    reuse_port=False,
    loop=None,
    protocol=HttpProtocol,
    backlog=100,
    register_sys_signals=True,
    run_multiple=False,
    run_async=False,
    connections=None,
    signal=None,
    request_class=None,
    access_log=True,
    keep_alive=True,
    is_request_stream=False,
    router=None,
    websocket_max_size=None,
    websocket_max_queue=None,
    websocket_read_limit=2 ** 16,
    websocket_write_limit=2 ** 16,
    state=None,
    graceful_shutdown_timeout=15.0,
    asyncio_server_kwargs=None,
):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param response_timeout: time in seconds
    :param keep_alive_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param access_log: disable/enable access log
    :param websocket_max_size: enforces the maximum size for
                               incoming messages in bytes.
    :param websocket_max_queue: sets the maximum length of the queue
                                that holds incoming messages.
    :param websocket_read_limit: sets the high-water limit of the buffer for
                                 incoming bytes, the low-water limit is half
                                 the high-water limit.
    :param websocket_write_limit: sets the high-water limit of the buffer for
                                  outgoing bytes, the low-water limit is a
                                  quarter of the high-water limit.
    :param is_request_stream: disable/enable Request.stream
    :param request_buffer_queue_size: streaming request buffer queue size
    :param router: Router object
    :param signal: shared Signal instance; a fresh one is created per call
                   when omitted
    :param graceful_shutdown_timeout: How long to take to force-close
                                      non-idle connections
    :param asyncio_server_kwargs: key-value args for asyncio/uvloop
                                  create_server method
    :return: Nothing
    """
    # Fix: the original default was `signal=Signal()` — a mutable default
    # shared across calls, so a second serve() in the same process started
    # with `signal.stopped` already True. Create a fresh Signal per call.
    if signal is None:
        signal = Signal()

    if not run_async:
        # create new event_loop after fork
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        app=app,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        response_timeout=response_timeout,
        keep_alive_timeout=keep_alive_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        access_log=access_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        websocket_read_limit=websocket_read_limit,
        websocket_write_limit=websocket_write_limit,
        state=state,
        debug=debug,
    )

    asyncio_server_kwargs = (
        asyncio_server_kwargs if asyncio_server_kwargs else {}
    )
    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog,
        **asyncio_server_kwargs
    )

    if run_async:
        return server_coroutine

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Ignore SIGINT when run_multiple; the main process relays it.
    if run_multiple:
        signal_func(SIGINT, SIG_IGN)

    # Register signals for graceful termination
    if register_sys_signals:
        # Fix: local was misspelled `_singals` in the original.
        _signals = (SIGTERM,) if run_multiple else (SIGINT, SIGTERM)
        for _signal in _signals:
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                logger.warning(
                    "Sanic tried to use loop.add_signal_handler "
                    "but it is not implemented on this platform."
                )
    pid = os.getpid()
    try:
        logger.info("Starting worker [%s]", pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Gracefully shutdown timeout.
        # We should provide graceful_shutdown_timeout,
        # instead of letting connection hangs forever.
        # Let's roughly calculate time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection())
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
def _helper(self, host=None, port=None, debug=False, ssl=None, sock=None,
            workers=1, loop=None, protocol=HttpProtocol, backlog=100,
            stop_event=None, register_sys_signals=True, run_async=False,
            access_log=True):
    """Helper function used by `run` and `create_server`.

    Normalizes the ssl argument, emits the stop_event deprecation
    warning, and assembles the keyword dict consumed by `serve`.
    """
    if isinstance(ssl, dict):
        # try common aliases
        cert = ssl.get('cert') or ssl.get('certificate')
        key = ssl.get('key') or ssl.get('keyfile')
        if cert is None or key is None:
            raise ValueError("SSLContext or certificate and key required.")
        context = create_default_context(purpose=Purpose.CLIENT_AUTH)
        context.load_cert_chain(cert, keyfile=key)
        ssl = context
    if stop_event is not None:
        if debug:
            warnings.simplefilter('default')
        warnings.warn("stop_event will be removed from future versions.",
                      DeprecationWarning)

    self.error_handler.debug = debug
    self.debug = debug

    # Keyword arguments forwarded verbatim to `serve` / `serve_multiple`.
    server_settings = {
        'protocol': protocol,
        'request_class': self.request_class,
        'is_request_stream': self.is_request_stream,
        'router': self.router,
        'host': host,
        'port': port,
        'sock': sock,
        'ssl': ssl,
        'signal': Signal(),
        'debug': debug,
        'request_handler': self.handle_request,
        'error_handler': self.error_handler,
        'request_timeout': self.config.REQUEST_TIMEOUT,
        'response_timeout': self.config.RESPONSE_TIMEOUT,
        'keep_alive_timeout': self.config.KEEP_ALIVE_TIMEOUT,
        'request_max_size': self.config.REQUEST_MAX_SIZE,
        'keep_alive': self.config.KEEP_ALIVE,
        'loop': loop,
        'register_sys_signals': register_sys_signals,
        'backlog': backlog,
        'access_log': access_log,
        'websocket_max_size': self.config.WEBSOCKET_MAX_SIZE,
        'websocket_max_queue': self.config.WEBSOCKET_MAX_QUEUE,
        'graceful_shutdown_timeout': self.config.GRACEFUL_SHUTDOWN_TIMEOUT
    }

    # -------------------------------------------- #
    # Register start/stop events
    # -------------------------------------------- #

    for event_name, settings_name, reverse in (
            ("before_server_start", "before_start", False),
            ("after_server_start", "after_start", False),
            ("before_server_stop", "before_stop", True),
            ("after_server_stop", "after_stop", True),
    ):
        listeners = self.listeners[event_name].copy()
        if reverse:
            # Stop-listeners fire in reverse registration order.
            listeners.reverse()
        # Prepend sanic to the arguments when listeners are triggered
        listeners = [partial(listener, self) for listener in listeners]
        server_settings[settings_name] = listeners

    if self.configure_logging and debug:
        logger.setLevel(logging.DEBUG)

    if self.config.LOGO is not None:
        logger.debug(self.config.LOGO)

    if run_async:
        server_settings['run_async'] = True

    # Serve
    if host and port:
        proto = "http"
        if ssl is not None:
            proto = "https"
        logger.info('Goin\' Fast @ {}://{}:{}'.format(proto, host, port))

    return server_settings
def run(self, host=None, port=None, debug=False, ssl=None,
        sock=None, workers=1, protocol=None,
        backlog=100, stop_event=None, register_sys_signals=True,
        access_log=True, **kwargs):
    """Run the HTTP Server and listen until keyboard interrupt or term
    signal. On termination, drain connections before closing.

    :param host: Address to host on
    :param port: Port to host on
    :param debug: Enables debug output (slows server)
    :param ssl: SSLContext, or location of certificate and key
                for SSL encryption of worker(s)
    :param sock: Socket for the server to accept connections from
    :param workers: Number of processes
                    received before it is respected
    :param backlog:
    :param stop_event:
    :param register_sys_signals:
    :param protocol: Subclass of asyncio protocol class
    :return: Nothing
    """
    # Default auto_reload to false
    auto_reload = False
    # If debug is set, default it to true
    if debug:
        auto_reload = True
    # Allow for overriding either of the defaults
    auto_reload = kwargs.get("auto_reload", auto_reload)

    if sock is None:
        host, port = host or "127.0.0.1", port or 8000

    if protocol is None:
        protocol = (WebSocketProtocol if self.websocket_enabled
                    else HttpProtocol)
    if stop_event is not None:
        if debug:
            warnings.simplefilter('default')
        warnings.warn("stop_event will be removed from future versions.",
                      DeprecationWarning)
    # compatibility old access_log params
    self.config.ACCESS_LOG = access_log
    server_settings = self._helper(
        host=host, port=port, debug=debug, ssl=ssl, sock=sock,
        workers=workers, protocol=protocol, backlog=backlog,
        register_sys_signals=register_sys_signals,
        auto_reload=auto_reload)

    try:
        self.is_running = True
        if workers == 1:
            if auto_reload and os.name != 'posix':
                # This condition must be removed after implementing
                # auto reloader for other operating systems.
                raise NotImplementedError
            # Run the watchdog in the parent; the actual server runs in a
            # child marked by SANIC_SERVER_RUNNING.
            if auto_reload and \
                    os.environ.get('SANIC_SERVER_RUNNING') != 'true':
                reloader_helpers.watchdog(2)
            else:
                serve(**server_settings)
        else:
            serve_multiple(server_settings, workers)
    except BaseException:
        error_logger.exception(
            'Experienced exception while trying to serve')
        raise
    finally:
        self.is_running = False
    logger.info("Server Stopped")
# Single shared model instance, loaded once at import time.
TF = TensorflowImg()


@sanic.route("/test")
def test(request):
    # Liveness probe.
    return response.json({'message': 'Hello world!'})


@sanic.route("/predictImage", methods=['POST'])
def predict(request):
    """
    Responsible for the prediction of the image
    @return - [<filename>, <prediction_label>, <prediction_accuracy>]
    """
    file = request.files['file']
    retVal = TF.predict(file)
    return response.json({'data': retVal})


if utils.isDevelopment():
    logger.info("Starting localhost development")
    sanic.run(host="0.0.0.0", port=8000, debug=True, access_log=True)
else:
    logger.info("Starting production server")
    # NOTE(review): os.getenv returns a str (or None when unset) —
    # confirm the server accepts a string port, or cast with int(...).
    sanic.run(host="0.0.0.0", port=os.getenv("SANIC_XRAY_PORT"),
              access_log=True)
def run(self,
        host: Optional[str] = None,
        port: Optional[int] = None,
        debug: bool = False,
        ssl: Union[dict, SSLContext, None] = None,
        sock: Optional[socket] = None,
        workers: int = 1,
        protocol: Type[Protocol] = None,
        backlog: int = 100,
        stop_event: Any = None,
        register_sys_signals: bool = True,
        access_log: Optional[bool] = None,
        **kwargs: Any) -> None:
    """Run the HTTP Server and listen until keyboard interrupt or term
    signal. On termination, drain connections before closing.

    :param host: Address to host on
    :type host: str
    :param port: Port to host on
    :type port: int
    :param debug: Enables debug output (slows server)
    :type debug: bool
    :param ssl: SSLContext, or location of certificate and key
                for SSL encryption of worker(s)
    :type ssl: SSLContext or dict
    :param sock: Socket for the server to accept connections from
    :type sock: socket
    :param workers: Number of processes received before it is respected
    :type workers: int
    :param protocol: Subclass of asyncio Protocol class
    :type protocol: type[Protocol]
    :param backlog: a number of unaccepted connections that the system
                    will allow before refusing new connections
    :type backlog: int
    :param stop_event: event to be triggered
                       before stopping the app - deprecated
    :type stop_event: None
    :param register_sys_signals: Register SIG* events
    :type register_sys_signals: bool
    :param access_log: Enables writing access logs (slows server)
    :type access_log: bool
    :return: Nothing
    """
    if "loop" in kwargs:
        raise TypeError(
            "loop is not a valid argument. To use an existing loop, "
            "change to create_server().\nSee more: "
            "https://sanic.readthedocs.io/en/latest/sanic/deploying.html"
            "#asynchronous-support")

    # Default auto_reload to false
    auto_reload = False
    # If debug is set, default it to true (unless on windows)
    if debug and os.name == "posix":
        auto_reload = True
    # Allow for overriding either of the defaults
    auto_reload = kwargs.get("auto_reload", auto_reload)

    if sock is None:
        host, port = host or "127.0.0.1", port or 8000

    if protocol is None:
        protocol = (WebSocketProtocol if self.websocket_enabled
                    else HttpProtocol)
    if stop_event is not None:
        if debug:
            warnings.simplefilter("default")
        warnings.warn(
            "stop_event will be removed from future versions.",
            DeprecationWarning,
        )
    # if access_log is passed explicitly change config.ACCESS_LOG
    if access_log is not None:
        self.config.ACCESS_LOG = access_log

    server_settings = self._helper(
        host=host,
        port=port,
        debug=debug,
        ssl=ssl,
        sock=sock,
        workers=workers,
        protocol=protocol,
        backlog=backlog,
        register_sys_signals=register_sys_signals,
        auto_reload=auto_reload,
    )

    try:
        self.is_running = True
        if workers == 1:
            if auto_reload and os.name != "posix":
                # This condition must be removed after implementing
                # auto reloader for other operating systems.
                raise NotImplementedError
            # Parent process runs the reload watchdog; the child marked
            # by SANIC_SERVER_RUNNING actually serves.
            if (auto_reload and
                    os.environ.get("SANIC_SERVER_RUNNING") != "true"):
                reloader_helpers.watchdog(2)
            else:
                serve(**server_settings)
        else:
            serve_multiple(server_settings, workers)
    except BaseException:
        error_logger.exception(
            "Experienced exception while trying to serve")
        raise
    finally:
        self.is_running = False
    logger.info("Server Stopped")
def serve(
    host,
    port,
    app,
    before_start=None,
    after_start=None,
    before_stop=None,
    after_stop=None,
    ssl=None,
    sock=None,
    reuse_port=False,
    loop=None,
    protocol=HttpProtocol,
    backlog=100,
    register_sys_signals=True,
    run_multiple=False,
    run_async=False,
    connections=None,
    signal=Signal(),
    state=None,
    asyncio_server_kwargs=None,
):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param run_async: bool: Do not create a new event loop for the server,
                      and return an AsyncServer object rather than running it
    :param asyncio_server_kwargs: key-value args for asyncio/uvloop
                                  create_server method
    :return: Nothing
    """
    # NOTE(review): `signal=Signal()` is a mutable default shared across
    # calls in the same process — confirm against callers.
    if not run_async:
        # create new event_loop after fork
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    if app.debug:
        loop.set_debug(app.debug)

    # This entry point serves over a raw socket, not ASGI.
    app.asgi = False

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        app=app,
        state=state,
    )
    asyncio_server_kwargs = (
        asyncio_server_kwargs if asyncio_server_kwargs else {}
    )
    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog,
        **asyncio_server_kwargs,
    )

    if run_async:
        # Hand control back to the caller without starting the loop.
        return AsyncioServer(
            loop=loop,
            serve_coro=server_coroutine,
            connections=connections,
            after_start=after_start,
            before_stop=before_stop,
            after_stop=after_stop,
        )

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Ignore SIGINT when run_multiple
    if run_multiple:
        signal_func(SIGINT, SIG_IGN)

    # Register signals for graceful termination
    if register_sys_signals:
        if OS_IS_WINDOWS:
            ctrlc_workaround_for_windows(app)
        else:
            for _signal in [SIGTERM] if run_multiple else [SIGINT, SIGTERM]:
                loop.add_signal_handler(_signal, app.stop)

    pid = os.getpid()
    try:
        logger.info("Starting worker [%s]", pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Gracefully shutdown timeout.
        # We should provide graceful_shutdown_timeout,
        # instead of letting connection hangs forever.
        # Let's roughly calculate time.
        graceful = app.config.GRACEFUL_SHUTDOWN_TIMEOUT
        start_shutdown = 0
        while connections and (start_shutdown < graceful):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection())
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
def sig_handler(signal, frame):
    """Relay a received termination signal to every worker as SIGINT."""
    logger.info("Received signal %s. Shutting down.", Signals(signal).name)
    worker_pids = [process.pid for process in processes]
    for pid in worker_pids:
        os.kill(pid, SIGINT)
def _helper(
    self,
    host=None,
    port=None,
    debug=False,
    ssl=None,
    sock=None,
    workers=1,
    loop=None,
    protocol=HttpProtocol,
    backlog=100,
    stop_event=None,
    register_sys_signals=True,
    run_async=False,
    auto_reload=False,
):
    """Helper function used by `run` and `create_server`.

    Normalizes the ssl argument, emits the stop_event deprecation
    warning, and assembles the keyword dict consumed by `serve`.
    """
    if isinstance(ssl, dict):
        # try common aliases
        cert = ssl.get("cert") or ssl.get("certificate")
        key = ssl.get("key") or ssl.get("keyfile")
        if cert is None or key is None:
            raise ValueError("SSLContext or certificate and key required.")
        context = create_default_context(purpose=Purpose.CLIENT_AUTH)
        context.load_cert_chain(cert, keyfile=key)
        ssl = context
    if stop_event is not None:
        if debug:
            warnings.simplefilter("default")
        warnings.warn(
            "stop_event will be removed from future versions.",
            DeprecationWarning,
        )

    self.error_handler.debug = debug
    self.debug = debug

    # Keyword arguments forwarded verbatim to `serve` / `serve_multiple`.
    server_settings = {
        "protocol": protocol,
        "request_class": self.request_class,
        "is_request_stream": self.is_request_stream,
        "router": self.router,
        "host": host,
        "port": port,
        "sock": sock,
        "ssl": ssl,
        "app": self,
        "signal": Signal(),
        "debug": debug,
        "request_handler": self.handle_request,
        "error_handler": self.error_handler,
        "request_timeout": self.config.REQUEST_TIMEOUT,
        "response_timeout": self.config.RESPONSE_TIMEOUT,
        "keep_alive_timeout": self.config.KEEP_ALIVE_TIMEOUT,
        "request_max_size": self.config.REQUEST_MAX_SIZE,
        "request_buffer_queue_size": self.config.REQUEST_BUFFER_QUEUE_SIZE,
        "keep_alive": self.config.KEEP_ALIVE,
        "loop": loop,
        "register_sys_signals": register_sys_signals,
        "backlog": backlog,
        "access_log": self.config.ACCESS_LOG,
        "websocket_max_size": self.config.WEBSOCKET_MAX_SIZE,
        "websocket_max_queue": self.config.WEBSOCKET_MAX_QUEUE,
        "websocket_read_limit": self.config.WEBSOCKET_READ_LIMIT,
        "websocket_write_limit": self.config.WEBSOCKET_WRITE_LIMIT,
        "graceful_shutdown_timeout": self.config.GRACEFUL_SHUTDOWN_TIMEOUT,
    }

    # -------------------------------------------- #
    # Register start/stop events
    # -------------------------------------------- #

    for event_name, settings_name, reverse in (
            ("before_server_start", "before_start", False),
            ("after_server_start", "after_start", False),
            ("before_server_stop", "before_stop", True),
            ("after_server_stop", "after_stop", True),
    ):
        listeners = self.listeners[event_name].copy()
        if reverse:
            # Stop-listeners fire in reverse registration order.
            listeners.reverse()
        # Prepend sanic to the arguments when listeners are triggered
        listeners = [partial(listener, self) for listener in listeners]
        server_settings[settings_name] = listeners

    if self.configure_logging and debug:
        logger.setLevel(logging.DEBUG)

    # Only the top-level process (not reloader children) prints the logo.
    if (self.config.LOGO
            and os.environ.get("SANIC_SERVER_RUNNING") != "true"):
        logger.debug(
            self.config.LOGO
            if isinstance(self.config.LOGO, str)
            else BASE_LOGO
        )

    if run_async:
        server_settings["run_async"] = True

    # Serve
    if host and port and os.environ.get("SANIC_SERVER_RUNNING") != "true":
        proto = "http"
        if ssl is not None:
            proto = "https"
        logger.info("Goin' Fast @ {}://{}:{}".format(proto, host, port))

    return server_settings