def bail_out(self, message, from_error=False):
    """Log a transport failure, or send an error response if still possible.

    :param message: Error message to display
    :param from_error: If the bail out was invoked while handling an
        exception scenario.
    :type message: str
    :type from_error: bool
    :return: None
    """
    transport_unusable = (
        from_error
        or self.transport is None
        or self.transport.is_closing()
    )
    if transport_unusable:
        peer = (
            "N/A"
            if self.transport is None
            else self.transport.get_extra_info("peername")
        )
        logger.error(
            "Transport closed @ %s and exception "
            "experienced during error handling",
            peer,
        )
        logger.debug("Exception:", exc_info=True)
    else:
        self.write_error(ServerError(message))
        logger.error(message)
def write_error(self, exception):
    """Render ``exception`` via the error handler and write it to the
    transport, then close the connection.

    :param exception: the exception to turn into an error response
    """
    # An error _is_ a response.
    # Don't throw a response timeout, when a response _is_ given.
    if self._response_timeout_handler:
        self._response_timeout_handler.cancel()
        self._response_timeout_handler = None
    response = None
    try:
        response = self.error_handler.response(self.request, exception)
        # The request may be None if the error occurred before parsing.
        version = self.request.version if self.request else '1.1'
        self.transport.write(response.output(version))
    except RuntimeError:
        # Transport already gone; only worth reporting in debug mode.
        if self._debug:
            logger.error('Connection lost before error written @ %s',
                         self.request.ip if self.request else 'Unknown')
    except Exception as e:
        # from_error=True prevents write_error from being re-entered.
        self.bail_out(
            "Writing error failed, connection closed {}".format(
                repr(e)), from_error=True
        )
    finally:
        # 408 (request timeout) responses are logged even when the
        # connection is not keep-alive.
        if self.parser and (self.keep_alive
                            or getattr(response, 'status', 0) == 408):
            self.log_response(response)
        # The connection is always closed after an error response.
        self.transport.close()
def response(self, request, exception):
    """Fetches and executes an exception handler and returns a response
    object

    :param request: Request
    :param exception: Exception to handle
    :return: Response object
    """
    handler = self.lookup(exception)
    response = None
    try:
        if handler:
            response = handler(request=request, exception=exception)
        if response is None:
            response = self.default(request=request, exception=exception)
    except Exception:
        self.log(format_exc())
        if self.debug:
            url = getattr(request, 'url', 'unknown')
            # The secondary failure may have come from self.default, in
            # which case no custom handler was found and `handler` is
            # None — handler.__name__ would raise AttributeError here
            # and mask the real error.
            handler_name = getattr(handler, '__name__', 'default')
            response_message = ('Exception raised in exception handler '
                                '"%s" for uri: "%s"\n%s')
            logger.error(response_message,
                         handler_name, url, format_exc())
            return text(response_message % (
                handler_name, url, format_exc()), 500)
        else:
            return text('An error occurred while handling an error', 500)
    return response
def bail_out(self, message, from_error=False):
    """Log the failure; if the transport is still usable, send an error
    response instead.

    :param message: Error message to display
    :param from_error: If the bail out was invoked while handling an
        exception scenario.
    :type message: str
    :type from_error: bool
    :return: None
    """
    # Guard against a transport that was never created or already torn
    # down: calling is_closing() on None would raise AttributeError and
    # mask the original error.
    if from_error or self.transport is None or self.transport.is_closing():
        peer = (self.transport.get_extra_info('peername')
                if self.transport is not None else 'N/A')
        logger.error("Transport closed @ %s and exception "
                     "experienced during error handling", peer)
        logger.debug('Exception:\n%s', traceback.format_exc())
    else:
        exception = ServerError(message)
        self.write_error(exception)
        logger.error(message)
async def _collect_response(sanic, loop):
    """Fire the test request, capture its response or exception, then
    stop the app so the test run can finish."""
    try:
        results[-1] = await self._local_request(
            method, uri, *request_args, **request_kwargs)
    except Exception as e:
        logger.error(
            'Exception:\n{}'.format(traceback.format_exc()))
        exceptions.append(e)
    self.app.stop()
async def auth_center_top_ip(request):
    """Central auth: top-10 IPs by 461-termination count for a domain
    within a time window.

    Expects a JSON body with "domain", "start_time" and "end_time".
    Responds with {"ip_list": <stringified list>, "return_code": 0|1|-1}
    where 1 means bad parameters and -1 an internal error.
    """
    return_code = 0
    data_list = []
    try:
        mongodb = request.app.CCM
        domain_info = request.json
        domain = domain_info.get("domain")
        start_time = domain_info.get("start_time")
        end_time = domain_info.get("end_time")
        if domain and start_time and end_time:
            # Pipeline: filter by domain and (exclusive) time range, sum
            # the 461 counters per (ip, area), sort descending, keep 10.
            query_condition = [{
                '$match': {
                    'domain': domain,
                    'msecRegion': {
                        '$gt': start_time,
                        '$lt': end_time
                    }
                }
            }, {
                '$group': {
                    '_id': {
                        'ip': '$ip',
                        'area': '$area'
                    },
                    'term_total_461': {
                        '$sum': '$term_total_461'
                    }
                }
            }, {
                '$sort': {
                    'term_total_461': -1
                }
            }, {
                '$limit': 10
            }]
            auth_center_ip = mongodb.auth_center_ip
            async for tmpdata in auth_center_ip.aggregate(query_condition):
                ip = tmpdata.get("_id").get("ip")
                area = tmpdata.get("_id").get("area")
                term_total_461 = tmpdata.get("term_total_461")
                data_list.append({
                    "ip": ip,
                    "area": area,
                    "term_total_461": term_total_461
                })
        else:
            return_code = 1  # parameter error
    except Exception as e:
        return_code = -1
        logger.error(f'auth center top ip: {e}')
    # NOTE(review): the list is serialized with str() instead of being
    # returned as a JSON array — presumably the client re-parses it;
    # confirm against the consumer.
    ret = {"ip_list": str(data_list), "return_code": return_code}
    return json(ret)
async def worker(redis_uri: str, scraper_url: str, pusher_url: str):
    """
    Async worker which processes URLs (calls API endpoints)

    :param redis_uri: Redis connection URI
    :param scraper_url: Scraper Endpoint for everything
    :param pusher_url: Push gateway for Elasticsearch
    """
    print("[+] Created worker")
    redis = await aioredis.create_connection(redis_uri)
    scraper = Scraper(scraper_url)
    pusher = Pusher(pusher_url)
    # Main loop: block on the Redis work queue forever.
    while True:
        # BLPOP blocks until an item is available; returns (key, value).
        _, queue_element = await redis.execute("BLPOP", "queue:items", "0")
        try:
            to_process = QueueElement(**ujson.loads(queue_element))
            logger.info(f"[+] Processing {to_process.url}")
            scraped = await scraper.scrape(to_process.url)
            # Build the document to index; queue metadata wins over
            # scraped values where both are present (e.g. title).
            to_add = Document(
                **{
                    "created": datetime.now().astimezone(),
                    "author": scraped.author or "",
                    "title": to_process.title or scraped.title,
                    "raw_content": scraped.raw_content,
                    "markdown_content": scraped.markdown_content,
                    "pdf": scraped.pdf,
                    "screenshot": scraped.screenshot,
                    "thumbnail": scraped.thumbnail,
                    "url": to_process.url,
                    "from_feed": to_process.from_feed,
                    "feed_url": to_process.feed_url,
                    "categories": to_process.categories,
                    "starred": to_process.starred,
                    "read_later": to_process.read_later
                })
            logger.info(f"[+] Done scraping {to_process.url}")
            pushed = await pusher.push(
                PusherRequest(indexes=to_process.indexes, docs=[to_add]))
            if pushed:
                logger.info(f"[+] Done processing {to_process.url}")
            else:
                logger.error(f"[+] Error pushing {to_process.url}")
        except Exception as e:
            logger.exception(e)
            # Add element to queue so we can try it again
            # Background: Request Rate limiting
            await redis.execute("RPUSH", "queue:items", queue_element)
async def cancelSellTrigger(self, trans_num, command, user_id, stock_symbol):
    """Ask the DBM service to cancel a pending sell trigger, then refresh
    the cached stocks for the user."""
    endpoint = '/triggers/sell/cancel'
    payload = {
        'transaction_num': trans_num,
        'user_id': user_id,
        'stock_symbol': stock_symbol,
    }
    results, status = await self.client.postRequest(
        f'{self.dbm_url}{endpoint}', payload)
    if status != 200 or results is None:
        logger.error(f'Failed during cancel sell trigger call to DBM: {results}')
        return "cancelSellTrigger failed on DBM call", 404
    await self.updateCacheStocks(user_id, stock_symbol)
    return "Trigger removed", 200
def validateRequest(data, schema):
    """Validate a JSON payload against a schema.

    Returns a (valid, error_message) pair; error_message is '' on success.
    """
    if not data:
        err_msg = 'Data is not Content-Type: application/json'
        logger.error(f'{err_msg} - {data}')
        return False, err_msg
    try:
        validate(instance=data, schema=schema)
    except ValidationError as err:
        logger.error(f'{err.message} - {data}')
        return False, err.message
    return True, ''
def is_authenticated(request: Request) -> bool:
    """Return True when the request carries a decodable, unexpired JWT."""
    token = extract_token(request)
    try:
        # This will attempt to decode the JWT and will also apply any known
        # claims that are on it. Since our example only uses exp,
        # the only claim to test against is expiration.
        jwt.decode(token, request.app.config.JWT_SECRET)
    except Exception as e:
        logger.error(e)
        return False
    return True
async def http1(self):
    """HTTP 1.1 connection handler.

    Loops over requests on a keep-alive connection: parses the header,
    dispatches to the app's request handler, drains any unread body, and
    exits when keep-alive ends or the connection leaves the IDLE stage.
    """
    while True:  # As long as connection stays keep-alive
        try:
            # Receive and handle a request
            self.stage = Stage.REQUEST
            self.response_func = self.http1_response_header
            await self.http1_request_header()
            self.request.conn_info = self.protocol.conn_info
            await self.protocol.request_handler(self.request)
            # Handler finished, response should've been sent
            if self.stage is Stage.HANDLER and not self.upgrade_websocket:
                raise ServerError("Handler produced no response")
            if self.stage is Stage.RESPONSE:
                await self.response.send(end_stream=True)
        except CancelledError:
            # Write an appropriate response before exiting
            e = self.exception or ServiceUnavailable("Cancelled")
            self.exception = None
            self.keep_alive = False
            await self.error_response(e)
        except Exception as e:
            # Write an error response
            await self.error_response(e)
        # Try to consume any remaining request body
        if self.request_body:
            # Only warn about unconsumed bodies on successful responses;
            # error responses commonly skip the body on purpose.
            if self.response and 200 <= self.response.status < 300:
                logger.error(f"{self.request} body not consumed.")
            try:
                async for _ in self:
                    pass
            except PayloadTooLarge:
                # We won't read the body and that may cause httpx and
                # tests to fail. This little delay allows clients to push
                # a small request into network buffers before we close the
                # socket, so that they are then able to read the response.
                await sleep(0.001)
                self.keep_alive = False
        # Exit and disconnect if no more requests can be taken
        if self.stage is not Stage.IDLE or not self.keep_alive:
            break
        # Wait for next request
        if not self.recv_buffer:
            await self._receive_more()
async def stream_callback(self, response: HTTPResponse) -> None:
    """
    Write the response over the ASGI transport, substituting a 500 from
    the app's error handler if the handler returned a non-HTTPResponse.
    """
    try:
        headers = [(str(name).encode("latin-1"),
                    str(value).encode("latin-1"))
                   for name, value in response.headers.items()]
    except AttributeError:
        # Not an HTTPResponse (no .headers mapping): build one from the
        # error handler instead.
        logger.error(
            "Invalid response object for url %s, "
            "Expected Type: HTTPResponse, Actual Type: %s",
            self.request.url,
            type(response),
        )
        exception = ServerError("Invalid response type")
        response = self.sanic_app.error_handler.response(
            self.request, exception)
        # NOTE(review): headers.items() yields str names, so the
        # comparison against the bytes literal b"Set-Cookie" is always
        # True and nothing is actually filtered here — confirm intent.
        headers = [(str(name).encode("latin-1"),
                    str(value).encode("latin-1"))
                   for name, value in response.headers.items()
                   if name not in (b"Set-Cookie", )]
    # Streaming responses compute their own length; everything else gets
    # an explicit content-length if the handler didn't set one.
    if "content-length" not in response.headers and not isinstance(
            response, StreamingHTTPResponse):
        headers += [(b"content-length",
                     str(len(response.body)).encode("latin-1"))]
    if response.cookies:
        # NOTE(review): `cookies` is built but never read afterwards; the
        # header values come straight from response.cookies.items().
        cookies = SimpleCookie()
        cookies.load(response.cookies)
        headers += [(b"set-cookie", cookie.encode("utf-8"))
                    for name, cookie in response.cookies.items()]
    await self.transport.send({
        "type": "http.response.start",
        "status": response.status,
        "headers": headers,
    })
    if isinstance(response, StreamingHTTPResponse):
        response.protocol = self.transport.get_protocol()
        await response.stream()
        await response.protocol.complete()
    else:
        await self.transport.send({
            "type": "http.response.body",
            "body": response.body,
            "more_body": False,
        })
async def api_distance(request):
    """Compute the LDA document distance between query params ``a`` and ``b``."""
    try:
        text_a = get_param(request, 'a')
        text_b = get_param(request, 'b')
        if text_a is None or text_b is None:
            return error_response('Invalid request')
        tokens_a = inference_engine_lda.tokenize(text_a)
        tokens_b = inference_engine_lda.tokenize(text_b)
        distance = inference_engine_lda.cal_doc_distance(tokens_a, tokens_b)
        return response(data=distance)
    except Exception as err:
        logger.error(err, exc_info=True)
        return error_response(str(err))
async def create_article_of_type(pool, type, text):
    """Insert an article row and return the command status string.

    :param pool: asyncpg connection pool
    :param type: article type
    :param text: article body
    :raises SanicException: wrapping any database error
    """
    # get connection from the pool
    async with pool.acquire() as connection:
        # get transaction
        async with connection.transaction():
            # Use server-side parameters instead of str.format(): the
            # original interpolation was a SQL-injection vector and also
            # produced invalid SQL for unquoted string values.
            sql = "INSERT INTO article (type, text) VALUES ($1, $2)"
            try:
                status = await connection.execute(
                    sql, type, text, timeout=util.timeout)
                return status
            except Exception as e:
                logger.error("{0}: sql = {1}".format(e, sql))
                raise SanicException(e)
async def __aenter__(self):
    """Open the connection and default cursor.

    :return: self on success, or None when the connection fails (callers
        must check for None before use).
    """
    msg = 'An {} Error raised when connect to {}'
    try:
        await self._connect()
        self.connect_cursor = await self.cursor(*self.default_cursors)
    except OperationalError:
        logger.error(msg.format('OperationalError', self.strict_url))
        return None
    except Exception:
        # The original bare ``except:`` also swallowed KeyboardInterrupt
        # and SystemExit; catch Exception so control-flow signals still
        # propagate.
        logger.error(msg.format("UnExpected", self.strict_url))
        return None
    return self
async def api_similarity_query(request):
    """Compute the LDA query/document similarity for params ``query`` and ``text``."""
    try:
        query_str = get_param(request, 'query')
        text_str = get_param(request, 'text')
        if query_str is None or text_str is None:
            return error_response('Invalid request')
        query_tokens = inference_engine_lda.tokenize(query_str)
        text_tokens = inference_engine_lda.tokenize(text_str)
        similarity = inference_engine_lda.cal_query_doc_similarity(
            query_tokens, text_tokens)
        return response(data=similarity)
    except Exception as err:
        logger.error(err, exc_info=True)
        return error_response(str(err))
def server_ping(app):
    """Synchronously ping the control server and process its reply."""
    # Handler for sync ping; `payload` avoids shadowing the json module.
    payload = get_ping_params(app)
    if payload["tls_created_at"] is None:
        payload.pop("tls_created_at")
    logger.info(f"Pinging control server! - {payload}")
    r = httpx.post(f"{app.api_server}/ping", verify=False, json=payload,
                   headers=default_ping_headers)
    if r.status_code != httpx.codes.OK:
        logger.error(f"Ping errored out! - {r.text}")
        return None
    return handle_ping(app, r.json())
async def handle(self, websocket: websockets.WebSocketCommonProtocol):
    """Receive and dispatch chat messages until the peer disconnects.

    :param websocket: the open websocket to read JSON messages from
    """
    try:
        while not websocket.closed:
            msg = await websocket.recv()
            loaded_msg = json.loads(msg)
            print('loaded_msg', loaded_msg, websocket)
            try:
                await self._chat(loaded_msg)
            except AttributeError:
                # logger.warn() is a deprecated alias; use warning().
                logger.warning(f"Invalid action requested full msg: {msg}")
    except websockets.ConnectionClosed as e:
        logger.error(f"<ConsumerHandler:handle>[error] {e}")
async def get_article_by_type(pool, type):
    """Fetch the first article row of the given type.

    :param pool: asyncpg connection pool
    :param type: article type to look up
    :return: the matching record, or None
    :raises SanicException: wrapping any database error
    """
    # get connection from the pool
    async with pool.acquire() as connection:
        # get transaction
        async with connection.transaction():
            # Parameterized query: the original str.format() interpolation
            # allowed SQL injection through ``type``.
            sql = "SELECT * FROM article WHERE type = $1"
            try:
                result = await connection.fetchrow(
                    sql, type, timeout=util.timeout)
                return result
            except Exception as e:
                logger.error("{0}: sql = {1}".format(e, sql))
                raise SanicException(e)
async def send_message(self, message: str):
    """Deliver ``message`` to the configured Telegram chat (HTML parse mode)."""
    required = ((TELEGRAM_TOKEN, 'TELEGRAM_TOKEN'),
                (TELEGRAM_CHAT_ID, 'TELEGRAM_CHAT_ID'))
    for value, name in required:
        if not value:
            logger.error(f'Required env var {name} must be set')
            return
    bot = telepot.Bot(token=TELEGRAM_TOKEN)
    bot.sendMessage(chat_id=TELEGRAM_CHAT_ID, text=message,
                    parse_mode='HTML')
async def batch_update_active_user_operate(user_id, actions, act_type, day):
    """Batch-update an active user's operation record detail.

    Returns the fetch result, or None if the update raised (the error is
    logged, not propagated).
    """
    try:
        async with db.conn.acquire() as con:
            stmt = await con.prepare(
                '''update "operation_record" set detail = $1 where user_id=$2 and type=$3 and day=$4'''
            )
            return await stmt.fetch(actions, user_id, act_type, day)
    except Exception as e:
        logger.error(
            f'update user: {user_id} actions: {actions} type: {act_type} error: {e}'
        )
async def member_retreat_group_callback(data):
    """Callback fired when a member leaves a group."""
    # Payload carries the user id, group id and the leave timestamp.
    group_code = data.get('group_id')
    logger.debug(f'new member retreat group, group_code:{group_code}')
    group_info = await get_group_info(group_code)
    if group_info:
        await update_join_and_retreat_redis_record(
            group_info['user_id'], group_code, 'retreat_group')
    else:
        logger.error(
            f'member_retreat_group_callback: not match group: {group_code}')
async def test(request):
    """Log at two levels, then render the URL built for ``request_test``."""
    logger.info('request info')
    logger.error('request error')
    # If the route has a ``name`` attribute, url_for's first argument is
    # that name rather than the handler function's name.
    url = app.url_for(
        'request_test',
        id=5,
        arg_one=['one', 'two'],
        arg_two=2,
        _anchor='anchor',
        _scheme='http',
        _external=True,
        _server='localhost:8000',
    )
    return html(f'<h1>{url}</h1>')
def build_avatar(clothing, size):
    """Compose avatar clothing layers onto a transparent canvas.

    Missing item images are logged and skipped. Returns PNG bytes.
    """
    canvas = Image.new('RGBA', (size, size), (0, 0, 0, 0))
    for item in clothing:
        if not item:
            continue  # skip empty slots, as filter(None, ...) did
        try:
            layer = Image.open(
                f'{avatar_item_directory}/{size}/{item}.png', 'r')
            canvas.paste(layer, (0, 0), layer)
        except FileNotFoundError as e:
            logger.error(e)
    buffer = io.BytesIO()
    canvas.save(buffer, 'PNG')
    return buffer.getvalue()
def _init(self):
    """Create the job directory and record initial job status/metadata.

    :raises RuntimeError: if the job directory already exists
    """
    path = self.job_dir
    if os.path.isdir(path):
        logger.error(f'job dir {path} already exists')
        raise RuntimeError(f'job {self.id} already exists')
    try:
        os.makedirs(path)
        self._update_status(MLJobStatus.INITIALIZED)
        self._save_meta()
    except OSError:
        logger.error(f'failed to create job dir {path}')
    else:
        logger.debug(f'successfully created the directory {path}')
async def wrapper(endpoint, session, request, *args, **kwargs):
    """Reject unauthenticated requests, then delegate to the wrapped handler."""
    token = request.token
    if not token:
        logger.error('No token provided in request')
        return response.json({'error': 'Unauthorized'}, status=401)
    user_id = util.check_jwt(token, endpoint.server.config.secret)
    if not user_id:
        logger.error('Invalid auth token')
        return response.json({'error': 'Unauthorized'}, status=401)
    kwargs['id_from_token'] = user_id
    # Call the request handler
    return await coro(endpoint, session, request, *args, **kwargs)
async def error_callback(exception):
    """Handle loss of the AMQP connection: clear the connection objects
    stored on the app, then retry ``connect_func`` up to
    RECONNECT_RESTART_COUNT times with a sleep between attempts.
    """
    app = connect_func.app
    prefix = connect_func.prefix
    transport_prop = connect_func.transport
    protocol_prop = connect_func.protocol
    channel_prop = connect_func.channel
    transport = getattr(connect_func.app, transport_prop, None)
    protocol = getattr(connect_func.app, protocol_prop, None)
    channel = getattr(connect_func.app, channel_prop, None)
    # This exception is also raised when channels are closed normally.
    # if isinstance(exception, (aioamqp.ChannelClosed, aioamqp.AmqpClosedConnection)):
    logger.warning(
        'error_callback_factory exception type {} channel {} func {} exception {}'
        .format(str(type(exception)), str(connect_func.channel),
                str(connect_func), str(exception)))
    # This callback may fire twice for a single connection; the check
    # below prevents handling it a second time. The bug is mentioned here:
    # (https://github.com/Polyconseil/aioamqp/issues/65#issuecomment-301737344)
    if not all([transport, protocol, channel]):
        return
    setattr(app, transport_prop, None)
    setattr(app, protocol_prop, None)
    setattr(app, channel_prop, None)
    logger.warning(
        'error_callback_factory AMQP connection "{}" is lost. Worker: {}'.
        format(prefix, os.getpid()))
    pending_count = 0
    while pending_count < settings.RECONNECT_RESTART_COUNT:
        try:
            logger.warning(
                'error_callback_factory new_log AMQP connection "%s" attempt %s for worker %s',
                prefix, pending_count, os.getpid())
            return await connect_func()
        except Exception:  # OSError, ConnectionTimeout
            logger.warning(
                'error_callback_factory new_log AMQP connection "%s" attempt %s FAILED for worker %s, sleep...',
                prefix, pending_count, os.getpid())
            pending_count += 1
            await asyncio.sleep(settings.RECONNECT_SLEEP_TIME)
    else:
        # while-else: reached only when every attempt failed (a success
        # returns from inside the loop).
        logger.error(
            'error_callback_factory new_log AMQP connection %s attempts are UNSUCCESSFUL for worker %s',
            prefix, os.getpid())
async def delete(self, request):
    """Uninstall the package.

    Validates the JSON body (package_type, version, name), locates the
    Package document scoped to the caller's organization/team for
    proprietary packages, removes the Test documents generated from its
    scripts, and deletes the unpacked script/library directories.
    """
    organization = request.ctx.organization
    team = request.ctx.team
    user = request.ctx.user
    data = request.json
    proprietary = js2python_bool(data.get('proprietary', False))
    package_type = data.get('package_type', None)
    if not package_type:
        return json(response_message(EINVAL, 'Field package_type is required'))
    version = data.get('version', None)
    if not version:
        return json(response_message(EINVAL, 'Field version is required'))
    package_name = data.get('name', None)
    if not package_name:
        return json(response_message(EINVAL, 'Field name is required'))
    # Proprietary packages are scoped to the caller's organization/team;
    # public ones are stored unscoped (organization/team of None).
    query = {'name': package_name, 'proprietary': proprietary,
             'package_type': package_type}
    if proprietary:
        query['organization'] = organization.pk
        query['team'] = team.pk if team else None
    else:
        query['organization'] = None
        query['team'] = None
    package = await Package.find_one(query)
    if not package:
        return json(response_message(ENOENT, 'Package not found'))
    scripts_root = await get_user_scripts_root(organization=organization,
                                               team=team)
    libraries_root = await get_back_scripts_root(organization=organization,
                                                 team=team)
    pypi_root = await get_test_store_root(proprietary=proprietary,
                                          team=team,
                                          organization=organization)
    package_path = pypi_root / package.package_name / \
        (await package.get_package_by_version(version)).filename
    # Delete the Test documents that were generated from scripts shipped
    # by this package.
    pkg_names = await get_internal_packages(package_path)
    for pkg_name in pkg_names:
        for script in await async_listdir(scripts_root / pkg_name):
            test = await Test.find_one(
                {'test_suite': os.path.splitext(script)[0],
                 'path': pkg_name,
                 'organization': organization.pk,
                 'team': team.pk if team else None})
            if test:
                await test.delete()
                # test.staled = True
                # await test.commit()
            else:
                logger.error(f'test not found for {script}')
    # Remove the unpacked script and library directories on disk.
    for pkg_name in pkg_names:
        if await async_exists(scripts_root / pkg_name):
            await async_rmtree(scripts_root / pkg_name)
        if await async_exists(libraries_root / pkg_name):
            await async_rmtree(libraries_root / pkg_name)
    return json(response_message(SUCCESS))
async def train_core_async(
    domain: Union[Domain, Text],
    config: Text,
    stories: Text,
    output: Text,
    train_path: Optional[Text] = None,
    fixed_model_name: Optional[Text] = None,
    additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
    """Trains a Core model.

    Args:
        domain: Path to the domain file.
        config: Path to the config file for Core.
        stories: Path to the Core training data.
        output: Output path.
        train_path: If `None` the model will be trained in a temporary
            directory, otherwise in the provided directory.
        fixed_model_name: Name of model to be stored.
        additional_arguments: Additional training parameters.

    Returns:
        If `train_path` is given it returns the path to the model archive,
        otherwise the path to the directory with the trained model files.
    """
    file_importer = TrainingDataImporter.load_core_importer_from_config(
        config, domain, [stories])
    domain = await file_importer.get_domain()
    # Training is skipped entirely when either the domain or the stories
    # are missing; both paths return None.
    if domain.is_empty():
        _logger.error(
            "Core training was skipped because no valid domain file was found. "
            "Please specify a valid domain using '--domain' argument or check if the provided domain file exists."
        )
        return None
    if not await file_importer.get_stories():
        _logger.error(
            "No stories given. Please provide stories in order to "
            "train a Rasa Core model using the '--stories' argument.")
        return
    return await _train_core_with_validated_data(
        file_importer,
        output=output,
        train_path=train_path,
        fixed_model_name=fixed_model_name,
        additional_arguments=additional_arguments,
    )
async def updater(self, request: Request, path: str = None):
    """Resolve the model addressed by ``path``, check the caller's
    permission for the addressed member, validate the JSON payload
    against the member's declared "consumes" model, and invoke it.

    Returns ErrorMessage / OkResult / OkListResult response objects.
    """
    if request.json is None:
        return ErrorMessage(message=f"Data must be provided", code=400)
    path_ = f"/{path or ''}"
    try:
        paper = await self.get_path(path_, self._models, 1)
        paper._table = request.app._table
    except NotFound as e:
        return ErrorMessage(message=e.args[0], code=404)
    # The member name is whatever follows the resolved model's own URL;
    # an empty remainder means the generic "update" member.
    url = paper.get_url()
    member = path_.replace(url, "")[1:] if url > "/" else path_[1:]
    if not member:
        member = "update"
    perm = await self._models.Permission.get(self._table,
                                             context=paper.type,
                                             name=member)
    token = AuthToken.get(request.headers)
    actor = await token.get_actor(self._table, self.config["JWT_SECRET"],
                                  self._models.User)
    # actor = await self._models.User.get(self._table, slug = "garito")
    if not perm or not await perm.allows(actor, paper):
        return ErrorMessage(message="Unauthorized", code=401)
    args = [request]
    # "index" is dispatched through the model's "call" introspection entry.
    _introspection = self._introspection[paper.type][
        "call" if member == "index" else member]
    if "actor" in _introspection:
        args.append(actor)
    consumes = _introspection.get("consumes", None)
    try:
        model = consumes(**request.json)
        args.append(model)
    except TypeError as e:
        return ErrorMessage(message=f"Validation error: {e}", code=400)
    try:
        result = await getattr(paper, member)(*args)
        if isinstance(result, Tree):
            result = result.to_plain_dict()
        return OkListResult(result=result, code=200) if isinstance(
            result, list) else OkResult(result=result, code=200)
    except Exception as e:
        # In DEBUG mode the full traceback is returned and logged line
        # by line; otherwise only str(e) is exposed.
        message = format_exception(*exc_info()) if request.app.config.get(
            "DEBUG", False) else str(e)
        for line in message:
            logger.error(line)
        return ErrorMessage(message=message, code=400)
async def render_image(
    request,
    key: str,
    slug: str = "",
    watermark: str = "",
    ext: str = settings.DEFAULT_EXT,
):
    """Render a meme image for the template ``key`` with text from ``slug``.

    Falls back to the "_error" template with an appropriate HTTP status:
    414 slug too long, 415 background download failed, 422 missing URL or
    invalid style, 404 unknown template.
    """
    status = 200
    # Compare the encoded byte length, since multi-byte characters count
    # toward URL limits.
    if len(slug.encode()) > 200:
        logger.error(f"Slug too long: {slug}")
        slug = slug[:50] + "..."
        template = models.Template.objects.get("_error")
        style = settings.DEFAULT_STYLE
        status = 414
    elif key == "custom":
        # Custom templates take their background from a query parameter.
        style = settings.DEFAULT_STYLE
        url = request.args.get("background") or request.args.get("alt")
        if url:
            template = await models.Template.create(url)
            if not template.image.exists():
                logger.error(f"Unable to download image URL: {url}")
                template = models.Template.objects.get("_error")
                status = 415
        else:
            logger.error("No image URL specified for custom template")
            template = models.Template.objects.get("_error")
            status = 422
    else:
        template = models.Template.objects.get_or_none(key)
        if not template or not template.image.exists():
            logger.error(f"No such template: {key}")
            template = models.Template.objects.get("_error")
            status = 404
        style = request.args.get("style") or request.args.get("alt")
        if style and style not in template.styles:
            logger.error(f"Invalid style for template: {style}")
            status = 422
    lines = utils.text.decode(slug)
    size = int(request.args.get("width", 0)), int(request.args.get("height", 0))
    await utils.meta.track_url(request, lines)
    # Image composition is CPU-bound, so it runs in a worker thread.
    path = await asyncio.to_thread(
        utils.images.save, template, lines, watermark, ext=ext, style=style,
        size=size
    )
    return await response.file(path, status)
async def get_base_images():
    """Return the most recently created private 'baseimage*' DigitalOcean image."""
    async with httpx.AsyncClient() as client:
        r = await client.get(
            "https://api.digitalocean.com/v2/images?private=true",
            headers=DIGITALOCEAN_COMMON_HEADERS,
        )
        if r.is_error:
            logger.info("request={}".format(r.request.__dict__))
            logger.error("response={}".format(r.json()))
            # TODO handle this error correctly
            return response.json(
                {"status": "error", "message": r.reason_phrase}, status=500)
        candidates = [
            image for image in r.json()['images']
            if image['name'].startswith('baseimage')
        ]
        # Newest by ISO-8601 creation timestamp (lexicographic order works).
        return max(candidates, key=lambda image: image['created_at'])
def __enter__(self):
    """Open the database connection and cursor.

    :return: this wrapper, ready for use
    :raises Exception: re-raises any connection failure after logging it.
        The original swallowed the error and implicitly returned None,
        so ``with ... as db`` bound None and crashed later with a
        confusing AttributeError.
    """
    try:
        self._conn = psycopg2.connect(user=USERNAME, password=PASSWORD,
                                      host=HOST, dbname=DB_NAME)
        self._cur = self._conn.cursor()
        log.debug("Opening connection and cursor...")
        return self
    except Exception as err:
        log.error(f"DB CONNECTION FAILED: {err}")
        raise
def _wait_and_register(self):
    """Block until comet's port is known, then register startup/config.

    Terminates the process (exit code 1) if registration fails, since
    the service cannot run unregistered.
    """
    # Wait until the port has been set (meaning comet is available)
    while not self.port:
        time.sleep(1)
    manager = Manager("localhost", self.port)
    try:
        manager.register_start(self.startup_time, __version__, self.config)
    except CometError as exc:
        logger.error(
            "Comet failed registering its own startup and initial config: {}"
            .format(exc))
        # raise SystemExit instead of the site-provided exit() helper,
        # which is not guaranteed to exist outside interactive sessions.
        raise SystemExit(1)
def original_lexer(text: str, debug: object = False) -> list:
    """Tokenize ``text`` via the lexer client.

    Returns [] for empty input, a reported error, or a missing items field.
    """
    text = strip_to_none(text)
    if text is None:
        return []
    result = get_client().lexer(text)
    error_msg = result.get('error_msg')
    if error_msg is not None:
        logger.error(f"{error_msg}: '{text}'")
        return []
    items = result.get('items')
    return [] if items is None else items
def default(self, request, exception):
    """Fallback exception handler: SanicException details, a debug
    traceback page, or the generic 500 page."""
    self.log(format_exc())
    if issubclass(type(exception), SanicException):
        status = getattr(exception, 'status_code', 500)
        headers = getattr(exception, 'headers', dict())
        return text('Error: {}'.format(exception),
                    status=status, headers=headers)
    if self.debug:
        html_output = self._render_traceback_html(exception, request)
        response_message = ('Exception occurred while handling uri: '
                            '"%s"\n%s')
        logger.error(response_message, request.url, format_exc())
        return html(html_output, status=500)
    return html(INTERNAL_SERVER_ERROR_HTML, status=500)
async def stream_response(self, response):
    """
    Streams a response to the client asynchronously. Attaches the
    transport to the response so the response consumer can write to
    the response as needed.
    """
    if self._response_timeout_handler:
        self._response_timeout_handler.cancel()
        self._response_timeout_handler = None
    try:
        # Snapshot keep_alive before streaming; RuntimeError below may
        # force it to False.
        keep_alive = self.keep_alive
        response.protocol = self
        await response.stream(
            self.request.version, keep_alive, self.keep_alive_timeout
        )
        self.log_response(response)
    except AttributeError:
        # The handler returned something that is not an HTTPResponse.
        logger.error(
            "Invalid response object for url %s, "
            "Expected Type: HTTPResponse, Actual Type: %s",
            self.url,
            type(response),
        )
        self.write_error(ServerError("Invalid response type"))
    except RuntimeError:
        # Transport dropped mid-stream; close instead of keeping alive.
        if self._debug:
            logger.error(
                "Connection lost before response written @ %s",
                self.request.ip,
            )
        keep_alive = False
    except Exception as e:
        self.bail_out(
            "Writing response failed, connection closed {}".format(repr(e))
        )
    finally:
        if not keep_alive:
            self.transport.close()
            self.transport = None
        else:
            # Arm the keep-alive timer for the next request on this
            # connection.
            self._keep_alive_timeout_handler = self.loop.call_later(
                self.keep_alive_timeout, self.keep_alive_timeout_callback
            )
            self._last_response_time = time()
            self.cleanup()
def write_response(self, response):
    """
    Writes response content synchronously to the transport.
    """
    if self._response_timeout_handler:
        self._response_timeout_handler.cancel()
        self._response_timeout_handler = None
    try:
        # Snapshot keep_alive before writing; RuntimeError below may
        # force it to False.
        keep_alive = self.keep_alive
        self.transport.write(
            response.output(
                self.request.version, keep_alive,
                self.keep_alive_timeout))
        self.log_response(response)
    except AttributeError:
        # The handler returned something that is not an HTTPResponse.
        logger.error('Invalid response object for url %s, '
                     'Expected Type: HTTPResponse, Actual Type: %s',
                     self.url, type(response))
        self.write_error(ServerError('Invalid response type'))
    except RuntimeError:
        # Transport dropped before the write completed.
        if self._debug:
            logger.error('Connection lost before response written @ %s',
                         self.request.ip)
        keep_alive = False
    except Exception as e:
        self.bail_out(
            "Writing response failed, connection closed {}".format(
                repr(e)))
    finally:
        if not keep_alive:
            self.transport.close()
            self.transport = None
        else:
            # Arm the keep-alive timer for the next request.
            self._keep_alive_timeout_handler = self.loop.call_later(
                self.keep_alive_timeout, self.keep_alive_timeout_callback)
            # NOTE(review): `current_time` appears to be a module-level
            # clock value updated elsewhere — confirm it is refreshed
            # before each response.
            self._last_response_time = current_time
            self.cleanup()
module = import_module(module_name) app = getattr(module, app_name, None) if not isinstance(app, Sanic): raise ValueError( "Module is not a Sanic app, it is a {}. " "Perhaps you meant {}.app?".format( type(app).__name__, args.module ) ) if args.cert is not None or args.key is not None: ssl = {"cert": args.cert, "key": args.key} else: ssl = None app.run( host=args.host, port=args.port, workers=args.workers, debug=args.debug, ssl=ssl, ) except ImportError as e: logger.error( "No module named {} found.\n" " Example File: project/sanic_server.py -> app\n" " Example Module: project.sanic_server.app".format(e.name) ) except ValueError: logger.exception("Failed to run app")