def start_app(title, base_path, debug: bool, host, port, reloader=False,  # pylint: disable=too-many-arguments
              unminified=False):
    """Configure the global Flask/Dash apps and serve them with werkzeug's run_simple.

    title: browser title of the Dash application.
    base_path: URL prefix; "/" mounts the app at the root.
    debug: enables the Flask/werkzeug debugger.
    host, port: bind address passed to run_simple.
    reloader: enables the werkzeug reloader and Dash dev tools.
    unminified: serve a local unminified plotly bundle instead of the CDN one.
    """
    global app, dash_app, dispatcher
    try:
        # Derive the plotly locale bundle name from the system locale, e.g. "fr".
        lang = locale.getlocale()[0].split("_")[0]
        locale.setlocale(locale.LC_TIME, ".".join(locale.getlocale()))  # make sure LC_TIME is set
        locale_url = [f"https://cdn.plot.ly/plotly-locale-{lang}-latest.js"]
    except (IndexError, locale.Error):
        # Locale unavailable: fall back to plotly's built-in default (English).
        locale_url = None
        logger.warning("Can't get language")
    if unminified:
        locale_url = ["assets/plotly-with-meta.js"]
    app = Flask(__name__)
    app.wsgi_app = ProxyFix(app.wsgi_app)
    app.config["DEBUG"] = debug
    if base_path == "/":
        application = DispatcherMiddleware(app)
        requests_pathname_prefix = None
    else:
        # Mount the real app under base_path; a dummy app answers everything else.
        application = DispatcherMiddleware(Flask('dummy_app'), {base_path: app})
        requests_pathname_prefix = base_path + "/"
    dash_app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP],
                         external_scripts=locale_url,
                         title=title,
                         server=app,
                         requests_pathname_prefix=requests_pathname_prefix)
    dash_app.enable_dev_tools(reloader)
    # keep this line
    import web.views  # pylint: disable=unused-import,import-outside-toplevel
    return run_simple(host, port, application, use_reloader=reloader, use_debugger=debug)
def __parsePage(self):
    """Core of the parsing module: filter the page for storage and extract its links.

    Pulls one downloaded page from the html queue, stores it in the data
    queue if the page filter accepts it, then (unless the crawl depth limit
    is reached) extracts every link from the page with lxml.
    """
    htmlNode = self.__htmlQueue.get()
    # Filter the page: decide whether it should be stored.
    if self.__myPageFilter.isGood(htmlNode.html):
        dataNode = HtmlModel(htmlNode.url, '', htmlNode.time, htmlNode.depth)
        self.__dataQueue.put(dataNode)
    # Depth control: stop following links once the configured depth is reached.
    if htmlNode.depth >= self.__depth:
        return
    linkList = []
    try:
        # Extract every link from the html page with lxml.
        doc = lxml.html.document_fromstring(htmlNode.html)
        doc.make_links_absolute(htmlNode.url)
        links = doc.iterlinks()
        for link in links:
            linkList.append(link[2])
    except Exception as e:  # FIX: Py2-only "except Exception, e" is a SyntaxError on Py3
        logger.warning('Parse page exception: %s', str(e))
        return
async def update(self):
    """Persist this model's current field values via its UPDATE statement.

    Logs a warning when the statement did not affect exactly one row.
    """
    # Collect field values in declaration order, primary key last.
    params = [self.get_value(name) for name in self.__fields__]
    params.append(self.get_value(self.__primary_key__))
    affected = await execute(self.__update__, params)
    if affected != 1:
        logger.warning(
            'failed update by primary key: affected rows: {}'.format(affected))
def __on_mqtt_message(self, client, userdata, msg):
    """paho-mqtt message callback: handle command responses and vehicle events.

    Any missing JSON key is caught at the bottom and logged with traceback.
    """
    try:
        logger.info("mqtt msg %s %s", msg.topic, msg.payload)
        data = json.loads(msg.payload)
        charge_info = None
        if msg.topic.startswith(MQTT_RESP_TOPIC):
            if "return_code" not in data:
                logger.debug("mqtt msg hasn't return code")
            elif data["return_code"] == "400":
                # Token expired: force a refresh so the next request succeeds.
                self.refresh_remote_token(force=True)
                logger.error("retry last request, token was expired")
            elif data["return_code"] == "300":
                logger.error('%s', data["return_code"])
            elif data["return_code"] != "0":
                logger.error('%s : %s', data["return_code"], data["reason"])
            if msg.topic.endswith("/VehicleState"):
                charge_info = data["resp_data"]["charging_state"]
                self.precond_programs[data["vin"]] = data["resp_data"]["precond_state"]["programs"]
        elif msg.topic.startswith(MQTT_EVENT_TOPIC):
            charge_info = data["charging_state"]
        # remaining_time set but rate 0: charge reported as started while the
        # status API still shows idle.
        if charge_info is not None and charge_info['remaining_time'] != 0 and charge_info['rate'] == 0:
            # fix a psa server bug where charge beginning without status api being properly updated
            logger.warning("charge begin but API isn't updated")
            sleep(60)
            self.wakeup(data["vin"])
    except KeyError:
        logger.exception("mqtt message:")
async def save(self):
    """Insert this model as a new row via its INSERT statement.

    Field values (or their defaults) are passed in declaration order with
    the primary key last.  Logs a warning when the statement did not affect
    exactly one row.
    """
    args = list(map(self.get_value_or_default, self.__fields__))
    args.append(self.get_value_or_default(self.__primary_key__))
    rows = await execute(self.__insert__, args)
    if rows != 1:
        # BUG FIX: the original referenced the undefined name `row` here,
        # raising NameError whenever the insert failed; use `rows`.
        logger.warning(
            'failed insert record: affected rows: {}'.format(rows))
def __staticDownload(self, url):
    """Static download using the requests module.

    Returns the page text, or "" when the page is too large or the request
    fails.  NOTE(review): this chunk is truncated — the trailing ``else:``
    branch continues outside the visible source.
    """
    # Skip pages already known to be too large.
    if self.__isBigPage(url):
        return ""
    user_agent = random.choice(USER_AGENTS)
    headers = {'User-Agent': user_agent}
    try:
        # logger.debug('Downloading url : %s', url)
        response = requests.get(url, timeout=CONNECT_TIME_OUT, headers=headers)
        if response.status_code == 200:
            try:
                # Re-check the size to handle redirected links.
                contentLen = response.headers['content-length']
                contentLen = int(contentLen)
                if contentLen > MAX_PAGE_SIZE:
                    logger.warning('This is redirect page, before URL : %s, after URL : %s', url, response.url)
                    return ""
            except Exception,e:  # Py2-only syntax, kept verbatim in this doc-only edit
                pass
            page = response.text
            # Check the actual size in case content-length disagrees with it.
            if len(page) > MAX_PAGE_SIZE:
                logger.warning('Downloaded big file, Length : %d , URL : %s', len(page), url)
                return ""
            return page
        else:
def __staticDownload(self, url):
    """Static download using the requests module.

    Returns the page text, or "" when the page is too large or the request
    fails.  NOTE(review): this chunk is truncated — the trailing ``else:``
    branch continues outside the visible source.
    """
    # Skip pages already known to be too large.
    if self.__isBigPage(url):
        return ""
    user_agent = random.choice(USER_AGENTS)
    headers = {'User-Agent': user_agent}
    try:
        # logger.debug('Downloading url : %s', url)
        response = requests.get(url, timeout=CONNECT_TIME_OUT, headers=headers)
        if response.status_code == 200:
            try:
                # Re-check the size to handle redirected links.
                contentLen = response.headers['content-length']
                contentLen = int(contentLen)
                if contentLen > MAX_PAGE_SIZE:
                    logger.warning(
                        'This is redirect page, before URL : %s, after URL : %s',
                        url, response.url)
                    return ""
            except Exception, e:  # Py2-only syntax, kept verbatim in this doc-only edit
                pass
            page = response.text
            # Check the actual size in case content-length disagrees with it.
            if len(page) > MAX_PAGE_SIZE:
                logger.warning(
                    'Downloaded big file, Length : %d , URL : %s',
                    len(page), url)
                return ""
            return page
        else:
def backup(conn):
    """Copy the given sqlite3 connection into info_backup.db.

    Requires Python >= 3.7 for sqlite3's Connection.backup; on older
    interpreters a warning is logged and nothing is written.
    """
    if sys.version_info >= (3, 7):
        target = sqlite3.connect("info_backup.db")
        conn.backup(target)
        target.close()
    else:
        logger.warning(
            "Can't do database backup, please upgrade to python 3.7")
def find_model_by_vin(vin):
    """Return the first CarModel matching the VIN, or a default "unknown" model."""
    matching = next((model for model in carmodels if model.match(vin)), None)
    if matching is not None:
        return matching
    logger.warning(
        "Can't get car model, please report an issue on github with your car model"
        " and first ten letter of your VIN")
    return CarModel("unknown", DEFAULT_BATTERY_POWER, DEFAULT_FUEL_CAPACITY)
def backup(conn):
    """Back up conn into a timestamped info_backup_<now>.db file.

    Requires Python >= 3.7 for sqlite3's Connection.backup; on older
    interpreters a warning is logged and nothing is written.
    """
    if sys.version_info >= (3, 7):
        # ":" (from the timestamp) is not allowed in filenames on some platforms.
        target_name = f"info_backup_{datetime.now()}.db".replace(":", "_")
        target = sqlite3.connect(target_name)
        conn.backup(target)
        target.close()
    else:
        logger.warning(
            "Can't do database backup, please upgrade to python 3.7")
def config_flask(title, base_path, debug: bool, host, port, reloader=False,  # pylint: disable=too-many-arguments
                 unminified=False, view="web.view.views"):
    """Build the global Flask + Dash apps and return kwargs for werkzeug's run_simple.

    title: Dash page title.
    base_path: URL prefix; "/" mounts the app at the root.
    debug: enables the Flask debugger and Dash dev tools.
    host, port: bind address returned for the caller to serve on.
    reloader: enables the werkzeug reloader.
    unminified: serve the local unminified plotly bundle instead of the CDN.
    view: dotted module path of the views module (reloaded on reconfigure).
    """
    global app, dash_app
    # A previous app instance means the view module must be reloaded below.
    reload_view = app is not None
    app = Flask(__name__)
    app.logger.addHandler(file_handler)
    try:
        # Derive the plotly locale bundle name from the system locale, e.g. "fr".
        lang = locale.getlocale()[0].split("_")[0]
        locale.setlocale(locale.LC_TIME, ".".join(
            locale.getlocale()))  # make sure LC_TIME is set
        if lang != "en":
            locale_url = [
                f"https://cdn.plot.ly/plotly-locale-{lang}-latest.js"
            ]
        else:
            # English is plotly's default locale; no extra script needed.
            locale_url = None
    except (IndexError, locale.Error):
        locale_url = None
        logger.warning("Can't get language")
    if unminified:
        locale_url = ["assets/plotly-with-meta.js"]
    app.config["DEBUG"] = debug
    if base_path == "/":
        application = DispatcherMiddleware(app)
        requests_pathname_prefix = None
    else:
        # Mount the real app under base_path; a dummy app answers everything else.
        application = DispatcherMiddleware(Flask('dummy_app'), {base_path: app})
        requests_pathname_prefix = base_path + "/"
    dash_app = DashCustom(external_stylesheets=[dbc.themes.BOOTSTRAP],
                          external_scripts=locale_url,
                          title=title,
                          server=app,
                          requests_pathname_prefix=requests_pathname_prefix,
                          suppress_callback_exceptions=True,
                          serve_locally=False)
    dash_app.enable_dev_tools(debug)
    app.wsgi_app = MyProxyFix(dash_app)
    # keep this line
    # BUG FIX: importlib.reload() requires a module object; the original
    # passed the dotted-path *string*, raising TypeError on reconfigure.
    view_module = importlib.import_module(view)
    if reload_view:
        importlib.reload(view_module)
    return {
        "hostname": host,
        "port": port,
        "application": application,
        "use_reloader": reloader,
        "use_debugger": debug
    }
def __dynamicDownload(self, url):
    """Dynamic download using splinter with the phantomjs driver (installed separately).

    Returns the rendered html, or "" on any failure (best effort).
    """
    try:
        # logger.debug('Downloading url : %s', url)
        browser = Browser('phantomjs')
        browser.visit(url)
        html = browser.html
        browser.quit()
        return html
    except Exception as e:  # FIX: Py2-only "except Exception, e" is a SyntaxError on Py3
        logger.warning('Download exception (dynamic): %s', str(e))
        return ""
def __isBigPage(self, url):
    """Check page size via a HEAD request; True when it exceeds MAX_PAGE_SIZE.

    Any failure (missing content-length header, network error) is treated
    as "not big" so the caller proceeds with the download.
    """
    try:
        response = requests.head(url)
        contentLen = int(response.headers['content-length'])
        if contentLen > MAX_PAGE_SIZE:
            logger.warning('This is big page, Length : %d, URL : %s',
                           contentLen, url)
            return True
        return False
    # FIX: Py2-only "except Exception,e" is a SyntaxError on Py3; the bound
    # name was unused, so drop it. Best-effort fallback is intentional here.
    except Exception:
        return False
def get_country(latitude, longitude, country_code_default):
    """Reverse-geocode a position to a country code, falling back to the default."""
    try:
        match = reverse_geocode.search([(latitude, longitude)])[0]
        return match["country_code"]
    except (UnicodeDecodeError, IndexError):
        logger.error("Can't find country for %s %s", latitude, longitude)
        # return None
        fallback = country_code_default
        logger.warning(
            "Using country of origin : %s (wrong co2 when traveling abroad)",
            fallback)
        return fallback
def __isBigPage(self, url):
    """Check page size via a HEAD request; True when it exceeds MAX_PAGE_SIZE.

    Any failure (missing content-length header, network error) is treated
    as "not big" so the caller proceeds with the download.
    """
    try:
        response = requests.head(url)
        contentLen = int(response.headers['content-length'])
        if contentLen > MAX_PAGE_SIZE:
            logger.warning('This is big page, Length : %d, URL : %s',
                           contentLen, url)
            return True
        return False
    # FIX: Py2-only "except Exception, e" is a SyntaxError on Py3; the bound
    # name was unused, so drop it. Best-effort fallback is intentional here.
    except Exception:
        return False
def makeDict(self):
    """Build a dict keyed by retention time from the input files.

    For each file in self.filelist, reads tab-separated lines (after the
    header-skipping reader), rounds column 1 to two decimals as the
    retention-time key, then initializes one zero-filled slot per file for
    every key, plus a 'Total_Peak' key.  Files with no data are logged and
    optionally removed from the list ('-n' option).
    """
    for file in self.filelist:
        removeFileFlag = True
        skipFlag = False
        # NOTE(review): lstrip strips a *character set*, not a prefix — this
        # can remove extra leading characters from the name; confirm intent.
        fileName = file.lstrip(self.inFolder).lstrip("/")
        with open(file, 'r', encoding='shift_jis')as rf:
            line = rf.readline()
            while line:
                # Skip header lines until the reader reports data start.
                while not skipFlag:
                    skipFlag, line = self.readSkiper(line, rf, skipFlag)
                line_list = line.split('\t')
                # Does this line carry data?
                try:
                    r_time = round(float(line_list[1]),2)
                    removeFileFlag = False
                    # Create the dict entry keyed by 'r_time' if it does not exist yet.
                    try:
                        var = self.r_time_dict[r_time]
                    except:  # NOTE(review): bare except — narrow to KeyError if possible
                        self.r_time_dict[r_time] = []
                except:  # NOTE(review): bare except — any parse error ends this file
                    break
                line = rf.readline()
        if removeFileFlag:
            logger.warning('{}よりデータが見つかりませんでした。'.format(fileName))
            self.removeFiles.append(file)
    logger.info("Create dict proc done.")
    # With the '-n' option, drop files that contained no data from the list.
    if self.args.not_incnull:
        for file in self.removeFiles:
            self.filelist.remove(file)
    # Build the sorted key list (without 'Total_Peak').
    self.key_list = sorted(list(self.r_time_dict.keys()))
    self.key_list = [str(name) for name in self.key_list]
    # Add 'Total_Peak' as an extra key.
    self.r_time_dict['Total_Peak'] = []
    # For every key (including 'Total_Peak'), create a zero-initialized
    # list with one slot per file.
    for key in self.r_time_dict.keys():
        self.r_time_dict[key] = [0]*len(self.filelist)
def control_charge_with_ack(self, charge: bool):
    """Send a charge command and verify the car acknowledged it.

    Returns True (and resets retry_count) when the car's charging status
    matches the requested state after MQTT_TIMEOUT; otherwise re-sends the
    command once and returns False.
    """
    self.psacc.charge_now(self.vin, charge)
    self.retry_count += 1
    # Give the command time to propagate before polling the status.
    sleep(ChargeControl.MQTT_TIMEOUT)
    vehicle_status = self.psacc.get_vehicle_info(self.vin)
    status = vehicle_status.get_energy('Electric').charging.status
    if status in (FINISHED, DISCONNECTED):
        # NOTE(review): only warns — execution still falls through to the
        # retry check below; confirm that is intended.
        logger.warning("Car state isn't compatible with charging %s", status)
    if (status == INPROGRESS) != charge:
        logger.warning("retry to control the charge of %s", self.vin)
        self.psacc.charge_now(self.vin, charge)
        self.retry_count += 1
        return False
    self.retry_count = 0
    return True
def add_altitude_to_db(conn):
    """Fill missing altitudes in the position table from api.opentopodata.org.

    Fetches distinct (latitude, longitude) pairs whose altitude is NULL in
    batches of max_pos_by_req, updates the table and commits per batch.
    Network or payload errors abort the loop with an error log.
    """
    max_pos_by_req = 100
    nb_null = conn.execute(
        "SELECT COUNT(1) FROM position WHERE altitude IS NULL "
        "and longitude IS NOT NULL AND latitude IS NOT NULL;").fetchone(
        )[0]
    if nb_null > max_pos_by_req:
        logger.warning(
            "There is %s to fetch from API, it can take some time", nb_null)
    try:
        while True:
            res = conn.execute(
                "SELECT DISTINCT latitude,longitude FROM position WHERE altitude IS NULL "
                "and longitude IS NOT NULL AND latitude IS NOT NULL LIMIT ?;",
                (max_pos_by_req, )).fetchall()
            nb_res = len(res)
            if nb_res > 0:
                logger.debug("add altitude for %s positions point", nb_null)
                nb_null -= nb_res
                # Build the "lat,long|lat,long|..." locations parameter.
                locations_str = "|".join(
                    str(line[0]) + "," + str(line[1]) for line in res)
                res = requests.get(
                    "https://api.opentopodata.org/v1/srtm30m",
                    params={"locations": locations_str})
                data = res.json()["results"]
                for line in data:
                    conn.execute(
                        "UPDATE position SET altitude=? WHERE latitude=? and longitude=?",
                        (line["elevation"], line["location"]["lat"],
                         line["location"]["lng"]))
                conn.commit()
                # FIX: compare against max_pos_by_req instead of the duplicated
                # literal 100 — a full batch means more rows may remain.
                if nb_res == max_pos_by_req:
                    sleep(1)  # API is limited to 1 call by sec
            else:
                break
    except (ValueError, KeyError, requests.exceptions.RequestException):
        logger.error("Can't get altitude from API")
def __on_mqtt_message(self, client, userdata, msg):
    """paho-mqtt message callback: handle command responses and vehicle events.

    Any missing JSON key is caught at the bottom and logged with traceback.
    """
    try:
        logger.info("mqtt msg received: %s %s", msg.topic, msg.payload)
        logger.debug("client: %s userdata: %s", client, userdata)
        data = json.loads(msg.payload)
        charge_info = None
        if msg.topic.startswith(MQTT_RESP_TOPIC):
            if "return_code" not in data:
                logger.debug("mqtt msg hasn't return code")
            elif data["return_code"] == "400":
                # Token expired: refresh it and replay the last request once.
                self._refresh_remote_token(force=True)
                if self.last_request:
                    logger.warning(
                        "last request is send again, token was expired")
                    last_request = self.last_request
                    # Clear before republishing so a second 400 can't loop forever.
                    self.last_request = None
                    self.publish(last_request, store=False)
                else:
                    logger.error(
                        "Last request might have been send twice without success"
                    )
            elif data["return_code"] != "0":
                logger.error('%s : %s', data["return_code"],
                             data.get("reason", "?"))
        elif msg.topic.startswith(MQTT_EVENT_TOPIC):
            charge_info = data["charging_state"]
            self.precond_programs[
                data["vin"]] = data["precond_state"]["programs"]
        if charge_info is not None and charge_info['remaining_time'] != 0:
            try:
                # The VIN is the last segment of the event topic.
                car = self.vehicles_list.get_car_by_vin(
                    vin=msg.topic.split("/")[-1])
                if car and car.status.get_energy(
                        'Electric').charging.status != INPROGRESS:
                    # fix a psa server bug where charge beginning without status api being properly updated
                    logger.warning("charge begin but API isn't updated")
                    sleep(60)
                    self.wakeup(data["vin"])
            except (IndexError, AttributeError, RateLimitException):
                logger.exception("on_mqtt_message:")
    except KeyError:
        logger.exception("on_mqtt_message:")
def serve_layout():
    """Build (once) and return the Dash layout; cached after the first call.

    Figure generation can fail when there is not enough data yet; in that
    case error placeholders are shown instead.
    """
    global cached_layout
    if cached_layout is None:
        logger.debug("Create new layout")
        try:
            figures.get_figures(trips, chargings)
            summary_tab = [dbc.Container(dbc.Row(id="summary-cards",
                                                 children=create_card(figures.SUMMARY_CARDS)),
                                         fluid=True),
                           dcc.Graph(figure=figures.consumption_fig, id="consumption_fig"),
                           dcc.Graph(figure=figures.consumption_fig_by_speed,
                                     id="consumption_fig_by_speed"),
                           figures.consumption_graph_by_temp]
            maps = dcc.Graph(figure=figures.trips_map, id="trips_map", style={"height": '90vh'})
            create_callback()
            range_slider = dcc.RangeSlider(
                id='date-slider',
                min=min_millis,
                max=max_millis,
                step=step,
                marks=marks,
                value=[min_millis, max_millis],
            )
        except (IndexError, TypeError, NameError):
            # Not enough data yet to build the figures: show error placeholders.
            summary_tab = figures.ERROR_DIV
            maps = figures.ERROR_DIV
            logger.warning("Failed to generate figure, there is probably not enough data yet",
                           exc_info_debug=True)
            range_slider = html.Div()
        data_div = html.Div([
            range_slider,
            html.Div([
                dbc.Tabs([
                    dbc.Tab(label="Summary", tab_id="summary", children=summary_tab),
                    dbc.Tab(label="Trips", tab_id="trips", id="tab_trips",
                            children=[html.Div(id="tab_trips_fig", children=figures.table_fig),
                                      dbc.Modal(
                                          [
                                              dbc.ModalHeader("Altitude"),
                                              dbc.ModalBody(html.Div(
                                                  id="tab_trips_popup_graph")),
                                              dbc.ModalFooter(
                                                  dbc.Button("Close", id="tab_trips_popup-close",
                                                             className="ml-auto")
                                              ),
                                          ],
                                          id="tab_trips_popup",
                                          size="xl",
                                      )
                                      ]),
                    dbc.Tab(label="Charge", tab_id="charge", id="tab_charge",
                            children=[figures.battery_table,
                                      dbc.Modal(
                                          [
                                              dbc.ModalHeader("Charging speed"),
                                              dbc.ModalBody(html.Div(
                                                  id="tab_battery_popup_graph")),
                                              dbc.ModalFooter(
                                                  dbc.Button("Close", id="tab_battery_popup-close",
                                                             className="ml-auto")
                                              ),
                                          ],
                                          id="tab_battery_popup",
                                          size="xl",
                                      )
                                      ]),
                    dbc.Tab(label="Map", tab_id="map", children=[maps]),
                    dbc.Tab(label="Control", tab_id="control",
                            children=dbc.Tabs(id="control-tabs",
                                              children=__get_control_tabs()))],
                    id="tabs", active_tab="summary", persistence=True),
                html.Div(id=EMPTY_DIV),
                html.Div(id=EMPTY_DIV + "1")
            ])])
        cached_layout = dbc.Container(fluid=True, children=[html.H1('My car info'), data_div])
    return cached_layout
                # NOTE(review): fragment — begins mid-way through
                # __staticDownload's inner try block.
                # Re-check the size to handle redirected links.
                contentLen = response.headers['content-length']
                contentLen = int(contentLen)
                if contentLen > MAX_PAGE_SIZE:
                    logger.warning('This is redirect page, before URL : %s, after URL : %s', url, response.url)
                    return ""
            except Exception,e:  # Py2-only syntax, kept verbatim in this doc-only edit
                pass
            page = response.text
            # Check the actual size in case content-length disagrees with it.
            if len(page) > MAX_PAGE_SIZE:
                logger.warning('Downloaded big file, Length : %d , URL : %s', len(page), url)
                return ""
            return page
        else:
            logger.warning('Download failed. status code : %d', response.status_code)
            return ""
    except Exception, e:
        logger.warning('Download exception (static): %s', str(e))
        return ""

    def __dynamicDownload(self, url):
        '''Dynamic download using splinter with the phantomjs driver (installed separately).

        NOTE(review): fragment — the except clause of this try continues
        outside the visible source.
        '''
        try:
            # logger.debug('Downloading url : %s', url)
            browser = Browser('phantomjs')
            browser.visit(url)
            html = browser.html
            browser.quit()
            return html
async def remove(self):
    """Delete this model's row by primary key via its DELETE statement.

    Logs a warning when the statement did not affect exactly one row.
    """
    params = [self.get_value(self.__primary_key__)]
    affected = await execute(self.__delete__, params)
    if affected != 1:
        logger.warning(
            'failed remove by primary key, affected rows: {}'.format(affected))
def _on_mqtt_disconnect(self, client, userdata, result_code):  # pylint: disable=unused-argument
    """paho-mqtt disconnect callback.

    Result code 1 means the broker rejected us (expired token): force a
    token refresh.  Any other code is logged with its mqtt description.
    """
    logger.warning("Disconnected with result code %d", result_code)
    if result_code != 1:
        logger.warning(mqtt.error_string(result_code))
    else:
        self._refresh_remote_token(force=True)
def serve_layout():
    """Build (once) and return the Dash layout; cached after the first call.

    Figure generation can fail when there is not enough data yet; in that
    case error placeholders are shown instead.
    """
    global cached_layout
    if cached_layout is None:
        logger.debug("Create new layout")
        fig_filter = Figure_Filter()
        try:
            range_slider = dcc.RangeSlider(
                id='date-slider',
                min=min_millis,
                max=max_millis,
                step=step,
                marks=marks,
                value=[min_millis, max_millis],
            )
            summary_tab = [
                dbc.Container(dbc.Row(id="summary-cards",
                                      children=create_card(
                                          figures.SUMMARY_CARDS)),
                              fluid=True),
                fig_filter.add_graph(dcc.Graph(id="consumption_fig"),
                                     "start_at", ["consumption_km"],
                                     figures.consumption_fig),
                fig_filter.add_graph(dcc.Graph(id="consumption_fig_by_speed"),
                                     "speed_average", ["consumption_km"] * 2,
                                     figures.consumption_fig_by_speed),
                fig_filter.add_graph(dcc.Graph(id="consumption_graph_by_temp"),
                                     "consumption_by_temp",
                                     ["consumption_km"] * 2,
                                     figures.consumption_fig_by_temp)
            ]
            maps = fig_filter.add_map(
                dcc.Graph(id="trips_map", style={"height": '90vh'}), "lat",
                ["long", "start_at"], figures.trips_map)
            fig_filter.add_table("trips", figures.table_fig)
            fig_filter.add_table("chargings", figures.battery_table)
            fig_filter.src = {
                "trips": trips.get_trips_as_dict(),
                "chargings": chargings
            }
            fig_filter.set_clientside_callback(dash_app)
            create_callback()
        # NOTE(review): NameError is listed twice in this tuple — harmless
        # but redundant.
        except (IndexError, TypeError, NameError, AssertionError, NameError):
            summary_tab = figures.ERROR_DIV
            maps = figures.ERROR_DIV
            logger.warning(
                "Failed to generate figure, there is probably not enough data yet",
                exc_info_debug=True)
            range_slider = html.Div()
            figures.battery_table = figures.ERROR_DIV
        data_div = html.Div([
            *fig_filter.get_store(),
            range_slider,
            html.Div([
                dbc.Tabs([
                    dbc.Tab(label="Summary", tab_id="summary",
                            children=summary_tab),
                    dbc.Tab(
                        label="Trips",
                        tab_id="trips",
                        id="tab_trips",
                        children=[
                            html.Div(id="tab_trips_fig",
                                     children=figures.table_fig),
                            dbc.Modal(
                                [
                                    dbc.ModalHeader("Altitude"),
                                    dbc.ModalBody(
                                        html.Div(id="tab_trips_popup_graph")),
                                    dbc.ModalFooter(
                                        dbc.Button("Close",
                                                   id="tab_trips_popup-close",
                                                   className="ml-auto")),
                                ],
                                id="tab_trips_popup",
                                size="xl",
                            )
                        ]),
                    dbc.Tab(label="Charge",
                            tab_id="charge",
                            id="tab_charge",
                            children=[
                                figures.battery_table,
                                dbc.Modal(
                                    [
                                        dbc.ModalHeader("Charging speed"),
                                        dbc.ModalBody(
                                            html.Div(
                                                id="tab_battery_popup_graph")),
                                        dbc.ModalFooter(
                                            dbc.Button(
                                                "Close",
                                                id="tab_battery_popup-close",
                                                className="ml-auto")),
                                    ],
                                    id="tab_battery_popup",
                                    size="xl",
                                )
                            ]),
                    dbc.Tab(label="Map", tab_id="map", children=[maps]),
                    dbc.Tab(label="Control", tab_id="control",
                            children=dbc.Tabs(id="control-tabs",
                                              children=__get_control_tabs()))
                ],
                    id="tabs",
                    active_tab="summary",
                    persistence=True),
                html.Div(id=EMPTY_DIV),
                html.Div(id=EMPTY_DIV + "1")
            ])
        ])
        cached_layout = dbc.Container(
            fluid=True, children=[html.H1('My car info'), data_div])
    return cached_layout
class Parser(WorkRequest):
    """Thread-pool work item implementing the parse stage of the crawler.

    Inherits from the pool's WorkRequest and implements the worker body:
    - filters html pages and stores those matching the keyword in the data queue
    - extracts links that pass the url filter and feeds them back to the url queue
    """

    def __init__(self, depth, startUrls, keyword, htmlQueue, dataQueue,
                 urlQueue, exitEvent):
        self.__htmlQueue = htmlQueue
        self.__dataQueue = dataQueue
        self.__urlQueue = urlQueue
        self.__keyword = keyword
        self.__depth = depth
        self.__startUrls = startUrls
        self.__exitEvent = exitEvent
        # pageFilter decides whether a page should be stored.
        self.__myPageFilter = PageFilter(keyword)
        # urlFilter decides whether a url should still be downloaded.
        self.__myUrlFilter = UrlFilter(self.__startUrls)

    def getRepeatSetSize(self):
        """Size of the duplicate-url set held by the url filter."""
        return self.__myUrlFilter.getRepeatSetSize()

    def __parsePage(self):
        """Core of the parsing module: store matching pages and enqueue new links."""
        htmlNode = self.__htmlQueue.get()
        # Filter the page: decide whether it should be stored.
        if self.__myPageFilter.isGood(htmlNode.html):
            dataNode = HtmlModel(htmlNode.url, '', htmlNode.time, htmlNode.depth)
            self.__dataQueue.put(dataNode)
        # Depth control: stop following links once the configured depth is reached.
        if htmlNode.depth >= self.__depth:
            return
        linkList = []
        try:
            # Extract every link from the html page with lxml.
            doc = lxml.html.document_fromstring(htmlNode.html)
            doc.make_links_absolute(htmlNode.url)
            links = doc.iterlinks()
            for link in links:
                linkList.append(link[2])
        except Exception as e:  # FIX: Py2-only "except Exception, e" is a SyntaxError on Py3
            logger.warning('Parse page exception: %s', str(e))
            return
        if len(linkList) == 0:
            logger.warning('Parse page success, but link is null: %s', htmlNode.url)
            return
        # Filter urls: de-duplicate, drop unwanted suffixes and off-site links.
        linkList = self.__myUrlFilter.urlfilter(linkList)
        # Feed the accepted urls back to the url queue.
        for url in linkList:
            urlNode = UrlModel(url, htmlNode.url, timestamp(), htmlNode.depth + 1)
            self.__urlQueue.put(urlNode)
# import pdb
import logging
from logging.handlers import RotatingFileHandler
import re

from mylogger import logger

# =================================
# logger setting
# ================================
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# logger = logging.getLogger(__name__)
logger.info("Start print log")
logger.debug("Do something")
logger.warning("Something maybe fail.")
logger.info("Finish")

# File handler writing INFO+ records to log.txt.
handler = logging.FileHandler("log.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

# Console handler mirroring INFO+ records to stderr.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# BUG FIX: the original called logger.addHandler(handler) a second time
# here, which duplicated every file log record.
logger.addHandler(console)

# Rotating file handler: 1 KiB per file, 3 backups kept.
rHandler = RotatingFileHandler("log.txt", maxBytes=1 * 1024, backupCount=3)
                        # NOTE(review): fragment — begins mid-way through a
                        # logger.warning call inside __staticDownload.
                        'This is redirect page, before URL : %s, after URL : %s',
                        url, response.url)
                    return ""
            except Exception, e:  # Py2-only syntax, kept verbatim in this doc-only edit
                pass
            page = response.text
            # Check the actual size in case content-length disagrees with it.
            if len(page) > MAX_PAGE_SIZE:
                logger.warning(
                    'Downloaded big file, Length : %d , URL : %s',
                    len(page), url)
                return ""
            return page
        else:
            logger.warning('Download failed. status code : %d',
                           response.status_code)
            return ""
    except Exception, e:
        logger.warning('Download exception (static): %s', str(e))
        return ""

    def __dynamicDownload(self, url):
        '''Dynamic download using splinter with the phantomjs driver (installed separately).

        NOTE(review): fragment — this except clause's body continues outside
        the visible source.
        '''
        try:
            # logger.debug('Downloading url : %s', url)
            browser = Browser('phantomjs')
            browser.visit(url)
            html = browser.html
            browser.quit()
            return html
        except Exception, e:
async def __call__(self, request): kw = None # 当传入的处理函数具有 关键字参数集 或 命名关键字参数 或 request参数 if self._has_var_kw_arg or self._has_named_kw_args or self._required_kw_args: if request.method == 'POST': if not request.content_type: return web.HTTPBadRequest('missing content-type') ct = request.content_type.lower() if ct.startswith('application/json'): params = await request.json() if not isinstance(params, dict): return web.HTTPBadRequest('JSON body must be object') kw = params elif ct.startswith(('application/x-www-form-urlencoded', 'multipart/form-data')): # 处理表单类型的数据,传入参数字典中 params = await request.post() kw = dict(**params) else: # 暂不支持处理其他正文类型的数据 return web.HTTPBadRequest( 'unsupported content-type: {}'.format( request.content_type)) if request.method == 'GET': qs = request.query_string if qs: # 获取URL中的请求参数,如 id=1 kw = dict() for k, v in parse.parse_qs(qs, True).items(): kw[k] = v[0] if kw is None: kw = dict(**request.match_info) else: if not self._has_var_kw_arg and self._named_kw_args: # remove all unamed kw: copy = {} for name in self._named_kw_args: if name in kw: copy[name] = kw[name] kw = copy # check named arg: for k, v in request.match_info.items(): if k in kw: logger.warning( 'duplicat arg name in named arg and kw args: {}'. format(k)) kw[k] = v if self._has_request_arg: kw['request'] = request # check required kw: if self._required_kw_args: # 收集无默认值的关键字参数 for name in self._required_kw_args: if not name in kw: return web.HTTPBadRequest( 'missing argument: {}'.format(name)) logger.warning('call with args: {}'.format(kw)) try: # 最后调用处理函数,并传入请求参数,进行请求处理 r = await self._func(**kw) return r except APIError as e: return dict(error=e.error, data=e.data, message=e.message)