def tornado_init():
    """Configure tornado's event-loop policy, DNS resolver and HTTP client.

    Installs ``AnyThreadEventLoopPolicy`` so asyncio event loops may be
    created on any thread, and on non-Windows platforms switches tornado to
    the c-ares DNS resolver and the libcurl-based HTTP client (pycurl /
    c-ares are not available on Windows).
    """
    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
    if platform.system() != "Windows":
        # BUG FIX: the module path was misspelled
        # ('tornado.playform.careresolver.CaresResolver'), which makes
        # Resolver.configure() raise ImportError when it imports the class.
        Resolver.configure('tornado.platform.caresresolver.CaresResolver')
        AsyncHTTPClient.configure(
            'tornado.curl_httpclient.CurlAsyncHTTPClient')
def main():
    """Process entry point: configure tornado, parse CLI options and start.

    Configures the c-ares DNS resolver and the libcurl HTTP client, declares
    the supported command-line options, optionally daemonizes the process,
    then calls ``start()``.
    """
    # using CaresResolver as DNS resolver
    # see also: http://www.tornadoweb.org/en/branch3.0/caresresolver.html
    Resolver.configure('tornado.platform.caresresolver.CaresResolver')
    # CurlAsyncHTTPClient to be used as httpclient subclass
    tornado.httpclient.AsyncHTTPClient.configure(
        "tornado.curl_httpclient.CurlAsyncHTTPClient")
    # Declare all supported command-line options (tornado.options.define);
    # defaults for host/port come from the 'development' server entry.
    define("port", default=8080, help="run on the given port", type=int)
    # define("address", default=get_listening_address(), help="run on the given address", type=str)
    define("daemon", default=settings.daemon, help="run as daemon", type=bool)
    define("webgate", default=settings.webgate, help="run on web gate mode", type=bool)
    define("log_to_file", default=False, help="log to file", type=bool)
    define("game_host", default=settings.game_servers['development']['host'],
           help="bind address", type=str)
    define("game_port", default=settings.game_servers['development']['port'],
           help="run on the given port", type=int)
    define("mode", default="development", help="default run in development mode", type=str)
    if '--daemon' not in sys.argv:
        # Foreground run: force tornado's logging to stderr.
        parse_command_line(sys.argv + ['--log_to_stderr'])
    else:
        # Daemon run: defer final option processing until after daemonizing.
        parse_command_line(final=False)
    game_server = settings.game_servers[options.mode]
    # NOTE(review): assert is stripped under `python -O`; an explicit check
    # may be preferable if an unknown --mode must always fail fast.
    assert (game_server)
    if options.daemon:
        # Imported lazily so non-daemon runs do not require these packages.
        from lockfile.pidlockfile import PIDLockFile
        import daemon
        main_log = open(settings.main_log_file, "a+")
        pid_file = os.path.join(settings.ROOT, "pids",
                                "%s-%s.pid" % (settings.APPNAME, options.port))
        if not daemon_running(pid_file):
            # Detach from the terminal; stdout/stderr go to the main log file.
            ctx = daemon.DaemonContext(
                stdout=main_log,
                stderr=main_log,
                pidfile=PIDLockFile(pid_file, threaded=False),
                working_directory=settings.ROOT,
            )
            ctx.open()
        settings.daemon = options.daemon
        options.log_to_file = True
        options.log_file_prefix = settings.tornado_log_prefix % options.port
        # Re-parse so tornado's logging setup picks up the log-file prefix.
        parse_command_line(['--log_file_prefix', options.log_file_prefix])
    start()
def callback(self,result): #回调函数 self.finish(result) @async_execute def dosomething(self,a,b,callback=None): #耗时操作 result='return' return result """ import functools from tornado.netutil import Resolver, ThreadedResolver from tornado.ioloop import IOLoop Resolver.configure('tornado.netutil.ThreadedResolver', num_threads=10) from torngas.exception import ArgumentError def async_execute(method): @functools.wraps(method) def decor(*args, **kwargs): def wrapper(callback): thread_resolver = ThreadedResolver() fut = thread_resolver.executor.submit(method, *args, **kwargs) fut.add_done_callback(lambda future: IOLoop.current().add_callback( functools.partial(callback, future.result()))) callback = kwargs.get('callback', None)
class TestRunner(object): Resolver.configure('tornado.netutil.ThreadedResolver') def __init__(self, pid=0, jid=0): self.default_headers = { 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36', 'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4', 'Cookie': '' } self.headers = dict() self.setting = SettingModule() self.common_func = CommonFunction() self.option_func = OptionsFunction() self.pid = pid self.jid = jid # 获取请求头 def get_headers(self, headers=''): flag, headers = self.common_func.convert_to_list_or_dict( string=headers, s_type='dict') self.headers = dict() for key in self.default_headers: self.headers[key] = self.default_headers[key] if flag: for key in headers.keys(): self.headers[key] = headers[key] else: headers = headers.splitlines() for header in headers: header = header.strip().split(sep=':', maxsplit=1) if len(header) == 2: name = header[0].strip() value = header[1].strip() self.headers[name] = value return self.headers # 加解密操作 @gen.coroutine def __do_crypt(self, do='encrypt', body=None, crypt_key=''): crypt_info = yield self.option_func.get_crypt_info(pid=self.pid, do=do) if not crypt_info: return body func = crypt_info.function try: if func: if crypt_key: flag, body = self.common_func.convert_to_list_or_dict( string=body, s_type='dict') if not flag and isinstance(body, str) and re.match( r'^.*=.*(&.*=.*)*$', body) is not None: body = self.common_func.url_query_decode(body) if isinstance(body, dict): flag = True if flag: source = body[crypt_key] if not isinstance(source, str): try: source = json.dumps(source, ensure_ascii=False) except Exception as e: log.warning(e) if isinstance(source, bytes): source = source.decode('utf8', errors='ignore') else: source = str(source) body[crypt_key] = func(source, crypt_info['key'], crypt_info['iv'], crypt_info['mode']) else: if not isinstance(body, str): try: body = 
json.dumps(body, ensure_ascii=False) except Exception as e: log.warning(e) if isinstance(body, bytes): body = body.decode('utf8', errors='ignore') else: body = str(body) body = func(body, crypt_info['key'], crypt_info['iv'], crypt_info['mode']) except Exception as e: log.warning(e) return body # 尝试将请求数据转换成字典 def __parse_body_arguments(self, body): flag, body = self.common_func.convert_to_list_or_dict(string=body, s_type='dict') if not flag and isinstance(body, str) and re.match( r'^.*=.*(&.*=.*)*$', body) is not None: body = self.common_func.url_query_decode(body) if isinstance(body, dict): flag = True if not flag: self.headers = self.headers if len( self.headers) != 0 else self.default_headers try: request_body = dict() if re.match(r'^.*=.*(&.*=.*)*$', body) is not None: parse_body_arguments( content_type=self.headers['Content-Type'], body=body, arguments=request_body, files=request_body, headers=self.headers) if len(request_body) > 0: body = request_body for key in body: if isinstance(body[key], list): body[key] = body[key][0] body[key] = body[key].decode('utf8', errors='ignore') flag = True except Exception as e: log.warning(e) flag = False return flag, body # 获取请求响应数据 @gen.coroutine def __get_body(self, body='', do='encrypt', name='', crypt_key=''): try: body = ast.literal_eval(body) except Exception as e: log.warning(e) if do == 'encrypt' and crypt_key != '': flag, body = self.__parse_body_arguments(body) if isinstance(body, dict): if name != 'none': if do == 'encrypt' and crypt_key == '': body = urlencode(body, encoding='utf8', quote_via=quote) body = yield self.__do_crypt(do=do, body=body, crypt_key=crypt_key) if isinstance(body, dict): if do == 'encrypt' and self.headers['Content-Type'].find( 'x-www-form-urlencoded') != -1: body = urlencode(body, encoding='utf8', quote_via=quote) else: body = json.dumps(body, ensure_ascii=False) else: if name != 'none': body = yield self.__do_crypt(do=do, body=body, crypt_key='') if isinstance(body, dict): body = 
json.dumps(body, ensure_ascii=False) return body # 解析Host配置 @gen.coroutine def __parse_host(self, url='', env='none'): urls = self.common_func.url_split(url=url) host = urls.host ips, total = yield self.setting.get_settings_list(pid=self.pid, s_type='host', name=host, pj_status=1, limit=None) for row in ips: if env != 'none': url = '{}://{}:{}{}'.format(urls.scheme, env, urls.port, urls.path) break elif row.status == 1: url = '{}://{}:{}{}'.format(urls.scheme, row.value, urls.port, urls.path) break self.headers['Host'] = urls.netloc return url # 解析接口返回值全字段检查配置 def __parse_check_key(self, check_key): keys = [] top = [] rex = re.compile( r'^\[\w+=\d\|(int|float|num|str|/.*/|date|time|datetime|list|dict)(' r',\w+=\d\|(int|float|num|str|/.*/|date|time|datetime|list|dict))*\]$' ) for row in check_key.splitlines(): row = row.strip().split(sep='.', maxsplit=1) if len(row) == 2: if re.match(rex, row[1]) is not None: deeps = [row[1]] else: deeps = row[1].split(sep='.', maxsplit=1) if re.match(r'^\[\d+\]$', deeps[0]) is None: top.append('{}=1|dict'.format(row[0])) else: top.append('{}=1|list'.format(row[0])) deep = '{}.'.format(row[0]) tmp_key = [] for i in range(len(row[1].split('.'))): if len(deeps) == 2 and re.match(rex, deeps[1]) is not None: deep += '{}.'.format(deeps[0]) for j in deeps[1][1:-1].split(','): tmp_key.append(j) break elif re.match(rex, deeps[0]) is not None: for j in deeps[0][1:-1].split(','): tmp_key.append(j) break deep += '{}.'.format(deeps[0]) if len(deeps) == 2: if re.match(rex, deeps[1]) is not None: deeps = deeps[1] else: deeps = deeps[1].split(sep='.', maxsplit=1) keys.append(dict(deep=deep[:-1], keys=tmp_key, result=dict())) else: top.append(row[0]) keys.append(dict(deep='top', keys=list(set(top)), result=dict())) return keys # 返回值全字段检查结果判断 def __check_key_result(self, body, check_key, key, k): check_key[k]['result'][key[0]] = True key[1] = key[1].split(sep='|', maxsplit=1) require = key[1][0] key_type = key[1][1] if isinstance(body[key[0]], str): 
body[key[0]] = body[key[0]].strip() if require == '1' and (body[key[0]] == '' or body[key[0]] is None): check_key[k]['result'][key[0]] = False elif body[key[0]] != '' and body[key[0]] is not None: if re.match(r'^/.*/$', key_type): rex = re.compile(key_type[1:-1]) if re.match(rex, body[key[0]]) is None: check_key[k]['result'][key[0]] = False elif key_type == 'int' and not isinstance(body[key[0]], int): check_key[k]['result'][key[0]] = False elif key_type == 'float' and not isinstance(body[key[0]], float): check_key[k]['result'][key[0]] = False elif key_type == 'num' and not isinstance( body[key[0]], int) and not isinstance(body[key[0]], float): check_key[k]['result'][key[0]] = False elif key_type == 'str' and not isinstance(body[key[0]], str): check_key[k]['result'][key[0]] = False elif key_type == 'list' and not isinstance(body[key[0]], list): check_key[k]['result'][key[0]] = False elif key_type == 'dict' and not isinstance(body[key[0]], dict): check_key[k]['result'][key[0]] = False elif key_type == 'date' and re.match(r'^\d{4}-\d{2}-\d{2}$', body[key[0]]) is None: check_key[k]['result'][key[0]] = False elif key_type == 'time' and re.match(r'^\d{2}:\d{2}:\d{2}$', body[key[0]]) is None: check_key[k]['result'][key[0]] = False elif key_type == 'datetime' and re.match( r'^\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}$', body[key[0]]) is None: check_key[k]['result'][key[0]] = False if not check_key[k]['result'][key[0]]: check_key[k]['result']['key_result'] = False return body, check_key # 解析响应内容 @gen.coroutine def __parse_response(self, response, name='', crypt_key='', checkpoint='', check_key='', correlation='', method='GET', url=''): body = response.body if response else '' request_body = response.request.body if response else '' if isinstance(body, bytes): body = body.decode('utf8', errors='ignore') if isinstance(request_body, bytes): request_body = request_body.decode('utf8', errors='ignore') headers_dict = dict(response.headers if response else '') headers = '' for key in 
headers_dict: headers += '{}: {}\r\n'.format(key, headers_dict[key]) request_headers_dict = dict( response.request.headers if response else '') request_headers = '' for key in request_headers_dict: request_headers += '{}: {}\r\n'.format(key, request_headers_dict[key]) if response: error = response.error if not response.error else str( response.error) else: error = str(httpclient.HTTPError(599, 'Timeout while connecting')) resp = dict( body=body, code=response.code if response else 599, effective_url=response.effective_url if response else '', error=error, headers=headers, request_headers=request_headers, request_body=request_body, reason=response.reason if response else 'Timeout while connecting', request_time=response.request_time if response else '', time_info=response.time_info if response else '', method=method, url=url) resp['body_decrypt'] = yield self.__get_body(body, do='decrypt', name=name, crypt_key=crypt_key) resp['checkpoint'] = [] if checkpoint != '': if re.match(r'^/.*/$', checkpoint) is not None: rex = re.compile(checkpoint[1:-1]) if re.findall(rex, resp['body_decrypt']): resp['checkpoint'].append( dict(result=True, checkpoint=checkpoint)) else: log.warning('检查点 {} 检查不通过'.format(checkpoint)) resp['checkpoint'].append( dict(result=False, checkpoint=checkpoint)) else: for check in checkpoint.split('|'): check = check.strip() if resp['body_decrypt'].find(check) != -1: resp['checkpoint'].append( dict(result=True, checkpoint=check)) else: log.warning('检查点 {} 检查不通过'.format(check)) resp['checkpoint'].append( dict(result=False, checkpoint=check)) resp['check_key'] = [] if check_key != '': check_key = self.__parse_check_key(check_key) for k in range(len(check_key)): body = resp['body_decrypt'] check_key[k]['result']['key_result'] = True if check_key[k]['deep'] == 'top': for key in check_key[k]['keys']: if re.match(r'^\[\d+\]$', key) is not None: key = int(key[1:-1]) flag, body = self.common_func.convert_to_list_or_dict( body, 'list') if not flag: 
log.warning('返回值全字段检查 被检查数据格式不是List, 无法继续') if body in ['', '[]', '{}', [], {}]: check_key[k]['result']['ERROR'] = '被检查数据为空' else: check_key[k]['result'][ 'ERROR'] = '被检查数据类型不是List' check_key[k]['result']['key_result'] = False continue try: body[key] except Exception as e: log.warning(e) check_key[k]['result'][ 'ERROR'] = '被检查数据格式不包含List类型' check_key[k]['result']['key_result'] = False continue else: flag, body = self.common_func.convert_to_list_or_dict( body, 'dict') if not flag and isinstance(body, str) and re.match( r'^.*=.*(&.*=.*)*$', body) is not None: body = self.common_func.url_query_decode(body) if isinstance(body, dict): flag = True if not flag: log.warning('返回值全字段检查 被检查数据格式不是Dict, 无法继续') if body in ['', '[]', '{}', [], {}]: check_key[k]['result']['ERROR'] = '被检查数据为空' else: check_key[k]['result'][ 'ERROR'] = '被检查数据类型不是Dict' check_key[k]['result']['key_result'] = False continue key = key.split(sep='=', maxsplit=1) if key[0] not in body.keys(): log.warning('返回值全字段检查 {} 检查不通过'.format( check_key[k]['keys'])) check_key[k]['result'][key[0]] = '字段不存在' check_key[k]['result']['key_result'] = False continue else: body, check_key = self.__check_key_result( body=body, check_key=check_key, key=key, k=k) else: keys = check_key[k]['keys'] flag, keys = self.common_func.convert_to_list_or_dict( keys, 'list') for key in check_key[k]['deep'].split('.'): if re.match(r'^\[\d+\]$', key) is not None: key = int(key[1:-1]) flag, body = self.common_func.convert_to_list_or_dict( body, 'list') if not flag: log.warning('返回值全字段检查 被检查数据格式不是List, 无法继续') if body in ['', '[]', '{}', [], {}]: check_key[k]['result']['ERROR'] = '被检查数据为空' else: check_key[k]['result'][ 'ERROR'] = '被检查数据类型不是List' check_key[k]['result']['key_result'] = False continue elif not isinstance(body, dict): flag, body = self.common_func.convert_to_list_or_dict( body, 'dict') if not flag and isinstance(body, str) and re.match( r'^.*=.*(&.*=.*)*$', body) is not None: body = self.common_func.url_query_decode(body) if 
isinstance(body, dict): flag = True if not flag: log.warning('返回值全字段检查 被检查数据格式不是Dict, 无法继续') if body in ['', '[]', '{}', [], {}]: check_key[k]['result']['ERROR'] = '被检查数据为空' else: check_key[k]['result'][ 'ERROR'] = '被检查数据类型不是Dict' check_key[k]['result']['key_result'] = False continue try: body = body[key] except Exception as e: log.warning(e) check_key[k]['result'][ 'ERROR'] = '被检查数据格式不包含List类型' check_key[k]['result']['key_result'] = False continue flag, body = self.common_func.convert_to_list_or_dict( body, 'dict') if not flag and isinstance(body, str) and re.match( r'^.*=.*(&.*=.*)*$', body) is not None: body = self.common_func.url_query_decode(body) if isinstance(body, dict): flag = True if not flag: log.warning('返回值全字段检查 被检查数据格式不是Dict, 无法继续') if body in ['', '[]', '{}', [], {}]: check_key[k]['result']['ERROR'] = '被检查数据为空' else: check_key[k]['result']['ERROR'] = '被检查数据类型不是Dict' check_key[k]['result']['key_result'] = False continue for key in keys: key = key.split(sep='=', maxsplit=1) if key[0] not in body.keys(): log.warning('返回值全字段检查 {} 检查不通过'.format( check_key[k]['keys'])) check_key[k]['result'][key[0]] = '字段不存在' check_key[k]['result']['key_result'] = False continue else: body, check_key = self.__check_key_result( body=body, check_key=check_key, key=key, k=k) resp['check_key'] = check_key resp['correlation'] = dict() if response and correlation != '': correlations = correlation.split('|') correlation = dict() for corr in correlations: body = resp['body_decrypt'] cor = corr.split(sep='=', maxsplit=1) key = cor[0].strip() c_type = 'string' words = cor[1].strip() if re.match(r'^int\(.+\)$', words) is not None: c_type = 1 words = words[4:-1] elif re.match(r'^float\(.+\)$', words) is not None: c_type = 1.00 words = words[6:-1] word = words.split('.') correlation[key] = word for k in word: if k == 'response_headers' and len(word) != 1: header = words.split(sep='.', maxsplit=1) if len(header) == 2: header_key = header[1] if re.match(r'^/.*/$', header_key) is not 
None: rex = re.compile(header_key[1:-1]) if isinstance(resp['headers'], bytes): resp['headers'] = resp['headers'].decode( 'utf8', errors='ignore') result = re.findall(rex, resp['headers']) if result: if isinstance(result[0], tuple): body = result[0][0] else: body = '' for row in result: row = row if row != '' else '\n' body += row else: body = '' else: body = response.headers.get(header_key) break if k == 'response_body' and len(word) == 1: if isinstance(resp['body'], bytes): resp['body'] = resp['body'].decode('utf8', errors='ignore') body = escape.xhtml_escape(resp['body']) break if re.match(r'^/.*/$', words) is not None: rex = re.compile(words[1:-1]) if isinstance(resp['body'], bytes): resp['body'] = resp['body'].decode('utf8', errors='ignore') result = re.findall(rex, resp['body']) if result: if isinstance(result[0], tuple): body = escape.xhtml_escape(result[0][0]) else: body = escape.xhtml_escape(result[0]) else: body = '' break if re.match(r'^\[\d+\]$', k) is not None: k = int(k[1:-1]) flag, body = self.common_func.convert_to_list_or_dict( body, 'list') if not flag: log.warning('响应数据格式不是List, 无法继续') body = '' break elif not isinstance(body, dict): flag, body = self.common_func.convert_to_list_or_dict( body, 'dict') if not flag and isinstance(body, str) and re.match( r'^.*=.*(&.*=.*)*$', body) is not None: body = self.common_func.url_query_decode(body) if isinstance(body, dict): flag = True if not flag: log.warning('响应数据格式不是Dict, 无法继续') body = '' break try: body = body[k] except Exception as e: log.warning(e) body = '' break correlation[key] = body try: if isinstance(c_type, int): correlation[key] = int( float(re.sub(r'[^\d+\.]', '', body))) elif isinstance(c_type, float): correlation[key] = float(re.sub(r'[^\d+\.]', '', body)) except Exception as e: log.warning(e) resp['correlation'] = correlation flag = True if resp['error'] is not None and resp['code'] != 302 and resp[ 'code'] != 301: flag = False if resp['check_key']: for line in resp['check_key']: if not 
line['result']['key_result']: flag = False break if resp['checkpoint']: for line in resp['checkpoint']: if not line['result']: flag = False break resp['test_result'] = flag return resp # 解析自定义参数配置 @gen.coroutine def __parse_custom_param(self, headers, body, correlation_result={}): correlation_result = dict(self.common_func.default_param(), **correlation_result) params = yield self.option_func.get_custom_param( pid=self.pid, correlation=correlation_result) if isinstance(headers, bytes): headers = headers.decode('utf8', errors='ignore') if isinstance(body, bytes): body = body.decode('utf8', errors='ignore') for key in correlation_result: if not isinstance(correlation_result[key], str): correlation_result[key] = str(correlation_result[key]) if headers.find(key) != -1: headers = headers.replace(key, correlation_result[key]) if body.find(key) != -1: body = body.replace(key, correlation_result[key]) for param in params: if headers.find('{%s}' % param['name']) != -1: if param['type'] == 'Function': func = param['function'] flag, body_dict = self.__parse_body_arguments(body) encrypt = yield self.option_func.get_crypt_info(self.pid) headers = headers.replace('{%s}' % param['name'], func(body_dict, params, encrypt)) else: headers = headers.replace('{%s}' % param['name'], param['value']) if body.find('{%s}' % param['name']) != -1: if param['type'] == 'Function': func = param['function'] flag, body_dict = self.__parse_body_arguments(body) encrypt = yield self.option_func.get_crypt_info(self.pid) body = body.replace('{%s}' % param['name'], func(body_dict, params, encrypt)) else: body = body.replace('{%s}' % param['name'], param['value']) return headers, body # 请求操作 @gen.coroutine def __request_url(self, url='', env='none', method='GET', body='', follow_redirects=True): test_client = httpclient.AsyncHTTPClient(max_clients=100) argv = dict(method=method, headers=self.headers, follow_redirects=False, request_timeout=600, validate_cert=False, raise_error=False) url = yield 
self.__parse_host(url=url, env=env) if method == 'GET': url = '{}?{}'.format(url, body) elif method == 'POST': argv['body'] = body try: log.info('开始请求接口 {}'.format(url)) response = yield test_client.fetch(url, **argv) except httpclient.HTTPError as e: response = e.response log.warning('请求接口 {} 异常# {}'.format( url, str(response.error if response else e))) # test_client.close() if response and response.code in [301, 302]: for cookie in response.headers.get_list('Set-Cookie'): self.headers['Cookie'] += '{};'.format(cookie) url = response.headers.get('Location') log.info('{} {} {}'.format(response.code, response.reason, url)) if response.reason.find('Moved') >= 0: response = yield self.__request_url( url=url, env=env, method=method, body=body, follow_redirects=follow_redirects) elif follow_redirects: response = yield self.__request_url( url=url, env=env, method='GET', follow_redirects=follow_redirects) log.info('结束请求接口 {}'.format(url)) return response # 生成测试报告 @gen.coroutine def __gen_report(self, job_name, test_suites, start_time, end_time): setting = yield self.setting.get_settings_by_range(pid=self.pid, s_type='log', start=start_time, end=end_time, sort=self.jid) if setting: elapsed_time = end_time - start_time start_time = time.strftime( '%Y-%m-%d %H:%M:%S', time.gmtime(float(start_time) + 3600 * 8)) end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(float(end_time) + 3600 * 8)) url, total = yield self.setting.get_settings_list(pid=self.pid, s_type='url', limit=None) overview = dict(name=job_name, start_time=start_time, end_time=end_time, elapsed_time=elapsed_time, total=total, total_test=len(test_suites), success_test=0, fail_test=0, success_rate='0.0 %', report_time=time.strftime('%Y-%m-%d %H:%M:%S')) for suite in test_suites: suite['total_test'] = len(suite['cases']) suite['success_test'] = 0 suite['fail_test'] = 0 suite['report'] = [] suite['result'] = True for row in setting: res = json.loads(row.value) for suite in test_suites: if suite['suite_id'] == 
row.status: if res['test_result']: suite['success_test'] += 1 else: suite['fail_test'] += 1 suite['result'] = False suite['report'].append(res) for suite in test_suites: suite['result'] = suite['result'] if suite[ 'success_test'] + suite['fail_test'] == suite[ 'total_test'] else False if suite['result']: overview['success_test'] += 1 else: overview['fail_test'] += 1 overview['success_rate'] = '{:.2f} %'.format( overview['success_test'] / overview['total_test'] * 100) result = dict(overview=overview, report=test_suites) report_id, msg = yield self.setting.add_setting( pid=self.pid, s_type='report', sort=self.jid, name=time.time(), value=json.dumps(result, ensure_ascii=False)) return report_id else: return False # 执行单接口测试 @gen.coroutine def run_test(self, url='', label='', comment='', method='GET', headers='', body='', crypt='none', encrypt_content='', no_test=False, check_key='', decrypt_content='', checkpoint='', env='none', correlation='', correlation_result={}, follow_redirects=True): headers, body = yield self.__parse_custom_param( headers=headers, body=body, correlation_result=correlation_result) self.get_headers(headers) request_body = yield self.__get_body(body=body, do='encrypt', name=crypt, crypt_key=encrypt_content) if no_test: return request_body resp = yield self.__request_url(url=url, env=env, method=method, body=request_body, follow_redirects=follow_redirects) response = yield self.__parse_response(response=resp, name=crypt, crypt_key=decrypt_content, checkpoint=checkpoint, check_key=check_key, correlation=correlation, method=method, url=url) response['label'] = label response['comment'] = comment if not resp: response['request_body'] = request_body for key in self.headers: response['request_headers'] += '{}: {}\r\n'.format( key, self.headers[key]) elif method == 'GET': response['request_body'] = request_body log.info('响应返回 {}'.format(json.dumps(response, ensure_ascii=False))) return response # 执行多接口测试 @gen.coroutine def __run_all_test(self, job_name, 
test_suites): if not test_suites: return False start_time = time.time() correlation_result = dict() for test in test_suites: cases = yield self.setting.get_settings_by_ids(test['cases']) for url_info in cases: try: url_info = json.loads(url_info.value) resp = yield self.run_test( correlation_result=correlation_result, **url_info) yield self.setting.add_setting(pid=self.pid, s_type='log', name=time.time(), sort=self.jid, value=json.dumps( resp, ensure_ascii=False), status=test['suite_id']) correlation_result = dict(correlation_result, **resp['correlation']) except Exception as e: log.error(e) end_time = time.time() report_id = yield self.__gen_report(job_name, test_suites, start_time, end_time) return report_id # 执行排队任务 @gen.coroutine def run_job(self): job = yield self.setting.get_setting_by_id(sid=self.jid) if job: start_time = time.time() job_value = json.loads(job.value) start_strftime = time.strftime( '%Y-%m-%d %H:%M:%S', time.gmtime(float(start_time) + 3600 * 8)) job_value['overview']['start_time'] = start_strftime yield self.setting.edit_setting(sid=job.id, status=2, value=json.dumps( job_value, ensure_ascii=False)) test_suites = [] suites = yield self.setting.get_settings_by_ids( job_value['testsuite']) for suite in suites: tests = dict(suite_id=suite.id, suite_name=suite.name, cases=json.loads(suite.value)['cases']) test_suites.append(tests) result = yield self.__run_all_test(job_value['name'], test_suites) if result: status = 3 job_value['lastreport'] = result else: status = 5 end_time = time.time() elapsed_time = end_time - start_time end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(float(end_time) + 3600 * 8)) job_value['overview']['end_time'] = end_time job_value['overview']['elapsed_time'] = elapsed_time name = float(job.name) if job_value['overview']['cycle_time'] != 0: while name < time.time(): name += job_value['overview']['cycle_time'] job_value['overview']['plan_time'] = time.strftime( '%Y-%m-%d %H:%M:%S', time.gmtime(name + 3600 * 8)) 
status = 0 yield self.setting.edit_setting(sid=job.id, status=status, name=name, value=json.dumps( job_value, ensure_ascii=False)) yield Mail().send_html_report(result)
def set_resolver(resolver):
    """Install *resolver* as the implementation used by tornado's Resolver.

    The argument is passed straight to ``Resolver.configure``; per tornado's
    configurable-class API it may be a resolver subclass or its dotted
    import path.
    """
    Resolver.configure(resolver)
# NOTE: Python 2 source (uses the `print` statement below).
from tornado import netutil, ioloop, iostream, httpclient, stack_context
from tornado.netutil import Resolver
from functools import partial
import socket
import ctypes
import os, sys
import traceback
import re
import socket_error

# Use the c-ares based resolver for non-blocking DNS lookups.
Resolver.configure('tornado.platform.caresresolver.CaresResolver')
resolver = Resolver()

# Bind the raw splice(2) syscall via ctypes for fd-to-fd data transfer.
# Linux-only: the libc name 'libc.so.6' is hard-coded.
libc = ctypes.cdll.LoadLibrary('libc.so.6')
splice_syscall = libc.splice
SPLICE_F_NONBLOCK = 0x02  # splice flag: do not block
SPLICE_F_MOVE = 0x01      # splice flag: hint to move pages instead of copying

# Transfer chunk size: the atomic pipe-buffer size if pathconf reports it,
# otherwise fall back to the system page size.
try:
    chunk_size = os.pathconf('.', os.pathconf_names['PC_PIPE_BUF'])
except:  # NOTE(review): bare except also swallows KeyboardInterrupt etc.
    print 'pathconf failed'
    import resource
    chunk_size = resource.getpagesize()

# Literal prefixes — presumably used to recognize incoming HTTP request
# lines (GET / OPTIONS); confirm against the code that consumes them.
header = 'GET /'
opt_header = 'OPTIO'

def make_response(status,
def _config_resolver(cls, num_threads=10):
    """Switch tornado's DNS resolution to a thread-pool resolver.

    :param num_threads: size of the resolver's worker-thread pool
        (default 10).
    """
    # Local import: tornado is only needed when this is actually called.
    from tornado.netutil import Resolver
    Resolver.configure(
        'tornado.netutil.ThreadedResolver', num_threads=num_threads)
    # Class-level flag — presumably checked by callers so configuration
    # happens only once; confirm at the call sites.
    cls._resolver_configured = True
def _config_resolver(cls, num_threads=10):
    """Configure tornado to resolve DNS names on a thread pool.

    :param num_threads: number of worker threads for the resolver
        (default 10).
    """
    # Imported here rather than at module level to defer the tornado
    # dependency until first use.
    from tornado.netutil import Resolver
    Resolver.configure('tornado.netutil.ThreadedResolver',
                       num_threads=num_threads)
    # Record that configuration ran — NOTE(review): assumed to guard against
    # repeat configuration elsewhere; verify where it is read.
    cls._resolver_configured = True
@async_execute def dosomething(self,a,b,callback=None): #耗时操作 result='return' return result """ import functools from tornado.netutil import Resolver, ThreadedResolver from tornado.ioloop import IOLoop from ..settings_manager import settings from multiprocessing import cpu_count Resolver.configure('tornado.netutil.ThreadedResolver', num_threads=settings.THREADS_NUM if 'THREADS_NUM' in settings else cpu_count()) def async_execute(fn): """ 新版tornado已有这个函数的实现,但是需要在class中绑定self.ioloop和self.executor,不太方便,这个版本消除了这个问题 """ thread_resolver = ThreadedResolver() @functools.wraps(fn) def wrapper(self, *args, **kwargs): callback = kwargs.pop("callback", None) future = thread_resolver.executor.submit(fn, self, *args, **kwargs) if callback: IOLoop.current().add_future(
# NOTE: Python 2 source (`print` statement below).
from tornado import netutil, ioloop, iostream, httpclient, stack_context
from tornado.netutil import Resolver
from functools import partial
import socket
import ctypes
import os, sys
import traceback
import re
import socket_error

# Switch tornado to the c-ares resolver so DNS lookups are asynchronous.
Resolver.configure('tornado.platform.caresresolver.CaresResolver')
resolver = Resolver()

# Expose the splice(2) syscall through libc for direct fd-to-fd transfers.
# Hard-codes 'libc.so.6', so this only works on Linux.
libc = ctypes.cdll.LoadLibrary('libc.so.6')
splice_syscall = libc.splice
SPLICE_F_NONBLOCK = 0x02  # non-blocking splice
SPLICE_F_MOVE = 0x01      # prefer moving pages over copying

# Chunk size for transfers: atomic pipe buffer size when available,
# otherwise the page size.
try:
    chunk_size = os.pathconf('.', os.pathconf_names['PC_PIPE_BUF'])
except:  # NOTE(review): bare except hides unrelated failures too.
    print 'pathconf failed'
    import resource
    chunk_size = resource.getpagesize()

# Request-line prefixes — presumably matched against incoming data to
# detect GET / OPTIONS requests; confirm where they are used.
header = 'GET /'
opt_header = 'OPTIO'

def make_response(status, body, content_type='text/plain', extra_headers=None, length=True):
    # Build a raw HTTP/1.1 response string, starting with the status line.
    res = 'HTTP/1.1 %s\r\n' % status
elif host in self.mapping: host = self.mapping[host] if not isinstance(port, int): assert os.path.exists( port ), 'Here ONLY support UNIX socket file exists !' # eg.: port = '/www/ctrl.unixsock' raise Return([(socket.AF_UNIX, port)]) result = self.resolve_addr(host, port, family) # result = yield self.io_loop.run_in_executor(None, self.resolve_addr, host, port, family) raise Return(result) # This configure the default `Resolver` to `tornado.tcpclient.TCPClient` Resolver.configure(CachedOverrideResolver) def sock_options(sock, blocking=False, keepalive=True, conn_timeout=None, rcvtimeo=None, sndtimeo=None): sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1 if keepalive else 0) sock.setblocking(blocking) def setopt(s, op, val): try: s.setsockopt(socket.SOL_SOCKET, op, struct.pack('i', val))
import logging import logging.config import random import time from functools import partial from tornado.tcpclient import TCPClient from tornado.netutil import Resolver, ThreadedResolver from tornado.ioloop import IOLoop from tornado.log import gen_log from tornado.iostream import IOStream from tornado import stack_context from dubbo import Request, Response Resolver.configure('tornado.netutil.ThreadedResolver') class DubboClient(object): def __init__(self, host, port, io_loop=None): self.io_loop = io_loop or IOLoop.current() self.callbacks = {} self.queue = deque() self.conn = DubboConnection(host, port, self.io_loop) def fetch(self, dubbo_request, callback): self.conn.fetch(dubbo_request, callback) class DubboConnection(object):
#!/usr/bin/env python # encoding: utf-8 from copy import copy from tornado.web import Cookie from tornado.gen import coroutine, Return from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError, HTTPResponse from tornado.httputil import HTTPHeaders from tornado.ioloop import IOLoop from . import PY2 try: import pycares from tornado.netutil import Resolver from tornado.platform.caresresolver import CaresResolver Resolver.configure(CaresResolver) except ImportError: pass if PY2: b = unicode iteritems = lambda x: x.iteritems() else: b = str iteritems = lambda x: x.items() try: import ujson as json except ImportError:
@async_execute def dosomething(self,a,b,callback=None): #耗时操作 result='return' return result """ import functools from tornado.netutil import Resolver, ThreadedResolver from tornado.ioloop import IOLoop from ..settings_manager import settings from multiprocessing import cpu_count #设置接口的实现类的类型,同时设置设置实现类的参数 Resolver.configure('tornado.netutil.ThreadedResolver', num_threads=settings.THREADS_NUM if 'THREADS_NUM' in settings else cpu_count()) def async_execute(fn): """ 新版tornado已有这个函数的实现,但是需要在class中绑定self.ioloop和self.executor,不太方便,这个版本消除了这个问题 其实在 concurrent.py 中 run_on_executor 装饰器方法实现了类似的功能。 """ thread_resolver = ThreadedResolver() @functools.wraps(fn) def wrapper(self, *args, **kwargs): callback = kwargs.pop("callback", None) future = thread_resolver.executor.submit(fn, self, *args, **kwargs)
#回调函数 self.finish(result) @async_execute def dosomething(self,a,b,callback=None): #耗时操作 result='return' return result """ import functools from tornado.netutil import Resolver, ThreadedResolver from tornado.ioloop import IOLoop Resolver.configure('tornado.netutil.ThreadedResolver', num_threads=10) def async_execute(fn): """ 新版tornado已有这个函数的实现,但是需要在class中绑定self.ioloop和self.executor,不太方便,这个版本消除了这个问题 """ thread_resolver = ThreadedResolver() @functools.wraps(fn) def wrapper(self, *args, **kwargs): callback = kwargs.pop("callback", None) future = thread_resolver.executor.submit(fn, self, *args, **kwargs) if callback: IOLoop.current().add_future(future, lambda future: callback(future.result())) return future
class PageHandler(BaseHandler): executor = ThreadPoolExecutor(100) Resolver.configure('tornado.netutil.ThreadedResolver') @authenticated_async @gen.coroutine def get(self, op='reports', page=1, limit=10): if not isinstance(limit, int): limit = int(limit) else: limit = limit if self.limit == '' else int(self.limit) try: page = int(page) except Exception as e: log.warning(e) page = 1 page = 1 if int(page) <= 0 else int(page) lists = [] total_page = 1 if op == 'reports': res, total = yield self.setting.get_settings_list( s_type='page_report') total_page = int(math.ceil(total / limit)) for row in res: report = json.loads(row.value) report['sid'] = row.id lists.append(munchify(report)) elif op not in ['checklinks', 'checkpages']: self.redirect('/admin/page-test') return hosts = '' if platform.system().lower() == 'Windows'.lower(): host_path = 'C:\\Windows\\System32\\drivers\\etc\\hosts' else: host_path = '/etc/hosts' if os.path.exists(host_path): with open(host_path, 'r', encoding='utf8') as fp: hosts = fp.read() log.info('读取Hosts {} 配置成功'.format(host_path)) argv = dict(title='页面监控', op=op, lists=lists, hosts=hosts, total_page=total_page, page=page, limit=limit) argv = dict(self.argv, **argv) self.render('admin/page.html', **argv) @authenticated_async @gen.coroutine def post(self, op='checklinks', do=''): if op == 'checklinks': name = self.get_argument('name', default='') links = self.get_argument('url', default='') exclude_links = self.get_argument('exclude_url', default='') check_all = self.get_argument('check_all', default='off') device = self.get_argument('device', default='PC') cookie = self.get_argument('cookie', default='') check_all = True if check_all == 'on' else False hosts = self.get_argument('hosts', default='') viewport = '1920x1080' if device == 'iPhone': user_agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1' viewport = '1080x1920' elif device == 'Android': 
user_agent = 'Mozilla/5.0 (Linux; Android 5.1.1; Nexus 6 Build/LYZ28E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Mobile Safari/537.36' viewport = '1080x1920' else: user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36' check_links = [] check_history = [] url_history = [] error_links = '' for link in links.splitlines(): link = link.strip() if link == '': continue if self.common_func.check_string(string=link, str_type='url'): check_links.append(link) else: error_links += '{}</br>'.format(link) for link in exclude_links.splitlines(): link = link.strip() if link == '': continue if self.common_func.check_string(string=link, str_type='url'): check_history.append(link) url_history.append(link) else: error_links += '{}</br>'.format(link) if error_links != '': msg = dict(result=False, msg='链接 {} 格式不正确, 请检查'.format(error_links)) elif check_links: if platform.system().lower() == 'Windows'.lower(): host_path = 'C:\\Windows\\System32\\drivers\\etc\\hosts' else: host_path = '/etc/hosts' if os.path.exists(host_path): with open(host_path, 'w', encoding='utf8') as fp: fp.write(hosts) log.info('写入Hosts {} 配置 {} 成功'.format( host_path, hosts)) result = yield self.__check_links(name=name, check_links=check_links, check_history=check_history, url_history=url_history, user_agent=user_agent, cookie=cookie, viewport=viewport, check_all=check_all) msg = dict(result=True, msg='/{}'.format(result)) add_logs = AddLogs(tool_code='tool_checklinks', operate_ip=self.request.remote_ip) add_logs.add_logs() else: msg = dict(result=False, msg='入口链接不能为空') yield self.return_json(msg) elif op == 'reports' and do == 'delete': sid = int(self.get_argument('id', default=0)) flag, msg = yield self.setting.delete_setting(sid=sid) if flag: msg = dict(result=True, msg=msg) else: msg = dict(result=False, msg=msg) yield self.return_json(msg=msg) return # 链接检查 @gen.coroutine def __check_links(self, name, check_links, 
check_history=list(), url_history=list(), user_agent='', cookie='', viewport='1920x1080', check_all=False): log.info('页面链接检查开始') start_time = time.time() html_dir = 'static/results/html/{}'.format(time.strftime('%Y%m%d')) if not os.path.exists(html_dir): os.makedirs(html_dir) html_index = '{}/index.{}.html'.format(html_dir, time.time()) text = '' for url in check_links: if url in url_history: log.info('链接 {} 已检查过或已排除'.format(url)) continue next_links = [] results = [] code = 599 reason = 'Unknown' title = '' try: links, next_links, resp = yield self.__get_links( url=url, user_agent=user_agent, cookie=cookie, check_all=check_all) code = resp.code if resp else code reason = resp.reason if resp else reason body = resp.body if not isinstance( resp.body, bytes) else resp.body.decode('utf8', errors='ignore') title = re.findall( r'<title>(.*?)</title>', body if isinstance(body, str) else str(body)) title = title[0] if title else '' for link in links: if link['href'] in check_history: log.info('链接 {} 已检查过或已排除'.format(link['href'])) continue res = yield self.__hot_link_check(url=link['href'], user_agent=user_agent, cookie=cookie) res['text'] = link['text'] results.append(res) check_history.append(link['href']) load_results = yield self.__phantomas_check( url=url, user_agent=user_agent, cookie=cookie, viewport=viewport) except Exception as e: log.error('链接 {} 检查出现异常#{}'.format(url, e)) load_results = dict(url=url, screenshot='', har='', results=dict()) url_history.append(url) for link in next_links: if link not in check_links: check_links.append(link) html_file = yield self.__gen_report(results_dir=html_dir, results=results, load_results=load_results) log.info('生成链接 {} 扫描报告 /{}'.format(url, html_file)) text += '<li><b>[{} {}]</b> {} >> <a href="/{}" target="_blank">{}</a></li>'.format( code, reason, title, html_file, url) html = """ <!doctype html> <html lang="zh-CN"> <head> <meta charset="UTF-8"> <title>页面链接扫描报告</title> <style type="text/css"> h2, p {text-align: center;} li 
{padding-bottom: 5px;} </style> </head> <body> <div> <h2>页面链接扫描报告</h2> <div> <ul> %s </ul> </div> </div> </body> </html> """ % text with open(html_index, 'w') as fp: fp.write(html) log.info('生成页面链接扫描报告 /{}'.format(html_index)) log.info('页面链接检查结束') report_time = time.time() elapsed_time = report_time - start_time name = name.strip() if name.strip() != '' else '页面链接检查_{}'.format( time.strftime('%Y%m%d%H%M%S')) reports = dict(name=name, total_links=len(check_links), elapsed_time=elapsed_time, report_url='/{}'.format(html_index), start_time=time.strftime( '%Y-%m-%d %H:%M:%S', time.gmtime(int(start_time) + 3600 * 8)), report_time=time.strftime( '%Y-%m-%d %H:%M:%S', time.gmtime(int(report_time) + 3600 * 8))) pid = yield self.project.get_project(name='页面链接检查') if not pid: pid, msg = yield self.project.add_project(name='页面链接检查') else: pid = pid.id self.setting.add_setting(pid=pid, s_type='page_report', name=name, value=json.dumps(reports, ensure_ascii=False)) return html_index # 请求操作 @gen.coroutine def __request_url(self, url, user_agent='', cookie=''): page_client = httpclient.AsyncHTTPClient(max_clients=100) headers = {'User-Agent': user_agent, 'Cookie': cookie} argv = dict(method='GET', headers=headers, request_timeout=120, validate_cert=False, raise_error=False) try: response = yield page_client.fetch(url, **argv) except httpclient.HTTPError as e: log.warning('请求页面 {} 异常, 异常信息 {}'.format( url, str(e.response.error if e.response else e))) response = None # page_client.close() if response and response.code in [301, 302]: for cookies in response.headers.get_list('Set-Cookie'): cookie += '{};'.format(cookies) url = response.headers.get('Location') response = yield self.__request_url(url=url, user_agent=user_agent, cookie=cookie) return response # 获取页面链接 @gen.coroutine def __get_links(self, url, user_agent='', cookie='', check_all=False): response = yield self.__request_url(url, user_agent, cookie) common_func = CommonFunction() url_split = common_func.url_split(url) host = 
url_split.host scheme = url_split.scheme netloc = '{}://{}'.format(scheme, url_split.netloc) links = [] if response and response.body: body = response.body if not isinstance( response.body, bytes) else response.body.decode( 'utf8', errors='ignore') body = body if isinstance(body, str) else str(body) link_a = re.findall( r'<a.*?href=[\'"](.*?)[\'"].*?title=[\'"](.*?)[\'"].*?>(.*?)</a>', body) link_b = re.findall( r'<a.*?title=[\'"](.*?)[\'"].*?href=[\'"](.*?)[\'"].*?>(.*?)</a>', body) link_c = re.findall(r'<a.*?href=[\'"](.*?)[\'"].*?>(.*?)</a>', body) link_d = re.findall(r'data-href=[\'"](.*?)[\'"]', body) link_e = re.findall(r'data-url=[\'"](.*?)[\'"]', body) for link in link_a: href = link[0].strip() if re.match(r'^(http|https):', href) is not None: pass elif re.match(r'^//', href) is not None: href = scheme + href elif re.match(r'^/', href) is not None: href = netloc + href elif re.match(r'^(javascript|#)', href.lower()) is not None: continue else: href = '{}/{}'.format(url, href) title = link[1] text = link[2] if link[2] != '' else link[1] log.info('获取到链接: {} {}'.format(text, href)) link = dict(href=href, title=title, text=text) if link not in links: links.append(link) for link in link_b: href = link[1].strip() if re.match(r'^(http|https):', href) is not None: pass elif re.match(r'^//', href) is not None: href = scheme + href elif re.match(r'^/', href) is not None: href = netloc + href elif re.match(r'^(javascript|#)', href.lower()) is not None: continue else: href = '{}/{}'.format(url, href) title = link[0] text = link[2] if link[2] != '' else link[0] log.info('获取到链接: {} {}'.format(text, href)) link = dict(href=href, title=title, text=text) if link not in links: links.append(link) for link in link_c: href = link[0].strip() if re.match(r'^(http|https):', href) is not None: pass elif re.match(r'^//', href) is not None: href = scheme + href elif re.match(r'^/', href) is not None: href = netloc + href elif re.match(r'^(javascript|#)', href.lower()) is not None: 
continue else: href = '{}/{}'.format(url, href) title = '' text = link[1] log.info('获取到链接: {} {}'.format(text, href)) link = dict(href=href, title=title, text=text) if link not in links: links.append(link) for link in link_d + link_e: href = link[0].strip() if re.match(r'^(http|https):', href) is not None: pass elif re.match(r'^//', href) is not None: href = scheme + href elif re.match(r'^/', href) is not None: href = netloc + href elif re.match(r'^(javascript|#)', href.lower()) is not None: continue else: href = '{}/{}'.format(url, href) title = '' text = '' log.info('获取到链接: {} {}'.format(text, href)) link = dict(href=href, title=title, text=text) if link not in links: links.append(link) next_links = [] if check_all: for link in links: cur_host = common_func.url_split(link['href']).host if cur_host == host and link['href'] not in next_links: next_links.append(link['href']) return links, next_links, response # 跳转链接检查 @gen.coroutine def __hot_link_check(self, url, user_agent='', cookie=''): results = dict(url='', code=599, reason='Unknown', title='') response = yield self.__request_url(url, user_agent, cookie) if response: body = response.body if not isinstance( response.body, bytes) else response.body.decode( 'utf8', errors='ignore') title = re.findall(r'<title>(.*?)</title>', body if isinstance(body, str) else str(body)) title = title[0] if title else '' results = dict(url=url, code=response.code, reason=response.reason, title=title) log.info('检查链接: {} {} {} {}'.format(response.code, response.reason, title, url)) return results # Phantomas页面动态链接检查 @run_on_executor def __phantomas_check(self, url, user_agent='', cookie='', runs=1, viewport='1920x1080'): if platform.system().lower() == 'Windows'.lower(): exec_path = 'phantomas.cmd' else: exec_path = 'phantomas' screenshot_dir = 'static/results/screenshot' har_dir = 'static/results/har' if not os.path.exists(screenshot_dir): os.mkdir(screenshot_dir) if not os.path.exists(har_dir): os.mkdir(har_dir) screenshot_file = 
'{}/{}.png'.format(screenshot_dir, time.time()) har_file = '{}/{}.har'.format(har_dir, time.time()) param = dict(url=url, exec_path=exec_path, timeout=120, scroll=True, viewport=viewport, screenshot=screenshot_file, har=har_file, ignore_ssl_errors=True, runs=runs, user_agent=user_agent, cookie=cookie) try: results = Phantomas(**param).run() except Exception as e: log.error('{} 加载失败#{}'.format(url, e)) results = None check_results = dict(ajaxRequests='', cssCount='', jsCount='', jsonCount='', imageCount='', webfontCount='', videoCount='', iframesCount='', otherCount='', domains='', notFound='') if results: ajaxRequests = results.get_offenders( 'ajaxRequests' ) if runs == 1 else results.runs[0].get_offenders('ajaxRequests') cssCount = results.get_offenders( 'cssCount') if runs == 1 else results.runs[0].get_offenders( 'cssCount') jsCount = results.get_offenders( 'jsCount') if runs == 1 else results.runs[0].get_offenders( 'jsCount') jsonCount = results.get_offenders( 'jsonCount') if runs == 1 else results.runs[0].get_offenders( 'jsonCount') imageCount = results.get_offenders( 'imageCount') if runs == 1 else results.runs[0].get_offenders( 'imageCount') webfontCount = results.get_offenders( 'webfontCount' ) if runs == 1 else results.runs[0].get_offenders('webfontCount') videoCount = results.get_offenders( 'videoCount') if runs == 1 else results.runs[0].get_offenders( 'videoCount') iframesCount = results.get_offenders( 'iframesCount' ) if runs == 1 else results.runs[0].get_offenders('iframesCount') otherCount = results.get_offenders( 'otherCount') if runs == 1 else results.runs[0].get_offenders( 'otherCount') domains = results.get_offenders( 'domains') if runs == 1 else results.runs[0].get_offenders( 'domains') notFound = results.get_offenders( 'notFound') if runs == 1 else results.runs[0].get_offenders( 'notFound') log.info('ajaxRequests: {}'.format(ajaxRequests)) log.info('cssCount: {}'.format(cssCount)) log.info('jsCount: {}'.format(jsCount)) log.info('jsonCount: 
{}'.format(jsonCount)) log.info('imageCount: {}'.format(imageCount)) log.info('webfontCount: {}'.format(webfontCount)) log.info('videoCount: {}'.format(videoCount)) log.info('iframesCount: {}'.format(iframesCount)) log.info('otherCount: {}'.format(otherCount)) log.info('domains: {}'.format(domains)) log.info('notFound: {}'.format(notFound)) check_results = dict(ajaxRequests=ajaxRequests, cssCount=cssCount, jsCount=jsCount, jsonCount=jsonCount, imageCount=imageCount, webfontCount=webfontCount, videoCount=videoCount, iframesCount=iframesCount, otherCount=otherCount, domains=domains, notFound=notFound) return dict(url=url, screenshot=screenshot_file, har=har_file, results=check_results) # 生成测试报告 @run_on_executor def __gen_report(self, results_dir, results, load_results): html_file = '{}/{}.html'.format(results_dir, time.time()) results = sorted(results, key=lambda r: r['code'], reverse=True) s = '' if 'notFound' in load_results['results'].keys( ) and load_results['results']['notFound']: s += '<li><h4>notFound ({})</h4><ul>'.format( len(load_results['results']['notFound'])) for res in load_results['results']['notFound']: s += """ <li>|--- {}</li> """.format(escape.xhtml_escape(res)) s += '</ul></li>' if results: s += '<li><h4>links ({})</h4><ul>'.format(len(results)) for res in results: s += """ <li>|--- <b>[{} {}]</b> <b>Page Title:</b> {}, <b>Link Title:</b> {} >> {}</li> """.format(res['code'], res['reason'], escape.xhtml_escape(res['title']), escape.xhtml_escape(res['text']), escape.xhtml_escape(res['url'])) s += '</ul></li>' if 'cssCount' in load_results['results'].keys( ) and load_results['results']['cssCount']: s += '<li><h4>cssCount ({})</h4><ul>'.format( len(load_results['results']['cssCount'])) for res in load_results['results']['cssCount']: s += """ <li>|--- {}</li> """.format(escape.xhtml_escape(res)) s += '</ul></li>' if 'jsCount' in load_results['results'].keys( ) and load_results['results']['jsCount']: s += '<li><h4>jsCount ({})</h4><ul>'.format( 
len(load_results['results']['jsCount'])) for res in load_results['results']['jsCount']: s += """ <li>|--- {}</li> """.format(escape.xhtml_escape(res)) s += '</ul></li>' if 'imageCount' in load_results['results'].keys( ) and load_results['results']['imageCount']: s += '<li><h4>imageCount ({})</h4><ul>'.format( len(load_results['results']['imageCount'])) for res in load_results['results']['imageCount']: s += """ <li>|--- {}</li> """.format(escape.xhtml_escape(res)) s += '</ul></li>' if 'jsonCount' in load_results['results'].keys( ) and load_results['results']['jsonCount']: s += '<li><h4>jsonCount ({})</h4><ul>'.format( len(load_results['results']['jsonCount'])) for res in load_results['results']['jsonCount']: s += """ <li>|--- {}</li> """.format(escape.xhtml_escape(res)) s += '</ul></li>' if 'ajaxRequests' in load_results['results'].keys( ) and load_results['results']['ajaxRequests']: s += '<li><h4>ajaxRequests ({})</h4><ul>'.format( len(load_results['results']['ajaxRequests'])) for res in load_results['results']['ajaxRequests']: s += """ <li>|--- {}</li> """.format(escape.xhtml_escape(res)) s += '</ul></li>' if 'webfontCount' in load_results['results'].keys( ) and load_results['results']['webfontCount']: s += '<li><h4>webfontCount ({})</h4><ul>'.format( len(load_results['results']['webfontCount'])) for res in load_results['results']['webfontCount']: s += """ <li>|--- {}</li> """.format(escape.xhtml_escape(res)) s += '</ul></li>' if 'videoCount' in load_results['results'].keys( ) and load_results['results']['videoCount']: s += '<li><h4>videoCount ({})</h4><ul>'.format( len(load_results['results']['videoCount'])) for res in load_results['results']['videoCount']: s += """ <li>|--- {}</li> """.format(escape.xhtml_escape(res)) s += '</ul></li>' if 'iframesCount' in load_results['results'].keys( ) and load_results['results']['iframesCount']: s += '<li><h4>iframesCount ({})</h4><ul>'.format( len(load_results['results']['iframesCount'])) for res in 
load_results['results']['iframesCount']: s += """ <li>|--- {}</li> """.format(escape.xhtml_escape(res)) s += '</ul></li>' if 'otherCount' in load_results['results'].keys( ) and load_results['results']['otherCount']: s += '<li><h4>otherCount ({})</h4><ul>'.format( len(load_results['results']['otherCount'])) for res in load_results['results']['otherCount']: s += """ <li>|--- {}</li> """.format(escape.xhtml_escape(res)) s += '</ul></li>' if 'domains' in load_results['results'].keys( ) and load_results['results']['domains']: s += '<li><h4>domains ({})</h4><ul>'.format( len(load_results['results']['domains'])) for res in load_results['results']['domains']: s += """ <li>|--- {}</li> """.format(escape.xhtml_escape(res)) s += '</ul></li>' text = """ <!doctype html> <html lang="zh-CN"> <head> <meta charset="UTF-8"> <title>页面链接扫描报告 - %(url)s</title> <style type="text/css"> h2, p {text-align: center;} li ul {list-style-type: none; margin-top: -20px;} </style> </head> <body> <div> <h2>页面链接扫描报告 - %(url)s</h2> <p><a href="/%(screenshot)s" target="_blank">查看页面截图</a><!--a href="/%(har)s" target="_blank">查看页面HTTP请求/响应信息(HTTP Archive)</a--></p> <div> <ul> %(res)s </ul> </div> </div> </body> </html> """ % (dict(url=load_results['url'], screenshot=load_results['screenshot'], har=load_results['har'], res=s)) with open(html_file, 'w') as fp: fp.write(text) return html_file
#!/usr/bin/env python # encoding: utf-8 from copy import copy from tornado.web import Cookie from tornado.gen import coroutine, Return from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError, HTTPResponse from tornado.httputil import HTTPHeaders from tornado.ioloop import IOLoop from . import PY2 try: import pycares from tornado.netutil import Resolver from tornado.platform.caresresolver import CaresResolver Resolver.configure(CaresResolver) except ImportError: pass if PY2: b = unicode iteritems = lambda x: x.iteritems() else: b = str iteritems = lambda x: x.items() try: import ujson as json except ImportError: import json
def config_solver(cls, solver_type='tornado.netutil.BlockingResolver'):
    """Select the Resolver implementation Tornado should use.

    :param solver_type: dotted path (or class) of the resolver
        implementation; defaults to the blocking resolver.
    """
    chosen_impl = solver_type
    Resolver.configure(chosen_impl)