def audit(self):
    method = self.requests.command  # request method: GET or POST
    headers = self.requests.get_headers()  # request headers (dict)
    url = self.build_url()  # full request URL
    data = self.requests.get_body_data().decode(self.response.decoding or 'utf-8')
    resp_data = self.response.get_body_data()  # response body (bytes)
    resp_str = self.response.get_body_str()  # response body (str, auto-decoded)
    resp_headers = self.response.get_headers()  # response headers (dict)
    p = self.requests.urlparse
    params = self.requests.params
    netloc = self.requests.netloc

    # Check every GET parameter, skipping oversized values.
    if params:
        for k, v in params.items():
            if len(v) > 1024:
                continue
            self._check(k, v, method, url, data)

    # Check every POST parameter.
    if method == "POST":
        if self.requests.post_data:
            for k, v in self.requests.post_data.items():  # post_data is a dict; iterate its items
                if len(v) > 1024:
                    continue
                self._check(k, v, method, url, data)

    # Check every cookie value.
    if headers and "cookie" in headers:
        cookie = paramToDict(headers["cookie"], place=PLACE.COOKIE)
        if cookie:
            for k, v in cookie.items():
                if len(v) > 1024:
                    continue
                self._check(k, v, method, url, data)
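# A hedged sketch of what a plugin's _check hook behind the generic loop above
# could look like. The probe-and-reflect test below is purely illustrative and
# is not the project's actual implementation; it only reuses names that already
# appear in this document (copy, requests, prepare_url, out.success, self.requests).
def _check(self, k, v, method, url, data):
    flag = "w13scan_probe"  # hypothetical marker value
    params = copy.deepcopy(self.requests.params)
    params[k] = v + flag
    r = requests.get(prepare_url(self.requests.netloc, params=params),
                     headers=self.requests.get_headers())
    # Report the parameter if the marker is reflected back unmodified.
    if flag in r.text:
        out.success(url, self.name, payload="{}={}".format(k, params[k]), raw=r.raw)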
def __init__(self, url, headers: dict, method='GET', data=None):
    HttpTransfer.__init__(self)
    data = data or {}  # avoid a shared mutable default argument

    self.https = False
    self.urlparse = p = urlparse(url)
    port = 80
    if p.scheme == "https":
        port = 443
        self.https = True

    # Split host and port out of the netloc, falling back to defaults on bad input.
    hostname = p.netloc
    if ":" in p.netloc:
        try:
            hostname, port = p.netloc.split(":")
            port = int(port)
        except ValueError:
            hostname = p.netloc
            port = 80
    self.hostname = hostname
    self.port = port

    self.command = method
    self._body = b''
    if self.command == 'POST':
        # Serialize the form dict as k=v pairs for the raw body.
        self._body = ''
        for k, v in data.items():
            self._body += "{}={};".format(k, v)
        self._body = self._body.encode()
        self.post_hint = POST_HINT.NORMAL
        self.post_data = data

    self.path = p.path
    if p.query:
        self.path = p.path + "?" + p.query
    self.request_version = 1.1

    self.netloc = "{}://{}{}".format(p.scheme, p.netloc, p.path)
    self.tld = get_fld(self.netloc, fix_protocol=True, fail_silently=True)
    self.params = paramToDict(p.query, place=PLACE.GET)
    self.cookies = None
    if "cookie" in headers or "Cookie" in headers:
        self.cookies = paramToDict(headers.get("cookie", headers.get("Cookie")),
                                   place=PLACE.COOKIE)
    self._headers = headers
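# A minimal usage sketch for the constructor above (it is invoked later in this
# document as FakeReq(link, headers), so the class is assumed to be FakeReq).
# The URL, headers and form data here are illustrative only.
get_req = FakeReq("http://example.com/item.php?id=1",
                  {"User-Agent": "Mozilla/5.0", "Cookie": "sid=abc123"})
post_req = FakeReq("http://example.com/login.php",
                   {"User-Agent": "Mozilla/5.0"},
                   method="POST",
                   data={"user": "admin", "pass": "admin"})
# get_req.params then holds the parsed query string (roughly {'id': '1'}), and
# post_req.post_data holds the form dict with post_hint set to POST_HINT.NORMAL.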
def audit(self):
    method = self.requests.command  # request method: GET or POST
    headers = self.requests.get_headers()  # request headers (dict)
    url = self.build_url()  # full request URL
    resp_data = self.response.get_body_data()  # response body (bytes)
    resp_str = self.response.get_body_str()  # response body (str, auto-decoded)
    resp_headers = self.response.get_headers()  # response headers (dict)
    p = self.requests.urlparse
    params = self.requests.params
    netloc = self.requests.netloc

    exi = os.path.splitext(p.path)[1]
    if exi not in acceptedExt:
        return
    origin_len = len(resp_str)
    sql_flag = '鎈\'"\\('

    # Cookie injection: append the probe to each cookie value in turn.
    if headers and "cookie" in headers:
        cookies = paramToDict(headers["cookie"], place=PLACE.COOKIE)
        tmp_headers = copy.deepcopy(headers)
        del tmp_headers["cookie"]
        if cookies:
            for k, v in cookies.items():
                cookie = copy.deepcopy(cookies)
                cookie[k] = v + sql_flag
                # requests expects a dict (or CookieJar) here, not a urlencoded string
                r = requests.get(url, headers=tmp_headers, cookies=cookie)
                if origin_len == len(r.text):
                    continue
                for sql_regex, dbms_type in Get_sql_errors():
                    match = sql_regex.search(r.text)
                    if match:
                        out.success(url, self.name,
                                    payload="cookie: {}={}".format(k, cookie[k]),
                                    dbms_type=dbms_type, raw=r.raw)
                        break

    # GET parameter injection.
    if method == 'GET':
        if p.query == '':
            return
        exi = os.path.splitext(p.path)[1]
        if exi not in acceptedExt:
            return
        for k, v in params.items():
            if k.lower() in ignoreParams:
                continue
            data = copy.deepcopy(params)
            data[k] = v + sql_flag
            url1 = prepare_url(netloc, params=data)
            r = requests.get(url1, headers=headers)
            html = r.text
            for sql_regex, dbms_type in Get_sql_errors():
                match = sql_regex.search(html)
                if match:
                    out.success(url, self.name,
                                payload="{}={}".format(k, data[k]),
                                dbms_type=dbms_type, raw=r.raw,
                                errinfo=match.group())
                    break

    # Header injection test.
    if headers:
        sql_flag = '\'"\\('
        new_headers = {
            "user-agent": headers.get("User-Agent", "") + sql_flag,
            # "referer": headers.get("referer", url) + sql_flag,
            "x-forwarded-for": headers.get("x-forwarded-for", "127.0.0.1") + sql_flag,
            "via": headers.get("via", "") + sql_flag
        }
        headers.update(new_headers)
        r = requests.get(url, headers=headers)
        html = r.text
        if origin_len == len(html):
            return
        for sql_regex, dbms_type in Get_sql_errors():
            match = sql_regex.search(html)
            if match:
                out.success(url, self.name, type="header inject",
                            dbms_type=dbms_type, raw=r.raw,
                            errinfo=match.group())
                break
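# A small refactoring sketch: the error-signature scan above repeats in all three
# branches (cookie, GET parameter, header), so it could be factored into a helper.
# Get_sql_errors() is assumed to yield (compiled_regex, dbms_type) pairs, exactly
# as it is used in this plugin.
def match_sql_error(html):
    for sql_regex, dbms_type in Get_sql_errors():
        match = sql_regex.search(html)
        if match:
            return dbms_type, match.group()  # DBMS name and the matched error text
    return None, None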
def audit(self):
    method = self.requests.command  # request method: GET or POST
    headers = self.requests.get_headers()  # request headers (dict)
    url = self.build_url()  # full request URL
    resp_data = self.response.get_body_data()  # response body (bytes)
    resp_str = self.response.get_body_str()  # response body (str, auto-decoded)
    resp_headers = self.response.get_headers()  # response headers (dict)
    p = self.requests.urlparse
    params = self.requests.params
    netloc = self.requests.netloc

    regx = r'Parse error: syntax error,.*?\sin\s'
    randint = random.randint(5120, 10240)
    verify_result = md5(str(randint).encode())
    payloads = [
        "print(md5({}));",
        ";print(md5({}));",
        "';print(md5({}));$a='",
        "\";print(md5({}));$a=\"",
        "${{@print(md5({}))}}",
        "${{@print(md5({}))}}\\",
        "'.print(md5({})).'"
    ]
    # The payloads only make sense against PHP targets.
    if self.response.language != "PHP":
        return

    # Cookie injection.
    if headers and "cookie" in headers:
        cookies = paramToDict(headers["cookie"], place=PLACE.COOKIE)
        tmp_header = copy.deepcopy(headers)
        del tmp_header["cookie"]
        if cookies:
            for k, v in cookies.items():
                cookie = copy.deepcopy(cookies)
                for payload in payloads:
                    # Payloads starting with "print" replace the value; the rest are appended.
                    if payload[0] == "p":
                        cookie[k] = payload.format(randint)
                    else:
                        cookie[k] = v + payload.format(randint)
                    r = requests.get(url, headers=tmp_header, cookies=cookie)
                    html1 = r.text
                    if verify_result in html1:
                        out.success(url, self.name, type="Cookie",
                                    payload="{}:{}".format(k, cookie[k]), raw=r.raw)
                        break
                    if re.search(regx, html1, re.I | re.S | re.M):
                        out.success(url, self.name, type="Cookie",
                                    payload="{}:{}".format(k, cookie[k]), raw=r.raw)
                        break

    # GET parameter injection.
    if method == 'GET':
        if p.query == '':
            return
        exi = os.path.splitext(p.path)[1]
        if exi not in acceptedExt:
            return
        for k, v in params.items():
            if k.lower() in ignoreParams:
                continue
            data = copy.deepcopy(params)
            for payload in payloads:
                if payload[0] == "p":
                    data[k] = payload.format(randint)
                else:
                    data[k] = v + payload.format(randint)
                url1 = prepare_url(netloc, params=data)
                r = requests.get(url1, headers=headers)
                html1 = r.text
                if verify_result in html1:
                    out.success(url, self.name,
                                payload="{}:{}".format(k, data[k]), raw=r.raw)
                    break
                if re.search(regx, html1, re.I | re.S | re.M):
                    out.success(url, self.name,
                                payload="{}:{}".format(k, data[k]), raw=r.raw)
                    break
def audit(self):
    method = self.requests.command  # request method: GET or POST
    headers = self.requests.get_headers()  # request headers (dict)
    url = self.build_url()  # full request URL
    post_data = self.requests.get_body_data().decode(errors='ignore')  # POST data
    resp_data = self.response.get_body_data()  # response body (bytes)
    resp_str = self.response.get_body_str()  # response body (str, auto-decoded)
    resp_headers = self.response.get_headers()  # response headers (dict)
    encoding = self.response.decoding or 'utf-8'

    # Normalize the request object: parsed URL, netloc, TLD, GET params, cookies.
    p = self.requests.urlparse = urlparse(url)
    netloc = self.requests.netloc = "{}://{}{}".format(p.scheme, p.netloc, p.path)
    self.requests.tld = get_fld(netloc, fix_protocol=True, fail_silently=True)
    data = unquote(p.query, encoding)
    params = paramToDict(data, place=PLACE.GET)
    self.requests.params = params
    if "cookie" in headers:
        self.requests.cookies = paramToDict(headers["cookie"], place=PLACE.COOKIE)

    # Fingerprint basic info (language, OS, web server), falling back to the file extension.
    self.response.language, self.response.system, self.response.webserver = fingter_loader(
        resp_str, self.response.build_headers())
    if not self.response.language:
        if p.path.endswith(".asp"):
            self.response.language = "ASP"
            self.response.system = "WINDOWS"
        elif p.path.endswith(".aspx"):
            self.response.language = "ASPX"
            self.response.system = "WINDOWS"
        elif p.path.endswith(".php"):
            self.response.language = "PHP"
        elif p.path.endswith(".jsp") or p.path.endswith(".do") or p.path.endswith(".action"):
            self.response.language = "JAVA"

    if method == "POST":
        # Recognize the POST body format and parse it where supported.
        post_data = unquote(post_data, encoding)
        if re.search('([^=]+)=([^%s]+%s?)' % (DEFAULT_GET_POST_DELIMITER, DEFAULT_GET_POST_DELIMITER), post_data):
            self.requests.post_hint = POST_HINT.NORMAL
            self.requests.post_data = paramToDict(post_data, place=PLACE.POST,
                                                  hint=self.requests.post_hint)
        elif re.search(JSON_RECOGNITION_REGEX, post_data):
            self.requests.post_hint = POST_HINT.JSON
            self.requests.post_data = paramToDict(post_data, place=PLACE.POST,
                                                  hint=self.requests.post_hint)
        elif re.search(XML_RECOGNITION_REGEX, post_data):
            self.requests.post_hint = POST_HINT.XML
        elif re.search(JSON_LIKE_RECOGNITION_REGEX, post_data):
            self.requests.post_hint = POST_HINT.JSON_LIKE
        elif re.search(ARRAY_LIKE_RECOGNITION_REGEX, post_data):
            self.requests.post_hint = POST_HINT.ARRAY_LIKE
            self.requests.post_data = paramToDict(post_data, place=PLACE.POST,
                                                  hint=self.requests.post_hint)
        elif re.search(MULTIPART_RECOGNITION_REGEX, post_data):
            self.requests.post_hint = POST_HINT.MULTIPART

        # Parameter types that can be auto-recognized and converted: NORMAL, JSON, ARRAY_LIKE.
        if self.requests.post_hint and self.requests.post_hint in [
                POST_HINT.NORMAL, POST_HINT.JSON, POST_HINT.ARRAY_LIKE]:
            # if KB["spiderset"].add(method + url + ''.join(self.requests.post_data), 'PostScan'):
            task_push('PostScan', self.requests, self.response)
        elif self.requests.post_hint is None:
            print("failed to recognize the POST data format")
    elif method == "GET":
        if KB["spiderset"].add(url, 'PerFile'):
            task_push('PerFile', self.requests, self.response)

    # Send PerScheme
    domain = "{}://{}".format(p.scheme, p.netloc)
    if KB["spiderset"].add(domain, 'PerScheme'):
        self.requests.path = "/"
        task_push('PerScheme', self.requests, self.response)

    if conf["no_active"]:
        return

    # Collect links from the response, filter them, and push each new one as a PerFile task.
    links = get_links(resp_str, url, True)
    for link in set(links):
        is_continue = True
        for item in logoutParams:
            if item in link.lower():
                is_continue = False
                break
        for item in notAcceptedExt:
            if link.endswith(item):
                is_continue = False
                break
        if not is_continue:
            continue
        # Deduplicate.
        if not KB["spiderset"].add(link, 'get_links'):
            continue
        try:
            # Refuse bodies larger than 5 MB.
            r = requests.head(link, headers=headers)
            if "Content-Length" in r.headers:
                if int(r.headers["Content-Length"]) > 1024 * 1024 * 5:
                    raise Exception("length > 5M")
            r = requests.get(link, headers=headers)
            req = FakeReq(link, headers)
            resp = FakeResp(r)
        except Exception as e:
            continue
        if KB["spiderset"].add(resp._url, 'PerFile'):
            task_push('PerFile', req, resp)

    # Collect parent directories from the URL and the collected links, and push each as a PerFolder task.
    urls = set(get_parent_paths(url))
    for link in set(links):
        urls |= set(get_parent_paths(link))
    for i in urls:
        if not KB["spiderset"].add(i, 'get_link_directory'):
            continue
        try:
            r = requests.get(i, headers=headers)
            req = FakeReq(i, headers)
            resp = FakeResp(r)
        except Exception:
            continue
        if KB["spiderset"].add(resp._url, 'PerFolder'):
            task_push('PerFolder', req, resp)
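# A minimal driving sketch, assuming the helpers used above behave as shown
# (FakeReq wraps a URL plus headers, FakeResp wraps a requests.Response, and
# task_push queues the pair for the named plugin group). The seed URL and
# headers are illustrative only.
import requests

def scan_single_url(url, headers=None):
    headers = headers or {"User-Agent": "Mozilla/5.0"}
    r = requests.get(url, headers=headers)  # fetch the page once
    req = FakeReq(url, headers)              # fake request object, as in the crawler above
    resp = FakeResp(r)                       # fake response wrapping the live response
    task_push('PerFile', req, resp)          # hand the pair to the per-file plugins

# scan_single_url("http://testphp.example.com/listproducts.php?cat=1")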