def _SEARCH(self, domain_path):
    """
    Test SEARCH method.
    """
    content = "<?xml version='1.0'?>\r\n"
    content += "<g:searchrequest xmlns:g='DAV:'>\r\n"
    content += "<g:sql>\r\n"
    content += "Select 'DAV:displayname' from scope()\r\n"
    content += "</g:sql>\r\n"
    content += "</g:searchrequest>\r\n"

    res = self._uri_opener.SEARCH(domain_path, data=content,
                                  headers=self.CONTENT_TYPE)

    content_matches = '<a:response>' in res or '<a:status>' in res or \
                      'xmlns:a="DAV:"' in res

    if content_matches and res.get_code() in xrange(200, 300):
        msg = 'Directory listing with HTTP SEARCH method was found at' \
              ' directory: "%s".' % domain_path

        v = Vuln('Insecure DAV configuration', msg, severity.MEDIUM,
                 res.id, self.get_name())
        v.set_url(res.get_url())
        v.set_method('SEARCH')

        self.kb_append(self, 'dav', v)
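# Illustrative sketch (not part of the snippet above): the same WebDAV SEARCH
# probe reproduced outside w3af with the `requests` library. The helper name,
# timeout and Content-Type value are assumptions made for the example; the
# request body and the response markers are taken from the snippet.
import requests

SEARCH_BODY = ("<?xml version='1.0'?>\r\n"
               "<g:searchrequest xmlns:g='DAV:'>\r\n"
               "<g:sql>\r\n"
               "Select 'DAV:displayname' from scope()\r\n"
               "</g:sql>\r\n"
               "</g:searchrequest>\r\n")


def probe_dav_search(url):
    headers = {'Content-Type': 'application/xml; charset="utf-8"'}
    # requests.request() accepts arbitrary HTTP verbs such as SEARCH
    response = requests.request('SEARCH', url, data=SEARCH_BODY,
                                headers=headers, timeout=10)
    dav_markers = ('<a:response>', '<a:status>', 'xmlns:a="DAV:"')
    return (200 <= response.status_code < 300 and
            any(marker in response.text for marker in dav_markers))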
def grep(self, request, response):
    """
    Plugin entry point.

    :param request: The HTTP request object.
    :param response: The HTTP response object
    :return: None, all results are saved in the kb.
    """
    if not response.is_text_or_html():
        return

    uri = response.get_uri()

    for regex in self.RE_LIST:
        for m in regex.findall(response.get_body()):
            user = m[0]

            desc = 'The URL: "%s" contains an SVN versioning signature'\
                   ' with the username "%s".'
            desc = desc % (uri, user)

            v = Vuln('SVN user disclosure vulnerability', desc,
                     severity.LOW, response.id, self.get_name())
            v.add_to_highlight(user)
            v.set_uri(uri)
            v[SVNUserInfoSet.ITAG] = user

            self.kb_append_uniq_group(self, 'users', v,
                                      group_klass=SVNUserInfoSet)
def _universal_allow(self, forged_req, url, origin, response,
                     allow_origin, allow_credentials, allow_methods):
    """
    Check if the allow_origin is set to *.

    :return: A list of vulnerability objects with the identified vulns
             (if any).
    """
    if allow_origin == '*':
        msg = 'The remote Web application, specifically "%s", returned' \
              ' an %s header with the value set to "*" which is insecure'\
              ' and leaves the application open to Cross-domain attacks.'
        msg = msg % (forged_req.get_url(), ACCESS_CONTROL_ALLOW_ORIGIN)

        v = Vuln('Access-Control-Allow-Origin set to "*"', msg,
                 severity.LOW, response.get_id(), self.get_name())
        v.set_url(forged_req.get_url())

        self.kb_append(self, 'cors_origin', v)

        return self._filter_report('_universal_allow_counter',
                                   'universal allow-origin',
                                   severity.MEDIUM, [v, ])

    return []
def _is_trusted_cert(self):
    plugin = 'certinfo'
    if plugin not in self._plugin_xml_result:
        return

    is_affected = False
    trust_store = {}
    certificate_validation = self._plugin_xml_result[plugin].find("certificateValidation")

    for path_validation in certificate_validation.findall("pathValidation"):
        name = path_validation.get('usingTrustStore', None)
        version = path_validation.get('trustStoreVersion', None)
        result = path_validation.get('validationResult', None)

        if name:
            trust_store[name] = {}
        if version:
            trust_store[name]['version'] = version
        if result:
            trust_store[name]['result'] = result
            is_affected = True if result == 'self signed certificate' else False

    if is_affected:
        desc = 'Host uses self signed certificate.'

        v = Vuln("Invalid SSL certificate", desc, severity.HIGH,
                 self._response_id, self._plugin_name)
        v.set_url(self._target_url)

        self.kb_append(self, 'wg_invalid_ssl', v)
def _send_and_check(self, repo_url, repo_get_files, repo, domain_path):
    """
    Check if a repository index exists in the domain_path.

    :return: None, everything is saved to the self.out_queue.
    """
    http_response = self.http_get_and_parse(repo_url)

    if not is_404(http_response):
        filenames = repo_get_files(http_response.get_body())
        parsed_url_set = set()

        for filename in self._clean_filenames(filenames):
            test_url = domain_path.url_join(filename)
            if test_url not in self._analyzed_filenames:
                parsed_url_set.add(test_url)
                self._analyzed_filenames.add(filename)

        self.worker_pool.map(self.http_get_and_parse, parsed_url_set)

        if parsed_url_set:
            desc = 'A %s was found at: "%s"; this could indicate that'\
                   ' a %s is accessible. You might be able to download'\
                   ' the Web application source code.'
            desc = desc % (repo, http_response.get_url(), repo)

            v = Vuln('Source code repository', desc, severity.MEDIUM,
                     http_response.id, self.get_name())
            v.set_url(http_response.get_url())

            kb.kb.append(self, repo, v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())
def grep(self, request, response):
    """
    Plugin entry point, search for directory indexing.

    :param request: The HTTP request object.
    :param response: The HTTP response object
    :return: None
    """
    if not response.is_text_or_html():
        return

    if response.get_url().get_domain_path() in self._already_visited:
        return

    self._already_visited.add(response.get_url().get_domain_path())

    html_string = response.get_body()
    for _ in self._multi_in.query(html_string):
        desc = 'The URL: "%s" has a directory indexing vulnerability.'
        desc = desc % response.get_url()

        v = Vuln('Directory indexing', desc, severity.LOW, response.id,
                 self.get_name())
        v.set_url(response.get_url())

        self.kb_append_uniq(self, 'directory', v, 'URL')
        break
def _http_only(self, request, response, cookie_obj,
               cookie_header_value, fingerprinted):
    """
    Verify if the cookie has the httpOnly parameter set

    Reference:
        http://www.owasp.org/index.php/HTTPOnly
        http://en.wikipedia.org/wiki/HTTP_cookie

    :param request: The http request object
    :param response: The http response object
    :param cookie_obj: The cookie object to analyze
    :param cookie_header_value: The cookie, as sent in the HTTP response
    :param fingerprinted: True if the cookie was fingerprinted
    :return: None
    """
    if not self.HTTPONLY_RE.search(cookie_header_value):
        vuln_severity = severity.MEDIUM if fingerprinted else severity.LOW

        desc = 'A cookie without the HttpOnly flag was sent when' \
               ' requesting "%s". The HttpOnly flag prevents potential' \
               ' intruders from accessing the cookie value through' \
               ' Cross-Site Scripting attacks.'
        desc = desc % response.get_url()

        v = Vuln('Cookie without HttpOnly', desc, vuln_severity,
                 response.id, self.get_name())
        v.set_url(response.get_url())

        self._set_cookie_to_rep(v, cobj=cookie_obj)
        kb.kb.append(self, 'security', v)
def _ssl_cookie_via_http(self, request, response):
    """
    Analyze if a cookie value, sent in an HTTPS request, is now used for
    identifying the user in an insecure page. Example:
        Login is done over SSL
        The rest of the page is HTTP
    """
    if request.get_url().get_protocol().lower() == 'https':
        return

    for cookie in kb.kb.get('analyze_cookies', 'cookies'):
        if cookie.get_url().get_protocol().lower() == 'https' and \
           request.get_url().get_domain() == cookie.get_url().get_domain():
            # The cookie was sent using SSL, I'll check if the current
            # request is using these values in the POSTDATA / QS / COOKIE
            for key in cookie['cookie-object'].keys():
                value = cookie['cookie-object'][key].value

                # This if is to create less false positives
                if len(value) > 6 and value in request.dump():
                    desc = 'Cookie values that were set over HTTPS are' \
                           ' then sent over an insecure channel in a' \
                           ' request to "%s".'
                    desc = desc % request.get_url()

                    v = Vuln('Secure cookies over insecure channel', desc,
                             severity.HIGH, response.id, self.get_name())
                    v.set_url(response.get_url())

                    self._set_cookie_to_rep(v, cobj=cookie['cookie-object'])
                    kb.kb.append(self, 'security', v)
def _not_secure_over_https(self, request, response, cookie_obj,
                           cookie_header_value):
    """
    Checks if a cookie that does NOT have a secure flag is sent over
    https.

    :param request: The http request object
    :param response: The http response object
    :param cookie_obj: The cookie object to analyze
    :param cookie_header_value: The cookie, as sent in the HTTP response
    :return: None
    """
    # BUGBUG: See other reference in this file for
    #         http://bugs.python.org/issue1028088
    if response.get_url().get_protocol().lower() == 'https' and \
       not self.SECURE_RE.search(cookie_header_value):
        desc = 'A cookie without the secure flag was sent in an HTTPS' \
               ' response at "%s". The secure flag prevents the browser' \
               ' from sending a "secure" cookie over an insecure HTTP' \
               ' channel, thus preventing potential session hijacking' \
               ' attacks.'
        desc = desc % response.get_url()

        v = Vuln('Secure flag missing in HTTPS cookie', desc,
                 severity.HIGH, response.id, self.get_name())
        v.set_url(response.get_url())

        self._set_cookie_to_rep(v, cobj=cookie_obj)
        kb.kb.append(self, 'security', v)
def grep(self, request, response):
    """
    Plugin entry point, find the SSN numbers.

    :param request: The HTTP request object.
    :param response: The HTTP response object
    :return: None.
    """
    if not response.is_text_or_html() or response.get_code() != 200 \
       or response.get_clear_text_body() is None:
        return

    found_ssn, validated_ssn = self._find_SSN(response.get_clear_text_body())

    if validated_ssn:
        uri = response.get_uri()
        desc = 'The URL: "%s" possibly discloses a US Social Security'\
               ' Number: "%s".'
        desc = desc % (uri, validated_ssn)

        v = Vuln('US Social Security Number disclosure', desc,
                 severity.LOW, response.id, self.get_name())
        v.set_uri(uri)
        v.add_to_highlight(found_ssn)

        self.kb_append_uniq(self, 'ssn', v, 'URL')
def _lowest_privilege_test(self, response):
    regex_str = 'User/Group </td><td class="v">(.*?)\((\d.*?)\)/(\d.*?)</td>'
    lowest_privilege_test = re.search(regex_str, response.get_body(), re.I)

    if lowest_privilege_test:
        lpt_uname = lowest_privilege_test.group(1)
        lpt_uid = lowest_privilege_test.group(2)
        lpt_uid = int(lpt_uid)
        lpt_gid = lowest_privilege_test.group(3)

        if lpt_uid < 99 or lpt_gid < 99 or \
           re.match('root|apache|daemon|bin|operator|adm', lpt_uname, re.I):
            desc = 'phpinfo()::PHP may be executing as a higher privileged'\
                   ' group. Username: %s, UserID: %s, GroupID: %s.'
            desc = desc % (lpt_uname, lpt_uid, lpt_gid)

            v = Vuln('PHP lowest_privilege_test:fail', desc,
                     severity.MEDIUM, response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        else:
            lpt_name = 'privilege:' + lpt_uname
            lpt_desc = 'phpinfo()::PHP is executing under '
            lpt_desc += 'username: ' + lpt_uname + ', '
            lpt_desc += 'userID: ' + str(lpt_uid) + ', '
            lpt_desc += 'groupID: ' + lpt_gid

            i = Info(lpt_name, lpt_desc, response.id, self.get_name())
            i.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', i)
            om.out.information(i.get_desc())
def _check_if_exists(self, web_shell_url):
    """
    Check if the file exists.

    :param web_shell_url: The URL to check
    """
    try:
        response = self._uri_opener.GET(web_shell_url, cache=True)
    except BaseFrameworkException:
        om.out.debug('Failed to GET webshell: ' + web_shell_url)
    else:
        if self._is_possible_backdoor(response):
            desc = 'A web backdoor was found at: "%s"; this could ' \
                   'indicate that the server has been compromised.'
            desc = desc % response.get_url()

            v = Vuln('Potential web backdoor', desc, severity.HIGH,
                     response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'backdoors', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())

            fr = FuzzableRequest.from_http_response(response)
            self.output_queue.put(fr)
def _analyze_SQL(self, request, response, ref, token_name, token_value):
    """
    To find this kind of vulnerability see
    http://thedailywtf.com/Articles/Oklahoma-Leaks-Tens-of-Thousands-of-Social-Security-Numbers,-Other-Sensitive-Data.aspx

    :return: True if the parameter value contains SQL sentences
    """
    for match in SQL_RE.findall(token_value):
        if request.sent(match):
            continue

        desc = ('The URI: "%s" has a parameter named: "%s" with value:'
                ' "%s", which is a SQL query.')
        desc %= (response.get_uri(), token_name, token_value)

        v = Vuln('Parameter has SQL sentence', desc, severity.LOW,
                 response.id, self.get_name())

        v['parameter_value'] = token_value
        v.add_to_highlight(token_value)
        v.set_uri(ref)

        self.kb_append(self, 'strange_parameters', v)
        return True

    return False
def end(self):
    """
    This method is called when the plugin won't be used anymore.

    The real job of this plugin is done here, where I will try to see if
    one of the error_500 responses was not identified as a vuln by some
    of my audit plugins
    """
    all_vuln_ids = set()

    for info in kb.kb.get_all_findings():
        for _id in info.get_id():
            all_vuln_ids.add(_id)

    for request, error_500_response_id in self._error_500_responses:
        if error_500_response_id not in all_vuln_ids:
            # Found an error 500 that wasn't identified!
            desc = 'An unidentified web application error (HTTP response'\
                   ' code 500) was found at: "%s". Enable all plugins and'\
                   ' try again, if the vulnerability is still not'\
                   ' identified, please verify manually and report it to'\
                   ' the w3af developers.'
            desc = desc % request.get_url()

            v = Vuln('Unhandled error in web application', desc,
                     severity.MEDIUM, error_500_response_id,
                     self.get_name())
            v.set_uri(request.get_uri())

            self.kb_append_uniq(self, 'error_500', v, 'VAR')

    self._error_500_responses.cleanup()
def _analyze_html(self, request, response):
    """
    Search for IP addresses in the HTML
    """
    if not response.is_text_or_html():
        return

    # Performance improvement!
    if not (('10.' in response) or ('172.' in response) or
            ('192.168.' in response) or ('169.254.' in response)):
        return

    for regex in self._regex_list:
        for match in regex.findall(response.get_body()):
            match = match.strip()

            # Some proxy servers will return errors that include headers
            # in the body along with the client IP which we want to ignore
            if re.search("^.*X-Forwarded-For: .*%s" % match,
                         response.get_body(), re.M):
                continue

            # If I'm requesting 192.168.2.111 then I don't want to be
            # alerted about it
            if match not in self._ignore_if_match and \
               not request.sent(match):
                desc = 'The URL: "%s" returned an HTML document'\
                       ' with a private IP address: "%s".'
                desc = desc % (response.get_url(), match)

                v = Vuln('Private IP disclosure vulnerability', desc,
                         severity.LOW, response.id, self.get_name())

                v.set_url(response.get_url())
                v['IP'] = match
                v.add_to_highlight(match)

                self.kb_append(self, 'HTML', v)
def _analyze_headers(self, request, response):
    """
    Search for IP addresses in HTTP headers
    """
    # Get the headers string
    headers_string = response.dump_headers()

    # Match the regular expressions
    for regex in self._regex_list:
        for match in regex.findall(headers_string):

            # If I'm requesting 192.168.2.111 then I don't want to be
            # alerted about it
            if match not in self._ignore_if_match:
                desc = 'The URL: "%s" returned an HTTP header with a'\
                       ' private IP address: "%s".'
                desc = desc % (response.get_url(), match)

                v = Vuln('Private IP disclosure vulnerability', desc,
                         severity.LOW, response.id, self.get_name())
                v.set_url(response.get_url())
                v['IP'] = match
                v.add_to_highlight(match)

                self.kb_append(self, 'header', v)
def _check_if_exists(self, web_shell_url):
    """
    Check if the file exists.

    :param web_shell_url: The URL to check
    """
    try:
        response = self._uri_opener.GET(web_shell_url, cache=True)
    except BaseFrameworkException:
        om.out.debug('Failed to GET webshell: ' + web_shell_url)
    else:
        signature = self._match_signature(response)
        if signature is None:
            return

        desc = (u'An HTTP response matching the web backdoor signature'
                u' "%s" was found at: "%s"; this could indicate that the'
                u' server has been compromised.')
        desc %= (signature, response.get_url())

        # Its probability is higher if we found a long signature
        _severity = severity.HIGH if len(signature) > 8 else severity.MEDIUM

        v = Vuln(u'Potential web backdoor', desc, _severity,
                 response.id, self.get_name())
        v.set_url(response.get_url())

        kb.kb.append(self, 'backdoors', v)
        om.out.vulnerability(v.get_desc(), severity=v.get_severity())

        fr = FuzzableRequest.from_http_response(response)
        self.output_queue.put(fr)
def _check_methods(self, url):
    """
    Perform some requests in order to check if we are able to retrieve
    some data with methods that may be wrongly enabled.
    """
    allowed_methods = []

    for method in ['GET', 'POST', 'ABCD', 'HEAD']:
        method_functor = getattr(self._uri_opener, method)
        try:
            response = method_functor(url)
            code = response.get_code()
        except:
            pass
        else:
            if code in self.SUCCESS_CODES:
                allowed_methods.append((method, response.id))

    if len(allowed_methods) > 0:
        response_ids = [i for m, i in allowed_methods]
        methods = ', '.join([m for m, i in allowed_methods]) + '.'

        desc = 'The resource: "%s" requires authentication but the access'\
               ' is misconfigured and can be bypassed using these'\
               ' methods: %s'
        desc = desc % (url, methods)

        v = Vuln('Misconfigured access control', desc, severity.MEDIUM,
                 response_ids, self.get_name())
        v.set_url(url)
        v['methods'] = allowed_methods

        self.kb_append(self, 'auth', v)
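# Illustrative sketch (not part of the snippet above): the same verb-tampering
# check reproduced with the `requests` library. The SUCCESS_CODES set and the
# timeout are assumptions for the example; the method list mirrors the snippet.
import requests

SUCCESS_CODES = {200, 201, 204}


def get_allowed_methods(url):
    allowed = []
    for method in ('GET', 'POST', 'ABCD', 'HEAD'):
        try:
            response = requests.request(method, url, timeout=10)
        except requests.RequestException:
            continue
        if response.status_code in SUCCESS_CODES:
            allowed.append(method)
    return allowed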
def crawl(self, fuzzable_request):
    """
    Plugin entry point, perform all the work.
    """
    to_check = self._get_to_check(fuzzable_request.get_url())

    # I found some URLs, create fuzzable requests
    phishtank_matches = self._is_in_phishtank(to_check)
    for ptm in phishtank_matches:
        response = self._uri_opener.GET(ptm.url)
        for fr in self._create_fuzzable_requests(response):
            self.output_queue.put(fr)

    # Only create the vuln object once
    if phishtank_matches:
        desc = 'The URL: "%s" seems to be involved in a phishing scam.' \
               ' Please see %s for more info.'
        desc = desc % (ptm.url, ptm.more_info_URL)

        v = Vuln('Phishing scam', desc, severity.MEDIUM, response.id,
                 self.get_name())
        v.set_url(ptm.url)

        kb.kb.append(self, 'phishtank', v)
        om.out.vulnerability(v.get_desc(), severity=v.get_severity())
def _PROPFIND(self, domain_path):
    """
    Test PROPFIND method
    """
    content = "<?xml version='1.0'?>\r\n"
    content += "<a:propfind xmlns:a='DAV:'>\r\n"
    content += "<a:prop>\r\n"
    content += "<a:displayname:/>\r\n"
    content += "</a:prop>\r\n"
    content += "</a:propfind>\r\n"

    headers = copy.deepcopy(self.CONTENT_TYPE)
    headers['Depth'] = '1'

    res = self._uri_opener.PROPFIND(domain_path, data=content,
                                    headers=headers)

    if "D:href" in res and res.get_code() in xrange(200, 300):
        msg = 'Directory listing with HTTP PROPFIND method was found at' \
              ' directory: "%s".' % domain_path

        v = Vuln('Insecure DAV configuration', msg, severity.MEDIUM,
                 res.id, self.get_name())
        v.set_url(res.get_url())
        v.set_method('PROPFIND')

        self.kb_append(self, 'dav', v)
def _PUT(self, domain_path):
    """
    Tests PUT method.
    """
    # upload
    url = domain_path.url_join(rand_alpha(5))
    rnd_content = rand_alnum(6)
    headers = Headers([('content-type', 'text/plain')])

    put_response = self._uri_opener.PUT(url, data=rnd_content,
                                        headers=headers)

    # check if uploaded
    res = self._uri_opener.GET(url, cache=True)
    if res.get_body() == rnd_content:
        msg = 'File upload with HTTP PUT method was found at resource:' \
              ' "%s". A test file was uploaded to: "%s".'
        msg = msg % (domain_path, res.get_url())

        v = Vuln('Insecure DAV configuration', msg, severity.HIGH,
                 [put_response.id, res.id], self.get_name())
        v.set_url(url)
        v.set_method('PUT')

        self.kb_append(self, 'dav', v)

    # Report some common errors
    elif put_response.get_code() == 500:
        msg = 'DAV seems to be incorrectly configured. The web server' \
              ' answered with a 500 error code. In most cases, this means'\
              ' that the DAV extension failed in some way. This error was'\
              ' found at: "%s".' % put_response.get_url()

        i = Info('DAV incorrect configuration', msg, res.id,
                 self.get_name())
        i.set_url(url)
        i.set_method('PUT')

        self.kb_append(self, 'dav', i)

    # Report some common errors
    elif put_response.get_code() == 403:
        msg = 'DAV seems to be correctly configured and allowing you to'\
              ' use the PUT method but the directory does not have the'\
              ' correct permissions that would allow the web server to'\
              ' write to it. This error was found at: "%s".'
        msg = msg % put_response.get_url()

        i = Info('DAV incorrect configuration', msg,
                 [put_response.id, res.id], self.get_name())
        i.set_url(url)
        i.set_method('PUT')

        self.kb_append(self, 'dav', i)
def _parse_xssed_result(self, response):
    """
    Parse the result from the xssed site and create the corresponding
    info objects.

    :return: Fuzzable requests pointing to the XSS (if any)
    """
    html_body = response.get_body()

    if "<b>XSS:</b>" not in html_body:
        # Nothing to see here...
        om.out.debug('xssed_dot_com did not find any previously reported'
                     ' XSS vulnerabilities.')
        return

    #
    #   Work!
    #
    regex_many_vulns = re.findall("<a href='(/mirror/\d*/)' target='_blank'>",
                                  html_body)
    for mirror_relative_link in regex_many_vulns:
        mirror_url = self._xssed_url.url_join(mirror_relative_link)
        xss_report_response = self._uri_opener.GET(mirror_url)

        matches = re.findall("URL:.+", xss_report_response.get_body())

        dxss = self._decode_xssed_url
        xss_url = dxss(dxss(matches[0]))

        if self._fixed in xss_report_response.get_body():
            vuln_severity = severity.LOW
            desc = 'According to xssed.com, this URL contained an XSS'\
                   ' vulnerability: "%s".'
            desc = desc % xss_url
        else:
            vuln_severity = severity.HIGH
            desc = 'According to xssed.com, this URL contains an'\
                   ' XSS vulnerability: "%s".'
            desc = desc % xss_url

        v = Vuln('Potential XSS vulnerability', desc, vuln_severity,
                 response.id, self.get_name())
        v.set_url(mirror_url)

        kb.kb.append(self, 'xss', v)
        om.out.information(v.get_desc())

        #
        # Add the fuzzable request, this is useful if I have the
        # XSS plugin enabled because it will re-test this and
        # possibly confirm the vulnerability
        #
        fr = FuzzableRequest(URL(xss_url))
        self.output_queue.put(fr)
def _analyze_result(self, mutant, response):
    """
    Analyze results of the send_mutant method.
    """
    orig_resp_body = mutant.get_original_response_body()
    body = response.get_body()

    for pattern_match in self._find_patterns(body):
        # Remove false positives
        if pattern_match in orig_resp_body:
            continue

        # Only report vulnerabilities once
        if self._has_bug(mutant):
            return

        # Create the vulnerability!
        desc = 'An XML External Entity injection was found at: %s'
        desc %= mutant.found_at()

        v = Vuln.from_mutant('XML External Entity', desc, severity.HIGH,
                             response.id, self.get_name(), mutant)
        v.add_to_highlight(pattern_match)

        self.kb_append_uniq(self, 'xxe', v)
        return

    # We get here when there are no vulnerabilities in the response
    # but we still want to flag any parsing errors which might be
    # pointers to other (more complex to identify and exploit)
    # vulnerabilities
    for parser_error in self.parser_errors_multi_in.query(body):
        # Do not report that we found an error when we already found
        # something with higher priority in the same mutant
        if self._has_bug(mutant):
            return

        # Do not report the same error twice
        if self._has_bug(mutant, kb_varname='errors'):
            return

        desc = ('An XML library parsing error was found at: %s. These'
                ' errors usually indicate that an XML injection is'
                ' possible.')
        desc %= mutant.found_at()

        v = Vuln.from_mutant('XML Parsing Error', desc, severity.LOW,
                             response.id, self.get_name(), mutant)
        v.add_to_highlight(parser_error)

        self.kb_append_uniq(self, 'errors', v)
        return
def _origin_echo(self, forged_req, url, origin, response,
                 allow_origin, allow_credentials_str, allow_methods):
    """
    First check if the @allow_origin is set to the value we sent
    (@origin) and if the allow_credentials is set to True. If this test
    is successful (most important vulnerability) then do not run the
    weaker check for @allow_origin alone.

    :return: A list of vulnerability objects with the identified vulns
             (if any).
    """
    if allow_origin is None:
        return []

    allow_origin = allow_origin.lower()

    allow_credentials = False
    if allow_credentials_str is not None:
        allow_credentials = 'true' in allow_credentials_str.lower()

    if origin not in allow_origin:
        return []

    if allow_credentials:
        sev = severity.HIGH
        name = 'Insecure Access-Control-Allow-Origin with credentials'
        msg = ('The remote Web application, specifically "%s", returned'
               ' a "%s" header with the value set to the value sent in the'
               ' request\'s Origin header and a %s header with the value'
               ' set to "true", which is insecure and leaves the'
               ' application open to Cross-domain attacks which can'
               ' affect logged-in users.')
        msg = msg % (forged_req.get_url(), ACCESS_CONTROL_ALLOW_ORIGIN,
                     ACCESS_CONTROL_ALLOW_CREDENTIALS)
    else:
        sev = severity.LOW
        name = 'Insecure Access-Control-Allow-Origin'
        msg = ('The remote Web application, specifically "%s", returned'
               ' a "%s" header with the value set to the value sent in the'
               ' request\'s Origin header, which is insecure and leaves'
               ' the application open to Cross-domain attacks.')
        msg = msg % (forged_req.get_url(), ACCESS_CONTROL_ALLOW_ORIGIN)

    v = Vuln(name, msg, sev, response.get_id(), self.get_name())
    v.set_url(forged_req.get_url())

    self.kb_append(self, 'cors_origin', v)

    return self._filter_report('_origin_echo_counter',
                               'origin echoed in allow-origin',
                               severity.HIGH, [v])
def write_vuln_to_kb(vulnty, url, funcs):
    vulndata = php_sca.KB_DATA[vulnty]

    for f in funcs:
        vuln_sev = vulndata['severity']
        desc = name = vulndata['name']

        v = Vuln(name, desc, vuln_sev, 1, 'PHP Static Code Analyzer')
        v.set_uri(url)
        v.set_token((f.vulnsources[0], 0))

        args = list(vulndata['kb_key']) + [v]

        # TODO: Extract the method from the PHP code
        #           $_GET == GET
        #           $_POST == POST
        #           $_REQUEST == GET
        v.set_method('GET')

        # TODO: Extract all the other variables that are
        #       present in the PHP file using the SCA
        v.set_dc(DataContainer())

        # TODO: This needs to be checked! OS Commanding specific
        #       attributes.
        v['os'] = 'unix'
        v['separator'] = ''

        kb.kb.append(*args)
def _display_errors(self, response):
    regex_str = 'display_errors</td><td class="v">(On|<i>no value</i>)</td>'
    display_errors = re.search(regex_str, response.get_body(), re.I)

    if display_errors:
        desc = 'The phpinfo()::display_errors is enabled.'

        v = Vuln('PHP display_errors: On', desc, severity.MEDIUM,
                 response.id, self.get_name())
        v.set_url(response.get_url())

        kb.kb.append(self, 'phpinfo', v)
        om.out.vulnerability(v.get_desc(), severity=v.get_severity())
def _session_cookie_httponly(self, response):
    regex_str = 'session\.cookie_httponly</td><td class="v">(Off|no|0)</td>'
    session_cookie_httponly = re.search(regex_str, response.get_body(), re.I)

    if session_cookie_httponly:
        desc = 'The phpinfo()::session.cookie_httponly is off.'

        v = Vuln('PHP session.cookie_httponly: Off', desc, severity.MEDIUM,
                 response.id, self.get_name())
        v.set_url(response.get_url())

        kb.kb.append(self, 'phpinfo', v)
        om.out.vulnerability(v.get_desc(), severity=v.get_severity())
def _expose_php(self, response):
    regex_str = 'expose_php</td><td class="v">(On|<i>no value</i>)</td>'
    expose_php = re.search(regex_str, response.get_body(), re.I)

    if expose_php:
        desc = 'The phpinfo()::expose_php is enabled.'

        v = Vuln('PHP expose_php: On', desc, severity.MEDIUM,
                 response.id, self.get_name())
        v.set_url(response.get_url())

        kb.kb.append(self, 'phpinfo', v)
        om.out.vulnerability(v.get_desc(), severity=v.get_severity())
def _allow_url_include(self, response):
    regex_str = 'allow_url_include</td><td class="v">(On|<i>no value</i>)</td>'
    allow_url_include = re.search(regex_str, response.get_body(), re.I)

    if allow_url_include:
        desc = 'The phpinfo()::allow_url_include is enabled.'

        v = Vuln('PHP allow_url_include: On', desc, severity.MEDIUM,
                 response.id, self.get_name())
        v.set_url(response.get_url())

        kb.kb.append(self, 'phpinfo', v)
        om.out.vulnerability(v.get_desc(), severity=v.get_severity())
def _upload_tmp_dir(self, response):
    regex_str = 'upload_tmp_dir</td><td class="v">(<i>no value</i>)</td>'
    upload_tmp_dir = re.search(regex_str, response.get_body(), re.I)

    if upload_tmp_dir:
        desc = 'The phpinfo()::upload_tmp_dir may be set to world-'\
               'accessible directory.'

        v = Vuln('PHP upload_tmp_dir:Everyone', desc, severity.LOW,
                 response.id, self.get_name())
        v.set_url(response.get_url())

        kb.kb.append(self, 'phpinfo', v)
        om.out.vulnerability(v.get_desc(), severity=v.get_severity())
def test_no_duplicate_vuln_reports(self):
    # The xml_file plugin had a bug where vulnerabilities were written to
    # disk multiple times, this test makes sure that bug is fixed

    # First we create one vulnerability in the KB
    self.kb.cleanup()

    desc = 'Just a test for the XML file output plugin.'
    v = Vuln('SQL injection', desc, severity.HIGH, 1, 'sqli')
    self.kb.append('sqli', 'sqli', v)

    self.assertEqual(len(self.kb.get_all_vulns()), 1)

    # Setup the plugin
    plugin_instance = xml_file()

    # Set the output file for the unittest
    ol = OptionList()
    d = 'Output file name where to write the XML data'
    o = opt_factory('output_file', self.FILENAME, d, OUTPUT_FILE)
    ol.add(o)

    # Then we flush() to disk several times, this reproduced the issue
    plugin_instance.set_options(ol)
    plugin_instance.flush()
    plugin_instance.flush()
    plugin_instance.flush()

    # Now we parse the vulnerabilities from disk and confirm only one
    # is there
    file_vulns = self._from_xml_get_vulns(self.FILENAME)
    self.assertEqual(len(file_vulns), 1, file_vulns)
def is_injectable(self, mutant):
    """
    Check if this mutant is delay injectable or not.

    :param mutant: The mutant object that I have to inject to
    :return: A vulnerability object or None if nothing is found
    """
    for delay_obj in self._get_delays():
        ed = ExactDelayController(mutant, delay_obj, self._uri_opener)
        success, responses = ed.delay_is_controlled()

        if success:
            # Now I can be sure that I found a vuln, we control the
            # response time with the delay
            desc = 'Blind SQL injection using time delays was found at: %s'
            desc = desc % mutant.found_at()

            response_ids = [r.id for r in responses]

            v = Vuln.from_mutant('Blind SQL injection vulnerability',
                                 desc, severity.HIGH, response_ids,
                                 'blind_sqli', mutant)

            om.out.debug(v.get_desc())
            return v

    return None
def _confirm_file_upload(self, path, mutant, http_response):
    """
    Confirms if the file was uploaded to path

    :param path: The URL where we suspect that a file was uploaded to.
    :param mutant: The mutant that originated the file on the remote end
    :param http_response: The HTTP response associated with sending mutant
    """
    get_response = self._uri_opener.GET(path, cache=False)

    if not is_404(get_response) and self._has_no_bug(mutant):
        # This is necessary, if I don't do this, the session
        # saver will break because REAL file objects can't
        # be pickled
        mutant.set_mod_value('<file_object>')

        desc = 'A file upload to a directory inside the webroot' \
               ' was found at: %s' % mutant.found_at()

        v = Vuln.from_mutant('Insecure file upload', desc, severity.HIGH,
                             [http_response.id, get_response.id],
                             self.get_name(), mutant)

        v['file_dest'] = get_response.get_url()
        v['file_vars'] = mutant.get_file_vars()

        self.kb_append_uniq(self, 'file_upload', v)
        return
def check_is_open_web_socket(self, web_socket_url, web_socket_version):
    """
    Note that this method only makes sense if called in a loop with the
    other check_* methods.

    :param web_socket_url: The URL of the web socket
    :param web_socket_version: The protocol version

    :return: True if the web socket is open:
                * Any Origin can connect
                * No cookies required for authentication
                * No basic auth required for authentication
    """
    upgrade_request = build_ws_upgrade_request(web_socket_url,
                                               web_socket_version=web_socket_version,
                                               origin=self.W3AF_ORIGIN)
    upgrade_response = self._uri_opener.send_mutant(upgrade_request,
                                                    cookies=False,
                                                    use_basic_auth=False)

    if not is_successful_upgrade(upgrade_response):
        return False

    msg = ('An HTML5 WebSocket which allows connections from any origin'
           ' without authentication was found at "%s"')
    msg %= web_socket_url

    v = Vuln.from_fr('Open WebSocket', msg, severity.LOW,
                     upgrade_response.id, self.get_name(),
                     upgrade_request)
    self.kb_append_uniq(self, 'websocket_hijacking', v)
    return True
def audit(self, freq, orig_response):
    if not self._dns_zone:
        om.out.debug("DNS zone not configured!")
        return

    self.fqdn = "xxe.{target}.{domain}".format(
        target=freq.get_uri().get_domain(),
        domain=self._dns_zone)

    for mutant in create_mutants(freq, ['&a;', ]):
        if isinstance(mutant, XMLMutant):
            mutant.get_dc().doctype = '<!DOCTYPE aa [\n'
            mutant.get_dc().doctype += '  <!ENTITY a SYSTEM "http://{FQDN}">\n'.format(FQDN=self.fqdn)
            mutant.get_dc().doctype += ']>'

        try:
            response = self._uri_opener.send_mutant(mutant, cache=False,
                                                    timeout=10)

            if self.check('get.' + self.fqdn):
                desc = 'XXE injection at: "%s", using'\
                       ' HTTP method %s. The injectable parameter may be: "%s"'
                desc = desc % (mutant.get_url(),
                               mutant.get_method(),
                               mutant.get_token_name())

                vuln = Vuln.from_mutant('XXE injection vulnerability',
                                        desc, severity.HIGH, response.id,
                                        'xxe', mutant)

                om.out.debug(vuln.get_desc())
                om.out.vulnerability("XXE injection", severity=severity.HIGH)
        except HTTPRequestException:
            om.out.debug("HTTPRequestException")
        except Exception as e:
            om.out.debug(str(e))
def audit(self, freq, orig_response):
    """
    Test URLs for CSRF vulnerabilities.

    :param freq: A FuzzableRequest
    """
    if not self._is_suitable(freq):
        return

    # Referer/Origin check
    #
    # IMPORTANT NOTE: I'm aware that checking for the referer header does
    # NOT protect the application against all cases of CSRF, but it's a
    # very good first step. In order to exploit a CSRF in an application
    # that protects using this method an intruder would have to identify
    # other vulnerabilities such as XSS or open redirects.
    #
    # TODO: This algorithm has lots of room for improvement
    if self._is_origin_checked(freq, orig_response):
        om.out.debug('Origin for %s is checked' % freq.get_url())
        return

    # Does the request have a CSRF token in the query string or POST
    # payload?
    if self._find_csrf_token(freq):
        return

    # OK, we found a request that is vulnerable to CSRF attacks
    msg = 'Cross Site Request Forgery has been found at: ' + freq.get_url()

    v = Vuln.from_fr('CSRF vulnerability', msg, severity.MEDIUM,
                     orig_response.id, self.get_name(), freq)
    self.kb_append_uniq(self, 'csrf', v)
def _report_vuln(self, mutant, response, mod_value):
    """
    Create a Vuln object and store it in the KB.

    :return: None
    """
    csp_protects = site_protected_against_xss_by_csp(response)
    vuln_severity = severity.LOW if csp_protects else severity.MEDIUM

    desc = 'A Cross Site Scripting vulnerability was found at: %s'
    desc %= mutant.found_at()

    if csp_protects:
        desc += (' The risk associated with this vulnerability was lowered'
                 ' because the site correctly implements CSP. The'
                 ' vulnerability is still a risk for the application since'
                 ' only the latest versions of some browsers implement CSP'
                 ' checking.')

    v = Vuln.from_mutant('Cross site scripting vulnerability', desc,
                         vuln_severity, response.id, self.get_name(),
                         mutant)
    v.add_to_highlight(mod_value)

    self.kb_append_uniq(self, 'xss', v)
def _analyze_result(self, mutant, response):
    """
    Analyze results of the _send_mutant method.
    """
    if not response.is_text_or_html():
        return

    if self._has_bug(mutant):
        return

    for tag in mp_doc_parser.get_tags_by_filter(response, self.TAGS):
        src_attr = tag.attrib.get('src', None)
        if src_attr is None:
            continue

        for url in self._test_urls:
            if not src_attr.startswith(url):
                continue

            # Vuln vuln!
            desc = 'A phishing vector was found at: %s'
            desc %= mutant.found_at()

            v = Vuln.from_mutant('Phishing vector', desc, severity.LOW,
                                 response.id, self.get_name(), mutant)
            v.add_to_highlight(src_attr)

            self.kb_append_uniq(self, 'phishing_vector', v)
            break
def _find_delay_in_mutant(self, mutant, delay_obj, debugging_id):
    """
    Try to delay the response and save a vulnerability if successful

    :param mutant: The mutant to modify and test
    :param delay_obj: The delay to use
    :param debugging_id: The debugging ID for logging
    """
    adc = AproxDelayController(mutant, delay_obj, self._uri_opener,
                               delay_setting=EXPONENTIALLY)
    adc.set_debugging_id(debugging_id)
    success, responses = adc.delay_is_controlled()

    if not success:
        return

    # Now I can be sure that I found a vuln, we control the
    # response time with the delay
    desc = 'ReDoS was found at: %s' % mutant.found_at()
    response_ids = [r.id for r in responses]

    v = Vuln.from_mutant('ReDoS vulnerability', desc, severity.MEDIUM,
                         response_ids, self.get_name(), mutant)

    self.kb_append_uniq(self, 'redos', v)
def _with_time_delay(self, freq):
    """
    Tests a URL for OS Commanding vulnerabilities using time delays.

    :param freq: A FuzzableRequest
    """
    fake_mutants = create_mutants(freq, ['', ])

    for mutant in fake_mutants:
        if self._has_bug(mutant):
            continue

        for delay_obj in self._get_wait_commands():
            ed = ExactDelayController(mutant, delay_obj, self._uri_opener)
            success, responses = ed.delay_is_controlled()

            if success:
                desc = 'OS Commanding was found at: %s' % mutant.found_at()

                v = Vuln.from_mutant('OS commanding vulnerability', desc,
                                     severity.HIGH,
                                     [r.id for r in responses],
                                     self.get_name(), mutant)
                v['os'] = delay_obj.get_OS()
                v['separator'] = delay_obj.get_separator()

                self.kb_append_uniq(self, 'os_commanding', v)
                break
def test_from_mutant(self):
    dc = DataContainer()
    url = URL('http://moth/')

    payloads = ['abc', 'def']

    dc['a'] = ['1', ]
    dc['b'] = ['2', ]

    freq = FuzzableRequest(url, dc=dc)

    fuzzer_config = {}
    created_mutants = Mutant.create_mutants(freq, payloads, [], False,
                                            fuzzer_config)
    mutant = created_mutants[0]

    inst = Vuln.from_mutant('TestCase', 'desc' * 30, 'High', 1,
                            'plugin_name', mutant)

    self.assertIsInstance(inst, Vuln)

    self.assertEqual(inst.get_uri(), mutant.get_uri())
    self.assertEqual(inst.get_url(), mutant.get_url())
    self.assertEqual(inst.get_method(), mutant.get_method())
    self.assertEqual(inst.get_dc(), mutant.get_dc())
    self.assertEqual(inst.get_var(), mutant.get_var())
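# Illustrative sketch (not taken from the snippets above): the direct Vuln
# construction pattern that the crawl/grep examples share, with placeholder
# values. The import paths follow the usual w3af layout and may differ
# between w3af versions; adjust them to your checkout.
import w3af.core.data.kb.knowledge_base as kb
import w3af.core.data.constants.severity as severity

from w3af.core.data.kb.vuln import Vuln
from w3af.core.data.parsers.doc.url import URL

url = URL('http://target.example/index.php')

desc = 'Example finding at: "%s".' % url

v = Vuln('Example finding',   # finding name shown in reports
         desc,                # human readable description
         severity.LOW,        # severity constant
         1,                   # id(s) of the HTTP response(s) that prove it
         'example_plugin')    # name of the plugin that reported it
v.set_url(url)

# Findings are stored under (plugin name, finding group)
kb.kb.append('example_plugin', 'example', v)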
def _html_in_comment(self, comment, request, response):
    """
    Find HTML code in HTML comments
    """
    html_in_comment = self.HTML_RE.search(comment)

    if html_in_comment is None:
        return

    if (comment, response.get_url()) in self._already_reported:
        return

    # There is HTML code in the comment.
    comment = comment.strip()
    comment = comment.replace('\n', '')
    comment = comment.replace('\r', '')
    comment = comment[:40]

    desc = ('A comment with the string "%s" was found in: "%s".'
            ' This could be interesting.')
    desc %= (comment, response.get_url())

    v = Vuln.from_fr('HTML comment contains HTML code', desc,
                     severity.INFORMATION, response.id, self.get_name(),
                     request)
    v.set_uri(response.get_uri())
    v.add_to_highlight(html_in_comment.group(0))

    om.out.vulnerability(v.get_desc(), severity=severity.INFORMATION)
    kb.kb.append(self, 'html_comment_hides_html', v)
    self._already_reported.add((comment, response.get_url()))
def _check_potential_vhosts(self, fuzzable_request, vhosts):
    """
    Send the HTTP requests to check for potential findings

    :param fuzzable_request: The fuzzable request as received by the plugin
    :param vhosts: A generator yielding potential vhosts to check
    :return: None, vulnerabilities (if any) are written to the KB
    """
    # Get some responses to compare later
    base_url = fuzzable_request.get_url().base_url()
    original_response = self._uri_opener.GET(base_url, cache=True)
    orig_resp_body = original_response.get_body()

    non_existent_responses = self._get_non_exist(fuzzable_request)

    for vhost, vhost_response in self._send_in_threads(base_url, vhosts):
        if not self._response_is_different(vhost_response, orig_resp_body,
                                           non_existent_responses):
            continue

        domain = fuzzable_request.get_url().get_domain()
        desc = (u'Found a new virtual host at the target web server, the'
                u' virtual host name is: "%s". To access this site'
                u' you might need to change your DNS resolution settings'
                u' in order to point "%s" to the IP address of "%s".')
        desc %= (vhost, vhost, domain)

        ids = [vhost_response.id, original_response.id]
        ids.extend([r.id for r in non_existent_responses])

        v = Vuln.from_fr('Virtual host identified', desc, severity.LOW,
                         ids, self.get_name(), fuzzable_request)

        kb.kb.append(self, 'find_vhosts', v)
        om.out.information(v.get_desc())
def _analyze_result(self, mutant, response):
    """
    Analyze results of the _send_mutant method.
    """
    if self._has_bug(mutant):
        return

    if self._header_was_injected(mutant, response):
        desc = 'Response splitting was found at: %s' % mutant.found_at()

        v = Vuln.from_mutant('Response splitting vulnerability', desc,
                             severity.MEDIUM, response.id,
                             self.get_name(), mutant)

        self.kb_append_uniq(self, 'response_splitting', v)

    # When trying to send a response splitting to php 5.1.2 I get:
    # Header may not contain more than a single header, new line detected
    for error in self.HEADER_ERRORS:

        if error in response:
            desc = ('The variable "%s" at URL "%s" modifies the HTTP'
                    ' response headers, but this error was sent while'
                    ' testing for response splitting: "%s".')
            args = (mutant.get_token_name(), mutant.get_url(), error)
            desc = desc % args

            i = Info.from_mutant('Parameter modifies response headers',
                                 desc, response.id, self.get_name(),
                                 mutant)

            self.kb_append_uniq(self, 'response_splitting', i)
            return
def _with_header_echo_injection(self, freq):
    """
    We're sending a payload that will trigger the injection of various
    headers in the HTTP response body.

    :param freq: A FuzzableRequest
    :return: True if a vulnerability was found
    """
    injected_header = 'shellshock'
    injected_value = 'check'
    payload = '() { :;}; echo "%s: %s"' % (injected_header, injected_value)

    mutant = self.create_mutant(freq, TEST_HEADER)
    mutant.set_token_value(payload)

    response = self._uri_opener.send_mutant(mutant)
    header_value, header_name = response.get_headers().iget(injected_header)

    if header_value is not None and injected_value in header_value.lower():
        desc = u'Shell shock was found at: %s' % mutant.found_at()

        v = Vuln.from_mutant(u'Shell shock vulnerability', desc,
                             severity.HIGH, [response.id],
                             self.get_name(), mutant)

        self.kb_append_uniq(self, 'shell_shock', v)
        return True
def _confirm_file_upload(self, path, mutant, http_response, debugging_id):
    """
    Confirms if the file was uploaded to path

    :param path: The URL where we suspect that a file was uploaded to.
    :param mutant: The mutant that originated the file on the remote end
    :param http_response: The HTTP response associated with sending mutant
    """
    response = self._uri_opener.GET(path,
                                    cache=False,
                                    grep=False,
                                    debugging_id=debugging_id)

    if mutant.file_payload not in response.body:
        return

    if self._has_bug(mutant):
        return

    desc = 'A file upload to a directory inside the webroot was found at: %s'
    desc %= mutant.found_at()

    v = Vuln.from_mutant('Insecure file upload', desc, severity.HIGH,
                         [http_response.id, response.id],
                         self.get_name(), mutant)

    v['file_dest'] = response.get_url()
    v['file_vars'] = mutant.get_file_vars()

    self.kb_append_uniq(self, 'file_upload', v)
def end(self):
    # If all URLs implement protection, don't report anything.
    if not self._vuln_count:
        return

    # If none of the URLs implement protection, simply report
    # ONE vulnerability that says that.
    if self._total_count == self._vuln_count:
        desc = 'The whole target web application has no protection (Pragma'\
               ' and Cache-Control headers) against sensitive content'\
               ' caching.'

    # If most of the URLs implement the protection but some don't,
    # report ONE vulnerability saying: "Most are protected, but x, y
    # are not".
    if self._total_count > self._vuln_count:
        desc = 'Some URLs have no protection (Pragma and Cache-Control'\
               ' headers) against sensitive content caching. Among them:\n'
        desc += ' '.join([str(url) + '\n' for url in self._vulns])

    response_ids = [_id for _id in self._ids]

    v = Vuln('Missing cache control for HTTPS content', desc,
             severity.LOW, response_ids, self.get_name())

    self.kb_append_uniq(self, 'cache_control', v, 'URL')

    self._vulns.cleanup()
    self._ids.cleanup()
def end(self):
    # If all URLs implement protection, don't report anything.
    if not self._vuln_count:
        return

    response_ids = [_id for _id in self._ids]

    # If none of the URLs implement protection, simply report
    # ONE vulnerability that says that.
    if self._total_count == self._vuln_count:
        desc = 'The whole target has no protection (X-Frame-Options'\
               ' header) against Click-Jacking attacks'

    # If most of the URLs implement the protection but some don't,
    # report ONE vulnerability saying: "Most are protected, but x, y
    # are not".
    if self._total_count > self._vuln_count:
        desc = 'Some URLs have no protection (X-Frame-Options header)'\
               ' against Click-Jacking attacks. Among them:\n'
        desc += ' '.join([str(url) + '\n' for url in self._vulns])

    v = Vuln('Click-Jacking vulnerability', desc, severity.MEDIUM,
             response_ids, self.get_name())

    self.kb_append(self, 'click_jacking', v)

    self._vulns.cleanup()
    self._ids.cleanup()
def _analyze_persistent(self, freq, response):
    """
    Analyze the response of sending each fuzzable request found by the
    framework, trying to identify any locations where we might have
    injected a payload.

    :param freq: The fuzzable request
    :param response: The HTTP response
    :return: None, vulns are stored in KB
    """
    multi_in_inst = multi_in(self._expected_mutant_dict.keys())

    for matched_expected_result in multi_in_inst.query(response.get_body()):
        # We found one of the expected results, now we search the
        # self._expected_mutant_dict to find which of the mutants sent it
        # and create the vulnerability
        mutant = self._expected_mutant_dict[matched_expected_result]

        desc = ('Server side include (SSI) was found at: %s'
                ' The result of that injection is shown by browsing'
                ' to "%s".')
        desc %= (mutant.found_at(), freq.get_url())

        v = Vuln.from_mutant('Persistent server side include vulnerability',
                             desc, severity.HIGH, response.id,
                             self.get_name(), mutant)
        v.add_to_highlight(matched_expected_result)

        self.kb_append(self, 'ssi', v)
def _analyze_result(self, mutant, response):
    """
    Analyze results of the _send_mutant method.
    """
    #
    #   I will only report the vulnerability once.
    #
    if self._has_no_bug(mutant):

        for error in self.ERROR_STRINGS:
            # Check if the error string is in the response
            if error in response.body and \
               error not in mutant.get_original_response_body():
                desc = 'A possible (detection is really hard...) format' \
                       ' string vulnerability was found at: %s'
                desc = desc % mutant.found_at()

                v = Vuln.from_mutant('Format string vulnerability', desc,
                                     severity.MEDIUM, response.id,
                                     self.get_name(), mutant)
                v.add_to_highlight(error)

                self.kb_append_uniq(self, 'format_string', v)
                break
def _analyze_result(self, mutant, response):
    """
    Analyze results of the _send_mutant method.
    """
    #
    #   I will only report the vulnerability once.
    #
    if self._has_no_bug(mutant):

        ldap_error_list = self._find_ldap_error(response)
        for ldap_error_string in ldap_error_list:
            if ldap_error_string not in mutant.get_original_response_body():
                desc = 'LDAP injection was found at: %s' % mutant.found_at()

                v = Vuln.from_mutant('LDAP injection vulnerability', desc,
                                     severity.HIGH, response.id,
                                     self.get_name(), mutant)
                v.add_to_highlight(ldap_error_string)

                self.kb_append_uniq(self, 'ldapi', v)
                break
def _analyze_result(self, mutant, response):
    """
    Analyze results of the _send_mutant method.
    """
    if self._has_bug(mutant):
        return

    dom = response.get_dom()

    if response.is_text_or_html() and dom is not None:

        elem_list = self._tag_xpath(dom)
        for element in elem_list:

            if 'src' not in element.attrib:
                return []

            src_attr = element.attrib['src']

            for url in self._test_urls:
                if src_attr.startswith(url):
                    # Vuln vuln!
                    desc = 'A phishing vector was found at: %s'
                    desc = desc % mutant.found_at()

                    v = Vuln.from_mutant('Phishing vector', desc,
                                         severity.LOW, response.id,
                                         self.get_name(), mutant)
                    v.add_to_highlight(src_attr)

                    self.kb_append_uniq(self, 'phishing_vector', v)
def discover(self, fuzzable_request):
    """
    Checks if JBoss Interesting Directories exist in the target server.
    Also verifies some vulnerabilities.
    """
    base_url = fuzzable_request.get_url().base_url()

    args_iter = izip(repeat(base_url), self.JBOSS_VULNS)
    otm_send_request = one_to_many(self.send_request)
    response_pool = self.worker_pool.imap_unordered(otm_send_request,
                                                    args_iter)

    for vuln_db_instance, response in response_pool:
        if is_404(response):
            continue

        vuln_url = base_url.url_join(vuln_db_instance['url'])
        name = vuln_db_instance['name']
        desc = vuln_db_instance['desc']

        if vuln_db_instance['type'] == 'info':
            o = Info(name, desc, response.id, self.get_name())
        else:
            o = Vuln(name, desc, severity.LOW, response.id, self.get_name())

        o.set_url(vuln_url)

        kb.kb.append(self, 'find_jboss', o)
        self.output_queue.put(FuzzableRequest(response.get_uri()))
def _analyze_ips(self, ip_address_list, fuzzable_request):
    """
    Search all IP addresses in Bing and determine if they have more than
    one domain hosted on them. Store findings in KB.
    """
    bing_wrapper = bing(self._uri_opener)

    # This is the best way to search, one by one!
    for ip_address in ip_address_list:
        results = bing_wrapper.get_n_results('ip:' + ip_address,
                                             self._result_limit)

        results = [r.URL.base_url() for r in results]
        results = list(set(results))

        # not vuln by default
        is_vulnerable = False

        if len(results) > 1:
            # We may have something...
            is_vulnerable = True

            if len(results) == 2:
                # Maybe we have this case:
                # [Mon 09 Jun 2008 01:08:26 PM ART] - http://216.244.147.14/
                # [Mon 09 Jun 2008 01:08:26 PM ART] - http://www.business.com/
                # Where www.business.com resolves to 216.244.147.14; so we
                # don't really have more than one domain in the same server.
                try:
                    res0 = socket.gethostbyname(results[0].get_domain())
                    res1 = socket.gethostbyname(results[1].get_domain())
                except:
                    pass
                else:
                    if res0 == res1:
                        is_vulnerable = False

        if is_vulnerable:
            desc = 'The web application under test seems to be in a shared' \
                   ' hosting. This list of domains, and the domain of the' \
                   ' web application under test, all point to the same IP' \
                   ' address (%s):\n' % ip_address

            domain_list = kb.kb.raw_read(self, 'domains')

            for url in results:
                domain = url.get_domain()
                desc += '- %s\n' % domain
                domain_list.append(domain)

            kb.kb.raw_write(self, 'domains', domain_list)

            v = Vuln.from_fr('Shared hosting', desc, severity.MEDIUM, 1,
                             self.get_name(), fuzzable_request)
            v['also_in_hosting'] = results

            om.out.vulnerability(desc, severity=severity.MEDIUM)
            kb.kb.append(self, 'shared_hosting', v)
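# Illustrative sketch (standard library only): the duplicate-domain guard used
# in the middle of the snippet above. Two hostnames that resolve to the same
# IP address are treated as a single site, not as evidence of shared hosting.
# The domain names in the usage comment are placeholders.
import socket


def points_to_same_host(domain_a, domain_b):
    try:
        return socket.gethostbyname(domain_a) == socket.gethostbyname(domain_b)
    except socket.error:
        # If either name does not resolve we cannot rule anything out
        return False

# e.g. points_to_same_host('www.example.com', 'example.com')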
def grep(self, request, response):
    """
    Plugin entry point, find leaked public and private keys and report
    them.

    :param request: The HTTP request object.
    :param response: The HTTP response object
    :return: None
    """
    if not response.is_text_or_html():
        return

    if not response.get_code() == 200:
        return

    for _, (key, keypair_type) in self._multi_in.query(response.body):
        desc = u'The URL: "%s" discloses a key of type: "%s"'
        desc %= (response.get_url(), key)

        if keypair_type == self.PUBLIC:
            item = Info('Public key disclosure', desc, response.id,
                        self.get_name())
        elif keypair_type == self.PRIVATE:
            item = Vuln('Private key disclosure', desc, severity.HIGH,
                        response.id, self.get_name())

        item.set_url(response.get_url())
        item.add_to_highlight(key)

        self.kb_append(self, 'keys', item)
def _analyze_echo(self, mutant, response):
    """
    Analyze results of the _send_mutant method that was sent in the
    _with_echo method.
    """
    #
    #   I will only report the vulnerability once.
    #
    if self._has_bug(mutant):
        return

    for file_pattern_match in self._multi_in.query(response.get_body()):

        if file_pattern_match not in mutant.get_original_response_body():
            # Search for the correct command and separator
            sentOs, sentSeparator = self._get_os_separator(mutant)

            desc = 'OS Commanding was found at: %s' % mutant.found_at()

            # Create the vuln obj
            v = Vuln.from_mutant('OS commanding vulnerability', desc,
                                 severity.HIGH, response.id,
                                 self.get_name(), mutant)
            v['os'] = sentOs
            v['separator'] = sentSeparator
            v.add_to_highlight(file_pattern_match)

            self.kb_append_uniq(self, 'os_commanding', v)
            break
def _analyze_result(self, mutant, response):
    """
    Analyze the result of the previously sent request.

    :return: None, save the vuln to the kb.
    """
    # Store the mutants in order to be able to analyze the persistent case
    # later
    expected_results = self._get_expected_results(mutant)

    for expected_result in expected_results:
        self._expected_mutant_dict[expected_result] = mutant

    # Now we analyze the "reflected" case
    if self._has_bug(mutant):
        return

    for expected_result in expected_results:
        if expected_result not in response:
            continue

        if expected_result in mutant.get_original_response_body():
            continue

        desc = 'Server side include (SSI) was found at: %s'
        desc %= mutant.found_at()

        v = Vuln.from_mutant('Server side include vulnerability', desc,
                             severity.HIGH, response.id, self.get_name(),
                             mutant)
        v.add_to_highlight(expected_result)

        self.kb_append_uniq(self, 'ssi', v)
def audit(self, freq, orig_response):
    """
    Tests a URL for ReDoS vulnerabilities using time delays.

    :param freq: A FuzzableRequest
    """
    if self.ignore_this_request(freq):
        return

    fake_mutants = create_mutants(freq, ['', ])

    for mutant in fake_mutants:
        for delay_obj in self.get_delays():

            adc = AproxDelayController(mutant, delay_obj, self._uri_opener,
                                       delay_setting=EXPONENTIALLY)
            success, responses = adc.delay_is_controlled()

            if success:
                # Now I can be sure that I found a vuln, we control the
                # response time with the delay
                desc = 'ReDoS was found at: %s' % mutant.found_at()
                response_ids = [r.id for r in responses]

                v = Vuln.from_mutant('ReDoS vulnerability', desc,
                                     severity.MEDIUM, response_ids,
                                     self.get_name(), mutant)

                self.kb_append_uniq(self, 'redos', v)
                break
def _parse_zone_h_result(self, response):
    """
    Parse the result from the zone_h site and create the corresponding
    info objects.

    :return: None
    """
    #
    #   I'm going to do only one big "if":
    #
    #       - The target site was hacked more than one time
    #       - The target site was hacked only one time
    #
    # This is the string I have to parse. In the zone_h response there
    # are two like this, the first one has to be ignored!
    regex = 'Total notifications: <b>(\d*)</b> of which <b>(\d*)</b> single ip and <b>(\d*)</b> mass'
    regex_result = re.findall(regex, response.get_body())

    try:
        total_attacks = int(regex_result[0][0])
    except IndexError:
        om.out.debug('An error was generated during the parsing of the'
                     ' zone_h website.')
    else:
        # Do the if...
        if total_attacks > 1:
            desc = 'The target site was defaced more than one time in the' \
                   ' past. For more information please visit the following' \
                   ' URL: "%s".' % response.get_url()

            v = Vuln('Previous defacements', desc, severity.MEDIUM,
                     response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'defacements', v)
            om.out.information(v.get_desc())
        elif total_attacks == 1:
            desc = 'The target site was defaced in the past. For more' \
                   ' information please visit the following URL: "%s".'
            desc = desc % response.get_url()

            i = Info('Previous defacements', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())

            kb.kb.append(self, 'defacements', i)
            om.out.information(i.get_desc())
def _handle_result(self, url, response_id, json_result):
    """
    Write a result to the KB.

    :param url: The URL associated with this result
    :param json_result: A finding from retirejs JSON document
    :return: None, everything is written to the KB.
    """
    version = json_result.get('version', None)
    component = json_result.get('component', None)
    vulnerabilities = json_result.get('vulnerabilities', [])

    if version is None or component is None:
        om.out.debug('The retirejs generated JSON document is invalid.'
                     ' Either the version or the component attribute is'
                     ' missing. Will ignore this result and continue with'
                     ' the next.')
        return

    if not vulnerabilities:
        om.out.debug('The retirejs generated JSON document is invalid. No'
                     ' vulnerabilities were found. Will ignore this result'
                     ' and continue with the next.')
        return

    message = VulnerabilityMessage(url, component, version)

    for vulnerability in vulnerabilities:
        vuln_severity = vulnerability.get('severity', 'unknown')
        summary = vulnerability.get('identifiers', {}).get('summary', 'unknown')
        info_urls = vulnerability.get('info', [])

        retire_vuln = RetireJSVulnerability(vuln_severity, summary, info_urls)
        message.add_vulnerability(retire_vuln)

    desc = message.to_string()
    real_severity = message.get_severity()

    v = Vuln('Vulnerable JavaScript library in use', desc, real_severity,
             response_id, self.get_name())
    v.set_uri(url)

    self.kb_append_uniq(self, 'js', v, filter_by='URL')