def _fingerprint_data(self, domain_path, wp_unique_url, response):
    '''
    Find wordpress version from data
    '''
    for wp_fingerprint in self._get_wp_fingerprints():
        # The URL in the XML is relative AND it has two different
        # variables that we need to replace:
        #        $wp-content$    ->    wp-content/
        #        $wp-plugins$    ->    wp-content/plugins/
        path = wp_fingerprint.filepath
        path = path.replace('$wp-content$', 'wp-content/')
        path = path.replace('$wp-plugins$', 'wp-content/plugins/')
        test_url = domain_path.url_join(path)

        response = self._uri_opener.GET(test_url, cache=True)
        response_hash = hashlib.md5(response.get_body()).hexdigest()

        if response_hash == wp_fingerprint.hash:
            version = wp_fingerprint.version

            # Save it to the kb!
            desc = 'WordPress version "%s" fingerprinted by matching known md5'\
                   ' hashes to HTTP responses of static resources available at'\
                   ' the remote WordPress install.'
            desc = desc % version

            i = Info('Fingerprinted Wordpress version', desc, response.id,
                     self.get_name())
            i.set_url(test_url)

            kb.kb.append(self, 'info', i)
            om.out.information(i.get_desc())
            break
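
# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the md5-fingerprinting technique used
# in _fingerprint_data() above, written against plain urllib2 instead of
# w3af's _uri_opener. The (filepath, md5, version) tuple format mirrors the
# wp_fingerprint objects; the function and parameter names are hypothetical.
# ---------------------------------------------------------------------------
def example_md5_fingerprint(base_url, fingerprints):
    import hashlib
    import urllib2

    for filepath, known_md5, version in fingerprints:
        try:
            body = urllib2.urlopen(base_url + filepath).read()
        except urllib2.URLError:
            continue

        # A static resource whose hash matches a known release identifies
        # the installed version
        if hashlib.md5(body).hexdigest() == known_md5:
            return version

    return None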
def _force_disclosures(self, domain_path, potentially_vulnerable_paths):
    '''
    :param domain_path: The path to wordpress' root directory
    :param potentially_vulnerable_paths: A list with the paths I'll URL-join
                                         with @domain_path, GET and parse.
    '''
    for pvuln_path in potentially_vulnerable_paths:
        pvuln_url = domain_path.url_join(pvuln_path)
        response = self._uri_opener.GET(pvuln_url, cache=True)

        if is_404(response):
            continue

        response_body = response.get_body()
        if 'Fatal error: ' in response_body:
            desc = 'The response body for "%s" contains a PHP "Fatal error"'\
                   ' message that discloses the full path where WordPress'\
                   ' is installed.'
            desc = desc % pvuln_url

            i = Info('WordPress path disclosure', desc, response.id,
                     self.get_name())
            i.set_url(pvuln_url)

            kb.kb.append(self, 'info', i)
            om.out.information(i.get_desc())
            break
def discover(self, fuzzable_request):
    '''
    :param fuzzable_request: A fuzzable_request instance that contains
                             (among other things) the URL to test.
    '''
    root_domain = fuzzable_request.get_url().get_root_domain()

    pks_se = pks(self._uri_opener)
    results = pks_se.search(root_domain)

    pks_url = 'http://pgp.mit.edu:11371/'

    for result in results:
        mail = result.username + '@' + root_domain

        desc = 'The mail account: "%s" was found at: "%s".'
        desc = desc % (mail, pks_url)

        i = Info('Email account', desc, result.id, self.get_name())
        i.set_url(URL(pks_url))
        i['mail'] = mail
        i['user'] = result.username
        i['name'] = result.name
        i['url_list'] = [URL(pks_url), ]

        kb.kb.append('emails', 'emails', i)
        # Don't save duplicated information in the KB. It's useless.
        #kb.kb.append(self, 'emails', i)

        om.out.information(i.get_desc())
def _match_cookie_fingerprint(self, request, response, cookie_obj):
    '''
    Now we analyze the cookie and try to guess the remote web server or
    programming framework based on the cookie that was sent.

    :return: True if the cookie was fingerprinted
    '''
    cookie_obj_str = cookie_obj.output(header='')

    for cookie_str_db, system_name in self.COOKIE_FINGERPRINT:
        if cookie_str_db in cookie_obj_str:
            if system_name not in self._already_reported_server:
                desc = 'A cookie matching the cookie fingerprint DB'\
                       ' has been found when requesting "%s".'\
                       ' The remote platform is: "%s".'
                desc = desc % (response.get_url(), system_name)

                i = Info('Identified cookie', desc, response.id,
                         self.get_name())
                i.set_url(response.get_url())
                i['httpd'] = system_name

                self._set_cookie_to_rep(i, cobj=cookie_obj)

                kb.kb.append(self, 'security', i)
                self._already_reported_server.append(system_name)
                return True

    return False
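
# ---------------------------------------------------------------------------
# A tiny illustration of the substring-based matching performed above. The
# (substring, platform name) format mirrors COOKIE_FINGERPRINT; the sample
# entries and function name are hypothetical.
# ---------------------------------------------------------------------------
EXAMPLE_COOKIE_FINGERPRINT = [('PHPSESSID', 'PHP'),
                              ('ASPSESSIONID', 'ASP'),
                              ('JSESSIONID', 'Java')]

def example_match_cookie(cookie_str):
    for substring, system_name in EXAMPLE_COOKIE_FINGERPRINT:
        if substring in cookie_str:
            return system_name
    return None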
def _content_location_not_300(self, request, response):
    """
    Check if the response sends the content-location header together with
    a response code in the 301-309 range.

    :return: None, all results are saved in the kb.
    """
    headers = response.get_lower_case_headers()

    if "content-location" in headers and 300 < response.get_code() < 310:
        desc = (
            'The URL: "%s" sent the HTTP header: "content-location"'
            ' with value: "%s" in an HTTP response with code %s, which'
            " is a violation of the RFC."
        )
        desc = desc % (
            response.get_url(),
            headers["content-location"],
            response.get_code(),
        )
        i = Info("Content-Location HTTP header anomaly", desc,
                 response.id, self.get_name())
        i.set_url(response.get_url())
        i.add_to_highlight("content-location")

        kb.kb.append(self, "anomaly", i)
def grep(self, request, response):
    """
    Plugin entry point.

    :param request: The HTTP request object.
    :param response: The HTTP response object
    :return: None, all results are saved in the kb.
    """
    url = response.get_url()
    if response.is_text_or_html() and url not in self._already_inspected:

        # Don't repeat URLs
        self._already_inspected.add(url)

        dom = response.get_dom()
        # In some strange cases, we fail to normalize the document
        if dom is None:
            return

        script_elements = self._script_xpath(dom)
        for element in script_elements:
            # returns the text between <script> and </script>
            script_content = element.text

            if script_content is not None:
                res = self._ajax_regex_re.search(script_content)
                if res:
                    desc = 'The URL: "%s" has AJAX code.' % url
                    i = Info("AJAX code", desc, response.id,
                             self.get_name())
                    i.set_url(url)
                    i.add_to_highlight(res.group(0))

                    self.kb_append_uniq(self, "ajax", i, "URL")
def _fingerprint_meta(self, domain_path, wp_unique_url, response):
    '''
    Check if the wp version is in the index header.
    '''
    # Main scan URL passed from w3af + wp index page
    wp_index_url = domain_path.url_join('index.php')
    response = self._uri_opener.GET(wp_index_url, cache=True)

    # Find the generator meta tag in the response HTML
    find = r'<meta name="generator" content="[Ww]ord[Pp]ress (\d\.\d\.?\d?)" />'
    m = re.search(find, response.get_body())

    # If the string was found, extract the version group
    if m:
        version = m.group(1)

        # Save it to the kb!
        desc = 'WordPress version "%s" found in the index header.'
        desc = desc % version

        i = Info('Fingerprinted Wordpress version', desc, response.id,
                 self.get_name())
        i.set_url(wp_index_url)

        kb.kb.append(self, 'info', i)
        om.out.information(i.get_desc())
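
# ---------------------------------------------------------------------------
# A short standalone illustration of the generator meta tag extraction that
# _fingerprint_meta() performs. The regex is the same one used above; the
# function name and HTML sample are hypothetical.
# ---------------------------------------------------------------------------
def example_generator_version(html):
    import re
    find = r'<meta name="generator" content="[Ww]ord[Pp]ress (\d\.\d\.?\d?)" />'
    m = re.search(find, html)
    return m.group(1) if m else None

# example_generator_version('<meta name="generator" content="WordPress 3.5.1" />')
# would return '3.5.1'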
def _test_DNS(self, original_response, dns_wildcard_url):
    '''
    Check if http://www.domain.tld/ == http://domain.tld/
    '''
    headers = Headers([('Host', dns_wildcard_url.get_domain())])
    try:
        modified_response = self._uri_opener.GET(
            original_response.get_url(), cache=True, headers=headers)
    except w3afException:
        return
    else:
        if relative_distance_lt(modified_response.get_body(),
                                original_response.get_body(), 0.35):
            desc = 'The target site has NO DNS wildcard, and the contents'\
                   ' of "%s" differ from the contents of "%s".'
            desc = desc % (dns_wildcard_url, original_response.get_url())

            i = Info('No DNS wildcard', desc, modified_response.id,
                     self.get_name())
            i.set_url(dns_wildcard_url)

            kb.kb.append(self, 'dns_wildcard', i)
            om.out.information(i.get_desc())
        else:
            desc = 'The target site has a DNS wildcard configuration, the'\
                   ' contents of "%s" are equal to the ones of "%s".'
            desc = desc % (dns_wildcard_url, original_response.get_url())

            i = Info('DNS wildcard', desc, modified_response.id,
                     self.get_name())
            i.set_url(original_response.get_url())

            kb.kb.append(self, 'dns_wildcard', i)
            om.out.information(i.get_desc())
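
# ---------------------------------------------------------------------------
# relative_distance_lt() returns True when the similarity between two bodies
# is below the given threshold. A rough equivalent, assuming difflib instead
# of w3af's optimized implementation; the function name is hypothetical.
# ---------------------------------------------------------------------------
def example_relative_distance_lt(body_a, body_b, threshold):
    import difflib
    ratio = difflib.SequenceMatcher(None, body_a, body_b).ratio()
    # True means "these bodies differ significantly", which in _test_DNS()
    # is interpreted as "no DNS wildcard is configured"
    return ratio < threshold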
def grep(self, request, response):
    '''
    Plugin entry point. Parse the object tags.

    :param request: The HTTP request object.
    :param response: The HTTP response object
    :return: None
    '''
    url = response.get_url()
    dom = response.get_dom()

    if response.is_text_or_html() and dom is not None \
            and url not in self._already_analyzed:

        self._already_analyzed.add(url)

        elem_list = self._tag_xpath(dom)
        for element in elem_list:
            tag_name = element.tag

            desc = 'The URL: "%s" has an "%s" tag. We recommend you'\
                   ' download the client side code and analyze it manually.'
            desc = desc % (response.get_uri(), tag_name)

            i = Info('Browser plugin content', desc, response.id,
                     self.get_name())
            i.set_url(url)
            i.add_to_highlight(tag_name)

            self.kb_append_uniq(self, tag_name, i, 'URL')
def grep(self, request, response):
    '''
    Plugin entry point.

    :param request: The HTTP request object.
    :param response: The HTTP response object
    :return: None, all results are saved in the kb.
    '''
    url = response.get_url()
    if response.is_text_or_html() and url not in self._already_inspected:

        # Don't repeat URLs
        self._already_inspected.add(url)

        if self.symfony_detected(response):
            dom = response.get_dom()
            if dom is not None and not self.csrf_detected(dom):
                desc = 'The URL: "%s" seems to be generated by the'\
                       ' Symfony framework and contains a form that'\
                       ' perhaps has CSRF protection disabled.'
                desc = desc % url

                i = Info('Symfony Framework with CSRF protection disabled',
                         desc, response.id, self.get_name())
                i.set_url(url)

                self.kb_append_uniq(self, 'symfony', i, 'URL')
def _parse_document(self, response):
    '''
    Parses the HTML and adds the mail addresses to the kb.
    '''
    try:
        document_parser = parser_cache.dpc.get_document_parser_for(response)
    except w3afException:
        # Failed to find a suitable parser for the document
        pass
    else:
        # Search for email addresses
        for mail in document_parser.get_emails(self._domain_root):
            if mail not in self._accounts:
                self._accounts.append(mail)

                desc = 'The mail account: "%s" was found at: "%s".'
                desc = desc % (mail, response.get_uri())

                i = Info('Email account', desc, response.id,
                         self.get_name())
                i.set_url(response.get_uri())
                i['mail'] = mail
                i['user'] = mail.split('@')[0]
                i['url_list'] = [response.get_uri(), ]

                self.kb_append('emails', 'emails', i)
                self.kb_append(self, 'emails', i)
def _analyze_methods(self, url, allowed_methods, id_list):
    # Check for DAV
    if set(allowed_methods).intersection(self.DAV_METHODS):
        # DAV is enabled!
        # Save the results in the KB so that other plugins can use this
        # information
        desc = 'The URL "%s" has the following allowed methods. These'\
               ' include DAV methods and should be disabled: %s'
        desc = desc % (url, ', '.join(allowed_methods))

        i = Info('DAV methods enabled', desc, id_list, self.get_name())
        i.set_url(url)
        i['methods'] = allowed_methods

        kb.kb.append(self, 'dav-methods', i)
    else:
        # Save the results in the KB so that other plugins can use this
        # information. Do not remove this information; other plugins
        # REALLY use it!
        desc = 'The URL "%s" has the following enabled HTTP methods: %s'
        desc = desc % (url, ', '.join(allowed_methods))

        i = Info('Allowed HTTP methods', desc, id_list, self.get_name())
        i.set_url(url)
        i['methods'] = allowed_methods

        kb.kb.append(self, 'methods', i)
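
# ---------------------------------------------------------------------------
# A hypothetical sketch of where the allowed_methods list analyzed above can
# come from: the Allow header of an OPTIONS response. The DAV method set
# below is an illustrative subset of WebDAV verbs; the plugin's actual
# DAV_METHODS set may differ.
# ---------------------------------------------------------------------------
EXAMPLE_DAV_METHODS = set(['PROPFIND', 'PROPPATCH', 'MKCOL', 'COPY',
                           'MOVE', 'LOCK', 'UNLOCK'])

def example_parse_allow_header(allow_header_value):
    # 'GET, HEAD, OPTIONS, PROPFIND' -> ['GET', 'HEAD', 'OPTIONS', 'PROPFIND']
    methods = [m.strip().upper() for m in allow_header_value.split(',')
               if m.strip()]
    dav_enabled = bool(set(methods).intersection(EXAMPLE_DAV_METHODS))
    return methods, dav_enabled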
def grep(self, request, response):
    '''
    Plugin entry point, verify if the HTML has a form with file uploads.

    :param request: The HTTP request object.
    :param response: The HTTP response object
    :return: None
    '''
    url = response.get_url()

    if response.is_text_or_html() and url not in self._already_inspected:
        self._already_inspected.add(url)

        dom = response.get_dom()
        # In some strange cases, we fail to normalize the document
        if dom is not None:

            # Loop through file input tags
            for input_file in self._file_input_xpath(dom):
                msg = 'The URL: "%s" has a form with file upload'\
                      ' capabilities.'
                msg = msg % url

                i = Info('File upload form', msg, response.id,
                         self.get_name())
                i.set_url(url)
                to_highlight = etree.tostring(input_file)
                i.add_to_highlight(to_highlight)

                self.kb_append_uniq(self, 'file_upload', i, 'URL')
def _analyze_author(self, response, frontpage_author):
    '''
    Analyze the author URL.

    :param response: The http response object for the _vti_inf file.
    :param frontpage_author: A regex match object.
    :return: None. All the info is saved to the kb.
    '''
    author_location = response.get_url().get_domain_path().url_join(
        frontpage_author.group(1))

    # Check for anomalies in the location of author.exe
    if frontpage_author.group(1) != '_vti_bin/_vti_aut/author.exe':
        name = 'Customized frontpage configuration'

        desc = 'The FPAuthorScriptUrl is at: "%s" instead of the default'\
               ' location: "/_vti_bin/_vti_aut/author.exe". This is very'\
               ' uncommon.'
        desc = desc % author_location
    else:
        name = 'FrontPage FPAuthorScriptUrl'

        desc = 'The FPAuthorScriptUrl is at: "%s".'
        desc = desc % author_location

    i = Info(name, desc, response.id, self.get_name())
    i.set_url(author_location)
    i['FPAuthorScriptUrl'] = author_location

    kb.kb.append(self, 'frontpage_version', i)
    om.out.information(i.get_desc())
def grep(self, request, response):
    '''
    Plugin entry point, find the blank bodies and report them.

    :param request: The HTTP request object.
    :param response: The HTTP response object
    :return: None
    '''
    if response.get_body() == '' and request.get_method() in self.METHODS \
            and response.get_code() not in self.HTTP_CODES \
            and 'location' not in response.get_lower_case_headers() \
            and response.get_url() not in self._already_reported:

        # Report this information only once
        self._already_reported.add(response.get_url())

        desc = 'The URL: "%s" returned an empty body, this could indicate'\
               ' an application error.'
        desc = desc % response.get_url()

        i = Info('Blank http response body', desc, response.id,
                 self.get_name())
        i.set_url(response.get_url())

        self.kb_append(self, 'blank_body', i)
def _do_request(self, url, mutant):
    '''
    Perform a simple GET to see if the result is an error or not, and then
    run the actual fuzzing.
    '''
    response = self._uri_opener.GET(mutant, cache=True,
                                    headers=self._headers)

    if not (is_404(response) or
            response.get_code() in (403, 401) or
            self._return_without_eval(mutant)):

        for fr in self._create_fuzzable_requests(response):
            self.output_queue.put(fr)

        #
        #   Save it to the kb (if new)!
        #
        if response.get_url() not in self._seen and \
           response.get_url().get_file_name():

            desc = 'A potentially interesting file was found at: "%s".'
            desc = desc % response.get_url()

            i = Info('Potentially interesting file', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())

            kb.kb.append(self, 'files', i)
            om.out.information(i.get_desc())

            # Report only once
            self._seen.add(response.get_url())
def _do_request(self, mutated_url, user):
    '''
    Perform the request and compare.

    :return: The HTTP response id if the mutated_url is a web user
             directory, None otherwise.
    '''
    response = self._uri_opener.GET(mutated_url, cache=True,
                                    headers=self._headers)
    path = mutated_url.get_path()
    response_body = response.get_body().replace(path, '')

    if relative_distance_lt(response_body, self._non_existent, 0.7):

        # Avoid duplicates
        if user not in [u['user'] for u in kb.kb.get('user_dir', 'users')]:
            desc = 'A user directory was found at: %s'
            desc = desc % response.get_url()

            i = Info('Web user home directory', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())
            i['user'] = user

            kb.kb.append(self, 'users', i)

            for fr in self._create_fuzzable_requests(response):
                self.output_queue.put(fr)

        return response.id

    return None
def grep(self, request, response):
    '''
    Plugin entry point, search for the user defined regex.

    :param request: The HTTP request object.
    :param response: The HTTP response object
    :return: None
    '''
    if self._all_in_one is None:
        return

    if not response.is_text_or_html():
        return

    # TODO: Verify that this is really a performance improvement
    html_string = response.get_body()
    if not self._all_in_one.search(html_string):
        return

    # One of them is in there, now we need to find out which one
    for index, regex_tuple in enumerate(self._regexlist_compiled):

        regex, info_inst = regex_tuple
        match_object = regex.search(html_string)

        if match_object:
            with self._plugin_lock:
                # Don't change the next line to "if info_inst:": a fresh
                # Info object behaves like an empty dict, which evaluates
                # to False, but it is not the same as None
                if info_inst is not None:
                    ids = info_inst.get_id()
                    ids.append(response.id)
                    info_inst.set_id(ids)
                else:
                    str_match = match_object.group(0)
                    if len(str_match) > 20:
                        str_match = str_match[:20] + '...'

                    desc = 'User defined regular expression "%s" matched a'\
                           ' response. The matched string is: "%s".'
                    desc = desc % (regex.pattern, str_match)

                    info_inst = Info('User defined regular expression match',
                                     desc, response.id, self.get_name())
                    info_inst.set_url(response.get_url())

                    om.out.information(desc)
                    self.kb_append_uniq(self, 'user_defined_regex',
                                        info_inst, 'URL')

                # Save the info_inst
                self._regexlist_compiled[index] = (regex, info_inst)
def _PUT(self, domain_path):
    '''
    Tests the PUT method.
    '''
    # Upload
    url = domain_path.url_join(rand_alpha(5))
    rnd_content = rand_alnum(6)
    put_response = self._uri_opener.PUT(url, data=rnd_content)

    # Check if uploaded
    res = self._uri_opener.GET(url, cache=True)
    if res.get_body() == rnd_content:
        msg = 'File upload with HTTP PUT method was found at resource:'\
              ' "%s". A test file was uploaded to: "%s".'
        msg = msg % (domain_path, res.get_url())

        v = Vuln('Insecure DAV configuration', msg, severity.HIGH,
                 [put_response.id, res.id], self.get_name())
        v.set_url(url)
        v.set_method('PUT')

        self.kb_append(self, 'dav', v)

    # Report some common errors
    elif put_response.get_code() == 500:
        msg = 'DAV seems to be incorrectly configured. The web server'\
              ' answered with a 500 error code. In most cases, this means'\
              ' that the DAV extension failed in some way. This error was'\
              ' found at: "%s".' % put_response.get_url()

        i = Info('DAV incorrect configuration', msg, res.id,
                 self.get_name())
        i.set_url(url)
        i.set_method('PUT')

        self.kb_append(self, 'dav', i)

    # Report some common errors
    elif put_response.get_code() == 403:
        msg = 'DAV seems to be correctly configured and allowing you to'\
              ' use the PUT method, but the directory does not have the'\
              ' correct permissions that would allow the web server to'\
              ' write to it. This error was found at: "%s".'
        msg = msg % put_response.get_url()

        i = Info('DAV incorrect configuration', msg,
                 [put_response.id, res.id], self.get_name())
        i.set_url(url)
        i.set_method('PUT')

        self.kb_append(self, 'dav', i)
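
# ---------------------------------------------------------------------------
# The PUT test above boils down to: upload a small random file, then GET it
# back and compare. A minimal sketch with httplib; the host/path parameters
# and function name are hypothetical.
# ---------------------------------------------------------------------------
def example_put_upload_check(host, path, content):
    import httplib

    conn = httplib.HTTPConnection(host)
    conn.request('PUT', path, content)
    conn.getresponse().read()
    conn.close()

    conn = httplib.HTTPConnection(host)
    conn.request('GET', path)
    served = conn.getresponse().read()
    conn.close()

    # True when the server stored and now serves our uploaded content
    return served == content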
def _report_no_realm(self, response):
    # Report this strange case
    desc = 'The resource: "%s" requires authentication (HTTP Code'\
           ' 401) but the www-authenticate header is not present.'\
           ' This requires human verification.'
    desc = desc % response.get_url()

    i = Info('Authentication without www-authenticate header', desc,
             response.id, self.get_name())
    i.set_url(response.get_url())

    kb.kb.append(self, 'non_rfc_auth', i)
    om.out.information(i.get_desc())
def _ssl_info_to_kb(self, url, domain):
    cert, cert_der, cipher = self._get_cert(url, domain)

    # Print the SSL information to the log
    desc = 'This is the information about the SSL certificate used for'\
           ' the %s site:\n%s' % (domain,
                                  self._dump_ssl_info(cert, cert_der,
                                                      cipher))
    om.out.information(desc)

    i = Info('SSL Certificate dump', desc, 1, self.get_name())
    i.set_url(url)

    self.kb_append(self, 'certificate', i)
def _analyze_crossdomain_clientaccesspolicy(self, url, response, file_name):
    try:
        dom = xml.dom.minidom.parseString(response.get_body())
    except Exception:
        # Report this, it may be interesting for the final user
        # not a vulnerability per-se... but... it's information after all
        if 'allow-access-from' in response.get_body() or \
           'cross-domain-policy' in response.get_body() or \
           'cross-domain-access' in response.get_body():

            desc = 'The "%s" file at: "%s" is not a valid XML.'
            desc = desc % (file_name, response.get_url())

            i = Info('Invalid RIA settings file', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())

            kb.kb.append(self, 'info', i)
            om.out.information(i.get_desc())
    else:
        if file_name == 'crossdomain.xml':
            url_list = dom.getElementsByTagName('allow-access-from')
            attribute = 'domain'
        elif file_name == 'clientaccesspolicy.xml':
            url_list = dom.getElementsByTagName('domain')
            attribute = 'uri'
        else:
            # Only the two known RIA settings files are handled
            return

        for access_node in url_list:
            access_url = access_node.getAttribute(attribute)

            if access_url == '*':
                desc = 'The "%s" file at "%s" allows flash/silverlight'\
                       ' access from any site.'
                desc = desc % (file_name, response.get_url())

                v = Vuln('Insecure RIA settings', desc, severity.LOW,
                         response.id, self.get_name())
                v.set_url(response.get_url())
                v.set_method('GET')

                kb.kb.append(self, 'vuln', v)
                om.out.vulnerability(v.get_desc(),
                                     severity=v.get_severity())
            else:
                desc = 'The "%s" file at "%s" allows flash/silverlight'\
                       ' access from "%s".'
                desc = desc % (file_name, response.get_url(), access_url)

                i = Info('Cross-domain allow ACL', desc, response.id,
                         self.get_name())
                i.set_url(response.get_url())
                i.set_method('GET')

                kb.kb.append(self, 'info', i)
                om.out.information(i.get_desc())
def _report_finding(self, response):
    '''
    Save the finding to the kb.

    :param response: The response that triggered the detection
    '''
    desc = 'The remote web server seems to have a reverse proxy installed.'

    i = Info('Reverse proxy identified', desc, response.id, self.get_name())
    i.set_url(response.get_url())

    kb.kb.append(self, 'detect_reverse_proxy', i)
    om.out.information(i.get_desc())
def analyze_disco(self, request, response):
    for disco_string in self._disco_strings:
        if disco_string in response:
            desc = 'The URL: "%s" is a DISCO file that contains references'\
                   ' to WSDL URLs.'
            desc = desc % response.get_url()

            i = Info('DISCO resource', desc, response.id, self.get_name())
            i.set_url(response.get_url())
            i.add_to_highlight(disco_string)

            self.kb_append_uniq(self, 'disco', i, 'URL')
            break
def _analyze_gears_manifest(self, url, response, file_name):
    if '"entries":' in response:
        # Save it to the kb!
        desc = 'A gears manifest file was found at: "%s".'\
               ' Each file should be manually reviewed for sensitive'\
               ' information that may get cached on the client.'
        desc = desc % url

        i = Info('Gears manifest resource', desc, response.id,
                 self.get_name())
        i.set_url(url)

        kb.kb.append(self, url, i)
        om.out.information(i.get_desc())
def _kb_info_user(self, url, response_id, username):
    '''
    Put the user in the kb.

    :return: None, everything is saved in the kb.
    '''
    desc = 'WordPress user "%s" found during username enumeration.'
    desc = desc % username

    i = Info('Identified WordPress user', desc, response_id,
             self.get_name())
    i.set_url(url)

    kb.kb.append(self, 'users', i)
    om.out.information(i.get_desc())
def _identify_with_bruteforce(self, url):
    id_list = []
    allowed_methods = []

    #
    # Before doing anything else, send a request with a non-existent
    # method; if that request succeeds, then all of them will...
    #
    non_exist_response = self._uri_opener.ARGENTINA(url)
    get_response = self._uri_opener.GET(url)

    if non_exist_response.get_code() not in self.BAD_CODES \
            and get_response.get_body() == non_exist_response.get_body():

        desc = 'The remote Web server has a custom configuration, in'\
               ' which any not implemented methods that are invoked are'\
               ' defaulted to GET instead of returning a "Not Implemented"'\
               ' response.'
        response_ids = [non_exist_response.get_id(), get_response.get_id()]

        i = Info('Non existent methods default to GET', desc, response_ids,
                 self.get_name())
        i.set_url(url)

        kb.kb.append(self, 'custom-configuration', i)

        #
        # It makes no sense to continue working, all methods will
        # appear as enabled because of this custom configuration.
        #
        return [], [non_exist_response.id, get_response.id]

    # 'DELETE' is not tested! I don't want to remove anything...
    # 'PUT' is not tested! I don't want to overwrite anything...
    methods_to_test = self._supported_methods.copy()

    # Remove dangerous methods.
    methods_to_test.remove('DELETE')
    methods_to_test.remove('PUT')

    for method in methods_to_test:
        method_functor = getattr(self._uri_opener, method)
        try:
            response = method_functor(url)
        except Exception:
            pass
        else:
            code = response.get_code()
            if code not in self.BAD_CODES:
                allowed_methods.append(method)
                id_list.append(response.id)

    return allowed_methods, id_list
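
# ---------------------------------------------------------------------------
# The baseline trick used above, sketched with httplib: send a request with a
# bogus HTTP method (the plugin uses "ARGENTINA") and compare it against GET.
# If both return the same body, the server maps unknown methods to GET and a
# method brute force would only produce false positives. Names hypothetical.
# ---------------------------------------------------------------------------
def example_defaults_to_get(host, path):
    import httplib

    conn = httplib.HTTPConnection(host)
    conn.request('ARGENTINA', path)
    bogus_body = conn.getresponse().read()
    conn.close()

    conn = httplib.HTTPConnection(host)
    conn.request('GET', path)
    get_body = conn.getresponse().read()
    conn.close()

    return bogus_body == get_body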
def analyze_wsdl(self, request, response):
    match_list = self._multi_in.query(response.body)
    if match_list:
        desc = 'The URL: "%s" is a Web Services Description Language'\
               ' page. This requires manual analysis to determine the'\
               ' security of the web service.'
        desc = desc % response.get_url()

        i = Info('WSDL resource', desc, response.id, self.get_name())
        i.set_url(response.get_url())
        i.add_to_highlight(*match_list)

        self.kb_append_uniq(self, 'wsdl', i, 'URL')
def grep(self, request, response):
    '''
    Plugin entry point, test the existence of HTML auto-completable forms
    that contain password-type inputs, i.e. forms where the autocomplete
    attribute is either not present or not set to 'off'.

    :param request: The HTTP request object.
    :param response: The HTTP response object
    :return: None, all results are saved in the kb.
    '''
    url = response.get_url()
    dom = response.get_dom()

    if response.is_text_or_html() and dom is not None \
            and url not in self._already_inspected:

        self._already_inspected.add(url)

        autocompletable = lambda inp: inp.get('autocomplete',
                                              'on').lower() != 'off'

        # Loop through "auto-completable" forms
        for form in self._autocomplete_forms_xpath(dom):

            passwd_inputs = self._pwd_input_xpath(form)

            # Test the existence of password-type inputs and verify that
            # all inputs are autocompletable
            if passwd_inputs and \
               all(map(autocompletable,
                       chain(passwd_inputs, self._text_input_xpath(form)))):

                desc = 'The URL: "%s" has a "<form>" element with'\
                       ' auto-complete enabled.'
                desc = desc % url

                i = Info('Auto-completable form', desc, response.id,
                         self.get_name())
                i.set_url(url)
                form_str = etree.tostring(form)
                to_highlight = form_str[:form_str.find('>') + 1]
                i.add_to_highlight(to_highlight)

                # Store and print
                kb.kb.append(self, 'form_autocomplete', i)
                om.out.information(desc)

                break
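
# ---------------------------------------------------------------------------
# The compiled XPath expressions behind the detection above live elsewhere in
# the plugin; hypothetical equivalents built with lxml could look like this
# (the translate() calls make the attribute comparisons case-insensitive):
# ---------------------------------------------------------------------------
from lxml import etree

# Forms that do not explicitly set autocomplete="off"
example_autocomplete_forms_xpath = etree.XPath(
    '//form[not(translate(@autocomplete, "OF", "of") = "off")]')

# Password / text inputs inside a given form element
example_pwd_input_xpath = etree.XPath(
    './/input[translate(@type, "PASWORD", "pasword") = "password"]')
example_text_input_xpath = etree.XPath(
    './/input[translate(@type, "TEX", "tex") = "text"]')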
def _parse_zone_h_result(self, response):
    '''
    Parse the result from the zone_h site and create the corresponding
    info objects.

    :return: None
    '''
    #
    # I'm going to do only one big "if":
    #
    #     - The target site was hacked more than one time
    #     - The target site was hacked only one time
    #
    # This is the string to parse; the zone_h response contains two
    # strings like this one, and the first has to be ignored!
    regex = r'Total notifications: <b>(\d*)</b> of which <b>(\d*)</b> single ip and <b>(\d*)</b> mass'
    regex_result = re.findall(regex, response.get_body())

    try:
        total_attacks = int(regex_result[0][0])
    except IndexError:
        om.out.debug('An error occurred while parsing the zone_h website.')
    else:
        # Do the if...
        if total_attacks > 1:
            desc = 'The target site was defaced more than one time in the'\
                   ' past. For more information please visit the following'\
                   ' URL: "%s".' % response.get_url()

            v = Vuln('Previous defacements', desc, severity.MEDIUM,
                     response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'defacements', v)
            om.out.information(v.get_desc())
        elif total_attacks == 1:
            desc = 'The target site was defaced in the past. For more'\
                   ' information please visit the following URL: "%s".'
            desc = desc % response.get_url()

            i = Info('Previous defacements', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())

            kb.kb.append(self, 'defacements', i)
            om.out.information(i.get_desc())
def crawl(self, fuzzable_request):
    '''
    Get the robots.txt file and parse it.

    :param fuzzable_request: A fuzzable_request instance that contains
                             (among other things) the URL to test.
    '''
    dirs = []

    base_url = fuzzable_request.get_url().base_url()
    robots_url = base_url.url_join('robots.txt')
    http_response = self._uri_opener.GET(robots_url, cache=True)

    if not is_404(http_response):
        # Save it to the kb!
        desc = 'A robots.txt file was found at: "%s", this file might'\
               ' expose private URLs and requires a manual review. The'\
               ' scanner will add all URLs listed in this file to the'\
               ' analysis queue.'
        desc = desc % robots_url

        i = Info('robots.txt file', desc, http_response.id,
                 self.get_name())
        i.set_url(robots_url)

        kb.kb.append(self, 'robots.txt', i)
        om.out.information(i.get_desc())

        # Work with it...
        dirs.append(robots_url)
        for line in http_response.get_body().split('\n'):

            line = line.strip()

            if len(line) > 0 and line[0] != '#' and \
                    (line.upper().find('ALLOW') == 0 or
                     line.upper().find('DISALLOW') == 0):

                url = line[line.find(':') + 1:]
                url = url.strip()
                try:
                    url = base_url.url_join(url)
                except Exception:
                    # Simply ignore the invalid URL
                    pass
                else:
                    dirs.append(url)

    self.worker_pool.map(self.http_get_and_parse, dirs)
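
# ---------------------------------------------------------------------------
# The robots.txt line handling above, reduced to a standalone helper: keep
# non-comment Allow/Disallow lines and return the path part after the colon.
# The function name is hypothetical.
# ---------------------------------------------------------------------------
def example_robots_paths(robots_txt_body):
    paths = []
    for line in robots_txt_body.split('\n'):
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        if line.upper().startswith('ALLOW') or \
           line.upper().startswith('DISALLOW'):
            paths.append(line[line.find(':') + 1:].strip())
    return paths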
def _fingerprint_installer(self, domain_path, wp_unique_url, response):
    '''
    GET latest.zip and latest.tar.gz and compare with the hashes from the
    release.db that was previously generated from wordpress.org [0] and
    contains all release hashes.

    This gives the initial wordpress version, not the current one.

    [0] http://wordpress.org/download/release-archive/
    '''
    zip_url = domain_path.url_join('latest.zip')
    tar_gz_url = domain_path.url_join('latest.tar.gz')
    install_urls = [zip_url, tar_gz_url]

    for install_url in install_urls:
        response = self._uri_opener.GET(install_url, cache=True,
                                        respect_size_limit=False)
        # md5sum the response body
        m = hashlib.md5()
        m.update(response.get_body())
        remote_release_hash = m.hexdigest()

        release_db = self._release_db

        for line in open(release_db):
            try:
                line = line.strip()
                release_db_hash, release_db_name = line.split(',')
            except ValueError:
                continue

            if release_db_hash == remote_release_hash:

                desc = 'The sysadmin used WordPress version "%s" during the'\
                       ' installation, which was found by matching the'\
                       ' contents of "%s" with the hashes of known releases.'\
                       ' If the sysadmin did not update WordPress, the'\
                       ' current version will still be the same.'
                desc = desc % (release_db_name, install_url)

                i = Info('Fingerprinted Wordpress version', desc,
                         response.id, self.get_name())
                i.set_url(install_url)

                kb.kb.append(self, 'info', i)
                om.out.information(i.get_desc())
def find_error_page(self, request, response):
    for msg in self._multi_in.query(response.body):
        desc = 'The URL: "%s" contains the descriptive error: "%s".'
        desc = desc % (response.get_url(), msg)

        i = Info('Descriptive error page', desc, response.id,
                 self.get_name())
        i.set_url(response.get_url())
        i.add_to_highlight(msg)

        self.kb_append_uniq(self, 'error_page', i, 'URL')

        # There is no need to report more than one info for the same
        # result; the user will read the info object and analyze it even
        # if we report it only once. If we report it twice, he'll get
        # mad ;)
        break
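
# ---------------------------------------------------------------------------
# _multi_in.query() returns every known error string contained in the body.
# A naive stand-in, equivalent for small string sets; for large sets w3af
# uses an optimized multi-string matcher. The function name is hypothetical.
# ---------------------------------------------------------------------------
def example_multi_in_query(known_strings, body):
    return [s for s in known_strings if s in body]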
def discover(self, fuzzable_request):
    '''
    :param fuzzable_request: A fuzzable_request instance that contains
                             (among other things) the URL to test.
    '''
    if self._is_proxyed_conn(fuzzable_request):
        desc = 'Your ISP seems to have a transparent proxy installed;'\
               ' this can influence scan results in unexpected ways.'

        i = Info('Transparent proxy detected', desc, 1, self.get_name())
        i.set_url(fuzzable_request.get_url())

        kb.kb.append(self, 'detect_transparent_proxy', i)
        om.out.information(i.get_desc())
    else:
        om.out.information('Your ISP has no transparent proxy.')