def _checkResponse(self, response, file_name):
    """
    Analyze a cross-domain policy XML file and report findings.

    Parses crossdomain.xml / clientaccesspolicy.xml; reports a wildcard
    access policy as a vulnerability, specific allowed domains as info,
    and a non-XML body that still looks like a policy file as info.

    @parameter response: The http response object for the fetched file.
    @parameter file_name: The name of the file that was requested.
    @return: None. Findings are saved to the kb.
    """
    om.out.debug("Checking XML response in ria_enumerator.")
    try:
        dom = xml.dom.minidom.parseString(response.getBody())
    except Exception:
        # Report this, it may be interesting for the final user
        # not a vulnerability per-se... but... it's information after all
        if (
            "allow-access-from" in response.getBody()
            or "cross-domain-policy" in response.getBody()
            or "cross-domain-access" in response.getBody()
        ):
            i = info.info()
            i.setPluginName(self.getName())
            i.setName("Invalid " + file_name)
            i.setURL(response.getURL())
            i.setMethod("GET")
            msg = 'The "' + file_name + '" file at: "' + response.getURL()
            msg += '" is not a valid XML.'
            i.setDesc(msg)
            i.setId(response.id)
            kb.kb.append(self, "info", i)
            om.out.information(i.getDesc())
    else:
        # BUG FIX: url_list and attribute were only assigned for the two
        # known file names; any other file_name raised a NameError below.
        url_list = []
        attribute = None
        if file_name == "crossdomain.xml":
            url_list = dom.getElementsByTagName("allow-access-from")
            attribute = "domain"
        if file_name == "clientaccesspolicy.xml":
            url_list = dom.getElementsByTagName("domain")
            attribute = "uri"

        for url in url_list:
            url = url.getAttribute(attribute)

            if url == "*":
                v = vuln.vuln()
                v.setPluginName(self.getName())
                v.setURL(response.getURL())
                v.setMethod("GET")
                v.setName('Insecure "' + file_name + '" settings')
                v.setSeverity(severity.LOW)
                msg = 'The "' + file_name + '" file at "' + response.getURL() + '" allows'
                msg += " flash/silverlight access from any site."
                v.setDesc(msg)
                v.setId(response.id)
                kb.kb.append(self, "vuln", v)
                om.out.vulnerability(v.getDesc(), severity=v.getSeverity())
            else:
                i = info.info()
                i.setPluginName(self.getName())
                i.setName("Crossdomain allow ACL")
                i.setURL(response.getURL())
                i.setMethod("GET")
                # BUG FIX: the message was missing its opening 'The "',
                # producing '<name>" file allows ...'.
                i.setDesc('The "' + file_name + '" file allows access from: "' + url + '".')
                i.setId(response.id)
                kb.kb.append(self, "info", i)
                om.out.information(i.getDesc())
def grep(self, request, response):
    '''
    Plugin entry point, find the error pages and report them.

    @parameter request: The HTTP request object.
    @parameter response: The HTTP response object
    @return: None
    '''
    if response.is_text_or_html():
        for msg in self._get_error_strings():
            # Remember that httpResponse objects have a faster "__in__" than
            # the one in strings; so string in response.getBody() is slower than
            # string in response
            if msg in response:
                i = info.info()
                i.setPluginName(self.getName())

                # Set a nicer name for the vulnerability
                name = 'Descriptive error page - "'
                if len(msg) > 12:
                    name += msg[:12] + '..."'
                else:
                    name += msg + '"'
                i.setName( name )

                i.setURL( response.getURL() )
                i.setId( response.id )
                i.setDesc( 'The URL: "' + response.getURL() + '" contains the descriptive error: "' + msg + '"' )
                i.addToHighlight( msg )
                kb.kb.append( self , 'errorPage' , i )

                # There is no need to report more than one info for the same result,
                # the user will read the info object and analyze it even if we report it
                # only once. If we report it twice, he'll get mad ;)
                break

        # Now i'll check if I can get a version number from the error page
        # This is common in apache, tomcat, etc...
        # IDIOM FIX: a chained comparison instead of "in range(400, 600)",
        # which builds a 200-element list on every call in Python 2.
        if 400 <= response.getCode() < 600:
            for server, error_regex in self._get_regex_tuples():
                match = error_regex.search( response.getBody() )
                if match:
                    match_string = match.groups()[0]
                    if match_string not in self._already_reported_versions:
                        # Save the info obj
                        i = info.info()
                        i.setPluginName(self.getName())
                        # BUG FIX: setName() was called twice with the
                        # identical value; once is enough.
                        i.setName('Error page with information disclosure')
                        i.setURL( response.getURL() )
                        i.setId( response.id )
                        i.setDesc( 'An error page sent this ' + server +' version: "' + match_string + '".' )
                        i.addToHighlight( server )
                        i.addToHighlight( match_string )
                        kb.kb.append( self , 'server' , i )
                        # Save the string
                        kb.kb.append( self , 'server' , match_string )
                        self._already_reported_versions.append( match_string )
def _analyze_response(self, response):
    '''
    Parse a discovered _vti_inf file and record FrontPage details in the kb.

    @parameter response: The http response object for the _vti_inf file.
    @return: None. All the info is saved to the kb.
    '''
    body = response.getBody()

    # Extract the FrontPage version and the admin/author script URLs.
    version_mo = re.search('FPVersion="(.*?)"', body, re.IGNORECASE)
    admin_mo = re.search('FPAdminScriptUrl="(.*?)"', body, re.IGNORECASE)
    author_mo = re.search('FPAuthorScriptUrl="(.*?)"', body, re.IGNORECASE)

    if not (version_mo and admin_mo and author_mo):
        # This is wierd... we found a _vti_inf file, but there is no frontpage
        # information in it... IPS? WAF? honeypot?
        i = info.info()
        i.setPluginName(self.getName())
        i.setId( response.id )
        i.setName( 'Fake FrontPage Configuration Information' )
        i.setURL( response.getURL() )
        desc = 'A fake FrontPage Configuration Information file was found at: "'
        desc += i.getURL()
        desc += '". This may be an indication of a honeypot, a WAF or an IPS.'
        i.setDesc( desc )
        kb.kb.append( self, 'fake_frontpage', i )
        om.out.information( i.getDesc() )
        return

    # Real FrontPage information was found; stop running this plugin.
    self._exec = False

    i = info.info()
    i.setPluginName(self.getName())
    i.setId( response.id )
    i.setName( 'FrontPage Configuration Information' )
    i.setURL( response.getURL() )
    desc = 'The FrontPage Configuration Information file was found at: "'
    desc += i.getURL()
    desc += '" and the version of FrontPage Server Extensions is: "'
    desc += version_mo.group(1) + '". '
    i.setDesc( desc )
    i['version'] = version_mo.group(1)
    kb.kb.append( self, 'frontpage_version', i )
    om.out.information( i.getDesc() )

    # Handle the admin.exe file
    self._analyze_admin( response, admin_mo )

    # Handle the author.exe file
    self._analyze_author( response, author_mo )
def _checkResponse(self, response, file_name ):
    '''
    Analyze a cross-domain policy XML file and report findings.

    Parses crossdomain.xml / clientaccesspolicy.xml; reports a wildcard
    access policy as a vulnerability, specific allowed domains as info,
    and a non-XML body that still looks like a policy file as info.

    @parameter response: The http response object for the fetched file.
    @parameter file_name: The name of the file that was requested.
    @return: None. Findings are saved to the kb.
    '''
    om.out.debug( 'Checking XML response in ria_enumerator.')
    try:
        dom = xml.dom.minidom.parseString( response.getBody() )
    except Exception:
        # Report this, it may be interesting for the final user
        # not a vulnerability per-se... but... it's information after all
        if 'allow-access-from' in response.getBody() or \
        'cross-domain-policy' in response.getBody() or \
        'cross-domain-access' in response.getBody():
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('Invalid ' + file_name)
            i.setURL( response.getURL() )
            i.setMethod( 'GET' )
            msg = 'The "' + file_name + '" file at: "' + response.getURL()
            msg += '" is not a valid XML.'
            i.setDesc( msg )
            i.setId( response.id )
            kb.kb.append( self, 'info', i )
            om.out.information( i.getDesc() )
    else:
        # BUG FIX: url_list and attribute were only assigned for the two
        # known file names; any other file_name raised a NameError below.
        url_list = []
        attribute = None
        if file_name == 'crossdomain.xml':
            url_list = dom.getElementsByTagName("allow-access-from")
            attribute = 'domain'
        if file_name == 'clientaccesspolicy.xml':
            url_list = dom.getElementsByTagName("domain")
            attribute = 'uri'

        for url in url_list:
            url = url.getAttribute(attribute)

            if url == '*':
                v = vuln.vuln()
                v.setPluginName(self.getName())
                v.setURL( response.getURL() )
                v.setMethod( 'GET' )
                v.setName( 'Insecure "' + file_name + '" settings' )
                v.setSeverity(severity.LOW)
                msg = 'The "' + file_name + '" file at "' + response.getURL() + '" allows'
                msg += ' flash/silverlight access from any site.'
                v.setDesc( msg )
                v.setId( response.id )
                kb.kb.append( self, 'vuln', v )
                om.out.vulnerability( v.getDesc(), severity=v.getSeverity() )
            else:
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('Crossdomain allow ACL')
                i.setURL( response.getURL() )
                i.setMethod( 'GET' )
                # BUG FIX: the message was missing its opening 'The "',
                # producing '<name>" file allows ...'.
                i.setDesc( 'The "' + file_name + '" file allows access from: "' + url + '".')
                i.setId( response.id )
                kb.kb.append( self, 'info', i )
                om.out.information( i.getDesc() )
def _PUT(self, domain_path):
    """
    Test the HTTP PUT method against a directory.

    Uploads a small random file and verifies (via GET) whether it was
    stored; also reports common DAV misconfiguration responses (500/403).
    """
    # upload a file with a random name and random content
    target = domain_path.urlJoin(createRandAlpha(5))
    random_content = createRandAlNum(6)
    put_response = self._urlOpener.PUT(target, data=random_content)

    # check if uploaded
    get_response = self._urlOpener.GET(target, useCache=True)

    if get_response.getBody() == random_content:
        # The content we PUT is being served back: upload works.
        v = vuln.vuln()
        v.setPluginName(self.getName())
        v.setURL(target)
        v.setId([put_response.id, get_response.id])
        v.setSeverity(severity.HIGH)
        v.setName("Insecure DAV configuration")
        v.setMethod("PUT")
        msg = 'File upload with HTTP PUT method was found at resource: "' + domain_path + '".'
        msg += ' A test file was uploaded to: "' + get_response.getURL() + '".'
        v.setDesc(msg)
        kb.kb.append(self, "dav", v)
    elif put_response.getCode() == 500:
        # Server-side failure: DAV is present but broken.
        i = info.info()
        i.setPluginName(self.getName())
        i.setURL(target)
        i.setId(get_response.id)
        i.setName("DAV incorrect configuration")
        i.setMethod("PUT")
        msg = "DAV seems to be incorrectly configured. The web server answered with a 500"
        msg += " error code. In most cases, this means that the DAV extension failed in"
        msg += ' some way. This error was found at: "' + put_response.getURL() + '".'
        i.setDesc(msg)
        kb.kb.append(self, "dav", i)
    elif put_response.getCode() == 403:
        # PUT understood but denied: likely a filesystem permission issue.
        i = info.info()
        i.setPluginName(self.getName())
        i.setURL(target)
        i.setId([put_response.id, get_response.id])
        i.setName("DAV insufficient privileges")
        i.setMethod("PUT")
        msg = "DAV seems to be correctly configured and allowing you to use the PUT method"
        msg += " but the directory does not have the correct permissions that would allow"
        msg += ' the web server to write to it. This error was found at: "'
        msg += put_response.getURL() + '".'
        i.setDesc(msg)
        kb.kb.append(self, "dav", i)
def _find_OS(self, fuzzableRequest):
    '''
    Analyze responses and determine if remote web server runs on windows or *nix.

    @Return: None, the knowledge is saved in the knowledgeBase
    '''
    freq_url = fuzzableRequest.getURL()
    filename = freq_url.getFileName()
    dirs = freq_url.getDirectories()[:-1]  # Skipping "domain level" dir.

    # Without a directory and a filename there is nothing to rewrite.
    if not (dirs and filename):
        return False

    # Build the same URL with a backslash as the last path separator;
    # a server that treats both separators alike answers the same body.
    last_url = dirs[-1].url_string
    windows_url = url_object(last_url[0:-1] + '\\' + filename)

    windows_response = self._uri_opener.GET(windows_url)
    original_response = self._uri_opener.GET(freq_url)

    if relative_distance_ge(original_response.getBody(),
                            windows_response.getBody(), 0.98):
        i = info.info()
        i.setPluginName(self.getName())
        i.setName('Operating system')
        i.setURL(windows_response.getURL())
        i.setMethod('GET')
        i.setDesc('Fingerprinted this host as a Microsoft Windows system.')
        i.setId([windows_response.id, original_response.id])
        kb.kb.append(self, 'operating_system_str', 'windows')
        kb.kb.append(self, 'operating_system', i)
        om.out.information(i.getDesc())
    else:
        i = info.info()
        i.setPluginName(self.getName())
        i.setName('Operating system')
        i.setURL(original_response.getURL())
        i.setMethod('GET')
        msg = 'Fingerprinted this host as a *nix system. Detection for this operating'
        msg += ' system is weak, "if not windows: is linux".'
        i.setDesc(msg)
        i.setId([original_response.id, windows_response.id])
        kb.kb.append(self, 'operating_system_str', 'unix')
        kb.kb.append(self, 'operating_system', i)
        om.out.information(i.getDesc())

    return True
def discover(self, fuzzableRequest):
    """
    Checks if JBoss Interesting Directories exist in the target server.
    Also verifies some vulnerabilities.
    """
    base_url = fuzzableRequest.getURL().baseUrl()

    for vuln_db_instance in findJBoss._jboss_vulns:
        vuln_url = base_url.urlJoin(vuln_db_instance["url"])
        response = self._uri_opener.GET(vuln_url)

        if response.getCode() != 200:
            continue

        # Report either a plain info or a vulnerability, depending on
        # the db entry's type; both share the same attributes.
        if vuln_db_instance["type"] == "info":
            finding = info.info()
        else:
            finding = vuln.vuln()
        finding.setPluginName(self.getName())
        finding.setName(vuln_db_instance["name"])
        finding.setURL(vuln_url)
        finding.setId(response.id)
        finding.setDesc(vuln_db_instance["desc"])
        kb.kb.append(self, vuln_db_instance["name"], finding)

        # Keep crawling from whatever the matched page links to.
        fuzzable_requests = self._createFuzzableRequests(response)
        self._fuzzable_requests_to_return.extend(fuzzable_requests)

    return self._fuzzable_requests_to_return
def _test_IP( self, original_response, domain ):
    '''
    Check if http://ip(domain)/ == http://domain/

    @parameter original_response: The http response for the domain-based URL.
    @parameter domain: The domain name to resolve and compare against.
    @return: None. A finding is saved to the kb when the bodies differ.
    '''
    try:
        ip_address = socket.gethostbyname( domain )
    except socket.error:
        # BUG FIX: this was a bare "except:", which also swallowed
        # KeyboardInterrupt / SystemExit. Only resolution failures
        # should abort this best-effort check.
        return

    url = original_response.getURL()
    ip_url = url.copy()
    ip_url.setDomain( ip_address )

    try:
        modified_response = self._uri_opener.GET( ip_url, cache=True )
    except w3afException:
        om.out.debug('An error occurred while fetching IP address URL in dnsWildcard plugin.')
    else:
        # A different body for the IP-based request suggests virtual
        # hosting with a distinct default site.
        if modified_response.getBody() != original_response.getBody():
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('Default domain')
            i.setURL( modified_response.getURL() )
            i.setMethod( 'GET' )
            msg = 'The contents of ' + modified_response.getURI()
            msg += ' differ from the contents of ' + original_response.getURI()
            i.setDesc( msg )
            i.setId( modified_response.id )
            kb.kb.append( self, 'dnsWildcard', i )
            om.out.information( i.getDesc() )
def _parse_netcraft(self, response):
    '''
    Parses netcraft's response and stores information in the KB.

    @param response: The http response object from querying netcraft
    @return: None, data stored in KB.
    '''
    # Netblock owner example (all in the same line in the real response):
    #   <td><b>Netblock owner</b></td><td width=38%>
    #   <a href="/netblock?q=GO-DADDY-COM-LLC,64.202.160.0,64.202.191.255">
    #   GoDaddy.com, LLC</a></td>
    re_netblock = '<td><b>Netblock owner</b></td><td width=38%><a href=".*?">(.*?)</a></td>'
    mo = re.search( re_netblock, response.body )
    if mo is None:
        return

    netblock_owner = mo.group(1)

    i = info.info()
    i.setPluginName(self.getName())
    i.setName('Netblock owner')
    i.setId( response.getId() )
    msg = 'Netcraft reports that the netblock owner for the target domain'
    msg += ' is %s' % netblock_owner
    i.setDesc( msg)
    # Save the results in the KB so the user can look at it
    kb.kb.append( self, 'netblock_owner', i )
def discover(self, fuzzableRequest ):
    '''
    Find captcha images.

    Fetches the same document twice; images whose source or content
    changed between the two fetches are reported as possible captchas.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    '''
    # GET the document, and fetch the images
    image_map_1 = self._get_images( fuzzableRequest )

    # Re-GET the document, and fetch the images
    image_map_2 = self._get_images( fuzzableRequest )

    # Compare the images (different images may be captchas)
    changed_images_list = []

    # BUG FIX: comparing dict.keys() lists is order-dependent in
    # Python 2, so two maps with the same keys in a different order
    # would take the first branch and skip the content comparison.
    # Compare the key sets instead.
    if set(image_map_1) != set(image_map_2):
        for img_src in image_map_1:
            if img_src not in image_map_2:
                changed_images_list.append( img_src )
    else:
        # Compare content
        for img_src in image_map_1:
            if image_map_1[ img_src ] != image_map_2[ img_src ]:
                changed_images_list.append( img_src )

    for img_src in changed_images_list:
        i = info.info()
        i.setPluginName(self.getName())
        i.setName('Captcha image detected')
        i.setURL( img_src )
        i.setMethod( 'GET' )
        i.setDesc( 'Found a CAPTCHA image at: "' + img_src + '".')
        kb.kb.append( self, 'findCaptchas', i )
        om.out.information( i.getDesc() )

    return []
def _test_IP(self, original_response, domain):
    """
    Check if http://ip(domain)/ == http://domain/

    @parameter original_response: The http response for the domain-based URL.
    @parameter domain: The domain name to resolve and compare against.
    @return: None. A finding is saved to the kb when the bodies differ.
    """
    # BUG FIX: gethostbyname raised an uncaught socket.gaierror for
    # unresolvable domains and crashed the plugin. Abort the check
    # instead (consistent with the other _test_IP implementation in
    # this codebase, which guards the resolution).
    try:
        ip_address = socket.gethostbyname(domain)
    except socket.error:
        return

    url = original_response.getURL()
    ip_url = url.copy()
    ip_url.setDomain(ip_address)

    try:
        modified_response = self._urlOpener.GET(ip_url, useCache=True)
    except w3afException:
        om.out.debug("An error occurred while fetching IP address URL in dnsWildcard plugin.")
    else:
        # A different body for the IP-based request suggests virtual
        # hosting with a distinct default site.
        if modified_response.getBody() != original_response.getBody():
            i = info.info()
            i.setPluginName(self.getName())
            i.setName("Default domain")
            i.setURL(modified_response.getURL())
            i.setMethod("GET")
            msg = "The contents of " + modified_response.getURI()
            msg += " differ from the contents of " + original_response.getURI()
            i.setDesc(msg)
            i.setId(modified_response.id)
            kb.kb.append(self, "dnsWildcard", i)
            om.out.information(i.getDesc())
def _do_request(self, url, mutant):
    '''
    Perform a simple GET to see if the result is an error or not, and then
    run the actual fuzzing.
    '''
    response = self._uri_opener.GET(mutant, cache=True, headers=self._headers)

    # Skip 404s, access-denied responses and anything the evaluation
    # hook rejects. Short-circuit order matches the original check.
    uninteresting = (is_404(response)
                     or response.getCode() in (403, 401)
                     or self._return_without_eval(mutant))
    if uninteresting:
        return

    self._fuzzable_requests.extend(self._createFuzzableRequests(response))

    #
    # Save it to the kb (if new)!
    #
    resp_url = response.getURL()
    if resp_url not in self._seen and resp_url.getFileName():
        i = info.info()
        i.setPluginName(self.getName())
        i.setName('Potentially interesting file')
        i.setURL(resp_url)
        i.setId(response.id)
        i.setDesc('A potentially interesting file was found at: "' + resp_url + '".')
        kb.kb.append(self, 'files', i)
        om.out.information(i.getDesc())

        # Report only once
        self._seen.add(resp_url)
def grep(self, request, response):
    '''
    Plugin entry point. Parse the object tags.

    @parameter request: The HTTP request object.
    @parameter response: The HTTP response object
    @return: None
    '''
    url = response.getURL()
    # Only analyze text/html responses, and each URL only once.
    if response.is_text_or_html() and url not in self._already_analyzed:
        self._already_analyzed.add(url)

        dom = response.getDOM()
        # In some strange cases, we fail to normalize the document
        if dom is not None:
            for tag_name in self._tag_names:
                # Find every occurrence of this tag anywhere in the
                # document. (A previous comment here claimed "input tags
                # with a type file attribute", but the xpath matches any
                # element with this tag name.)
                element_list = dom.xpath('//%s' % tag_name )
                if element_list:
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName(tag_name.title() + ' tag')
                    i.setURL(url)
                    i.setId( response.id )
                    i.setDesc( 'The URL: "' + i.getURL() + '" has an '+ tag_name + ' tag.' )
                    i.addToHighlight( tag_name )
                    kb.kb.append( self, tag_name, i )
def discover(self, fuzzableRequest ):
    '''
    Search the MIT PKS key server for mail accounts of the target domain.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    '''
    if not self._run:
        # This will remove the plugin from the discovery plugins to be runned.
        raise w3afRunOnce()

    # This plugin will only run one time.
    self._run = False

    pks_se = pks( self._urlOpener)
    domain_root = urlParser.getRootDomain( fuzzableRequest.getURL() )

    for result in pks_se.search( domain_root ):
        mail = result.username +'@' + domain_root

        i = info.info()
        i.setPluginName(self.getName())
        i.setURL( 'http://pgp.mit.edu:11371/' )
        i.setId( [] )
        i.setName( mail )
        i.setDesc( 'The mail account: "'+ mail + '" was found in the MIT PKS server. ' )
        i['mail'] = mail
        i['user'] = result.username
        i['name'] = result.name
        i['url_list'] = ['http://pgp.mit.edu:11371/', ]

        kb.kb.append( 'mails', 'mails', i )
        # Don't save duplicated information in the KB. It's useless.
        #kb.kb.append( self, 'mails', i )
        om.out.information( i.getDesc() )

    return []
def discover(self, fuzzableRequest ):
    '''
    Detect whether the scanner's outbound traffic goes through a
    transparent proxy, which may influence scan results.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    '''
    if not self._run:
        # This will remove the plugin from the discovery plugins to be runned.
        raise w3afRunOnce()

    # I will only run this one time. All calls to detectTransparentProxy
    # return the same url's
    self._run = False

    if self._is_proxyed_conn( fuzzableRequest ):
        i = info.info()
        i.setPluginName(self.getName())
        i.setName( 'Transparent proxy detected' )
        i.setURL( fuzzableRequest.getURL() )
        msg = 'Your ISP seems to have a transparent proxy installed, this can influence'
        msg += ' w3af results.'
        i.setDesc( msg )
        kb.kb.append( self, 'detectTransparentProxy', i )
        om.out.information( i.getDesc() )
    else:
        om.out.information( 'Your ISP has no transparent proxy.' )

    return []
def _parse_document( self, response ):
    '''
    Parses the HTML and adds the mail addresses to the kb.
    '''
    try:
        document_parser = dpCache.dpc.getDocumentParserFor( response )
    except w3afException:
        # Failed to find a suitable parser for the document
        return

    # Search for email addresses, skipping the ones already recorded.
    for mail in document_parser.getEmails( self._domain_root ):
        if mail in self._accounts:
            continue
        self._accounts.append( mail )

        i = info.info()
        i.setPluginName(self.getName())
        i.setName(mail)
        i.setURL( response.getURI() )
        msg = 'The mail account: "'+ mail + '" was found in: "'
        msg += response.getURI() + '"'
        i.setDesc( msg )
        i['mail'] = mail
        i['user'] = mail.split('@')[0]
        i['url_list'] = [response.getURI(), ]

        kb.kb.append( 'mails', 'mails', i )
        kb.kb.append( self, 'mails', i )
def _analyze_author(self, response, frontpage_author):
    '''
    Analyze the author URL.

    @parameter response: The http response object for the _vti_inf file.
    @parameter frontpage_author: A regex match object.
    @return: None. All the info is saved to the kb.
    '''
    i = info.info()
    i.setPluginName(self.getName())
    i.setId( response.id )
    i.setURL( response.getURL() )

    # Check for anomalies in the location of author.exe
    if frontpage_author.group(1) != '_vti_bin/_vti_aut/author.exe':
        name = 'Uncommon FrontPage configuration'

        desc = 'The FPAuthorScriptUrl is at: "'
        desc += frontpage_author.group(1)
        desc += '" instead of the default location: "'
        # BUG FIX: the message said "_vti_adm" (the admin.exe path,
        # copy-pasted from _analyze_admin) while this branch actually
        # compares against "_vti_aut".
        desc += '/_vti_bin/_vti_aut/author.exe".'
    else:
        name = 'FrontPage FPAuthorScriptUrl'

        desc = 'The FPAuthorScriptUrl is at: "'
        desc += urlParser.getDomainPath(i.getURL()) + frontpage_author.group(1)
        desc += '".'

    i.setName( name )
    i.setDesc( desc )
    i['FPAuthorScriptUrl'] = frontpage_author.group(1)
    kb.kb.append( self, 'frontpage_version', i )
    om.out.information( i.getDesc() )
def _match_cookie_fingerprint(self, request, response, cookieObj):
    """
    Now we analize and try to guess the remote web server based on the
    cookie that was sent.
    """
    cookie_header = cookieObj.output(header="")

    for db_entry in self._get_fingerprint_db():
        # db_entry[0] is the cookie fingerprint, db_entry[1] the platform.
        fingerprint, platform = db_entry[0], db_entry[1]

        if fingerprint not in cookie_header:
            continue
        if platform in self._already_reported_server:
            continue

        i = info.info()
        i.setPluginName(self.getName())
        i.setId(response.id)
        i.setName("Identified cookie")
        i.setURL(response.getURL())
        self._setCookieToRep(i, cobj=cookieObj)
        i["httpd"] = platform
        i.setDesc(
            "A cookie matching the cookie fingerprint DB "
            + 'has been found when requesting "'
            + response.getURL()
            + '" . '
            + 'The remote platform is: "'
            + platform
            + '"'
        )
        kb.kb.append(self, "cookies", i)
        self._already_reported_server.append(platform)
def grep(self, request, response):
    '''
    Plugin entry point, find feeds.

    @parameter request: The HTTP request object.
    @parameter response: The HTTP response object
    @return: None
    '''
    dom = response.getDOM()
    uri = response.getURI()

    # Skip URIs we already saw; also, in some strange cases, we fail to
    # normalize the document and get no DOM.
    if uri in self._already_inspected or dom is None:
        return

    self._already_inspected.add(uri)

    # Find all feed tags
    for element in self._tag_xpath(dom):
        feed_type = self._feed_types[element.tag.lower()]
        version = element.attrib.get('version', 'unknown')

        i = info.info()
        i.setPluginName(self.getName())
        i.setName(feed_type +' feed')
        i.setURI(uri)
        fmt = 'The URL "%s" is a %s version %s feed.'
        i.setDesc( fmt % (uri, feed_type, version) )
        i.setId( response.id )
        i.addToHighlight( feed_type )
        kb.kb.append( self, 'feeds', i )
def grep(self, request, response):
    '''
    Plugin entry point, verify if the HTML has a form with file uploads.

    @parameter request: The HTTP request object.
    @parameter response: The HTTP response object
    @return: None
    '''
    url = response.getURL()

    # Only text/html responses, and each URL only once.
    if not response.is_text_or_html() or url in self._already_inspected:
        return
    self._already_inspected.add(url)

    dom = response.getDOM()
    # In some strange cases, we fail to normalize the document
    if dom is None:
        return

    # Loop through file inputs tags
    for input_file in dom.xpath(FILE_INPUT_XPATH):
        i = info.info()
        i.setPluginName(self.getName())
        i.setName('File upload form')
        i.setURL(url)
        i.setId(response.id)
        msg = 'The URL: "%s" has form with file upload ' \
              'capabilities.' % url
        i.setDesc(msg)
        i.addToHighlight(etree.tostring(input_file))
        kb.kb.append(self, 'fileUpload', i)
def grep(self, request, response):
    '''
    Plugin entry point. Grep for oracle applications.

    @parameter request: The HTTP request object.
    @parameter response: The HTTP response object
    @return: None
    '''
    url = response.getURL()

    if not response.is_text_or_html() or url in self._already_analyzed:
        return
    self._already_analyzed.add(url)

    for descriptive_msg in self._getDescriptiveMessages():
        # Remember that httpResponse objects have a faster "__in__" than
        # the one in strings; so string in response.getBody() is slower than
        # string in response
        if descriptive_msg in response:
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('Oracle application')
            i.setURL(url)
            i.setId( response.id )
            i.addToHighlight( descriptive_msg )
            desc = 'The URL: "' + url + '" was created using Oracle'
            desc += ' Application server.'
            i.setDesc( desc )
            kb.kb.append( self , 'oracle' , i )
def grep(self, request, response):
    '''
    Plugin entry point. Parse the object tags.

    @parameter request: The HTTP request object.
    @parameter response: The HTTP response object
    @return: None
    '''
    url = response.getURL()
    dom = response.getDOM()

    # Skip non-HTML responses, already-seen URLs and documents that
    # failed to normalize (no DOM).
    if not response.is_text_or_html() or dom is None \
    or url in self._already_analyzed:
        return

    self._already_analyzed.add(url)

    for element in self._tag_xpath( dom ):
        tag_name = element.tag

        i = info.info()
        i.setPluginName(self.getName())
        i.setName(tag_name.title() + ' tag')
        i.setURL(url)
        i.setId( response.id )
        msg = 'The URL: "%s" has an "%s" tag. We recommend you download the '
        msg += 'client side code and analyze it manually.'
        msg = msg % (i.getURI(), tag_name)
        i.setDesc( msg )
        i.addToHighlight( tag_name )
        kb.kb.append( self, tag_name, i )
def _test_DNS(self, original_response, dns_wildcard_url): """ Check if http://www.domain.tld/ == http://domain.tld/ """ # # I only want to perform an HTTP request if the domain # actually exists. If not... we know it's going to fail # and that will increase the library's error count, show # a traceback, etc. # try: socket.gethostbyname(dns_wildcard_url.getDomain()) except: return try: modified_response = self._urlOpener.GET(dns_wildcard_url, useCache=True) except w3afException, w3: if "Failed to resolve" in str(w3): i = info.info() i.setPluginName(self.getName()) i.setName("No DNS wildcard") i.setURL(original_response.getURL()) i.setMethod("GET") i.setDesc("The target site has no DNS wildcard.") kb.kb.append(self, "dnsWildcard", i) om.out.information(i.getDesc())
def _is_vulnerable(self, fuzzableRequest):
    '''
    Checks if the remote website is vulnerable or not. Saves the result in
    self._is_vulnerable_result , because we want to perform this test only once.

    @return: True if vulnerable.
    '''
    if self._is_vulnerable_result is not None:
        # The test was already performed, we return the old response
        return self._is_vulnerable_result
    else:
        # We perform the test, for this we need a URL that has a filename, URL's
        # that don't have a filename can't be used for this.
        filename = fuzzableRequest.getURL().getFileName()
        if filename == '':
            return None

        # Only the base name, without extension: content negotiation
        # matches on the resource name.
        filename = filename.split('.')[0]

        # Now I simply perform the request:
        alternate_resource = fuzzableRequest.getURL().urlJoin(filename)
        headers = fuzzableRequest.getHeaders()
        # NOTE(review): if getHeaders() returns the live header dict,
        # setting 'Accept' here mutates the fuzzable request itself —
        # confirm it returns a copy.
        headers['Accept'] = 'w3af/bar'
        response = self._uri_opener.GET( alternate_resource, headers = headers )

        if 'alternates' in response.getLowerCaseHeaders():
            # Even if there is only one file, with an unique mime type, the content negotiation
            # will return an alternates header. So this is pretty safe.

            # Save the result internally
            self._is_vulnerable_result = True

            # Save the result as an info in the KB, for the user to see it:
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('HTTP Content Negotiation enabled')
            i.setURL( response.getURL() )
            i.setMethod( 'GET' )
            desc = 'HTTP Content negotiation is enabled in the remote web server. This '
            desc += ' could be used to bruteforce file names and find new resources.'
            i.setDesc( desc )
            i.setId( response.id )
            kb.kb.append( self, 'content_negotiation', i )
            om.out.information( i.getDesc() )
        else:
            om.out.information('The remote Web server has Content Negotiation disabled.')

            # I want to perform this test a couple of times... so I only return False
            # if that "couple of times" is empty
            self._tries_left -= 1
            if self._tries_left == 0:
                # Save the FALSE result internally
                self._is_vulnerable_result = False
            else:
                # None tells the plugin to keep trying with the next URL
                return None

        # return the result =)
        return self._is_vulnerable_result
def discover(self, fuzzableRequest ):
    '''
    Get the urllist.txt file and parse it.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    '''
    if not self._exec:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()
    else:
        # Only run once
        self._exec = False

        dirs = []
        self._new_fuzzable_requests = []

        base_url = fuzzableRequest.getURL().baseUrl()
        urllist_url = base_url.urlJoin( 'urllist.txt' )
        http_response = self._uri_opener.GET( urllist_url, cache=True )

        if not is_404( http_response ):
            # Work with it...
            dirs.append( urllist_url )

            # Heuristic: allow up to 5 lines that fail to join as URLs
            # before deciding this is not a real urllist.txt file.
            is_urllist = 5
            for line in http_response.getBody().split('\n'):
                line = line.strip()
                # Skip blank lines and '#' comments.
                if not line.startswith('#') and line:
                    try:
                        url = base_url.urlJoin( line )
                    except:
                        # NOTE(review): bare except — counts any failure
                        # (not only malformed URLs) against the heuristic.
                        is_urllist -= 1
                        if not is_urllist:
                            break
                    else:
                        dirs.append( url )

            if is_urllist:
                # Save it to the kb!
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('urllist.txt file')
                i.setURL( urllist_url )
                i.setId( http_response.id )
                i.setDesc( 'A urllist.txt file was found at: "'+ urllist_url +'".' )
                kb.kb.append( self, 'urllist.txt', i )
                om.out.information( i.getDesc() )

        for url in dirs:
            # Send the requests using threads:
            self._run_async(meth=self._get_and_parse, args=(url,))

        # Wait for all threads to finish
        self._join()

        return self._new_fuzzable_requests
def grep(self, request, response):
    """
    Plugin entry point, search for cookies.

    @parameter request: The HTTP request object.
    @parameter response: The HTTP response object
    @return: None
    """
    for key in response.getHeaders():
        if key.upper() in self._cookieHeaders:
            # save
            headers = response.getHeaders()

            # Create the object to save the cookie in the kb
            i = info.info()
            i.setPluginName(self.getName())
            i.setName("Cookie")
            i.setURL(response.getURL())

            # NOTE(review): cookieStr is never read after this line.
            cookieStr = headers[key].strip()
            self._setCookieToRep(i, cstr=headers[key].strip())

            C = Cookie.SimpleCookie()
            try:
                # Note to self: This line may print some chars to the console
                C.load(headers[key].strip())
            except Cookie.CookieError:
                # The cookie is invalid, this is worth mentioning ;)
                msg = "The cookie that was sent by the remote web application"
                msg += " doesn't respect the RFC."
                om.out.information(msg)
                i.setDesc(msg)
                i.setName("Invalid cookie")
                kb.kb.append(self, "invalid-cookies", i)
            else:
                i["cookie-object"] = C

                """
                The expiration date tells the browser when to delete the cookie. If no expiration
                date is provided, the cookie is deleted at the end of the user session, that is,
                when the user quits the browser. As a result, specifying an expiration date is a
                means for making cookies to survive across browser sessions. For this reason,
                cookies that have an expiration date are called persistent.
                """
                i["persistent"] = False
                if "expires" in C:
                    i["persistent"] = True

                i.setId(response.id)
                # NOTE(review): "cookie-string" is not assigned in this
                # method; presumably _setCookieToRep() sets it — confirm,
                # otherwise this raises KeyError.
                i.addToHighlight(i["cookie-string"])

                msg = 'The URL: "' + i.getURL() + '" sent the cookie: "'
                msg += i["cookie-string"] + '".'
                i.setDesc(msg)
                kb.kb.append(self, "cookies", i)

                # Find if the cookie introduces any vulnerability, or discloses information
                self._analyzeCookie(request, response, C)

    # do this check everytime
    self._sslCookieValueUsedInHTTP(request, response)
def _analyze_wait(self, mutant, response):
    """
    Analyze results of the _sendMutant method that was sent in the _with_time_delay method.

    Two-step time-based confirmation: if the first response's wait time
    falls inside the expected delay window, the payload is re-sent with a
    longer delay (self._second_wait_time). Only when the second response
    also matches its (wider) window is a vulnerability reported; otherwise
    a lower-confidence info object is stored instead.

    @parameter mutant: The mutant that was sent; its mod-value is rewritten
                       in place to carry the second, longer delay.
    @parameter response: The HTTP response of the first (shorter) delay probe.
    @return: None; findings are appended to kb.kb under "osCommanding".
    """
    #
    #   Only one thread at the time can enter here. This is because I want to report each
    #   vulnerability only once, and by only adding the "if self._hasNoBug" statement, that
    #   could not be done.
    #
    with self._plugin_lock:
        #
        #   I will only report the vulnerability once.
        #
        if self._hasNoBug("osCommanding", "osCommanding", mutant.getURL(), mutant.getVar()):
            # First window: original baseline + requested delay, +/- 2 seconds.
            if response.getWaitTime() > (
                self._original_wait_time + self._wait_time - 2
            ) and response.getWaitTime() < (self._original_wait_time + self._wait_time + 2):
                sentOs, sentSeparator = self._get_os_separator(mutant)

                # This could be because of an osCommanding vuln, or because of an error that
                # generates a delay in the response; so I'll resend changing the time and see
                # what happens
                original_wait_param = mutant.getModValue()
                # Swap the short delay for the longer one inside the payload string.
                more_wait_param = original_wait_param.replace(str(self._wait_time), str(self._second_wait_time))
                mutant.setModValue(more_wait_param)
                response = self._sendMutant(mutant, analyze=False)

                # Second window: +/- 3 seconds around the longer delay.
                if response.getWaitTime() > (
                    self._original_wait_time + self._second_wait_time - 3
                ) and response.getWaitTime() < (self._original_wait_time + self._second_wait_time + 3):
                    # Now I can be sure that I found a vuln, I control the time of the response.
                    v = vuln.vuln(mutant)
                    v.setPluginName(self.getName())
                    v.setName("OS commanding vulnerability")
                    v.setSeverity(severity.HIGH)
                    v["os"] = sentOs
                    v["separator"] = sentSeparator
                    v.setDesc("OS Commanding was found at: " + mutant.foundAt())
                    v.setDc(mutant.getDc())
                    v.setId(response.id)
                    v.setURI(response.getURI())
                    kb.kb.append(self, "osCommanding", v)
                else:
                    # The first delay existed... I must report something...
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName("Possible OS commanding vulnerability")
                    i.setId(response.id)
                    i.setDc(mutant.getDc())
                    i.setMethod(mutant.getMethod())
                    i["os"] = sentOs
                    i["separator"] = sentSeparator
                    msg = "A possible OS Commanding was found at: " + mutant.foundAt()
                    msg += "Please review manually."
                    i.setDesc(msg)
                    kb.kb.append(self, "osCommanding", i)
def grep(self, request, response):
    '''
    Plugin entry point.

    Searches 200/OK text responses for WSDL and DISCO markers and records
    matches in the kb. Each URL is inspected only once.

    @parameter request: The HTTP request object.
    @parameter response: The HTTP response object
    @return: None, all results are saved in the kb.
    '''
    url = response.getURL()
    if response.is_text_or_html() and response.getCode() == 200 and \
        url not in self._already_inspected:

        # Don't repeat URLs
        self._already_inspected.add(url)

        is_WSDL = False
        for wsdl_string in self._wsdl_strings:
            if wsdl_string in response:
                is_WSDL = True
                break

        if is_WSDL:
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('WSDL file')
            i.setURL( response.getURL() )
            i.setId( response.id )
            i.addToHighlight( wsdl_string )
            msg = 'The URL: "' + i.getURL() + '" is a Web Services '
            msg += 'Description Language page.'
            i.setDesc( msg )
            kb.kb.append( self , 'wsdl' , i )

        is_Disco = False
        for disco_string in self._disco_strings:
            if disco_string in response:
                is_Disco = True
                break

        if is_Disco:
            i = info.info()
            i.setPluginName(self.getName())
            # FIX: this branch never set a name nor the response id, unlike
            # the WSDL branch above; reports were anonymous and untraceable.
            i.setName('DISCO file')
            i.setURL( response.getURL() )
            i.setId( response.id )
            msg = 'The URL: "' + i.getURL() + '" is a DISCO file that contains'
            msg += ' references to WSDLs.'
            i.setDesc( msg )
            i.addToHighlight( disco_string )
            kb.kb.append( self , 'disco' , i )
def grep(self, request, response):
    '''
    Plugin entry point.

    Looks for raw HTTP request/response text embedded in the body of a
    response (after stripping tags), which usually indicates a proxy or
    debugging artifact, and records findings in the kb.

    @parameter request: The HTTP request object.
    @parameter response: The HTTP response object
    @return: None, all results are saved in the kb.
    '''
    uri = response.getURI()

    # 501 Code is "Not Implemented" which in some cases responds with this in the body:
    # <body><h2>HTTP/1.1 501 Not Implemented</h2></body>
    # Which creates a false positive.
    if response.getCode() != 501 and uri not in self._already_inspected \
        and response.is_text_or_html():

        # Don't repeat URLs
        self._already_inspected.add(uri)

        # First if, mostly for performance.
        # Remember that httpResponse objects have a faster "__in__" than
        # the one in strings; so string in response.getBody() is slower than
        # string in response
        if 'HTTP/1' in response and response.getClearTextBody() is not None:

            # Now, remove tags
            body_without_tags = response.getClearTextBody()

            res = self._re_request.search(body_without_tags)
            if res:
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('HTTP Request in HTTP body')
                i.setURI(uri)
                i.setId(response.id)
                i.setDesc('An HTTP request was found in the HTTP body of a response')
                i.addToHighlight(res.group(0))
                kb.kb.append(self, 'request', i)

            res = self._re_response.search(body_without_tags)
            if res:
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('HTTP Response in HTTP body')
                i.setURI(uri)
                i.setId(response.id)
                i.setDesc('An HTTP response was found in the HTTP body of a response')
                # FIX: the request branch highlights its match but this branch
                # did not; add the highlight for consistency.
                i.addToHighlight(res.group(0))
                kb.kb.append(self, 'response', i)
def grep(self, request, response):
    '''
    Plugin entry point, search for meta tags.

    Parses the response with the document-parser cache, walks every META
    tag attribute, and reports the ones whose name or value contains one
    of the configured interesting words.

    @parameter request: The HTTP request object.
    @parameter response: The HTTP response object
    @return: None
    '''
    uri = response.getURI()

    # Guard clauses: only inspect HTML, skip 404 pages, and never inspect
    # the same URI twice.
    if not response.is_text_or_html():
        return
    if is_404( response ) or uri in self._already_inspected:
        return

    self._already_inspected.add(uri)

    try:
        document_parser = dpCache.dpc.getDocumentParserFor( response )
    except w3afException:
        # The response could not be parsed; nothing to grep.
        return

    for tag in document_parser.getMetaTags():
        tag_name = self._find_name( tag )

        for attr in tag:
            # Lowercase once per attribute instead of once per word.
            attr_key = attr[0].lower()
            attr_val = attr[1].lower()

            for word in self._interesting_words:
                # Check if we have something interesting and WHERE it is.
                if word in attr_key:
                    where, value = 'name', attr_key
                elif word in attr_val:
                    where, value = 'value', attr_val
                else:
                    # Nothing interesting in this attribute for this word.
                    continue

                # The atribute is interesting!
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('Interesting META tag')
                i.setURI( response.getURI() )
                i.setId( response.id )
                msg = 'The URI: "' + i.getURI() + '" sent a META tag with '
                msg += 'attribute '+ where +' "'+ value +'" which'
                msg += ' looks interesting.'
                i.addToHighlight( where, value )
                if self._interesting_words.get(tag_name, None):
                    msg += ' The tag is used for '
                    msg += self._interesting_words[tag_name] + '.'
                i.setDesc( msg )
                kb.kb.append( self , 'metaTags' , i )