def discover(self, fuzzableRequest ): ''' @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test. ''' if not self._run: # This will remove the plugin from the discovery plugins to be runned. raise w3afRunOnce() else: # I will only run this one time. All calls to googleSpider return the same url's self._run = False google_se = google(self._urlOpener) domain = fuzzableRequest.getURL().getDomain() if is_private_site( domain ): msg = 'There is no point in searching google for "site:'+ domain + '".' msg += ' Google doesnt index private pages.' raise w3afException( msg ) try: results = google_se.getNResults('site:'+ domain, self._result_limit) except w3afException, w3: om.out.error(str(w3)) # If I found an error, I don't want to be run again raise w3afRunOnce() else:
def discover(self, fuzzableRequest ):
    '''
    Search Yahoo Site Explorer for urls hosted on the target domain.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: A list of new fuzzable requests created from the search results.
    '''
    self._new_fuzzable_requests = []

    if not self._run:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Run only once; every call to yahooSiteExplorer returns the same url's.
    self._run = False

    self._yse = yse( self._urlOpener )

    domain = urlParser.getDomain( fuzzableRequest.getURL() )
    if is_private_site( domain ):
        msg = 'There is no point in searching yahoo site explorer for site: "'
        msg += domain + '" . Yahoo doesnt index private pages.'
        raise w3afException(msg)

    # Spawn one worker thread per search result.
    for res in self._yse.search( domain, 0, self._result_limit ):
        self._tm.startFunction( target=self._generate_fuzzable_requests,
                                args=(res.URL,), ownerObj=self )

    # Wait for all threads to finish before reporting what was found.
    self._tm.join( self )
    return self._new_fuzzable_requests
def discover(self, fuzzableRequest ):
    '''
    Run the Google Hacking Database (GHDB) queries against the target domain.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests found by the classic GHDB search.
    '''
    self._fuzzableRequests = []

    if not self._run:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Update the database first, if the user asked for it.
    if self._update_ghdb:
        self._update_db()

    # Run only once; every call to ghdb returns the same url's.
    self._run = False

    # Get the domain and set some parameters
    domain = fuzzableRequest.getURL().getDomain()
    if is_private_site( domain ):
        msg = 'There is no point in searching google for "site:'+ domain
        msg += '" . Google doesnt index private pages.'
        raise w3afException( msg )

    return self._do_clasic_GHDB( domain )
def discover(self, fuzzableRequest ):
    '''
    Fingerprint the Web Application Firewall that protects the site, if any.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; findings are written to the kb by each check.
    '''
    if not self._run:
        # This will remove the plugin from the discovery plugins
        # to be run.
        raise w3afRunOnce()

    # Run only once; fingerprint_WAF never discovers new url's.
    self._run = False

    # One fingerprinting method per known WAF product, run in order.
    waf_checks = [ self._fingerprint_URLScan,
                   self._fingerprint_ModSecurity,
                   self._fingerprint_SecureIIS,
                   self._fingerprint_Airlock,
                   self._fingerprint_Barracuda,
                   self._fingerprint_DenyAll,
                   self._fingerprint_F5ASM,
                   self._fingerprint_F5TrafficShield,
                   self._fingerprint_TEROS,
                   self._fingerprint_NetContinuum,
                   self._fingerprint_BinarySec,
                   self._fingerprint_HyperGuard ]
    for check in waf_checks:
        check( fuzzableRequest )

    return []
def discover(self, fuzzableRequest):
    '''
    Search Bing for email accounts at the target's root domain.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; this plugin only fills the kb with mail accounts.
    '''
    # This will remove the plugin from the discovery plugins to be run.
    if not self._run:
        raise w3afRunOnce()

    # This plugin will only run one time.
    self._run = False

    bingSE = bing(self._uri_opener)
    self._domain = fuzzableRequest.getURL().getDomain()
    self._domain_root = fuzzableRequest.getURL().getRootDomain()

    results = bingSE.getNResults('@'+self._domain_root, self._resultLimit)

    # Bug fix: the loop variable used to be named "result", shadowing the
    # "result = []" accumulator above, which made discover() return the
    # LAST Bing search-result object instead of the expected empty list.
    for res in results:
        # Analyze each search result in a worker thread.
        self._run_async(meth=self._findAccounts, args=(res,))
    self._join()

    self.printUniq(kb.kb.getData('fingerBing', 'mails'), None)
    return []
def discover(self, fuzzableRequest ):
    '''
    Search the MIT PKS key server for email accounts at the target root domain.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; mail accounts are stored in the kb.
    '''
    if not self._run:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # This plugin will only run one time.
    self._run = False

    pks_se = pks( self._urlOpener)
    domain_root = urlParser.getRootDomain( fuzzableRequest.getURL() )

    for result in pks_se.search( domain_root ):
        mail = result.username +'@' + domain_root

        i = info.info()
        i.setPluginName(self.getName())
        i.setURL( 'http://pgp.mit.edu:11371/' )
        i.setId( [] )
        i.setName( mail )
        i.setDesc( 'The mail account: "'+ mail + '" was found in the MIT PKS server. ' )
        i['mail'] = mail
        i['user'] = result.username
        i['name'] = result.name
        i['url_list'] = ['http://pgp.mit.edu:11371/', ]

        kb.kb.append( 'mails', 'mails', i )
        # Don't save duplicated information in the KB. It's useless.
        #kb.kb.append( self, 'mails', i )

        om.out.information( i.getDesc() )

    return []
def discover(self, fuzzableRequest ):
    '''
    Check whether the ISP intercepts our traffic with a transparent proxy.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; the finding (if any) is stored in the kb.
    '''
    if not self._run:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Run only once; every call to detectTransparentProxy returns the same url's.
    self._run = False

    if not self._is_proxyed_conn( fuzzableRequest ):
        om.out.information( 'Your ISP has no transparent proxy.' )
    else:
        i = info.info()
        i.setPluginName(self.getName())
        i.setName( 'Transparent proxy detected' )
        i.setURL( fuzzableRequest.getURL() )
        msg = 'Your ISP seems to have a transparent proxy installed, this can influence'
        msg += ' w3af results.'
        i.setDesc( msg )
        kb.kb.append( self, 'detectTransparentProxy', i )
        om.out.information( i.getDesc() )

    return []
def discover(self, fuzzableRequest ):
    '''
    Get www.site.com and site.com and compare responses.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; findings are stored in the kb by the tests.
    '''
    if not self._exec :
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Only run once
    self._exec = False

    domain = urlParser.getDomain( fuzzableRequest.getURL() )

    # Only do all this if the target is a domain name, not an IP address.
    if re.match('\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?', domain ):
        return []

    base_url = urlParser.baseUrl( fuzzableRequest.getURL() )
    original_response = self._urlOpener.GET( base_url, useCache=True )
    proto = urlParser.getProtocol( fuzzableRequest.getURL() )

    # Flip the leading "www." to build the url that should serve the same
    # content when no DNS wildcard is configured.
    if domain.startswith('www.'):
        dns_wildcard_url = proto + '://' + domain.replace('www.', '') + '/'
    else:
        dns_wildcard_url = proto + '://www.' + domain + '/'

    self._test_DNS( original_response, dns_wildcard_url )
    self._test_IP( original_response, domain )

    return []
def discover(self, fuzzableRequest): """ Search zone_h and parse the output. @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test. """ if not self._exec: # This will remove the plugin from the discovery plugins to be runned. raise w3afRunOnce() else: # Only run once self._exec = False target_domain = urlParser.getRootDomain(fuzzableRequest.getURL()) # Example URL: # http://www.zone-h.org/archive/domain=cyprus-stones.com # TODO: Keep this URL updated! zone_h_url = "http://www.zone-h.org/archive/domain=" + target_domain try: response = self._urlOpener.GET(zone_h_url) except w3afException, e: msg = "An exception was raised while running zone-h plugin. Exception: " + str(e) om.out.debug(msg) else:
def discover(self, fuzzableRequest):
    '''
    Search Bing for urls hosted on the target domain ("site:" query).

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests generated from the Bing search results.
    '''
    if not self._run:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Run only once; every call to bing_spider returns the same url's.
    self._run = False

    bingSE = bing(self._uri_opener)
    domain = fuzzableRequest.getURL().getDomain()

    if is_private_site(domain):
        msg = 'There is no point in searching Bing for "site:'+ domain + '".'
        msg += ' Bing doesnt index private pages.'
        raise w3afException( msg )

    # Spawn one worker per search result.
    for res in bingSE.getNResults('site:'+ domain, self._resultLimit):
        self._run_async(meth=self._genFuzzableRequests, args=(res.URL,))
    self._join()

    return self._fuzzableRequests
def discover(self, fuzzableRequest):
    """
    Get www.site.com and site.com and compare responses.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; findings are stored in the kb by the tests.
    """
    if not self._exec:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Only run once
    self._exec = False

    if re.match("\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?",
                fuzzableRequest.getURL().getDomain()):
        # The target is an IP address; there is no DNS wildcard to test.
        return []

    base_url = fuzzableRequest.getURL().baseUrl()
    original_response = self._urlOpener.GET(base_url, useCache=True)

    domain = fuzzableRequest.getURL().getDomain()
    dns_wildcard_url = fuzzableRequest.getURL().copy()

    # TODO: This is weak! What if the subdomain is "www2"?
    # Example: Target set by user is www2.host.tld.
    if domain.startswith("www."):
        dns_wildcard_url.setDomain(domain.replace("www.", ""))
    else:
        dns_wildcard_url.setDomain("www." + domain)

    self._test_DNS(original_response, dns_wildcard_url)
    self._test_IP(original_response, domain)

    return []
def discover(self, fuzzableRequest ):
    '''
    Get the urllist.txt file and parse it.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests found while parsing the listed urls.
    '''
    if not self._exec:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Only run once
    self._exec = False

    self._new_fuzzable_requests = []
    dirs = []

    base_url = fuzzableRequest.getURL().baseUrl()
    urllist_url = base_url.urlJoin( 'urllist.txt' )
    http_response = self._uri_opener.GET( urllist_url, cache=True )

    if not is_404( http_response ):
        # Work with it...
        dirs.append( urllist_url )

        # Tolerate up to five malformed lines before deciding that this
        # is not really a urllist.txt file.
        is_urllist = 5
        for line in http_response.getBody().split('\n'):
            line = line.strip()
            # Skip comments and empty lines.
            if line and not line.startswith('#'):
                try:
                    url = base_url.urlJoin( line )
                except:
                    is_urllist -= 1
                    if not is_urllist:
                        break
                else:
                    dirs.append( url )

        if is_urllist:
            # Save it to the kb!
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('urllist.txt file')
            i.setURL( urllist_url )
            i.setId( http_response.id )
            i.setDesc( 'A urllist.txt file was found at: "'+ urllist_url +'".' )
            kb.kb.append( self, 'urllist.txt', i )
            om.out.information( i.getDesc() )

    for url in dirs:
        # Send the requests using threads:
        self._run_async(meth=self._get_and_parse, args=(url,))

    # Wait for all threads to finish
    self._join()

    return self._new_fuzzable_requests
def discover(self, fuzzableRequest ):
    '''
    Get the robots.txt file and parse it.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests found while parsing the listed urls.
    '''
    if not self._exec:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Only run once
    self._exec = False

    self._new_fuzzable_requests = []
    dirs = []

    base_url = fuzzableRequest.getURL().baseUrl()
    robots_url = base_url.urlJoin( 'robots.txt' )
    http_response = self._urlOpener.GET( robots_url, useCache=True )

    if not is_404( http_response ):
        # Save it to the kb!
        i = info.info()
        i.setPluginName(self.getName())
        i.setName('robots.txt file')
        i.setURL( robots_url )
        i.setId( http_response.id )
        i.setDesc( 'A robots.txt file was found at: "'+ robots_url +'".' )
        kb.kb.append( self, 'robots.txt', i )
        om.out.information( i.getDesc() )

        # Work with it...
        dirs.append( robots_url )
        for line in http_response.getBody().split('\n'):
            line = line.strip()
            # Only Allow / Disallow rules carry a path worth requesting.
            if len(line) > 0 and line[0] != '#' and (line.upper().find('ALLOW') == 0 or\
                                                     line.upper().find('DISALLOW') == 0 ):
                url = line[ line.find(':') + 1 : ]
                url = url.strip()
                dirs.append( base_url.urlJoin( url ) )

    for url in dirs:
        # Send the requests using threads:
        self._tm.startFunction( target=self._get_and_parse, args=(url,),
                                ownerObj=self )

    # Wait for all threads to finish
    self._tm.join( self )

    return self._new_fuzzable_requests
def discover(self, fuzzableRequest ):
    '''
    Searches for user directories.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests generated while testing user directories.
    '''
    if not self._run:
        raise w3afRunOnce()

    self._run = False
    self._fuzzable_requests = []

    base_url = urlParser.baseUrl( fuzzableRequest.getURL() )
    self._headers = {'Referer': base_url }

    # Create a response body to compare with the others
    non_existant_user = '******'
    test_URL = urlParser.urlJoin( base_url, non_existant_user )
    try:
        response = self._urlOpener.GET( test_URL, useCache=True,
                                        headers=self._headers )
        response_body = response.getBody()
    except:
        raise w3afException('userDir failed to create a non existant signature.')

    self._non_existant = response_body.replace( non_existant_user, '')

    # Check the users to see if they exist
    for url, user in self._create_dirs( base_url ):
        om.out.debug('userDir is testing ' + url )
        # Send the requests using threads:
        self._tm.startFunction( target=self._do_request, args=( url, user ),
                                ownerObj=self )

    # Wait for all threads to finish
    self._tm.join( self )

    # Only do this if I already know that users can be identified.
    if kb.kb.getData( 'userDir', 'users' ) != []:
        # AND only run once
        if self._run_OS_ident:
            self._run_OS_ident = False
            self._advanced_identification( base_url, 'os' )

        if self._run_app_ident:
            self._run_app_ident = False
            self._advanced_identification( base_url, 'apps' )

        # Report findings of remote OS, applications, users, etc.
        self._report_findings()

    return self._fuzzable_requests
def discover(self, fuzzableRequest):
    '''
    Find the WordPress installation path by requesting plugin/theme files
    directly and reading the full server side path from PHP "Fatal error"
    messages.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    '''
    possible_vulnerable_files = ['wp-content/plugins/akismet/akismet.php',
                                 'wp-content/plugins/hello.php']

    # Search this theme path and add the themes header/footer to the
    # possible vulnerable files
    domain_path = fuzzableRequest.getURL().getDomainPath()
    response = self._uri_opener.GET( domain_path, cache=True )
    if not is_404( response ):
        response_body = response.getBody()
        theme_regexp = domain_path+'wp-content/themes/(.*)/style.css'
        theme = re.search(theme_regexp, response_body, re.IGNORECASE)
        if theme:
            theme_name = theme.group(1)
            possible_vulnerable_files.append(domain_path+'wp-content/themes/'+theme_name+'/header.php')
            possible_vulnerable_files.append(domain_path+'wp-content/themes/'+theme_name+'/footer.php')

    if not self._exec :
        # Remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()
    else:
        for vulnerable_file in possible_vulnerable_files:
            vulnerable_url = domain_path.urlJoin(vulnerable_file)
            response = self._uri_opener.GET( vulnerable_url, cache=True )

            if is_404( response ):
                continue

            response_body = response.getBody()
            if 'Fatal error' in response_body:
                if vulnerable_file in response_body:
                    # Unix-like path (forward slashes), use as-is.
                    pass
                elif vulnerable_file.replace('/', '\\') in response_body:
                    # Microsoft Windows (back slashes)
                    vulnerable_file = vulnerable_file.replace('/', '\\')
                else:
                    # Bug fix: this branch used to assign to a misspelled
                    # and never-read variable ("vulnerrable_path"), so the
                    # path-disclosure regexp below ran even when the file
                    # path was NOT present in the error message. Clearing
                    # vulnerable_file skips the regexp as intended.
                    vulnerable_file = False

                if vulnerable_file:
                    match = ' <b>(.*)'+vulnerable_file+'</b>'
                    path_disclosure = re.search(match, response_body, re.IGNORECASE)
                    if path_disclosure:
                        i = info.info()
                        i.setPluginName(self.getName())
                        i.setName('WordPress server path found')
                        i.setURL( vulnerable_url )
                        i.setId( response.id )
                        desc = 'WordPress is installed on "%s"' % path_disclosure.group(1)
                        i.setDesc( desc )
                        kb.kb.append( self, 'info', i )
                        om.out.information( i.getDesc() )
                        break

    # Only run once
    self._exec = False
def discover(self, fuzzableRequest):
    """
    Get the file and parse it.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; findings are stored in the kb.
    """
    if not self._exec:
        raise w3afRunOnce()

    # Only run once
    self._exec = False

    base_url = fuzzableRequest.getURL().baseUrl()

    ### Google Gears
    for ext in self._extensions:
        for word in file(self._wordlist):
            manifest_url = base_url.urlJoin(word.strip() + ext)
            om.out.debug('Google Gears Manifest Testing "%s"' % (manifest_url))
            http_response = self._urlOpener.GET(manifest_url, useCache=True)

            if '"entries":' in http_response and not is_404(http_response):
                # Save it to the kb!
                i = info.info()
                i.setPluginName(self.getName())
                i.setName("Gears Manifest")
                i.setURL(manifest_url)
                i.setId(http_response.id)
                desc = 'A gears manifest file was found at: "' + manifest_url
                desc += '". Each file should be manually reviewed for sensitive'
                desc += " information that may get cached on the client."
                i.setDesc(desc)
                kb.kb.append(self, manifest_url, i)
                om.out.information(i.getDesc())

    ### CrossDomain.XML and CrossAccessPolicy.XML share the same handling.
    for policy_file in ("crossdomain.xml", "clientaccesspolicy.xml"):
        policy_url = base_url.urlJoin(policy_file)
        om.out.debug("Checking %s file" % policy_file)
        response = self._urlOpener.GET(policy_url, useCache=True)
        if not is_404(response):
            self._checkResponse(response, policy_file)

    return []
def discover(self, fuzzableRequest ):
    '''
    It calls the "main" from fingerprint_os and writes the results to the kb.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    '''
    if not self._exec:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Keep running until the remote OS is positively identified.
    os_identified = self._find_OS(fuzzableRequest)
    self._exec = not os_identified
def _do_complete_search( self, domain ):
    '''
    Performs a complete search for email addresses.

    @parameter domain: The domain to search for. Note that the actual query
                       is built from self._domain_root, not from this
                       parameter — presumably both hold the same root
                       domain; verify against the caller.
    @raise w3afRunOnce: If the google search fails, so this plugin is not
                        run again.
    '''
    # Query for any mail address at the root domain ("@domain.tld").
    search_string = '@'+ self._domain_root
    try:
        # NOTE(review): result_page_objects is never used in the visible
        # code; the result pages are presumably parsed further down —
        # confirm this method is not truncated here.
        result_page_objects = self._google.getNResultPages( search_string , self._result_limit )
    except w3afException, w3:
        om.out.error(str(w3))
        # If I found an error, I don't want to be run again
        raise w3afRunOnce()
def discover(self, fuzzableRequest ):
    '''
    Detect a reverse proxy between w3af and the web server using GET,
    TRACE and TRACK requests.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; findings are stored in the kb.
    '''
    if not self._run:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Run only once; every call to detectReverseProxy returns the same url's.
    self._run = False

    url = fuzzableRequest.getURL()

    # detect using GET
    # (skipped when a transparent proxy was already detected, since its
    # headers would mask any reverse proxy)
    if not kb.kb.getData( 'detectTransparentProxy', 'detectTransparentProxy'):
        response = self._urlOpener.GET( url, useCache=True )
        if self._has_proxy_headers( response ):
            self._report_finding( response )

    # detect using TRACE
    # only if I wasn't able to do it with GET
    if not kb.kb.getData( 'detectReverseProxy', 'detectReverseProxy' ):
        response = self._urlOpener.TRACE( url, useCache=True )
        if self._has_proxy_content( response ):
            self._report_finding( response )

    # detect using TRACK
    # This is a rather special case that works with ISA server; example follows:
    # Request:
    # TRACK http://www.xyz.com.bo/ HTTP/1.1
    # ...
    # Response headers:
    # HTTP/1.1 200 OK
    # content-length: 99
    # ...
    # Response body:
    # TRACK / HTTP/1.1
    # Reverse-Via: MUTUN ------> find this!
    # ....
    if not kb.kb.getData( 'detectReverseProxy', 'detectReverseProxy' ):
        response = self._urlOpener.TRACK( url, useCache=True )
        if self._has_proxy_content( response ):
            self._report_finding( response )

    # Report failure to detect reverse proxy
    if not kb.kb.getData( 'detectReverseProxy', 'detectReverseProxy' ):
        om.out.information( 'The remote web server doesn\'t seem to have a reverse proxy.' )

    return []
def discover(self, fuzzableRequest ): ''' It calls the "main" from hmap and writes the results to the kb. @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test. ''' if not self._exec: # This will remove the plugin from the discovery plugins to be runned. raise w3afRunOnce() else: if self._runned_hmap: # Nothing else to do here. self._exec = False if not self._runned_hmap: self._runned_hmap = True msg = 'Hmap web server fingerprint is starting, this may take a while.' om.out.information( msg ) url = fuzzableRequest.getURL() protocol = url.getProtocol() server = url.getNetLocation() # Set some defaults that can be overriden later if protocol == 'https': port = 443 ssl = True else: port = 80 ssl = False # Override the defaults if server.count(':'): port = int( server.split(':')[1] ) server = server.split(':')[0] try: results = originalHmap.testServer( ssl, server, port, 1, self._genFpF ) except w3afException, w3: msg = 'A w3afException occurred while running hmap: "' + str(w3) + '"' om.out.error( msg ) except Exception, e: msg = 'An unhandled exception occurred while running hmap: "' + str(e) + '"' om.out.error( msg ) else:
def discover(self, fuzzableRequest ): ''' Nothing strange, just do a GET request to the url and save the server headers to the kb. A smarter way to check the server type is with the hmap plugin. @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test. ''' if not self._exec: # This will remove the plugin from the discovery plugins to be run. raise w3afRunOnce() else: try: response = self._uri_opener.GET( fuzzableRequest.getURL(), cache=True ) except KeyboardInterrupt,e: raise e else:
def discover(self, fuzzableRequest):
    """
    Get the sitemap.xml file and parse it.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests found while parsing the sitemap urls.
    """
    if not self._exec:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Only run once
    self._exec = False

    self._new_fuzzable_requests = []

    base_url = fuzzableRequest.getURL().baseUrl()
    sitemap_url = base_url.urlJoin("sitemap.xml")
    response = self._urlOpener.GET(sitemap_url, useCache=True)

    # Remember that httpResponse objects have a faster "__in__" than
    # the one in strings; so string in response.getBody() is slower than
    # string in response
    if "</urlset>" in response and not is_404(response):
        om.out.debug("Analyzing sitemap.xml file.")

        self._new_fuzzable_requests.extend(self._createFuzzableRequests(response))

        import xml.dom.minidom
        om.out.debug("Parsing xml file with xml.dom.minidom.")
        try:
            dom = xml.dom.minidom.parseString(response.getBody())
        except:
            raise w3afException("Error while parsing sitemap.xml")

        for loc_tag in dom.getElementsByTagName("loc"):
            url_instance = url_object(loc_tag.childNodes[0].data)
            # Send the requests using threads:
            self._tm.startFunction(target=self._get_and_parse,
                                   args=(url_instance,), ownerObj=self)

    # Wait for all threads to finish
    self._tm.join(self)

    return self._new_fuzzable_requests
def discover(self, fuzzableRequest):
    """
    It calls the "main" from fingerprint_os and writes the results to the kb.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    """
    if not self._exec:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    if self._found_OS:
        # The OS was already identified; nothing else to do here.
        self._exec = False
    else:
        # Keep trying to identify the remote operating system.
        self._find_OS(fuzzableRequest)
def discover(self, fuzzableRequest ):
    '''
    It calls the "main" from halberd and writes the results to the kb.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; findings are written to the kb by _main.
    '''
    if not self._exec:
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Only run once.
    self._exec = False

    msg = 'halberd plugin is starting. Original halberd author: Juan M. Bello Rivas ;'
    msg += ' http://halberd.superadditive.com/'
    om.out.information( msg )

    self._main( fuzzableRequest.getURL().baseUrl().url_string )

    return []
def discover(self, fuzzableRequest ): ''' Nothing strange, just do some GET requests to the first URL with an invented parameter and the custom payloads that are supposed to be filtered, and analyze the response. @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test. ''' if not self._exec: # This will remove the plugin from the discovery plugins to be runned. raise w3afRunOnce() else: self._exec = False try: filtered, not_filtered = self._send_requests( fuzzableRequest ) except w3afException, w3: om.out.error( str(w3) ) else:
def discover(self, fuzzableRequest ):
    '''
    Uses several technics to try to find out what methods are allowed for
    an URL.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; findings are written to the kb.
    '''
    if not self._exec:
        # This will remove the plugin from the discovery
        # plugins to be run.
        raise w3afRunOnce()

    # Run the plugin.
    if self._exec_one_time:
        self._exec = False

    domain_path = fuzzableRequest.getURL().getDomainPath()
    if domain_path not in self._already_tested:
        # Test each directory only once.
        self._already_tested.add( domain_path )
        self._check_methods( domain_path )

    return []
def discover(self, fuzzableRequest ): ''' Get the sitemap.xml file and parse it. @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test. ''' if not self._exec: # This will remove the plugin from the discovery plugins to be run. raise w3afRunOnce() else: # Only run once self._exec = False self._new_fuzzable_requests = [] base_url = fuzzableRequest.getURL().baseUrl() sitemap_url = base_url.urlJoin( 'sitemap.xml' ) response = self._uri_opener.GET( sitemap_url, cache=True ) # Remember that httpResponse objects have a faster "__in__" than # the one in strings; so string in response.getBody() is slower than # string in response if '</urlset>' in response and not is_404( response ): om.out.debug('Analyzing sitemap.xml file.') self._new_fuzzable_requests.extend( self._createFuzzableRequests( response ) ) import xml.dom.minidom om.out.debug('Parsing xml file with xml.dom.minidom.') try: dom = xml.dom.minidom.parseString( response.getBody() ) except: raise w3afException('Error while parsing sitemap.xml') urlList = dom.getElementsByTagName("loc") for url in urlList: try: url = url.childNodes[0].data url = url_object(url) except ValueError, ve: om.out.debug('Sitemap file had an invalid URL: "%s"' % ve) except: om.out.debug('Sitemap file had an invalid format')
def discover(self, fuzzableRequest ):
    '''
    GET some files and parse them.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests created from the discovered Oracle pages.
    '''
    if not self._exec :
        # This will remove the plugin from the discovery plugins to be run.
        raise w3afRunOnce()

    # Only run once
    self._exec = False

    dirs = []
    base_url = fuzzableRequest.getURL().baseUrl()

    for url, regex_string in self.getOracleData():
        oracle_discovery_URL = base_url.urlJoin( url )
        response = self._uri_opener.GET( oracle_discovery_URL, cache=True )

        if is_404( response ):
            continue

        dirs.extend( self._createFuzzableRequests( response ) )

        if re.match( regex_string , response.getBody(), re.DOTALL):
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('Oracle application')
            i.setURL( response.getURL() )
            i.setDesc( self._parse( url, response ) )
            i.setId( response.id )
            kb.kb.append( self, 'info', i )
            om.out.information( i.getDesc() )
        else:
            msg = 'oracleDiscovery found the URL: ' + response.getURL()
            msg += ' but failed to parse it. The content of the URL is: "'
            msg += response.getBody() + '".'
            om.out.debug( msg )

    return dirs
def discover(self, fuzzableRequest ):
    '''
    1- Check if HTTP server is vulnerable
    2- Exploit using fuzzableRequest
    3- Perform bruteforce for each new directory

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: New resources plus bruteforce results when vulnerable;
             an empty list otherwise.
    '''
    if not self._exec :
        # This will remove the plugin from the discovery plugins to be run.
        # This is true only when the remote web server is not vulnerable
        raise w3afRunOnce()

    # Fix: _is_vulnerable() performs HTTP requests, and the original code
    # called it twice per discover() run — doubling the traffic and risking
    # two different answers for the same request. Call it once and reuse.
    is_vulnerable = self._is_vulnerable( fuzzableRequest )

    if is_vulnerable is None:
        # I can't say if it's vulnerable or not (yet), save the current
        # directory to be included in the bruteforcing process, and return.
        self._to_bruteforce.append(fuzzableRequest.getURL())
        return []
    elif is_vulnerable == False:
        # Not vulnerable, nothing else to do.
        self._exec = False
        return []
    else:
        # Happy, happy, joy!
        # Now we can test if we find new resources!
        new_resources = self._find_new_resources( fuzzableRequest )

        # and we can also perform a bruteforce:
        self._to_bruteforce.append(fuzzableRequest.getURL())
        bruteforce_result = self._bruteforce()

        result = []
        result.extend( new_resources )
        result.extend( bruteforce_result )

        return result
def discover(self, fuzzableRequest ):
    '''
    Runs pykto to the site.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests found by the scan.
    '''
    self._new_fuzzable_requests = []

    if not self._exec:
        # dont run anymore
        raise w3afRunOnce()

    # Update the scan database first, if the user asked for it.
    if self._update_scandb:
        self._update_db()

    # Run the basic scan (only once)
    if self._first_time:
        self._first_time = False
        self._exec = False
        self.__run( fuzzableRequest.getURL().baseUrl() )

    # And now mutate if the user configured it...
    if self._mutate_tests:
        # If mutations are enabled, I should keep running
        self._exec = True

        # Tests are to be mutated
        url = fuzzableRequest.getURL().getDomainPath()
        if url not in self._already_visited:
            # Save the directories I already have tested
            self._already_visited.add( url )
            self.__run( url )

    return self._new_fuzzable_requests