def _generic_vhosts( self, fuzzableRequest ):
    '''
    Test some generic virtual hosts, only do this once.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: A list of (vhost_name, response_id) tuples, one per common
             virtual host whose response body differs substantially from
             both the original response and the response for a host name
             that can not exist.
    '''
    found = []
    base_url = urlParser.baseUrl(fuzzableRequest.getURL())
    common_vhost_list = self._get_common_virtualhosts(urlParser.getDomain(base_url))

    # Baseline #1: the response for the site itself.
    baseline_body = self._urlOpener.GET(base_url, useCache=True).getBody()

    # Baseline #2: the response for a host name that can not exist.
    non_existant = 'iDoNotExistPleaseGoAwayNowOrDie' + createRandAlNum(4)
    self._non_existant_response = self._urlOpener.GET(base_url, useCache=False, \
                                                      headers={'Host': non_existant })
    bogus_body = self._non_existant_response.getBody()

    for common_vhost in common_vhost_list:
        try:
            vhost_response = self._urlOpener.GET( base_url, useCache=False, \
                                                  headers={'Host': common_vhost } )
        except w3afException:
            # Best effort: a failed request simply skips this vhost name
            continue

        vhost_body = vhost_response.getBody()
        # Only report when the body is *really* different from BOTH
        # baselines (not just different by some chars)
        if relative_distance_lt(vhost_body, baseline_body, 0.35) and \
           relative_distance_lt(vhost_body, bogus_body, 0.35):
            found.append((common_vhost, vhost_response.id))

    return found
def discover(self, fuzzableRequest ):
    '''
    Get www.site.com and site.com and compare responses.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; findings are reported by _test_DNS / _test_IP.
    '''
    if not self._exec :
        # This will remove the plugin from the discovery plugins to be runned.
        raise w3afRunOnce()
    else:
        # Only run once
        self._exec = False

        domain = urlParser.getDomain( fuzzableRequest.getURL() )

        # Only do all this if the target is a domain name, not an IP address.
        #
        # Bug fix: the previous pattern ('\d?\d?\d?\.' repeated) used only
        # OPTIONAL digits and was unanchored, so any host name with four
        # dot-separated labels that start with (or are) digits — e.g.
        # "1.2.3.example.com" — was wrongly classified as an IP address and
        # silently skipped. Require 1-3 digits per octet and anchor the
        # match at the end of the string.
        if not re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', domain):
            base_url = urlParser.baseUrl( fuzzableRequest.getURL() )
            original_response = self._urlOpener.GET( base_url, useCache=True )
            proto = urlParser.getProtocol( fuzzableRequest.getURL() )

            # Build the "opposite" host name: strip "www." when present,
            # prepend it otherwise.
            if domain.startswith('www.'):
                dns_wildcard_url = proto + '://' + domain.replace('www.', '') + '/'
            else:
                dns_wildcard_url = proto + '://www.' + domain + '/'

            self._test_DNS( original_response, dns_wildcard_url )
            self._test_IP( original_response, domain )

    return []
def discover(self, fuzzableRequest ):
    '''
    Searches for user directories.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests collected by the _do_request worker
             threads (self._fuzzable_requests).
    '''
    if not self._run:
        raise w3afRunOnce()
    else:
        # Only run once
        self._run = False
        self._fuzzable_requests = []
        base_url = urlParser.baseUrl( fuzzableRequest.getURL() )
        self._headers = {'Referer': base_url }

        # Create a response body to compare with the others. '******' is a
        # user name that can not exist, so this response acts as the
        # "user not found" signature.
        non_existant_user = '******'
        test_URL = urlParser.urlJoin( base_url, non_existant_user )
        try:
            response = self._urlOpener.GET( test_URL, useCache=True, \
                                            headers=self._headers )
            response_body = response.getBody()
        except:
            # NOTE(review): bare "except:" also catches KeyboardInterrupt and
            # hides the original error; consider narrowing it.
            raise w3afException('userDir failed to create a non existant signature.')

        # Strip the probed user name so the signature is path independent
        self._non_existant = response_body.replace( non_existant_user, '')

        # Check the users to see if they exist
        url_user_list = self._create_dirs( base_url )
        for url, user in url_user_list :
            om.out.debug('userDir is testing ' + url )
            # Send the requests using threads:
            targs = ( url, user )
            self._tm.startFunction( target=self._do_request, args=targs, ownerObj=self )

        # Wait for all threads to finish
        self._tm.join( self )

        # Only do this if I already know that users can be identified.
        if kb.kb.getData( 'userDir', 'users' ) != []:
            # AND only run once
            if self._run_OS_ident:
                self._run_OS_ident = False
                self._advanced_identification( base_url, 'os' )

            if self._run_app_ident:
                self._run_app_ident = False
                self._advanced_identification( base_url, 'apps' )

            # Report findings of remote OS, applications, users, etc.
            self._report_findings()

        return self._fuzzable_requests
def discover(self, fuzzableRequest ):
    '''
    Get the robots.txt file and parse it.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests found while downloading and parsing
             robots.txt and every URL it references.
    '''
    if not self._exec:
        # This will remove the plugin from the discovery plugins to be runned.
        raise w3afRunOnce()

    # Only run once
    self._exec = False

    to_download = []
    self._new_fuzzable_requests = []

    base_url = urlParser.baseUrl( fuzzableRequest.getURL() )
    robots_url = urlParser.urlJoin( base_url , 'robots.txt' )
    http_response = self._urlOpener.GET( robots_url, useCache=True )

    if not is_404( http_response ):
        # robots.txt exists; save the finding to the kb
        robots_info = info.info()
        robots_info.setPluginName(self.getName())
        robots_info.setName('robots.txt file')
        robots_info.setURL( robots_url )
        robots_info.setId( http_response.id )
        robots_info.setDesc( 'A robots.txt file was found at: "'+ robots_url +'".' )
        kb.kb.append( self, 'robots.txt', robots_info )
        om.out.information( robots_info.getDesc() )

        # Extract one URL from every Allow / Disallow entry
        to_download.append( robots_url )
        for line in http_response.getBody().split('\n'):
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            upper_line = line.upper()
            if upper_line.startswith('ALLOW') or upper_line.startswith('DISALLOW'):
                # Keep everything after the first ":" (or the whole line when
                # there is no ":", exactly like the original slice did)
                path = line[ line.find(':') + 1 : ].strip()
                to_download.append( urlParser.urlJoin( base_url , path ) )

    # Download and parse every URL, one worker thread each
    for url in to_download:
        targs = ( url, )
        self._tm.startFunction( target=self._get_and_parse, args=targs , ownerObj=self )

    # Wait for all threads to finish
    self._tm.join( self )

    return self._new_fuzzable_requests
def discover(self, fuzzableRequest):
    """
    Get the file and parse it.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; findings are written to the kb.
    """
    if not self._exec:
        raise w3afRunOnce()
    else:
        # Only run once
        self._exec = False
        base_url = urlParser.baseUrl(fuzzableRequest.getURL())

        ### Google Gears
        #
        # Bug fix: the wordlist used to be re-opened and re-read from disk on
        # EVERY extension iteration (file(self._wordlist) inside the loop) and
        # the file handle was never closed. Read it once, close it, and reuse
        # the in-memory list.
        wordlist_fd = open(self._wordlist)
        try:
            words = [word.strip() for word in wordlist_fd]
        finally:
            wordlist_fd.close()

        for ext in self._extensions:
            for word in words:
                manifest_url = urlParser.urlJoin(base_url, word)
                manifest_url = manifest_url + ext
                om.out.debug("Google Gears Manifest Testing " + manifest_url)
                http_response = self._urlOpener.GET(manifest_url, useCache=True)

                # httpResponse implements membership, so this checks the body
                if '"entries":' in http_response and not is_404(http_response):
                    # Save it to the kb!
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName("Gears Manifest")
                    i.setURL(manifest_url)
                    i.setId(http_response.id)
                    desc = 'A gears manifest file was found at: "' + manifest_url
                    desc += '". Each file should be manually reviewed for sensitive'
                    desc += " information that may get cached on the client."
                    i.setDesc(desc)
                    kb.kb.append(self, manifest_url, i)
                    om.out.information(i.getDesc())

        ### CrossDomain.XML
        cross_domain_url = urlParser.urlJoin(base_url, "crossdomain.xml")
        om.out.debug("Checking crossdomain.xml file")
        response = self._urlOpener.GET(cross_domain_url, useCache=True)
        if not is_404(response):
            self._checkResponse(response, "crossdomain.xml")

        ### CrossAccessPolicy.XML
        client_access_url = urlParser.urlJoin(base_url, "clientaccesspolicy.xml")
        om.out.debug("Checking clientaccesspolicy.xml file")
        response = self._urlOpener.GET(client_access_url, useCache=True)
        if not is_404(response):
            self._checkResponse(response, "clientaccesspolicy.xml")

    return []
def discover(self, fuzzableRequest ):
    '''
    It calls the "main" from halberd and writes the results to the kb.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; halberd stores its findings in the kb.
    '''
    if not self._exec:
        # This will remove the plugin from the discovery plugins to be runned.
        raise w3afRunOnce()

    self._exec = False

    # Credit the original author before starting the scan
    msg = ('halberd plugin is starting. Original halberd author: '
           'Juan M. Bello Rivas ; http://halberd.superadditive.com/')
    om.out.information( msg )

    self._main( urlParser.baseUrl( fuzzableRequest.getURL() ) )

    return []
def discover(self, fuzzableRequest ):
    '''
    Get the sitemap.xml file and parse it.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests found in sitemap.xml (empty when there
             is no sitemap).
    @raise w3afException: When sitemap.xml exists but can not be parsed.
    '''
    if not self._exec:
        # This will remove the plugin from the discovery plugins to be runned.
        raise w3afRunOnce()
    else:
        # Only run once
        self._exec = False
        self._new_fuzzable_requests = []

        base_url = urlParser.baseUrl( fuzzableRequest.getURL() )
        sitemap_url = urlParser.urlJoin( base_url , 'sitemap.xml' )
        response = self._urlOpener.GET( sitemap_url, useCache=True )

        # Remember that httpResponse objects have a faster "__in__" than
        # the one in strings; so string in response.getBody() is slower than
        # string in response
        if '</urlset>' in response and not is_404( response ):
            om.out.debug('Analyzing sitemap.xml file.')

            self._new_fuzzable_requests.extend( self._createFuzzableRequests( response ) )

            # Lazy import: only pay for the XML parser when a sitemap exists
            import xml.dom.minidom
            om.out.debug('Parsing xml file with xml.dom.minidom.')
            try:
                dom = xml.dom.minidom.parseString( response.getBody() )
            except Exception:
                # Bug fix: this used to be a bare "except:", which also
                # swallowed KeyboardInterrupt / SystemExit. Catch only real
                # parsing failures and report them as a w3afException.
                raise w3afException('Error while parsing sitemap.xml')
            else:
                url_list = dom.getElementsByTagName("loc")
                for url in url_list:
                    url = url.childNodes[0].data
                    # Send the requests using threads:
                    targs = ( url, )
                    self._tm.startFunction( target=self._get_and_parse,
                                            args=targs , ownerObj=self )

                # Wait for all threads to finish
                self._tm.join( self )

        return self._new_fuzzable_requests
def discover(self, fuzzableRequest ):
    '''
    GET some files and parse them.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests created from the Oracle discovery URLs
             that exist on the target.
    '''
    found = []

    if not self._exec :
        # This will remove the plugin from the discovery plugins to be runned.
        raise w3afRunOnce()

    # Only run once
    self._exec = False

    base_url = urlParser.baseUrl( fuzzableRequest.getURL() )

    for url, regex_string in self.getOracleData():

        oracle_discovery_URL = urlParser.urlJoin( base_url , url )
        response = self._urlOpener.GET( oracle_discovery_URL, useCache=True )

        if is_404( response ):
            continue

        found.extend( self._createFuzzableRequests( response ) )

        if re.match( regex_string , response.getBody(), re.DOTALL):
            # The page content matches the signature: report the finding
            oracle_info = info.info()
            oracle_info.setPluginName(self.getName())
            oracle_info.setName('Oracle application')
            oracle_info.setURL( response.getURL() )
            oracle_info.setDesc( self._parse( url, response ) )
            oracle_info.setId( response.id )
            kb.kb.append( self, 'info', oracle_info )
            om.out.information( oracle_info.getDesc() )
        else:
            msg = 'oracleDiscovery found the URL: ' + response.getURL()
            msg += ' but failed to parse it. The content of the URL is: "'
            msg += response.getBody() + '".'
            om.out.debug( msg )

    return found
def discover(self, fuzzableRequest ):
    '''
    Runs pykto to the site.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests created by the scan (filled in by
             self.__run via self._new_fuzzable_requests).
    '''
    self._new_fuzzable_requests = []

    if not self._exec:
        # dont run anymore
        raise w3afRunOnce()
    else:
        # run!
        if self._update_scandb:
            # Refresh the scan database before scanning
            self._update_db()

        # Run the basic scan (only once)
        if self._first_time:
            self._first_time = False
            url = urlParser.baseUrl( fuzzableRequest.getURL() )
            # Assume we are done; the mutation branch below may re-enable
            # the plugin for future calls
            self._exec = False
            self.__run( url )

        # And now mutate if the user configured it...
        if self._mutate_tests:
            # If mutations are enabled, I should keep running
            self._exec = True
            # Tests are to be mutated: scan every new directory we see
            url = urlParser.getDomainPath( fuzzableRequest.getURL() )
            if url not in self._already_visited:
                # Save the directories I already have tested
                self._already_visited.add( url )
                self.__run( url )

    return self._new_fuzzable_requests
def discover(self, fuzzableRequest ):
    '''
    Get the file and parse it.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: The fuzzable requests collected by the bruteforce worker
             threads (self._fuzzable_requests).
    '''
    if not self._exec:
        raise w3afRunOnce()

    if not self._be_recursive:
        # Only run once
        self._exec = False

    self._fuzzable_requests = []

    domain_path = urlParser.getDomainPath( fuzzableRequest.getURL() )
    base_url = urlParser.baseUrl( fuzzableRequest.getURL() )

    # Decide which base paths still need to be bruteforced
    targets = []
    if not self._tested_base_url:
        targets.append( base_url )
        self._tested_base_url = True
    if domain_path != base_url:
        targets.append( domain_path )

    # One worker thread per base path
    for base_path in targets:
        self._tm.startFunction( target=self._bruteforce_directories,
                                args=( base_path, ) , ownerObj=self )

    # Wait for all threads to finish
    self._tm.join( self )

    return self._fuzzable_requests
def discover(self, fuzzableRequest ):
    '''
    Get the server-status and parse it.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: A list of fuzzable requests created from the URLs disclosed
             by the Apache server-status page.
    '''
    res = []
    if not self._exec :
        # This will remove the plugin from the discovery plugins to be runned.
        raise w3afRunOnce()
    else:
        # Only run once
        self._exec = False

        base_url = urlParser.baseUrl( fuzzableRequest.getURL() )
        server_status_url = urlParser.urlJoin( base_url , 'server-status' )
        response = self._urlOpener.GET( server_status_url, useCache=True )

        # range(400, 404) excludes the 400-403 error answers as well
        if not is_404( response ) and response.getCode() not in range(400, 404):
            msg = 'Apache server-status cgi exists. The URL is: "' + response.getURL() + '".'
            om.out.information( msg )

            # Create some simple fuzzable requests
            res.extend( self._createFuzzableRequests( response ) )

            # Get the server version
            # <dl><dt>Server Version: Apache/2.2.9 (Unix)</dt>
            for version in re.findall('<dl><dt>Server Version: (.*?)</dt>', response.getBody()):
                # Save the results in the KB so the user can look at it
                i = info.info()
                i.setPluginName(self.getName())
                i.setURL( response.getURL() )
                i.setId( response.id )
                i.setName( 'Apache Server version' )
                msg = 'The web server has the apache server status module enabled, '
                msg += 'which discloses the following remote server version: "' + version + '".'
                i.setDesc( msg )
                om.out.information(i.getDesc())
                kb.kb.append( self, 'server', i )

            # Now really parse the file and create custom made fuzzable requests
            regex = '<td>.*?<td nowrap>(.*?)</td><td nowrap>.*? (.*?) HTTP/1'
            for domain, path in re.findall(regex, response.getBody() ):

                if 'unavailable' in domain:
                    # server-status prints "unavailable" when it does not
                    # know the vhost; fall back to the requested domain
                    domain = urlParser.getDomain( response.getURL() )

                foundURL = urlParser.getProtocol( response.getURL() ) + '://' + domain + path

                # Check if the requested domain and the found one are equal.
                if urlParser.getDomain( foundURL ) == urlParser.getDomain( response.getURL() ):
                    # They are equal, request the URL and create the fuzzable requests
                    tmpRes = self._urlOpener.GET( foundURL, useCache=True )
                    if not is_404( tmpRes ):
                        res.extend( self._createFuzzableRequests( tmpRes ) )
                else:
                    # This is a shared hosting server
                    self._shared_hosting_hosts.append( domain )

            # Now that we are outsite the for loop, we can report the possible vulns
            if len( self._shared_hosting_hosts ):
                v = vuln.vuln()
                v.setPluginName(self.getName())
                v.setURL( fuzzableRequest.getURL() )
                v.setId( response.id )
                self._shared_hosting_hosts = list( set( self._shared_hosting_hosts ) )
                v['alsoInHosting'] = self._shared_hosting_hosts
                v.setDesc( 'The web application under test seems to be in a shared hosting.' )
                v.setName( 'Shared hosting' )
                v.setSeverity(severity.MEDIUM)

                kb.kb.append( self, 'sharedHosting', v )
                om.out.vulnerability( v.getDesc(), severity=v.getSeverity() )

                msg = 'This list of domains, and the domain of the web application under test,'
                msg += ' all point to the same server:'
                om.out.vulnerability(msg, severity=severity.MEDIUM )
                for url in self._shared_hosting_hosts:
                    om.out.vulnerability('- ' + url, severity=severity.MEDIUM )

            # Check if well parsed
            elif 'apache' in response.getBody().lower():
                msg = 'Couldn\'t find any URLs in the apache server status page. Two things can'
                msg += ' trigger this:\n - The Apache web server sent a server-status page'
                msg += ' that the serverStatus plugin failed to parse or,\n - The remote '
                msg += ' web server has no traffic. If you are sure about the first one, please'
                msg += ' report a bug.'
                om.out.information( msg )
                om.out.debug('The server-status body is: "'+response.getBody()+'"')

    return res
def setOptions(self, optionsMap):
    """
    This method sets all the options that are configured using the user interface
    generated by the framework using the result of getOptions().

    @parameter optionsMap: A dictionary with the options for the plugin.
    @return: No value is returned.
    @raise w3afException: On unreadable target files, multiple target
                          domains, or unknown OS / framework values.
    """
    targetUrls = optionsMap["target"].getValue()

    # Validate every configured target before doing anything else
    for targetUrl in targetUrls:
        self._verifyURL(targetUrl)

    # Expand "file://" targets: each such entry is replaced by the URLs
    # listed inside the file.
    #
    # Bug fix: the original loop appended to and removed from targetUrls
    # *while iterating over it*, which makes Python skip elements (the
    # second of two file:// targets could be silently ignored). Iterate
    # over a snapshot and mutate the real list.
    for targetUrl in targetUrls[:]:
        if targetUrl.count("file://"):
            try:
                f = urllib2.urlopen(targetUrl)
            except Exception:
                raise w3afException("Cannot open target file: " + targetUrl)
            else:
                for line in f:
                    target_in_file = line.strip()
                    self._verifyURL(target_in_file, fileTarget=False)
                    targetUrls.append(target_in_file)
                f.close()
            targetUrls.remove(targetUrl)

    # Now we perform a check to see if the user has specified more than one target
    # domain, for example: "http://google.com, http://yahoo.com".
    domainList = [urlParser.getNetLocation(targetURL) for targetURL in targetUrls]
    domainList = list(set(domainList))
    if len(domainList) > 1:
        msg = "You specified more than one target domain: " + ",".join(domainList)
        msg += " . And w3af only supports one target domain at the time."
        raise w3afException(msg)

    # Save in the config, the target URLs, this may be usefull for some plugins.
    cf.cf.save("targets", targetUrls)
    cf.cf.save("targetDomains", [urlParser.getNetLocation(i) for i in targetUrls])
    cf.cf.save("baseURLs", [urlParser.baseUrl(i) for i in targetUrls])

    # Build a session name from the target netlocations plus a timestamp
    if targetUrls:
        sessName = [urlParser.getNetLocation(x) for x in targetUrls]
        sessName = "-".join(sessName)
    else:
        sessName = "noTarget"
    cf.cf.save("sessionName", sessName + "-" + time.strftime("%Y-%b-%d_%H-%M-%S"))

    # Advanced target selection: operating system.
    # (local renamed from "os" so it no longer shadows the os module)
    target_os = optionsMap["targetOS"].getValueStr()
    if target_os.lower() in self._operatingSystems:
        cf.cf.save("targetOS", target_os.lower())
    else:
        raise w3afException("Unknown target operating system: " + target_os)

    # Advanced target selection: programming framework
    target_framework = optionsMap["targetFramework"].getValueStr()
    if target_framework.lower() in self._programmingFrameworks:
        cf.cf.save("targetFramework", target_framework.lower())
    else:
        raise w3afException("Unknown target programming framework: " + target_framework)
def discover(self, fuzzableRequest ):
    '''
    Search Bing for other domains hosted on the same IP address(es) as the
    target, and report a shared-hosting vulnerability when any are found.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: An empty list; findings are written to the kb.
    '''
    if not self._run:
        # This will remove the plugin from the discovery plugins to be runned.
        raise w3afRunOnce()
    else:
        # I will only run this one time. All calls to sharedHosting return the same url's
        self._run = False

        bing_wrapper = bing( self._urlOpener )

        domain = urlParser.getDomain( fuzzableRequest.getURL() )
        if is_private_site( domain ):
            # A private address can not be searched for on Bing
            msg = 'sharedHosting plugin is not checking for subdomains for domain: '
            msg += domain + ' because its a private address.'
            om.out.debug(msg)
        else:
            # Get the ip and do the search
            addrinfo = None
            try:
                addrinfo = socket.getaddrinfo(domain, 0)
            except:
                # NOTE(review): bare "except:" also catches KeyboardInterrupt;
                # consider narrowing to socket.error.
                raise w3afException('Could not resolve hostname: ' + domain )
            ip_address_list = [info[4][0] for info in addrinfo]
            ip_address_list = list( set(ip_address_list) )

            # This is the best way to search, one by one!
            for ip_address in ip_address_list:
                results = bing_wrapper.getNResults('ip:'+ ip_address, self._result_limit )

                # Normalize and deduplicate the returned URLs
                results = [ urlParser.baseUrl( r.URL ) for r in results ]
                results = list( set( results ) )

                # not vuln by default
                is_vulnerable = False

                if len(results) > 1:
                    # We may have something...
                    is_vulnerable = True

                    if len(results) == 2:
                        # Maybe we have this case:
                        # [Mon 09 Jun 2008 01:08:26 PM ART] - http://216.244.147.14/
                        # [Mon 09 Jun 2008 01:08:26 PM ART] - http://www.business.com/
                        # Where www.business.com resolves to 216.244.147.14; so we don't really
                        # have more than one domain in the same server.
                        res0 = socket.gethostbyname( urlParser.getDomain( results[0] ) )
                        res1 = socket.gethostbyname( urlParser.getDomain( results[1] ) )
                        if res0 == res1:
                            is_vulnerable = False

                if is_vulnerable:
                    severityOfThisVuln = severity.MEDIUM
                    v = vuln.vuln()
                    v.setPluginName(self.getName())
                    v.setURL(fuzzableRequest.getURL())
                    v.setId(1)
                    v['alsoInHosting'] = results

                    msg = 'The web application under test seems to be in a shared hosting. '
                    msg += 'This list of domains, and the domain of the web application under '
                    msg += 'test, all point to the same IP address (%s):\n' % ip_address

                    # List every co-hosted domain and record it in the kb
                    for url in results:
                        domain = urlParser.getDomain(url)
                        msg += '- %s\n' % url
                        kb.kb.append( self, 'domains', domain)

                    v.setDesc( msg )
                    v.setName( 'Shared hosting' )
                    v.setSeverity(severityOfThisVuln)

                    om.out.vulnerability( msg, severity=severityOfThisVuln )
                    kb.kb.append( self, 'sharedHosting', v )

    return []
# NOTE(review): the two lines below are the tail of a method whose "def" line
# falls outside this chunk; kept verbatim.
self._report_findings()
return self._fuzzable_requests

def _do_request( self, mutant, user ):
    '''
    Perform the request and compare.

    @parameter mutant: The URL (user directory candidate) to request.
    @parameter user: The user name that corresponds to that URL.
    @return: True when the user was found.
        NOTE(review): the body never actually returns True; it records the
        finding in the kb instead — the docstring looks outdated.
    '''
    try:
        response = self._urlOpener.GET( mutant, useCache=True,
                                        headers=self._headers )
    except KeyboardInterrupt,e:
        # Re-raise so the user can abort the scan
        raise e
    else:
        # Strip the probed path so the body can be compared with the
        # path-independent "non existant user" signature built earlier
        path = mutant.replace( urlParser.baseUrl( mutant ) , '' )
        response_body = response.getBody().replace( path, '')

        if relative_distance_lt(response_body, self._non_existant, 0.7):
            # The body differs enough from the "no such user" signature,
            # so this user directory seems to exist.
            # Avoid duplicates
            if user not in [ u['user'] for u in kb.kb.getData( 'userDir', 'users') ]:
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('User directory: ' + response.getURL() )
                i.setId( response.id )
                i.setDesc( 'A user directory was found at: ' + response.getURL() )
                i['user'] = user

                kb.kb.append( self, 'users', i )
def _get_dead_links(self, fuzzableRequest):
    '''
    Find every link on a HTML document verify if the domain is reachable or not;
    after that, verify if the web found a different name for the target site or
    if we found a new site that is linked. If the link points to a dead site then
    report it (it could be pointing to some private address or something...)

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    @return: A list of (domain, response_id) tuples for linked domains that
             answer as virtual hosts on the target server.
    '''
    res = []

    # Get some responses to compare later
    base_url = urlParser.baseUrl(fuzzableRequest.getURL())
    original_response = self._urlOpener.GET(fuzzableRequest.getURI(), useCache=True)
    base_response = self._urlOpener.GET(base_url, useCache=True)
    base_resp_body = base_response.getBody()

    try:
        dp = dpCache.dpc.getDocumentParserFor(original_response)
    except w3afException:
        # Failed to find a suitable parser for the document
        return []

    # Set the non existant response: a host name that can not exist gives us
    # the server's "unknown vhost" baseline
    non_existant = 'iDoNotExistPleaseGoAwayNowOrDie' + createRandAlNum(4)
    self._non_existant_response = self._urlOpener.GET(base_url, useCache=False,
                                                      headers={'Host': non_existant})
    nonexist_resp_body = self._non_existant_response.getBody()

    # Note:
    # - With parsed_references I'm 100% that it's really something in the HTML
    # that the developer intended to add.
    #
    # - The re_references are the result of regular expressions, which in some cases
    # are just false positives.
    #
    # In this case, and because I'm only going to use the domain name of the URL
    # I'm going to trust the re_references also.
    parsed_references, re_references = dp.getReferences()
    parsed_references.extend(re_references)

    for link in parsed_references:
        domain = urlParser.getDomain(link)

        #
        # First section, find internal hosts using the HTTP Host header:
        #
        if domain not in self._already_queried:
            # If the parsed page has an external link to www.google.com
            # then I'll send a request to the target site, with Host: www.google.com
            # This sucks, but it's cool if the document has a link to
            # http://some.internal.site.target.com/
            try:
                vhost_response = self._urlOpener.GET(base_url, useCache=False,
                                                     headers={'Host': domain })
            except w3afException:
                pass
            else:
                self._already_queried.add(domain)
                vhost_resp_body = vhost_response.getBody()

                # If they are *really* different (not just different by some chars)
                if relative_distance_lt(vhost_resp_body, base_resp_body, 0.35) and \
                   relative_distance_lt(vhost_resp_body, nonexist_resp_body, 0.35):
                    # and the domain can't just be resolved using a DNS query to
                    # our regular DNS server
                    report = True
                    if self._can_resolve_domain_names:
                        try:
                            socket.gethostbyname(domain)
                        except:
                            # aha! The HTML is linking to a domain that's
                            # hosted in the same server, and the domain name
                            # can NOT be resolved!
                            report = True
                        else:
                            report = False

                    # have found something interesting!
                    if report:
                        res.append( (domain, vhost_response.id) )

        #
        # Second section, find hosts using failed DNS resolutions
        #
        if self._can_resolve_domain_names:
            try:
                # raises exception when it's not found
                # socket.gaierror: (-5, 'No address associated with hostname')
                socket.gethostbyname( domain )
            except:
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('Internal hostname in HTML link')
                i.setURL( fuzzableRequest.getURL() )
                i.setMethod( 'GET' )
                i.setId( original_response.id )
                msg = 'The content of "'+ fuzzableRequest.getURL() +'" references a non '
                msg += 'existant domain: "' + link + '". This may be a broken link, or an'
                msg += ' internal domain name.'
                i.setDesc( msg )
                kb.kb.append( self, 'findvhost', i )
                om.out.information( i.getDesc() )

    # Defensive cleanup before returning the tuples
    res = [ r for r in res if r != '']

    return res