def discover(self, fuzzableRequest):
    '''
    For every directory of the target URL, request each known PHP-info
    filename and analyze the response. The actual check runs in worker
    threads via the thread manager.

    @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                (among other things) the URL to test.
    '''
    self._new_fuzzable_requests = []

    for directory_url in urlParser.getDirectories(fuzzableRequest.getURL()):
        if directory_url in self._analyzed_dirs:
            # This directory was already handled in a previous call.
            continue

        # Remember the directory so the same work isn't repeated in vain.
        self._analyzed_dirs.append(directory_url)

        # Queue one check per candidate filename; each runs in a thread.
        for candidate_filename in self._get_PHP_infofile():
            self._tm.startFunction(target=self._check_and_analyze,
                                   args=(directory_url, candidate_filename),
                                   ownerObj=self)

        # Block until every queued check for this directory has finished.
        self._tm.join(self)

    return self._new_fuzzable_requests
def _generate_paths(self, url, uploaded_file_name):
    '''
    Generate candidate URLs where an uploaded file could be stored.

    @parameter url: A URL where the uploaded_file_name could be
    @parameter uploaded_file_name: The name of the file that was uploaded
                                   to the server
    @return: A generator yielding the paths where the file could be.
    '''
    common_upload_dirs = ['uploads', 'upload', 'file', 'user',
                          'files', 'downloads', 'download', 'up', 'down']

    # Hoisted out of the loop: the directory list depends only on `url`,
    # so the original code recomputed the exact same list once per
    # candidate directory name for no benefit.
    directories = urlParser.getDirectories(url)

    for default_path in common_upload_dirs:
        for path in directories:
            # e.g. http://host/app/ + 'uploads' + '/' + 'shell.php'
            yield path + default_path + '/' + uploaded_file_name
def _find_OS(self, fuzzableRequest):
    '''
    Analyze responses and determine if the remote web server runs on
    windows or *nix. The trick: Windows web servers usually treat
    back-slash and forward-slash path separators the same way, so a URL
    with the last '/' replaced by '\\' returns the same page on Windows.

    @Return: None, the knowledge is saved in the knowledgeBase
    '''
    url = fuzzableRequest.getURL()
    dirs = urlParser.getDirectories(url)
    filename = urlParser.getFileName(url)

    # The test needs a filename inside at least one sub-directory,
    # otherwise there is no separator to replace.
    if not (len(dirs) > 1 and filename):
        return

    def _save_finding(finding_url, desc, response_ids, os_string):
        # Build the info object, store the result in the kb and tell the user.
        i = info.info()
        i.setPluginName(self.getName())
        i.setName('Operating system')
        i.setURL(finding_url)
        i.setMethod('GET')
        i.setDesc(desc)
        i.setId(response_ids)
        kb.kb.append(self, 'operating_system_str', os_string)
        kb.kb.append(self, 'operating_system', i)
        om.out.information(i.getDesc())

    # Swap the trailing '/' of the deepest directory for a '\'.
    deepest_dir = dirs[-1]
    windowsURL = deepest_dir[0:-1] + '\\' + filename

    windows_response = self._urlOpener.GET(windowsURL)
    original_response = self._urlOpener.GET(url)
    self._found_OS = True

    if relative_distance_ge(original_response.getBody(),
                            windows_response.getBody(), 0.98):
        # Both bodies are (almost) equal -> the server accepted the
        # back-slash separator -> Windows.
        _save_finding(windows_response.getURL(),
                      'Fingerprinted this host as a Microsoft Windows system.',
                      [windows_response.id, original_response.id],
                      'windows')
    else:
        msg = 'Fingerprinted this host as a *nix system. Detection for this operating'
        msg += ' system is weak, "if not windows: is linux".'
        _save_finding(original_response.getURL(),
                      msg,
                      [original_response.id, windows_response.id],
                      'unix')
def discover(self, fuzzableRequest ): ''' For every directory, fetch a list of files and analyze the response. @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test. ''' fuzzable_return_value = [] if not self._exec: # This will remove the plugin from the discovery plugins to be runned. raise w3afRunOnce() else: # Run the plugin. self._exec = False for domain_path in urlParser.getDirectories(fuzzableRequest.getURL() ): if domain_path not in self._analyzed_dirs: # Save the domain_path so I know I'm not working in vane self._analyzed_dirs.add( domain_path ) # Request the file frontpage_info_url = urlParser.urlJoin( domain_path , "_vti_inf.html" ) try: response = self._urlOpener.GET( frontpage_info_url, useCache=True ) om.out.debug( '[frontpage_version] Testing "' + frontpage_info_url + '".' ) except w3afException, w3: msg = 'Failed to GET Frontpage Server _vti_inf.html file: "' msg += frontpage_info_url + '". Exception: "' + str(w3) + '".' om.out.debug( msg ) else: # Check if it's a Fronpage Info file if not is_404( response ): fuzzable_return_value.extend( self._createFuzzableRequests( response ) ) self._analyze_response( response ) return fuzzable_return_value
# Note: # - With parsed_references I'm 100% that it's really something in the HTML # that the developer intended to add. # # - The re_references are the result of regular expressions, which in some cases # are just false positives. parsed_references, re_references = documentParser.getReferences() # I also want to analyze all directories, if the URL I just fetched is: # http://localhost/a/b/c/f00.php I want to GET: # http://localhost/a/b/c/ # http://localhost/a/b/ # http://localhost/a/ # http://localhost/ # And analyze the responses... directories = urlParser.getDirectories(response.getURL()) parsed_references.extend(directories) parsed_references = list(set(parsed_references)) references = parsed_references + re_references references = list(set(references)) # Filter only the references that are inside the target domain # I don't want w3af sending request to 3rd parties! references = [r for r in references if urlParser.getDomain(r) == self._target_domain] # Filter the URL's according to the configured regular expressions references = [r for r in references if self._compiled_follow_re.match(r)] references = [r for r in references if not self._compiled_ignore_re.match(r)] # work with the parsed references and report broken links