def audit(self, freq):
        """
        Tries to bruteforce a basic HTTP auth. This is not fast!
        
        @param freq: A fuzzableRequest
        """
        auth_url_list = [urlParser.getDomainPath(i.getURL()) for i in kb.kb.getData("httpAuthDetect", "auth")]

        domain_path = urlParser.getDomainPath(freq.getURL())

        if domain_path in auth_url_list:
            if domain_path not in self._alreadyTested:

                # Save it (we don't want dups!)
                self._alreadyTested.append(domain_path)

                # Let the user know what we are doing
                msg = 'Starting basic authentication bruteforce on URL: "' + domain_path + '".'
                om.out.information(msg)
                self._initBruteforcer(domain_path)

                while not self._found or not self._stopOnFirst:
                    combinations = []

                    for i in xrange(30):
                        try:
                            combinations.append(self._bruteforcer.getNext())
                        except:
                            om.out.information("No more user/password combinations available.")
                            return

                    # wraps around bruteWorker
                    # the wrapper starts a new thread
                    self._bruteforce(domain_path, combinations)
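
# --- Illustrative sketch (not w3af code) -------------------------------------
# Every snippet on this page leans on urlParser.getDomainPath(). The stand-in
# below uses only the standard library and assumes the function returns the
# URL up to and including the last '/' of the path, e.g.
#   'http://host.tld/dir/file.txt'  ->  'http://host.tld/dir/'
# It is an approximation for illustration, not w3af's implementation.
from urlparse import urlparse

def get_domain_path(url):
    parsed = urlparse(url)
    path = parsed.path if parsed.path else '/'
    # Keep everything up to (and including) the last slash of the path
    directory = path[:path.rfind('/') + 1]
    return '%s://%s%s' % (parsed.scheme, parsed.netloc, directory)

assert get_domain_path('http://host.tld/dir/file.txt') == 'http://host.tld/dir/'
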
 def grep(self, request, response):
     '''
     Plugin entry point, search for directory indexing.
     @parameter request: The HTTP request object.
     @parameter response: The HTTP response object
     @return: None
     '''
     if urlParser.getDomainPath(response.getURL()) in self._already_visited:
         # Already worked for this URL, no reason to work twice
         return
     
     else:
         # Save it,
         self._already_visited.add( urlParser.getDomainPath(response.getURL()) )
         
         # Work,
         if response.is_text_or_html():
             html_string = response.getBody()
             for indexing_regex in self._compiled_regex_list:
                 if indexing_regex.search( html_string ):
                     v = vuln.vuln()
                     v.setPluginName(self.getName())
                     v.setURL( response.getURL() )
                     msg = 'The URL: "' + response.getURL() + '" has a directory '
                     msg += 'indexing vulnerability.'
                     v.setDesc( msg )
                     v.setId( response.id )
                     v.setSeverity(severity.LOW)
                     path = urlParser.getPath( response.getURL() )
                     v.setName( 'Directory indexing - ' + path )
                     
                     kb.kb.append( self , 'directory' , v )
                     break
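
# --- Illustrative sketch (not w3af code) -------------------------------------
# The real patterns live in self._compiled_regex_list and are not shown here.
# The two regexes below are common markers of auto-generated directory
# listings (Apache / IIS style) and only illustrate what the grep plugin
# searches for in the response body.
import re

INDEXING_PATTERNS = [
    re.compile(r'<title>Index of /'),        # Apache default listing title
    re.compile(r'\[To Parent Directory\]'),  # IIS-style listing link
]

sample_body = '<html><head><title>Index of /uploads</title></head>...</html>'
assert any(p.search(sample_body) for p in INDEXING_PATTERNS)
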
Example #3
    def _verify_reference(self, reference, original_request, originalURL, possibly_broken):
        """
        This method GET's every new link and parses it in order to get new links and forms.
        """
        fuzzable_request_list = []
        is_forward = self._is_forward(reference)
        if not self._only_forward or is_forward:
            response = None
            #
            #   Remember that this "breaks" the useCache=True in most cases!
            #
            # headers = { 'Referer': originalURL }
            #
            #   But this does not, and it is friendlier than simply ignoring the referer
            #
            referer = urlParser.getDomainPath(originalURL).replace(urlParser.getPath(originalURL), "")
            if not referer.endswith("/"):
                referer += "/"
            headers = {"Referer": referer}

            try:
                response = self._urlOpener.GET(reference, useCache=True, headers=headers)
            except KeyboardInterrupt, e:
                raise e
            except w3afException, w3:
                om.out.error(str(w3))
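
# --- Illustrative sketch (not w3af code) -------------------------------------
# The spider sends a coarse, same-site Referer instead of the exact originating
# URL so that useCache=True keeps working (see the comments above). A minimal
# way to build such a header with the standard library; the assumption that a
# scheme://host/ value is enough is mine, not w3af's:
from urlparse import urlparse

def directory_referer(url):
    parsed = urlparse(url)
    return '%s://%s/' % (parsed.scheme, parsed.netloc)

headers = {'Referer': directory_referer('http://site.tld/a/b.html')}
assert headers['Referer'] == 'http://site.tld/'
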
Example #4
    def _analyzeResult(self, mutant, mutant_response):
        '''
        Analyze results of the _sendMutant method. 
        
        In this case, check if the file was uploaded to any of the known directories,
        or one of the "default" ones like "upload" or "files".
        '''
        # Generate a list of directories where I can search for the uploaded file
        domain_path_list = [urlParser.getDomainPath(i) for i in \
                            kb.kb.getData('urls' , 'urlList')]
        domain_path_list = set(domain_path_list)

        # Try to find the file!
        for url in domain_path_list:
            for path in self._generate_paths(url, mutant.uploaded_file_name):

                get_response = self._urlOpener.GET(path, useCache=False)
                if not is_404(get_response):
                    # This is necessary; if I don't do this, the session saver will break because
                    # REAL file objects can't be pickled
                    mutant.setModValue('<file_object>')
                    v = vuln.vuln(mutant)
                    v.setPluginName(self.getName())
                    v.setId([mutant_response.id, get_response.id])
                    v.setSeverity(severity.HIGH)
                    v.setName('Insecure file upload')
                    v['fileDest'] = get_response.getURL()
                    v['fileVars'] = mutant.getFileVariables()
                    msg = 'A file upload to a directory inside the webroot was found at: '
                    msg += mutant.foundAt()
                    v.setDesc(msg)
                    kb.kb.append(self, 'fileUpload', v)
                    return
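
# --- Illustrative sketch (not w3af code) -------------------------------------
# _generate_paths() is not shown on this page. Per the docstring it looks in
# the known directories plus "default" ones such as "upload" or "files"; a
# hypothetical version could look like this (the directory names are
# assumptions made for illustration):
def generate_paths(directory_url, uploaded_file_name):
    candidates = [directory_url + uploaded_file_name]
    for common_dir in ('uploads/', 'upload/', 'files/', 'file/'):
        candidates.append(directory_url + common_dir + uploaded_file_name)
    return candidates

assert generate_paths('http://host/app/', 'shell.gif')[0] == 'http://host/app/shell.gif'
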
Example #5
 def _mangle_digits(self, fuzzableRequest):
     '''
     Mangle those digits.
     @param fuzzableRequest: The original fuzzableRequest
     @return: A list of fuzzableRequests.
     '''
     res = []
      # First I'll mangle the digits in the URL filename
     filename = urlParser.getFileName( fuzzableRequest.getURL() )
     domain_path = urlParser.getDomainPath( fuzzableRequest.getURL() )
     for fname in self._do_combinations( filename ):
         fr_copy = fuzzableRequest.copy()
         fr_copy.setURL( domain_path + fname)
         res.append( fr_copy )
     
      # Now I'll mangle the query string variables
     if fuzzableRequest.getMethod() == 'GET':
         for parameter in fuzzableRequest.getDc():
             
              # to support repeated parameter names...
             for element_index in xrange(len(fuzzableRequest.getDc()[parameter])):
                 
                 for modified_value in self._do_combinations( fuzzableRequest.getDc()[ parameter ][element_index] ):
                     fr_copy = fuzzableRequest.copy()
                     new_dc = fr_copy.getDc()
                     new_dc[ parameter ][ element_index ] = modified_value
                     fr_copy.setDc( new_dc )
                     res.append( fr_copy )
     
     return res
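
# --- Illustrative sketch (not w3af code) -------------------------------------
# _do_combinations() is not shown on this page; the docstring only says the
# digits get "mangled". A hypothetical version that bumps every number found
# in the string up and down by one (purely an assumption about its behaviour):
import re

def do_combinations(a_string):
    res = []
    for match in re.finditer(r'\d+', a_string):
        number = int(match.group(0))
        for new_number in (number - 1, number + 1):
            if new_number >= 0:
                res.append(a_string[:match.start()] + str(new_number) +
                           a_string[match.end():])
    return res

assert 'index1.php' in do_combinations('index2.php')
assert 'index3.php' in do_combinations('index2.php')
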
    def _analyze_author(self, response, frontpage_author):
        '''
        Analyze the author URL.
        
        @parameter response: The http response object for the _vti_inf file.
        @parameter frontpage_author: A regex match object.
        @return: None. All the info is saved to the kb.
        '''
        i = info.info()
        i.setPluginName(self.getName())
        i.setId( response.id )
        i.setURL( response.getURL() )
        # Check for anomalies in the location of author.exe
        if frontpage_author.group(1) != '_vti_bin/_vti_aut/author.exe':
            name = 'Uncommon FrontPage configuration'
            
            desc = 'The FPAuthorScriptUrl is at: "'
            desc += frontpage_author.group(1)
            desc += '" instead of the default location: "'
            desc += '/_vti_bin/_vti_aut/author.exe".'
        else:
            name = 'FrontPage FPAuthorScriptUrl'

            desc = 'The FPAuthorScriptUrl is at: "'
            desc += urlParser.getDomainPath(i.getURL())  + frontpage_author.group(1)
            desc += '".'
            
        i.setName( name )
        i.setDesc( desc )
        i['FPAuthorScriptUrl'] = frontpage_author.group(1)
        kb.kb.append( self, 'frontpage_version', i )
        om.out.information( i.getDesc() )        
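
# --- Illustrative sketch (not w3af code) -------------------------------------
# The frontpage_author match object is produced elsewhere by running a regex
# over the body of the _vti_inf.html file. The exact pattern is not shown on
# this page, so the sample line and regex below are assumptions about what
# that data looks like:
import re

vti_inf_sample = 'FPAuthorScriptUrl="_vti_bin/_vti_aut/author.exe"'
frontpage_author = re.search(r'FPAuthorScriptUrl="(.*?)"', vti_inf_sample)
assert frontpage_author.group(1) == '_vti_bin/_vti_aut/author.exe'
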
    def discover(self, fuzzableRequest):
        '''
        For every directory, fetch a list of shell files and analyze the response.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains 
        (among other things) the URL to test.
        '''
        domain_path = urlParser.getDomainPath(fuzzableRequest.getURL())
        self._fuzzable_requests_to_return = []

        if domain_path not in self._analyzed_dirs:
            self._analyzed_dirs.append(domain_path)

            # Search for the web shells
            for web_shell_filename in WEB_SHELLS:
                web_shell_url = urlParser.urlJoin(domain_path , 
                                                  web_shell_filename)
                # Perform the check in different threads
                targs = (web_shell_url,)
                self._tm.startFunction(target=self._check_if_exists, 
                                       args=targs, ownerObj=self)

            # Wait for all threads to finish
            self._tm.join(self)

            return self._fuzzable_requests_to_return
 def getURL( self ):
     domain_path = urlParser.getDomainPath(self._freq.getURL())
     # Please note that this double encoding is needed if we want to work with mod_rewrite
     encoded = urllib.quote_plus( self._mutant_dc['fuzzedFname'], self._safeEncodeChars )
     if self._doubleEncoding:
         encoded = urllib.quote_plus( encoded, safe=self._safeEncodeChars )
     return  domain_path + self._mutant_dc['start'] + encoded + self._mutant_dc['end']
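
# --- Illustrative sketch (not w3af code) -------------------------------------
# Why the optional second encoding pass matters: quote_plus() also encodes '/',
# and a second pass re-encodes the '%' signs produced by the first one, which
# is what some mod_rewrite setups end up seeing after their own single round
# of decoding.
import urllib

once = urllib.quote_plus('../etc/passwd')
twice = urllib.quote_plus(once)
assert once == '..%2Fetc%2Fpasswd'
assert twice == '..%252Fetc%252Fpasswd'
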
    def _generate_404_knowledge( self, url ):
        '''
        Based on a URL, request something that we know is going to be a 404.
        Afterwards analyze the 404's and summarise them.
        
        @return: A list with 404 bodies.
        '''
        # Get the filename extension and create a 404 for it
        extension = urlParser.getExtension( url )
        domain_path = urlParser.getDomainPath( url )
        
        # the result
        self._response_body_list = []
        
        #
        #   This is a list of the most common handlers, in some configurations, the 404
        #   depends on the handler, so I want to make sure that I catch the 404 for each one
        #
        handlers = ['py', 'php', 'asp', 'aspx', 'do', 'jsp', 'rb', 'gif', 'htm', extension]
        handlers += ['pl', 'cgi', 'xhtml', 'htmls']
        handlers = list(set(handlers))
        
        for extension in handlers:

            rand_alnum_file = createRandAlNum( 8 ) + '.' + extension
                
            url404 = urlParser.urlJoin(  domain_path , rand_alnum_file )

            #   Send the requests using threads:
            targs = ( url404,  )
            tm.startFunction( target=self._send_404, args=targs , ownerObj=self )
            
        # Wait for all threads to finish sending the requests.
        tm.join( self )
        
        #
        #   I have the bodies in self._response_body_list , but maybe they all look the same, so I'll
        #   filter the ones that look alike.
        #
        result = [ self._response_body_list[0], ]
        for i in self._response_body_list:
            for j in self._response_body_list:
                
                if relative_distance_ge(i, j, IS_EQUAL_RATIO):
                    # They are equal, we are ok with that
                    continue
                else:
                    # They are not equal, this means that we'll have to add this to the list
                    result.append(j)
        
        # I don't need these anymore
        self._response_body_list = None
        
        # And I return the ones I need
        result = list(set(result))
        om.out.debug('The 404 body result database has a length of ' + str(len(result)) + '.')
        
        return result
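
# --- Illustrative sketch (not w3af code) -------------------------------------
# relative_distance_ge() is w3af's fuzzy string comparison. A standard-library
# approximation, assuming it behaves like a similarity ratio compared against
# the IS_EQUAL_RATIO threshold:
import difflib

def relative_distance_ge_sketch(body_a, body_b, threshold):
    ratio = difflib.SequenceMatcher(None, body_a, body_b).ratio()
    return ratio >= threshold

assert relative_distance_ge_sketch('Not found: /a', 'Not found: /b', 0.80)
assert not relative_distance_ge_sketch('Not found', 'Welcome home', 0.80)
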
Example #10
    def discover(self, fuzzableRequest):
        """
        Searches for links on the html.

        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        """
        om.out.debug("webSpider plugin is testing: " + fuzzableRequest.getURL())

        if self._first_run:
            # I have to set some variables, in order to be able to code the "onlyForward" feature
            self._first_run = False
            self._target_urls = [urlParser.getDomainPath(i) for i in cf.cf.getData("targets")]
            self._target_domain = urlParser.getDomain(cf.cf.getData("targets")[0])

        # If it's a form, then smartFill the Dc.
        original_dc = fuzzableRequest.getDc()
        if isinstance(fuzzableRequest, httpPostDataRequest.httpPostDataRequest):

            # TODO!!!!!!
            if fuzzableRequest.getURL() in self._already_filled_form:
                return []
            else:
                self._already_filled_form.add(fuzzableRequest.getURL())

            to_send = original_dc.copy()
            for parameter_name in to_send:

                # I do not want to mess with the "static" fields
                if isinstance(to_send, form.form):
                    if to_send.getType(parameter_name) in ["checkbox", "file", "radio", "select"]:
                        continue

                #
                #   Set all the other fields, except from the ones that have a value set (example:
                #   hidden fields like __VIEWSTATE).
                #
                for element_index in xrange(len(to_send[parameter_name])):

                    #   should I ignore it because it already has a value?
                    if to_send[parameter_name][element_index] != "":
                        continue

                    #   smartFill it!
                    to_send[parameter_name][element_index] = smartFill(parameter_name)

            fuzzableRequest.setDc(to_send)

        self._fuzzableRequests = []
        response = None

        try:
            response = self._sendMutant(fuzzableRequest, analyze=False)
        except KeyboardInterrupt, e:
            raise e
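
# --- Illustrative sketch (not w3af code) -------------------------------------
# smartFill() picks a plausible value based on the parameter name so that the
# submitted form is more likely to pass server-side validation. The mapping
# below is a hypothetical example, not w3af's real table:
def smart_fill(parameter_name):
    name = parameter_name.lower()
    if 'mail' in name:
        return 'w3af@example.com'
    if 'pass' in name:
        return 'FrAmE30.'
    if 'phone' in name or 'zip' in name or 'number' in name:
        return '5555'
    return '56'

assert smart_fill('user_email') == 'w3af@example.com'
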
Example #11
 def setOptions( self, optionsMap ):
     '''
     This method sets all the options that are configured using the user interface 
     generated by the framework using the result of getOptions().
     
     @parameter optionsMap: A dictionary with the options for the plugin.
     @return: No value is returned.
     ''' 
     self._content = optionsMap['content'].getValue()
     self._ban_url = optionsMap['banUrl'].getValue()
     self._remote_path = urlParser.getDomainPath( optionsMap['remotePath'].getValue() )
     self._local_dir = optionsMap['localDir'].getValue()
Example #12
 def audit(self, freq ):
     '''
     Searches for file upload vulns using PUT method.
     
     @param freq: A fuzzableRequest
     '''
     # Start
     domain_path = urlParser.getDomainPath( freq.getURL() )
     if domain_path not in self._already_tested_dirs:
         om.out.debug( 'dav plugin is testing: ' + freq.getURL() )
         self._already_tested_dirs.add( domain_path )
         
         self._PUT( domain_path )
         self._PROPFIND( domain_path )
         self._SEARCH( domain_path )
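
# --- Illustrative sketch (not w3af code) -------------------------------------
# The _PUT / _PROPFIND / _SEARCH helpers are not shown on this page. A minimal
# stand-alone version of the PUT check with httplib; the file name, payload
# and the "2xx means writable" criterion are assumptions for illustration:
import httplib
from urlparse import urlparse

def dav_put_check(directory_url, filename='w3af_dav_test.txt'):
    parsed = urlparse(directory_url)
    conn = httplib.HTTPConnection(parsed.netloc)
    conn.request('PUT', parsed.path + filename, body='DAV test content')
    response = conn.getresponse()
    conn.close()
    return 200 <= response.status < 300

# Example (would issue a real request):
# print dav_put_check('http://target.tld/dav/')
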
Example #13
 def _return_without_eval( self, parameters, uri ):
     if urlParser.getDomainPath( uri ) == uri:
         return False
     
     (server, query , expected_response, method , desc) = parameters
     function_reference = getattr( self._urlOpener , method )
     
     url = urlParser.uri2url( uri )
     url += createRandAlNum( 7 )
     if urlParser.getQueryString( query ):
         url = url + '?' + str( urlParser.getQueryString( query ) )
         
     try:
         response = function_reference( url )
     except KeyboardInterrupt,e:
         raise e
    def _verifyVuln(self, vuln_obj):
        """
        This command verifies a vuln. This is really hard work! :P
        
        @parameter vuln_obj: The vuln to exploit.
        @return : True if vuln can be exploited.
        """
        # The vuln was saved to the kb as a vuln object
        url = vuln_obj.getURL()
        method = vuln_obj.getMethod()
        exploit_dc = vuln_obj.getDc()

        # Create a file that will be uploaded
        extension = urlParser.getExtension(url)
        fname = self._create_file(extension)
        file_handler = open(fname, "r")

        #   If there are files,
        if "fileVars" in vuln_obj:
            #
            #   Upload the file
            #
            for file_var_name in vuln_obj["fileVars"]:
                # the [0] was added here to support repeated parameter names
                exploit_dc[file_var_name][0] = file_handler
            http_method = getattr(self._urlOpener, method)
            response = http_method(vuln_obj.getURL(), exploit_dc)

            # Call the uploaded script with an empty value in cmd parameter
            # this will return the shell_handler.SHELL_IDENTIFIER if success
            dst = vuln_obj["fileDest"]
            self._exploit = urlParser.getDomainPath(dst) + self._file_name + "?cmd="
            response = self._urlOpener.GET(self._exploit)

            # Clean-up
            file_handler.close()
            os.remove(self._path_name)

            if shell_handler.SHELL_IDENTIFIER in response.getBody():
                return True

        #   If we got here, there is nothing positive to report ;)
        return False
 def discover(self, fuzzableRequest ):
     '''
      Uses several techniques to try to find out which methods are allowed for a URL.
     
     @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
     '''
     if not self._exec:
          # This will remove the plugin from the discovery plugins to be run.
         raise w3afRunOnce()
         
     else:
         # Run the plugin.
         if self._exec_one_time:
             self._exec = False
         
         domain_path = urlParser.getDomainPath( fuzzableRequest.getURL() )
         if domain_path not in self._already_tested:
             self._already_tested.add( domain_path )
             self._check_methods( domain_path )
     return []
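
# --- Illustrative sketch (not w3af code) -------------------------------------
# _check_methods() is not shown on this page. One of the classic techniques is
# an OPTIONS request followed by reading the Allow (or Public) header; the
# stand-alone version below only illustrates that idea:
import httplib
from urlparse import urlparse

def allowed_methods(directory_url):
    parsed = urlparse(directory_url)
    conn = httplib.HTTPConnection(parsed.netloc)
    conn.request('OPTIONS', parsed.path or '/')
    response = conn.getresponse()
    allow = response.getheader('Allow', '') or response.getheader('Public', '')
    conn.close()
    return [method.strip() for method in allow.split(',') if method.strip()]

# Example (would issue a real request):
# print allowed_methods('http://target.tld/')
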
Example #16
    def _generate_URLs(self, original_url):
        '''
        Generate new URLs based on original_url.

        @parameter original_url: The original url that has to be modified in order to trigger errors in the remote application.
        '''
        res = []
        special_chars = ['|', '~']

        filename = urlParser.getFileName( original_url )
        if filename != '' and '.' in filename:
            splitted_filename = filename.split('.')
            extension = splitted_filename[-1:][0]
            name = '.'.join( splitted_filename[0:-1] )

            for char in special_chars:
                new_filename = name + char + '.' + extension
                new_url = urlParser.urlJoin( urlParser.getDomainPath(original_url), new_filename)
                res.append( new_url )

        return res
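
# --- Usage illustration -------------------------------------------------------
# Assuming getDomainPath() returns the directory URL and urlJoin() concatenates
# it with the new file name, original_url = 'http://host.tld/app/index.php'
# yields one new URL per special character:
#   http://host.tld/app/index|.php
#   http://host.tld/app/index~.php
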
Example #17
 def _generate_URL_from_result( self, analyzed_variable, element_index, result_set, fuzzableRequest ):
     '''
     Based on the result, create the new URLs to test.
     
     @parameter analyzed_variable: The parameter name that is being analyzed
     @parameter element_index: 0 in most cases, >0 if we have repeated parameter names
      @parameter result_set: The set of results that wordnet gave us
     @parameter fuzzableRequest: The fuzzable request that we got as input in the first place.
     
      @return: A URL list.
     '''
     if analyzed_variable is None:
         # The URL was analyzed
         url = fuzzableRequest.getURL()
         fname = urlParser.getFileName( url )
         dp = urlParser.getDomainPath( url )
         
         # The result
         result = []
         
         splitted_fname = fname.split('.')
         if len(splitted_fname) == 2:
             name = splitted_fname[0]
             extension = splitted_fname[1]
         else:
             name = '.'.join(splitted_fname[:-1])
             extension = 'html'
         
         for set_item in result_set:
             new_fname = fname.replace( name, set_item )
             frCopy = fuzzableRequest.copy()
             frCopy.setURL( urlParser.urlJoin( dp, new_fname ) )
             result.append( frCopy )
             
         return result
         
     else:
         mutants = createMutants( fuzzableRequest , result_set, \
                                                 fuzzableParamList=[analyzed_variable,] )
         return mutants
Example #18
 def audit(self, freq ):
     '''
     Verify xst vulns by sending a TRACE request and analyzing the response.
     '''
 
     if not self._exec:
         # Do nothing
         pass
     else:
         # Only run once
         self._exec = False  
         
         # Create a mutant based on a fuzzable request
         # It is really important to use A COPY of the fuzzable request, and not the original.
         # The reason: I'm changing the method and the URL !
         fr_copy = freq.copy()
         fr_copy.setURL( urlParser.getDomainPath( fr_copy.getURL() ) )
         fr_copy.setMethod('TRACE')
         # Add a header. I search for this value to determine if XST is valid
         original_headers = freq.getHeaders()
         original_headers['FalseHeader'] = 'XST'
         my_mutant = mutant(fr_copy)
         
          # send the request to the server and record the response
         response = self._sendMutant( my_mutant, analyze=False )
         
         # create a regex to test the response. 
          regex = re.compile("FalseHeader: XST")
          if regex.search(response.getBody()):
             # If vulnerable record it. This will now become visible on the KB Browser
             v = vuln.vuln( freq )
             v.setPluginName(self.getName())
             v.setId( response.id )
             v.setSeverity(severity.LOW)
             v.setName( 'Cross site tracing vulnerability' )
             msg = 'The web server at "'+ response.getURL() +'" is vulnerable to'
             msg += ' Cross Site Tracing.'
             v.setDesc( msg )
             om.out.vulnerability( v.getDesc(), severity=v.getSeverity() )
             kb.kb.append( self, 'xst', v )
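
# --- Illustrative sketch (not w3af code) -------------------------------------
# What the check boils down to: TRACE echoes the request back to the client,
# so if the injected "FalseHeader: XST" shows up in the response body the
# server allows cross-site tracing. A stand-alone version with httplib, shown
# only to illustrate the technique:
import httplib
from urlparse import urlparse

def is_xst_vulnerable(url):
    parsed = urlparse(url)
    conn = httplib.HTTPConnection(parsed.netloc)
    conn.request('TRACE', parsed.path or '/', headers={'FalseHeader': 'XST'})
    body = conn.getresponse().read()
    conn.close()
    return 'FalseHeader: XST' in body

# Example (would issue a real request):
# print is_xst_vulnerable('http://target.tld/')
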
Example #19
    def discover(self, fuzzableRequest ):
        '''
        Runs pykto to the site.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                                      (among other things) the URL to test.
        '''
        self._new_fuzzable_requests = []
        
        if not self._exec:
            # don't run anymore
            raise w3afRunOnce()
            
        else:
            # run!
            if self._update_scandb:
                self._update_db()
            
            # Run the basic scan (only once)
            if self._first_time:
                self._first_time = False
                url = urlParser.baseUrl( fuzzableRequest.getURL() )
                self._exec = False
                self.__run( url )
            
            # And now mutate if the user configured it...
            if self._mutate_tests:
                
                # If mutations are enabled, I should keep running
                self._exec = True
                
                # Tests are to be mutated
                url = urlParser.getDomainPath( fuzzableRequest.getURL() )
                if url not in self._already_visited:
                    # Save the directories I already have tested
                    self._already_visited.add( url )
                    self.__run( url )

        return self._new_fuzzable_requests
Example #20
 def audit(self, freq ):
     '''
     Searches for file upload vulns using a POST to author.dll.
     
     @param freq: A fuzzableRequest
     '''
     # Set some value
     domain_path = urlParser.getDomainPath( freq.getURL() )
     
     # Start
     if self._stop_on_first and kb.kb.getData('frontpage', 'frontpage'):
         # Nothing to do, I have found vuln(s) and I should stop on first
         msg = 'Not verifying if I can upload files to: "' + domain_path + '" using author.dll'
         msg += '. Because I already found one vulnerability.'
         om.out.debug(msg)
     else:
          # I haven't found any vulns yet, or I'm trying to find every
         # directory where I can write a file.
         if domain_path not in self._already_tested:
             om.out.debug( 'frontpage plugin is testing: ' + freq.getURL() )
             self._already_tested.add( domain_path )
             
             # Find a file that doesn't exist
             found404 = False
             for i in xrange(3):
                 randFile = createRandAlpha( 5 ) + '.html'
                 randPathFile = urlParser.urlJoin(domain_path,  randFile)
                 res = self._urlOpener.GET( randPathFile )
                 if is_404( res ):
                     found404 = True
                     break
             
             if found404:
                 upload_id = self._upload_file( domain_path,  randFile )
                 self._verify_upload( domain_path,  randFile,  upload_id )
             else:
                 msg = 'frontpage plugin failed to find a 404 page. This is mostly because of an'
                 msg += ' error in 404 page detection.'
                 om.out.error(msg)
        def find_relative( doc ):
            res = []
            
            # TODO: Also matches //foo/bar.txt and http://host.tld/foo/bar.txt
            # I'm removing those matches manually below
            regex = '((:?[/]{1,2}[A-Z0-9a-z%_\-~\.]+)+\.[A-Za-z0-9]{2,4}(((\?)([a-zA-Z0-9]*=\w*)){1}((&)([a-zA-Z0-9]*=\w*))*)?)'
            relative_regex = re.compile( regex )
            
            for match_tuple in relative_regex.findall(doc):
                
                match_string = match_tuple[0]
                
                #
                #   And now I filter out some of the common false positives
                #
                if match_string.startswith('//'):
                    continue
                    
                if match_string.startswith('://'):
                    continue

                if re.match('HTTP/\d\.\d', match_string):
                    continue
                
                # Matches "PHP/5.2.4-2ubuntu5.7" , "Apache/2.2.8", and "mod_python/3.3.1"
                if re.match('.*?/\d\.\d\.\d', match_string):
                    continue
                #
                #   Filter finished.
                #
                    
                domainPath = urlParser.getDomainPath(httpResponse.getURL())
                url = urlParser.urlJoin( domainPath , match_string )
                url = self._decode_URL(url, self._encoding)
                res.append( url )
            
            return res
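
# --- Illustrative sketch (not w3af code) -------------------------------------
# A stand-alone run of the same regex plus the '//' and '://' filters over a
# tiny sample document (urlJoin and _decode_URL are w3af internals and are
# left out of this sketch):
import re

regex = '((:?[/]{1,2}[A-Z0-9a-z%_\-~\.]+)+\.[A-Za-z0-9]{2,4}(((\?)([a-zA-Z0-9]*=\w*)){1}((&)([a-zA-Z0-9]*=\w*))*)?)'
relative_regex = re.compile(regex)

doc = '<a href="/docs/manual.pdf">doc</a> <img src="//cdn.example.tld/logo.png">'
matches = [m[0] for m in relative_regex.findall(doc)
           if not m[0].startswith('//') and not m[0].startswith('://')]
assert matches == ['/docs/manual.pdf']
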
Example #22
    def discover(self, fuzzableRequest ):
        '''
        Get the file and parse it.
        @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                                      (among other things) the URL to test.
        '''
        if not self._exec:
            raise w3afRunOnce()
        else:
            
            if not self._be_recursive:
                # Only run once
                self._exec = False

            self._fuzzable_requests = []
            
            domain_path = urlParser.getDomainPath( fuzzableRequest.getURL() )
            base_url = urlParser.baseUrl( fuzzableRequest.getURL() )
            
            to_test = []
            if not self._tested_base_url:
                to_test.append( base_url )
                self._tested_base_url = True
                
            if domain_path != base_url:
                to_test.append( domain_path )
            
            for base_path in to_test:
                
                #   Send the requests using threads:
                targs = ( base_path,  )
                self._tm.startFunction( target=self._bruteforce_directories, args=targs , ownerObj=self )
            
            # Wait for all threads to finish
            self._tm.join( self )

        return self._fuzzable_requests
Example #23
    def discover(self, fuzzableRequest ):
        '''
        For every directory, fetch a list of files and analyze the response using regex.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        domain_path = urlParser.getDomainPath( fuzzableRequest.getURL() )
        self._fuzzable_requests_to_return = []
        
        if domain_path not in self._analyzed_dirs:
            self._analyzed_dirs.append( domain_path )

            #
            #   First we check if the .git/HEAD file exists
            #
            url, regular_expression = self._compiled_git_info[0]
            git_url = urlParser.urlJoin(domain_path, url)
            try:
                response = self._urlOpener.GET( git_url, useCache=True )
            except w3afException:
                om.out.debug('Failed to GET git file: "' + git_url + '"')
            else:
                if not is_404(response):
                    #
                    #   It looks like we have a GIT repository!
                    #
                    for url, regular_expression in self._compiled_git_info:
                        git_url = urlParser.urlJoin(domain_path, url)
                        targs = (domain_path, git_url, regular_expression)
                        # Note: The .git/HEAD request is only sent once. We use the cache.
                        self._tm.startFunction(target=self._check_if_exists, args=targs, ownerObj=self)         
                    
                    # Wait for all threads to finish
                    self._tm.join( self )
                
            return self._fuzzable_requests_to_return
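
# --- Illustrative sketch (not w3af code) -------------------------------------
# The (url, regular_expression) pairs in self._compiled_git_info are not shown
# on this page. Below is what a typical exposed .git/HEAD body looks like and
# the kind of pattern that could confirm it; the exact w3af patterns are
# assumptions here:
import re

head_body = 'ref: refs/heads/master\n'
head_regex = re.compile(r'^ref: refs/heads/\S+', re.MULTILINE)
assert head_regex.search(head_body) is not None
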
    def is_404(self, http_response):
        '''
        All of my previous versions of is_404 were very complex and tried to struggle with all
        possible cases. The truth is that in most "strange" cases I was failing miserably, so now
        I changed my 404 detection once again, but keeping it as simple as possible.
        
        Also, and because I was trying to cover ALL CASES, I was performing a lot of
        requests in order to cover them, which in most situations was unnecessary.
        
        So now I go for a much simpler approach:
            1- Cover the simplest case of all using only 1 HTTP request
            2- Give the users the power to configure the 404 detection by setting a string that
            identifies the 404 response (in case we are missing it for some reason in case #1)
        
        @parameter http_response: The HTTP response which we want to know if it is a 404 or not.
        '''

        #   This is here for testing.
        #return False
        
        #
        #   First we handle the user configured exceptions:
        #
        domain_path = urlParser.getDomainPath(http_response.getURL())
        if domain_path in cf.cf.getData('always404'):
            return True
        elif domain_path in cf.cf.getData('never404'):
            return False        
        
        #
        #   This is the most simple case, we don't even have to think about this.
        #
        #   If there is some custom website that always returns 404 codes, then we are
        #   screwed, but this is open source, and the pentester working on that site can modify
        #   these lines.
        #
        if http_response.getCode() == 404:
            return True
            
        #
        #   The user configured setting. "If this string is in the response, then it is a 404"
        #
        if cf.cf.getData('404string') and cf.cf.getData('404string') in http_response:
            return True
            
        #
        #   Before actually working, I'll check if this response is in the LRU, if it is I just return
        #   the value stored there.
        #
        if http_response.id in self._is_404_LRU:
            return self._is_404_LRU[ http_response.id ]
            
        with self._lock:
            if not self._already_analyzed:
                # Generate a 404 and save it
                self._404_bodies = self._generate_404_knowledge( http_response.getURL() )
                self._already_analyzed = True

        
        # self._404_body was already cleaned inside self._generate_404_knowledge
        # so we need to clean this one.
        html_body = self._get_clean_body( http_response )
        
        #
        #   Compare this response to all the 404's I have in my DB
        #
        for body_404_db in self._404_bodies:
            
            if relative_distance_ge(body_404_db, html_body, IS_EQUAL_RATIO):
                msg = '"%s" is a 404. [similarity_index > %s]' % \
                    (http_response.getURL(), IS_EQUAL_RATIO)
                om.out.debug(msg)
                self._is_404_LRU[ http_response.id ] = True
                return True
            else:
                # If it is not equal to one of the 404 responses I have in my DB, that does not mean
                # that it won't match the next one, so I simply do nothing
                pass
        
        else:
            #
            #   I get here when the for ends and no 404 is matched.
            #
            msg = '"%s" is NOT a 404. [similarity_index < %s]' % \
            (http_response.getURL(), IS_EQUAL_RATIO)
            om.out.debug(msg)
            self._is_404_LRU[ http_response.id ] = False
            return False
    def discover(self, fuzzableRequest ):
        '''
        Finds the version of a WordPress installation.   
        @parameter fuzzableRequest: A fuzzableRequest instance that contains 
        (among other things) the URL to test.
        '''
        dirs = []
  
        if not self._exec :
            # This will remove the plugin from the discovery plugins to be run.
            raise w3afRunOnce()
            
        else:

            #########################
            ## Check if the server is running wp ##
            #########################
            
            self._fuzzableRequests = []  
            
            domain_path = urlParser.getDomainPath( fuzzableRequest.getURL() )
            
            # Main scan URL passed from w3af + unique wp file
            wp_unique_url = urlParser.urlJoin( domain_path, 'wp-login.php' )
            response = self._urlOpener.GET( wp_unique_url, useCache=True )

            # If wp_unique_url is not 404, wordpress = true
            if not is_404( response ):
                dirs.extend( self._createFuzzableRequests( response ) )

                ##############################
                ## Check if the wp version is in index header ##
                ##############################
            
                # Main scan URL passed from w3af + wp index page
                wp_index_url = urlParser.urlJoin( domain_path, 'index.php' )
                response = self._urlOpener.GET( wp_index_url, useCache=True )

                # Find the string in the response html
                find = '<meta name="generator" content="[Ww]ord[Pp]ress (\d\.\d\.?\d?)" />'
                m = re.search(find, response.getBody())

                # If string found, group version
                if m:
                    m = m.group(1)
                    self._version = m

                    # Save it to the kb!
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName('WordPress version')
                    i.setURL( wp_index_url )
                    i.setId( response.id )
                    i.setDesc( 'WordPress version "'+ self._version +'" found in the index header.' )
                    kb.kb.append( self, 'info', i )
                    om.out.information( i.getDesc() )

                #########################
                ## Find wordpress version from data ##
                #########################

                # Wordpress version unique data, file/data/version
                self._wp_fingerprint = [ ('wp-includes/js/tinymce/tiny_mce.js','2009-05-25','2.8'), 
                        ('wp-includes/js/thickbox/thickbox.css','-ms-filter:','2.7.1'), 
                        ('wp-admin/css/farbtastic.css','.farbtastic','2.7'),
                        ('wp-includes/js/tinymce/wordpress.css','-khtml-border-radius:','2.6.1, 2.6.2, 2.6.3 or 2.6.5'),
                        ('wp-includes/js/tinymce/tiny_mce.js','0.7','2.5.1'),
                        ('wp-admin/async-upload.php','200','2.5'),
                        ('wp-includes/images/rss.png','200','2.3.1, 2.3.2 or 2.3.3'),
                        ('readme.html','2.3','2.3'),
                        ('wp-includes/rtl.css','#adminmenu a','2.2.3'),
                        ('wp-includes/js/wp-ajax.js','var a = $H();','2.2.1'),
                        ('wp-app.php','200','2.2')]

                for row in self._wp_fingerprint:
                    test_url = urlParser.urlJoin(  domain_path, row[0] )
                    response = self._urlOpener.GET( test_url, useCache=True )

                    if row[1] == '200' and not is_404(response):
                        self._version = row[2]
                        break
                    elif row[1] in response.getBody():
                        self._version = row[2]
                        break
                    else:
                        self._version = 'lower than 2.2'

                # Save it to the kb!
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('WordPress version')
                i.setURL( test_url )
                i.setId( response.id )
                i.setDesc( 'WordPress version "'+ self._version +'" found from data.' )
                kb.kb.append( self, 'info', i )
                om.out.information( i.getDesc() )

                # Only run once
                self._exec = False

        return dirs
    def discover(self, fuzzableRequest ):
        '''
        Identify server software using favicon.   
        @parameter fuzzableRequest: A fuzzableRequest instance that contains 
        (among other things) the URL to test.
        '''
        if not self._exec :
            # This will remove the plugin from the discovery plugins to be run.
            raise w3afRunOnce()
            
        else:
            
            # Only run once
            self._exec = False

            domain_path = urlParser.getDomainPath( fuzzableRequest.getURL() )
            
            def_favicon_url = urlParser.urlJoin( domain_path, 'favicon.ico' )
            response = self._urlOpener.GET( def_favicon_url, useCache=True )

            if not is_404( response ):
                favmd5=md5.new(response.getBody()).hexdigest()

                try:
                    # read MD5 database.
                    db_file_1 = open(self._db_file, "r")
                except Exception, e:
                    raise w3afException('Failed to open the MD5 database. Exception: "' + str(e) + '".')
                else:
                    favicon_list = db_file_1.readlines()
                    db_file_1.close()
                
                desc = ''
                # check if MD5 is matched in database/list
                for fmd5 in favicon_list:
                    dbline = fmd5.split(":", 2)
                    md5part = dbline[0].split()
                    if dbline[0]==favmd5:
                        if len(dbline)>1:
                            favname=dbline[1].rstrip()
                            desc += 'Favicon.ico file was identified as "' + favname + '".'
                            break
                
                #
                #   Left here for debugging, but the final user doesn't really care about the md5
                #   of the favicon if it was not identified.
                #
                #desc += 'Favicon MD5: "'+ favmd5 +'".'
                
                if desc:
                    # Save it to the kb!
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName('Favicon identification')
                    i.setURL( def_favicon_url )
                    i.setId( response.id )
                    i.setDesc( desc )
                    kb.kb.append( self, 'info', i )
                    om.out.information( i.getDesc() )
                else:
                    #
                    #   Report to the kb that we failed to ID this favicon.ico and that the md5
                    #   should be sent to the developers.
                    #
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName('Favicon identification failed')
                    i.setURL( def_favicon_url )
                    i.setId( response.id )
                    desc = 'Favicon identification failed, please send a mail to w3af-develop'
                    desc += '@lists.sourceforge.net including this md5 hash "'+favmd5+'"'
                    desc += ' and what server or Web application it represents. New fingerprints'
                    desc += ' make this plugin more powerful and accurate.'
                    i.setDesc( desc )
                    kb.kb.append( self, 'info', i )
                    om.out.information( i.getDesc() )