    def discover(self, fuzzableRequest):
        '''
        @parameter fuzzableRequest: A fuzzableRequest instance that contains
        (among other things) the URL to test.
        '''
        possible_vulnerable_files = ['wp-content/plugins/akismet/akismet.php', 'wp-content/plugins/hello.php']

        # Find the active theme and add the theme's header/footer to the possibly vulnerable files
        domain_path = fuzzableRequest.getURL().getDomainPath()
        response = self._uri_opener.GET( domain_path, cache=True )
        if not is_404( response ):
            response_body = response.getBody()
            theme_regexp = domain_path+'wp-content/themes/(.*)/style.css'
            theme = re.search(theme_regexp, response_body, re.IGNORECASE)
            if theme:
                theme_name = theme.group(1)
                possible_vulnerable_files.append(domain_path+'wp-content/themes/'+theme_name+'/header.php')
                possible_vulnerable_files.append(domain_path+'wp-content/themes/'+theme_name+'/footer.php')

        if not self._exec :
            # Remove the plugin from the discovery plugins to be run.
            raise w3afRunOnce()
        else:
            for vulnerable_file in possible_vulnerable_files:
                vulnerable_url = domain_path.urlJoin(vulnerable_file)
                response = self._uri_opener.GET( vulnerable_url, cache=True )

                if not is_404( response ):
                    response_body = response.getBody()
                    if 'Fatal error' in response_body:
                        if vulnerable_file in response_body:
                            # Unix-like
                            pass
                        elif vulnerable_file.replace('/', '\\') in response_body:
                            # Microsoft Windows (back slashes)
                            vulnerable_file = vulnerable_file.replace('/', '\\')
                        else:
                            # Neither path separator style matched: skip this file
                            vulnerable_file = False

                        if vulnerable_file:
                            match = ' <b>(.*)'+vulnerable_file+'</b>'
                            path_disclosure = re.search(match, response_body, re.IGNORECASE)
                            if path_disclosure:
                                i = info.info()
                                i.setPluginName(self.getName())
                                i.setName('WordPress server path found')
                                i.setURL( vulnerable_url )
                                i.setId( response.id )
                                desc = 'WordPress is installed on "%s"' % path_disclosure.group(1)
                                i.setDesc( desc )
                                kb.kb.append( self, 'info', i )
                                om.out.information( i.getDesc() )
                                break

        # Only run once
        self._exec = False
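
The path-disclosure check above can be reproduced outside w3af. A minimal standalone sketch, with an invented error body (note it adds re.escape(), which the plugin itself does not use, so Windows back slashes can't corrupt the pattern):

import re

# Hypothetical PHP fatal-error body, mimicking what WordPress prints.
response_body = ('Fatal error: require_once() in '
                 '<b>/var/www/blog/wp-content/plugins/hello.php</b> on line 12')
vulnerable_file = 'wp-content/plugins/hello.php'

match = ' <b>(.*)' + re.escape(vulnerable_file) + '</b>'
path_disclosure = re.search(match, response_body, re.IGNORECASE)
if path_disclosure:
    print 'WordPress is installed on "%s"' % path_disclosure.group(1)
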
Example #2
    def discover(self, fuzzableRequest):
        """
        Get the file and parse it.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                    (among other things) the URL to test.
        """
        if not self._exec:
            raise w3afRunOnce()
        else:
            # Only run once
            self._exec = False

            base_url = fuzzableRequest.getURL().baseUrl()

            ### Google Gears
            for ext in self._extensions:
                for word in file(self._wordlist):

                    manifest_url = base_url.urlJoin(word.strip() + ext)

                    om.out.debug('Google Gears Manifest Testing "%s"' % (manifest_url))
                    http_response = self._urlOpener.GET(manifest_url, useCache=True)

                    if '"entries":' in http_response and not is_404(http_response):
                        # Save it to the kb!
                        i = info.info()
                        i.setPluginName(self.getName())
                        i.setName("Gears Manifest")
                        i.setURL(manifest_url)
                        i.setId(http_response.id)
                        desc = 'A gears manifest file was found at: "' + manifest_url
                        desc += '".  Each file should be manually reviewed for sensitive'
                        desc += " information that may get cached on the client."
                        i.setDesc(desc)
                        kb.kb.append(self, manifest_url, i)
                        om.out.information(i.getDesc())

            ### CrossDomain.XML
            cross_domain_url = base_url.urlJoin("crossdomain.xml")
            om.out.debug("Checking crossdomain.xml file")
            response = self._urlOpener.GET(cross_domain_url, useCache=True)

            if not is_404(response):
                self._checkResponse(response, "crossdomain.xml")

            ### CrossAccessPolicy.XML
            client_access_url = base_url.urlJoin("clientaccesspolicy.xml")
            om.out.debug("Checking clientaccesspolicy.xml file")
            response = self._urlOpener.GET(client_access_url, useCache=True)

            if not is_404(response):
                self._checkResponse(response, "clientaccesspolicy.xml")

        return []
Example #3
 def grep(self, request, response):
     '''
     Plugin entry point, parse those comments!
     
     @parameter request: The HTTP request object.
     @parameter response: The HTTP response object
     @return: None
     '''
     if response.is_text_or_html() and (not is_404( response ) or self._search404):
         try:
             dp = dpCache.dpc.getDocumentParserFor( response )
         except w3afException:
             return
         else:
             for comment in dp.getComments():
                 # These next two lines fix this issue:
                 # audit.ssi + grep.findComments + web app with XSS = false positive
                 if request.sent( comment ):
                     continue
                 
                 # show nice comments ;)
                 comment = comment.strip()
                 
                 if self._is_new( comment, response):
                     
                     self._interesting_word( comment, request, response )
                     self._html_in_comment( comment, request, response )
Example #4
    def _analyzeResult(self, mutant, mutant_response):
        '''
        Analyze results of the _sendMutant method. 
        
        In this case, check if the file was uploaded to any of the known directories,
        or one of the "default" ones like "upload" or "files".
        '''
        # Generate a list of directories where I can search for the uploaded file
        domain_path_list = [urlParser.getDomainPath(i) for i in \
                            kb.kb.getData('urls' , 'urlList')]
        domain_path_list = set(domain_path_list)

        # Try to find the file!
        for url in domain_path_list:
            for path in self._generate_paths(url, mutant.uploaded_file_name):

                get_response = self._urlOpener.GET(path, useCache=False)
                if not is_404(get_response):
                    # This is necessary; if I don't do this, the session saver
                    # will break because REAL file objects can't be pickled
                    mutant.setModValue('<file_object>')
                    v = vuln.vuln(mutant)
                    v.setPluginName(self.getName())
                    v.setId([mutant_response.id, get_response.id])
                    v.setSeverity(severity.HIGH)
                    v.setName('Insecure file upload')
                    v['fileDest'] = get_response.getURL()
                    v['fileVars'] = mutant.getFileVariables()
                    msg = 'A file upload to a directory inside the webroot was found at: '
                    msg += mutant.foundAt()
                    v.setDesc(msg)
                    kb.kb.append(self, 'fileUpload', v)
                    return
Example #5
 def _check_if_exists(self, domain_path, git_url, regular_expression):
     '''
     Check if the file exists.
     
      @parameter git_url: The URL to check
     '''
     try:
         response = self._urlOpener.GET( git_url, useCache=True )
     except w3afException:
          om.out.debug('Failed to GET git file: ' + git_url)
     else:
         if not is_404(response):
             # Check pattern
             f = StringIO.StringIO(response.getBody())
             for line in f:
                 if regular_expression.match(line):
                     v = vuln.vuln()
                     v.setPluginName(self.getName())
                     v.setId( response.id )
                     v.setName( 'Possible Git repository found' )
                     v.setSeverity(severity.LOW)
                     v.setURL( response.getURL() )
                     msg = 'A Git repository file was found at: "' + v.getURL() + '" ; this could'
                     msg += ' indicate that a Git repo is accessible. You might be able to download'
                     msg += ' the Web application source code by running'
                     msg += ' "git clone ' + domain_path + '"'
                     v.setDesc( msg )
                     kb.kb.append( self, 'GIT', v )
                     om.out.vulnerability( v.getDesc(), severity=v.getSeverity() )
                     fuzzable_requests = self._createFuzzableRequests( response )
                     self._fuzzable_requests_to_return.extend( fuzzable_requests )
Example #6
 def _is_possible_backdoor(self, response):
     '''
     Heuristic to infer if the content of <response> has the pattern of a
     backdoor response.
     
     @param response: httpResponse object
     @return: A bool value
     '''
     if not is_404(response):
         body_text = response.getBody()
         dom  = response.getDOM()
         if dom:
             for ele, attrs in BACKDOOR_COLLECTION.iteritems():
                 for attrname, attr_vals in attrs.iteritems():
                     # Set of lowered attribute values
                     dom_attr_vals = \
                         set(n.get(attrname).lower() for n in \
                             (dom.xpath('//%s[@%s]' % (ele, attrname))))
                      # If the two attribute-value sets intersect, flag it
                      if dom_attr_vals & set(attr_vals):
                          return True
 
          # If no DOM pattern matched, then try with keywords. At least 2 should
          # be contained in 'body_text' to succeed.
         times = 0
         for back_kw in KNOWN_OFFENSIVE_WORDS:
             if re.search(back_kw, body_text, re.I):
                 times += 1
                 if times == 2:
                     return True
     return False
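
The keyword fallback in _is_possible_backdoor is easy to exercise on its own. A sketch with an illustrative stand-in word list (the plugin's real KNOWN_OFFENSIVE_WORDS constant is not shown above):

import re

KNOWN_OFFENSIVE_WORDS = ['uname', 'safe_mode', 'shell']   # illustrative only

def two_keywords_present(body_text):
    # Flag the body once two distinct keywords match, as the plugin does.
    times = 0
    for back_kw in KNOWN_OFFENSIVE_WORDS:
        if re.search(back_kw, body_text, re.I):
            times += 1
            if times == 2:
                return True
    return False

print two_keywords_present('uname -a ... Safe_Mode: OFF')   # True
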
Example #7
 def _classic_worker(self, gh, search):
     
     # Init some variables
     google_se = google(self._urlOpener)
     
     google_list = google_se.getNResults( search, 9 )
     
     for result in google_list:
         # I found a vuln in the site!
         response = self._urlOpener.GET(result.URL, useCache=True )
         if not is_404( response ):
             v = vuln.vuln()
             v.setPluginName(self.getName())
             v.setURL( response.getURL() )
             v.setMethod( 'GET' )
             v.setName( 'Google hack database vulnerability' )
             v.setSeverity(severity.MEDIUM)
             msg = 'ghdb plugin found a vulnerability at URL: ' + result.URL
              msg += '. Vulnerability description: ' + gh.desc
             v.setDesc( msg  )
             v.setId( response.id )
             kb.kb.append( self, 'vuln', v )
             om.out.vulnerability( v.getDesc(), severity=severity.MEDIUM )
                     
             # Create the fuzzable requests
             self._fuzzableRequests.extend( self._createFuzzableRequests( response ) )
Example #8
    def _do_request(self, url, mutant):
        '''
        Perform a simple GET to see if the result is an error or not, and then
        run the actual fuzzing.
        '''
        response = self._uri_opener.GET(mutant, cache=True, headers=self._headers)

        if not (is_404(response) or
                response.getCode() in (403, 401) or
                self._return_without_eval(mutant)):
            fr_list = self._createFuzzableRequests(response)
            self._fuzzable_requests.extend(fr_list)
            #
            #   Save it to the kb (if new)!
            #
            if response.getURL() not in self._seen and response.getURL().getFileName():
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('Potentially interesting file')
                i.setURL(response.getURL())
                i.setId(response.id)
                i.setDesc('A potentially interesting file was found at: "' + response.getURL() + '".')
                kb.kb.append(self, 'files', i)
                om.out.information(i.getDesc())
                
                # Report only once
                self._seen.add(response.getURL())
Example #9
    def discover(self, fuzzableRequest ):
        '''
        Get the urllist.txt file and parse it.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                    (among other things) the URL to test.
        '''
        if not self._exec:
            # This will remove the plugin from the discovery plugins to be run.
            raise w3afRunOnce()
        else:
            # Only run once
            self._exec = False
            
            dirs = []
            self._new_fuzzable_requests = []         
            
            base_url = fuzzableRequest.getURL().baseUrl()
            urllist_url = base_url.urlJoin( 'urllist.txt' )
            http_response = self._uri_opener.GET( urllist_url, cache=True )
            
            if not is_404( http_response ):

                # Work with it...
                dirs.append( urllist_url )
                is_urllist = 5    # tolerate up to 5 unparseable lines before giving up
                for line in http_response.getBody().split('\n'):
                    
                    line = line.strip()
                    
                    if not line.startswith('#') and line:    
                        try:
                            url = base_url.urlJoin( line )
                        except:
                            is_urllist -= 1
                            if not is_urllist:
                                break
                        else:
                            dirs.append( url )

                if is_urllist:
                    # Save it to the kb!
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName('urllist.txt file')
                    i.setURL( urllist_url )
                    i.setId( http_response.id )
                    i.setDesc( 'A urllist.txt file was found at: "'+ urllist_url +'".' )
                    kb.kb.append( self, 'urllist.txt', i )
                    om.out.information( i.getDesc() )

            for url in dirs:
                #   Send the requests using threads:
                self._run_async(meth=self._get_and_parse, args=(url,))
                
            # Wait for all threads to finish
            self._join()
            
            return self._new_fuzzable_requests
Example #10
 def _analyzeResult( self , response , expected_response, parameters, uri ):
     '''
     Analyzes the result of a _send()
     
     @return: True if vuln is found
     '''
     if expected_response.isdigit():
         int_er = int( expected_response )
         # This is used when expected_response is 200 , 401, 403, etc.
         if response.getCode() == int_er and not is_404( response ):
             return True
     
     elif expected_response in response and not is_404( response ):
         # If the content is found, and it's not in a 404 page, then we have a vuln.
         return True
     
     return False    
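
Both match modes rely on httpResponse implementing __contains__. A sketch against a stub object (StubResponse is invented here purely for illustration):

class StubResponse:
    # Minimal stand-in for httpResponse: just enough for _analyzeResult.
    def __init__(self, code, body):
        self._code, self._body = code, body
    def getCode(self):
        return self._code
    def __contains__(self, substring):
        return substring in self._body

resp = StubResponse(200, 'Welcome to the admin console')
print resp.getCode() == int('200')   # numeric expected_response: True
print 'admin' in resp                # content expected_response: True
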
Example #11
    def grep(self, request, response):
        '''
        Plugin entry point, search for meta tags.

        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        uri = response.getURI()
        
        if response.is_text_or_html() and not is_404( response ) and \
            uri not in self._already_inspected:

            self._already_inspected.add(uri)
            
            try:
                dp = dpCache.dpc.getDocumentParserFor( response )
            except w3afException:
                pass
            else:
                meta_tag_list = dp.getMetaTags()
                
                for tag in meta_tag_list:
                    name = self._find_name( tag )
                    for attr in tag:
                        for word in self._interesting_words:

                            # Check if we have something interesting
                            # and WHERE that thing actually is
                            where = value = None
                            if ( word in attr[0].lower() ):
                                where = 'name'
                                value = attr[0].lower()
                            elif ( word in attr[1].lower() ):
                                where = 'value'
                                value = attr[1].lower()
                            
                            # Now... if we found something, report it =)
                            if where:
                                # The attribute is interesting!
                                i = info.info()
                                i.setPluginName(self.getName())
                                i.setName('Interesting META tag')
                                i.setURI( response.getURI() )
                                i.setId( response.id )
                                msg = 'The URI: "' +  i.getURI() + '" sent a META tag with '
                                msg += 'attribute '+ where +' "'+ value +'" which'
                                msg += ' looks interesting.'
                                i.addToHighlight( where, value )
                                if self._interesting_words.get(name, None):
                                    msg += ' The tag is used for '
                                    msg += self._interesting_words[name] + '.'
                                i.setDesc( msg )
                                kb.kb.append( self , 'metaTags' , i )

                            else:
                                # The attribute is not interesting
                                pass
Example #12
    def grep(self, request, response):
        '''
        Plugin entry point. Get responses, analyze words, create dictionary.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None.
        '''
        
        # Initial setup
        lang = kb.kb.getData( 'lang', 'lang' )
        if lang == []:
            lang = 'unknown'

        # I added the 404 code here to avoid doing some is_404 lookups
        if response.getCode() not in [500, 401, 403, 404] and \
            not is_404(response) and request.getMethod() in ['POST', 'GET']:
            # Run the plugins
            data = self._run_plugins(response)
            
            with self._plugin_lock:
                old_data = kb.kb.getData( 'passwordProfiling', 'passwordProfiling' )
                
                # "merge" both maps and update the repetitions
                for d in data:
                    
                    if len(d) >= 4 and d.isalnum() and \
                        not d.isdigit() and \
                        d.lower() not in self._banned_words and \
                        d.lower() not in self._commonWords[lang] and \
                        not request.sent( d ):
                        
                        if d in old_data:
                            old_data[ d ] += data[ d ]
                        else:
                            old_data[ d ] = data[ d ]
                
                #   If the dict grows a lot, I want to trim it. Basically, if 
                #   it grows to a length of more than 2000 keys, I'll trim it
                #   to 1000 keys.
                if len( old_data ) > 2000:
                    def sortfunc(x_obj, y_obj):
                        return cmp(y_obj[1], x_obj[1])
                
                    items = old_data.items()
                    items.sort(sortfunc)
                    
                    items = items[:1000]
                    
                    new_data = {}
                    for key, value in items:
                        new_data[key] = value
                        
                else:
                    new_data = old_data
                
                # save the updated map
                kb.kb.save(self, 'passwordProfiling', new_data)
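
The trim step above keeps only the most repeated words. The same idea as a small standalone helper (Python 2, using the same cmp-based sort as the plugin):

def trim_to_top(counts, keep=1000):
    # Keep only the 'keep' entries with the highest repetition counts.
    items = counts.items()
    items.sort(lambda x_obj, y_obj: cmp(y_obj[1], x_obj[1]))
    return dict(items[:keep])

print trim_to_top({'admin': 7, 'welcome': 2, 'intranet': 5}, keep=2)
# {'admin': 7, 'intranet': 5}
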
Example #13
    def setUp(self):
        #
        #   Init
        #
        self.url_str = 'http://localhost:631/'
        self.url_inst = url_object( self.url_str )
        spam = httpResponse(200, '', {}, self.url_inst, self.url_inst)

        try:
            spam = httpResponse(200, '', {}, self.url_inst, self.url_inst)
            is_404(spam)
        except:
            pass

        self._w3af = core.controllers.w3afCore.w3afCore()
        self._plugins = []
        for pname in self._w3af.getPluginList('grep'):
            self._plugins.append( self._w3af.getPluginInstance(pname, 'grep') )
Example #14
 def _check_existance( self, mutant ):
     '''
     Actually check if the mutated URL exists.
     @return: None, all important data is saved to self._fuzzableRequests
     '''
     response = self._sendMutant( mutant, analyze=False )
     if not is_404( response ) and self._original_response.getBody() != response.getBody() :
         fuzzReqs = self._createFuzzableRequests( response )
         self._fuzzableRequests.extend( fuzzReqs )
Example #15
    def discover(self, fuzzableRequest ):
        '''
        Get the robots.txt file and parse it.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                    (among other things) the URL to test.
        '''
        if not self._exec:
            # This will remove the plugin from the discovery plugins to be run.
            raise w3afRunOnce()
        else:
            # Only run once
            self._exec = False
            
            dirs = []
            self._new_fuzzable_requests = []         
            
            base_url = fuzzableRequest.getURL().baseUrl()
            robots_url = base_url.urlJoin( 'robots.txt' )
            http_response = self._urlOpener.GET( robots_url, useCache=True )
            
            if not is_404( http_response ):
                # Save it to the kb!
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('robots.txt file')
                i.setURL( robots_url )
                i.setId( http_response.id )
                i.setDesc( 'A robots.txt file was found at: "'+ robots_url +'".' )
                kb.kb.append( self, 'robots.txt', i )
                om.out.information( i.getDesc() )


                # Work with it...
                dirs.append( robots_url )
                for line in http_response.getBody().split('\n'):
                    
                    line = line.strip()
                    
                    if len(line) > 0 and line[0] != '#' and (line.upper().find('ALLOW') == 0 or\
                    line.upper().find('DISALLOW') == 0 ):
                        
                        url = line[ line.find(':') + 1 : ]
                        url = url.strip()
                        url = base_url.urlJoin( url )
                        dirs.append( url )

            for url in dirs:
                #   Send the requests using threads:
                targs = ( url,  )
                self._tm.startFunction( target=self._get_and_parse, args=targs , ownerObj=self )
            
            # Wait for all threads to finish
            self._tm.join( self )
        
        return self._new_fuzzable_requests
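
The Allow/Disallow extraction can be tried against a literal robots.txt body. A short sketch with an invented sample file:

sample_body = '''# robots.txt sample
User-agent: *
Disallow: /admin/
Allow: /public/'''

dirs = []
for line in sample_body.split('\n'):
    line = line.strip()
    if len(line) > 0 and line[0] != '#' and (line.upper().find('ALLOW') == 0 or
                                             line.upper().find('DISALLOW') == 0):
        dirs.append(line[line.find(':') + 1:].strip())

print dirs   # ['/admin/', '/public/']
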
Example #16
    def _pre_discovery(self):
        '''
        Create the first fuzzableRequestList
        '''

        # We only want to scan pages that are in the current scope.
        # Note: 'url' binds late inside the lambda, so it refers to the
        # target being processed in the loop below.
        get_curr_scope_pages = lambda fr: \
            fr.getURL().getDomain() == url.getDomain()

        for url in cf.cf.getData('targets'):
            try:
                #
                #    GET the initial target URLs in order to save them
                #    in a list and use them as our bootstrap URLs
                #
                response = self._w3af_core.uriOpener.GET(url, cache=True)
                self._fuzzable_request_set.update( filter(
                    get_curr_scope_pages, createFuzzableRequests(response)) )

                #
                #    NOTE: I need to perform this test here in order to avoid some weird
                #    thread locking that happens when the webspider calls is_404, and
                #    because I want to initialize the is_404 database in a controlled
                #    try/except block.
                #
                from core.controllers.coreHelpers.fingerprint_404 import is_404
                is_404(response)

            except KeyboardInterrupt:
                self._w3af_core._end()
                raise
            except (w3afMustStopOnUrlError, w3afException, w3afMustStopException), w3:
                om.out.error('The target URL: %s is unreachable.' % url)
                om.out.error('Error description: %s' % w3)
            except Exception, e:
                om.out.error('The target URL: %s is unreachable '
                             'because of an unhandled exception.' % url)
                om.out.error('Error description: "%s". See debug '
                             'output for more information.' % e)
                om.out.error('Traceback for this error: %s' % 
                             traceback.format_exc())
Example #17
 def grep(self, request, response):
     '''
     Get the page indicated by the fuzzableRequest and determine the language using the preposition list.
     
     @parameter request: The HTTP request object.
     @parameter response: The HTTP response object
     '''
     with self._plugin_lock:
         if self._exec and not is_404( response ) and response.is_text_or_html():
             kb.kb.save( self, 'lang', 'unknown' )
             
             number_of_matches = {}
             
             for lang_string in self._prepositions:
                 # Init the count map
                 number_of_matches[ lang_string ] = 0
                 
                 # Create regular expression
                 # I add the ' 's because I want the whole word.
                 prepositions = [' ' + i + ' ' for i in self._prepositions[lang_string]]
                 preposition_regex = '(' + '|'.join(prepositions) + ')'
                 
                 # Find all the matches for this regular expression
                 matches = re.findall(preposition_regex, response.getBody().lower())
                 number_of_matches[ lang_string ] = len(matches)
                         
             # Determine who is the winner
             def sortfunc(x,y):
                 return cmp(y[1],x[1])
                 
             items = number_of_matches.items()
             items.sort( sortfunc )
             
             if items[0][1] > items[1][1] * 2:
                 # Only run once
                 self._exec = False
                 
                 # This if was added so no duplicated messages are printed
                 # to the user, when w3af runs with multithreading.
                 if kb.kb.getData( 'lang', 'lang' ) == 'unknown':
                     om.out.information('The page language is: '+ items[0][0] )
                     kb.kb.save( self, 'lang', items[0][0] )
             
             else:
                 msg = 'Could not determine the page language using ' + response.getURL() 
                 msg += ', not enough text to make a good analysis.'
                 om.out.debug(msg)
                 # Keep running until giving a good response...
                 self._exec = True
         
         return []
Example #18
    def _bruteforce_directories(self, base_path):
        '''
        @parameter base_path: The base path to use in the bruteforcing process, can be something
        like http://host.tld/ or http://host.tld/images/ .
        '''
        for directory_name in file(self._dir_list):
            directory_name = directory_name.strip()
            
            # ignore comments and empty lines
            if directory_name and not directory_name.startswith('#'):
                dir_url = urlParser.urlJoin(  base_path , directory_name)
                dir_url +=  '/'

                http_response = self._urlOpener.GET( dir_url, useCache=False )
                
                if not is_404( http_response ):
                    #
                    #   Looking fine... but let's see if this is a false positive or not...
                    #
                    dir_url = urlParser.urlJoin(  base_path , directory_name + createRandAlNum(5) )
                    dir_url +=  '/'
                    invalid_http_response = self._urlOpener.GET( dir_url, useCache=False )

                    if is_404( invalid_http_response ):
                        #
                        #   Good, the directory_name + createRandAlNum(5) request returned
                        #   a 404, so the original directory_name is not a false positive.
                        #
                        fuzzable_reqs = self._createFuzzableRequests( http_response )
                        self._fuzzable_requests.extend( fuzzable_reqs )
                        
                        msg = 'Directory bruteforcer plugin found directory "'
                        msg += http_response.getURL()  + '"'
                        msg += ' with HTTP response code ' + str(http_response.getCode())
                        msg += ' and Content-Length: ' + str(len(http_response.getBody()))
                        msg += '.'
                        
                        om.out.information( msg )
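
The second request is what makes this bruteforcer trustworthy: a random sibling directory must come back as a 404, otherwise the server answers everything. The same false-positive check as a standalone sketch (fetch_is_404 is a caller-supplied callable standing in for the GET + is_404 pair):

import random
import string

def is_false_positive(fetch_is_404, base_path, directory_name):
    # If a random sibling of 'directory_name' is NOT a 404, the server
    # answers everything and the original hit cannot be trusted.
    rand = ''.join(random.choice(string.ascii_lowercase) for _ in range(5))
    probe = base_path + directory_name + rand + '/'
    return not fetch_is_404(probe)
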
Example #19
 def _verify_reference(self, reference, original_request,
                       originalURL, possibly_broken):
     '''
     This method GET's every new link and parses it in order to get
     new links and forms.
     '''
     is_forward = self._is_forward(reference)
     if not self._only_forward or is_forward:
         #
         # Remember that this "breaks" the cache=True in most cases!
         #     headers = { 'Referer': originalURL }
         #
          # But this does not, and it is friendlier than simply ignoring the
         # referer
         #
         referer = originalURL.baseUrl()
         if not referer.url_string.endswith('/'):
             referer += '/'
         headers = {'Referer': referer}
         
         try:
             resp = self._uri_opener.GET(reference, cache=True, 
                                        headers=headers, follow_redir=False)
         except w3afMustStopOnUrlError:
             pass
         else:
             fuzz_req_list = []
             # Note: I WANT to follow links that are in the 404 page, but
             # if the page I fetched is a 404 then it should be ignored.
             if is_404(resp):
                 # add_self == False, because I don't want to return a 404
                 # to the core
                 fuzz_req_list = self._createFuzzableRequests(resp,
                                  request=original_request, add_self=False)
                 if not possibly_broken:
                     t = (resp.getURL(), original_request.getURI())
                     self._broken_links.add(t)
             else:
                 om.out.debug('Adding relative reference "%s" '
                              'to the result.' % reference)
                 frlist = self._createFuzzableRequests(resp, request=original_request)
                 fuzz_req_list.extend(frlist)
                             
             # Process the list.
             for fuzz_req in fuzz_req_list:
                 fuzz_req.setReferer(referer)
                 self._fuzzable_reqs.add(fuzz_req)
Example #20
    def grep(self, request, response):
        '''
        Plugin entry point, search for motw.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        if response.is_text_or_html():

            if not is_404( response ):
                motw_match = self._motw_re.search(response.getBody())

                # Create the info object
                if motw_match or self._withoutMOTW:
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName('Mark of the web')
                    i.setURL( response.getURL() )
                    i.setId( response.id )
                    if motw_match:
                        i.addToHighlight(motw_match.group(0))
                
                # Act based on finding/non-finding
                if motw_match:

                    # This int() can't fail because the regex validated
                    # the data before
                    url_length_indicated = int(motw_match.group(1))
                    url_length_actual = len(motw_match.group(2))
                    if (url_length_indicated <= url_length_actual):
                        msg = 'The URL: "' + response.getURL() + '"'
                        msg += ' contains a valid Mark of the Web.'
                        i.setDesc( msg )
                        kb.kb.append( self, 'motw', i )
                    else:
                        msg = 'The URL: "' + response.getURL() + '" will be executed in Local '
                        msg += 'Machine Zone security context because the indicated length is '
                        msg += 'greater than the actual URL length.'
                        i['localMachine'] = True
                        i.setDesc( msg )
                        kb.kb.append( self, 'motw', i )
              
                elif self._withoutMOTW:
                    msg = 'The URL: "' + response.getURL()
                    msg += '" doesn\'t contain a Mark of the Web.'
                    i.setDesc( msg )
                    kb.kb.append( self, 'no_motw', i )
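
The plugin's _motw_re is defined elsewhere. Assuming the documented '<!-- saved from url=(NNNN)URL -->' comment format, the length check can be sketched standalone (the regex below is a hypothetical stand-in, not the plugin's own):

import re

# group(1) is the declared URL length, group(2) the URL itself.
motw_re = re.compile(r'<!-- saved from url=\((\d+)\)(\S+) -->')

body = '<!-- saved from url=(0022)http://www.example.com -->'
motw_match = motw_re.search(body)
if motw_match:
    print int(motw_match.group(1)) <= len(motw_match.group(2))   # True: valid MOTW
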
Example #21
    def discover(self, fuzzableRequest):
        """
        Get the sitemap.xml file and parse it.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        """
        if not self._exec:
            # This will remove the plugin from the discovery plugins to be run.
            raise w3afRunOnce()
        else:
            # Only run once
            self._exec = False
            self._new_fuzzable_requests = []

            base_url = fuzzableRequest.getURL().baseUrl()
            sitemap_url = base_url.urlJoin("sitemap.xml")
            response = self._urlOpener.GET(sitemap_url, useCache=True)

            # Remember that httpResponse objects implement a faster
            # __contains__ than strings; "s in response" is faster than
            # "s in response.getBody()"
            if "</urlset>" in response and not is_404(response):
                om.out.debug("Analyzing sitemap.xml file.")

                self._new_fuzzable_requests.extend(self._createFuzzableRequests(response))

                import xml.dom.minidom

                om.out.debug("Parsing xml file with xml.dom.minidom.")
                try:
                    dom = xml.dom.minidom.parseString(response.getBody())
                except:
                    raise w3afException("Error while parsing sitemap.xml")
                urlList = dom.getElementsByTagName("loc")
                for url in urlList:
                    url = url.childNodes[0].data
                    url_instance = url_object(url)
                    #   Send the requests using threads:
                    targs = (url_instance,)
                    self._tm.startFunction(target=self._get_and_parse, args=targs, ownerObj=self)

                # Wait for all threads to finish
                self._tm.join(self)

            return self._new_fuzzable_requests
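
The minidom extraction is simple to verify against a literal sitemap fragment (the XML below is invented):

import xml.dom.minidom

sitemap_xml = ('<urlset><url><loc>http://example.com/a</loc></url>'
               '<url><loc>http://example.com/b</loc></url></urlset>')
dom = xml.dom.minidom.parseString(sitemap_xml)
print [n.childNodes[0].data for n in dom.getElementsByTagName('loc')]
# [u'http://example.com/a', u'http://example.com/b']
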
Example #22
    def _compare_dir( self, arg, directory, flist ):
        '''
        This function is the callback function called from os.path.walk, from the python
        help function:
        
        walk(top, func, arg)
            Directory tree walk with callback function.
        
            For each directory in the directory tree rooted at top (including top
            itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
            dirname is the name of the directory, and fnames a list of the names of
            the files and subdirectories in dirname (excluding '.' and '..').  func
            may modify the fnames list in-place (e.g. via del or slice assignment),
            and walk will only recurse into the subdirectories whose names remain in
            fnames; this can be used to implement a filter, or to impose a specific
            order of visiting.  No semantics are defined for, or required of, arg,
            beyond that arg is always passed to func.  It can be used, e.g., to pass
            a filename pattern, or a mutable object designed to accumulate
            statistics.  Passing None for arg is common.

        '''
        if self._first:
            self._start_path = directory
            self._first = False
        
        directory_2 = directory.replace( self._start_path,'' )
        path = self._remote_path
        if directory_2 != '':
            path += directory_2 + os.path.sep
        else:
            path += directory_2
        
        for fname in flist:
            if os.path.isfile( directory + os.path.sep + fname ):
                url = path.urlJoin( fname )
                response = self._easy_GET( url )
            
                if not is_404( response ):
                    if response.is_text_or_html():
                        self._fuzzableRequests.extend( self._createFuzzableRequests( response ) )
                    self._check_content( response, directory + os.path.sep + fname )
                    self._eq.append( url )
                else:
                    self._not_eq.append( url )
Example #23
    def discover(self, fuzzableRequest ):
        '''
        For every directory, fetch a list of files and analyze the response using regex.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        domain_path = fuzzableRequest.getURL().getDomainPath()
        self._fuzzable_requests_to_return = []
        
        if domain_path not in self._analyzed_dirs:
            self._analyzed_dirs.add( domain_path )
            
            for repo in self._compiled_dvcs_info.keys():
                relative_url = self._compiled_dvcs_info[repo]['filename']
                regular_expression = self._compiled_dvcs_info[repo]['re']
                repo_url = domain_path.urlJoin(relative_url)

                try:
                    response = self._urlOpener.GET( repo_url, useCache=True )
                except w3afException:
                    om.out.debug('Failed to GET '+repo+' file: "' + repo_url + '"')
                else:
                    if not is_404(response):
                        # Check pattern
                        f = StringIO.StringIO(response.getBody())
                        for line in f:
                            if regular_expression.match(line):
                                v = vuln.vuln()
                                v.setPluginName(self.getName())
                                v.setId( response.id )
                                v.setName( 'Possible '+repo+' repository found' )
                                v.setSeverity(severity.LOW)
                                v.setURL( response.getURL() )
                                msg = 'A '+repo+' repository file was found at: "' + v.getURL() + '" ; this could'
                                msg += ' indicate that a '+repo+' repo is accessible. You might be able to download'
                                msg += ' the Web application source code.'
                                v.setDesc( msg )
                                kb.kb.append( self, repo.upper(), v )
                                om.out.vulnerability( v.getDesc(), severity=v.getSeverity() )
                                fuzzable_requests = self._createFuzzableRequests( response )
                                self._fuzzable_requests_to_return.extend( fuzzable_requests )

            return self._fuzzable_requests_to_return
Example #24
 def discover(self, fuzzableRequest ):
     '''
     Get the sitemap.xml file and parse it.
     
     @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
     '''
     if not self._exec:
         # This will remove the plugin from the discovery plugins to be run.
         raise w3afRunOnce()
     else:
         # Only run once
         self._exec = False
         self._new_fuzzable_requests = []
         
         base_url = fuzzableRequest.getURL().baseUrl()
         sitemap_url = base_url.urlJoin( 'sitemap.xml' )
         response = self._uri_opener.GET( sitemap_url, cache=True )
         
          # Remember that httpResponse objects implement a faster
          # __contains__ than strings; "s in response" is faster than
          # "s in response.getBody()"
         if '</urlset>' in response and not is_404( response ):
             om.out.debug('Analyzing sitemap.xml file.')
             
             self._new_fuzzable_requests.extend( self._createFuzzableRequests( response ) )
             
             import xml.dom.minidom
             om.out.debug('Parsing xml file with xml.dom.minidom.')
             try:
                 dom = xml.dom.minidom.parseString( response.getBody() )
             except:
                 raise w3afException('Error while parsing sitemap.xml')
             urlList = dom.getElementsByTagName("loc")
             for url in urlList:
                 try:
                     url = url.childNodes[0].data
                     url = url_object(url)
                 except ValueError, ve:
                     om.out.debug('Sitemap file had an invalid URL: "%s"' % ve)
                 except:
                     om.out.debug('Sitemap file had an invalid format')
Example #25
    def discover(self, fuzzableRequest ):
        '''
        GET some files and parse them.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                    (among other things) the URL to test.
        '''
        dirs = []
        if not self._exec :
            # This will remove the plugin from the discovery plugins to be run.
            raise w3afRunOnce()
            
        else:
            # Only run once
            self._exec = False
            
            base_url = fuzzableRequest.getURL().baseUrl()
            
            for url, regex_string in self.getOracleData():

                oracle_discovery_URL = base_url.urlJoin( url )
                response = self._uri_opener.GET( oracle_discovery_URL, cache=True )
                
                if not is_404( response ):
                    dirs.extend( self._createFuzzableRequests( response ) )
                    if re.match( regex_string , response.getBody(), re.DOTALL):
                        i = info.info()
                        i.setPluginName(self.getName())
                        i.setName('Oracle application')
                        i.setURL( response.getURL() )
                        i.setDesc( self._parse( url, response ) )
                        i.setId( response.id )
                        kb.kb.append( self, 'info', i )
                        om.out.information( i.getDesc() )
                    else:
                        msg = 'oracleDiscovery found the URL: ' + response.getURL()
                        msg += ' but failed to parse it. The content of the URL is: "'
                        msg += response.getBody() + '".'
                        om.out.debug( msg )
        
        return dirs
Example #26
 def audit(self, freq ):
     '''
     Searches for file upload vulns using a POST to author.dll.
     
     @param freq: A fuzzableRequest
     '''
     # Set some value
     domain_path = urlParser.getDomainPath( freq.getURL() )
     
     # Start
     if self._stop_on_first and kb.kb.getData('frontpage', 'frontpage'):
         # Nothing to do, I have found vuln(s) and I should stop on first
         msg = 'Not verifying if I can upload files to: "' + domain_path + '" using author.dll'
         msg += '. Because I already found one vulnerability.'
         om.out.debug(msg)
     else:
         # I haven't found any vulns yet, or I'm trying to find every
         # directory where I can write a file.
         if domain_path not in self._already_tested:
             om.out.debug( 'frontpage plugin is testing: ' + freq.getURL() )
             self._already_tested.add( domain_path )
             
             # Find a file that doesn't exist
             found404 = False
             for i in xrange(3):
                 randFile = createRandAlpha( 5 ) + '.html'
                 randPathFile = urlParser.urlJoin(domain_path,  randFile)
                 res = self._urlOpener.GET( randPathFile )
                 if is_404( res ):
                     found404 = True
                     break
             
             if found404:
                 upload_id = self._upload_file( domain_path,  randFile )
                 self._verify_upload( domain_path,  randFile,  upload_id )
             else:
                 msg = 'frontpage plugin failed to find a 404 page. This is mostly because of an'
                 msg += ' error in 404 page detection.'
                 om.out.error(msg)
Example #27
    def discover(self, fuzzableRequest ):
        '''
        For every directory, fetch a list of files and analyze the response.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        fuzzable_return_value = []
        
        if not self._exec:
            # This will remove the plugin from the discovery plugins to be run.
            raise w3afRunOnce()
            
        else:
            # Run the plugin.
            self._exec = False

        for domain_path in urlParser.getDirectories(fuzzableRequest.getURL() ):

            if domain_path not in self._analyzed_dirs:

                # Save the domain_path so I know I'm not working in vain
                self._analyzed_dirs.add( domain_path )

                # Request the file
                frontpage_info_url = urlParser.urlJoin(  domain_path , "_vti_inf.html" )
                try:
                    response = self._urlOpener.GET( frontpage_info_url, useCache=True )
                    om.out.debug( '[frontpage_version] Testing "' + frontpage_info_url + '".' )
                except w3afException,  w3:
                    msg = 'Failed to GET Frontpage Server _vti_inf.html file: "'
                    msg += frontpage_info_url + '". Exception: "' + str(w3) + '".'
                    om.out.debug( msg )
                else:
                    # Check if it's a Frontpage Info file
                    if not is_404( response ):
                        fuzzable_return_value.extend( self._createFuzzableRequests( response ) )
                        self._analyze_response( response )
                        return fuzzable_return_value
Example #28
    def discover(self, fuzzableRequest ):
        '''
        For every directory, fetch a list of files and analyze the response using regex.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        domain_path = fuzzableRequest.getURL().getDomainPath()
        self._fuzzable_requests_to_return = []
        
        if domain_path not in self._analyzed_dirs:
            self._analyzed_dirs.append( domain_path )

            #
            #   First we check if the .git/HEAD file exists
            #
            relative_url, reg_ex = self._compiled_git_info[0]
            git_url = domain_path.urlJoin(relative_url)
            try:
                response = self._uri_opener.GET( git_url, cache=True )
            except w3afException:
                om.out.debug('Failed to GET git file: "' + git_url + '"')
            else:
                if not is_404(response):
                    #
                    #   It looks like we have a GIT repository!
                    #
                    for relative_url, reg_ex in self._compiled_git_info:
                        git_url = domain_path.urlJoin(relative_url)
                        args = (domain_path, git_url, reg_ex)
                        # Note: The .git/HEAD request is only sent once.
                        # The cache is used.
                        self._run_async(meth=self._check_if_exists, args=args)
                    
                    # Wait for all threads to finish
                    self._join()
                
            return self._fuzzable_requests_to_return
Example #29
    def discover(self, fuzzableRequest ):
        '''
        For every directory, fetch a list of files and analyze the response using regex.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        domain_path = urlParser.getDomainPath( fuzzableRequest.getURL() )
        self._fuzzable_requests_to_return = []
        
        if domain_path not in self._analyzed_dirs:
            self._analyzed_dirs.append( domain_path )

            #
            #   First we check if the .git/HEAD file exists
            #
            url, regular_expression = self._compiled_git_info[0]
            git_url = urlParser.urlJoin(domain_path, url)
            try:
                response = self._urlOpener.GET( git_url, useCache=True )
            except w3afException:
                om.out.debug('Failed to GET git file: "' + git_url + '"')
            else:
                if not is_404(response):
                    #
                    #   It looks like we have a GIT repository!
                    #
                    for url, regular_expression in self._compiled_git_info:
                        git_url = urlParser.urlJoin(domain_path, url)
                        targs = (domain_path, git_url, regular_expression)
                        # Note: The .git/HEAD request is only sent once. We use the cache.
                        self._tm.startFunction(target=self._check_if_exists, args=targs, ownerObj=self)         
                    
                    # Wait for all threads to finish
                    self._tm.join( self )
                
            return self._fuzzable_requests_to_return
Example #30
 def _analyze_result(self, mutant, mutant_response):
     '''
     Analyze results of the _send_mutant method. 
     
     In this case, check if the file was uploaded to any of the known directories,
     or one of the "default" ones like "upload" or "files".
     '''
     with self._plugin_lock:
         if self._has_no_bug(mutant):        
             
             # Gen expr for directories where I can search for the uploaded file
             domain_path_list = set(u.getDomainPath() for u in 
                                    kb.kb.getData('urls' , 'url_objects'))
     
             # Try to find the file!
             for url in domain_path_list:
                 for path in self._generate_urls(url, mutant.uploaded_file_name):
     
                     get_response = self._uri_opener.GET(path, cache=False)
                     if not is_404(get_response):
                         # This is necessary; if I don't do this, the session
                         # saver will break because REAL file objects can't
                         # be pickled
                         mutant.setModValue('<file_object>')
                         v = vuln.vuln(mutant)
                         v.setPluginName(self.getName())
                         v.setId([mutant_response.id, get_response.id])
                         v.setSeverity(severity.HIGH)
                         v.setName('Insecure file upload')
                         v['fileDest'] = get_response.getURL()
                         v['fileVars'] = mutant.getFileVariables()
                         msg = ('A file upload to a directory inside the '
                         'webroot was found at: ' + mutant.foundAt())
                         v.setDesc(msg)
                         kb.kb.append(self, 'fileUpload', v)
                         return