예제 #1
0
class dotNetErrors(baseDiscoveryPlugin):
    '''
    Request specially crafted URLs that generate ASP.NET errors in order to gather information.
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseDiscoveryPlugin.__init__(self)

        # Internal variables
        self._already_tested = ScalableBloomFilter()

    def discover(self, fuzzableRequest ):
        '''
        Requests the special filenames.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        if fuzzableRequest.getURL() not in self._already_tested:
            self._already_tested.add( fuzzableRequest.getURL() )

            # Generate the URLs to GET
            to_test = self._generate_URLs( fuzzableRequest.getURL() )
            for url in to_test:
                try:
                    response = self._urlOpener.GET( url, useCache=True )
                except KeyboardInterrupt,e:
                    raise e
                except w3afException,w3:
                    om.out.error( str(w3) )
                else:
                    self._analyze_response( response )
class frontpage_version(baseDiscoveryPlugin):
    '''
    Search FrontPage Server Info file and if it finds it will determine its version.
    @author: Viktor Gazdag ( [email protected] )
    '''

    def __init__(self):
        baseDiscoveryPlugin.__init__(self)
        
        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()
        self._exec = True

    def discover(self, fuzzableRequest ):
        '''
        For every directory, fetch a list of files and analyze the response.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        fuzzable_return_value = []
        
        if not self._exec:
            # This will remove the plugin from the discovery plugins to be runned.
            raise w3afRunOnce()
            
        else:
            # Run the plugin.
            self._exec = False

        for domain_path in urlParser.getDirectories(fuzzableRequest.getURL() ):

            if domain_path not in self._analyzed_dirs:

                # Save the domain_path so I know I'm not working in vane
                self._analyzed_dirs.add( domain_path )

                # Request the file
                frontpage_info_url = urlParser.urlJoin(  domain_path , "_vti_inf.html" )
                try:
                    response = self._urlOpener.GET( frontpage_info_url, useCache=True )
                    om.out.debug( '[frontpage_version] Testing "' + frontpage_info_url + '".' )
                except w3afException,  w3:
                    msg = 'Failed to GET Frontpage Server _vti_inf.html file: "'
                    msg += frontpage_info_url + '". Exception: "' + str(w3) + '".'
                    om.out.debug( msg )
                else:
                    # Check if it's a Fronpage Info file
                    if not is_404( response ):
                        fuzzable_return_value.extend( self._createFuzzableRequests( response ) )
                        self._analyze_response( response )
                        return fuzzable_return_value
class directoryIndexing(baseGrepPlugin):
    '''
    Grep every response for directory indexing problems.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''
        Compile the directory-indexing signatures once at creation time.
        '''
        baseGrepPlugin.__init__(self)

        # One entry per domain path that was already grepped.
        self._already_visited = ScalableBloomFilter()

        # Pre-compiling every signature makes plugin setup a bit slower but
        # each grep() call much faster.
        flags = re.IGNORECASE | re.DOTALL
        self._compiled_regex_list = [re.compile(signature, flags)
                                     for signature in self._get_indexing_regex()]

    def grep(self, request, response):
        '''
        Plugin entry point, search for directory indexing.
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        domain_path = urlParser.getDomainPath(response.getURL())

        # Only work once per domain path.
        if domain_path in self._already_visited:
            return
        self._already_visited.add(domain_path)

        if not response.is_text_or_html():
            return

        body = response.getBody()
        for indexing_regex in self._compiled_regex_list:
            if indexing_regex.search(body):
                v = vuln.vuln()
                v.setPluginName(self.getName())
                v.setURL(response.getURL())
                msg = 'The URL: "' + response.getURL() + '" has a directory '
                msg += 'indexing vulnerability.'
                v.setDesc(msg)
                v.setId(response.id)
                v.setSeverity(severity.LOW)
                path = urlParser.getPath(response.getURL())
                v.setName('Directory indexing - ' + path)

                kb.kb.append(self, 'directory', v)
                # One finding per URL is enough.
                break
    
    def setOptions( self, OptionList ):
        '''
        This plugin has no user-configurable options, so nothing is stored.

        @parameter OptionList: Ignored.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''
        # No configurable options: return an empty option list.
        return optionList()

    def _get_indexing_regex(self):
        '''
        @return: A list of the regular expression strings, in order to be compiled in __init__
        '''
        ### TODO: verify if I need to add more values here, IIS !!!
        return [
            "<title>Index of /",
            '<a href="\\?C=N;O=D">Name</a>',
            "Last modified</a>",
            "Parent Directory</a>",
            "Directory Listing for",
            "<TITLE>Folder Listing.",
            '<table summary="Directory Listing" ',
            "- Browsing directory ",
            # IIS 6.0 and 7.0
            '">\\[To Parent Directory\\]</a><br><br>',
            # IIS 5.0
            '<A HREF=".*?">.*?</A><br></pre><hr></body></html>',
        ]
        
    def end(self):
        '''
        This method is called when the plugin wont be used anymore.
        '''
        # Print every 'directory' finding stored in the KB, grouped by URL.
        self.printUniq( kb.kb.getData( 'directoryIndexing', 'directory' ), 'URL' )
            
    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        # This plugin can run on its own; no other plugin has to run first.
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #4
0
class findvhost(baseDiscoveryPlugin):
    '''
    Modify the HTTP Host header and try to find virtual hosts.
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''
        Initialize the run-once flag and the per-scan caches.
        '''
        baseDiscoveryPlugin.__init__(self)

        # Internal variables
        self._first_exec = True                        # generic vhosts are only tried once
        self._already_queried = ScalableBloomFilter()  # domains already sent in a Host header
        self._can_resolve_domain_names = False         # set on the first discover() call
        self._non_existant_response = None             # baseline response for a bogus vhost
        
    def discover(self, fuzzableRequest ):
        '''
        Find virtual hosts.

        @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                                    (among other things) the URL to test.
        @return: An empty list; findings are reported through the knowledge base.
        '''
        vhost_list = []

        if self._first_exec:
            # Only run once
            self._first_exec = False
            vhost_list = self._generic_vhosts( fuzzableRequest )

            # Set this for later
            self._can_resolve_domain_names = self._can_resolve_domains()

        # I also test for ""dead links"" that the web programmer left in the page
        # For example, If w3af finds a link to "http://corporative.intranet.corp/" it will try to
        # resolve the dns name, if it fails, it will try to request that page from the server
        vhost_list.extend( self._get_dead_links( fuzzableRequest ) )

        # Report our findings
        target_url = fuzzableRequest.getURL()
        domain = urlParser.getDomain(target_url)

        for vhost, request_id in vhost_list:
            v = vuln.vuln()
            v.setPluginName(self.getName())
            v.setURL( target_url )
            v.setMethod( 'GET' )
            v.setName( 'Shared hosting' )
            v.setSeverity(severity.LOW)

            msg = 'Found a new virtual host at the target web server, the virtual host name is: "'
            msg += vhost + '". To access this site you might need to change your DNS resolution'
            msg += ' settings in order to point "' + vhost + '" to the IP address of "'
            msg += domain + '".'
            v.setDesc( msg )
            v.setId( request_id )
            kb.kb.append( self, 'findvhost', v )
            om.out.information( v.getDesc() )

        return []
        
    def _get_dead_links(self, fuzzableRequest):
        '''
        Find every link on a HTML document verify if the domain is reachable or not; after that,
        verify if the web found a different name for the target site or if we found a new site that
        is linked. If the link points to a dead site then report it (it could be pointing to some 
        private address or something...)

        @parameter fuzzableRequest: The request whose response body is parsed for links.
        @return: A list of (vhost, response_id) tuples, one per interesting vhost.
        '''
        res = []
        
        # Get some responses to compare later
        base_url = urlParser.baseUrl(fuzzableRequest.getURL())
        original_response = self._urlOpener.GET(fuzzableRequest.getURI(), useCache=True)
        base_response = self._urlOpener.GET(base_url, useCache=True)
        base_resp_body = base_response.getBody()
        
        try:
            dp = dpCache.dpc.getDocumentParserFor(original_response)
        except w3afException:
            # Failed to find a suitable parser for the document
            return []
        
        # Set the non existant response: what the server answers for a Host
        # header that can not possibly be configured.
        non_existant = 'iDoNotExistPleaseGoAwayNowOrDie' + createRandAlNum(4) 
        self._non_existant_response = self._urlOpener.GET(base_url, 
                                                useCache=False, headers={'Host': non_existant})
        nonexist_resp_body = self._non_existant_response.getBody()
        
        # Note:
        # - With parsed_references I'm 100% that it's really something in the HTML
        # that the developer intended to add.
        #
        # - The re_references are the result of regular expressions, which in some cases
        # are just false positives.
        #
        # In this case, and because I'm only going to use the domain name of the URL
        # I'm going to trust the re_references also.
        parsed_references, re_references = dp.getReferences()
        parsed_references.extend(re_references)
        
        for link in parsed_references:
            domain = urlParser.getDomain(link)
            
            #
            # First section, find internal hosts using the HTTP Host header:
            #
            if domain not in self._already_queried:
                # If the parsed page has an external link to www.google.com
                # then I'll send a request to the target site, with Host: www.google.com
                # This sucks, but it's cool if the document has a link to 
                # http://some.internal.site.target.com/
                try:
                    vhost_response = self._urlOpener.GET(base_url, useCache=False,
                                                         headers={'Host': domain })
                except w3afException:
                    pass
                else:
                    self._already_queried.add(domain)
                    vhost_resp_body = vhost_response.getBody()
                    
                    # If they are *really* different (not just different by some chars)
                    if relative_distance_lt(vhost_resp_body, base_resp_body, 0.35) and \
                        relative_distance_lt(vhost_resp_body, nonexist_resp_body, 0.35):
                        # and the domain can't just be resolved using a DNS query to
                        # our regular DNS server
                        report = True
                        if self._can_resolve_domain_names:
                            try:
                                socket.gethostbyname(domain)
                            # BUGFIX: was a bare "except:" that also swallowed
                            # KeyboardInterrupt/SystemExit; only resolution
                            # failures (socket.gaierror is a subclass of
                            # socket.error) are expected here.
                            except socket.error:
                                # aha! The HTML is linking to a domain that's
                                # hosted in the same server, and the domain name
                                # can NOT be resolved!
                                report = True
                            else:
                                report = False

                        # have found something interesting!
                        if report:
                            res.append( (domain, vhost_response.id) )

            #
            # Second section, find hosts using failed DNS resolutions
            #
            if self._can_resolve_domain_names:
                try:
                    # raises exception when it's not found
                    # socket.gaierror: (-5, 'No address associated with hostname')
                    socket.gethostbyname( domain )
                except socket.error:
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName('Internal hostname in HTML link')
                    i.setURL( fuzzableRequest.getURL() )
                    i.setMethod( 'GET' )
                    i.setId( original_response.id )
                    msg = 'The content of "'+ fuzzableRequest.getURL() +'" references a non '
                    msg += 'existant domain: "' + link + '". This may be a broken link, or an'
                    msg += ' internal domain name.'
                    i.setDesc( msg )
                    kb.kb.append( self, 'findvhost', i )
                    om.out.information( i.getDesc() )
        
        # NOTE: the original filtered res for '' entries here, but res only
        # ever holds (domain, id) tuples, so that filter was a no-op.
        return res 
    
    def _can_resolve_domains(self):
        '''
        This method was added to verify if w3af can resolve domain names
        using the OS configuration (/etc/resolv.conf in linux) or if we are in some
        strange LAN where we can't.
        
        @return: True if we can resolve domain names.
        '''
        try:
            socket.gethostbyname( 'www.w3.org' )
        # BUGFIX: was a bare "except:" that also caught KeyboardInterrupt and
        # SystemExit; only DNS resolution failures (socket.gaierror, a subclass
        # of socket.error) mean that name resolution is unavailable.
        except socket.error:
            return False
        else:
            return True
    
    def _generic_vhosts( self, fuzzableRequest ):
        '''
        Test some generic virtual hosts, only do this once.

        @return: A list of (vhost, response_id) tuples for virtual hosts whose
                 response differs from both the original site and a bogus vhost.
        '''
        results = []
        base_url = urlParser.baseUrl(fuzzableRequest.getURL())

        # Get some responses to compare later
        original_response = self._urlOpener.GET(base_url, useCache=True)
        orig_resp_body = original_response.getBody()

        # Baseline: the server's answer for a Host header that can not exist.
        non_existant = 'iDoNotExistPleaseGoAwayNowOrDie' + createRandAlNum(4)
        self._non_existant_response = self._urlOpener.GET(base_url, useCache=False, \
                                                        headers={'Host': non_existant })
        nonexist_resp_body = self._non_existant_response.getBody()

        candidates = self._get_common_virtualhosts(urlParser.getDomain(base_url))
        for common_vhost in candidates:
            try:
                vhost_response = self._urlOpener.GET( base_url, useCache=False, \
                                                headers={'Host': common_vhost } )
            except w3afException:
                pass
            else:
                vhost_resp_body = vhost_response.getBody()

                # If they are *really* different (not just different by some chars)
                if relative_distance_lt(vhost_resp_body, orig_resp_body, 0.35) and \
                    relative_distance_lt(vhost_resp_body, nonexist_resp_body, 0.35):
                    results.append((common_vhost, vhost_response.id))

        return results
    
    def _get_common_virtualhosts( self, domain ):
        '''
        @parameter domain: The original domain name.
        @return: A list of possible domain names that could be hosted in the same web
        server that "domain".
        '''
        res = []
        
        # BUGFIX: the original list was missing a comma after 'test1', so
        # implicit string-literal concatenation silently merged it with the
        # next entry into 'test1old', and neither 'test1' nor 'old' was tested.
        common_virtual_hosts = ['intranet', 'intra', 'extranet', 'extra', 'test', 'test1',
        'old', 'new', 'admin', 'adm', 'webmail', 'services', 'console', 'apps', 'mail',
        'corporate', 'ws', 'webservice', 'private', 'secure', 'safe', 'hidden', 'public' ]
        
        for subdomain in common_virtual_hosts:
            # intranet
            res.append( subdomain )
            # intranet.www.targetsite.com
            res.append( subdomain + '.' + domain )
            # intranet.targetsite.com
            res.append( subdomain + '.' + urlParser.getRootDomain( domain ) )
            # This is for:
            # intranet.targetsite
            res.append( subdomain + '.' + urlParser.getRootDomain( domain ).split('.')[0] )
        
        return res

    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''
        # No configurable options: return an empty option list.
        return optionList()

    def setOptions( self, OptionList ):
        '''
        This method sets all the options that are configured using the user interface 
        generated by the framework using the result of getOptions().
        
        @parameter OptionList: A dictionary with the options for the plugin.
        @return: No value is returned.
        ''' 
        # This plugin exposes no options, so there is nothing to store.
        pass

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        # This plugin can run on its own; no other plugin has to run first.
        return []
        
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #5
0
class fileUpload(baseGrepPlugin):
    '''
    Find HTML forms with file upload capabilities.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''
        Set up the cache of URLs whose DOM was already inspected.
        '''
        baseGrepPlugin.__init__(self)

        # Internal variables
        # URLs already searched for file-upload inputs; each is handled once.
        self._already_inspected = ScalableBloomFilter()

    def grep(self, request, response):
        '''
        Plugin entry point, verify if the HTML has a form with file uploads.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        url = response.getURL()

        if not response.is_text_or_html() or url in self._already_inspected:
            return

        self._already_inspected.add(url)

        # In some strange cases, we fail to normalize the document
        dom = response.getDOM()
        if dom is None:
            return

        # Loop through file inputs tags
        for input_file in dom.xpath(FILE_INPUT_XPATH):
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('File upload form')
            i.setURL(url)
            i.setId(response.id)
            msg = 'The URL: "%s" has form with file upload ' \
            'capabilities.' % url
            i.setDesc(msg)
            # Highlight the offending <input type="file"> markup itself.
            i.addToHighlight(etree.tostring(input_file))
            kb.kb.append(self, 'fileUpload', i)

    
    def setOptions( self, OptionList ):
        '''
        This plugin has no user-configurable options, so nothing is stored.

        @parameter OptionList: Ignored.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''
        # No configurable options: return an empty option list.
        return optionList()

    def end(self):
        '''
        This method is called when the plugin wont be used anymore.
        '''
        # Print every 'fileUpload' finding stored in the KB, grouped by URL.
        self.printUniq( kb.kb.getData( 'fileUpload', 'fileUpload' ), 'URL' )

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        # This plugin can run on its own; no other plugin has to run first.
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #6
0
class blankBody(baseGrepPlugin):
    '''
    Find responses with empty body.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseGrepPlugin.__init__(self)
        # URLs that were already reported; each blank body is reported once.
        self._already_reported = ScalableBloomFilter()
        
    def grep(self, request, response):
        '''
        Plugin entry point, find the blank bodies and report them.

        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None

        Init
        >>> from core.data.url.httpResponse import httpResponse
        >>> from core.data.request.fuzzableRequest import fuzzableRequest
        >>> from core.controllers.misc.temp_dir import create_temp_dir
        >>> o = create_temp_dir()

        Simple test, empty string.
        >>> body = ''
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> b = blankBody()
        >>> b.grep(request, response)
        >>> assert len(kb.kb.getData('blankBody', 'blankBody')) == 1

        With some content. (NOTE: these assertions previously queried the
        unrelated 'ssn' KB key, which is always empty, so they were vacuous;
        they now check the 'blankBody' key this plugin actually writes to.)
        >>> kb.kb.save('blankBody','blankBody',[])
        >>> body = 'header body footer'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> b.grep(request, response)
        >>> assert len(kb.kb.getData('blankBody', 'blankBody')) == 0

        Strange method, empty body.
        >>> kb.kb.save('blankBody','blankBody',[])
        >>> body = ''
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'ARGENTINA' )
        >>> b.grep(request, response)
        >>> assert len(kb.kb.getData('blankBody', 'blankBody')) == 0

        Response codes,
        >>> kb.kb.save('blankBody','blankBody',[])
        >>> body = ''
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(401, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> b.grep(request, response)
        >>> len(kb.kb.getData('blankBody', 'blankBody'))
        0
        '''
        # Report only GET/POST responses with an empty body, skipping status
        # codes that legitimately carry no body (401, 304, 204), redirects
        # (Location header present) and URLs already reported.
        if response.getBody() == '' and request.getMethod() in ['GET', 'POST']\
        and response.getCode() not in [401, 304, 204] and 'location' not in response.getLowerCaseHeaders()\
        and response.getURL() not in self._already_reported:
            
            #   report these informations only once
            self._already_reported.add( response.getURL() )
            
            #   append the info object to the KB.
            i = info.info()
            i.setPluginName(self.getName())
            i.setName('Blank body')
            i.setURL( response.getURL() )
            i.setId( response.id )
            msg = 'The URL: "'+ response.getURL()  + '" returned an empty body. '
            msg += 'This could indicate an error.'
            i.setDesc(msg)
            kb.kb.append( self, 'blankBody', i )
        
    def setOptions( self, OptionList ):
        '''
        Nothing to do here, no options.

        @parameter OptionList: Ignored; this plugin exposes no options.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''
        # No configurable options: return an empty option list.
        return optionList()
        
    def end(self):
        '''
        This method is called when the plugin wont be used anymore.
        '''
        # Print every 'blankBody' finding stored in the KB (no grouping key).
        self.printUniq( kb.kb.getData( 'blankBody', 'blankBody' ), None )
    
    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        # This plugin can run on its own; no other plugin has to run first.
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #7
0
class metaTags(baseGrepPlugin):
    '''
    Grep every page for interesting meta tags.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''
        Build the table of interesting words and the inspected-URI cache.
        '''
        baseGrepPlugin.__init__(self)

        self._comments = {}
        self._search404 = False

        # Words that make a meta tag worth reporting. When the value is not
        # None it describes what the tag is used for.
        #
        # About "verify-v1":
        # <meta name="verify-v1" content="/JBoXnwT1d7TbbWCwL8tXe+Ts2I2LXYrdnnK50g7kdY=" />
        # That's one of the verification elements used by Google Sitemaps. When
        # you sign up for Sitemaps you have to add that element to a root page
        # to demonstrate to Google that you're the site owner. So there is
        # probably a Sitemaps account for the site, if you haven't found it
        # already.
        self._interesting_words = {'user':None, 'pass':None, 'microsoft':None,
        'visual':None, 'linux':None, 'source':None, 'author':None, 'release':None,
        'version':None, 'verify-v1':'Google Sitemap' }
        self._already_inspected = ScalableBloomFilter()
        
    def grep(self, request, response):
        '''
        Plugin entry point, search for meta tags.

        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        uri = response.getURI()

        # Only inspect HTML pages that exist, and each URI only once.
        if not response.is_text_or_html() or is_404( response ) or \
            uri in self._already_inspected:
            return

        self._already_inspected.add(uri)

        try:
            dp = dpCache.dpc.getDocumentParserFor( response )
        except w3afException:
            return

        for tag in dp.getMetaTags():
            name = self._find_name( tag )
            for attr in tag:
                for word in self._interesting_words:

                    # Check if we have something interesting
                    # and WHERE that thing actually is
                    where = value = None
                    if word in attr[0].lower():
                        where = 'name'
                        value = attr[0].lower()
                    elif word in attr[1].lower():
                        where = 'value'
                        value = attr[1].lower()

                    if where is None:
                        # The attribute is not interesting
                        continue

                    # The atribute is interesting!
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName('Interesting META tag')
                    i.setURI( response.getURI() )
                    i.setId( response.id )
                    msg = 'The URI: "' +  i.getURI() + '" sent a META tag with '
                    msg += 'attribute '+ where +' "'+ value +'" which'
                    msg += ' looks interesting.'
                    i.addToHighlight( where, value )
                    if self._interesting_words.get(name, None):
                        msg += ' The tag is used for '
                        msg += self._interesting_words[name] + '.'
                    i.setDesc( msg )
                    kb.kb.append( self , 'metaTags' , i )
    
    def _find_name( self, tag ):
        '''
        @parameter tag: A meta tag as a sequence of (attribute, value) pairs.
        @return: the tag name, or the empty string if no "name" attribute exists.
        '''
        names = [attr[1] for attr in tag if attr[0].lower() == 'name']
        return names[0] if names else ''
        
    def setOptions( self, optionsMap ):
        '''
        Store the user-configured values produced by getOptions().

        @parameter optionsMap: A dictionary with the options for the plugin.
        '''
        self._search404 = optionsMap['search404'].getValue()
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''
        ol = optionList()
        ol.add(option('search404', self._search404,
                      'Search for meta tags in 404 pages.', 'boolean'))
        return ol
    
    def end(self):
        '''
        This method is called when the plugin wont be used anymore.
        '''
        # Now print the information objects
        # Every 'metaTags' finding stored in the KB, grouped by URL.
        self.printUniq( kb.kb.getData( 'metaTags', 'metaTags' ), 'URL' )

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        # This plugin can run on its own; no other plugin has to run first.
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #8
0
class phpEggs(baseDiscoveryPlugin):
    '''
    Fingerprint the PHP version using documented easter eggs that exist in PHP.
    @author: Andres Riancho ( [email protected] )
    '''
    def __init__(self):
        '''
        Initialize the plugin state and load the PHP easter egg fingerprint
        database.
        '''
        baseDiscoveryPlugin.__init__(self)

        # When set to False, discover() raises w3afRunOnce and the plugin
        # is removed from the discovery list.
        self._exec = True

        # Already analyzed extensions
        self._already_analyzed_ext = ScalableBloomFilter()

        # Map of PHP version string -> list of (md5-hash, egg description)
        # tuples, used to fingerprint the remote PHP version from the easter
        # egg response bodies.
        #
        # NOTE(review): the original code assigned the "4.3.10" key twice with
        # different hash sets; the first assignment was dead code (silently
        # overwritten by the later one), so only the surviving entry is kept.
        self._egg_DB = {}
        self._egg_DB["4.1.2"] = [ 
                ("744aecef04f9ed1bc39ae773c40017d1", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB["4.2.2"] = [ 
                ("758ccaa9094cdeedcfc60017e768137e", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB["4.3.10-18"] = [ 
                ("1e8fe4ae1bf06be222c1643d32015f0c", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB["4.3.11"] = [ 
                ("1e8fe4ae1bf06be222c1643d32015f0c", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("a8ad323e837fa00771eda6b709f40e37", "PHP Logo 2"), 
                ("a8ad323e837fa00771eda6b709f40e37", "Zend Logo") ]
        self._egg_DB["4.3.2"] = [ 
                ("8a8b4a419103078d82707cf68226a482", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB["4.3.8"] = [ 
                ("96714a0fbe23b5c07c8be343adb1ba90", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB["4.3.9"] = [ 
                ("f9b56b361fafd28b668cc3498425a23b", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB['4.3.10'] = [
                ('7b27e18dc6f846b80e2f29ecf67e4133', 'PHP Logo'),
                ('43af90bcfa66f16af62744e8c599703d', 'Zend Logo'),
                ('b233cc756b06655f47489aa2779413d7', 'PHP Credits'),
                ('185386dd4b2eff044bd635d22ae7dd9e', 'PHP Logo 2')] 
        self._egg_DB["4.4.0"] = [ 
                ("ddf16ec67e070ec6247ec1908c52377e", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB["4.4.0 for Windows"] = [ 
                ("6d974373683ecfcf30a7f6873f2d234a", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB["4.4.4"] = [ 
                ("bed7ceff09e9666d96fdf3518af78e0e", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB["4.4.4-8+etch6"] = [ 
                ("31a2553efc348a21b85e606e5e6c2424", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB["4.4.7"] = [ 
                ("72b7ad604fe1362f1e8bf4f6d80d4edc", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB['4.4.7, PleskWin, ASP.NET'] = [
                ('b8477b9b88e90f12e3200660a70eb765', 'Zend Logo'),
                ('b8477b9b88e90f12e3200660a70eb765', 'PHP Credits'),
                ('b8477b9b88e90f12e3200660a70eb765', 'PHP Logo 2'),
                ('b8477b9b88e90f12e3200660a70eb765', 'PHP Logo')]
        self._egg_DB["4.4.8"] = [ 
                ("4cdfec8ca11691a46f4f63839e559fc5", "PHP Credits"), 
                ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"), 
                ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"), 
                ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo") ]
        self._egg_DB["5.0.3"] = [ 
                ("def61a12c3b0a533146810403d325451", "PHP Credits"), 
                ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"), 
                ("37e194b799d4aaff10e39c4e3b2679a2", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.1.1"] = [ 
                ("5518a02af41478cfc492c930ace45ae5", "PHP Credits"), 
                ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.1.6"] = [ 
                ("4b689316409eb09b155852e00657a0ae", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.0"] = [ 
                ("e566715bcb0fd2cb1dc43ed076c091f1", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.0-8+etch10"] = [ 
                ("e566715bcb0fd2cb1dc43ed076c091f1", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.0-8+etch7"] = [ 
                ("307f5a1c02155ca38744647eb94b3543", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.1"] = [ 
                ("d3894e19233d979db07d623f608b6ece", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.2"] = [ 
                ("56f9383587ebcc94558e11ec08584f05", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.3-1+b1"] = [ 
                ("c37c96e8728dc959c55219d47f2d543f", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.4"] = [ 
                ("74c33ab9745d022ba61bc43a5db717eb", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.5"] = [ 
                ("f26285281120a2296072f21e21e7b4b0", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.4-2ubuntu5.3"] = [ 
                ("f26285281120a2296072f21e21e7b4b0", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.5-3"] = [ 
                ("b7e4385bd7f07e378d92485b4722c169", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("0152ed695f4291488741d98ba066d280", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.6"] = [ 
                ("bbd44c20d561a0fc5a4aa76093d5400f", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB["5.2.6RC4-pl0-gentoo"] = [ 
                ("d03b2481f60d9e64cb5c0f4bd0c87ec1", "PHP Credits"), 
                ("c48b07899917dfb5d591032007041ae3", "PHP Logo"), 
                ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"), 
                ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo") ]
        self._egg_DB['5.2.8-pl1-gentoo'] = [
                ('c48b07899917dfb5d591032007041ae3', 'PHP Logo'), 
                ('40410284d460552a6c9e10c1f5ae7223', 'PHP Credits'), 
                ('50caaf268b4f3d260d720a1a29c5fe21', 'PHP Logo 2'), 
                ('7675f1d01c927f9e6a4752cf182345a2', 'Zend Logo')]
        
    def discover(self, fuzzableRequest ):
        '''
        Nothing strange, just do some GET requests to the eggs and analyze the response.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        if not self._exec:
            # This will remove the plugin from the discovery plugins to be runned.
            raise w3afRunOnce()
        else:
            # Get the extension of the URL (.html, .php, .. etc)
            ext = urlParser.getExtension( fuzzableRequest.getURL() )
            
            # Only perform this analysis if we haven't already analyzed this type of extension
            # OR if we get an URL like http://f00b5r/4/     (Note that it has no extension)
            # This logic will perform some extra tests... but we won't miss some special cases
            # Also, we aren't doing something like "if 'php' in ext:" because we never depend
            # on something so changable as extensions to make decisions.
            if ext == '' or ext not in self._already_analyzed_ext:
                
                # Init some internal variables
                # GET_results holds (response, egg description, egg URL) tuples.
                GET_results = []
                original_response = self._urlOpener.GET( fuzzableRequest.getURL(), useCache=True )
                
                # Perform the GET requests to see if we have a phpegg
                # NOTE(review): _get_eggs() is defined outside this chunk;
                # presumably it yields (query-string, description) pairs — confirm.
                for egg, egg_desc in self._get_eggs():
                    egg_URL = urlParser.uri2url( fuzzableRequest.getURL() ) + egg
                    try:
                        response = self._urlOpener.GET( egg_URL, useCache=True )
                    except KeyboardInterrupt,e:
                        # Let the user abort the scan.
                        raise e
                    except w3afException, w3:
                        # NOTE(review): unlike sibling plugins that log and
                        # continue, this propagates the error to the caller.
                        raise w3
                    else:
                        GET_results.append( (response, egg_desc, egg_URL) )
                        
                #
                #   Now I analyze if this is really a PHP eggs thing, or simply a response that
                #   changes a lot on each request. Before, I had something like this:
                #
                #       if relative_distance(original_response.getBody(), response.getBody()) < 0.1:
                #
                #   But I got some reports about false positives with this approach, so now I'm
                #   changing it to something a little bit more specific.
                images = 0
                not_images = 0
                for response, egg_desc, egg_URL in GET_results:
                    if 'image' in response.getContentType():
                        images += 1
                    else:
                        not_images += 1
                
                # A real PHP egg set returns 3 image responses (the logos) and
                # 1 non-image response (the credits page).
                if images == 3 and not_images == 1:
                    #
                    #   The remote web server has expose_php = On. Report all the findings.
                    #
                    for response, egg_desc, egg_URL in GET_results:
                        i = info.info()
                        i.setPluginName(self.getName())
                        i.setName('PHP Egg - ' + egg_desc)
                        i.setURL( egg_URL )
                        desc = 'The PHP framework running on the remote server has a "'
                        desc += egg_desc +'" easter egg, access to the PHP egg is possible'
                        desc += ' through the URL: "'+  egg_URL + '".'
                        i.setDesc( desc )
                        kb.kb.append( self, 'eggs', i )
                        om.out.information( i.getDesc() )
                        
                        #   Only run once.
                        # NOTE(review): this is set on every loop iteration;
                        # it is idempotent, but belongs after the loop.
                        self._exec = False
                
                    # analyze the info to see if we can identify the version
                    self._analyze_egg( GET_results )
                
                # Now we save the extension as one of the already analyzed
                if ext != '':
                    self._already_analyzed_ext.add(ext)
예제 #9
0
class allowedMethods(baseDiscoveryPlugin):
    '''
    Enumerate the allowed methods of an URL.
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''
        Initialize the internal state and the candidate HTTP method lists.
        '''
        baseDiscoveryPlugin.__init__(self)

        # Internal variables
        self._exec = True
        self._already_tested = ScalableBloomFilter()

        # Response codes that mean "this method is NOT allowed"
        self._bad_codes = [ httpConstants.UNAUTHORIZED,
                            httpConstants.NOT_IMPLEMENTED,
                            httpConstants.METHOD_NOT_ALLOWED,
                            httpConstants.FORBIDDEN ]

        # Methods
        self._dav_methods = [ 'DELETE', 'PROPFIND', 'PROPPATCH', 'COPY',
                              'MOVE', 'LOCK', 'UNLOCK', 'MKCOL' ]
        self._common_methods = [ 'OPTIONS', 'GET', 'HEAD', 'POST', 'TRACE', 'PUT' ]
        self._uncommon_methods = [ '*', 'SUBSCRIPTIONS', 'NOTIFY', 'DEBUG',
                                   'TRACK', 'POLL', 'PIN', 'INVOKE',
                                   'SUBSCRIBE', 'UNSUBSCRIBE' ]

        # Methods taken from http://www.w3.org/Protocols/HTTP/Methods.html
        self._proposed_methods = [ 'CHECKOUT', 'SHOWMETHOD', 'LINK', 'UNLINK',
                                   'CHECKIN', 'TEXTSEARCH', 'SPACEJUMP',
                                   'SEARCH', 'REPLY' ]
        self._extra_methods = [ 'CONNECT', 'RMDIR', 'MKDIR', 'REPORT', 'ACL',
                                'DELETE', 'INDEX', 'LABEL', 'INVALID' ]
        self._version_control = [ 'VERSION_CONTROL', 'CHECKIN', 'UNCHECKOUT',
                                  'PATCH', 'MERGE', 'MKWORKSPACE',
                                  'MKACTIVITY', 'BASELINE_CONTROL' ]

        # The full candidate list; duplicates are removed before testing.
        self._supported_methods = ( self._dav_methods + self._common_methods +
                                    self._uncommon_methods +
                                    self._proposed_methods +
                                    self._extra_methods +
                                    self._version_control )

        # User configured variables
        self._exec_one_time = True
        self._report_dav_only = True
        
    def discover(self, fuzzableRequest ):
        '''
        Uses several technics to try to find out what methods are allowed for an URL.

        @parameter fuzzableRequest: A fuzzableRequest instance that contains
        (among other things) the URL to test.
        '''
        if not self._exec:
            # This will remove the plugin from the discovery plugins to be runned.
            raise w3afRunOnce()

        # When configured to run only once, disable any future executions.
        if self._exec_one_time:
            self._exec = False

        # Only test each domain path a single time.
        domain_path = urlParser.getDomainPath( fuzzableRequest.getURL() )
        if domain_path not in self._already_tested:
            self._already_tested.add( domain_path )
            self._check_methods( domain_path )

        return []
    
    def _check_methods( self, url ):
        '''
        Find out what HTTP methods are allowed for the given URL and store the
        results in the knowledge base.

        First an OPTIONS request is sent; when the server answers with an
        "Allow" or "Public" header those values are trusted. Otherwise every
        candidate method (except DELETE and PUT) is tried manually.

        @parameter url: Where to check.
        @return: An empty list; all findings are saved to the KB.
        '''
        allowed_methods = []
        with_options = False
        id_list = []
        
        # First, try to check available methods using OPTIONS, if OPTIONS isn't 
        # enabled, do it manually
        res = self._urlOpener.OPTIONS( url )
        headers = res.getLowerCaseHeaders()
        for header_name in ['allow', 'public']:
            if header_name in headers:
                allowed_methods.extend( headers[header_name].split(',') )
                allowed_methods = [ x.strip() for x in allowed_methods ]
                with_options = True
                allowed_methods = list(set(allowed_methods))

        if with_options:
            # Save the ID for later
            id_list.append( res.id )
        else:
            #
            #   Before doing anything else, I'll send a request with a non-existant method
            #   If that request succeds, then all will...
            #
            try:
                non_exist_response = self._urlOpener.ARGENTINA( url )
                get_response = self._urlOpener.GET( url )
            except KeyboardInterrupt:
                # The original bare "except:" swallowed user interrupts here,
                # making the scan hard to abort; always re-raise them.
                raise
            except Exception:
                # Best effort probe: on failure just fall back to testing
                # each method one by one.
                pass
            else:
                if non_exist_response.getCode() not in self._bad_codes\
                and get_response.getBody() == non_exist_response.getBody():
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName( 'Non existent methods default to GET' )
                    i.setURL( url )
                    i.setId( [non_exist_response.getId(), get_response.getId()] )
                    msg = 'The remote Web server has a custom configuration, in which any non'
                    msg += ' existent methods that are invoked are defaulted to GET instead of'
                    msg += ' returning a "Not Implemented" response.'
                    i.setDesc( msg )
                    kb.kb.append( self , 'custom-configuration' , i )
                    #
                    #   It makes no sense to continue working, all methods will appear as enabled
                    #   because of this custom configuration.
                    #
                    return []

            # 'DELETE' is not tested! I don't want to remove anything...
            # 'PUT' is not tested! I don't want to overwrite anything...
            # remove dups, and dangerous methods.
            methods_to_test = list(set(self._supported_methods))
            methods_to_test.remove('DELETE')
            methods_to_test.remove('PUT')

            for method in methods_to_test:
                method_functor = getattr( self._urlOpener, method )
                try:
                    # apply() is deprecated; a direct call is equivalent.
                    response = method_functor( url )
                    code = response.getCode()
                except KeyboardInterrupt:
                    raise
                except Exception:
                    # A failed request simply means this method isn't allowed.
                    pass
                else:
                    if code not in self._bad_codes:
                        allowed_methods.append( method )
        
        # Added this to make the output a little more readable.
        allowed_methods.sort()
        
        # Save the results in the KB so that other plugins can use this information
        # Do not remove these information, other plugins REALLY use it !
        # (The info object is built once; only the description and the KB key
        # differ between the DAV and the non-DAV case.)
        i = info.info()
        i.setPluginName(self.getName())
        i.setName('Allowed methods for ' + url )
        i.setURL( url )
        i.setId( id_list )
        i['methods'] = allowed_methods

        if set( allowed_methods ).intersection( self._dav_methods ):
            # dav is enabled!
            msg = 'The URL "' + url + '" has the following allowed methods, which'
            msg += ' include DAV methods: ' + ', '.join(allowed_methods)
            i.setDesc( msg )
            kb.kb.append( self , 'dav-methods' , i )
        else:
            msg = 'The URL "' + url + '" has the following allowed methods:'
            msg += ' ' + ', '.join(allowed_methods)
            i.setDesc( msg )
            kb.kb.append( self , 'methods' , i )
            
        return []
    
    def end( self ):
        '''
        Print the results, grouped either by URL or by method list.
        '''
        # First I get the data from the kb
        all_info_obj = kb.kb.getData( 'allowedMethods', 'methods' )
        dav_info_obj = kb.kb.getData( 'allowedMethods', 'dav-methods' )

        # Now I transform it to something I can use with groupbyMinKey
        allMethods = [ (i.getURL(), i['methods']) for i in all_info_obj ]
        davMethods = [ (i.getURL(), i['methods']) for i in dav_info_obj ]

        # Now I work the data...
        if self._report_dav_only:
            to_show, method_type = davMethods, ' DAV'
        else:
            to_show, method_type = allMethods, ''

        # Make it hashable
        tmp = [ (url, ', '.join(method_list)) for url, method_list in to_show ]

        result_dict, itemIndex = groupbyMinKey( tmp )

        for k in result_dict:
            if itemIndex == 0:
                # Grouped by URLs
                msg = 'The URL: "%s" has the following' + method_type + ' methods enabled:'
                om.out.information(msg % k)
            else:
                # Grouped by Methods
                msg = 'The methods: ' + k + ' are enabled on the following URLs:'
                om.out.information(msg)

            for i in result_dict[k]:
                om.out.information('- ' + i )
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''
        options = optionList()

        d1 = 'Execute plugin only one time'
        h1 = 'Generally the methods allowed for a URL are \
          configured system wide, so executing this plugin only one \
          time is the faster choice. The safest choice is to run it against every URL.'
        options.add( option('execOneTime', self._exec_one_time, d1, 'boolean', help=h1) )

        d2 = 'Only report findings if uncommon methods are found'
        options.add( option('reportDavOnly', self._report_dav_only, d2, 'boolean') )

        return options
        
    def setOptions( self, optionsMap ):
        '''
        This method sets all the options that are configured using the user interface 
        generated by the framework using the result of getOptions().
        
        @parameter optionsMap: A dictionary with the options for the plugin.
        @return: No value is returned.
        '''
        exec_one_time = optionsMap['execOneTime'].getValue()
        report_dav_only = optionsMap['reportDavOnly'].getValue()

        self._exec_one_time = exec_one_time
        self._report_dav_only = report_dav_only

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one. This plugin has no dependencies.
        '''
        dependencies = []
        return dependencies

    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #10
0
class ajax(baseGrepPlugin):
    '''
    Grep every page for traces of Ajax code.
      
    @author: Andres Riancho ( [email protected] )
    '''
    
    def __init__(self):
        '''
        Initialize the already-inspected filter and compile the AJAX regex.
        '''
        baseGrepPlugin.__init__(self)
        
        # Internal variables
        self._already_inspected = ScalableBloomFilter()
        
        # Create the regular expression to search for AJAX.
        # NOTE: the original pattern listed "ActiveXObject" twice; the
        # redundant alternative was removed (the matched set is unchanged).
        ajax_regex_string = '(XMLHttpRequest|eval\(|ActiveXObject|'
        ajax_regex_string += 'Msxml2\.XMLHTTP|Microsoft\.XMLHTTP)'
        self._ajax_regex_re = re.compile( ajax_regex_string, re.IGNORECASE )

    def grep(self, request, response):
        '''
        Plugin entry point.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None, all results are saved in the kb.

        Init
        >>> from core.data.url.httpResponse import httpResponse
        >>> from core.data.request.fuzzableRequest import fuzzableRequest
        >>> from core.controllers.misc.temp_dir import create_temp_dir
        >>> o = create_temp_dir()

        Simple test, empty string.
        >>> body = ''
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> a = ajax()
        >>> a.grep(request, response)
        >>> assert len(kb.kb.getData('ajax', 'ajax')) == 0

        Discover ajax!
        >>> body = '<html><head><script>xhr = new XMLHttpRequest(); xhr.open(GET, "data.txt",  true); </script></head><html>'
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> a = ajax()
        >>> a.grep(request, response)
        >>> assert len(kb.kb.getData('ajax', 'ajax')) == 1

        Discover ajax with a broken script tag that doesn't close
        >>> kb.kb.save('ajax','ajax',[])
        >>> body = '<html><head><script>xhr = new XMLHttpRequest(); xhr.open(GET, "data.txt",  true); </head><html>'
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> a = ajax()
        >>> a.grep(request, response)
        >>> assert len(kb.kb.getData('ajax', 'ajax')) == 1

        Discover ajax with a broken script, head and html tags.
        >>> kb.kb.save('ajax','ajax',[])
        >>> body = '<html><head><script>xhr = new XMLHttpRequest(); xhr.open(GET, "data.txt",  true);'
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> a = ajax()
        >>> a.grep(request, response)
        >>> assert len(kb.kb.getData('ajax', 'ajax')) == 1

        Another ajax function, no broken html.
        >>> kb.kb.save('ajax','ajax',[])
        >>> body = '<html><head><script> ... xhr = new ActiveXObject("Microsoft.XMLHTTP"); ... </script></head><html>'
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> a = ajax()
        >>> a.grep(request, response)
        >>> assert len(kb.kb.getData('ajax', 'ajax')) == 1

        Two functions, I only want one report for this page.
        >>> kb.kb.save('ajax','ajax',[])
        >>> body = '<script> ... xhr = new XMLHttpRequest(); ... xhr = new ActiveXObject("Microsoft.XMLHTTP"); ... </script>'
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> a = ajax()
        >>> a.grep(request, response)
        >>> len(kb.kb.getData('ajax', 'ajax'))
        1
        '''
        url = response.getURL()
        # Only inspect text/HTML responses, and each URL at most once.
        if response.is_text_or_html() and url not in self._already_inspected:
            
            # Don't repeat URLs
            self._already_inspected.add(url)
            
            dom = response.getDOM()
            # In some strange cases, we fail to normalize the document
            if dom is not None:

                # Only search inside <script> bodies to avoid matching the
                # AJAX keywords in plain page text.
                script_elements = dom.xpath('.//script')
                for element in script_elements:
                    # returns the text between <script> and </script>
                    script_content = element.text
                    
                    if script_content is not None:
                        
                        res = self._ajax_regex_re.search(script_content)
                        if res:
                            # Found AJAX code; report one info object per
                            # matching script element.
                            i = info.info()
                            i.setPluginName(self.getName())
                            i.setName('AJAX code')
                            i.setURL(url)
                            i.setDesc('The URL: "%s" has an AJAX code.' % url)
                            i.setId(response.id)
                            i.addToHighlight(res.group(0))
                            kb.kb.append(self, 'ajax', i)

    
    def setOptions( self, OptionList ):
        '''
        This plugin has no user configurable options, so nothing is stored.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin (it has none).
        '''
        options = optionList()
        return options
        
    def end(self):
        '''
        This method is called when the plugin wont be used anymore; it prints
        the unique AJAX findings that were stored in the knowledge base.
        '''
        ajax_infos = kb.kb.getData( 'ajax', 'ajax' )
        self.printUniq( ajax_infos, 'URL' )

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one. This plugin has no dependencies.
        '''
        dependencies = []
        return dependencies
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #11
0
class strangeParameters(baseGrepPlugin):
    '''
    Grep the HTML response and find URIs that have strange parameters.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''Initialize the plugin and the duplicate-findings filter.'''
        baseGrepPlugin.__init__(self)

        # Bloom filter remembering the references we already reported, so
        # every URI is only added to the kb once.
        self._already_reported = ScalableBloomFilter()
        
    def grep(self, request, response):
        '''
        Plugin entry point. Parse the response, extract its references and
        inspect every query string parameter for "strange" values and for
        values that contain SQL sentences.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None, all results are saved in the kb.
        '''
        try:
            dp = dpCache.dpc.getDocumentParserFor( response )
        except w3afException:
            # No suitable parser for this response, nothing to analyze.
            pass
        else:
            # Note:
            # - With parsed_references I'm 100% that it's really something in the HTML
            # that the developer intended to add.
            #
            # - The re_references are the result of regular expressions, which in some cases
            # are just false positives.
            parsed_references, re_references = dp.getReferences()
            
            for ref in parsed_references:
                
                qs = urlParser.getQueryString( ref )
                
                for param_name in qs:
                    # This for loop is to address the repeated parameter name issue
                    for element_index in xrange(len(qs[param_name])):
                        if self._is_strange( request, param_name, qs[param_name][element_index] )\
                        and ref not in self._already_reported:
                            # Don't repeat findings
                            self._already_reported.add(ref)

                            i = info.info()
                            i.setPluginName(self.getName())
                            i.setName('Strange parameter')
                            i.setURI( ref )
                            i.setId( response.id )
                            msg = 'The URI: "' +  i.getURI() + '" has a parameter named: "' + param_name
                            msg += '" with value: "' + qs[param_name][element_index] + '", which is quite odd.'
                            i.setDesc( msg )
                            i.setVar( param_name )
                            i['parameterValue'] = qs[param_name][element_index]
                            i.addToHighlight(qs[param_name][element_index])

                            kb.kb.append( self , 'strangeParameters' , i )
                            
                        # To find this kind of vulns
                        # http://thedailywtf.com/Articles/Oklahoma-
                        # Leaks-Tens-of-Thousands-of-Social-Security-Numbers,-Other-
                        # Sensitive-Data.aspx
                        if self._is_SQL( request, param_name, qs[param_name][element_index] )\
                        and ref not in self._already_reported:
                            
                            # Don't repeat findings
                            self._already_reported.add(ref)
                            
                            v = vuln.vuln()
                            v.setPluginName(self.getName())
                            v.setName('Parameter has SQL sentence')
                            v.setURI( ref )
                            v.setId( response.id )
                            msg = 'The URI: "' +  v.getURI() + '" has a parameter named: "' + param_name
                            msg += '" with value: "' + qs[param_name][element_index] + '", which is a SQL sentence.'
                            v.setDesc( msg )
                            v.setVar( param_name )
                            v['parameterValue'] = qs[param_name][element_index]
                            # BUG FIX: this used to call i.addToHighlight(), which
                            # highlighted the wrong finding and raised a NameError
                            # whenever no "strange" info object had been created
                            # earlier in this call.
                            v.addToHighlight(qs[param_name][element_index])
                            kb.kb.append( self , 'strangeParameters' , v )
    
    def setOptions( self, OptionList ):
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin; always empty,
        this plugin has nothing to configure.
        '''
        return optionList()

    def end(self):
        '''
        Called once when the plugin won't be used anymore; print the unique
        'strangeParameters' findings grouped by variable name.
        '''
        findings = kb.kb.getData( 'strangeParameters', 'strangeParameters' )
        self.printUniq( findings, 'VAR' )

    def _is_SQL(self, request, parameter, value):
        '''
        @return: True if the parameter value contains SQL sentences
        '''
        regex = '(SELECT .*? FROM|INSERT INTO .*? VALUES|UPDATE .*? SET .*? WHERE)'
        for match in re.findall( regex, value, re.IGNORECASE):
            if not request.sent( match ):
                return True
        
        return False

    def _is_strange(self, request, parameter, value):
        '''
        @return: True if the parameter value is strange
        '''
        _strange_parameter_re = []

        # Seems to be a function
        _strange_parameter_re.append('\w+\(.*?\)')
        # Add more here...
        #_strange_parameter_re.append('....')

        for regex in _strange_parameter_re:
            for match in re.findall( regex, value ):
                if not request.sent( match ):
                    return True
        
        splitted_value = [ x for x in re.split( r'([a-zA-Z0-9. ]+)', value ) if x != '' ]
        if len( splitted_value ) > 4:
            if not request.sent( value ):
                return True
        
        return False
    
        
    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #12
0
class pykto(baseDiscoveryPlugin):
    '''
    A nikto port to python. 
    @author: Andres Riancho ( [email protected] )  
    '''

    def __init__(self):
        '''Initialize internal state and the user configurable settings.'''
        baseDiscoveryPlugin.__init__(self)

        # internal variables
        self._exec = True
        self._already_visited = ScalableBloomFilter()
        self._first_time = True
        self._show_remote_server = True

        # User configured parameters: paths to the nikto and w3af scan
        # databases shipped with the plugin.
        self._db_file = os.path.join('plugins', 'discovery', 'pykto',
                                     'scan_database.db')
        self._extra_db_file = os.path.join('plugins', 'discovery', 'pykto',
                                           'w3af_scan_database.db')

        self._cgi_dirs = ['/cgi-bin/']
        self._admin_dirs = ['/admin/', '/adm/']
        self._users = ['adm', 'bin', 'daemon', 'ftp', 'guest', 'listen', 'lp',
                       'mysql', 'noaccess', 'nobody', 'nobody4', 'nuucp',
                       'operator', 'root', 'smmsp', 'smtp', 'sshd', 'sys',
                       'test', 'unknown']
        self._nuke = ['/', '/postnuke/', '/postnuke/html/', '/modules/',
                      '/phpBB/', '/forum/']

        self._mutate_tests = False
        self._generic_scan = False
        self._update_scandb = False
        self._source = ''
        
    def discover(self, fuzzableRequest ):
        '''
        Runs pykto to the site.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains
                                    (among other things) the URL to test.
        @return: The new fuzzable requests found while scanning.
        '''
        self._new_fuzzable_requests = []

        if not self._exec:
            # dont run anymore
            raise w3afRunOnce()

        # run!
        if self._update_scandb:
            self._update_db()

        # Run the basic scan (only once)
        if self._first_time:
            self._first_time = False
            base_url = urlParser.baseUrl( fuzzableRequest.getURL() )
            self._exec = False
            self.__run( base_url )

        # And now mutate if the user configured it...
        if self._mutate_tests:
            # If mutations are enabled, I should keep running
            self._exec = True

            # Tests are to be mutated
            dir_url = urlParser.getDomainPath( fuzzableRequest.getURL() )
            if dir_url not in self._already_visited:
                # Save the directories I already have tested
                self._already_visited.add( dir_url )
                self.__run( dir_url )

        return self._new_fuzzable_requests
                
    def __run( self, url ):
        '''
        Really run the plugin.
        
        @parameter url: The URL I have to test.
        '''
        try:
            # read the nikto database.
            db_file_1 = open(self._db_file, "r")
            # read the w3af scan database.
            db_file_2 = open(self._extra_db_file, "r")
        except Exception, e:
            raise w3afException('Failed to open the scan databases. Exception: "' + str(e) + '".')
        else:
예제 #13
0
class feeds(baseGrepPlugin):
    '''
    Grep every page and finds rss, atom, opml feeds.
      
    @author: Andres Riancho ( [email protected] )
    '''
    
    def __init__(self):
        '''Initialize the (tag, version attribute, feed type) triplets.'''
        baseGrepPlugin.__init__(self)
        # FIX: <feed version="..."> is an Atom feed; it was mislabeled as
        # 'OPML' before (copy/paste from the <opml> entry below).
        self._rss_tag_attr = [('rss', 'version', 'RSS'),   # <rss version="...">
                              ('feed', 'version', 'Atom'), # <feed version="..."
                              ('opml', 'version', 'OPML')  # <opml version="...">
                              ]
        # URIs already inspected, to avoid duplicate findings.
        self._already_inspected = ScalableBloomFilter()
                
    def grep(self, request, response):
        '''
        Plugin entry point, find feeds.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        dom = response.getDOM()
        uri = response.getURI()

        # In some strange cases, we fail to normalize the document
        if uri in self._already_inspected or dom is None:
            return

        self._already_inspected.add(uri)

        for tag_name, attr_name, feed_type in self._rss_tag_attr:

            # Find all tags with tag_name
            for element in dom.xpath('//%s' % tag_name):

                if attr_name not in element.attrib:
                    continue

                version = element.attrib[attr_name]
                i = info.info()
                i.setPluginName(self.getName())
                i.setName(feed_type +' feed')
                i.setURI(uri)
                msg = 'The URL: "' + uri + '" is a ' + feed_type + ' version "' 
                msg += version + '" feed.'
                i.setDesc( msg )
                i.setId( response.id )
                i.addToHighlight( feed_type )
                kb.kb.append( self, 'feeds', i )
    
    def setOptions( self, OptionList ):
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin; always empty,
        there is nothing to configure.
        '''
        return optionList()

    def end(self):
        '''
        Called once when the plugin won't be used anymore; print the unique
        'feeds' findings that were stored in the knowledge base.
        '''
        feed_infos = kb.kb.getData( 'feeds', 'feeds' )
        self.printUniq( feed_infos, 'URL' )

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #14
0
class archiveDotOrg(baseDiscoveryPlugin):
    '''
    Search archive.org to find new pages in the target site.
    
    @author: Andres Riancho ( [email protected] )
    @author: Darren Bilby, thanks for the good idea!
    '''

    def __init__(self):
        '''Initialize the spider state and the user configurable settings.'''
        baseDiscoveryPlugin.__init__(self)

        # archive.org URLs already fetched during spidering.
        self._already_visited = ScalableBloomFilter()

        # User configured parameters: how deep to follow archive.org links.
        self._max_depth = 3

    def discover(self, fuzzableRequest ):
        '''
        Does a search in archive.org and searches for links on the html. Then searches those
        URLs in the target site. This is a time machine ! 
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things)
                                    the URL to test.
        @return: The result of self._analyze_urls() over the spidered references.
        @raise w3afException: When the target domain is a private site that
                              archive.org can never have indexed.
        '''
        # Get the domain and set some parameters
        domain = urlParser.getDomain( fuzzableRequest.getURL() )
        if is_private_site( domain ):
            msg = 'There is no point in searching archive.org for "'+ domain + '"'
            msg += ' because it is a private site that will never be indexed.'
            raise w3afException(msg)

        # Work
        om.out.debug( 'archiveDotOrg plugin is testing: ' + fuzzableRequest.getURL() )

        start_url = 'http://web.archive.org/web/*/' + fuzzableRequest.getURL()
        # FIX: "domain" was already computed above; the original code
        # redundantly recomputed it here.
        references = self._spider_archive( [ start_url, ] , self._max_depth, domain )

        return self._analyze_urls( references )
            
    def _analyze_urls(self, references):
        '''
        Analyze what references are cached by archive.org
        
        @parameter references: archive.org URL strings found while spidering.
        @return: A list of query string objects for the URLs that are in the cache AND are in the
                 target web site.
        '''
        # Translate archive.org URL's to normal URL's by stripping everything
        # up to the second "http" occurrence.
        real_URLs = []
        for reference in references:
            try:
                reference = reference[reference.index('http', 1):]
            except Exception:
                pass
            else:
                real_URLs.append( reference )
        real_URLs = list(set(real_URLs))

        if real_URLs:
            om.out.debug('Archive.org cached the following pages:')
            for cached_url in real_URLs:
                om.out.debug('- ' + cached_url )
        else:
            om.out.debug('Archive.org did not find any pages.')

        # Verify if they exist in the target site and add them to the result if they do.
        res = []
        for real_url in real_URLs:
            if self._exists_in_target( real_url ):
                QSObject = urlParser.getQueryString( real_url )
                qsr = httpQsRequest()
                qsr.setURI( real_url )
                qsr.setDc( QSObject )
                res.append( qsr )

        if res:
            msg = 'The following pages are in Archive.org cache and also in'
            msg += ' the target site:'
            om.out.debug(msg)
            for qs_request in res:
                om.out.debug('- ' + qs_request.getURI() )
        else:
            om.out.debug('All pages found in archive.org cache are missing in the target site.')

        return res
    
    def _spider_archive( self, url_list, max_depth, domain ):
        '''
        Perform a classic web spidering process.
        
        @parameter url_list: The list of URL strings
        @parameter max_depth: The max link depth that we have to follow.
        @parameter domain: The domain name we are checking
        @return: All archive.org URLs for the target domain found while spidering.
        '''
        res = []

        # FIX: escape the domain before embedding it in the regex; an
        # unescaped "." would match any character.
        url_regex = ('http://web\\.archive\\.org/web/.*/http[s]?://'
                     + re.escape(domain) + '/.*')

        for url in url_list:
            if url in self._already_visited:
                continue
            self._already_visited.add( url )

            try:
                http_response = self._urlOpener.GET( url, useCache=True )
            except Exception:
                # Best effort: a failed fetch just skips this URL.
                continue

            try:
                document_parser = dpc.getDocumentParserFor( http_response )
            except w3afException:
                # Failed to find a suitable document parser
                continue

            # Note:
            # - With parsed_references I'm 100% that it's really something in the HTML
            # that the developer intended to add.
            #
            # - The re_references are the result of regular expressions, which in some cases
            # are just false positives.
            parsed_references, re_references = document_parser.getReferences()

            # Filter the ones I want
            new_urls = [ u for u in parsed_references if re.match(url_regex, u ) ]
            res.extend( new_urls )

            if max_depth - 1 > 0:
                # Go recursive
                res.extend( self._spider_archive( new_urls, max_depth - 1, domain ) )
            else:
                # FIX: the original code returned new_urls here, discarding
                # the results accumulated in "res" and skipping the remaining
                # URLs of url_list.
                msg = 'Some sections of the archive.org site were not analyzed because'
                msg += ' of the configured max_depth.'
                om.out.debug(msg)

        return res
    
    def _exists_in_target( self, url ):
        '''
        Check if a resource still exists in the target web site.
        
        @parameter url: The resource.
        '''
        res = False
        
        try:
            response = self._urlOpener.GET( url, useCache=True )
        except KeyboardInterrupt,e:
            raise e
        except w3afException,e:
            pass
class dotNetEventValidation(baseGrepPlugin):
    '''
    Grep every page and identify the ones that have viewstate and don't have event validation.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''Compile the ASP.NET hidden-field regexes and init the dup filter.'''
        baseGrepPlugin.__init__(self)

        # Matches the __VIEWSTATE hidden input field.
        self._viewstate = re.compile(
            r'<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value=".*?" />',
            re.IGNORECASE | re.DOTALL)

        # Matches the __EVENTVALIDATION hidden input field.
        self._eventvalidation = re.compile(
            r'<input type="hidden" name="__EVENTVALIDATION" '
            r'id="__EVENTVALIDATION" value=".*?" />',
            re.IGNORECASE | re.DOTALL)

        # Matches the __VIEWSTATEENCRYPTED hidden input field.
        self._encryptedVs = re.compile(
            r'<input type="hidden" name="__VIEWSTATEENCRYPTED" '
            r'id="__VIEWSTATEENCRYPTED" value=".*?" />',
            re.IGNORECASE | re.DOTALL)

        # URIs already analyzed, to avoid duplicate findings.
        self._already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        '''
        If I find __VIEWSTATE and empty __EVENTVALIDATION => vuln.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        '''
        if not response.is_text_or_html():
            return

        # First verify if we havent greped this yet
        if request.getURI() in self._already_reported:
            return
        self._already_reported.add(request.getURI())

        viewstate_mo = self._viewstate.search(response.getBody())
        if viewstate_mo:
            # I have __viewstate!, verify if event validation is enabled
            if not self._eventvalidation.search(response.getBody()):
                # Nice! We found a possible bug =)
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('.NET Event Validation is disabled')
                i.setURL( response.getURL() )
                i.setId( response.id )
                i.addToHighlight(viewstate_mo.group())
                msg = 'The URL: "' + i.getURL() + '" has .NET Event Validation disabled. '
                msg += 'This programming/configuration error should be manually verified.'
                i.setDesc( msg )
                kb.kb.append( self, 'dotNetEventValidation', i )

            if not self._encryptedVs.search(response.getBody()):
                # Nice! We can decode the viewstate! =)
                i = info.info()
                i.setPluginName(self.getName())
                i.setName('.NET ViewState encryption is disabled')
                i.setURL( response.getURL() )
                i.setId( response.id )
                msg = 'The URL: "' + i.getURL() + '" has .NET ViewState encryption disabled. '
                msg += 'This programming/configuration error could be exploited '
                msg += 'to decode the viewstate contents.'
                i.setDesc( msg )
                kb.kb.append( self, 'dotNetEventValidation', i )

    
    def setOptions( self, OptionList ):
        '''
        Do nothing, I don't have any options.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin; always empty,
        this plugin has nothing to configure.
        '''
        return optionList()

    def end(self):
        '''
        Called once when the plugin won't be used anymore; print the unique
        'dotNetEventValidation' findings.
        '''
        # Print alerts
        findings = kb.kb.getData( 'dotNetEventValidation', 'dotNetEventValidation' )
        self.printUniq( findings, 'URL' )
        
    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #16
0
class codeDisclosure(baseGrepPlugin):
    '''
    Grep every page for code disclosure vulnerabilities.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''Initialize the plugin internal state.'''
        baseGrepPlugin.__init__(self)

        #   Internal variables
        # URLs that were already reported as code disclosures.
        self._already_added = ScalableBloomFilter()
        # Set to False once a disclosure is found in a 404 page (see grep).
        self._first_404 = True

    def grep(self, request, response):
        '''
        Plugin entry point, search for the code disclosures.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None

        Init
        >>> from core.data.url.httpResponse import httpResponse
        >>> from core.data.request.fuzzableRequest import fuzzableRequest
        >>> from core.controllers.misc.temp_dir import create_temp_dir
        >>> from core.controllers.coreHelpers.fingerprint_404 import is_404
        >>> o = create_temp_dir()
        >>> global is_404
        >>> is_404 = lambda x: False

        Simple test, empty string.
        >>> body = ''
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> c = codeDisclosure()
        >>> c.grep(request, response)
        >>> assert len(kb.kb.getData('codeDisclosure', 'codeDisclosure')) == 0

        Disclose some PHP code,
        >>> kb.kb.save('codeDisclosure','codeDisclosure',[])
        >>> body = 'header <? echo "a"; ?> footer'
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = fuzzableRequest()
        >>> request.setURL( url )
        >>> request.setMethod( 'GET' )
        >>> c = codeDisclosure()
        >>> c.grep(request, response)
        >>> len(kb.kb.getData('codeDisclosure', 'codeDisclosure'))
        1
        '''
        # Only inspect text/HTML responses whose URL was not already reported.
        if response.is_text_or_html() and response.getURL() not in self._already_added:
            
            # is_source_file() is expected to return a match-like object
            # (match.group() is used below) and a language name string.
            match, lang  = is_source_file( response.getBody() )
            
            if match:
                
                # Check also for 404
                if not is_404( response ):
                    v = vuln.vuln()
                    v.setPluginName(self.getName())
                    v.setURL( response.getURL() )
                    v.setId( response.id )
                    v.setSeverity(severity.LOW)
                    v.setName( lang + ' code disclosure vulnerability' )
                    v.addToHighlight(match.group())
                    msg = 'The URL: "' + v.getURL() + '" has a '+lang+' code disclosure vulnerability.'
                    v.setDesc( msg )
                    kb.kb.append( self, 'codeDisclosure', v )
                    # Remember the URL so it is only reported once.
                    self._already_added.add( response.getURL() )
                
                else:
                    
                    # NOTE(review): _first_404 is set here but never read in
                    # this method - confirm whether it should gate this branch.
                    self._first_404 = False
                    # NOTE(review): this branch does NOT add the URL to
                    # self._already_added, so a code-disclosing 404 page may
                    # be reported more than once - confirm if intended.
                    v = vuln.vuln()
                    v.setPluginName(self.getName())
                    v.setURL( response.getURL() )
                    v.setId( response.id )
                    v.setSeverity(severity.LOW)
                    v.addToHighlight(match.group())
                    v.setName( lang + ' code disclosure vulnerability in 404 page' )
                    msg = 'The URL: "' + v.getURL() + '" has a '+lang+' code disclosure vulnerability in'
                    msg += ' the customized 404 script.'
                    v.setDesc( msg )
                    kb.kb.append( self, 'codeDisclosure', v )
    
    def setOptions( self, OptionList ):
        '''
        No options to set.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin; always empty,
        there is nothing to configure.
        '''
        return optionList()

    def end(self):
        '''
        Called once when the plugin won't be used anymore; print the unique
        'codeDisclosure' findings.
        '''
        # Print codeDisclosure
        findings = kb.kb.getData( 'codeDisclosure', 'codeDisclosure' )
        self.printUniq( findings, 'URL' )
        
    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #17
0
class webSpider(baseDiscoveryPlugin):
    """
    Crawl the web application.
    
    @author: Andres Riancho ( [email protected] )  
    """

    def __init__(self):
        """Initialize the crawler state and the user configurable settings."""
        baseDiscoveryPlugin.__init__(self)

        # Internal variables: compiled follow/ignore regexes, result
        # accumulators and duplicate-work trackers.
        self._compiled_ignore_re = None
        self._compiled_follow_re = None
        self._brokenLinks = []
        self._fuzzableRequests = []
        self._first_run = True
        self._already_crawled = disk_list()
        self._already_filled_form = ScalableBloomFilter()

        # User configured variables (must be set before _compileRE reads them)
        self._ignore_regex = ""
        self._follow_regex = ".*"
        self._only_forward = False
        self._compileRE()

    def discover(self, fuzzableRequest):
        """
        Searches for links on the html.

        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        """
        om.out.debug("webSpider plugin is testing: " + fuzzableRequest.getURL())

        if self._first_run:
            # I have to set some variables, in order to be able to code the "onlyForward" feature
            self._first_run = False
            self._target_urls = [urlParser.getDomainPath(i) for i in cf.cf.getData("targets")]
            self._target_domain = urlParser.getDomain(cf.cf.getData("targets")[0])

        # If its a form, then smartFill the Dc.
        original_dc = fuzzableRequest.getDc()
        if isinstance(fuzzableRequest, httpPostDataRequest.httpPostDataRequest):

            # TODO!!!!!!
            if fuzzableRequest.getURL() in self._already_filled_form:
                return []
            else:
                self._already_filled_form.add(fuzzableRequest.getURL())

            to_send = original_dc.copy()
            for parameter_name in to_send:

                # I do not want to mess with the "static" fields
                if isinstance(to_send, form.form):
                    if to_send.getType(parameter_name) in ["checkbox", "file", "radio", "select"]:
                        continue

                #
                #   Set all the other fields, except from the ones that have a value set (example:
                #   hidden fields like __VIEWSTATE).
                #
                for element_index in xrange(len(to_send[parameter_name])):

                    #   should I ignore it because it already has a value?
                    if to_send[parameter_name][element_index] != "":
                        continue

                    #   smartFill it!
                    to_send[parameter_name][element_index] = smartFill(parameter_name)

            fuzzableRequest.setDc(to_send)

        self._fuzzableRequests = []
        response = None

        try:
            response = self._sendMutant(fuzzableRequest, analyze=False)
        except KeyboardInterrupt, e:
            raise e
        else:
예제 #18
0
class dav(baseAuditPlugin):
    '''
    Verify if the WebDAV module is properly configured.
    
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''Initialize the plugin and the set of already audited directories.'''
        baseAuditPlugin.__init__(self)
        
        # Internal variables
        # Directories already audited; a bloom filter keeps memory bounded.
        self._already_tested_dirs = ScalableBloomFilter()

    def audit(self, freq ):
        '''
        Entry point: audit the directory of *freq* for insecure DAV methods
        (PUT upload, PROPFIND and SEARCH directory listing).
        
        @param freq: A fuzzableRequest
        '''
        domain_path = urlParser.getDomainPath(freq.getURL())
        if domain_path in self._already_tested_dirs:
            # Each directory is only audited once.
            return

        om.out.debug('dav plugin is testing: ' + freq.getURL())
        self._already_tested_dirs.add(domain_path)

        # Run the individual DAV checks, in the same order as always.
        for dav_check in (self._PUT, self._PROPFIND, self._SEARCH):
            dav_check(domain_path)
            
    def _SEARCH( self, domain_path ):
        '''
        Check whether the WebDAV SEARCH method allows a directory listing
        at *domain_path*; on success a MEDIUM vuln is stored in the kb.
        '''
        # Body of the SEARCH request: an SQL-like query for display names.
        request_lines = ["<?xml version='1.0'?>",
                         "<g:searchrequest xmlns:g='DAV:'>",
                         "<g:sql>",
                         "Select 'DAV:displayname' from scope()",
                         "</g:sql>",
                         "</g:searchrequest>",
                         ""]
        content = "\r\n".join(request_lines)

        res = self._urlOpener.SEARCH(domain_path, data=content)

        # Any of these markers in the body indicates a real DAV answer.
        content_matches = ('<a:response>' in res or '<a:status>' in res
                           or 'xmlns:a="DAV:"' in res)

        if content_matches and res.getCode() in xrange(200, 300):
            v = vuln.vuln()
            v.setPluginName(self.getName())
            v.setURL(res.getURL())
            v.setId(res.id)
            v.setSeverity(severity.MEDIUM)
            v.setName('Insecure DAV configuration')
            v.setMethod('SEARCH')
            v.setDesc('Directory listing with HTTP SEARCH method was found at '
                      'directory: "' + domain_path + '"')
            kb.kb.append(self, 'dav', v)
            
    def _PROPFIND( self, domain_path ):
        '''
        Check whether the WebDAV PROPFIND method discloses resource names
        at *domain_path*; on success a MEDIUM vuln is stored in the kb.
        '''
        # Body of the PROPFIND request asking for display names.
        request_lines = ["<?xml version='1.0'?>",
                         "<a:propfind xmlns:a='DAV:'>",
                         "<a:prop>",
                         "<a:displayname:/>",
                         "</a:prop>",
                         "</a:propfind>",
                         ""]
        content = "\r\n".join(request_lines)

        res = self._urlOpener.PROPFIND(domain_path, data=content,
                                       headers={'Depth': '1'})

        # Remember that httpResponse objects have a faster "__in__" than
        # the one in strings; so string in response.getBody() is slower than
        # string in response
        if "D:href" in res and res.getCode() in xrange(200, 300):
            v = vuln.vuln()
            v.setPluginName(self.getName())
            v.setURL(res.getURL())
            v.setId(res.id)
            v.setSeverity(severity.MEDIUM)
            v.setName('Insecure DAV configuration')
            v.setMethod('PROPFIND')
            v.setDesc('Directory listing with HTTP PROPFIND method was found at '
                      'directory: "' + domain_path + '"')
            kb.kb.append(self, 'dav', v)
        
    def _PUT( self, domain_path ):
        '''
        Tests the PUT method: upload a small random file and GET it back to
        verify that it was actually stored. A confirmed upload is reported as
        a HIGH vuln; 500 and 403 answers are reported as informational
        findings about a broken / under-privileged DAV configuration.
        
        @parameter domain_path: The directory to test, e.g. http://host/f00/
        '''
        # upload a random file name with random content
        url = urlParser.urlJoin( domain_path, createRandAlpha( 5 ) )
        rndContent = createRandAlNum(6)
        put_response = self._urlOpener.PUT( url , data=rndContent )
        
        # check if uploaded
        res = self._urlOpener.GET( url , useCache=True )
        if res.getBody() == rndContent:
            v = vuln.vuln()
            v.setPluginName(self.getName())
            v.setURL( url )
            v.setId( [put_response.id, res.id] )
            v.setSeverity(severity.HIGH)
            v.setName( 'Insecure DAV configuration' )
            v.setMethod( 'PUT' )
            msg = 'File upload with HTTP PUT method was found at resource: "' + domain_path + '".'
            msg += ' A test file was uploaded to: "' + res.getURL() + '".'
            v.setDesc( msg )
            kb.kb.append( self, 'dav', v )
        
        # Report some common errors
        elif put_response.getCode() == 500:
            i = info.info()
            i.setPluginName(self.getName())
            i.setURL( url )
            # Bugfix: reference both transactions, like the other branches do.
            # The 500 comes from the PUT response, so its id must be included.
            i.setId( [put_response.id, res.id] )
            i.setName( 'DAV incorrect configuration' )
            i.setMethod( 'PUT' )
            msg = 'DAV seems to be incorrectly configured. The web server answered with a 500'
            msg += ' error code. In most cases, this means that the DAV extension failed in'
            msg += ' some way. This error was found at: "' + put_response.getURL() + '".'
            i.setDesc( msg )
            kb.kb.append( self, 'dav', i )
        
        # Report some common errors
        elif put_response.getCode() == 403:
            i = info.info()
            i.setPluginName(self.getName())
            i.setURL( url )
            i.setId( [put_response.id, res.id] )
            i.setName( 'DAV insufficient privileges' )
            i.setMethod( 'PUT' )
            msg = 'DAV seems to be correctly configured and allowing you to use the PUT method'
            msg +=' but the directory does not have the correct permissions that would allow'
            msg += ' the web server to write to it. This error was found at: "'
            msg += put_response.getURL() + '".'
            i.setDesc( msg )
            kb.kb.append( self, 'dav', i )
            
    def end(self):
        '''
        This method is called when the plugin won't be used anymore.
        '''
        # Print a de-duplicated summary of all DAV findings, grouped by 'VAR'.
        self.printUniq( kb.kb.getData( 'dav', 'dav' ), 'VAR' )
        
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''    
        # This plugin exposes no user-configurable options.
        ol = optionList()
        return ol
        
    def setOptions( self, optionMap ):
        '''
        This method sets all the options that are configured using the user interface 
        generated by the framework using the result of getOptions().
        
        @parameter OptionList: A dictionary with the options for the plugin.
        @return: No value is returned.
        ''' 
        # Nothing to do: getOptions() returns an empty option list.
        pass

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        # Knowing the allowed methods and the server type first makes the
        # DAV checks more meaningful — presumably why these run earlier.
        return ['discovery.allowedMethods', 'discovery.serverHeader']
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #19
0
class ssn(baseGrepPlugin):
    '''
    This plugin detects the occurence of US Social Security numbers in web pages.

    @author: dliz <dliz !at! users.sourceforge.net>
    '''
    # match numbers of the form: 'nnn-nn-nnnn', 'nnnnnnnnn', 'nnn nn nnnn'
    regex = r'(?:^|[^\d])(\d{3})(?:[\- ]?)(\d{2})(?:[\- ]?)(\d{4})(?:[^\d]|$)'
    ssn_regex = re.compile(regex)
    

    def __init__(self):
        baseGrepPlugin.__init__(self)
        
        # URIs that were already inspected for SSNs.
        self._already_inspected = ScalableBloomFilter()
        self._ssnResponses = []
                
    def grep(self, request, response):
        '''
        Plugin entry point, find the SSN numbers.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None.


        >>> from core.data.url.httpResponse import httpResponse
        >>> from core.data.url.HTTPRequest import HTTPRequest

        Simple test, empty string.
        >>> body = ''
        >>> url = 'http://www.w3af.com/'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> request = HTTPRequest(url)
        >>> s = ssn(); s._already_inspected = set()
        >>> s.grep(request, response)
        >>> len(kb.kb.getData('ssn', 'ssn'))
        0

        With "-" separating the SSN parts
        >>> kb.kb.cleanup(); s._already_inspected = set()
        >>> body = 'header 771-12-9876 footer'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> s.grep(request, response)
        >>> len(kb.kb.getData('ssn', 'ssn'))
        1

        With HTML tags in the middle:
        >>> kb.kb.cleanup(); s._already_inspected = set()
        >>> body = 'header <b>771</b>-<b>12</b>-<b>9876</b> footer'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> s.grep(request, response)
        >>> len(kb.kb.getData('ssn', 'ssn'))
        1

        All the numbers together:
        >>> kb.kb.cleanup(); s._already_inspected = set()
        >>> body = 'header 771129876 footer'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> s.grep(request, response)
        >>> len(kb.kb.getData('ssn', 'ssn'))
        1

        One extra number at the end:
        >>> kb.kb.cleanup(); s._already_inspected = set()
        >>> body = 'header 7711298761 footer'
        >>> headers = {'content-type': 'text/html'}
        >>> response = httpResponse(200, body , headers, url, url)
        >>> s.grep(request, response)
        >>> len(kb.kb.getData('ssn', 'ssn'))
        0
        '''
        uri = response.getURI()
        if response.is_text_or_html() and response.getCode() == 200 and \
            response.getClearTextBody() is not None and \
            uri not in self._already_inspected:
            
            # Don't repeat URLs
            self._already_inspected.add(uri)
            found_ssn, validated_ssn = self._find_SSN(response.getClearTextBody())
            if validated_ssn:
                v = vuln.vuln()
                v.setPluginName(self.getName())
                v.setURI( uri )
                v.setId( response.id )
                v.setSeverity(severity.LOW)
                v.setName( 'US Social Security Number disclosure' )
                msg = 'The URL: "' + uri + '" possibly discloses a US '
                msg += 'Social Security Number: "'+ validated_ssn +'"'
                v.setDesc( msg )
                # Highlight the number exactly as it appeared in the page.
                v.addToHighlight( found_ssn )
                kb.kb.append( self, 'ssn', v )
     
    def _find_SSN(self, body_without_tags):
        '''
        @return: SSN as found in the text and SSN in its regular format if the body had an SSN

        >>> s = ssn()
        >>> s._find_SSN( '' )
        (None, None)
        >>> s._find_SSN( 'header 771129876 footer' )
        ('771129876', '771-12-9876')
        >>> s._find_SSN( '771129876' )
        ('771129876', '771-12-9876')
        >>> s._find_SSN( 'header 771 12 9876 footer' )
        ('771 12 9876', '771-12-9876')
        >>> s._find_SSN( 'header 771 12 9876 32 footer' )
        ('771 12 9876', '771-12-9876')
        >>> s._find_SSN( 'header 771 12 9876 32 64 footer' )
        ('771 12 9876', '771-12-9876')
        >>> s._find_SSN( 'header 771129876 771129875 footer' )
        ('771129876', '771-12-9876')
        '''
        validated_ssn = None
        ssn = None
        # Stop at the first candidate that passes all the validation checks.
        for match in self.ssn_regex.finditer(body_without_tags):
            validated_ssn = self._validate_SSN(match)
            if validated_ssn:
                ssn = match.group(0)
                ssn = ssn.strip()
                break

        return ssn, validated_ssn
    
    
    def _validate_SSN(self, potential_ssn):
        '''
        This method is called to validate the digits of the 9-digit number
        found, to confirm that it is a valid SSN. All the publicly available SSN
        checks are performed. The number is an SSN if: 
        1. the first three digits <= 772
        2. the number does not have all zeros in any digit group 3+2+4 i.e. 000-xx-####,
        ###-00-#### or ###-xx-0000 are not allowed
        3. the number does not start from 666-xx-####. 666 for area code is not allowed
        4. the number is not between 987-65-4320 to 987-65-4329. These are reserved for advts
        5. the number is not equal to 078-05-1120

        Source of information: wikipedia and socialsecurity.gov

        @parameter potential_ssn: A regex match object holding the three groups.
        @return: The SSN in canonical 'nnn-nn-nnnn' form, or a false value.
        '''
        area_number = int(potential_ssn.group(1))
        group_number = int(potential_ssn.group(2))
        serial_number = int(potential_ssn.group(3))

        # Check 2: no all-zero digit group is allowed.
        if not group_number:
            return False
        if not serial_number:
            return False

        # Check 3: 666 is never issued as an area number.
        # (Bugfix: the docstring promised this check but it was missing.)
        if area_number == 666:
            return False

        # Check 4: 987-65-4320 .. 987-65-4329 are reserved for advertisements.
        # (Bugfix: the docstring promised this check but it was missing.)
        if area_number == 987 and group_number == 65 and \
            4320 <= serial_number <= 4329:
            return False

        # Check 5: 078-05-1120, the famous Woolworth "specimen" SSN.
        # (Bugfix: the docstring promised this check but it was missing.)
        if (area_number, group_number, serial_number) == (78, 5, 1120):
            return False

        # Checks 1 and the high-group validation: the area must exist in the
        # map, and the group must not exceed the highest group issued for it.
        group = areas_groups_map.get(area_number)        
        if not group:
            return False
        
        odd_one = xrange(1, 11, 2)
        even_two = xrange(10, 100, 2) # (10-98 even only)
        even_three = xrange(2, 10, 2)
        odd_four = xrange(11, 100, 2) # (11-99 odd only)
        le_group = lambda x: x <= group
        isSSN = False
    
        # Groups are issued in the order: odd 01-09, even 10-98, even 02-08,
        # odd 11-99; a group is valid if it was issued before the area's
        # current high group.

        # For little odds (odds between 1 and 9)
        if group in odd_one:
            if group_number <= group:
                isSSN = True

        # For big evens (evens between 10 and 98)
        elif group in even_two:
            if group_number in itertools.chain(odd_one, 
                                               filter(le_group, even_two)):
                isSSN = True

        # For little evens (evens between 2 and 8)
        elif group in even_three:
            if group_number in itertools.chain(odd_one, even_two,
                                               filter(le_group, even_three)):
                isSSN = True

        # For big odds (odds between 11 and 99)
        elif group in odd_four:
            if group_number in itertools.chain(odd_one, even_two, even_three,
                                               filter(le_group, odd_four)):
                isSSN = True
        
        if isSSN:
            # Bugfix: zero-pad each part. '%s' on ints dropped leading zeros,
            # e.g. 078-05-1120 was rendered as '78-5-1120'.
            return '%03d-%02d-%04d' % (area_number, group_number, serial_number)
        return None



    def end(self):
        '''
        This method is called when the plugin won't be used anymore.
        '''
        # Print results
        self.printUniq( kb.kb.getData( 'ssn', 'ssn' ), 'URL' )

    def getOptions(self):
        '''
        @return: A list of option objects for this plugin.
        '''    
        # This plugin exposes no user-configurable options.
        ol = optionList()
        return ol
        
    def setOptions(self, opt):
        '''Nothing to configure: getOptions() returns an empty option list.'''
        pass
     
    def getLongDesc(self):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
        This plugins scans every response page to find the strings that are likely to be 
        the US social security numbers. 
        '''
        
    def getPluginDeps(self):
        '''
        @return: A list with the names of the plugins that should be run before the
        current one.
        '''
        return []
예제 #20
0
class slash( baseDiscoveryPlugin ):
    '''
    Identify if the resource http://host.tld/spam/ and http://host.tld/spam are the same.
    
    @author: Nicolas Rotta ( [email protected] )  
    '''
    
    def __init__( self ):
        '''Initialize the plugin and the filter of already visited URLs.'''
        baseDiscoveryPlugin.__init__( self )
        # URLs already processed; avoids the a <--> a/ ping-pong.
        self._already_visited = ScalableBloomFilter()
        
    def discover( self, fuzzableRequest ):
        '''
        Generates a new URL by adding or substracting the '/' character and
        requests it to see if both resources exist.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains
            (among other things) the URL to test.
        '''
        self._fuzzableRequests = []
        
        url = fuzzableRequest.getURL()
        if url not in self._already_visited:
            self._already_visited.add(url)

            om.out.debug('slash plugin is testing: "' + fuzzableRequest.getURI() + '".')

            modified_request = self._get_fuzzed_request(fuzzableRequest)
            original_response = self._urlOpener.GET(fuzzableRequest.getURL(),
                                                    useCache=True)

            # Request the modified URL in a worker thread and wait for it.
            self._tm.startFunction(target=self._do_request,
                                   args=(modified_request, original_response),
                                   ownerObj=self)
            self._tm.join(self)

            # Remember the variant too, so it is not re-tested later.
            self._already_visited.add(modified_request.getURL())
                
        return self._fuzzableRequests

    def _get_fuzzed_request( self, fuzzableRequest ):
        '''
        Build a copy of *fuzzableRequest* whose URL toggles the trailing '/'.
        
        @param fuzzableRequest: The original fuzzableRequest
        @return: The modified fuzzableRequest.
        '''
        modified = fuzzableRequest.copy()
        original_url = fuzzableRequest.getURL()

        if original_url.endswith('/'):
            # Drop the trailing slash(es).
            modified.setURL(original_url.rstrip('/'))
        else:
            # Append a trailing slash.
            modified.setURL(original_url + '/')

        return modified
        
    def _do_request( self, fuzzableRequest, orig_resp ):
        '''
        Sends the request.
        @parameter fuzzableRequest: The fuzzable request object to modify.
        @parameter orig_resp: The response for the original request that was sent.
        '''
        try:
            resp = self._urlOpener.GET(fuzzableRequest.getURI(), useCache=True)
        except KeyboardInterrupt, e:
            raise e
        else:
예제 #21
0
class digitSum(baseDiscoveryPlugin):
    '''
    Take an URL with a number ( index2.asp ) and try to find related files (index1.asp, index3.asp).
    @author: Andres Riancho ( [email protected] )  
    '''

    def __init__(self):
        '''Initialize internal state and the user-configurable options.'''
        baseDiscoveryPlugin.__init__(self)
        # URIs already generated/visited, to avoid infinite mangle loops.
        self._already_visited = ScalableBloomFilter()
        self._first_time = True
        
        # This is for the Referer
        self._headers = {}
        
        # User options
        # Whether to also mangle digits in image URLs.
        self._fuzz_images = False
        # Presumably limits how many numeric sections of a URL get mangled —
        # TODO confirm against _mangle_digits (not visible here).
        self._max_digit_sections = 4
        
    def discover(self, fuzzableRequest ):
        '''
        Searches for new URLs by adding and substracting numbers to the url
        and the parameters.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains
            (among other things) the URL to test.
        @return: The list of fuzzable requests collected by the worker threads.
        '''
        self._fuzzableRequests = []
            
        url = fuzzableRequest.getURL()
        # Send a Referer header so the requests look like regular navigation.
        self._headers = {'Referer':url}
        
        if self._first_time:
            self._first_time = False
        
        om.out.debug('digitSum is testing ' + fuzzableRequest.getURL() )
        original_response = self._urlOpener.GET( fuzzableRequest.getURL(), \
                                                            useCache=True, headers=self._headers )
        
        if original_response.is_text_or_html() or self._fuzz_images:
            for fr in self._mangle_digits( fuzzableRequest ):
                # Bugfix: compare using getURI() -- the same key that is added
                # below -- instead of getURL(). Mangled requests may differ
                # only in the query string, which the URL does not include,
                # so the old check never de-duplicated those.
                if fr.getURI() not in self._already_visited:
                    self._already_visited.add( fr.getURI() )
                    
                    targs = ( fr, original_response)
                    self._tm.startFunction( target=self._do_request, args=targs , ownerObj=self )
            
            # Wait for all threads to finish
            self._tm.join( self )
            
            # I add myself so the next call to this plugin wont find me ...
            # Example: index1.html ---> index2.html --!!--> index1.html
            self._already_visited.add( fuzzableRequest.getURI() )
                
        return self._fuzzableRequests

    def _do_request(self, fuzzableRequest, original_resp):
        '''
        Send the request.
        @parameter fuzzableRequest: The fuzzable request object to modify.
        @parameter original_resp: The response for the original request that was sent.
        '''
        try:
            response = self._urlOpener.GET(fuzzableRequest.getURI(), useCache=True,
                                                            headers=self._headers)
        except KeyboardInterrupt, e:
            raise e
        else:
예제 #22
0
class frontpage(baseAuditPlugin):
    '''
    Tries to upload a file using frontpage extensions (author.dll).
    
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''Initialize the plugin state and options.'''
        baseAuditPlugin.__init__(self)
        
        # Internal variables
        # Directories already audited for author.dll uploads.
        self._already_tested = ScalableBloomFilter()
        # When True, stop auditing after the first confirmed vulnerability.
        self._stop_on_first = True

    def audit(self, freq ):
        '''
        Searches for file upload vulns using a POST to author.dll.
        
        @param freq: A fuzzableRequest
        '''
        domain_path = urlParser.getDomainPath(freq.getURL())

        if self._stop_on_first and kb.kb.getData('frontpage', 'frontpage'):
            # Nothing to do, I have found vuln(s) and I should stop on first
            msg = 'Not verifying if I can upload files to: "' + domain_path + '" using author.dll'
            msg += '. Because I already found one vulnerability.'
            om.out.debug(msg)
            return

        # I haven't found any vulns yet, OR i'm trying to find every
        # directory where I can write a file.
        if domain_path in self._already_tested:
            return

        om.out.debug('frontpage plugin is testing: ' + freq.getURL())
        self._already_tested.add(domain_path)

        # Find a file name that does NOT exist on the server, so the later
        # verification can't be fooled by pre-existing content.
        rand_file = None
        for _ in xrange(3):
            candidate = createRandAlpha(5) + '.html'
            candidate_url = urlParser.urlJoin(domain_path, candidate)
            if is_404(self._urlOpener.GET(candidate_url)):
                rand_file = candidate
                break

        if rand_file is not None:
            upload_id = self._upload_file(domain_path, rand_file)
            self._verify_upload(domain_path, rand_file, upload_id)
        else:
            msg = 'frontpage plugin failed to find a 404 page. This is mostly because of an'
            msg += ' error in 404 page detection.'
            om.out.error(msg)
            
    def _upload_file( self, domain_path,  randFile ):
        '''
        Upload the file using author.dll
        
        @parameter domain_path: http://localhost/f00/
        @parameter randFile: fj01afka.html
        '''
        file_path = urlParser.getPath(domain_path) + randFile
        
        # TODO: The frontpage version should be obtained from the information saved in the kb
        # by the discovery.frontpage_version plugin!
        # The 4.0.2.4715 version should be dynamic!
        # The information is already saved in the discovery plugin in the line:
        # i['version'] = version_match.group(1)
        content = "method=put document:4.0.2.4715&service_name=&document=[document_name="
        content += file_path
        content += ";meta_info=[]]&put_option=overwrite&comment=&keep_checked_out=false"
        content += '\n'
        # The content of the file I'm uploading is the file name reversed
        content += randFile[::-1]
        
        # TODO: The _vti_bin and _vti_aut directories should be PARSED from the _vti_inf file
        # inside the discovery.frontpage_version plugin, and then used here
        targetURL = urlParser.urlJoin( domain_path, '_vti_bin/_vti_aut/author.dll' )

        try:
            res = self._urlOpener.POST( targetURL , data=content )
        except w3afException,  e:
            om.out.debug('Exception while uploading file using author.dll: ' + str(e))
        else:
예제 #23
0
class objects(baseGrepPlugin):
    '''
    Grep every page for objects and applets.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        '''Initialize the tags of interest and the de-duplication filter.'''
        baseGrepPlugin.__init__(self)
        
        # HTML tags that embed external content we want to report.
        self._tag_names = ['object', 'applet']
        
        self._already_analyzed = ScalableBloomFilter()

    def grep(self, request, response):
        '''
        Plugin entry point. Parse the object/applet tags and report each tag
        type found as an informational finding.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        url = response.getURL()
        if not response.is_text_or_html() or url in self._already_analyzed:
            return

        self._already_analyzed.add(url)

        dom = response.getDOM()
        # In some strange cases, we fail to normalize the document
        if dom is None:
            return

        for tag_name in self._tag_names:
            # Look for the tag anywhere in the document.
            if not dom.xpath('//%s' % tag_name):
                continue

            i = info.info()
            i.setPluginName(self.getName())
            i.setName(tag_name.title() + ' tag')
            i.setURL(url)
            i.setId(response.id)
            i.setDesc('The URL: "' + i.getURL() + '" has an ' + tag_name + ' tag.')
            i.addToHighlight(tag_name)

            kb.kb.append(self, tag_name, i)
    
    def setOptions( self, OptionList ):
        '''Nothing to configure: getOptions() returns an empty option list.'''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''    
        # This plugin exposes no user-configurable options.
        ol = optionList()
        return ol

    def end(self):
        '''
        This method is called when the plugin won't be used anymore.
        '''
        # Print objects
        self.printUniq( kb.kb.getData( 'objects', 'object' ), 'URL' )
        
        # Print applets
        self.printUniq( kb.kb.getData( 'objects', 'applet' ), 'URL' )
        
    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        # No dependencies: this grep plugin runs on its own.
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #24
0
class formAutocomplete(baseGrepPlugin):
    '''
    Grep every page for detection of forms with 'autocomplete' capabilities 
    containing password-type inputs.
      
    @author: Javier Andalia ([email protected])
    '''

    def __init__(self):
        baseGrepPlugin.__init__(self)
        # URLs already checked for auto-completable forms.
        self._already_inspected = ScalableBloomFilter()

    def grep(self, request, response):
        '''
        Plugin entry point, test existance of HTML auto-completable forms
        containing password-type inputs. Either form's <autocomplete> attribute
        is not present or is 'off'.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None, all results are saved in the kb.
        '''
        url = response.getURL()

        if not response.is_text_or_html() or url in self._already_inspected:
            return

        self._already_inspected.add(url)
        dom = response.getDOM()

        if dom is None:
            return

        # Loop through "auto-completable" forms
        for form in dom.xpath(AUTOCOMPLETE_FORMS_XPATH):

            # Only report forms that actually contain a password-type input
            if not form.xpath(PWD_INPUT_XPATH):
                continue

            # NOTE(review): unlike sibling plugins this never calls
            # setPluginName() and uses info() directly — confirm the
            # module-level import makes that the intended API here.
            inf = info()
            inf.setName('Auto-completable form')
            inf.setURL(url)
            inf.setId(response.id)
            msg = ('The URL: "%s" has <form> element with '
                   'autocomplete capabilities.' % url)
            inf.setDesc(msg)
            form_str = etree.tostring(form)
            # Highlight only the opening <form ...> tag.
            inf.addToHighlight(form_str[:form_str.find('>') + 1])
            kb.kb.append(self, 'formAutocomplete', inf)
            # Also send 'msg' to console
            om.out.information(msg)
            # Enough with one input
            break


    def setOptions(self, OptionList):
        '''Nothing to configure: getOptions() returns an empty option list.'''
        pass

    def getOptions(self):
        '''
        @return: A list of option objects for this plugin.
        '''
        # This plugin exposes no user-configurable options.
        return optionList()

    def getLongDesc(self):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return ('This plugin greps every page for autocomplete-able forms '
                'containing password-type inputs.')

    def getPluginDeps(self):
        '''
        @return: A list with the names of the plugins that should be runned
        before the current one.
        '''
        return []
예제 #25
0
class httpInBody (baseGrepPlugin):
    """
    Search for HTTP request/response string in response body.
    @author: Andres Riancho ( [email protected] )
    """
    def __init__(self):
        '''Initialize the de-duplication filter and the two detection regexes.'''
        baseGrepPlugin.__init__(self)
        
        # URIs already inspected.
        self._already_inspected = ScalableBloomFilter()
        
        # re that searches for a request line, e.g.:
        #GET / HTTP/1.0
        self._re_request = re.compile('[a-zA-Z]{3,6} .*? HTTP/1.[01]')
        
        # re that searches for a status line, e.g.:
        #HTTP/1.1 200 OK
        self._re_response = re.compile('HTTP/1.[01] [0-9][0-9][0-9] [a-zA-Z]*')
                
    def grep(self, request, response):
        '''
        Plugin entry point. Search the response body for text that looks like
        an HTTP request or an HTTP response, which usually indicates a proxy
        or debugging artifact leaking into the page.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None, all results are saved in the kb.
        '''
        uri = response.getURI()
        # 501 Code is "Not Implemented" which in some cases responds with this in the body:
        # <body><h2>HTTP/1.1 501 Not Implemented</h2></body>
        # Which creates a false positive.
        if response.getCode() != 501 and uri not in self._already_inspected \
            and response.is_text_or_html():
            # Don't repeat URLs
            self._already_inspected.add(uri)

            # First if, mostly for performance.
            # Remember that httpResponse objects have a faster "__in__" than
            # the one in strings; so string in response.getBody() is slower than
            # string in response
            if 'HTTP/1' in response and response.getClearTextBody() is not None:

                # Now, remove tags
                body_without_tags = response.getClearTextBody()

                res = self._re_request.search(body_without_tags)
                if res:
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName('HTTP Request in HTTP body')
                    i.setURI(uri)
                    i.setId(response.id)
                    i.setDesc('An HTTP request was found in the HTTP body of a response')
                    i.addToHighlight(res.group(0))
                    kb.kb.append(self, 'request', i)

                res = self._re_response.search(body_without_tags)
                if res:
                    i = info.info()
                    i.setPluginName(self.getName())
                    i.setName('HTTP Response in HTTP body')
                    i.setURI(uri)
                    i.setId(response.id)
                    i.setDesc('An HTTP response was found in the HTTP body of a response')
                    # Bugfix: highlight the matched text, for consistency with
                    # the request branch above (it was missing here).
                    i.addToHighlight(res.group(0))
                    kb.kb.append(self, 'response', i)

    def setOptions( self, optionsMap ):
        '''Nothing to configure: getOptions() returns an empty option list.'''
        pass
            
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''    
        # This plugin exposes no user-configurable options.
        ol = optionList()
        return ol
        
    def end(self):
        '''
        This method is called when the plugin won't be used anymore.
        '''
        # Summarize both kinds of findings stored by grep().
        for info_type in ['request', 'response']:
            
            if kb.kb.getData('httpInBody', info_type):
                msg = 'The following URLs have an HTTP '+ info_type +' in the HTTP response body:'
                om.out.information(msg)
                for i in kb.kb.getData('httpInBody', info_type):
                    om.out.information('- ' + i.getURI() + '  (id:' + str(i.getId()) + ')' )
        
    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be runned before the
        current one.
        '''
        # No dependencies: this grep plugin runs on its own.
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #26
0
class oracle(baseGrepPlugin):
    '''
    Find Oracle applications.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseGrepPlugin.__init__(self)
        # Bloom filter with the URLs that grep() already analyzed, so each
        # URL is only inspected once.
        self._already_analyzed = ScalableBloomFilter()
        
    def grep(self, request, response):
        '''
        Plugin entry point. Grep for oracle applications.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        url = response.getURL()
        if not response.is_text_or_html() or url in self._already_analyzed:
            return
        self._already_analyzed.add(url)

        for signature in self._getDescriptiveMessages():
            # httpResponse objects have a faster "__in__" than plain strings,
            # so "signature in response" beats "signature in response.getBody()".
            if signature not in response:
                continue

            i = info.info()
            i.setPluginName(self.getName())
            i.setName('Oracle application')
            i.setURL(url)
            i.setId(response.id)
            i.addToHighlight(signature)
            desc = 'The URL: "' + url + '" was created using Oracle'
            desc += ' Application server.'
            i.setDesc(desc)
            kb.kb.append(self, 'oracle', i)

    def _getDescriptiveMessages( self ):
        '''
        @return: A list of strings whose presence in a response body
        identifies an Oracle Application server.
        '''
        return ['<!-- Created by Oracle ']
        
    def setOptions( self, OptionList ):
        '''
        This plugin has no user-configurable options; nothing to do.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin (empty: no
        configurable options).
        '''
        return optionList()

    def end(self):
        '''
        Called when the plugin won't be used anymore; print the unique
        Oracle findings that were stored in the kb.
        '''
        oracle_infos = kb.kb.getData('oracle', 'oracle')
        self.printUniq(oracle_infos, 'URL')

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be run
        before the current one; this plugin has no dependencies.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #27
0
class wsdlFinder(baseDiscoveryPlugin):
    '''
    Find web service definitions files.
    
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseDiscoveryPlugin.__init__(self)
        
        # Internal variables
        # URLs for which the "?wsdl" / "?WSDL" requests were already sent.
        self._already_tested = ScalableBloomFilter()
        # discover() returns this list; nothing in this plugin appends to it —
        # the responses are analyzed by the grep.wsdlGreper plugin instead.
        self._new_fuzzable_requests = []
        
    def discover(self, fuzzableRequest ):
        '''
        If the URL was not tested before, append a ?wsdl (and ?WSDL) to it
        and request the result, so the wsdlGreper plugin can analyze the
        responses.
        
        @parameter fuzzableRequest: A fuzzableRequest instance that contains (among other things) the URL to test.
        '''
        url = urlParser.uri2url(fuzzableRequest.getURL())
        if url not in self._already_tested:
            self._already_tested.add(url)

            # Fire one GET per candidate WSDL query string, each in its
            # own thread.
            for query_string in self._get_WSDL():
                self._tm.startFunction(target=self._do_request,
                                       args=(url + query_string,),
                                       ownerObj=self)

            # Block until every request thread has completed.
            self._tm.join(self)

        return self._new_fuzzable_requests

    def _do_request(self, url_to_request):
        '''
        Perform an HTTP GET to the url_to_request parameter. The response
        itself is analyzed by the grep.wsdlGreper plugin, so a failure only
        deserves a debug message here.

        @return: None.
        '''
        try:
            self._urlOpener.GET(url_to_request, useCache=True)
        except w3afException:
            om.out.debug('Failed to request the WSDL file: ' + url_to_request)

    def _get_WSDL( self ):
        '''
        @return: A list of query strings that are appended to a URL in
        order to request the WSDL.
        '''
        return ['?wsdl', '?WSDL']
        
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin (empty: no
        configurable options).
        '''
        return optionList()
        
    def setOptions( self, OptionList ):
        '''
        Apply the options that were configured through the user interface
        generated by the framework using the result of getOptions(). This
        plugin has no options, so nothing is done.
        
        @parameter OptionList: A dictionary with the options for the plugin.
        @return: No value is returned.
        '''
        pass

    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be run
        before the current one. The WSDL responses requested here are
        analyzed by grep.wsdlGreper, so that plugin must be enabled.
        '''
        return ['grep.wsdlGreper']
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #28
0
class svnUsers(baseGrepPlugin):
    '''
    Grep every response for users of the versioning system.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseGrepPlugin.__init__(self)
        # URIs already inspected by grep(), so each URI is only analyzed once.
        self._already_inspected = ScalableBloomFilter()
        # Add the regex to match something like this:
        #
        #   $Id: lzio.c,v 1.24 2003/03/20 16:00:56 roberto Exp $
        #   $Id: file name, version, timestamp, creator Exp $
        #
        # Raw string literals so the regex escapes (\$, \d) reach the regex
        # engine verbatim instead of being treated as string escapes.
        regex = r'\$.{1,12}: .*? .*? \d{4}[-/]\d{1,2}[-/]\d{1,2}'
        regex += r' \d{1,2}:\d{1,2}:\d{1,2}.*? (.*?) (Exp )?\$'
        self._regex_list = [ re.compile(regex) ]
        
    def grep(self, request, response):
        '''
        Plugin entry point. Find SVN keyword-expansion signatures in the
        response body and report the usernames they disclose.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None, all results are saved in the kb.
        '''
        uri = response.getURI()
        if response.is_text_or_html() and uri not in self._already_inspected:

            # Don't repeat URLs
            self._already_inspected.add(uri)

            for regex in self._regex_list:
                # findall() yields one (username, 'Exp ') tuple per keyword
                # signature; m[0] is the committer's username.
                for m in regex.findall(response.getBody()):
                    v = vuln.vuln()
                    v.setPluginName(self.getName())
                    v.setURI(uri)
                    v.setId(response.id)
                    # Bug fix: the message previously contained the literal
                    # placeholder "******" instead of the username that was
                    # actually captured, so the report never named the user.
                    msg = 'The URL: "' + uri + '" contains a SVN versioning '
                    msg += 'signature with the username: "' + m[0] + '".'
                    v.setDesc(msg)
                    v['user'] = m[0]
                    v.setSeverity(severity.LOW)
                    v.setName('SVN user disclosure vulnerability')
                    v.addToHighlight(m[0])
                    kb.kb.append(self, 'users', v)

        
    def setOptions( self, OptionList ):
        '''
        This plugin has no user-configurable options; nothing to do.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin (empty: no
        configurable options).
        '''
        return optionList()

    def end(self):
        '''
        Called when the plugin won't be used anymore; print the unique
        SVN user findings that were stored in the kb.
        '''
        user_vulns = kb.kb.getData('svnUsers', 'users')
        self.printUniq(user_vulns, 'URL')
    
    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be run
        before the current one; this plugin has no dependencies.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #29
0
class getMails(baseGrepPlugin):
    '''
    Find email accounts.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseGrepPlugin.__init__(self)
        # User configured variables
        # When True, only report addresses for the target's root domain
        # (exposed as the 'onlyTargetDomain' option in get/setOptions).
        self._only_target_domain = True
        # URIs that grep() already inspected, so each is handled once.
        self._already_inspected = ScalableBloomFilter()

    def grep(self, request, response):
        '''
        Plugin entry point, get the emails and save them to the kb.
        
        @parameter request: The HTTP request
        @parameter response: The HTTP response
        @return: None
        '''
        uri = response.getURI()
        if uri in self._already_inspected:
            return
        self._already_inspected.add(uri)

        # Always grep for addresses in the target's root domain...
        target_domain = urlParser.getRootDomain(response.getURL())
        self._grep_worker(request, response, 'mails', target_domain)

        # ...and optionally for addresses in any other domain.
        if not self._only_target_domain:
            self._grep_worker(request, response, 'external_mails')
            
    def _grep_worker(self, request, response, kb_key, domain=None):
        '''
        Helper method for using in self.grep(): extract the email addresses
        from the response and store/merge them into the knowledge base.
        
        @parameter request: The HTTP request
        @parameter response: The HTTP response
        @parameter kb_key: Knowledge base dict key ('mails' or 'external_mails')
        @parameter domain: Target domain used to filter dp.getEmails(); when
                           None, presumably no domain filter is applied —
                           TODO confirm against the parser implementation.
        @return: None
        '''
        # Modified when I added the pdfParser
        #if isTextOrHtml(response.getHeaders()):
        try:
            dp = dpCache.dpc.getDocumentParserFor( response )
        except w3afException:
            # Can't parse the document (unknown content type, etc.) -> no
            # emails can be extracted from it.
            msg = 'If I can\'t parse the document, I won\'t be able to find any emails.'
            msg += ' Ignoring the response for "' + response.getURL() + '".'
            om.out.debug( msg )
            return

        mails = dp.getEmails(domain)
        
        for mail_address in mails:
            # Reduce false positives: an address that was part of the request
            # we sent is likely just echoed back, not disclosed.
            if request.sent( mail_address ):
                continue
                
            # Email address are case insensitive
            mail_address = mail_address.lower()
            url = response.getURL()

            # NOTE(review): this map is rebuilt from the kb on every loop
            # iteration (quadratic in the number of known mails). Rebuilding
            # inside the loop does however pick up the info objects appended
            # below when kb_key == 'mails', so hoisting it is not a trivially
            # safe change.
            email_map = {}
            for info_obj in kb.kb.getData( 'mails', 'mails'):
                mail_string = info_obj['mail']
                email_map[ mail_string ] = info_obj

            if mail_address not in email_map:
                # Create a new info object, and report it
                i = info.info()
                i.setPluginName(self.getName())
                i.setURL(url)
                i.setId( response.id )
                i.setName( mail_address )
                desc = 'The mail account: "'+ mail_address + '" was found in: '
                desc += '\n- ' + url
                desc += ' - In request with id: '+ str(response.id)
                i.setDesc( desc )
                i['mail'] = mail_address
                i['url_list'] = [url]
                i['user'] = mail_address.split('@')[0]
                i.addToHighlight( mail_address )
                # Stored under the 'mails' location (not self); end() reads
                # the results back from ('mails', 'mails'/'external_mails').
                kb.kb.append( 'mails', kb_key, i )
            
            else:
            
                # Get the corresponding info object.
                i = email_map[ mail_address ]
                # And work
                if url not in i['url_list']:
                    # This email was already found in some other URL
                    # I'm just going to modify the url_list and the description message
                    # of the information object.
                    id_list_of_info = i.getId()
                    id_list_of_info.append( response.id )
                    i.setId( id_list_of_info )
                    # Clear the single-URL field: the finding now spans
                    # several URLs, which are listed in the description.
                    i.setURL('')
                    desc = i.getDesc()
                    desc += '\n- ' + url
                    desc += ' - In request with id: '+ str(response.id)
                    i.setDesc( desc )
                    i['url_list'].append(url)
        
    def setOptions( self, optionsMap ):
        '''
        Apply the options configured through the framework's user interface.

        @parameter optionsMap: A dictionary with the options for the plugin.
        '''
        self._only_target_domain = optionsMap['onlyTargetDomain'].getValue()
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin.
        '''
        d1 = 'When greping, only search mails for domain of target'
        o1 = option('onlyTargetDomain', self._only_target_domain, d1, 'boolean')
        
        # Bug fix: the original built a first optionList() that was
        # immediately discarded and replaced by a second one.
        ol = optionList()
        ol.add(o1)
        return ol

    def end(self):
        '''
        Called when the plugin won't be used anymore; print the unique
        mail findings (both in-domain and external) stored in the kb.
        '''
        for kb_key in ('mails', 'external_mails'):
            self.printUniq(kb.kb.getData('mails', kb_key), None)
    
    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be run
        before the current one; this plugin has no dependencies.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''
예제 #30
0
class hashFind(baseGrepPlugin):
    '''
    Identify hashes in HTTP responses.
      
    @author: Andres Riancho ( [email protected] )
    '''

    def __init__(self):
        baseGrepPlugin.__init__(self)
        
        # (hash, URL) pairs that were already reported to the kb.
        self._already_reported = ScalableBloomFilter()
        
        # Regex to split the body into words. Raw string so the \w escape
        # reaches the regex engine verbatim.
        self._split_re = re.compile(r'[^\w]')
        
        
    def grep(self, request, response):
        '''
        Plugin entry point, identify hashes in the HTTP response.
        
        @parameter request: The HTTP request object.
        @parameter response: The HTTP response object
        @return: None
        '''
        # I know that by doing this I loose the chance of finding hashes in
        # PDF files, but... this is much faster.
        if not response.is_text_or_html():
            return

        for candidate in self._split_re.split(response.getBody()):

            # Performance enhancement that cuts the execution time of this
            # plugin in half: anything shorter than 32 chars can't be one of
            # the supported hash types.
            if len(candidate) <= 31:
                continue

            hash_type = self._get_hash_type(candidate)
            if not hash_type:
                continue
            if not self._has_hash_distribution(candidate):
                continue

            report_key = (candidate, response.getURL())
            if report_key in self._already_reported:
                continue

            i = info.info()
            i.setPluginName(self.getName())
            i.setName( hash_type + 'hash in HTML content')
            i.setURL( response.getURL() )
            i.addToHighlight(candidate)
            i.setId( response.id )
            msg = 'The URL: "'+ response.getURL()  + '" returned a response that may'
            msg += ' contain a "' + hash_type + '" hash. The hash is: "'+ candidate
            msg += '". This is uncommon and requires human verification.'
            i.setDesc( msg )
            kb.kb.append( self, 'hashFind', i )

            self._already_reported.add(report_key)
    
    def _has_hash_distribution( self, possible_hash ):
        '''
        @parameter possible_hash: A string that may be a hash.
        @return: True if the string has an (approximately) equal distribution
        of digits and letters, as expected from a hex digest.
        '''
        digits = sum(1 for char in possible_hash if char.isdigit())
        letters = len(possible_hash) - digits
        
        # The original test was "digits in range(letters - n/2, letters + n/2)",
        # which builds a throw-away list on every call; the chained comparison
        # below is the equivalent O(1) check. Floor division (//) keeps the
        # original Python 2 integer-division semantics.
        half = len(possible_hash) // 2
        if not (letters - half <= digits < letters + half):
            return False
        
        # Seems to be a hash, let's make a final test to avoid false positives
        # with strings like:
        # 2222222222222222222aaaaaaaaaaaaa
        # No single character may account for more than a fifth of the string.
        threshold = len(possible_hash) // 5
        for char in possible_hash:
            if possible_hash.count(char) > threshold:
                return False
        return True
        
    def _get_hash_type( self, possible_hash ):
        '''
        @parameter possible_hash: A string that may be a hash.
        @return: The hash type if the string seems to be a md5 / sha1 hash.
        None otherwise.
        '''
        # FIXME: Add more here! (hex digest length -> hash type)
        length_to_type = {32: 'MD5', 40: 'SHA1'}
        return length_to_type.get(len(possible_hash), None)
    
    def setOptions( self, OptionList ):
        '''
        This plugin has no user-configurable options; nothing to do.
        '''
        pass
    
    def getOptions( self ):
        '''
        @return: A list of option objects for this plugin (empty: no
        configurable options).
        '''
        return optionList()

    def end(self):
        '''
        Called when the plugin won't be used anymore; print the unique
        hash findings that were stored in the kb.
        '''
        hash_infos = kb.kb.getData('hashFind', 'hashFind')
        self.printUniq(hash_infos, None)
    
    def getPluginDeps( self ):
        '''
        @return: A list with the names of the plugins that should be run
        before the current one; this plugin has no dependencies.
        '''
        return []
    
    def getLongDesc( self ):
        '''
        @return: A DETAILED description of the plugin functions and features.
        '''
        return '''