Example #1
File: ssi.py Project: ElAleyo/w3af
        def filtered_freq_generator(freq_list):
            already_tested = ScalableBloomFilter()

            for freq in freq_list:
                if freq not in already_tested:
                    already_tested.add(freq)
                    yield freq
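
Every example on this page uses the same idiom: a ScalableBloomFilter acts as a low-memory "already seen" set, where membership is tested with `in` and recorded with add(). A minimal driver for the generator, as a sketch (the sample list is made up; the import path is the one used in the w3af source tree, stated here as an assumption; the generator is repeated so the sketch runs standalone):

# Assumed import path from the w3af source tree:
from w3af.core.data.bloomfilter.scalable_bloom import ScalableBloomFilter

def filtered_freq_generator(freq_list):
    already_tested = ScalableBloomFilter()
    for freq in freq_list:
        if freq not in already_tested:
            already_tested.add(freq)
            yield freq

# Duplicates are dropped: prints 'a', 'b' and 'c' exactly once each.
for freq in filtered_freq_generator(['a', 'b', 'a', 'b', 'c']):
    print(freq)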
Example #2
class directory_indexing(GrepPlugin):
    """
    Grep every response for directory indexing problems.

    :author: Andres Riancho ([email protected])
    """

    DIR_INDEXING = (
        "<title>Index of /",
        '<a href="?C=N;O=D">Name</a>',
        '<A HREF="?M=A">Last modified</A>',
        "Last modified</a>",
        "Parent Directory</a>",
        "Directory Listing for",
        "<TITLE>Folder Listing.",
        '<table summary="Directory Listing" ',
        "- Browsing directory ",
        # IIS 6.0 and 7.0
        '">[To Parent Directory]</a><br><br>',
        # IIS 5.0
        '<A HREF=".*?">.*?</A><br></pre><hr></body></html>')
    _multi_in = multi_in(DIR_INDEXING)

    def __init__(self):
        GrepPlugin.__init__(self)

        self._already_visited = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point, search for directory indexing.
        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if not response.is_text_or_html():
            return

        if response.get_url().get_domain_path() in self._already_visited:
            return

        self._already_visited.add(response.get_url().get_domain_path())

        html_string = response.get_body()
        if self._multi_in.query(html_string):

            desc = 'The URL: "%s" has a directory indexing vulnerability.'
            desc = desc % response.get_url()

            v = Vuln('Directory indexing', desc, severity.LOW, response.id,
                     self.get_name())
            v.set_url(response.get_url())

            self.kb_append_uniq(self, 'directory', v, 'URL')

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #3
    def test_bloom_filter(self):
        f = ScalableBloomFilter()

        for i in xrange(20000):
            data = (i, i)
            f.add(data)

        for i in xrange(20000):
            data = (i, i)
            # A bare "data in f" would discard the result; assert membership
            # so the test verifies that bloom filters have no false negatives.
            self.assertIn(data, f)
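
This test exercises the entire surface the plugins on this page rely on: add() and membership via `in`. In unit tests where the probabilistic behavior is irrelevant, an exact set-backed stand-in with the same two operations could be swapped in (a sketch; the real ScalableBloomFilter trades exactness for memory, so it may report rare false positives but never false negatives):

class SetBackedBloomFilter(object):
    """Exact stand-in exposing the add()/__contains__ surface used here."""
    def __init__(self):
        self._items = set()

    def add(self, item):
        self._items.add(item)

    def __contains__(self, item):
        return item in self._items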
Example #4
class frontpage_version(InfrastructurePlugin):
    """
    Search for the FrontPage Server Info file and, if found, determine its version.
    :author: Viktor Gazdag ( [email protected] )
    """
    VERSION_RE = re.compile('FPVersion="(.*?)"', re.IGNORECASE)
    ADMIN_URL_RE = re.compile('FPAdminScriptUrl="(.*?)"', re.IGNORECASE)
    AUTHOR_URL_RE = re.compile('FPAuthorScriptUrl="(.*?)"', re.IGNORECASE)

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()

    @runonce(exc_class=RunOnce)
    def discover(self, fuzzable_request, debugging_id):
        """
        For every directory, fetch a list of files and analyze the response.

        :param debugging_id: A unique identifier for this call to discover()
        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        for domain_path in fuzzable_request.get_url().get_directories():

            if domain_path in self._analyzed_dirs:
                continue

            # Save the domain_path so I know I'm not working in vain
            self._analyzed_dirs.add(domain_path)

            # Request the file
            frontpage_info_url = domain_path.url_join("_vti_inf.html")
            try:
                response = self._uri_opener.GET(frontpage_info_url, cache=True)
            except BaseFrameworkException, w3:
                fmt = (
                    'Failed to GET Frontpage Server _vti_inf.html file: "%s". '
                    'Exception: "%s".')
                om.out.debug(fmt % (frontpage_info_url, w3))
            else:
                # Check if it's a Frontpage Info file
                if not is_404(response):
                    fr = FuzzableRequest(response.get_uri())
                    self.output_queue.put(fr)

                    self._analyze_response(response)
Example #5
class frontpage_version(InfrastructurePlugin):
    """
    Search for the FrontPage Server Info file and, if found, determine its version.
    :author: Viktor Gazdag ( [email protected] )
    """
    VERSION_RE = re.compile('FPVersion="(.*?)"', re.IGNORECASE)
    ADMIN_URL_RE = re.compile('FPAdminScriptUrl="(.*?)"', re.IGNORECASE)
    AUTHOR_URL_RE = re.compile('FPAuthorScriptUrl="(.*?)"', re.IGNORECASE)

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()

    @runonce(exc_class=RunOnce)
    def discover(self, fuzzable_request):
        """
        For every directory, fetch a list of files and analyze the response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        for domain_path in fuzzable_request.get_url().get_directories():

            if domain_path in self._analyzed_dirs:
                continue

            # Save the domain_path so I know I'm not working in vain
            self._analyzed_dirs.add(domain_path)

            # Request the file
            frontpage_info_url = domain_path.url_join("_vti_inf.html")
            try:
                response = self._uri_opener.GET(frontpage_info_url,
                                                cache=True)
            except BaseFrameworkException, w3:
                fmt = 'Failed to GET Frontpage Server _vti_inf.html file: "%s"'\
                      '. Exception: "%s".'
                om.out.debug(fmt % (frontpage_info_url, w3))
            else:
                # Check if it's a Frontpage Info file
                if not is_404(response):
                    fr = FuzzableRequest(response.get_uri())
                    self.output_queue.put(fr)

                    self._analyze_response(response)
Example #6
File: blank_body.py Project: zcr214/w3af
class blank_body(GrepPlugin):
    """
    Find responses with empty body.

    :author: Andres Riancho ([email protected])
    """

    METHODS = ('GET', 'POST')
    HTTP_CODES = (401, 304, 302, 301, 204, 405)
    
    def __init__(self):
        GrepPlugin.__init__(self)
        self.already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point, find the blank bodies and report them.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if response.get_body() == '' and request.get_method() in self.METHODS\
        and response.get_code() not in self.HTTP_CODES\
        and not response.get_headers().icontains('location')\
        and response.get_url().uri2url() not in self.already_reported:

            self.already_reported.add(response.get_url().uri2url())

            desc = 'The URL: "%s" returned an empty body, this could indicate'\
                   ' an application error.'
            desc = desc % response.get_url()

            i = Info('Blank http response body', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())
            
            self.kb_append(self, 'blank_body', i)

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #7
class strange_parameters(GrepPlugin):
    """
    Grep the HTML response and find URIs that have strange parameters.

    :author: Andres Riancho ([email protected])
    """
    def __init__(self):
        GrepPlugin.__init__(self)

        # Internal variables
        self._already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        try:
            dp = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            return

        # Note:
        # - With parsed_references I'm 100% that it's really something in the
        #   HTML that the developer intended to add.
        #
        # - The re_references are the result of regular expressions, which in
        #   some cases are just false positives.
        #
        parsed_references, _ = dp.get_references()
        analyzers = {self._analyze_SQL, self._analyze_strange}

        for ref in parsed_references:
            for token in ref.querystring.iter_tokens():

                token_name = token.get_name()
                token_value = token.get_value()

                if (ref.uri2url(), token_name) in self._already_reported:
                    continue

                for analyzer in analyzers:
                    if analyzer(request, response, ref, token_name,
                                token_value):
                        # Don't repeat findings
                        self._already_reported.add((ref.uri2url(), token_name))
                        break

    def _analyze_strange(self, request, response, ref, token_name,
                         token_value):
        if self._is_strange(request, token_name, token_value):
            desc = ('The URI: "%s" has a parameter named: "%s" with value:'
                    ' "%s", which is very uncommon. and requires manual'
                    ' verification.')
            desc %= (response.get_uri(), token_name, token_value)

            i = Info('Uncommon query string parameter', desc, response.id,
                     self.get_name())
            i['parameter_value'] = token_value
            i.add_to_highlight(token_value)
            i.set_uri(ref)

            self.kb_append(self, 'strange_parameters', i)
            return True

        return False

    def _analyze_SQL(self, request, response, ref, token_name, token_value):
        """
        To find these kinds of vulnerabilities

        http://thedailywtf.com/Articles/Oklahoma-Leaks-Tens-of-Thousands-of-Social-Security-Numbers,-Other-Sensitive-Data.aspx

        :return: True if the parameter value contains SQL sentences
        """
        for match in SQL_RE.findall(token_value):
            if request.sent(match):
                continue

            desc = ('The URI: "%s" has a parameter named: "%s" with value:'
                    ' "%s", which is a SQL query.')
            desc %= (response.get_uri(), token_name, token_value)

            v = Vuln('Parameter has SQL sentence', desc, severity.LOW,
                     response.id, self.get_name())
            v['parameter_value'] = token_value
            v.add_to_highlight(token_value)
            v.set_uri(ref)

            self.kb_append(self, 'strange_parameters', v)
            return True

        return False

    def _is_strange(self, request, parameter, value):
        """
        :return: True if the parameter value is strange
        """
        if 'wicket:' in parameter:
            #
            # The wicket framework uses, by default, strange URLs like this:
            #
            # https://www.DOMAIN.com/
            #     ?wicket:bookmarkablePage=:com.DOMAIN.web.pages.SignInPage
            #     &wicket:interface=:0:signInForm::IFormSubmitListener::
            #     ;jsessionid=7AC76A46A86BBC3F5253E374241BC892
            #
            #   Which are strange in all cases, except from wicket!
            #
            return False

        # Seems to be a function
        _strange_parameter_re = [r'\w+\(.*?\)']

        for regex in _strange_parameter_re:
            for match in re.findall(regex, value):
                if not request.sent(match):
                    return True

        split_value = [x for x in STRANGE_RE.split(value) if x != '']
        if len(split_value) > 4:
            if not request.sent(value):
                return True

        return False

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #8
class archive_dot_org(CrawlPlugin):
    """
    Search archive.org to find new pages in the target site.

    :author: Andres Riancho ([email protected])
    :author: Darren Bilby, thanks for the good idea!
    """

    ARCHIVE_START_URL = 'http://web.archive.org/web/*/%s'
    INTERESTING_URLS_RE = r'<a href="(http://web\.archive\.org/web/\d*?/https?://%s/.*?)"'
    NOT_IN_ARCHIVE = '<p>Wayback Machine doesn&apos;t have that page archived.</p>'

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._already_crawled = ScalableBloomFilter()
        self._already_verified = ScalableBloomFilter()

        # User configured parameters
        self._max_depth = 3

    def crawl(self, fuzzable_request):
        """
        Search archive.org for the target site and extract the links from the
        HTML. Then look for those URLs in the target site. This is a time
        machine!

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        domain = fuzzable_request.get_url().get_domain()

        if is_private_site(domain):
            msg = 'There is no point in searching archive.org for "%s"'\
                  ' because it is a private site that will never be indexed.'
            om.out.information(msg % domain)
            raise RunOnce(msg)

        # Initial check to verify if domain in archive
        start_url = self.ARCHIVE_START_URL % fuzzable_request.get_url()
        start_url = URL(start_url)
        http_response = self._uri_opener.GET(start_url, cache=True)

        if self.NOT_IN_ARCHIVE in http_response.body:
            msg = 'There is no point in searching archive.org for "%s"'
            msg += ' because they are not indexing this site.'
            om.out.information(msg % domain)
            raise RunOnce(msg)

        references = self._spider_archive([
            start_url,
        ], self._max_depth, domain)
        self._analyze_urls(references)

    def _analyze_urls(self, references):
        """
        Analyze which references are cached by archive.org

        :return: A list of query string objects for the URLs that are in
                 the cache AND are in the target web site.
        """
        real_URLs = []

        # Translate archive.org URLs to normal URLs
        for url in references:
            url = url.url_string[url.url_string.index('http', 1):]
            real_URLs.append(URL(url))
        real_URLs = list(set(real_URLs))

        if len(real_URLs):
            om.out.debug('Archive.org cached the following pages:')
            for u in real_URLs:
                om.out.debug('- %s' % u)
        else:
            om.out.debug('Archive.org did not find any pages.')

        # Verify if they exist in the target site and add them to
        # the result if they do. Send the requests using threads:
        self.worker_pool.map(self._exists_in_target, real_URLs)

    def _spider_archive(self, url_list, max_depth, domain):
        """
        Perform a classic web spidering process.

        :param url_list: The list of URL strings
        :param max_depth: The max link depth that we have to follow.
        :param domain: The domain name we are checking
        """
        # Start the recursive spidering
        res = []

        def spider_worker(url, max_depth, domain):
            if url in self._already_crawled:
                return []

            self._already_crawled.add(url)

            try:
                http_response = self._uri_opener.GET(url, cache=True)
            except:
                return []

            # Filter the ones we need
            url_regex_str = self.INTERESTING_URLS_RE % domain
            matched_urls = re.findall(url_regex_str, http_response.body)
            new_urls = set([URL(u).remove_fragment() for u in matched_urls])

            # Go recursive
            if max_depth - 1 > 0:
                if new_urls:
                    res.extend(new_urls)
                    res.extend(
                        self._spider_archive(new_urls, max_depth - 1, domain))
            else:
                msg = 'Some sections of the archive.org site were not analyzed'
                msg += ' because of the configured max_depth.'
                om.out.debug(msg)
                return new_urls

        args = izip(url_list, repeat(max_depth), repeat(domain))
        self.worker_pool.map_multi_args(spider_worker, args)

        return list(set(res))

    def _exists_in_target(self, url):
        """
        Check if a resource still exists in the target web site.

        :param url: The resource to verify.
        :return: None, the result is stored in self.output_queue
        """
        if url in self._already_verified:
            return

        self._already_verified.add(url)

        response = self._uri_opener.GET(url, cache=True)

        if not is_404(response):
            msg = 'The URL: "' + url + '" was found at archive.org and is'
            msg += ' STILL AVAILABLE in the target site.'
            om.out.debug(msg)
            for fr in self._create_fuzzable_requests(response):
                self.output_queue.put(fr)
        else:
            msg = 'The URL: "' + url + '" was found at archive.org and was'
            msg += ' DELETED from the target site.'
            om.out.debug(msg)

    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()

        d = 'Maximum recursion depth for spidering process'
        h = 'The plugin will spider the archive.org site related to the target'
        h += ' site with the maximum depth specified in this parameter.'
        o = opt_factory('max_depth', self._max_depth, d, 'integer', help=h)
        ol.add(o)

        return ol

    def set_options(self, options_list):
        """
        This method sets all the options that are configured using the user
        interface generated by the framework using the result of get_options().

        :param OptionList: A dictionary with the options for the plugin.
        :return: No value is returned.
        """
        self._max_depth = options_list['max_depth'].get_value()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #9
class archive_dot_org(CrawlPlugin):
    """
    Search archive.org to find new pages in the target site.

    :author: Andres Riancho ([email protected])
    :author: Darren Bilby, thanks for the good idea!
    """

    ARCHIVE_START_URL = 'http://web.archive.org/web/*/%s'
    INTERESTING_URLS_RE = r'<a href="(http://web\.archive\.org/web/\d*?/https?://%s/.*?)"'
    NOT_IN_ARCHIVE = '<p>Wayback Machine doesn&apos;t have that page archived.</p>'

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._already_crawled = ScalableBloomFilter()
        self._already_verified = ScalableBloomFilter()

        # User configured parameters
        self._max_depth = 3

    def crawl(self, fuzzable_request):
        """
        Search archive.org for the target site and extract the links from the
        HTML. Then look for those URLs in the target site. This is a time
        machine!

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        domain = fuzzable_request.get_url().get_domain()

        if is_private_site(domain):
            msg = 'There is no point in searching archive.org for "%s"'\
                  ' because it is a private site that will never be indexed.'
            om.out.information(msg % domain)
            raise RunOnce(msg)

        # Initial check to verify if domain in archive
        start_url = self.ARCHIVE_START_URL % fuzzable_request.get_url()
        start_url = URL(start_url)
        http_response = self._uri_opener.GET(start_url, cache=True)

        if self.NOT_IN_ARCHIVE in http_response.body:
            msg = 'There is no point in searching archive.org for "%s"'
            msg += ' because they are not indexing this site.'
            om.out.information(msg % domain)
            raise RunOnce(msg)

        references = self._spider_archive(
            [start_url, ], self._max_depth, domain)
        self._analyze_urls(references)

    def _analyze_urls(self, references):
        """
        Analyze which references are cached by archive.org

        :return: A list of query string objects for the URLs that are in
                 the cache AND are in the target web site.
        """
        real_urls = []

        # Translate archive.org URLs to normal URLs
        for url in references:
            url = url.url_string[url.url_string.index('http', 1):]
            real_urls.append(URL(url))

        real_urls = list(set(real_urls))

        if len(real_urls):
            om.out.debug('Archive.org cached the following pages:')
            for u in real_urls:
                om.out.debug('- %s' % u)
        else:
            om.out.debug('Archive.org did not find any pages.')

        # Verify if they exist in the target site and add them to
        # the result if they do. Send the requests using threads:
        self.worker_pool.map(self._exists_in_target, real_urls)

    def _spider_archive(self, url_list, max_depth, domain):
        """
        Perform a classic web spidering process.

        :param url_list: The list of URL strings
        :param max_depth: The max link depth that we have to follow.
        :param domain: The domain name we are checking
        """
        # Start the recursive spidering
        res = []

        def spider_worker(url, max_depth, domain):
            if url in self._already_crawled:
                return []

            self._already_crawled.add(url)

            try:
                http_response = self._uri_opener.GET(url, cache=True)
            except:
                return []

            # Filter the ones we need
            url_regex_str = self.INTERESTING_URLS_RE % domain
            matched_urls = re.findall(url_regex_str, http_response.body)
            new_urls = [URL(u) for u in matched_urls]
            new_urls = [u.remove_fragment() for u in new_urls]
            new_urls = set(new_urls)

            # Go recursive
            if max_depth - 1 > 0:
                if new_urls:
                    res.extend(new_urls)
                    res.extend(self._spider_archive(new_urls,
                                                    max_depth - 1,
                                                    domain))
            else:
                msg = 'Some sections of the archive.org site were not analyzed'
                msg += ' because of the configured max_depth.'
                om.out.debug(msg)
                return new_urls

        args = izip(url_list, repeat(max_depth), repeat(domain))
        self.worker_pool.map_multi_args(spider_worker, args)

        return list(set(res))

    def _exists_in_target(self, url):
        """
        Check if a resource still exists in the target web site.

        :param url: The resource to verify.
        :return: None, the result is stored in self.output_queue
        """
        if url in self._already_verified:
            return

        self._already_verified.add(url)

        response = self._uri_opener.GET(url, cache=True)

        if not is_404(response):
            msg = 'The URL: "%s" was found at archive.org and is'\
                  ' STILL AVAILABLE in the target site.'
            om.out.debug(msg % url)

            fr = FuzzableRequest(response.get_uri())
            self.output_queue.put(fr)
        else:
            msg = 'The URL: "%s" was found at archive.org and was'\
                  ' DELETED from the target site.'
            om.out.debug(msg % url)

    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()

        d = 'Maximum recursion depth for spidering process'
        h = 'The plugin will spider the archive.org site related to the target'\
            ' site with the maximum depth specified in this parameter.'
        o = opt_factory('max_depth', self._max_depth, d, 'integer', help=h)
        ol.add(o)

        return ol

    def set_options(self, options_list):
        """
        This method sets all the options that are configured using the user
        interface generated by the framework using the result of get_options().

        :param OptionList: A dictionary with the options for the plugin.
        :return: No value is returned.
        """
        self._max_depth = options_list['max_depth'].get_value()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #10
class frontpage(AuditPlugin):
    """
    Tries to upload a file using frontpage extensions (author.dll).

    :author: Andres Riancho ([email protected])
    """
    def __init__(self):
        AuditPlugin.__init__(self)

        # Internal variables
        self._already_tested = ScalableBloomFilter()
        self._author_url = None

    def _get_author_url(self):
        if self._author_url is not None:
            return self._author_url

        for info in kb.kb.get('frontpage_version', 'frontpage_version'):
            author_url = info.get('FPAuthorScriptUrl', None)
            if author_url is not None:
                self._author_url = author_url
                return self._author_url

        return None

    def audit(self, freq, orig_response, debugging_id):
        """
        Searches for file upload vulns using a POST to author.dll.

        :param freq: A FuzzableRequest
        :param orig_response: The HTTP response associated with the fuzzable request
        :param debugging_id: A unique identifier for this call to audit()
        """
        # Only run if we have the author URL for this frontpage instance
        if self._get_author_url() is None:
            return

        # Only identify one vulnerability of this type
        if kb.kb.get(self, 'frontpage'):
            return

        domain_path = freq.get_url().get_domain_path()

        # Upload only once to each directory
        if domain_path in self._already_tested:
            return

        self._already_tested.add(domain_path)

        rand_file = rand_alpha(6) + '.html'
        upload_id = self._upload_file(domain_path, rand_file, debugging_id)
        self._verify_upload(domain_path, rand_file, upload_id, debugging_id)

    def _upload_file(self, domain_path, rand_file, debugging_id):
        """
        Upload the file using author.dll

        :param domain_path: http://localhost/f00/
        :param rand_file: <random>.html
        """
        # TODO: The frontpage version should be obtained from the information
        # saved in the kb by the infrastructure.frontpage_version plugin!
        #
        # The 4.0.2.4715 version should be dynamic!
        version = '4.0.2.4715'

        file_path = domain_path.get_path() + rand_file

        data = POST_BODY % (version, file_path)
        data += rand_file[::-1]
        data = smart_str_ignore(data)

        target_url = self._get_author_url()

        try:
            res = self._uri_opener.POST(target_url,
                                        data=data,
                                        debugging_id=debugging_id)
        except BaseFrameworkException, e:
            om.out.debug(
                'Exception while uploading file using author.dll: %s' % e)
            return None
        else:
Example #11
class html_comments(GrepPlugin):
    """
    Extract and analyze HTML comments.

    :author: Andres Riancho ([email protected])
    """

    HTML_RE = re.compile('<[a-zA-Z]*.*?>.*?</[a-zA-Z]>')

    INTERESTING_WORDS = (
        # In English
        'user', 'pass', 'xxx', 'fix', 'bug', 'broken', 'oops', 'hack',
        'caution', 'todo', 'note', 'warning', '!!!', '???', 'shit',
        'pass', 'password', 'passwd', 'pwd', 'secret', 'stupid',
        
        # In Spanish
        'tonto', 'porqueria', 'cuidado', 'usuario', u'contraseña',
        'puta', 'email', 'security', 'captcha', 'pinga', 'cojones',
        
        # some in Portuguese
        'banco', 'bradesco', 'itau', 'visa', 'bancoreal', u'transfêrencia',
        u'depósito', u'cartão', u'crédito', 'dados pessoais'
    )

    _multi_in = MultiIn([' %s ' % w for w in INTERESTING_WORDS])

    def __init__(self):
        GrepPlugin.__init__(self)

        # Internal variables
        self._comments = DiskDict(table_prefix='html_comments')
        self._already_reported = ScalableBloomFilter()
        self._end_was_called = False

    def grep(self, request, response):
        """
        Plugin entry point, parse those comments!

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if not response.is_text_or_html():
            return
        
        try:
            dp = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            return
        
        for comment in dp.get_comments():
            # These next two lines fix this issue:
            # audit.ssi + grep.html_comments + web app with XSS = false positive
            if request.sent(comment):
                continue

            if self._is_new(comment, response):

                self._interesting_word(comment, request, response)
                self._html_in_comment(comment, request, response)

    def _interesting_word(self, comment, request, response):
        """
        Find interesting words in HTML comments
        """
        comment = comment.lower()

        for word in self._multi_in.query(comment):
            if (word, response.get_url()) in self._already_reported:
                continue

            desc = ('A comment with the string "%s" was found in: "%s".'
                    ' This could be interesting.')
            desc %= (word, response.get_url())

            i = Info.from_fr('Interesting HTML comment', desc, response.id,
                             self.get_name(), request)
            i.add_to_highlight(word)

            kb.kb.append(self, 'interesting_comments', i)
            om.out.information(i.get_desc())
                
            self._already_reported.add((word, response.get_url()))

    def _html_in_comment(self, comment, request, response):
        """
        Find HTML code in HTML comments
        """
        html_in_comment = self.HTML_RE.search(comment)

        if html_in_comment is None:
            return

        if (comment, response.get_url()) in self._already_reported:
            return

        # There is HTML code in the comment.
        comment = comment.strip()
        comment = comment.replace('\n', '')
        comment = comment.replace('\r', '')
        comment = comment[:40]

        desc = ('A comment with the string "%s" was found in: "%s".'
                ' This could be interesting.')
        desc %= (comment, response.get_url())

        i = Info.from_fr('HTML comment contains HTML code', desc, response.id,
                         self.get_name(), request)
        i.set_uri(response.get_uri())
        i.add_to_highlight(html_in_comment.group(0))

        kb.kb.append(self, 'html_comment_hides_html', i)
        om.out.information(i.get_desc())
        self._already_reported.add((comment, response.get_url()))

    def _handle_no_such_table(self, comment, response, nste):
        """
        I had a lot of issues trying to reproduce [0], so this code is just
        a helper for me to identify the root cause.

        [0] https://github.com/andresriancho/w3af/issues/10849

        :param nste: The original exception
        :param comment: The comment we're analyzing
        :param response: The HTTP response
        :return: None, an exception with more information is re-raised
        """
        msg = ('A NoSuchTableException was raised by the DBMS. This issue is'
               ' related to #10849, but since I was unable to reproduce'
               ' it, extra debug information is added to the exception:'
               '\n'
               '\n - Grep plugin end() was called: %s'
               '\n - Response ID is: %s'
               '\n - HTML comment is: "%s"'
               '\n - Original exception: "%s"'
               '\n\n'
               'https://github.com/andresriancho/w3af/issues/10849\n')
        args = (self._end_was_called,
                response.get_id(),
                comment,
                nste)
        raise NoSuchTableException(msg % args)

    def _is_new(self, comment, response):
        """
        Make sure that we perform a thread safe check on the self._comments
        dict, in order to avoid duplicates.
        """
        with self._plugin_lock:
            
            #pylint: disable=E1103
            try:
                comment_data = self._comments.get(comment, None)
            except NoSuchTableException, nste:
                self._handle_no_such_table(comment, response, nste)

            response_url = response.get_url()

            if comment_data is None:
                self._comments[comment] = [(response_url, response.id)]
                return True

            for saved_url, _ in comment_data:
                if response_url == saved_url:
                    return False

            comment_data.append((response_url, response.id))
            self._comments[comment] = comment_data
            return True
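
Note the construction of _multi_in in this plugin: each interesting word is padded with spaces (' %s ') so that only whole, whitespace-delimited words match; 'pass' will not fire inside 'passport'. A quick demonstration of the effect, with plain substring checks standing in for MultiIn.query() (the sample comment is made up):

words = ('user', 'pass', 'todo')
needles = [' %s ' % w for w in words]

comment = ' todo fix the pass check before release '.lower()
hits = [n.strip() for n in needles if n in comment]
print(hits)
# ['pass', 'todo'] -- but note the trade-off: 'todo:' or 'pass.' would be
# missed, because the padding requires a space on both sides of the word.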
Example #12
File: digit_sum.py Project: jatkatz/w3af
class digit_sum(CrawlPlugin):
    """
    Take a URL with a number (index2.asp) and try to find related files
    (index1.asp, index3.asp).

    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        CrawlPlugin.__init__(self)
        self._already_visited = ScalableBloomFilter()

        # User options
        self._fuzz_images = False
        self._max_digit_sections = 4

    def crawl(self, fuzzable_request):
        """
        Searches for new URLs by adding and subtracting numbers to the file
        and the parameters.

        :param fuzzable_request: A fuzzable_request instance that contains
                                     (among other things) the URL to test.
        """
        url = fuzzable_request.get_url()
        headers = Headers([("Referer", url.url_string)])

        original_response = self._uri_opener.GET(fuzzable_request.get_uri(), cache=True, headers=headers)

        if original_response.is_text_or_html() or self._fuzz_images:

            fr_generator = self._mangle_digits(fuzzable_request)
            response_repeater = repeat(original_response)
            header_repeater = repeat(headers)

            args = izip(fr_generator, response_repeater, header_repeater)

            self.worker_pool.map_multi_args(self._do_request, args)

            # I add myself so the next call to this plugin won't find me...
            # Example: index1.html ---> index2.html --!!--> index1.html
            self._already_visited.add(fuzzable_request.get_uri())

    def _do_request(self, fuzzable_request, original_resp, headers):
        """
        Send the request.

        :param fuzzable_request: The modified fuzzable request
        :param original_resp: The response for the original request that was
                              sent.
        """
        response = self._uri_opener.GET(fuzzable_request.get_uri(), cache=True, headers=headers)

        add = False

        if not is_404(response):
            # We have different cases:
            #    - If the URLs are different, then there is nothing to think
            #      about, we simply found something new!
            if response.get_url() != original_resp.get_url():
                add = True

            #    - If the content type changed, then there is no doubt that
            #      we've found something new!
            elif response.doc_type != original_resp.doc_type:
                add = True

            #    - If we changed the query string parameters, we have to check
            #      the content
            elif fuzzy_not_equal(response.get_clear_text_body(), original_resp.get_clear_text_body(), 0.8):
                # In this case what might happen is that the number we changed
                # is "out of range" and when requesting that it will trigger an
                # error in the web application, or show us a non-interesting
                # response that holds no content.
                #
                # We choose to return these to the core because they might help
                # with the code coverage efforts. Think about something like:
                #     foo.aspx?id=OUT_OF_RANGE&foo=inject_here
                # vs.
                #     foo.aspx?id=IN_RANGE&foo=inject_here
                #
                # This relates to the EXPECTED_URLS in test_digit_sum.py
                add = True

        if add:
            for fr in self._create_fuzzable_requests(response):
                self.output_queue.put(fr)

    def _mangle_digits(self, fuzzable_request):
        """
        Mangle the digits (if any) in the fr URL.

        :param fuzzable_request: The original FuzzableRequest
        :return: A generator which returns mangled fuzzable requests
        """
        # First I'll mangle the digits in the URL file
        filename = fuzzable_request.get_url().get_file_name()
        domain_path = fuzzable_request.get_url().get_domain_path()
        for fname in self._do_combinations(filename):
            fr_copy = fuzzable_request.copy()
            fr_copy.set_url(domain_path.url_join(fname))

            if fr_copy.get_uri() not in self._already_visited:
                self._already_visited.add(fr_copy.get_uri())

                yield fr_copy

        # Now I'll mangle the query string variables
        if fuzzable_request.get_method() == "GET":
            for parameter in fuzzable_request.get_dc():

                # to support repeated parameter names...
                for element_index in xrange(len(fuzzable_request.get_dc()[parameter])):

                    combinations = self._do_combinations(fuzzable_request.get_dc()[parameter][element_index])
                    for modified_value in combinations:

                        fr_copy = fuzzable_request.copy()
                        new_dc = fr_copy.get_dc()
                        new_dc[parameter][element_index] = modified_value
                        fr_copy.set_dc(new_dc)

                        if fr_copy.get_uri() not in self._already_visited:
                            self._already_visited.add(fr_copy.get_uri())
                            yield fr_copy

    def _do_combinations(self, a_string):
        """
        >>> ds = digit_sum()
        >>> ds._do_combinations( 'abc123' )
        ['abc124', 'abc122']

        >>> ds._do_combinations( 'abc123def56' )
        ['abc124def56', 'abc122def56', 'abc123def57', 'abc123def55']

        """
        res = []
        splitted = self._find_digits(a_string)
        if len(splitted) <= 2 * self._max_digit_sections:
            for i in xrange(len(splitted)):
                if splitted[i].isdigit():
                    splitted[i] = str(int(splitted[i]) + 1)
                    res.append("".join(splitted))
                    splitted[i] = str(int(splitted[i]) - 2)
                    res.append("".join(splitted))

                    # restore the initial value for next loop
                    splitted[i] = str(int(splitted[i]) + 1)

        return res

    def _find_digits(self, a_string):
        """
        Finds digits in a string and returns a list with string sections.

        >>> ds = digit_sum()
        >>> ds._find_digits('foo45')
        ['foo', '45']

        >>> ds._find_digits('f001bar112')
        ['f', '001', 'bar', '112']

        :return: A list of strings.
        """
        # regexes are soooooooooooooo cool !
        return [x for x in re.split(r"(\d+)", a_string) if x != ""]

    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()

        d = "Apply URL fuzzing to all URLs, including images, videos, zip, etc."
        h = "It's safe to leave this option as the default."
        o = opt_factory("fuzzImages", self._fuzz_images, d, "boolean", help=h)
        ol.add(o)

        d = "Set the top number of sections to fuzz"
        h = (
            "It's safe to leave this option as the default. For example, with"
            " maxDigitSections = 1, this string wont be fuzzed: abc123def234 ;"
            " but this one will abc23ldd."
        )
        o = opt_factory("maxDigitSections", self._max_digit_sections, d, "integer", help=h)
        ol.add(o)

        return ol

    def set_options(self, options_list):
        """
        This method sets all the options that are configured using the user
        interface generated by the framework using the result of get_options().

        :param OptionList: A dictionary with the options for the plugin.
        :return: No value is returned.
        """
        self._fuzz_images = options_list["fuzzImages"].get_value()
        self._max_digit_sections = options_list["maxDigitSections"].get_value()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #13
class strange_parameters(GrepPlugin):
    """
    Grep the HTML response and find URIs that have strange parameters.

    :author: Andres Riancho ([email protected])
    """
    STRANGE_RE_CHARS = re.compile(r'([a-zA-Z0-9. ]+)')

    STRANGE_RE_LIST = [re.compile(r'\w+\(.*?\)')]

    SQL_RE = re.compile(
        r'(SELECT .*? FROM|'
        r'INSERT INTO .*? VALUES|'
        r'UPDATE .*? SET .*? WHERE)', re.IGNORECASE)

    def __init__(self):
        GrepPlugin.__init__(self)

        # Internal variables
        self._already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        try:
            dp = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            return

        # Note:
        #
        # - With parsed_references I'm 100% that it's really something in the
        #   HTML that the developer intended to add.
        #
        # - The re_references are the result of regular expressions, which in
        #   some cases are just false positives.
        #
        parsed_references, _ = dp.get_references()
        analyzers = {self._analyze_SQL, self._analyze_strange}

        for ref in parsed_references:
            for token in ref.querystring.iter_tokens():

                token_name = token.get_name()
                token_value = token.get_value()

                if (ref.uri2url(), token_name) in self._already_reported:
                    continue

                for analyzer in analyzers:
                    if analyzer(request, response, ref, token_name,
                                token_value):
                        # Don't repeat findings
                        self._already_reported.add((ref.uri2url(), token_name))

    def _analyze_strange(self, request, response, ref, token_name,
                         token_value):
        if not self._is_strange(request, token_name, token_value):
            return False

        if request.sent(token_value):
            return False

        desc = ('The URI: "%s" has a parameter named: "%s" with value:'
                ' "%s", which is very uncommon and requires manual'
                ' inspection.')
        args = (response.get_uri(), token_name, token_value)
        args = tuple(smart_str_ignore(i) for i in args)
        desc %= args

        i = Info('Uncommon query string parameter', desc, response.id,
                 self.get_name())
        i['parameter_value'] = token_value
        i.add_to_highlight(token_value)
        i.set_uri(ref)

        self.kb_append(self, 'strange_parameters', i)
        return True

    def _analyze_SQL(self, request, response, ref, token_name, token_value):
        """
        To find these kinds of vulnerabilities

        http://thedailywtf.com/Articles/Oklahoma-Leaks-Tens-of-Thousands-of-Social-Security-Numbers,-Other-Sensitive-Data.aspx

        :return: True if the parameter value contains SQL sentences
        """
        for match in self.SQL_RE.findall(token_value):
            if request.sent(match):
                continue

            desc = ('The URI: "%s" has a parameter named: "%s" with value:'
                    ' "%s", which is a SQL query.')
            desc %= (response.get_uri(), token_name, token_value)

            v = Vuln('Parameter has SQL sentence', desc, severity.LOW,
                     response.id, self.get_name())
            v['parameter_value'] = token_value
            v.add_to_highlight(token_value)
            v.set_uri(ref)

            self.kb_append(self, 'strange_parameters', v)
            return True

        return False

    def _is_strange(self, request, parameter, value):
        """
        :return: True if the parameter value is strange
        """
        decoded_value = urllib.unquote(value)
        decoded_parameter = urllib.unquote(parameter)

        #
        # Parameters holding URLs will always be flagged as "strange" because
        # they contain multiple "special characters", but we don't care about
        # them enough to report them
        #
        if decoded_value.startswith('http://'):
            return False

        if decoded_value.startswith('https://'):
            return False

        #
        # The wicket framework uses strange URLs like this by design:
        #
        # https://www.DOMAIN.com/
        #     ?wicket:bookmarkablePage=:com.DOMAIN.web.pages.SignInPage
        #     &wicket:interface=:0:signInForm::IFormSubmitListener::
        #     ;jsessionid=7AC76A46A86BBC3F5253E374241BC892
        #
        # Which are strange in all cases, except from wicket!
        #
        if 'wicket:' in parameter or 'wicket:' in decoded_parameter:
            return False

        #
        # Match specific things such as function calls
        #
        for regex in self.STRANGE_RE_LIST:
            for match in regex.findall(value):
                if not request.sent(match):
                    return True

        #
        # Split the parameter by any character that is not A-Za-z0-9 and if
        # the length is greater than X then report it
        #
        split_value = [
            x for x in self.STRANGE_RE_CHARS.split(value) if x != ''
        ]
        if len(split_value) > 4:
            if not request.sent(value):
                return True

        return False

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #14
class allowed_methods(InfrastructurePlugin):
    """
    Enumerate the allowed methods of a URL.
    :author: Andres Riancho ([email protected])
    """

    BAD_CODES = {response_codes.UNAUTHORIZED, response_codes.NOT_IMPLEMENTED,
                 response_codes.METHOD_NOT_ALLOWED, response_codes.FORBIDDEN}

    DAV_METHODS = {'DELETE', 'PROPFIND', 'PROPPATCH', 'COPY', 'MOVE', 'LOCK',
                   'UNLOCK', 'MKCOL'}
    COMMON_METHODS = {'OPTIONS', 'GET', 'HEAD', 'POST', 'TRACE', 'PUT'}
    UNCOMMON_METHODS = {'*', 'SUBSCRIPTIONS', 'NOTIFY', 'DEBUG', 'TRACK',
                        'POLL', 'PIN', 'INVOKE', 'SUBSCRIBE', 'UNSUBSCRIBE'}
    # Methods taken from http://www.w3.org/Protocols/HTTP/Methods.html
    PROPOSED_METHODS = {'CHECKOUT', 'SHOWMETHOD', 'LINK', 'UNLINK', 'CHECKIN',
                        'TEXTSEARCH', 'SPACEJUMP', 'SEARCH', 'REPLY'}
    EXTRA_METHODS = {'CONNECT', 'RMDIR', 'MKDIR', 'REPORT', 'ACL', 'DELETE',
                     'INDEX', 'LABEL', 'INVALID'}
    VERSION_CONTROL = {'VERSION_CONTROL', 'CHECKIN', 'UNCHECKOUT', 'PATCH',
                       'MERGE', 'MKWORKSPACE', 'MKACTIVITY', 'BASELINE_CONTROL'}

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._exec = True
        self._already_tested = ScalableBloomFilter()

        # Methods
        self._supported_methods = self.DAV_METHODS | self.COMMON_METHODS | \
                                  self.UNCOMMON_METHODS | self.PROPOSED_METHODS | \
                                  self.EXTRA_METHODS | self.VERSION_CONTROL

        # User configured variables
        self._exec_one_time = True
        self._report_dav_only = True

    def discover(self, fuzzable_request):
        """
        Uses several techniques to try to find out what methods are allowed
        for a URL.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        if not self._exec:
            # This will remove the plugin from the infrastructure
            # plugins to be run.
            raise RunOnce()

        # Run the plugin.
        if self._exec_one_time:
            self._exec = False

        domain_path = fuzzable_request.get_url().get_domain_path()
        if domain_path not in self._already_tested:
            self._already_tested.add(domain_path)
            _allowed_methods, id_list = self._identify_allowed_methods(domain_path)
            self._analyze_methods(domain_path, _allowed_methods, id_list)

    def _identify_allowed_methods(self, url):
        # First, try to check available methods using OPTIONS,
        # if OPTIONS isn't enabled, do it manually
        allowed_options, id_options = self._identify_with_OPTIONS(url)
        allowed_bf, id_bf = self._identify_with_bruteforce(url)
        
        _allowed_methods = allowed_options + allowed_bf
        # If a method was found by both brute force and OPTIONS, it is
        # duplicated in the list. Remove dups
        _allowed_methods = list(set(_allowed_methods))
        # The request IDs are not deduplicated on purpose: even if a method
        # was discovered by both brute force and OPTIONS, we want to see
        # both requests
        id_list = id_options + id_bf
        
        # Added this to make the output a little bit more readable.
        _allowed_methods.sort()
        
        return _allowed_methods, id_list

    def _identify_with_OPTIONS(self, url):
        """
        Find out what methods are allowed using OPTIONS
        :param url: Where to check.
        """
        _allowed_methods = []
        id_list = []

        try:
            res = self._uri_opener.OPTIONS(url)
        except:
            pass
        else:
            headers = res.get_lower_case_headers()
            id_list.append(res.id)
            
            for header_name in ['allow', 'public']:
                if header_name in headers:
                    _allowed_methods.extend(headers[header_name].split(','))
                    _allowed_methods = [x.strip() for x in _allowed_methods]
                    _allowed_methods = list(set(_allowed_methods))
        
        return _allowed_methods, id_list

    def _identify_with_bruteforce(self, url):
        id_list = []
        _allowed_methods = []
        #
        #   Before doing anything else, I'll send a request with a
        #   non-existent method; if that request succeeds, then all will...
        #
        non_exist_response = self._uri_opener.ARGENTINA(url)
        get_response = self._uri_opener.GET(url)

        if non_exist_response.get_code() not in self.BAD_CODES\
        and get_response.get_body() == non_exist_response.get_body():

            desc = ('The remote Web server has a custom configuration, in'
                    ' which any not implemented methods that are invoked are'
                    ' defaulted to GET instead of returning a "Not Implemented"'
                    ' response.')
            response_ids = [non_exist_response.get_id(), get_response.get_id()]
            i = Info('Non existent methods default to GET', desc, response_ids,
                     self.get_name())
            i.set_url(url)
            
            kb.kb.append(self, 'custom-configuration', i)
            #
            #   It makes no sense to continue working, all methods will
            #   appear as enabled because of this custom configuration.
            #
            return [], [non_exist_response.id, get_response.id]

        # 'DELETE' is not tested! I don't want to remove anything...
        # 'PUT' is not tested! I don't want to overwrite anything...
        methods_to_test = self._supported_methods.copy()

        # remove dangerous methods.
        methods_to_test.remove('DELETE')
        methods_to_test.remove('PUT')

        for method in methods_to_test:
            method_functor = getattr(self._uri_opener, method)
            try:
                response = method_functor(url)
            except:
                pass
            else:
                code = response.get_code()                
                if code not in self.BAD_CODES:
                    _allowed_methods.append(method)
                    id_list.append(response.id)
        
        return _allowed_methods, id_list

    def _analyze_methods(self, url, _allowed_methods, id_list):
        # Sometimes there are no allowed methods, which means that our plugin
        # failed to identify any methods.
        if not _allowed_methods:
            return

        # Check for DAV
        elif set(_allowed_methods).intersection(self.DAV_METHODS):
            # dav is enabled!
            # Save the results in the KB so that other plugins can use this
            # information
            desc = ('The URL "%s" has the following allowed methods. These'
                    ' include DAV methods and should be disabled: %s')
            desc = desc % (url, ', '.join(_allowed_methods))
            
            i = Info('DAV methods enabled', desc, id_list, self.get_name())
            i.set_url(url)
            i['methods'] = _allowed_methods
            
            kb.kb.append(self, 'dav-methods', i)
        else:
            # Save the results in the KB so that other plugins can use this
            # information. Do not remove this information, other plugins
            # REALLY use it!
            desc = 'The URL "%s" has the following enabled HTTP methods: %s'
            desc = desc % (url, ', '.join(_allowed_methods))
            
            i = Info('Allowed HTTP methods', desc, id_list, self.get_name())
            i.set_url(url)
            i['methods'] = _allowed_methods
            
            kb.kb.append(self, 'methods', i)

    def end(self):
        """
        Print the results.
        """
        # First I get the data from the kb
        all_info_obj = kb.kb.get('allowed_methods', 'methods')
        dav_info_obj = kb.kb.get('allowed_methods', 'dav-methods')

        # Now I transform it to something I can use with group_by_min_key
        all_methods = []
        for i in all_info_obj:
            all_methods.append((i.get_url(), i['methods']))

        dav_methods = []

        for i in dav_info_obj:
            dav_methods.append((i.get_url(), i['methods']))

        # Now I work the data...
        to_show, method_type = dav_methods, ' DAV'
        if not self._report_dav_only:
            to_show, method_type = all_methods, ''

        # Make it hashable
        tmp = []
        for url, methodList in to_show:
            tmp.append((url, ', '.join(methodList)))

        result_dict, item_index = group_by_min_key(tmp)

        for k in result_dict:
            if item_index == 0:
                # Grouped by URLs
                msg = 'The URL "%s" has the following%s methods enabled:'
                om.out.information(msg % (k, method_type))
            else:
                # Grouped by Methods
                msg = 'The methods: %s are enabled on the following URLs:'
                om.out.information(msg % k)

            for i in result_dict[k]:
                om.out.information('- ' + i)

    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()

        d1 = 'Execute plugin only one time'
        h1 = ('Generally the methods allowed for a URL are configured system'
              ' wide, so executing this plugin only once is the faster choice.'
              ' The most accurate choice is to run it against every URL.')
        o = opt_factory('execOneTime', self._exec_one_time, d1,
                        'boolean', help=h1)
        ol.add(o)

        d2 = 'Only report findings if uncommon methods are found'
        o = opt_factory('reportDavOnly', self._report_dav_only, d2, 'boolean')
        ol.add(o)

        return ol

    def set_options(self, options_list):
        """
        This method sets all the options that are configured using the user
        interface generated by the framework using the result of get_options().

        :param options_list: A dictionary with the options for the plugin.
        :return: No value is returned.
        """
        self._exec_one_time = options_list['execOneTime'].get_value()
        self._report_dav_only = options_list['reportDavOnly'].get_value()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #16
0
File: find_vhosts.py Project: EnDe/w3af
class find_vhosts(InfrastructurePlugin):
    """
    Modify the HTTP Host header and try to find virtual hosts.
    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._first_exec = True
        self._already_queried = ScalableBloomFilter()
        self._can_resolve_domain_names = False

    def discover(self, fuzzable_request):
        """
        Find virtual hosts.

        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        """
        analysis_result = self._analyze(fuzzable_request)
        self._report_results(fuzzable_request, analysis_result)

    def _analyze(self, fuzzable_request):
        vhost_list = []
        if self._first_exec:
            self._first_exec = False
            vhost_list.extend(self._generic_vhosts(fuzzable_request))

        # I also test for "dead links" that the web programmer left in the
        # page. For example, if w3af finds a link to
        # "http://corporative.intranet.corp/" it will try to resolve the DNS
        # name; if that fails, it will try to request that page from the server
        vhost_list.extend(self._get_dead_links(fuzzable_request))
        return vhost_list

    def _report_results(self, fuzzable_request, analysis_result):
        """
        Report our findings
        """
        reported = set()
        for vhost, request_id in analysis_result:
            if vhost in reported:
                continue

            reported.add(vhost)

            domain = fuzzable_request.get_url().get_domain()
            desc = 'Found a new virtual host at the target web server, the'\
                   ' virtual host name is: "%s". To access this site' \
                   ' you might need to change your DNS resolution settings'\
                   ' in order to point "%s" to the IP address of "%s".'
            desc = desc % (vhost, vhost, domain)

            v = Vuln.from_fr('Virtual host identified', desc, severity.LOW,
                             request_id, self.get_name(), fuzzable_request)

            kb.kb.append(self, 'find_vhosts', v)
            om.out.information(v.get_desc())

    def _get_dead_links(self, fuzzable_request):
        """
        Find every link in the HTML document and verify whether its domain
        can be resolved; after that, check whether we found a different name
        for the target site or a new linked site. If the link points to a
        dead site then report it (it could be pointing to some private
        address or something...)
        """
        # Get some responses to compare later
        base_url = fuzzable_request.get_url().base_url()
        original_response = self._uri_opener.GET(fuzzable_request.get_uri(),
                                                 cache=True)
        base_response = self._uri_opener.GET(base_url, cache=True)
        base_resp_body = base_response.get_body()

        try:
            dp = parser_cache.dpc.get_document_parser_for(original_response)
        except BaseFrameworkException:
            # Failed to find a suitable parser for the document
            return []

        # Set the non existent response
        non_existent_response = self._get_non_exist(fuzzable_request)
        nonexist_resp_body = non_existent_response.get_body()

        # Note:
        # - With parsed_references I'm 100% that it's really something in the
        #   HTML that the developer intended to add.
        #
        # - The re_references are the result of regular expressions, which in
        #   some cases are just false positives.
        #
        # In this case, and because I'm only going to use the domain name of the
        # URL I'm going to trust the re_references also.
        parsed_references, re_references = dp.get_references()
        parsed_references.extend(re_references)

        res = []

        vhosts = self._verify_link_domain(parsed_references)

        for domain, vhost_response in self._send_in_threads(base_url, vhosts):

            vhost_resp_body = vhost_response.get_body()

            if fuzzy_not_equal(vhost_resp_body, base_resp_body, 0.35) and \
               fuzzy_not_equal(vhost_resp_body, nonexist_resp_body, 0.35):
                res.append((domain, vhost_response.id))
            else:
                desc = 'The content of "%s" references a non existent domain:'\
                       ' "%s". This can be a broken link, or an internal'\
                       ' domain name.'
                desc = desc % (fuzzable_request.get_url(), domain)
                
                i = Info('Internal hostname in HTML link', desc,
                         original_response.id, self.get_name())
                i.set_url(fuzzable_request.get_url())
                
                kb.kb.append(self, 'find_vhosts', i)
                om.out.information(i.get_desc())

        return res

    def _verify_link_domain(self, parsed_references):
        """
        Verify each link in parsed_references and yield the ones that can NOT
        be resolved using DNS.
        """
        for link in parsed_references:
            domain = link.get_domain()

            if domain not in self._already_queried:
                self._already_queried.add(domain)

                try:
                    # raises an exception when the name can't be resolved, e.g.
                    # socket.gaierror: (-5, 'No address associated with hostname')
                    socket.gethostbyname(domain)
                except Exception:
                    yield domain

    def _generic_vhosts(self, fuzzable_request):
        """
        Test some generic virtual hosts, only do this once.
        """
        # Get some responses to compare later
        base_url = fuzzable_request.get_url().base_url()
        original_response = self._uri_opener.GET(base_url, cache=True)
        orig_resp_body = original_response.get_body()

        non_existent_response = self._get_non_exist(fuzzable_request)
        nonexist_resp_body = non_existent_response.get_body()

        res = []
        vhosts = self._get_common_virtualhosts(base_url)

        for vhost, vhost_response in self._send_in_threads(base_url, vhosts):
            vhost_resp_body = vhost_response.get_body()

            # If they are *really* different (not just different by some chars)
            if fuzzy_not_equal(vhost_resp_body, orig_resp_body, 0.35) and \
                    fuzzy_not_equal(vhost_resp_body, nonexist_resp_body, 0.35):
                res.append((vhost, vhost_response.id))

        return res

    def _send_in_threads(self, base_url, vhosts):
        base_url_repeater = repeat(base_url)
        args_iterator = izip(base_url_repeater, vhosts)
        http_get = return_args(one_to_many(self._http_get_vhost))
        pool_results = self.worker_pool.imap_unordered(http_get,
                                                       args_iterator)

        for ((base_url, vhost),), vhost_response in pool_results:
            yield vhost, vhost_response

    def _http_get_vhost(self, base_url, vhost):
        """
        Performs an HTTP GET to a URL using a specific vhost.
        :return: HTTPResponse object.
        """
        headers = Headers([('Host', vhost)])
        return self._uri_opener.GET(base_url, cache=False,
                                    headers=headers)

    def _get_non_exist(self, fuzzable_request):
        base_url = fuzzable_request.get_url().base_url()
        non_existent_domain = 'iDoNotExistPleaseGoAwayNowOrDie' + rand_alnum(4)
        return self._http_get_vhost(base_url, non_existent_domain)

    def _get_common_virtualhosts(self, base_url):
        """

        :param base_url: The target URL object.

        :return: A list of possible domain names that could be hosted in the
                 same web server that "domain".

        """
        domain = base_url.get_domain()
        root_domain = base_url.get_root_domain()

        common_virtual_hosts = ['intranet', 'intra', 'extranet', 'extra',
                                'test', 'test1', 'old', 'new', 'admin',
                                'adm', 'webmail', 'services', 'console',
                                'apps', 'mail', 'corporate', 'ws', 'webservice',
                                'private', 'secure', 'safe', 'hidden', 'public']

        for subdomain in common_virtual_hosts:
            # intranet
            yield subdomain
            # intranet.www.targetsite.com
            yield subdomain + '.' + domain
            # intranet.targetsite.com
            yield subdomain + '.' + root_domain
            # intranet.targetsite
            yield subdomain + '.' + root_domain.split('.')[0]

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #17
0
class dot_net_errors(InfrastructurePlugin):
    """
    Request specially crafted URLs that generate ASP.NET errors in order
    to gather information.

    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._already_tested = ScalableBloomFilter()
        # On real web applications, if we can't trigger an error in the first
        # MAX_TESTS tests, it simply won't happen and we have to stop testing.
        self.MAX_TESTS = 25

    def discover(self, fuzzable_request):
        """
        Requests the special filenames.

        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        """
        if len(self._already_tested) < self.MAX_TESTS \
                and fuzzable_request.get_url() not in self._already_tested:
            self._already_tested.add(fuzzable_request.get_url())

            test_generator = self._generate_URLs(fuzzable_request.get_url())

        self.worker_pool.map(self._send_and_check,
                             test_generator,
                             chunksize=1)

    def _generate_URLs(self, original_url):
        """
        Generate new URLs based on original_url.

        :param original_url: The original URL that has to be modified in
                             order to trigger errors in the remote application.
        """
        special_chars = ['|', '~']

        filename = original_url.get_file_name()
        if filename != '' and '.' in filename:
            split_filename = filename.split('.')
            extension = split_filename[-1]
            name = '.'.join(split_filename[0:-1])

            for char in special_chars:
                new_filename = name + char + '.' + extension
                new_url = original_url.url_join(new_filename)
                yield new_url

    def _send_and_check(self, url):
        """
        :param response: The HTTPResponse object that holds the content of
                             the response to analyze.
        """
        response = self._uri_opener.GET(url, cache=True)

        viewable_remote_machine = '<b>Details:</b> To enable the details of this'
        viewable_remote_machine += ' specific error message to be viewable on'
        viewable_remote_machine += ' remote machines'

        if viewable_remote_machine not in response.body \
                and '<h2> <i>Runtime Error</i> </h2></span>' in response.body:

            desc = 'Detailed information about ASP.NET error messages can be'\
                   ' viewed from remote sites. The URL: "%s" discloses'\
                   ' detailed error messages.'
            desc = desc % response.get_url()
        
            v = Vuln('Information disclosure via .NET errors', desc,
                     severity.LOW, response.id, self.get_name())
        
            kb.kb.append(self, 'dot_net_errors', v)

    def get_plugin_deps(self):
        """
        :return: A list with the names of the plugins that should be run
                 before the current one.
        """
        return ['grep.error_pages']

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #18
0
class websocket_hijacking(AuditPlugin):
    """
    Detect Cross-Site WebSocket hijacking vulnerabilities.
    :author: Dmitry Roshchin ([email protected])
    """
    W3AF_DOMAIN = 'w3af.org'
    W3AF_ORIGIN = 'http://www.w3af.org/'

    def __init__(self):
        super(websocket_hijacking, self).__init__()
        self.already_tested_websockets = ScalableBloomFilter()

    def audit(self, freq, orig_response, debugging_id):
        """
        Detect websockets for Cross-Site WebSocket hijacking vulnerabilities.

        This plugin works really well and can be improved in two different ways:

            * Add new check_* methods to this class which detect websocket
              vulnerabilities and then add them to known_checks

            * Extend the websocket link detection in grep.websockets_links,
              which is the weak part of the process, this is because we're doing
              a very trivial regular expression match to find WS links, which
              will most likely fail in "complex" web applications

        :param freq: A FuzzableRequest
        :param orig_response: The HTTP response associated with the fuzzable request
        :param debugging_id: A unique identifier for this call to audit()
        """
        # We can only work if there are known web sockets
        ws_links = kb.kb.get('websockets_links', 'websockets_links')

        for web_socket_info_set in ws_links:
            web_socket_url = web_socket_info_set['ws_link']

            # Checking if we already tested this web socket URL
            if web_socket_url in self.already_tested_websockets:
                continue

            self.already_tested_websockets.add(web_socket_url)

            web_socket_url = URL(web_socket_url)
            web_socket_version = negotiate_websocket_version(self._uri_opener,
                                                             web_socket_url)
            self.check_websocket_security(web_socket_url,
                                          web_socket_version)

    def check_websocket_security(self, web_socket_url, web_socket_version):
        """
        Analyze the security of a web socket

        :param web_socket_url: The URL of the web socket
        :param web_socket_version: The protocol version
        :return: None, results (if any) are stored to the KB
        """
        known_checks = (self.check_is_open_web_socket,
                        self.check_is_restricted_by_origin_with_match_bug,
                        self.check_is_restricted_by_origin,
                        self.check_need_basic_auth_origin_not_restricted,
                        self.check_need_cookie_origin_not_restricted)

        for check in known_checks:
            if check(web_socket_url, web_socket_version):
                break

    def check_is_open_web_socket(self, web_socket_url, web_socket_version):
        """
        Note that this method only makes sense if called in a loop with the
        other check_* methods.

        :param web_socket_url: The URL of the web socket
        :param web_socket_version: The protocol version

        :return: True if the web socket is open:
                    * Any Origin can connect
                    * No cookies required for authentication
                    * No basic auth required for authentication
        """
        upgrade_request = build_ws_upgrade_request(web_socket_url,
                                                   web_socket_version=web_socket_version,
                                                   origin=self.W3AF_ORIGIN)
        upgrade_response = self._uri_opener.send_mutant(upgrade_request,
                                                        cookies=False,
                                                        use_basic_auth=False)

        if not is_successful_upgrade(upgrade_response):
            return False

        msg = ('An HTML5 WebSocket which allows connections from any origin'
               ' without authentication was found at "%s"')
        msg %= web_socket_url

        v = Vuln.from_fr('Open WebSocket', msg, severity.LOW,
                         upgrade_response.id, self.get_name(), upgrade_request)
        self.kb_append_uniq(self, 'websocket_hijacking', v)
        return True

    def check_is_restricted_by_origin_with_match_bug(self, web_socket_url,
                                                     web_socket_version):
        """
        Note that this method only makes sense if called in a loop with the
        other check_* methods.

        :param web_socket_url: The URL of the web socket
        :param web_socket_version: The protocol version

        :return: True if the web socket checks the origin for connections but
                 there is a bug in the matching process
        """
        #
        # Keep in mind that we get here only if the websocket is NOT an open
        # (accepts any origin) socket. So we're in a situation where the socket
        # is either verifying by Origin+Cookies, Origin+Basic Auth or just
        # Origin.
        #
        # We want to check for the "just Origin" now, with a twist, we're
        # checking if there is a mistake in the Origin domain match process
        #
        # This is the trick:
        origin_domain = web_socket_url.get_domain()
        origin_domain += '.%s' % self.W3AF_DOMAIN

        for scheme in {'http', 'https'}:
            origin = '%s://%s' % (scheme, origin_domain)
            upgrade_request = build_ws_upgrade_request(web_socket_url,
                                                       web_socket_version=web_socket_version,
                                                       origin=origin)
            upgrade_response = self._uri_opener.send_mutant(upgrade_request,
                                                            cookies=False,
                                                            use_basic_auth=False)

            if not is_successful_upgrade(upgrade_response):
                continue

            msg = ('An HTML5 WebSocket which restricts connections based on the'
                   ' Origin header was found to be vulnerable because of an'
                   ' incorrect matching algorithm. The "%s" Origin was allowed'
                   ' to connect to "%s".')
            msg %= (origin_domain, web_socket_url)

            v = Vuln.from_fr('Insecure WebSocket Origin filter', msg,
                             severity.MEDIUM, upgrade_response.id,
                             self.get_name(), upgrade_request)
            self.kb_append_uniq(self, 'websocket_hijacking', v)
            return True

        return False

    def check_is_restricted_by_origin(self, web_socket_url, web_socket_version):
        """
        Note that this method only makes sense if called in a loop with the
        other check_* methods.

        :param web_socket_url: The URL of the web socket
        :param web_socket_version: The protocol version

        :return: True if the web socket checks the origin for connections:
                    * Only the same origin can connect
                    * Send any cookie/basic auth known to the scanner
        """
        #
        # Keep in mind that we get here only if the websocket is NOT an open
        # (accepts any origin) socket. So we're in a situation where the socket
        # is either verifying by Origin+Cookies, Origin+Basic Auth or just
        # Origin.
        #
        # We want to check for the "just Origin" now
        #
        origin_domain = web_socket_url.get_domain()

        for scheme in {'http', 'https'}:
            origin = '%s://%s' % (scheme, origin_domain)
            upgrade_request = build_ws_upgrade_request(web_socket_url,
                                                       web_socket_version=web_socket_version,
                                                       origin=origin)
            upgrade_response = self._uri_opener.send_mutant(upgrade_request,
                                                            cookies=False,
                                                            use_basic_auth=False)

            if not is_successful_upgrade(upgrade_response):
                continue

            msg = ('An HTML5 WebSocket which allows connections only when the'
                   ' origin is set to "%s" was found at "%s"')
            msg %= (origin_domain, web_socket_url)

            v = Vuln.from_fr('Origin restricted WebSocket', msg, severity.LOW,
                             upgrade_response.id, self.get_name(),
                             upgrade_request)
            self.kb_append_uniq(self, 'websocket_hijacking', v)
            return True

        return False

    def check_need_basic_auth_origin_not_restricted(self, web_socket_url,
                                                    web_socket_version):
        """
        Note that this method only makes sense if called in a loop with the
        other check_* methods.

        :param web_socket_url: The URL of the web socket
        :param web_socket_version: The protocol version

        :return: True if the web socket does NOT check the origin for
                 connections but DOES require basic authentication to connect
        """
        #
        # Keep in mind that we get here only if:
        #   * The websocket is NOT an open (accepts any origin) socket
        #   * The websocket is NOT verifying by Origin
        #
        # So we're in one of these cases:
        #   * The websocket authenticates by cookie
        #   * The websocket authenticates by basic auth
        #
        # We want to check for the "authenticates by basic auth"
        #
        upgrade_request = build_ws_upgrade_request(web_socket_url,
                                                   web_socket_version=web_socket_version,
                                                   origin=self.W3AF_ORIGIN)
        upgrade_response = self._uri_opener.send_mutant(upgrade_request,
                                                        cookies=False,
                                                        # Note the True here!
                                                        use_basic_auth=True)

        if not is_successful_upgrade(upgrade_response):
            return False

        msg = 'Cross-Site WebSocket Hijacking has been found at "%s"'
        msg %= web_socket_url

        v = Vuln.from_fr('Websockets CSRF vulnerability', msg,
                         severity.HIGH, upgrade_response.id,
                         self.get_name(), upgrade_request)
        self.kb_append_uniq(self, 'websocket_hijacking', v)
        return True

    def check_need_cookie_origin_not_restricted(self, web_socket_url,
                                                web_socket_version):
        """
        Note that this method only makes sense if called in a loop with the
        other check_* methods.

        :param web_socket_url: The URL of the web socket
        :param web_socket_version: The protocol version

        :return: True if the web socket does NOT check the origin for
                 connections but DOES require cookies to connect
        """
        #
        # Keep in mind that we get here only if:
        #   * The websocket is NOT an open (accepts any origin) socket
        #   * The websocket is NOT verifying by Origin
        #
        # So we're in one of these cases:
        #   * The websocket authenticates by cookie
        #   * The websocket authenticates by basic auth
        #
        # We want to check for the "authenticates by cookie"
        #
        upgrade_request = build_ws_upgrade_request(web_socket_url,
                                                   web_socket_version=web_socket_version,
                                                   origin=self.W3AF_ORIGIN)
        upgrade_response = self._uri_opener.send_mutant(upgrade_request,
                                                        # Note the True here!
                                                        cookies=True,
                                                        use_basic_auth=False)

        if not is_successful_upgrade(upgrade_response):
            return False

        msg = 'Cross-Site WebSocket Hijacking has been found at "%s"'
        msg %= web_socket_url

        v = Vuln.from_fr('Websockets CSRF vulnerability', msg,
                         severity.HIGH, upgrade_response.id,
                         self.get_name(), upgrade_request)
        self.kb_append_uniq(self, 'websocket_hijacking', v)
        return True

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #19
0
File: find_vhosts.py Project: ZionOps/w3af
class find_vhosts(InfrastructurePlugin):
    """
    Modify the HTTP Host header and try to find virtual hosts.
    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._first_exec = True
        self._already_queried = ScalableBloomFilter()
        self._can_resolve_domain_names = False

    def discover(self, fuzzable_request):
        """
        Find virtual hosts.

        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        """
        analysis_result = self._analyze(fuzzable_request)
        self._report_results(fuzzable_request, analysis_result)

    def _analyze(self, fuzzable_request):
        vhost_list = []
        if self._first_exec:
            self._first_exec = False
            vhost_list.extend(self._generic_vhosts(fuzzable_request))

        # I also test for "dead links" that the web developer left in the
        # page. For example, if w3af finds a link to
        # "http://corporative.intranet.corp/" it will try to resolve the DNS
        # name; if that fails, it will try to request that page from the server
        vhost_list.extend(self._get_dead_links(fuzzable_request))
        return vhost_list

    def _report_results(self, fuzzable_request, analysis_result):
        """
        Report our findings
        """
        reported = set()
        for vhost, request_id in analysis_result:
            if vhost in reported:
                continue

            reported.add(vhost)

            domain = fuzzable_request.get_url().get_domain()
            desc = (
                u"Found a new virtual host at the target web server, the"
                u' virtual host name is: "%s". To access this site'
                u" you might need to change your DNS resolution settings"
                u' in order to point "%s" to the IP address of "%s".'
            )
            desc %= (vhost, vhost, domain)

            v = Vuln.from_fr(
                "Virtual host identified", desc, severity.LOW, request_id, self.get_name(), fuzzable_request
            )

            kb.kb.append(self, "find_vhosts", v)
            om.out.information(v.get_desc())

    def _get_dead_links(self, fuzzable_request):
        """
        Find every link in the HTML document and verify whether its domain
        can be resolved; after that, check whether we found a different name
        for the target site or a new linked site. If the link points to a
        dead site then report it (it could be pointing to some private
        address or something...)
        """
        # Get some responses to compare later
        base_url = fuzzable_request.get_url().base_url()
        original_response = self._uri_opener.GET(fuzzable_request.get_uri(), cache=True)
        base_response = self._uri_opener.GET(base_url, cache=True)
        base_resp_body = base_response.get_body()

        try:
            dp = parser_cache.dpc.get_document_parser_for(original_response)
        except BaseFrameworkException:
            # Failed to find a suitable parser for the document
            return []

        # Set the non existent response
        non_existent_response = self._get_non_exist(fuzzable_request)
        nonexist_resp_body = non_existent_response.get_body()

        # Note:
        # - With parsed_references I'm 100% sure that it's really something in
        #   the HTML that the developer intended to add.
        #
        # - The re_references are the result of regular expressions, which in
        #   some cases are just false positives.
        #
        # In this case, and because I'm only going to use the domain name of the
        # URL I'm going to trust the re_references also.
        parsed_references, re_references = dp.get_references()
        parsed_references.extend(re_references)

        res = []

        vhosts = self._verify_link_domain(parsed_references)

        for domain, vhost_response in self._send_in_threads(base_url, vhosts):

            vhost_resp_body = vhost_response.get_body()

            if fuzzy_not_equal(vhost_resp_body, base_resp_body, 0.35) and fuzzy_not_equal(
                vhost_resp_body, nonexist_resp_body, 0.35
            ):
                res.append((domain, vhost_response.id))
            else:
                desc = (
                    u'The content of "%s" references a non existent domain:'
                    u' "%s". This can be a broken link, or an internal'
                    u" domain name."
                )
                desc %= (fuzzable_request.get_url(), domain)

                i = Info(u"Internal hostname in HTML link", desc, original_response.id, self.get_name())
                i.set_url(fuzzable_request.get_url())

                kb.kb.append(self, "find_vhosts", i)
                om.out.information(i.get_desc())

        return res

    def _verify_link_domain(self, parsed_references):
        """
        Verify each link in parsed_references and yield the ones that can NOT
        be resolved using DNS.
        """
        for link in parsed_references:
            domain = link.get_domain()

            if domain not in self._already_queried:
                self._already_queried.add(domain)

                try:
                    socket.gethostbyname(domain)
                except socket.gaierror, se:
                    # raises exception when it's not found
                    if se.errno in (socket.EAI_NODATA, socket.EAI_NONAME):
                        yield domain
                except:
                    # We get here on other exceptions, an example is when the
                    # domain contains non-alnum chars
                    pass
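
The errno-based DNS check above can be isolated into a few lines. Note that socket.EAI_NODATA is not defined on every platform (e.g. Windows), so treat this as a Linux-flavoured sketch:

import socket

def is_dead_domain(domain):
    """True when the domain does not resolve to any address
    (NXDOMAIN or no A record)."""
    try:
        socket.gethostbyname(domain)
    except socket.gaierror, se:
        return se.errno in (socket.EAI_NODATA, socket.EAI_NONAME)
    except Exception:
        # e.g. domains containing non-alnum chars; not resolvable either,
        # but not a clean DNS "not found" answer
        return False
    return False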
Example #20
0
class shell_shock(AuditPlugin):
    """
    Find shell shock vulnerabilities.
    :author: Andres Riancho ([email protected])
    """
    DELAY_TESTS = [PingDelay('() { test; }; ping -c %s 127.0.0.1'),
                   ExactDelay('() { test; }; sleep %s')]

    def __init__(self):
        super(shell_shock, self).__init__()
        self.already_tested_urls = ScalableBloomFilter()

    def audit(self, freq, orig_response):
        """
        Tests a URL for shell shock vulnerabilities.

        :param freq: A FuzzableRequest
        """
        url = freq.get_url()

        # Here the script itself is vulnerable, not a specific parameter, so
        # we run one set of unique tests per URL
        if url not in self.already_tested_urls:
            self.already_tested_urls.add(url)

            # We are implementing these methods for detecting shell-shock
            # vulns; if you know about other methods or have improvements on
            # these, please let us know. Pull-requests are also welcome.
            for detection_method in [self._with_header_echo_injection,
                                     # self._with_body_echo_injection,
                                     self._with_time_delay]:
                if detection_method(freq):
                    break

    def _with_header_echo_injection(self, freq):
        """
        We're sending a payload that will trigger the injection of an extra
        header into the HTTP response.

        :param freq: A FuzzableRequest
        :return: True if a vulnerability was found
        """
        injected_header = 'shellshock'
        injected_value = 'check'
        payload = '() { :;}; echo "%s: %s"' % (injected_header, injected_value)

        mutant = self.create_mutant(freq, TEST_HEADER)
        mutant.set_token_value(payload)

        response = self._uri_opener.send_mutant(mutant)
        header_value, header_name = response.get_headers().iget(injected_header)

        if header_value is not None and injected_value in header_value.lower():
            desc = u'Shell shock was found at: %s' % mutant.found_at()

            v = Vuln.from_mutant(u'Shell shock vulnerability', desc,
                                 severity.HIGH, [response.id],
                                 self.get_name(), mutant)

            self.kb_append_uniq(self, 'shell_shock', v)
            return True

    def _with_body_echo_injection(self, freq):
        """
        We're sending a payload that will trigger the injection of new lines
        that will make the response transition from "headers" to "body".

        :param freq: A FuzzableRequest
        :return: True if a vulnerability was found
        """
        raise NotImplementedError

    def create_mutant(self, freq, header_name):
        headers = freq.get_headers()
        headers[header_name] = ''
        freq.set_headers(headers)

        fuzzer_config = {'fuzzable_headers': [TEST_HEADER]}

        mutant = HeadersMutant.create_mutants(freq, [''], [TEST_HEADER],
                                              False, fuzzer_config)[0]

        return mutant

    def _with_time_delay(self, freq):
        """
        Tests a URL for shell shock vulnerabilities using time delays.

        :param freq: A FuzzableRequest
        :return: True if a vulnerability was found
        """
        mutant = self.create_mutant(freq, TEST_HEADER)

        for delay_obj in self.DELAY_TESTS:
            ed = ExactDelayController(mutant, delay_obj, self._uri_opener)
            success, responses = ed.delay_is_controlled()

            if success:
                mutant.set_token_value(delay_obj.get_string_for_delay(3))
                desc = u'Shell shock was found at: %s' % mutant.found_at()

                v = Vuln.from_mutant(u'Shell shock vulnerability', desc,
                                     severity.HIGH, [r.id for r in responses],
                                     self.get_name(), mutant)

                self.kb_append_uniq(self, 'shell_shock', v)
                return True

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #21
0
File: php_eggs.py Project: 0x554simon/w3af
class php_eggs(InfrastructurePlugin):
    """
    Fingerprint the PHP version using documented easter eggs that exist in PHP.
    :author: Andres Riancho ([email protected])
    """
    PHP_EGGS = [('?=PHPB8B5F2A0-3C92-11d3-A3A9-4C7B08C10000', 'PHP Credits'),
                ('?=PHPE9568F34-D428-11d2-A769-00AA001ACF42', 'PHP Logo'),
                ('?=PHPE9568F35-D428-11d2-A769-00AA001ACF42', 'Zend Logo'),
                ('?=PHPE9568F36-D428-11d2-A769-00AA001ACF42', 'PHP Logo 2')]

    # Empty EGG_DB dict, will be filled with data from the external JSON file
    EGG_DB = {}

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Already analyzed extensions
        self._already_analyzed_ext = ScalableBloomFilter()

        # Internal DB
        self._db_file = os.path.join(ROOT_PATH, 'plugins', 'infrastructure',
                                     'php_eggs', 'eggs.json')

        # Get data from external JSON file and fill EGG_DB array
        data = self.read_jsondata(self._db_file)
        self.EGG_DB = self.fill_egg_array(data)

    def read_jsondata(self, jsonfile):
        """
        Read a JSON file and parse its contents.
        :return: Raw JSON data.
        """
        with open(jsonfile) as json_data:
            return json.load(json_data)

    def fill_egg_array(self, json_egg_data):
        """
        Fill a dict with data from the JSON input file.
        :return: A dict mapping PHP versions to their corresponding MD5 hashes.
        """
        egg_db = {}

        for egg in json_egg_data['db']:
            version = egg['version']
            egg_db[version] = {}

            for key in ('credits', 'php_1', 'php_2', 'zend'):
                if key in egg:
                    egg_db[version][key] = egg[key]

        return egg_db

    def discover(self, fuzzable_request):
        """
        Nothing strange, just do some GET requests to the eggs and analyze the
        response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        """
        # Get the extension of the URL (.html, .php, .. etc)
        ext = fuzzable_request.get_url().get_extension()

        # Only perform this analysis if we haven't already analyzed this type
        # of extension OR if we get a URL like http://f00b5r/4/ (note that it
        # has no extension). This logic will perform some extra tests... but
        # we won't miss some special cases. Also, we aren't doing something
        # like "if 'php' in ext:" because we never depend on something as easy
        # to modify as extensions to make decisions.
        if ext not in self._already_analyzed_ext:

            # Now we save the extension as one of the already analyzed
            self._already_analyzed_ext.add(ext)

            # Init some internal variables
            query_results = self._get_php_eggs(fuzzable_request, ext)

            if self._are_php_eggs(query_results):
                # analyze the info to see if we can identify the version
                self._extract_version_from_egg(query_results)
                raise NoMoreCalls

    def _get_php_eggs(self, fuzzable_request, ext):
        """
        HTTP GET the URLs for PHP Eggs
        :return: A list with the HTTP response objects
        """
        def http_get(fuzzable_request, (egg_url, egg_desc)):
            egg_url = fuzzable_request.get_url().uri2url().url_join(egg_url)
            response = self._uri_opener.GET(egg_url, cache=True, grep=False)
            return response, egg_url, egg_desc

        # Send the requests using threads:
        query_results = []

        http_get = one_to_many(http_get)
        fr_repeater = repeat(fuzzable_request)
        args_iterator = izip(fr_repeater, self.PHP_EGGS)
        pool_results = self.worker_pool.imap_unordered(http_get, args_iterator)

        for response, egg_URL, egg_desc in pool_results:
            eqr = EggQueryResult(response, egg_desc, egg_URL)
            query_results.append(eqr)

        return query_results
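
The fingerprinting that EGG_DB supports boils down to hashing each egg response body and looking the digest up against known versions. A standalone sketch using one of the egg query strings from PHP_EGGS (urllib2, Python 2; the function name is an assumption):

import hashlib
import urllib2

PHP_LOGO_EGG = '?=PHPE9568F34-D428-11d2-A769-00AA001ACF42'

def get_egg_md5(base_url):
    """Fetch the PHP logo easter egg and return the MD5 of the body,
    which can then be matched against a version -> hash database."""
    body = urllib2.urlopen(base_url + PHP_LOGO_EGG, timeout=10).read()
    return hashlib.md5(body).hexdigest()

# get_egg_md5('http://target.example/index.php') might return a digest
# that EGG_DB maps to a specific PHP release.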
Example #22
0
class VariantDB(object):
    """
    See the notes on PARAMS_MAX_VARIANTS and PATH_MAX_VARIANTS above. Also
    understand that we'll keep "dirty" versions of the references/fuzzable
    requests in order to be able to answer "False" to a call for
    need_more_variants in a situation like this:

        >> need_more_variants('http://foo.com/abc?id=32')
        True

        >> append('http://foo.com/abc?id=32')
        True

        >> need_more_variants('http://foo.com/abc?id=32')
        False

    """
    HASH_IGNORE_HEADERS = ('referer',)
    TAG = '[variant_db]'

    MAX_IN_MEMORY = 50

    def __init__(self):
        self._variants = CachedDiskDict(max_in_memory=self.MAX_IN_MEMORY,
                                        table_prefix='variant_db')
        self._variants_eq = ScalableBloomFilter()
        self._variants_form = CachedDiskDict(max_in_memory=self.MAX_IN_MEMORY,
                                             table_prefix='variant_db_form')

        self.params_max_variants = cf.cf.get('params_max_variants')
        self.path_max_variants = cf.cf.get('path_max_variants')
        self.max_equal_form_variants = cf.cf.get('max_equal_form_variants')

        self._db_lock = threading.RLock()

    def cleanup(self):
        self._variants.cleanup()
        self._variants_form.cleanup()

    def append(self, fuzzable_request):
        """
        :return: True if we added a new fuzzable request variant to the DB,
                 False if NO more variants are required for this fuzzable
                 request.
        """
        with self._db_lock:
            if self._seen_exactly_the_same(fuzzable_request):
                return False

            if self._has_form(fuzzable_request):
                if not self._need_more_variants_for_form(fuzzable_request):
                    return False

            if not self._need_more_variants_for_uri(fuzzable_request):
                return False

            # Yes, please give me more variants of fuzzable_request
            return True

    def _log_return_false(self, fuzzable_request, reason):
        args = (reason, fuzzable_request)
        msg = 'VariantDB is returning False because of "%s" for "%s"'
        om.out.debug(msg % args)

    def _need_more_variants_for_uri(self, fuzzable_request):
        #
        # Do we need more variants for the fuzzable request? (similar match)
        # PARAMS_MAX_VARIANTS and PATH_MAX_VARIANTS
        #
        clean_dict_key = clean_fuzzable_request(fuzzable_request)
        count = self._variants.get(clean_dict_key, None)

        if count is None:
            self._variants[clean_dict_key] = 1
            return True

        # We've seen at least one fuzzable request with this pattern...
        url = fuzzable_request.get_uri()
        has_params = url.has_query_string() or fuzzable_request.get_raw_data()

        # Choose which max_variants to use
        if has_params:
            max_variants = self.params_max_variants
            max_variants_type = 'params'
        else:
            max_variants = self.path_max_variants
            max_variants_type = 'path'

        if count >= max_variants:
            _type = 'need_more_variants_for_uri(%s)' % max_variants_type
            self._log_return_false(fuzzable_request, _type)
            return False

        self._variants[clean_dict_key] = count + 1
        return True

    def _seen_exactly_the_same(self, fuzzable_request):
        #
        # Is the fuzzable request already known to us? (exactly the same)
        #
        request_hash = fuzzable_request.get_request_hash(self.HASH_IGNORE_HEADERS)
        if request_hash in self._variants_eq:
            self._log_return_false(fuzzable_request, 'seen_exactly_the_same')
            return True

        # Store it to avoid duplicated fuzzable requests in our framework
        self._variants_eq.add(request_hash)
        return False

    def _has_form(self, fuzzable_request):
        raw_data = fuzzable_request.get_raw_data()
        if raw_data and len(raw_data.get_param_names()) >= 2:
            return True

        return False

    def _need_more_variants_for_form(self, fuzzable_request):
        #
        # Do we need more variants for this form? (similar match)
        # MAX_EQUAL_FORM_VARIANTS
        #
        clean_dict_key_form = clean_fuzzable_request_form(fuzzable_request)
        count = self._variants_form.get(clean_dict_key_form, None)

        if count is None:
            self._variants_form[clean_dict_key_form] = 1
            return True

        if count >= self.max_equal_form_variants:
            self._log_return_false(fuzzable_request, 'need_more_variants_for_form')
            return False

        self._variants_form[clean_dict_key_form] = count + 1
        return True
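
Ignoring persistence, locking and the form-specific path, the counting logic reduces to a set of exact hashes plus a bounded counter per URL pattern. A toy, in-memory sketch (hypothetical class):

from collections import defaultdict

class MiniVariantDB(object):
    """In-memory toy of VariantDB.append(): reject exact duplicates and
    allow at most max_variants requests per cleaned pattern."""

    def __init__(self, max_variants=5):
        self.max_variants = max_variants
        self._pattern_counts = defaultdict(int)
        self._seen_exact = set()

    def append(self, exact_key, pattern_key):
        if exact_key in self._seen_exact:
            return False                       # seen exactly the same before
        self._seen_exact.add(exact_key)

        if self._pattern_counts[pattern_key] >= self.max_variants:
            return False                       # enough variants of this shape
        self._pattern_counts[pattern_key] += 1
        return True

# db = MiniVariantDB(max_variants=1)
# db.append('GET /abc?id=32', '/abc?id=<number>')   -> True
# db.append('GET /abc?id=32', '/abc?id=<number>')   -> False (exact duplicate)
# db.append('GET /abc?id=99', '/abc?id=<number>')   -> False (pattern is full)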
Example #23
0
class content_negotiation(CrawlPlugin):
    """
    Use content negotiation to find new resources.
    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        CrawlPlugin.__init__(self)

        # User configured parameters
        self._wordlist = os.path.join(ROOT_PATH, 'plugins', 'crawl',
                                      'content_negotiation',
                                      'common_filenames.db')

        # Internal variables
        self._already_tested_dir = ScalableBloomFilter()
        self._already_tested_resource = ScalableBloomFilter()
        self._content_negotiation_enabled = None
        self._to_bruteforce = Queue.Queue()
        # I want to try 3 times to see if the remote host is vulnerable,
        # since the detection is not that accurate!
        self._tries_left = 3

    def crawl(self, fuzzable_request):
        """
        1- Check if HTTP server is vulnerable
        2- Exploit using FuzzableRequest
        3- Perform bruteforce for each new directory

        :param fuzzable_request: A fuzzable_request instance that contains
                                (among other things) the URL to test.
        """
        if self._content_negotiation_enabled is False:
            return

        else:
            con_neg_result = self._verify_content_neg_enabled(
                fuzzable_request)

            if con_neg_result is None:
                # I can't say if it's vulnerable or not (yet), save the current
                # directory to be included in the bruteforcing process, and
                # return.
                self._to_bruteforce.put(fuzzable_request.get_url())
                return

            elif not con_neg_result:
                # Not vulnerable, nothing else to do.
                return

            elif con_neg_result:
                # Happy, happy, joy!
                # Now we can test if we find new resources!
                self._find_new_resources(fuzzable_request)

                # and we can also perform a bruteforce:
                self._to_bruteforce.put(fuzzable_request.get_url())
                self._bruteforce()

    def _find_new_resources(self, fuzzable_request):
        """
        Based on a request like http://host.tld/backup.php, this method will
        find files like backup.zip, backup.old, etc., using the content
        negotiation technique.

        :return: None, new fuzzable requests are put on the output queue.
        """
        # Get the file name
        filename = fuzzable_request.get_url().get_file_name()
        if filename == '':
            return
        else:
            # The thing here is that I've found that if these files exist in
            # the directory:
            # - backup.asp.old
            # - backup.asp
            #
            # And I request "/backup", then both are returned. So I'll
            # request the "leftmost" filename.
            filename = filename.split('.')[0]

            # Now I simply perform the request:
            alternate_resource = fuzzable_request.get_url().url_join(filename)
            original_headers = fuzzable_request.get_headers()

            if alternate_resource not in self._already_tested_resource:
                self._already_tested_resource.add(alternate_resource)

                _, alternates = self._request_and_get_alternates(
                    alternate_resource,
                    original_headers)

                # And create the new fuzzable requests
                url = fuzzable_request.get_url()
                for fr in self._create_new_fuzzable_requests(url, alternates):
                    self.output_queue.put(fr)

    def _bruteforce(self):
        """
        Use some common words to bruteforce file names and find new resources.
        This process is done only once for every new directory.

        :return: None, new fuzzable requests are put on the output queue.
        """
        wl_url_generator = self._wordlist_url_generator()
        args_generator = izip(wl_url_generator, repeat(Headers()))

        # Send the requests using threads:
        for base_url, alternates in self.worker_pool.map_multi_args(
                self._request_and_get_alternates,
                args_generator, chunksize=10):

            for fr in self._create_new_fuzzable_requests(base_url, alternates):
                self.output_queue.put(fr)

    def _wordlist_url_generator(self):
        """
        Generator that returns alternate URLs to test by combining the following
        sources of information:
            - URLs in self._to_bruteforce
            - Words in the bruteforce wordlist file
        """
        while True:
            try:
                bf_url = self._to_bruteforce.get_nowait()
            except Queue.Empty:
                break
            else:
                directories = bf_url.get_directories()

                for directory_url in directories:
                    if directory_url not in self._already_tested_dir:
                        self._already_tested_dir.add(directory_url)

                        for word in open(self._wordlist):
                            word = word.strip()
                            yield directory_url.url_join(word)

    def _request_and_get_alternates(self, alternate_resource, headers):
        """
        Performs a request to an alternate resource, using the fake accept
        trick in order to retrieve the list of alternates, which is then
        returned.

        :return: A tuple with:
                    - alternate_resource parameter (unmodified)
                    - a list of strings containing the alternates.
        """
        headers['Accept'] = 'w3af/bar'
        response = self._uri_opener.GET(alternate_resource, headers=headers)

        alternates, _ = response.get_headers().iget('alternates')

        # And I parse the result
        if alternates:
            # An alternates header looks like this:
            # alternates: {"backup.php.bak" 1 {type application/x-trash} {length 0}},
            #             {"backup.php.old" 1 {type application/x-trash} {length 0}},
            #             {"backup.tgz" 1 {type application/x-gzip} {length 0}},
            #             {"backup.zip" 1 {type application/zip} {length 0}}
            #
            # All in the same line.
            return alternate_resource, re.findall('"(.*?)"', alternates)

        else:
            # something failed
            return alternate_resource, []

    def _create_new_fuzzable_requests(self, base_url, alternates):
        """
        With a list of alternate files, I create new fuzzable requests

        :param base_url: http://host.tld/some/dir/
        :param alternates: ['backup.old', 'backup.asp']

        :return: A list of fuzzable requests.
        """
        for alternate in alternates:
            # Get the new resource
            full_url = base_url.url_join(alternate)
            response = self._uri_opener.GET(full_url)

            if not is_404(response):
                yield FuzzableRequest(full_url)

    def _verify_content_neg_enabled(self, fuzzable_request):
        """
        Checks if the remote website is vulnerable or not. Saves the result in
        self._content_negotiation_enabled, because we want to perform this
        test only once.

        :return: True if vulnerable.
        """
        if self._content_negotiation_enabled is not None:
            # The test was already performed, we return the old response
            return self._content_negotiation_enabled

        else:
            # We perform the test; for this we need a URL that has a filename,
            # since URLs without a filename can't be used for this.
            filename = fuzzable_request.get_url().get_file_name()
            if filename == '':
                return None

            filename = filename.split('.')[0]

            # Now I simply perform the request:
            alternate_resource = fuzzable_request.get_url().url_join(filename)
            headers = fuzzable_request.get_headers()
            headers['Accept'] = 'w3af/bar'
            response = self._uri_opener.GET(alternate_resource, headers=headers)

            if response.get_headers().icontains('alternates'):
                # Even if there is only one file, with a unique MIME type,
                # the content negotiation will return an alternates header.
                # So this is pretty safe.

                # Save the result internally
                self._content_negotiation_enabled = True

                # Save the result as an info in the KB, for the user to see it:
                desc = ('HTTP Content negotiation is enabled in the remote web'
                        ' server. This could be used to bruteforce file names'
                        ' and find new resources.')
 
                i = Info('HTTP Content Negotiation enabled', desc, response.id,
                         self.get_name())
                i.set_url(response.get_url())
                
                kb.kb.append(self, 'content_negotiation', i)
                om.out.information(i.get_desc())
            else:
                om.out.information(
                    'The remote Web server has Content Negotiation disabled.')

                # I want to perform this test a couple of times... so I only
                # store False once that "couple of times" is exhausted
                self._tries_left -= 1
                if self._tries_left == 0:
                    # Save the FALSE result internally
                    self._content_negotiation_enabled = False
                else:
                    # None tells the plugin to keep trying with the next URL
                    return None

            return self._content_negotiation_enabled

    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        d1 = 'Word list to use in the file name brute forcing process.'
        o1 = opt_factory('wordlist', self._wordlist, d1, 'string')

        ol = OptionList()
        ol.add(o1)
        return ol

    def set_options(self, options_list):
        """
        This method sets all the options that are configured using the user
        interface generated by the framework using the result of get_options().

        :param options_list: A dictionary with the options for the plugin.
        :return: No value is returned.
        """
        wordlist = options_list['wordlist'].get_value()
        if os.path.exists(wordlist):
            self._wordlist = wordlist

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #24
class wsdl_finder(CrawlPlugin):
    """
    Find web service definitions files.

    :author: Andres Riancho ([email protected])
    """

    WSDL = ('?wsdl',
            '?WSDL')

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._already_tested = ScalableBloomFilter()

    def crawl(self, fuzzable_request):
        """
        If the URL is not in _already_tested, append ?wsdl and check the response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        url = fuzzable_request.get_url().uri2url()
        url_string = url.url_string

        if url_string not in self._already_tested:
            self._already_tested.add(url_string)

            wsdl_url_generator = self.wsdl_url_generator(url_string)

            self.worker_pool.map(self._do_request,
                                    wsdl_url_generator,
                                    chunksize=1)

    def wsdl_url_generator(self, url_string):
        for wsdl_parameter in self.WSDL:
            url_to_request = url_string + wsdl_parameter
            url_instance = URL(url_to_request)
            yield url_instance

    def _do_request(self, url_to_request):
        """
        Perform an HTTP request to the url_to_request parameter.
        :return: None.
        """
        try:
            self._uri_opener.GET(url_to_request, cache=True)
        except BaseFrameworkException:
            om.out.debug('Failed to request the WSDL file: ' + url_to_request)
        else:
            # The response is analyzed by the wsdlGreper plugin
            pass

    def get_plugin_deps(self):
        """
        :return: A list with the names of the plugins that should be run
                 before the current one.
        """
        return ['grep.wsdl_greper']

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #25
File: pykto.py Project: cathartic/w3af
class pykto(CrawlPlugin):
    """
    A nikto port to python.
    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        CrawlPlugin.__init__(self)

        # internal variables
        self._exec = True
        self._already_analyzed = ScalableBloomFilter()

        # User configured parameters
        self._db_file = os.path.join(ROOT_PATH, "plugins", "crawl", "pykto", "scan_database.db")
        self._extra_db_file = os.path.join(ROOT_PATH, "plugins", "crawl", "pykto", "w3af_scan_database.db")

        self._cgi_dirs = ["/cgi-bin/"]
        self._admin_dirs = ["/admin/", "/adm/"]

        self._users = [
            "adm",
            "bin",
            "daemon",
            "ftp",
            "guest",
            "listen",
            "lp",
            "mysql",
            "noaccess",
            "nobody",
            "nobody4",
            "nuucp",
            "operator",
            "root",
            "smmsp",
            "smtp",
            "sshd",
            "sys",
            "test",
            "unknown",
        ]

        self._nuke = ["/", "/postnuke/", "/postnuke/html/", "/modules/", "/phpBB/", "/forum/"]

        self._mutate_tests = False

    def crawl(self, fuzzable_request):
        """
        Runs pykto against the site.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        if not self._exec and not self._mutate_tests:
            # don't run anymore
            raise RunOnce()

        else:
            # Run the basic scan (only once)
            url = fuzzable_request.get_url().base_url()
            if url not in self._already_analyzed:
                self._already_analyzed.add(url)
                self._run(url)
                self._exec = False

            # And now mutate if the user configured it...
            if self._mutate_tests:

                # Tests need to be mutated
                url = fuzzable_request.get_url().get_domain_path()
                if url not in self._already_analyzed:
                    # Save the directories I already have tested in order to
                    # avoid testing them more than once...
                    self._already_analyzed.add(url)
                    self._run(url)

    def _run(self, url):
        """
        Really run the plugin.

        :param url: The URL object I have to test.
        """
        config = Config(self._cgi_dirs, self._admin_dirs, self._nuke, self._mutate_tests, self._users)

        for db_file in [self._db_file, self._extra_db_file]:

            parser = NiktoTestParser(db_file, config, url)

            # Send the requests using threads:
            self.worker_pool.map_multi_args(self._send_and_check, parser.test_generator(), chunksize=10)

    def _send_and_check(self, nikto_test):
        """
        This method sends the request to the server.

        :return: True if the requested URI responded as expected.
        """
        #
        #    Small performance improvement. If all we want to know is if the
        #    file exists or not, lets use HEAD instead of GET. In 99% of the
        #    cases this will work as expected and we'll have a significant
        #    performance improvement.
        #
        if nikto_test.is_vulnerable.checks_only_response_code():
            try:
                http_response = self._uri_opener.HEAD(nikto_test.uri)
            except Exception:
                return
            else:
                if not nikto_test.is_vulnerable.check(http_response):
                    return False

        function_ptr = getattr(self._uri_opener, nikto_test.method)

        try:
            http_response = function_ptr(nikto_test.uri)
        except BaseFrameworkException, e:
            msg = 'An exception was raised while requesting "%s", the error' ' message is: "%s".'
            om.out.error(msg % (nikto_test.uri, e))
            return False

        if nikto_test.is_vulnerable.check(http_response) and not is_404(http_response):

            vdesc = 'pykto plugin found a vulnerability at URL: "%s".' ' Vulnerability description: "%s".'
            vdesc = vdesc % (http_response.get_url(), nikto_test.message)

            v = Vuln("Insecure resource", vdesc, severity.LOW, http_response.id, self.get_name())
            v.set_uri(http_response.get_uri())
            v.set_method(nikto_test.method)

            kb.kb.append(self, "vuln", v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())

            fr = FuzzableRequest.from_http_response(http_response)
            self.output_queue.put(fr)
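
The HEAD-before-GET trick in _send_and_check is worth isolating. A rough sketch under the same assumptions as before (`requests` library, hypothetical URL): when a test only inspects the response code, a HEAD request discards misses without transferring a body, and the full GET is sent only on a hit.

import requests

def probe(url, expected_code=200):
    # Cheap existence check first: HEAD transfers no response body
    head = requests.head(url, allow_redirects=False)
    if head.status_code != expected_code:
        return None

    # Only now pay the cost of downloading the body
    return requests.get(url, allow_redirects=False)

response = probe('http://target.example/cgi-bin/test.cgi')
if response is not None:
    print('Potentially insecure resource: %s' % response.url)
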
Example #26
class shell_shock(AuditPlugin):
    """
    Find shell shock vulnerabilities.
    :author: Andres Riancho ([email protected])
    """
    DELAY_TESTS = [PingDelay('() { test; }; ping -c %s 127.0.0.1'),
                   ExactDelay('() { test; }; sleep %s')]

    def __init__(self):
        super(shell_shock, self).__init__()
        self.already_tested_urls = ScalableBloomFilter()

    def audit(self, freq, orig_response, debugging_id):
        """
        Tests a URL for shell shock vulnerabilities.

        :param freq: A FuzzableRequest
        :param orig_response: The HTTP response associated with the fuzzable request
        :param debugging_id: A unique identifier for this call to audit()
        """
        url = freq.get_url()

        # The script itself is vulnerable, not a specific parameter, so we
        # run the tests only once per URL
        if url not in self.already_tested_urls:
            self.already_tested_urls.add(url)

            # We are implementing these methods for detecting shell-shock vulns
            # if you know about other methods, or have improvements on these
            # please let us know. Pull-requests are also welcome.
            for detection_method in [self._with_header_echo_injection,
                                     #self._with_body_echo_injection,
                                     self._with_time_delay]:
                if detection_method(freq, debugging_id):
                    break

    def _with_header_echo_injection(self, freq, debugging_id):
        """
        We're sending a payload that will trigger the injection of an extra
        header into the HTTP response.

        :param freq: A FuzzableRequest
        :return: True if a vulnerability was found
        """
        injected_header = 'shellshock'
        injected_value = 'check'
        payload = '() { :;}; echo "%s: %s"' % (injected_header, injected_value)

        mutant = self.create_mutant(freq, TEST_HEADER)
        mutant.set_token_value(payload)

        response = self._uri_opener.send_mutant(mutant, debugging_id=debugging_id)
        header_value, header_name = response.get_headers().iget(injected_header)

        if header_value is not None and injected_value in header_value.lower():
            desc = u'Shell shock was found at: %s' % mutant.found_at()

            v = Vuln.from_mutant(u'Shell shock vulnerability', desc,
                                 severity.HIGH, [response.id],
                                 self.get_name(), mutant)

            self.kb_append_uniq(self, 'shell_shock', v)
            return True

    def _with_body_echo_injection(self, freq, debugging_id):
        """
        We're sending a payload that will trigger the injection of new lines
        that will make the response transition from "headers" to "body".

        :param freq: A FuzzableRequest
        :return: True if a vulnerability was found
        """
        raise NotImplementedError

    def create_mutant(self, freq, header_name):
        headers = freq.get_headers()
        headers[header_name] = ''
        freq.set_headers(headers)

        fuzzer_config = {'fuzzable_headers': [TEST_HEADER]}

        mutant = HeadersMutant.create_mutants(freq, [''], [TEST_HEADER],
                                              False, fuzzer_config)[0]

        return mutant

    def _with_time_delay(self, freq, debugging_id):
        """
        Tests a URL for shell shock vulnerabilities using time delays.

        :param freq: A FuzzableRequest
        :return: True if a vulnerability was found
        """
        self._send_mutants_in_threads(func=self._find_delay_in_mutant,
                                      iterable=self._generate_delay_tests(freq, debugging_id),
                                      callback=lambda x, y: None)

    def _generate_delay_tests(self, freq, debugging_id):
        for delay_obj in self.DELAY_TESTS:
            mutant = self.create_mutant(freq, TEST_HEADER)
            yield mutant, delay_obj, debugging_id

    def _find_delay_in_mutant(self, (mutant, delay_obj, debugging_id)):
        """
        Try to delay the response and save a vulnerability if successful

        :param mutant: The mutant to modify and test
        :param delay_obj: The delay to use
        :param debugging_id: The debugging ID for logging
        """
        ed = ExactDelayController(mutant, delay_obj, self._uri_opener)
        ed.set_debugging_id(debugging_id)
        success, responses = ed.delay_is_controlled()

        if not success:
            return False

        mutant.set_token_value(delay_obj.get_string_for_delay(3))
        desc = u'Shell shock was found at: %s' % mutant.found_at()

        v = Vuln.from_mutant(u'Shell shock vulnerability', desc,
                             severity.HIGH, [r.id for r in responses],
                             self.get_name(), mutant)

        self.kb_append_uniq(self, 'shell_shock', v)
        return True
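
The header echo detection can be sketched outside the plugin as follows, assuming a hypothetical CGI endpoint and using User-Agent as the injection point (the plugin injects into TEST_HEADER instead). The payload asks bash to emit an extra HTTP response header; seeing that header come back proves the payload was executed.

import requests

PAYLOAD = '() { :;}; echo "shellshock: check"'

def is_shellshock_vulnerable(url):
    response = requests.get(url, headers={'User-Agent': PAYLOAD})

    # requests headers are case-insensitive, like w3af's iget()
    injected = response.headers.get('shellshock', '')
    return 'check' in injected.lower()

print(is_shellshock_vulnerable('http://target.example/cgi-bin/status'))
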
Example #27
class hash_analysis(GrepPlugin):
    """
    Identify hashes in HTTP responses.

    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        GrepPlugin.__init__(self)

        self._already_reported = ScalableBloomFilter()

        # regex to split between words
        self._split_re = re.compile('[^\w]')

    def grep(self, request, response):
        """
        Plugin entry point, identify hashes in the HTTP response.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        # I know that by doing this I lose the chance of finding hashes in
        # PDF files, but... this is much faster
        if not response.is_text_or_html():
            return

        body = response.get_body()
        splitted_body = self._split_re.split(body)
        for possible_hash in splitted_body:

            #    This is a performance enhancement that cuts the execution
            #    time of this plugin in half.
            if len(possible_hash) < 31 or len(possible_hash) > 129:
                continue

            hash_type = self._get_hash_type(possible_hash)
            if not hash_type:
                continue

            possible_hash = possible_hash.lower()
            if self._has_hash_distribution(possible_hash):
                if (possible_hash, response.get_url()) not in self._already_reported:
                    desc = 'The URL: "%s" returned a response that may contain'\
                          ' a "%s" hash. The hash string is: "%s". This is'\
                          ' uncommon and requires human verification.'
                    desc = desc % (response.get_url(), hash_type, possible_hash)
                    
                    i = Info('Hash string in HTML content', desc,
                             response.id, self.get_name())
                    i.set_url(response.get_url())
                    i.add_to_highlight(possible_hash)
                    
                    self.kb_append(self, 'hash_analysis', i)

                    self._already_reported.add( (possible_hash,
                                                 response.get_url()) )

    def _has_hash_distribution(self, possible_hash):
        """
        :param possible_hash: A string that may be a hash.
        :return: True if the possible_hash has an approximately equal
        distribution of numbers and letters, and only contains hex
        characters (0-9, a-f)
        """
        numbers = 0
        letters = 0
        for char in possible_hash:
            if char.isdigit():
                numbers += 1
            elif char in 'abcdef':
                letters += 1
            else:
                return False

        if numbers in range(letters - len(possible_hash) / 2, letters + len(possible_hash) / 2):
            # Seems to be a hash, let's make a final test to avoid false positives with
            # strings like:
            # 2222222222222222222aaaaaaaaaaaaa
            is_hash = True
            for char in possible_hash:
                if possible_hash.count(char) > len(possible_hash) / 5:
                    is_hash = False
                    break
            return is_hash

        else:
            return False

    def _get_hash_type(self, possible_hash):
        """
        :param possible_hash: A string that may be a hash.
        :return: The hash type if the string seems to be a md5 / sha1 hash.
        None otherwise.
        """
        # When adding something here, please review the code above where
        # we also check the length.
        hash_type_len = {
                         'MD5': 32,
                         'SHA1': 40,
                         'SHA224': 56,
                         'SHA256': 64,
                         'SHA384': 96,
                         'SHA512': 128,
                         }
        for hash_type, hash_len in hash_type_len.items():                
            if len(possible_hash) == hash_len:
                return hash_type
            
        return None

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #28
File: digit_sum.py Project: intfrr/Tortazo
class digit_sum(CrawlPlugin):
    """
    Take a URL with a number (index2.asp) and try to find related files
    (index1.asp, index3.asp).

    :author: Andres Riancho ([email protected])
    """
    def __init__(self):
        CrawlPlugin.__init__(self)
        self._already_visited = ScalableBloomFilter()

        # User options
        self._fuzz_images = False
        self._max_digit_sections = 4

    def crawl(self, fuzzable_request):
        """
        Searches for new URLs by adding and subtracting numbers in the URL
        and its parameters.

        :param fuzzable_request: A fuzzable_request instance that contains
                                     (among other things) the URL to test.
        """
        url = fuzzable_request.get_url()
        headers = Headers([('Referer', url.url_string)])

        original_response = self._uri_opener.GET(fuzzable_request.get_uri(),
                                                 cache=True,
                                                 headers=headers)

        if original_response.is_text_or_html() or self._fuzz_images:

            fr_generator = self._mangle_digits(fuzzable_request)
            response_repeater = repeat(original_response)
            header_repeater = repeat(headers)

            args = izip(fr_generator, response_repeater, header_repeater)

            self.worker_pool.map_multi_args(self._do_request, args)

            # I add myself so the next call to this plugin won't find me ...
            # Example: index1.html ---> index2.html --!!--> index1.html
            self._already_visited.add(fuzzable_request.get_uri())

    def _do_request(self, fuzzable_request, original_resp, headers):
        """
        Send the request.

        :param fuzzable_request: The modified fuzzable request
        :param original_resp: The response for the original request that was
                              sent.
        """

        response = self._uri_opener.GET(fuzzable_request.get_uri(),
                                        cache=True,
                                        headers=headers)

        add = False

        if not is_404(response):
            # We have different cases:
            #    - If the URLs are different, then there is nothing to think
            #      about, we simply found something new!
            if response.get_url() != original_resp.get_url():
                add = True

            #    - If the content type changed, then there is no doubt that
            #      we've found something new!
            elif response.doc_type != original_resp.doc_type:
                add = True

            #    - If we changed the query string parameters, we have to check
            #      the content
            elif relative_distance_lt(response.get_clear_text_body(),
                                      original_resp.get_clear_text_body(),
                                      0.8):
                # In this case what might happen is that the number we changed
                # is "out of range" and when requesting that it will trigger an
                # error in the web application, or show us a non-interesting
                # response that holds no content.
                #
                # We choose to return these to the core because they might help
                # with the code coverage efforts. Think about something like:
                #     foo.aspx?id=OUT_OF_RANGE&foo=inject_here
                # vs.
                #     foo.aspx?id=IN_RANGE&foo=inject_here
                #
                # This relates to the EXPECTED_URLS in test_digit_sum.py
                add = True

        if add:
            for fr in self._create_fuzzable_requests(response):
                self.output_queue.put(fr)

    def _mangle_digits(self, fuzzable_request):
        """
        Mangle the digits (if any) in the fr URL.

        :param fuzzable_request: The original FuzzableRequest
        :return: A generator which returns mangled fuzzable requests
        """
        # First I'll mangle the digits in the URL file
        filename = fuzzable_request.get_url().get_file_name()
        domain_path = fuzzable_request.get_url().get_domain_path()
        for fname in self._do_combinations(filename):
            fr_copy = fuzzable_request.copy()
            fr_copy.set_url(domain_path.url_join(fname))

            if fr_copy.get_uri() not in self._already_visited:
                self._already_visited.add(fr_copy.get_uri())

                yield fr_copy

        # Now I'll mangle the query string variables
        if fuzzable_request.get_method() == 'GET':
            for parameter in fuzzable_request.get_dc():

                # to support repeated parameter names...
                for element_index in xrange(
                        len(fuzzable_request.get_dc()[parameter])):

                    combinations = self._do_combinations(
                        fuzzable_request.get_dc()[parameter][element_index])
                    for modified_value in combinations:

                        fr_copy = fuzzable_request.copy()
                        new_dc = fr_copy.get_dc()
                        new_dc[parameter][element_index] = modified_value
                        fr_copy.set_dc(new_dc)

                        if fr_copy.get_uri() not in self._already_visited:
                            self._already_visited.add(fr_copy.get_uri())
                            yield fr_copy

    def _do_combinations(self, a_string):
        """
        >>> ds = digit_sum()
        >>> ds._do_combinations( 'abc123' )
        ['abc124', 'abc122']

        >>> ds._do_combinations( 'abc123def56' )
        ['abc124def56', 'abc122def56', 'abc123def57', 'abc123def55']

        """
        res = []
        splitted = self._find_digits(a_string)
        if len(splitted) <= 2 * self._max_digit_sections:
            for i in xrange(len(splitted)):
                if splitted[i].isdigit():
                    splitted[i] = str(int(splitted[i]) + 1)
                    res.append(''.join(splitted))
                    splitted[i] = str(int(splitted[i]) - 2)
                    res.append(''.join(splitted))

                    # restore the initial value for next loop
                    splitted[i] = str(int(splitted[i]) + 1)

        return res

    def _find_digits(self, a_string):
        """
        Finds digits in a string and returns a list with string sections.

        >>> ds = digit_sum()
        >>> ds._find_digits('foo45')
        ['foo', '45']

        >>> ds._find_digits('f001bar112')
        ['f', '001', 'bar', '112']

        :return: A list of strings.
        """
        # regexes are soooooooooooooo cool !
        return [x for x in re.split(r'(\d+)', a_string) if x != '']

    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()

        d = 'Apply URL fuzzing to all URLs, including images, videos, zip, etc.'
        h = 'It\'s safe to leave this option as the default.'
        o = opt_factory('fuzzImages', self._fuzz_images, d, 'boolean', help=h)
        ol.add(o)

        d = 'Set the maximum number of digit sections to fuzz'
        h = 'It\'s safe to leave this option as the default. For example, with maxDigitSections'
        h += ' = 1, this string won\'t be fuzzed: abc123def234; but this one will: abc23ldd.'
        o = opt_factory('maxDigitSections',
                        self._max_digit_sections,
                        d,
                        'integer',
                        help=h)
        ol.add(o)

        return ol

    def set_options(self, options_list):
        """
        This method sets all the options that are configured using the user interface
        generated by the framework using the result of get_options().

        :param options_list: A dictionary with the options for the plugin.
        :return: No value is returned.
        """
        self._fuzz_images = options_list['fuzzImages'].get_value()
        self._max_digit_sections = options_list['maxDigitSections'].get_value()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #29
class find_dvcs(CrawlPlugin):
    """
    Search for Git, Mercurial (HG), Bazaar (BZR), Subversion (SVN) and CVS
    repositories and extract filenames from the metadata they expose.

    :author: Adam Baldwin ([email protected])
    :author: Tomas Velazquez (tomas.velazquezz - gmail.com)
    """

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()
        self._analyzed_filenames = ScalableBloomFilter()

        self._dvcs = {}
        self._dvcs['git repository'] = {}
        self._dvcs['git ignore'] = {}
        self._dvcs['hg repository'] = {}
        self._dvcs['hg ignore'] = {}
        self._dvcs['bzr repository'] = {}
        self._dvcs['bzr ignore'] = {}
        self._dvcs['svn repository'] = {}
        self._dvcs['svn ignore'] = {}
        self._dvcs['cvs repository'] = {}
        self._dvcs['cvs ignore'] = {}

        self._dvcs['git repository']['filename'] = '.git/index'
        self._dvcs['git repository']['function'] = self.git_index

        self._dvcs['git ignore']['filename'] = '.gitignore'
        self._dvcs['git ignore']['function'] = self.ignore_file

        self._dvcs['hg repository']['filename'] = '.hg/dirstate'
        self._dvcs['hg repository']['function'] = self.hg_dirstate

        self._dvcs['hg ignore']['filename'] = '.hgignore'
        self._dvcs['hg ignore']['function'] = self.ignore_file

        self._dvcs['bzr repository']['filename'] = '.bzr/checkout/dirstate'
        self._dvcs['bzr repository']['function'] = self.bzr_checkout_dirstate

        self._dvcs['bzr ignore']['filename'] = '.bzrignore'
        self._dvcs['bzr ignore']['function'] = self.ignore_file

        self._dvcs['svn repository']['filename'] = '.svn/entries'
        self._dvcs['svn repository']['function'] = self.svn_entries

        self._dvcs['svn ignore']['filename'] = '.svnignore'
        self._dvcs['svn ignore']['function'] = self.ignore_file

        self._dvcs['cvs repository']['filename'] = 'CVS/Entries'
        self._dvcs['cvs repository']['function'] = self.cvs_entries

        self._dvcs['cvs ignore']['filename'] = '.cvsignore'
        self._dvcs['cvs ignore']['function'] = self.ignore_file

    def crawl(self, fuzzable_request):
        """
        For every directory, fetch a list of files and analyze the response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        domain_path = fuzzable_request.get_url().get_domain_path()

        if domain_path not in self._analyzed_dirs:
            self._analyzed_dirs.add(domain_path)

            test_generator = self._url_generator(domain_path)
            self.worker_pool.map_multi_args(self._send_and_check,
                                               test_generator)

    def _url_generator(self, domain_path):
        """
        Based on the domain path, generate the repository URLs that need to
        be tested.

        :return: Tuples of (repo_url, parser function, repo name, domain_path)
        """
        for repo in self._dvcs.keys():
            repo_url = domain_path.url_join(self._dvcs[repo]['filename'])
            function = self._dvcs[repo]['function']
            yield repo_url, function, repo, domain_path

    def _clean_filenames(self, filenames):
        """
        Filter some characters from filenames.

        :return: A cleaned set of filenames.
        """
        resources = set()

        for line in filenames:
            if line.startswith('/'):
                line = line[1:]
            if line.startswith('./'):
                line = line[2:]
            if line.endswith('/'):
                line = line[:-1]

            resources.add(line)

        return resources

    def _send_and_check(self, repo_url, repo_get_files, repo, domain_path):
        """
        Check if a repository index exists in the domain_path.

        :return: None, everything is saved to self.output_queue.
        """
        http_response = self.http_get_and_parse(repo_url)

        if not is_404(http_response):

            filenames = repo_get_files(http_response.get_body())

            parsed_url_set = set()

            for filename in self._clean_filenames(filenames):
                test_url = domain_path.url_join(filename)
                if test_url not in self._analyzed_filenames:
                    parsed_url_set.add(test_url)
                    self._analyzed_filenames.add(filename)

            self.worker_pool.map(self.http_get_and_parse, parsed_url_set)

            if parsed_url_set:
                desc = 'A %s was found at: "%s"; this could indicate that'\
                       ' a %s is accessible. You might be able to download'\
                       ' the Web application source code.'
                desc = desc % (repo, http_response.get_url(), repo)
                
                v = Vuln('Source code repository', desc, severity.MEDIUM,
                         http_response.id, self.get_name())
                v.set_url(http_response.get_url())
                
                kb.kb.append(self, repo, v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())

    def git_index(self, body):
        """
        Analyze the contents of the Git index and extract filenames.

        :param body: The contents of the file to analyze.
        :return: A set of filenames found.
        """
        filenames = set()
        signature = 'DIRC'
        offset = 12

        if body[:4] != signature:
            return set()

        version, = struct.unpack('>I', body[4:8])
        index_entries, = struct.unpack('>I', body[8:12])

        if version == 2:
            filename_offset = 62
        elif version == 3:
            filename_offset = 63
        else:
            return filenames

        for _ in range(0, index_entries):
            offset += filename_offset - 1
            length, = struct.unpack('>B', body[offset:offset + 1])
            if length > (len(body) - offset):
                return set()
            filename = body[offset + 1:offset + 1 + length]
            padding = 8 - ((filename_offset + length) % 8)
            filenames.add(filename)
            offset += length + 1 + padding

        return filenames

    def hg_dirstate(self, body):
        """
        Analyze the contents of the HG dirstate and extract filenames.

        :param body: The contents of the file to analyze.
        :return: A set of filenames found.
        """
        filenames = set()
        offset = 53

        while offset < len(body):
            length, = struct.unpack('>I', body[offset:offset + 4])
            if length > (len(body) - offset):
                return set()
            offset += 4
            filename = body[offset:offset + length]
            offset += length + 13
            filenames.add(filename)

        return filenames

    def bzr_checkout_dirstate(self, body):
        """
        Analyze the contents of the BZR dirstate and extract filenames.

        :param body: The contents of the file to analyze.
        :return: A set of filenames found.
        """
        filenames = set()
        header = '#bazaar dirstate flat format '

        if body[0:29] != header:
            return set()

        body = body.split('\x00')
        found = True
        for offset in range(0, len(body)):
            filename = body[offset - 2]
            if body[offset] == 'd':
                if found:
                    filenames.add(filename)
                found = not found
            elif body[offset] == 'f':
                if found:
                    filenames.add(filename)
                found = not found

        return filenames

    def svn_entries(self, body):
        """
        Analyze the contents of the SVN entries and extract filenames.

        :param body: The contents of the file to analyze.
        :return: A set of filenames found.
        """
        filenames = set()
        lines = body.split('\n')
        offset = 29

        while offset < len(lines):
            line = lines[offset].strip()
            filename = lines[offset - 1].strip()
            if line == 'file':
                filenames.add(filename)
                offset += 34
            elif line == 'dir':
                filenames.add(filename)
                offset += 3

        return filenames

    def cvs_entries(self, body):
        """
        Analyze the contents of the CVS entries and extract filenames.

        :param body: The contents of the file to analyze.
        :return: A set of filenames found.
        """
        filenames = set()

        for line in body.split('\n'):
            if '/' in line:
                slashes = line.split('/')
                if len(slashes) != 6:
                    continue
                filenames.add(slashes[1])

        return filenames

    def filter_special_character(self, line):
        """
        Strip wildcard and regexp metacharacters from a line and extract the
        plain filename or directory part.

        :param line: A filename or directory that may contain wildcards.
        :return: A plain filename or directory.
        """
        special_characters = ['*', '?', '[', ']', ':']

        for char in special_characters:
            if char in line:
                l = line.split(char)[0]
                if '/' in l:
                    line = '/'.join(l.split('/')[:-1])
                else:
                    line = ''
                    break

        return line

    def ignore_file(self, body):
        """
        Analyze the contents of the Git, HG, BZR, SVN and CVS ignore file
        and extract filenames.

        :param body: The contents of the file to analyze.
        :return: A set of filenames found.
        """
        filenames = set()
        for line in body.split('\n'):

            line = line.strip()

            if line.startswith('#') or line == '':
                continue

            line = self.filter_special_character(line)
            if not line:
                continue

            if line.startswith('/') or line.startswith('^'):
                line = line[1:]
            if line.endswith('/') or line.endswith('$'):
                line = line[:-1]

            filenames.add(line)

        return filenames

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #30
class payment_webhook_finder(CrawlPlugin):
    """
    Find hidden payment gateway webhooks.

    :author: Coiffey Pierre ([email protected])
    """
    _dirs = {
        '/', '/inc/', '/include/', '/include/pay/', '/includes/',
        '/includes/pay/', '/lib/', '/libraries/', '/module/', '/module/pay/',
        '/modules/', '/modules/pay/', '/payment/', '/shop/', '/store/',
        '/svc/', '/servlet/', '/cgi/', '/cgi-bin/', '/cgibin/'
    }

    _files = {
        'pay', 'payment', 'success', 'paymentsuccess', 'paymentcomplete',
        'paymentsuccessful', 'successful', 'paid', 'return', 'valid',
        'validpay', 'validate', 'validatepayment', 'validatepay', 'validation',
        'complete', 'completepay', 'completepayment', 'trxcomplete',
        'transactioncomplete', 'final', 'finished'
    }

    _exts = {
        '', 'php', 'asp', 'aspx', 'jsp', 'py', 'pl', 'rb', 'cgi', 'php3',
        'php4', 'php5'
    }

    _methods = {'GET', 'POST'}

    MIN_URL_COUNT_FOR_EXTENSION_FILTER = 100

    def __init__(self):
        CrawlPlugin.__init__(self)
        self._already_tested = ScalableBloomFilter()

    def crawl(self, fuzzable_request, debugging_id):
        """
        Searches for new URLs using fuzzing.

        :param debugging_id: A unique identifier for this call to crawl()
        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        uri = fuzzable_request.get_url()
        url = uri.uri2url()

        exts_to_append = self._get_extensions_for_fuzzing()

        url_generator = self._mutate_path(url, self._dirs, self._files,
                                          exts_to_append)
        url_generator = self._test_once_filter(url_generator)

        url_repeater = repeat(url)
        args = izip(url_repeater, url_generator)

        self.worker_pool.map_multi_args(self._send_requests, args)

    def _get_extensions_for_fuzzing(self):
        """
        This method is a performance improvement that reduces the number of HTTP
        requests sent by the plugin.

        When there are enough samples in kb.kb.get_all_known_urls() the method
        will only return a sub-set of the URL filename extensions to perform
        fuzzing on.

        :return: A set containing the extensions to use during fuzzing
        """
        if len(kb.kb.get_all_known_urls()) < self.MIN_URL_COUNT_FOR_EXTENSION_FILTER:
            return self._exts

        site_url_extensions = get_url_extensions_from_kb()
        return site_url_extensions.intersection(self._exts)

    def _send_requests(self, url, mutant):
        """
        Perform a GET and POST request to check if the endpoint exists
        """
        for method in self._methods:
            functor = getattr(self._uri_opener, method)
            self._send_request(functor, url, mutant)

    def _send_request(self, functor, url, mutant):
        response = functor(mutant, cache=True)

        if is_404(response):
            return

        # Create the fuzzable request and send it to the core
        fr = FuzzableRequest.from_http_response(response)
        self.output_queue.put(fr)

        #
        # Save it to the kb!
        #
        desc = 'A potentially interesting URL was found at: "%s".'
        desc %= response.get_url()

        i = Info('Potentially interesting URL', desc, response.id,
                 self.get_name())
        i.set_url(response.get_url())

        kb.kb.append_uniq(self, 'url', i, filter_by='URL')
        om.out.information(i.get_desc())

    def _test_once_filter(self, mutated_url_path_generator):
        for mutated_url_path in mutated_url_path_generator:

            is_new = self._already_tested.add(mutated_url_path)

            if is_new:
                yield mutated_url_path

    def _mutate_path(self, url, dirs_to_append, files_to_append,
                     exts_to_append):
        """
        Mutate the path of the URL.

        :param url: A URL to transform.
        :return: A generator of URLs derived from the original URL
        """
        url_string = url.url_string

        if url_string.count('/') <= 1:
            return

        # Create the new path
        url_string = url_string[:url_string.rfind('/')]

        for dir_to_append in dirs_to_append:
            for file_to_append in files_to_append:
                for ext_to_append in exts_to_append:

                    if ext_to_append:
                        ext_to_append = '.%s' % ext_to_append

                    args = (url_string, dir_to_append, file_to_append,
                            ext_to_append)
                    url_str = '%s%s%s%s' % args

                    new_url = URL(url_str)

                    yield new_url

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #31
class dot_listing(CrawlPlugin):
    """
    Search for .listing files and extract new filenames from them.
    :author: Tomas Velazquez ( [email protected] )
    """
    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()

        # -rw-r--r--    1 andresr   w3af         8139 Apr 12 13:23 foo.zip
        regex_str = '[a-z-]{10}\s*\d+\s*(.*?)\s+(.*?)\s+\d+\s+\w+\s+\d+\s+[0-9:]{4,5}\s+(.*)'
        self._listing_parser_re = re.compile(regex_str)

    def crawl(self, fuzzable_request):
        """
        For every directory, fetch the .listing file and analyze the response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        for domain_path in fuzzable_request.get_url().get_directories():
            if domain_path not in self._analyzed_dirs:
                self._analyzed_dirs.add(domain_path)
                self._check_and_analyze(domain_path)

    def _check_and_analyze(self, domain_path):
        """
        Check if a .listing filename exists in the domain_path.
        :return: None, everything is saved to self.output_queue.
        """
        # Request the file
        url = domain_path.url_join('.listing')
        try:
            response = self._uri_opener.GET(url, cache=True)
        except BaseFrameworkException, w3:
            msg = ('Failed to GET .listing file: "%s". Exception: "%s".')
            om.out.debug(msg % (url, w3))
            return

        # Check if it's a .listing file
        if not is_404(response):

            for fr in self._create_fuzzable_requests(response):
                self.output_queue.put(fr)

            parsed_url_set = set()
            users = set()
            groups = set()

            extracted_info = self._extract_info_from_listing(
                response.get_body())
            for username, group, filename in extracted_info:
                if filename != '.' and filename != '..':
                    parsed_url_set.add(domain_path.url_join(filename))
                    users.add(username)
                    groups.add(group)

            self.worker_pool.map(self.http_get_and_parse, parsed_url_set)

            if parsed_url_set:
                desc = 'A .listing file was found at: "%s". The contents'\
                       ' of this file disclose filenames.'
                desc = desc % (response.get_url())

                v = Vuln('.listing file found', desc, severity.LOW,
                         response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'dot_listing', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())

            real_users = set([u for u in users if not u.isdigit()])
            real_groups = set([g for g in groups if not g.isdigit()])

            if real_users or real_groups:
                desc = 'A .listing file which leaks operating system usernames' \
                       ' and groups was identified at %s. The leaked users are %s,' \
                       ' and the groups are %s. This information can be used' \
                       ' during a bruteforce attack to the Web application,' \
                       ' SSH or FTP services.'
                desc = desc % (v.get_url(), ', '.join(real_users),
                               ', '.join(real_groups))

                v = Vuln('Operating system username and group leak', desc,
                         severity.LOW, response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'dot_listing', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
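
The .listing format follows the `ls -l` column layout, so the single regular expression compiled in __init__ recovers the owner, group and filename columns. A self-contained demonstration on the sample line from the comment above:

import re

LISTING_RE = re.compile(
    r'[a-z-]{10}\s*\d+\s*(.*?)\s+(.*?)\s+\d+\s+\w+\s+\d+\s+[0-9:]{4,5}\s+(.*)')

line = '-rw-r--r--    1 andresr   w3af         8139 Apr 12 13:23 foo.zip'
match = LISTING_RE.match(line)
if match:
    user, group, filename = match.groups()
    print((user, group, filename))  # ('andresr', 'w3af', 'foo.zip')
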
Example #32
class find_backdoors(CrawlPlugin):
    """
    Find web backdoors and web shells.

    :author: Andres Riancho ([email protected])
    """
    WEBSHELL_DB = os.path.join(CRAWL_PATH, 'find_backdoors', 'web_shells.txt')
    SIGNATURE_DB = os.path.join(CRAWL_PATH, 'find_backdoors', 'signatures.txt')

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()
        self._signature_re = None

    def setup(self):
        with self._plugin_lock:
            if self._signature_re is not None:
                return

            signatures = self._read_signatures()
            self._signature_re = multi_re(signatures, hint_len=2)

    def _read_signatures(self):
        for line in file(self.SIGNATURE_DB):
            line = line.strip()

            if not line:
                continue

            if line.startswith('#'):
                continue

            yield (line, 'Backdoor signature')

    def crawl(self, fuzzable_request):
        """
        For every directory, fetch a list of shell files and analyze the
        response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        domain_path = fuzzable_request.get_url().get_domain_path()

        if domain_path not in self._analyzed_dirs:
            self._analyzed_dirs.add(domain_path)

            self.setup()

            # Read the web shell database
            web_shells = self._iter_web_shells()

            # Send the requests using threads:
            args_iter = (domain_path.url_join(fname) for fname in web_shells)
            self.worker_pool.map(self._check_if_exists, args_iter)

    def _iter_web_shells(self):
        """
        :yield: lines from the web shell DB
        """
        for line in file(self.WEBSHELL_DB):
            line = line.strip()

            if line.startswith('#'):
                continue

            if not line:
                continue

            yield line

    def _check_if_exists(self, web_shell_url):
        """
        Check if the file exists.

        :param web_shell_url: The URL to check
        """
        try:
            response = self._uri_opener.GET(web_shell_url, cache=True)
        except BaseFrameworkException:
            om.out.debug('Failed to GET webshell: ' + web_shell_url)
        else:
            signature = self._match_signature(response)
            if signature is None:
                return

            desc = (u'An HTTP response matching the web backdoor signature'
                    u' "%s" was found at: "%s"; this could indicate that the'
                    u' server has been compromised.')
            desc %= (signature, response.get_url())

            # The probability is higher if we found a long signature
            _severity = severity.HIGH if len(signature) > 8 else severity.MEDIUM

            v = Vuln(u'Potential web backdoor', desc, _severity, response.id,
                     self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'backdoors', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())

            fr = FuzzableRequest.from_http_response(response)
            self.output_queue.put(fr)

    def _match_signature(self, response):
        """
        Heuristic to infer if the content of <response> has the pattern of a
        backdoor response.

        :param response: HTTPResponse object
        :return: The matched signature string, or None if nothing matched
        """
        body_text = response.get_body()

        for match, _, _, _ in self._signature_re.query(body_text):
            match_string = match.group(0)
            return match_string

        return None

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #33
class url_fuzzer(CrawlPlugin):
    """
    Try to find backups and other related files.
    :author: Andres Riancho ([email protected])
    """
    _appendables = ('~', '.tar.gz', '.gz', '.7z', '.cab', '.tgz',
                    '.gzip', '.bzip2', '.inc', '.zip', '.rar', '.jar', '.java',
                    '.class', '.properties', '.bak', '.bak1', '.bkp', '.back',
                    '.backup', '.backup1', '.old', '.old1', '.$$$'
                    )
    _backup_exts = ('tar.gz', '7z', 'gz', 'cab', 'tgz', 'gzip',
                    'bzip2', 'zip', 'rar'
                    )
    _file_types = (
        'inc', 'fla', 'jar', 'war', 'java', 'class', 'properties',
        'bak', 'bak1', 'backup', 'backup1', 'old', 'old1', 'c', 'cpp',
        'cs', 'vb', 'phps', 'disco', 'ori', 'orig', 'original'
    )

    def __init__(self):
        CrawlPlugin.__init__(self)

        self._headers = None
        self._first_time = True
        self._fuzz_images = False
        self._seen = ScalableBloomFilter()

    def crawl(self, fuzzable_request):
        """
        Searches for new URLs using fuzzing.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        url = fuzzable_request.get_url()
        self._headers = Headers([('Referer', url.url_string)])

        if self._first_time:
            self._verify_head_enabled(url)
            self._first_time = False

        # First we need to delete fragments and query strings from the URL.
        url = url.uri2url()

        # And we mark this one as a "do not return" URL, because the
        # core already found it using another technique.
        self._seen.add(url)

        self._verify_head_enabled(url)
        if self._head_enabled():
            response = self._uri_opener.HEAD(url, cache=True,
                                             headers=self._headers)
        else:
            response = self._uri_opener.GET(url, cache=True,
                                            headers=self._headers)

        if response.is_text_or_html() or self._fuzz_images:
            mutants_chain = chain(self._mutate_by_appending(url),
                                  self._mutate_path(url),
                                  self._mutate_file_type(url),
                                  self._mutate_domain_name(url))
            url_repeater = repeat(url)
            args = izip(url_repeater, mutants_chain)

            self.worker_pool.map_multi_args(self._do_request, args)

    def _do_request(self, url, mutant):
        """
        Perform a simple GET to see if the result is an error or not, and then
        run the actual fuzzing.
        """
        response = self._uri_opener.GET(
            mutant, cache=True, headers=self._headers)

        if not (is_404(response) or
                response.get_code() in (403, 401) or
                self._return_without_eval(mutant)):

            # Create the fuzzable request and send it to the core
            fr = FuzzableRequest.from_http_response(response)
            self.output_queue.put(fr)
            
            #
            #   Save it to the kb (if new)!
            #
            if response.get_url() not in self._seen and response.get_url().get_file_name():
                desc = 'A potentially interesting file was found at: "%s".'
                desc = desc % response.get_url()

                i = Info('Potentially interesting file', desc, response.id,
                         self.get_name())
                i.set_url(response.get_url())
                
                kb.kb.append(self, 'files', i)
                om.out.information(i.get_desc())

                # Report only once
                self._seen.add(response.get_url())

    def _return_without_eval(self, uri):
        """
        This method tries to lower the false positives.
        """
        if not uri.has_query_string():
            return False

        uri.set_file_name(uri.get_file_name() + rand_alnum(7))

        try:
            response = self._uri_opener.GET(uri, cache=True,
                                            headers=self._headers)
        except BaseFrameworkException, e:
            msg = 'An exception was raised while requesting "%s", the error'
            msg += ' message is: "%s"'
            om.out.error(msg % (uri, e))
        else:
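
Although the listing above is truncated, the mutation strategy is easy to illustrate. A sketch of the _mutate_by_appending idea, using a suffix subset taken from the _appendables tuple:

APPENDABLES = ('~', '.tar.gz', '.zip', '.bak', '.old')

def mutate_by_appending(url):
    # Derive likely backup names from an existing URL
    for suffix in APPENDABLES:
        yield url + suffix

for candidate in mutate_by_appending('http://target.example/app/index.php'):
    print(candidate)
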
Example #34
class fingerprint_404(object):
    """
    Read the 404 page(s) returned by the server.

    :author: Andres Riancho ([email protected])
    """

    _instance = None

    def __init__(self):
        #
        #   Set the opener, I need it to perform some tests and gain
        #   the knowledge about the server's 404 response bodies.
        #
        self._uri_opener = None
        self._worker_pool = None

        #
        #   Internal variables
        #
        self._already_analyzed = False
        self._404_responses = deque(maxlen=MAX_404_RESPONSES)
        self._lock = thread.allocate_lock()
        self._fingerprinted_paths = ScalableBloomFilter()
        self._directory_uses_404_codes = ScalableBloomFilter()

        # It is OK to store 200 here, I'm only storing path+filename as the key,
        # and bool as the value.
        self.is_404_LRU = LRU(250)

    def set_url_opener(self, urlopener):
        self._uri_opener = urlopener

    def set_worker_pool(self, worker_pool):
        self._worker_pool = worker_pool

    def generate_404_knowledge(self, url):
        """
        Based on a URL, request something that we know is going to be a 404.
        Afterwards, analyze the 404s and summarize them.

        :return: None, the 404 responses are stored in self._404_responses.
        """
        #
        #    This is the case when nobody has properly configured
        #    the object in order to use it.
        #
        if self._uri_opener is None:
            msg = "404 fingerprint database was incorrectly initialized."
            raise RuntimeError(msg)

        # Get the filename extension and create a 404 for it
        extension = url.get_extension()
        domain_path = url.get_domain_path()

        #
        #   This is a list of the most common handlers, in some configurations,
        #   the 404 depends on the handler, so I want to make sure that I catch
        #   the 404 for each one
        #
        handlers = {
            "py",
            "php",
            "asp",
            "aspx",
            "do",
            "jsp",
            "rb",
            "do",
            "gif",
            "htm",
            "pl",
            "cgi",
            "xhtml",
            "htmls",
            "foobar",
        }
        if extension:
            handlers.add(extension)

        test_urls = []

        for extension in handlers:
            rand_alnum_file = rand_alnum(8) + "." + extension
            url404 = domain_path.url_join(rand_alnum_file)
            test_urls.append(url404)

        imap_unordered = self._worker_pool.imap_unordered
        not_exist_resp_lst = []

        for not_exist_resp in imap_unordered(self._send_404, test_urls):
            not_exist_resp_lst.append(not_exist_resp)

        #
        #   I have the 404 responses in not_exist_resp_lst, but maybe they
        #   all look the same, so I'll filter the ones that look alike.
        #
        for i in not_exist_resp_lst:
            for j in not_exist_resp_lst:

                if i is j:
                    continue

                if fuzzy_equal(i.get_body(), j.get_body(), IS_EQUAL_RATIO):
                    # They are equal, just ignore it
                    continue
                else:
                    # They are not equal, which means we'll have to add this
                    # one to the 404 responses
                    self._404_responses.append(j)

        # And I return the ones I need
        msg_fmt = "The 404 body result database has a length of %s."
        om.out.debug(msg_fmt % len(self._404_responses))
        self._fingerprinted_paths.add(domain_path)

    @retry(tries=2, delay=0.5, backoff=2)
    def _send_404(self, url404):
        """
        Sends a GET request to url404.

        :return: The HTTP response body.
        """
        # I don't use the cache, because the URLs are random and the only thing
        # that cache does is to fill up disk space
        response = self._uri_opener.GET(url404, cache=False, grep=False)
        return response

    @lru_404_cache
    def is_404(self, http_response):
        """
        All of my previous versions of is_404 were very complex and struggled
        with all possible cases. The truth is that in most "strange" cases I
        was failing miserably, so I changed my 404 detection once again,
        keeping it as simple as possible.

        Also, because I was trying to cover ALL CASES, I was performing a
        lot of requests in order to cover them, which in most situations was
        unnecessary.

        So now I go for a much simpler approach:
            1- Cover the simplest case of all using only 1 HTTP request
            2- Give the users the power to configure the 404 detection by
               setting a string that identifies the 404 response (in case we
               are missing it for some reason in case #1)

        :param http_response: The HTTP response which we want to know if it
                                  is a 404 or not.
        """
        #
        #   First we handle the user configured exceptions:
        #
        domain_path = http_response.get_url().get_domain_path()
        if domain_path in cf.cf.get("always_404"):
            return True
        elif domain_path in cf.cf.get("never_404"):
            return False

        #
        #    The user configured setting. "If this string is in the response,
        #    then it is a 404"
        #
        if cf.cf.get("string_match_404") and cf.cf.get("string_match_404") in http_response:
            return True

        #
        #   This is the most simple case, we don't even have to think about this
        #
        #   If there is some custom website that always returns 404 codes, then
        #   we are screwed, but this is open source, and the pentester working
        #   on that site can modify these lines.
        #
        if http_response.get_code() == 404:
            return True

        #
        #    Simple: if the file we requested is in a directory that's known
        #    to return 404 codes for files that do not exist, AND this is NOT
        #    a 404, then we return False!
        #
        if domain_path in self._directory_uses_404_codes and http_response.get_code() != 404:
            return False

        #
        #   Let's start with the rather complex code...
        #
        with self._lock:
            if not self._already_analyzed:
                self.generate_404_knowledge(http_response.get_url())
                self._already_analyzed = True

        # 404_body was already cleaned inside generate_404_knowledge
        # so we need to clean this one in order to have a fair comparison
        html_body = get_clean_body(http_response)

        #
        #   Compare this response to all the 404's I have in my DB
        #
        #   Copy the 404_responses deque in order to be able to iterate over
        #   it from one thread, while it is changed in another.
        #
        copy_404_responses = copy.copy(self._404_responses)

        for resp_404 in copy_404_responses:

            if fuzzy_equal(resp_404.get_body(), html_body, IS_EQUAL_RATIO):
                msg = '"%s" (id:%s) is a 404 [similarity_index > %s]'
                fmt = (http_response.get_url(), http_response.id, IS_EQUAL_RATIO)
                om.out.debug(msg % fmt)
                return True

        else:
            #
            #    I get here when the for loop ends and none of the stored 404
            #    bodies matched the html_body that was sent as a parameter.
            #    This means one of two things:
            #        * There is not enough knowledge in self._404_responses, or
            #        * The answer is NOT a 404.
            #
            #    Because we want to reduce the amount of "false positives" that
            #    this method returns, we'll perform one extra check before
            #    saying that this is NOT a 404.
            domain_path = http_response.get_url().get_domain_path()
            if domain_path not in self._fingerprinted_paths:

                if self._is_404_with_extra_request(http_response, html_body):
                    #
                    #   Aha! It actually was a 404!
                    #
                    self._404_responses.append(http_response)
                    self._fingerprinted_paths.add(domain_path)

                    msg = (
                        '"%s" (id:%s) is a 404 (similarity_index > %s).'
                        " Adding new knowledge to the 404_bodies database"
                        " (length=%s)."
                    )
                    fmt = (http_response.get_url(), http_response.id, IS_EQUAL_RATIO, len(self._404_responses))
                    om.out.debug(msg % fmt)

                    return True

            msg = '"%s" (id:%s) is NOT a 404 [similarity_index < %s].'
            fmt = (http_response.get_url(), http_response.id, IS_EQUAL_RATIO)
            om.out.debug(msg % fmt)

            return False

    def _generate_404_filename(self, filename):
        """
        Some web applications are really picky about the URL format, or have
        different 404 handling depending on the URL format, so we apply these
        rules to generate a filename that doesn't exist:

            * Flip each pair of adjacent characters of the same type (digit
            with digit, letter with letter), ignoring the file extension
            (if any):
                'ab-23'      ==> 'ba-23'
                'abc-12'     ==> 'bac-21'
                'ab-23.html' ==> 'ba-23.html'

            * If after the character flipping the filename is equal to the
            original one, shift each letter and digit three places forward:
                'a1a2'      ==> 'd4d5'
                'a1a2.html' ==> 'd4d5.html'

        :param filename: The original filename
        :return: A mutated filename
        """
        split_filename = filename.rsplit(".", 1)
        if len(split_filename) == 2:
            orig_filename, extension = split_filename
        else:
            extension = None
            orig_filename = split_filename[0]

        def grouper(iterable, n, fillvalue=None):
            "Collect data into fixed-length chunks or blocks"
            # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
            args = [iter(iterable)] * n
            return izip_longest(fillvalue=fillvalue, *args)
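        # e.g. grouper('ab-23', 2) --> ('a', 'b'), ('-', '2'), ('3', None)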

        mod_filename = ""

        for x, y in grouper(orig_filename, 2):

            # Handle the last iteration
            if y is None:
                mod_filename += x
                break

            if x.isdigit() and y.isdigit():
                mod_filename += y + x
            elif x in string.letters and y in string.letters:
                mod_filename += y + x
            else:
                # Don't flip chars
                mod_filename += x + y

        if mod_filename == orig_filename:
            # Damn!
            plus_three_filename = ""
            letters = string.letters
            digits = string.digits
            lletters = len(letters)
            ldigits = len(digits)

            for c in mod_filename:
                indexl = letters.find(c)
                if indexl != -1:
                    new_index = (indexl + 3) % lletters
                    plus_three_filename += letters[new_index]
                else:
                    indexd = digits.find(c)
                    if indexd != -1:
                        new_index = (indexd + 3) % ldigits
                        plus_three_filename += digits[new_index]
                    else:
                        plus_three_filename += c

            mod_filename = plus_three_filename

        final_result = mod_filename
        if extension is not None:
            final_result += ".%s" % extension

        return final_result

    def _is_404_with_extra_request(self, http_response, clean_html_body):
        """
        Performs a very simple check to verify if this response is a 404 or not.

        It takes the original URL and mutates its filename with
        _generate_404_filename() (or appends a random path when there is no
        filename), then performs a request to that URL and compares the
        original response with the new one. If they are equal then the
        original request is a 404.

        :param http_response: The original HTTP response
        :param clean_html_body: The original HTML body you could find in
                                http_response after passing it by a cleaner

        :return: True if the original response was a 404!
        """
        response_url = http_response.get_url()
        filename = response_url.get_file_name()
        if not filename:
            relative_url = "../%s/" % rand_alnum(8)
            url_404 = response_url.url_join(relative_url)
        else:
            relative_url = self._generate_404_filename(filename)
            url_404 = response_url.url_join(relative_url)

        response_404 = self._send_404(url_404)
        clean_response_404_body = get_clean_body(response_404)

        if response_404.get_code() == 404 and url_404.get_domain_path() not in self._directory_uses_404_codes:
            self._directory_uses_404_codes.add(url_404.get_domain_path())

        return fuzzy_equal(clean_response_404_body, clean_html_body, IS_EQUAL_RATIO)
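
The fingerprinting code above leans on two helpers defined elsewhere in w3af:
get_clean_body() and fuzzy_equal(). As a rough mental model only, here is a
minimal sketch of what a fuzzy_equal() helper could look like, assuming a
difflib-style similarity ratio (the real w3af implementation is more elaborate
and heavily optimized):

import difflib

def fuzzy_equal(body_a, body_b, is_equal_ratio):
    # Consider the two bodies "equal" when their similarity ratio
    # (0.0 = completely different, 1.0 = identical) reaches the threshold
    similarity = difflib.SequenceMatcher(None, body_a, body_b).ratio()
    return similarity >= is_equal_ratio

With a threshold such as 0.90, two 404 pages that only differ in the echoed
random filename still compare as equal, while a page with genuinely different
content does not.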
Example #35
class web_spider(CrawlPlugin):
    """
    Crawl the web application.

    :author: Andres Riancho ([email protected])
    """
    UNAUTH_FORBID = {http_constants.UNAUTHORIZED, http_constants.FORBIDDEN}

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._compiled_ignore_re = None
        self._compiled_follow_re = None
        self._broken_links = DiskSet(table_prefix='web_spider')
        self._first_run = True
        self._target_urls = []
        self._target_domain = None
        self._already_filled_form = ScalableBloomFilter()
        self._variant_db = VariantDB()

        # User configured variables
        self._ignore_regex = ''
        self._follow_regex = '.*'
        self._only_forward = False
        self._compile_re()

    def crawl(self, fuzzable_req):
        """
        Searches for links in the HTML.

        :param fuzzable_req: A fuzzable_req instance that contains
                             (among other things) the URL to test.
        """
        self._handle_first_run()

        #
        # If it is a form, then smart_fill the parameters to send something that
        # makes sense and will allow us to cover more code.
        #
        data_container = fuzzable_req.get_raw_data()
        if isinstance(data_container, Form):

            if fuzzable_req.get_url() in self._already_filled_form:
                return

            self._already_filled_form.add(fuzzable_req.get_url())
            data_container.smart_fill()

        # Send the HTTP request
        resp = self._uri_opener.send_mutant(fuzzable_req)

        # Nothing to do here...
        if resp.get_code() == http_constants.UNAUTHORIZED:
            return

        # Nothing to do here...
        if resp.is_image():
            return

        # And we don't trust what comes from the core, check if 404
        if is_404(resp):
            return

        self._extract_html_forms(resp, fuzzable_req)
        self._extract_links_and_verify(resp, fuzzable_req)

    def _extract_html_forms(self, resp, fuzzable_req):
        """
        Parses the HTTP response body and extract HTML forms, resulting forms
        are put() on the output queue.
        """
        # Try to find forms in the document
        try:
            dp = parser_cache.dpc.get_document_parser_for(resp)
        except BaseFrameworkException:
            # Failed to find a suitable parser for the document
            return

        same_domain = lambda f: f.get_action().get_domain() == \
                                resp.get_url().get_domain()

        # Create one FuzzableRequest for each form variant
        mode = cf.cf.get('form_fuzzing_mode')
        for form_params in dp.get_forms():

            if not same_domain(form_params):
                continue

            headers = fuzzable_req.get_headers()

            for form_params_variant in form_params.get_variants(mode):
                data_container = dc_from_form_params(form_params_variant)

                # Now data_container is either a Multipart or a URLEncoded
                # form instance, both of which are DataContainers. Much
                # better than the FormParameters instance we had before in
                # form_params_variant
                r = FuzzableRequest.from_form(data_container, headers=headers)
                self.output_queue.put(r)

    def _handle_first_run(self):
        if self._first_run:
            # I have to set some variables, in order to be able to code
            # the "only_forward" feature
            self._first_run = False
            self._target_urls = [i.uri2url() for i in cf.cf.get('targets')]

            # The following line triggered lots of bugs when the "stop" button
            # was pressed and the core did this: "cf.cf.save('targets', [])"
            #
            #self._target_domain = cf.cf.get('targets')[0].get_domain()
            #
            #    Changing it to something awful but bug-free.
            targets = cf.cf.get('targets')
            if not targets:
                return
            else:
                self._target_domain = targets[0].get_domain()
                
    def _urls_to_verify_generator(self, resp, fuzzable_req):
        """
        Yields tuples containing:
            * Newly found URL
            * The FuzzableRequest instance passed as parameter
            * The HTTPResponse generated by the FuzzableRequest
            * Boolean indicating if we trust this reference or not

        :param resp: HTTP response object
        :param fuzzable_req: The HTTP request that generated the response
        """
        gen = itertools.chain(self._body_url_generator(resp, fuzzable_req),
                              self._headers_url_generator(resp, fuzzable_req))
        
        for ref, fuzzable_req, original_resp, possibly_broken in gen:
            if self._should_verify_extracted_url(ref, original_resp):
                yield ref, fuzzable_req, original_resp, possibly_broken

    def _headers_url_generator(self, resp, fuzzable_req):
        """
        Yields tuples containing:
            * Newly found URL
            * The FuzzableRequest instance passed as parameter
            * The HTTPResponse generated by the FuzzableRequest
            * Boolean indicating if we trust this reference or not

        The newly found URLs are extracted from the http response headers such
        as "Location".

        :param resp: HTTP response object
        :param fuzzable_req: The HTTP request that generated the response
        """
        # If response was a 30X (i.e. a redirect) then include the
        # corresponding fuzzable request.
        resp_headers = resp.get_headers()

        for url_header_name in URL_HEADERS:
            url_header_value, _ = resp_headers.iget(url_header_name, '')
            if url_header_value:
                url = smart_unicode(url_header_value, encoding=resp.charset)
                try:
                    ref = resp.get_url().url_join(url)
                except ValueError:
                    msg = 'The application sent a "%s" redirect that w3af' \
                          ' failed to correctly parse as a URL; the header' \
                          ' value was: "%s"'
                    om.out.debug(msg % (url_header_name, url))
                else:
                    yield ref, fuzzable_req, resp, False

    def _body_url_generator(self, resp, fuzzable_req):
        """
        Yields tuples containing:
            * Newly found URL
            * The FuzzableRequest instance passed as parameter
            * The HTTPResponse generated by the FuzzableRequest
            * Boolean indicating if we trust this reference or not

        The newly found URLs are extracted from the http response body using
        one of the framework's parsers.

        :param resp: HTTP response object
        :param fuzzable_req: The HTTP request that generated the response
        """
        #
        # Note: I WANT to follow links that are in the 404 page.
        #
        try:
            doc_parser = parser_cache.dpc.get_document_parser_for(resp)
        except BaseFrameworkException, w3:
            om.out.debug('Failed to find a suitable document parser. '
                         'Exception "%s"' % w3)
        else:
Example #36
File: php_eggs.py Project: 3rdDegree/w3af
class php_eggs(InfrastructurePlugin):
    """
    Fingerprint the PHP version using documented easter eggs that exist in PHP.
    :author: Andres Riancho ([email protected])
    """
    PHP_EGGS = [('?=PHPB8B5F2A0-3C92-11d3-A3A9-4C7B08C10000', 'PHP Credits'),
                ('?=PHPE9568F34-D428-11d2-A769-00AA001ACF42', 'PHP Logo'),
                ('?=PHPE9568F35-D428-11d2-A769-00AA001ACF42', 'Zend Logo'),
                ('?=PHPE9568F36-D428-11d2-A769-00AA001ACF42', 'PHP Logo 2')]

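    # Appending one of the query strings above to a URL served by PHP makes
    # PHP itself render the easter egg, but only when the "expose_php" ini
    # setting is enabled on the server. The MD5 hash of the egg's response
    # body is then looked up in EGG_DB below to fingerprint the PHP build.
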
    #
    # This is a list of hashes and description of the egg for every PHP version.
    # PHP versions 4.0.0 - 4.0.6
    # PHP versions 4.1.0 - 4.1.3
    # PHP versions 4.2.0 - 4.2.3
    # PHP versions 4.3.0 - 4.3.11
    # PHP versions 4.4.0 - 4.4.9
    # PHP versions 5.0.0 - 5.0.5
    # PHP versions 5.1.0 - 5.1.6
    # PHP versions 5.2.0 - 5.2.17
    # PHP versions 5.3.0 - 5.3.27
    # PHP versions 5.4.0 - 5.4.22 (still in progress)
    # Remark: PHP versions 5.5.x have no PHP eggs.
    # Remark: PHP Logo 2 is not always available.
    
    
    EGG_DB = {}
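    # NOTE: some versions are assigned twice below (e.g. "4.3.9", "4.4.7",
    # "5.1.2", "5.2.5" and "5.3.23"); since these are plain dict assignments,
    # the later entry silently overwrites the earlier one.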
    EGG_DB["4.0.0"] = [
        ("7c75d38f7b26b7cc13ed1d7bbedd0bb8", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.0.1"] = [
        ("31e2dd536176af3f7f142c18eef1aa4e", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.0.2"] = [
        ("34591272f6dd5cf9953b65dfdb390259", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.0.3pl1"] = [
        ("34591272f6dd5cf9953b65dfdb390259", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.0.4pl1"] = [
        ("bee683d024c0065a6e7ae57458416f60", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.0.5"] = [
        ("34040cf89a0574e7de5c643da6d9eab8", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.0.6"] = [
        ("5bd3e883d03543baf7f39749d526c5a4", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.1.0"] = [
        ("744aecef04f9ed1bc39ae773c40017d1", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.1.1"] = [
        ("744aecef04f9ed1bc39ae773c40017d1", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.1.2"] = [
        ("744aecef04f9ed1bc39ae773c40017d1", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.1.3"] = [
        ("744aecef04f9ed1bc39ae773c40017d1", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.2.0"] = [
        ("8bc001f58bf6c17a67e1ca288cb459cc", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.2.1"] = [
        ("8bc001f58bf6c17a67e1ca288cb459cc", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.2.2"] = [
        ("8bc001f58bf6c17a67e1ca288cb459cc", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.2.3"] = [
        ("3422eded2fcceb3c89cabb5156b5d4e2", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("85be3b4be7bfe839cbb3b4f2d30ff983", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.0"] = [
        ("1e04761e912831dd29b7a98785e7ac61", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.1"] = [
        ("1e04761e912831dd29b7a98785e7ac61", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.2"] = [
        ("22d03c3c0a9cff6d760a4ba63909faea", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.3"] = [
        ("8a4a61f60025b43f11a7c998f02b1902", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.4"] = [
        ("8a4a61f60025b43f11a7c998f02b1902", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.5"] = [
        ("8a4a61f60025b43f11a7c998f02b1902", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.6"] = [
        ("913ec921cf487109084a518f91e70859", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.7"] = [
        ("913ec921cf487109084a518f91e70859", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.8"] = [
        ("913ec921cf487109084a518f91e70859", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.9"] = [
        ("913ec921cf487109084a518f91e70859", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.10"] = [
        ("8fbf48d5a2a64065fc26db3e890b9871", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.10-18"] = [
        ("1e8fe4ae1bf06be222c1643d32015f0c", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.11"] = [
        ("8fbf48d5a2a64065fc26db3e890b9871", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.2"] = [
        ("8a8b4a419103078d82707cf68226a482", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("a57bd73e27be03a62dd6b3e1b537a72c", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.3.9"] = [
        ("f9b56b361fafd28b668cc3498425a23b", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB['4.3.10'] = [
        ('b233cc756b06655f47489aa2779413d7', 'PHP Credits'),
        ('7b27e18dc6f846b80e2f29ecf67e4133', 'PHP Logo'),
        ('185386dd4b2eff044bd635d22ae7dd9e', 'PHP Logo 2'),
        ('43af90bcfa66f16af62744e8c599703d', 'Zend Logo')]
    EGG_DB["4.4.0"] = [
        ("ddf16ec67e070ec6247ec1908c52377e", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.0 for Windows"] = [
        ("6d974373683ecfcf30a7f6873f2d234a", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.1"] = [
        ("55bc081f2d460b8e6eb326a953c0e71e", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.2"] = [
        ("bed7ceff09e9666d96fdf3518af78e0e", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.3"] = [
        ("bed7ceff09e9666d96fdf3518af78e0e", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.4"] = [
        ("bed7ceff09e9666d96fdf3518af78e0e", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.4-8+etch6"] = [
        ("31a2553efc348a21b85e606e5e6c2424", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.5"] = [
        ("692a87ca2c51523c17f597253653c777", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.6"] = [
        ("692a87ca2c51523c17f597253653c777", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.7"] = [
        ("692a87ca2c51523c17f597253653c777", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.7"] = [
        ("72b7ad604fe1362f1e8bf4f6d80d4edc", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.8"] = [
        ("50ac182f03fc56a719a41fc1786d937d", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.8"] = [
        ("4cdfec8ca11691a46f4f63839e559fc5", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["4.4.9"] = [
        ("50ac182f03fc56a719a41fc1786d937d", "PHP Credits"),
        ("11b9cfe306004fce599a1f8180b61266", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("da2dae87b166b7709dbd4061375b74cb", "Zend Logo")]
    EGG_DB["5.0.0RC1"] = [
        ("314e92ddb1a8abc0781ab87d5b66e960", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("37e194b799d4aaff10e39c4e3b2679a2", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.0.0RC2"] = [
        ("e54dbf41d985bfbfa316dba207ad6bce", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("37e194b799d4aaff10e39c4e3b2679a2", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.0.0RC3"] = [
        ("e54dbf41d985bfbfa316dba207ad6bce", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("37e194b799d4aaff10e39c4e3b2679a2", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.0.0"] = [
        ("e54dbf41d985bfbfa316dba207ad6bce", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("37e194b799d4aaff10e39c4e3b2679a2", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.0.1"] = [
        ("3c31e4674f42a49108b5300f8e73be26", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("37e194b799d4aaff10e39c4e3b2679a2", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.0.2"] = [
        ("3c31e4674f42a49108b5300f8e73be26", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("37e194b799d4aaff10e39c4e3b2679a2", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.0.3"] = [
        ("3c31e4674f42a49108b5300f8e73be26", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("37e194b799d4aaff10e39c4e3b2679a2", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.0.4"] = [
        ("3c31e4674f42a49108b5300f8e73be26", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.0.5"] = [
        ("6be3565cdd38e717e4eb96868d9be141", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.1.0RC1"] = [
        ("2673a94df41739ef8b012c07518b6c6f", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.1.0"] = [
        ("5518a02af41478cfc492c930ace45ae5", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.1.1"] = [
        ("5518a02af41478cfc492c930ace45ae5", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.1.2"] = [
        ("6cb0a5ba2d88f9d6c5c9e144dd5941a6", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.1.2"] = [
        ("b83433fb99d0bef643709364f059a44a", "PHP Credits"),
        ("8ac5a686135b923664f64fe718ea55cd", "PHP Logo"),
        ("4b2c92409cf0bcf465d199e93a15ac3f", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.1.3"] = [
        ("82fa2d6aa15f971f7dadefe4f2ac20e3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.1.4"] = [
        ("82fa2d6aa15f971f7dadefe4f2ac20e3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.1.5"] = [
        ("82fa2d6aa15f971f7dadefe4f2ac20e3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.1.6"] = [
        ("82fa2d6aa15f971f7dadefe4f2ac20e3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.1.6"] = [
        ("4b689316409eb09b155852e00657a0ae", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.0"] = [
        ("e566715bcb0fd2cb1dc43ed076c091f1", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.0-8+etch10"] = [
        ("e566715bcb0fd2cb1dc43ed076c091f1", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.0-8+etch7"] = [
        ("307f5a1c02155ca38744647eb94b3543", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.1"] = [
        ("d3894e19233d979db07d623f608b6ece", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.2"] = [
        ("56f9383587ebcc94558e11ec08584f05", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.3"] = [
        ("c37c96e8728dc959c55219d47f2d543f", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.3-1+b1"] = [
        ("c37c96e8728dc959c55219d47f2d543f", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.4"] = [
        ("74c33ab9745d022ba61bc43a5db717eb", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.4-2ubuntu5.3"] = [
        ("f26285281120a2296072f21e21e7b4b0", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.4-2ubuntu5.14"] = [
        ("c37c96e8728dc959c55219d47f2d543f", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.5"] = [
        ("c37c96e8728dc959c55219d47f2d543f", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.5"] = [
        ("f26285281120a2296072f21e21e7b4b0", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.5-3"] = [
        ("b7e4385bd7f07e378d92485b4722c169", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("0152ed695f4291488741d98ba066d280", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.6"] = [
        ("bbd44c20d561a0fc5a4aa76093d5400f", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.6RC4-pl0-gentoo"] = [
        ("d03b2481f60d9e64cb5c0f4bd0c87ec1", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.7"] = [
        ("1ffc970c5eae684bebc0e0133c4e1f01", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.8"] = [
        ("1ffc970c5eae684bebc0e0133c4e1f01", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.8-pl1-gentoo"] = [
        ("40410284d460552a6c9e10c1f5ae7223", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.9"] = [
        ("54f426521bf61f2d95c8bfaa13857c51", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.10"] = [
        ("54f426521bf61f2d95c8bfaa13857c51", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.11"] = [
        ("54f426521bf61f2d95c8bfaa13857c51", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.12"] = [
        ("54f426521bf61f2d95c8bfaa13857c51", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.13"] = [
        ("54f426521bf61f2d95c8bfaa13857c51", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.14"] = [
        ("54f426521bf61f2d95c8bfaa13857c51", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.15"] = [
        ("adb361b9255c1e5275e5bd6e2907c5fb", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.16"] = [
        ("adb361b9255c1e5275e5bd6e2907c5fb", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.2.17"] = [
        ("adb361b9255c1e5275e5bd6e2907c5fb", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("50caaf268b4f3d260d720a1a29c5fe21", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.0"] = [
        ("db23b07a9b426d0d033565b878b1e384", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.1"] = [
        ("a4c057b11fa0fba98c8e26cd7bb762a8", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.2"] = [
        ("a4c057b11fa0fba98c8e26cd7bb762a8", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.3"] = [
        ("b34501471d51cebafacdd45bf2cd545d", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.4"] = [
        ("e3b18899d0ffdf8322ed18d7bce3c9a0", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.5"] = [
        ("e3b18899d0ffdf8322ed18d7bce3c9a0", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.6"] = [
        ("2e7f5372931a7f6f86786e95871ac947", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.7"] = [
        ("f1f1f60ac0dcd700a1ad30aa81175d34", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.8"] = [
        ("f1f1f60ac0dcd700a1ad30aa81175d34", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.9"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.10"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.11"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.12"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.13"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.14"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.15"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.16"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.17"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.18"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.19"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.20"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.21"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.22"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.21-pl1-gentoo"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.23"] = [
        ("5e8e6736635920a0a97ba79d69c55b30", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.23"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.24"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.25"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.26"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.3.27"] = [
        ("23f183b78eb4e3ba8b3df13f0a15e5de", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.0"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.1"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.2"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.3"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.4"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.5"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.6"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.7"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.8"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.9"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.10"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.11"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.12"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.13"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.14"] = [
        ("85da0a620fabe694dab1d55cbf1e24c3", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.15"] = [
        ("ebf6d0333d67af5f80077438c45c8eaa", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.16"] = [
        ("ebf6d0333d67af5f80077438c45c8eaa", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.17"] = [
        ("ebf6d0333d67af5f80077438c45c8eaa", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.18"] = [
        ("ebf6d0333d67af5f80077438c45c8eaa", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.19"] = [
        ("ebf6d0333d67af5f80077438c45c8eaa", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.20"] = [
        ("ebf6d0333d67af5f80077438c45c8eaa", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.21"] = [
        ("ebf6d0333d67af5f80077438c45c8eaa", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    EGG_DB["5.4.22"] = [
        ("ebf6d0333d67af5f80077438c45c8eaa", "PHP Credits"),
        ("c48b07899917dfb5d591032007041ae3", "PHP Logo"),
        ("fb3bbd9ccc4b3d9e0b3be89c5ff98a14", "PHP Logo 2"),
        ("7675f1d01c927f9e6a4752cf182345a2", "Zend Logo")]
    
    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Already analyzed extensions
        self._already_analyzed_ext = ScalableBloomFilter()

    @runonce(exc_class=RunOnce)
    def discover(self, fuzzable_request):
        """
        Nothing strange, just do some GET requests to the eggs and analyze the
        response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        # Get the extension of the URL (.html, .php, .. etc)
        ext = fuzzable_request.get_url().get_extension()

        # Only perform this analysis if we haven't already analyzed this type
        # of extension OR if we get a URL like http://f00b5r/4/     (note that
        # it has no extension). This logic will perform some extra tests...
        # but we won't miss any special cases. Also, we aren't doing something
        # like "if 'php' in ext:" because we never depend on something as
        # changeable as extensions to make decisions.
        if ext not in self._already_analyzed_ext:

            # Now we save the extension as one of the already analyzed
            self._already_analyzed_ext.add(ext)

            # Init some internal variables
            query_results = self._GET_php_eggs(fuzzable_request, ext)

            if self._are_php_eggs(query_results):
                # analyze the info to see if we can identify the version
                self._extract_version_from_egg(query_results)

    def _GET_php_eggs(self, fuzzable_request, ext):
        """
        HTTP GET the URLs for PHP Eggs
        :return: A list with the HTTP response objects
        """
        def http_get(fuzzable_request, (egg_url, egg_desc)):
            egg_URL = fuzzable_request.get_url().uri2url().url_join(egg_url)
            try:
                response = self._uri_opener.GET(egg_URL, cache=True)
            except BaseFrameworkException, w3:
                raise w3
            else:
Example #37
class VariantDB(object):
    """
    See the notes on PARAMS_MAX_VARIANTS and PATH_MAX_VARIANTS above. Also
    understand that we'll keep "dirty" versions of the references/fuzzable
    requests in order to be able to answer "False" to a call for
    need_more_variants in a situation like this:

        >> need_more_variants('http://foo.com/abc?id=32')
        True

        >> append('http://foo.com/abc?id=32')
        True

        >> need_more_variants('http://foo.com/abc?id=32')
        False

    """
    HASH_IGNORE_HEADERS = ('referer', )
    TAG = '[variant_db]'

    def __init__(self):
        self._variants = DiskDict(table_prefix='variant_db')
        self._variants_eq = ScalableBloomFilter()
        self._variants_form = DiskDict(table_prefix='variant_db_form')

        self.params_max_variants = cf.cf.get('params_max_variants')
        self.path_max_variants = cf.cf.get('path_max_variants')
        self.max_equal_form_variants = cf.cf.get('max_equal_form_variants')

        self._db_lock = threading.RLock()

    def cleanup(self):
        self._variants.cleanup()
        self._variants_form.cleanup()

    def append(self, fuzzable_request):
        """
        :return: True if we added a new fuzzable request variant to the DB,
                 False if NO more variants are required for this fuzzable
                 request.
        """
        with self._db_lock:
            if self._seen_exactly_the_same(fuzzable_request):
                return False

            if self._has_form(fuzzable_request):
                if not self._need_more_variants_for_form(fuzzable_request):
                    return False

            if not self._need_more_variants_for_uri(fuzzable_request):
                return False

            # Yes, please give me more variants of fuzzable_request
            return True

    def _log_return_false(self, fuzzable_request, reason):
        args = (reason, fuzzable_request)
        msg = 'VariantDB is returning False because of "%s" for "%s"'
        om.out.debug(msg % args)

    def _need_more_variants_for_uri(self, fuzzable_request):
        #
        # Do we need more variants for the fuzzable request? (similar match)
        # PARAMS_MAX_VARIANTS and PATH_MAX_VARIANTS
        #
        clean_dict_key = clean_fuzzable_request(fuzzable_request)
        count = self._variants.get(clean_dict_key, None)

        if count is None:
            self._variants[clean_dict_key] = 1
            return True

        # We've seen at least one fuzzable request with this pattern...
        url = fuzzable_request.get_uri()
        has_params = url.has_query_string() or fuzzable_request.get_raw_data()

        # Choose which max_variants to use
        if has_params:
            max_variants = self.params_max_variants
            max_variants_type = 'params'
        else:
            max_variants = self.path_max_variants
            max_variants_type = 'path'

        if count >= max_variants:
            _type = 'need_more_variants_for_uri(%s)' % max_variants_type
            self._log_return_false(fuzzable_request, _type)
            return False

        self._variants[clean_dict_key] = count + 1
        return True

    def _seen_exactly_the_same(self, fuzzable_request):
        #
        # Is the fuzzable request already known to us? (exactly the same)
        #
        request_hash = fuzzable_request.get_request_hash(
            self.HASH_IGNORE_HEADERS)
        if request_hash in self._variants_eq:
            return True

        # Store it to avoid duplicated fuzzable requests in our framework
        self._variants_eq.add(request_hash)

        self._log_return_false(fuzzable_request, 'seen_exactly_the_same')
        return False

    def _has_form(self, fuzzable_request):
        raw_data = fuzzable_request.get_raw_data()
        if raw_data and len(raw_data.get_param_names()) >= 2:
            return True

        return False

    def _need_more_variants_for_form(self, fuzzable_request):
        #
        # Do we need more variants for this form? (similar match)
        # MAX_EQUAL_FORM_VARIANTS
        #
        clean_dict_key_form = clean_fuzzable_request_form(fuzzable_request)
        count = self._variants_form.get(clean_dict_key_form, None)

        if count is None:
            self._variants_form[clean_dict_key_form] = 1
            return True

        if count >= self.max_equal_form_variants:
            self._log_return_false(fuzzable_request,
                                   'need_more_variants_for_form')
            return False

        self._variants_form[clean_dict_key_form] = count + 1
        return True
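
The counting contract in the docstring above can be sketched with plain
in-memory containers. This is a minimal sketch only: clean_key() and
MAX_VARIANTS are illustrative stand-ins for w3af's clean_fuzzable_request()
and the configured limits, and w3af itself persists the counts in a DiskDict
and the exact matches in a ScalableBloomFilter rather than a dict and a set.

MAX_VARIANTS = 2

def clean_key(url):
    # Stand-in for clean_fuzzable_request(): normalize away parameter values
    return url.split('=')[0]

seen_exact = set()
variant_count = {}

def append(url):
    if url in seen_exact:          # exactly the same request: never again
        return False
    seen_exact.add(url)

    key = clean_key(url)
    count = variant_count.get(key, 0)
    if count >= MAX_VARIANTS:      # enough variants of this pattern
        return False
    variant_count[key] = count + 1
    return True

print(append('http://foo.com/abc?id=32'))  # True
print(append('http://foo.com/abc?id=32'))  # False (seen exactly the same)
print(append('http://foo.com/abc?id=33'))  # True  (second variant)
print(append('http://foo.com/abc?id=34'))  # False (MAX_VARIANTS reached)
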
Example #38
0
File: frontpage.py Project: intfrr/Tortazo
class frontpage(AuditPlugin):
    """
    Tries to upload a file using frontpage extensions (author.dll).

    :author: Andres Riancho (([email protected]))
    """

    def __init__(self):
        AuditPlugin.__init__(self)

        # Internal variables
        self._already_tested = ScalableBloomFilter()

    def audit(self, freq, orig_response):
        """
        Searches for file upload vulns using a POST to author.dll.

        :param freq: A FuzzableRequest
        """
        domain_path = freq.get_url().get_domain_path()

        if kb.kb.get(self, 'frontpage'):
            # Nothing to do, I have found vuln(s) and I should stop on first
            msg = 'Not verifying if I can upload files to: "%s" using'\
                  ' author.dll, because I already found a vulnerability.'
            om.out.debug(msg % domain_path)
            return

        # I haven't found any vulns yet, OR i'm trying to find every
        # directory where I can write a file.
        if domain_path not in self._already_tested:
            self._already_tested.add(domain_path)

            # Find a file that doesn't exist and then try to upload it
            for _ in xrange(3):
                rand_file = rand_alpha(5) + '.html'
                rand_path_file = domain_path.url_join(rand_file)
                res = self._uri_opener.GET(rand_path_file)
                if is_404(res):
                    upload_id = self._upload_file(domain_path, rand_file)
                    self._verify_upload(domain_path, rand_file, upload_id)
                    break
            else:
                msg = 'frontpage plugin failed to find a 404 page. This is'\
                      ' mostly because of an error in 404 page detection.'
                om.out.error(msg)

    def _upload_file(self, domain_path, rand_file):
        """
        Upload the file using author.dll

        :param domain_path: http://localhost/f00/
        :param rand_file: <random>.html
        """
        file_path = domain_path.get_path() + rand_file

        # TODO: The frontpage version should be obtained from the information saved in the kb
        # by the infrastructure.frontpage_version plugin!
        # The 4.0.2.4715 version should be dynamic!
        # The information is already saved in the crawl plugin in the line:
        # i['version'] = version_match.group(1)
        content = "method=put document:4.0.2.4715&service_name=&document=[document_name="
        content += file_path
        content += ";meta_info=[]]&put_option=overwrite&comment=&keep_checked_out=false"
        content += '\n'
        # The content of the file I'm uploading is the file name reversed
        content += rand_file[::-1]

        # TODO: The _vti_bin and _vti_aut directories should be PARSED from the _vti_inf file
        # inside the infrastructure.frontpage_version plugin, and then used here
        target_url = domain_path.url_join('_vti_bin/_vti_aut/author.dll')

        try:
            res = self._uri_opener.POST(target_url, data=content)
        except BaseFrameworkException, e:
            om.out.debug(
                'Exception while uploading file using author.dll: ' + str(e))
        else:
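
For reference, the author.dll request body assembled in _upload_file() above
can be reproduced as a standalone helper. This is a sketch only; the
4.0.2.4715 version string is hard-coded here exactly as the TODO in the
plugin warns it should not be.

def build_author_dll_put(file_path, rand_file):
    # Request body for the FrontPage "put document" call, as built above
    body = ('method=put document:4.0.2.4715'
            '&service_name='
            '&document=[document_name=%s;meta_info=[]]'
            '&put_option=overwrite&comment=&keep_checked_out=false\n'
            % file_path)
    # The uploaded content is the file name reversed, same as the plugin
    return body + rand_file[::-1]

print(build_author_dll_put('/f00/abcde.html', 'abcde.html'))
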
Example #39
0
class dav(AuditPlugin):
    """
    Verify if the WebDAV module is properly configured.

    :author: Andres Riancho ([email protected])
    """

    CONTENT_TYPE = Headers([('content-type',
                             'application/xml; charset="utf-8"')])

    def __init__(self):
        AuditPlugin.__init__(self)

        # Internal variables
        self._already_tested_dirs = ScalableBloomFilter()

    def audit(self, freq, orig_response):
        """
        Searches for file upload vulns using PUT method.

        :param freq: A FuzzableRequest
        """
        # Start
        domain_path = freq.get_url().get_domain_path()
        if domain_path not in self._already_tested_dirs:
            self._already_tested_dirs.add(domain_path)
            #
            #    Send the three requests in different threads, store the
            #    apply_result objects in order to be able to "join()" in the
            #    next for loop
            #
            #    TODO: This seems to be a fairly common use case: Send args to N
            #    functions that need to be run in different threads. If possible
            #    code this into threadpool.py in order to make this code clearer
            results = []
            for func in [self._PUT, self._PROPFIND, self._SEARCH]:
                apply_res = self.worker_pool.apply_async(func, (domain_path, ))
                results.append(apply_res)

            for apply_res in results:
                apply_res.get()

    #pylint: disable=C0103
    def _SEARCH(self, domain_path):
        """
        Test SEARCH method.
        """
        content = "<?xml version='1.0'?>\r\n"
        content += "<g:searchrequest xmlns:g='DAV:'>\r\n"
        content += "<g:sql>\r\n"
        content += "Select 'DAV:displayname' from scope()\r\n"
        content += "</g:sql>\r\n"
        content += "</g:searchrequest>\r\n"

        res = self._uri_opener.SEARCH(domain_path,
                                      data=content,
                                      headers=self.CONTENT_TYPE)

        content_matches = '<a:response>' in res or '<a:status>' in res or \
            'xmlns:a="DAV:"' in res

        if content_matches and res.get_code() in xrange(200, 300):
            msg = 'Directory listing with HTTP SEARCH method was found at' \
                  ' directory: "%s".' % domain_path

            v = Vuln('Insecure DAV configuration', msg, severity.MEDIUM,
                     res.id, self.get_name())

            v.set_url(res.get_url())
            v.set_method('SEARCH')

            self.kb_append(self, 'dav', v)

    #pylint: disable=C0103
    def _PROPFIND(self, domain_path):
        """
        Test PROPFIND method
        """
        content = "<?xml version='1.0'?>\r\n"
        content += "<a:propfind xmlns:a='DAV:'>\r\n"
        content += "<a:prop>\r\n"
        content += "<a:displayname:/>\r\n"
        content += "</a:prop>\r\n"
        content += "</a:propfind>\r\n"

        headers = copy.deepcopy(self.CONTENT_TYPE)
        headers['Depth'] = '1'

        res = self._uri_opener.PROPFIND(domain_path,
                                        data=content,
                                        headers=headers)

        if "D:href" in res and res.get_code() in xrange(200, 300):
            msg = 'Directory listing with HTTP PROPFIND method was found at' \
                  ' directory: "%s".' % domain_path

            v = Vuln('Insecure DAV configuration', msg, severity.MEDIUM,
                     res.id, self.get_name())

            v.set_url(res.get_url())
            v.set_method('PROPFIND')

            self.kb_append(self, 'dav', v)

    #pylint: disable=C0103
    def _PUT(self, domain_path):
        """
        Tests PUT method.
        """
        # upload
        url = domain_path.url_join(rand_alpha(5))
        rnd_content = rand_alnum(6)
        headers = Headers([('content-type', 'text/plain')])

        put_response = self._uri_opener.PUT(url,
                                            data=rnd_content,
                                            headers=headers)

        # check if uploaded
        res = self._uri_opener.GET(url, cache=True)
        if res.get_body() == rnd_content:
            msg = 'File upload with HTTP PUT method was found at resource:' \
                  ' "%s". A test file was uploaded to: "%s".'
            msg = msg % (domain_path, res.get_url())

            v = Vuln('Insecure DAV configuration', msg, severity.HIGH,
                     [put_response.id, res.id], self.get_name())

            v.set_url(url)
            v.set_method('PUT')

            self.kb_append(self, 'dav', v)

        # Report some common errors
        elif put_response.get_code() == 500:
            msg = 'DAV seems to be incorrectly configured. The web server' \
                  ' answered with a 500 error code. In most cases, this means'\
                  ' that the DAV extension failed in some way. This error was'\
                  ' found at: "%s".' % put_response.get_url()

            i = Info('DAV incorrect configuration', msg, res.id,
                     self.get_name())

            i.set_url(url)
            i.set_method('PUT')

            self.kb_append(self, 'dav', i)

        # Report some common errors
        elif put_response.get_code() == 403:
            msg = 'DAV seems to be correctly configured and allowing you to'\
                  ' use the PUT method but the directory does not have the'\
                  ' correct permissions that would allow the web server to'\
                  ' write to it. This error was found at: "%s".'
            msg = msg % put_response.get_url()

            i = Info('DAV incorrect configuration', msg,
                     [put_response.id, res.id], self.get_name())

            i.set_url(url)
            i.set_method('PUT')

            self.kb_append(self, 'dav', i)

    def get_plugin_deps(self):
        """
        :return: A list with the names of the plugins that should be run before
                 the current one.
        """
        return [
            'infrastructure.allowed_methods', 'infrastructure.server_header'
        ]

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #40
0
class html_comments(GrepPlugin):
    """
    Extract and analyze HTML comments.

    :author: Andres Riancho ([email protected])
    """

    HTML_RE = re.compile('<[a-zA-Z]*.*?>.*?</[a-zA-Z]>')

    INTERESTING_WORDS = (
        # In English
        'user',
        'pass',
        'xxx',
        'fix',
        'bug',
        'broken',
        'oops',
        'hack',
        'caution',
        'todo',
        'note',
        'warning',
        '!!!',
        '???',
        'shit',
        'pass',
        'password',
        'passwd',
        'pwd',
        'secret',
        'stupid',

        # In Spanish
        'tonto',
        'porqueria',
        'cuidado',
        'usuario',
        u'contraseña',
        'puta',
        'email',
        'security',
        'captcha',
        'pinga',
        'cojones',

        # some in Portuguese
        'banco',
        'bradesco',
        'itau',
        'visa',
        'bancoreal',
        u'transfêrencia',
        u'depósito',
        u'cartão',
        u'crédito',
        'dados pessoais')

    _multi_in = multi_in([' %s ' % w for w in INTERESTING_WORDS])

    def __init__(self):
        GrepPlugin.__init__(self)

        # Internal variables
        self._comments = DiskDict(table_prefix='html_comments')
        self._already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point, parse those comments!

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if not response.is_text_or_html():
            return

        try:
            dp = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            return

        for comment in dp.get_comments():
            # These next two lines fix this issue:
            # audit.ssi + grep.html_comments + web app with XSS = false positive
            if request.sent(comment):
                continue

            if self._is_new(comment, response):

                self._interesting_word(comment, request, response)
                self._html_in_comment(comment, request, response)

    def _interesting_word(self, comment, request, response):
        """
        Find interesting words in HTML comments
        """
        comment = comment.lower()

        for word in self._multi_in.query(comment):
            if (word, response.get_url()) in self._already_reported:
                continue

            desc = ('A comment with the string "%s" was found in: "%s".'
                    ' This could be interesting.')
            desc %= (word, response.get_url())

            v = Vuln.from_fr('Interesting HTML comment',
                             desc, severity.INFORMATION, response.id,
                             self.get_name(), request)
            v.add_to_highlight(word)

            kb.kb.append(self, 'interesting_comments', v)

            self._already_reported.add((word, response.get_url()))

    def _html_in_comment(self, comment, request, response):
        """
        Find HTML code in HTML comments
        """
        html_in_comment = self.HTML_RE.search(comment)

        if html_in_comment is None:
            return

        if (comment, response.get_url()) in self._already_reported:
            return

        # There is HTML code in the comment.
        comment = comment.strip()
        comment = comment.replace('\n', '')
        comment = comment.replace('\r', '')
        comment = comment[:40]

        desc = ('A comment with the string "%s" was found in: "%s".'
                ' This could be interesting.')
        desc %= (comment, response.get_url())

        v = Vuln.from_fr('HTML comment contains HTML code',
                         desc, severity.INFORMATION, response.id,
                         self.get_name(), request)
        v.set_uri(response.get_uri())
        v.add_to_highlight(html_in_comment.group(0))

        om.out.vulnerability(v.get_desc(), severity=severity.INFORMATION)
        kb.kb.append(self, 'html_comment_hides_html', v)
        self._already_reported.add((comment, response.get_url()))

    def _is_new(self, comment, response):
        """
        Make sure that we perform a thread safe check on the self._comments
        dict, in order to avoid duplicates.
        """
        with self._plugin_lock:

            #pylint: disable=E1103
            comment_data = self._comments.get(comment, None)
            response_url = response.get_url()

            if comment_data is None:
                self._comments[comment] = [(response_url, response.id)]
                return True
            else:
                for saved_url, response_id in comment_data:
                    if response_url == saved_url:
                        return False
                else:
                    comment_data.append((response_url, response.id))
                    self._comments[comment] = comment_data
                    return True
            #pylint: enable=E1103

    def end(self):
        """
        This method is called when the plugin wont be used anymore.
        :return: None
        """
        for comment, url_request_id_lst in self._comments.iteritems():

            stick_comment = ' '.join(comment.split())

            if len(stick_comment) > 40:
                msg = ('A comment with the string "%s..." (and %s more bytes)'
                       ' was found on these URL(s):')
                args = (stick_comment[:40], str(len(stick_comment) - 40))
                om.out.vulnerability(msg % args, severity=severity.INFORMATION)
            else:
                msg = 'A comment containing "%s" was found on these URL(s):'
                om.out.vulnerability(msg % stick_comment,
                                     severity=severity.INFORMATION)

            inform = []

            for url, request_id in url_request_id_lst:
                msg = '- %s (request with id: %s)'
                inform.append(msg % (url, request_id))

            for i in sorted(inform):
                om.out.vulnerability(i, severity=severity.INFORMATION)

        self._comments.cleanup()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #41
0
File: web_spider.py Project: s0i37/__w3af
class web_spider(CrawlPlugin):
    """
    Crawl the web application.

    :author: Andres Riancho ([email protected])
    """
    UNAUTH_FORBID = {http_constants.UNAUTHORIZED, http_constants.FORBIDDEN}

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._compiled_ignore_re = None
        self._compiled_follow_re = None
        self._broken_links = DiskSet(table_prefix='web_spider')
        self._first_run = True
        self._target_urls = []
        self._target_domain = None
        self._already_filled_form = ScalableBloomFilter()
        self._variant_db = VariantDB()

        # User configured variables
        self._ignore_regex = ''
        self._follow_regex = '.*'
        self._ignore_factor = 50
        self._only_forward = False
        self._compile_re()
        self._fuzzy_browser = FuzzyBrowser(self._ignore_factor)
        self._requests_count = 0
        self._max_requests_count = 100

    def crawl(self, fuzzable_req):
        """
        Searches for links in the HTML.

        :param fuzzable_req: A fuzzable_req instance that contains
                             (among other things) the URL to test.
        """
        self._handle_first_run()

        #
        # If it is a form, then smart_fill the parameters to send something that
        # makes sense and will allow us to cover more code.
        #
        data_container = fuzzable_req.get_raw_data()
        #print Fore.GREEN + fuzzable_req.get_url() + Fore.RESET
        if isinstance(data_container, Form):

            if fuzzable_req.get_url() in self._already_filled_form:
                return

            self._already_filled_form.add(fuzzable_req.get_url())
            data_container.smart_fill()

        # Send the HTTP request
        resp = self._uri_opener.send_mutant(fuzzable_req)

        # Nothing to do here...
        if resp.get_code() == http_constants.UNAUTHORIZED:
            return

        # Nothing to do here...
        if resp.is_image():
            return

        # And we don't trust what comes from the core, check if 404
        if is_404(resp):
            return

        self._extract_html_forms(resp, fuzzable_req)
        self._extract_links_and_verify(resp, fuzzable_req)

    def _extract_html_forms(self, resp, fuzzable_req):
        """
        Parses the HTTP response body and extract HTML forms, resulting forms
        are put() on the output queue.
        """
        # Try to find forms in the document
        try:
            dp = parser_cache.dpc.get_document_parser_for(resp)
        except BaseFrameworkException:
            # Failed to find a suitable parser for the document
            return

        same_domain = lambda f: f.get_action().get_domain() == \
                                resp.get_url().get_domain()

        # Create one FuzzableRequest for each form variant
        mode = cf.cf.get('form_fuzzing_mode')
        for form_params in dp.get_forms():

            if not same_domain(form_params):
                continue

            headers = fuzzable_req.get_headers()

            for form_params_variant in form_params.get_variants(mode):
                data_container = dc_from_form_params(form_params_variant)

                # Now data_container is one of Multipart of URLEncoded form
                # instances, which is a DataContainer. Much better than the
                # FormParameters instance we had before in form_params_variant
                r = FuzzableRequest.from_form(data_container, headers=headers)
                self._requests_count += 1
                if self._requests_count <= self._max_requests_count:
                    self.output_queue.put(r)

    def _handle_first_run(self):
        if self._first_run:
            # I have to set some variables, in order to be able to code
            # the "only_forward" feature
            self._first_run = False
            self._target_urls = [i.uri2url() for i in cf.cf.get('targets')]

            # The following line triggered lots of bugs when the "stop" button
            # was pressed and the core did this: "cf.cf.save('targets', [])"
            #
            #self._target_domain = cf.cf.get('targets')[0].get_domain()
            #
            #    Changing it to something awful but bug-free.
            targets = cf.cf.get('targets')
            if not targets:
                return
            else:
                self._target_domain = targets[0].get_domain()

    def _urls_to_verify_generator(self, resp, fuzzable_req):
        """
        Yields tuples containing:
            * Newly found URL
            * The FuzzableRequest instance passed as parameter
            * The HTTPResponse generated by the FuzzableRequest
            * Boolean indicating if we trust this reference or not

        :param resp: HTTP response object
        :param fuzzable_req: The HTTP request that generated the response
        """
        gen = itertools.chain(self._body_url_generator(resp, fuzzable_req),
                              headers_url_generator(resp, fuzzable_req))

        for ref, fuzzable_req, original_resp, possibly_broken in gen:
            if self._should_verify_extracted_url(ref, original_resp):
                yield ref, fuzzable_req, original_resp, possibly_broken

    def _body_url_generator(self, resp, fuzzable_req):
        """
        Yields tuples containing:
            * Newly found URL
            * The FuzzableRequest instance passed as parameter
            * The HTTPResponse generated by the FuzzableRequest
            * Boolean indicating if we trust this reference or not

        The newly found URLs are extracted from the http response body using
        one of the framework's parsers.

        :param resp: HTTP response object
        :param fuzzable_req: The HTTP request that generated the response
        """
        #
        # Note: I WANT to follow links that are in the 404 page.
        #
        try:
            doc_parser = parser_cache.dpc.get_document_parser_for(resp)
        except BaseFrameworkException, w3:
            om.out.debug('Failed to find a suitable document parser. '
                         'Exception "%s"' % w3)
        else:
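
The form handling at the top of crawl() boils down to "fill each form once
per URL". A minimal sketch, with a plain set standing in for the
ScalableBloomFilter and dummy values standing in for Form.smart_fill():

already_filled_form = set()

def fill_form_once(url, form):
    if url in already_filled_form:
        return False                      # this form was already filled, skip
    already_filled_form.add(url)
    # Stand-in for smart_fill(): put a plausible value in every empty field
    for field in form:
        form[field] = form[field] or 'abc'
    return True

form = {'name': '', 'email': ''}
print(fill_form_once('http://foo.com/contact', form))  # True, form filled
print(fill_form_once('http://foo.com/contact', form))  # False, deduplicated
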
Example #42
0
class allowed_methods(InfrastructurePlugin):
    """
    Enumerate the allowed methods of a URL.
    :author: Andres Riancho ([email protected])
    """

    BAD_CODES = {
        response_codes.UNAUTHORIZED, response_codes.NOT_IMPLEMENTED,
        response_codes.METHOD_NOT_ALLOWED, response_codes.FORBIDDEN
    }

    DAV_METHODS = {
        'DELETE', 'PROPFIND', 'PROPPATCH', 'COPY', 'MOVE', 'LOCK', 'UNLOCK',
        'MKCOL'
    }
    COMMON_METHODS = {'OPTIONS', 'GET', 'HEAD', 'POST', 'TRACE', 'PUT'}
    UNCOMMON_METHODS = {
        '*', 'SUBSCRIPTIONS', 'NOTIFY', 'DEBUG', 'TRACK', 'POLL', 'PIN',
        'INVOKE', 'SUBSCRIBE', 'UNSUBSCRIBE'
    }
    # Methods taken from http://www.w3.org/Protocols/HTTP/Methods.html
    PROPOSED_METHODS = {
        'CHECKOUT', 'SHOWMETHOD', 'LINK', 'UNLINK', 'CHECKIN', 'TEXTSEARCH',
        'SPACEJUMP', 'SEARCH', 'REPLY'
    }
    EXTRA_METHODS = {
        'CONNECT', 'RMDIR', 'MKDIR', 'REPORT', 'ACL', 'DELETE', 'INDEX',
        'LABEL', 'INVALID'
    }
    VERSION_CONTROL = {
        'VERSION_CONTROL', 'CHECKIN', 'UNCHECKOUT', 'PATCH', 'MERGE',
        'MKWORKSPACE', 'MKACTIVITY', 'BASELINE_CONTROL'
    }

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._exec = True
        self._already_tested = ScalableBloomFilter()

        # Methods
        self._supported_methods = self.DAV_METHODS | self.COMMON_METHODS | \
                                  self.UNCOMMON_METHODS | self.PROPOSED_METHODS | \
                                  self.EXTRA_METHODS | self.VERSION_CONTROL

        # User configured variables
        self._exec_one_time = True
        self._report_dav_only = True

    def discover(self, fuzzable_request):
        """
        Uses several techniques to try to find out what methods are allowed
        for a URL.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        if not self._exec:
            # This will remove the plugin from the infrastructure
            # plugins to be run.
            raise RunOnce()

        # Run the plugin.
        if self._exec_one_time:
            self._exec = False

        domain_path = fuzzable_request.get_url().get_domain_path()
        if domain_path not in self._already_tested:
            self._already_tested.add(domain_path)
            allowed_methods, id_list = self._identify_allowed_methods(
                domain_path)
            self._analyze_methods(domain_path, allowed_methods, id_list)

    def _identify_allowed_methods(self, url):
        # First, try to check available methods using OPTIONS,
        # if OPTIONS isn't enabled, do it manually
        allowed_options, id_options = self._identify_with_OPTIONS(url)
        allowed_bf, id_bf = self._identify_with_bruteforce(url)

        allowed_methods = allowed_options + allowed_bf
        # If a method was found by both, bf and options, it is duplicated in
        # the list. Remove dups
        allowed_methods = list(set(allowed_methods))
        # There are no duplicate requests.
        # Even if a method was discovered with both, bf and options, we
        # furthermore want to see both requests
        id_list = id_options + id_bf

        # Added this to make the output a little bit more readable.
        allowed_methods.sort()

        return allowed_methods, id_list

    def _identify_with_OPTIONS(self, url):
        """
        Find out what methods are allowed using OPTIONS
        :param url: Where to check.
        """
        allowed_methods = []
        id_list = []

        try:
            res = self._uri_opener.OPTIONS(url)
        except:
            pass
        else:
            headers = res.get_lower_case_headers()
            id_list.append(res.id)

            for header_name in ['allow', 'public']:
                if header_name in headers:
                    allowed_methods.extend(headers[header_name].split(','))
                    allowed_methods = [x.strip() for x in allowed_methods]
                    allowed_methods = list(set(allowed_methods))

        return allowed_methods, id_list

    def _identify_with_bruteforce(self, url):
        id_list = []
        allowed_methods = []
        #
        #   Before doing anything else, I'll send a request with a
        #   non-existant method if that request succeds, then all will...
        #
        non_exist_response = self._uri_opener.ARGENTINA(url)
        get_response = self._uri_opener.GET(url)

        if non_exist_response.get_code() not in self.BAD_CODES \
                and get_response.get_body() == non_exist_response.get_body():
            desc = 'The remote Web server has a custom configuration, in' \
                   ' which any non-implemented method that is invoked is' \
                   ' treated as a GET instead of returning a "Not Implemented"' \
                   ' response.'
            response_ids = [non_exist_response.get_id(), get_response.get_id()]
            i = Info('Non existent methods default to GET', desc, response_ids,
                     self.get_name())
            i.set_url(url)

            kb.kb.append(self, 'custom-configuration', i)
            #
            #   It makes no sense to continue working, all methods will
            #   appear as enabled because of this custom configuration.
            #
            return [], [non_exist_response.id, get_response.id]

        # 'DELETE' is not tested! I don't want to remove anything...
        # 'PUT' is not tested! I don't want to overwrite anything...
        methods_to_test = self._supported_methods.copy()

        # remove dangerous methods.
        methods_to_test.remove('DELETE')
        methods_to_test.remove('PUT')

        for method in methods_to_test:
            method_functor = getattr(self._uri_opener, method)
            try:
                response = method_functor(url)
            except:
                pass
            else:
                code = response.get_code()
                if code not in self.BAD_CODES:
                    allowed_methods.append(method)
                    id_list.append(response.id)

        return allowed_methods, id_list

    def _analyze_methods(self, url, allowed_methods, id_list):
        # Sometimes there are no allowed methods, which means that our plugin
        # failed to identify any methods.
        if not allowed_methods:
            return

        # Check for DAV
        elif set(allowed_methods).intersection(self.DAV_METHODS):
            # dav is enabled!
            # Save the results in the KB so that other plugins can use this
            # information
            desc = 'The URL "%s" has the following allowed methods. These' \
                   ' include DAV methods and should be disabled: %s'
            desc = desc % (url, ', '.join(allowed_methods))

            i = Info('DAV methods enabled', desc, id_list, self.get_name())
            i.set_url(url)
            i['methods'] = allowed_methods

            kb.kb.append(self, 'dav-methods', i)
        else:
            # Save the results in the KB so that other plugins can use this
            # information. Do not remove these information, other plugins
            # REALLY use it !
            desc = 'The URL "%s" has the following enabled HTTP methods: %s'
            desc = desc % (url, ', '.join(allowed_methods))

            i = Info('Allowed HTTP methods', desc, id_list, self.get_name())
            i.set_url(url)
            i['methods'] = allowed_methods

            kb.kb.append(self, 'methods', i)

    def end(self):
        """
        Print the results.
        """
        # First I get the data from the kb
        all_info_obj = kb.kb.get('allowed_methods', 'methods')
        dav_info_obj = kb.kb.get('allowed_methods', 'dav-methods')

        # Now I transform it to something I can use with group_by_min_key
        allMethods = []
        for i in all_info_obj:
            allMethods.append((i.get_url(), i['methods']))

        davMethods = []

        for i in dav_info_obj:
            davMethods.append((i.get_url(), i['methods']))

        # Now I work the data...
        to_show, method_type = davMethods, ' DAV'
        if not self._report_dav_only:
            to_show, method_type = allMethods, ''

        # Make it hashable
        tmp = []
        for url, methodList in to_show:
            tmp.append((url, ', '.join(methodList)))

        result_dict, itemIndex = group_by_min_key(tmp)

        for k in result_dict:
            if itemIndex == 0:
                # Grouped by URLs
                msg = 'The URL: "%s" has the following' + \
                      method_type + ' methods enabled:'
                om.out.information(msg % k)
            else:
                # Grouped by Methods
                msg = 'The methods: ' + k + \
                      ' are enabled on the following URLs:'
                om.out.information(msg)

            for i in result_dict[k]:
                om.out.information('- ' + i)

    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()

        d1 = 'Execute plugin only one time'
        h1 = 'Generally the methods allowed for a URL are configured system' \
             ' wide, so executing this plugin only once is the faster choice.' \
             ' The most accurate choice is to run it against every URL.'
        o = opt_factory('execOneTime',
                        self._exec_one_time,
                        d1,
                        'boolean',
                        help=h1)
        ol.add(o)

        d2 = 'Only report findings if uncommon methods are found'
        o = opt_factory('reportDavOnly', self._report_dav_only, d2, 'boolean')
        ol.add(o)

        return ol

    def set_options(self, options_list):
        """
        This method sets all the options that are configured using the user
        interface generated by the framework using the result of get_options().

        :param options_list: An OptionList with the options for the plugin.
        :return: No value is returned.
        """
        self._exec_one_time = options_list['execOneTime'].get_value()
        self._report_dav_only = options_list['reportDavOnly'].get_value()

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #43
0
class get_emails(GrepPlugin):
    """
    Find email accounts.

    :author: Andres Riancho ([email protected])
    """

    def __init__(self):
        GrepPlugin.__init__(self)

        # User configured variables
        self._only_target_domain = True
        self._already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point, get the emails and save them to the kb.

        :param request: The HTTP request
        :param response: The HTTP response
        :return: None
        """
        self._grep_worker(request, response, 'emails',
                          response.get_url().get_root_domain())

        if not self._only_target_domain:
            self._grep_worker(request, response, 'external_emails')

    def _grep_worker(self, request, response, kb_key, domain=None):
        """
        Helper method for using in self.grep()

        :param request: The HTTP request
        :param response: The HTTP response
        :param kb_key: Knowledge base dict key
        :param domain: Target domain for get_emails filter
        :return: None
        """
        try:
            dp = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            msg = 'Failed to get document parser for "%s" at get_emails.'
            om.out.debug(msg % response.get_url())
            return

        emails = set(dp.get_emails(domain))

        for mail_address in emails:
            # Reduce false positives
            if request.sent(mail_address):
                continue

            # Email address are case insensitive
            mail_address = mail_address.lower()
            url = response.get_url()
            uniq_key = (mail_address, url)

            if uniq_key in self._already_reported:
                continue

            # Avoid dups
            self._already_reported.add(uniq_key)

            # Create a new info object, and report it
            desc = 'The mail account: "%s" was found at "%s".'
            desc = desc % (mail_address, url)

            i = Info('Email address disclosure', desc, response.id,
                     self.get_name())
            i.add_to_highlight(mail_address)
            i.set_url(url)
            i[EmailInfoSet.ITAG] = mail_address
            i['user'] = mail_address.split('@')[0]

            self.kb_append_uniq_group('emails', kb_key, i,
                                      group_klass=EmailInfoSet)

    def set_options(self, options_list):
        self._only_target_domain = options_list['only_target_domain'].get_value()

    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()

        d1 = 'Only search emails for domain of target'
        o1 = opt_factory('only_target_domain', self._only_target_domain,
                         d1, 'boolean')
        ol.add(o1)

        return ol

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #44
0
class find_backdoors(CrawlPlugin):
    """
    Find web backdoors and web shells.

    :author: Andres Riancho ([email protected])
    """
    WEBSHELL_DB = os.path.join(ROOT_PATH, 'plugins', 'crawl', 'find_backdoors',
                               'web_shells.txt')

    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()

    def crawl(self, fuzzable_request):
        """
        For every directory, fetch a list of shell files and analyze the
        response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        domain_path = fuzzable_request.get_url().get_domain_path()

        if domain_path not in self._analyzed_dirs:
            self._analyzed_dirs.add(domain_path)

            # Read the web shell database
            web_shells = self._iter_web_shells()

            # Send the requests using threads:
            args_iter = (domain_path.url_join(fname) for fname in web_shells)
            self.worker_pool.map(self._check_if_exists, args_iter)

    def _iter_web_shells(self):
        """
        :yield: lines from the web shell DB
        """
        for line in file(self.WEBSHELL_DB):
            line = line.strip()

            if line.startswith('#'):
                continue

            if not line:
                continue

            yield line

    def _check_if_exists(self, web_shell_url):
        """
        Check if the file exists.

        :param web_shell_url: The URL to check
        """
        try:
            response = self._uri_opener.GET(web_shell_url, cache=True)
        except BaseFrameworkException:
            om.out.debug('Failed to GET webshell: ' + web_shell_url)
        else:
            if self._is_possible_backdoor(response):
                desc = 'A web backdoor was found at: "%s"; this could ' \
                       'indicate that the server has been compromised.'
                desc = desc % response.get_url()

                v = Vuln('Potential web backdoor', desc, severity.HIGH,
                         response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'backdoors', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())

                fr = FuzzableRequest.from_http_response(response)
                self.output_queue.put(fr)

    def _is_possible_backdoor(self, response):
        """
        Heuristic to infer if the content of <response> has the pattern of a
        backdoor response.

        :param response: HTTPResponse object
        :return: A bool value
        """
        if not is_404(response):
            body_text = response.get_body()
            dom = response.get_dom()
            if dom is not None:
                for ele, attrs in BACKDOOR_COLLECTION.iteritems():
                    for attrname, attr_vals in attrs.iteritems():
                        # Set of lowered attribute values
                        dom_attr_vals = \
                            set(n.get(attrname).lower() for n in
                                (dom.xpath('//%s[@%s]' % (ele, attrname))))
                        # If at least one element is in the intersection,
                        # return True
                        if dom_attr_vals.intersection(attr_vals):
                            return True

            # If the DOM heuristic did not match, try with keywords. At least
            # 2 should be contained in 'body_text' to succeed.
            times = 0
            for back_kw in KNOWN_OFFENSIVE_WORDS:
                if re.search(back_kw, body_text, re.I):
                    times += 1
                    if times == 2:
                        return True
        return False

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #45
0
class analyze_cookies(GrepPlugin):
    """
    Grep every response for session cookies sent by the web application.

    :author: Andres Riancho ([email protected])
    """
    SECURE_RE = re.compile('; *?secure([\s;, ]|$)', re.I)
    HTTPONLY_RE = re.compile('; *?httponly([\s;, ]|$)', re.I)

    def __init__(self):
        GrepPlugin.__init__(self)

        self._cookie_key_failed_fingerprint = set()
        self._already_reported_fingerprint = set()
        self._already_reported_cookies = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point, search for cookies.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        # do this check every time
        self._ssl_cookie_via_http(request, response)

        #
        # Analyze the response headers and find cookies
        #
        headers = response.get_headers()

        for header_name in headers:
            if header_name.lower() in COOKIE_HEADERS:

                cookie_header_value = headers[header_name].strip()
                cookie_object = self._parse_cookie(request, response,
                                                   cookie_header_value)

                if cookie_object is not None:
                    self._collect_cookies(request, response, cookie_object,
                                          cookie_header_value)

                    # Find if the cookie introduces any vulnerability,
                    # or discloses information
                    self._analyze_cookie_security(request, response,
                                                  cookie_object,
                                                  cookie_header_value)

    def _collect_cookies(self, request, response, cookie_object,
                         cookie_header_value):
        """
        Store (unique) cookies in the KB for later analysis.
        """
        # Cookie class has an __eq__ which compares Cookies' keys for
        # equality, not the values, so these two cookies are equal:
        #        a=1;
        #        a=2;
        # And these two are not:
        #        a=1;
        #        b=1;
        cookie_keys = tuple(cookie_object.keys())
        uniq_id = (cookie_keys, response.get_url())
        if uniq_id in self._already_reported_cookies:
            return

        # No duplicates
        self._already_reported_cookies.add(uniq_id)

        # Create the info and store it in the KB
        cstr = cookie_object.output(header='').strip()
        desc = 'The URL: "%s" sent the cookie: "%s".'
        desc = desc % (response.get_url(), cstr)

        i = CookieInfo('Cookie', desc, response.id, self.get_name())
        i.set_url(response.get_url())
        i.set_cookie_object(cookie_object)
        """
        The expiration date tells the browser when to delete the
        cookie. If no expiration date is provided, the cookie is
        deleted at the end of the user session, that is, when the
        user quits the browser. As a result, specifying an expiration
        date is a means for making cookies to survive across
        browser sessions. For this reason, cookies that have an
        expiration date are called persistent.
        """
        i['persistent'] = 'expires' in cookie_object

        self.kb_append_uniq_group(self,
                                  'cookies',
                                  i,
                                  group_klass=CollectedCookieInfoSet)

    def _parse_cookie(self, request, response, cookie_header_value):
        """
        If the response sets more than one Cookie, this method will
        be called once for each "Set-Cookie" header.

        BUGBUG: The urllib2 library concatenates the values of repeated
                headers using ','. See HTTPMessage.addheader() in httplib.py

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :param cookie_header_value: The cookie, as sent in the HTTP response

        :return: The cookie object or None if the parsing failed
        """
        try:
            # Note to self: This line may print some chars to the console
            return parse_cookie(cookie_header_value)
        except Cookie.CookieError:
            desc = 'The remote Web application sent a cookie with an' \
                   ' incorrect format: "%s" that does NOT respect the RFC.'
            desc = desc % cookie_header_value

            i = CookieInfo('Invalid cookie', desc, response.id,
                           self.get_name())
            i.set_url(response.get_url())
            i.set_cookie_string(cookie_header_value)

            # The cookie is invalid, this is worth mentioning ;)
            kb.kb.append(self, 'invalid-cookies', i)
            return None

    def _analyze_cookie_security(self, request, response, cookie_obj,
                                 cookie_header_value):
        """
        In this method I call all the other methods that perform a specific
        analysis of the already caught cookie.
        """
        self._secure_over_http(request, response, cookie_obj,
                               cookie_header_value)
        self._not_secure_over_https(request, response, cookie_obj,
                                    cookie_header_value)

        fingerprinted = self._match_cookie_fingerprint(request, response,
                                                       cookie_obj)
        self._http_only(request, response, cookie_obj, cookie_header_value,
                        fingerprinted)

    def _http_only(self, request, response, cookie_obj, cookie_header_value,
                   fingerprinted):
        """
        Verify if the cookie has the httpOnly parameter set

        Reference:
            http://www.owasp.org/index.php/HTTPOnly
            http://en.wikipedia.org/wiki/HTTP_cookie

        :param request: The http request object
        :param response: The http response object
        :param cookie_obj: The cookie object to analyze
        :param cookie_header_value: The cookie, as sent in the HTTP response
        :param fingerprinted: True if the cookie was fingerprinted
        :return: None
        """
        if not self.HTTPONLY_RE.search(cookie_header_value):

            vuln_severity = severity.MEDIUM if fingerprinted else severity.LOW
            desc = 'A cookie without the HttpOnly flag was sent when' \
                   ' requesting "%s". The HttpOnly flag prevents potential' \
                   ' intruders from accessing the cookie value through' \
                   ' Cross-Site Scripting attacks.'
            desc = desc % response.get_url()

            v = CookieVuln('Cookie without HttpOnly', desc, vuln_severity,
                           response.id, self.get_name())
            v.set_url(response.get_url())
            v.set_cookie_object(cookie_obj)

            self.kb_append_uniq_group(self,
                                      'http_only',
                                      v,
                                      group_klass=HttpOnlyCookieInfoSet)

    def _ssl_cookie_via_http(self, request, response):
        """
        Analyze if a cookie value, sent in a HTTPS request, is now used for
        identifying the user in an insecure page. Example:
            Login is done over SSL
            The rest of the page is HTTP
        """
        if request.get_url().get_protocol().lower() == 'https':
            return

        # Pre-calculate to avoid CPU usage
        request_dump = request.dump()

        for info_set in kb.kb.get(self, 'cookies'):
            for info in info_set.infos:
                if info.get_url().get_protocol().lower() != 'https':
                    continue

                if request.get_url().get_domain() != info.get_url().get_domain(
                ):
                    continue

                # The cookie was sent using SSL, I'll check if the current
                # request, is using these values in the POSTDATA / QS / COOKIE
                for key in info[COOKIE_KEYS]:

                    value = info.get_cookie_object()[key].value

                    # This check is here to reduce false positives
                    if len(value) > 6 and value in request_dump:

                        desc = 'Cookie values that were set over HTTPS, are' \
                               ' then sent over an insecure channel in a' \
                               ' request to "%s".'
                        desc = desc % request.get_url()

                        v = CookieVuln('Secure cookies over insecure channel',
                                       desc, severity.HIGH, response.id,
                                       self.get_name())
                        v.set_url(response.get_url())
                        v.set_cookie_object(info.get_cookie_object())

                        kb.kb.append(self, 'secure_via_http', v)

    def _match_cookie_fingerprint(self, request, response, cookie_obj):
        """
        Now we analyze the cookie and try to guess the remote web server or
        programming framework based on the cookie that was sent.

        :return: True if the cookie was fingerprinted
        """
        cookie_keys = cookie_obj.keys()
        # Iterate over a copy: removing items from the list being iterated
        # over would skip keys
        for cookie_key in cookie_keys[:]:
            if cookie_key in self._cookie_key_failed_fingerprint:
                cookie_keys.remove(cookie_key)

            elif cookie_key in self._already_reported_fingerprint:
                cookie_keys.remove(cookie_key)

        for cookie_key in cookie_keys:
            for cookie_str_db, system_name in COOKIE_FINGERPRINT:
                if cookie_str_db not in cookie_key:
                    continue

                if cookie_key in self._already_reported_fingerprint:
                    continue

                # Unreported match!
                self._already_reported_fingerprint.add(cookie_key)

                desc = 'A cookie matching the cookie fingerprint DB'\
                       ' has been found when requesting "%s".'\
                       ' The remote platform is: "%s".'
                desc = desc % (response.get_url(), system_name)

                i = CookieInfo('Identified cookie', desc, response.id,
                               self.get_name())
                i.set_cookie_object(cookie_obj)
                i.set_url(response.get_url())
                i['httpd'] = system_name

                kb.kb.append(self, 'fingerprint', i)
                return True
        else:
            # No match was found, we store the keys so we don't try to match
            # them again against the COOKIE_FINGERPRINT
            for cookie_key in cookie_keys:
                self._cookie_key_failed_fingerprint.add(cookie_key)

        return False

    def _secure_over_http(self, request, response, cookie_obj,
                          cookie_header_value):
        """
        Checks if a cookie marked as secure is sent over http.

        Reference:
            http://en.wikipedia.org/wiki/HTTP_cookie

        :param request: The http request object
        :param response: The http response object
        :param cookie_obj: The cookie object to analyze
        :param cookie_header_value: The cookie, as sent in the HTTP response
        :return: None
        """
        if self.SECURE_RE.search(cookie_header_value) and \
        response.get_url().get_protocol().lower() == 'http':

            desc = 'A cookie marked with the secure flag was sent over' \
                   ' an insecure channel (HTTP) when requesting the URL:'\
                   ' "%s", this usually means that the Web application was'\
                   ' designed to run over SSL and was deployed without'\
                   ' security or that the developer does not understand the'\
                   ' "secure" flag.'
            desc = desc % response.get_url()

            v = CookieVuln('Secure cookie over HTTP', desc, severity.HIGH,
                           response.id, self.get_name())
            v.set_url(response.get_url())
            v.set_cookie_object(cookie_obj)

            kb.kb.append(self, 'false_secure', v)

    def _not_secure_over_https(self, request, response, cookie_obj,
                               cookie_header_value):
        """
        Checks if a cookie that does NOT have a secure flag is sent over https.

        :param request: The http request object
        :param response: The http response object
        :param cookie_obj: The cookie object to analyze
        :param cookie_header_value: The cookie, as sent in the HTTP response
        :return: None
        """
        if (response.get_url().get_protocol().lower() == 'https' and
                not self.SECURE_RE.search(cookie_header_value)):
            desc = 'A cookie without the secure flag was sent in an HTTPS' \
                   ' response at "%s". The secure flag prevents the browser' \
                   ' from sending a "secure" cookie over an insecure HTTP' \
                   ' channel, thus preventing potential session hijacking' \
                   ' attacks.'
            desc = desc % response.get_url()

            v = CookieVuln('Secure flag missing in HTTPS cookie', desc,
                           severity.MEDIUM, response.id, self.get_name())
            v.set_url(response.get_url())
            v.set_cookie_object(cookie_obj)

            self.kb_append_uniq_group(self,
                                      'secure',
                                      v,
                                      group_klass=NotSecureFlagCookieInfoSet)

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
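The SECURE_RE pattern used by the checks above is defined outside this snippet. A minimal, self-contained sketch of the same secure-over-HTTP check, assuming the pattern simply matches the "secure" attribute of a Set-Cookie value (the regex and the sample cookies below are illustrative, not w3af's actual definitions):

import re

# Assumed pattern: matches the "secure" attribute in a Set-Cookie value.
SECURE_RE = re.compile(r';\s*secure\b', re.IGNORECASE)

def secure_cookie_over_http(protocol, cookie_header_value):
    """Return True when a cookie marked Secure travels over plain HTTP."""
    return (protocol.lower() == 'http' and
            SECURE_RE.search(cookie_header_value) is not None)

print(secure_cookie_over_http('http', 'sid=abc123; Secure; HttpOnly'))  # True
print(secure_cookie_over_http('https', 'sid=abc123; Secure'))           # False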
Example #46
0
class fingerprint_404(object):
    """
    Read the 404 page(s) returned by the server.

    :author: Andres Riancho ([email protected])
    """

    _instance = None

    def __init__(self):
        #
        #   Set the opener, I need it to perform some tests and gain
        #   the knowledge about the server's 404 response bodies.
        #
        self._uri_opener = None
        self._worker_pool = None
        
        #
        #   Internal variables
        #
        self._already_analyzed = False
        self._404_bodies = []
        self._lock = thread.allocate_lock()
        self._fingerprinted_paths = ScalableBloomFilter()
        self._directory_uses_404_codes = ScalableBloomFilter()

        # It is OK to store 200 here, I'm only storing path+filename as the key,
        # and bool as the value.
        self.is_404_LRU = LRU(200)

    def set_url_opener(self, urlopener):
        self._uri_opener = urlopener

    def set_worker_pool(self, worker_pool):
        self._worker_pool = worker_pool

    def generate_404_knowledge(self, url):
        """
        Based on a URL, request something that we know is going to be a 404.
        Afterwards analyze the 404s and summarize them.

        :return: A list with 404 bodies.
        """
        #
        #    This is the case when nobody has properly configured
        #    the object in order to use it.
        #
        if self._uri_opener is None:
            msg = '404 fingerprint database was incorrectly initialized.'
            raise RuntimeError(msg)

        # Get the filename extension and create a 404 for it
        extension = url.get_extension()
        domain_path = url.get_domain_path()

        # the result
        self._response_body_list = []

        #
        #   This is a list of the most common handlers; in some configurations
        #   the 404 depends on the handler, so I want to make sure that I
        #   catch the 404 for each one
        #
        handlers = set()
        handlers.update(
            ['py', 'php', 'asp', 'aspx', 'do', 'jsp', 'rb'])
        handlers.update(
            ['gif', 'htm', 'pl', 'cgi', 'xhtml', 'htmls', 'foobar'])
        if extension:
            handlers.add(extension)

        args_list = []

        for extension in handlers:
            rand_alnum_file = rand_alnum(8) + '.' + extension
            url404 = domain_path.url_join(rand_alnum_file)
            args_list.append(url404)

        self._worker_pool.map(self._send_404, args_list)

        #
        #   I have the bodies in self._response_body_list, but maybe they
        #   all look the same, so I'll filter the ones that look alike.
        #
        result = [self._response_body_list[0], ]
        for i in self._response_body_list:
            for j in self._response_body_list:

                if relative_distance_ge(i, j, IS_EQUAL_RATIO):
                    # They are equal, we are ok with that
                    continue
                else:
                    # They are not equal, which means we'll have to add this one to the list
                    result.append(j)

        # I don't need these anymore
        self._response_body_list = None

        # And I return the ones I need
        result = list(set(result))
        om.out.debug('The 404 body result database has a length of ' +
                     str(len(result)) + '.')

        self._404_bodies = result
        self._already_analyzed = True
        self._fingerprinted_paths.add(domain_path)

    def need_analysis(self):
        return not self._already_analyzed

    @retry(tries=2, delay=0.5, backoff=2)
    def _send_404(self, url404, store=True):
        """
        Sends a GET request to url404 and saves the response body in
        self._response_body_list.
        :return: The HTTP response.
        """
        # I don't use the cache, because the URLs are random and the only thing that
        # cache does is to fill up disk space
        response = self._uri_opener.GET(url404, cache=False, grep=False)

        if store:
            # I don't want the random file name to affect the 404, so I
            # replace it with a blank space.
            response_body = get_clean_body(response)
            self._response_body_list.append(response_body)

        return response

    def is_404(self, http_response):
        """
        All of my previous versions of is_404 were very complex and tried to
        handle all possible cases. The truth is that in most "strange"
        cases I was failing miserably, so now I changed my 404 detection once
        again, but keeping it as simple as possible.

        Also, and because I was trying to cover ALL CASES, I was performing a
        lot of requests in order to cover them, which in most situations was
        unnecessary.

        So now I go for a much simpler approach:
            1- Cover the simplest case of all using only 1 HTTP request
            2- Give the users the power to configure the 404 detection by
               setting a string that identifies the 404 response (in case we
               are missing it for some reason in case #1)

        :param http_response: The HTTP response which we want to know if it
                                  is a 404 or not.
        """
        #
        #   First we handle the user configured exceptions:
        #
        domain_path = http_response.get_url().get_domain_path()
        if domain_path in cf.cf.get('always_404'):
            return True
        elif domain_path in cf.cf.get('never_404'):
            return False

        #
        #    The user configured setting. "If this string is in the response,
        #    then it is a 404"
        #
        string_match_404 = cf.cf.get('string_match_404')
        if string_match_404 and string_match_404 in http_response:
            return True

        #
        #   This is the simplest case, we don't even have to think about it.
        #
        #   If there is some custom website that always returns 404 codes, then we
        #   are screwed, but this is open source, and the pentester working on
        #   that site can modify these lines.
        #
        if http_response.get_code() == 404:
            return True

        #
        #    Simple: if the file we requested is in a directory that's known to
        #    return 404 codes for files that do not exist, AND this is NOT a 404,
        #    then we return False!
        #
        if domain_path in self._directory_uses_404_codes and \
                http_response.get_code() != 404:
            return False

        #
        #   Before actually working, I'll check if this response is in the LRU,
        #   if it is I just return the value stored there.
        #
        if http_response.get_url().get_path() in self.is_404_LRU:
            return self.is_404_LRU[http_response.get_url().get_path()]

        with self._lock:
            if self.need_analysis():
                self.generate_404_knowledge(http_response.get_url())

        # The bodies in self._404_bodies were already cleaned inside
        # generate_404_knowledge, so we need to clean this one in order to
        # have a fair comparison
        html_body = get_clean_body(http_response)

        #
        #    Compare this response to all the 404's I have in my DB
        #
        #    Note: while self._404_bodies is a list, we can perform this for loop
        #          without "with self._lock", read comments in stackoverflow:
        #          http://stackoverflow.com/questions/9515364/does-python-freeze-the-list-before-for-loop
        #
        for body_404_db in self._404_bodies:

            if relative_distance_ge(body_404_db, html_body, IS_EQUAL_RATIO):
                msg = '"%s" (id:%s) is a 404 [similarity_index > %s]'
                fmt = (
                    http_response.get_url(), http_response.id, IS_EQUAL_RATIO)
                om.out.debug(msg % fmt)
                return self._fingerprinted_as_404(http_response)

        else:
            #
            #    I get here when the for ends and no body_404_db matched with the
            #    html_body that was sent as a parameter by the user. This means one
            #    of two things:
            #        * There is not enough knowledge in self._404_bodies, or
            #        * The answer is NOT a 404.
            #
            #    Because we want to reduce the amount of "false positives" that
            #    this method returns, we'll perform one extra check before saying
            #    that this is NOT a 404.
            if http_response.get_url().get_domain_path() not in self._fingerprinted_paths:
                if self._single_404_check(http_response, html_body):
                    self._404_bodies.append(html_body)
                    self._fingerprinted_paths.add(
                        http_response.get_url().get_domain_path())

                    msg = '"%s" (id:%s) is a 404 [similarity_index > %s]. Adding new'
                    msg += ' knowledge to the 404_bodies database (length=%s).'
                    fmt = (http_response.get_url(), http_response.id,
                           IS_EQUAL_RATIO, len(self._404_bodies))
                    om.out.debug(msg % fmt)

                    return self._fingerprinted_as_404(http_response)

            msg = '"%s" (id:%s) is NOT a 404 [similarity_index < %s].'
            fmt = (http_response.get_url(), http_response.id, IS_EQUAL_RATIO)
            om.out.debug(msg % fmt)
            return self._fingerprinted_as_200(http_response)

    def _fingerprinted_as_404(self, http_response):
        """
        Convenience function so that I don't forget to update the LRU
        :return: True
        """
        self.is_404_LRU[http_response.get_url().get_path()] = True
        return True

    def _fingerprinted_as_200(self, http_response):
        """
        Convenience function so that I don't forget to update the LRU
        :return: False
        """
        self.is_404_LRU[http_response.get_url().get_path()] = False
        return False

    def _single_404_check(self, http_response, html_body):
        """
        Performs a very simple check to verify if this response is a 404 or not.

        It takes the original URL and modifies it by prepending "not-" to the
        filename, then performs a request to that URL and compares the original
        response with the modified one. If they are equal then the original
        request is a 404.

        :param http_response: The original HTTP response
        :param html_body: The original HTML body after passing it by a cleaner

        :return: True if the original response was a 404!
        """
        response_url = http_response.get_url()
        filename = response_url.get_file_name()
        if not filename:
            relative_url = '../%s/' % rand_alnum(8)
            url_404 = response_url.url_join(relative_url)
        else:
            relative_url = 'not-%s' % filename
            url_404 = response_url.url_join(relative_url)

        response_404 = self._send_404(url_404, store=False)
        clean_response_404_body = get_clean_body(response_404)

        if response_404.get_code() == 404 and \
                url_404.get_domain_path() not in self._directory_uses_404_codes:
            self._directory_uses_404_codes.add(url_404.get_domain_path())

        return relative_distance_ge(clean_response_404_body, html_body, IS_EQUAL_RATIO)
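The relative_distance_ge helper and the IS_EQUAL_RATIO constant come from w3af modules that are not part of this snippet. As a rough sketch of the underlying idea, the same fuzzy comparison can be expressed with difflib from the standard library (the 0.90 threshold is an assumption, not w3af's documented value):

import difflib

IS_EQUAL_RATIO = 0.90  # assumed threshold, for illustration only

def relative_distance_ge(body_a, body_b, threshold=IS_EQUAL_RATIO):
    """Return True when the two bodies are at least `threshold` similar."""
    ratio = difflib.SequenceMatcher(None, body_a, body_b).ratio()
    return ratio >= threshold

# Two 404 pages that differ only in the random filename compare as equal,
# which is exactly what generate_404_knowledge relies on.
page_1 = ('<html><head><title>404 Not Found</title></head><body>The'
          ' requested file aaaa1111.php was not found on this server.'
          '</body></html>')
page_2 = ('<html><head><title>404 Not Found</title></head><body>The'
          ' requested file zzzz9999.php was not found on this server.'
          '</body></html>')
print(relative_distance_ge(page_1, page_2))  # True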
Example #47
0
File: dav.py Project: foobarmonk/w3af
class dav(AuditPlugin):
    """
    Verify if the WebDAV module is properly configured.

    :author: Andres Riancho ([email protected])
    """

    CONTENT_TYPE = Headers([('content-type',
                             'application/xml; charset="utf-8"')])

    def __init__(self):
        AuditPlugin.__init__(self)

        # Internal variables
        self._already_tested_dirs = ScalableBloomFilter()

    def audit(self, freq, orig_response, debugging_id):
        """
        Searches for file upload vulns using PUT method.

        :param freq: A FuzzableRequest
        :param orig_response: The HTTP response associated with the fuzzable request
        :param debugging_id: A unique identifier for this call to audit()
        """
        # Start
        domain_path = freq.get_url().get_domain_path()
        if domain_path not in self._already_tested_dirs:
            self._already_tested_dirs.add(domain_path)
            #
            # Send the three requests in different threads, store the
            # apply_result objects in order to be able to "join()" in the
            # next for loop
            #
            # TODO: This seems to be a fairly common use case: Send args to N
            # functions that need to be run in different threads. If possible
            # code this into threadpool.py in order to make this code clearer
            results = []
            for func in [self._PUT, self._PROPFIND, self._SEARCH]:
                apply_res = self.worker_pool.apply_async(func, (domain_path,))
                results.append(apply_res)

            for apply_res in results:
                apply_res.get()

    #pylint: disable=C0103
    def _SEARCH(self, domain_path):
        """
        Test SEARCH method.
        """
        content = "<?xml version='1.0'?>\r\n"
        content += "<g:searchrequest xmlns:g='DAV:'>\r\n"
        content += "<g:sql>\r\n"
        content += "Select 'DAV:displayname' from scope()\r\n"
        content += "</g:sql>\r\n"
        content += "</g:searchrequest>\r\n"

        res = self._uri_opener.SEARCH(domain_path, data=content,
                                      headers=self.CONTENT_TYPE)

        content_matches = '<a:response>' in res or '<a:status>' in res or \
            'xmlns:a="DAV:"' in res

        if content_matches and res.get_code() in xrange(200, 300):
            msg = 'Directory listing with HTTP SEARCH method was found at' \
                  ' directory: "%s".' % domain_path
                  
            v = Vuln('Insecure DAV configuration', msg, severity.MEDIUM,
                     res.id, self.get_name())

            v.set_url(res.get_url())
            v.set_method('SEARCH')
            
            self.kb_append(self, 'dav', v)

    #pylint: disable=C0103
    def _PROPFIND(self, domain_path):
        """
        Test PROPFIND method
        """
        content = "<?xml version='1.0'?>\r\n"
        content += "<a:propfind xmlns:a='DAV:'>\r\n"
        content += "<a:prop>\r\n"
        content += "<a:displayname:/>\r\n"
        content += "</a:prop>\r\n"
        content += "</a:propfind>\r\n"

        headers = copy.deepcopy(self.CONTENT_TYPE)
        headers['Depth'] = '1'

        res = self._uri_opener.PROPFIND(domain_path, data=content,
                                        headers=headers)

        if "D:href" in res and res.get_code() in xrange(200, 300):
            msg = 'Directory listing with HTTP PROPFIND method was found at' \
                  ' directory: "%s".' % domain_path

            v = Vuln('Insecure DAV configuration', msg, severity.MEDIUM,
                     res.id, self.get_name())

            v.set_url(res.get_url())
            v.set_method('PROPFIND')

            self.kb_append(self, 'dav', v)

    #pylint: disable=C0103
    def _PUT(self, domain_path):
        """
        Tests PUT method.
        """
        # upload
        url = domain_path.url_join(rand_alpha(5))
        rnd_content = rand_alnum(6)
        headers = Headers([('content-type', 'text/plain')])

        put_response = self._uri_opener.PUT(url, data=rnd_content,
                                            headers=headers)

        # check if uploaded
        res = self._uri_opener.GET(url, cache=True)
        if res.get_body() == rnd_content:
            msg = 'File upload with HTTP PUT method was found at resource:' \
                  ' "%s". A test file was uploaded to: "%s".'
            msg = msg % (domain_path, res.get_url())
            
            v = Vuln('Publicly writable directory', msg, severity.HIGH,
                     [put_response.id, res.id], self.get_name())

            v.set_url(url)
            v.set_method('PUT')
            
            self.kb_append(self, 'dav', v)

        # Report some common errors
        elif put_response.get_code() == 500:
            msg = 'DAV seems to be incorrectly configured. The web server' \
                  ' answered with a 500 error code. In most cases, this means'\
                  ' that the DAV extension failed in some way. This error was'\
                  ' found at: "%s".' % put_response.get_url()

            i = Info('DAV incorrect configuration', msg, res.id, self.get_name())

            i.set_url(url)
            i.set_method('PUT')
            
            self.kb_append(self, 'dav', i)

        # Report some common errors
        elif put_response.get_code() == 403:
            # handle false positive when PUT method is not supported
            # https://github.com/andresriancho/w3af/pull/2724/files
            if 'supported' in put_response.get_body().lower():
                return
            
            msg = 'DAV seems to be correctly configured and allowing you to'\
                  ' use the PUT method but the directory does not have the'\
                  ' right permissions that would allow the web server to'\
                  ' write to it. This error was found at: "%s".'
            msg = msg % put_response.get_url()
            
            i = Info('DAV incorrect configuration', msg,
                     [put_response.id, res.id], self.get_name())

            i.set_url(url)
            i.set_method('PUT')
            
            self.kb_append(self, 'dav', i)

    def get_plugin_deps(self):
        """
        :return: A list with the names of the plugins that should be run before
                 the current one.
        """
        return ['infrastructure.allowed_methods',
                'infrastructure.server_header']

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
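Outside of the w3af plugin framework, the core of the PUT test above boils down to two raw HTTP requests. A minimal sketch using only the standard library (the target host and path are hypothetical placeholders):

import httplib  # Python 2; use http.client on Python 3

def put_probe(host, path, token):
    """Upload a token via PUT, then GET it back to confirm the write."""
    conn = httplib.HTTPConnection(host)
    conn.request('PUT', path, body=token,
                 headers={'Content-Type': 'text/plain'})
    conn.getresponse().read()

    conn.request('GET', path)
    body = conn.getresponse().read()
    conn.close()

    # The directory is publicly writable when the token comes back intact.
    return body == token

# Example (hypothetical target):
#   put_probe('target.example', '/uploads/w3af_probe.txt', 'abc123')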
Example #48
0
class url_session(GrepPlugin):
    """
    Finds URLs which have a parameter that holds the session ID. 

    :author: Andres Riancho ([email protected])
    """

    SESSID_PARAMS = ALL_COOKIES

    def __init__(self):
        GrepPlugin.__init__(self)
        self._already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point, find session IDs in URLs and report them.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        self.analyze_uri(request, response)
        self.analyze_document_links(request, response)

    def _has_sessid(self, uri):
        """
        :return: A set which contains the session ID parameters (if any)
        """
        sessid_in_uri = set()
        if uri.has_query_string():
            query_string = uri.get_querystring()
            params = set(query_string.keys())
            sessid_in_uri = self.SESSID_PARAMS.intersection(params)
        return sessid_in_uri

    def analyze_document_links(self, request, response):
        """
        Find session IDs in the document's links and store them in the KB.
        """
        try:
            doc_parser = parser_cache.dpc.get_document_parser_for(response)
        except Exception:
            pass
        else:
            parsed_refs, _ = doc_parser.get_references()

            for link_uri in parsed_refs:
                if self._has_sessid(link_uri) and \
                response.get_url() not in self._already_reported:
                    #   report this information only once
                    self._already_reported.add(response.get_url())

                    desc = 'The HTML content at "%s" contains a link (%s)'\
                           ' which holds a session id. The ID could be leaked'\
                           ' to third party domains through the referrer'\
                           ' header.'
                    desc = desc % (response.get_url(), link_uri)

                    #   append the info object to the KB.
                    i = Info('Session ID in URL', desc, response.id,
                             self.get_name())
                    i.set_uri(response.get_uri())

                    self.kb_append(self, 'url_session', i)
                    break

    def analyze_uri(self, request, response):
        """
        Find session IDs in the URI and store them in the KB.
        """
        request_uri = request.get_uri()
        if self._has_sessid(request_uri) and \
        response.get_url() not in self._already_reported:
            #   report this information only once
            self._already_reported.add(response.get_url())

            desc = 'The URL "%s" contains a session id which could be'\
                   ' leaked to third party domains through the referrer'\
                   ' header.'
            desc = desc % request_uri

            #   append the info object to the KB.
            i = Info('Session ID in URL', desc, response.id, self.get_name())
            i.set_uri(response.get_uri())

            self.kb_append(self, 'url_session', i)

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
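ALL_COOKIES is imported from w3af's cookie-grepping code and is not shown in this listing; it is presumably a set of well-known session parameter names. A standalone sketch of the same query-string check, with an illustrative stand-in for that set:

import urlparse  # Python 2; use urllib.parse on Python 3

# Assumed stand-in for ALL_COOKIES: common session ID parameter names.
SESSID_PARAMS = set(['phpsessid', 'jsessionid', 'aspsessionid', 'sid'])

def sessid_params_in_uri(uri):
    """Return the session ID parameters found in the URI's query string."""
    query = urlparse.urlparse(uri).query
    params = set(key.lower() for key in urlparse.parse_qs(query))
    return SESSID_PARAMS.intersection(params)

print(sessid_params_in_uri('http://site.example/a?PHPSESSID=deadbeef&x=1'))
# set(['phpsessid'])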
Example #49
0
class pykto(CrawlPlugin):
    """
    A nikto port to python.
    :author: Andres Riancho ([email protected])
    """
    def __init__(self):
        CrawlPlugin.__init__(self)

        # internal variables
        self._exec = True
        self._already_analyzed = ScalableBloomFilter()

        # User configured parameters
        self._db_file = os.path.join(ROOT_PATH, 'plugins', 'crawl', 'pykto',
                                     'scan_database.db')
        self._extra_db_file = os.path.join(ROOT_PATH, 'plugins', 'crawl',
                                           'pykto', 'w3af_scan_database.db')

        self._cgi_dirs = ['/cgi-bin/']
        self._admin_dirs = ['/admin/', '/adm/']

        self._users = ['adm', 'bin', 'daemon', 'ftp', 'guest', 'listen', 'lp',
                       'mysql', 'noaccess', 'nobody', 'nobody4', 'nuucp',
                       'operator', 'root', 'smmsp', 'smtp', 'sshd', 'sys',
                       'test', 'unknown']

        self._nuke = ['/', '/postnuke/', '/postnuke/html/', '/modules/',
                      '/phpBB/', '/forum/']

        self._mutate_tests = False

    def crawl(self, fuzzable_request):
        """
        Runs pykto against the site.

        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        """
        if not self._exec and not self._mutate_tests:
            # don't run anymore
            raise RunOnce()

        else:
            # Run the basic scan (only once)
            url = fuzzable_request.get_url().base_url()
            if url not in self._already_analyzed:
                self._already_analyzed.add(url)
                self._run(url)
                self._exec = False

            # And now mutate if the user configured it...
            if self._mutate_tests:

                # Tests need to be mutated
                url = fuzzable_request.get_url().get_domain_path()
                if url not in self._already_analyzed:
                    # Save the directories I already have tested in order to
                    # avoid testing them more than once...
                    self._already_analyzed.add(url)
                    self._run(url)

    def _run(self, url):
        """
        Really run the plugin.

        :param url: The URL object I have to test.
        """
        config = Config(self._cgi_dirs, self._admin_dirs, self._nuke,
                        self._mutate_tests, self._users)
                
        for db_file in [self._db_file, self._extra_db_file]:
            
            parser = NiktoTestParser(db_file, config, url)
            
            # Send the requests using threads:
            self.worker_pool.map_multi_args(self._send_and_check,
                                            parser.test_generator(),
                                            chunksize=10)

    def _send_and_check(self, nikto_test):
        """
        This method sends the request to the server.

        :return: True if the requested URI responded as expected.
        """
        #
        #    Small performance improvement. If all we want to know is whether
        #    the file exists or not, let's use HEAD instead of GET. In 99% of
        #    the cases this will work as expected and we'll have a significant
        #    performance improvement.
        #
        if nikto_test.is_vulnerable.checks_only_response_code():
            try:
                http_response = self._uri_opener.HEAD(nikto_test.uri)
            except Exception:
                return
            else:
                if not nikto_test.is_vulnerable.check(http_response):
                    return False

        function_ptr = getattr(self._uri_opener, nikto_test.method)

        try:
            http_response = function_ptr(nikto_test.uri)
        except BaseFrameworkException, e:
            msg = ('An exception was raised while requesting "%s", the error'
                   ' message is: "%s".')
            om.out.error(msg % (nikto_test.uri, e))
            return False

        if nikto_test.is_vulnerable.check(http_response) and \
        not is_404(http_response):
            
            vdesc = ('pykto plugin found a vulnerability at URL: "%s".'
                     ' Vulnerability description: "%s".')
            vdesc = vdesc % (http_response.get_url(), nikto_test.message)

            v = Vuln('Insecure URL', vdesc, severity.LOW,
                     http_response.id, self.get_name())
            v.set_uri(http_response.get_uri())
            v.set_method(nikto_test.method)

            kb.kb.append(self, 'vuln', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())

            fr = FuzzableRequest.from_http_response(http_response)
            self.output_queue.put(fr)
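The HEAD-before-GET optimization in _send_and_check can be isolated into a small standalone sketch (host and path are placeholders; this mirrors pykto's shortcut only in spirit):

import httplib  # Python 2; use http.client on Python 3

def exists_cheaply(host, path):
    """Rule a resource out with a cheap HEAD before paying for a full GET."""
    conn = httplib.HTTPConnection(host)
    conn.request('HEAD', path)
    head_response = conn.getresponse()
    head_response.read()

    if head_response.status == 404:
        conn.close()
        return False  # no need to download the body

    # Confirm with a full GET only for promising candidates.
    conn.request('GET', path)
    get_response = conn.getresponse()
    body = get_response.read()
    conn.close()
    return get_response.status == 200 and len(body) > 0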
Example #50
0
class find_dvcs(CrawlPlugin):
    """
    Search for Git, Mercurial (HG), Bazaar (BZR), Subversion (SVN) and CVS
    repositories and check the files they expose.

    :author: Adam Baldwin ([email protected])
    :author: Tomas Velazquez (tomas.velazquezz - gmail.com)
    """
    def __init__(self):
        CrawlPlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()
        self._analyzed_filenames = ScalableBloomFilter()

        self._dvcs = {
            'git repository': {},
            'git ignore': {},
            'hg repository': {},
            'hg ignore': {},
            'bzr repository': {},
            'bzr ignore': {},
            'svn repository': {},
            'svn ignore': {},
            'cvs repository': {},
            'cvs ignore': {}
        }

        self._dvcs['git repository']['filename'] = '.git/index'
        self._dvcs['git repository']['function'] = self.git_index

        self._dvcs['git ignore']['filename'] = '.gitignore'
        self._dvcs['git ignore']['function'] = self.ignore_file

        self._dvcs['hg repository']['filename'] = '.hg/dirstate'
        self._dvcs['hg repository']['function'] = self.hg_dirstate

        self._dvcs['hg ignore']['filename'] = '.hgignore'
        self._dvcs['hg ignore']['function'] = self.ignore_file

        self._dvcs['bzr repository']['filename'] = '.bzr/checkout/dirstate'
        self._dvcs['bzr repository']['function'] = self.bzr_checkout_dirstate

        self._dvcs['bzr ignore']['filename'] = '.bzrignore'
        self._dvcs['bzr ignore']['function'] = self.ignore_file

        self._dvcs['svn repository']['filename'] = '.svn/entries'
        self._dvcs['svn repository']['function'] = self.svn_entries

        self._dvcs['svn ignore']['filename'] = '.svnignore'
        self._dvcs['svn ignore']['function'] = self.ignore_file

        self._dvcs['cvs repository']['filename'] = 'CVS/Entries'
        self._dvcs['cvs repository']['function'] = self.cvs_entries

        self._dvcs['cvs ignore']['filename'] = '.cvsignore'
        self._dvcs['cvs ignore']['function'] = self.ignore_file

    def crawl(self, fuzzable_request):
        """
        For every directory, fetch a list of files and analyze the response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        """
        domain_path = fuzzable_request.get_url().get_domain_path()

        if domain_path not in self._analyzed_dirs:
            self._analyzed_dirs.add(domain_path)

            test_generator = self._url_generator(domain_path)
            self.worker_pool.map_multi_args(self._send_and_check,
                                            test_generator)

    def _url_generator(self, domain_path):
        """
        Based on different URLs with directories, generate the URLs that need
        to be tested.

        :return: URLs
        """
        for repo, repo_data in self._dvcs.items():
            repo_url = domain_path.url_join(repo_data['filename'])
            _function = repo_data['function']
            yield repo_url, _function, repo, domain_path

    def _clean_filenames(self, filenames):
        """
        Filter some characters from filenames.

        :return: A cleaned set of filenames.
        """
        resources = set()

        for filename in filenames:

            # Sometimes we get random bytes from the .git/index because of
            # git versions we don't fully support, so we ignore any encoding
            # errors
            filename = smart_unicode(filename, errors='ignore')

            if filename.startswith('/'):
                filename = filename[1:]
            if filename.startswith('./'):
                filename = filename[2:]
            if filename.endswith('/'):
                filename = filename[:-1]

            resources.add(filename)

        return resources

    def _send_and_check(self, repo_url, repo_get_files, repo, domain_path):
        """
        Check if a repository index exists in the domain_path.

        :return: None, everything is saved to the self.out_queue.
        """
        http_response = self.http_get_and_parse(repo_url,
                                                binary_response=True,
                                                respect_size_limit=False)

        if is_404(http_response):
            return

        try:
            filenames = repo_get_files(http_response.get_raw_body())
        except Exception, e:
            # We get here when the HTTP response is NOT a 404, but the response
            # body couldn't be properly parsed. This is usually because of a false
            # positive in the is_404 function, OR a new version-format of the file
            # to be parsed.
            #
            # Log in order to be able to improve the framework.
            args = (e, repo_get_files.__name__, repo_url)
            om.out.debug('Got a "%s" exception while running "%s" on "%s"' %
                         args)
        else:
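The ignore_file function wired into the _dvcs table above is not included in this snippet. A minimal sketch of what such a parser might look like, extracting concrete resource names from a .gitignore-style body while skipping comments, blanks and wildcard patterns (w3af's actual rules may differ):

def ignore_file(body):
    """Extract candidate resource names from a .gitignore-style body."""
    filenames = set()

    for line in body.splitlines():
        line = line.strip()

        # Skip blanks, comments and negation rules.
        if not line or line.startswith('#') or line.startswith('!'):
            continue

        # Wildcard patterns don't map to a concrete resource.
        if '*' in line or '?' in line:
            continue

        filenames.add(line)

    return filenames

print(ignore_file('# build output\n*.pyc\nconfig/settings.py\n\nlogs/'))
# set(['config/settings.py', 'logs/'])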
Example #51
0
class strange_parameters(GrepPlugin):
    """
    Grep the HTML response and find URIs that have strange parameters.

    :author: Andres Riancho ([email protected])
    """
    def __init__(self):
        GrepPlugin.__init__(self)

        # Internal variables
        self._already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        try:
            dp = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            return

        # Note:
        # - With parsed_references I'm 100% sure that it's really something in
        #   the HTML that the developer intended to add.
        #
        # - The re_references are the result of regular expressions, which in
        #   some cases are just false positives.
        #
        parsed_references, _ = dp.get_references()

        for ref in parsed_references:

            qs = ref.querystring

            for param_name in qs:
                # This for loop is to address the repeated parameter name issue
                for element_index in xrange(len(qs[param_name])):
                    if self._is_strange(request, param_name, qs[param_name][element_index])\
                    and (ref.uri2url(), param_name) not in self._already_reported:
                        # Don't repeat findings
                        self._already_reported.add((ref.uri2url(), param_name))

                        desc = 'The URI: "%s" has a parameter named: "%s"'\
                               ' with value: "%s", which is very uncommon'\
                               ' and requires manual verification.'
                        desc = desc % (response.get_uri(), param_name,
                                       qs[param_name][element_index])

                        i = Info('Uncommon query string parameter', desc,
                                 response.id, self.get_name())
                        i.set_uri(ref)
                        i.set_var(param_name)
                        i['parameter_value'] = qs[param_name][element_index]
                        i.add_to_highlight(qs[param_name][element_index])

                        self.kb_append(self, 'strange_parameters', i)

                    # To find this kind of vulns
                    # http://thedailywtf.com/Articles/Oklahoma-
                    # Leaks-Tens-of-Thousands-of-Social-Security-Numbers,-Other-
                    # Sensitive-Data.aspx
                    if self._is_SQL(request, param_name, qs[param_name][element_index])\
                    and ref not in self._already_reported:

                        # Don't repeat findings
                        self._already_reported.add(ref)
                        desc = 'The URI: "%s" has a parameter named: "%s"'\
                               ' with value: "%s", which is a SQL query.'
                        desc = desc % (response.get_uri(), param_name,
                                       qs[param_name][element_index])
                        v = Vuln('Parameter has SQL sentence', desc,
                                 severity.LOW, response.id, self.get_name())
                        v.set_uri(ref)
                        v.set_var(param_name)
                        v['parameter_value'] = qs[param_name][element_index]

                        v.add_to_highlight(qs[param_name][element_index])
                        self.kb_append(self, 'strange_parameters', v)

    def _is_SQL(self, request, parameter, value):
        """
        :return: True if the parameter value contains SQL sentences
        """
        regex = '(SELECT .*? FROM|INSERT INTO .*? VALUES|UPDATE .*? SET .*? WHERE)'
        for match in re.findall(regex, value, re.IGNORECASE):
            if not request.sent(match):
                return True

        return False

    def _is_strange(self, request, parameter, value):
        """
        :return: True if the parameter value is strange
        """
        if 'wicket:' in parameter:
            #
            #   The wicket framework uses, by default, strange URLs like this:
            #   https://www.DOMAIN.com/?wicket:bookmarkablePage=:com.DOMAIN.SUBDOMAIN.web.pages.SignInPage
            #   &wicket:interface=:0:signInForm::IFormSubmitListener::;jsessionid=7AC76A46A86BBC3F5253E374241BC892
            #
            #   Which are strange in all cases, except from wicket!
            #
            return False

        _strange_parameter_re = []

        # Seems to be a function
        _strange_parameter_re.append(r'\w+\(.*?\)')
        # Add more here...
        #_strange_parameter_re.append('....')

        for regex in _strange_parameter_re:
            for match in re.findall(regex, value):
                if not request.sent(match):
                    return True

        splitted_value = [
            x for x in re.split(r'([a-zA-Z0-9. ]+)', value) if x != ''
        ]
        if len(splitted_value) > 4:
            if not request.sent(value):
                return True

        return False

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
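To see the _is_SQL heuristic in action, the regex can be exercised on its own (the sample parameter values are made up):

import re

SQL_RE = re.compile(
    '(SELECT .*? FROM|INSERT INTO .*? VALUES|UPDATE .*? SET .*? WHERE)',
    re.IGNORECASE)

# A parameter value that embeds a full SQL sentence is flagged...
print(bool(SQL_RE.search('SELECT id, name FROM users WHERE id=1')))  # True

# ...while an ordinary value is not.
print(bool(SQL_RE.search('red_shoes_size_42')))                      # False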
Example #52
0
class url_session(GrepPlugin):
    """
    Finds URLs which have a parameter that holds the session ID. 

    :author: Andres Riancho ([email protected])
    """
    
    SESSID_PARAMS = ALL_COOKIES
    
    def __init__(self):
        GrepPlugin.__init__(self)
        self._already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        """
        Plugin entry point, find session IDs in URLs and report them.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        self.analyze_uri(request, response)
        self.analyze_document_links(request, response)
    
    def _has_sessid(self, uri):
        """
        :return: A set which contains the session ID parameters (if any)
        """
        sessid_in_uri = set()
        if uri.has_query_string():
            query_string = uri.get_querystring()
            params = set(query_string.keys())
            sessid_in_uri = self.SESSID_PARAMS.intersection(params)
        return sessid_in_uri
        
    def analyze_document_links(self, request, response):
        """
        Find session IDs in the document's links and store them in the KB.
        """
        try:
            doc_parser = parser_cache.dpc.get_document_parser_for(response)
        except Exception:
            pass
        else:
            parsed_refs, _ = doc_parser.get_references()
            
            for link_uri in parsed_refs:
                if self._has_sessid(link_uri) and \
                response.get_url() not in self._already_reported:
                    #   report this information only once
                    self._already_reported.add(response.get_url())

                    desc = 'The HTML content at "%s" contains a link (%s)'\
                           ' which holds a session id. The ID could be leaked'\
                           ' to third party domains through the referrer'\
                           ' header.'
                    desc = desc % (response.get_url(), link_uri)
                    
                    #   append the info object to the KB.
                    i = Info('Session ID in URL', desc, response.id,
                             self.get_name())
                    i.set_uri(response.get_uri())
                    
                    self.kb_append(self, 'url_session', i)
                    break

    def analyze_uri(self, request, response):
        """
        Find session IDs in the URI and store them in the KB.
        """
        request_uri = request.get_uri()
        if self._has_sessid(request_uri) and \
        response.get_url() not in self._already_reported:
            #   report this information only once
            self._already_reported.add(response.get_url())

            desc = 'The URL "%s" contains a session id which could be'\
                   ' leaked to third party domains through the referrer'\
                   ' header.'
            desc = desc % request_uri

            #   append the info object to the KB.
            i = Info('Session ID in URL', desc, response.id,
                     self.get_name())
            i.set_uri(response.get_uri())

            self.kb_append(self, 'url_session', i)

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
Example #53
0
class php_eggs(InfrastructurePlugin):
    """
    Fingerprint the PHP version using documented easter eggs that exist in PHP.
    :author: Andres Riancho ([email protected])
    """
    PHP_EGGS = [('?=PHPB8B5F2A0-3C92-11d3-A3A9-4C7B08C10000', 'PHP Credits'),
                ('?=PHPE9568F34-D428-11d2-A769-00AA001ACF42', 'PHP Logo'),
                ('?=PHPE9568F35-D428-11d2-A769-00AA001ACF42', 'Zend Logo'),
                ('?=PHPE9568F36-D428-11d2-A769-00AA001ACF42', 'PHP Logo 2')]

    # Empty EGG_DB array, will be filled with external data
    EGG_DB = {}

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Already analyzed extensions
        self._already_analyzed_ext = ScalableBloomFilter()

        # Internal DB
        self._db_file = os.path.join(ROOT_PATH, 'plugins', 'infrastructure',
                                     'php_eggs', 'eggs.json')

        # Get data from external JSON file and fill EGG_DB array
        data = self.read_jsondata(self._db_file)
        self.EGG_DB = self.fill_egg_array(data)

    def read_jsondata(self, jsonfile):
        """
        Read and parse a JSON file.
        :return: The parsed JSON data.
        """
        with open(jsonfile) as json_data:
            file_data = json.load(json_data)
        return file_data

    def fill_egg_array(self, json_egg_data):
        """
        Fill the egg DB with data from the JSON input file.
        :return: A dict mapping PHP versions to their corresponding MD5 hashes.
        """
        egg_db = {}

        for egg in json_egg_data['db']:
            version = egg['version']
            egg_db[version] = {}

            for key in ('credits', 'php_1', 'php_2', 'zend'):
                if key in egg:
                    egg_db[version][key] = egg[key]

        return egg_db

    def discover(self, fuzzable_request):
        """
        Nothing strange, just do some GET requests to the eggs and analyze the
        response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        """
        # Get the extension of the URL (.html, .php, etc.)
        ext = fuzzable_request.get_url().get_extension()

        # Only perform this analysis if we haven't already analyzed this type
        # of extension OR if we get a URL like http://f00b5r/4/ (note that it
        # has no extension). This logic will perform some extra tests... but
        # we won't miss any special cases. Also, we aren't doing something like
        # "if 'php' in ext:" because we never depend on something so easy to
        # modify as extensions to make decisions.
        if ext not in self._already_analyzed_ext:

            # Now we save the extension as one of the already analyzed
            self._already_analyzed_ext.add(ext)

            # Init some internal variables
            query_results = self._get_php_eggs(fuzzable_request, ext)

            if self._are_php_eggs(query_results):
                # analyze the info to see if we can identify the version
                self._extract_version_from_egg(query_results)
                raise NoMoreCalls

    def _get_php_eggs(self, fuzzable_request, ext):
        """
        HTTP GET the URLs for PHP Eggs
        :return: A list with the HTTP response objects
        """
        def http_get(fuzzable_request, (egg_url, egg_desc)):
            egg_url = fuzzable_request.get_url().uri2url().url_join(egg_url)
            response = self._uri_opener.GET(egg_url, cache=True, grep=False)
            return response, egg_url, egg_desc

        # Send the requests using threads:
        query_results = []

        http_get = one_to_many(http_get)
        fr_repeater = repeat(fuzzable_request)
        args_iterator = izip(fr_repeater, self.PHP_EGGS)
        pool_results = self.worker_pool.imap_unordered(http_get, args_iterator)

        for response, egg_URL, egg_desc in pool_results:
            eqr = EggQueryResult(response, egg_desc, egg_URL)
            query_results.append(eqr)

        return query_results
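The _are_php_eggs and _extract_version_from_egg helpers are cut off in this listing. By assumption, matching hinges on hashing each egg response body and looking the digest up in EGG_DB; a sketch of that idea (the digests below are fabricated placeholders, not real PHP egg hashes):

import hashlib

# Fabricated digests for illustration; real entries come from eggs.json.
EGG_DB = {
    '5.2.4': {'php_1': '11111111111111111111111111111111',
              'zend': '22222222222222222222222222222222'},
}

def versions_for_body(egg_db, response_body):
    """Return the PHP versions whose known egg hashes include this body's."""
    digest = hashlib.md5(response_body).hexdigest()
    return [version for version, hashes in egg_db.items()
            if digest in hashes.values()]

print(versions_for_body(EGG_DB, 'some PHP logo bytes'))  # []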
Example #54
0
class frontpage_version(InfrastructurePlugin):
    """
    Search for the FrontPage Server Info file and, if found, determine its version.
    :author: Viktor Gazdag ( [email protected] )
    """
    VERSION_RE = re.compile('FPVersion="(.*?)"', re.IGNORECASE)
    ADMIN_URL_RE = re.compile('FPAdminScriptUrl="(.*?)"', re.IGNORECASE)
    AUTHOR_URL_RE = re.compile('FPAuthorScriptUrl="(.*?)"', re.IGNORECASE)

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()

    @runonce(exc_class=RunOnce)
    def discover(self, fuzzable_request, debugging_id):
        """
        For every directory, fetch a list of files and analyze the response.

        :param debugging_id: A unique identifier for this call to discover()
        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        for domain_path in fuzzable_request.get_url().get_directories():

            if domain_path in self._analyzed_dirs:
                continue

            # Save the domain_path so I know I'm not working in vain
            self._analyzed_dirs.add(domain_path)

            # Request the file
            frontpage_info_url = domain_path.url_join("_vti_inf.html")
            try:
                response = self._uri_opener.GET(frontpage_info_url, cache=True)
            except BaseFrameworkException as w3:
                fmt = (
                    'Failed to GET Frontpage Server _vti_inf.html file: "%s". '
                    'Exception: "%s".')
                om.out.debug(fmt % (frontpage_info_url, w3))
            else:
                # Check if it's a Frontpage Info file
                if not is_404(response):
                    fr = FuzzableRequest(response.get_uri())
                    self.output_queue.put(fr)

                    self._analyze_response(response)

    def _analyze_response(self, response):
        """
        It seems that we have found a _vti_inf file, parse it and analyze the
        content!

        :param response: The http response object for the _vti_inf file.
        :return: None. All the info is saved to the kb.
        """
        version_mo = self.VERSION_RE.search(response.get_body())
        admin_mo = self.ADMIN_URL_RE.search(response.get_body())
        author_mo = self.AUTHOR_URL_RE.search(response.get_body())

        if version_mo and admin_mo and author_mo:
            self._exec = False

            desc = ('The FrontPage Configuration Information file was found'
                    ' at: "%s" and the version of FrontPage Server Extensions'
                    ' is: "%s".')
            desc %= (response.get_url(), version_mo.group(1))

            i = Info('FrontPage configuration information', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())
            i['version'] = version_mo.group(1)

            kb.kb.append(self, 'frontpage_version', i)
            om.out.information(i.get_desc())

            #
            # Handle the admin.exe file
            #
            self._analyze_admin(response, admin_mo)

            #
            # Handle the author.exe file
            #
            self._analyze_author(response, author_mo)

        else:
            # This is strange... we found a _vti_inf file, but there is no
            # frontpage information in it... IPS? WAF? honeypot?
            msg = '[IMPROVEMENT] Invalid FrontPage configuration information'\
                  ' found at %s (id: %s).'
            msg = msg % (response.get_url(), response.id)
            om.out.debug(msg)

    def _analyze_admin(self, response, frontpage_admin):
        """
        Analyze the admin URL.

        :param response: The http response object for the _vti_inf file.
        :param frontpage_admin: A regex match object.
        :return: None. All the info is saved to the kb.
        """
        admin_location = response.get_url().get_domain_path().url_join(
            frontpage_admin.group(1))

        # Check for anomalies in the location of admin.exe
        if frontpage_admin.group(1) != '_vti_bin/_vti_adm/admin.exe':
            name = 'Customized frontpage configuration'

            desc = 'The FPAdminScriptUrl is at: "%s" instead of the default'\
                   ' location "_vti_bin/_vti_adm/admin.exe". This is very'\
                   ' uncommon.'
            desc = desc % admin_location

        else:
            name = 'FrontPage FPAdminScriptUrl'

            desc = 'The FPAdminScriptUrl is at: "%s".'
            desc = desc % admin_location

        i = Info(name, desc, response.id, self.get_name())
        i.set_url(admin_location)
        i['FPAdminScriptUrl'] = admin_location

        kb.kb.append(self, 'frontpage_version', i)
        om.out.information(i.get_desc())

    def _analyze_author(self, response, frontpage_author):
        """
        Analyze the author URL.

        :param response: The http response object for the _vti_inf file.
        :param frontpage_author: A regex match object.
        :return: None. All the info is saved to the kb.
        """
        domain_path = response.get_url().get_domain_path()
        author_location = domain_path.url_join(frontpage_author.group(1))

        # Check for anomalies in the location of author.exe
        if frontpage_author.group(1) != '_vti_bin/_vti_aut/author.exe':
            name = 'Customized frontpage configuration'

            desc = ('The FPAuthorScriptUrl is at: "%s" instead of the default'
                    ' location: "_vti_bin/_vti_aut/author.exe". This is very'
                    ' uncommon.')
            desc %= author_location
        else:
            name = 'FrontPage FPAuthorScriptUrl'

            desc = 'The FPAuthorScriptUrl is at: "%s".'
            desc %= author_location

        i = Info(name, desc, response.id, self.get_name())
        i.set_url(author_location)
        i['FPAuthorScriptUrl'] = author_location

        kb.kb.append(self, 'frontpage_version', i)
        om.out.information(i.get_desc())

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
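To make the three regexes at the top of this class concrete, here is what extraction from a typical _vti_inf.html body could look like (the sample body is invented):

import re

VERSION_RE = re.compile('FPVersion="(.*?)"', re.IGNORECASE)
ADMIN_URL_RE = re.compile('FPAdminScriptUrl="(.*?)"', re.IGNORECASE)

sample_body = ('<!-- FrontPage Configuration Information\n'
               ' FPVersion="5.0.2.2623"\n'
               ' FPAdminScriptUrl="_vti_bin/_vti_adm/admin.exe"\n'
               '-->')

version_mo = VERSION_RE.search(sample_body)
admin_mo = ADMIN_URL_RE.search(sample_body)

if version_mo and admin_mo:
    print('FrontPage Server Extensions version: %s' % version_mo.group(1))
    print('Admin script at: %s' % admin_mo.group(1))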