Example No. 1
    def discover(self, fuzzable_request):
        """
        Checks if JBoss Interesting Directories exist in the target server.
        Also verifies some vulnerabilities.
        """
        base_url = fuzzable_request.get_url().base_url()

        args_iter = izip(repeat(base_url), self.JBOSS_VULNS)
        otm_send_request = one_to_many(self.send_request)
        response_pool = self.worker_pool.imap_unordered(otm_send_request,
                                                        args_iter)

        for vuln_db_instance, response in response_pool:

            if is_404(response):
                continue

            vuln_url = base_url.url_join(vuln_db_instance['url'])
            name = vuln_db_instance['name']
            desc = vuln_db_instance['desc']

            if vuln_db_instance['type'] == 'info':
                o = Info(name, desc, response.id, self.get_name())
            else:
                o = Vuln(name, desc, severity.LOW, response.id, self.get_name())

            o.set_url(vuln_url)
            kb.kb.append(self, 'find_jboss', o)

            for fr in self._create_fuzzable_requests(response):
                self.output_queue.put(fr)
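
The loop above only relies on JBOSS_VULNS being an iterable of dicts with url, name, desc and type keys (the key names are taken from the lookups in the code). A hypothetical entry, just to illustrate the expected shape:

    JBOSS_VULNS = [
        # Hypothetical entry, not the plugin's real data
        {'url': 'jmx-console/',
         'name': 'JBoss JMX console found',
         'desc': 'The JBoss JMX console was found without authentication.',
         'type': 'vuln'},
    ]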
Example No. 2
    def grep(self, request, response):
        """
        Plugin entry point.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        if not response.is_text_or_html():
            return
        
        url = response.get_url()
        dom = response.get_dom()
        # In some strange cases, we fail to normalize the document
        if dom is None:
            return
        
        script_elements = self._script_xpath(dom)
        for element in script_elements:
            # returns the text between <script> and </script>
            script_content = element.text

            if script_content is not None:

                res = self._ajax_regex_re.search(script_content)
                if res:
                    desc = 'The URL: "%s" has AJAX code.' % url
                    i = Info('AJAX code', desc, response.id,
                             self.get_name())
                    i.set_url(url)
                    i.add_to_highlight(res.group(0))
                    
                    self.kb_append_uniq(self, 'ajax', i, 'URL')
Example No. 3
    def grep(self, request, response):
        """
        Check if HTTPS responses have the Strict-Transport-Security header set.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        if self._reports > MAX_REPORTS:
            return

        if request.get_url().get_protocol() != 'https':
            return

        sts_header_value, _ = response.get_headers().iget(STS_HEADER, None)
        if sts_header_value is not None:
            return

        self._reports += 1

        desc = 'The web server uses HTTPS but does not set the'\
               ' Strict-Transport-Security header.'
        i = Info('Missing Strict Transport Security header', desc,
                 response.id, self.get_name())
        i.set_url(response.get_url())
        i[STSInfoSet.ITAG] = response.get_url().get_domain()

        self.kb_append_uniq_group(self, 'strict_transport_security', i,
                                  group_klass=STSInfoSet)
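
Several snippets in this collection group findings through kb_append_uniq_group and an InfoSet subclass whose ITAG attribute names the grouping key. A rough sketch of what STSInfoSet might look like, assuming the usual w3af pattern of an ITAG plus a description TEMPLATE (the exact template text is an assumption):

    class STSInfoSet(InfoSet):
        ITAG = 'domain'
        TEMPLATE = ('The remote web server is not sending the'
                    ' Strict-Transport-Security header for multiple URLs in'
                    ' the "{{ domain }}" domain.')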
Example No. 4
    def _check_user_dir(self, mutated_url, user, user_desc, user_tag,
                        non_existent):
        """
        Perform the request and compare with non_existent

        :see _create_tests: For parameter description
        :return: The HTTP response id if the mutated_url is a web user
                 directory, None otherwise.
        """
        resp = self.http_get_and_parse(mutated_url)
        
        path = mutated_url.get_path()
        response_body = resp.get_body().replace(path, '')

        if fuzzy_not_equal(response_body, non_existent, 0.7):

            # Avoid duplicates
            known_users = [u['user'] for u in kb.kb.get('user_dir', 'users')]
            if user in known_users:
                return

            # Save the finding to the KB
            desc = 'An operating system user directory was found at: "%s"'
            desc = desc % resp.get_url()

            i = Info('Web user home directory', desc, resp.id, self.get_name())
            i.set_url(resp.get_url())
            i['user'] = user
            i['user_desc'] = user_desc
            i['user_tag'] = user_tag

            self.kb_append_uniq(self, 'users', i)

            # Analyze if we can get more information from this finding
            self._analyze_finding(i)
Example No. 5
    def _fingerprint_meta(self, domain_path, wp_unique_url, response):
        """
        Check if the wp version is in the index header
        """
        # Main scan URL passed from w3af + wp index page
        wp_index_url = domain_path.url_join('index.php')
        response = self._uri_opener.GET(wp_index_url, cache=True)

        # Find the string in the response html
        find = '<meta name="generator" content="[Ww]ord[Pp]ress (\d\.\d\.?\d?)" />'
        m = re.search(find, response.get_body())

        # If string found, group version
        if m:
            version = m.group(1)

            # Save it to the kb!
            desc = 'WordPress version "%s" found in the index header.'
            desc = desc % version

            i = Info('Fingerprinted Wordpress version', desc, response.id,
                     self.get_name())
            i.set_url(wp_index_url)
            
            kb.kb.append(self, 'info', i)
            om.out.information(i.get_desc())
Example No. 6
    def grep(self, request, response):
        """
        Plugin entry point.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        if not response.is_text_or_html():
            return
        
        if not self.symfony_detected(response):
            return

        if self.has_csrf_token(response):
            return

        desc = ('The URL: "%s" seems to be generated by the Symfony framework'
                ' and contains a form that has CSRF protection disabled.')
        desc %= response.get_url()

        i = Info('Symfony Framework with CSRF protection disabled',
                 desc, response.id, self.get_name())
        i.set_url(response.get_url())
        self.kb_append_uniq(self, 'symfony', i, 'URL')
Example No. 7
    def grep(self, request, response):
        """
        Check if all responses have the X-Content-Type-Options header set

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        if self._reports > MAX_REPORTS:
            return

        ct_options_value, _ = response.get_headers().iget(CT_OPTIONS_HEADER, None)
        if ct_options_value is not None:
            if ct_options_value.strip().lower() == NOSNIFF:
                return

        self._reports += 1

        desc = 'The URL "%s" returned an HTTP response without the' \
               ' recommended HTTP header X-Content-Type-Options'
        desc %= response.get_url()

        i = Info('Missing X-Content-Type-Options header', desc,
                 response.id, self.get_name())
        i.set_url(response.get_url())
        i[CTSniffingInfoSet.ITAG] = response.get_url().get_domain()

        self.kb_append_uniq_group(self, 'content_sniffing', i,
                                  group_klass=CTSniffingInfoSet)
Example No. 8
    def grep(self, request, response):
        """
        Plugin entry point. Parse the object tags.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        url = response.get_url()
        dom = response.get_dom()

        if response.is_text_or_html() and dom is not None:

            elem_list = self._tag_xpath(dom)
            for element in elem_list:

                tag_name = element.tag
                
                desc = 'The URL: "%s" has an "%s" tag. We recommend you download'\
                       ' the client side code and analyze it manually.'
                desc = desc % (response.get_uri(), tag_name)

                i = Info('Browser plugin content', desc, response.id,
                         self.get_name())
                i.set_url(url)
                i.add_to_highlight(tag_name)

                self.kb_append_uniq(self, tag_name, i, 'URL')
Example No. 9
    def _fingerprint_data(self, domain_path, wp_unique_url, response):
        """
        Find wordpress version from data
        """
        for wp_fingerprint in self._get_wp_fingerprints():
            
            # The URL in the XML is relative AND it has two different variables
            # that we need to replace:
            #        $wp-content$    -> wp-content/
            #        $wp-plugins$    -> wp-content/plugins/
            path = wp_fingerprint.filepath
            path = path.replace('$wp-content$', 'wp-content/')
            path = path.replace('$wp-plugins$', 'wp-content/plugins/')
            test_url = domain_path.url_join(path)
            
            response = self._uri_opener.GET(test_url, cache=True)

            response_hash = hashlib.md5(response.get_body()).hexdigest()

            if response_hash == wp_fingerprint.hash:
                version = wp_fingerprint.version

                # Save it to the kb!
                desc = 'WordPress version "%s" fingerprinted by matching known md5'\
                       ' hashes to HTTP responses of static resources available at'\
                       ' the remote WordPress install.'
                desc = desc % version
                i = Info('Fingerprinted Wordpress version', desc, response.id,
                         self.get_name())
                i.set_url(test_url)
        
                kb.kb.append(self, 'info', i)
                om.out.information(i.get_desc())
                
                break
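
Each wp_fingerprint above is only read through its filepath, hash and version attributes, so for illustration it could be as small as the following container (hypothetical, including the sample values):

    from collections import namedtuple

    WPFingerprint = namedtuple('WPFingerprint', ['filepath', 'hash', 'version'])

    # Sample instance with made-up values
    fingerprint = WPFingerprint(filepath='$wp-content$themes/default/style.css',
                                hash='5d41402abc4b2a76b9719d911017c592',
                                version='2.5')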
Example No. 10
    def _parse_document(self, response):
        """
        Parses the HTML and adds the mail addresses to the kb.
        """
        get_document_parser_for = parser_cache.dpc.get_document_parser_for

        try:
            document_parser = get_document_parser_for(response, cache=False)
        except BaseFrameworkException:
            # Failed to find a suitable parser for the document
            pass
        else:
            # Search for email addresses
            for mail in document_parser.get_emails(self._domain_root):
                if mail not in self._accounts:
                    self._accounts.append(mail)

                    desc = 'The mail account: "%s" was found at: "%s".'
                    desc = desc % (mail, response.get_uri())

                    i = Info('Email account', desc, response.id,
                             self.get_name())
                    i.set_url(response.get_uri())
                    i['mail'] = mail
                    i['user'] = mail.split('@')[0]
                    i['url_list'] = {response.get_uri()}

                    self.kb_append('emails', 'emails', i)
Example No. 11
    def grep(self, request, response):
        """
        Plugin entry point, verify if the HTML has a form with file uploads.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if not response.is_text_or_html():
            return
        
        dom = response.get_dom()
        url = response.get_url()
        
        # In some strange cases, we fail to normalize the document
        if dom is not None:

            # Loop through file input tags
            for input_file in self._file_input_xpath(dom):
                msg = 'The URL: "%s" has a form with file upload capabilities.'
                msg = msg % url
                
                i = Info('File upload form', msg, response.id,
                         self.get_name())
                i.set_url(url)
                to_highlight = etree.tostring(input_file)
                i.add_to_highlight(to_highlight)
                
                self.kb_append_uniq(self, 'file_upload', i, 'URL')
Example No. 12
    def _lowest_privilege_test(self, response):
        regex_str = 'User/Group </td><td class="v">(.*?)\((\d.*?)\)/(\d.*?)</td>'
        lowest_privilege_test = re.search(regex_str, response.get_body(), re.I)
        if lowest_privilege_test:
            lpt_uname = lowest_privilege_test.group(1)
            lpt_uid = lowest_privilege_test.group(2)
            lpt_uid = int(lpt_uid)
            lpt_gid = lowest_privilege_test.group(3)
            if lpt_uid < 99 or int(lpt_gid) < 99 or \
                    re.match('root|apache|daemon|bin|operator|adm', lpt_uname, re.I):

                desc = 'phpinfo()::PHP may be executing as a higher privileged'\
                       ' group. Username: %s, UserID: %s, GroupID: %s.' 
                desc = desc % (lpt_uname, lpt_uid, lpt_gid)
                
                v = Vuln('PHP lowest_privilege_test:fail', desc,
                         severity.MEDIUM, response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
            else:
                lpt_name = 'privilege:' + lpt_uname
                lpt_desc = 'phpinfo()::PHP is executing under '
                lpt_desc += 'username: ' + lpt_uname + ', '
                lpt_desc += 'userID: ' + str(lpt_uid) + ', '
                lpt_desc += 'groupID: ' + lpt_gid
                i = Info(lpt_name, lpt_desc, response.id, self.get_name())
                i.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', i)
                om.out.information(i.get_desc())
Example No. 13
    def _do_request(self, url, mutant):
        """
        Perform a simple GET to see if the result is an error or not, and then
        run the actual fuzzing.
        """
        response = self._uri_opener.GET(
            mutant, cache=True, headers=self._headers)

        if not (is_404(response) or
                response.get_code() in (403, 401) or
                self._return_without_eval(mutant)):

            # Create the fuzzable request and send it to the core
            fr = FuzzableRequest.from_http_response(response)
            self.output_queue.put(fr)
            
            #
            #   Save it to the kb (if new)!
            #
            if response.get_url() not in self._seen and response.get_url().get_file_name():
                desc = 'A potentially interesting file was found at: "%s".'
                desc = desc % response.get_url()

                i = Info('Potentially interesting file', desc, response.id,
                         self.get_name())
                i.set_url(response.get_url())
                
                kb.kb.append(self, 'files', i)
                om.out.information(i.get_desc())

                # Report only once
                self._seen.add(response.get_url())
Example No. 14
    def discover(self, fuzzable_request):
        """
        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        root_domain = fuzzable_request.get_url().get_root_domain()

        pks_se = pks(self._uri_opener)
        results = pks_se.search(root_domain)
        pks_url = 'http://pgp.mit.edu:11371/'

        for result in results:
            mail = result.username + '@' + root_domain
            
            desc = 'The mail account: "%s" was found at: "%s".'
            desc = desc % (mail, pks_url)

            i = Info('Email account', desc, result.id, self.get_name())
            i.set_url(URL(pks_url))
            i['mail'] = mail
            i['user'] = result.username
            i['name'] = result.name
            i['url_list'] = {URL(pks_url)}
            
            kb.kb.append('emails', 'emails', i)
            om.out.information(i.get_desc())
Example No. 15
    def grep(self, request, response):
        """
        Plugin entry point, verify if the HTML has a form with file uploads.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if not response.is_text_or_html():
            return
        
        url = response.get_url()

        for tag in mp_doc_parser.get_tags_by_filter(response, ('input',)):
            input_type = tag.attrib.get('type', None)

            if input_type is None:
                continue

            if input_type.lower() != 'file':
                continue

            msg = 'A form which allows file uploads was found at "%s"'
            msg %= url

            i = Info('File upload form', msg, response.id, self.get_name())
            i.set_url(url)

            self.kb_append_uniq(self, 'file_upload', i, 'URL')
Example No. 16
    def grep(self, request, response):
        """
        Plugin entry point. Analyze if the HTTP response codes are strange.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        if response.get_code() in self.COMMON_HTTP_CODES:
            return

        # Create a new info object from scratch and save it to the kb
        desc = ('The remote Web server sent a strange HTTP response code:'
                ' "%s" with the message: "%s", manual inspection is'
                ' recommended.')
        desc %= (response.get_code(), response.get_msg())

        i = Info('Strange HTTP response code',
                 desc, response.id, self.get_name())
        i.add_to_highlight(str(response.get_code()), response.get_msg())
        i.set_url(response.get_url())
        i[StrangeCodesInfoSet.ITAG] = response.get_code()
        i['message'] = response.get_msg()

        self.kb_append_uniq_group(self, 'strange_http_codes', i,
                                  group_klass=StrangeCodesInfoSet)
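
COMMON_HTTP_CODES is defined elsewhere in the plugin; conceptually it is the set of status codes that are considered uninteresting. A hypothetical value consistent with the check above:

    # Hypothetical set; the real plugin defines its own list of common codes
    COMMON_HTTP_CODES = {200, 301, 302, 303, 401, 403, 404, 500, 501}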
Example No. 17
    def end(self):
        """
        This method is called when the plugin won't be used anymore.
        """
        all_findings = kb.kb.get_all_findings()

        for title, desc, _id, url, highlight in self._potential_vulns:
            for info in all_findings:
                # This makes sure that if the sqli plugin found a vulnerability
                # in the same URL as we found a detailed error, we won't report
                # the detailed error.
                #
                # If the user fixes the sqli vulnerability and runs the scan again
                # most likely the detailed error will disappear too. If the sqli
                # vulnerability disappears and this one remains, it will appear
                # as a new vulnerability in the second scan.
                if info.get_url() == url:
                    break
            else:
                i = Info(title, desc, _id, self.get_name())
                i.set_url(url)
                i.add_to_highlight(highlight)

                self.kb_append_uniq(self, 'error_page', i)

        self._potential_vulns.cleanup()
Example No. 18
    def grep(self, request, response):
        """
        Check if the header names are common or not

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        # Check for protocol anomalies
        self._content_location_not_300(request, response)

        # Check header names
        for header_name in response.get_headers().keys():
            if header_name.upper() in self.COMMON_HEADERS:
                continue

            # Create a new info object and save it to the KB
            hvalue = response.get_headers()[header_name]

            desc = 'The remote web server sent the HTTP header: "%s"'\
                   ' with value: "%s", which is quite uncommon and'\
                   ' requires manual analysis.'
            desc = desc % (header_name, hvalue)

            i = Info('Strange header', desc, response.id, self.get_name())
            i.add_to_highlight(hvalue, header_name)
            i.set_url(response.get_url())
            i[StrangeHeaderInfoSet.ITAG] = header_name
            i['header_value'] = hvalue

            self.kb_append_uniq_group(self, 'strange_headers', i,
                                      group_klass=StrangeHeaderInfoSet)
Example No. 19
    def _match_cookie_fingerprint(self, request, response, cookie_obj):
        """
        Now we analyze the cookie and try to guess the remote web server or
        programming framework based on the cookie that was sent.

        :return: True if the cookie was fingerprinted
        """
        cookie_obj_str = cookie_obj.output(header='')

        for cookie_str_db, system_name in self.COOKIE_FINGERPRINT:
            if cookie_str_db in cookie_obj_str:
                if system_name not in self._already_reported_server:
                    desc = 'A cookie matching the cookie fingerprint DB'\
                           ' has been found when requesting "%s".'\
                           ' The remote platform is: "%s".'
                    desc = desc % (response.get_url(), system_name)

                    i = Info('Identified cookie', desc,
                             response.id, self.get_name())

                    i.set_url(response.get_url())
                    i['httpd'] = system_name
                                        
                    self._set_cookie_to_rep(i, cobj=cookie_obj)

                    kb.kb.append(self, 'security', i)
                    self._already_reported_server.append(system_name)
                    return True

        return False
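
COOKIE_FINGERPRINT is iterated as (cookie-substring, platform-name) pairs; a few illustrative entries showing the expected shape (the plugin's actual DB is not reproduced here):

    COOKIE_FINGERPRINT = [
        ('PHPSESSID', 'PHP'),
        ('ASPSESSIONID', 'ASP'),
        ('JSESSIONID', 'Java servlet container'),
    ]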
Example No. 20
    def _analyze_domain(self, response, script_full_url, script_tag):
        """
        Checks if the domain is the same, or if it's considered secure.
        """
        url = response.get_url()
        script_domain = script_full_url.get_domain()

        if script_domain != response.get_url().get_domain():

            for secure_domain in self._secure_js_domains:
                # We do a "in" because the secure js domains list contains
                # entries such as ".google." which should match. This is to
                # take into account things like ".google.com.br" without having
                # to list all of them.
                #
                # Not the best, could raise some false negatives, but... bleh!
                if secure_domain in script_domain:
                    # It's a third party that we trust
                    return

            to_highlight = script_tag.attrib.get('src')
            desc = ('The URL: "%s" has a script tag with a source that points'
                    ' to a third party site ("%s"). This practice is not'
                    ' recommended, the security of the current site is being'
                    ' delegated to the external entity.')
            desc %= (url, script_domain)

            i = Info('Cross-domain javascript source', desc,
                     response.id, self.get_name())
            i.set_url(url)
            i.add_to_highlight(to_highlight)
            i[CrossDomainInfoSet.ITAG] = script_domain

            self.kb_append_uniq_group(self, 'cross_domain_js', i,
                                      group_klass=CrossDomainInfoSet)
Example No. 21
    def grep(self, request, response):
        """
        Analyze if the HTTP response reason messages are strange.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        response_code = response.get_code()
        msg_list = W3C_REASONS.get(response_code, None)

        if msg_list is None:
            return

        response_reason = response.get_msg().lower()

        if response_reason in msg_list:
            # It's common, nothing to do here.
            return

        # Create a new info object from scratch and save it to the kb:
        desc = "The remote Web server sent a strange HTTP reason" 'message "%s", manual inspection is recommended.'
        desc = desc % response.get_msg()

        i = Info("Strange HTTP Reason message", desc, response.id, self.get_name())
        i.set_url(response.get_url())
        i.add_to_highlight(response.get_msg())
        i[StrangeHeaderInfoSet.ITAG] = response.get_msg()

        self.kb_append_uniq_group(self, "strange_reason", i, group_klass=StrangeHeaderInfoSet)
Example No. 22
File: afd.py Project: RON313/w3af
    def _analyze_results(self, filtered, not_filtered):
        """
        Analyze the test results and save the conclusion to the kb.
        """
        if len(filtered) >= len(self._get_offending_strings()) / 5.0:
            desc = 'The remote network has an active filter. IMPORTANT: The'\
                   ' result of all the other plugins will be inaccurate, web'\
                   ' applications could be vulnerable but "protected" by the'\
                   ' active filter.'
                   
            i = Info('Active filter detected', desc, 1, self.get_name())
            i['filtered'] = filtered
            
            kb.kb.append(self, 'afd', i)
            om.out.information(i.get_desc())

            om.out.information('The following URLs were filtered:')
            for i in filtered:
                om.out.information('- ' + i)

            if not_filtered:
                om.out.information(
                    'The following URLs passed undetected by the filter:')
                for i in not_filtered:
                    om.out.information('- ' + i)
Example No. 23
    def grep(self, request, response):
        """
        Plugin entry point, find feeds.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        dom = response.get_dom()
        uri = response.get_uri()

        # In some strange cases, we fail to normalize the document
        if dom is None:
            return

        # Find all feed tags
        element_list = self._tag_xpath(dom)

        for element in element_list:

            feed_tag = element.tag
            feed_type = self._feed_types[feed_tag.lower()]
            version = element.attrib.get('version', 'unknown')

            fmt = 'The URL "%s" is a %s version %s feed.'
            desc = fmt % (uri, feed_type, version)
            i = Info('Content feed resource', desc, response.id,
                     self.get_name())
            i.set_uri(uri)
            i.add_to_highlight(feed_type)
            
            self.kb_append_uniq(self, 'feeds', i, 'URL')
Example No. 24
    def _force_disclosures(self, domain_path, potentially_vulnerable_paths):
        """
        :param domain_path: The path to wordpress' root directory
        :param potentially_vulnerable_paths: A list with the paths I'll URL-join
                                             with @domain_path, GET and parse.
        """
        for pvuln_path in potentially_vulnerable_paths:

            pvuln_url = domain_path.url_join(pvuln_path)
            response = self._uri_opener.GET(pvuln_url, cache=True)

            if is_404(response):
                continue

            response_body = response.get_body()
            if 'Fatal error: ' in response_body:
                desc = 'Analyze the HTTP response body to find the full path'\
                       ' where wordpress was installed.'
                i = Info('WordPress path disclosure', desc, response.id,
                         self.get_name())
                i.set_url(pvuln_url)
                
                kb.kb.append(self, 'info', i)
                om.out.information(i.get_desc())
                break
Example No. 25
    def analyze_document_links(self, request, response):
        """
        Find session IDs in the URI and store them in the KB.
        """
        try:
            doc_parser = parser_cache.dpc.get_document_parser_for(response)
        except:
            pass
        else:
            parsed_refs, _ = doc_parser.get_references()
            
            for link_uri in parsed_refs:
                if self._has_sessid(link_uri) and \
                        response.get_url() not in self._already_reported:
                    # Report this information only once
                    self._already_reported.add(response.get_url())

                    desc = 'The HTML content at "%s" contains a link (%s)'\
                           ' which holds a session id. The ID could be leaked'\
                           ' to third party domains through the referrer'\
                           ' header.'
                    desc = desc % (response.get_url(), link_uri)
                    
                    #   append the info object to the KB.
                    i = Info('Session ID in URL', desc, response.id,
                             self.get_name())
                    i.set_uri(response.get_uri())
                    
                    self.kb_append(self, 'url_session', i)
                    break
Example No. 26
    def _analyze_author(self, response, frontpage_author):
        """
        Analyze the author URL.

        :param response: The http response object for the _vti_inf file.
        :param frontpage_author: A regex match object.
        :return: None. All the info is saved to the kb.
        """
        author_location = response.get_url().get_domain_path().url_join(
            frontpage_author.group(1))

        # Check for anomalies in the location of author.exe
        if frontpage_author.group(1) != '_vti_bin/_vti_aut/author.exe':
            name = 'Customized frontpage configuration'

            desc = 'The FPAuthorScriptUrl is at: "%s" instead of the default'\
                   ' location: "/_vti_bin/_vti_aut/author.exe". This is very'\
                   ' uncommon.'
            desc = desc % author_location
        else:
            name = 'FrontPage FPAuthorScriptUrl'

            desc = 'The FPAuthorScriptUrl is at: "%s".'
            desc = desc % author_location

        i = Info(name, desc, response.id, self.get_name())
        i.set_url(author_location)
        i['FPAuthorScriptUrl'] = author_location
        
        kb.kb.append(self, 'frontpage_version', i)
        om.out.information(i.get_desc())
Example No. 27
    def verify_found(self, vulnerability_names):
        """
        Runs the scan and verifies that the vulnerability with the specified
        name was found.

        :param vulnerability_names: The names of the vulnerabilities to be found
        :return: None. Will raise assertion if fails
        """
        # Setup requirements
        desc = 'The URL: "%s" uses HTML5 websocket "%s"'
        desc %= (self.target_url, self.target_url)

        i = Info('HTML5 WebSocket detected', desc, 1, 'websockets_links')
        i.set_url(URL(self.target_url))
        i[WebSocketInfoSet.ITAG] = self.target_url

        # Store found links
        info_set = WebSocketInfoSet([i])
        self.kb.append('websockets_links', 'websockets_links', i, info_set)

        # Run the plugin
        cfg = RUN_CONFIG['cfg']
        self._scan(self.target_url, cfg['plugins'])

        # Assert
        vulns = self.kb.get('websocket_hijacking', 'websocket_hijacking')
        self.assertEqual(vulnerability_names, [v.get_name() for v in vulns])
Example No. 28
    def test_to_json(self):
        i = Info('Blind SQL injection vulnerability', MockInfo.LONG_DESC, 1,
                 'plugin_name')

        i['test'] = 'foo'
        i.add_to_highlight('abc', 'def')

        iset = InfoSet([i])

        jd = iset.to_json()
        json_string = json.dumps(jd)
        jd = json.loads(json_string)

        self.assertEqual(jd['name'], iset.get_name())
        self.assertEqual(jd['url'], str(iset.get_url()))
        self.assertEqual(jd['var'], iset.get_token_name())
        self.assertEqual(jd['response_ids'], iset.get_id())
        self.assertEqual(jd['vulndb_id'], iset.get_vulndb_id())
        self.assertEqual(jd['desc'], iset.get_desc(with_id=False))
        self.assertEqual(jd['long_description'], iset.get_long_description())
        self.assertEqual(jd['fix_guidance'], iset.get_fix_guidance())
        self.assertEqual(jd['fix_effort'], iset.get_fix_effort())
        self.assertEqual(jd['tags'], iset.get_tags())
        self.assertEqual(jd['wasc_ids'], iset.get_wasc_ids())
        self.assertEqual(jd['wasc_urls'], list(iset.get_wasc_urls()))
        self.assertEqual(jd['cwe_urls'], list(iset.get_cwe_urls()))
        self.assertEqual(jd['references'], BLIND_SQLI_REFS)
        self.assertEqual(jd['owasp_top_10_references'], BLIND_SQLI_TOP10_REFS)
        self.assertEqual(jd['plugin_name'], iset.get_plugin_name())
        self.assertEqual(jd['severity'], iset.get_severity())
        self.assertEqual(jd['attributes'], iset.first_info.copy())
        self.assertEqual(jd['highlight'], list(iset.get_to_highlight()))
Example No. 29
    def _do_request(self, mutated_url, user):
        """
        Perform the request and compare.

        :return: The HTTP response id if the mutated_url is a web user
                 directory, None otherwise.
        """
        response = self._uri_opener.GET(mutated_url, cache=True,
                                        headers=self._headers)
        
        path = mutated_url.get_path()
        response_body = response.get_body().replace(path, '')

        if relative_distance_lt(response_body, self._non_existent, 0.7):

            # Avoid duplicates
            if user not in [u['user'] for u in kb.kb.get('user_dir', 'users')]:
                desc = 'A user directory was found at: %s'
                desc = desc % response.get_url()
                
                i = Info('Web user home directory', desc, response.id,
                         self.get_name())
                i.set_url(response.get_url())
                i['user'] = user

                kb.kb.append(self, 'users', i)

                for fr in self._create_fuzzable_requests(response):
                    self.output_queue.put(fr)

            return response.id

        return None
Example No. 30
    def grep(self, request, response):
        """
        Find HTML5 WebSocket links (ws:// and wss://) in the response.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        # if it is not html or js we are not interested
        if not response.is_text_or_html():
            return

        # Check if websockets are in use the fast way and, if they are,
        # move on to the slower checks
        if not (WS_URL in response.body or WSS_URL in response.body):
            return

        # if websockets usage signs were found we need to get the exact url
        url = request.get_url()

        # if it is javascript we search the whole doc
        if JavaScriptParser(response).can_parse(response):
            ws_links = find_websockets_links(response.body)
        else:
            # if it is html we should search inside <script> tags only
            dom = response.get_dom()

            if dom is None:
                return

            ws_links = set()
            script_tag_xpath = etree.XPath(SCRIPT_TAG_XPATH)

            for script in script_tag_xpath(dom):
                for ws_link in find_websockets_links(script.text):
                    ws_links.add(ws_link)

        # if we didn't find any link manual inspection is needed
        if len(ws_links) == 0:
            # TODO: In some scenarios this message is repeated multiple times,
            #       but since it's a debug() message we don't care that much.
            msg = 'The URL "%s" has signs of HTML5 WebSockets usage, ' \
                  'but couldn\'t find any useful links.\n' \
                  'Perhaps links are dynamically created using javascript.\n' \
                  'Manual inspection of the page is recommended.'
            om.out.debug(msg % url)

        for ws_link in ws_links:
            desc = 'The URL: "%s" uses HTML5 websocket "%s"'
            desc = desc % (url, ws_link)

            i = Info('HTML5 WebSocket detected', desc, response.id,
                     self.get_name())
            i.set_url(url)
            i[WebSocketInfoSet.ITAG] = ws_link

            # Store found links
            self.kb_append_uniq_group(self, 'websockets_links', i,
                                      group_klass=WebSocketInfoSet)
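
find_websockets_links is imported from elsewhere in the project; a minimal sketch of the assumed behaviour (extract ws:// and wss:// URLs from a blob of JavaScript) could be:

    import re

    WS_LINK_RE = re.compile(r'''["'](wss?://[^"'\s]+)["']''')

    def find_websockets_links(js_code):
        # Return the set of ws:// / wss:// URLs referenced in the code,
        # tolerating None (lxml returns None for empty <script> tags)
        if not js_code:
            return set()
        return set(WS_LINK_RE.findall(js_code))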
Example No. 31
    def _send_request(self, mutant):
        """
        Sends a mutant to the remote web server. I wrap urllib's _send_mutant
        just to handle errors in a different way.
        """
        try:
            response = self._uri_opener.send_mutant(mutant)
        except (BaseFrameworkException, ScanMustStopException):
            desc = 'A potential (most probably a false positive than a bug)' \
                   ' buffer-overflow was found when requesting: "%s", using' \
                   ' HTTP method %s. The data sent was: "%s".'
            desc = desc % (mutant.get_url(), mutant.get_method(), mutant.get_dc())

            i = Info.from_mutant('Potential buffer overflow vulnerability',
                                 desc, [], self.get_name(), mutant)
            
            self.kb_append_uniq(self, 'buffer_overflow', i)
        else:
            self._analyze_result(mutant, response)
Example No. 32
    def analyze_wsdl(self, request, response):
        match_list = self._multi_in.query(response.body)
        if len(match_list):
            desc = 'The URL: "%s" is a Web Services Description Language' \
                   ' page. This requires manual analysis to determine the' \
                   ' security of the web service.'
            desc = desc % response.get_url()

            i = Info('WSDL resource', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())
            i.add_to_highlight(*match_list)

            self.kb_append_uniq(self, 'wsdl', i, 'URL')
Example No. 33
    def _check_server_header(self, fuzzable_request):
        """
        HTTP GET and analyze response for server header
        """
        response = self._uri_opener.GET(fuzzable_request.get_url(), cache=True)

        server, header_name = response.get_headers().iget('server')

        if server:
            desc = 'The server header for the remote web server is: "%s".'
            desc = desc % server

            i = Info('Server header', desc, response.id, self.get_name())
            i['server'] = server
            i.add_to_highlight(header_name + ':')

            om.out.information(i.get_desc())

            # Save the results in the KB so the user can look at it
            kb.kb.append(self, 'server', i)

            # Also save this for easy internal use
            # other plugins can use this information
            kb.kb.raw_write(self, 'server_string', server)
        else:
            # strange !
            desc = ('The remote HTTP Server omitted the "server" header in'
                    ' its response.')
            i = Info('Omitted server header', desc, response.id,
                     self.get_name())

            om.out.information(i.get_desc())

            # Save the results in the KB so that other plugins can use this
            # information
            kb.kb.append(self, 'omitted_server_header', i)

            # Also save this for easy internal use
            # other plugins can use this information
            kb.kb.raw_write(self, 'server_string', '')
Example No. 34
    def grep(self, request, response):
        """
        Plugin entry point, verify if the HTML has a form with file uploads.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if not response.is_text_or_html():
            return
        
        url = response.get_url()        
        dom = response.get_dom()

        # In some strange cases, we fail to normalize the document
        if dom is None:
            return
        
        # Loop through the script tags that have a src attribute
        for script_src_tag in self._script_src_xpath(dom):

            # This should always be False due to the XPath we're using,
            # but you never know...
            if 'src' not in script_src_tag.attrib:
                continue

            script_src = script_src_tag.attrib['src']
            try:
                script_full_url = response.get_url().url_join(script_src)
            except ValueError:
                msg = 'Invalid URL found by cross_domain_js: "%s"'
                om.out.debug(msg % script_src)
                continue

            script_domain = script_full_url.get_domain()

            if script_domain != response.get_url().get_domain():
                desc = 'The URL: "%s" has a script tag with a source that points' \
                       ' to a third party site ("%s"). This practice is not' \
                       ' recommended as the security of the current site is being' \
                       ' delegated to that external entity.'
                desc = desc % (url, script_domain)
                
                i = Info('Cross-domain javascript source', desc,
                         response.id, self.get_name())
                i.set_url(url)
                to_highlight = etree.tostring(script_src_tag)
                i.add_to_highlight(to_highlight)
                
                self.kb_append_uniq(self, 'cross_domain_js', i, 'URL')
Example No. 35
    def _grep_worker(self, request, response, kb_key, domain=None):
        """
        Helper method used by self.grep()

        :param request: The HTTP request
        :param response: The HTTP response
        :param kb_key: Knowledge base dict key
        :param domain: Target domain for get_emails filter
        :return: None
        """
        try:
            dp = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            msg = 'Failed to get document parser for "%s" at get_emails.'
            om.out.debug(msg % response.get_url())
            return

        emails = set(dp.get_emails(domain))

        for mail_address in emails:
            # Reduce false positives
            if request.sent(mail_address):
                continue

            # Email addresses are case insensitive
            mail_address = mail_address.lower()
            url = response.get_url()
            uniq_key = (mail_address, url)

            if uniq_key in self._already_reported:
                continue

            # Avoid dups
            self._already_reported.add(uniq_key)

            # Create a new info object, and report it
            desc = 'The mail account: "%s" was found at "%s".'
            desc = desc % (mail_address, url)

            i = Info('Email address disclosure', desc, response.id,
                     self.get_name())
            i.add_to_highlight(mail_address)
            i.set_url(url)
            i[EmailInfoSet.ITAG] = mail_address
            i['user'] = mail_address.split('@')[0]

            self.kb_append_uniq_group('emails',
                                      kb_key,
                                      i,
                                      group_klass=EmailInfoSet)
Example No. 36
    def _is_trusted_cert(self, url, domain):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            ssl_sock = ssl.wrap_socket(s,
                                       ca_certs=self._ca_file,
                                       cert_reqs=ssl.CERT_REQUIRED,
                                       ssl_version=ssl.PROTOCOL_SSLv23)
            ssl_sock.connect((domain, url.get_port()))
            match_hostname(ssl_sock.getpeercert(), domain)
        except (ssl.SSLError, CertificateError), e:
            invalid_cert = isinstance(e, CertificateError)
            details = str(e)

            if isinstance(e, ssl.SSLError):
                err_chunks = details.split(':')
                if len(err_chunks) == 7:
                    details = err_chunks[5] + ':' + err_chunks[6]
                if 'CERTIFICATE' in details:
                    invalid_cert = True

            if invalid_cert:
                desc = ('"%s" uses an invalid security certificate.'
                        ' The certificate is not trusted because: "%s".')
                desc %= (domain, details)

                v = Vuln('Self-signed SSL certificate', desc, severity.LOW, 1,
                         self.get_name())

                tag = 'invalid_ssl_cert'
            else:
                # We use Info here instead of Vuln because this is a very common case
                desc = ('"%s" has an invalid SSL configuration.'
                        ' Technical details: "%s"')
                desc %= (domain, details)

                v = Info('Invalid SSL connection', desc, 1, self.get_name())

                tag = 'invalid_ssl_connect'

            v.set_url(url)

            self.kb_append(self, tag, v)
Example No. 37
    def _interesting_word(self, comment, request, response):
        """
        Find interesting words in HTML comments
        """
        comment = comment.lower()
        for word in self._multi_in.query(comment):
            if (word, response.get_url()) not in self._already_reported_interesting:
                desc = 'A comment with the string "%s" was found in: "%s".'\
                       ' This could be interesting.'
                desc = desc % (word, response.get_url())

                i = Info.from_fr('Interesting HTML comment', desc, response.id,
                                 self.get_name(), request)
                i.add_to_highlight(word)
                
                kb.kb.append(self, 'interesting_comments', i)
                om.out.information(i.get_desc())
                
                self._already_reported_interesting.add((word,
                                                        response.get_url()))
Example No. 38
    def test_from_info(self):
        url = URL('http://moth/')

        inst1 = MockInfo()
        inst1.set_uri(url)
        inst1['eggs'] = 'spam'

        inst2 = Info.from_info(inst1)

        self.assertNotEqual(id(inst1), id(inst2))
        self.assertIsInstance(inst2, Info)

        self.assertEqual(inst1.get_uri(), inst2.get_uri())
        self.assertEqual(inst1.get_uri(), url)
        self.assertEqual(inst1.get_url(), inst2.get_url())
        self.assertEqual(inst1.get_method(), inst2.get_method())
        self.assertEqual(inst1.get_to_highlight(), inst2.get_to_highlight())

        self.assertEqual(inst2.get_uri(), url)
        self.assertEqual(inst2['eggs'], 'spam')
Example No. 39
    def find_error_page(self, request, response):
        for msg in self._multi_in.query(response.body):
            desc = 'The URL: "%s" contains the descriptive error: "%s".'
            desc = desc % (response.get_url(), msg)
            i = Info('Descriptive error page', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())
            i.add_to_highlight(msg)

            self.kb_append_uniq(self, 'error_page', i, 'URL')

            # There is no need to report more than one info for the same result,
            # the user will read the info object and analyze it even if we
            # report it only once. If we report it twice, he'll get mad ;)
            break
Example No. 40
    def _html_in_comment(self, comment, request, response):
        """
        Find HTML code in HTML comments
        """
        #
        # Check if HTML code is present in this comment
        #
        html_in_comment = self.HTML_RE.search(comment)

        if html_in_comment is None:
            return

        #
        # Remove false positives
        #
        for false_positive_string in self.HTML_FALSE_POSITIVES:
            if false_positive_string in comment:
                return

        #
        # There is HTML code in the comment, report it
        #
        comment = comment.strip()
        comment = comment.replace('\n', '')
        comment = comment.replace('\r', '')
        comment = comment[:40]

        desc = ('A comment containing HTML code "%s" was found in: "%s".'
                ' This could be interesting.')
        desc %= (comment, response.get_url())

        i = Info.from_fr('HTML comment contains HTML code', desc, response.id,
                         self.get_name(), request)
        i.set_uri(response.get_uri())
        i.add_to_highlight(html_in_comment.group(0))
        i[HTMLCommentHidesHTMLInfoSet.ITAG] = comment

        self.kb_append_uniq_group(self,
                                  'html_comment_hides_html',
                                  i,
                                  group_klass=HTMLCommentHidesHTMLInfoSet)
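
HTML_RE and HTML_FALSE_POSITIVES are class-level constants that are not shown in this excerpt; hypothetical definitions that are merely consistent with how they are used above:

    import re

    # Hypothetical approximations, only meant to show the shape of the constants
    HTML_RE = re.compile(r'<[a-zA-Z]+\s*[^>]*>.*?</[a-zA-Z]+>', re.DOTALL)
    HTML_FALSE_POSITIVES = ('[if IE', '<![endif]', '<!DOCTYPE')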
Example No. 41
    def _session_hash_function(self, response):
        regex_str = 'session\.hash_function</td><td class="v">(.*?)</td>'
        session_hash_function = re.search(regex_str, response.get_body(), re.I)
        if session_hash_function:
            
            if session_hash_function.group(1) == 0\
            or session_hash_function.group(1) != 'no':
                desc = 'The phpinfo()::session.hash_function uses the md5 algorithm.'
                i = Info('PHP session.hash_function:md5', desc, response.id,
                         self.get_name())
            else:
                desc = 'The phpinfo()::session.hash_function uses the sha algorithm.'
                i = Info('PHP session.hash_function:sha', desc, response.id,
                         self.get_name())

            i.set_url(response.get_url())
            
            kb.kb.append(self, 'phpinfo', i)
            om.out.information(i.get_desc())
Example No. 42
    def _magic_quotes_gpc(self, response):
        regex_str = 'magic_quotes_gpc</td><td class="v">(On|Off)</td>'
        magic_quotes_gpc = re.search(regex_str, response.get_body(), re.I)
        if magic_quotes_gpc:
            mqg = magic_quotes_gpc.group(1)

            if mqg == 'On':
                desc = 'The phpinfo()::magic_quotes_gpc is on.'
                i = Info('PHP magic_quotes_gpc: On', desc, response.id,
                         self.get_name())
            else:
                desc = 'The phpinfo()::magic_quotes_gpc is off.'
                i = Info('PHP magic_quotes_gpc: Off', desc, response.id,
                         self.get_name())

            i.set_url(response.get_url())
            kb.kb.append(self, 'phpinfo', i)
            om.out.information(i.get_desc())
Example No. 43
    def _report(self, scantask, report_file):
        """
        Displays detailed report information to the user and saves the data to
        the kb.

        :return: None.
        """
        halberd_report = file(report_file).read()
        os.unlink(report_file)
        om.out.information(halberd_report)

        clues = scantask.analyzed
        if len(clues) > 1:
            # This is added so other w3af plugins can read the halberd results.
            # If needed by other plugins, I could fill up the info object with
            # more data about the different headers, time, etc...
            i = Info('HTTP load balancer detected', halberd_report, 1,
                     self.get_name())
            i['server_number'] = len(clues)

            kb.kb.append(self, 'halberd', i)
Example No. 44
    def _analyze_response(self, original_resp, resp):
        """
        :param original_resp: The HTTPResponse object that holds the
                              ORIGINAL response.
        :param resp: The HTTPResponse object that holds the content of
                     the response to analyze.
        """
        if fuzzy_not_equal(original_resp.get_body(), resp.get_body(), 0.7):
            response_ids = [original_resp.id, resp.id]
            desc = '[Manual verification required] The response body for a' \
                   ' request with a trailing dot in the domain, and the response' \
                   ' body without a trailing dot in the domain differ. This could' \
                   ' indicate a misconfiguration in the virtual host settings. In' \
                   ' some cases, this misconfiguration permits the attacker to' \
                   ' read the source code of the web application.'

            i = Info('Potential virtual host misconfiguration', desc,
                     response_ids, self.get_name())

            om.out.information(desc)
            kb.kb.append(self, 'domain_dot', i)
Example No. 45
def session_hash_function(response):
    regex_str = 'session\.hash_function</td><td class="v">(.*?)</td>'
    session_hash_function_mo = re.search(regex_str, response.get_body(), re.I)

    if not session_hash_function_mo:
        return

    if session_hash_function_mo.group(1) == 0 \
            or session_hash_function_mo.group(1) != 'no':
        desc = 'The phpinfo()::session.hash_function uses the insecure md5 algorithm.'
        i = Info('PHP session.hash_function:md5', desc, response.id, 'phpinfo')
    else:
        desc = 'The phpinfo()::session.hash_function uses the insecure sha algorithm.'
        i = Info('PHP session.hash_function:sha', desc, response.id, 'phpinfo')

    i.set_url(response.get_url())

    kb.kb.append('phpinfo', 'phpinfo', i)
    om.out.information(i.get_desc())
Example No. 46
    def grep(self, request, response):
        """
        Plugin entry point, test for the existence of auto-completable HTML
        forms containing password-type inputs, i.e. forms whose <autocomplete>
        attribute is either missing or not set to 'off'.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        url = response.get_url()
        dom = response.get_dom()

        if not response.is_text_or_html() or dom is None:
            return

        autocompletable = lambda inp: (
            inp.get('autocomplete', 'on').lower() != 'off')

        # Loop through "auto-completable" forms
        for form in self._autocomplete_forms_xpath(dom):

            passwd_inputs = self._pwd_input_xpath(form)

            # Test existence of password-type inputs and verify that
            # all inputs are autocompletable
            if passwd_inputs and all(
                    map(autocompletable,
                        chain(passwd_inputs, self._text_input_xpath(form)))):

                form_str = etree.tostring(form)
                to_highlight = form_str[:form_str.find('>') + 1]

                desc = ('The URL: "%s" has a "<form>" element with '
                        'auto-complete enabled.')
                desc %= url

                i = Info('Auto-completable form', desc, response.id,
                         self.get_name())
                i.set_url(url)
                i.add_to_highlight(to_highlight)

                self.kb_append_uniq(self,
                                    'form_autocomplete',
                                    i,
                                    filter_by='URL')
                break
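
The compiled XPath helpers used above (_autocomplete_forms_xpath, _pwd_input_xpath, _text_input_xpath) are created elsewhere in the plugin; a rough approximation of what they select, assuming lxml etree.XPath objects compiled once at plugin setup:

    from lxml import etree

    # Hypothetical expressions; the real plugin may be stricter about case
    _autocomplete_forms_xpath = etree.XPath(
        '//form[not(@autocomplete) or @autocomplete!="off"]')
    _pwd_input_xpath = etree.XPath('.//input[@type="password"]')
    _text_input_xpath = etree.XPath('.//input[@type="text"]')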
Example No. 47
    def grep(self, request, response):
        """
        Plugin entry point, test for the existence of auto-completable HTML
        forms containing password-type inputs, i.e. forms whose <autocomplete>
        attribute is either missing or not set to 'off'.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        if not response.is_text_or_html():
            return

        try:
            doc_parser = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            return

        for form in doc_parser.get_forms():

            # Only analyze forms which have autocomplete enabled at <form>
            if form.get_autocomplete() is False:
                continue

            for form_field_list in form.meta.itervalues():
                for form_field in form_field_list:
                    if form_field.input_type != INPUT_TYPE_PASSWD:
                        continue

                    if not form_field.autocomplete:
                        continue

                    url = response.get_url()
                    desc = ('The URL: "%s" has a "<form>" element with '
                            'auto-complete enabled.')
                    desc %= url

                    i = Info('Auto-completable form', desc, response.id,
                             self.get_name())
                    i.add_to_highlight('autocomplete')
                    i.set_url(url)

                    self.kb_append_uniq(self,
                                        'form_autocomplete',
                                        i,
                                        filter_by='URL')
                    break
Example No. 48
    def test_from_mutant(self):
        url = URL('http://moth/?a=1&b=2')
        payloads = ['abc', 'def']

        freq = FuzzableRequest(url)
        fuzzer_config = {}

        created_mutants = QSMutant.create_mutants(freq, payloads, [], False,
                                                  fuzzer_config)

        mutant = created_mutants[0]

        inst = Info.from_mutant('TestCase', 'desc' * 30, 1, 'plugin_name',
                                mutant)

        self.assertIsInstance(inst, Info)

        self.assertEqual(inst.get_uri(), mutant.get_uri())
        self.assertEqual(inst.get_url(), mutant.get_url())
        self.assertEqual(inst.get_method(), mutant.get_method())
        self.assertEqual(inst.get_dc(), mutant.get_dc())
        self.assertIsInstance(inst.get_dc(), QueryString)
Example No. 49
    def find_version_numbers(self, request, response):
        """
        Check whether a version number can be extracted from the error page.
        This is common in Apache, Tomcat, etc.
        """
        if 400 < response.get_code() < 600:

            for match, _, _, server in self._multi_re.query(response.body):
                match_string = match.group(0)
                if match_string not in self._already_reported_versions:
                    # Save the info obj
                    desc = 'An error page sent this %s version: "%s".'
                    desc %= (server, match_string)

                    i = Info('Error page with information disclosure', desc,
                             response.id, self.get_name())
                    i.set_url(response.get_url())
                    i.add_to_highlight(server)
                    i.add_to_highlight(match_string)

                    kb.kb.append(self, 'server', i)
                    kb.kb.raw_write(self, 'server', match_string)

                    self._already_reported_versions.append(match_string)
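
As a hedged illustration of the kind of patterns the _multi_re matcher above may hold, the following standalone sketch uses made-up regexes and a made-up error body to extract a server banner:

import re

# Illustrative patterns only; the real plugin's pattern list may differ
VERSION_REGEXES = [
    (re.compile(r'Apache/[\d.]+'), 'Apache'),
    (re.compile(r'Apache Tomcat/[\d.]+'), 'Tomcat'),
]

error_body = '<address>Apache/2.4.41 (Ubuntu) Server at example.com</address>'

for version_re, server in VERSION_REGEXES:
    match = version_re.search(error_body)
    if match:
        print(server, match.group(0))  # Apache Apache/2.4.41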
Example No. 50
    def grep(self, request, response):
        """
        Plugin entry point, identify hashes in the HTTP response.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        # I know that by doing this I lose the chance of finding hashes in
        # PDF files, but... this is much faster
        if not response.is_text_or_html():
            return

        body = response.get_body()
        splitted_body = self._split_re.split(body)
        for possible_hash in splitted_body:

            #    This is a performance enhancement that cuts the execution
            #    time of this plugin in half.
            if len(possible_hash) < 31 or len(possible_hash) > 129:
                return

            hash_type = self._get_hash_type(possible_hash)
            if not hash_type:
                return

            possible_hash = possible_hash.lower()
            if self._has_hash_distribution(possible_hash):
                if (possible_hash,
                        response.get_url()) not in self._already_reported:
                    desc = 'The URL: "%s" returned a response that may contain' \
                           ' a "%s" hash. The hash string is: "%s". This is' \
                           ' uncommon and requires human verification.'
                    desc = desc % (response.get_url(), hash_type,
                                   possible_hash)

                    i = Info('Hash string in HTML content', desc, response.id,
                             self.get_name())
                    i.set_url(response.get_url())
                    i.add_to_highlight(possible_hash)

                    self.kb_append(self, 'hash_analysis', i)

                    self._already_reported.add(
                        (possible_hash, response.get_url()))
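
The _get_hash_type() helper is defined elsewhere in the plugin; a purely hypothetical sketch of a length-based classification it could perform looks like this:

import re

HEX_RE = re.compile('^[a-fA-F0-9]+$')
# Hypothetical mapping from hex-token length to hash algorithm
HASH_LENGTHS = {32: 'md5', 40: 'sha1', 64: 'sha256', 128: 'sha512'}

def guess_hash_type(token):
    if not HEX_RE.match(token):
        return None
    return HASH_LENGTHS.get(len(token))

print(guess_hash_type('d41d8cd98f00b204e9800998ecf8427e'))  # md5
print(guess_hash_type('not-a-hash'))                        # None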
Example No. 51
    def grep(self, request, response):
        """
        Plugin entry point. Analyze if the HTTP response reason messages are strange.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        response_code = response.get_code()
        msg_list = W3C_REASONS.get(response_code, None)

        if msg_list is not None:

            response_reason = response.get_msg().lower()

            if response_reason not in msg_list:
                #
                #   I check if the kb already has an info object with this code:
                #
                strange_reason_infos = kb.kb.get('strange_reason',
                                                 'strange_reason')

                corresponding_info = None
                for info_obj in strange_reason_infos:
                    if info_obj['reason'] == response.get_msg():
                        corresponding_info = info_obj
                        break

                if corresponding_info:
                    # Work with the "old" info object:
                    id_list = corresponding_info.get_id()
                    id_list.append(response.id)
                    corresponding_info.set_id(id_list)

                else:
                    # Create a new info object from scratch and save it to the kb:
                    desc = 'The remote Web server sent a strange HTTP reason' \
                           ' message: "%s", manual inspection is advised.'
                    desc = desc % response.get_msg()
                    i = Info('Strange HTTP Reason message', desc, response.id,
                             self.get_name())
                    i.set_url(response.get_url())
                    i['reason'] = response.get_msg()
                    i.add_to_highlight(response.get_msg())

                    self.kb_append_uniq(self, 'strange_reason', i, 'URL')
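
A small self-contained sketch of the comparison performed above, using a made-up subset of the W3C_REASONS table:

# Made-up subset; the real W3C_REASONS mapping is much larger
W3C_REASONS = {200: ['ok'], 404: ['not found']}

def is_strange_reason(code, reason):
    expected = W3C_REASONS.get(code)
    if expected is None:
        return False
    return reason.lower() not in expected

print(is_strange_reason(200, 'OK'))             # False
print(is_strange_reason(404, 'File not here'))  # True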
Example No. 52
def open_basedir(response):
    regex_str = 'open_basedir</td><td class="v">(.*?)</td>'
    open_basedir_mo = re.search(regex_str, response.get_body(), re.I)

    if not open_basedir_mo:
        return

    obd = open_basedir_mo.group(1)

    if obd == '<i>no value</i>':
        desc = 'The phpinfo()::open_basedir is not set.'
        i = Info('PHP open_basedir:disabled', desc, response.id, 'phpinfo')

    else:
        desc = 'The phpinfo()::open_basedir is set to %s.'
        desc %= obd
        i = Info('PHP open_basedir:enabled', desc, response.id, 'phpinfo')

    i.set_url(response.get_url())
    kb.kb.append('phpinfo', 'phpinfo', i)
    om.out.information(i.get_desc())
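
A quick sketch of how the regex above matches a phpinfo() table row; the HTML fragment below is invented for illustration:

import re

regex_str = 'open_basedir</td><td class="v">(.*?)</td>'
sample_row = ('<tr><td class="e">open_basedir</td>'
              '<td class="v">/var/www/:/tmp/</td></tr>')

mo = re.search(regex_str, sample_row, re.I)
print(mo.group(1))  # /var/www/:/tmp/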
Example No. 53
    def _analyze_strange(self, request, response, ref, token_name,
                         token_value):
        if self._is_strange(request, token_name, token_value):
            desc = ('The URI: "%s" has a parameter named: "%s" with value:'
                    ' "%s", which is very uncommon. and requires manual'
                    ' verification.')
            desc %= (response.get_uri(), token_name, token_value)

            i = Info('Uncommon query string parameter', desc, response.id,
                     self.get_name())
            i['parameter_value'] = token_value
            i.add_to_highlight(token_value)
            i.set_uri(ref)

            self.kb_append(self, 'strange_parameters', i)
            return True

        return False
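
The _is_strange() helper is implemented elsewhere in the plugin; a purely hypothetical sketch of the kind of heuristic it could apply is:

import re

# Invented heuristic: flag values that look like paths, traversal or code
SUSPICIOUS_VALUE_RE = re.compile(r'(^/|\.\./|<\?|\{\$)')

def looks_strange(value):
    return bool(SUSPICIOUS_VALUE_RE.search(value))

print(looks_strange('/etc/passwd'))  # True
print(looks_strange('blue'))         # False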
Example No. 54
    def _html_in_comment(self, comment, request, response):
        """
        Find HTML code in HTML comments
        """
        html_in_comment = self.HTML_RE.search(comment)

        if html_in_comment is None:
            return

        for false_positive_string in self.HTML_FALSE_POSITIVES:
            if false_positive_string in comment:
                return

        comment_data = (comment, response.get_url())

        if comment_data in self._already_reported:
            return

        self._already_reported.add(comment_data)

        # There is HTML code in the comment.
        comment = comment.strip()
        comment = comment.replace('\n', '')
        comment = comment.replace('\r', '')
        comment = comment[:40]

        desc = ('A comment containing HTML code "%s" was found in: "%s".'
                ' This could be interesting.')
        desc %= (comment, response.get_url())

        i = Info.from_fr('HTML comment contains HTML code', desc, response.id,
                         self.get_name(), request)
        i.set_uri(response.get_uri())
        i.add_to_highlight(html_in_comment.group(0))

        kb.kb.append(self, 'html_comment_hides_html', i)
        om.out.information(i.get_desc())
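
HTML_RE is defined on the class and not shown here; a sketch with an assumed, simplified pattern demonstrates the idea of finding markup inside a comment:

import re

# Assumed, simplified pattern: any opening tag inside the comment text
HTML_RE = re.compile(r'<[a-zA-Z]+\b[^>]*>')

comment = ' debug: <a href="/admin">admin panel</a> temporarily hidden '
match = HTML_RE.search(comment)
print(match.group(0) if match else None)  # <a href="/admin">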
Example No. 55
    def grep(self, request, response):
        """
        Plugin entry point.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        # Check if the header names are common or not
        for header_name in response.get_headers().keys():
            if header_name.upper() not in self.COMMON_HEADERS:

                # Check if the kb already has an info object with this code:
                strange_header_infos = kb.kb.get('strange_headers',
                                                 'strange_headers')

                for info_obj in strange_header_infos:
                    if info_obj['header_name'] == header_name:
                        # Work with the "old" info object:
                        id_list = info_obj.get_id()
                        id_list.append(response.id)
                        info_obj.set_id(id_list)
                        break
                else:
                    # Create a new info object from scratch and save it to
                    # the kb:
                    hvalue = response.get_headers()[header_name]
                    
                    desc = 'The remote web server sent the HTTP header: "%s"'\
                           ' with value: "%s", which is quite uncommon and'\
                           ' requires manual analysis.'
                    desc = desc % (header_name, hvalue)

                    i = Info('Strange header', desc, response.id,
                             self.get_name())
                    i.set_url(response.get_url())
                    i['header_name'] = header_name
                    i['header_value'] = hvalue
                    i.add_to_highlight(hvalue, header_name)
                    
                    kb.kb.append(self, 'strange_headers', i)

        # Now check for protocol anomalies
        self._content_location_not_300(request, response)
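
The loop above relies on Python's for/else construct: the else branch only runs when the loop completes without hitting break. A tiny, standalone illustration of that "update existing or create new" pattern:

def update_or_create(known_headers, header_name):
    for known in known_headers:
        if known == header_name:
            # "Update the existing info object" in the plugin
            break
    else:
        # No match found: "create a new info object" in the plugin
        known_headers.append(header_name)
    return known_headers

print(update_or_create(['x-powered-by'], 'x-custom'))  # appends 'x-custom'
print(update_or_create(['x-custom'], 'x-custom'))      # left unchanged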
Example No. 56
    def _analyze_result(self, mutant, response):
        """
        Analyze results of the _send_mutant method.
        """
        #
        #   I will only report the vulnerability once.
        #
        if self._has_no_bug(mutant):

            if self._header_was_injected(mutant, response):
                desc = ('Response splitting was found at: %s'
                        % mutant.found_at())

                v = Vuln.from_mutant('Response splitting vulnerability', desc,
                                     severity.MEDIUM, response.id,
                                     self.get_name(), mutant)

                self.kb_append_uniq(self, 'response_splitting', v)

            # When sending a response splitting payload to PHP 5.1.2 we get:
            # Header may not contain more than a single header, new line detected
            for error in self.HEADER_ERRORS:

                if error in response:
                    desc = 'The variable "%s" at URL "%s" modifies the HTTP'\
                           ' response headers, but this error was sent while'\
                           ' testing for response splitting: "%s".'
                    args = (mutant.get_token_name(), mutant.get_url(), error)
                    desc = desc % args
                    i = Info.from_mutant('Parameter modifies response headers',
                                         desc, response.id, self.get_name(),
                                         mutant)

                    self.kb_append_uniq(self, 'response_splitting', i)

                    return
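
A hedged, standalone sketch of what "header was injected" means in a response splitting scenario: a CRLF inside a reflected value starts a new header line. The payload and header names below are invented:

payload = 'w3af\r\nSomeHeader: injected'
reflected_header = 'Set-Cookie: tracking=%s' % payload

header_lines = reflected_header.split('\r\n')
print(header_lines)
# ['Set-Cookie: tracking=w3af', 'SomeHeader: injected']

injected = any(line.lower().startswith('someheader:')
               for line in header_lines)
print(injected)  # True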
Example No. 57
    def _send_request(self, mutant, debugging_id):
        """
        Sends a mutant to the remote web server. I wrap urllib's _send_mutant
        just to handle errors in a different way.
        """
        # Only grep the request which sends the larger payload
        grep = mutant.get_token_value() == self.BUFFER_TESTS[-1]

        try:
            response = self._uri_opener.send_mutant(mutant,
                                                    debugging_id=debugging_id,
                                                    grep=grep)
        except (BaseFrameworkException, ScanMustStopException):
            desc = ('A potential buffer overflow (most likely a false positive'
                    ' rather than a real bug) was found when requesting: "%s",'
                    ' using HTTP method %s. The data sent was: "%s".')
            desc %= (mutant.get_url(), mutant.get_method(), mutant.get_dc())

            i = Info.from_mutant('Potential buffer overflow vulnerability',
                                 desc, [], self.get_name(), mutant)

            self.kb_append_uniq(self, 'buffer_overflow', i)
        else:
            self._analyze_result(mutant, response)
Example No. 58
    def _report_finding(self, name, response, protected_by=None):
        """
        Creates an information object based on the name and the response
        parameter and saves the data in the kb.

        :param name: The name of the WAF
        :param response: The HTTP response object that was used to identify the WAF
        :param protected_by: A more detailed description/version of the WAF
        """
        desc = 'The remote network seems to have a "%s" WAF deployed to' \
               ' protect access to the web server.'
        desc = desc % name

        if protected_by:
            desc += ' The following is the WAF\'s version: "%s".' % protected_by

        i = Info('Web Application Firewall fingerprint', desc, response.id,
                 self.get_name())
        i.set_url(response.get_url())
        i.set_id(response.id)

        kb.kb.append(self, name, i)
        om.out.information(i.get_desc())
Example No. 59
    def _content_location_not_300(self, request, response):
        """
        Check if the response has a content-location header and the response
        code is in the 3xx range (301-309).

        :return: None, all results are saved in the kb.
        """
        headers = response.get_lower_case_headers()

        if 'content-location' in headers \
        and 300 < response.get_code() < 310:
            desc = ('The URL: "%s" sent the HTTP header: "content-location"'
                    ' with value: "%s" in an HTTP response with code %s,'
                    ' which is a violation of the RFC.')
            desc %= (response.get_url(),
                     headers['content-location'],
                     response.get_code())
            i = Info('Content-Location HTTP header anomaly', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())
            i.add_to_highlight('content-location')

            kb.kb.append(self, 'anomaly', i)
Example No. 60
    def grep(self, request, response):
        """
        Plugin entry point. Analyze if the HTTP response codes are strange.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        if response.get_code() in self.COMMON_HTTP_CODES:
            return

        # I check if the kb already has an info object with this code:
        strange_code_infos = kb.kb.get('strange_http_codes',
                                       'strange_http_codes')

        corresponding_info = None
        for info_obj in strange_code_infos:
            if info_obj['code'] == response.get_code():
                corresponding_info = info_obj
                break

        if corresponding_info:
            # Work with the "old" info object:
            id_list = corresponding_info.get_id()
            id_list.append(response.id)
            corresponding_info.set_id(id_list)

        else:
            # Create a new info object from scratch and save it to the kb:
            desc = 'The remote Web server sent a strange HTTP response code:'\
                   ' "%s" with the message: "%s", manual inspection is advised.'
            desc = desc % (response.get_code(), response.get_msg())

            i = Info('Strange HTTP response code', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())
            i['code'] = response.get_code()
            i.add_to_highlight(str(response.get_code()), response.get_msg())

            self.kb_append_uniq(self, 'strange_http_codes', i, 'URL')