Example #1
Score: 0
File: phpinfo.py — Project: everping/w3af
    def _lowest_privilege_test(self, response):
        """
        Inspect phpinfo() output for the user/group PHP is executing as and
        report a vulnerability when the account looks privileged.

        :param response: The HTTP response holding the phpinfo() body.
        :return: None, findings are saved to the kb.
        """
        # Raw string: the pattern contains \d and \( escapes which are not
        # valid string escape sequences.
        regex_str = r'User/Group </td><td class="v">(.*?)\((\d.*?)\)/(\d.*?)</td>'
        lowest_privilege_test = re.search(regex_str, response.get_body(), re.I)
        if lowest_privilege_test:
            lpt_uname = lowest_privilege_test.group(1)
            lpt_uid = int(lowest_privilege_test.group(2))
            # BUG FIX: the gid was left as a string, so "lpt_gid < 99"
            # compared str to int and never behaved as intended; convert it
            # to int just like the uid.
            lpt_gid = int(lowest_privilege_test.group(3))
            if lpt_uid < 99 or lpt_gid < 99 or \
            re.match('root|apache|daemon|bin|operator|adm', lpt_uname, re.I):

                desc = 'phpinfo()::PHP may be executing as a higher privileged'\
                       ' group. Username: %s, UserID: %s, GroupID: %s.'
                desc = desc % (lpt_uname, lpt_uid, lpt_gid)

                v = Vuln('PHP lowest_privilege_test:fail', desc,
                         severity.MEDIUM, response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
            else:
                lpt_name = 'privilege:' + lpt_uname
                # BUG FIX: the username concatenation line was corrupted
                # ("'username: '******', '" does not compile); rebuild the
                # informational message from its parts.
                lpt_desc = 'phpinfo()::PHP is executing under '
                lpt_desc += 'username: ' + lpt_uname + ', '
                lpt_desc += 'userID: ' + str(lpt_uid) + ', '
                lpt_desc += 'groupID: ' + str(lpt_gid)
                i = Info(lpt_name, lpt_desc, response.id, self.get_name())
                i.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', i)
                om.out.information(i.get_desc())
Example #2
Score: 0
File: ajax.py — Project: 3rdDegree/w3af
    def grep(self, request, response):
        """
        Plugin entry point: flag responses whose <script> content matches
        the AJAX regular expression.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        if not response.is_text_or_html():
            return

        dom = response.get_dom()
        # In some strange cases, we fail to normalize the document
        if dom is None:
            return

        url = response.get_url()

        for script in self._script_xpath(dom):
            # element.text is the text between <script> and </script>
            code = script.text
            if code is None:
                continue

            match = self._ajax_regex_re.search(code)
            if match is None:
                continue

            desc = 'The URL: "%s" has AJAX code.' % url
            info = Info('AJAX code', desc, response.id, self.get_name())
            info.set_url(url)
            info.add_to_highlight(match.group(0))

            self.kb_append_uniq(self, 'ajax', info, 'URL')
예제 #3
0
    def grep(self, request, response):
        """
        Check if all responses have X-Content-Type-Options header set

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        # Stop analyzing once enough findings were reported
        if self._reports > MAX_REPORTS:
            return

        header_value, _ = response.get_headers().iget(CT_OPTIONS_HEADER, None)
        if header_value is not None and header_value.strip().lower() == NOSNIFF:
            # The header is present and correctly configured
            return

        self._reports += 1

        desc = ('The URL "%s" returned an HTTP response without the'
                ' recommended HTTP header X-Content-Type-Options')
        desc %= response.get_url()

        info = Info('Missing X-Content-Type-Options header', desc,
                    response.id, self.get_name())
        info.set_url(response.get_url())
        info[CTSniffingInfoSet.ITAG] = response.get_url().get_domain()

        self.kb_append_uniq_group(self, 'content_sniffing', info,
                                  group_klass=CTSniffingInfoSet)
예제 #4
0
    def grep(self, request, response):
        """
        Check if the header names are common or not

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        # Check for protocol anomalies
        self._content_location_not_300(request, response)

        # PERF: fetch the header container once instead of once per
        # iteration of the loop below.
        headers = response.get_headers()

        # Check header names
        for header_name in headers.keys():
            if header_name.upper() in self.COMMON_HEADERS:
                continue

            # Create a new info object and save it to the KB
            hvalue = headers[header_name]

            desc = 'The remote web server sent the HTTP header: "%s"'\
                   ' with value: "%s", which is quite uncommon and'\
                   ' requires manual analysis.'
            desc = desc % (header_name, hvalue)

            i = Info('Strange header', desc, response.id, self.get_name())
            i.add_to_highlight(hvalue, header_name)
            i.set_url(response.get_url())
            i[StrangeHeaderInfoSet.ITAG] = header_name
            i['header_value'] = hvalue

            self.kb_append_uniq_group(self, 'strange_headers', i,
                                      group_klass=StrangeHeaderInfoSet)
예제 #5
0
    def grep(self, request, response):
        """
        Analyze if the HTTP response reason messages are strange.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        response_code = response.get_code()
        msg_list = W3C_REASONS.get(response_code, None)

        # Unknown status codes are handled elsewhere; nothing to compare
        if msg_list is None:
            return

        response_reason = response.get_msg().lower()

        if response_reason in msg_list:
            # It's common, nothing to do here.
            return

        # Create a new info object from scratch and save it to the kb:
        # BUG FIX: the two implicitly-concatenated string literals were
        # missing a separating space, producing "reasonmessage" in the
        # user-facing description.
        desc = ('The remote Web server sent a strange HTTP reason'
                ' message "%s", manual inspection is recommended.')
        desc = desc % response.get_msg()

        i = Info("Strange HTTP Reason message", desc, response.id, self.get_name())
        i.set_url(response.get_url())
        i.add_to_highlight(response.get_msg())
        # NOTE(review): this reuses StrangeHeaderInfoSet for reason messages;
        # presumably a reason-specific InfoSet was intended -- confirm before
        # changing, callers may rely on the current grouping.
        i[StrangeHeaderInfoSet.ITAG] = response.get_msg()

        self.kb_append_uniq_group(self, "strange_reason", i, group_klass=StrangeHeaderInfoSet)
Example #6
Score: 0
File: user_dir.py — Project: aricciard/w3af
    def _check_user_dir(self, mutated_url, user, user_desc, user_tag,
                        non_existent):
        """
        Perform the request and compare with non_existent

        :see _create_tests: For parameter description
        :return: The HTTP response id if the mutated_url is a web user
                 directory, None otherwise.
        """
        resp = self.http_get_and_parse(mutated_url)

        # Remove the requested path from the body so the comparison is not
        # biased by it being echoed back
        body_without_path = resp.get_body().replace(mutated_url.get_path(), '')

        if not fuzzy_not_equal(body_without_path, non_existent, 0.7):
            return

        # Avoid duplicates
        if user in [u['user'] for u in kb.kb.get('user_dir', 'users')]:
            return

        # Save the finding to the KB
        desc = 'An operating system user directory was found at: "%s"'
        desc %= resp.get_url()

        info = Info('Web user home directory', desc, resp.id, self.get_name())
        info.set_url(resp.get_url())
        info['user'] = user
        info['user_desc'] = user_desc
        info['user_tag'] = user_tag

        self.kb_append_uniq(self, 'users', info)

        # Analyze if we can get more information from this finding
        self._analyze_finding(info)
예제 #7
0
 def _analyze_methods(self, url, allowed_methods, id_list):
     """
     Save the allowed HTTP methods to the KB, reporting DAV methods
     (which should usually be disabled) separately from the regular ones.
     """
     methods_csv = ', '.join(allowed_methods)

     if set(allowed_methods).intersection(self.DAV_METHODS):
         # dav is enabled!
         # Save the results in the KB so that other plugins can use this
         # information
         desc = 'The URL "%s" has the following allowed methods. These'\
               ' include DAV methods and should be disabled: %s'
         desc = desc % (url, methods_csv)

         info = Info('DAV methods enabled', desc, id_list, self.get_name())
         info.set_url(url)
         info['methods'] = allowed_methods

         kb.kb.append(self, 'dav-methods', info)
     else:
         # Save the results in the KB so that other plugins can use this
         # information. Do not remove these information, other plugins
         # REALLY use it !
         desc = 'The URL "%s" has the following enabled HTTP methods: %s'
         desc = desc % (url, methods_csv)

         info = Info('Allowed HTTP methods', desc, id_list, self.get_name())
         info.set_url(url)
         info['methods'] = allowed_methods

         kb.kb.append(self, 'methods', info)
예제 #8
0
    def grep(self, request, response):
        """
        Check if HTTPS responses have the Strict-Transport-Security header set.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        # Limit the number of reported findings
        if self._reports > MAX_REPORTS:
            return

        # Only HTTPS responses are expected to carry the STS header
        if request.get_url().get_protocol() != 'https':
            return

        sts_value, _ = response.get_headers().iget(STS_HEADER, None)
        if sts_value is not None:
            # Header is present, nothing to report
            return

        self._reports += 1

        desc = 'The web server uses HTTPS but does not set the '\
               ' Strict-Transport-Security header.'
        info = Info('Missing Strict Transport Security header', desc,
                    response.id, self.get_name())
        info.set_url(response.get_url())
        info[STSInfoSet.ITAG] = response.get_url().get_domain()

        self.kb_append_uniq_group(self, 'strict_transport_security', info,
                                  group_klass=STSInfoSet)
예제 #9
0
    def _fingerprint_meta(self, domain_path, wp_unique_url, response):
        """
        Check if the wp version is in index header

        NOTE(review): the ``response`` parameter is shadowed by the GET
        below and ``wp_unique_url`` is unused; both are kept so the
        signature stays compatible with the sibling fingerprint methods.
        """
        # Main scan URL passed from w3af + wp index page
        wp_index_url = domain_path.url_join('index.php')
        response = self._uri_opener.GET(wp_index_url, cache=True)

        # Find the generator meta tag in the response html.
        # BUG FIX: use a raw string so \d and \. are regex escapes rather
        # than (invalid) string escape sequences.
        find = r'<meta name="generator" content="[Ww]ord[Pp]ress (\d\.\d\.?\d?)" />'
        m = re.search(find, response.get_body())

        # If string found, group version
        if m:
            version = m.group(1)

            # Save it to the kb!
            desc = 'WordPress version "%s" found in the index header.'
            desc = desc % version

            i = Info('Fingerprinted Wordpress version', desc, response.id,
                     self.get_name())
            i.set_url(wp_index_url)

            kb.kb.append(self, 'info', i)
            om.out.information(i.get_desc())
예제 #10
0
    def _analyze_domain(self, response, script_full_url, script_tag):
        """
        Checks if the domain is the same, or if it's considered secure.
        """
        url = response.get_url()
        script_domain = script_full_url.get_domain()

        if script_domain == response.get_url().get_domain():
            # Same-origin script, nothing to analyze
            return

        for secure_domain in self._secure_js_domains:
            # We do a "in" because the secure js domains list contains
            # entries such as ".google." which should be match. This is to
            # take into account things like ".google.com.br" without having
            # to list all of them.
            #
            # Not the best, could raise some false negatives, but... bleh!
            if secure_domain in script_domain:
                # It's a third party that we trust
                return

        desc = ('The URL: "%s" has a script tag with a source that points'
                ' to a third party site ("%s"). This practice is not'
                ' recommended, the security of the current site is being'
                ' delegated to the external entity.')
        desc %= (url, script_domain)

        info = Info('Cross-domain javascript source', desc,
                    response.id, self.get_name())
        info.set_url(url)
        info.add_to_highlight(script_tag.attrib.get('src'))
        info[CrossDomainInfoSet.ITAG] = script_domain

        self.kb_append_uniq_group(self, 'cross_domain_js', info,
                                  group_klass=CrossDomainInfoSet)
예제 #11
0
    def grep(self, request, response):
        """
        Plugin entry point, verify if the HTML has a form with file uploads.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if not response.is_text_or_html():
            return

        url = response.get_url()

        for input_tag in mp_doc_parser.get_tags_by_filter(response, ('input',)):
            tag_type = input_tag.attrib.get('type', None)

            # Only <input type="file"> is interesting
            if tag_type is None or tag_type.lower() != 'file':
                continue

            msg = 'A form which allows file uploads was found at "%s"' % url

            info = Info('File upload form', msg, response.id, self.get_name())
            info.set_url(url)

            self.kb_append_uniq(self, 'file_upload', info, 'URL')
예제 #12
0
    def _parse_document(self, response):
        """
        Parses the HTML and adds the mail addresses to the kb.
        """
        try:
            document_parser = parser_cache.dpc.get_document_parser_for(
                response, cache=False)
        except BaseFrameworkException:
            # Failed to find a suitable parser for the document
            return

        # Search for email addresses
        for mail in document_parser.get_emails(self._domain_root):
            if mail in self._accounts:
                continue

            self._accounts.append(mail)

            desc = 'The mail account: "%s" was found at: "%s".'
            desc = desc % (mail, response.get_uri())

            info = Info('Email account', desc, response.id,
                        self.get_name())
            info.set_url(response.get_uri())
            info['mail'] = mail
            info['user'] = mail.split('@')[0]
            info['url_list'] = {response.get_uri()}

            self.kb_append('emails', 'emails', info)
예제 #13
0
    def end(self):
        """
        This method is called when the plugin wont be used anymore.
        """
        all_findings = kb.kb.get_all_findings()

        for title, desc, _id, url, highlight in self._potential_vulns:
            # This makes sure that if the sqli plugin found a vulnerability
            # in the same URL as we found a detailed error, we won't report
            # the detailed error.
            #
            # If the user fixes the sqli vulnerability and runs the scan again
            # most likely the detailed error will disappear too. If the sqli
            # vulnerability disappears and this one remains, it will appear
            # as a new vulnerability in the second scan.
            if any(finding.get_url() == url for finding in all_findings):
                continue

            info = Info(title, desc, _id, self.get_name())
            info.set_url(url)
            info.add_to_highlight(highlight)

            self.kb_append_uniq(self, 'error_page', info)

        self._potential_vulns.cleanup()
예제 #14
0
    def grep(self, request, response):
        """
        Plugin entry point, verify if the HTML has a form with file uploads.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if not response.is_text_or_html():
            return

        dom = response.get_dom()

        # In some strange cases, we fail to normalize the document
        if dom is None:
            return

        url = response.get_url()

        # Loop through file inputs tags
        for input_file in self._file_input_xpath(dom):
            msg = 'The URL: "%s" has form with file upload capabilities.' % url

            info = Info('File upload form', msg, response.id,
                        self.get_name())
            info.set_url(url)
            info.add_to_highlight(etree.tostring(input_file))

            self.kb_append_uniq(self, 'file_upload', info, 'URL')
예제 #15
0
    def _fingerprint_data(self, domain_path, wp_unique_url, response):
        """
        Find wordpress version from data
        """
        for wp_fingerprint in self._get_wp_fingerprints():

            # The URL in the XML is relative AND it has two different variables
            # that we need to replace:
            #        $wp-content$    -> wp-content/
            #        $wp-plugins$    -> wp-content/plugins/
            path = wp_fingerprint.filepath
            path = path.replace('$wp-content$', 'wp-content/')
            path = path.replace('$wp-plugins$', 'wp-content/plugins/')
            test_url = domain_path.url_join(path)

            response = self._uri_opener.GET(test_url, cache=True)

            # Compare the md5 of the response body against the known hash
            if hashlib.md5(response.get_body()).hexdigest() != wp_fingerprint.hash:
                continue

            version = wp_fingerprint.version

            # Save it to the kb!
            desc = 'WordPress version "%s" fingerprinted by matching known md5'\
                   ' hashes to HTTP responses of static resources available at'\
                   ' the remote WordPress install.'
            desc = desc % version
            info = Info('Fingerprinted Wordpress version', desc, response.id,
                        self.get_name())
            info.set_url(test_url)

            kb.kb.append(self, 'info', info)
            om.out.information(info.get_desc())

            break
예제 #16
0
    def _do_request(self, mutated_url, user):
        """
        Perform the request and compare.

        :return: The HTTP response id if the mutated_url is a web user
                 directory, None otherwise.
        """
        response = self._uri_opener.GET(mutated_url, cache=True,
                                        headers=self._headers)

        # Strip the requested path from the body before comparing
        body = response.get_body().replace(mutated_url.get_path(), '')

        if not relative_distance_lt(body, self._non_existent, 0.7):
            return None

        # Avoid duplicates
        known_users = [u['user'] for u in kb.kb.get('user_dir', 'users')]
        if user not in known_users:
            desc = 'A user directory was found at: %s' % response.get_url()

            info = Info('Web user home directory', desc, response.id,
                        self.get_name())
            info.set_url(response.get_url())
            info['user'] = user

            kb.kb.append(self, 'users', info)

            for fr in self._create_fuzzable_requests(response):
                self.output_queue.put(fr)

        return response.id
예제 #17
0
    def discover(self, fuzzable_request):
        """
        Checks if JBoss Interesting Directories exist in the target server.
        Also verifies some vulnerabilities.
        """
        base_url = fuzzable_request.get_url().base_url()

        # Fan the requests out to the worker pool
        args_iter = izip(repeat(base_url), self.JBOSS_VULNS)
        otm_send_request = one_to_many(self.send_request)
        response_pool = self.worker_pool.imap_unordered(otm_send_request,
                                                        args_iter)

        for vuln_db_instance, response in response_pool:

            if is_404(response):
                continue

            vuln_url = base_url.url_join(vuln_db_instance['url'])

            if vuln_db_instance['type'] == 'info':
                finding = Info(vuln_db_instance['name'],
                               vuln_db_instance['desc'],
                               response.id, self.get_name())
            else:
                finding = Vuln(vuln_db_instance['name'],
                               vuln_db_instance['desc'],
                               severity.LOW, response.id, self.get_name())

            finding.set_url(vuln_url)
            kb.kb.append(self, 'find_jboss', finding)

            for fr in self._create_fuzzable_requests(response):
                self.output_queue.put(fr)
    def _test_DNS(self, original_response, dns_wildcard_url):
        """
        Check if http://www.domain.tld/ == http://domain.tld/
        """
        headers = Headers([("Host", dns_wildcard_url.get_domain())])
        try:
            modified_response = self._uri_opener.GET(original_response.get_url(),
                                                     cache=True,
                                                     headers=headers)
        except BaseFrameworkException:
            return

        bodies_differ = relative_distance_lt(modified_response.get_body(),
                                             original_response.get_body(),
                                             0.35)

        if bodies_differ:
            desc = (
                "The target site has NO DNS wildcard, and the contents" ' of "%s" differ from the contents of "%s".'
            )
            desc = desc % (dns_wildcard_url, original_response.get_url())

            info = Info("No DNS wildcard", desc, modified_response.id,
                        self.get_name())
            info.set_url(dns_wildcard_url)
        else:
            desc = (
                "The target site has a DNS wildcard configuration, the"
                ' contents of "%s" are equal to the ones of "%s".'
            )
            desc = desc % (dns_wildcard_url, original_response.get_url())

            info = Info("DNS wildcard", desc, modified_response.id,
                        self.get_name())
            info.set_url(original_response.get_url())

        kb.kb.append(self, "dns_wildcard", info)
        om.out.information(info.get_desc())
예제 #19
0
    def verify_found(self, vulnerability_names):
        """
        Runs the scan and verifies that the vulnerability with the specified
        name was found.

        :param vulnerability_names: The names of the vulnerabilities to be found
        :return: None. Will raise assertion if fails
        """
        # Setup requirements
        desc = ('The URL: "%s" uses HTML5 websocket "%s"'
                % (self.target_url, self.target_url))

        info = Info('HTML5 WebSocket detected', desc, 1, 'websockets_links')
        info.set_url(URL(self.target_url))
        info[WebSocketInfoSet.ITAG] = self.target_url

        # Store found links
        info_set = WebSocketInfoSet([info])
        self.kb.append('websockets_links', 'websockets_links', info, info_set)

        # Run the plugin
        cfg = RUN_CONFIG['cfg']
        self._scan(self.target_url, cfg['plugins'])

        # Assert
        vulns = self.kb.get('websocket_hijacking', 'websocket_hijacking')
        found_names = [v.get_name() for v in vulns]
        self.assertEqual(vulnerability_names, found_names)
Example #20
Score: 0
File: symfony.py — Project: 0x554simon/w3af
    def grep(self, request, response):
        """
        Plugin entry point.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        if not response.is_text_or_html():
            return

        # Only analyze responses generated by the Symfony framework
        if not self.symfony_detected(response):
            return

        # Forms protected with a CSRF token are fine
        if self.has_csrf_token(response):
            return

        desc = ('The URL: "%s" seems to be generated by the Symfony framework'
                ' and contains a form that has CSRF protection disabled.')
        desc %= response.get_url()

        info = Info('Symfony Framework with CSRF protection disabled',
                    desc, response.id, self.get_name())
        info.set_url(response.get_url())
        self.kb_append_uniq(self, 'symfony', info, 'URL')
예제 #21
0
    def _do_request(self, url, mutant):
        """
        Perform a simple GET to see if the result is an error or not, and then
        run the actual fuzzing.
        """
        response = self._uri_opener.GET(mutant, cache=True,
                                        headers=self._headers)

        # Guard clauses replacing the original combined boolean; the check
        # order (404, then code, then eval) is preserved.
        if is_404(response):
            return

        if response.get_code() in (403, 401):
            return

        if self._return_without_eval(mutant):
            return

        # Create the fuzzable request and send it to the core
        fr = FuzzableRequest.from_http_response(response)
        self.output_queue.put(fr)

        #
        #   Save it to the kb (if new)!
        #
        response_url = response.get_url()
        if response_url in self._seen or not response_url.get_file_name():
            return

        desc = 'A potentially interesting file was found at: "%s".'
        desc = desc % response_url

        info = Info('Potentially interesting file', desc, response.id,
                    self.get_name())
        info.set_url(response_url)

        kb.kb.append(self, 'files', info)
        om.out.information(info.get_desc())

        # Report only once
        self._seen.add(response_url)
예제 #22
0
    def _match_cookie_fingerprint(self, request, response, cookie_obj):
        """
        Now we analyze the cookie and try to guess the remote web server or
        programming framework based on the cookie that was sent.

        :return: True if the cookie was fingerprinted
        """
        cookie_obj_str = cookie_obj.output(header='')

        for cookie_str_db, system_name in self.COOKIE_FINGERPRINT:
            if cookie_str_db not in cookie_obj_str:
                continue

            if system_name in self._already_reported_server:
                # Already reported this platform; keep scanning the DB
                continue

            desc = ('A cookie matching the cookie fingerprint DB'
                    ' has been found when requesting "%s".'
                    ' The remote platform is: "%s".')
            desc = desc % (response.get_url(), system_name)

            info = Info('Identified cookie', desc,
                        response.id, self.get_name())

            info.set_url(response.get_url())
            info['httpd'] = system_name

            self._set_cookie_to_rep(info, cobj=cookie_obj)

            kb.kb.append(self, 'security', info)
            self._already_reported_server.append(system_name)
            return True

        return False
예제 #23
0
    def _analyze_author(self, response, frontpage_author):
        """
        Analyze the author URL.

        :param response: The http response object for the _vti_inf file.
        :param frontpage_author: A regex match object.
        :return: None. All the info is saved to the kb.
        """
        author_path = frontpage_author.group(1)
        author_location = response.get_url().get_domain_path().url_join(
            author_path)

        # Check for anomalies in the location of author.exe
        if author_path != '_vti_bin/_vti_aut/author.exe':
            name = 'Customized frontpage configuration'
            desc = ('The FPAuthorScriptUrl is at: "%s" instead of the default'
                    ' location: "/_vti_bin/_vti_adm/author.exe". This is very'
                    ' uncommon.')
        else:
            name = 'FrontPage FPAuthorScriptUrl'
            desc = 'The FPAuthorScriptUrl is at: "%s".'

        desc = desc % author_location

        info = Info(name, desc, response.id, self.get_name())
        info.set_url(author_location)
        info['FPAuthorScriptUrl'] = author_location

        kb.kb.append(self, 'frontpage_version', info)
        om.out.information(info.get_desc())
    def _force_disclosures(self, domain_path, potentially_vulnerable_paths):
        """
        :param domain_path: The path to wordpress' root directory
        :param potentially_vulnerable_paths: A list with the paths I'll URL-join
                                             with @domain_path, GET and parse.
        """
        for pvuln_path in potentially_vulnerable_paths:

            pvuln_url = domain_path.url_join(pvuln_path)
            response = self._uri_opener.GET(pvuln_url, cache=True)

            if is_404(response):
                continue

            if 'Fatal error: ' not in response.get_body():
                continue

            desc = ('Analyze the HTTP response body to find the full path'
                    ' where wordpress was installed.')
            info = Info('WordPress path disclosure', desc, response.id,
                        self.get_name())
            info.set_url(pvuln_url)

            kb.kb.append(self, 'info', info)
            om.out.information(info.get_desc())
            break
예제 #25
0
    def grep(self, request, response):
        """
        Plugin entry point. Analyze if the HTTP response codes are strange.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        code = response.get_code()

        if code in self.COMMON_HTTP_CODES:
            return

        # Create a new info object from scratch and save it to the kb
        desc = ('The remote Web server sent a strange HTTP response code:'
                ' "%s" with the message: "%s", manual inspection is'
                ' recommended.')
        desc %= (code, response.get_msg())

        info = Info('Strange HTTP response code',
                    desc, response.id, self.get_name())
        info.add_to_highlight(str(code), response.get_msg())
        info.set_url(response.get_url())
        info[StrangeCodesInfoSet.ITAG] = code
        info['message'] = response.get_msg()

        self.kb_append_uniq_group(self, 'strange_http_codes', info,
                                  group_klass=StrangeCodesInfoSet)
예제 #26
0
    def _analyze_methods(self, url, _allowed_methods, id_list):
        """
        Store the allowed HTTP methods in the KB, reporting DAV methods
        separately since they should usually be disabled.
        """
        # Sometimes there are no allowed methods, which means that our plugin
        # failed to identify any methods.
        if not _allowed_methods:
            return

        methods_csv = ', '.join(_allowed_methods)

        # Check for DAV
        if set(_allowed_methods).intersection(self.DAV_METHODS):
            # dav is enabled!
            # Save the results in the KB so that other plugins can use this
            # information
            desc = ('The URL "%s" has the following allowed methods. These'
                    ' include DAV methods and should be disabled: %s')
            desc = desc % (url, methods_csv)

            info = Info('DAV methods enabled', desc, id_list, self.get_name())
            info.set_url(url)
            info['methods'] = _allowed_methods

            kb.kb.append(self, 'dav-methods', info)
        else:
            # Save the results in the KB so that other plugins can use this
            # information. Do not remove these information, other plugins
            # REALLY use it !
            desc = 'The URL "%s" has the following enabled HTTP methods: %s'
            desc = desc % (url, methods_csv)

            info = Info('Allowed HTTP methods', desc, id_list, self.get_name())
            info.set_url(url)
            info['methods'] = _allowed_methods

            kb.kb.append(self, 'methods', info)
Example #27
Score: 0
File: objects.py — Project: 3rdDegree/w3af
    def grep(self, request, response):
        """
        Plugin entry point. Parse the object tags.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        url = response.get_url()
        dom = response.get_dom()

        # Guard clauses mirroring the original combined condition
        if not response.is_text_or_html():
            return

        if dom is None:
            return

        for element in self._tag_xpath(dom):
            tag_name = element.tag

            desc = ('The URL: "%s" has an "%s" tag. We recommend you download'
                    ' the client side code and analyze it manually.')
            desc = desc % (response.get_uri(), tag_name)

            info = Info('Browser plugin content', desc, response.id,
                        self.get_name())
            info.set_url(url)
            info.add_to_highlight(tag_name)

            self.kb_append_uniq(self, tag_name, info, 'URL')
예제 #28
0
    def discover(self, fuzzable_request):
        """
        Query the MIT PKS key server for email accounts at the target domain.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        root_domain = fuzzable_request.get_url().get_root_domain()
        pks_url = 'http://pgp.mit.edu:11371/'

        pks_search_engine = pks(self._uri_opener)

        for result in pks_search_engine.search(root_domain):
            mail = '%s@%s' % (result.username, root_domain)

            desc = 'The mail account: "%s" was found at: "%s".'
            desc = desc % (mail, pks_url)

            i = Info('Email account', desc, result.id, self.get_name())
            i.set_url(URL(pks_url))
            i['mail'] = mail
            i['user'] = result.username
            i['name'] = result.name
            i['url_list'] = {URL(pks_url)}

            kb.kb.append('emails', 'emails', i)
            om.out.information(i.get_desc())
예제 #29
0
    def grep(self, request, response):
        """
        Find HTML5 websocket links (ws:// and wss://) in HTML and JavaScript
        responses and store them in the KB.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        # if it is not html or js we are not interested
        if not response.is_text_or_html():
            return

        # checking if websockets are in use the fast way and if they
        # are moving on to slower checks
        if not (WS_URL in response.body or WSS_URL in response.body):
            return

        # if websockets usage signs were found we need to get the exact url
        url = request.get_url()

        # if it is javascript we search the whole doc
        if JavaScriptParser(response).can_parse(response):
            ws_links = find_websockets_links(response.body)
        else:
            # if it is html we should search inside <script> tags only
            dom = response.get_dom()

            if dom is None:
                return

            ws_links = set()
            script_tag_xpath = etree.XPath(SCRIPT_TAG_XPATH)

            for script in script_tag_xpath(dom):
                # Bugfix: <script src="..."></script> tags have no inline
                # content, lxml returns None for .text in that case and
                # find_websockets_links() would raise on it
                if not script.text:
                    continue

                for ws_link in find_websockets_links(script.text):
                    ws_links.add(ws_link)

        # if we didn't find any link manual inspection is needed
        if len(ws_links) == 0:
            # TODO: In some scenarios this message is repeated multiple, since
            #       it's a debug() message we don't care that much.
            msg = 'The URL "%s" has signs of HTML5 WebSockets usage, ' \
                  'but couldn\'t find any useful links.\n' \
                  'Perhaps links are dynamically created using javascript.\n' \
                  'Manual inspection of the page is recommended.'
            om.out.debug(msg % url)

        for ws_link in ws_links:
            desc = 'The URL: "%s" uses HTML5 websocket "%s"'
            desc = desc % (url, ws_link)

            i = Info('HTML5 WebSocket detected', desc, response.id,
                     self.get_name())
            i.set_url(url)
            i[WebSocketInfoSet.ITAG] = ws_link

            # Store found links
            self.kb_append_uniq_group(self, 'websockets_links', i,
                                      group_klass=WebSocketInfoSet)
예제 #30
0
 def _analyze_gears_manifest(self, url, response, file_name):
     """
     Report a finding when the response looks like a Google Gears manifest.

     :param url: The URL where the resource was found (only used in the
                 description text)
     :param response: The response to inspect; note the membership test is
                      done on `response` directly, not on a get_body() call
     :param file_name: The file name that was requested (unused here)
     :return: None, the finding is stored in the KB
     """
     # Gears manifests are JSON documents that contain an "entries" list of
     # the files to be cached on the client side
     if '"entries":' in response:
         desc = 'A gears manifest file was found at: "{}". ' \
                'Each file should be manually reviewed for sensitive information ' \
                'that may get cached on the client.'.format(url)
         i = Info('Gears manifest resource', desc, response.id, self.get_name())
         # NOTE(review): the Info points at self._target_url instead of the
         # `url` parameter where the manifest was actually found — confirm
         # this is intentional
         i.set_url(self._target_url)
         self.kb_append(self, 'wg_ria_enumerator', i)
예제 #31
0
    def crawl(self, fuzzable_request):
        """
        Get the urllist.txt file and parse it.

        :param fuzzable_request: A fuzzable_request instance that contains
                                    (among other things) the URL to test.
        """
        base_url = fuzzable_request.get_url().base_url()
        urllist_url = base_url.url_join('urllist.txt')
        http_response = self._uri_opener.GET(urllist_url, cache=True)

        if is_404(http_response):
            return

        body = http_response.get_body()

        if self._is_urllist_txt(base_url, body):
            # Save it to the kb!
            desc = 'A urllist.txt file was found at: "%s", this file might' \
                   ' expose private URLs and requires a manual review. The' \
                   ' scanner will add all URLs listed in this files to the' \
                   ' analysis queue.'
            desc = desc % urllist_url

            i = Info('urllist.txt file', desc, http_response.id,
                     self.get_name())
            i.set_url(urllist_url)

            kb.kb.append(self, 'urllist.txt', i)
            om.out.information(i.get_desc())

        # Even in the case where it is NOT a valid urllist.txt it might be
        # the case where some URLs are present, so I'm going to extract them
        # from the file as if it is a valid urllist.txt
        url_generator = self._extract_urls_generator(base_url, body)

        # Send the requests using threads:
        self.worker_pool.map(self.http_get_and_parse, url_generator)
예제 #32
0
    def _fingerprint_readme(self, domain_path, wp_unique_url, response):
        """
        GET the readme.html file and extract the version information from there.
        """
        wp_readme_url = domain_path.url_join('readme.html')
        response = self._uri_opener.GET(wp_readme_url, cache=True)

        # Find the version string in the response html
        version_mo = re.search(r'<br /> Version (\d\.\d\.?\d?)',
                               response.get_body())

        if version_mo is None:
            return

        version = version_mo.group(1)

        desc = 'WordPress version "%s" found in the readme.html file.'
        desc = desc % version

        i = Info('Fingerprinted Wordpress version', desc, response.id,
                 self.get_name())
        i.set_url(wp_readme_url)

        kb.kb.append(self, 'info', i)
        om.out.information(i.get_desc())
예제 #33
0
    def _analyze_finding(self, user_info):
        """
        If required, save a Info to the KB with the extra information we can
        get from user_info.

        :param user_info: A Info object as created by _check_user_dir
        :return: None, info is stored in KB
        """
        user = user_info['user']
        user_desc = user_info['user_desc']
        tag = user_info['user_tag']

        if tag == OS:
            name = 'Fingerprinted operating system'
            desc = 'The remote OS can be identified as "%s" based'\
                   ' on the remote user "%s" information that is'\
                   ' exposed by the web server.'
        elif tag == APPLICATION:
            name = 'Identified installed application'
            desc = 'The remote server has "%s" installed, w3af'\
                   ' found this information based on the remote'\
                   ' user "%s".'
        else:
            # Nothing extra to extract for this tag
            return

        desc = desc % (user_desc, user)

        i = Info(name, desc, user_info.get_id(), self.get_name())
        i.set_url(user_info.get_url())

        kb.kb.append(self, 'users', i)
        om.out.report_finding(i)
예제 #34
0
    def _check_user_dir(self, mutated_url, user, user_desc, user_tag,
                        non_existent):
        """
        Perform the request and compare with non_existent

        :see _create_tests: For parameter description
        :return: The HTTP response id if the mutated_url is a web user
                 directory, None otherwise.
        """
        resp = self.http_get_and_parse(mutated_url)

        path = mutated_url.get_path()
        response_body = resp.get_body().replace(path, '')

        if not fuzzy_not_equal(response_body, non_existent, 0.7):
            return

        # Avoid duplicates
        known_users = [ui.get('user', None)
                       for ui in kb.kb.get('user_dir', 'users')]
        if user in known_users:
            return

        # Save the finding to the KB
        desc = 'An operating system user directory was found at: "%s"'
        desc = desc % resp.get_url()

        i = Info('Web user home directory', desc, resp.id, self.get_name())
        i.set_url(resp.get_url())
        i['user'] = user
        i['user_desc'] = user_desc
        i['user_tag'] = user_tag

        self.kb_append_uniq(self, 'users', i)

        # Analyze if we can get more information from this finding
        self._analyze_finding(i)
예제 #35
0
def enable_dl(response):
    """
    Inspect the phpinfo() output for the enable_dl setting.

    Reports a medium severity vulnerability when enable_dl is On, and an
    informational finding when it is Off.

    :param response: The HTTP response that holds the phpinfo() page
    :return: None, findings are written to the KB
    """
    setting_mo = re.search('enable_dl</td><td class="v">(On|Off)</td>',
                           response.get_body(), re.I)

    if setting_mo is None:
        return

    if setting_mo.group(1) == 'On':
        desc = 'The phpinfo()::enable_dl is on.'
        v = Vuln('PHP enable_dl: On', desc, severity.MEDIUM, response.id,
                 'phpinfo')
        v.set_url(response.get_url())

        kb.kb.append('phpinfo', 'phpinfo', v)
        om.out.vulnerability(v.get_desc(), severity=v.get_severity())
    else:
        i = Info('PHP enable_dl: Off', 'The phpinfo()::enable_dl is off.',
                 response.id, 'phpinfo')
        i.set_url(response.get_url())

        kb.kb.append('phpinfo', 'phpinfo', i)
        om.out.information(i.get_desc())
예제 #36
0
    def find_version_numbers(self, request, response):
        """
        Now i'll check if I can get a version number from the error page
        This is common in apache, tomcat, etc...
        """
        if not (400 < response.get_code() < 600):
            return

        for match, _, _, server in self._multi_re.query(response.body):
            match_string = match.group(0)

            # Each version string is only reported once per scan
            if match_string in self._already_reported_versions:
                continue

            # Save the info obj
            desc = 'An error page sent this %s version: "%s".'
            desc %= (server, match_string)

            i = Info('Error page with information disclosure', desc,
                     response.id, self.get_name())
            i.set_url(response.get_url())
            i.add_to_highlight(server)
            i.add_to_highlight(match_string)

            kb.kb.append(self, 'server', i)
            kb.kb.raw_write(self, 'server', match_string)

            self._already_reported_versions.append(match_string)
예제 #37
0
    def grep(self, request, response):
        """
        Plugin entry point, find the blank bodies and report them.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        # Guard clauses keep the same short-circuit order as the original
        # chained condition
        if response.get_body() != '':
            return

        if request.get_method() not in self.METHODS:
            return

        if response.get_code() in self.HTTP_CODES:
            return

        # Redirects are expected to carry an empty body
        if 'location' in response.get_lower_case_headers():
            return

        url = response.get_url().uri2url()
        if url in self.already_reported:
            return

        self.already_reported.add(url)

        desc = 'The URL: "%s" returned an empty body, this could indicate'\
               ' an application error.'
        desc = desc % response.get_url()

        i = Info('Blank http response body', desc, response.id,
                 self.get_name())
        i.set_url(response.get_url())

        self.kb_append(self, 'blank_body', i)
예제 #38
0
    def _analyze_domain(self, response, script_full_url, script_tag):
        """
        Checks if the domain is the same, or if it's considered secure.
        """
        response_url = response.get_url()
        script_domain = script_full_url.get_domain()

        # Scripts served from the page's own domain are always fine
        if script_domain == response_url.get_domain():
            return

        # Query the multi in to check if any of the domains we loaded
        # previously match against the script domain we found in the HTML.
        # If there is a match it's a third party that we trust.
        for _ in self._secure_domain_multi_in.query(script_domain):
            return

        desc = ('The URL: "%s" has a script tag with a source that points'
                ' to a third party site ("%s"). This practice is not'
                ' recommended, the security of the current site is being'
                ' delegated to the external entity.')
        desc %= (smart_str_ignore(response_url),
                 smart_str_ignore(script_domain))

        i = Info('Cross-domain javascript source', desc, response.id,
                 self.get_name())
        i.set_url(response_url)
        i.add_to_highlight(script_tag.attrib.get('src'))
        i[CrossDomainInfoSet.ITAG] = script_domain

        self.kb_append_uniq_group(self,
                                  'cross_domain_js',
                                  i,
                                  group_klass=CrossDomainInfoSet)
예제 #39
0
    def grep(self, request, response):
        """
        Plugin entry point. Parse the object tags.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        """
        if not response.is_text_or_html():
            return

        url = response.get_url()
        uri = response.get_uri()

        for tag in mp_doc_parser.get_tags_by_filter(response, self.TAGS):
            desc = ('The URL: "%s" has an "%s" tag. We recommend you download'
                    ' the client side code and analyze it manually.')
            desc %= (uri, tag.name)

            i = Info('Browser plugin content', desc, response.id,
                     self.get_name())
            i.set_url(url)
            i.add_to_highlight('<%s' % tag.name)

            self.kb_append_uniq(self, tag.name, i, 'URL')
예제 #40
0
def register_globals(response):
    """
    Inspect the phpinfo() output for the register_globals setting.

    Reports a medium severity vulnerability when register_globals is On,
    and an informational finding when it is Off.

    :param response: The HTTP response that holds the phpinfo() page
    :return: None, findings are written to the KB
    """
    setting_mo = re.search('register_globals</td><td class="v">(On|Off)</td>',
                           response.get_body(), re.I)

    if setting_mo is None:
        return

    if setting_mo.group(1) == 'On':
        desc = 'The phpinfo()::register_globals is on.'
        v = Vuln('PHP register_globals: On', desc, severity.MEDIUM,
                 response.id, 'phpinfo')
        v.set_url(response.get_url())

        kb.kb.append('phpinfo', 'phpinfo', v)
        om.out.vulnerability(v.get_desc(), severity=v.get_severity())
    else:
        i = Info('PHP register_globals: Off',
                 'The phpinfo()::register_globals is off.',
                 response.id, 'phpinfo')
        i.set_url(response.get_url())

        kb.kb.append('phpinfo', 'phpinfo', i)
        om.out.information(i.get_desc())
예제 #41
0
    def grep(self, request, response):
        """
        Plugin entry point.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        if not response.is_text_or_html():
            return

        if not self.symfony_detected(response):
            return

        dom = response.get_dom()
        if dom is None:
            return

        if self.csrf_detected(dom):
            return

        url = response.get_url()

        desc = 'The URL: "%s" seems to be generated by the'\
               ' Symfony framework and contains a form that'\
               ' perhaps has CSRF protection disabled.'
        desc = desc % url

        i = Info('Symfony Framework with CSRF protection disabled',
                 desc, response.id, self.get_name())
        i.set_url(url)
        self.kb_append_uniq(self, 'symfony', i, 'URL')
예제 #42
0
    def end(self):
        """
        This method is called when the plugin wont be used anymore.

        Flush the potential vulnerabilities collected during the scan to
        the KB, skipping any whose URL already appears in another finding.
        """
        for title, desc, _id, url, highlight in self._potential_vulns:
            for info in kb.kb.get_all_findings_iter():
                # This makes sure that if the sqli plugin found a vulnerability
                # in the same URL as we found a detailed error, we won't report
                # the detailed error.
                #
                # If the user fixes the sqli vulnerability and runs the scan again
                # most likely the detailed error will disappear too. If the sqli
                # vulnerability disappears and this one remains, it will appear
                # as a new vulnerability in the second scan.
                if info.get_url() == url:
                    break
            else:
                # for/else: this branch only runs when the inner loop did
                # NOT break, i.e. no existing finding shares this URL
                i = Info(title, desc, _id, self.get_name())
                i.set_url(url)
                i.add_to_highlight(highlight)

                self.kb_append_uniq(self, 'error_page', i)

        # Release the storage backing the collected candidates
        self._potential_vulns.cleanup()
예제 #43
0
    def _verify_content_neg_enabled(self, fuzzable_request):
        """
        Checks if the remote website is vulnerable or not. Saves the result in
        self._content_negotiation_enabled , because we want to perform this test
        only once.

        :return: Tri-state result: True if vulnerable, False once the retry
                 budget is exhausted with negative results, None when this
                 URL could not be used and the caller should retry with the
                 next one.
        """
        if self._content_negotiation_enabled is not None:
            # The test was already performed, we return the old response
            return self._content_negotiation_enabled

        else:
            # We perform the test, for this we need a URL that has a filename,
            # URLs that don't have a filename can't be used for this.
            filename = fuzzable_request.get_url().get_file_name()
            if filename == '':
                return None

            # Keep only the base name: "index.html" -> "index"
            filename = filename.split('.')[0]

            # Now I simply perform the request:
            alternate_resource = fuzzable_request.get_url().url_join(filename)
            headers = fuzzable_request.get_headers()
            # A bogus MIME type forces content negotiation to list the
            # candidate files in an "Alternates" response header
            headers['Accept'] = 'w3af/bar'
            response = self._uri_opener.GET(alternate_resource, headers=headers)

            if response.get_headers().icontains('alternates'):
                # Even if there is only one file, with an unique mime type,
                # the content negotiation will return an alternates header.
                # So this is pretty safe.

                # Save the result internally
                self._content_negotiation_enabled = True

                # Save the result as an info in the KB, for the user to see it:
                desc = ('HTTP Content negotiation is enabled in the remote web'
                        ' server. This could be used to bruteforce file names'
                        ' and find new resources.')

                i = Info('HTTP Content Negotiation enabled', desc, response.id,
                         self.get_name())
                i.set_url(response.get_url())

                kb.kb.append(self, 'content_negotiation', i)
                om.out.information(i.get_desc())
            else:
                om.out.information(
                    'The remote Web server has Content Negotiation disabled.')

                # I want to perform this test a couple of times... so I only
                # return False if that "couple of times" is empty
                self._tries_left -= 1
                if self._tries_left == 0:
                    # Save the FALSE result internally
                    self._content_negotiation_enabled = False
                else:
                    # None tells the plugin to keep trying with the next URL
                    return None

            return self._content_negotiation_enabled
예제 #44
0
    def _report_to_kb_if_needed(self, http_response, parser):
        """
        If the parser did find something, then we report it to the KB.

        :param http_response: The HTTP response that was parsed
        :param parser: The OpenAPI parser instance
        :return: None
        """
        # Spec found but zero endpoints extracted AND errors reported:
        # treat it as a broken specification and tell the user how to fix it
        if not parser.get_api_calls() and parser.get_parsing_errors():
            desc = (
                'An Open API specification was found at: "%s", but the scanner'
                ' was unable to extract any API endpoints. In most cases this'
                ' is because of a syntax error in the Open API specification.\n'
                '\n'
                'Use https://editor.swagger.io/ to inspect the Open API'
                ' specification, identify and fix any issues and try again.\n'
                '\n'
                'The errors found by the parser were:\n'
                '\n - %s')

            desc %= (http_response.get_url(),
                     '\n - '.join(parser.get_parsing_errors()))

            i = Info('Failed to parse Open API specification', desc,
                     http_response.id, self.get_name())
            i.set_url(http_response.get_url())

            kb.kb.append(self, 'open_api', i)
            om.out.error(i.get_desc())

            return

        # Save it to the kb!
        desc = ('An Open API specification was found at: "%s", the scanner'
                ' was able to extract %s API endpoints which will be audited'
                ' for vulnerabilities.')
        desc %= (http_response.get_url(), len(parser.get_api_calls()))

        i = Info('Open API specification found', desc, http_response.id,
                 self.get_name())
        i.set_url(http_response.get_url())

        kb.kb.append(self, 'open_api', i)
        om.out.information(i.get_desc())

        # Warn the user about missing credentials; skip the warning when
        # either auth mechanism was configured in the plugin options
        if self._query_string_auth or self._header_auth:
            return

        desc = (
            'An Open API specification was found at: "%s", but no credentials'
            ' were provided in the `open_api` plugin. The scanner will try'
            ' to audit the identified endpoints but coverage will most likely'
            ' be reduced due to missing authentication.')
        desc %= http_response.get_url()

        i = Info('Open API missing credentials', desc, http_response.id,
                 self.get_name())
        i.set_url(http_response.get_url())

        kb.kb.append(self, 'open_api', i)
        om.out.information(i.get_desc())
예제 #45
0
            return
        except KeyError:
            msg = 'SSL certificate does not have notAfter field.'
            om.out.debug(msg)
            return

        expire_days = (
            date(exp_date.tm_year, exp_date.tm_mon, exp_date.tm_mday) -
            date.today()).days

        if expire_days < self._min_expire_days:
            desc = 'The certificate for "%s" will expire soon.' % domain

            i = Info('Soon to expire SSL certificate', desc, 1,
                     self.get_name())
            i.set_url(url)

            self.kb_append(self, 'ssl_soon_expire', i)

    def _ssl_info_to_kb(self, url, domain):
        try:
            cert, cert_der, cipher = self._get_cert(url, domain)
        except RuntimeError, rte:
            msg = 'Failed to store SSL information to KB due to an error in'\
                  ' the get_cert method: "%s".'
            om.out.debug(msg % rte)
            return

        # Print the SSL information to the log
        desc = 'This is the information about the SSL certificate used for'\
               ' %s site:\n%s' % (domain,
예제 #46
0
        try:
            modified_response = self._uri_opener.GET(ip_url, cache=True)
        except BaseFrameworkException, w3:
            msg = 'An error occurred while fetching IP address URL in ' \
                  ' dns_wildcard plugin: "%s"' % w3
            om.out.debug(msg)
        else:
            if fuzzy_not_equal(modified_response.get_body(),
                               original_response.get_body(), 0.35):
                desc = 'The contents of %s and %s differ.'
                desc = desc % (modified_response.get_uri(),
                               original_response.get_uri())

                i = Info('Default virtual host', desc, modified_response.id,
                         self.get_name())
                i.set_url(modified_response.get_url())

                kb.kb.append(self, 'dns_wildcard', i)
                om.out.information(i.get_desc())

    def _test_DNS(self, original_response, dns_wildcard_url):
        """
        Check if http://www.domain.tld/ == http://domain.tld/
        """
        headers = Headers([('Host', dns_wildcard_url.get_domain())])
        try:
            modified_response = self._uri_opener.GET(
                original_response.get_url(), cache=True, headers=headers)
        except BaseFrameworkException:
            return
        else:
예제 #47
0
    def _get_dead_links(self, fuzzable_request):
        """
        Find every link on a HTML document verify if the domain is reachable or
        not; after that, verify if the web found a different name for the target
        site or if we found a new site that is linked. If the link points to a
        dead site then report it (it could be pointing to some private address
        or something...)

        :param fuzzable_request: The fuzzable request whose response is parsed
                                 for links
        :return: A list of (domain, response_id) tuples for domains whose
                 response differs from both the base and the non-existent
                 responses
        """
        # Get some responses to compare later
        base_url = fuzzable_request.get_url().base_url()
        original_response = self._uri_opener.GET(fuzzable_request.get_uri(),
                                                 cache=True)
        base_response = self._uri_opener.GET(base_url, cache=True)
        base_resp_body = base_response.get_body()

        try:
            dp = parser_cache.dpc.get_document_parser_for(original_response)
        except BaseFrameworkException:
            # Failed to find a suitable parser for the document
            return []

        # Set the non existent response
        non_existent_response = self._get_non_exist(fuzzable_request)
        nonexist_resp_body = non_existent_response.get_body()

        # Note:
        # - With parsed_references I'm 100% that it's really something in the
        #   HTML that the developer intended to add.
        #
        # - The re_references are the result of regular expressions, which in
        #   some cases are just false positives.
        #
        # In this case, and because I'm only going to use the domain name of the
        # URL I'm going to trust the re_references also.
        parsed_references, re_references = dp.get_references()
        parsed_references.extend(re_references)

        res = []

        # NOTE(review): despite its name, _verify_link_domain appears to
        # produce candidate virtual hosts from the references — confirm
        vhosts = self._verify_link_domain(parsed_references)

        for domain, vhost_response in self._send_in_threads(base_url, vhosts):

            vhost_resp_body = vhost_response.get_body()

            # A vhost response that differs from both the base page and the
            # non-existent page is considered a real, distinct virtual host
            if fuzzy_not_equal(vhost_resp_body, base_resp_body, 0.35) and \
            fuzzy_not_equal(vhost_resp_body, nonexist_resp_body, 0.35):
                res.append((domain, vhost_response.id))
            else:
                desc = u'The content of "%s" references a non existent domain:'\
                       u' "%s". This can be a broken link, or an internal'\
                       u' domain name.'
                desc %= (fuzzable_request.get_url(), domain)

                i = Info(u'Internal hostname in HTML link', desc,
                         original_response.id, self.get_name())
                i.set_url(fuzzable_request.get_url())

                kb.kb.append(self, 'find_vhosts', i)
                om.out.information(i.get_desc())

        return res
예제 #48
0
    def audit_phpinfo(self, response):
        """
        Scan for insecure php settings
        :author: Aung Khant (aungkhant[at]yehg.net)
        :return none

        two divisions: vulnerable settings and useful informative settings

        """

        ##### [Vulnerable Settings] #####

        ### [register_globals] ###
        regex_str = 'register_globals</td><td class="v">(On|Off)</td>'
        register_globals = re.search(regex_str, response.get_body(), re.I)
        rg_flag = ''
        if register_globals:
            rg = register_globals.group(1)
            if (rg == 'On'):
                desc = 'The phpinfo()::register_globals is on.'
                v = Vuln('PHP register_globals: On', desc, severity.MEDIUM,
                         response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
            else:
                rg_flag = 'info'
                rg_name = 'PHP register_globals: Off'
                rg_desc = 'The phpinfo()::register_globals is off.'

        ### [/register_globals] ###

        ### [allow_url_fopen] ###
        regex_str = 'allow_url_fopen</td><td class="v">(On|<i>no value</i>)</td>'
        allow_url_fopen = re.search(regex_str, response.get_body(), re.I)
        if allow_url_fopen:
            desc = 'The phpinfo()::allow_url_fopen is enabled.'
            v = Vuln('PHP allow_url_fopen: On', desc, severity.MEDIUM,
                     response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/allow_url_fopen] ###

        ### [allow_url_include] ###
        regex_str = 'allow_url_include</td><td class="v">(On|<i>no value</i>)</td>'
        allow_url_include = re.search(regex_str, response.get_body(), re.I)
        if allow_url_include:
            desc = 'The phpinfo()::allow_url_include is enabled.'
            v = Vuln('PHP allow_url_include: On', desc, severity.MEDIUM,
                     response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/allow_url_include] ###

        ### [display_errors] ###
        regex_str = 'display_errors</td><td class="v">(On|<i>no value</i>)</td>'
        display_errors = re.search(regex_str, response.get_body(), re.I)
        if display_errors:
            desc = 'The phpinfo()::display_errors is enabled.'
            v = Vuln('PHP display_errors: On', desc, severity.MEDIUM,
                     response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/display_errors] ###

        ### [expose_php] ###
        regex_str = 'expose_php</td><td class="v">(On|<i>no value</i>)</td>'
        expose_php = re.search(regex_str, response.get_body(), re.I)
        if expose_php:
            desc = 'The phpinfo()::expose_php is enabled.'
            v = Vuln('PHP expose_php: On', desc, severity.MEDIUM, response.id,
                     self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/expose_php] ###

        ### [lowest_privilege_test] ###
        regex_str = 'User/Group </td><td class="v">(.*?)\((\d.*?)\)/(\d.*?)</td>'
        lowest_privilege_test = re.search(regex_str, response.get_body(), re.I)
        lpt_flag = ''
        if lowest_privilege_test:
            lpt_uname = lowest_privilege_test.group(1)
            lpt_uid = lowest_privilege_test.group(2)
            lpt_uid = int(lpt_uid)
            lpt_gid = lowest_privilege_test.group(3)
            if lpt_uid < 99 or lpt_gid < 99 or \
            re.match('root|apache|daemon|bin|operator|adm', lpt_uname, re.I):

                desc = 'phpinfo()::PHP may be executing as a higher privileged'\
                       ' group. Username: %s, UserID: %s, GroupID: %s.'
                desc = desc % (lpt_uname, lpt_uid, lpt_gid)

                v = Vuln('PHP lowest_privilege_test:fail', desc,
                         severity.MEDIUM, response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
            else:
                lpt_flag = 'info'
                lpt_name = 'privilege:' + lpt_uname
                lpt_desc = 'phpinfo()::PHP is executing under '
                lpt_desc += 'username: '******', '
                lpt_desc += 'userID: ' + str(lpt_uid) + ', '
                lpt_desc += 'groupID: ' + lpt_gid
        ### [/lowest_privilege_test] ###

        ### [disable_functions] ###
        regex_str = 'disable_functions</td><td class="v">(.*?)</td>'
        disable_functions = re.search(regex_str, response.get_body(), re.I)
        if disable_functions:
            secure_df = 8
            df = disable_functions.group(1)
            dfe = df.split(',')
            if (len(dfe) < secure_df):
                desc = 'The phpinfo()::disable_functions are set to few.'
                v = Vuln('PHP disable_functions:few', desc, severity.MEDIUM,
                         response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/disable_functions] ###

        ### [curl_file_support] ###
        regex_str = '<h1 class="p">PHP Version (\d).(\d).(\d)</h1>'
        curl_file_support = re.search(regex_str, response.get_body(), re.I)
        if curl_file_support:
            php_major_ver = curl_file_support.group(1)
            php_minor_ver = curl_file_support.group(2)
            php_rev_ver = curl_file_support.group(3)

            current_ver = php_major_ver + '.' + php_minor_ver + \
                '' + php_rev_ver
            current_ver = float(current_ver)
            php_major_ver = int(php_major_ver)
            php_minor_ver = int(php_minor_ver)
            php_rev_ver = int(php_rev_ver)

            cv4check = float(4.44)
            cv5check = float(5.16)
            curl_vuln = 1

            if (php_major_ver == 4):
                if (current_ver >= cv4check):
                    curl_vuln = 0
            elif (php_major_ver == 5):
                if (current_ver >= cv5check):
                    curl_vuln = 0
            elif (php_major_ver >= 6):
                curl_vuln = 0
            else:
                curl_vuln = 0

            if (curl_vuln == 1):
                desc = 'The phpinfo()::cURL::file_support has a security hole'\
                       ' present in this version of PHP allows the cURL'\
                       ' functions to bypass safe_mode and open_basedir'\
                       ' restrictions.'
                v = Vuln('PHP curl_file_support:not_fixed', desc,
                         severity.MEDIUM, response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/curl_file_support] ###

        ### [cgi_force_redirect] ###
        regex_str = 'cgi_force_redirect</td><td class="v">(.*?)</td>'
        cgi_force_redirect = re.search(regex_str, response.get_body(), re.I)
        if cgi_force_redirect:
            utd = cgi_force_redirect.group(1) + ''
            if (utd != 'On'):
                desc = 'The phpinfo()::CGI::force_redirect is disabled.'
                v = Vuln('PHP cgi_force_redirect: Off', desc, severity.MEDIUM,
                         response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/cgi_force_redirect] ###

        ### [session_cookie_httponly] ###
        regex_str = 'session\.cookie_httponly</td><td class="v">(Off|no|0)</td>'
        session_cookie_httponly = re.search(regex_str, response.get_body(),
                                            re.I)
        if session_cookie_httponly:
            desc = 'The phpinfo()::session.cookie_httponly is off.'
            v = Vuln('PHP session.cookie_httponly: Off', desc, severity.MEDIUM,
                     response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/session_cookie_httponly] ###

        ### [session_save_path] ###
        regex_str = 'session\.save_path</td><td class="v">(<i>no value</i>)</td>'
        session_save_path = re.search(regex_str, response.get_body(), re.I)
        if session_save_path:
            desc = 'The phpinfo()::session.save_path may be set to world-'\
                   'accessible directory.'
            v = Vuln('PHP session_save_path:Everyone', desc, severity.LOW,
                     response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/session_save_path] ###

        ### [session_use_trans] ###
        regex_str = 'session\.use_trans</td><td class="v">(On)</td>'
        session_use_trans = re.search(regex_str, response.get_body(), re.I)
        if session_use_trans:
            desc = 'The phpinfo()::session.use_trans is enabled. This makes'\
                   ' session hijacking easier.'
            v = Vuln('PHP session_use_trans: On', desc, severity.MEDIUM,
                     response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/session_use_trans] ###

        ### [default_charset] ###
        regex_str = 'default_charset</td><td class="v">(Off|no|0)</td>'
        default_charset = re.search(regex_str, response.get_body(), re.I)
        if default_charset:
            desc = 'The phpinfo()::default_charset is set to none. This'\
                   ' makes PHP scripts vulnerable to variable charset'\
                   ' encoding XSS.'
            v = Vuln('PHP default_charset: Off', desc, severity.MEDIUM,
                     response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/default_charset] ###

        ### [enable_dl] ###
        regex_str = 'enable_dl</td><td class="v">(On|Off)</td>'
        enable_dl = re.search(regex_str, response.get_body(), re.I)
        ed_flag = ''
        if enable_dl:
            rg = enable_dl.group(1)
            if (rg == 'On'):
                desc = 'The phpinfo()::enable_dl is on.'
                v = Vuln('PHP enable_dl: On', desc, severity.MEDIUM,
                         response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
            else:
                ed_flag = 'info'
                ed_name = 'PHP enable_dl: Off'
                ed_desc = 'The phpinfo()::enable_dl is off.'
        ### [/enable_dl] ###

        ### [memory_limit] ###
        regex_str = 'memory_limit</td><td class="v">(\d.*?)</td>'
        memory_limit = re.search(regex_str, response.get_body(), re.I)
        if memory_limit:
            secure_ml = 10
            ml = memory_limit.group(1) + ''
            ml = ml.replace('M', '')
            if (ml > secure_ml):
                desc = 'The phpinfo()::memory_limit is set to higher value'\
                       ' (%s).' % memory_limit.group(1)
                v = Vuln('PHP memory_limit:high', desc, severity.MEDIUM,
                         response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/memory_limit] ###

        ### [post_max_size] ###
        regex_str = 'post_max_size</td><td class="v">(\d.*?)</td>'
        post_max_size = re.search(regex_str, response.get_body(),
                                  re.IGNORECASE)
        if post_max_size:
            secure_pms = 20
            pms = post_max_size.group(1) + ''
            pms = pms.replace('M', '')
            pms = int(pms)
            if (pms > secure_pms):
                desc = 'The phpinfo()::post_max_size is set to higher value'\
                       ' (%s).' % post_max_size.group(1)
                v = Vuln('PHP post_max_size:high', desc, severity.LOW,
                         response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/post_max_size] ###

        ### [upload_max_filesize] ###
        regex_str = 'upload_max_filesize</td><td class="v">(\d.*?)</td>'
        upload_max_filesize = re.search(regex_str, response.get_body(),
                                        re.IGNORECASE)
        if upload_max_filesize:
            secure_umf = 20
            umf = upload_max_filesize.group(1) + ''
            umf = umf.replace('M', '')
            umf = int(umf)
            if (umf > secure_umf):
                desc = 'The phpinfo()::upload_max_filesize is set to higher'\
                       ' value (%s).' % upload_max_filesize.group(1)
                v = Vuln('PHP upload_max_filesize:high', desc, severity.LOW,
                         response.id, self.get_name())
                v.set_url(response.get_url())

                kb.kb.append(self, 'phpinfo', v)
                om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/upload_max_filesize] ###

        ### [upload_tmp_dir] ###
        regex_str = 'upload_tmp_dir</td><td class="v">(<i>no value</i>)</td>'
        upload_tmp_dir = re.search(regex_str, response.get_body(), re.I)
        if upload_tmp_dir:
            desc = 'The phpinfo()::upload_tmp_dir may be set to world-'\
                   'accessible directory.'
            v = Vuln('PHP upload_tmp_dir:Everyone', desc, severity.LOW,
                     response.id, self.get_name())
            v.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', v)
            om.out.vulnerability(v.get_desc(), severity=v.get_severity())
        ### [/upload_tmp_dir] ###

        ##### [/Vulnerable Settings] #####
        ##### [Useful Informative Settings] #####
        ### [privilege] ###
        if lpt_flag == 'info':
            i = Info(lpt_name, lpt_desc, response.id, self.get_name())
            i.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', i)
            om.out.information(i.get_desc())
        ### [/privilege] ###

        ### [register_globals]###
        if rg_flag == 'info':
            i = Info(rg_name, rg_desc, response.id, self.get_name())
            i.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', i)
            om.out.information(i.get_desc())
        ### [/register_globals]###

        ### [enable_dl]###
        if ed_flag == 'info':
            i = Info(ed_name, ed_desc, response.id, self.get_name())
            i.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', i)
            om.out.information(i.get_desc())
        ### [/enable_dl]###

        ### [file_uploads] ###
        regex_str = 'file_uploads</td><td class="v">(On|<i>no value</i>)</td>'
        file_uploads = re.search(regex_str, response.get_body(), re.IGNORECASE)
        if file_uploads:
            desc = 'The phpinfo()::file_uploads is enabled.'
            i = Info('PHP file_uploads: On', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', i)
            om.out.information(i.get_desc())
        ### [/file_uploads] ###

        ### [magic_quotes_gpc] ###
        regex_str = 'magic_quotes_gpc</td><td class="v">(On|Off)</td>'
        magic_quotes_gpc = re.search(regex_str, response.get_body(), re.I)
        if magic_quotes_gpc:
            mqg = magic_quotes_gpc.group(1)

            if (mqg == 'On'):
                desc = 'The phpinfo()::magic_quotes_gpc is on.'
                i = Info('PHP magic_quotes_gpc: On', desc, response.id,
                         self.get_name())

            else:
                desc = 'The phpinfo()::magic_quotes_gpc is off.'
                i = Info('PHP magic_quotes_gpc: Off', desc, response.id,
                         self.get_name())

            i.set_url(response.get_url())
            kb.kb.append(self, 'phpinfo', i)
            om.out.information(i.get_desc())

        ### [/magic_quotes_gpc] ###

        ### [open_basedir] ###
        regex_str = 'open_basedir</td><td class="v">(.*?)</td>'
        open_basedir = re.search(regex_str, response.get_body(), re.I)

        if open_basedir:
            obd = open_basedir.group(1)

            if (obd == '<i>no value</i>'):
                desc = 'The phpinfo()::open_basedir is not set.'
                i = Info('PHP open_basedir:disabled', desc, response.id,
                         self.get_name())

            else:
                desc = 'The phpinfo()::open_basedir is set to %s.'
                desc = desc % open_basedir.group(1)
                i = Info('PHP open_basedir:enabled', desc, response.id,
                         self.get_name())

            i.set_url(response.get_url())

        kb.kb.append(self, 'phpinfo', i)
        om.out.information(i.get_desc())
        ### [/open_basedir] ###

        ### [session_hash_function] ###
        regex_str = 'session\.hash_function</td><td class="v">(.*?)</td>'
        session_hash_function = re.search(regex_str, response.get_body(), re.I)
        if session_hash_function:

            if session_hash_function.group(1) == 0\
            or session_hash_function.group(1) != 'no':
                desc = 'The phpinfo()::session.hash_function use md5 algorithm.'
                i = Info('PHP session.hash_function:md5', desc, response.id,
                         self.get_name())
            else:
                desc = 'The phpinfo()::session.hash_function use sha algorithm.'
                i = Info('PHP session.hash_function:sha', desc, response.id,
                         self.get_name())

            i.set_url(response.get_url())

            kb.kb.append(self, 'phpinfo', i)
            om.out.information(i.get_desc())
예제 #49
0
        """
        This method stores the scan result in the KB, called as a callback for
        the _scan_http_response method.
        
        :param response: The HTTP response
        :param scan_result: The result object from _scan_http_response
        :return: None
        """
        if scan_result.found:
        
            desc = 'ClamAV identified malware at URL: "%s", the matched'\
                   ' signature name is "%s".'
            desc = desc % (response.get_url(), scan_result.signature)
    
            i = Info('Malware identified', desc, response.id, self.get_name())
            i.set_url(response.get_url())
            
            self.kb_append(self, 'malware', i)

    def _parse_scan_result(self, result):
        """
        {'stream': ('FOUND', 'Eicar-Test-Signature')}
        {u'stream': (u'OK', None)}

        :return: A namedtuple with the scan result
        """
        try:
            signature = result['stream'][1]
            found = result['stream'][0] == 'FOUND'
            return ScanResult(found, signature)
        except:
예제 #50
0
파일: dav.py 프로젝트: chenbremer/w3af-1
    def _PUT(self, domain_path):
        """
        Test the HTTP PUT method: upload a small random file to the given
        directory and try to read it back.

        :param domain_path: URL object for the directory being tested
        :return: None, all findings are written to the knowledge base
        """
        # Upload a random file name with random contents
        target_url = domain_path.url_join(rand_alpha(5))
        payload = rand_alnum(6)
        put_headers = Headers([('content-type', 'text/plain')])

        put_response = self._uri_opener.PUT(target_url,
                                            data=payload,
                                            headers=put_headers)

        # Retrieve the resource to verify whether the upload succeeded
        get_response = self._uri_opener.GET(target_url, cache=True)

        if get_response.get_body() == payload:
            msg = ('File upload with HTTP PUT method was found at resource:'
                   ' "%s". A test file was uploaded to: "%s".')
            msg = msg % (domain_path, get_response.get_url())

            v = Vuln('Publicly writable directory', msg, severity.HIGH,
                     [put_response.id, get_response.id], self.get_name())

            v.set_url(target_url)
            v.set_method('PUT')

            self.kb_append(self, 'dav', v)
            return

        put_code = put_response.get_code()

        # A 500 usually means the DAV extension itself failed in some way
        if put_code == 500:
            msg = ('DAV seems to be incorrectly configured. The web server'
                   ' answered with a 500 error code. In most cases, this means'
                   ' that the DAV extension failed in some way. This error was'
                   ' found at: "%s".' % put_response.get_url())

            i = Info('DAV incorrect configuration', msg, get_response.id,
                     self.get_name())

            i.set_url(target_url)
            i.set_method('PUT')

            self.kb_append(self, 'dav', i)

        # A 403 means PUT works but the directory is not web-server writable
        elif put_code == 403:
            # handle false positive when PUT method is not supported
            # https://github.com/andresriancho/w3af/pull/2724/files
            if 'supported' in put_response.get_body().lower():
                return

            msg = ('DAV seems to be correctly configured and allowing you to'
                   ' use the PUT method but the directory does not have the'
                   ' right permissions that would allow the web server to'
                   ' write to it. This error was found at: "%s".')
            msg = msg % put_response.get_url()

            i = Info('DAV incorrect configuration', msg,
                     [put_response.id, get_response.id], self.get_name())

            i.set_url(target_url)
            i.set_method('PUT')

            self.kb_append(self, 'dav', i)
예제 #51
0
    def _analyze_crossdomain_clientaccesspolicy(self, url, response, file_name):
        """
        Inspect a RIA policy file (crossdomain.xml / clientaccesspolicy.xml)
        and report invalid XML or overly-permissive access settings.

        :param url: The requested URL (overwritten before use in the original
                    loop; effectively unused as a parameter)
        :param response: The HTTP response holding the policy file body
        :param file_name: Policy file name, must be a key of self.FILE_TAG_ATTR
        :return: None, findings are written to the knowledge base
        """
        # https://github.com/andresriancho/w3af/issues/14491
        if file_name not in self.FILE_TAG_ATTR:
            return

        try:
            dom = xml.dom.minidom.parseString(response.get_body())
        except Exception:
            # Report this, it may be interesting for the final user
            # not a vulnerability per-se... but... it's information after all
            markers = ('allow-access-from',
                       'cross-domain-policy',
                       'cross-domain-access')
            if any(marker in response.get_body() for marker in markers):
                desc = 'The "%s" file at: "%s" is not a valid XML.'
                desc %= (file_name, response.get_url())

                i = Info('Invalid RIA settings file', desc, response.id,
                         self.get_name())
                i.set_url(response.get_url())

                kb.kb.append(self, 'info', i)
                om.out.information(i.get_desc())

            return

        tag, attribute = self.FILE_TAG_ATTR.get(file_name)

        # NOTE: renamed loop variable so it no longer shadows the `url` param
        for access_elem in dom.getElementsByTagName(tag):
            access_url = access_elem.getAttribute(attribute)

            if access_url == '*':
                # Wildcard: any site may interact, this is the dangerous case
                desc = 'The "%s" file at "%s" allows flash / silverlight'\
                       ' access from any site.'
                desc %= (file_name, response.get_url())

                v = Vuln('Insecure RIA settings', desc, severity.LOW,
                         response.id, self.get_name())
                v.set_url(response.get_url())
                v.set_method('GET')

                kb.kb.append(self, 'vuln', v)
                om.out.vulnerability(v.get_desc(),
                                     severity=v.get_severity())
            else:
                # Specific origin: informative only
                desc = 'The "%s" file at "%s" allows flash / silverlight'\
                       ' access from "%s".'
                desc %= (file_name, response.get_url(), access_url)

                i = Info('Cross-domain allow ACL', desc, response.id,
                         self.get_name())
                i.set_url(response.get_url())
                i.set_method('GET')

                kb.kb.append(self, 'info', i)
                om.out.information(i.get_desc())

            # Both branches feed the response back into the crawl queue
            fr = FuzzableRequest.from_http_response(response)
            self.output_queue.put(fr)
예제 #52
0
                    response)
            except BaseFrameworkException:
                # Failed to find a suitable parser for the document
                pass
            else:
                # Search for email addresses
                for mail in document_parser.get_emails(self._domain_root):
                    if mail not in self._accounts:
                        self._accounts.append(mail)

                        desc = 'The mail account: "%s" was found at: "%s".'
                        desc = desc % (mail, page.URL)

                        i = Info('Email account', desc, response.id,
                                 self.get_name())
                        i.set_url(page.URL)
                        i['mail'] = mail
                        i['user'] = mail.split('@')[0]
                        i['url_list'] = set([
                            page.URL,
                        ])

                        self.kb_append('emails', 'emails', i)
                        self.kb_append('finger_bing', 'emails', i)

    def get_options(self):
        """
        :return: A list of option objects for this plugin.
        """
        ol = OptionList()
예제 #53
0
    def grep(self, request, response):
        """
        Find HTML5 WebSocket links (ws:// and wss://) in HTML and JS bodies.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        """
        # Only HTML / JavaScript responses can contain websocket links
        if not response.is_text_or_html():
            return

        # Cheap substring check first; the parsing below is much slower
        body = response.body
        if WS_URL not in body and WSS_URL not in body:
            return

        # if websockets usage signs were found we need to get the exact url
        url = request.get_url()

        if JavaScriptParser(response).can_parse(response):
            # Pure javascript document: scan the whole body
            ws_links = find_websockets_links(body)
        else:
            # HTML document: only <script> tag contents are interesting
            dom = response.get_dom()

            if dom is None:
                return

            script_tag_xpath = etree.XPath(SCRIPT_TAG_XPATH)
            ws_links = {ws_link
                        for script in script_tag_xpath(dom)
                        for ws_link in find_websockets_links(script.text)}

        # if we didn't find any link manual inspection is needed
        if not ws_links:
            # TODO: In some scenarios this message is repeated multiple, since
            #       it's a debug() message we don't care that much.
            msg = 'The URL "%s" has signs of HTML5 WebSockets usage, ' \
                  'but couldn\'t find any useful links.\n' \
                  'Perhaps links are dynamically created using javascript.\n' \
                  'Manual inspection of the page is recommended.'
            om.out.debug(msg % url)

        for ws_link in ws_links:
            desc = 'The URL: "%s" uses HTML5 websocket "%s"'
            desc = desc % (url, ws_link)

            i = Info('HTML5 WebSocket detected', desc, response.id,
                     self.get_name())
            i.set_url(url)
            i[WebSocketInfoSet.ITAG] = ws_link

            # Store found links, grouped into a single InfoSet per link
            self.kb_append_uniq_group(self,
                                      'websockets_links',
                                      i,
                                      group_klass=WebSocketInfoSet)
예제 #54
0
파일: motw.py 프로젝트: webvul/webfuzzer
 def create_info(self, desc, response, motw_match):
     """
     Build an Info instance for a "Mark of the web" match.

     :param desc: Finding description text
     :param response: The HTTP response where the MOTW comment was found
     :param motw_match: The regex match object for the MOTW comment
     :return: A configured Info instance, ready to be stored in the kb
     """
     info = Info('Mark of the web', desc, response.id, self.get_name())
     info.set_url(response.get_url())
     info.add_to_highlight(motw_match.group(0))
     return info
예제 #55
0
파일: strategy.py 프로젝트: tim124058/w3af
                    if target_proto != redirect_proto:
                        site_does_redirect = True
                        break

                    # Check if the domain was changed:
                    target_domain = url.get_domain()
                    redirect_domain = redirect_url.get_domain()

                    if target_domain != redirect_domain:
                        site_does_redirect = True
                        break

        if site_does_redirect:
            name = 'Target redirect'
            info = Info(name, msg, http_response.id, name)
            info.set_url(url)
            info.add_to_highlight(http_response.get_redir_url().url_string)

            kb.kb.append_uniq('core', 'core', info)
            om.out.report_finding(info)

        return site_does_redirect

    def _setup_404_detection(self):
        #
        #    NOTE: I need to perform this test here in order to avoid some weird
        #    thread locking that happens when the webspider calls is_404, and
        #    because I want to initialize the is_404 database in a controlled
        #    try/except block.
        #
        from w3af.core.controllers.core_helpers.fingerprint_404 import is_404
예제 #56
0
    def _grep_worker(self, request, response, kb_key, domain=None):
        """
        Extract email addresses from the HTTP response body and store them
        in the knowledge base, merging with previously found addresses.

        Helper method for using in self.grep()

        :param request: The HTTP request, used to discard addresses we sent
        :param response: The HTTP response to parse for email addresses
        :param kb_key: Knowledge base dict key to store new findings under
        :param domain: Target domain for get_emails filter
        :return: None
        """
        try:
            dp = parser_cache.dpc.get_document_parser_for(response)
        except BaseFrameworkException:
            # No parser can handle this response type; nothing to extract
            msg = 'Failed to get document parser for "%s" at get_emails.'
            om.out.debug(msg % response.get_url())
            return

        emails = dp.get_emails(domain)

        for mail_address in emails:
            # Reduce false positives: skip addresses that appear in the
            # request we sent (echoed back, not actually disclosed)
            if request.sent(mail_address):
                continue

            # Email addresses are case insensitive; normalize for dedup
            mail_address = mail_address.lower()
            url = response.get_url()

            # Build a mail -> Info lookup of everything already in the kb.
            # NOTE(review): rebuilt on every loop iteration; could be hoisted
            # above the loop if kb contents don't change mid-iteration —
            # TODO confirm before changing.
            email_map = {}
            for info_obj in kb.kb.get('emails', 'emails'):
                mail_string = info_obj['mail']
                email_map[mail_string] = info_obj

            if mail_address not in email_map:
                # Create a new info object, and report it
                desc = 'The mail account: "%s" was found in: \n- %s'\
                       ' - In request with id: %s.'
                desc = desc % (mail_address, url, response.id)

                i = Info('Exposed email address', desc, response.id,
                         self.get_name())
                i.set_url(url)
                i['mail'] = mail_address
                i['url_list'] = set([
                    url,
                ])
                i['user'] = mail_address.split('@')[0]
                i.add_to_highlight(mail_address)

                self.kb_append('emails', kb_key, i)

            else:

                # Get the corresponding info object.
                i = email_map[mail_address]
                # And work
                if url not in i['url_list']:
                    # This email was already found in some other URL
                    # I'm just going to modify the url_list and the description
                    # message of the information object.
                    id_list_of_info = i.get_id()
                    id_list_of_info.append(response.id)
                    i.set_id(id_list_of_info)
                    i.set_url(url)
                    desc = i.get_desc()
                    desc += '\n- %s - In request with id: %s.'
                    desc = desc % (url, response.id)
                    i.set_desc(desc)
                    i['url_list'].add(url)