Example #1
    def run(self, info):

        # Parse original URL
        m_url = info.url
        m_url_parts = info.parsed_url

        # If file is a javascript, css or image, do not run
        if info.parsed_url.extension[1:] in (
                'css', 'js', 'jpeg', 'jpg', 'png', 'gif',
                'svg') or not m_url_parts.extension:
            Logger.log_more_verbose("Skipping URL: %s" % m_url)
            return

        Logger.log_more_verbose("Bruteforcing URL: %s" % m_url)

        #
        # Load wordlist for changing directories
        #
        # COMMON
        m_urls = make_url_changing_folder_name(m_url_parts)

        # Generates the error page
        m_error_response = get_error_page(m_url)

        # Create the matching analyzer
        try:
            m_store_info = MatchingAnalyzer(m_error_response.raw_data,
                                            min_ratio=0.65)
        except ValueError, e:
            Logger.log_error(
                "There is no information to analyze when creating the matcher: '%s'"
                % e)
            return
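MatchingAnalyzer is part of GoLismero and its implementation is not shown in these examples. As a rough illustration of what the min_ratio=0.65 threshold does, here is a minimal sketch of baseline comparison against a known error page, using only the standard library (all names in it are illustrative assumptions, not GoLismero's API):

# Minimal sketch of the min_ratio idea: responses whose body is highly
# similar to a known error page are treated as misses. This is NOT
# GoLismero's MatchingAnalyzer; all names here are illustrative.
from difflib import SequenceMatcher

def similarity(body_a, body_b):
    return SequenceMatcher(None, body_a, body_b).ratio()

error_page = "<html><body>404 Not Found (/a)</body></html>"
soft_404   = "<html><body>404 Not Found (/b)</body></html>"
real_page  = "Completely different content: an admin login form"

# The disguised error page scores near 1.0; the real page scores much
# lower, so a threshold such as min_ratio=0.65 separates the two.
print(similarity(error_page, soft_404))   # ~0.98 -> discard as error page
print(similarity(error_page, real_page))  # much lower -> keep as finding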
Example #2
    def recv_info(self, info):

        # Parse original URL
        m_url = info.url
        m_url_parts = info.parsed_url

        Logger.log_more_verbose("Bruteforcing URL: %s" % m_url)

        # If file is a javascript, css or image, do not run
        if info.parsed_url.extension[1:] in (
                'css', 'js', 'jpeg', 'jpg', 'png', 'gif',
                'svg') or not m_url_parts.extension:
            Logger.log_more_verbose("Skipping URL: %s" % m_url)
            return

        #
        # Load wordlist for prefixes
        #
        # COMMON
        m_urls = make_url_with_prefixes(get_list_from_wordlist("common_prefixes"), m_url_parts)

        # Generates the error page
        m_error_response = get_error_page(m_url)

        # Create the matching analyzer
        try:
            m_store_info = MatchingAnalyzer(m_error_response, min_ratio=0.65)
        except ValueError:
            # There is no information
            return

        # Create the partial funs
        _f = partial(process_url,
                     severity_vectors['prefixes'],
                     get_http_method(m_url),
                     m_store_info,
                     self.update_status,
                     len(m_urls))

        # Process the URLs
        for i, l_url in enumerate(m_urls):
            _f((i, l_url))


        # Generate and return the results.
        return generate_results(m_store_info.unique_texts)
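The partial call above freezes the leading arguments of process_url so that only the (index, url) pair varies per iteration. A self-contained sketch of the same pattern, with hypothetical stand-ins for the GoLismero callables:

# Sketch of the functools.partial pattern used above; process_candidate
# is a hypothetical stand-in for process_url, and the lambda stands in
# for self.update_status.
from functools import partial

def process_candidate(method, total, update_status, args):
    index, url = args
    update_status(progress=float(index * 100) / total)
    print("%s %s" % (method, url))

urls = ["http://example.com/admin", "http://example.com/backup"]

_f = partial(process_candidate, "GET", len(urls),
             lambda progress: None)  # hypothetical no-op progress callback

for i, url in enumerate(urls):
    _f((i, url))  # only the varying (index, url) pair changes per call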
Example #3
                try:
                    tmp_u = urljoin(m_url, l_wo)
                except ValueError, e:
                    Logger.log_error(
                        "Failed to parse key from wordlist '%s': %s" % (l_wo, e))
                    continue

                m_urls_update(tmp_u)

        Logger.log_verbose("Loaded %s URLs to test." % len(urls))

        # Generates the error page
        error_response = get_error_page(m_url)

        # Create the matching analyzer
        try:
            store_info = MatchingAnalyzer(error_response.raw_data,
                                          min_ratio=0.65)
        except ValueError, e:
            Logger.log_error(
                "There is no information to analyze when creating the matcher: '%s'"
                % e)
            return

        # Create the partial funs
        _f = partial(process_url, severity_vectors['predictables'],
                     get_http_method(m_url), store_info, self.update_status,
                     len(urls))

        # Process the URLs
        for i, l_url in enumerate(urls):
            _f((i, l_url))
Example #4
        #
        # Filter results
        #
        # Delete repeated
        m_discovered_urls = set(m_discovered_urls)

        # Generating error page
        m_error_page          = generate_error_page_url(m_url_robots_txt)
        m_response_error_page = HTTP.get_url(m_error_page, callback=self.check_response)

        if m_response_error_page:
            m_return.append(m_response_error_page)

            # Analyze results
            match = {}
            m_analyzer = MatchingAnalyzer(m_response_error_page.data)
            m_total = len(m_discovered_urls)
            for m_step, l_url in enumerate(m_discovered_urls):

                # Update progress only on odd iterations
                if m_step % 2:
                    progress = (float(m_step * 100) / m_total)
                    self.update_status(progress=progress)
                l_url = fix_url(l_url, m_url)
                if l_url in Config.audit_scope:
                    l_p = None
                    try:
                        l_p = HTTP.get_url(l_url, callback=self.check_response)  # FIXME handle exceptions!
                    except Exception:
                        if l_p:
                            discard_data(l_p)
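The FIXME in the snippet above flags the overly broad exception handler, which swallows everything. GoLismero's own exception types are not visible in these examples, but as a generic sketch of the narrower handling the FIXME asks for, using only the standard library:

# Generic sketch of narrower exception handling; this does not use
# GoLismero's HTTP layer, only the standard library, so it is an
# illustration of the principle rather than a drop-in fix.
try:
    from urllib.request import urlopen      # Python 3
    from urllib.error import URLError
except ImportError:
    from urllib2 import urlopen, URLError   # Python 2, as in the examples

def fetch(url):
    try:
        return urlopen(url, timeout=5).read()
    except URLError as e:                    # narrow, named exception type
        print("Skipping %s: %s" % (url, e))
        return None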
Example #5
                continue


        #
        # Filter results
        #

        # Generating error page
        m_error_page          = generate_error_page_url(m_url_robots_txt)
        m_response_error_page = HTTP.get_url(m_error_page, callback=self.check_response)
        if m_response_error_page:
            m_return.append(m_response_error_page)

            # Analyze results
            match = {}
            m_analyzer = MatchingAnalyzer(m_response_error_page.data)
            for l_url in set(m_discovered_urls):
                l_url = fix_url(l_url, m_url)
                if l_url in Config.audit_scope:
                    l_p = HTTP.get_url(l_url, callback=self.check_response)  # FIXME handle exceptions!
                    if l_p:
                        match[l_url] = l_p
                        m_analyzer.append(l_p.data, url=l_url)

            # Generate results
            for i in m_analyzer.unique_texts:
                l_url = i.url
                l_p = match[l_url]
                m_result = Url(l_url, referer=m_url)
                m_result.add_information(l_p)
                m_return.append(m_result)
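unique_texts on the analyzer yields one representative per cluster of near-identical response bodies, which is why match[l_url] can be consulted afterwards. A rough, self-contained sketch of that deduplication step (illustrative names, with difflib standing in for the real matcher):

# Rough sketch of keeping one URL per cluster of near-identical bodies.
# Illustrative only; not the MatchingAnalyzer implementation.
from difflib import SequenceMatcher

def unique_texts(pages, min_ratio=0.65):
    # pages: list of (url, body) tuples. Keep the first page of each
    # cluster of mutually similar bodies; fold near-duplicates into it.
    kept = []
    for url, body in pages:
        if not any(SequenceMatcher(None, body, kept_body).ratio() >= min_ratio
                   for _, kept_body in kept):
            kept.append((url, body))
    return kept

pages = [
    ("http://example.com/a", "It works! This is page A."),
    ("http://example.com/b", "It works! This is page B."),  # near-duplicate
    ("http://example.com/c", "A completely different directory listing"),
]
for url, _ in unique_texts(pages):
    print(url)  # prints /a and /c; /b is folded into /a's cluster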
Example #6
        # Filter results
        #
        # Delete repeated
        m_discovered_urls = set(m_discovered_urls)

        # Generating error page
        m_error_page = generate_error_page_url(m_url_robots_txt)
        m_response_error_page = HTTP.get_url(m_error_page,
                                             callback=self.check_response)

        if m_response_error_page:
            m_return.append(m_response_error_page)

            # Analyze results
            match = {}
            m_analyzer = MatchingAnalyzer(m_response_error_page.data)
            m_total = len(m_discovered_urls)
            for m_step, l_url in enumerate(m_discovered_urls):

                # Update progress only on odd iterations
                if m_step % 2:
                    progress = (float(m_step * 100) / m_total)
                    self.update_status(progress=progress)
                l_url = fix_url(l_url, m_url)
                if l_url in Config.audit_scope:
                    l_p = None
                    try:
                        l_p = HTTP.get_url(l_url,
                                           callback=self.check_response)  # FIXME handle exceptions!
                    except Exception:
                        if l_p:
                            discard_data(l_p)
Example #7
    def recv_info(self, info):

        m_url = info.url

        Logger.log_more_verbose("Start to process URL: %r" % m_url)

        #
        # Get the remote web server fingerprint
        #
        m_webserver_finger = info.get_associated_informations_by_category(WebServerFingerprint.information_type)

        m_wordlist = set()
        # Is there any fingerprinting information?
        if m_webserver_finger:

            m_webserver_finger = m_webserver_finger.pop()

            m_server_canonical_name = m_webserver_finger.name_canonical
            m_servers_related       = m_webserver_finger.related # Set of related web servers

            #
            # Load wordlists
            #
            m_wordlist_update  = m_wordlist.update

            # Common wordlist
            try:
                w = Config.plugin_extra_config["common"]
                m_wordlist_update([l_w for l_w in w.itervalues()])
            except KeyError:
                pass


            # Wordlist of server name
            try:
                w = Config.plugin_extra_config["%s_predictables" % m_server_canonical_name]
                m_wordlist_update([l_w for l_w in w.itervalues()])
            except KeyError:
                pass

            # Wordlists for servers related to the one found
            try:
                for l_servers_related in m_servers_related:
                    w = Config.plugin_extra_config["%s_predictables" % l_servers_related]
                    m_wordlist_update([l_w for l_w in w.itervalues()])
            except KeyError:
                pass

        else:

            # Common wordlists
            try:
                w = Config.plugin_extra_config["common"]
                m_wordlist.update([l_w for l_w in w.itervalues()])
            except KeyError:
                pass


        # Load content of wordlists
        m_urls           = set()
        m_urls_update    = m_urls.update

        # Fixed Url
        m_url_fixed      = m_url if m_url.endswith("/") else "%s/" % m_url

        for l_w in m_wordlist:
            # Use a copy of the wordlist to avoid modifying the original source
            l_loaded_wordlist = WordListLoader.get_advanced_wordlist_as_list(l_w)

            m_urls_update((urljoin(m_url_fixed, (l_wo[1:] if l_wo.startswith("/") else l_wo)) for l_wo in l_loaded_wordlist))

        # Generates the error page
        m_error_response = get_error_page(m_url)

        # Create the matching analyzer
        try:
            m_store_info = MatchingAnalyzer(m_error_response, min_ratio=0.65)
        except ValueError:
            # There is no information
            return

        # Create the partial funs
        _f = partial(process_url,
                     severity_vectors['predictables'],
                     get_http_method(m_url),
                     m_store_info,
                     self.update_status,
                     len(m_urls))

        # Process the URLs
        for i, l_url in enumerate(m_urls):
            _f((i, l_url))

        # Generate and return the results.
        return generate_results(m_store_info.unique_texts)
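The m_url_fixed normalization in Example #7 matters because urljoin treats a base URL without a trailing slash as ending in a file name, and replaces that last segment instead of descending into it. A quick standard-library illustration:

# Why Example #7 appends a trailing slash before calling urljoin.
try:
    from urllib.parse import urljoin   # Python 3
except ImportError:
    from urlparse import urljoin       # Python 2, as used in the examples

base = "http://example.com/app"
print(urljoin(base, "admin/"))         # http://example.com/admin/      (replaced)
print(urljoin(base + "/", "admin/"))   # http://example.com/app/admin/  (descended)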
Example #8
                continue


        #
        # Filter results
        #

        # Generating error page
        m_error_page          = generate_error_page_url(m_url_robots_txt)
        m_response_error_page = HTTP.get_url(m_error_page, callback=self.check_response)
        if m_response_error_page:
            m_return.append(m_response_error_page)

            # Analyze results
            match = {}
            m_analyzer = MatchingAnalyzer(m_response_error_page.data)
            m_discovered_urls = set(m_discovered_urls)
            m_total = len(m_discovered_urls)
            for m_step, l_url in enumerate(m_discovered_urls):
                progress = (float(m_step * 100) / m_total)
                self.update_status(progress=progress)
                l_url = fix_url(l_url, m_url)
                if l_url in Config.audit_scope:
                    l_p = HTTP.get_url(l_url, callback=self.check_response)  # FIXME handle exceptions!
                    if l_p:
                        match[l_url] = l_p
                        m_analyzer.append(l_p.data, url=l_url)

            # Generate results
            for i in m_analyzer.unique_texts:
                l_url = i.url
                l_p = match[l_url]
                m_result = Url(l_url, referer=m_url)
                m_result.add_information(l_p)
                m_return.append(m_result)
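Note the difference in progress reporting: Examples #4 and #6 throttle self.update_status to odd iterations, while Example #8 reports on every step. A tiny sketch of the percentage math with a hypothetical report() callback in place of update_status:

def report(progress):                  # hypothetical stand-in for update_status
    print("%.1f%%" % progress)

urls = ["u%d" % n for n in range(6)]
total = len(urls)

for step, url in enumerate(urls):
    if step % 2:                       # throttle: only odd iterations report
        report(float(step * 100) / total)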