Example No. 1
    def __detect_wordpress_installation(self, url, wordpress_urls):
        """
        Try to detect a wordpress instalation in the current path.

        :param url: URL where try to find the WordPress installation.
        :type url: str

        :param wordpress_urls: string with wordlist name with WordPress URLs.
        :type wordpress_urls: str

        :return: True if wordpress installation found. False otherwise.
        :rtype: bool
        """
        Logger.log_more_verbose(
            "Detecting Wordpress instalation in URI: '%s'." % url)
        total_urls = 0
        urls_found = 0

        error_page = get_error_page(url).raw_data

        for u in WordListLoader.get_wordlist(wordpress_urls):
            total_urls += 1
            tmp_url = urljoin(url, u)

            r = HTTP.get_url(tmp_url, use_cache=False)
            if r.status == "200":

                # Try to detect non-default error pages
                ratio = get_diff_ratio(r.raw_response, error_page)
                if ratio < 0.35:
                    urls_found += 1

            discard_data(r)

        # If more than 85% of the tested URLs were found, it is WordPress.
        if total_urls and (urls_found / float(total_urls)) >= 0.85:
            return True

        # Otherwise, run one last test: the wp-admin login redirect.
        url_wp_admin = urljoin(url, "wp-admin/")

        try:
            p = HTTP.get_url(url_wp_admin,
                             use_cache=False,
                             allow_redirects=False)
        except Exception:
            return False
        if not p:
            return False
        discard_data(p)

        return (p.status == "302" and
                "wp-login.php?redirect_to=" in p.headers.get("Location", ""))
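The helper get_diff_ratio used here and in several later examples returns a
similarity ratio between two response bodies. A minimal sketch of such a
helper, assuming it simply wraps difflib (the framework's real implementation
may normalize or sample its input):

    from difflib import SequenceMatcher

    def get_diff_ratio_sketch(text1, text2):
        # 0.0 means completely different, 1.0 means identical.
        return SequenceMatcher(None, text1 or "", text2 or "").ratio()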
Example No. 2
def process_url(risk_level, method, matcher, updater_func, total_urls, url):
    """
    Checks if a URL exists.

    :param risk_level: risk level of the tested URL, if discovered.
    :type risk_level: int

    :param method: string with HTTP method used.
    :type method: str

    :param matcher: instance of MatchingAnalyzer object.
    :type matcher: `MatchingAnalyzer`

    :param updater_func: update_status function to send updates
    :type updater_func: update_status

    :param total_urls: total number of URLs to process globally.
    :type total_urls: int

    :param url: a tuple with data: (index, the URL to process)
    :type url: tuple(int, str)
    """
    i, url = url

    updater_func((float(i) * 100.0) / float(total_urls))
    # Logger.log_more_verbose("Trying to discover URL %s" % url)

    # Get URL
    p = None
    try:
        p = HTTP.get_url(url, use_cache=False, method=method)
        if p:
            discard_data(p)
    except Exception, e:
        Logger.log_error_more_verbose("Error while processing: '%s': %s" % (url, str(e)))
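A hypothetical driver for this worker, assuming matcher (a MatchingAnalyzer)
and update_status come from the calling plugin's context:

    from functools import partial

    urls = ["http://www.example.com/admin/", "http://www.example.com/backup/"]
    worker = partial(process_url, 1, "GET", matcher, update_status, len(urls))
    for indexed_url in enumerate(urls):
        worker(indexed_url)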
Example No. 3
def get_http_method(url):
    """
    This function determinates if the method HEAD is available. To do that, compare between two responses:
    - One with GET method
    - One with HEAD method

    If both are seem more than 90%, the response are the same and HEAD method are not allowed.
    """

    m_head_response = HTTP.get_url(url, method="HEAD")  # FIXME handle exceptions!
    discard_data(m_head_response)

    m_get_response  = HTTP.get_url(url)  # FIXME handle exceptions!
    discard_data(m_get_response)

    # Check that the HEAD response differs from the GET response, to ensure the results are valid
    return "HEAD" if HTTP_response_headers_analyzer(m_head_response.headers, m_get_response.headers) < 0.90 else "GET"
Example No. 4
def find_htm_file(url):
    new_file = []
    for file_name in ['DeveloperMenu.htm']:
        url_check = url[1:] if url.startswith("/") else url
        tmp_u = urljoin(url_check, file_name)
        p = HTTP.get_url(tmp_u, use_cache=False, method="GET")
        discard_data(p)  # do not keep the probe response in the audit data
        if p.status == "200":
            file_save = download(tmp_u)
            new_file = re.findall(r'href=[\'"]?([^\'" >]+)', file_save.raw_data)
    
    return new_file
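The href extraction regex in action, against a hypothetical scrap of HTML:

    import re

    html = '<a href="execute.xml">run</a> <a href=\'menu.htm\'>menu</a>'
    print re.findall(r'href=[\'"]?([^\'" >]+)', html)
    # Prints: ['execute.xml', 'menu.htm']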
Example No. 5
def find_xml_files(url):
    new_file = []
    for file_name in ['execute.xml', 'DeveloperMenu.xml']:
        url_check = url[1:] if url.startswith("/") else url
        tmp_u = urljoin(url_check, file_name)
        p = HTTP.get_url(tmp_u, use_cache=False, method="GET")
        discard_data(p)  # do not keep the probe response in the audit data
        if p.status == "200":
            file_save = download(tmp_u)
            tree = ET.fromstring(file_save.raw_data)
            try:
                for links in tree.findall('Object'):
                    Logger.log(links.find('ObjLink').text)
                    new_file.append(links.find('ObjLink').text)
            except Exception:
                ##raise # XXX DEBUG
                pass
    
    return new_file
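The XML layout this parser expects, reconstructed as a hypothetical
DeveloperMenu.xml (the element names Object and ObjLink come from the code
above; everything else is an assumption):

    import xml.etree.ElementTree as ET

    sample = """<Menu>
      <Object><ObjLink>/admin/execute.xml</ObjLink></Object>
      <Object><ObjLink>/admin/debug.htm</ObjLink></Object>
    </Menu>"""

    tree = ET.fromstring(sample)
    print [obj.find('ObjLink').text for obj in tree.findall('Object')]
    # Prints: ['/admin/execute.xml', '/admin/debug.htm']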
Example No. 6
def get_error_page(url):
    """
    Generates an error page an get their content.

    :param url: string with the base Url.
    :type url: str

    :return: a string with the content of response.
    :rtype: str
    """

    #
    # Generate an error in server to get an error page, using a random string
    #
    # Make the URL
    m_error_url      = "%s%s" % (url, generate_random_string())

    # Send the request
    m_error_response = HTTP.get_url(m_error_url)  # FIXME handle exceptions!
    discard_data(m_error_response)
    return m_error_response.data
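generate_random_string is assumed to produce a path segment that almost
certainly does not exist, forcing the server to answer with its error page.
A minimal sketch of such a helper:

    import random
    import string

    def generate_random_string(length=20):
        # Random alphanumeric string; vanishingly unlikely to exist on the server.
        charset = string.ascii_letters + string.digits
        return "".join(random.choice(charset) for _ in xrange(length))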
Example No. 7
    def recv_info(self, info):

        # Get the response page.
        response = HTTP.get_url(info.url, callback=self.check_response)
        if response:

            try:

                # Look for a match.
                page_text = response.data

                total = float(len(signatures))
                for step, (server_name,
                           server_page) in enumerate(signatures.iteritems()):

                    # Update status
                    progress = float(step) / total
                    self.update_status(progress=progress)

                    level = get_diff_ratio(page_text, server_page)

                    if level > 0.95:  # magic number :)

                        # Match found.
                        vulnerability = DefaultErrorPage(info, server_name)
                        vulnerability.add_information(response)
                        return [vulnerability, response]

                # Discard the response if no match was found.
                discard_data(response)

            except Exception:

                # Discard the response on error.
                discard_data(response)

                raise
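signatures maps server names to the raw text of their default error pages.
A hypothetical shape (the real mapping is loaded elsewhere in the plugin):

    signatures = {
        "Apache": "<html><body><h1>Not Found</h1>The requested URL...</body></html>",
        "nginx": "<html><head><title>404 Not Found</title></head>...</html>",
    }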
Example No. 8
        Logger.log_error_more_verbose("Error while processing: '%s': %s" %
                                      (url, str(e)))

    # Check whether the URL is acceptable by comparing
    # the response content.
    #
    # If the matching level between the error page
    # and this URL's response is greater than 52%,
    # it is the same page and must be discarded.
    #
    if p and p.status == "200":

        # If the method used to get URL was HEAD, get complete URL
        if method != "GET":
            try:
                p = HTTP.get_url(url, use_cache=False, method="GET")
                if p:
                    discard_data(p)
            except Exception, e:
                Logger.log_error_more_verbose(
                    "Error while processing: '%s': %s" % (url, str(e)))

        # Queue for analysis and log it if it is accepted
        if matcher.analyze(p.raw_response, url=url, risk=risk_level):
            Logger.log_more_verbose("Discovered partial url: '%s'" % url)


#------------------------------------------------------------------------------
#
# Aux functions
#
Example No. 9
class OSFingerprinting(TestingPlugin):
    """
    Plugin to fingerprint the remote OS.
    """

    #----------------------------------------------------------------------
    def get_accepted_info(self):
        return [IP, BaseUrl]

    #----------------------------------------------------------------------
    def recv_info(self, info):
        """
        Main function for OS fingerprint. Get a domain or IP and return the fingerprint results.

        :param info: Folder URL.
        :type info: FolderUrl

        :return: OS Fingerprint.
        :rtype: OSFingerprint
        """

        #
        # Detection methods and their weights.
        #
        # The weight is a value between 1-5
        #

        FINGERPRINT_METHODS_OS_AND_VERSION = {
            'ttl': {
                'function': self.ttl_platform_detection,
                'weight': 2
            }
        }

        FUNCTIONS = None  # Fingerprint methods to run
        m_host = None

        is_windows = None

        if isinstance(info, IP):
            m_host = info.address
            FUNCTIONS = ['ttl']
        else:  # BaseUrl
            m_host = info.hostname
            FUNCTIONS = ['ttl']

            # Try to detect if remote system is a Windows
            m_windows_host = "%s://%s:%s" % (info.parsed_url.scheme,
                                             info.parsed_url.host,
                                             info.parsed_url.port)
            is_windows = self.is_URL_in_windows(m_windows_host)

        # Logging
        Logger.log_more_verbose(
            "Starting OS fingerprinting plugin for site: %s" % m_host)

        m_counter = Counter()
        # Run functions
        for f in FUNCTIONS:
            l_function = FINGERPRINT_METHODS_OS_AND_VERSION[f]['function']

            ### For future use
            ### l_weight     = FINGERPRINT_METHODS_OS_AND_VERSION[f]['weight']

            # Run
            results = l_function(m_host)

            if results:
                for l_r in results:
                    m_counter[l_r] += 1

        # Return value
        m_return = None

        #
        # Filter the results
        #
        if len(m_counter) > 0:
            # Looking for a Windows system
            if is_windows:  # If Windows is detected
                l_counter = Counter()

                # Extract windows systems
                for x, y in m_counter.iteritems():
                    if "windows" == x:
                        l_counter[x] += y

                # Replace the counter for the new
                m_counter = l_counter

            # Get most common systems
            l_most_common = m_counter.most_common(5)

            # The first element will be the detected OS
            m_OS_family = l_most_common[0][0][0]
            m_OS_version = l_most_common[0][0][1]

            # Next 4 will be the 'others'
            m_length = float(len(l_most_common))
            m_others = {
                "%s-%s" % (l_most_common[i][0][0], l_most_common[i][0][1]):
                float('{:.2f}'.format(l_most_common[i][1] / m_length))
                for i in xrange(1, len(l_most_common), 1)
            }

            # create the data
            m_return = OSFingerprint(m_OS_family,
                                     m_OS_version,
                                     others=m_others)

        elif is_windows is not None:
            if is_windows:  # Windows system detected
                m_return = OSFingerprint("windows")
            else:  # *NIX system detected
                m_return = OSFingerprint("unix_or_compatible")

        # If there is information, associate it with the resource
        if m_return:
            info.add_information(m_return)

        return m_return

    #----------------------------------------------------------------------
    #
    # Platform detection methods
    #
    #----------------------------------------------------------------------
    def is_URL_in_windows(self, main_url):
        """
        Detect if platform is Windows or \*NIX. To do this, get the first link, in scope, and
        does two resquest. If are the same response, then, platform are Windows. Else are \*NIX.

        :returns: True, if the remote host is a Windows system. False is \*NIX or None if unknown.
        :rtype: bool
        """
        m_forbidden = (
            "logout",
            "logoff",
            "exit",
            "sigout",
            "signout",
        )

        # Get the main web page
        m_r = download(main_url, callback=self.check_download)
        if not m_r or not m_r.raw_data:
            return None
        discard_data(m_r)

        # Get the first link
        m_links = None
        try:
            if m_r.information_type == Information.INFORMATION_HTML:
                m_links = extract_from_html(m_r.raw_data, main_url)
            else:
                m_links = extract_from_text(m_r.raw_data, main_url)
        except TypeError, e:
            Logger.log_error_more_verbose("Plugin error: %s" % format_exc())
            return None

        if not m_links:
            return None

        # Get the first link of the page that's in scope of the audit
        m_first_link = None
        for u in m_links:
            if u in Config.audit_scope and not any(x in u
                                                   for x in m_forbidden):
                m_first_link = u
                break

        if not m_first_link:
            return None

        # Now get two request to the links. One to the original URL and other
        # as upper URL.

        # Original
        m_response_orig = HTTP.get_url(
            m_first_link,
            callback=self.check_response)  # FIXME handle exceptions!
        discard_data(m_response_orig)
        # Uppercase
        m_response_upper = HTTP.get_url(
            m_first_link.upper(),
            callback=self.check_response)  # FIXME handle exceptions!
        discard_data(m_response_upper)
        # Compare them
        m_orig_data = m_response_orig.raw_response if m_response_orig else ""
        m_upper_data = m_response_upper.raw_response if m_response_upper else ""
        m_match_level = get_diff_ratio(m_orig_data, m_upper_data)

        # If the responses are more than 95% similar, both URLs serve the
        # same page => Windows; otherwise => *NIX.
        return m_match_level > 0.95
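ttl_platform_detection is referenced above but not shown. A hedged sketch of
a TTL-based guess, assuming it yields (family, version) tuples as the Counter
logic expects (initial TTLs near 128 are the Windows default, near 64 the
usual Linux/Unix one):

    def ttl_platform_detection_sketch(observed_ttl):
        # Initial TTLs above 64 and up to 128 suggest Windows.
        if 64 < observed_ttl <= 128:
            return [("windows", None)]
        return [("unix_or_compatible", None)]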
Example No. 10
    def is_URL_in_windows(self, main_url):
        """
        Detect if platform is Windows or \*NIX. To do this, get the first link, in scope, and
        does two resquest. If are the same response, then, platform are Windows. Else are \*NIX.

        :returns: True, if the remote host is a Windows system. False is \*NIX or None if unknown.
        :rtype: bool
        """
        m_forbidden = (
            "logout",
            "logoff",
            "exit",
            "sigout",
            "signout",
        )

        # Get the main web page
        m_r = download(main_url, callback=self.check_download)
        if not m_r:
            return None
        discard_data(m_r)

        # Get the first link
        if m_r.information_type == Information.INFORMATION_HTML:
            m_links = extract_from_html(m_r.raw_data, main_url)
        else:
            m_links = extract_from_text(m_r.raw_data, main_url)

        if not m_links:
            return None

        # Get the first link of the page that's in scope of the audit
        m_first_link = None
        for u in m_links:
            if u in Config.audit_scope and not any(x in u
                                                   for x in m_forbidden):
                m_first_link = u
                break

        if not m_first_link:
            return None

        # Now get two request to the links. One to the original URL and other
        # as upper URL.

        # Original
        m_response_orig = HTTP.get_url(
            m_first_link,
            callback=self.check_response)  # FIXME handle exceptions!
        discard_data(m_response_orig)
        # Uppercase
        m_response_upper = HTTP.get_url(
            m_first_link.upper(),
            callback=self.check_response)  # FIXME handle exceptions!
        discard_data(m_response_upper)
        # Compare them
        m_orig_data = m_response_orig.raw_response if m_response_orig else ""
        m_upper_data = m_response_upper.raw_response if m_response_upper else ""
        m_match_level = get_diff_ratio(m_orig_data, m_upper_data)

        # If the responses are more than 95% similar, both URLs serve the
        # same page => Windows; otherwise => *NIX.
        return m_match_level > 0.95
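The underlying trick in isolation, with a hypothetical URL: Windows/IIS paths
are case-insensitive, so the uppercased request serves the same content,
while on *NIX it usually returns an error page:

    orig = HTTP.get_url("http://www.example.com/index.html")
    upper = HTTP.get_url("http://www.example.com/index.html".upper())
    same_page = get_diff_ratio(orig.raw_response, upper.raw_response) > 0.95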
Example No. 11
                    # If it is a Disallow URL, treat it as suspicious
                    if m_key.lower() == "disallow":
                        m_discovered_suspicious.append(tmp_discovered)

            except Exception:
                continue

        #
        # Filter results
        #
        # Delete repeated
        m_discovered_urls = set(m_discovered_urls)

        # Generating error page
        m_error_page          = generate_error_page_url(m_url_robots_txt)
        m_response_error_page = HTTP.get_url(m_error_page, callback=self.check_response)

        if m_response_error_page:
            m_return.append(m_response_error_page)

            # Analyze results
            match = {}
            m_analyzer = MatchingAnalyzer(m_response_error_page.data)
            m_total = len(m_discovered_urls)
            for m_step, l_url in enumerate(m_discovered_urls):

                # Update progress only on odd iterations
                if m_step % 2:
                    progress = (float(m_step * 100) / m_total)
                    self.update_status(progress=progress)
                l_url = fix_url(l_url, m_url)
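This fragment starts mid-function: m_discovered_urls and the other m_*
variables are built earlier by parsing the robots.txt body. The parsing it
implies, against a hypothetical robots.txt:

    robots = """User-agent: *
    Disallow: /admin/
    Disallow: /backup/
    Allow: /public/"""

    for line in robots.splitlines():
        key, _, value = line.partition(":")
        if key.strip().lower() == "disallow" and value.strip():
            print "Suspicious path: %s" % value.strip()
    # Prints the /admin/ and /backup/ paths.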
Example No. 12
    def __find_plugins(self, url, plugins_wordlist, update_func):
        """
        Try to find available plugins

        :param url: base URL to test.
        :type url: str

        :param plugins_wordlist: path to wordlist with plugins lists.
        :type plugins_wordlist: str

        :param update_func: function to update plugin status.
        :type update_func: function

        :return: list of lists as format:
                 list([PLUGIN_NAME, PLUGIN_URL, PLUGIN_INSTALLED_VERSION, PLUGIN_LAST_VERSION, [CVE1, CVE2...]])
        :type: list(list())
        """
        results = []
        urls_to_test = {
            "readme.txt": r"(Stable tag:[\svV]*)([0-9\.]+)",
            "README.txt": r"(Stable tag:[\svV]*)([0-9\.]+)",
        }

        # Generates the error page
        error_response = get_error_page(url).raw_data

        # Load plugins info
        plugins = []
        plugins_append = plugins.append
        with open(plugins_wordlist, "rU") as f:
            for x in f:
                plugins_append(x.replace("\n", ""))

        # Calculate sizes
        total_plugins = len(plugins)

        # Load CSV info
        csv_info = csv.reader(plugins)

        # Process the URLs
        for i, plugin_row in enumerate(csv_info):

            # Plugin properties
            plugin_URI = plugin_row[0]
            plugin_name = plugin_row[1]
            plugin_last_version = plugin_row[2]
            plugin_CVEs = [] if plugin_row[3] == "" else plugin_row[3].split("|")

            # Update status
            update_func((float(i) * 100.0) / float(total_plugins))

            # Make plugin URL
            partial_plugin_url = "%s/%s" % (url, "wp-content/plugins/%s" %
                                            plugin_URI)

            # Test each URL with possible plugin version info
            for target, regex in urls_to_test.iteritems():

                plugin_url = "%s/%s" % (partial_plugin_url, target)

                # Try to get plugin
                p = None
                try:
                    p = HTTP.get_url(plugin_url, use_cache=False)
                    if p:
                        discard_data(p)
                except Exception, e:
                    Logger.log_error_more_verbose(
                        "Error while downloading: '%s': %s" %
                        (plugin_url, str(e)))
                    continue

                if not p:
                    continue

                plugin_installed_version = None

                if p.status == "403":  # Installed, but inaccesible
                    plugin_installed_version = "Unknown"

                elif p.status == "200":

                    # Check that the page is not a generic "not found" page served with a 200 code
                    if get_diff_ratio(error_response, p.raw_response) < 0.52:

                        # Find the version
                        tmp_version = re.search(regex, p.raw_response)

                        if tmp_version is not None:
                            plugin_installed_version = tmp_version.group(2)

                # Store info
                if plugin_installed_version is not None:
                    Logger.log(
                        "Discovered plugin: '%s (installed version: %s)' (latest version: %s)"
                        % (plugin_name, plugin_installed_version,
                           plugin_last_version))
                    results.append([
                        plugin_name, plugin_url, plugin_installed_version,
                        plugin_last_version, plugin_CVEs
                    ])

                    # Plugin found -> no more URL tests for this plugin
                    break
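Each row of the plugins wordlist is expected to be CSV with four columns:
plugin URI, plugin name, latest version, and a |-separated CVE list.
Hypothetical rows and how the loop reads them:

    import csv

    sample_rows = [
        "akismet,Akismet,3.0.0,",
        "some-plugin,Some Plugin,1.2.3,CVE-2014-0001|CVE-2014-0002",
    ]
    for row in csv.reader(sample_rows):
        print row[1], row[2], (row[3].split("|") if row[3] else [])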
Example No. 13
    except Exception, e:
        Logger.log_more_verbose("Error while processing: '%s': %s" % (url, str(e)))

    # Check whether the URL is acceptable by comparing
    # the response content.
    #
    # If the matching level between the error page
    # and this URL's response is greater than 52%,
    # it is the same page and must be discarded.
    #
    if p and p.status == "200":

        # If the method used to get URL was HEAD, get complete URL
        if method != "GET":
            try:
                p = HTTP.get_url(url, use_cache=False, method="GET")
                if p:
                    discard_data(p)
            except Exception, e:
                Logger.log_more_verbose("Error while processing: '%s': %s" % (url, str(e)))

        # Queue for analysis and report it if it is accepted
        if matcher.analyze(p.raw_response, url=url, risk=risk_level):
            updater_func(text="Discovered partial url: '%s'" % url)


#----------------------------------------------------------------------
#
# Aux functions
#
#----------------------------------------------------------------------