def __detect_wordpress_installation(self, url, wordpress_urls):
    """
    Try to detect a WordPress installation in the current path.

    :param url: URL where try to find the WordPress installation.
    :type url: str

    :param wordpress_urls: string with wordlist name with WordPress URLs.
    :type wordpress_urls: str

    :return: True if wordpress installation found. False otherwise.
    :rtype: bool
    """
    Logger.log_more_verbose(
        "Detecting Wordpress instalation in URI: '%s'." % url)
    total_urls = 0
    urls_found = 0

    # Baseline: what this server answers for a non-existent page, so that
    # custom "soft 404" error pages can be told apart from real content.
    error_page = get_error_page(url).raw_data

    for u in WordListLoader.get_wordlist(wordpress_urls):
        total_urls += 1
        tmp_url = urljoin(url, u)

        r = HTTP.get_url(tmp_url, use_cache=False)
        if r.status == "200":
            # Try to detect non-default error pages.
            ratio = get_diff_ratio(r.raw_response, error_page)
            if ratio < 0.35:
                urls_found += 1
        discard_data(r)

    # BUG FIX: guard against an empty wordlist (ZeroDivisionError).
    if not total_urls:
        return False

    # BUG FIX: the original fell off the end (returning None) in the
    # success case. If more than 85% of the known WordPress URLs answered
    # with real content, this is a WordPress installation.
    if (urls_found / float(total_urls)) >= 0.85:
        return True

    # If all fails, make another last test: on a real WordPress site,
    # wp-admin redirects to the login page.
    url_wp_admin = urljoin(url, "wp-admin/")
    try:
        p = HTTP.get_url(url_wp_admin, use_cache=False,
                         allow_redirects=False)
        if p:
            discard_data(p)
    except Exception:
        return False

    # BUG FIX: also guard against a None response (no exception raised).
    return bool(p and p.status == "302" and
                "wp-login.php?redirect_to=" in p.headers.get("Location", ""))
def __detect_wordpress_installation(self, url, wordpress_urls):
    """
    Try to detect a WordPress installation in the current path.

    :param url: URL where try to find the WordPress installation.
    :type url: str

    :param wordpress_urls: string with wordlist name with WordPress URLs.
    :type wordpress_urls: str

    :return: True if wordpress installation found. False otherwise.
    :rtype: bool
    """
    Logger.log_more_verbose("Detecting Wordpress instalation in URI: '%s'." % url)

    total_urls = 0
    urls_found = 0

    # Reference content of this server's error page, used to recognize
    # custom "soft 404" responses that still carry status 200.
    error_page = get_error_page(url).raw_data

    for u in WordListLoader.get_wordlist(wordpress_urls):
        total_urls += 1
        tmp_url = urljoin(url, u)

        r = HTTP.get_url(tmp_url, use_cache=False)
        if r.status == "200":
            # Try to detect non-default error pages.
            ratio = get_diff_ratio(r.raw_response, error_page)
            if ratio < 0.35:
                urls_found += 1
        discard_data(r)

    # BUG FIX: an empty wordlist previously raised ZeroDivisionError.
    if not total_urls:
        return False

    # BUG FIX: the original returned None (falsy) when the hit ratio was
    # >= 85%, i.e. exactly when WordPress WAS detected. Return True here.
    if (urls_found / float(total_urls)) >= 0.85:
        return True

    # Last-chance test: wp-admin on a live WordPress redirects to wp-login.
    url_wp_admin = urljoin(url, "wp-admin/")
    try:
        p = HTTP.get_url(url_wp_admin, use_cache=False, allow_redirects=False)
        if p:
            discard_data(p)
    except Exception:
        return False

    # BUG FIX: p may be None without an exception; guard before deref.
    return bool(p and p.status == "302" and
                "wp-login.php?redirect_to=" in p.headers.get("Location", ""))
def process_url(risk_level, method, matcher, updater_func, total_urls, url):
    """
    Checks if an URL exists.

    :param risk_level: risk level of the tested URL, if discovered.
    :type risk_level: int

    :param method: string with HTTP method used.
    :type method: str

    :param matcher: instance of MatchingAnalyzer object.
    :type matcher: `MatchingAnalyzer`

    :param updater_func: update_status function to send updates
    :type updater_func: update_status

    :param total_urls: total number of URL to globally process.
    :type total_urls: int

    :param url: a tuple with data: (index, the URL to process)
    :type url: tuple(int, str)
    """
    # Unpack the (index, url) tuple; the index drives the progress report.
    i, url = url

    # Report overall progress as a percentage of processed URLs.
    updater_func((float(i) * 100.0) / float(total_urls))
    # Logger.log_more_verbose("Trying to discover URL %s" % url)

    # Probe the URL; the response object itself is not kept.
    # NOTE(review): risk_level and matcher are unused in this extract —
    # the analysis step that consumes `p` appears to be truncated here.
    p = None
    try:
        p = HTTP.get_url(url, use_cache=False, method=method)
        if p:
            discard_data(p)
    except Exception, e:
        Logger.log_error_more_verbose("Error while processing: '%s': %s" % (url, str(e)))
def get_http_method(url):
    """
    Determine whether the HEAD method is usable for *url*.

    Two requests are issued — one HEAD and one GET — and their response
    headers are compared. When the headers match by 90% or more, the
    server is treated as not honoring HEAD properly and GET is preferred.

    :param url: URL to probe.
    :type url: str

    :return: "HEAD" when HEAD gives distinct (valid) results, "GET" otherwise.
    :rtype: str
    """
    m_head_response = HTTP.get_url(url, method="HEAD")  # FIXME handle exceptions!
    discard_data(m_head_response)

    m_get_response = HTTP.get_url(url)  # FIXME handle exceptions!
    discard_data(m_get_response)

    # Check if the HEAD response differs from the GET response, to ensure
    # that HEAD results are valid.
    similarity = HTTP_response_headers_analyzer(m_head_response.headers,
                                                m_get_response.headers)
    if similarity < 0.90:
        return "HEAD"
    return "GET"
def find_htm_file(url):
    """
    Look for a DeveloperMenu.htm file under *url* and extract its links.

    :param url: base URL to check.
    :type url: str

    :return: list with the href targets found in the file; empty list when
             the file is missing or unreadable.
    :rtype: list(str)
    """
    new_file = []
    for file_name in ['DeveloperMenu.htm']:
        # Strip a leading "/" so urljoin resolves against the right base.
        url_check = url[1:] if url.startswith("/") else url
        tmp_u = urljoin(url_check, file_name)

        p = HTTP.get_url(tmp_u, use_cache=False, method="GET")
        # BUG FIX: p may be None; the original dereferenced p.status blindly.
        if p and p.status == "200":
            file_save = download(tmp_u)
            # BUG FIX: guard against a failed download before reading raw_data,
            # and accumulate instead of overwriting previous results.
            if file_save:
                new_file.extend(
                    re.findall(r'href=[\'"]?([^\'" >]+)', file_save.raw_data))
    return new_file
def find_xml_files(url):
    """
    Look for known XML files (execute.xml, DeveloperMenu.xml) under *url*
    and extract the object links they declare.

    :param url: base URL to check.
    :type url: str

    :return: list with the ObjLink values found; empty list otherwise.
    :rtype: list(str)
    """
    new_file = []
    for file_name in ['execute.xml', 'DeveloperMenu.xml']:
        # Strip a leading "/" so urljoin resolves against the right base.
        url_check = url[1:] if url.startswith("/") else url
        tmp_u = urljoin(url_check, file_name)

        p = HTTP.get_url(tmp_u, use_cache=False, method="GET")
        # BUG FIX: p may be None; the original dereferenced p.status blindly.
        if p and p.status == "200":
            file_save = download(tmp_u)
            if not file_save:
                continue
            try:
                # BUG FIX: parse inside the guard — the original called
                # ET.fromstring() outside the try, so one malformed XML
                # document aborted the whole scan despite the best-effort
                # except below.
                tree = ET.fromstring(file_save.raw_data)
                for links in tree.findall('Object'):
                    Logger.log(links.find('ObjLink').text)
                    new_file.append(links.find('ObjLink').text)
            except Exception:
                ##raise # XXX DEBUG
                pass
    return new_file
def get_error_page(url):
    """
    Generates an error page an get their content.

    :param url: string with the base Url.
    :type url: str

    :return: a string with the content of response.
    :rtype: str
    """
    #
    # Generate an error in server to get an error page, using a random string
    #
    # Make the URL
    m_error_url = "%s%s" % (url, generate_random_string())

    # Get the request
    m_error_response = HTTP.get_url(m_error_url)  # FIXME handle exceptions!
    discard_data(m_error_response)
    m_error_response = m_error_response.data

    # BUG FIX: the original computed the content but never returned it,
    # despite the documented ":return:". Return the error-page body.
    return m_error_response
def recv_info(self, info):
    """
    Compare the page at *info.url* against every known default error page
    signature and report a vulnerability on a strong match.

    :param info: resource holding the URL to test.

    :return: [vulnerability, response] when a default error page is
             matched; None otherwise (implicit).
    """
    # Get the response page.
    response = HTTP.get_url(info.url, callback=self.check_response)
    if response:
        try:
            # Look for a match.
            page_text = response.data
            total = float(len(signatures))
            for step, (server_name, server_page) in enumerate(signatures.iteritems()):
                # Update status
                progress = float(step) / total
                self.update_status(progress=progress)

                level = get_diff_ratio(page_text, server_page)
                if level > 0.95:  # magic number :)
                    # Match found.
                    vulnerability = DefaultErrorPage(info, server_name)
                    vulnerability.add_information(response)
                    return [vulnerability, response]

            # Discard the response if no match was found.
            discard_data(response)
        except Exception:
            # Discard the response on error.
            discard_data(response)
            raise
def recv_info(self, info):
    """
    Check the page at *info.url* against the catalog of default error page
    signatures; emit a vulnerability when one matches closely enough.
    """
    # Fetch the page to analyze; nothing to do when no response came back.
    response = HTTP.get_url(info.url, callback=self.check_response)
    if not response:
        return

    try:
        body = response.data
        signature_count = float(len(signatures))

        for index, (server_name, server_page) in enumerate(signatures.iteritems()):
            # Report progress for each signature checked.
            self.update_status(progress=float(index) / signature_count)

            similarity = get_diff_ratio(body, server_page)
            if similarity > 0.95:  # magic number :)
                # Strong match: build and return the vulnerability
                # together with the matching response.
                vulnerability = DefaultErrorPage(info, server_name)
                vulnerability.add_information(response)
                return [vulnerability, response]

        # No signature matched: drop the response.
        discard_data(response)
    except Exception:
        # Drop the response on error too, then propagate.
        discard_data(response)
        raise
def http_simple_analyzer(main_url, update_status_func, number_of_entries=4):
    """Simple method to get fingerprint server info

    :param main_url: Base url to test.
    :type main_url: str

    :param update_status_func: function used to update the status of the process
    :type update_status_func: function

    :param number_of_entries: number of results to return for most probable web servers detected.
    :type number_of_entries: int

    :return: a tuple as format:
        Web server family, Web server version, Web server complete description,
        related web servers (as a dict('SERVER_RELATED' : set(RELATED_NAMES))),
        others web server with their probabilities as a dict(CONCRETE_WEB_SERVER, PROBABILITY)
    """
    # Probe table: one synthetic raw request per probe type. Each entry
    # carries the wordlist to score against, the raw HTTP verb/protocol/
    # payload to send, and a weight (0-5) for how discriminating it is.
    m_actions = {
        'GET':       {'wordlist': 'Wordlist_get',           'weight': 1, 'protocol': 'HTTP/1.1', 'method': 'GET',     'payload': '/'},
        'LONG_GET':  {'wordlist': 'Wordlist_get_long',      'weight': 1, 'protocol': 'HTTP/1.1', 'method': 'GET',     'payload': '/%s' % ('a' * 200)},
        'NOT_FOUND': {'wordlist': 'Wordlist_get_notfound',  'weight': 2, 'protocol': 'HTTP/1.1', 'method': 'GET',     'payload': '/404_NOFOUND__X02KAS'},
        'HEAD':      {'wordlist': 'Wordlist_head',          'weight': 3, 'protocol': 'HTTP/1.1', 'method': 'HEAD',    'payload': '/'},
        'OPTIONS':   {'wordlist': 'Wordlist_options',       'weight': 2, 'protocol': 'HTTP/1.1', 'method': 'OPTIONS', 'payload': '/'},
        'DELETE':    {'wordlist': 'Wordlist_delete',        'weight': 5, 'protocol': 'HTTP/1.1', 'method': 'DELETE',  'payload': '/'},
        'TEST':      {'wordlist': 'Wordlist_attack',        'weight': 5, 'protocol': 'HTTP/1.1', 'method': 'TEST',    'payload': '/'},
        'INVALID':   {'wordlist': 'Wordlist_wrong_method',  'weight': 5, 'protocol': 'HTTP/9.8', 'method': 'GET',     'payload': '/'},
        'ATTACK':    {'wordlist': 'Wordlist_wrong_version', 'weight': 2, 'protocol': 'HTTP/1.1', 'method': 'GET',     'payload': "/etc/passwd?format=%%%%&xss=\x22><script>alert('xss');</script>&traversal=../../&sql='%20OR%201;"}
    }

    m_d = ParsedURL(main_url)
    m_hostname = m_d.hostname
    m_port = m_d.port
    m_debug = False  # Only for develop

    i = 0
    m_counters = HTTPAnalyzer()
    m_data_len = len(m_actions)  # Var used to update the status

    # Tallies raw "Server" banners weighted by probe weight.
    m_banners_counter = Counter()

    for l_action, v in m_actions.iteritems():
        if m_debug:
            print "###########"
        l_method = v["method"]
        l_payload = v["payload"]
        l_proto = v["protocol"]
        #l_wordlist = v["wordlist"]

        # Each type of probe hast different weight.
        #
        # Weights go from 0 - 5
        #
        l_weight = v["weight"]

        # Make the raw request
        l_raw_request = "%(method)s %(payload)s %(protocol)s\r\nHost: %(host)s\r\n\r\n" % (
            {
                "method": l_method,
                "payload": l_payload,
                "protocol": l_proto,
                "host": m_hostname,
                "port": m_port
            }
        )
        if m_debug:
            print "REQUEST"
            print l_raw_request

        # Do the connection
        l_response = None
        try:
            m_raw_request = HTTP_Raw_Request(l_raw_request)
            discard_data(m_raw_request)
            l_response = HTTP.make_raw_request(
                host=m_hostname,
                port=m_port,
                raw_request=m_raw_request,
                callback=check_raw_response)
            if l_response:
                discard_data(l_response)
        except NetworkException, e:
            Logger.log_error_more_verbose("Server-Fingerprint plugin: No response for URL (%s) with method '%s'. Message: %s" % (m_hostname, l_method, str(e)))
            continue

        if not l_response:
            Logger.log_error_more_verbose("No response for host '%s' with method '%s'." % (m_hostname, l_method))
            continue

        if m_debug:
            print "RESPONSE"
            print l_response.raw_headers

        # Update the status
        update_status_func((float(i) * 100.0) / float(m_data_len))
        Logger.log_more_verbose("Making '%s' test." % l_method)
        i += 1

        # Analyze for each wordlist
        #
        # Store the server banner
        try:
            m_banners_counter[l_response.headers["Server"]] += l_weight
        except KeyError:
            pass

        # Score the banner for this probe; probes without a Server header
        # contribute nothing (continue).
        l_server_name = None
        try:
            l_server_name = l_response.headers["Server"]
        except KeyError:
            continue

        m_counters.simple_inc(l_server_name, l_method, l_weight)

    # NOTE(review): the visible code ends here without returning the tuple
    # documented above — the tail of this function appears truncated in
    # this extract. Confirm against the full source before relying on it.
def http_analyzers(main_url, update_status_func, number_of_entries=4):
    """
    Analyze HTTP headers for detect the web server. Return a list with most
    possible web servers.

    :param main_url: Base url to test.
    :type main_url: str

    :param update_status_func: function used to update the status of the process
    :type update_status_func: function

    :param number_of_entries: number of results to return for most probable web servers detected.
    :type number_of_entries: int

    :return: Web server family, Web server version, Web server complete description,
        related web servers (as a dict('SERVER_RELATED' : set(RELATED_NAMES))),
        others web server with their probabilities as a dict(CONCRETE_WEB_SERVER, PROBABILITY)
    """
    # Load wordlist directly related with a HTTP fields.
    # { HTTP_HEADER_FIELD : [wordlists] }
    m_wordlists_HTTP_fields = {
        "Accept-Ranges":    "accept-range",
        "Server":           "banner",
        "Cache-Control":    "cache-control",
        "Connection":       "connection",
        "Content-Type":     "content-type",
        "WWW-Authenticate": "htaccess-realm",
        "Pragma":           "pragma",
        "X-Powered-By":     "x-powered-by"
    }

    # Probe table: raw request parameters plus a 0-5 weight per probe.
    m_actions = {
        'GET':       {'wordlist': 'Wordlist_get',           'weight': 1, 'protocol': 'HTTP/1.1', 'method': 'GET',     'payload': '/'},
        'LONG_GET':  {'wordlist': 'Wordlist_get_long',      'weight': 1, 'protocol': 'HTTP/1.1', 'method': 'GET',     'payload': '/%s' % ('a' * 200)},
        'NOT_FOUND': {'wordlist': 'Wordlist_get_notfound',  'weight': 2, 'protocol': 'HTTP/1.1', 'method': 'GET',     'payload': '/404_NOFOUND__X02KAS'},
        'HEAD':      {'wordlist': 'Wordlist_head',          'weight': 3, 'protocol': 'HTTP/1.1', 'method': 'HEAD',    'payload': '/'},
        'OPTIONS':   {'wordlist': 'Wordlist_options',       'weight': 2, 'protocol': 'HTTP/1.1', 'method': 'OPTIONS', 'payload': '/'},
        'DELETE':    {'wordlist': 'Wordlist_delete',        'weight': 5, 'protocol': 'HTTP/1.1', 'method': 'DELETE',  'payload': '/'},
        'TEST':      {'wordlist': 'Wordlist_attack',        'weight': 5, 'protocol': 'HTTP/1.1', 'method': 'TEST',    'payload': '/'},
        'INVALID':   {'wordlist': 'Wordlist_wrong_method',  'weight': 5, 'protocol': 'HTTP/9.8', 'method': 'GET',     'payload': '/'},
        'ATTACK':    {'wordlist': 'Wordlist_wrong_version', 'weight': 2, 'protocol': 'HTTP/1.1', 'method': 'GET',     'payload': "/etc/passwd?format=%%%%&xss=\x22><script>alert('xss');</script>&traversal=../../&sql='%20OR%201;"}
    }

    # Store results for others HTTP params
    m_d = ParsedURL(main_url)
    m_hostname = m_d.hostname
    m_port = m_d.port
    m_debug = False  # Only for develop

    # Counter of banners. Used when others methods fails.
    m_banners_counter = Counter()

    # Score counter
    m_counters = HTTPAnalyzer(debug=m_debug)

    # Var used to update the status
    m_data_len = len(m_actions)
    i = 1  # element in process

    for l_action, v in m_actions.iteritems():
        if m_debug:
            print "###########"
        l_method = v["method"]
        l_payload = v["payload"]
        l_proto = v["protocol"]
        l_wordlist = v["wordlist"]

        # Each type of probe hast different weight.
        #
        # Weights go from 0 - 5
        #
        l_weight = v["weight"]

        # Make the URL
        l_url = urljoin(main_url, l_payload)

        # Make the raw request
        #l_raw_request = "%(method)s %(payload)s %(protocol)s\r\nHost: %(host)s:%(port)s\r\nConnection: Close\r\n\r\n" % (
        l_raw_request = "%(method)s %(payload)s %(protocol)s\r\nHost: %(host)s\r\n\r\n" % (
            {
                "method": l_method,
                "payload": l_payload,
                "protocol": l_proto,
                "host": m_hostname,
                "port": m_port
            }
        )
        if m_debug:
            print "REQUEST"
            print l_raw_request

        # Do the connection
        l_response = None
        try:
            m_raw_request = HTTP_Raw_Request(l_raw_request)
            discard_data(m_raw_request)
            l_response = HTTP.make_raw_request(
                host=m_hostname,
                port=m_port,
                raw_request=m_raw_request,
                callback=check_raw_response)
            if l_response:
                discard_data(l_response)
        except NetworkException, e:
            Logger.log_error_more_verbose("Server-Fingerprint plugin: No response for URL (%s) '%s'. Message: %s" % (l_method, l_url, str(e)))
            continue

        if not l_response:
            Logger.log_error_more_verbose("No response for URL '%s'." % l_url)
            continue

        if m_debug:
            print "RESPONSE"
            print l_response.raw_headers

        # Update the status
        update_status_func((float(i) * 100.0) / float(m_data_len))
        Logger.log_more_verbose("Making '%s' test." % (l_wordlist))
        i += 1

        # Analyze for each wordlist
        #
        # Store the server banner
        try:
            m_banners_counter[l_response.headers["Server"]] += l_weight
        except KeyError:
            pass

        #
        # =====================
        # HTTP directly related
        # =====================
        #
        # Score each known header field against its dedicated wordlist.
        for l_http_header_name, l_header_wordlist in m_wordlists_HTTP_fields.iteritems():

            # Check if HTTP header field is in response
            if l_http_header_name not in l_response.headers:
                continue

            l_curr_header_value = l_response.headers[l_http_header_name]

            # Generate concrete wordlist name
            l_wordlist_path = Config.plugin_extra_config[l_wordlist][l_header_wordlist]

            # Load words for the wordlist
            l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(l_wordlist_path)

            # Looking for matches
            l_matches = l_wordlist_instance.matches_by_value(l_curr_header_value)

            m_counters.inc(l_matches, l_action, l_weight, l_http_header_name, message="HTTP field: " + l_curr_header_value)

        #
        # =======================
        # HTTP INdirectly related
        # =======================
        #

        #
        # Status code
        # ===========
        #
        l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["statuscode"])
        # Looking for matches
        l_matches = l_wordlist_instance.matches_by_value(l_response.status)
        m_counters.inc(l_matches, l_action, l_weight, "statuscode", message="Status code: " + l_response.status)

        #
        # Status text
        # ===========
        #
        l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["statustext"])
        # Looking for matches
        l_matches = l_wordlist_instance.matches_by_value(l_response.reason)
        m_counters.inc(l_matches, l_action, l_weight, "statustext", message="Status text: " + l_response.reason)

        #
        # Header space
        # ============
        #
        # Count the number of spaces between HTTP field name and their value, for example:
        # -> Server: Apache 1
        # The number of spaces are: 1
        #
        # -> Server:Apache 1
        # The number of spaces are: 0
        #
        l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["header-space"])
        # Looking for matches
        try:
            l_http_value = l_response.headers[0]  # get the value of first HTTP field
            l_spaces_num = str(abs(len(l_http_value) - len(l_http_value.lstrip())))
            l_matches = l_wordlist_instance.matches_by_value(l_spaces_num)
            m_counters.inc(l_matches, l_action, l_weight, "header-space", message="Header space: " + l_spaces_num)
        except IndexError:
            # NOTE(review): leftover debug print in a library code path.
            print "index error header space"
            pass

        #
        # Header capitalafterdash
        # =======================
        #
        # Look for non capitalized first letter of field name, for example:
        # -> Content-type: ....
        # Instead of:
        # -> Content-Type: ....
        #
        l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["header-capitalafterdash"])
        # Looking for matches
        l_valid_fields = [x for x in l_response.headers.iterkeys() if "-" in x]
        if l_valid_fields:
            l_h = l_valid_fields[0]
            l_value = l_h.split("-")[1]  # Get the second value: Content-type => type
            # NOTE(review): comment says "check first letter is lower" but the
            # code tests isupper(); flag is 1 for a capitalized letter.
            l_dush = None
            if l_value[0].isupper():
                l_dush = 1
            else:
                l_dush = 0
            l_matches = l_wordlist_instance.matches_by_value(l_dush)
            m_counters.inc(l_matches, l_action, l_weight, "header-capitalizedafterdush", message="Capital after dash: %s" % str(l_dush))

        #
        # Header order
        # ============
        #
        l_header_order = ','.join(l_response.headers.iterkeys())
        l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["header-order"])
        l_matches = l_wordlist_instance.matches_by_value(l_header_order)
        m_counters.inc(l_matches, l_action, l_weight, "header-order", message="Header order: " + l_header_order)

        #
        # Protocol name
        # ============
        #
        # For a response like:
        # -> HTTP/1.0 200 OK
        #    ....
        #
        # Get the 'HTTP' value.
        #
        try:
            l_proto = l_response.protocol  # Get the 'HTTP' text from response, if available
            if l_proto:
                l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["protocol-name"])
                l_matches = l_wordlist_instance.matches_by_value(l_proto)
                m_counters.inc(l_matches, l_action, l_weight, "proto-name", message="Proto name: " + l_proto)
        except IndexError:
            # NOTE(review): leftover debug print in a library code path.
            print "index error protocol name"
            pass

        #
        # Protocol version
        # ================
        #
        # For a response like:
        # -> HTTP/1.0 200 OK
        #    ....
        #
        # Get the '1.0' value.
        #
        try:
            l_version = l_response.version  # Get the '1.0' text from response, if available
            if l_version:
                l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["protocol-version"])
                l_matches = l_wordlist_instance.matches_by_value(l_version)
                m_counters.inc(l_matches, l_action, l_weight, "proto-version", message="Proto version: " + l_version)
        except IndexError:
            # NOTE(review): leftover debug print in a library code path.
            print "index error protocol version"
            pass

        if "ETag" in l_response.headers:
            l_etag_header = l_response.headers["ETag"]
            #
            # ETag length
            # ================
            #
            l_etag_len = len(l_etag_header)
            # NOTE(review): "etag-legth" looks like a typo, but it is a runtime
            # config key — it must match the plugin config file, so it is left
            # untouched here.
            l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["etag-legth"])
            l_matches = l_wordlist_instance.matches_by_value(l_etag_len)
            m_counters.inc(l_matches, l_action, l_weight, "etag-length", message="ETag length: " + str(l_etag_len))

            #
            # ETag Quotes
            # ================
            #
            l_etag_striped = l_etag_header.strip()
            if l_etag_striped.startswith("\"") or l_etag_striped.startswith("'"):
                l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["etag-quotes"])
                l_matches = l_wordlist_instance.matches_by_value(l_etag_striped[0])
                m_counters.inc(l_matches, l_action, l_weight, "etag-quotes", message="Etag quotes: " + l_etag_striped[0])

        if "Vary" in l_response.headers:
            l_vary_header = l_response.headers["Vary"]
            #
            # Vary delimiter
            # ================
            #
            # Checks if Vary header delimiter is something like this:
            # -> Vary: Accept-Encoding,User-Agent
            # Or this:
            # -> Vary: Accept-Encoding, User-Agent
            #
            # NOTE(review): str.find() returns -1 (truthy) when the substring
            # is absent, so this condition is effectively inverted — likely
            # meant `l_vary_header.find(", ") != -1`. Left as-is; fixing it
            # would change scoring behavior.
            l_var_delimiter = ", " if l_vary_header.find(", ") else ","
            l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["vary-delimiter"])
            l_matches = l_wordlist_instance.matches_by_value(l_var_delimiter)
            m_counters.inc(l_matches, l_action, l_weight, "vary-delimiter", message="Vary delimiter: " + l_var_delimiter)

            #
            # Vary capitalizer
            # ================
            #
            # Checks if Vary header delimiter is something like this:
            # -> Vary: Accept-Encoding,user-Agent
            # Or this:
            # -> Vary: accept-encoding,user-agent
            #
            l_vary_capitalizer = str(0 if l_vary_header == l_vary_header.lower() else 1)
            l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["vary-capitalize"])
            l_matches = l_wordlist_instance.matches_by_value(l_vary_capitalizer)
            m_counters.inc(l_matches, l_action, l_weight, "vary-capitalize", message="Vary capitalizer: " + l_vary_capitalizer)

            #
            # Vary order
            # ================
            #
            # Checks order between vary values:
            # -> Vary: Accept-Encoding,user-Agent
            # Or this:
            # -> Vary: User-Agent,Accept-Encoding
            #
            l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["vary-order"])
            l_matches = l_wordlist_instance.matches_by_value(l_vary_header)
            m_counters.inc(l_matches, l_action, l_weight, "vary-order", message="Vary order: " + l_vary_header)

        #
        # =====================
        # HTTP specific options
        # =====================
        #
        if l_action == "HEAD":
            #
            # HEAD Options
            # ============
            #
            l_option = l_response.headers.get("Allow")
            if l_option:
                l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["options-public"])
                # Looking for matches
                l_matches = l_wordlist_instance.matches_by_value(l_option)
                m_counters.inc(l_matches, l_action, l_weight, "options-allow", message="HEAD option: " + l_option)

        if l_action == "OPTIONS" or l_action == "INVALID" or l_action == "DELETE":
            if "Allow" in l_response.headers:
                #
                # Options allow
                # =============
                #
                l_option = l_response.headers.get("Allow")
                if l_option:
                    l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["options-public"])
                    # Looking for matches
                    l_matches = l_wordlist_instance.matches_by_value(l_option)
                    m_counters.inc(l_matches, l_action, l_weight, "options-allow", message="OPTIONS allow: " + l_action + " # " + l_option)

                #
                # Allow delimiter
                # ===============
                #
                l_option = l_response.headers.get("Allow")
                if l_option:
                    # NOTE(review): same str.find() truthiness issue as the
                    # Vary delimiter check above.
                    l_var_delimiter = ", " if l_option.find(", ") else ","
                    l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["options-delimited"])
                    # Looking for matches
                    l_matches = l_wordlist_instance.matches_by_value(l_var_delimiter)
                    m_counters.inc(l_matches, l_action, l_weight, "options-delimiter", message="OPTION allow delimiter " + l_action + " # " + l_option)

            if "Public" in l_response.headers:
                #
                # Public response
                # ===============
                #
                l_option = l_response.headers.get("Public")
                if l_option:
                    l_wordlist_instance = WordListLoader.get_advanced_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["options-public"])
                    # Looking for matches
                    l_matches = l_wordlist_instance.matches_by_value(l_option)
                    m_counters.inc(l_matches, l_action, l_weight, "options-public", message="Public response: " + l_action + " # " + l_option)

    # NOTE(review): the visible code ends here without returning the values
    # documented above — the tail of this function appears truncated in this
    # extract. Confirm against the full source.
def is_URL_in_windows(self, main_url):
    """
    Detect if platform is Windows or \*NIX. To do this, take the first
    in-scope link and request it twice: once as-is and once uppercased.
    If both responses are (almost) the same, paths are case-insensitive,
    which points to Windows; otherwise the host is treated as \*NIX.

    :returns: True, if the remote host is a Windows system. False is \*NIX or
        None if unknown.
    :rtype: bool
    """
    # Links containing any of these words could log us out — never follow.
    forbidden_words = (
        "logout",
        "logoff",
        "exit",
        "sigout",
        "signout",
    )

    # Fetch the main web page.
    page = download(main_url, callback=self.check_download)
    if not page:
        return None
    discard_data(page)

    # Extract links with the parser matching the content type.
    if page.information_type == Information.INFORMATION_HTML:
        links = extract_from_html(page.raw_data, main_url)
    else:
        links = extract_from_text(page.raw_data, main_url)
    if not links:
        return None

    # Pick the first link that is in the audit scope and safe to follow.
    first_link = None
    for candidate in links:
        if candidate in Config.audit_scope and \
                not any(word in candidate for word in forbidden_words):
            first_link = candidate
            break
    if not first_link:
        return None

    # Request the link twice: original casing and uppercased.
    resp_orig = HTTP.get_url(
        first_link, callback=self.check_response)  # FIXME handle exceptions!
    discard_data(resp_orig)

    resp_upper = HTTP.get_url(
        first_link.upper(), callback=self.check_response)  # FIXME handle exceptions!
    discard_data(resp_upper)

    # Compare both responses; near-identical bodies mean case-insensitive
    # paths and therefore, most likely, a Windows host.
    data_orig = resp_orig.raw_response if resp_orig else ""
    data_upper = resp_upper.raw_response if resp_upper else ""
    return get_diff_ratio(data_orig, data_upper) > 0.95
# If is a disallow URL, it must be suspicious if m_key.lower() == "disallow": m_discovered_suspicious.append(tmp_discovered) except Exception,e: continue # # Filter results # # Delete repeated m_discovered_urls = set(m_discovered_urls) # Generating error page m_error_page = generate_error_page_url(m_url_robots_txt) m_response_error_page = HTTP.get_url(m_error_page, callback=self.check_response) if m_response_error_page: m_return.append(m_response_error_page) # Analyze results match = {} m_analyzer = MatchingAnalyzer(m_response_error_page.data) m_total = len(m_discovered_urls) for m_step, l_url in enumerate(m_discovered_urls): # Update only odd iterations if m_step % 2: progress = (float(m_step * 100) / m_total) self.update_status(progress=progress) l_url = fix_url(l_url, m_url)
except Exception, e: Logger.log_more_verbose("Error while processing: '%s': %s" % (url, str(e))) # Check if the url is acceptable by comparing # the result content. # # If the maching level between the error page # and this url is greater than 52%, then it's # the same URL and must be discarded. # if p and p.status == "200": # If the method used to get URL was HEAD, get complete URL if method != "GET": try: p = HTTP.get_url(url, use_cache=False, method="GET") if p: discard_data(p) except Exception, e: Logger.log_more_verbose("Error while processing: '%s': %s" % (url, str(e))) # Append for analyze and display info if is accepted if matcher.analyze(p.raw_response, url=url, risk=risk_level): updater_func(text="Discovered partial url: '%s'" % url) #---------------------------------------------------------------------- # # Aux functions # #----------------------------------------------------------------------
Logger.log_error_more_verbose("Error while processing: '%s': %s" % (url, str(e))) # Check if the url is acceptable by comparing # the result content. # # If the maching level between the error page # and this url is greater than 52%, then it's # the same URL and must be discarded. # if p and p.status == "200": # If the method used to get URL was HEAD, get complete URL if method != "GET": try: p = HTTP.get_url(url, use_cache=False, method="GET") if p: discard_data(p) except Exception, e: Logger.log_error_more_verbose( "Error while processing: '%s': %s" % (url, str(e))) # Append for analyze and display info if is accepted if matcher.analyze(p.raw_response, url=url, risk=risk_level): Logger.log_more_verbose("Discovered partial url: '%s'" % url) #------------------------------------------------------------------------------ # # Aux functions #
def is_URL_in_windows(self, main_url):
    """
    Detect if platform is Windows or \*NIX by exploiting filesystem case
    sensitivity: the first in-scope link is fetched both with its original
    casing and fully uppercased, and the two responses are compared.

    :returns: True, if the remote host is a Windows system. False is \*NIX or
        None if unknown.
    :rtype: bool
    """
    # Never follow links that could terminate our session.
    m_forbidden = (
        "logout",
        "logoff",
        "exit",
        "sigout",
        "signout",
    )

    # Get the main web page; bail out when it cannot be downloaded.
    m_r = download(main_url, callback=self.check_download)
    if not m_r:
        return None
    discard_data(m_r)

    # Choose the link extractor based on the content type.
    extractor = (extract_from_html
                 if m_r.information_type == Information.INFORMATION_HTML
                 else extract_from_text)
    m_links = extractor(m_r.raw_data, main_url)
    if not m_links:
        return None

    # First link of the page that's in scope of the audit and safe.
    m_first_link = next(
        (u for u in m_links
         if u in Config.audit_scope and not any(x in u for x in m_forbidden)),
        None)
    if not m_first_link:
        return None

    # Two requests to the same link: original and uppercase variants.
    m_response_orig = HTTP.get_url(m_first_link, callback=self.check_response)    # FIXME handle exceptions!
    discard_data(m_response_orig)
    m_response_upper = HTTP.get_url(m_first_link.upper(), callback=self.check_response)  # FIXME handle exceptions!
    discard_data(m_response_upper)

    # Near-identical responses => case-insensitive paths => Windows.
    m_orig_data = m_response_orig.raw_response if m_response_orig else ""
    m_upper_data = m_response_upper.raw_response if m_response_upper else ""
    m_match_level = get_diff_ratio(m_orig_data, m_upper_data)
    if m_match_level > 0.95:
        return True
    return False
def http_analyzers(main_url, update_status_func, number_of_entries=4): """ Analyze HTTP headers for detect the web server. Return a list with most possible web servers. :param main_url: Base url to test. :type main_url: str :param update_status_func: function used to update the status of the process :type update_status_func: function :param number_of_entries: number of resutls tu return for most probable web servers detected. :type number_of_entries: int :return: Web server family, Web server version, Web server complete description, related web servers (as a dict('SERVER_RELATED' : set(RELATED_NAMES))), others web server with their probabilities as a dict(CONCRETE_WEB_SERVER, PROBABILITY) """ # Load wordlist directly related with a HTTP fields. # { HTTP_HEADER_FIELD : [wordlists] } m_wordlists_HTTP_fields = { "Accept-Ranges" : "accept-range", "Server" : "banner", "Cache-Control" : "cache-control", "Connection" : "connection", "Content-Type" : "content-type", "WWW-Authenticate" : "htaccess-realm", "Pragma" : "pragma", "X-Powered-By" : "x-powered-by" } m_actions = { 'GET' : { 'wordlist' : 'Wordlist_get' , 'weight' : 1 , 'protocol' : 'HTTP/1.1', 'method' : 'GET' , 'payload': '/' }, 'LONG_GET' : { 'wordlist' : 'Wordlist_get_long' , 'weight' : 1 , 'protocol' : 'HTTP/1.1', 'method' : 'GET' , 'payload': '/%s' % ('a' * 200) }, 'NOT_FOUND' : { 'wordlist' : 'Wordlist_get_notfound' , 'weight' : 2 , 'protocol' : 'HTTP/1.1', 'method' : 'GET' , 'payload': '/404_NOFOUND__X02KAS' }, 'HEAD' : { 'wordlist' : 'Wordlist_head' , 'weight' : 3 , 'protocol' : 'HTTP/1.1', 'method' : 'HEAD' , 'payload': '/' }, 'OPTIONS' : { 'wordlist' : 'Wordlist_options' , 'weight' : 2 , 'protocol' : 'HTTP/1.1', 'method' : 'OPTIONS' , 'payload': '/' }, 'DELETE' : { 'wordlist' : 'Wordlist_delete' , 'weight' : 5 , 'protocol' : 'HTTP/1.1', 'method' : 'DELETE' , 'payload': '/' }, 'TEST' : { 'wordlist' : 'Wordlist_attack' , 'weight' : 5 , 'protocol' : 'HTTP/1.1', 'method' : 'TEST' , 'payload': '/' }, 'INVALID' 
: { 'wordlist' : 'Wordlist_wrong_method' , 'weight' : 5 , 'protocol' : 'HTTP/9.8', 'method' : 'GET' , 'payload': '/' }, 'ATTACK' : { 'wordlist' : 'Wordlist_wrong_version' , 'weight' : 2 , 'protocol' : 'HTTP/1.1', 'method' : 'GET' , 'payload': "/etc/passwd?format=%%%%&xss=\x22><script>alert('xss');</script>&traversal=../../&sql='%20OR%201;"} } # Store results for others HTTP params m_d = ParsedURL(main_url) m_hostname = m_d.hostname m_port = m_d.port m_debug = False # Only for develop # Counter of banners. Used when others methods fails. m_banners_counter = Counter() # Score counter m_counters = HTTPAnalyzer(debug=m_debug) # Var used to update the status m_data_len = len(m_actions) i = 1 # element in process for l_action, v in m_actions.iteritems(): if m_debug: print "###########" l_method = v["method"] l_payload = v["payload"] l_proto = v["protocol"] l_wordlist = v["wordlist"] # Each type of probe hast different weight. # # Weights go from 0 - 5 # l_weight = v["weight"] # Make the URL l_url = urljoin(main_url, l_payload) # Make the raw request #l_raw_request = "%(method)s %(payload)s %(protocol)s\r\nHost: %(host)s:%(port)s\r\nConnection: Close\r\n\r\n" % ( l_raw_request = "%(method)s %(payload)s %(protocol)s\r\nHost: %(host)s\r\n\r\n" % ( { "method" : l_method, "payload" : l_payload, "protocol" : l_proto, "host" : m_hostname, "port" : m_port } ) if m_debug: print "REQUEST" print l_raw_request # Do the connection l_response = None try: m_raw_request = HTTP_Raw_Request(l_raw_request) discard_data(m_raw_request) l_response = HTTP.make_raw_request( host = m_hostname, port = m_port, raw_request = m_raw_request, callback = check_raw_response) if l_response: discard_data(l_response) except NetworkException,e: Logger.log_error_more_verbose("Server-Fingerprint plugin: No response for URL (%s) '%s'. Message: %s" % (l_method, l_url, str(e))) continue if not l_response: Logger.log_error_more_verbose("No response for URL '%s'." 
% l_url) continue if m_debug: print "RESPONSE" print l_response.raw_headers # Update the status update_status_func((float(i) * 100.0) / float(m_data_len)) Logger.log_more_verbose("Making '%s' test." % (l_wordlist)) i += 1 # Analyze for each wordlist # # Store the server banner try: m_banners_counter[l_response.headers["Server"]] += l_weight except KeyError: pass # # ===================== # HTTP directly related # ===================== # # for l_http_header_name, l_header_wordlist in m_wordlists_HTTP_fields.iteritems(): # Check if HTTP header field is in response if l_http_header_name not in l_response.headers: continue l_curr_header_value = l_response.headers[l_http_header_name] # Generate concrete wordlist name l_wordlist_path = Config.plugin_extra_config[l_wordlist][l_header_wordlist] # Load words for the wordlist l_wordlist_instance = WordListLoader.get_wordlist_as_dict(l_wordlist_path) # Looking for matches l_matches = l_wordlist_instance.matches_by_value(l_curr_header_value) m_counters.inc(l_matches, l_action, l_weight, l_http_header_name, message="HTTP field: " + l_curr_header_value) # # ======================= # HTTP INdirectly related # ======================= # # # # Status code # =========== # l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["statuscode"]) # Looking for matches l_matches = l_wordlist_instance.matches_by_value(l_response.status) m_counters.inc(l_matches, l_action, l_weight, "statuscode", message="Status code: " + l_response.status) # # Status text # =========== # l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["statustext"]) # Looking for matches l_matches = l_wordlist_instance.matches_by_value(l_response.reason) m_counters.inc(l_matches, l_action, l_weight, "statustext", message="Status text: " + l_response.reason) # # Header space # ============ # # Count the number of spaces between HTTP field name and their value, for example: # -> Server: 
Apache 1 # The number of spaces are: 1 # # -> Server:Apache 1 # The number of spaces are: 0 # l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["header-space"]) # Looking for matches try: l_http_value = l_response.headers[0] # get the value of first HTTP field l_spaces_num = str(abs(len(l_http_value) - len(l_http_value.lstrip()))) l_matches = l_wordlist_instance.matches_by_value(l_spaces_num) m_counters.inc(l_matches, l_action, l_weight, "header-space", message="Header space: " + l_spaces_num) except IndexError: print "index error header space" pass # # Header capitalafterdash # ======================= # # Look for non capitalized first letter of field name, for example: # -> Content-type: .... # Instead of: # -> Content-Type: .... # l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["header-capitalafterdash"]) # Looking for matches l_valid_fields = [x for x in l_response.headers.iterkeys() if "-" in x] if l_valid_fields: l_h = l_valid_fields[0] l_value = l_h.split("-")[1] # Get the second value: Content-type => type l_dush = None if l_value[0].isupper(): # Check first letter is lower l_dush = 1 else: l_dush = 0 l_matches = l_wordlist_instance.matches_by_value(l_dush) m_counters.inc(l_matches, l_action, l_weight, "header-capitalizedafterdush", message="Capital after dash: %s" % str(l_dush)) # # Header order # ============ # l_header_order = ','.join(l_response.headers.iterkeys()) l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["header-order"]) l_matches = l_wordlist_instance.matches_by_value(l_header_order) m_counters.inc(l_matches, l_action, l_weight, "header-order", message="Header order: " + l_header_order) # # Protocol name # ============ # # For a response like: # -> HTTP/1.0 200 OK # .... # # Get the 'HTTP' value. 
# try: l_proto = l_response.protocol # Get the 'HTTP' text from response, if available if l_proto: l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["protocol-name"]) l_matches = l_wordlist_instance.matches_by_value(l_proto) m_counters.inc(l_matches, l_action, l_weight, "proto-name", message="Proto name: " + l_proto) except IndexError: print "index error protocol name" pass # # Protocol version # ================ # # For a response like: # -> HTTP/1.0 200 OK # .... # # Get the '1.0' value. # try: l_version = l_response.version # Get the '1.0' text from response, if available if l_version: l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["protocol-version"]) l_matches = l_wordlist_instance.matches_by_value(l_version) m_counters.inc(l_matches, l_action, l_weight, "proto-version", message="Proto version: " + l_version) except IndexError: print "index error protocol version" pass if "ETag" in l_response.headers: l_etag_header = l_response.headers["ETag"] # # ETag length # ================ # l_etag_len = len(l_etag_header) l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["etag-legth"]) l_matches = l_wordlist_instance.matches_by_value(l_etag_len) m_counters.inc(l_matches, l_action, l_weight, "etag-length", message="ETag length: " + str(l_etag_len)) # # ETag Quotes # ================ # l_etag_striped = l_etag_header.strip() if l_etag_striped.startswith("\"") or l_etag_striped.startswith("'"): l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["etag-quotes"]) l_matches = l_wordlist_instance.matches_by_value(l_etag_striped[0]) m_counters.inc(l_matches, l_action, l_weight, "etag-quotes", message="Etag quotes: " + l_etag_striped[0]) if "Vary" in l_response.headers: l_vary_header = l_response.headers["Vary"] # # Vary delimiter # ================ # # Checks if Vary header delimiter is 
something like this: # -> Vary: Accept-Encoding,User-Agent # Or this: # -> Vary: Accept-Encoding, User-Agent # l_var_delimiter = ", " if l_vary_header.find(", ") else "," l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["vary-delimiter"]) l_matches = l_wordlist_instance.matches_by_value(l_var_delimiter) m_counters.inc(l_matches, l_action, l_weight, "vary-delimiter", message="Vary delimiter: " + l_var_delimiter) # # Vary capitalizer # ================ # # Checks if Vary header delimiter is something like this: # -> Vary: Accept-Encoding,user-Agent # Or this: # -> Vary: accept-encoding,user-agent # l_vary_capitalizer = str(0 if l_vary_header == l_vary_header.lower() else 1) l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["vary-capitalize"]) l_matches = l_wordlist_instance.matches_by_value(l_vary_capitalizer) m_counters.inc(l_matches, l_action, l_weight, "vary-capitalize", message="Vary capitalizer: " + l_vary_capitalizer) # # Vary order # ================ # # Checks order between vary values: # -> Vary: Accept-Encoding,user-Agent # Or this: # -> Vary: User-Agent,Accept-Encoding # l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["vary-order"]) l_matches = l_wordlist_instance.matches_by_value(l_vary_header) m_counters.inc(l_matches, l_action, l_weight, "vary-order", message="Vary order: " + l_vary_header) # # ===================== # HTTP specific options # ===================== # # if l_action == "HEAD": # # HEAD Options # ============ # l_option = l_response.headers.get("Allow") if l_option: l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["options-public"]) # Looking for matches l_matches = l_wordlist_instance.matches_by_value(l_option) m_counters.inc(l_matches, l_action, l_weight, "options-allow", message="HEAD option: " + l_option) if l_action == "OPTIONS" or l_action == 
"INVALID" or l_action == "DELETE": if "Allow" in l_response.headers: # # Options allow # ============= # l_option = l_response.headers.get("Allow") if l_option: l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["options-public"]) # Looking for matches l_matches = l_wordlist_instance.matches_by_value(l_option) m_counters.inc(l_matches, l_action, l_weight, "options-allow", message="OPTIONS allow: " + l_action + " # " + l_option) # # Allow delimiter # =============== # l_option = l_response.headers.get("Allow") if l_option: l_var_delimiter = ", " if l_option.find(", ") else "," l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["options-delimited"]) # Looking for matches l_matches = l_wordlist_instance.matches_by_value(l_var_delimiter) m_counters.inc(l_matches, l_action, l_weight, "options-delimiter", message="OPTION allow delimiter " + l_action + " # " + l_option) if "Public" in l_response.headers: # # Public response # =============== # l_option = l_response.headers.get("Public") if l_option: l_wordlist_instance = WordListLoader.get_wordlist_as_dict(Config.plugin_extra_config[l_wordlist]["options-public"]) # Looking for matches l_matches = l_wordlist_instance.matches_by_value(l_option) m_counters.inc(l_matches, l_action, l_weight, "options-public", message="Public response: " + l_action + " # " + l_option)
def http_simple_analyzer(main_url, update_status_func, number_of_entries=4):
    """Simple method to get fingerprint server info.

    Sends a fixed battery of raw HTTP probes and scores only the literal
    "Server" banner of each response (no wordlist analysis).

    :param main_url: Base url to test.
    :type main_url: str

    :param update_status_func: function used to update the status of the process
    :type update_status_func: function

    :param number_of_entries: number of results to return for most probable
        web servers detected.
    :type number_of_entries: int

    :return: a tuple as format: Web server family, Web server version,
        Web server complete description, related web servers (as a
        dict('SERVER_RELATED' : set(RELATED_NAMES))), others web server with
        their probabilities as a dict(CONCRETE_WEB_SERVER, PROBABILITY)
    """
    # Probe battery: per-probe wordlist name, scoring weight (0-5) and the
    # raw request line to send.
    m_actions = {
        'GET'       : { 'wordlist' : 'Wordlist_get',           'weight' : 1, 'protocol' : 'HTTP/1.1', 'method' : 'GET',     'payload' : '/' },
        'LONG_GET'  : { 'wordlist' : 'Wordlist_get_long',      'weight' : 1, 'protocol' : 'HTTP/1.1', 'method' : 'GET',     'payload' : '/%s' % ('a' * 200) },
        'NOT_FOUND' : { 'wordlist' : 'Wordlist_get_notfound',  'weight' : 2, 'protocol' : 'HTTP/1.1', 'method' : 'GET',     'payload' : '/404_NOFOUND__X02KAS' },
        'HEAD'      : { 'wordlist' : 'Wordlist_head',          'weight' : 3, 'protocol' : 'HTTP/1.1', 'method' : 'HEAD',    'payload' : '/' },
        'OPTIONS'   : { 'wordlist' : 'Wordlist_options',       'weight' : 2, 'protocol' : 'HTTP/1.1', 'method' : 'OPTIONS', 'payload' : '/' },
        'DELETE'    : { 'wordlist' : 'Wordlist_delete',        'weight' : 5, 'protocol' : 'HTTP/1.1', 'method' : 'DELETE',  'payload' : '/' },
        'TEST'      : { 'wordlist' : 'Wordlist_attack',        'weight' : 5, 'protocol' : 'HTTP/1.1', 'method' : 'TEST',    'payload' : '/' },
        'INVALID'   : { 'wordlist' : 'Wordlist_wrong_method',  'weight' : 5, 'protocol' : 'HTTP/9.8', 'method' : 'GET',     'payload' : '/' },
        'ATTACK'    : { 'wordlist' : 'Wordlist_wrong_version', 'weight' : 2, 'protocol' : 'HTTP/1.1', 'method' : 'GET',     'payload' : "/etc/passwd?format=%%%%&xss=\x22><script>alert('xss');</script>&traversal=../../&sql='%20OR%201;"}
    }

    m_d = ParsedURL(main_url)
    m_hostname = m_d.hostname
    m_port = m_d.port
    m_scheme = m_d.scheme
    m_debug = False  # Only for develop

    i = 0
    m_counters = HTTPAnalyzer()

    m_data_len = len(m_actions)  # Var used to update the status

    m_banners_counter = Counter()

    for l_action, v in m_actions.iteritems():
        if m_debug:
            print "###########"
        l_method = v["method"]
        l_payload = v["payload"]
        l_proto = v["protocol"]
        #l_wordlist = v["wordlist"]

        # Each type of probe hast different weight.
        #
        # Weights go from 0 - 5
        #
        l_weight = v["weight"]

        # Make the raw request
        l_raw_request = "%(method)s %(payload)s %(protocol)s\r\nHost: %(host)s\r\n\r\n" % (
            {
                "method"   : l_method,
                "payload"  : l_payload,
                "protocol" : l_proto,
                "host"     : m_hostname,
                "port"     : m_port
            }
        )
        if m_debug:
            print "REQUEST"
            print l_raw_request

        # Do the connection
        l_response = None
        try:
            m_raw_request = HTTP_Raw_Request(l_raw_request)
            discard_data(m_raw_request)
            l_response = HTTP.make_raw_request(
                host        = m_hostname,
                port        = m_port,
                proto       = m_scheme,
                raw_request = m_raw_request)
                #callback    = check_raw_response)
            if l_response:
                discard_data(l_response)
        except NetworkException, e:
            Logger.log_error_more_verbose("Server-Fingerprint plugin: No response for host '%s:%d' with method '%s'. Message: %s" % (m_hostname, m_port, l_method, str(e)))
            continue

        if not l_response:
            Logger.log_error_more_verbose("No response for host '%s:%d' with method '%s'." % (m_hostname, m_port, l_method))
            continue

        if m_debug:
            print "RESPONSE"
            print l_response.raw_headers

        # Update the status.
        # NOTE(review): i starts at 0 here (first update reports 0%), unlike
        # the full analyzer where it starts at 1 — confirm intended.
        update_status_func((float(i) * 100.0) / float(m_data_len))
        Logger.log_more_verbose("Making '%s' test." % l_method)
        i += 1

        # Analyze for each wordlist
        #
        # Store the server banner, weighted by the probe type.
        try:
            m_banners_counter[l_response.headers["Server"]] += l_weight
        except KeyError:
            pass

        # Score the banner; responses without a Server header contribute
        # nothing.
        l_server_name = None
        try:
            l_server_name = l_response.headers["Server"]
        except KeyError:
            continue

        m_counters.simple_inc(l_server_name, l_method, l_weight)
def __find_plugins(self, url, plugins_wordlist, update_func): """ Try to find available plugins :param url: base URL to test. :type url: str :param plugins_wordlist: path to wordlist with plugins lists. :type plugins_wordlist: str :param update_func: function to update plugin status. :type update_func: function :return: list of lists as format: list([PLUGIN_NAME, PLUGIN_URL, PLUGIN_INSTALLED_VERSION, PLUGIN_LAST_VERSION, [CVE1, CVE2...]]) :type: list(list()) """ results = [] urls_to_test = { "readme.txt": r"(Stable tag:[\svV]*)([0-9\.]+)", "README.txt": r"(Stable tag:[\svV]*)([0-9\.]+)", } # Generates the error page error_response = get_error_page(url).raw_data # Load plugins info plugins = [] plugins_append = plugins.append with open(plugins_wordlist, "rU") as f: for x in f: plugins_append(x.replace("\n", "")) # Calculate sizes total_plugins = len(plugins) # Load CSV info csv_info = csv.reader(plugins) # Process the URLs for i, plugin_row in enumerate(csv_info): # Plugin properties plugin_URI = plugin_row[0] plugin_name = plugin_row[1] plugin_last_version = plugin_row[2] plugin_CVEs = [] if plugin_row[3] == "" else plugin_row[3].split("|") # Update status update_func((float(i) * 100.0) / float(total_plugins)) # Make plugin URL partial_plugin_url = "%s/%s" % (url, "wp-content/plugins/%s" % plugin_URI) # Test each URL with possible plugin version info for target, regex in urls_to_test.iteritems(): plugin_url = "%s/%s" % (partial_plugin_url, target) # Try to get plugin p = None try: p = HTTP.get_url(plugin_url, use_cache=False) if p: discard_data(p) except Exception, e: Logger.log_error_more_verbose("Error while download: '%s': %s" % (plugin_url, str(e))) continue plugin_installed_version = None if p.status == "403": # Installed, but inaccesible plugin_installed_version = "Unknown" elif p.status == "200": # Check if page is and non-generic not found page with 404 code if get_diff_ratio(error_response, p.raw_response) < 0.52: # Find the version tmp_version = 
re.search(regex, p.raw_response) if tmp_version is not None: plugin_installed_version = tmp_version.group(2) # Store info if plugin_installed_version is not None: Logger.log("Discovered plugin: '%s (installed version: %s)' (latest version: %s)" % (plugin_name, plugin_installed_version, plugin_last_version)) results.append([ plugin_name, plugin_url, plugin_installed_version, plugin_last_version, plugin_CVEs ]) # Plugin found -> not more URL test for this plugin break
def __find_plugins(self, url, plugins_wordlist, update_func):
    """
    Try to find available plugins

    :param url: base URL to test.
    :type url: str

    :param plugins_wordlist: path to wordlist with plugins lists.
    :type plugins_wordlist: str

    :param update_func: function to update plugin status.
    :type update_func: function

    :return: list of lists as format:
        list([PLUGIN_NAME, PLUGIN_URL, PLUGIN_INSTALLED_VERSION, PLUGIN_LAST_VERSION, [CVE1, CVE2...]])
    :type: list(list())
    """
    results = []

    # Files that may leak the installed plugin version, and the regex that
    # extracts the version number from them.
    urls_to_test = {
        "readme.txt": r"(Stable tag:[\svV]*)([0-9\.]+)",
        "README.txt": r"(Stable tag:[\svV]*)([0-9\.]+)",
    }

    # Generates the error page
    error_response = get_error_page(url).raw_data

    # Load plugins info
    plugins = []
    plugins_append = plugins.append
    with open(plugins_wordlist, "rU") as f:
        for x in f:
            plugins_append(x.replace("\n", ""))

    # Calculate sizes
    # NOTE(review): if the wordlist is empty, the division by
    # float(total_plugins) below raises ZeroDivisionError.
    total_plugins = len(plugins)

    # Load CSV info
    csv_info = csv.reader(plugins)

    # Process the URLs
    for i, plugin_row in enumerate(csv_info):

        # Plugin properties.
        # NOTE(review): rows with fewer than 4 columns raise IndexError here.
        plugin_URI = plugin_row[0]
        plugin_name = plugin_row[1]
        plugin_last_version = plugin_row[2]
        plugin_CVEs = [] if plugin_row[3] == "" else plugin_row[3].split(
            "|")

        # Update status
        update_func((float(i) * 100.0) / float(total_plugins))

        # Make plugin URL
        partial_plugin_url = "%s/%s" % (url, "wp-content/plugins/%s" % plugin_URI)

        # Test each URL with possible plugin version info
        for target, regex in urls_to_test.iteritems():
            plugin_url = "%s/%s" % (partial_plugin_url, target)

            # Try to get plugin
            p = None
            try:
                p = HTTP.get_url(plugin_url, use_cache=False)
                if p:
                    discard_data(p)
            except Exception, e:
                Logger.log_error_more_verbose(
                    "Error while download: '%s': %s" % (plugin_url, str(e)))
                continue

            # NOTE(review): if get_url returned None without raising, the
            # p.status access below raises AttributeError — confirm upstream.
            plugin_installed_version = None

            if p.status == "403":  # Installed, but inaccesible
                plugin_installed_version = "Unknown"

            elif p.status == "200":
                # Check if page is and non-generic not found page with 404 code
                if get_diff_ratio(error_response, p.raw_response) < 0.52:
                    # Find the version
                    tmp_version = re.search(regex, p.raw_response)

                    if tmp_version is not None:
                        plugin_installed_version = tmp_version.group(2)

        # Store info
            if plugin_installed_version is not None:
                Logger.log(
                    "Discovered plugin: '%s (installed version: %s)' (latest version: %s)" %
                    (plugin_name, plugin_installed_version, plugin_last_version))
                results.append([
                    plugin_name,
                    plugin_url,
                    plugin_installed_version,
                    plugin_last_version,
                    plugin_CVEs
                ])

                # Plugin found -> not more URL test for this plugin
                break
    # NOTE(review): `results` is accumulated but never returned in this
    # chunk, although the docstring promises a list — confirm against the
    # full source file.
# If is a disallow URL, it must be suspicious if m_key.lower() == "disallow": m_discovered_suspicious.append(tmp_discovered) except Exception, e: continue # # Filter results # # Delete repeated m_discovered_urls = set(m_discovered_urls) # Generating error page m_error_page = generate_error_page_url(m_url_robots_txt) m_response_error_page = HTTP.get_url(m_error_page, callback=self.check_response) if m_response_error_page: m_return.append(m_response_error_page) # Analyze results match = {} m_analyzer = MatchingAnalyzer(m_response_error_page.data) m_total = len(m_discovered_urls) for m_step, l_url in enumerate(m_discovered_urls): # Update only odd iterations if m_step % 2: progress = (float(m_step * 100) / m_total) self.update_status(progress=progress) l_url = fix_url(l_url, m_url)
class OSFingerprinting(TestingPlugin):
    """
    Plugin to fingerprint the remote OS.
    """


    #----------------------------------------------------------------------
    def get_accepted_info(self):
        # This plugin accepts both bare IP addresses and base URLs.
        return [IP, BaseUrl]


    #----------------------------------------------------------------------
    def recv_info(self, info):
        """
        Main function for OS fingerprint. Get a domain or IP and return the
        fingerprint results.

        :param info: Folder URL.
        :type info: FolderUrl

        :return: OS Fingerprint.
        :rtype: OSFingerprint
        """

        #
        # Detection methods and their weights.
        #
        # The weight is a value between 1-5
        #
        FINGERPRINT_METHODS_OS_AND_VERSION = {
            'ttl': {
                'function': self.ttl_platform_detection,
                'weight': 2
            }
        }

        FUNCTIONS = None  # Fingerprint methods to run
        m_host = None
        is_windows = None

        if isinstance(info, IP):
            m_host = info.address
            FUNCTIONS = ['ttl']
        else:  # BaseUrl
            m_host = info.hostname
            FUNCTIONS = ['ttl']

            # Try to detect if remote system is a Windows
            m_windows_host = "%s://%s:%s" % (info.parsed_url.scheme, info.parsed_url.host, info.parsed_url.port)
            is_windows = self.is_URL_in_windows(m_windows_host)

        # Logging
        Logger.log_more_verbose(
            "Starting OS fingerprinting plugin for site: %s" % m_host)

        # Votes per detected OS, accumulated across all methods.
        m_counter = Counter()

        # Run functions
        for f in FUNCTIONS:
            l_function = FINGERPRINT_METHODS_OS_AND_VERSION[f]['function']
            ### For future use ###
            l_weight = FINGERPRINT_METHODS_OS_AND_VERSION[f]['weight']

            # Run
            results = l_function(m_host)

            if results:
                for l_r in results:
                    m_counter[l_r] += 1

        # Return value
        m_return = None

        #
        # Filter the results
        #
        if len(m_counter) > 0:
            # Fooking for a windows system
            if is_windows:  # If Windows is detected
                l_counter = Counter()

                # Extract windows systems
                # NOTE(review): below, counter keys are indexed as
                # key[0]/key[1] (family, version pairs), so comparing a key
                # for equality with the string "windows" may never match —
                # confirm against ttl_platform_detection's return format.
                for x, y in m_counter.iteritems():
                    if "windows" == x:
                        l_counter[x] += y

                # Replace the counter for the new
                m_counter = l_counter

            # Get most common systems
            # NOTE(review): if the windows filter above emptied the counter,
            # l_most_common is empty and the [0] access raises IndexError.
            l_most_common = m_counter.most_common(5)

            # First elemente will be the detected OS
            m_OS_family = l_most_common[0][0][0]
            m_OS_version = l_most_common[0][0][1]

            # Next 4 will be the 'others': probability of each runner-up,
            # normalized by the number of candidates and rounded to 2 digits.
            m_length = float(len(l_most_common))
            m_others = {
                "%s-%s" % (l_most_common[i][0][0], l_most_common[i][0][1]): float('{:.2f}'.format(l_most_common[i][1] / m_length))
                for i in xrange(1, len(l_most_common), 1)
            }

            # create the data
            m_return = OSFingerprint(m_OS_family, m_OS_version, others=m_others)

        elif is_windows is not None:
            if is_windows:
                # Windows system detected
                m_return = OSFingerprint("windows")
            else:
                # *NIX system detected
                m_return = OSFingerprint("unix_or_compatible")

        # If there is information, associate it with the resource
        if m_return:
            info.add_information(m_return)

        return m_return


    #----------------------------------------------------------------------
    #
    # Platform detection methods
    #
    #----------------------------------------------------------------------
    def is_URL_in_windows(self, main_url):
        """
        Detect if platform is Windows or \*NIX. To do this, get the first
        link, in scope, and does two resquest. If are the same response,
        then, platform are Windows. Else are \*NIX.

        :param main_url: URL of the main page of the site to test.
        :type main_url: str

        :returns: True, if the remote host is a Windows system. False is
            \*NIX or None if unknown.
        :rtype: bool
        """
        # Links containing any of these words are skipped: following a
        # logout link could invalidate the session for the rest of the audit.
        m_forbidden = (
            "logout",
            "logoff",
            "exit",
            "sigout",
            "signout",
        )

        # Get the main web page
        m_r = download(main_url, callback=self.check_download)
        if not m_r or not m_r.raw_data:
            return None
        discard_data(m_r)

        # Get the first link
        m_links = None
        try:
            if m_r.information_type == Information.INFORMATION_HTML:
                m_links = extract_from_html(m_r.raw_data, main_url)
            else:
                m_links = extract_from_text(m_r.raw_data, main_url)
        except TypeError, e:
            Logger.log_error_more_verbose("Plugin error: %s" % format_exc())
            return None

        if not m_links:
            return None

        # Get the first link of the page that's in scope of the audit
        m_first_link = None
        for u in m_links:
            if u in Config.audit_scope and not any(x in u for x in m_forbidden):
                m_first_link = u
                break

        if not m_first_link:
            return None

        # Now get two request to the links. One to the original URL and other
        # as upper URL.

        # Original
        m_response_orig = HTTP.get_url(
            m_first_link, callback=self.check_response)  # FIXME handle exceptions!
        discard_data(m_response_orig)
        # Uppercase
        m_response_upper = HTTP.get_url(
            m_first_link.upper(), callback=self.check_response)  # FIXME handle exceptions!
        discard_data(m_response_upper)

        # Compare them.
        # NOTE(review): comment below says 90% but the code compares
        # against 0.95 — confirm which threshold is intended.
        m_orig_data = m_response_orig.raw_response if m_response_orig else ""
        m_upper_data = m_response_upper.raw_response if m_response_upper else ""
        m_match_level = get_diff_ratio(m_orig_data, m_upper_data)

        # If the responses are equal by 90%, two URL are the same => Windows; else => *NIX
        m_return = None
        if m_match_level > 0.95:
            m_return = True
        else:
            m_return = False

        return m_return