def __init__(self):
    CrawlPlugin.__init__(self)

    # Internal variables
    self._exec = True
    self._already_analyzed = ScalableBloomFilter()

    # User configured parameters
    self._db_file = os.path.join('plugins', 'crawl', 'pykto',
                                 'scan_database.db')
    self._extra_db_file = os.path.join('plugins', 'crawl', 'pykto',
                                       'w3af_scan_database.db')
    self._cgi_dirs = ['/cgi-bin/']
    self._admin_dirs = ['/admin/', '/adm/']
    self._users = ['adm', 'bin', 'daemon', 'ftp', 'guest', 'listen', 'lp',
                   'mysql', 'noaccess', 'nobody', 'nobody4', 'nuucp',
                   'operator', 'root', 'smmsp', 'smtp', 'sshd', 'sys',
                   'test', 'unknown']
    self._nuke = ['/', '/postnuke/', '/postnuke/html/', '/modules/',
                  '/phpBB/', '/forum/']
    self._mutate_tests = False

def __init__(self):
    GrepPlugin.__init__(self)

    self._already_reported = ScalableBloomFilter()

    # Regex to split between words
    self._split_re = re.compile('[^\w]')

def __init__(self):
    CrawlPlugin.__init__(self)

    self._already_visited = ScalableBloomFilter()

    # User options
    self._fuzz_images = False
    self._max_digit_sections = 4

def __init__(self):
    InfrastructurePlugin.__init__(self)

    # Internal variables
    self._first_exec = True
    self._already_queried = ScalableBloomFilter()
    self._can_resolve_domain_names = False

def filtered_freq_generator(freq_list):
    already_tested = ScalableBloomFilter()

    for freq in freq_list:
        if freq not in already_tested:
            already_tested.add(freq)
            yield freq

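# A minimal usage sketch (not from the original source): the generator yields
# each item only once, no matter how often it appears in the input, because
# the bloom filter remembers everything already seen. Plain strings stand in
# for the fuzzable request objects the real code iterates over.
for freq in filtered_freq_generator(['/login', '/index', '/login']):
    print freq   # prints '/login' and '/index', each exactly once
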
def __init__(self):
    GrepPlugin.__init__(self)

    # Internal variables
    self._already_inspected = ScalableBloomFilter()
    self._autocomplete_forms_xpath = etree.XPath(AUTOCOMPLETE_FORMS_XPATH)
    self._pwd_input_xpath = etree.XPath(PWD_INPUT_XPATH)
    self._text_input_xpath = etree.XPath(TEXT_INPUT_XPATH)

def __init__(self):
    InfrastructurePlugin.__init__(self)

    # Internal variables
    self._already_tested = ScalableBloomFilter()

    # On real web applications, if we can't trigger an error in the first
    # MAX_TESTS tests, it simply won't happen and we have to stop testing.
    self.MAX_TESTS = 25

def __init__(self):
    CrawlPlugin.__init__(self)

    # Internal variables
    self._already_crawled = ScalableBloomFilter()
    self._already_verified = ScalableBloomFilter()

    # User configured parameters
    self._max_depth = 3

class feeds(GrepPlugin):
    '''
    Grep every page and find rss, atom and opml feeds.

    :author: Andres Riancho ([email protected])
    '''
    def __init__(self):
        GrepPlugin.__init__(self)
        # Note: a <feed> root element is an Atom feed, not an OPML one
        self._feed_types = {'rss': 'RSS',    # <rss version="...">
                            'feed': 'Atom',  # <feed version="...">
                            'opml': 'OPML'   # <opml version="...">
                            }
        self._already_inspected = ScalableBloomFilter()

        # Compile the XPATH
        self._tag_xpath = etree.XPath('//rss | //feed | //opml')

    def grep(self, request, response):
        '''
        Plugin entry point, find feeds.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        '''
        dom = response.get_dom()
        uri = response.get_uri()

        # In some strange cases, we fail to normalize the document
        if uri not in self._already_inspected and dom is not None:
            self._already_inspected.add(uri)

            # Find all feed tags
            element_list = self._tag_xpath(dom)

            for element in element_list:
                feed_tag = element.tag
                feed_type = self._feed_types[feed_tag.lower()]
                version = element.attrib.get('version', 'unknown')

                fmt = 'The URL "%s" is a %s version %s feed.'
                desc = fmt % (uri, feed_type, version)
                i = Info('Content feed resource', desc, response.id,
                         self.get_name())
                i.set_uri(uri)
                i.add_to_highlight(feed_type)

                self.kb_append_uniq(self, 'feeds', i, 'URL')

    def get_long_desc(self):
        '''
        :return: A DETAILED description of the plugin functions and features.
        '''
        return '''

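# A minimal sketch (not from the original source) of the feed XPath and
# version lookup above, run over a tiny non-namespaced Atom document,
# assuming lxml.etree is available:
from lxml import etree

tag_xpath = etree.XPath('//rss | //feed | //opml')
dom = etree.fromstring('<feed version="0.3"><title>demo</title></feed>')

for element in tag_xpath(dom):
    print element.tag, element.attrib.get('version', 'unknown')
    # feed 0.3
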
def __init__(self):
    CrawlPlugin.__init__(self)

    # Internal variables
    self._analyzed_dirs = ScalableBloomFilter()

    # -rw-r--r-- 1 andresr w3af 8139 Apr 12 13:23 foo.zip
    regex_str = '[a-z-]{10}\s*\d+\s*(.*?)\s+(.*?)\s+\d+\s+\w+\s+\d+\s+[0-9:]{4,5}\s+(.*)'
    self._listing_parser_re = re.compile(regex_str)

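# A minimal sketch (not from the original source) of what the listing parser
# extracts from one line of an Apache-style directory index, using the sample
# line quoted in the comment above; the three groups are the owner, the group
# and the filename:
import re

regex_str = '[a-z-]{10}\s*\d+\s*(.*?)\s+(.*?)\s+\d+\s+\w+\s+\d+\s+[0-9:]{4,5}\s+(.*)'
listing_parser_re = re.compile(regex_str)

line = '-rw-r--r-- 1 andresr w3af 8139 Apr 12 13:23 foo.zip'
listing_parser_re.search(line).groups()   # ('andresr', 'w3af', 'foo.zip')
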
class svn_users(GrepPlugin):
    '''
    Grep every response for users of the versioning system.

    :author: Andres Riancho ([email protected])
    '''
    def __init__(self):
        GrepPlugin.__init__(self)

        self._already_inspected = ScalableBloomFilter()

        # Add the regex to match something like this:
        #
        #   $Id: lzio.c,v 1.24 2003/03/20 16:00:56 roberto Exp $
        #   $Id: file name, version, timestamp, creator Exp $
        #
        regex = '\$.{1,12}: .*? .*? \d{4}[-/]\d{1,2}[-/]\d{1,2}'
        regex += ' \d{1,2}:\d{1,2}:\d{1,2}.*? (.*?) (Exp )?\$'
        self._regex_list = [re.compile(regex), ]

    def grep(self, request, response):
        '''
        Plugin entry point.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None, all results are saved in the kb.
        '''
        uri = response.get_uri()
        if response.is_text_or_html() and uri not in self._already_inspected:
            # Don't repeat URLs
            self._already_inspected.add(uri)

            for regex in self._regex_list:
                for m in regex.findall(response.get_body()):
                    user = m[0]

                    desc = 'The URL: "%s" contains a SVN versioning signature'\
                           ' with the username "%s".'
                    desc = desc % (uri, user)

                    v = Vuln('SVN user disclosure vulnerability', desc,
                             severity.LOW, response.id, self.get_name())
                    v.set_uri(uri)
                    v['user'] = user
                    v.add_to_highlight(user)

                    self.kb_append_uniq(self, 'users', v, 'URL')

    def get_long_desc(self):
        '''
        :return: A DETAILED description of the plugin functions and features.
        '''
        return '''

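# A minimal sketch (not from the original source) of what the $Id$ regex
# captures, using the sample signature quoted in the comment above.
# findall() returns one tuple per match because the pattern has two groups;
# m[0] is the committer name the plugin reports:
import re

regex = '\$.{1,12}: .*? .*? \d{4}[-/]\d{1,2}[-/]\d{1,2}'
regex += ' \d{1,2}:\d{1,2}:\d{1,2}.*? (.*?) (Exp )?\$'

body = '/* $Id: lzio.c,v 1.24 2003/03/20 16:00:56 roberto Exp $ */'
re.compile(regex).findall(body)   # [('roberto', 'Exp ')]
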
def __init__(self):
    GrepPlugin.__init__(self)

    self._already_analyzed = ScalableBloomFilter()
    self._properly_configured = None
    self._config_check_lock = threading.RLock()

    # User configured settings
    # Default for ubuntu installation
    self._clamd_socket = '/var/run/clamav/clamd.ctl'

class objects(GrepPlugin):
    '''
    Grep every page for objects and applets.

    :author: Andres Riancho ([email protected])
    '''
    def __init__(self):
        GrepPlugin.__init__(self)

        # Compile the XPATH
        self._tag_xpath = etree.XPath('//object | //applet')
        self._tag_names = ('object', 'applet')
        self._already_analyzed = ScalableBloomFilter()

    def grep(self, request, response):
        '''
        Plugin entry point. Parse the object tags.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        '''
        url = response.get_url()
        dom = response.get_dom()

        if response.is_text_or_html() and dom is not None \
        and url not in self._already_analyzed:

            self._already_analyzed.add(url)

            elem_list = self._tag_xpath(dom)
            for element in elem_list:
                tag_name = element.tag

                desc = 'The URL: "%s" has an "%s" tag. We recommend you'\
                       ' download the client side code and analyze it'\
                       ' manually.'
                desc = desc % (response.get_uri(), tag_name)

                i = Info('Browser plugin content', desc, response.id,
                         self.get_name())
                i.set_url(url)
                i.add_to_highlight(tag_name)

                self.kb_append_uniq(self, tag_name, i, 'URL')

    def get_long_desc(self):
        '''
        :return: A DETAILED description of the plugin functions and features.
        '''
        return '''

def __init__(self):
    GrepPlugin.__init__(self)

    # Internal variables
    self._already_inspected = ScalableBloomFilter()

    # Create the regular expression to search for AJAX
    ajax_regex_string = '(XMLHttpRequest|eval\(|ActiveXObject|'
    ajax_regex_string += 'Msxml2\.XMLHTTP|Microsoft\.XMLHTTP)'
    self._ajax_regex_re = re.compile(ajax_regex_string, re.IGNORECASE)

    # Compile the XPATH
    self._script_xpath = etree.XPath('.//script')

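# A minimal sketch (not from the original source) of the AJAX regex applied
# to a script body; the single capture group holds the API name that
# triggered the match:
import re

ajax_regex_string = '(XMLHttpRequest|eval\(|ActiveXObject|'
ajax_regex_string += 'Msxml2\.XMLHTTP|Microsoft\.XMLHTTP)'
ajax_regex_re = re.compile(ajax_regex_string, re.IGNORECASE)

script = 'var xhr = new XMLHttpRequest();'
ajax_regex_re.search(script).group(1)   # 'XMLHttpRequest'
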
class file_upload(GrepPlugin):
    '''
    Find HTML forms with file upload capabilities.

    :author: Andres Riancho ([email protected])
    '''
    def __init__(self):
        GrepPlugin.__init__(self)

        # Internal variables
        self._already_inspected = ScalableBloomFilter()
        self._file_input_xpath = etree.XPath(FILE_INPUT_XPATH)

    def grep(self, request, response):
        '''
        Plugin entry point, verify if the HTML has a form with file uploads.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        '''
        url = response.get_url()

        if response.is_text_or_html() and url not in self._already_inspected:
            self._already_inspected.add(url)

            dom = response.get_dom()

            # In some strange cases, we fail to normalize the document
            if dom is not None:

                # Loop through file input tags
                for input_file in self._file_input_xpath(dom):
                    msg = 'The URL: "%s" has a form with file upload'\
                          ' capabilities.'
                    msg = msg % url

                    i = Info('File upload form', msg, response.id,
                             self.get_name())
                    i.set_url(url)
                    to_highlight = etree.tostring(input_file)
                    i.add_to_highlight(to_highlight)

                    self.kb_append_uniq(self, 'file_upload', i, 'URL')

    def get_long_desc(self):
        '''
        :return: A DETAILED description of the plugin functions and features.
        '''
        return '''

def __init__(self):
    InfrastructurePlugin.__init__(self)

    # Internal variables
    self._exec = True
    self._already_tested = ScalableBloomFilter()

    # Methods
    self._supported_methods = self.DAV_METHODS | self.COMMON_METHODS | \
                              self.UNCOMMON_METHODS | self.PROPOSED_METHODS | \
                              self.EXTRA_METHODS | self.VERSION_CONTROL

    # User configured variables
    self._exec_one_time = True
    self._report_dav_only = True

def __init__(self):
    CrawlPlugin.__init__(self)

    # Internal variables
    self._analyzed_dirs = ScalableBloomFilter()
    self._analyzed_filenames = ScalableBloomFilter()

    self._dvcs = {}
    self._dvcs['git repository'] = {}
    self._dvcs['git ignore'] = {}
    self._dvcs['hg repository'] = {}
    self._dvcs['hg ignore'] = {}
    self._dvcs['bzr repository'] = {}
    self._dvcs['bzr ignore'] = {}
    self._dvcs['svn repository'] = {}
    self._dvcs['svn ignore'] = {}
    self._dvcs['cvs repository'] = {}
    self._dvcs['cvs ignore'] = {}

    self._dvcs['git repository']['filename'] = '.git/index'
    self._dvcs['git repository']['function'] = self.git_index

    self._dvcs['git ignore']['filename'] = '.gitignore'
    self._dvcs['git ignore']['function'] = self.ignore_file

    self._dvcs['hg repository']['filename'] = '.hg/dirstate'
    self._dvcs['hg repository']['function'] = self.hg_dirstate

    self._dvcs['hg ignore']['filename'] = '.hgignore'
    self._dvcs['hg ignore']['function'] = self.ignore_file

    self._dvcs['bzr repository']['filename'] = '.bzr/checkout/dirstate'
    self._dvcs['bzr repository']['function'] = self.bzr_checkout_dirstate

    self._dvcs['bzr ignore']['filename'] = '.bzrignore'
    self._dvcs['bzr ignore']['function'] = self.ignore_file

    self._dvcs['svn repository']['filename'] = '.svn/entries'
    self._dvcs['svn repository']['function'] = self.svn_entries

    self._dvcs['svn ignore']['filename'] = '.svnignore'
    self._dvcs['svn ignore']['function'] = self.ignore_file

    self._dvcs['cvs repository']['filename'] = 'CVS/Entries'
    self._dvcs['cvs repository']['function'] = self.cvs_entries

    self._dvcs['cvs ignore']['filename'] = '.cvsignore'
    self._dvcs['cvs ignore']['function'] = self.ignore_file

def __init__(self):
    CrawlPlugin.__init__(self)

    # User configured parameters
    self._wordlist = os.path.join('plugins', 'crawl', 'content_negotiation',
                                  'common_filenames.db')

    # Internal variables
    self._already_tested_dir = ScalableBloomFilter()
    self._already_tested_resource = ScalableBloomFilter()
    self._content_negotiation_enabled = None
    self._to_bruteforce = Queue.Queue()
    # I want to try 3 times to see if the remote host is vulnerable,
    # detection is not that accurate!
    self._tries_left = 3

def __init__(self):
    GrepPlugin.__init__(self)

    # For more info regarding this regular expression, please see:
    # https://sourceforge.net/mailarchive/forum.php?thread_name=1955593874.20090122023644%40mlists.olympos.org&forum_name=w3af-develop
    regex_str = '(?<!\.)(?<!\d)(?:(?:10|127)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
    regex_str += '|192\.168|169\.254|172\.0?(?:1[6-9]|2[0-9]|3[01]))'
    regex_str += '(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){2}(?!\d)(?!\.)'
    self._private_ip_address = re.compile(regex_str)
    self._regex_list = [self._private_ip_address, ]

    self._already_inspected = ScalableBloomFilter()
    self._ignore_if_match = None

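# A minimal sketch (not from the original source) of what the regex above
# matches: private (RFC 1918), loopback and link-local addresses are
# reported, public addresses are not. All groups are non-capturing, so
# findall() returns the matched IP strings directly:
import re

regex_str = '(?<!\.)(?<!\d)(?:(?:10|127)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
regex_str += '|192\.168|169\.254|172\.0?(?:1[6-9]|2[0-9]|3[01]))'
regex_str += '(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){2}(?!\d)(?!\.)'
private_ip_address = re.compile(regex_str)

body = 'Connection from 10.1.2.3 failed, 8.8.8.8 responded'
private_ip_address.findall(body)   # ['10.1.2.3']
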
def __init__(self):
    GrepPlugin.__init__(self)

    vs_regex = r'<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value=".*?" />'
    self._viewstate = re.compile(vs_regex, re.IGNORECASE | re.DOTALL)

    ev_regex = r'<input type="hidden" name="__EVENTVALIDATION" '
    ev_regex += 'id="__EVENTVALIDATION" value=".*?" />'
    self._eventvalidation = re.compile(ev_regex, re.IGNORECASE | re.DOTALL)

    encryptedvs_regex = r'<input type="hidden" name="__VIEWSTATEENCRYPTED" '
    encryptedvs_regex += 'id="__VIEWSTATEENCRYPTED" value=".*?" />'
    self._encryptedVs = re.compile(encryptedvs_regex,
                                   re.IGNORECASE | re.DOTALL)

    self._already_analyzed = ScalableBloomFilter()

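# A minimal sketch (not from the original source) of the __VIEWSTATE
# detection regex against a sample ASP.NET hidden field:
import re

vs_regex = r'<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value=".*?" />'
viewstate = re.compile(vs_regex, re.IGNORECASE | re.DOTALL)

body = '<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value="dDwxMjM0" />'
viewstate.search(body) is not None   # True
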
class frontpage_version(InfrastructurePlugin):
    '''
    Search for the FrontPage Server Info file and, if it is found, determine
    the server's version.

    :author: Viktor Gazdag ( [email protected] )
    '''
    VERSION_RE = re.compile('FPVersion="(.*?)"', re.IGNORECASE)
    ADMIN_URL_RE = re.compile('FPAdminScriptUrl="(.*?)"', re.IGNORECASE)
    AUTHOR_URL_RE = re.compile('FPAuthorScriptUrl="(.*?)"', re.IGNORECASE)

    def __init__(self):
        InfrastructurePlugin.__init__(self)

        # Internal variables
        self._analyzed_dirs = ScalableBloomFilter()

    @runonce(exc_class=w3afRunOnce)
    def discover(self, fuzzable_request):
        '''
        For every directory, fetch a list of files and analyze the response.

        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        '''
        for domain_path in fuzzable_request.get_url().get_directories():

            if domain_path not in self._analyzed_dirs:

                # Save the domain_path so I know I'm not working in vain
                self._analyzed_dirs.add(domain_path)

                # Request the file
                frontpage_info_url = domain_path.url_join("_vti_inf.html")
                try:
                    response = self._uri_opener.GET(frontpage_info_url,
                                                    cache=True)
                except w3afException, w3:
                    msg = 'Failed to GET Frontpage Server _vti_inf.html file: "'
                    msg += frontpage_info_url + '". Exception: "' + str(w3) + '".'
                    om.out.debug(msg)
                else:
                    # Check if it's a Frontpage Info file
                    if not is_404(response):
                        for fr in self._create_fuzzable_requests(response):
                            self.output_queue.put(fr)
                        self._analyze_response(response)

class blank_body(GrepPlugin):
    '''
    Find responses with empty body.

    :author: Andres Riancho ([email protected])
    '''
    METHODS = ('GET', 'POST')
    HTTP_CODES = (401, 304, 302, 301, 204)

    def __init__(self):
        GrepPlugin.__init__(self)
        self._already_reported = ScalableBloomFilter()

    def grep(self, request, response):
        '''
        Plugin entry point, find the blank bodies and report them.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        '''
        if response.get_body() == '' and request.get_method() in self.METHODS\
        and response.get_code() not in self.HTTP_CODES\
        and 'location' not in response.get_lower_case_headers()\
        and response.get_url() not in self._already_reported:

            # Report this information only once
            self._already_reported.add(response.get_url())

            desc = 'The URL: "%s" returned an empty body, this could indicate'\
                   ' an application error.'
            desc = desc % response.get_url()

            i = Info('Blank http response body', desc, response.id,
                     self.get_name())
            i.set_url(response.get_url())

            self.kb_append(self, 'blank_body', i)

    def get_long_desc(self):
        '''
        :return: A DETAILED description of the plugin functions and features.
        '''
        return '''

class oracle(GrepPlugin):
    '''
    Find Oracle applications.

    :author: Andres Riancho ([email protected])
    '''
    OAS_TAGS = ['<!-- Created by Oracle ', ]

    def __init__(self):
        GrepPlugin.__init__(self)
        self._already_analyzed = ScalableBloomFilter()

    def grep(self, request, response):
        '''
        Plugin entry point. Grep for oracle applications.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        '''
        url = response.get_url()
        if response.is_text_or_html() and url not in self._already_analyzed:
            self._already_analyzed.add(url)

            for msg in self.OAS_TAGS:
                if msg in response:
                    desc = 'The URL: "%s" was created using Oracle'\
                           ' Application Server.'
                    desc = desc % response.get_url()
                    i = Info('Oracle application server', desc,
                             response.id, self.get_name())
                    i.set_url(url)
                    i.add_to_highlight(msg)

                    self.kb_append(self, 'oracle', i)

    def get_long_desc(self):
        '''
        :return: A DETAILED description of the plugin functions and features.
        '''
        return '''

def __init__(self):
    #
    # Set the opener, I need it to perform some tests and gain
    # knowledge about the server's 404 response bodies.
    #
    self._uri_opener = None
    self._worker_pool = None

    #
    # Internal variables
    #
    self._already_analyzed = False
    self._404_bodies = []
    self._lock = thread.allocate_lock()
    self._fingerprinted_paths = ScalableBloomFilter()
    self._directory_uses_404_codes = ScalableBloomFilter()

    # It is OK to store 200 here, I'm only storing path+filename as the key,
    # and bool as the value.
    self.is_404_LRU = LRU(200)

def __init__(self): GrepPlugin.__init__(self) # Internal variables self._already_inspected = ScalableBloomFilter() # Create the regular expression to search for AJAX ajax_regex_string = "(XMLHttpRequest|eval\(|ActiveXObject|Msxml2\.XMLHTTP|" ajax_regex_string += "ActiveXObject|Microsoft\.XMLHTTP)" self._ajax_regex_re = re.compile(ajax_regex_string, re.IGNORECASE) # Compile the XPATH self._script_xpath = etree.XPath(".//script")
def __init__(self):
    CrawlPlugin.__init__(self)

    # Internal variables
    self._compiled_ignore_re = None
    self._compiled_follow_re = None
    self._broken_links = DiskSet()
    self._first_run = True
    self._known_variants = VariantDB()
    self._already_filled_form = ScalableBloomFilter()

    # User configured variables
    self._ignore_regex = ''
    self._follow_regex = '.*'
    self._only_forward = False
    self._compile_re()

def __init__(self, crawl_infrastructure_plugins, w3af_core,
             max_discovery_time):
    '''
    :param crawl_infrastructure_plugins: Instances of crawl_infrastructure
                                         plugins in a list
    :param w3af_core: The w3af core that we'll use for status reporting
    :param max_discovery_time: The max time (in seconds) to use for the
                               discovery phase
    '''
    super(crawl_infrastructure, self).__init__(crawl_infrastructure_plugins,
                                               w3af_core,
                                               thread_name='CrawlInfra')
    self._max_discovery_time = int(max_discovery_time)

    # For filtering fuzzable requests found by plugins:
    self._variant_db = VariantDB()
    self._already_seen_urls = ScalableBloomFilter()

    self._disabled_plugins = set()
    self._running = True
    self._report_max_time = True

def __init__(self): CrawlPlugin.__init__(self) # internal variables self._exec = True self._already_analyzed = ScalableBloomFilter() # User configured parameters self._db_file = os.path.join("plugins", "crawl", "pykto", "scan_database.db") self._extra_db_file = os.path.join("plugins", "crawl", "pykto", "w3af_scan_database.db") self._cgi_dirs = ["/cgi-bin/"] self._admin_dirs = ["/admin/", "/adm/"] self._users = [ "adm", "bin", "daemon", "ftp", "guest", "listen", "lp", "mysql", "noaccess", "nobody", "nobody4", "nuucp", "operator", "root", "smmsp", "smtp", "sshd", "sys", "test", "unknown", ] self._nuke = ["/", "/postnuke/", "/postnuke/html/", "/modules/", "/phpBB/", "/forum/"] self._mutate_tests = False
class cross_domain_js(GrepPlugin):
    '''
    Find script tags with src attributes that point to a different domain.

    :author: Andres Riancho ([email protected])
    '''
    def __init__(self):
        GrepPlugin.__init__(self)

        # Internal variables
        self._already_inspected = ScalableBloomFilter()
        self._script_src_xpath = etree.XPath(SCRIPT_SRC_XPATH)

    def grep(self, request, response):
        '''
        Plugin entry point, verify if the HTML has script tags with a src
        that points to a different domain.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        :return: None
        '''
        url = response.get_url()

        if response.is_text_or_html() and url not in self._already_inspected:
            self._already_inspected.add(url)

            dom = response.get_dom()

            # In some strange cases, we fail to normalize the document
            if dom is None:
                return

            # Loop through script src tags
            for script_src_tag in self._script_src_xpath(dom):

                # This should always be False due to the XPATH we're using,
                # but you never know...
                if 'src' not in script_src_tag.attrib:
                    continue

                script_src = script_src_tag.attrib['src']
                script_full_url = response.get_url().url_join(script_src)
                script_domain = script_full_url.get_domain()

                if script_domain != response.get_url().get_domain():
                    desc = 'The URL: "%s" has a script tag with a source'\
                           ' that points to a third party site ("%s").'\
                           ' This practice is not recommended as the'\
                           ' security of the current site is being'\
                           ' delegated to that external entity.'
                    desc = desc % (url, script_domain)

                    i = Info('Cross-domain javascript source', desc,
                             response.id, self.get_name())
                    i.set_url(url)
                    to_highlight = etree.tostring(script_src_tag)
                    i.add_to_highlight(to_highlight)

                    self.kb_append_uniq(self, 'cross_domain_js', i, 'URL')

    def get_long_desc(self):
        '''
        :return: A DETAILED description of the plugin functions and features.
        '''
        return '''

class dot_net_event_validation(GrepPlugin):
    '''
    Grep every page and identify the ones that have viewstate and don't have
    event validation.

    :author: Andres Riancho ([email protected])
    '''
    def __init__(self):
        GrepPlugin.__init__(self)

        vs_regex = r'<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value=".*?" />'
        self._viewstate = re.compile(vs_regex, re.IGNORECASE | re.DOTALL)

        ev_regex = r'<input type="hidden" name="__EVENTVALIDATION" '
        ev_regex += 'id="__EVENTVALIDATION" value=".*?" />'
        self._eventvalidation = re.compile(ev_regex, re.IGNORECASE | re.DOTALL)

        encryptedvs_regex = r'<input type="hidden" name="__VIEWSTATEENCRYPTED" '
        encryptedvs_regex += 'id="__VIEWSTATEENCRYPTED" value=".*?" />'
        self._encryptedVs = re.compile(encryptedvs_regex,
                                       re.IGNORECASE | re.DOTALL)

        self._already_analyzed = ScalableBloomFilter()

    def grep(self, request, response):
        '''
        If I find __VIEWSTATE and no __EVENTVALIDATION => vuln.

        :param request: The HTTP request object.
        :param response: The HTTP response object
        '''
        if not response.is_text_or_html():
            return

        # First verify that we haven't analyzed this URI yet
        if request.get_url() in self._already_analyzed:
            return

        self._already_analyzed.add(request.get_url())

        res = self._viewstate.search(response.get_body())
        if res:
            # I have __viewstate! Verify if event validation is enabled
            if not self._eventvalidation.search(response.get_body()):
                desc = 'The URL: "%s" has .NET Event Validation disabled.'\
                       ' This programming/configuration error should be'\
                       ' manually verified.'
                desc = desc % response.get_url()
                i = Info('.NET Event Validation is disabled', desc,
                         response.id, self.get_name())
                i.set_url(response.get_url())
                i.add_to_highlight(res.group())

                self.kb_append(self, 'dot_net_event_validation', i)

            if not self._encryptedVs.search(response.get_body()):
                # Nice! We can decode the viewstate! =)
                desc = 'The URL: "%s" has .NET ViewState encryption disabled.'\
                       ' This programming/configuration error could be'\
                       ' exploited to decode the viewstate contents.'
                desc = desc % response.get_url()
                i = Info('.NET ViewState encryption is disabled', desc,
                         response.id, self.get_name())
                i.set_url(response.get_url())

                self.kb_append(self, 'dot_net_event_validation', i)

    def get_long_desc(self):
        '''
        :return: A DETAILED description of the plugin functions and features.
        '''
        return '''