def parse_status_codes(self, raw_status_codes):
    """Parse a comma-separated list of status codes and status-code ranges.

    Accepts tokens like ``"200"`` or ``"200-299"`` (ranges are delegated to
    the project helper ``get_range``).  Returns a de-duplicated list of int
    status codes via ``uniq``.  On an unparsable token, prints an error and
    terminates the process with ``exit(1)``.

    :param raw_status_codes: comma-separated codes/ranges, possibly empty/None
    :return: de-duplicated list of status codes ([] for empty input)
    """
    # Empty or missing input means "no status-code filter", not a parse
    # error (consistent with the newer parse_status_codes variant in this
    # file; without this guard, "".split(",") -> [""] -> int("") -> exit(1)).
    if not raw_status_codes:
        return []
    status_codes = []
    for status_code in raw_status_codes.split(","):
        try:
            if "-" in status_code:
                # Range token, e.g. "400-499"; expansion is done by get_range.
                status_codes.extend(get_range(status_code))
            else:
                status_codes.append(int(status_code.strip()))
        except ValueError:
            print("Invalid status code or status code range: {0}".format(status_code))
            exit(1)
    return uniq(status_codes)
def parse_status_codes(self, str_):
    """Parse comma-separated status codes and inclusive ranges.

    Each token is either a single code ("404") or an inclusive range
    ("500-599").  Returns a de-duplicated list via ``uniq``; empty/None
    input yields [].  An unparsable token prints an error and exits.
    """
    if not str_:
        return []
    parsed = []
    for token in str_.split(","):
        try:
            if "-" not in token:
                # Plain single status code.
                parsed.append(int(token.strip()))
            else:
                # Inclusive range: "low-high" -> low..high.
                low, high = token.strip().split("-")
                parsed.extend(range(int(low), int(high) + 1))
        except ValueError:
            # Covers both bad ints and malformed ranges (wrong unpack count).
            print("Invalid status code or status code range: {0}".format(
                token))
            exit(1)
    return uniq(parsed)
def generate(self):
    """Build the final wordlist entries from the configured dictionary files.

    Reads every line of every file in ``self.dictionary_files``, applies
    filtering/expansion options (%EXT% keyword substitution, forced
    extensions, excluded extensions, prefixes/suffixes, casing) and stores
    the outcome in ``self.entries`` (a generator for the casing modes, a
    tuple otherwise).
    """
    # Compile the extension-keyword pattern once, case-insensitively.
    # EXTENSION_KEY is defined elsewhere in the project — presumably the
    # literal "%ext%" given the line.lower() check below; confirm.
    reext = re.compile(EXTENSION_KEY, re.IGNORECASE)
    result = []
    # Enable to use multiple dictionaries at once
    for dict_file in self.dictionary_files:
        for line in uniq(dict_file.get_lines()):
            # Skip comments
            if line.startswith("#"):
                continue
            # Paths are stored without a leading slash.
            if line.startswith("/"):
                line = line[1:]
            # --no-extension: drop everything after the first "." while
            # keeping the first character, so a leading dot (hidden file)
            # survives the split.
            if self._no_extension:
                line = line[0] + line[1:].split(".")[0]
            # Skip dummy paths
            if line == ".":
                continue
            # Skip if the path contains excluded extensions
            if self._exclude_extensions and (any(
                    ("." + extension in line for extension in self._exclude_extensions))):
                continue
            # Classic dirsearch wordlist processing (with %EXT% keyword)
            if EXTENSION_KEY in line.lower():
                for extension in self._extensions:
                    newline = reext.sub(extension, line)
                    result.append(newline)
            # If forced extensions is used and the path is not a directory ... (terminated by /)
            # process line like a forced extension.
            elif self._force_extensions and not line.rstrip().endswith(
                    "/") and "." not in line:
                for extension in self._extensions:
                    result.append(line + "." + extension)
                result.append(line)
                result.append(line + "/")
            # Append line unmodified.
            else:
                # NOTE(review): this branch reads self.extensions while the
                # rest of the method uses self._extensions — confirm both
                # attributes exist and which one is intended here.
                if not self._only_selected or any([
                    line.endswith("." + extension)
                    for extension in self.extensions
                ]):
                    result.append(line)
    # Re-add dictionary with prefixes
    # (the comprehension is fully built before extend() mutates result)
    result.extend([
        pref + path
        for path in result
        for pref in self._prefixes
        if not path.startswith(pref)
    ])
    # Re-add dictionary with suffixes
    # (runs after the prefix pass, so prefixed entries are suffixed too;
    # directory-style paths ending in "/" are left untouched)
    result.extend([
        path + suff
        for path in result
        for suff in self._suffixes
        if not path.endswith(("/", suff))
    ])
    # Apply the casing options; the genexp's source uniq(result) is
    # evaluated eagerly at creation time, so the `del result` below only
    # unbinds the local name and is safe.
    if self.lowercase:
        self.entries = (entry.lower() for entry in uniq(result))
    elif self.uppercase:
        self.entries = (entry.upper() for entry in uniq(result))
    elif self.capitalization:
        self.entries = (entry.capitalize() for entry in uniq(result))
    else:
        self.entries = tuple(uniq(result))
    del result
def __init__(self, script_path):
    """Parse CLI arguments / config and populate all scan options.

    Validates every option as it goes; on invalid input it prints a
    message and terminates the process (exit(1), or exit(0) for the
    'banner.txt' special case).

    :param script_path: path to the running script, stored for later use
    """
    self.script_path = script_path
    self.parse_config()
    options = self.parse_arguments()
    self.quiet = options.quiet
    self.full_url = options.full_url
    self.url_list = []
    self.raw_file = None
    # --- Target selection: exactly one of -u / -l / --cidr / stdin ---
    if options.url:
        self.url_list = [options.url]
    elif options.url_list:
        # access_file presumably validates readability and returns a file
        # wrapper — confirm against its definition.
        file = self.access_file(options.url_list, "file contains URLs")
        self.url_list = list(file.get_lines())
    elif options.cidr:
        self.url_list = iprange(options.cidr)
    elif options.stdin_urls:
        self.url_list = sys.stdin.read().splitlines()
    if options.raw_file:
        # Validate the raw-request file; a raw file replaces URL targets.
        self.access_file(options.raw_file, "file with raw request")
        self.raw_file = options.raw_file
    elif not len(self.url_list):
        print("URL target is missing, try using -u <url>")
        exit(1)
    self.url_list = uniq(self.url_list)
    if not options.extensions and not options.no_extension:
        print("WARNING: No extension was specified!")
    # --no-extension implies an empty extension list.
    if options.no_extension:
        options.extensions = str()
    # Validate every wordlist file up front.
    for dict_file in options.wordlist.split(","):
        self.access_file(dict_file, "wordlist")
    # --- Proxy configuration ---
    # NOTE(review): when proxy_list is set, self.proxy is never assigned
    # (and self.proxylist only exists in that branch) — confirm downstream
    # code tolerates the missing attributes.
    if options.proxy_list:
        file = self.access_file(options.proxy_list, "proxylist file")
        self.proxylist = file.read().splitlines()
        options.request_by_hostname = True
    elif options.proxy:
        self.proxy = options.proxy
        options.request_by_hostname = True
    else:
        self.proxy = None
    if options.replay_proxy:
        self.replay_proxy = options.replay_proxy
        options.request_by_hostname = True
    else:
        self.replay_proxy = None
    # --- Headers: from a file and/or inline; merged into one dict ---
    self.headers = {}
    if options.header_list:
        try:
            file = self.access_file(options.header_list, "header list file")
            self.headers.update(
                HeadersParser(file.read()).headers
            )
        except Exception as e:
            print("Error in headers file: " + str(e))
            exit(1)
    if options.headers:
        try:
            self.headers.update(
                HeadersParser(options.headers).headers
            )
        except Exception:
            print("Invalid headers")
            exit(1)
    # --- Extensions ---
    # "*" expands to a built-in list of common extensions.
    if options.extensions == "*":
        self.extensions = [
            "php", "jsp", "asp", "aspx", "do", "action", "cgi",
            "pl", "html", "htm", "js", "json", "tar.gz", "bak"
        ]
    elif options.extensions == "banner.txt":
        # An unquoted "-e *" gets shell-expanded to a filename; catch the
        # common case and explain instead of scanning with it.
        print("A weird extension was provided: 'banner.txt'. Please do not use * as the extension or enclose it in double quotes")
        exit(0)
    else:
        self.extensions = uniq([extension.lstrip(' .') for extension in options.extensions.split(",")])
    if options.exclude_extensions:
        self.exclude_extensions = uniq(
            [exclude_extension.lstrip(' .') for exclude_extension in options.exclude_extensions.split(",")]
        )
    else:
        self.exclude_extensions = []
    self.useragent = options.useragent
    self.use_random_agents = options.use_random_agents
    self.cookie = options.cookie
    if options.threads_count < 1:
        print("Threads number must be greater than zero")
        exit(1)
    self.threads_count = options.threads_count
    # --- Status-code filters (parsed by parse_status_codes) ---
    if options.include_status_codes:
        self.include_status_codes = self.parse_status_codes(options.include_status_codes)
    else:
        self.include_status_codes = []
    if options.exclude_status_codes:
        self.exclude_status_codes = self.parse_status_codes(options.exclude_status_codes)
    else:
        self.exclude_status_codes = []
    if options.recursion_status_codes:
        self.recursion_status_codes = self.parse_status_codes(options.recursion_status_codes)
    else:
        self.recursion_status_codes = []
    # --- Response filters (sizes / texts / regexps / redirects) ---
    # NOTE(review): the except ValueError arms look unreachable — strip()/
    # split() don't raise it — and the `if ... else None` can put None into
    # the lists; kept as-is, confirm intent.
    if options.exclude_sizes:
        try:
            self.exclude_sizes = uniq([
                exclude_size.strip().upper() if exclude_size else None
                for exclude_size in options.exclude_sizes.split(",")
            ])
        except ValueError:
            self.exclude_sizes = []
    else:
        self.exclude_sizes = []
    if options.exclude_texts:
        try:
            self.exclude_texts = uniq([
                exclude_text.strip() if exclude_text else None
                for exclude_text in options.exclude_texts.split(",")
            ])
        except ValueError:
            self.exclude_texts = []
    else:
        self.exclude_texts = []
    if options.exclude_regexps:
        try:
            self.exclude_regexps = uniq([
                exclude_regexp.strip() if exclude_regexp else None
                for exclude_regexp in options.exclude_regexps.split(",")
            ])
        except ValueError:
            self.exclude_regexps = []
    else:
        self.exclude_regexps = []
    if options.exclude_redirects:
        try:
            self.exclude_redirects = uniq([
                exclude_redirect.strip() if exclude_redirect else None
                for exclude_redirect in options.exclude_redirects.split(",")
            ])
        except ValueError:
            self.exclude_redirects = []
    else:
        self.exclude_redirects = []
    self.prefixes = uniq([prefix.strip() for prefix in options.prefixes.split(",")]) if options.prefixes else []
    self.suffixes = uniq([suffix.strip() for suffix in options.suffixes.split(",")]) if options.suffixes else []
    if options.wordlist:
        self.wordlist = uniq([wordlist.strip() for wordlist in options.wordlist.split(",")])
    else:
        print("No wordlist was provided, try using -w <wordlist>")
        exit(1)
    self.lowercase = options.lowercase
    self.uppercase = options.uppercase
    self.capitalization = options.capitalization
    self.force_extensions = options.force_extensions
    self.data = options.data
    self.exclude_response = options.exclude_response
    self.color = options.color
    self.delay = options.delay
    self.timeout = options.timeout
    self.ip = options.ip
    self.max_retries = options.max_retries
    self.recursive = options.recursive
    self.deep_recursive = options.deep_recursive
    self.force_recursive = options.force_recursive
    self.minimum_response_size = options.minimum_response_size
    self.maximum_response_size = options.maximum_response_size
    self.no_extension = options.no_extension
    self.only_selected = options.only_selected
    self.output_file = options.output_file
    self.output_format = options.output_format
    # --- Subdirectory lists: normalized to "dir/" (no leading slash) ---
    self.scan_subdirs = []
    if options.scan_subdirs:
        for subdir in options.scan_subdirs.split(","):
            subdir = subdir.strip(" ")
            if subdir.startswith("/"):
                subdir = subdir[1:]
            if not subdir.endswith("/"):
                subdir += "/"
            self.scan_subdirs.append(subdir)
    self.exclude_subdirs = []
    if options.exclude_subdirs:
        for subdir in options.exclude_subdirs.split(","):
            subdir = subdir.strip(" ")
            if subdir.startswith("/"):
                subdir = subdir[1:]
            if not subdir.endswith("/"):
                subdir += "/"
            self.exclude_subdirs.append(subdir)
    if options.skip_on_status:
        self.skip_on_status = self.parse_status_codes(options.skip_on_status)
    else:
        self.skip_on_status = []
    # --- Authentication: type and credential must come together ---
    if options.auth and options.auth_type and (
        options.auth_type not in ["basic", "digest", "bearer", "ntlm"]
    ):
        print("'{0}' is not in available authentication types: basic, digest, bearer, ntlm".format(options.auth_type))
        exit(1)
    elif options.auth and not options.auth_type:
        print("Please select the authentication type with --auth-type")
        exit(1)
    elif options.auth_type and not options.auth:
        print("No authentication credential found")
        exit(1)
    # Included and excluded extension lists must be disjoint.
    if len(set(self.extensions).intersection(self.exclude_extensions)):
        print("Exclude extension list can not contain any extension that has already in the extension list")
        exit(1)
    self.auth_type = options.auth_type
    self.auth = options.auth
    self.redirect = options.follow_redirects
    self.httpmethod = options.httpmethod
    self.scheme = options.scheme
    self.request_by_hostname = options.request_by_hostname
    self.exit_on_error = options.exit_on_error
    self.maxrate = options.maxrate
    self.maxtime = options.maxtime
    self.recursion_depth = options.recursion_depth
    if self.scheme not in ["http", "https"]:
        print("Invalid URI scheme: {0}".format(self.scheme))
        exit(1)
    if self.output_format and self.output_format not in ["simple", "plain", "json", "xml", "md", "csv", "html"]:
        print("Select one of the following output formats: simple, plain, json, xml, md, csv, html")
        exit(1)
def generate(self):
    """Build the wordlist entries from the configured dictionary files.

    Older variant: appends into the pre-existing list ``self.entries``
    (via extend) instead of replacing it, and applies prefixes/suffixes
    per entry in a second pass.
    """
    # Case-insensitive pattern for the literal %ext% keyword.
    reext = re.compile(r"\%ext\%", re.IGNORECASE)
    result = []
    # Enable to use multiple dictionaries at once
    for dict_file in self.dictionary_files:
        # uniq(..., filt=True) is a project helper; filt presumably drops
        # empty lines — confirm against its definition.
        for line in uniq(dict_file.get_lines(), filt=True):
            # Skip comments
            if line.startswith("#"):
                continue
            # Paths are stored without a leading slash.
            if line.startswith("/"):
                line = line[1:]
            # --no-extension: drop everything after the first "." while
            # keeping the first character (so a leading dot survives).
            if self._no_extension:
                line = line[0] + line[1:].split(".")[0]
            # Skip dummy paths
            if line == ".":
                continue
            # Skip if the path contains excluded extensions
            if self._exclude_extensions and (any([
                "." + extension in line
                for extension in self._exclude_extensions
            ])):
                continue
            # Classic dirsearch wordlist processing (with %EXT% keyword)
            if "%ext%" in line.lower():
                for extension in self._extensions:
                    newline = reext.sub(extension, line)
                    result.append(newline)
            # If forced extensions is used and the path is not a directory ... (terminated by /)
            # process line like a forced extension.
            elif self._force_extensions and not line.rstrip().endswith(
                    "/") and "." not in line:
                for extension in self._extensions:
                    result.append(line + "." + extension)
                result.append(line)
                result.append(line + "/")
            # Append line unmodified.
            else:
                # NOTE(review): reads self.extensions while the rest of the
                # method uses self._extensions — confirm which is intended.
                if not self._only_selected or any([
                    line.endswith("." + extension)
                    for extension in self.extensions
                ]):
                    result.append(line)
    # Some custom changes
    # Per entry: keep the original, then add prefixed and suffixed
    # variants, then push the batch through the selected casing helper
    # (lowercase/uppercase/capitalize are project helpers) into
    # self.entries, which must already be a list.
    for entry in uniq(result):
        entries = [entry]
        for pref in self._prefixes:
            if not entry.startswith(pref):
                entries.append(pref + entry)
        for suff in self._suffixes:
            # Directory-style entries ("…/") get no suffix.
            if not entry.endswith("/") and not entry.endswith(suff):
                entries.append(entry + suff)
        if self.lowercase:
            self.entries.extend(lowercase(entries))
        elif self.uppercase:
            self.entries.extend(uppercase(entries))
        elif self.capitalization:
            self.entries.extend(capitalize(entries))
        else:
            self.entries.extend(entries)
    del result
def __init__(self):
    """Parse config + CLI arguments and populate all scan options.

    Newer variant: copies every parsed option onto self via __dict__,
    then normalizes/validates individual fields.  On invalid input it
    prints a message and terminates the process.
    """
    options = self.parse_config(arguments_parser())
    # Mirror every option as an attribute of self; later code reads some
    # fields from `options` and some from `self` interchangeably.
    self.__dict__.update(options.__dict__)
    self.httpmethod = self.httpmethod.upper()
    self.url_list = []
    # --- Target selection: exactly one of -u / -l / --cidr / stdin ---
    if options.url:
        self.url_list = [options.url]
    elif options.url_list:
        # access_file presumably validates readability and returns a file
        # wrapper — confirm against its definition.
        file = self.access_file(options.url_list, "file contains URLs")
        self.url_list = file.get_lines()
    elif options.cidr:
        self.url_list = iprange(options.cidr)
    elif options.stdin_urls:
        # splitlines(0) == splitlines(False): keepends disabled (default).
        self.url_list = sys.stdin.read().splitlines(0)
    if options.raw_file:
        # A raw request file replaces URL targets; just validate it here.
        self.access_file(options.raw_file, "file with raw request")
    elif not len(self.url_list):
        print("URL target is missing, try using -u <url>")
        exit(1)
    self.url_list = uniq(self.url_list)
    if not options.extensions and not options.no_extension:
        print("WARNING: No extension was specified!")
    # Validate every wordlist file up front.
    for dict_file in options.wordlist.split(","):
        self.access_file(dict_file, "wordlist")
    if options.threads_count < 1:
        print("Threads number must be greater than zero")
        exit(1)
    if options.proxy_list:
        file = self.access_file(options.proxy_list, "proxylist file")
        self.proxylist = file.get_lines()
    # Any proxy mode forces request-by-hostname; self.proxy/self.proxylist/
    # self.replay_proxy come from the __dict__.update above.
    if self.proxy or self.proxylist or self.replay_proxy:
        self.request_by_hostname = True
    # --- Headers: from a file and/or inline; merged into one dict ---
    self.headers = {}
    if options.header_list:
        try:
            file = self.access_file(options.header_list, "header list file")
            self.headers.update(HeadersParser(file.read()).headers)
        except Exception as e:
            print("Error in headers file: " + str(e))
            exit(1)
    if options.headers:
        try:
            self.headers.update(HeadersParser(options.headers).headers)
        except Exception:
            print("Invalid headers")
            exit(1)
    # --- Extensions ---
    if options.extensions == "*":
        self.extensions = COMMON_EXTENSIONS
    elif options.extensions == "banner.txt":
        # An unquoted "-e *" gets shell-expanded to a filename; catch the
        # common case and explain instead of scanning with it.
        print(
            "A weird extension was provided: 'banner.txt'. Please do not use * as the extension or enclose it in double quotes"
        )
        exit(0)
    if options.no_extension:
        self.extensions = ""
    # Empty inputs are handled inside parse_status_codes (returns []).
    self.include_status_codes = self.parse_status_codes(
        options.include_status_codes)
    self.exclude_status_codes = self.parse_status_codes(
        options.exclude_status_codes)
    self.recursion_status_codes = self.parse_status_codes(
        options.recursion_status_codes)
    self.skip_on_status = self.parse_status_codes(options.skip_on_status)
    self.prefixes = uniq(
        [prefix.strip() for prefix in self.prefixes.split(",")])
    self.suffixes = uniq(
        [suffix.strip() for suffix in self.suffixes.split(",")])
    # NOTE(review): this unconditional assignment re-reads
    # options.extensions, clobbering the COMMON_EXTENSIONS / "" values set
    # above for the "*" and --no-extension cases — confirm this is intended
    # (COMMON_EXTENSIONS may be a comma-separated string assigned back to
    # options.extensions upstream in other versions).
    self.extensions = uniq([
        extension.lstrip(' .')
        for extension in options.extensions.split(",")
    ])
    self.exclude_extensions = uniq([
        exclude_extension.lstrip(' .')
        for exclude_extension in options.exclude_extensions.split(",")
    ])
    self.exclude_sizes = uniq([
        exclude_size.strip().upper()
        for exclude_size in options.exclude_sizes.split(",")
    ])
    self.exclude_texts = uniq([
        exclude_text.strip()
        for exclude_text in options.exclude_texts.split(",")
    ])
    self.exclude_regexps = uniq([
        exclude_regexp.strip()
        for exclude_regexp in options.exclude_regexps.split(",")
    ])
    self.exclude_redirects = uniq([
        exclude_redirect.strip()
        for exclude_redirect in options.exclude_redirects.split(",")
    ])
    # Normalize subdirs to "dir/" form: strip, pad a trailing "/" (ljust
    # pads only when no trailing slash grew the string), drop leading "/".
    self.scan_subdirs = [
        subdir.strip().ljust(len(subdir) + 1, "/").lstrip("/")
        for subdir in options.scan_subdirs.split(",")
    ]
    self.exclude_subdirs = [
        subdir.strip().ljust(len(subdir) + 1, "/").lstrip("/")
        for subdir in options.exclude_subdirs.split(",")
    ]
    if not options.wordlist:
        print("No wordlist was provided, try using -w <wordlist>")
        exit(1)
    self.wordlist = uniq(
        [wordlist.strip() for wordlist in options.wordlist.split(",")])
    # --- Authentication: type and credential must come together ---
    if options.auth and not options.auth_type:
        print("Please select the authentication type with --auth-type")
        exit(1)
    elif options.auth_type and not options.auth:
        print("No authentication credential found")
        exit(1)
    elif options.auth and options.auth_type not in AUTHENTICATION_TYPES:
        print("'{}' is not in available authentication types: {}".format(
            options.auth_type, ", ".join(AUTHENTICATION_TYPES)))
        exit(1)
    # Included and excluded extension lists must be disjoint.
    if len(set(self.extensions).intersection(self.exclude_extensions)):
        print(
            "Exclude extension list can not contain any extension that has already in the extension list"
        )
        exit(1)
    if self.output_format and self.output_format not in list(
            OUTPUT_FORMATS):
        print("Select one of the following output formats: {}".format(
            ", ".join(OUTPUT_FORMATS)))
        exit(1)