def generate(self):
    """Build ``self.entries`` from the wordlist file.

    Handles the ``%EXT%`` keyword, forced extensions and optional
    lowercasing; duplicates are removed while keeping insertion order.
    """
    produced = []
    for raw in self.dictionaryFile.getLines():
        # Comment lines are ignored.
        if raw.lstrip().startswith("#"):
            continue
        if '%EXT%' in raw:
            # Classic dirsearch wordlist processing (with %EXT% keyword).
            produced.extend(
                self.quote(raw.replace('%EXT%', ext))
                for ext in self._extensions
            )
        elif self._forcedExtensions and not raw.rstrip().endswith("/"):
            # Forced-extension mode: treat every non-directory entry as
            # if it carried each configured extension.
            base = self.quote(raw)
            for ext in self._extensions:
                # An empty extension keeps the bare path.
                # Why? check https://github.com/maurosoria/dirsearch/issues/70
                produced.append(base if ext.strip() == '' else base + '.' + ext)
            if base.strip() != '':
                produced.append(base + "/")
        else:
            # Plain entry: quote and keep it unchanged.
            produced.append(self.quote(raw))
    # oset keeps the first occurrence of each entry in insertion order.
    if self.lowercase:
        self.entries = list(oset(e.lower() for e in produced))
    else:
        self.entries = list(oset(produced))
    del produced
def generate(self, lowercase=False):
    """Build ``self.entries``: host-derived backup guesses plus wordlist lines.

    Backup/archive candidates (host.rar, host.zip, ...) are derived from
    the second-level label of ``self.url`` and, when one exists, from the
    third-level label as well.  Wordlist lines are then appended with
    ``%EXT%`` expanded per configured extension.

    Fix: the original indexed ``hostuser[len(hostuser) - 3]``
    unconditionally, which raised IndexError for single-label hosts and
    silently used the TLD as "subhost" for two-label hosts.  Sub-host
    candidates are now generated only when a third-level label exists.
    """
    self.entries = []
    labels = self.url.split('.')
    # Second-level label ("example" in www.example.com); a single-label
    # host only has itself.
    mainhost = labels[-2] if len(labels) >= 2 else labels[0]

    def _candidates(name):
        # Common backup/archive names derived from one host label
        # (order and duplicates preserved from the original list).
        return ['/' + name + '.rar', '/' + name + '.zip',
                '/' + name + name + '.rar', '/' + name + '.rar',
                '/' + name + '.tar.gz', '/' + name + '.tar',
                '/' + name + '123.zip', '/' + name + '123.tar.gz',
                '/' + name + name + '.zip', '/' + name + name + '.tar.gz',
                '/' + name + name + '.tar', '/' + name + '.bak']

    baks = _candidates(mainhost)
    if len(labels) >= 3:
        # Only guess sub-host backups when a third-level label exists.
        baks += _candidates(labels[-3])
    self.entries = self.entries + baks
    for line in self.dictionaryFile.getLines():
        # Skip comments
        if line.lstrip().startswith("#"):
            continue
        if '%EXT%' in line:
            for extension in self._extensions:
                self.entries.append(self.quote(line.replace('%EXT%', extension)))
        else:
            self.entries.append(self.quote(line))
    if lowercase:
        # oset: order-preserving de-duplication.
        self.entries = list(oset([entry.lower() for entry in self.entries]))
def generate(self):
    """Build ``self.entries`` from the wordlist file.

    Supports both ``%EXT%`` and ``%ext%`` keyword spellings, forced
    extensions and optional lowercasing; oset de-duplicates while
    preserving insertion order.

    Fix: the original rebuilt ``newline`` from ``line`` separately for
    each token, so a line containing BOTH ``%EXT%`` and ``%ext%`` lost
    the first substitution.  Both tokens are now replaced on the same
    working string.
    """
    result = []
    for line in self.dictionaryFile.getLines():
        # Skip comments
        if line.lstrip().startswith("#"):
            continue
        # Classic dirsearch wordlist processing (with %EXT%/%ext% keyword)
        if '%EXT%' in line or '%ext%' in line:
            for extension in self._extensions:
                # Chain the replacements so both spellings are expanded.
                newline = line.replace('%EXT%', extension).replace('%ext%', extension)
                result.append(self.quote(newline))
        # If forced extensions is used and the path is not a directory
        # (terminated by /), process line like a forced extension.
        elif self._forcedExtensions and not line.rstrip().endswith("/"):
            quoted = self.quote(line)
            for extension in self._extensions:
                # Why? check https://github.com/maurosoria/dirsearch/issues/70
                if extension.strip() == '':
                    result.append(quoted)
                else:
                    result.append(quoted + '.' + extension)
            if quoted.strip() not in ['']:
                result.append(quoted + "/")
        # Append line unmodified.
        else:
            result.append(self.quote(line))
    # oset library provides an insertion-ordered and unique collection.
    if self.lowercase:
        self.entries = list(oset(map(lambda l: l.lower(), result)))
    else:
        self.entries = list(oset(result))
    del result
def generate(self, lowercase=False):
    """Build ``self.entries`` from the wordlist (Python 2 era: ``urllib.quote``).

    Lines starting with '#' are skipped; the ``%EXT%`` keyword is
    expanded once per configured extension.

    Fix: lines WITHOUT the ``%EXT%`` keyword were silently discarded
    because the loop had no else branch (compare the sibling
    implementations in this file); they are now appended URL-quoted.
    """
    self.entries = []
    for line in self.dictionaryFile.getLines():
        # Skip comments
        if line.startswith("#"):
            continue
        if '%EXT%' in line:
            for extension in self._extensions:
                entry = line.replace('%EXT%', extension)
                self.entries.append(urllib.quote(entry))
        else:
            # Keep plain entries instead of discarding them.
            self.entries.append(urllib.quote(line))
    if lowercase:
        # oset: order-preserving de-duplication.
        self.entries = list(oset([entry.lower() for entry in self.entries]))
def generate(self, lowercase=False):
    """Populate ``self.entries`` with URL-quoted wordlist lines.

    Lines starting with '#' are skipped; the ``%EXT%`` keyword is
    expanded once per configured extension.  With ``lowercase`` the
    final list is lowercased and de-duplicated in insertion order.
    """
    self.entries = []
    for raw in self.dictionaryFile.getLines():
        if raw.startswith("#"):  # comment line
            continue
        if '%EXT%' in raw:
            self.entries.extend(
                urllib.parse.quote(raw.replace('%EXT%', ext))
                for ext in self._extensions
            )
        else:
            self.entries.append(urllib.parse.quote(raw))
    if lowercase == True:
        self.entries = list(oset([e.lower() for e in self.entries]))
def generate(self, lowercase=False):
    """Populate ``self.entries`` from the wordlist.

    ``%EXT%`` lines are expanded per extension; every other line
    optionally gets each forced extension appended BEFORE the bare
    entry itself.  With ``lowercase`` the result is lowercased and
    de-duplicated in insertion order.
    """
    self.entries = []
    for raw in self.dictionaryFile.getLines():
        # Ignore comment lines.
        if raw.lstrip().startswith("#"):
            continue
        if '%EXT%' in raw:
            for ext in self._extensions:
                self.entries.append(self.quote(raw.replace('%EXT%', ext)))
            continue
        encoded = self.quote(raw)
        if self._forcedExtensions:
            # Forced-extension variants come before the bare entry.
            self.entries.extend(encoded + '.' + ext for ext in self._extensions)
        self.entries.append(encoded)
    if lowercase == True:
        self.entries = list(oset([e.lower() for e in self.entries]))
def generate(self, lowercase=False):
    """Build ``self.entries``: host-derived backup guesses plus wordlist lines.

    Backup/archive candidates (host.rar, host.zip, ...) come from the
    second-level label of ``self.url`` and, when one exists, from the
    third-level label as well; wordlist lines follow with ``%EXT%``
    expanded per configured extension.

    Fix: the original read ``hostuser[len(hostuser) - 3]``
    unconditionally, raising IndexError on single-label hosts and
    silently treating the TLD as the sub-host on two-label hosts.
    Sub-host candidates are now emitted only when a third-level label
    actually exists.
    """
    self.entries = []
    labels = self.url.split('.')
    # Second-level label; a single-label host only has itself.
    mainhost = labels[-2] if len(labels) >= 2 else labels[0]

    def _candidates(name):
        # Common backup/archive names for one host label (order and
        # duplicates preserved from the original list).
        return ['/' + name + '.rar', '/' + name + '.zip',
                '/' + name + name + '.rar', '/' + name + '.rar',
                '/' + name + '.tar.gz', '/' + name + '.tar',
                '/' + name + '123.zip', '/' + name + '123.tar.gz',
                '/' + name + name + '.zip', '/' + name + name + '.tar.gz',
                '/' + name + name + '.tar', '/' + name + '.bak']

    baks = _candidates(mainhost)
    if len(labels) >= 3:
        # Only guess sub-host backups when a third-level label exists.
        baks += _candidates(labels[-3])
    self.entries = self.entries + baks
    for line in self.dictionaryFile.getLines():
        # Skip comments
        if line.lstrip().startswith("#"):
            continue
        if '%EXT%' in line:
            for extension in self._extensions:
                self.entries.append(
                    self.quote(line.replace('%EXT%', extension)))
        else:
            self.entries.append(self.quote(line))
    if lowercase:
        # oset: order-preserving de-duplication.
        self.entries = list(oset([entry.lower() for entry in self.entries]))
def __init__(self, script_path):
    """Parse command-line options into typed attributes.

    Validation failures print a message and exit(0).  `File` and `oset`
    are project helpers (oset is used throughout for order-preserving
    de-duplication).
    """
    self.script_path = script_path
    self.parseConfig()
    options = self.parseArguments()
    # --- Target selection: URL list file, or a single -u URL ---
    if options.url == None:
        if options.urlList != None:
            with File(options.urlList) as urlList:
                if not urlList.exists():
                    print("The file with URLs does not exist")
                    exit(0)
                if not urlList.isValid():
                    print('The wordlist is invalid')
                    exit(0)
                if not urlList.canRead():
                    print('The wordlist cannot be read')
                    exit(0)
                self.urlList = list(urlList.getLines())
        elif options.url == None:
            # NOTE(review): this condition duplicates the enclosing
            # `if`; the branch simply means "no -u and no URL list".
            print('URL target is missing, try using -u <url> ')
            exit(0)
    else:
        self.urlList = [options.url]
    # --- Extensions are mandatory ---
    if options.extensions == None:
        print('No extension specified. You must specify at least one extension')
        exit(0)
    # --- Wordlist file is validated here; its path is stored below ---
    with File(options.wordlist) as wordlist:
        if not wordlist.exists():
            print('The wordlist file does not exist')
            exit(0)
        if not wordlist.isValid():
            print('The wordlist is invalid')
            exit(0)
        if not wordlist.canRead():
            print('The wordlist cannot be read')
            exit(0)
    # --- Proxy: prepend http:// when no scheme was given ---
    if options.httpProxy is not None:
        if options.httpProxy.startswith('http://'):
            self.proxy = options.httpProxy
        else:
            self.proxy = 'http://{0}'.format(options.httpProxy)
    else:
        self.proxy = None
    # --- Headers: each "Name: value" string becomes one dict entry ---
    if options.headers is not None:
        try:
            self.headers = dict((key.strip(), value.strip()) for (key, value) in (header.split(':', 1) for header in options.headers))
        except Exception as e:
            print('Invalid headers')
            exit(0)
    else:
        self.headers = {}
    # Comma-separated extensions, stripped and de-duplicated.
    self.extensions = list(oset([extension.strip() for extension in options.extensions.split(',')]))
    self.useragent = options.useragent
    self.useRandomAgents = options.useRandomAgents
    self.cookie = options.cookie
    if options.threadsCount < 1:
        print('Threads number must be a number greater than zero')
        exit(0)
    self.threadsCount = options.threadsCount
    # --- Status codes to exclude; any parse error empties the list ---
    if options.excludeStatusCodes is not None:
        try:
            self.excludeStatusCodes = list(oset([int(excludeStatusCode.strip()) if excludeStatusCode else None for excludeStatusCode in options.excludeStatusCodes.split(',')]))
        except ValueError:
            self.excludeStatusCodes = []
    else:
        self.excludeStatusCodes = []
    # --- Plain pass-through options ---
    self.wordlist = options.wordlist
    self.lowercase = options.lowercase
    self.forceExtensions = options.forceExtensions
    self.simpleOutputFile = options.simpleOutputFile
    self.plainTextOutputFile = options.plainTextOutputFile
    self.jsonOutputFile = options.jsonOutputFile
    self.delay = options.delay
    self.timeout = options.timeout
    self.ip = options.ip
    self.maxRetries = options.maxRetries
    self.recursive = options.recursive
    self.suppressEmpty = options.suppressEmpty
    # --- Subdirs to scan: strip slashes, then re-append exactly one ---
    if options.scanSubdirs is not None:
        self.scanSubdirs = list(oset([subdir.strip() for subdir in options.scanSubdirs.split(',')]))
        for i in range(len(self.scanSubdirs)):
            while self.scanSubdirs[i].startswith("/"):
                self.scanSubdirs[i] = self.scanSubdirs[i][1:]
            while self.scanSubdirs[i].endswith("/"):
                self.scanSubdirs[i] = self.scanSubdirs[i][:-1]
        self.scanSubdirs = list(oset([subdir + "/" for subdir in self.scanSubdirs]))
    else:
        self.scanSubdirs = None
    # --- Subdirs to exclude: only meaningful with recursion enabled ---
    if not self.recursive and options.excludeSubdirs is not None:
        print('--exclude-subdir argument can only be used with -r|--recursive')
        exit(0)
    elif options.excludeSubdirs is not None:
        self.excludeSubdirs = list(oset([subdir.strip() for subdir in options.excludeSubdirs.split(',')]))
        for i in range(len(self.excludeSubdirs)):
            while self.excludeSubdirs[i].startswith("/"):
                self.excludeSubdirs[i] = self.excludeSubdirs[i][1:]
            while self.excludeSubdirs[i].endswith("/"):
                self.excludeSubdirs[i] = self.excludeSubdirs[i][:-1]
        self.excludeSubdirs = list(oset(self.excludeSubdirs))
    else:
        self.excludeSubdirs = None
    self.redirect = options.noFollowRedirects
    self.requestByHostname = options.requestByHostname
    self.httpmethod = options.httpmethod
    self.recursive_level_max = options.recursive_level_max
def __init__(self, script_path):
    """Parse command-line options into typed attributes.

    Validation failures print a message and exit(0).  `File` and `oset`
    are project helpers (oset is used throughout for order-preserving
    de-duplication).
    """
    self.script_path = script_path
    self.parseConfig()
    options = self.parseArguments()
    # --- Target selection: URL list file, or a single -u URL ---
    if options.url == None:
        if options.urlList != None:
            with File(options.urlList) as urlList:
                if not urlList.exists():
                    print("The file with URLs does not exist")
                    exit(0)
                if not urlList.isValid():
                    print('The wordlist is invalid')
                    exit(0)
                if not urlList.canRead():
                    print('The wordlist cannot be read')
                    exit(0)
                self.urlList = list(urlList.getLines())
        elif options.url == None:
            # NOTE(review): this condition duplicates the enclosing
            # `if`; the branch simply means "no -u and no URL list".
            print('URL target is missing, try using -u <url> ')
            exit(0)
    else:
        self.urlList = [options.url]
    # --- Extensions are mandatory ---
    if options.extensions == None:
        print(
            'No extension specified. You must specify at least one extension'
        )
        exit(0)
    # --- Wordlist file is validated here; its path is stored below ---
    with File(options.wordlist) as wordlist:
        if not wordlist.exists():
            print('The wordlist file does not exist')
            exit(0)
        if not wordlist.isValid():
            print('The wordlist is invalid')
            exit(0)
        if not wordlist.canRead():
            print('The wordlist cannot be read')
            exit(0)
    # --- Proxy: prepend http:// when no scheme was given ---
    if options.httpProxy is not None:
        if options.httpProxy.startswith('http://'):
            self.proxy = options.httpProxy
        else:
            self.proxy = 'http://{0}'.format(options.httpProxy)
    else:
        self.proxy = None
    # --- Headers: each "Name: value" string becomes one dict entry ---
    if options.headers is not None:
        try:
            self.headers = dict(
                (key.strip(), value.strip())
                for (key, value) in (header.split(':', 1)
                                     for header in options.headers))
        except Exception as e:
            print('Invalid headers')
            exit(0)
    else:
        self.headers = {}
    # Comma-separated extensions, stripped and de-duplicated.
    self.extensions = list(
        oset([
            extension.strip()
            for extension in options.extensions.split(',')
        ]))
    self.useragent = options.useragent
    self.useRandomAgents = options.useRandomAgents
    self.cookie = options.cookie
    if options.threadsCount < 1:
        print('Threads number must be a number greater than zero')
        exit(0)
    self.threadsCount = options.threadsCount
    # --- Status codes to exclude; any parse error empties the list ---
    if options.excludeStatusCodes is not None:
        try:
            self.excludeStatusCodes = list(
                oset([
                    int(excludeStatusCode.strip())
                    if excludeStatusCode else None
                    for excludeStatusCode in
                    options.excludeStatusCodes.split(',')
                ]))
        except ValueError:
            self.excludeStatusCodes = []
    else:
        self.excludeStatusCodes = []
    # --- Plain pass-through options ---
    self.wordlist = options.wordlist
    self.lowercase = options.lowercase
    self.forceExtensions = options.forceExtensions
    self.simpleOutputFile = options.simpleOutputFile
    self.plainTextOutputFile = options.plainTextOutputFile
    self.jsonOutputFile = options.jsonOutputFile
    self.delay = options.delay
    self.timeout = options.timeout
    self.ip = options.ip
    self.maxRetries = options.maxRetries
    self.recursive = options.recursive
    self.suppressEmpty = options.suppressEmpty
    # --- Subdirs to scan: strip slashes, then re-append exactly one ---
    if options.scanSubdirs is not None:
        self.scanSubdirs = list(
            oset([
                subdir.strip()
                for subdir in options.scanSubdirs.split(',')
            ]))
        for i in range(len(self.scanSubdirs)):
            while self.scanSubdirs[i].startswith("/"):
                self.scanSubdirs[i] = self.scanSubdirs[i][1:]
            while self.scanSubdirs[i].endswith("/"):
                self.scanSubdirs[i] = self.scanSubdirs[i][:-1]
        self.scanSubdirs = list(
            oset([subdir + "/" for subdir in self.scanSubdirs]))
    else:
        self.scanSubdirs = None
    # --- Subdirs to exclude: only meaningful with recursion enabled ---
    if not self.recursive and options.excludeSubdirs is not None:
        print(
            '--exclude-subdir argument can only be used with -r|--recursive'
        )
        exit(0)
    elif options.excludeSubdirs is not None:
        self.excludeSubdirs = list(
            oset([
                subdir.strip()
                for subdir in options.excludeSubdirs.split(',')
            ]))
        for i in range(len(self.excludeSubdirs)):
            while self.excludeSubdirs[i].startswith("/"):
                self.excludeSubdirs[i] = self.excludeSubdirs[i][1:]
            while self.excludeSubdirs[i].endswith("/"):
                self.excludeSubdirs[i] = self.excludeSubdirs[i][:-1]
        self.excludeSubdirs = list(oset(self.excludeSubdirs))
    else:
        self.excludeSubdirs = None
    self.redirect = options.noFollowRedirects
    self.requestByHostname = options.requestByHostname
def __init__(self, script_path):
    """Parse command-line options into typed attributes.

    Newer variant: supports multiple wordlists, a proxy list, default
    extensions and include/exclude filters.  Validation failures print a
    message and exit (note: some paths exit(0), others exit(1) — the
    codes are inconsistent in the original).  `File` and `oset` are
    project helpers (oset de-duplicates while preserving order).
    """
    self.script_path = script_path
    self.parseConfig()
    options = self.parseArguments()
    self.clean_view = options.clean_view
    self.full_url = options.full_url
    # --- Target selection: URL list file, or a single -u URL ---
    if options.url == None:
        if options.urlList != None:
            with File(options.urlList) as urlList:
                if not urlList.exists():
                    print("The file with URLs does not exist")
                    exit(0)
                if not urlList.isValid():
                    print("The file with URLs is invalid")
                    exit(0)
                if not urlList.canRead():
                    print("The file with URLs cannot be read")
                    exit(0)
                self.urlList = list(urlList.getLines())
        elif options.url == None:
            # NOTE(review): duplicates the enclosing `if`; means
            # "no -u and no URL list".
            print("URL target is missing, try using -u <url> ")
            exit(0)
    else:
        self.urlList = [options.url]
    # --- Extensions: explicit, or fall back to the default list ---
    if not options.extensions and not options.defaultExtensions:
        print('No extension specified. You must specify at least one extension or try using default extension list.')
        exit(0)
    if not options.extensions and options.defaultExtensions:
        options.extensions = self.defaultExtensions
    # Enable to use multiple dictionaries at once
    for dictFile in options.wordlist.split(','):
        with File(dictFile) as wordlist:
            if not wordlist.exists():
                print('The wordlist file does not exist')
                exit(1)
            if not wordlist.isValid():
                print('The wordlist is invalid')
                exit(1)
            if not wordlist.canRead():
                print('The wordlist cannot be read')
                exit(1)
    # --- Proxy: list file takes precedence over a single proxy ---
    if options.proxyList is not None:
        with File(options.proxyList) as plist:
            if not plist.exists():
                print('The proxylist file does not exist')
                exit(1)
            if not plist.isValid():
                print('The proxylist is invalid')
                exit(1)
            if not plist.canRead():
                print('The proxylist cannot be read')
                exit(1)
        self.proxylist = open(options.proxyList).read().splitlines()
    elif options.httpProxy is not None:
        # Prepend http:// when no scheme was given.
        if options.httpProxy.startswith("http://"):
            self.proxy = options.httpProxy
        else:
            self.proxy = "http://{0}".format(options.httpProxy)
    else:
        self.proxy = None
    # --- Headers: each "Name: value" string becomes one dict entry ---
    if options.headers is not None:
        try:
            self.headers = dict(
                (key.strip(), value.strip())
                for (key, value) in (
                    header.split(":", 1) for header in options.headers
                )
            )
        except Exception as e:
            print("Invalid headers")
            exit(0)
    else:
        self.headers = {}
    # Comma-separated extensions, stripped and de-duplicated.
    self.extensions = list(
        oset([extension.strip() for extension in options.extensions.split(",")])
    )
    self.useragent = options.useragent
    self.useRandomAgents = options.useRandomAgents
    self.cookie = options.cookie
    if options.threadsCount < 1:
        print('Threads number must be a number greater than zero')
        exit(1)
    self.threadsCount = options.threadsCount
    # --- Include/exclude filters; any parse error empties the list ---
    if options.includeStatusCodes:
        try:
            self.includeStatusCodes = list(
                oset([int(includeStatusCode.strip()) if includeStatusCode else None for includeStatusCode in options.includeStatusCodes.split(',')]))
        except ValueError:
            self.includeStatusCodes = []
    else:
        self.includeStatusCodes = []
    if options.excludeExtensions:
        try:
            self.excludeExtensions = list(
                oset(
                    [
                        excludeExtension.strip() if excludeExtension else None
                        for excludeExtension in options.excludeExtensions.split(",")
                    ]
                )
            )
        except ValueError:
            self.excludeExtensions = []
    else:
        self.excludeExtensions = []
    if options.excludeStatusCodes:
        try:
            self.excludeStatusCodes = list(
                oset(
                    [
                        int(excludeStatusCode.strip())
                        if excludeStatusCode else None
                        for excludeStatusCode in options.excludeStatusCodes.split(
                            ","
                        )
                    ]
                )
            )
        except ValueError:
            self.excludeStatusCodes = []
    else:
        self.excludeStatusCodes = []
    if options.excludeTexts:
        try:
            self.excludeTexts = list(
                oset(
                    [
                        excludeTexts.strip() if excludeTexts else None
                        for excludeTexts in options.excludeTexts.split(",")
                    ]
                )
            )
        except ValueError:
            self.excludeTexts = []
    else:
        self.excludeTexts = []
    if options.excludeRegexps:
        try:
            self.excludeRegexps = list(
                oset(
                    [
                        excludeRegexps.strip() if excludeRegexps else None
                        for excludeRegexps in options.excludeRegexps.split(",")
                    ]
                )
            )
        except ValueError:
            self.excludeRegexps = []
    else:
        self.excludeRegexps = []
    self.suffixes = [] if not options.suffixes else list(oset([suffix.strip() for suffix in options.suffixes.split(',')]))
    # Store wordlist PATHS (the files were only validated above).
    self.wordlist = list(oset([wordlist.strip() for wordlist in options.wordlist.split(',')]))
    # --- Plain pass-through options ---
    self.lowercase = options.lowercase
    self.uppercase = options.uppercase
    self.forceExtensions = options.forceExtensions
    self.data = options.data
    self.noDotExtensions = options.noDotExtensions
    self.simpleOutputFile = options.simpleOutputFile
    self.plainTextOutputFile = options.plainTextOutputFile
    self.jsonOutputFile = options.jsonOutputFile
    self.quietMode = options.quietMode
    self.delay = options.delay
    self.timeout = options.timeout
    self.ip = options.ip
    self.maxRetries = options.maxRetries
    self.recursive = options.recursive
    self.suppressEmpty = options.suppressEmpty
    self.minimumResponseSize = options.minimumResponseSize
    self.maximumResponseSize = options.maximumResponseSize
    # --- Subdirs to scan: strip slashes, then re-append exactly one ---
    if options.scanSubdirs is not None:
        self.scanSubdirs = list(
            oset([subdir.strip() for subdir in options.scanSubdirs.split(",")])
        )
        for i in range(len(self.scanSubdirs)):
            while self.scanSubdirs[i].startswith("/"):
                self.scanSubdirs[i] = self.scanSubdirs[i][1:]
            while self.scanSubdirs[i].endswith("/"):
                self.scanSubdirs[i] = self.scanSubdirs[i][:-1]
        self.scanSubdirs = list(oset([subdir + "/" for subdir in self.scanSubdirs]))
    else:
        self.scanSubdirs = None
    # --- Subdirs to exclude: only meaningful with recursion enabled ---
    if not self.recursive and options.excludeSubdirs is not None:
        print("--exclude-subdir argument can only be used with -r|--recursive")
        exit(0)
    elif options.excludeSubdirs is not None:
        self.excludeSubdirs = list(
            oset([subdir.strip() for subdir in options.excludeSubdirs.split(",")])
        )
        for i in range(len(self.excludeSubdirs)):
            while self.excludeSubdirs[i].startswith("/"):
                self.excludeSubdirs[i] = self.excludeSubdirs[i][1:]
            while self.excludeSubdirs[i].endswith("/"):
                self.excludeSubdirs[i] = self.excludeSubdirs[i][:-1]
        self.excludeSubdirs = list(oset(self.excludeSubdirs))
    else:
        self.excludeSubdirs = None
    # The two extension lists must be disjoint.
    if len(set(self.extensions).intersection(self.excludeExtensions)):
        print("Exclude extensions can not contain any extension that already in the extensions")
        exit(0)
    self.redirect = options.noFollowRedirects
    self.requestByHostname = options.requestByHostname
    self.httpmethod = options.httpmethod
    self.recursive_level_max = options.recursive_level_max
# NOTE(review): orphaned Python 2-era fragment of an option-parsing
# __init__.  It begins mid-statement (the opening `if` of the proxy
# check is not part of this chunk) and uses py2-only syntax
# (`except Exception, e`, print statements); the surrounding def and
# indentation below are reconstructed — confirm against the original
# file before relying on structure.
            self.proxy = options.httpProxy
        else:
            # Prepend http:// when no scheme was given.
            self.proxy = 'http://{0}'.format(options.httpProxy)
    else:
        self.proxy = None
    # Headers: each "Name: value" string becomes one dict entry.
    if options.headers is not None:
        try:
            self.headers = dict((key.strip(), value.strip()) for (key, value) in (header.split(':', 1) for header in options.headers))
        except Exception, e:
            print 'Invalid headers'
            exit(0)
    else:
        self.headers = {}
    self.url = options.url
    # Comma-separated extensions, stripped and de-duplicated (oset is
    # an order-preserving unique collection, a project dependency).
    self.extensions = list(oset([extension.strip() for extension in options.extensions.split(',')]))
    self.useragent = options.useragent
    self.cookie = options.cookie
    if options.threadsCount < 1:
        print 'Threads number must be a number greater than zero'
        exit(0)
    self.threadsCount = options.threadsCount
    # Status codes to exclude; any parse error empties the list.
    if options.excludeStatusCodes is not None:
        try:
            self.excludeStatusCodes = list(oset([int(excludeStatusCode.strip()) if excludeStatusCode else None for excludeStatusCode in options.excludeStatusCodes.split(',')]))
        except ValueError:
            self.excludeStatusCodes = []
    else:
        self.excludeStatusCodes = []
    self.wordlist = options.wordlist
def __init__(self, script_path):
    """Parse command-line options into typed attributes.

    Newest variant: supports CIDR targets, a header list file, multiple
    wordlists, proxy lists with several schemes, status-code ranges and
    prefix/suffix lists.  Validation failures print a message and exit
    (some paths exit(0), others exit(1) — inconsistent in the
    original).  `File`, `IPv4Network` (stdlib ipaddress, presumably
    imported at file top) and `oset` are used; oset de-duplicates while
    preserving order.
    """
    self.script_path = script_path
    self.parseConfig()
    options = self.parseArguments()
    self.quiet = options.quiet
    self.full_url = options.full_url
    # --- Target selection: URL list file, CIDR range, or -u URL ---
    if not options.url:
        if options.urlList:
            with File(options.urlList) as urlList:
                if not urlList.exists():
                    print("The file with URLs does not exist")
                    exit(0)
                if not urlList.is_valid():
                    print("The file with URLs is invalid")
                    exit(0)
                if not urlList.can_read():
                    print("The file with URLs cannot be read")
                    exit(0)
                self.urlList = list(urlList.get_lines())
        elif options.cidr:
            # Expand the CIDR into one target per address.
            self.urlList = [str(ip) for ip in IPv4Network(options.cidr)]
        else:
            print("URL target is missing, try using -u <url> ")
            exit(0)
    else:
        self.urlList = [options.url]
    # --- Extensions: missing extensions only warn; --no-extension
    # forces an empty extension string ---
    if not options.extensions and not options.noExtension:
        print(
            'WARNING: No extension specified. You need to specify at least one extension.'
        )
    if options.noExtension:
        options.extensions = str()
    # Enable to use multiple dictionaries at once
    for dictFile in options.wordlist.split(','):
        with File(dictFile) as wordlist:
            if not wordlist.exists():
                print('The wordlist file does not exist')
                exit(1)
            if not wordlist.is_valid():
                print('The wordlist is invalid')
                exit(1)
            if not wordlist.can_read():
                print('The wordlist cannot be read')
                exit(1)
    # --- Proxy: list file takes precedence; several schemes accepted ---
    if options.proxyList:
        with File(options.proxyList) as plist:
            if not plist.exists():
                print('The proxylist file does not exist')
                exit(1)
            if not plist.is_valid():
                print('The proxylist is invalid')
                exit(1)
            if not plist.can_read():
                print('The proxylist cannot be read')
                exit(1)
        self.proxylist = open(options.proxyList).read().splitlines()
    elif options.proxy:
        if options.proxy.startswith(
                ("http://", "https://", "socks5://", "socks5h://",
                 "socks4://", "socks4a://")):
            self.proxy = options.proxy
        else:
            # Default to http:// when no scheme was given.
            self.proxy = "http://" + options.proxy
    else:
        self.proxy = None
    # --- Separate proxy used only for matching responses ---
    if options.matches_proxy:
        if options.matches_proxy.startswith(
                ("http://", "https://", "socks5://", "socks5h://",
                 "socks4://", "socks4a://")):
            self.matches_proxy = options.matches_proxy
        else:
            self.matches_proxy = "http://" + options.matches_proxy
    else:
        self.matches_proxy = None
    # --- Headers from the command line ---
    if options.headers:
        try:
            self.headers = dict(
                (key, value) for (key, value) in (header.split(":", 1)
                                                  for header in options.headers))
        except Exception:
            print("Invalid headers")
            exit(0)
    else:
        self.headers = {}
    # --- Additional headers from a file, merged on top ---
    if options.headerList:
        try:
            with File(options.headerList) as hlist:
                if not hlist.exists():
                    print('The header list file does not exist')
                    exit(1)
                if not hlist.is_valid():
                    print('The header list file is invalid')
                    exit(1)
                if not hlist.can_read():
                    print('The header list cannot be read')
                    exit(1)
                lines = hlist.get_lines()
                for line in lines:
                    # NOTE(review): splitting on every ":" and taking
                    # [1] truncates header values that themselves
                    # contain a colon.
                    key, value = line.split(":")[0], line.split(":")[1]
                    self.headers[key] = value
        except Exception as e:
            print("Error in headers file: " + str(e))
            exit(0)
    # Comma-separated extensions, stripped and de-duplicated.
    self.extensions = list(
        oset([
            extension.strip() for extension in options.extensions.split(",")
        ]))
    self.useragent = options.useragent
    self.useRandomAgents = options.useRandomAgents
    self.cookie = options.cookie
    if options.threadsCount < 1:
        print('Threads number must be greater than zero')
        exit(1)
    self.threadsCount = options.threadsCount
    # --- Status codes to include: single codes or "a-b" ranges ---
    self.includeStatusCodes = []
    if options.includeStatusCodes:
        for statusCode in options.includeStatusCodes.split(","):
            try:
                if "-" in statusCode:
                    statusCodes = [
                        i for i in range(
                            int(statusCode.split("-")[0].strip()),
                            int(statusCode.split("-")[1].strip()) + 1)
                    ]
                    self.includeStatusCodes.extend(statusCodes)
                else:
                    self.includeStatusCodes.append(int(statusCode.strip()))
            except ValueError:
                print(
                    "Invalid status code or status code range: {}".format(
                        statusCode))
                exit(0)
    # --- Status codes to exclude: same single/range syntax ---
    self.excludeStatusCodes = []
    if options.excludeStatusCodes:
        for statusCode in options.excludeStatusCodes.split(","):
            try:
                if "-" in statusCode:
                    statusCodes = [
                        i for i in range(
                            int(statusCode.split("-")[0].strip()),
                            int(statusCode.split("-")[1].strip()) + 1)
                    ]
                    self.excludeStatusCodes.extend(statusCodes)
                else:
                    self.excludeStatusCodes.append(int(statusCode.strip()))
            except ValueError:
                print(
                    "Invalid status code or status code range: {}".format(
                        statusCode))
                exit(0)
    # --- Remaining exclude filters; parse errors empty the list ---
    if options.excludeExtensions:
        try:
            self.excludeExtensions = list(
                oset([
                    excludeExtension.strip() if excludeExtension else None
                    for excludeExtension in options.excludeExtensions.split(",")
                ]))
        except ValueError:
            self.excludeExtensions = []
    else:
        self.excludeExtensions = []
    if options.excludeSizes:
        try:
            self.excludeSizes = list(
                oset([
                    excludeSize.strip().upper() if excludeSize else None
                    for excludeSize in options.excludeSizes.split(",")
                ]))
        except ValueError:
            self.excludeSizes = []
    else:
        self.excludeSizes = []
    if options.excludeTexts:
        try:
            self.excludeTexts = list(
                oset([
                    excludeText.strip() if excludeText else None
                    for excludeText in options.excludeTexts.split(",")
                ]))
        except ValueError:
            self.excludeTexts = []
    else:
        self.excludeTexts = []
    if options.excludeRegexps:
        try:
            self.excludeRegexps = list(
                oset([
                    excludeRegexp.strip() if excludeRegexp else None
                    for excludeRegexp in options.excludeRegexps.split(",")
                ]))
        except ValueError:
            self.excludeRegexps = []
    else:
        self.excludeRegexps = []
    self.prefixes = [] if not options.prefixes else list(
        oset([prefix.strip() for prefix in options.prefixes.split(',')]))
    self.suffixes = [] if not options.suffixes else list(
        oset([suffix.strip() for suffix in options.suffixes.split(',')]))
    # Store wordlist PATHS (the files were only validated above).
    self.wordlist = list(
        oset([
            wordlist.strip() for wordlist in options.wordlist.split(',')
        ]))
    # --- Plain pass-through options ---
    self.lowercase = options.lowercase
    self.uppercase = options.uppercase
    self.capitalization = options.capitalization
    self.forceExtensions = options.forceExtensions
    self.data = options.data
    self.noDotExtensions = options.noDotExtensions
    self.simpleOutputFile = options.simpleOutputFile
    self.plainTextOutputFile = options.plainTextOutputFile
    self.jsonOutputFile = options.jsonOutputFile
    self.xmlOutputFile = options.xmlOutputFile
    self.markdownOutputFile = options.markdownOutputFile
    self.csvOutputFile = options.csvOutputFile
    self.color = options.color
    self.delay = options.delay
    self.timeout = options.timeout
    self.ip = options.ip
    self.maxRetries = options.maxRetries
    self.recursive = options.recursive
    self.minimumResponseSize = options.minimumResponseSize
    self.maximumResponseSize = options.maximumResponseSize
    self.noExtension = options.noExtension
    self.onlySelected = options.onlySelected
    # --- Subdirs to scan: strip slashes, then re-append exactly one ---
    if options.scanSubdirs:
        self.scanSubdirs = list(
            oset([
                subdir.strip() for subdir in options.scanSubdirs.split(",")
            ]))
        for i in range(len(self.scanSubdirs)):
            while self.scanSubdirs[i].startswith("/"):
                self.scanSubdirs[i] = self.scanSubdirs[i][1:]
            while self.scanSubdirs[i].endswith("/"):
                self.scanSubdirs[i] = self.scanSubdirs[i][:-1]
        self.scanSubdirs = list(
            oset([subdir + "/" for subdir in self.scanSubdirs]))
    else:
        self.scanSubdirs = None
    # --- Subdirs to exclude: silently ignored without recursion ---
    if not self.recursive and options.excludeSubdirs:
        self.excludeSubdirs = None
    elif options.excludeSubdirs:
        self.excludeSubdirs = list(
            oset([
                subdir.strip() for subdir in options.excludeSubdirs.split(",")
            ]))
        for i in range(len(self.excludeSubdirs)):
            while self.excludeSubdirs[i].startswith("/"):
                self.excludeSubdirs[i] = self.excludeSubdirs[i][1:]
            while self.excludeSubdirs[i].endswith("/"):
                self.excludeSubdirs[i] = self.excludeSubdirs[i][:-1]
        self.excludeSubdirs = list(oset(self.excludeSubdirs))
    else:
        self.excludeSubdirs = None
    # The two extension lists must be disjoint.
    if len(set(self.extensions).intersection(self.excludeExtensions)):
        print(
            "Exclude extension list can not contain any extension that has already in the extension list"
        )
        exit(0)
    self.redirect = options.followRedirects
    self.httpmethod = options.httpmethod
    self.requestByHostname = options.requestByHostname
    self.exit_on_error = options.exit_on_error
    self.debug = options.debug
    self.recursive_level_max = options.recursive_level_max
def generate(self):
    """Build ``self.entries`` from every configured wordlist file.

    Supports the case-insensitive ``%ext%`` keyword, forced extensions,
    the no-dot-extensions option, optional suffixes and optional
    lowercasing; oset de-duplicates while preserving insertion order.

    Fixes:
    * Substitution used to overwrite ``line`` inside the per-extension
      loop, so every extension after the first appended the same
      already-substituted entry (only one entry survived dedup).  Each
      extension now substitutes into a fresh copy of the line.
    * The regex literals used invalid string escapes (``'\\%ext\\%'``);
      '%' is not special in a regex, so raw strings without escapes are
      used instead.
    """
    reext = re.compile(r'%ext%', re.IGNORECASE)
    reextdot = re.compile(r'\.%ext%', re.IGNORECASE)
    result = []
    # Enable to use multiple dictionaries at once
    for dictFile in self.dictionaryFiles:
        for line in dictFile.getLines():
            # Skip comments
            if line.lstrip().startswith("#"):
                continue
            # Classic dirsearch wordlist processing (with %EXT% keyword)
            if '%ext%' in line.lower():
                for extension in self._extensions:
                    # Substitute into a fresh copy per extension.
                    if self._noDotExtensions:
                        newline = reextdot.sub(extension, line)
                    else:
                        newline = line
                    newline = reext.sub(extension, newline)
                    result.append(self.quote(newline))
            # If forced extensions is used and the path is not a directory
            # (terminated by /), process line like a forced extension.
            elif self._forcedExtensions and not line.rstrip().endswith("/"):
                quoted = self.quote(line)
                for extension in self._extensions:
                    # Why? check https://github.com/maurosoria/dirsearch/issues/70
                    if extension.strip() == '':
                        result.append(quoted)
                    else:
                        result.append(quoted + (
                            '' if self._noDotExtensions else '.') + extension)
                if quoted.strip() not in ['']:
                    result.append(quoted)
                    result.append(quoted + "/")
            # Append line unmodified.
            else:
                result.append(self.quote(line))
    # Adding suffixes for finding backups etc
    if self._suffixes:
        for res in list(result):
            if not res.rstrip().endswith("/"):
                for suff in self._suffixes:
                    result.append(res + suff)
    # oset library provides an insertion-ordered and unique collection.
    if self.lowercase:
        self.entries = list(oset(map(lambda l: l.lower(), result)))
    else:
        self.entries = list(oset(result))
    del result
def generate(self):
    """Build ``self.entries`` as ``(deal_method, quoted_path)`` tuples.

    Each wordlist line may carry ``[Filename]``/``[Extension]``/
    ``[Directory Name]`` placeholder tokens filled from
    ``self.filename``, ``self.extension`` and ``self.directory``; lines
    whose tokens cannot be filled are dropped.  Entries coming from a
    logic-dir dictionary are tagged ``DealMethod.replace_dir``, all
    others ``DealMethod.extend_dir``.

    Changes: removed the large block of commented-out legacy %EXT%
    processing; hoisted the loop-invariant token constants; fixed the
    lowercase path, which called ``.lower()`` on the (method, path)
    tuples and therefore raised AttributeError whenever
    ``self.lowercase`` was set — only the path part is lowercased now.
    """
    # Placeholder tokens (loop-invariant, so built once).
    FILENAME_TOKEN = '[Filename]'
    EXTENSION_TOKEN = '[Extension]'
    DIRECTORY_TOKEN = '[Directory Name]'
    result = []
    # Enable to use multiple dictionaries at once
    for dictFile in self.dictionaryFiles:
        # Tag entries with how the scanner should combine them with
        # the target directory.
        if Dict4Scan.logic_dir in dictFile.path:
            dict_deal_method = DealMethod.replace_dir
        else:
            dict_deal_method = DealMethod.extend_dir
        for line in dictFile.getLines():
            # Skip comments
            if line.lstrip().startswith("#"):
                continue
            if (FILENAME_TOKEN in line and self.filename
                    and EXTENSION_TOKEN in line and self.extension):
                newline = line.replace(FILENAME_TOKEN, self.filename)
                newline = newline.replace(EXTENSION_TOKEN, self.extension)
                result.append((dict_deal_method, self.quote(newline)))
            elif FILENAME_TOKEN in line and self.filename:
                newline = line.replace(FILENAME_TOKEN, self.filename)
                result.append((dict_deal_method, self.quote(newline)))
            elif DIRECTORY_TOKEN in line and self.directory:
                newline = line.replace(DIRECTORY_TOKEN, self.directory)
                result.append((dict_deal_method, self.quote(newline)))
            elif (FILENAME_TOKEN not in line and EXTENSION_TOKEN not in line
                    and DIRECTORY_TOKEN not in line):
                # No placeholders at all: keep the line as-is.  Lines
                # with unfillable placeholders fall through and are
                # dropped on purpose.
                result.append((dict_deal_method, self.quote(line)))
    # oset library provides an insertion-ordered and unique collection.
    if self.lowercase:
        # Entries are (method, path) tuples: lowercase the path only.
        self.entries = list(
            oset([(method, path.lower()) for (method, path) in result]))
    else:
        self.entries = list(oset(result))
    del result