def rec(target):
    processed.add(target)
    print('%s Parsing %s' % (run, target))
    url = getUrl(target, '', True)
    params = getParams(target, '', True)
    if '=' in target:
        inps = []
        for name, value in params.items():
            inps.append({'name': name, 'value': value})
        forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
    response = requester(url, params, headers, True, 0).text
    forms.append(zetanize(response))
    matches = findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
    for link in matches:  # iterate over the matches
        # remove everything after a "#" to deal with in-page anchors
        link = link.split('#')[0]
        if link[:4] == 'http':
            if link.startswith(main_url):
                storage.add(link)
        elif link[:2] == '//':
            if link.split('/')[2].startswith(host):
                storage.add(schema + link)
        elif link[:1] == '/':
            storage.add(main_url + link)
        else:
            storage.add(main_url + '/' + link)
def rec(target):
    processed.add(target)
    printableTarget = '/'.join(target.split('/')[3:])
    if len(printableTarget) > 40:
        printableTarget = printableTarget[-40:]
    else:
        printableTarget = (printableTarget + (' ' * (40 - len(printableTarget))))
    print('%s Parsing %s' % (run, printableTarget), end='\r')
    url = getUrl(target, True)
    params = getParams(target, '', True)
    if '=' in target:  # if there's a = in the url, there should be GET parameters
        inps = []
        for name, value in params.items():
            inps.append({'name': name, 'value': value})
        forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
    response = requester(url, params, headers, True, delay, timeout).text
    forms.append(zetanize(response))
    matches = findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
    for link in matches:  # iterate over the matches
        # remove everything after a "#" to deal with in-page anchors
        link = link.split('#')[0]
        if link[:4] == 'http':
            if link.startswith(main_url):
                storage.add(link)
        elif link[:2] == '//':
            if link.split('/')[2].startswith(host):
                storage.add(schema + link)
        elif link[:1] == '/':
            storage.add(main_url + link)
        else:
            storage.add(main_url + '/' + link)
def singleFuzz(target, paramData, verbose, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))
    for paramName in params.keys():
        print('%s Fuzzing parameter: %s' % (info, paramName))
        paramsCopy = copy.deepcopy(params)
        paramsCopy[paramName] = xsschecker
        fuzzer(url, paramsCopy, headers, GET, delay, timeout, WAF, encoding)
def bruteforcer(target, paramData, payloadList, verbose, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug('Parsed host to bruteforce: {}'.format(host))
    url = getUrl(target, GET)
    logger.debug('Parsed url to bruteforce: {}'.format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json('Bruteforcer params:', params)
    if not params:
        logger.error('No parameters to test.')
        quit()
    for paramName in params.keys():
        progress = 1
        paramsCopy = copy.deepcopy(params)
        for payload in payloadList:
            logger.run('Bruteforcing %s[%s%s%s]%s: %i/%i\r' %
                       (green, end, paramName, green, end, progress, len(payloadList)))
            if encoding:
                payload = encoding(unquote(payload))
            paramsCopy[paramName] = payload
            response = requester(url, paramsCopy, headers, GET, delay, timeout).text
            if encoding:
                payload = encoding(payload)
            if payload in response:
                logger.info('%s %s' % (good, payload))
            progress += 1
    logger.no_format('')
def bruteforcer(target, paramData, payloadList, verbose, encoding):
    GET, POST = (False, True) if paramData else (True, False)
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    verboseOutput(params, 'params', verbose)
    for paramName in params.keys():
        progress = 1
        paramsCopy = copy.deepcopy(params)
        for payload in payloadList:
            print('%s Progress: %i/%i' % (run, progress, len(payloadList)), end='\r')
            if encoding:
                payload = encoding(unquote(payload))
            paramsCopy[paramName] = payload
            response = requester(url, paramsCopy, headers, GET, delay, timeout).text
            if encoding:
                payload = encoding(payload)
            if payload in response:
                print('%s %s' % (good, payload))
            progress += 1
    print('')
def bruteforcer(target, paramData, payloadList, verbose, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    verboseOutput(params, 'params', verbose)
    for paramName in params.keys():
        progress = 1
        paramsCopy = copy.deepcopy(params)
        for payload in payloadList:
            print('%s Bruteforcing %s[%s%s%s]%s: %i/%i' %
                  (run, green, end, paramName, green, end, progress, len(payloadList)), end='\r')
            if encoding:
                payload = encoding(unquote(payload))
            paramsCopy[paramName] = payload
            response = requester(url, paramsCopy, headers, GET, delay, timeout).text
            if encoding:
                payload = encoding(payload)
            if payload in response:
                print('%s %s' % (good, payload))
            progress += 1
    print()
def prepare_requests(args):
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:83.0) Gecko/20100101 Firefox/83.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'close',
        'Upgrade-Insecure-Requests': '1'
    }
    # a bare headers flag (bool) means prompt for them; a string is parsed directly
    if type(args.headers) == bool:
        headers = extractHeaders(prompt())
    elif type(args.headers) == str:
        headers = extractHeaders(args.headers)
    if mem.var['method'] == 'JSON':
        headers['Content-type'] = 'application/json'
    if args.url:
        params = getParams(args.include)
        return {
            'url': args.url,
            'method': mem.var['method'],
            'headers': headers,
            'include': params
        }
    elif args.import_file:
        return importer(args.import_file, mem.var['method'], headers, args.include)
    return []
def rec(target):
    processed.add(target)
    printableTarget = '/'.join(target.split('/')[3:])
    if len(printableTarget) > 40:
        printableTarget = printableTarget[-40:]
    else:
        printableTarget = (printableTarget + (' ' * (40 - len(printableTarget))))
    print('%s Parsing %s' % (run, printableTarget), end='\r')
    url = getUrl(target, True)
    params = getParams(target, '', True)
    if '=' in target:  # if there's a = in the url, there should be GET parameters
        inps = []
        for name, value in params.items():
            inps.append({'name': name, 'value': value})
        forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
    response = requester(url, params, headers, True, delay, timeout).text
    forms.append(zetanize(response))
    matches = findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
    for link in matches:  # iterate over the matches
        # remove everything after a "#" to deal with in-page anchors
        link = link.split('#')[0]
        if link[:4] == 'http':
            if link.startswith(main_url):
                storage.add(link)
        elif link[:2] == '//':
            if link.split('/')[2].startswith(host):
                storage.add(schema + link)
        elif link[:1] == '/':
            storage.add(main_url + link)
        else:
            storage.add(main_url + '/' + link)
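# The rec() variants above all share the same four-way link-normalization
# rule (absolute, protocol-relative, root-relative, and relative hrefs).
# Below is a minimal standalone sketch of that rule; the function name and
# the sample values for main_url, schema, and host are hypothetical stand-ins
# for the globals the crawler closes over.
from urllib.parse import urlparse


def normalize(link, main_url='https://example.com',
              schema='https', host='example.com'):
    link = link.split('#')[0]               # drop in-page anchors
    if link.startswith('http'):             # absolute URL: keep only in-scope
        return link if link.startswith(main_url) else None
    elif link.startswith('//'):             # protocol-relative URL
        if link.split('/')[2].startswith(host):
            # joined with ':' here for a valid URL; note the originals
            # concatenate schema and link directly
            return schema + ':' + link
        return None
    elif link.startswith('/'):              # root-relative path
        return main_url + link
    else:                                   # relative path
        return main_url + '/' + link


if __name__ == '__main__':
    for href in ['https://example.com/a', '//example.com/b#frag', '/c', 'd.php?x=1']:
        print(href, '->', normalize(href))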
def singleFuzz(target, paramData, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith("http"):
        try:
            response = requester("https://" + target, {}, headers, GET, delay, timeout)
            target = "https://" + target
        except:
            target = "http://" + target
    logger.debug("Single Fuzz target: {}".format(target))
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug("Single fuzz host: {}".format(host))
    url = getUrl(target, GET)
    logger.debug("Single fuzz url: {}".format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json("Single fuzz params:", params)
    if not params:
        logger.error("No parameters to test.")
        quit()
    WAF = wafDetector(
        url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout
    )
    if WAF:
        logger.error("WAF detected: %s%s%s" % (green, WAF, end))
    else:
        logger.good("WAF Status: %sOffline%s" % (green, end))
    for paramName in params.keys():
        logger.info("Fuzzing parameter: %s" % paramName)
        paramsCopy = copy.deepcopy(params)
        paramsCopy[paramName] = xsschecker
        fuzzer(url, paramsCopy, headers, GET, delay, timeout, WAF, encoding)
def bruteforcer(target, paramData, payloadList, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    host = urlparse(target).netloc  # Extracts the host out of the url
    # logger => handler => formatter are each one-to-many relationships; the
    # log format is really decided by the formatter, so to get any format you
    # want, just subclass and customize the formatter component. It plays much
    # the same role as Log4j's Layout component in Java.
    logger.debug('Parsed host to bruteforce: {}'.format(host))  # str.format() enriches string formatting
    url = getUrl(target, GET)
    logger.debug('Parsed url to bruteforce: {}'.format(url))
    params = getParams(target, paramData, GET)  # parse the parameters
    logger.debug_json('Bruteforcer params:', params)
    if not params:
        logger.error('No parameters to test.')  # nothing left to test
        quit()  # exit
    for paramName in params.keys():
        progress = 1
        paramsCopy = copy.deepcopy(params)  # deep copy
        for payload in payloadList:
            logger.run('Bruteforcing %s[%s%s%s]%s: %i/%i\r' %
                       (green, end, paramName, green, end, progress, len(payloadList)))
            if encoding:
                payload = encoding(unquote(payload))  # encode the payload
            paramsCopy[paramName] = payload
            response = requester(url, paramsCopy, headers, GET, delay, timeout).text
            if encoding:
                payload = encoding(payload)
            if payload in response:
                logger.info('%s %s' % (good, payload))
            progress += 1
    logger.no_format('')
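# Since the (translated) comments above describe Python logging's one-to-many
# logger => handler => formatter chain, here is a minimal self-contained
# illustration using only the standard library. The logger.run/debug_json
# helpers used in the snippets are the tool's own wrappers and are not part
# of this sketch.
import logging


class BracketFormatter(logging.Formatter):
    # The formatter alone decides the output format (akin to Log4j's Layout),
    # so customizing output means overriding format().
    def format(self, record):
        return '[%s] %s' % (record.levelname, record.getMessage())


demo_logger = logging.getLogger('demo')
demo_logger.setLevel(logging.DEBUG)

console = logging.StreamHandler()            # handler 1: console, custom format
console.setFormatter(BracketFormatter())
demo_logger.addHandler(console)

logfile = logging.FileHandler('demo.log')    # handler 2: file, timestamped format
logfile.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
demo_logger.addHandler(logfile)

demo_logger.debug('same record, two formats')  # one logger feeds both handlers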
def singleFuzz(target, paramData, verbose, encoding, headers, delay, timeout):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    WAF = wafDetector(
        url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))
    for paramName in params.keys():
        print('%s Fuzzing parameter: %s' % (info, paramName))
        paramsCopy = copy.deepcopy(params)
        paramsCopy[paramName] = xsschecker
        fuzzer(url, paramsCopy, headers, GET, delay, timeout, WAF, encoding)
def rec(target):
    processed.add(target)
    urlPrint = (target + (' ' * 60))[:60]
    print('%s Parsing %-40s' % (run, urlPrint), end='\r')
    url = getUrl(target, True)
    params = getParams(target, '', True)
    if '=' in target:  # if there's a = in the url, there should be GET parameters
        inps = []
        for name, value in params.items():
            inps.append({'name': name, 'value': value})
        forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
    raw_response = requester(url, params, True)
    response = raw_response.text
    js = js_extractor(response)
    scripts = script_extractor(response)
    for each in retirejs(url, response, checkedScripts):
        all_outdated_js.append(each)
    all_techs.extend(wappalyzer(raw_response, js, scripts))
    parsed_response = zetanize(response)
    forms.append(parsed_response)
    matches = re.finditer(
        r'<[aA][^>]*?(?:href|HREF)=[\'"`]?([^>]*?)[\'"`]?>', response)
    for link in matches:  # iterate over the matches
        # remove everything after a "#" to deal with in-page anchors
        link = link.group(1).split('#')[0]
        this_url = handle_anchor(target, link)
        if urlparse(this_url).netloc == host:
            storage.add(this_url)
def rec(url):
    processed.add(url)
    urlPrint = (url + (' ' * 60))[:60]
    print('%s Parsing %-40s' % (run, urlPrint), end='\r')
    url = getUrl(url, '', True)
    params = getParams(url, '', True)
    if '=' in url:
        inps = []
        for name, value in params.items():
            inps.append({'name': name, 'value': value})
        forms.append(
            {url: {0: {'action': url, 'method': 'get', 'inputs': inps}}})
    response = requester(url, params, headers, True, 0).text
    forms.append({url: zetanize(url, response)})
    matches = findall(
        r'<[aA][^>]*?(href|HREF)=["\']{0,1}(.*?)["\']', response)
    for link in matches:  # iterate over the matches
        # remove everything after a "#" to deal with in-page anchors
        link = link[1].split('#')[0].lstrip(' ')
        if link[:4] == 'http':
            if link.startswith(main_url):
                storage.add(link)
        elif link[:2] == '//':
            if link.split('/')[2].startswith(host):
                storage.add(scheme + '://' + link)
        elif link[:1] == '/':
            storage.add(remove_file(url) + link)
        else:
            usable_url = remove_file(url)
            if usable_url.endswith('/'):
                storage.add(usable_url + link)
            elif link.startswith('/'):
                storage.add(usable_url + link)
            else:
                storage.add(usable_url + '/' + link)
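# Worth noting in the variant above: because its pattern has two capture
# groups, re.findall yields (attribute, url) tuples, which is why the loop
# indexes link[1]. A quick demonstration on a hypothetical HTML snippet:
import re

html = '<a HREF="/about">About</a> <a href=\'/contact#form\'>Contact</a>'
matches = re.findall(r'<[aA][^>]*?(href|HREF)=["\']{0,1}(.*?)["\']', html)
print(matches)  # [('HREF', '/about'), ('href', '/contact#form')]
for link in matches:
    print(link[1].split('#')[0].lstrip(' '))  # /about, then /contact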
def rec(target):
    vulnerable_items = list()
    vulnerable_report = dict()
    processed.add(target)
    printableTarget = '/'.join(target.split('/')[3:])
    if len(printableTarget) > 40:
        printableTarget = printableTarget[-40:]
    else:
        printableTarget = (printableTarget + (' ' * (40 - len(printableTarget))))
    logger.run('Parsing %s\r' % printableTarget)
    url = getUrl(target, True)
    params = getParams(target, '', True)
    if '=' in target:  # if there's a = in the url, there should be GET parameters
        inps = []
        for name, value in params.items():
            inps.append({'name': name, 'value': value})
        forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
    response = requester(url, params, headers, True, delay, timeout).text
    vulnerable_components = retireJs(url, response)
    print("===== Vulnerable Components ======")
    print(vulnerable_components)
    vulnerable_report['vulnerable_components'] = vulnerable_components
    if not skipDOM:
        highlighted = dom(response)
        clean_highlighted = ''.join(
            [re.sub(r'^\d+\s+', '', line) for line in highlighted])
        if highlighted and clean_highlighted not in checkedDOMs:
            checkedDOMs.append(clean_highlighted)
            logger.good('Potentially vulnerable objects found at %s' % url)
            vulnerable_report['url'] = url
            logger.red_line(level='good')
            for line in highlighted:
                vulnerable_items.append(clean_colors(line))
                logger.no_format(line, level='good')
            vulnerable_report['codes'] = vulnerable_items
            logger.red_line(level='good')
    forms.append(zetanize(response))
    matches = re.findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
    for link in matches:  # iterate over the matches
        # remove everything after a "#" to deal with in-page anchors
        link = link.split('#')[0]
        if link.endswith(('.pdf', '.png', '.jpg', '.jpeg', '.xls', '.xml', '.docx', '.doc')):
            pass
        else:
            if link[:4] == 'http':
                if link.startswith(main_url):
                    storage.add(link)
            elif link[:2] == '//':
                if link.split('/')[2].startswith(host):
                    storage.add(schema + link)
            elif link[:1] == '/':
                storage.add(main_url + link)
            else:
                storage.add(main_url + '/' + link)
    return vulnerable_report
def rec(target):
    print('%s Parsing %s' % (run, target))
    url = getUrl(target, '', True)
    params = getParams(target, '', True)
    if '=' in target:
        inps = []
        for name, value in params.items():
            inps.append({'name': name, 'value': value})
        forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
    response = requester(url, params, headers, True, 0).text
    forms.append(zetanize(response))
def singleFuzz(target, paramData, encoding, headers, delay, timeout):
    report = dict()
    config = dict()
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    logger.debug('Single Fuzz target: {}'.format(target))
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug('Single fuzz host: {}'.format(host))
    url = getUrl(target, GET)
    logger.debug('Single fuzz url: {}'.format(url))
    params = getParams(target, paramData, GET)
    config['target'] = target
    config['host'] = host
    config['url'] = url
    logger.debug_json('Single fuzz params:', params)
    if not params:
        config['param'] = 'No parameters to test'
        report['config'] = config
        logger.error('No parameters to test.')
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        waf_status = WAF
        logger.error('WAF detected: %s%s%s' % (green, WAF, end))
    else:
        waf_status = "offline"
        logger.good('WAF Status: %sOffline%s' % (green, end))
    report['waf'] = waf_status
    report['parameters'] = list()
    for paramName in params.keys():
        paramReport = dict()
        paramReport['parameter'] = paramName
        logger.info('Fuzzing parameter: %s' % paramName)
        paramsCopy = copy.deepcopy(params)
        paramsCopy[paramName] = xsschecker
        result = fuzzer(url, paramsCopy, headers, GET, delay, timeout, WAF, encoding)
        paramReport['result'] = result
        report['parameters'].append(paramReport)
    report['config'] = config
    print(report)
def brute(target, paramData, payloadList):
    if paramData:
        GET, POST = False, True
    else:
        GET, POST = True, False
    host = urlparse(target).netloc  # Extracts host out of the url
    url = getUrl(target, paramData, GET)
    params = getParams(target, paramData, GET)
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        for payload in payloadList:
            paramsCopy[paramName] = payload
            response = requester(url, paramsCopy, headers, GET, delay, timeout).text
            if payload in response:
                print('%s %s' % (good, payload))
def photon(main_url, url, headers):
    urls = set()  # urls found
    forms = []  # web forms
    processed = set()  # urls that have been crawled
    storage = set()  # urls that belong to the target i.e. in-scope
    host = urlparse(url).netloc
    url = getUrl(url, '', True)
    schema = urlparse(main_url).scheme
    params = getParams(url, '', True)
    response = requester(url, params, headers, True, 0).text
    forms.append(zetanize(response))
    matches = findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
    for link in matches:  # iterate over the matches
        # remove everything after a "#" to deal with in-page anchors
        link = link.split('#')[0]
        if link[:4] == 'http':
            if link.startswith(main_url):
                urls.add(link)
        elif link[:2] == '//':
            if link.split('/')[2].startswith(host):
                # protocol-relative link: prefix the scheme plus ':' to form a full URL
                urls.add(schema + ':' + link)
        elif link[:1] == '/':
            urls.add(main_url + link)
        else:
            urls.add(main_url + '/' + link)

    def rec(target):
        print('%s Parsing %s' % (run, target))
        url = getUrl(target, '', True)
        params = getParams(target, '', True)
        if '=' in target:
            inps = []
            for name, value in params.items():
                inps.append({'name': name, 'value': value})
            forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
        response = requester(url, params, headers, True, 0).text
        forms.append(zetanize(response))

    from core.config import threadCount
    threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=threadCount)
    futures = (threadpool.submit(rec, url) for url in urls)
    for i, _ in enumerate(concurrent.futures.as_completed(futures)):
        pass
    return forms
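# The thread-pool pattern at the end of photon() -- submit one rec() job per
# discovered URL, then drain the futures -- can be exercised in isolation.
# This sketch substitutes a trivial fetch() stub for rec() and a literal
# worker count for core.config.threadCount; both are hypothetical.
import concurrent.futures


def fetch(url):
    # stand-in for rec(); the real worker requests and parses the page
    return len(url)


urls = {'https://example.com/a', 'https://example.com/b'}
threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=2)
futures = (threadpool.submit(fetch, url) for url in urls)
# as_completed() yields each future as soon as its worker finishes,
# so this loop simply blocks until the whole batch is done.
for i, future in enumerate(concurrent.futures.as_completed(futures)):
    print(i, future.result())
threadpool.shutdown()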
def rec(target):
    processed.add(target)
    printableTarget = "/".join(target.split("/")[3:])
    if len(printableTarget) > 40:
        printableTarget = printableTarget[-40:]
    else:
        printableTarget = printableTarget + (" " * (40 - len(printableTarget)))
    logger.run("Parsing %s\r" % printableTarget)
    url = getUrl(target, True)
    params = getParams(target, "", True)
    if "=" in target:  # if there's a = in the url, there should be GET parameters
        inps = []
        for name, value in params.items():
            inps.append({"name": name, "value": value})
        forms.append({0: {"action": url, "method": "get", "inputs": inps}})
    response = requester(url, params, headers, True, delay, timeout).text
    retireJs(url, response)
    if not skipDOM:
        highlighted = dom(response)
        clean_highlighted = "".join(
            [re.sub(r"^\d+\s+", "", line) for line in highlighted])
        if highlighted and clean_highlighted not in checkedDOMs:
            checkedDOMs.append(clean_highlighted)
            logger.good("Potentially vulnerable objects found at %s" % url)
            logger.red_line(level="good")
            for line in highlighted:
                logger.no_format(line, level="good")
            logger.red_line(level="good")
    forms.append(zetanize(response))
    matches = re.findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
    for link in matches:  # iterate over the matches
        # remove everything after a "#" to deal with in-page anchors
        link = link.split("#")[0]
        if link[:4] == "http":
            if link.startswith(main_url):
                storage.add(link)
        elif link[:2] == "//":
            if link.split("/")[2].startswith(host):
                storage.add(schema + link)
        elif link[:1] == "/":
            storage.add(main_url + link)
        else:
            storage.add(main_url + "/" + link)
def bruteforcer(target, paramData, payloadList, verbose, encoding):
    if paramData:
        GET, POST = False, True
    else:
        GET, POST = True, False
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        for payload in payloadList:
            if encoding:
                payload = encoding(unquote(payload))
            paramsCopy[paramName] = payload
            response = requester(url, paramsCopy, headers, GET, delay, timeout).text
            if encoding:
                payload = encoding(payload)
            if payload in response:
                print('%s %s' % (good, payload))
            sortedHeaders[header] = value
        except IndexError:
            pass
    return sortedHeaders

if headers:
    headers = extractHeaders(prompt())
else:
    headers = {}

if args.GET:
    GET = True
else:
    GET = False

include = getParams(include)

paramList = []
try:
    with open(file, 'r') as file:
        for line in file:
            paramList.append(line.strip('\n'))
except FileNotFoundError:
    print('%s The specified file doesn\'t exist' % bad)
    quit()

def heuristic(response, paramList):
    done = []
    forms = re.findall(r'(?i)(?s)<form.*?</form.*?>', response)
    for form in forms:
def scan(target, paramData, encoding, headers, delay, timeout, skipDOM, find, skip):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    logger.debug('Scan target: {}'.format(target))
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        logger.run('Checking for DOM vulnerabilities')
        highlighted = dom(response)
        if highlighted:
            logger.good('Potentially vulnerable objects found')
            logger.red_line(level='good')
            for line in highlighted:
                logger.no_format(line, level='good')
            logger.red_line(level='good')
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug('Host to scan: {}'.format(host))
    url = getUrl(target, GET)
    logger.debug('Url to scan: {}'.format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json('Scan parameters:', params)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        logger.error('No parameters to test.')
        quit()
    WAF = wafDetector(
        url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        logger.error('WAF detected: %s%s%s' % (green, WAF, end))
    else:
        logger.good('WAF Status: %sOffline%s' % (green, end))
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        logger.info('Testing parameter: %s' % paramName)
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        parsedResponse = htmlParser(response, encoding)
        occurences = parsedResponse[0]
        logger.debug('Scan occurences: {}'.format(occurences))
        positions = parsedResponse[1]
        logger.debug('Scan positions: {}'.format(positions))
        if not occurences:
            logger.error('No reflection found')
            continue
        else:
            logger.info('Reflections found: %i' % len(occurences))
        logger.run('Analysing reflections')
        efficiencies = filterChecker(
            url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
        logger.debug('Scan efficiencies: {}'.format(efficiencies))
        logger.run('Generating payloads')
        vectors = generator(occurences, response.text)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            logger.error('No vectors were crafted.')
            continue
        logger.info('Payloads generated: %i' % total)
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                if core.config.globalVariables['path']:
                    vect = vect.replace('/', '%2F')
                loggerVector = vect
                progress += 1
                logger.run('Progress: %i/%i\r' % (progress, total))
                if confidence == 10:
                    if not GET:
                        vect = unquote(vect)
                    efficiencies = checker(
                        url, paramsCopy, headers, GET, delay, vect, positions, timeout, encoding)
                    if not efficiencies:
                        for i in range(len(occurences)):
                            efficiencies.append(0)
                    bestEfficiency = max(efficiencies)
                    if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                        logger.red_line()
                        logger.good('Payload: %s' % loggerVector)
                        logger.info('Efficiency: %i' % bestEfficiency)
                        logger.info('Confidence: %i' % confidence)
                        if not skip:
                            choice = input(
                                '%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
                    elif bestEfficiency > minEfficiency:
                        logger.red_line()
                        logger.good('Payload: %s' % loggerVector)
                        logger.info('Efficiency: %i' % bestEfficiency)
                        logger.info('Confidence: %i' % confidence)
                else:
                    if re.search(r'<(a|d3|details)|lt;(a|d3|details)', vect.lower()):
                        continue
                    vect = unquote(vect)
                    if encoding:
                        paramsCopy[paramName] = encoding(vect)
                    else:
                        paramsCopy[paramName] = vect
                    response = requester(url, paramsCopy, headers, GET, delay, timeout).text
                    success = browserEngine(response)
                    if success:
                        logger.red_line()
                        logger.good('Payload: %s' % loggerVector)
                        logger.info('Efficiency: %i' % 100)
                        logger.info('Confidence: %i' % 10)
                        if not skip:
                            choice = input(
                                '%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
    logger.no_format('')
def singleTarget(target, paramData, verbose, encoding):
    if paramData:
        GET, POST = False, True
    else:
        GET, POST = True, False
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        print('%s Checking for DOM vulnerabilities' % run)
        highlighted = dom(response)
        if highlighted:
            print('%s Potentially vulnerable objects found' % good)
            print(red + ('-' * 60) + end)
            for line in highlighted:
                print(line)
            print(red + ('-' * 60) + end)
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    if args.find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))
    if fuzz:
        for paramName in params.keys():
            print('%s Fuzzing parameter: %s' % (info, paramName))
            paramsCopy = copy.deepcopy(params)
            paramsCopy[paramName] = xsschecker
            fuzzer(url, paramsCopy, headers, GET, delay, timeout, WAF, encoding)
        quit()
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        print('%s Testing parameter: %s' % (info, paramName))
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        parsedResponse = htmlParser(response, encoding)
        occurences = parsedResponse[0]
        verboseOutput(occurences, 'occurences', verbose)
        positions = parsedResponse[1]
        verboseOutput(positions, 'positions', verbose)
        if not occurences:
            print('%s No reflection found' % bad)
            continue
        else:
            print('%s Reflections found: %s' % (info, len(occurences)))
        print('%s Analysing reflections' % run)
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay,
                                     occurences, timeout, encoding)
        verboseOutput(efficiencies, 'efficiencies', verbose)
        print('%s Generating payloads' % run)
        vectors = generator(occurences, response.text)
        verboseOutput(vectors, 'vectors', verbose)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            print('%s No vectors were crafted' % bad)
            continue
        print('%s Payloads generated: %i' % (info, total))
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                progress += 1
                print('%s Payloads tried [%i/%i]' % (run, progress, total), end='\r')
                if not GET:
                    vect = unquote(vect)
                efficiencies = checker(url, paramsCopy, headers, GET, delay,
                                       vect, positions, timeout, encoding)
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    print('%s Confidence: %i' % (info, confidence))
                    if not args.skip:
                        choice = input(
                            '%s Would you like to continue scanning? [y/N] ' % que).lower()
                        if choice != 'y':
                            quit()
                elif bestEfficiency > minEfficiency:
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    print('%s Confidence: %i' % (info, confidence))
def scan(target, paramData, encoding, headers, delay, timeout, skipDOM, find, skip):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith("http"):
        try:
            response = requester("https://" + target, {}, headers, GET, delay, timeout)
            target = "https://" + target
        except:
            target = "http://" + target
    logger.debug("Scan target: {}".format(target))
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        logger.run("Checking for DOM vulnerabilities")
        highlighted = dom(response)
        if highlighted:
            logger.good("Potentially vulnerable objects found")
            logger.red_line(level="good")
            for line in highlighted:
                logger.no_format(line, level="good")
            logger.red_line(level="good")
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug("Host to scan: {}".format(host))
    url = getUrl(target, GET)
    logger.debug("Url to scan: {}".format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json("Scan parameters:", params)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        logger.error("No parameters to test.")
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        logger.error("WAF detected: %s%s%s" % (green, WAF, end))
    else:
        logger.good("WAF Status: %sOffline%s" % (green, end))
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        logger.info("Testing parameter: %s" % paramName)
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        occurences = htmlParser(response, encoding)
        positions = occurences.keys()
        logger.debug("Scan occurences: {}".format(occurences))
        if not occurences:
            logger.error("No reflection found")
            continue
        else:
            logger.info("Reflections found: %i" % len(occurences))
        logger.run("Analysing reflections")
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay,
                                     occurences, timeout, encoding)
        logger.debug("Scan efficiencies: {}".format(efficiencies))
        logger.run("Generating payloads")
        vectors = generator(occurences, response.text)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            logger.error("No vectors were crafted.")
            continue
        logger.info("Payloads generated: %i" % total)
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                if core.config.globalVariables["path"]:
                    vect = vect.replace("/", "%2F")
                loggerVector = vect
                progress += 1
                logger.run("Progress: %i/%i\r" % (progress, total))
                if not GET:
                    vect = unquote(vect)
                efficiencies = checker(
                    url,
                    paramsCopy,
                    headers,
                    GET,
                    delay,
                    vect,
                    positions,
                    timeout,
                    encoding,
                )
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                if bestEfficiency == 100 or (vect[0] == "\\" and bestEfficiency >= 95):
                    logger.red_line()
                    logger.good("Payload: %s" % loggerVector)
                    logger.info("Efficiency: %i" % bestEfficiency)
                    logger.info("Confidence: %i" % confidence)
                    if not skip:
                        choice = input(
                            "%s Would you like to continue scanning? [y/N] " % que).lower()
                        if choice != "y":
                            quit()
                elif bestEfficiency > minEfficiency:
                    logger.red_line()
                    logger.good("Payload: %s" % loggerVector)
                    logger.info("Efficiency: %i" % bestEfficiency)
                    logger.info("Confidence: %i" % confidence)
    logger.no_format("")
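# The reporting condition shared by the scan/singleTarget variants -- flag a
# payload when its best reflection efficiency is 100, or >= 95 for a
# backslash-escaped vector, or merely above minEfficiency otherwise --
# reduces to a small predicate. A minimal sketch, with an assumed stand-in
# value for the global minEfficiency:
MIN_EFFICIENCY = 90  # hypothetical value; the real threshold is configurable


def verdict(vect, efficiencies):
    # Missing efficiencies are treated as zeros, as in the scan loop.
    best = max(efficiencies) if efficiencies else 0
    if best == 100 or (vect[0] == '\\' and best >= 95):
        return 'confirmed'
    elif best > MIN_EFFICIENCY:
        return 'probable'
    return 'rejected'


print(verdict('<svg onload=alert()>', [100, 40]))  # confirmed
print(verdict('\\u003cscript', [96]))              # confirmed (escaped vector)
print(verdict('<img src=x>', [92]))                # probable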
def singleTarget(target, paramData):
    if paramData:
        GET, POST = False, True
    else:
        GET, POST = True, False
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requests.get('https://' + target)
            target = 'https://' + target
        except:
            target = 'http://' + target
    try:
        response = requests.get(target).text
        if not skipDOM:
            print('%s Checking for DOM vulnerabilities' % run)
            if dom(response):
                print('%s Potentially vulnerable objects found' % good)
    except Exception as e:
        print('%s Unable to connect to the target' % bad)
        print('%s Error: %s' % (bad, e))
        quit()
    host = urlparse(target).netloc  # Extracts host out of the url
    url = getUrl(target, paramData, GET)
    params = getParams(target, paramData, GET)
    if args.find:
        params = arjun(url, GET, headers, delay)
    if not params:
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))
    if fuzz:
        for paramName in params.keys():
            print('%s Fuzzing parameter: %s' % (info, paramName))
            paramsCopy = copy.deepcopy(params)
            paramsCopy[paramName] = xsschecker
            fuzzer(url, paramsCopy, headers, GET, delay, WAF)
        quit()
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        print('%s Testing parameter: %s' % (info, paramName))
        paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay).text
        occurences = htmlParser(response)
        if not occurences:
            print('%s No reflection found' % bad)
            continue
        else:
            print('%s Reflections found: %s' % (info, len(occurences)))
        print('%s Analysing reflections' % run)
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay, occurences)
        print('%s Generating payloads' % run)
        vectors = generator(occurences, response)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            print('%s No vectors were crafted' % bad)
            continue
        print('%s Payloads generated: %i' % (info, total))
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                progress += 1
                print('%s Payloads tried [%i/%i]' % (run, progress, total), end='\r')
                if not GET:
                    vect = unquote(vect)
                efficiencies = checker(url, paramsCopy, headers, GET, delay, vect)
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    print('%s Confidence: %i' % (info, confidence))
                    if GET:
                        flatParams = flattenParams(paramName, paramsCopy, vect)
                        if '"' not in flatParams and '}' not in flatParams and not skipPOC:
                            webbrowser.open(url + flatParams)
                    choice = input(
                        '%s Would you like to continue scanning? [y/N] ' % que).lower()
                    if choice != 'y':
                        quit()
                elif bestEfficiency > minEfficiency:
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    print('%s Confidence: %i' % (info, confidence))
def scan(target, paramData, encoding, headers, delay, timeout, skipDOM, find, skip):
    reports = {}
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    logger.debug('Scan target: {}'.format(target))
    response = requester(target, {}, headers, GET, delay, timeout).text
    vulnerable_code = list()
    if not skipDOM:
        logger.run('Checking for DOM vulnerabilities')
        highlighted = dom(response)
        if highlighted:
            logger.good('Potentially vulnerable objects found')
            logger.red_line(level='good')
            for line in highlighted:
                vulnerable_code.append(line)
                logger.no_format(line, level='good')
            logger.red_line(level='good')
    potential_vulnerabilities = [{"code": vulnerable_code}]
    reports["potential_vulnerabilities"] = potential_vulnerabilities
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug('Host to scan: {}'.format(host))
    url = getUrl(target, GET)
    logger.debug('Url to scan: {}'.format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json('Scan parameters:', params)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        logger.error('No parameters to test.')
        reports['parameter_reports'] = "No parameters to test."
        return reports
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        logger.error('WAF detected: %s%s%s' % (green, WAF, end))
    else:
        logger.good('WAF Status: %sOffline%s' % (green, end))
    paramReports = list()
    for paramName in params.keys():
        paramReport = {"parameter": None, "encoding": None, "reflection": None}
        paramReport['parameter'] = paramName
        paramsCopy = copy.deepcopy(params)
        logger.info('Testing parameter: %s' % paramName)
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
            paramReport['encoding'] = str(encoding)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        occurences = htmlParser(response, encoding)
        positions = occurences.keys()
        logger.debug('Scan occurences: {}'.format(occurences))
        if not occurences:
            logger.error('No reflection found')
            paramReport['reflection'] = "No reflection found"
            continue
        else:
            logger.info('Reflections found: %i' % len(occurences))
            paramReport['reflection'] = len(occurences)
        logger.run('Analysing reflections')
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay,
                                     occurences, timeout, encoding)
        logger.debug('Scan efficiencies: {}'.format(efficiencies))
        logger.run('Generating payloads')
        vectors = generator(occurences, response.text)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            logger.error('No vectors were crafted.')
            continue
        logger.info('Payloads generated: %i' % total)
        paramReport['payloads_generated'] = total
        payloadLists = list()
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                payloaditem = {}
                if core.config.globalVariables['path']:
                    vect = vect.replace('/', '%2F')
                loggerVector = vect
                progress += 1
                logger.run('Progress: %i/%i\r' % (progress, total))
                if not GET:
                    vect = unquote(vect)
                try:
                    efficiencies = checker(url, paramsCopy, headers, GET, delay,
                                           vect, positions, timeout, encoding)
                except Exception as e:
                    payloaditem['error'] = str(e)
                    print("ERROR")
                    continue
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                    logger.red_line()
                    logger.good('Payload: %s' % loggerVector)
                    logger.info('Efficiency: %i' % bestEfficiency)
                    logger.info('Confidence: %i' % confidence)
                    if not skip:
                        choice = input(
                            '%s Would you like to continue scanning? [y/N] ' % que).lower()
                        if choice != 'y':
                            quit()
                elif bestEfficiency > minEfficiency:
                    logger.red_line()
                    logger.good('Payload: %s' % loggerVector)
                    logger.info('Efficiency: %i' % bestEfficiency)
                    logger.info('Confidence: %i' % confidence)
                payloaditem['payload'] = loggerVector
                payloaditem['efficiency'] = bestEfficiency
                payloaditem['confidence'] = confidence
                payloadLists.append(payloaditem)
                print(payloaditem)
        paramReport['payload_reports'] = payloadLists
        logger.no_format('')
        paramReports.append(paramReport)
    reports['parameter_reports'] = paramReports
    return reports
        target = 'https://' + target
    except:
        target = 'http://' + target
    try:
        response = requests.get(target).text
        print('%s Checking for DOM vulnerabilities' % run)
        if dom(response):
            print('%s Potentially vulnerable objects found' % good)
    except Exception as e:
        print('%s Unable to connect to the target' % bad)
        print('%s Error: %s' % (bad, e))
        quit()
    host = urlparse(target).netloc  # Extracts host out of the url
    url = getUrl(target, paramData, GET)
    params = getParams(target, paramData, GET)
    if not params and not find:
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))
    if fuzz:
        for paramName in params.keys():
            print('%s Fuzzing parameter: %s' % (info, paramName))
            paramsCopy = copy.deepcopy(params)
            paramsCopy[paramName] = xsschecker
            fuzzer(url, paramsCopy, headers, GET, delay, WAF)
def scan(target, paramData, verbose, encoding, headers, delay, timeout, skipDOM, find, skip):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        print('%s Checking for DOM vulnerabilities' % run)
        highlighted = dom(response)
        if highlighted:
            print('%s Potentially vulnerable objects found' % good)
            print(red + ('-' * 60) + end)
            for line in highlighted:
                print(line)
            print(red + ('-' * 60) + end)
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    WAF = wafDetector(
        url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        print('%s Testing parameter: %s' % (info, paramName))
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        parsedResponse = htmlParser(response, encoding)
        occurences = parsedResponse[0]
        verboseOutput(occurences, 'occurences', verbose)
        positions = parsedResponse[1]
        verboseOutput(positions, 'positions', verbose)
        if not occurences:
            print('%s No reflection found' % bad)
            continue
        else:
            print('%s Reflections found: %s' % (info, len(occurences)))
        print('%s Analysing reflections' % run)
        efficiencies = filterChecker(
            url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
        verboseOutput(efficiencies, 'efficiencies', verbose)
        print('%s Generating payloads' % run)
        vectors = generator(occurences, response.text)
        verboseOutput(vectors, 'vectors', verbose)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            print('%s No vectors were crafted' % bad)
            continue
        print('%s Payloads generated: %i' % (info, total))
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                if core.config.globalVariables['path']:
                    vect = vect.replace('/', '%2F')
                printVector = vect
                progress += 1
                print('%s Progress: %i/%i' % (run, progress, total), end='\r')
                if confidence == 10:
                    if not GET:
                        vect = unquote(vect)
                    efficiencies = checker(
                        url, paramsCopy, headers, GET, delay, vect, positions, timeout, encoding)
                    if not efficiencies:
                        for i in range(len(occurences)):
                            efficiencies.append(0)
                    bestEfficiency = max(efficiencies)
                    if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, bestEfficiency))
                        print('%s Confidence: %i' % (info, confidence))
                        if not skip:
                            choice = input(
                                '%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
                    elif bestEfficiency > minEfficiency:
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, bestEfficiency))
                        print('%s Confidence: %i' % (info, confidence))
                else:
                    if re.search(r'<(a|d3|details)|lt;(a|d3|details)', vect.lower()):
                        continue
                    vect = unquote(vect)
                    if encoding:
                        paramsCopy[paramName] = encoding(vect)
                    else:
                        paramsCopy[paramName] = vect
                    response = requester(url, paramsCopy, headers, GET, delay, timeout).text
                    success = browserEngine(response)
                    if success:
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, 100))
                        print('%s Confidence: %i' % (info, 10))
                        if not skip:
                            choice = input(
                                '%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
    print('')