def crawl(scheme, host, main_url, form, domURL, verbose, blindXSS, blindPayload, headers, delay, timeout, skipDOM, encoding):
    if domURL and not skipDOM:
        response = requester(domURL, {}, headers, True, delay, timeout).text
        highlighted = dom(response)
        if highlighted:
            print('%s Potentially vulnerable objects found at %s' % (good, domURL))
            print(red + ('-' * 60) + end)
            for line in highlighted:
                print(line)
            print(red + ('-' * 60) + end)
    if form:
        for each in form.values():
            url = each['action']
            if url:
                if url.startswith(main_url):
                    pass
                elif url.startswith('//') and url[2:].startswith(host):
                    url = scheme + '://' + url[2:]
                elif url.startswith('/'):
                    url = scheme + '://' + host + url
                elif re.match(r'\w', url[0]):
                    url = scheme + '://' + host + '/' + url
                method = each['method']
                GET = True if method == 'get' else False
                inputs = each['inputs']
                paramData = {}
                for one in inputs:
                    paramData[one['name']] = one['value']
                for paramName in paramData.keys():
                    paramsCopy = copy.deepcopy(paramData)
                    paramsCopy[paramName] = xsschecker
                    response = requester(url, paramsCopy, headers, GET, delay, timeout)
                    parsedResponse = htmlParser(response, encoding)
                    occurences = parsedResponse[0]
                    positions = parsedResponse[1]
                    efficiencies = filterChecker(url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
                    vectors = generator(occurences, response.text)
                    if vectors:
                        for confidence, vects in vectors.items():
                            try:
                                payload = list(vects)[0]
                                print('%s Vulnerable webpage: %s%s%s' % (good, green, url, end))
                                print('%s Vector for %s%s%s: %s' % (good, green, paramName, end, payload))
                                break
                            except IndexError:
                                pass
                    if blindXSS and blindPayload:
                        paramsCopy[paramName] = blindPayload
                        requester(url, paramsCopy, headers, GET, delay, timeout)
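# --- Example (illustrative, not part of the module) --------------------
# crawl() handles one page's forms independently, so a driver can fan it
# out across many pages at once. A minimal sketch, assuming `forms` and
# `domURLs` are parallel lists produced by the crawler and `threadCount`
# comes from the surrounding configuration (all three names are
# assumptions for this sketch):
import concurrent.futures

threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=threadCount)
futures = (
    threadpool.submit(crawl, scheme, host, main_url, form, domURL, verbose,
                      blindXSS, blindPayload, headers, delay, timeout,
                      skipDOM, encoding)
    for form, domURL in zip(forms, domURLs)
)
for i, _ in enumerate(concurrent.futures.as_completed(futures)):
    print('%s Progress: %i/%i' % (run, i + 1, len(forms)), end='\r')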
def rec(target):
    vulnerable_items = list()
    vulnerable_report = dict()
    processed.add(target)
    printableTarget = '/'.join(target.split('/')[3:])
    if len(printableTarget) > 40:
        printableTarget = printableTarget[-40:]
    else:
        printableTarget = (printableTarget + (' ' * (40 - len(printableTarget))))
    logger.run('Parsing %s\r' % printableTarget)
    url = getUrl(target, True)
    params = getParams(target, '', True)
    if '=' in target:  # if there's a = in the url, there should be GET parameters
        inps = []
        for name, value in params.items():
            inps.append({'name': name, 'value': value})
        forms.append({0: {'action': url, 'method': 'get', 'inputs': inps}})
    response = requester(url, params, headers, True, delay, timeout).text
    vulnerable_components = retireJs(url, response)
    print("===== Vulnerable Components =====")
    print(vulnerable_components)
    vulnerable_report['vulnerable_components'] = vulnerable_components
    if not skipDOM:
        highlighted = dom(response)
        clean_highlighted = ''.join([re.sub(r'^\d+\s+', '', line) for line in highlighted])
        if highlighted and clean_highlighted not in checkedDOMs:
            checkedDOMs.append(clean_highlighted)
            logger.good('Potentially vulnerable objects found at %s' % url)
            vulnerable_report['url'] = url
            logger.red_line(level='good')
            for line in highlighted:
                vulnerable_items.append(clean_colors(line))
                logger.no_format(line, level='good')
            vulnerable_report['codes'] = vulnerable_items
            logger.red_line(level='good')
    forms.append(zetanize(response))
    matches = re.findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
    for link in matches:  # iterate over the matches
        # remove everything after a "#" to deal with in-page anchors
        link = link.split('#')[0]
        if link.endswith(('.pdf', '.png', '.jpg', '.jpeg', '.xls', '.xml', '.docx', '.doc')):
            pass
        else:
            if link[:4] == 'http':
                if link.startswith(main_url):
                    storage.add(link)
            elif link[:2] == '//':
                if link.split('/')[2].startswith(host):
                    storage.add(schema + link)
            elif link[:1] == '/':
                storage.add(main_url + link)
            else:
                storage.add(main_url + '/' + link)
    return vulnerable_report
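# --- Example (illustrative, not part of the module) --------------------
# rec() pushes every in-scope link into the shared `storage` set, so a
# breadth-first driver can drain that set level by level. A minimal
# sketch, assuming the shared `storage` and `processed` sets come from
# the surrounding scope; `crawlLevel` (maximum depth) is an assumed name:
level_reports = []
for _ in range(crawlLevel):
    urls = storage - processed      # discovered but not yet parsed
    if not urls:
        break                       # nothing new on this level, stop early
    for target in urls:
        level_reports.append(rec(target))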
def multiTargets(scheme, host, main_url, form):
    signatures = set()
    for each in form.values():
        url = each['action']
        if url:
            if url.startswith(main_url):
                pass
            elif url.startswith('//') and url[2:].startswith(host):
                url = scheme + '://' + url[2:]
            elif url.startswith('/'):
                url = scheme + '://' + host + url
            elif re.match(r'\w', url[0]):
                url = scheme + '://' + host + '/' + url
            method = each['method']
            if method == 'get':
                GET = True
            else:
                GET = False
            inputs = each['inputs']
            paramData = {}
            for one in inputs:
                paramData[one['name']] = one['value']
            if url not in ''.join(signatures) and not skipDOM:
                response = requests.get(url).text
                if dom(response, silent=True):
                    print('%s Potentially vulnerable objects found' % good)
            for paramName in paramData.keys():
                signature = url + paramName
                if signature not in signatures:
                    signatures.add(signature)
                    print('%s Scanning %s%s%s, %s' % (run, green, url, end, paramName))
                    paramsCopy = copy.deepcopy(paramData)
                    paramsCopy[paramName] = xsschecker
                    response = requester(url, paramsCopy, headers, GET, delay).text
                    try:
                        occurences = htmlParser(response)
                        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay, occurences)
                        vectors = generator(occurences, response)
                        if vectors:
                            for confidence, vects in vectors.items():
                                try:
                                    print('%s Vector for %s: %s' % (good, paramName, list(vects)[0]))
                                    break
                                except IndexError:
                                    pass
                    except Exception as e:
                        print('%s Error: %s' % (bad, e))
def multiTargets(scheme, host, main_url, form, domURL):
    signatures = set()
    if domURL and not skipDOM:
        response = requests.get(domURL).text
        if dom(response, silent=True):
            print('%s Potentially vulnerable objects found at %s' % (good, domURL))
    if form:
        for each in form.values():
            url = each['action']
            if url:
                if url.startswith(main_url):
                    pass
                elif url.startswith('//') and url[2:].startswith(host):
                    url = scheme + '://' + url[2:]
                elif url.startswith('/'):
                    url = scheme + '://' + host + url
                elif re.match(r'\w', url[0]):
                    url = scheme + '://' + host + '/' + url
                method = each['method']
                if method == 'get':
                    GET = True
                else:
                    GET = False
                inputs = each['inputs']
                paramData = {}
                for one in inputs:
                    paramData[one['name']] = one['value']
                for paramName in paramData.keys():
                    paramsCopy = copy.deepcopy(paramData)
                    paramsCopy[paramName] = xsschecker
                    response = requester(url, paramsCopy, headers, GET, delay, timeout)
                    parsedResponse = htmlParser(response)
                    occurences = parsedResponse[0]
                    positions = parsedResponse[1]
                    efficiencies = filterChecker(url, paramsCopy, headers, GET, delay, occurences, timeout)
                    vectors = generator(occurences, response.text)
                    if vectors:
                        for confidence, vects in vectors.items():
                            try:
                                payload = list(vects)[0]
                                print('%s Vulnerable webpage: %s%s%s' % (good, green, url, end))
                                print('%s Vector for %s%s%s: %s' % (good, green, paramName, end, payload))
                                break
                            except IndexError:
                                pass
def rec(target):
    processed.add(target)
    printableTarget = "/".join(target.split("/")[3:])
    if len(printableTarget) > 40:
        printableTarget = printableTarget[-40:]
    else:
        printableTarget = printableTarget + (" " * (40 - len(printableTarget)))
    logger.run("Parsing %s\r" % printableTarget)
    url = getUrl(target, True)
    params = getParams(target, "", True)
    if "=" in target:  # if there's a = in the url, there should be GET parameters
        inps = []
        for name, value in params.items():
            inps.append({"name": name, "value": value})
        forms.append({0: {"action": url, "method": "get", "inputs": inps}})
    response = requester(url, params, headers, True, delay, timeout).text
    retireJs(url, response)
    if not skipDOM:
        highlighted = dom(response)
        clean_highlighted = "".join(
            [re.sub(r"^\d+\s+", "", line) for line in highlighted])
        if highlighted and clean_highlighted not in checkedDOMs:
            checkedDOMs.append(clean_highlighted)
            logger.good("Potentially vulnerable objects found at %s" % url)
            logger.red_line(level="good")
            for line in highlighted:
                logger.no_format(line, level="good")
            logger.red_line(level="good")
    forms.append(zetanize(response))
    matches = re.findall(r'<[aA].*href=["\']{0,1}(.*?)["\']', response)
    for link in matches:  # iterate over the matches
        # remove everything after a "#" to deal with in-page anchors
        link = link.split("#")[0]
        if link[:4] == "http":
            if link.startswith(main_url):
                storage.add(link)
        elif link[:2] == "//":
            if link.split("/")[2].startswith(host):
                storage.add(schema + link)
        elif link[:1] == "/":
            storage.add(main_url + link)
        else:
            storage.add(main_url + "/" + link)
def scan(target, paramData, encoding, headers, delay, timeout, skipDOM, find, skip):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    logger.debug('Scan target: {}'.format(target))
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        logger.run('Checking for DOM vulnerabilities')
        highlighted = dom(response)
        if highlighted:
            logger.good('Potentially vulnerable objects found')
            logger.red_line(level='good')
            for line in highlighted:
                logger.no_format(line, level='good')
            logger.red_line(level='good')
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug('Host to scan: {}'.format(host))
    url = getUrl(target, GET)
    logger.debug('Url to scan: {}'.format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json('Scan parameters:', params)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        logger.error('No parameters to test.')
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        logger.error('WAF detected: %s%s%s' % (green, WAF, end))
    else:
        logger.good('WAF Status: %sOffline%s' % (green, end))
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        logger.info('Testing parameter: %s' % paramName)
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        parsedResponse = htmlParser(response, encoding)
        occurences = parsedResponse[0]
        logger.debug('Scan occurences: {}'.format(occurences))
        positions = parsedResponse[1]
        logger.debug('Scan positions: {}'.format(positions))
        if not occurences:
            logger.error('No reflection found')
            continue
        else:
            logger.info('Reflections found: %i' % len(occurences))
        logger.run('Analysing reflections')
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
        logger.debug('Scan efficiencies: {}'.format(efficiencies))
        logger.run('Generating payloads')
        vectors = generator(occurences, response.text)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            logger.error('No vectors were crafted.')
            continue
        logger.info('Payloads generated: %i' % total)
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                if core.config.globalVariables['path']:
                    vect = vect.replace('/', '%2F')
                loggerVector = vect
                progress += 1
                logger.run('Progress: %i/%i\r' % (progress, total))
                if confidence == 10:
                    if not GET:
                        vect = unquote(vect)
                    efficiencies = checker(url, paramsCopy, headers, GET, delay, vect, positions, timeout, encoding)
                    if not efficiencies:
                        for i in range(len(occurences)):
                            efficiencies.append(0)
                    bestEfficiency = max(efficiencies)
                    if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                        logger.red_line()
                        logger.good('Payload: %s' % loggerVector)
                        logger.info('Efficiency: %i' % bestEfficiency)
                        logger.info('Confidence: %i' % confidence)
                        if not skip:
                            choice = input('%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
                    elif bestEfficiency > minEfficiency:
                        logger.red_line()
                        logger.good('Payload: %s' % loggerVector)
                        logger.info('Efficiency: %i' % bestEfficiency)
                        logger.info('Confidence: %i' % confidence)
                else:
                    if re.search(r'<(a|d3|details)|lt;(a|d3|details)', vect.lower()):
                        continue
                    vect = unquote(vect)
                    if encoding:
                        paramsCopy[paramName] = encoding(vect)
                    else:
                        paramsCopy[paramName] = vect
                    response = requester(url, paramsCopy, headers, GET, delay, timeout).text
                    success = browserEngine(response)
                    if success:
                        logger.red_line()
                        logger.good('Payload: %s' % loggerVector)
                        logger.info('Efficiency: %i' % 100)
                        logger.info('Confidence: %i' % 10)
                        if not skip:
                            choice = input('%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
    logger.no_format('')
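# --- Example (illustrative, not part of the module) --------------------
# A one-off invocation of the scan() variant above; every value below is
# a placeholder, not a project default:
scan(
    target='example.com/search.php?q=query',
    paramData=None,                        # None -> GET; a POST body would flip to POST
    encoding=None,                         # or a payload-encoding callable
    headers={'User-Agent': 'Mozilla/5.0'},
    delay=0,                               # seconds between requests
    timeout=10,                            # per-request timeout, seconds
    skipDOM=False,                         # also run the static DOM-sink check
    find=False,                            # True would brute-force parameter names first
    skip=True,                             # don't pause at the continue-scanning prompt
)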
def singleTarget(target, paramData, verbose, encoding):
    if paramData:
        GET, POST = False, True
    else:
        GET, POST = True, False
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        print('%s Checking for DOM vulnerabilities' % run)
        highlighted = dom(response)
        if highlighted:
            print('%s Potentially vulnerable objects found' % good)
            print(red + ('-' * 60) + end)
            for line in highlighted:
                print(line)
            print(red + ('-' * 60) + end)
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    if args.find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))
    if fuzz:
        for paramName in params.keys():
            print('%s Fuzzing parameter: %s' % (info, paramName))
            paramsCopy = copy.deepcopy(params)
            paramsCopy[paramName] = xsschecker
            fuzzer(url, paramsCopy, headers, GET, delay, timeout, WAF, encoding)
        quit()
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        print('%s Testing parameter: %s' % (info, paramName))
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        parsedResponse = htmlParser(response, encoding)
        occurences = parsedResponse[0]
        verboseOutput(occurences, 'occurences', verbose)
        positions = parsedResponse[1]
        verboseOutput(positions, 'positions', verbose)
        if not occurences:
            print('%s No reflection found' % bad)
            continue
        else:
            print('%s Reflections found: %s' % (info, len(occurences)))
        print('%s Analysing reflections' % run)
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
        verboseOutput(efficiencies, 'efficiencies', verbose)
        print('%s Generating payloads' % run)
        vectors = generator(occurences, response.text)
        verboseOutput(vectors, 'vectors', verbose)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            print('%s No vectors were crafted' % bad)
            continue
        print('%s Payloads generated: %i' % (info, total))
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                progress += 1
                print('%s Payloads tried [%i/%i]' % (run, progress, total), end='\r')
                if not GET:
                    vect = unquote(vect)
                efficiencies = checker(url, paramsCopy, headers, GET, delay, vect, positions, timeout, encoding)
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    print('%s Confidence: %i' % (info, confidence))
                    if not args.skip:
                        choice = input('%s Would you like to continue scanning? [y/N] ' % que).lower()
                        if choice != 'y':
                            quit()
                elif bestEfficiency > minEfficiency:
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    print('%s Confidence: %i' % (info, confidence))
def singleTarget(target, paramData):
    if paramData:
        GET, POST = False, True
    else:
        GET, POST = True, False
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requests.get('https://' + target)
            target = 'https://' + target
        except:
            target = 'http://' + target
    try:
        response = requests.get(target).text
        if not skipDOM:
            print('%s Checking for DOM vulnerabilities' % run)
            if dom(response):
                print('%s Potentially vulnerable objects found' % good)
    except Exception as e:
        print('%s Unable to connect to the target' % bad)
        print('%s Error: %s' % (bad, e))
        quit()
    host = urlparse(target).netloc  # Extracts host out of the url
    url = getUrl(target, paramData, GET)
    params = getParams(target, paramData, GET)
    if args.find:
        params = arjun(url, GET, headers, delay)
    if not params:
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))
    if fuzz:
        for paramName in params.keys():
            print('%s Fuzzing parameter: %s' % (info, paramName))
            paramsCopy = copy.deepcopy(params)
            paramsCopy[paramName] = xsschecker
            fuzzer(url, paramsCopy, headers, GET, delay, WAF)
        quit()
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        print('%s Testing parameter: %s' % (info, paramName))
        paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay).text
        occurences = htmlParser(response)
        if not occurences:
            print('%s No reflection found' % bad)
            continue
        else:
            print('%s Reflections found: %s' % (info, len(occurences)))
        print('%s Analysing reflections' % run)
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay, occurences)
        print('%s Generating payloads' % run)
        vectors = generator(occurences, response)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            print('%s No vectors were crafted' % bad)
            continue
        print('%s Payloads generated: %i' % (info, total))
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                progress += 1
                print('%s Payloads tried [%i/%i]' % (run, progress, total), end='\r')
                if not GET:
                    vect = unquote(vect)
                efficiencies = checker(url, paramsCopy, headers, GET, delay, vect)
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    print('%s Confidence: %i' % (info, confidence))
                    if GET:
                        flatParams = flattenParams(paramName, paramsCopy, vect)
                        if '"' not in flatParams and '}' not in flatParams and not skipPOC:
                            webbrowser.open(url + flatParams)
                    choice = input('%s Would you like to continue scanning? [y/N] ' % que).lower()
                    if choice != 'y':
                        quit()
                elif bestEfficiency > minEfficiency:
                    print(('%s-%s' % (red, end)) * 60)
                    print('%s Payload: %s' % (good, vect))
                    print('%s Efficiency: %i' % (info, bestEfficiency))
                    print('%s Confidence: %i' % (info, confidence))
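# --- Note (assumption, not the project's definition) --------------------
# The PoC branch above relies on flattenParams() serializing the
# parameter dict back into a query string with the payload substituted
# for the parameter under test. A sketch of a helper with that assumed
# behavior:
def flattenParams(currentParam, params, payload):
    flatted = []
    for name, value in params.items():
        # swap the payload into the parameter currently being tested
        flatted.append(name + '=' + (payload if name == currentParam else value))
    return '?' + '&'.join(flatted)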
def scan_page(self, url, data=None):
    retval, usable = False, False
    url = re.sub(r"=(&|\Z)", r"=1\g<1>", url)
    page_content = self._retrieve_content(url)
    soup = BeautifulSoup(str(page_content), 'html.parser')
    # TODO: Crawl for additional URLs in page.
    # links = soup.find_all(href=True)
    # self.LOGGER.info(f"{links}")
    # url_list = [url]
    # if len(links) > 0:
    #     for links in links:
    #         url_list.append(f"{url}")
    self.LOGGER.info(f"Scan Page: {url}")
    self.LOGGER.info(' - Checking for DOM vulnerabilities')
    response = requester(url, {}, HEADERS, True, DELAY, TIMEOUT).text
    highlighted = dom(response)
    if highlighted:
        self.LOGGER.good(' - Potentially vulnerable objects found')
        self.LOGGER.red_line(level='good')
        for line in highlighted:
            self.LOGGER.no_format(line, level='good')
        self.LOGGER.red_line(level='good')
    forms = soup.find_all('form')
    form_details = {}
    ret_value = {url: {}}
    for form in forms:
        inputs = []
        form_action = form.get('action')
        for form_input in form.find_all('input'):
            input_type = form_input.get('type', None)
            input_name = form_input.get('name', None)
            input_value = form_input.get('value', None)
            if input_type != 'text':
                continue
            inputs.append({
                'type': input_type,
                'name': input_name,  # Important to determine name of the input
            })
        form_details.update({form_action: inputs})
    for form, inputs in form_details.items():
        if len(inputs) < 1:
            continue
        # self.LOGGER.info(f"Inputs: {inputs}")
        # url_with_params = url + form + "?"
        param_discovery_list = []
        for item in inputs:
            param = item["name"]
            param_discovery_list.append(param)
        # self.LOGGER.info(f"URL(w/ params): {url_with_params}")
        # self.LOGGER.info(f"Input: {inputs}\n")
        url_form, vuln_params_payloads = scan(url, form, param_discovery_list, ENCODING,
                                              HEADERS, DELAY, TIMEOUT, SKIP_DOM, SKIP_OPT)
        ret_value[url].update({form: vuln_params_payloads})
    return ret_value
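# --- Example (illustrative, not part of the module) --------------------
# Minimal usage of scan_page(); `XSSScanner` is an assumed name for the
# enclosing class, which only needs a LOGGER plus the module-level
# HEADERS/DELAY/TIMEOUT constants:
scanner = XSSScanner()
results = scanner.scan_page('http://testphp.example.com/search.php?test=1')
for page_url, form_map in results.items():
    for form_action, findings in form_map.items():
        print(page_url, form_action, findings)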
def scan(target, paramData, encoding, headers, delay, timeout, skipDOM, find, skip):
    reports = {}
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    logger.debug('Scan target: {}'.format(target))
    response = requester(target, {}, headers, GET, delay, timeout).text
    vulnerable_code = list()
    if not skipDOM:
        logger.run('Checking for DOM vulnerabilities')
        highlighted = dom(response)
        if highlighted:
            logger.good('Potentially vulnerable objects found')
            logger.red_line(level='good')
            for line in highlighted:
                vulnerable_code.append(line)
                logger.no_format(line, level='good')
            logger.red_line(level='good')
    potential_vulnerabilities = [{"code": vulnerable_code}]
    reports["potential_vulnerabilities"] = potential_vulnerabilities
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug('Host to scan: {}'.format(host))
    url = getUrl(target, GET)
    logger.debug('Url to scan: {}'.format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json('Scan parameters:', params)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        logger.error('No parameters to test.')
        reports['parameter_reports'] = "No parameters to test."
        return reports
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        logger.error('WAF detected: %s%s%s' % (green, WAF, end))
    else:
        logger.good('WAF Status: %sOffline%s' % (green, end))
    paramReports = list()
    for paramName in params.keys():
        paramReport = {"parameter": None, "encoding": None, "reflection": None}
        paramReport['parameter'] = paramName
        paramsCopy = copy.deepcopy(params)
        logger.info('Testing parameter: %s' % paramName)
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
            paramReport['encoding'] = str(encoding)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        occurences = htmlParser(response, encoding)
        positions = occurences.keys()
        logger.debug('Scan occurences: {}'.format(occurences))
        if not occurences:
            logger.error('No reflection found')
            paramReport['reflection'] = "No reflection found"
            paramReports.append(paramReport)  # record the result before moving on
            continue
        else:
            logger.info('Reflections found: %i' % len(occurences))
            paramReport['reflection'] = len(occurences)
        logger.run('Analysing reflections')
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
        logger.debug('Scan efficiencies: {}'.format(efficiencies))
        logger.run('Generating payloads')
        vectors = generator(occurences, response.text)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            logger.error('No vectors were crafted.')
            paramReports.append(paramReport)
            continue
        logger.info('Payloads generated: %i' % total)
        paramReport['payloads_generated'] = total
        payloadLists = list()
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                payloaditem = {}
                if core.config.globalVariables['path']:
                    vect = vect.replace('/', '%2F')
                loggerVector = vect
                progress += 1
                logger.run('Progress: %i/%i\r' % (progress, total))
                if not GET:
                    vect = unquote(vect)
                try:
                    efficiencies = checker(url, paramsCopy, headers, GET, delay, vect, positions, timeout, encoding)
                except Exception as e:
                    payloaditem['error'] = str(e)
                    payloadLists.append(payloaditem)  # record the failure instead of dropping it
                    continue
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                    logger.red_line()
                    logger.good('Payload: %s' % loggerVector)
                    logger.info('Efficiency: %i' % bestEfficiency)
                    logger.info('Confidence: %i' % confidence)
                    if not skip:
                        choice = input('%s Would you like to continue scanning? [y/N] ' % que).lower()
                        if choice != 'y':
                            quit()
                elif bestEfficiency > minEfficiency:
                    logger.red_line()
                    logger.good('Payload: %s' % loggerVector)
                    logger.info('Efficiency: %i' % bestEfficiency)
                    logger.info('Confidence: %i' % confidence)
                payloaditem['payload'] = loggerVector
                payloaditem['efficiency'] = bestEfficiency
                payloaditem['confidence'] = confidence
                payloadLists.append(payloaditem)
        paramReport['payload_reports'] = payloadLists
        paramReports.append(paramReport)  # collect per-parameter results
        logger.no_format('')
    reports['parameter_reports'] = paramReports
    return reports
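# --- Example (illustrative, not part of the module) --------------------
# Because this scan() variant returns a plain dict of strings, ints and
# lists, the findings can be persisted directly; the target and output
# path below are placeholders:
import json

reports = scan('example.com/page?item=1', None, None,
               {'User-Agent': 'Mozilla/5.0'}, 0, 10, False, False, True)
with open('xss_report.json', 'w') as fh:
    json.dump(reports, fh, indent=2, default=str)  # default=str guards non-JSON values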
def scan(target, paramData, encoding, headers, delay, timeout, skipDOM, find, skip):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith("http"):
        try:
            response = requester("https://" + target, {}, headers, GET, delay, timeout)
            target = "https://" + target
        except:
            target = "http://" + target
    logger.debug("Scan target: {}".format(target))
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        logger.run("Checking for DOM vulnerabilities")
        highlighted = dom(response)
        if highlighted:
            logger.good("Potentially vulnerable objects found")
            logger.red_line(level="good")
            for line in highlighted:
                logger.no_format(line, level="good")
            logger.red_line(level="good")
    host = urlparse(target).netloc  # Extracts host out of the url
    logger.debug("Host to scan: {}".format(host))
    url = getUrl(target, GET)
    logger.debug("Url to scan: {}".format(url))
    params = getParams(target, paramData, GET)
    logger.debug_json("Scan parameters:", params)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        logger.error("No parameters to test.")
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        logger.error("WAF detected: %s%s%s" % (green, WAF, end))
    else:
        logger.good("WAF Status: %sOffline%s" % (green, end))
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        logger.info("Testing parameter: %s" % paramName)
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        occurences = htmlParser(response, encoding)
        positions = occurences.keys()
        logger.debug("Scan occurences: {}".format(occurences))
        if not occurences:
            logger.error("No reflection found")
            continue
        else:
            logger.info("Reflections found: %i" % len(occurences))
        logger.run("Analysing reflections")
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
        logger.debug("Scan efficiencies: {}".format(efficiencies))
        logger.run("Generating payloads")
        vectors = generator(occurences, response.text)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            logger.error("No vectors were crafted.")
            continue
        logger.info("Payloads generated: %i" % total)
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                if core.config.globalVariables["path"]:
                    vect = vect.replace("/", "%2F")
                loggerVector = vect
                progress += 1
                logger.run("Progress: %i/%i\r" % (progress, total))
                if not GET:
                    vect = unquote(vect)
                efficiencies = checker(
                    url, paramsCopy, headers, GET, delay, vect,
                    positions, timeout, encoding,
                )
                if not efficiencies:
                    for i in range(len(occurences)):
                        efficiencies.append(0)
                bestEfficiency = max(efficiencies)
                if bestEfficiency == 100 or (vect[0] == "\\" and bestEfficiency >= 95):
                    logger.red_line()
                    logger.good("Payload: %s" % loggerVector)
                    logger.info("Efficiency: %i" % bestEfficiency)
                    logger.info("Confidence: %i" % confidence)
                    if not skip:
                        choice = input("%s Would you like to continue scanning? [y/N] " % que).lower()
                        if choice != "y":
                            quit()
                elif bestEfficiency > minEfficiency:
                    logger.red_line()
                    logger.good("Payload: %s" % loggerVector)
                    logger.info("Efficiency: %i" % bestEfficiency)
                    logger.info("Confidence: %i" % confidence)
    logger.no_format("")
def scan(target, paramData, verbose, encoding, headers, delay, timeout, skipDOM, find, skip):
    GET, POST = (False, True) if paramData else (True, False)
    # If the user hasn't supplied the root url with http(s), we will handle it
    if not target.startswith('http'):
        try:
            response = requester('https://' + target, {}, headers, GET, delay, timeout)
            target = 'https://' + target
        except:
            target = 'http://' + target
    response = requester(target, {}, headers, GET, delay, timeout).text
    if not skipDOM:
        print('%s Checking for DOM vulnerabilities' % run)
        highlighted = dom(response)
        if highlighted:
            print('%s Potentially vulnerable objects found' % good)
            print(red + ('-' * 60) + end)
            for line in highlighted:
                print(line)
            print(red + ('-' * 60) + end)
    host = urlparse(target).netloc  # Extracts host out of the url
    verboseOutput(host, 'host', verbose)
    url = getUrl(target, GET)
    verboseOutput(url, 'url', verbose)
    params = getParams(target, paramData, GET)
    verboseOutput(params, 'params', verbose)
    if find:
        params = arjun(url, GET, headers, delay, timeout)
    if not params:
        print('%s No parameters to test.' % bad)
        quit()
    WAF = wafDetector(url, {list(params.keys())[0]: xsschecker}, headers, GET, delay, timeout)
    if WAF:
        print('%s WAF detected: %s%s%s' % (bad, green, WAF, end))
    else:
        print('%s WAF Status: %sOffline%s' % (good, green, end))
    for paramName in params.keys():
        paramsCopy = copy.deepcopy(params)
        print('%s Testing parameter: %s' % (info, paramName))
        if encoding:
            paramsCopy[paramName] = encoding(xsschecker)
        else:
            paramsCopy[paramName] = xsschecker
        response = requester(url, paramsCopy, headers, GET, delay, timeout)
        parsedResponse = htmlParser(response, encoding)
        occurences = parsedResponse[0]
        verboseOutput(occurences, 'occurences', verbose)
        positions = parsedResponse[1]
        verboseOutput(positions, 'positions', verbose)
        if not occurences:
            print('%s No reflection found' % bad)
            continue
        else:
            print('%s Reflections found: %s' % (info, len(occurences)))
        print('%s Analysing reflections' % run)
        efficiencies = filterChecker(url, paramsCopy, headers, GET, delay, occurences, timeout, encoding)
        verboseOutput(efficiencies, 'efficiencies', verbose)
        print('%s Generating payloads' % run)
        vectors = generator(occurences, response.text)
        verboseOutput(vectors, 'vectors', verbose)
        total = 0
        for v in vectors.values():
            total += len(v)
        if total == 0:
            print('%s No vectors were crafted' % bad)
            continue
        print('%s Payloads generated: %i' % (info, total))
        progress = 0
        for confidence, vects in vectors.items():
            for vect in vects:
                if core.config.globalVariables['path']:
                    vect = vect.replace('/', '%2F')
                printVector = vect
                progress += 1
                print('%s Progress: %i/%i' % (run, progress, total), end='\r')
                if confidence == 10:
                    if not GET:
                        vect = unquote(vect)
                    efficiencies = checker(url, paramsCopy, headers, GET, delay, vect, positions, timeout, encoding)
                    if not efficiencies:
                        for i in range(len(occurences)):
                            efficiencies.append(0)
                    bestEfficiency = max(efficiencies)
                    if bestEfficiency == 100 or (vect[0] == '\\' and bestEfficiency >= 95):
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, bestEfficiency))
                        print('%s Confidence: %i' % (info, confidence))
                        if not skip:
                            choice = input('%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
                    elif bestEfficiency > minEfficiency:
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, bestEfficiency))
                        print('%s Confidence: %i' % (info, confidence))
                else:
                    if re.search(r'<(a|d3|details)|lt;(a|d3|details)', vect.lower()):
                        continue
                    vect = unquote(vect)
                    if encoding:
                        paramsCopy[paramName] = encoding(vect)
                    else:
                        paramsCopy[paramName] = vect
                    response = requester(url, paramsCopy, headers, GET, delay, timeout).text
                    success = browserEngine(response)
                    if success:
                        print(('%s-%s' % (red, end)) * 60)
                        print('%s Payload: %s' % (good, printVector))
                        print('%s Efficiency: %i' % (info, 100))
                        print('%s Confidence: %i' % (info, 10))
                        if not skip:
                            choice = input('%s Would you like to continue scanning? [y/N] ' % que).lower()
                            if choice != 'y':
                                quit()
    print('')