def initialize(url, include, headers, GET, delay, paramList, threadCount):
    """Probe *url* and brute-force ``paramList`` to find valid HTTP parameters.

    Prints ``url?param`` for every confirmed parameter and returns the list of
    confirmed names. Returns ``{}`` when the target URL cannot be stabilized
    (callers treat the empty dict as "no result").
    """
    url = stabilize(url)
    if not url:
        return {}
    else:
        firstResponse = requester(url, include, headers, GET, delay)
        # Baseline: send a random, surely non-existent parameter and record
        # how the target responds to it.
        originalFuzz = randomString(6)
        data = {originalFuzz: originalFuzz[::-1]}
        data.update(include)
        response = requester(url, data, headers, GET, delay)
        # Number of times the random value is reflected back in the page.
        reflections = response.text.count(originalFuzz[::-1])
        originalResponse = response.text
        originalCode = response.status_code
        newLength = len(response.text)
        plainText = removeTags(originalResponse)
        plainTextLength = len(plainText)
        # "Factors" describe how the baseline compares to the plain page;
        # they are used later to judge candidate responses.
        factors = {'sameHTML': False, 'samePlainText': False}
        if len(firstResponse.text) == len(originalResponse):
            factors['sameHTML'] = True
        elif len(removeTags(firstResponse.text)) == len(plainText):
            factors['samePlainText'] = True
        # Mine extra candidate parameters from the page content itself.
        heuristic(firstResponse.text, paramList)
        # NOTE(review): fuzz/data below are rebuilt but never used afterwards;
        # kept for parity with the original flow — confirm before removing.
        fuzz = randomString(8)
        data = {fuzz: fuzz[::-1]}
        data.update(include)
        # Narrowing phase: test candidates in chunks of 50 and keep splitting
        # until every surviving chunk is reduced to single parameters.
        toBeChecked = slicer(paramList, 50)
        foundParamsTemp = []
        while True:
            toBeChecked = narrower(toBeChecked, url, include, headers, GET, delay, originalResponse, originalCode, reflections, factors, threadCount)
            toBeChecked = unityExtracter(toBeChecked, foundParamsTemp)
            if not toBeChecked:
                break
        # Re-verify each surviving candidate individually.
        foundParams = []
        for param in foundParamsTemp:
            exists = quickBruter([param], originalResponse, originalCode, reflections, factors, include, delay, headers, url, GET)
            if exists:
                foundParams.append(param)
        for each in foundParams:
            print('%s?%s' % (url, each))
        # (Removed dead "if not foundParams: pass" no-op from the original.)
        return foundParams
# NOTE(review): detached fragment — the first half is the interior of an
# input-scraping routine (its enclosing `def` lies outside this chunk, so
# `done`, `response` and `paramList` are presumably its locals/parameters —
# confirm against the full file); the second half is a run of top-level
# script statements that duplicates the setup phase of initialize().
# Scrape every <input> tag from the page (case-insensitive, dot matches newlines).
inputs = re.findall(r'(?i)(?s)<input.*?>', response)
for inp in inputs:
    inpName = re.search(r'(?i)name=[\'"](.*?)[\'"]', inp)
    if inpName:
        inpType = re.search(r'(?i)type=[\'"](.*?)[\'"]', inp)
        inpValue = re.search(r'(?i)value=[\'"](.*?)[\'"]', inp)
        # d(e(...)) presumably decodes the extracted name — TODO confirm
        # against the helpers' definitions elsewhere in the file.
        inpName = d(e(inpName.group(1)))
        if inpName not in done:
            # Move the discovered name to the front of the candidate list
            # so it is tested first.
            if inpName in paramList:
                paramList.remove(inpName)
            done.append(inpName)
            paramList.insert(0, inpName)
            print ('%s Heuristic found a potenial parameter: %s%s%s' % (good, green, inpName, end))
            print ('%s Prioritizing it' % good)
# --- top-level script statements below: baseline probing of the target ---
url = stabilize(url)
print ('%s Analysing the content of the webpage' % run)
firstResponse = requester(url, include, headers, GET, delay)
print ('%s Now lets see how target deals with a non-existent parameter' % run)
# Random parameter name that surely does not exist on the target.
originalFuzz = randomString(6)
data = {originalFuzz : originalFuzz[::-1]}
data.update(include)
response = requester(url, data, headers, GET, delay)
# Count how many times the random value is reflected in the response.
reflections = response.text.count(originalFuzz[::-1])
print ('%s Reflections: %s%i%s' % (info, green, reflections, end))
originalResponse = response.text
originalCode = response.status_code
def initialize(url, include, headers, GET, delay, paramList, threadCount):
    """Probe *url*, narrow down ``paramList`` and print every valid parameter.

    Returns the list of confirmed parameter names, or ``{}`` when the target
    URL cannot be stabilized.
    """
    url = stabilize(url)
    if not url:
        return {}
    else:
        print('%s Analysing the content of the webpage' % run)
        firstResponse = requester(url, include, headers, GET, delay)
        print('%s Analysing behaviour for a non-existent parameter' % run)
        # Baseline: a random parameter that surely doesn't exist on the target.
        originalFuzz = randomString(6)
        data = {originalFuzz: originalFuzz[::-1]}
        data.update(include)
        response = requester(url, data, headers, GET, delay)
        # Times the random value is reflected back in the page.
        reflections = response.text.count(originalFuzz[::-1])
        print('%s Reflections: %s%i%s' % (info, green, reflections, end))
        originalResponse = response.text
        originalCode = response.status_code
        print('%s Response Code: %s%i%s' % (info, green, originalCode, end))
        newLength = len(response.text)
        plainText = removeTags(originalResponse)
        plainTextLength = len(plainText)
        print('%s Content Length: %s%i%s' % (info, green, newLength, end))
        print('%s Plain-text Length: %s%i%s' % (info, green, plainTextLength, end))
        # Factors used later to compare candidate responses to the baseline.
        factors = {'sameHTML': False, 'samePlainText': False}
        if len(firstResponse.text) == len(originalResponse):
            factors['sameHTML'] = True
        elif len(removeTags(firstResponse.text)) == len(plainText):
            factors['samePlainText'] = True
        print('%s Parsing webpage for potential parameters' % run)
        # Mine extra candidate parameters from the page itself.
        heuristic(firstResponse.text, paramList)
        # NOTE(review): fuzz/data are rebuilt here but never used afterwards —
        # confirm before removing.
        fuzz = randomString(8)
        data = {fuzz: fuzz[::-1]}
        data.update(include)
        print('%s Performing heuristic level checks' % run)
        # Narrowing phase: test candidates in chunks of 50, splitting chunks
        # until only individual surviving parameters remain.
        toBeChecked = slicer(paramList, 50)
        foundParamsTemp = []
        while True:
            toBeChecked = narrower(toBeChecked, url, include, headers, GET, delay, originalResponse, originalCode, reflections, factors, threadCount)
            toBeChecked = unityExtracter(toBeChecked, foundParamsTemp)
            if not toBeChecked:
                break
        # Re-verify each surviving candidate individually.
        foundParams = []
        for param in foundParamsTemp:
            exists = quickBruter([param], originalResponse, originalCode, reflections, factors, include, delay, headers, url, GET)
            if exists:
                foundParams.append(param)
        print('%s Scan Completed ' % info)
        for each in foundParams:
            print('%s Valid parameter found: %s%s%s' % (good, green, each, end))
        if not foundParams:
            print('%s Unable to verify existence of parameters detected by heuristic.' % bad)
        return foundParams
def initialize(url, include, headers, GET, delay, paramList, threadCount):
    """Scan *url* for valid HTTP parameters.

    Narrows ``paramList`` down heuristically, then brute-forces the survivors
    with a thread pool. Returns a list of ``{"param": name, "reason": why}``
    dicts (empty when nothing could be verified).
    """
    url = stabilize(url)
    log('%s Analysing the content of the webpage' % run)
    firstResponse = requester(url, include, headers, GET, delay)
    log('%s Analysing behaviour for a non-existent parameter' % run)
    # Baseline: a random parameter that surely doesn't exist on the target.
    originalFuzz = randomString(6)
    data = {originalFuzz: originalFuzz[::-1]}
    data.update(include)
    response = requester(url, data, headers, GET, delay)
    # Times the random value is reflected back in the page.
    reflections = response.text.count(originalFuzz[::-1])
    log('%s Reflections: %s%i%s' % (info, green, reflections, end))
    originalResponse = response.text
    originalCode = response.status_code
    log('%s Response Code: %s%i%s' % (info, green, originalCode, end))
    newLength = len(response.text)
    plainText = removeTags(originalResponse)
    plainTextLength = len(plainText)
    log('%s Content Length: %s%i%s' % (info, green, newLength, end))
    log('%s Plain-text Length: %s%i%s' % (info, green, plainTextLength, end))
    # Factors used later to compare candidate responses to the baseline.
    factors = {'sameHTML': False, 'samePlainText': False}
    if len(firstResponse.text) == len(originalResponse):
        factors['sameHTML'] = True
    elif len(removeTags(firstResponse.text)) == len(plainText):
        factors['samePlainText'] = True
    log('%s Parsing webpage for potential parameters' % run)
    # Mine extra candidate parameters from the page itself.
    heuristic(firstResponse.text, paramList)
    # NOTE(review): fuzz/data are rebuilt here but never used afterwards;
    # kept for parity with the original flow — confirm before removing.
    fuzz = randomString(8)
    data = {fuzz: fuzz[::-1]}
    data.update(include)
    log('%s Performing heuristic level checks' % run)
    # Narrowing phase: chunks of 50 candidates, split until singletons remain.
    toBeChecked = slicer(paramList, 50)
    foundParams = []
    while True:
        toBeChecked = narrower(toBeChecked, url, include, headers, GET, delay, originalResponse, originalCode, reflections, factors, threadCount)
        toBeChecked = unityExtracter(toBeChecked, foundParams)
        if not toBeChecked:
            break
    if foundParams:
        log('%s Heuristic found %i potential parameters.' % (info, len(foundParams)))
    paramList = foundParams
    finalResult = []
    jsonResult = []
    # Context manager guarantees the worker threads are shut down even if a
    # bruter() call raises (the original never shut the executor down).
    with concurrent.futures.ThreadPoolExecutor(max_workers=threadCount) as threadpool:
        futures = (threadpool.submit(bruter, param, originalResponse, originalCode, factors, include, reflections, delay, headers, url, GET) for param in foundParams)
        for i, result in enumerate(concurrent.futures.as_completed(futures)):
            # Fetch the future's value once instead of twice.
            this_result = result.result()
            if this_result:
                finalResult.append(this_result)
            log('%s Progress: %i/%i' % (info, i + 1, len(paramList)), mode='run')
    log('%s Scan Completed ' % info)
    for each in finalResult:
        for param, reason in each.items():
            log('%s Valid parameter found: %s%s%s' % (good, green, param, end))
            log('%s Reason: %s' % (info, reason))
            jsonResult.append({"param": param, "reason": reason})
    if not jsonResult:
        log('%s Unable to verify existence of parameters detected by heuristic' % bad)
    return jsonResult
def main():
    """Command-line entry point: parse arguments, scan the target, export JSON."""
    # Banner (version string interleaved with ANSI color codes).
    print('''%s _ /_| _ ' ( |/ /(//) %sv1.3%s _/ %s''' % (green, white, green, end))
    parser = argparse.ArgumentParser()  # defines the parser
    # Arguments that can be supplied
    parser.add_argument('-u', help='target url', dest='url', required=True)
    parser.add_argument('-d', help='request delay', dest='delay', type=int)
    parser.add_argument('-t', help='number of threads', dest='threads', type=int)
    parser.add_argument('-f', help='file path', dest='file')
    parser.add_argument('-o', help='Path for the output file', dest='output_file')
    parser.add_argument('--get', help='use get method', dest='GET', action='store_true')
    # NOTE(review): --post is parsed but args.POST is never read below —
    # the method defaults to POST-like behaviour whenever --get is absent.
    parser.add_argument('--post', help='use post method', dest='POST', action='store_true')
    parser.add_argument('--headers', help='http headers prompt', dest='headers', action='store_true')
    parser.add_argument('--include', help='include this data in every request', dest='include')
    args = parser.parse_args()  # arguments to be parsed
    url = args.url
    params_file = args.file or './db/params.txt'  # default wordlist
    headers = args.headers
    delay = args.delay or 0
    include = args.include or {}
    threadCount = args.threads or 2
    if headers:
        # --headers is a flag; the actual header text is read interactively.
        headers = extract_headers(prompt())
    else:
        headers = {}
    if args.GET:
        GET = True
    else:
        GET = False
    include = get_params(include)
    # Load the parameter wordlist, one name per line.
    paramList = []
    try:
        with open(params_file, 'r') as params_file:
            for line in params_file:
                paramList.append(line.strip('\n'))
    except FileNotFoundError:
        print('%s The specified file doesn\'t exist' % bad)
        quit()
    url = stabilize(url)
    print('%s Analysing the content of the webpage' % run)
    firstResponse = requester(url, include, headers, GET, delay)
    print('%s Now lets see how target deals with a non-existent parameter' % run)
    # Baseline: a random parameter that surely doesn't exist on the target.
    originalFuzz = random_string(6)
    data = {originalFuzz: originalFuzz[::-1]}
    data.update(include)
    response = requester(url, data, headers, GET, delay)
    # Times the random value is reflected back in the page.
    reflections = response.text.count(originalFuzz[::-1])
    print('%s Reflections: %s%i%s' % (info, green, reflections, end))
    originalResponse = response.text
    originalCode = response.status_code
    print('%s Response Code: %s%i%s' % (info, green, originalCode, end))
    newLength = len(response.text)
    plainText = remove_tags(originalResponse)
    plainTextLength = len(plainText)
    print('%s Content Length: %s%i%s' % (info, green, newLength, end))
    print('%s Plain-text Length: %s%i%s' % (info, green, plainTextLength, end))
    # Factors used later to compare candidate responses to the baseline.
    factors = {'sameHTML': False, 'samePlainText': False}
    if len(firstResponse.text) == len(originalResponse):
        factors['sameHTML'] = True
    elif len(remove_tags(firstResponse.text)) == len(plainText):
        factors['samePlainText'] = True
    print('%s Parsing webpage for potential parameters' % run)
    # Mine extra candidate parameters from the page itself.
    heuristic(firstResponse.text, paramList)
    # NOTE(review): fuzz/data are rebuilt here but never used afterwards —
    # confirm before removing.
    fuzz = random_string(8)
    data = {fuzz: fuzz[::-1]}
    data.update(include)
    print('%s Performing heuristic level checks' % run)
    # Narrowing phase: chunks of 25 candidates, split until singletons remain.
    # NOTE(review): narrower() is called with a single argument in this
    # version — presumably it closes over module-level state; verify.
    toBeChecked = slicer(paramList, 25)
    foundParams = []
    while True:
        toBeChecked = narrower(toBeChecked)
        toBeChecked = unity_extracter(toBeChecked, foundParams)
        if not toBeChecked:
            break
    if foundParams:
        print('%s Heuristic found %i potential parameters.' % (info, len(foundParams)))
    paramList = foundParams
    finalResult = []
    jsonResult = []
    # Brute-force the surviving candidates concurrently.
    threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=threadCount)
    futures = (threadpool.submit(bruter, param, originalResponse, originalCode, factors, include, reflections, delay, headers, url, GET) for param in foundParams)
    for i, result in enumerate(concurrent.futures.as_completed(futures)):
        if result.result():
            finalResult.append(result.result())
        # '\r' keeps the progress counter on one terminal line.
        print('%s Progress: %i/%i' % (info, i + 1, len(paramList)), end='\r')
    print('%s Scan Completed' % info)
    for each in finalResult:
        for param, reason in each.items():
            print('%s Valid parameter found: %s%s%s' % (good, green, param, end))
            print('%s Reason: %s' % (info, reason))
            jsonResult.append({"param": param, "reason": reason})
    # Finally, export to json
    if args.output_file and jsonResult:
        print("Saving output to JSON file in %s" % args.output_file)
        with open(str(args.output_file), 'w') as json_output:
            json.dump(
                {"results": jsonResult},
                json_output,
                sort_keys=True,
                indent=4,
            )
(red, white, red, white, red, white, red, white, red, white, red, white, red, white, end)) try: from urllib.parse import urlparse # For Python 3 except ImportError: print('%s Photon runs only on Python 3.2 and above.', bad) quit() then = time.time() # record starting time # Disable SSL related warnings warnings.filterwarnings('ignore') var['path'] = sys.path[0] if not var['input_url'].startswith('http'): var['input_url'] = stabilize( args.input_url) # update input_url when http(s) isn't present if var['wide']: var['scope'] = get_tld(var['input_url']) else: var['scope'] = urlparse(var['input_url']).netloc if var['gentle']: var['threads'] = 1 var['delay'] = 1 if var['delay']: var['threads'] = 1 if var['random_agent']: with open(var['path'] + '/db/user_agents.txt', 'r') as uas: var['user_agents'] = [agent.strip('\n') for agent in uas]