def bypass_waf(req, res):
    """
    bypass_waf: test whether the WAF can be bypassed, using different header payloads.
    Returns the working header dict on success, otherwise False.
    """
    win = False
    domain = res.split("/")[2]
    website_ip = socket.gethostbyname(domain)  # resolve the website IP
    header_base = [
        "X-Originating-IP", "X-Forwarded", "Forwarded", "Forwarded-For", "Forwarded-For-IP",
        "X-Forwarder-For", "X-Forwarded-For", "X-Forwarded-For-Original", "X-Forwarded-By",
        "X-Forwarded-Host", "X-Remote-IP", "X-Remote-Addr", "X-Client-IP", "Client-IP",
        "Cookie", "Access-Control-Allow-Origin", "Origin", "Timing-Allow-Origin",
        "X-Forwarded-For "  # trailing space kept on purpose: some WAFs parse it differently
    ]
    options = [
        website_ip, domain, "127.0.0.1", "127.0.0.2", "*", "8.8.8.8", "null",
        "192.168.0.2", "10.0.0.1", "localhost", "0.0.0.0", "::1", "0:0:0:0:0:0:0:1"
    ]
    # Try every header/value combination until one is no longer flagged by the WAF.
    for hb in header_base:
        for o in options:
            headers = {hb: o}
            try:
                display = False
                vrfy = verify_waf(req, res, headers, display)
                #print(vrfy)
                if vrfy == False:
                    #win = True
                    for h in headers:
                        print("{}Potential bypass WAF rate limit with option:\033[36m -H \"{}:{}\" \033[0m".format(WAF, h, headers[h]))
                    return headers
            except Exception:
                pass
                #traceback.print_exc()
    # Last resort: ask the server/WAF to clear its state for this client.
    if not win:
        try:
            headers = {"Clear-Site-Data": "*"}
            display = False
            vrfy = verify_waf(req, res, headers, display)
            if vrfy == False:
                #win = True
                for h in headers:
                    print("{}Potential bypass WAF rate limit with option:\033[36m -H \"{}:{}\" \033[0m".format(WAF, h, headers[h]))
                return headers
            #else:
            #    bypass_by_user_agent(req, res)
        except Exception:
            pass
            #traceback.print_exc()
    return win
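# Usage sketch (illustrative only, mirroring what tryUrl() below already does):
# bypass_waf() is meant to be called after verify_waf() has flagged a response, and its
# return value is merged into the headers used for subsequent requests.
#
#   waf = verify_waf(req, res, user_agent)
#   if waf:
#       bypass_headers = bypass_waf(req, res)
#       if bypass_headers and not isinstance(bypass_headers, bool):
#           user_agent.update(bypass_headers)  # e.g. {"X-Forwarded-For": "127.0.0.1"}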
def tryUrl(i, q, threads, manager=False, directory=False, forced=False, u_agent=False, nLine=False):
    """
    tryUrl: test every URL contained in the wordlist, with multi-threading.
    This function calls:
        - create_backup()
        - dl()
        - file_backup()
        - mail()
    """
    s = requests.session()
    parsing = parsing_html()
    thread_score = 0
    score_next = 0
    all_mail = []
    waf_score = 0
    percentage = lambda x, y: float(x) / float(y) * 100
    thread_i = 0
    stop_add_thread = False
    time_i = 120
    time_bool = False
    waf = False
    error_bool = False
    tested_bypass = False
    for numbers in range(len_w):
        thread_count = threading.active_count()
        thread_all = thread_count - 1
        now = time.localtime(time.time())
        hour_t = time.strftime("%H:%M:%S", now)
        HOUR = "\033[35m[{}] \033[0m".format(hour_t)
        res = q.get()
        page = res.split("/")[-1]
        # in auto mode, let the manager add threads as long as the scan behaves well
        if auto and not stop_add_thread:
            thrds, scores = defined_thread(threads, thread_score, score_next)
            if scores == 1:
                thread_score += 1
            if thrds == 1:
                threads += 1
                score_next += 1
                manager.add_thread(i, threads, manager)
            #print("{}: {}".format(threading.currentThread().getName(), thread_score)) #DEBUG
        try:
            if u_agent:
                user_agent = {'User-agent': u_agent}
            else:
                ua = UserAgent()
                user_agent = {'User-agent': ua.random}  # random user-agent
            try:
                forbi = False
                if ts:  # if the --timesleep option is defined
                    time.sleep(ts)
                req = defined_connect(s, res, user_agent, cookie_auth)
                if "robots.txt" in res.split("/")[3:] and req.status_code == 200:
                    print("{} {} {}".format(HOUR, PLUS, res))
                    for r in req.text.split("\n"):
                        print("\t- {}".format(r))
                # verify_waf: check whether a WAF reacted (True: detected, False: not detected)
                waf = verify_waf(req, res, user_agent)
                if waf == True:
                    if tested_bypass == False:
                        print("{}We try to bypass it...".format(INFO))
                        try_bypass_waf = bypass_waf(req, res)
                        #print(try_bypass_waf) #DEBUG
                        #print(user_agent) #DEBUG
                        if try_bypass_waf == False:
                            # bypass failed: do not retry it for every URL
                            tested_bypass = True
                        elif try_bypass_waf and type(try_bypass_waf) is not bool:
                            user_agent.update(try_bypass_waf)
                    waf_score += 1
                    time_bool = True
                    if waf_score == 2:
                        waf_score = 0
                        if thread_count != 1:
                            thread_count += 1
                            stop_add_thread = True
                            print("{} Auto-reconfig scan to prevent the WAF".format(INFO))
                            manager.stop_thread()
                #TODO: potentially use TOR (apt install tor, pip install torrequest) for the next requests after that.
                if backup:
                    hidden_dir(res, user_agent, directory, forbi, HOUR)
                if redirect and req.history:
                    for histo in req.history:
                        status_link = histo.status_code
                else:
                    status_link = req.status_code
                redirect_link = req.url
                redirect_stat = req.status_code
                #test backup files
                #print(status_link) #DEBUG status response
                if status_link == 200:
                    if exclude:
                        cep = check_exclude_page(req, res, directory, forbi, HOUR)
                        if cep:
                            print("{} {} {}".format(HOUR, PLUS, res))
                    else:
                        # download the file and compute its size
                        size = dl(res, req, directory)
                        if size:
                            print("{} {} {} ({} bytes)".format(HOUR, PLUS, res, size))
                            outpt(directory, res, stats=0)
                        else:
                            print("{} {} {}".format(HOUR, PLUS, res))
                            outpt(directory, res, stats=0)
                    #check backup
                    create_backup(res, directory, forbi)
                    parsing.get_links(req, directory)  # scrape all links
                    #add directory for recursive scan
                    if res[-1] == "/" and recur:
                        if ".git" in res:
                            pass
                        else:
                            spl = res.split("/")[3:]
                            result = "/".join(spl)
                            rec_list.append(result)
                    #get mails
                    parsing.mail(req, directory, all_mail)
                    #report.create_report_url(status_link, res, directory)
                    if 'sitemap.xml' in res:
                        parsing.sitemap(req, directory)
                    parsing.search_s3(res, req, directory)
                elif status_link == 403:
                    if res[-1] == "/" and recur:
                        if ".htaccess" in res or ".htpasswd" in res or ".git" in res or "wp" in res:
                            outpt(directory, res, stats=403)
                        else:
                            spl = res.split("/")[3:]
                            result = "/".join(spl)
                            rec_list.append(result)
                            outpt(directory, res, stats=403)
                            #report.create_report_url(status_link, res, directory)
                    if not forced:
                        forbi = True
                        print("{} {} {} \033[31m Forbidden \033[0m".format(HOUR, FORBI, res))
                        create_backup(res, directory, forbi)
                        outpt(directory, res, stats=403)
                        #report.create_report_url(status_link, res, directory)
                    elif not forced and recur:
                        pass
                    else:
                        #print("{}{} {} \033[31m Forbidden \033[0m".format(HOUR, FORBI, res))
                        pass
                elif status_link == 404:
                    pass
                elif status_link == 405:
                    print("{} {} {}".format(HOUR, INFO, res))
                    outpt(directory, res, stats=405)
                    #report.create_report_url(status_link, res, directory)
                elif status_link == 301:
                    if redirect and redirect_stat == 200:
                        if exclude:
                            cep = check_exclude_page(req, res, directory, forbi, HOUR)
                            if cep:
                                print("{} {} {}\033[33m => {}\033[0m 301 Moved Permanently".format(HOUR, LESS, res, redirect_link))
                                parsing.search_s3(res, req, directory)
                                outpt(directory, res, stats=301)
                        else:
                            print("{} {} {}\033[33m => {}\033[0m 301 Moved Permanently".format(HOUR, LESS, res, redirect_link))
                            parsing.search_s3(res, req, directory)
                            outpt(directory, res, stats=301)
                            #report.create_report_url(status_link, res, directory)
                elif status_link == 304:
                    print("{}\033[33m[+] \033[0m {}\033[33m 304 Not modified \033[0m".format(HOUR, res))
                    parsing.search_s3(res, req, directory)
                    #report.create_report_url(status_link, res, directory)
                elif status_link == 302:
                    if redirect and redirect_stat == 200:
                        if exclude:
                            cep = check_exclude_page(req, res, directory, forbi, HOUR)
                            if cep:
                                print("{}{}{}\033[33m => {}\033[0m 302 Moved Temporarily".format(HOUR, LESS, res, redirect_link))
                                parsing.search_s3(res, req, directory)
                                outpt(directory, res, stats=302)
                        else:
                            print("{}{}{}\033[33m => {}\033[0m 302 Moved Temporarily".format(HOUR, LESS, res, redirect_link))
                            parsing.search_s3(res, req, directory)
                            outpt(directory, res, stats=302)
                            #report.create_report_url(status_link, res, directory)
                elif status_link == 400 or status_link == 500:
                    if "Server Error" in req.text or "Erreur du serveur dans l'application" in req.text:
                        if status_link == 400:
                            if exclude:
                                cep = check_exclude_page(req, res, directory, forbi, HOUR)
                                if cep:
                                    print("{} {} {} \033[31m400 Server Error\033[0m".format(HOUR, WARNING, res))
                                    outpt(directory, res, stats=400)
                            else:
                                print("{} {} {} \033[31m400 Server Error\033[0m".format(HOUR, WARNING, res))
                                outpt(directory, res, stats=400)
                                #report.create_report_url(status_link, res, directory)
                        elif status_link == 500:
                            if exclude:
                                cep = check_exclude_page(req, res, directory, forbi, HOUR)
                                if cep:
                                    print("{} {} {} \033[31m500 Server Error\033[0m".format(HOUR, WARNING, res))
                                    outpt(directory, res, stats=500)
                            else:
                                print("{} {} {} \033[31m500 Server Error\033[0m".format(HOUR, WARNING, res))
                                outpt(directory, res, stats=500)
                                #report.create_report_url(status_link, res, directory)
                    else:
                        pass
                        #print("{}{} \033[33m400 Server Error\033[0m".format(LESS, res))
                elif status_link == 422 or status_link == 423 or status_link == 424 or status_link == 425:
                    print("{} {} {} \033[33mError WebDAV\033[0m".format(HOUR, LESS, res))
                    #report.create_report_url(status_link, res, directory)
                elif status_link == 401:
                    #print("{} {} {} \033[33m401 Unauthorized\033[0m".format(HOUR, LESS, res))
                    #outpt(directory, res, stats=401)
                    pass
                elif status_link == 503:
                    # request the origin page (index) to check whether the site is really unavailable
                    req_test_index = requests.get(url, verify=False)
                    if req_test_index.status_code == 503:
                        manager.stop_thread()
                        print("{}{} Service potentially unavailable, the website seems to be down, please wait...\n".format(HOUR, WARNING))
                        time_bool = True
                    else:
                        pass
                elif status_link == 429 or status_link == 522:
                    req_test_many = requests.get(url, verify=False)
                    if req_test_many.status_code == 429 or status_link == 522:
                        print("{} {} Too many requests, the web service seems to be offline".format(HOUR, WARNING))
                        print("Stopping: too many requests, we should wait a little...")
                        time_bool = True
                    else:
                        pass
                        #print("{}{}{} 429".format(HOUR, LESS, res))
                if backup:
                    fbackp = file_backup(s, res, directory, forbi, HOUR)
                    if fbackp == False:
                        with open(directory + "/errors.txt", "a+") as write_error:
                            write_error.write(res + "\n")
                        #errors = manager.error_check()
                        #error_bool = True
            except Timeout:
                #traceback.print_exc() #DEBUG
                with open(directory + "/errors.txt", "a+") as write_error:
                    write_error.write(res + "\n")
                #errors = manager.error_check()
                #error_bool = True
            except Exception:
                #traceback.print_exc() #DEBUG
                with open(directory + "/errors.txt", "a+") as write_error:
                    write_error.write(res + "\n")
                #errors = manager.error_check() #TODO
                #error_bool = True
            q.task_done()
        except Exception:
            pass
            #traceback.print_exc() #DEBUG
        len_p = len(page)
        len_flush = len_page_flush(len_p)
        if time_bool:
            # a WAF (or an outage) was detected: pause for time_i seconds while keeping the progress line updated
            while time_i != 0:
                time_i -= 1
                time.sleep(1)
                print_time = "stop {}s |".format(time_i) if time_bool else ""  # for the flush display
                sys.stdout.write("\033[34m[i] {0:.2f}% - {1}/{2} | Threads: {3:} - {4} | {5:{6}}\033[0m\r".format(percentage(numbers + nLine, len_w) * thread_all, numbers * thread_all + nLine, len_w, thread_all, print_time, page, len_flush))
                sys.stdout.flush()
            time_i = 60
            time_bool = False
        else:
            sys.stdout.write("\033[34m[i] {0:.2f}% - {1}/{2} | Threads: {3:} | {4:{5}}\033[0m\r".format(percentage(numbers + nLine, len_w) * thread_all, numbers * thread_all + nLine, len_w, thread_all, page, len_flush))
            sys.stdout.flush()