import signal

import dns.resolver
from dnsknife.scanner import Scanner  # assumption: the zone scanner used elsewhere in this project

# Module-level names G, W, R (terminal colors) and `log` come from the surrounding project.


def scan_DNS_zone(self, domain_name):
    log.console_log("{}[*] Performing DNS Zone Scanning... {}".format(G, W))
    log.console_log(
        "{}[*] Please wait, maximum timeout for checking is 1 minute {}".format(G, W))
    # Hard-stop the scan after 60 seconds (SIGALRM is POSIX-only).
    signal.signal(signal.SIGALRM, self.timeLimitHandler)
    signal.alarm(60)
    try:
        scan_list = str(list(Scanner(domain_name).scan()))
        ns_record_list = []
        mx_record_list = []
        log.console_log("{}{}{}".format(G, scan_list.replace(",", "\n"), W))

        # Enumerate and record the domain's name servers.
        log.console_log("{}DNS Server:{}".format(G, W))
        for ns in dns.resolver.query(domain_name, 'NS'):
            log.console_log("{}{}{}".format(G, ns.to_text(), W))
            ns_record_list.append(ns.to_text())

        # Enumerate and record the domain's mail exchangers.
        log.console_log("{}MX Record:{}".format(G, W))
        for mx in dns.resolver.query(domain_name, 'MX'):
            log.console_log("{}{}{}".format(G, mx.to_text(), W))
            mx_record_list.append(mx.to_text())

        # Persist the findings for this project.
        self.db.update_dns_zone(self.project_id, domain_name,
                                str(ns_record_list), str(mx_record_list))
    except Exception:
        print("{}[*] No response from server... SKIP!{}".format(R, W))
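# A minimal standalone sketch of the same NS/MX lookup using dnspython alone,
# without the project's log/db wiring. dns.resolver.resolve() is the
# dnspython >= 2.0 replacement for the deprecated query() call used above;
# 'example.com' below is just a placeholder domain.
import dns.resolver


def lookup_ns_mx(domain_name):
    # Collect the textual form of every NS and MX record for the domain.
    ns_records = [r.to_text() for r in dns.resolver.resolve(domain_name, 'NS')]
    mx_records = [r.to_text() for r in dns.resolver.resolve(domain_name, 'MX')]
    return ns_records, mx_records

# Usage: ns, mx = lookup_ns_mx('example.com')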
import os
import re
import socket

import requests
from bs4 import BeautifulSoup
from dnsknife.scanner import Scanner  # assumption: the same DNS zone scanner used above

sudo = 'sudo'  # assumption: privilege prefix for nmap's SYN scan; defined elsewhere in the original script

UA = ('Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 '
      '(KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36')
SEP = '=' * 56


def wds():
    user = input('URL to query: ')
    headers = {'user-agent': UA}

    # whois lookup: scrape the <pre> blocks from whois.com's search page.
    url = 'https://www.whois.com/search.php?query={}'.format(user)
    reqt = requests.get(url=url, headers=headers)
    bd = BeautifulSoup(reqt.content.decode('utf-8'), 'html.parser')
    print('[+] whois info')
    print(SEP)
    for pre in bd.find_all('pre'):
        print(pre.get_text())
    print(SEP)

    # Multi-location ping via ping.chinaz.com to spot CDN usage.
    print('[+] Multi-location ping (CDN detection)')
    print('')
    print(SEP)
    urli = 'http://ping.chinaz.com/{}'.format(user)
    # A dict literal cannot repeat the 'linetype' key (later entries overwrite
    # earlier ones), so pass the form fields as a list of (key, value) tuples.
    datas = [
        ('host', user),
        ('checktype', '0'),
        ('linetype', '电信'),  # China Telecom
        ('linetype', '多线'),  # multi-line
        ('linetype', '联通'),  # China Unicom
        ('linetype', '移动'),  # China Mobile
        ('linetype', '海外'),  # overseas
    ]
    rev = requests.post(url=urli, headers=headers, data=datas)
    bd = BeautifulSoup(rev.text, 'html.parser')

    # Each probe node is a <div> whose id embeds a GUID; collect them all.
    guid_re = re.compile(
        r'[0-9a-zA-Z]{8}-[0-9a-zA-Z]{4}-[0-9a-zA-Z]{4}-'
        r'[0-9a-zA-Z]{4}-[0-9a-zA-Z]{12}')
    guids = [v.get('id') for v in bd.find_all('div')]
    guids2 = []
    for key in guids:
        guids2.extend(guid_re.findall(str(key)))

    # Poll each probe node for the resolved IP it saw.
    url = ('http://ping.chinaz.com/iframe.ashx?t=ping'
           '&callback=jQuery111306709270458227905_1535617821100')
    for v in guids2:
        data = {
            'guid': v,
            'host': user,  # ping the queried host, not a hardcoded test domain
            'ishost': 'false',
            'encode': 'uZVguOxtxhFU4L0rQ|zXgulyePFesj4w',
            'checktype': '0',
        }
        reqt = requests.get(url=url, headers=headers, data=data)
        for node in re.findall("ip:'.*'", reqt.text):
            print('[+] Node: {}'.format(node))
    print(SEP)
    print('')

    # DNS zone scan on the bare domain (strip any leading 'www.').
    print(SEP)
    print('[+] DNS lookup')
    dnscer = Scanner(user.replace('www.', '').strip()).scan()
    for record in dnscer:
        print(record)
    dnscer.close()
    print(SEP)
    print('')

    # Port scan the resolved IP with nmap.
    print(SEP)
    print('[+] nmap port scan')
    ml = '{} nmap -sS -sC -T4 -A {}'.format(sudo, socket.gethostbyname(user)).strip()
    os.system(ml)
    print(SEP)
    print('')

    # Subdomain lookup: scrape the link list from site.ip138.com.
    print(SEP)
    print('[+] Subdomain lookup')
    url = 'http://site.ip138.com/{}/domain.htm'.format(user.replace('www.', '').strip())
    reqt = requests.get(url=url, headers=headers)
    domain = re.findall('<a href=".*" target="_blank">.*</a></p>',
                        reqt.content.decode('utf-8'))
    for i in domain:
        bd = BeautifulSoup(i, 'html.parser')
        print(bd.get_text())
    print(SEP)
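# A quick illustration of the duplicate-key pitfall fixed in wds() above:
# a dict literal silently keeps only the last of several identical keys, so
# repeated form fields (like 'linetype') must be sent as (key, value) tuples.
from urllib.parse import urlencode

print(urlencode({'linetype': '电信', 'linetype': '海外'}))
# -> linetype=%E6%B5%B7%E5%A4%96  (only the last value survives)
print(urlencode([('linetype', '电信'), ('linetype', '海外')]))
# -> linetype=%E7%94%B5%E4%BF%A1&linetype=%E6%B5%B7%E5%A4%96  (both sent)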
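# Minimal entry point so the recon helper above can be run directly.
if __name__ == '__main__':
    wds()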