def Analyse(Sites):
    # Group subdomains under their bare (two-label) domains. Removing items
    # from a list while iterating over it skips entries, so loop over copies.
    for site in Sites[:]:
        if site.count('.') == 1:  # bare domain, e.g. example.com
            print '[+]', putColor(site, 'green')
            Sites.remove(site)
            for s in Sites[:]:
                if site in s:  # a subdomain of the bare domain
                    print ' [-]', putColor(s, 'yellow')
                    Sites.remove(s)
    # Whatever is left has no bare parent domain in the list.
    for site in Sites:
        print '[+]', putColor(site, 'green')
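
# putColor() is called throughout these modules but never defined in this
# excerpt. A minimal sketch, assuming it wraps text in ANSI escape sequences;
# the color table and implementation below are assumptions, not the repo's code.
COLORS = {'red': 31, 'green': 32, 'yellow': 33,
          'blue': 34, 'magenta': 35, 'cyan': 36}

def putColor(text, color):
    return '\033[%dm%s\033[0m' % (COLORS.get(color, 0), text)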
import time
import requests

def search(Host, s):
    api = 'https://dns.aizhan.com/'
    print '[*]Using API of:', putColor('dns.aizhan.com', 'magenta')
    html = requests.get(api + Host + '/').text
    page = getLastPages(html)

    print '[+]Info'
    Info = getInfo(html)
    print Info[0]

    print '[+]Sites(%d)' % Info[1]
    Sites = []
    if Info[1]:
        sites, Hosts = getSites(html)
        Sites.extend(sites)
        if s:
            print ' [-] ' + '\n [-] '.join(Hosts)
        # Walk the remaining result pages.
        for i in range(2, page):
            time.sleep(1)  # throttle between page requests
            html = requests.get(api + Host + '/%d/' % i).text
            sites, Hosts = getSites(html)
            Sites.extend(sites)
            if s:
                print ' [-] ' + '\n [-] '.join(Hosts)
    print
    return Sites
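
# getLastPages(), getInfo() and getSites() are not defined in this excerpt.
# The contracts below are inferred purely from the call sites above; the names
# are the repo's, but the docstrings are assumptions, and the markup-specific
# parsing is omitted rather than guessed.
def getLastPages(html):
    """Return the result-page bound consumed as range(2, page)."""
    raise NotImplementedError  # site-specific HTML parsing

def getInfo(html):
    """Return [summary_line, site_count] for the queried host."""
    raise NotImplementedError

def getSites(html):
    """Return (bare_domains, display_hosts) scraped from one result page."""
    raise NotImplementedError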
from re import findall
import time
import requests

def search(Host, s):
    print '[*]Using API of:', putColor('site.ip138.com', 'magenta')
    Session = requests.session()

    # Fetch the first page.
    html = Session.get('http://site.ip138.com/' + Host + '/').text

    Info = []
    Address = findall('<h3>(.+)</h3>', html)
    Info.append(Address[0] if Address else 'null')

    Hosts = [Host]
    if 'curadress' in html:
        # The host resolves to one or more IPs; query each of them.
        IP = Session.get(
            'http://site.ip138.com/domain/read.do?domain=%s' % Host).json()
        if str(IP['status']).lower() != 'false' and 'data' in IP:
            for ip in IP['data']:
                Info.append(ip['ip'])
            Info[0] += '(%d)' % (len(Info) - 1)
            Hosts = Info[1:]

    print '[+]Info'
    print ' [-]', '\n [-] '.join(Info)

    Sites = []
    for Host in Hosts:
        html = Session.get('http://site.ip138.com/' + Host + '/').text
        sites = findall(
            '<li><span class="date">.+" target="_blank">(.+)</a></li>', html)
        for site in sites:
            if site not in Sites:
                Sites.append(site)

        _TOKEN = findall("var _TOKEN = '(.+)';", html)
        if _TOKEN:
            # Page through the JSON endpoint from page 2 until it reports no
            # more data. findall returns a list, so use _TOKEN[0] in the URL.
            i = 2
            while 1:
                sites = Session.get(
                    'http://site.ip138.com/index/querybyip/?ip=%s&page=%d&token=%s'
                    % (Host, i, _TOKEN[0])).json()
                if str(sites['status']).lower() == 'false' or 'data' not in sites:
                    break
                for site in sites['data']:
                    if site['domain'] not in Sites:
                        Sites.append(site['domain'])
                time.sleep(0.5)  # throttle between pages
                i += 1

    print '[+]Sites(%d)' % len(Sites)
    if len(Sites) and s:
        print ' [-] ' + '\n [-] '.join(Sites)
    print
    return Sites
import traceback

def inside(Host, s):
    # func, self.log and self.api_name are free variables supplied by the
    # enclosing wrapper (see the sketch below).
    try:
        return func(Host, s)
    except Exception as e:
        # Let keyboard interrupts pass quietly; log every other failure.
        if 'Keyboard' not in str(e):
            with open(self.log, 'a') as fp:
                fp.write(traceback.format_exc() + '\n' + '-' * 50 + '\n')
            print "[X]" + putColor(
                "The api of %s was broken. Check the log in %s\n"
                % (self.api_name, self.log), 'red')
        return []
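
# The enclosing scope is not shown in the excerpt. A minimal sketch of how
# `inside` is presumably produced; the class and method names here (SafeApi,
# guard) are assumptions, not the repo's actual structure, and only the
# logging branch is reproduced.
class SafeApi(object):
    def __init__(self, api_name, log='error.log'):
        self.api_name = api_name
        self.log = log

    def guard(self, func):
        # Close over func and self so inside() matches the definition above.
        def inside(Host, s):
            try:
                return func(Host, s)
            except Exception:
                with open(self.log, 'a') as fp:
                    fp.write(traceback.format_exc() + '\n' + '-' * 50 + '\n')
                return []
        return inside

# e.g. search = SafeApi('dns.aizhan.com').guard(search)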
import time
import requests

def getIP(url):
    # webscan.cc prepends a junk character to its response, hence text[1:].
    repeat = 10
    while repeat:
        try:
            html = eval(
                requests.get(
                    'http://www.webscan.cc/?action=getip&domain=' + url).text[1:])
            break
        except:
            time.sleep(1)
            repeat -= 1
    if repeat == 0:
        return 'Error', putColor('The api of webscan.cc is broken\n', 'red')
    IP = html['ip']
    Address = html['info'].decode('unicode-escape').encode('utf-8')
    return IP, Address
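
# eval() on a remote response executes whatever the server sends back. A
# safer sketch of the same parse, assuming the payload after the stripped
# leading character is JSON (or, failing that, a plain Python literal):
import json
import ast

def parse_webscan(text):
    body = text[1:]  # drop the junk leading character, as above
    try:
        return json.loads(body)
    except ValueError:
        return ast.literal_eval(body)  # literals only, never executes code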
import re

def search(Host, s):
    url = 'http://www4.bing.com/search?format=rss&ensearch=0&q=ip:%s&first=' % Host
    # url = 'https://cn.bing.com/search'
    print '[*]Using API of:', putColor('bing.com', 'magenta')

    # --- work around Bing's anti-crawler page: retry until the RSS parses ---
    i = 11
    while i:
        _, data = getXML(url)
        if data:
            break
        i -= 1
    else:
        # all retries exhausted
        print '[!]Sites(0)\n'
        return []
    # --- end workaround ---

    page = 1
    Sites = []
    Titles = []
    while 1:
        titles, sites = getXML(url, page)
        page += len(sites)  # advance the 'first=' offset by this page's hits
        for site, title in zip(sites, titles):
            # Normalize to a bare hostname: strip scheme, leading www., path.
            for rep in [r'\bhttps?://', r'\bwww\.', '/.*']:
                site = re.sub(rep, '', site)
            if site not in Sites:
                Sites.append(site)
                Titles.append(title)
        if not sites or len(sites) < 10:
            break

    print '[!]Sites(%d)' % len(Sites)
    if s:
        print ' [-] ' + '\n [-] '.join(
            '%s: %s' % (i, j) for i, j in zip(Sites, Titles))
    print
    return Sites
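
# getXML() is not defined in the excerpt. A hypothetical sketch inferred from
# the call sites: fetch the RSS feed at url + offset and return the parallel
# (titles, links) lists. The regexes are assumptions about Bing's RSS markup,
# not verified behavior; a real implementation would use a proper XML parser.
from re import findall
import requests

def getXML(url, first=1):
    xml = requests.get(url + str(first)).text
    titles = findall(r'<item><title>(.*?)</title>', xml)
    # naive: may also catch the channel-level <link>, which a real
    # implementation would skip
    links = findall(r'</title><link>(.*?)</link>', xml)
    return titles, links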
from re import findall
import time
import requests

def search(Host, s):
    Sites = []
    print '[*]Using API of:', putColor('www.webscan.cc', 'magenta')
    api = 'http://www.webscan.cc/?action=query&ip='

    print '[+]Info'
    Host, Address = getIP(Host)
    print ' [-]', Host
    print ' [-]', Address
    if 'Error' in Host or 'Error' in Address:
        return []

    # The API answers with a "setTimeout" stub while the scan is still
    # running; poll until the real payload arrives.
    while 1:
        html = requests.get(api + Host).text
        if 'setTimeout' not in html:
            break
        time.sleep(1)

    html = findall('"domain":"(.+)","title":"(.*)"',
                   html.replace(']', '\n').replace('}', '\n'))
    if not html:
        print '[+]Sites(0)\n'
        return []

    info = [[item[0].replace('\\/', '/'),
             item[1].decode('unicode-escape').encode('utf-8')]
            for item in html]

    print '[+]Sites(%d)' % len(info)
    for i in info:
        if s:
            print ' [-]', i[0], ':', i[1]
        Sites.append(i[0])
    print
    return Sites
def Saving(name, Sites):
    with open('result/%s.txt' % name, 'w') as fp:
        fp.write('\n'.join(Sites) + '\n')
    print '\n[!]Saving result to %s' % putColor(name + '.txt', 'blue')
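
# open() raises IOError if result/ is missing, e.g. on a fresh clone. A small
# guard, assuming the relative output directory used above:
import os

if not os.path.isdir('result'):
    os.makedirs('result')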
    # Tail of the dedup/normalize helper (its def line is not in this
    # excerpt): strip schemes and leading www., then dedupe and sort.
    Sites = list(set(Sites))
    for i, site in enumerate(Sites):
        site = re.sub(r'\bhttps?://', '', site)
        site = re.sub(r'\bwww\.', '', site)
        Sites[i] = site
    Sites = sorted(set(Sites))
    return Sites


import re
import time
import datetime
import argparse

print putColor('''
x------------------------------------------------------------x
.______       _______  __  .______    _______    ______    _______
|   _  \     |   ____||  | |   _  \  |       \  /  __  \  /  _____|
|  |_)  |    |  |__   |  | |  |_)  | |  .--.  ||  |  |  ||  |  __
|      /     |   __|  |  | |   ___/  |  |  |  ||  |  |  ||  | |_ |
|  |\  \--.  |  |____ |  | |  |      |  '--'  ||  `--'  ||  |__| |
| _| `.___|  |_______||__| | _|      |_______/  \______/  \______|
x------------------------------------------------------------x
''', 'cyan')

print '[*]Searching[%s]' % putColor(
    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), 'yellow')
starttime = datetime.datetime.now()

parser = argparse.ArgumentParser(usage='''
(sudo) (python) ReIPDog.py ip|host|url (1|0)
Such as:
    Searching sites for ip -- 127.0.0.1 and output all info:
        python ReIPDog.py -host 127.0.0.1