def iphistory(web):
    name = targetname(web)
    module = "ReconANDOSINT"
    lvl1 = "Passive Reconnaissance & OSINT"
    lvl3 = ''
    lvl2 = inspect.stack()[0][3]
    requests = session()
    try:
        from core.methods.print import posintpas
        posintpas("ip history")
        print(GR + ' [*] Parsing Url...')
        web0 = web.split('//')[-1]
        if "@" in web0:
            web0 = web0.split("@")[1]
        print(web0)
        print(C + ' [!] Making the request...')
        html = requests.get('http://viewdns.info/iphistory/?domain=' + web0).text
        print(GR + ' [*] Parsing raw-data...')
        time.sleep(0.7)
        soup = BeautifulSoup(html, 'lxml')
        print(C + ' [!] Setting parameters...')
        table = soup.findAll('table', attrs={'border': '1'})[0]
        print(C + ' [!] Finding IP history instances...')
        trs = table.findAll('tr')
        trs.pop(0)  # drop the table header row
        print(C + '\n [+] Following instances were found...')
        data = []
        for tr in trs:
            td = tr.findAll('td')
            info = {
                'ip': td[0].text,
                'owner': td[2].text.rstrip(),
                'last': td[3].text
            }
            data.append(info)
            print(O + ' [+] Instance :' + C + color.TR3 + C + G + info['ip'] + ' => ' +
                  info['owner'] + ' - (' + info['last'] + ')' + C + color.TR2 + C)
            time.sleep(0.02)
        save_data(database, module, lvl1, lvl2, lvl3, name, str(data))
    except Exception:
        print(R + ' [-] No instances of IP History found...')

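# A standalone sketch of the same viewdns.info table parse, kept out of the
# framework flow so it can be tested without the module globals. The helper
# name is illustrative, not part of the original API; it relies on the
# module's existing BeautifulSoup import.
def _parse_ip_history(html):
    """Return [{'ip', 'owner', 'last'}, ...] rows from a viewdns.info iphistory page."""
    soup = BeautifulSoup(html, 'lxml')
    rows = soup.findAll('table', attrs={'border': '1'})[0].findAll('tr')[1:]
    out = []
    for row in rows:
        td = row.findAll('td')
        out.append({'ip': td[0].text, 'owner': td[2].text.rstrip(), 'last': td[3].text})
    return out
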
def piweb(web):
    requests = session()
    dom = web.split('//')[1]
    if "@" in dom:
        dom = dom.split("@")[1]
    from core.methods.print import posintpas
    posintpas("ping check")
    time.sleep(0.4)
    print(GR + color.BOLD + ' [!] Pinging website using external API...')
    time.sleep(0.4)
    print(GR + color.BOLD + " [~] Result: " + color.END)
    text = requests.get('http://api.hackertarget.com/nping/?q=' + dom).text
    nping = str(text)
    if 'null' not in nping:
        print(G + nping)
    else:
        print(R + ' [-] Outbound Query Exception!')
        time.sleep(0.8)

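# The nping endpoint replies with the raw ping output as plain text, so the
# only failure signal piweb() has is the literal string 'null' in the body;
# anything else is printed verbatim.
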
def googlegroups(web):
    print(GR + ' [*] Loading module...')
    time.sleep(0.7)
    from core.methods.print import posintpas
    posintpas("google groups")
    print(O + ' [!] Initiating enumeration via Google Web...')
    time.sleep(0.7)
    print(O + ' [!] Parsing url...')
    web = web.replace('https://', '')
    web = web.replace('http://', '')
    if "@" in web:
        web = web.split("@")[1]
    getemails0x00(web)
    if not flag:
        print(R + ' [-] No results found via enumeration on Google Groups...')
    print(G + ' [+] Done!')

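# `getemails0x00` and `flag` are module-level names provided elsewhere in the
# framework: the helper scrapes Google Groups for addresses on the target
# domain and is expected to set the global `flag` truthy when it finds any,
# which is what the check above keys on.
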
def subnet(web):
    name = targetname(web)
    module = "ReconANDOSINT"
    lvl1 = "Passive Reconnaissance & OSINT"
    lvl3 = ''
    lvl2 = inspect.stack()[0][3]
    requests = session()
    web = web.replace('http://', '')
    web = web.replace('https://', '')
    if "@" in web:
        web = web.split("@")[1]
    time.sleep(0.4)
    from core.methods.print import posintpas
    posintpas("subnet enumeration")
    print(GR + ' [!] Enumerating subnets in network...')
    time.sleep(0.4)
    print(GR + ' [*] Getting subnet class infos...\n')
    domains = [web]
    for dom in domains:
        text = requests.get('http://api.hackertarget.com/subnetcalc/?q=' + dom).text
        http = str(text)
        # Check the API's explicit "no results" message before assuming the
        # response is parseable key=value output.
        if 'No results found' in http:
            print(R + ' [-] No results found!')
            save_data(database, module, lvl1, lvl2, lvl3, name, "No results found.")
        elif 'error' not in http:
            result = http.splitlines()
            for r in result:
                print(O + ' ' + r.split('=')[0] + C + color.TR3 + C + G + '=' +
                      r.split('=')[1] + C + color.TR2 + C)
            save_data(database, module, lvl1, lvl2, lvl3, name, http)
        else:
            print(R + ' [-] Outbound Query Exception!')

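# The subnetcalc endpoint answers with "key = value" lines (network address,
# mask, host range and similar), which is why each line is split on '='
# above; a successful response line without '=' would still raise IndexError,
# but the API's error and "No results found" cases are filtered out first.
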
def revdns(web):
    name = targetname(web)
    module = "ReconANDOSINT"
    lvl1 = "Passive Reconnaissance & OSINT"
    lvl3 = ''
    lvl2 = inspect.stack()[0][3]
    requests = session()
    web = web.split('//')[1]
    if "@" in web:
        web = web.split("@")[1]
    from core.methods.print import posintpas
    posintpas("reverse dns lookup")
    time.sleep(0.4)
    print(GR + color.BOLD + ' [!] Looking Up for Reverse DNS Info...')
    time.sleep(0.4)
    print(GR + color.BOLD + " [~] Result: \n" + color.END)
    text = requests.get('http://api.hackertarget.com/reversedns/?q=' + web)
    result = text.text
    if 'error' not in result and 'no result' not in result.lower():
        res = result.splitlines()
        for r in res:
            # Each line has the form "<ip>,<hostname>".
            print(O + ' [+] Received :' + C + color.TR3 + C + G + r.split(',')[0].strip() +
                  ' => ' + C + '(' + r.split(',')[1].strip() + ')' + C + color.TR2 + C)
            time.sleep(0.04)
            links.append(r)
        save_data(database, module, lvl1, lvl2, lvl3, name, result)
    else:
        print(R + ' [-] No result found!')
        save_data(database, module, lvl1, lvl2, lvl3, name, "No result found.")
        time.sleep(0.8)

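# The reversedns endpoint answers with plain "<ip>,<hostname>" lines, which is
# what the split(',') above assumes. A hedged standalone parser (the helper
# name is illustrative, not part of the original API) that tolerates
# malformed lines:
def _parse_reverse_dns(result):
    """Yield (ip, hostname) tuples from hackertarget reversedns output."""
    for line in result.splitlines():
        parts = line.split(',', 1)
        if len(parts) == 2:
            yield parts[0].strip(), parts[1].strip()
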
def revip(web):
    name = targetname(web)
    module = "ReconANDOSINT"
    lvl1 = "Passive Reconnaissance & OSINT"
    lvl3 = ''
    lvl2 = inspect.stack()[0][3]
    requests = session()
    web = web.replace('http://', '')
    web = web.replace('https://', '')
    if "@" in web:
        web = web.split("@")[1]
    from core.methods.print import posintpas
    posintpas("reverse ip lookup")
    time.sleep(0.4)
    print(GR + color.BOLD + ' [!] Looking Up for Reverse IP Info...')
    time.sleep(0.4)
    print(GR + color.BOLD + " [~] Result : \n" + color.END)
    domains = [web]
    for dom in domains:
        text = requests.get('http://api.hackertarget.com/reverseiplookup/?q=' + dom).text
        result = str(text)
        res = result.splitlines()
        if 'error' not in result:
            for r in res:
                print(O + ' [+] Site :>' + C + color.TR3 + C + G + r + C + color.TR2 + C)
                links.append(r)
                time.sleep(0.04)
            save_data(database, module, lvl1, lvl2, lvl3, name, result)
        else:
            print(R + ' [-] Outbound Query Exception!')
            time.sleep(0.8)

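# reverseiplookup returns one co-hosted domain per line, or a body containing
# 'error' on failure; successful results are appended to the module-level
# `links` list and persisted through save_data() like the other passive
# modules.
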
def getgeoip(web):
    name = targetname(web)
    requests = session()
    web = web.replace('http://', '')
    web = web.replace('https://', '')
    if "@" in web:
        web = web.split("@")[1]
    from core.methods.print import posintpas
    posintpas("geoip lookup")
    time.sleep(0.4)
    print(GR + ' [!] Looking Up for GeoIP Information...')
    time.sleep(0.4)
    print(GR + " [~] Found GeoIP Location: \n")
    domains = socket.gethostbyname(web)
    time.sleep(0.6)
    text = requests.get('http://api.hackertarget.com/geoip/?q=' + domains).text
    result = str(text)
    if 'error' not in result and 'invalid' not in result:
        res = result.splitlines()
        for r in res:
            print(O + ' [+] ' + r.split(':')[0].strip() + C + color.TR3 + C + G +
                  r.split(':')[1].strip() + C + color.TR2 + C)
            time.sleep(0.1)
    else:
        print(R + ' [-] Outbound Query Exception!')
        time.sleep(0.8)
    module = "ReconANDOSINT"
    lvl1 = "Passive Reconnaissance & OSINT"
    lvl2 = inspect.stack()[0][3]
    lvl3 = ""
    data = result
    save_data(database, module, lvl1, lvl2, lvl3, name, data)

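# The geoip endpoint is queried with the resolved address rather than the
# hostname (socket.gethostbyname above) and answers with "Field: value"
# lines, hence the split(':') display parse.
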
def whoischeckup(web):
    requests = session()
    web = web.replace('http://', '')
    web = web.replace('https://', '')
    if "@" in web:
        web = web.split("@")[1]
    from core.methods.print import posintpas
    posintpas("whois lookup")
    time.sleep(0.4)
    print(GR + color.BOLD + ' [!] Looking Up for WhoIS Information...')
    time.sleep(0.4)
    print(GR + color.BOLD + " [~] Result: \n" + color.END)
    domains = [web]
    for dom in domains:
        text = requests.get('http://api.hackertarget.com/whois/?q=' + dom).text
        nping = str(text)
        if 'error' not in nping:
            print(G + nping)
        else:
            print(R + ' [-] Outbound Query Exception!')
            time.sleep(0.8)

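# The whois endpoint returns the registry record as preformatted text, so it
# is printed verbatim rather than parsed; 'error' in the body is again the
# API's failure sentinel.
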
def googledorker(web):
    name = targetname(web)
    lvl2 = inspect.stack()[0][3]
    webx = web
    if "@" in webx:
        web = web.split("@")[1]
        if "https" in webx:
            web = "https://" + web
        else:
            web = "http://" + web
    site = str(web)

    def clear_cookie():
        fo = open(".google-cookie", "w")
        fo.close()

    def google_it(site, dork, lvl2, name):
        data = []
        module = "ReconANDOSINT"
        lvl1 = "Passive Reconnaissance & OSINT"
        lvl3 = ''
        clear_cookie()
        for title in search(dork, stop=30):
            print(O + ' [!] Site Found :>' + C + color.TR3 + C + G + title + C + color.TR2 + C)
            data.append(title)
            time.sleep(0.1)
        save_data(database, module, lvl1, lvl2, lvl3, name, str(data))

    try:
        from core.methods.print import posintpas
        posintpas("google dorker")
        print(P + ' [-] Warning! You may get a captcha if you query too frequently...' + C)
        print(GR + ' [*] Initializing google dorking...')
        print(C + " [*] Finding Login Pages for " + site + "...\n")
        google_it(site, "site:" + site + " inurl:wp- OR inurl:login OR inurl:signin OR inurl:checkin OR inurl:join", lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding Subdomains for " + site + "...\n")
        google_it(site, "site:*." + site, lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding Sub-subdomains for " + site + "...\n")
        google_it(site, "site:*.*." + site, lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding Upload/Download Pages for " + site + "...\n")
        google_it(site, "site:" + site + " inurl:wp- OR inurl:plugin OR inurl:upload OR inurl:download", lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding Backdoors for " + site + "...\n")
        google_it(site, "site:" + site + " inurl:shell OR inurl:backdoor OR inurl:wso OR inurl:cmd OR shadow OR passwd OR boot.ini OR inurl:backdoor", lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding Install / Setup files for " + site + "...\n")
        google_it(site, "site:" + site + " inurl:readme OR inurl:license OR inurl:install OR inurl:setup OR inurl:config", lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding WordPress plugins/uploads/downloads for " + site + "...\n")
        google_it(site, "site:" + site + " inurl:wp- OR inurl:plugin OR inurl:upload OR inurl:download", lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding Open Redirects for " + site + "...\n")
        google_it(site, "site:" + site + " inurl:redir OR inurl:url OR inurl:redirect OR inurl:return OR inurl:src=http OR inurl:r=http", lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding Files by Extension for " + site + "...\n")
        google_it(site, "site:" + site + " ext:cgi OR ext:php OR ext:asp OR ext:aspx OR ext:jsp OR ext:jspx OR ext:swf OR ext:fla OR ext:xml", lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding Documents by Extension for " + site + "...\n")
        google_it(site, "site:" + site + " ext:doc OR ext:docx OR ext:csv OR ext:pdf OR ext:txt OR ext:log OR ext:bak", lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding Employees on LinkedIn for " + site + "...\n")
        google_it(site, "site:linkedin.com employees " + site, lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding PHPInfo Files for " + site + "...\n")
        google_it(site, "inurl:'/phpinfo.php' " + site, lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding Files containing passwords for " + site + "...\n")
        google_it(site, "intext:'connectionString' AND inurl:'web' AND ext:'config'", lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        print(C + " [*] Finding .htaccess & sensitive fields for " + site + "...\n")
        google_it(site, "inurl:'/phpinfo.php' OR inurl:'.htaccess' OR inurl:'/.git' " + site + " -github", lvl2, name)
        print(P + ' [!] Pausing to avoid captcha...' + C)
        sleep(randint(20, 50))
        google_it(site, "site:" + site + " inurl:callback", lvl2, name)
        time.sleep(5)
    except urllib.error.HTTPError as err:
        if err.code == 503:
            print(R + ' [-] Captcha appeared...\n')

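# The long chain of google_it() calls above is repetitive; a hedged sketch of
# a table-driven equivalent (dork strings abridged from the calls above):
#
#     DORKS = [
#         ("Login Pages", "site:{s} inurl:login OR inurl:signin"),
#         ("Subdomains", "site:*.{s}"),
#         ("Open Redirects", "site:{s} inurl:redir OR inurl:redirect"),
#     ]
#     for label, dork in DORKS:
#         print(C + " [*] Finding " + label + " for " + site + "...\n")
#         google_it(site, dork.format(s=site), lvl2, name)
#         sleep(randint(20, 50))  # pause to avoid the captcha
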
def censysdom(web):
    name = targetname(web)
    module = "ReconANDOSINT"
    lvl1 = "Passive Reconnaissance & OSINT"
    lvl2 = inspect.stack()[0][3]
    lvl3 = ""
    requests = session()
    from core.methods.print import posintpas
    posintpas("censys domain recon")
    time.sleep(0.6)
    print(GR + ' [*] Importing API Key...')
    try:
        from files.API_KEYS import CENSYS_UID, CENSYS_SECRET
    except (IOError, ImportError):
        print(R + ' [-] Error while importing key...')
    web = web.split('//')[1]
    if "@" in web:
        web = web.split("@")[1]
    if CENSYS_SECRET != '' and CENSYS_UID != '':
        print(O + ' [+] Found Censys UID Key :' + C + color.TR3 + C + G + CENSYS_UID + C + color.TR2 + C)
        print(O + ' [+] Found Censys Secret Token :' + C + color.TR3 + C + G + CENSYS_SECRET + C + color.TR2 + C)
        base_url = 'https://www.censys.io/api/v1'
        print(GR + ' [*] Looking up info...')
        time.sleep(0.7)
        resp = requests.get(base_url + "/view/websites/" + web, auth=(CENSYS_UID, CENSYS_SECRET))
        if 'quota_exceeded' in resp.text:
            print(R + ' [-] Daily limit reached for this module. Use your own API key for CENSYS.')
        if resp.status_code == 200:
            print(G + ' [+] Found domain info!' + C + color.TR2 + C)
            asio = json.dumps(resp.json(), indent=4)
            quest = asio.splitlines()
            save_data(database, module, lvl1, lvl2, lvl3, name, str(quest))
            print(O + ' [!] Parsing info...' + C + '\n')
            time.sleep(1)
            for q in quest:
                q = q.replace('"', '')
                if ':' in q and '[' not in q and '{' not in q:
                    q1 = q.split(':', 1)[0].strip().title()
                    q2 = q.split(':', 1)[1].strip().replace(',', '')
                    print(C + ' [+] ' + q1 + ' : ' + GR + q2)
                    time.sleep(0.01)
                elif ('{' in q or '[' in q) and ':' in q:
                    w1 = q.split(':', 1)[0].strip().upper()
                    print(C + '\n [+] ' + w1 + ' :-' + '\n')
                elif '{' not in q and '[' not in q and ']' not in q and '}' not in q:
                    print(GR + ' [+] ' + q.replace(',', '').strip())
            print(C + ' [!] Saving retrieved CENSYS data...')
            time.sleep(1)
            with open('tmp/logs/' + web + '-logs/' + web + '-censys-data.json', 'w+') as file:
                json.dump(resp.json(), file, ensure_ascii=True, indent=4)
            eq = os.getcwd()
            print(C + ' [+] Censys Data stored under ' + eq + '/tmp/logs/' + web + '-logs/' + web + '-censys-data.json')
        else:
            print(R + ' [-] Did not find any info about domain ' + O + web + C)
            print(R + ' [+] Try with another one...')
            save_data(database, module, lvl1, lvl2, lvl3, name, "Did not find any info about domain " + web)
    else:
        print(R + ' [-] CENSYS API TOKENs not set!')
        print(R + ' [-] This module cannot be used!')

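# censysdom() pretty-prints the JSON with json.dumps(indent=4) and then
# re-parses it line by line purely for display. A hedged alternative that
# walks the parsed object directly (top-level keys only):
#
#     info = resp.json()
#     for key, value in info.items():
#         print(' [+] ' + str(key).title() + ' : ' + str(value))
#
# Note that https://www.censys.io/api/v1 is the API generation this module
# was written against; newer Censys deployments expose a different search API.
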
def links(web):
    requests = session()
    from core.methods.print import posintpas
    posintpas("page links")
    time.sleep(0.4)
    print(GR + color.BOLD + ' [!] Fetching links to the website...')
    time.sleep(0.4)
    print(GR + " [~] Result: " + color.END)
    if "https://" in web:
        web0 = web.replace('https://', '')
    else:
        web0 = web.replace('http://', '')
    if "@" in web:
        if "https" in web:
            web = "https://" + web.split("@")[1]
        else:
            web = "http://" + web.split("@")[1]
        web0 = web0.split("@")[1]
    domains = [web]
    for dom in domains:
        text = requests.get('http://api.hackertarget.com/pagelinks/?q=' + dom).text
        result = str(text)
        if 'null' not in result and 'no links found' not in result:
            woo = result.splitlines()
            for w in woo:
                if str(web0).lower() in w.lower():
                    final_links.append(w)
            print(C + '\n [!] Receiving links...')
            for p in final_links:
                print(O + ' [+] Found link :' + C + color.TR3 + C + G + p + C + color.TR2 + C)
                time.sleep(0.06)
            if 'http://' in web:
                po = web.replace('http://', '')
            elif 'https://' in web:
                po = web.replace('https://', '')
            if "@" in po:
                po = po.split("@")[1]
            p = 'tmp/logs/' + po + '-logs/' + str(po) + '-links.lst'
            print(B + ' [!] Saving links...')
            time.sleep(1)
            with open(p, 'w') as ile:
                for m in final_links:
                    ile.write(m + '\n')
            pa = os.getcwd()
            print(G + ' [+] Links saved under ' + pa + '/' + p + '!')
            print('')
        else:
            print(R + ' [-] Outbound Query Exception!')
            time.sleep(0.8)

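# Caution: this function is named `links` while revip()/revdns() above append
# to a module-level list also called `links`; if all of these definitions
# share one namespace, the def shadows the list. The page-links results here
# accumulate in the separate module-level `final_links` list instead.
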
def dnschk(domain):
    name = targetname(domain)
    module = "ReconANDOSINT"
    lvl1 = "Passive Reconnaissance & OSINT"
    lvl3 = ''
    lvl2 = inspect.stack()[0][3]
    from core.methods.print import posintpas
    posintpas("dns lookup")
    domain = domain.split('//')[1]
    res = DNSDumpsterAPI(False).search(domain)
    try:
        dns = []
        mx = []
        host = []
        txt = []
        print(G + '\n [+] DNS Records' + C + color.TR2 + C)
        for entry in res['dns_records']['dns']:
            print(O + "{domain} ({ip}) {as} {provider} {country}".format(**entry) + C)
            dns.append(entry)
        print(G + "\n [+] MX Records" + C + color.TR2 + C)
        for entry in res['dns_records']['mx']:
            print(O + "{domain} ({ip}) {as} {provider} {country}".format(**entry) + C)
            mx.append(entry)
        print(G + "\n [+] Host Records (A)" + C + color.TR2 + C)
        for entry in res['dns_records']['host']:
            if entry['reverse_dns']:
                print(O + "{domain} ({reverse_dns}) ({ip}) {as} {provider} {country}".format(**entry) + C)
            else:
                print(O + "{domain} ({ip}) {as} {provider} {country}".format(**entry) + C)
            host.append(entry)
        print(G + '\n [+] TXT Records:' + C + color.TR2 + C)
        for entry in res['dns_records']['txt']:
            print(O + entry + C)
            txt.append(entry)
        data = {"DNS": dns, "MX": mx, "HOST": host, "TXT": txt}
        save_data(database, module, lvl1, lvl2, lvl3, name, str(data))
        print(GR + '\n [*] Preparing DNS Map...')
        time.sleep(0.5)
        url = 'https://dnsdumpster.com/static/map/' + str(domain) + '.png'
        print(P + ' [!] Fetching map...' + C)
        try:
            os.system('wget -q ' + url)
        except Exception:
            print(R + ' [-] Map generation failed!')
            sys.exit(1)
        st = str(domain) + '.png'
        st1 = str(domain) + '-dnsmap.png'
        os.system('mv ' + st + ' ' + st1)
        os.system('mv ' + st1 + ' tmp/')
        print(C + ' [+] Map saved under "tmp/' + st1 + '"')
        try:
            print(GR + ' [!] Trying to open DNS Map...')
            os.system('xdg-open tmp/' + st1)
        except Exception:
            print(R + ' [-] Failed to open automatically.')
            print(GR + ' [!] Please view the map manually.')
    except TypeError:
        print(R + ' [-] No standard publicly recorded DNS records found.\n')

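# DNSDumpsterAPI(False).search() returns a dict whose 'dns_records' entry
# holds the 'dns', 'mx', 'host' and 'txt' sections iterated above. The map
# download shells out, so this path assumes `wget` (and `xdg-open` for the
# viewer) are available on PATH; on other systems the PNG could be fetched
# with the existing requests session instead.
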
def getconinfo(domain):
    requests = session()
    web = domain
    if "@" in web:
        domain = domain.split("@")[1]
        if "https" in web:
            domain = "https://" + domain
        else:
            domain = "http://" + domain
    from core.methods.print import posintpas
    posintpas("domain contact info")
    time.sleep(0.6)
    print(GR + ' [*] Importing API Key...')
    try:
        from files.API_KEYS import FULLCONTACT_API_KEY
    except (IOError, ImportError):
        print(R + ' [-] Error while importing key...')
    try:
        if FULLCONTACT_API_KEY != '':
            print(G + ' [+] Found API Key : ' + O + FULLCONTACT_API_KEY)
            base_url = 'https://api.fullcontact.com/v2/company/lookup.json'
            print(GR + ' [*] Looking up info...')
            time.sleep(0.7)
            payload = {'domain': domain, 'apiKey': FULLCONTACT_API_KEY}
            resp = requests.get(base_url, params=payload)
            if resp.status_code == 200:
                print(G + ' [+] Found domain info!')
                # Scrub non-ASCII characters before the line-oriented parse.
                quest = resp.text.encode('ascii', 'ignore').decode('ascii').splitlines()
                print(O + ' [!] Parsing info...\n')
                print(R + ' [+] REPORT :-\n')
                time.sleep(1)
                for q in quest:
                    q = q.replace('"', '')
                    if ':' in q and '[' not in q and '{' not in q:
                        q1 = q.split(':', 1)[0].strip().title()
                        q2 = q.split(':', 1)[1].strip().replace(',', '')
                        if q1.lower() in ('typeid', 'number', 'type'):
                            print(C + '\n [+] ' + q1 + ' : ' + GR + q2)
                        else:
                            print(C + ' [+] ' + q1 + ' : ' + GR + q2)
                        time.sleep(0.01)
                    elif ('{' in q or '[' in q) and ':' in q:
                        w1 = q.split(':', 1)[0].strip().upper()
                        w2 = q.split(':', 1)[1].strip()
                        if w1.lower() == 'keywords':
                            print(C + '\n [+] ' + w1 + ' : ' + GR + w2)
                        else:
                            print(O + '\n [+] ' + w1 + ' :-' + '\n')
            else:
                print(R + ' [-] Did not find any info about domain ' + O + domain)
                print(R + ' [+] Try with another one...')
        else:
            print(R + ' [-] FULL CONTACT API TOKEN not set!')
            print(R + ' [-] This module cannot be used!')
    except Exception as e:
        print(R + ' [-] Encountered Exception : ' + str(e))
    print(G + '\n [+] Public Contact Info Module Completed!\n')

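# The line-oriented report parse above only works when the FullContact
# response arrives pretty-printed; resp.json() would be the sturdier way to
# walk the structure. The ascii encode/decode round-trip preserves the
# original intent of dropping non-ASCII characters before printing.
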
def googlenum(web):
    requests = session()
    name = targetname(web)
    module = "ReconANDOSINT"
    lvl1 = "Passive Reconnaissance & OSINT"
    lvl3 = ''
    lvl2 = inspect.stack()[0][3]
    kind = []
    etag = []
    obj = []
    ID = []
    disp = []
    url = []
    from core.methods.print import posintpas
    posintpas("google gathering")
    try:
        print(GR + ' [*] Importing API Token...')
        time.sleep(0.7)
        from files.API_KEYS import GOOGLE_API_TOKEN
        if GOOGLE_API_TOKEN != '':
            maxr = '50'
            print(GR + ' [*] Fetching maximum 50 results...')
            print(O + ' [!] Parsing website address...' + C)
            time.sleep(0.6)
            web = web.replace('http://', '')
            web = web.replace('https://', '')
            if "@" in web:
                web = web.split("@")[1]
            print(GR + ' [*] Making the request...')
            try:
                resp = requests.get(
                    'https://www.googleapis.com/plus/v1/people?query=' + web +
                    '&key=' + GOOGLE_API_TOKEN + '&maxResults=' + maxr).text
            except Exception:
                print(R + ' [-] Access Forbidden (403)...')
                return  # without a response there is nothing to parse
            print(O + ' [!] Parsing raw-data...' + C)
            time.sleep(1)
            r = json.loads(resp)
            ctr = 1
            print(GR + ' [*] Fetching data...')
            if "items" in r:
                for p in r["items"]:
                    ctr += 1
                    time.sleep(0.8)
                    print(C + '\n [+] Info about Profile ' + P + str(ctr) + C + ' ...')
                    if 'kind' in p:
                        print(O + ' [+] Kind :' + C + color.TR3 + C + G + p['kind'] + C + color.TR2 + C)
                        kind.append(p['kind'])
                        time.sleep(0.05)
                    if 'etag' in p:
                        print(O + ' [+] E-Tag :' + C + color.TR3 + C + G + p['etag'] + C + color.TR2 + C)
                        etag.append(p['etag'])
                        time.sleep(0.05)
                    if 'objectType' in p:
                        print(O + ' [+] Object Type :' + C + color.TR3 + C + G + p['objectType'] + C + color.TR2 + C)
                        obj.append(p['objectType'])
                        time.sleep(0.05)
                    if 'id' in p:
                        print(O + ' [+] ID :' + C + color.TR3 + C + G + p['id'] + C + color.TR2 + C)
                        ID.append(p['id'])
                        time.sleep(0.05)
                    if 'displayName' in p:
                        print(O + ' [+] Display Name :' + C + color.TR3 + C + G + p['displayName'] + C + color.TR2 + C)
                        disp.append(p['displayName'])
                        time.sleep(0.05)
                    if 'url' in p:
                        print(O + ' [+] Link :' + C + color.TR3 + C + G + p['url'] + C + color.TR2 + C)
                        url.append(p['url'])
                        time.sleep(0.05)
            data = {
                "Kind": kind,
                "E-Tag": etag,
                "ObjectType": obj,
                "ID": ID,
                "DisplayName": disp,
                "Link": url
            }
            save_data(database, module, lvl1, lvl2, lvl3, name, str(data))
            print(G + ' [+] Google Enumeration Completed!' + C + color.TR2 + C)
        else:
            print(R + ' [-] Google API Token Key not set... This module cannot be used!')
    except (IOError, ImportError):
        print(R + ' [-] Google API Token Key not set... This module cannot be used!')

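# Caveat: this module targets the Google+ People API
# (googleapis.com/plus/v1/people), which Google shut down in March 2019, so in
# practice the request now returns an error payload; the code path is kept
# for reference and would need a replacement endpoint to be useful.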