def __init__(self):
    """Set up console colors, recon state containers, DB handle and helpers."""
    # ANSI escape codes used for colored console output.
    self.G = '\033[92m'  # green
    self.Y = '\033[93m'  # yellow
    self.B = '\033[94m'  # blue
    self.R = '\033[91m'  # red
    self.W = '\033[0m'   # white

    # Shared state accumulated across the recon phases.
    self.subdomains = set()
    self.verified_domains = []
    self.exposed_files = []
    self.sub_takeover = {}

    # MongoDB connection (local instance, database "RTA").
    self.mongocli = MongoClient('localhost', 27017)
    self.dbname = self.mongocli['RTA']

    # Collaborating helper objects.
    self.scraper = scraper.Scraper()
    self.slack = Slack()

    # Buffer for the Slack push-notification text.
    self.message = ""
def __init__(self):
    """Initialise colors, recon state, MongoDB, helper objects and YAML config."""
    # ANSI escape codes for colored terminal output.
    self.G = '\033[92m'  # green
    self.Y = '\033[93m'  # yellow
    self.B = '\033[94m'  # blue
    self.R = '\033[91m'  # red
    self.W = '\033[0m'   # white

    # Containers shared by the recon phases.
    self.subdomains = set()
    self.verified_domains = []
    self.exposed_files = []
    self.sub_takeover = {}

    # Local MongoDB instance, database "RTA".
    self.mongocli = MongoClient('localhost', 27017)
    self.dbname = self.mongocli['RTA']

    # Helper objects for scraping and Slack notifications.
    self.scraper = scraper.Scraper()
    self.slack = Slack()

    # Accumulates the Slack push-notification text.
    self.message = ""

    # Load the YAML config file that sits next to this module.
    self.path = os.path.dirname(os.path.abspath(__file__))
    with open(self.path + "/config", "r") as ymlfile:
        self.config = yaml.load(ymlfile, Loader=yaml.FullLoader)
def __init__(self):
    """Prepare scanner backends, MongoDB handle and the Slack notifier."""
    # ANSI color codes for console output.
    self.G = '\033[92m'  # green
    self.Y = '\033[93m'  # yellow
    self.B = '\033[94m'  # blue
    self.R = '\033[91m'  # red
    self.W = '\033[0m'   # white

    # Scanner backends used by the scan phases.
    self.nessus = Nessus()
    self.wpscan = WpScan()

    # Local MongoDB instance, database "RTA".
    self.mongocli = MongoClient('localhost', 27017)
    self.dbname = self.mongocli['RTA']

    # Slack notifier for pushing results.
    self.slack = Slack()
def __init__(self):
    """Set up colors, the MongoDB 'scraper' collection, API credentials
    read from the YAML config, and the Slack notifier."""
    # colors
    self.G = '\033[92m'  # green
    self.Y = '\033[93m'  # yellow
    self.B = '\033[94m'  # blue
    self.R = '\033[91m'  # red
    self.W = '\033[0m'  # white

    # MongoDB variables -- scraper hits are deduplicated on (url, hostname).
    self.mongocli = MongoClient('localhost', 27017)
    self.dbname = self.mongocli['RTA']
    self.collection = self.dbname['scraper']
    self.collection.create_index([("url", DESCENDING), ("hostname", ASCENDING)],
                                 unique=True)

    # Config parser.  safe_load avoids arbitrary-object construction and is
    # required on PyYAML >= 6, where load() without a Loader raises TypeError.
    path = os.path.dirname(os.path.abspath(__file__))
    with open(path + "/../../config", "r") as ymlfile:
        config = yaml.safe_load(ymlfile)

    # Github config
    self.github_token = config['scraper']['github_token']
    self.github_keywords = config['scraper']['github_keywords']
    self.github_url = "https://api.github.com"

    # Shodan config
    self.shodan_token = config['scraper']['shodan_token']
    self.shodan_url = "https://api.shodan.io/shodan/host/search"

    # Twitter config
    self.twitter_keywords = config['scraper']['twitter_keywords']
    self.access_key = config['scraper']["twitter_access_token"]
    self.access_secret = config['scraper']["twitter_access_token_secret"]
    self.consumer_key = config['scraper']["twitter_consumer_key"]
    self.consumer_secret = config['scraper']["twitter_consumer_secret"]

    # Slack configuration
    self.slack = Slack()
    self.message = "[+] Scraper Results:\n"
    return
def __init__(self):
    """Read Nessus connection details from the YAML config and initialise
    the scan-state fields used by the Nessus API workflow."""
    # colors
    self.G = '\033[92m'  # green
    self.Y = '\033[93m'  # yellow
    self.B = '\033[94m'  # blue
    self.R = '\033[91m'  # red
    self.W = '\033[0m'  # white

    # safe_load avoids arbitrary-object construction and is required on
    # PyYAML >= 6, where load() without a Loader raises TypeError.
    path = os.path.dirname(os.path.abspath(__file__))
    with open(path + "/../config", "r") as ymlfile:
        config = yaml.safe_load(ymlfile)

    self.nessus_url = config['nessus']['url']
    self.username = config['nessus']['username']
    self.password = config['nessus']['password']

    # Populated step by step by login() / get_*() / add_scan() / launch_scan().
    self.token = ""
    self.policy_uuid = ""
    self.policy_id = ""
    self.scan_id = 0
    self.scan_uuid = ""
    self.nessus_result = ""
    self.slack = Slack()
    return
class Recon(object):
    """ Class which deals with finding out as many subdomains and enumerate
    as much as possible """

    def __init__(self):
        """Set up colors, recon state, MongoDB handle and helper objects."""
        # colors
        self.G = '\033[92m'  # green
        self.Y = '\033[93m'  # yellow
        self.B = '\033[94m'  # blue
        self.R = '\033[91m'  # red
        self.W = '\033[0m'  # white
        # Global Class Variables
        self.subdomains = set()
        self.verified_domains = list()
        self.exposed_files = list()
        self.sub_takeover = dict()
        # MongoDB variables
        self.mongocli = MongoClient('localhost', 27017)
        self.dbname = self.mongocli['RTA']
        # Object initiation
        self.scraper = scraper.Scraper()
        self.slack = Slack()
        # Slack push notification message
        self.message = ""
        return

    ############################ Finding Subdomains ############################

    def zonetransfer(self, target):
        """ The function checks if Zonetransfer is enabled for the target
        and if so, it will try to get all of its subdomains from it.
        Module: recon/custom/zonetransfer.py """
        print(self.Y + "[i] Checking for Zonetransfer")
        zoneresult = json.loads(zonetransfer.zonetransfer(target))
        if zoneresult["enabled"]:
            print(self.R + "[+] Zone Transfer is enabled")
            self.subdomains |= set([str(x) for x in zoneresult["list"]])
        else:
            print(self.G + "[i] Zone Transfer is not enabled\n")
        return

    def sublister(self, target, silent=True):
        """ Sublist3r tool (located in recon/Sublist3r) is run against the
        target and it returns a list of subdomains.

        self.subdomains -> List of all subdomains returned by sublist3r """
        self.subdomains |= set(
            sublist3r.main(target, 5, savefile=None, ports=None, silent=silent,
                           verbose=False, enable_bruteforce=False, engines=None))

        # Enter the subdomains to MongoDB
        collection = self.dbname['subdomains']
        collection.create_index('domain', unique=True)
        # BUGFIX: was self.dbname.collection.count(), which counts an
        # unrelated collection literally named "collection".
        count = collection.count()

        # Get the list of domains already in the DB for slack notifications
        old_subs = collection.find({}, {'domain': 1, '_id': 0})
        old_sub = set()
        for i in old_subs:
            old_sub.add(i['domain'])

        # New subs is the diff between old set and new one
        diff = list(self.subdomains - old_sub)

        # Slack push notification, 30 domains per message.
        if diff:
            self.message = "[+] New subdomains enumerated "
            self.message += "(previous results are not included):\n```"
            for i in range(len(diff)):
                self.message += diff[i].strip("\n") + ",\n"
                if (i + 1) % 30 == 0:
                    self.message += "```"
                    self.slack.notify_slack(self.message)
                    if i + 1 != len(diff):
                        self.message = "```"
            # BUGFIX: flush the remainder whenever the count is not an exact
            # multiple of 30 (previously batches smaller than 30 were never
            # sent at all).
            if len(diff) % 30 != 0:
                self.message += "```"
                self.slack.notify_slack(self.message)

        for domain in self.subdomains:
            try:
                data = {
                    "id": count + 1,
                    "domain": domain,
                    "time": datetime.now(),
                    "parent": target
                }
                dataid = collection.insert(data)
                count += 1
            except Exception as e:
                # Duplicate-key errors for already-stored domains are expected.
                pass
        return

    ############################ Verifying Subdomains ############################

    def verify(self, target):
        """ By initiating a request, we verify if the subdomain has a webserver
        running in it based on if it gets resolved correctly or not. If the
        status is any other value than 200, the CNAME lookup is done and if it
        points out to 3rd parties, then the details is listed.

        self.verified_domains -> list of all verified subdomains (webserver running) """
        # Enter the subdomains to MongoDB
        collection = self.dbname['verified_subdomains']
        collection.create_index('domain', unique=True)
        # BUGFIX: was self.dbname.collection.count() (wrong collection).
        count = collection.count()
        print("\n" + self.Y + "[i] Verifying Subdomains and takeover options")
        for url in self.subdomains:
            cname = False
            url = url.strip("\n")
            data = {"id": count + 1, "time": datetime.now(), "parent": target}
            try:
                req = requests.get("http://" + url, timeout=4, verify=False,
                                   allow_redirects=False)
                self.verified_domains.append(url)
                if req.status_code != 200:
                    # Checking for subdomain takeover
                    cname = subtakeover.check_takeover(target, url)
                if cname:
                    self.sub_takeover[url] = cname
                    data.update({
                        "cname": cname,
                        "domain": url,
                        "takeover": "true"
                    })
                else:
                    data.update({
                        "cname": "",
                        "domain": url,
                        "takeover": "false"
                    })
                # push the data into MongoDB
                dataid = collection.insert(data)
                count += 1
            except Exception as e:
                # Unresolvable / unresponsive hosts are simply skipped.
                continue

        if (len(self.sub_takeover) > 0):
            # Slack push notifications
            self.message = "[+] Possible subdomain takeovers"
            self.message += "(Manual verification required):\n```"
            print("\n" + self.Y +
                  "[+] Possible subdomain takeovers (Manual verification required): ")
            for url, cname in self.sub_takeover.items():
                print(self.W + "  " + url + ": " + self.R + cname)
                length = len(url)
                length = 30 - length
                self.message += url + " --> ".rjust(length) + cname + "\n"
            self.message += "```\n"
            self.slack.notify_slack(self.message)
        return

    def wappalyzer(self, target, verbose=False):
        """ All verified subdomains are scanned with Wappalyzer to find out
        the technology stack used in each of them.

        Once wappalyzer is run, it prints out all verified domains """
        print("\n" + self.Y + "[i] Verified and Analyzed Subdomains: \n")
        wappalyzer = Wappalyzer.latest()
        # Tech stack db which contains the tech stack of all the sub domains
        collection = self.dbname['tech_stack']
        collection.create_index('domain', unique=True)
        # BUGFIX: was self.dbname.collection.count() (wrong collection).
        count = collection.count()
        for url in self.verified_domains:
            try:
                webpage = WebPage.new_from_url('http://' + url, verify=False)
                tech_stack = wappalyzer.analyze(webpage)
                if tech_stack and verbose:
                    print(self.G + "[i] URL: " + url)
                    print(self.B + "[i] Wappalyzer: " + str(list(tech_stack)) + "\n")
                # Push the above data to DB
                data = {
                    "id": count + 1,
                    "domain": url,
                    "time": datetime.now()
                }
                data["parent"] = target
                data['tech_stack'] = list(tech_stack)
                dataid = collection.insert(data)
                count += 1
            except Exception as e:
                continue
        return

    ############################ SPF Records ############################

    def spfcheck(self, target):
        """ The function will check the number of look up needed for the SPF
        record and checks if it is greater than 10 or not. """
        print(self.Y + "[i] Checking for SPF records")
        resolves = spfcheck.spflookups(target)
        if (resolves > 10):
            print(self.R + "[-] SPF record lookup exceeds 10. Current value is: " +
                  str(resolves) + "\n")
        else:
            print(self.G + "[+] SPF record lookups is good. Current value is: " +
                  str(resolves) + "\n")
        return

    ############################ Open Source Intelligence ############################

    def scrape(self, target):
        """ Run the scraper """
        print(self.Y + "[i] Scraper Results" + self.G)
        self.scraper.run_scrape(target)
        return
class Scraper():
    """ Class which scrapes the internet to figure out any confidential data leak"""

    def __init__(self):
        """Set up colors, the MongoDB 'scraper' collection, API credentials
        read from the YAML config, and the Slack notifier."""
        # colors
        self.G = '\033[92m'  # green
        self.Y = '\033[93m'  # yellow
        self.B = '\033[94m'  # blue
        self.R = '\033[91m'  # red
        self.W = '\033[0m'  # white

        # MongoDB variables -- hits are deduplicated on (url, hostname).
        self.mongocli = MongoClient('localhost', 27017)
        self.dbname = self.mongocli['RTA']
        self.collection = self.dbname['scraper']
        self.collection.create_index([("url", DESCENDING), ("hostname", ASCENDING)],
                                     unique=True)

        # Config parser.  safe_load avoids arbitrary-object construction and
        # is required on PyYAML >= 6, where load() without a Loader raises.
        path = os.path.dirname(os.path.abspath(__file__))
        with open(path + "/../../config", "r") as ymlfile:
            config = yaml.safe_load(ymlfile)

        # Github config
        self.github_token = config['scraper']['github_token']
        self.github_keywords = config['scraper']['github_keywords']
        self.github_url = "https://api.github.com"

        # Shodan config
        self.shodan_token = config['scraper']['shodan_token']
        self.shodan_url = "https://api.shodan.io/shodan/host/search"

        # Twitter config
        self.twitter_keywords = config['scraper']['twitter_keywords']
        self.access_key = config['scraper']["twitter_access_token"]
        self.access_secret = config['scraper']["twitter_access_token_secret"]
        self.consumer_key = config['scraper']["twitter_consumer_key"]
        self.consumer_secret = config['scraper']["twitter_consumer_secret"]

        # Slack configuration
        self.slack = Slack()
        self.message = "[+] Scraper Results:\n"
        return

    def github(self):
        """ Search the github for results based on keywords in config.

        This runs as 2 parts: 1) search in code 2) search in commits """
        self.message += "*Github*\n```"
        headers = {"Accept": "application/vnd.github.cloak-preview"}
        # Mongodb setup
        count = self.collection.count()
        for i in ['code', 'commits']:
            path = "/search/" + i
            url = self.github_url + path
            for search_string in self.github_keywords:
                payload = {"access_token": self.github_token, "q": search_string}
                req = requests.get(url, headers=headers, params=payload)
                if req.status_code == 200:
                    results = json.loads(req.text)
                    for item in results['items']:
                        try:
                            data = {"id": count + 1,
                                    "source": "github",
                                    "search_string": search_string,
                                    "url": item['html_url']}
                            data['profile'] = item['repository']['full_name']
                            data['timestamp'] = datetime.now()
                            dataid = self.collection.insert(data)
                            count += 1
                            # Slack push notifications
                            self.message += ("url: " + data['url'] +
                                             " (Searched String: " + search_string + ")\n")
                        except Exception as e:
                            # Duplicates on the unique (url, hostname) index
                            # are expected and ignored.
                            pass
                # NOTE(review): these breaks stop after the first keyword of
                # the first search type -- looks like a debug leftover, kept
                # as-is to preserve behavior (github() is currently disabled
                # in run_scrape).
                break
            break
        self.message += "```"
        return

    def shodan(self, target):
        """ Search the shodan for results regarding "example.com" domain.

        This uses the query=hostname:example.com to get the subdomains and
        ports which are open """
        message = ""
        url = self.shodan_url
        payload = {"key": self.shodan_token, "query": "hostname:" + target}
        req = requests.get(url, params=payload)
        results = json.loads(req.text)

        # MongoDB variables
        count = self.collection.count()
        for result in results['matches']:
            try:
                data = {"id": count + 1, "source": "shodan",
                        "timestamp": datetime.now()}
                data['port'] = result['port']
                data['ip_str'] = result['ip_str']
                data['hostname'] = result['hostnames']
                dataid = self.collection.insert(data)
                count += 1
                # Slack notification (column-aligned with rjust padding).
                length = 38 - len(data['hostname'][0])
                message += ("\nHostname: " + data['hostname'][0] +
                            "IP: ".rjust(length) + data['ip_str'])
                length = 28 - len(data['ip_str'])
                message += "Ports: ".rjust(length) + str(data['port'])
            except Exception as e:
                # Duplicates / matches without hostnames are skipped.
                pass
        if message:
            print(self.G + "[+] Shodan" + self.R + message + "\n")
            self.message += "*Shodan*:\n```"
            self.message += message
            self.message += "\n```"
        return

    def twitter(self):
        """ Keywords based search on twitter and returns the recent results
        based on the same """
        message = ""
        count = self.collection.count()
        twitter = Twitter(auth=OAuth(self.access_key, self.access_secret,
                                     self.consumer_key, self.consumer_secret))
        for keyword in self.twitter_keywords:
            query = twitter.search.tweets(q=keyword)
            for result in query['statuses']:
                try:
                    data = {"id": count + 1, "source": "twitter",
                            "timestamp": datetime.now()}
                    data['tweet'] = result['text']
                    data['name'] = result["user"]["screen_name"]
                    data['url'] = ("https://twitter.com/" + data["name"] +
                                   "/status/" + str(result['id']))
                    data['search_string'] = keyword
                    try:
                        dataid = self.collection.insert(data)
                    except DuplicateKeyError as e:
                        # Tweet already recorded -- skip silently.
                        continue
                    count += 1
                    # Slack push notification
                    length = 82 - len(data['url'])
                    message += ("\nURL: " + data['url'] +
                                " search string: ".rjust(length) + keyword)
                except Exception as e:
                    print(e)
        if message:
            print(self.G + "[+] Twitter" + self.B + message)
            self.message += "\n*Twitter*:\n```"
            self.message += message
            self.message += "\n```"
        return

    def run_scrape(self, target):
        """ Get all possible results for all the defined websites. """
        # self.github()
        try:
            self.shodan(target)
        except Exception as e:
            # Best-effort: a missing/partial config must not abort the run.
            print("\033[91m" + "[+]Skipping Shodan since config file is not updated")
        try:
            self.twitter()
        except Exception as e:
            print("\033[91m" + "[+]Skipping Twitter since config file is not updated")
        self.slack.notify_slack(self.message)
        return
class Scan():
    """ This class will take care of the Active/Passive scanning """

    def __init__(self):
        """Prepare scanner backends, MongoDB handle and the Slack notifier."""
        # colors
        self.G = '\033[92m'  # green
        self.Y = '\033[93m'  # yellow
        self.B = '\033[94m'  # blue
        self.R = '\033[91m'  # red
        self.W = '\033[0m'  # white
        # object initialization
        self.nessus = Nessus()
        self.wpscan = WpScan()
        # MongoDB variables
        self.mongocli = MongoClient('localhost', 27017)
        self.dbname = self.mongocli['RTA']
        # Slack notification
        self.slack = Slack()

    def nessus_scan(self, target, filename):
        """ This function will take care of nessus scans and getting its output"""
        self.nessus.login()
        self.nessus.get_custom_uuid()
        self.nessus.get_policy_id()
        self.nessus.add_scan(list(target))
        print(self.G + "[i] Successfully added the Nessus scan")
        self.nessus.launch_scan()
        print("[i] Successfully launched the Nessus scan & waiting for the scan to complete")
        # Poll once a minute until the scan leaves the "running" state.
        while True:
            time.sleep(60)
            try:
                status = self.nessus.check_status()
                if (status != "running"):
                    break
            except Exception as e:
                # Transient API errors: keep polling.
                continue
        self.nessus.scan_results(filename)
        print(self.G + "[+] Nessus consolidated report:")
        self.nessus.slack_notify()
        return

    def wp_scan(self, parent):
        """ Launch WpScan if the techstack used is wordpress. """
        collection = self.dbname['wpscan']
        collection_tech = self.dbname['tech_stack']
        # BUGFIX: was self.dbname.collection.count(), which counts an
        # unrelated collection literally named "collection".
        count = collection.count()
        # flag ensures the report header is emitted only once.
        flag = True
        for item in collection_tech.find({'parent': parent}):
            message = ""
            if 'wordpress' in str(item['tech_stack']).lower():
                if flag:
                    message = "[+] *Wpscan report*: (" + item['domain'] + ")\n"
                    flag = False
                result = self.wpscan.scan(item['domain'], parent)
                data = {
                    'id': count + 1,
                    'domain': item['domain'],
                    'time': datetime.now()
                }
                data['version'] = result['version']['number']
                message += "Version: `" + data['version'] + "`\n"
                data['vulnerabilities'] = []
                data['plugins'] = {}
                message += "Wordpress core vulnerabilities: \n```\n"
                for value in result['version']['vulnerabilities']:
                    data['vulnerabilities'].append(value['title'])
                    message += value['title'] + "\n"
                message += "```\nPlugins: \n"
                # BUGFIX: iteritems() is Python-2-only; items() behaves the
                # same and works on both interpreters.
                for key, value in result['plugins'].items():
                    if message[-1] != "\n":
                        message += "```"
                    message += "\n" + key + ": \n```"
                    for vuln in value['vulnerabilities']:
                        message += "\n"
                        try:
                            data['plugins'][key].append(vuln['title'])
                        except KeyError:
                            # First vulnerability for this plugin.
                            data['plugins'][key] = []
                            data['plugins'][key].append(vuln['title'])
                        message += vuln['title']
                # Push the above data to DB
                message += "\n```"
                print(self.W + message)
                self.slack.notify_slack(message)
                dataid = collection.insert(data)
                count += 1
class Nessus():
    """ Nessus scanner APIs for automatted scan and report generation """

    def __init__(self):
        """Read Nessus connection details from the YAML config and initialise
        the scan-state fields used by the API workflow."""
        # colors
        self.G = '\033[92m'  # green
        self.Y = '\033[93m'  # yellow
        self.B = '\033[94m'  # blue
        self.R = '\033[91m'  # red
        self.W = '\033[0m'  # white

        # safe_load avoids arbitrary-object construction and is required on
        # PyYAML >= 6, where load() without a Loader raises TypeError.
        path = os.path.dirname(os.path.abspath(__file__))
        with open(path + "/../config", "r") as ymlfile:
            config = yaml.safe_load(ymlfile)

        self.nessus_url = config['nessus']['url']
        self.username = config['nessus']['username']
        self.password = config['nessus']['password']
        # Populated step by step by login() / get_*() / add_scan() / launch_scan().
        self.token = ""
        self.policy_uuid = ""
        self.policy_id = ""
        self.scan_id = 0
        self.scan_uuid = ""
        self.nessus_result = ""
        self.slack = Slack()
        return

    def login(self):
        """ login() will login to the nessus and retrieve the token which is
        used in all subsequent requests.

        self.token -> Access token used for subsequent Authorization """
        url = self.nessus_url + "/session"
        params = {"username": self.username, "password": self.password}
        headers = {"Content-Type": "application/json"}
        req = requests.post(url, data=json.dumps(params), headers=headers,
                            verify=False)
        self.token = json.loads(req.text)['token']
        return

    def get_folder(self):
        """ get_folder() checks if a folder named "Red Team Arsenal" exists
        in the folder list where automatted scan will go (the ones launched
        by this script) """
        url = self.nessus_url + "/folders"
        headers = {"X-Cookie": "token=" + self.token}
        req = requests.get(url, headers=headers, verify=False)
        return

    def get_custom_uuid(self):
        """ get_custom_uuid() function gets the UUID of custom scans using
        which we launch a custom policy based scan.

        self.policy_uuid -> Custom Policy UUID """
        url = self.nessus_url + "/editor/policy/templates"
        headers = {"X-Cookie": "token=" + self.token}
        req = requests.get(url, headers=headers, verify=False)
        policies = json.loads(req.text)
        for policy in policies["templates"]:
            if (policy["title"] == "Custom Scan"):
                self.policy_uuid = policy["uuid"]
                break
        return

    def get_policy_id(self):
        """ get_policy() function gets the user defined policies of Nessus
        using which we can run custom scans.

        self.policy_id -> Custom policy ID that we have created """
        url = self.nessus_url + "/policies"
        headers = {"X-Cookie": "token=" + self.token}
        req = requests.get(url, headers=headers, verify=False)
        scans = json.loads(req.text)
        for policy in scans["policies"]:
            if (policy["name"] == "RTA"):
                self.policy_id = policy["id"]
                break
        return

    def add_scan(self, target):
        """ add_scan() functions add a new scan in to the list of scans which
        can be laucnched at a later point of time (or can be scheduled also).

        self.scan_id -> Scan ID name which is needed for scan launch """
        url = self.nessus_url + "/scans"
        headers = {"X-Cookie": "token=" + self.token,
                   "Content-Type": "application/json"}
        targets = ", ".join(target)
        settings = {"name": "RTA_" + str(datetime.now()),
                    "description": "Red Team Arsenal weekly Auto scan"}
        settings["text_targets"] = targets
        settings["policy_id"] = self.policy_id
        params = {"uuid": self.policy_uuid, "settings": settings}
        req = requests.post(url, headers=headers, data=json.dumps(params),
                            verify=False)
        self.scan_id = json.loads(req.text)["scan"]["id"]
        return

    def launch_scan(self):
        """ launch_scan() launches a scan which is already added via add_scan(). """
        url = self.nessus_url + "/scans/" + str(self.scan_id) + "/launch"
        headers = {"X-Cookie": "token=" + self.token}
        req = requests.post(url, headers=headers, verify=False)
        self.scan_uuid = json.loads(req.text)["scan_uuid"]
        return

    def check_status(self):
        """ check_status() returns the the status of the scans.

        NOTE(review): returns None when the scan uuid is not found in the
        listing -- callers treat any value other than "running" as done. """
        url = self.nessus_url + "/scans/"
        headers = {"X-Cookie": "token=" + self.token}
        req = requests.get(url, headers=headers, verify=False)
        response = json.loads(req.text)
        # Session may have expired during a long scan: re-login and report
        # "running" so the caller polls again with the fresh token.
        if "Invalid Credentials" in req.text:
            self.login()
            return "running"
        for scan in response["scans"]:
            if (scan["uuid"] == self.scan_uuid):
                status = scan["status"]
                return status

    def scan_results(self, filename, format='pdf'):
        """ scan_result() returns the list of scan's overall result
        3 parts: 1) initiate the report download 2) Check the report status
        3) Download the report if the status is ready """
        # Part 1: Initiating report download
        url = self.nessus_url + "/scans/" + str(self.scan_id) + "/export"
        headers = {"X-Cookie": "token=" + self.token,
                   "Content-Type": "application/json"}
        params = {"format": format, "chapters": "vuln_hosts_summary"}
        req = requests.post(url, headers=headers, data=json.dumps(params),
                            verify=False)
        response = json.loads(req.text)
        export_file_id = response["file"]

        # Part 2: Checking export status
        while True:
            url = (self.nessus_url + "/scans/" + str(self.scan_id) +
                   "/export/" + str(export_file_id) + "/status")
            req = requests.get(url, headers=headers, verify=False)
            response = json.loads(req.text)
            if response["status"] == "ready":
                break

        # Part 3: Downloading the report once the status is ready
        url = (self.nessus_url + "/scans/" + str(self.scan_id) +
               "/export/" + str(export_file_id) + "/download")
        filename = filename + "." + str(format)
        req = requests.get(url, headers=headers, verify=False, stream=True)
        # Writing the output to a file; the with-block closes the handle
        # (the original also called fd.close() redundantly inside it).
        with open(filename, 'wb') as fd:
            for chunk in req.iter_content(chunk_size=2000):
                fd.write(chunk)
        return

    def slack_notify(self):
        """Download the CSV report, bucket findings by severity and push one
        Slack message per non-empty severity level."""
        self.scan_results('Nessus_report_file', 'csv')
        fieldnames = ('Plugin ID', 'CVE', 'CVSS', 'Risk', 'Host', 'Protocol',
                      'Port', 'Name', 'Synopsis', 'Description', 'Solution',
                      'See Also', 'Plugin Output')
        # Consolidated report to slack
        critical = high = medium = low = ""
        # BUGFIX: the file handle was never closed before os.remove();
        # the with-block guarantees it is.
        with open('Nessus_report_file.csv', 'r') as csvfile:
            # Parsing the CSV to dict
            reader = csv.DictReader(csvfile, fieldnames)
            for row in reader:
                if row['Risk'] == 'Critical':
                    critical += row['Name'] + " (" + row['Host'] + ":" + row['Port'] + ")\n"
                if row['Risk'] == 'High':
                    high += row['Name'] + " (" + row['Host'] + ":" + row['Port'] + ")\n"
                if row['Risk'] == 'Medium':
                    medium += row['Name'] + " (" + row['Host'] + ":" + row['Port'] + ")\n"
                if row['Risk'] == 'Low':
                    low += row['Name'] + " (" + row['Host'] + ":" + row['Port'] + ")\n"

        if critical or high or medium or low:
            self.message = "[+] Nessus consolidated report: \n"
        if critical:
            print(self.R + "Critical:\n" + critical)
            self.message += "*Critical*:\n```\n" + critical + "```\n"
            self.slack.notify_slack(self.message)
            self.message = ""
        if high:
            print(self.R + "High:\n" + high)
            self.message += "*High*:\n```\n" + high + "```\n"
            self.slack.notify_slack(self.message)
            self.message = ""
        if medium:
            print(self.Y + "Medium:\n" + medium)
            self.message += "*Medium*:\n```\n" + medium + "```\n"
            self.slack.notify_slack(self.message)
            self.message = ""
        if low:
            print(self.G + "Low:\n" + low)
            self.message += "*Low*:\n```\n" + low + "```\n"
            self.slack.notify_slack(self.message)
            self.message = ""
        # cleaning up files
        os.remove('Nessus_report_file.csv')
class Recon(object):
    """ Class which deals with finding out as many subdomains and enumerate
    as much as possible """

    def __init__(self):
        """Set up colors, recon state, MongoDB, helper objects and config."""
        # colors
        self.G = '\033[92m'  # green
        self.Y = '\033[93m'  # yellow
        self.B = '\033[94m'  # blue
        self.R = '\033[91m'  # red
        self.W = '\033[0m'  # white
        # Global Class Variables
        self.subdomains = set()
        self.verified_domains = list()
        self.exposed_files = list()
        self.sub_takeover = dict()
        # MongoDB variables
        self.mongocli = MongoClient('localhost', 27017)
        self.dbname = self.mongocli['RTA']
        # Object initiation
        self.scraper = scraper.Scraper()
        self.slack = Slack()
        # Slack push notification message
        self.message = ""
        # Parsing config
        self.path = os.path.dirname(os.path.abspath(__file__))
        with open(self.path + "/config", "r") as ymlfile:
            self.config = yaml.load(ymlfile, Loader=yaml.FullLoader)
        return

    ############################ Finding Subdomains ############################

    def zonetransfer(self, target):
        """ The function checks if Zonetransfer is enabled for the target
        and if so, it will try to get all of its subdomains from it.
        Module: recon/custom/zonetransfer.py """
        print(self.Y + "[i] Checking for Zonetransfer")
        zoneresult = json.loads(zonetransfer.zonetransfer(target))
        if zoneresult["enabled"]:
            print(self.R + "[+] Zone Transfer is enabled")
            self.slack.notify_slack("[+] Zone Transfer is enabled for " + target)
            self.subdomains |= set([str(x) for x in zoneresult["list"]])
        else:
            print(self.G + "[i] Zone Transfer is not enabled\n")
        return

    def sublister(self, target, silent=True):
        """ Sublist3r tool (located in recon/Sublist3r) is run against the
        target and it returns a list of subdomains.

        self.subdomains -> List of all subdomains returned by sublist3r """
        self.subdomains |= set(
            sublist3r.main(target, 5, savefile=None, ports=None, silent=silent,
                           verbose=False, enable_bruteforce=False, engines=None))

        # Enter the subdomains to MongoDB
        collection = self.dbname['subdomains']
        collection.create_index('domain', unique=True)
        # BUGFIX: was self.dbname.collection.count(), which counts an
        # unrelated collection literally named "collection".
        count = collection.count()

        # Get the list of domains already in the DB for slack notifications
        old_subs = collection.find({}, {'domain': 1, '_id': 0})
        old_sub = set()
        for i in old_subs:
            old_sub.add(i['domain'])

        # New subs is the diff between old set and new one
        diff = list(self.subdomains - old_sub)

        # Slack push notification, 30 domains per message.
        if diff:
            self.message = "[+] New subdomains enumerated "
            self.message += "(previous results are not included):\n```"
            for i in range(len(diff)):
                self.message += diff[i].strip("\n") + ",\n"
                if (i + 1) % 30 == 0:
                    self.message += "```"
                    self.slack.notify_slack(self.message)
                    if i + 1 != len(diff):
                        self.message = "```"
            # BUGFIX: flush the remainder only when the count is not an exact
            # multiple of 30 (previously an exact multiple re-sent the last
            # batch a second time).
            if len(diff) % 30 != 0:
                self.message += "```"
                self.slack.notify_slack(self.message)

        for domain in self.subdomains:
            try:
                data = {
                    "id": count + 1,
                    "domain": domain,
                    "time": datetime.now(),
                    "parent": target
                }
                dataid = collection.insert(data)
                count += 1
            except Exception as e:
                # Duplicate-key errors for already-stored domains are expected.
                pass
        return

    ############################ Verifying Subdomains ############################

    def verify(self, target):
        """ By initiating a request, we verify if the subdomain has a webserver
        running in it based on if it gets resolved correctly or not. If the
        status is any other value than 200, the CNAME lookup is done and if it
        points out to 3rd parties, then the details is listed.

        self.verified_domains -> list of all verified subdomains (webserver running) """
        # Enter the subdomains to MongoDB
        collection = self.dbname['verified_subdomains']
        collection.create_index('domain', unique=True)
        # BUGFIX: was self.dbname.collection.count() (wrong collection).
        count = collection.count()
        print("\n" + self.Y + "[i] Verifying Subdomains and takeover options")
        for url in self.subdomains:
            cname = False
            url = url.strip("\n")
            data = {"id": count + 1, "time": datetime.now(), "parent": target}
            try:
                req = requests.get("http://" + url, timeout=4, verify=False,
                                   allow_redirects=False)
                self.verified_domains.append(url)
                if req.status_code != 200:
                    # Checking for subdomain takeover
                    cname = subtakeover.check_takeover(target, url)
                if cname:
                    self.sub_takeover[url] = cname
                    data.update({
                        "cname": cname,
                        "domain": url,
                        "takeover": "true"
                    })
                else:
                    data.update({
                        "cname": "",
                        "domain": url,
                        "takeover": "false"
                    })
                # push the data into MongoDB
                dataid = collection.insert(data)
                count += 1
            except Exception as e:
                # Unresolvable / unresponsive hosts are simply skipped.
                continue

        if (len(self.sub_takeover) > 0):
            # Slack push notifications
            self.message = "[+] Possible subdomain takeovers"
            self.message += "(Manual verification required):\n```"
            print("\n" + self.Y +
                  "[+] Possible subdomain takeovers (Manual verification required): ")
            for url, cname in self.sub_takeover.items():
                print(self.W + "  " + url + ": " + self.R + cname)
                length = len(url)
                length = 30 - length
                self.message += url + " --> ".rjust(length) + cname + "\n"
            self.message += "```\n"
            self.slack.notify_slack(self.message)
        return

    def wappalyzer(self, target, verbose=False):
        """ All verified subdomains are scanned with Wappalyzer to find out
        the technology stack used in each of them.

        Once wappalyzer is run, it prints out all verified domains """
        print("\n" + self.Y + "[i] Verified and Analyzed Subdomains: \n")
        wappalyzer = Wappalyzer.latest()
        # Tech stack db which contains the tech stack of all the sub domains
        collection = self.dbname['tech_stack']
        collection.create_index('domain', unique=True)
        # BUGFIX: was self.dbname.collection.count() (wrong collection).
        count = collection.count()
        for url in self.verified_domains:
            try:
                webpage = WebPage.new_from_url('http://' + url, verify=False)
                tech_stack = wappalyzer.analyze(webpage)
                if tech_stack and verbose:
                    print(self.G + "[i] URL: " + url)
                    print(self.B + "[i] Wappalyzer: " + str(list(tech_stack)) + "\n")
                # Push the above data to DB
                data = {
                    "id": count + 1,
                    "domain": url,
                    "time": datetime.now()
                }
                data["parent"] = target
                data['tech_stack'] = list(tech_stack)
                dataid = collection.insert(data)
                count += 1
            except Exception as e:
                continue
        return

    ############################ DNS Records ############################

    def spfcheck(self, target):
        """ The function will check the number of look up needed for the SPF
        record and checks if it is greater than 10 or not. """
        print(self.Y + "[i] Checking for SPF records")
        resolves = spfcheck.spflookups(target)
        if (resolves > 10):
            print(self.R + "[+] SPF record lookup exceeds 10. Current value is: " +
                  str(resolves) + "\n")
            self.message = ("[+] SPF record lookup for " + target +
                            " exceeds 10. Current value is: " + str(resolves) + "\n")
            self.slack.notify_slack(self.message)
        else:
            print(self.G + "[+] SPF record lookups is good. Current value is: " +
                  str(resolves) + "\n")
        return

    def dnscheck(self, target):
        """ The function checks the MX, TXT and DMARC records and calculate
        a hash.

        Hash is compared against a previous computed hash """
        for record in ['MX', 'TXT', 'DMARC']:
            if record == "MX":
                data = dns.resolver.query(target, record)
                flag = False
                for result in data:
                    exchange = str(result.exchange)
                    # Any exchange outside the known providers counts as a change.
                    if ('google.com' not in exchange and
                            'googlemail.com' not in exchange and
                            'amazonaws.com' not in exchange):
                        flag = True
                if flag:
                    self.slack.notify_slack(
                        "[+] %s record of %s has been changed. ```%s```" %
                        (record, target, str(data.response)))
            if record == "TXT":
                data = dns.resolver.query(target, 'TXT')
                flag = ""
                for result in data:
                    flag += str(result).strip('"')
                # Compare total TXT length against the expected value in config.
                if len(flag) != self.config['dns'][target]['TXT_LEN']:
                    self.slack.notify_slack(
                        "[+] %s record of %s has been changed. ```%s```" %
                        (record, target, str(data.response)))
            if record == "DMARC":
                domain = "_dmarc." + target
                data = dns.resolver.query(domain, 'TXT')
                # BUGFIX: hashlib.sha1 requires bytes on Python 3 -- encode
                # the record text before hashing (same hexdigest as before).
                if len(data) > 1 or hashlib.sha1(
                        str(data[0]).strip('"').encode()).hexdigest(
                        ) != self.config['dns'][target]['DMARC']:
                    self.slack.notify_slack(
                        "[+] %s record of %s has been changed. ```%s```" %
                        (record, target, str(data.response)))
        return

    ############################ Open Source Intelligence ############################

    def scrape(self, target):
        """ Run the scraper """
        print(self.Y + "[i] Scraper Results" + self.G)
        self.scraper.run_scrape(target)
        return

    def firebase_scan(self):
        """ Check for exposed firebase data based on config. """
        exposed_list = []
        if len(self.config['firebase']['url']) > 0:
            for url in self.config['firebase']['url']:
                req = requests.get(url + "/.json")
                response = json.loads(req.text)
                if req.status_code == 404:
                    continue
                # A locked-down instance answers 401 + "Permission denied";
                # anything else means the database is readable.
                if req.status_code != 401 or response[
                        'error'] != 'Permission denied':
                    exposed_list.append(url)
        if len(exposed_list) > 0:
            self.message = "[+] Misconfigured Firebase: \n```"
            for url in exposed_list:
                self.message += url + "/.json"
            self.slack.notify_slack(self.message + "```")
        return