def main(): """ Begin Main... """ now = datetime.now() print("Starting: " + str(now)) mongo_connector = MongoConnector.MongoConnector() jobs_collection = mongo_connector.get_jobs_connection() dead_dns_collection = mongo_connector.get_dead_dns_connection() google_dns = GoogleDNS.GoogleDNS() results = dead_dns_collection.find({}) for result in results: time.sleep(1) lookup_result = google_dns.fetch_DNS_records(result['fqdn']) if lookup_result == []: print("Removing " + result['fqdn']) dead_dns_collection.remove({"_id": ObjectId(result['_id'])}) # Record status jobs_collection.update_one({'job_name': 'dead_dns_cleanup'}, { '$currentDate': { "updated": True }, "$set": { 'status': 'COMPLETE' } }) now = datetime.now() print("Ending: " + str(now))
def download_amass_data(logger, amass_collection, remote_amass_collection,
                        dns_manager, zones):
    """
    Download the latest OWASP Amass information.
    """
    logger.info("Beginning Amass download")
    now = datetime.now()
    mirror_date = datetime.now() - timedelta(days=7, hours=9)

    amass_results = remote_amass_collection.find(
        {"timestamp": {"$gt": mirror_date}}, {"_id": 0}).batch_size(50)

    google_dns = GoogleDNS.GoogleDNS()

    for result in amass_results:
        zone = check_zones(result["name"], zones)
        if zone is not None:
            time.sleep(1)
            if record_finding(logger, dns_manager, google_dns, zone, result):
                amass_collection.replace_one({"name": result["name"]}, result,
                                             upsert=True)

    # Establish a date four months back
    scrub_date = datetime.now() - timedelta(days=120, hours=9)

    # Remove local data older than the scrub_date and remote data older than
    # the start of this run
    amass_collection.delete_many({"timestamp": {"$lte": scrub_date}})
    remote_amass_collection.delete_many({"timestamp": {"$lte": now}})
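# check_zones is referenced above but defined elsewhere in this script. A
# minimal sketch, assuming it simply maps a hostname onto the tracked zone
# it falls under (returning None for untracked names):
def check_zones(name, zones):
    """Hypothetical sketch: return the tracked zone containing name, else None."""
    for zone in zones:
        if name == zone or name.endswith("." + zone):
            return zone
    return None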
def main(): """ Begin Main... """ logger = LoggingUtil.create_log(__name__) now = datetime.now() print ("Starting: " + str(now)) logger.info("Starting...") mongo_connector = MongoConnector.MongoConnector() dead_dns_collection = mongo_connector.get_dead_dns_connection() jobs_manager = JobsManager.JobsManager(mongo_connector, 'dead_dns_cleanup') jobs_manager.record_job_start() google_dns = GoogleDNS.GoogleDNS() results = dead_dns_collection.find({}) for result in results: time.sleep(1) lookup_result = google_dns.fetch_DNS_records(result['fqdn']) if lookup_result == []: logger.info ("Removing " + result['fqdn']) dead_dns_collection.remove({"_id":ObjectId(result['_id'])}) # Record status jobs_manager.record_job_complete() now = datetime.now() print ("Ending: " + str(now)) logger.info("Complete.")
def parse_file(vertices_file, reversed_zones, dns_manager):
    """
    For each vertices file, iterate over the entries searching for matching zones.
    """
    google_dns = GoogleDNS.GoogleDNS()

    # Use a context manager so the file handle is closed when parsing ends.
    with open(vertices_file, "r") as vertices:
        for line in vertices:
            parts = line.split("\t")
            if len(parts) > 1:
                domain = parts[1].rstrip("\n")
                reversed_zone = check_zones(domain, reversed_zones)
                if reversed_zone is not None:
                    matched_domain = swap_order(domain)
                    matched_zone = swap_order(reversed_zone)
                    results = google_dns.fetch_DNS_records(matched_domain)
                    for result in results:
                        if result['fqdn'].endswith("." + matched_zone) \
                                or result['fqdn'] == matched_zone:
                            print("Inserting: " + matched_domain)
                            result['created'] = datetime.now()
                            result['status'] = 'confirmed'
                            result['zone'] = matched_zone
                            dns_manager.insert_record(result, "common_crawl")
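# swap_order is referenced above but not shown in this excerpt. Common Crawl's
# host-level graph stores names with their labels reversed (e.g.
# "com.example.www" for "www.example.com"), so a plausible sketch is simply:
def swap_order(domain):
    """Hypothetical sketch: reverse the dot-separated labels of a name."""
    return ".".join(reversed(domain.split(".")))

# For example, swap_order("com.example.www") would return "www.example.com".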
def main(): """ Begin Main... """ logger = LoggingUtil.create_log(__name__) now = datetime.now() print ("Starting: " + str(now)) logger.info("Starting...") dns_types = {"a":1, "ns":2, "cname":5, "soa":6, "ptr":12, "hinfo": 13, "mx": 15, "txt":16, "aaaa":28, "srv":33, "naptr": 35, "ds": 43, "rrsig": 46, "dnskey": 48} mongo_connector = MongoConnector.MongoConnector() all_dns_collection = mongo_connector.get_all_dns_connection() jobs_manager = JobsManager.JobsManager(mongo_connector, 'marinus_dns') jobs_manager.record_job_start() dns_manager = DNSManager.DNSManager(mongo_connector) zones = ZoneManager.get_distinct_zones(mongo_connector) google_dns = GoogleDNS.GoogleDNS() for zone in zones: time.sleep(1) for dtype, dnum in dns_types.items(): result = google_dns.fetch_DNS_records(zone, dnum) if result == []: logger.debug("No records found for " + zone) else: new_record = result[0] new_record['status'] = 'confirmed' new_record['zone'] = zone new_record['created'] = datetime.now() logger.debug ("Found " + dtype + " for: " + zone) dns_manager.insert_record(new_record, "marinus") logger.info("Starting SOA Search") soa_searches = find_sub_zones(all_dns_collection) for entry in soa_searches: time.sleep(1) result = google_dns.fetch_DNS_records(zone, dns_types['soa']) if result != []: new_record = result[0] new_record['status'] = 'confirmed' new_record['zone'] = get_fld_from_value(entry, '') new_record['created'] = datetime.now() logger.debug ("Found SOA: " + entry) if new_record['zone'] != '': dns_manager.insert_record(new_record, "marinus") jobs_manager.record_job_complete() now = datetime.now() print ("Complete: " + str(now)) logger.info("Complete.")
def update_rdns(logger, rdns_file, zones, dns_mgr, mongo_connector):
    """
    Insert any matching Sonar RDNS records in the Marinus database.
    """
    rdns_collection = mongo_connector.get_sonar_reverse_dns_connection()
    g_dns = GoogleDNS.GoogleDNS()

    with open(rdns_file, "r") as read_f:
        for line in read_f:
            try:
                data = json.loads(line)
            except ValueError:
                continue
            except:
                raise

            try:
                domain = data["value"]
                ip_addr = data["name"]
                zone = find_zone(domain, zones)
            except KeyError:
                domain = ""
                ip_addr = ""
                zone = ""

            timestamp = data["timestamp"]

            if zone != "" and domain != "":
                logger.debug("Domain matches! " + domain + " Zone: " + zone)
                result = mongo_connector.perform_count(rdns_collection,
                                                       {"ip": ip_addr})

                if result == 0:
                    insert_json = {}
                    insert_json["ip"] = ip_addr
                    insert_json["zone"] = zone
                    insert_json["fqdn"] = domain
                    insert_json["status"] = "unknown"
                    insert_json["sonar_timestamp"] = int(timestamp)
                    insert_json["created"] = datetime.now()
                    insert_json["updated"] = datetime.now()
                    mongo_connector.perform_insert(rdns_collection, insert_json)
                else:
                    rdns_collection.update_one(
                        {"ip": ip_addr},
                        {"$set": {"fqdn": domain},
                         "$currentDate": {"updated": True}},
                    )

                check_for_ptr_record(ip_addr, g_dns, zones, dns_mgr)
def update_rdns(logger, rdns_file, rdns_collection, dns_manager, ip_manager, zones):
    """
    Search the RDNS file and insert relevant records into the database.
    """
    g_dns = GoogleDNS.GoogleDNS()

    with open(rdns_file, "r") as rdns_f:
        for line in rdns_f:
            try:
                data = json.loads(line)
            except ValueError:
                continue
            except:
                raise

            try:
                ip_addr = data['name']
            except KeyError:
                ip_addr = None

            try:
                domain = data['value']
            except KeyError:
                domain = None

            timestamp = data['timestamp']

            if domain is not None and ip_addr is not None \
                    and ip_manager.is_tracked_ip(ip_addr):
                logger.debug("Matched RDNS " + ip_addr)
                zone = find_zone(domain, zones)
                result = rdns_collection.count_documents({'ip': ip_addr})

                if result == 0:
                    insert_json = {}
                    insert_json['ip'] = ip_addr
                    insert_json['zone'] = zone
                    insert_json['fqdn'] = domain
                    insert_json['status'] = 'unknown'
                    insert_json['sonar_timestamp'] = int(timestamp)
                    insert_json['created'] = datetime.now()
                    insert_json['updated'] = datetime.now()
                    rdns_collection.insert_one(insert_json)
                else:
                    rdns_collection.update_one({"ip": ip_addr},
                                               {'$set': {"fqdn": domain},
                                                '$currentDate': {"updated": True}})

                check_for_ptr_record(ip_addr, dns_manager, g_dns, zones)
def main():
    now = datetime.now()
    print("Starting: " + str(now))

    dns_types = {"a": 1, "ns": 2, "cname": 5, "soa": 6, "ptr": 12,
                 "hinfo": 13, "mx": 15, "txt": 16, "aaaa": 28, "srv": 33,
                 "naptr": 35, "ds": 43, "rrsig": 46, "dnskey": 48}

    mongo_connector = MongoConnector.MongoConnector()
    all_dns_collection = mongo_connector.get_all_dns_connection()
    jobs_collection = mongo_connector.get_jobs_connection()

    dns_manager = DNSManager.DNSManager(mongo_connector)
    zones = ZoneManager.get_distinct_zones(mongo_connector)
    google_dns = GoogleDNS.GoogleDNS()

    for zone in zones:
        time.sleep(1)
        for dtype, dnum in dns_types.items():
            result = google_dns.fetch_DNS_records(zone, dnum)
            if result == []:
                print("No records found for " + zone)
            else:
                new_record = result[0]
                new_record['status'] = 'confirmed'
                new_record['zone'] = zone
                new_record['created'] = datetime.now()
                print("Found " + dtype + " for: " + zone)
                dns_manager.insert_record(new_record, "marinus")

    print("Starting SOA Search")
    soa_searches = find_sub_zones(all_dns_collection)
    for entry in soa_searches:
        time.sleep(1)
        # Query the SOA record for the sub-zone entry itself rather than
        # reusing the stale zone variable from the loop above.
        result = google_dns.fetch_DNS_records(entry, dns_types['soa'])
        if result != []:
            new_record = result[0]
            new_record['status'] = 'confirmed'
            new_record['zone'] = get_fld_from_value(entry, '')
            new_record['created'] = datetime.now()
            print("Found SOA: " + entry)
            if new_record['zone'] != '':
                dns_manager.insert_record(new_record, "marinus")

    jobs_collection.update_one({'job_name': 'marinus_dns'},
                               {'$currentDate': {"updated": True},
                                "$set": {'status': 'COMPLETE'}})

    now = datetime.now()
    print("Complete: " + str(now))
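# find_sub_zones is defined elsewhere and its exact query is not shown in this
# excerpt. A rough sketch under the assumption that it derives candidate
# delegated sub-zones from the fqdns already stored in the all_dns collection:
def find_sub_zones(all_dns_collection):
    """Hypothetical sketch: collect parent names of known fqdns to probe for SOAs."""
    candidates = set()
    for fqdn in all_dns_collection.distinct("fqdn"):
        parts = fqdn.split(".")
        # A name such as a.b.example.org suggests b.example.org may be a sub-zone.
        if len(parts) > 3:
            candidates.add(".".join(parts[1:]))
    return candidates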
def update_dns(logger, dns_file, zones, dns_mgr):
    """
    Insert any matching Sonar DNS records in the Marinus database.
    """
    with open(dns_file, "r") as dns_f:
        for line in dns_f:
            try:
                data = json.loads(line)
            except ValueError:
                continue
            except:
                raise

            dtype = data["type"]

            try:
                value = data["value"]
                domain = data["name"]
                zone = find_zone(domain, zones)
            except KeyError:
                logger.warning("Error with line: " + line)
                value = ""
                zone = ""
                domain = ""

            timestamp = data["timestamp"]

            if zone != "" and value != "":
                logger.debug("Domain matches! " + domain + " Zone: " + zone)

                if dtype.startswith("unk_in_"):
                    # Sonar didn't recognize the response type.
                    type_num = int(dtype[7:])
                    g_dns = GoogleDNS.GoogleDNS()
                    # Use a distinct loop variable so the record's value
                    # isn't clobbered by the type lookup.
                    for key, type_value in g_dns.DNS_TYPES.items():
                        if type_value == type_num:
                            dtype = key
                            break
                    if dtype.startswith("unk_in_"):
                        # Marinus didn't recognize it either.
                        logger.warning("Unknown type: " + dtype)

                insert_json = {}
                insert_json["fqdn"] = domain
                insert_json["zone"] = zone
                insert_json["type"] = dtype
                insert_json["status"] = "unknown"
                insert_json["value"] = value
                insert_json["sonar_timestamp"] = int(timestamp)
                insert_json["created"] = datetime.now()
                dns_mgr.insert_record(insert_json, "sonar_dns")
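# Example of the "unk_in_" normalization above: Rapid7 Sonar labels answer
# types it could not decode with the numeric RR type. If GoogleDNS.DNS_TYPES
# contains, say, {"caa": 257}, then an incoming dtype of "unk_in_257" is
# rewritten to "caa"; a number absent from the map keeps its "unk_in_" label
# and triggers the warning. (The CAA entry is illustrative; 257 is the IANA
# RR type number for CAA.)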
def update_rdns(logger, rdns_file, zones, mongo_connector):
    """
    Insert any matching Sonar RDNS records in the Marinus database.
    """
    rdns_collection = mongo_connector.get_sonar_reverse_dns_connection()
    g_dns = GoogleDNS.GoogleDNS()

    with open(rdns_file, "r") as read_f:
        for line in read_f:
            try:
                data = json.loads(line)
            except ValueError:
                continue
            except:
                raise

            try:
                domain = data['value']
                ip_addr = data['name']
                zone = find_zone(domain, zones)
            except KeyError:
                domain = ""
                ip_addr = ""
                zone = ""

            timestamp = data['timestamp']

            if zone != "" and domain != "":
                logger.debug("Domain matches! " + domain + " Zone: " + zone)
                result = rdns_collection.count_documents({'ip': ip_addr})

                if result == 0:
                    insert_json = {}
                    insert_json['ip'] = ip_addr
                    insert_json['zone'] = zone
                    insert_json['fqdn'] = domain
                    insert_json['status'] = 'unknown'
                    insert_json['sonar_timestamp'] = int(timestamp)
                    insert_json['created'] = datetime.now()
                    insert_json['updated'] = datetime.now()
                    rdns_collection.insert_one(insert_json)
                else:
                    rdns_collection.update_one({"ip": ip_addr},
                                               {'$set': {"fqdn": domain},
                                                '$currentDate': {"updated": True}})

                check_for_ptr_record(ip_addr, g_dns, zones)
def update_dns(dns_file, zones, dns_mgr):
    """
    Insert any matching Sonar DNS records in the Marinus database.
    """
    with open(dns_file, "r") as dns_f:
        for line in dns_f:
            try:
                data = json.loads(line)
            except ValueError:
                continue
            except:
                raise

            dtype = data['type']

            try:
                value = data['value']
                domain = data['name']
                zone = find_zone(domain, zones)
            except KeyError:
                print("Error with line: " + line)
                value = ""
                zone = ""
                domain = ""

            timestamp = data['timestamp']

            if zone != "" and value != "":
                print("Domain matches! " + domain + " Zone: " + zone)

                if dtype.startswith("unk_in_"):
                    # Sonar didn't recognize the response type.
                    type_num = int(dtype[7:])
                    g_dns = GoogleDNS.GoogleDNS()
                    # Use a distinct loop variable so the record's value
                    # isn't clobbered by the type lookup.
                    for key, type_value in g_dns.DNS_TYPES.items():
                        if type_value == type_num:
                            dtype = key
                            break
                    if dtype.startswith("unk_in_"):
                        # Marinus didn't recognize it either.
                        print("WARNING: Unknown type: " + dtype)

                insert_json = {}
                insert_json['fqdn'] = domain
                insert_json['zone'] = zone
                insert_json['type'] = dtype
                insert_json['status'] = 'unknown'
                insert_json['value'] = value
                insert_json['sonar_timestamp'] = int(timestamp)
                insert_json['created'] = datetime.now()
                dns_mgr.insert_record(insert_json, "sonar_dns")
def find_reverse_dns(self, ip):
    """
    Perform a reverse DNS lookup of the IP
    """
    if isinstance(ip, str):
        ip_addr = IPAddress(ip)
        google_dns = GoogleDNS.GoogleDNS()
        results = google_dns.fetch_DNS_records(ip_addr.reverse_dns,
                                               google_dns.DNS_TYPES["ptr"])
        if len(results) > 0:
            return results[0]['value']

    return None
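# Usage sketch for find_reverse_dns: netaddr's IPAddress.reverse_dns property
# builds the PTR query name, so a lookup for "8.8.8.8" queries
# "8.8.8.8.in-addr.arpa" and returns the PTR value if one exists. The example
# below is illustrative, not captured output.
#
#   >>> from netaddr import IPAddress
#   >>> IPAddress("8.8.8.8").reverse_dns
#   '8.8.8.8.in-addr.arpa.'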
def main(): """ This function extract the IP address ranges from the TXT records and stores them in gcp_ips collection within the database. """ logger = LoggingUtil.create_log(__name__) now = datetime.now() print("Starting: " + str(now)) logger.info("Starting...") mongo_connector = MongoConnector.MongoConnector() gcp_collection = mongo_connector.get_gcp_ips_connection() google_dns = GoogleDNS.GoogleDNS() jobs_manager = JobsManager.JobsManager(mongo_connector, 'get_gcp_ranges') jobs_manager.record_job_start() ip_ranges = recursive_search(logger, "_cloud-netblocks.googleusercontent.com", google_dns) ipv4_ranges = [] ipv6_ranges = [] for entry in ip_ranges: parts = entry.split(":", 1) if parts[0] == "ip4" and parts[1] not in ipv4_ranges: ipv4_ranges.append({"ip_prefix": parts[1]}) elif parts[0] == "ip6" and parts[1] not in ipv6_ranges: ipv6_ranges.append({"ipv6_prefix": parts[1]}) else: logger.warning("Unrecognized data: " + entry) new_data = {} new_data['prefixes'] = ipv4_ranges new_data['ipv6_prefixes'] = ipv6_ranges new_data['created'] = now gcp_collection.remove({}) gcp_collection.insert(new_data) jobs_manager.record_job_complete() now = datetime.now() print("Ending: " + str(now)) logger.info("Complete.")
def add_new_domain_names(hostnames, zones, mongo_connector):
    """
    Perform a GoogleDNS lookup on all identified domain names and add them
    to the DNS tracker.
    """
    google_dns = GoogleDNS.GoogleDNS()
    dns_manager = DNSManager.DNSManager(mongo_connector)

    for hostname in hostnames:
        results = google_dns.fetch_DNS_records(hostname)

        if results != []:
            for result in results:
                temp_zone = get_tracked_zone(result['fqdn'], zones)
                if temp_zone is not None:
                    new_record = {"fqdn": result['fqdn']}
                    new_record['zone'] = temp_zone
                    new_record['created'] = datetime.now()
                    new_record['type'] = result['type']
                    new_record['value'] = result['value']
                    new_record['status'] = 'unknown'
                    dns_manager.insert_record(new_record, "ssl")
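# get_tracked_zone is referenced throughout these scripts but defined
# elsewhere. A minimal sketch, assuming it mirrors the check_zones helper
# above: return the tracked zone that the fqdn belongs to, or None.
def get_tracked_zone(fqdn, zones):
    """Hypothetical sketch: map an fqdn to its tracked zone."""
    for zone in zones:
        if fqdn == zone or fqdn.endswith("." + zone):
            return zone
    return None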
def main(): """ Begin Main... """ logger = LoggingUtil.create_log(__name__) now = datetime.now() print("Starting: " + str(now)) logger.info("Starting...") mongo_connector = MongoConnector.MongoConnector() dns_manager = DNSManager.DNSManager(mongo_connector) google_dns = GoogleDNS.GoogleDNS() jobs_manager = JobsManager.JobsManager(mongo_connector, 'extract_ssl_domains') jobs_manager.record_job_start() parser = argparse.ArgumentParser( description='Search TLS certificates for additional DNS names') parser.add_argument('--zgrab_version', default=2, type=int, choices=[1, 2], metavar="version", help='The version of ZGrab used to collect data') args = parser.parse_args() dns_names = [] round_two = [] zones = ZoneManager.get_distinct_zones(mongo_connector) # Collect the list of domains from the SSL Certificates extract_ct_certificate_names(dns_names, mongo_connector) # extract_censys_certificate_names(dns_names, mongo_connector) if args.zgrab_version == 1: extract_zgrab_certificate_names(logger, dns_names, mongo_connector) else: extract_zgrab2_certificate_names(logger, dns_names, mongo_connector) input_list = [] # Some SSL certificates are for multiple domains. # The tracked company may not own all domains. # Therefore, we filter to only the root domains that belong to the tracked company. logger.info("Pre-filter list: " + str(len(dns_names))) for hostname in dns_names: if not hostname.startswith("*"): zone = get_tracked_zone(hostname, zones) if zone != None: ips = google_dns.fetch_DNS_records(hostname) # Pause to prevent DoS-ing of Google's HTTPS DNS Service time.sleep(1) if ips != []: for ip_addr in ips: temp_zone = get_tracked_zone(ip_addr['fqdn'], zones) if temp_zone is not None: record = {"fqdn": ip_addr['fqdn']} record['zone'] = temp_zone record['created'] = datetime.now() record['type'] = ip_addr['type'] record['value'] = ip_addr['value'] record['status'] = 'unknown' input_list.append(record) if ip_addr['type'] == "cname" and is_tracked_zone( ip_addr['value'], zones): add_to_round_two(ip_addr['value'], round_two) else: logger.warning("Failed IP Lookup for: " + hostname) else: logger.warning("Failed match on zone for: " + hostname) else: logger.warning("Skipping wildcard: " + hostname) dead_dns_collection = mongo_connector.get_dead_dns_connection() # Some DNS records will be CNAME records pointing to other tracked domains. # This is a single level recursion to lookup those domains. logger.info("Round Two list: " + str(len(round_two))) for hostname in round_two: zone = get_tracked_zone(hostname, zones) if zone != None: ips = google_dns.fetch_DNS_records(hostname) time.sleep(1) if ips != []: for ip_addr in ips: temp_zone = get_tracked_zone(ip_addr['fqdn'], zones) if temp_zone is not None: record = {"fqdn": ip_addr['fqdn']} record['zone'] = temp_zone record['created'] = datetime.now() record['type'] = ip_addr['type'] record['value'] = ip_addr['value'] record['status'] = 'unknown' input_list.append(record) else: logger.warning("Failed IP Lookup for: " + hostname) original_record = dns_manager.find_one({"fqdn": hostname}, "ssl") if original_record != None: original_record.pop("_id") dead_dns_collection.insert(original_record) else: logger.warning("Failed match on zone for: " + hostname) # Record all the results. 
dns_manager.remove_by_source("ssl") logger.info("List length: " + str(len(input_list))) for final_result in input_list: dns_manager.insert_record(final_result, "ssl") # Record status jobs_manager.record_job_complete() now = datetime.now() print("Ending: " + str(now)) logger.info("Complete")
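# add_to_round_two (and the add_to_list variant used by other scripts in this
# collection) is not shown in this excerpt. A minimal sketch, assuming it is
# a simple de-duplicating append used to queue CNAME targets for the next pass:
def add_to_round_two(value, round_two):
    """Hypothetical sketch: queue value for the next pass, skipping duplicates."""
    if value not in round_two:
        round_two.append(value)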
def main(): """ Begin Main... """ now = datetime.now() print("Starting: " + str(now)) mongo_connector = MongoConnector.MongoConnector() dns_manager = DNSManager.DNSManager(mongo_connector) jobs_manager = JobsManager.JobsManager(mongo_connector, 'extract_mx_domains') google_dns = GoogleDNS.GoogleDNS() jobs_manager.record_job_start() dns_names = [] round_two = [] zones = ZoneManager.get_distinct_zones(mongo_connector) # Collect the list of domains from the MX Records extract_mx_names(dns_names, dns_manager) input_list = [] # Some MX records point to the third-party domains. # Therefore, we filter to only the root domains that belong to the tracked company. print("Pre-filter list: " + str(len(dns_names))) for hostname in dns_names: zone = get_tracked_zone(hostname, zones) if zone != None: ips = google_dns.fetch_DNS_records(hostname) # Pause to prevent DoS-ing of Google's HTTPS DNS Service time.sleep(1) if ips != []: for ip_addr in ips: temp_zone = get_tracked_zone(ip_addr['fqdn'], zones) if temp_zone is not None: record = {"fqdn": ip_addr['fqdn']} record['zone'] = temp_zone record['created'] = datetime.now() record['type'] = ip_addr['type'] record['value'] = ip_addr['value'] record['status'] = 'unknown' input_list.append(record) if ip_addr['type'] == "cname" and is_tracked_zone( ip_addr['value'], zones): add_to_round_two(ip_addr['value'], round_two) else: print("Failed IP Lookup for: " + hostname) else: print("Failed match on zone for: " + hostname) dead_dns_collection = mongo_connector.get_dead_dns_connection() # Some DNS records will be CNAME records pointing to other tracked domains. # This is a single level recursion to lookup those domains. print("Round Two list: " + str(len(round_two))) for hostname in round_two: zone = get_tracked_zone(hostname, zones) if zone != None: ips = google_dns.fetch_DNS_records(hostname) time.sleep(1) if ips != []: for ip_addr in ips: temp_zone = get_tracked_zone(ip_addr['fqdn'], zones) if temp_zone is not None: record = {"fqdn": ip_addr['fqdn']} record['zone'] = temp_zone record['created'] = datetime.now() record['type'] = ip_addr['type'] record['value'] = ip_addr['value'] record['status'] = 'unknown' input_list.append(record) else: print("Failed IP Lookup for: " + hostname) original_record = dns_manager.find_one({"fqdn": hostname}, "mx") if original_record != None: original_record.pop("_id") dead_dns_collection.insert(original_record) else: print("Failed match on zone for: " + hostname) # Record all the results. dns_manager.remove_by_source("mx") print("List length: " + str(len(input_list))) for final_result in input_list: dns_manager.insert_record(final_result, "mx") # Record status jobs_manager.record_job_complete() now = datetime.now() print("Ending: " + str(now))
def update_rdns(logger, rdns_file, mongo_connection, dns_manager, ip_manager, zones):
    """
    Search the RDNS file and insert relevant records into the database.
    """
    g_dns = GoogleDNS.GoogleDNS()
    rdns_collection = mongo_connection.get_sonar_reverse_dns_connection()

    with open(rdns_file, "r") as rdns_f:
        for line in rdns_f:
            try:
                data = json.loads(line)
            except ValueError:
                continue
            except:
                raise

            if "type" in data and data["type"] != "ptr":
                continue

            try:
                ip_addr = data["name"]
            except KeyError:
                ip_addr = None

            try:
                domain = data["value"]
            except KeyError:
                domain = None

            timestamp = data["timestamp"]

            if domain is not None and ip_addr is not None \
                    and ip_manager.is_tracked_ip(ip_addr):
                logger.debug("Matched RDNS " + ip_addr)
                zone = find_zone(domain, zones)
                result = mongo_connection.perform_count(rdns_collection,
                                                        {"ip": ip_addr})

                if result == 0:
                    insert_json = {}
                    insert_json["ip"] = ip_addr
                    insert_json["zone"] = zone
                    insert_json["fqdn"] = domain
                    insert_json["status"] = "unknown"
                    insert_json["sonar_timestamp"] = int(timestamp)
                    insert_json["created"] = datetime.now()
                    insert_json["updated"] = datetime.now()
                    mongo_connection.perform_insert(rdns_collection, insert_json)
                else:
                    rdns_collection.update_one(
                        {"ip": ip_addr},
                        {"$set": {"fqdn": domain},
                         "$currentDate": {"updated": True}},
                    )

                check_for_ptr_record(ip_addr, dns_manager, g_dns, zones)
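# check_for_ptr_record is defined elsewhere; its argument order also varies
# between the scripts above, so each file appears to carry its own copy. A
# rough sketch under the assumption that it performs a PTR lookup for the IP
# and records any answer that falls inside a tracked zone (the "sonar_rdns"
# source label here is an assumption):
def check_for_ptr_record(ip_addr, dns_manager, g_dns, zones):
    """Hypothetical sketch: record a PTR answer when it maps to a tracked zone."""
    ip_obj = IPAddress(ip_addr)
    results = g_dns.fetch_DNS_records(ip_obj.reverse_dns,
                                      g_dns.DNS_TYPES["ptr"])
    for result in results:
        zone = find_zone(result["value"], zones)
        if zone != "":
            record = {"fqdn": result["value"], "zone": zone,
                      "type": "ptr", "value": ip_addr,
                      "status": "unknown", "created": datetime.now()}
            dns_manager.insert_record(record, "sonar_rdns")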
def main(): """ Begin Main... """ logger = LoggingUtil.create_log(__name__) now = datetime.now() print("Starting: " + str(now)) logger.info("Starting...") mongo_connector = MongoConnector.MongoConnector() dns_manager = DNSManager.DNSManager(mongo_connector) jobs_manager = JobsManager.JobsManager(mongo_connector, "sonar_round_two") google_dns = GoogleDNS.GoogleDNS() jobs_manager.record_job_start() zones = ZoneManager.get_distinct_zones(mongo_connector) results = dns_manager.find_multiple({"type": "cname"}, "sonar_dns") round_two = [] round_three = [] # Get all the CNAME values from all_dns and append them to round_two for result in results: if is_tracked_zone(result["value"], zones): round_two.append(result["value"]) logger.info("Round two pre-list: " + str(len(round_two))) dead_dns_collection = mongo_connector.get_dead_dns_connection() for value in round_two: is_present = dns_manager.find_count({"fqdn": value}, "sonar_dns") if is_present == 0: logger.debug(value + " not found") time.sleep(1) result = google_dns.fetch_DNS_records(value) if result == []: logger.debug("Unable to resolve") original_records = dns_manager.find_multiple({"value": value}, "sonar_dns") for record in original_records: check = dead_dns_collection.count_documents( {"fqdn": record["fqdn"]}) if check == 0: record.pop("_id") dead_dns_collection.insert(record) else: for entry in result: if is_tracked_zone(entry["fqdn"], zones): new_record = entry new_record["status"] = "unconfirmed" new_record["zone"] = get_fld_from_value(value, "") new_record["created"] = datetime.now() if result[0]["type"] == "cname" and is_tracked_zone( entry["value"], zones): add_to_list(entry["value"], round_three) logger.debug("Found: " + value) if new_record["zone"] != "": dns_manager.insert_record(new_record, "marinus") # For each tracked CName result found in the first pass across Sonar DNS logger.info("Round Three length: " + str(len(round_three))) for hostname in round_three: zone = get_fld_from_value(hostname, "") if zone != None and zone != "": ips = google_dns.fetch_DNS_records(hostname) time.sleep(1) if ips != []: for ip_addr in ips: if is_tracked_zone(ip_addr["fqdn"], zones): record = {"fqdn": ip_addr["fqdn"]} record["zone"] = get_fld_from_value( ip_addr["fqdn"], "") record["created"] = datetime.now() record["type"] = ip_addr["type"] record["value"] = ip_addr["value"] record["status"] = "unconfirmed" dns_manager.insert_record(new_record, "marinus") else: original_record = dns_manager.find_one({"fqdn": hostname}, "marinus") if original_record != None: original_record.pop("_id") dead_dns_collection.insert(original_record) logger.debug("Failed IP Lookup for: " + hostname) else: logger.debug("Failed match on zone for: " + hostname) # Record status jobs_manager.record_job_complete() now = datetime.now() print("Ending: " + str(now)) logger.info("Complete.")
def main(): """ Begin Main... """ now = datetime.now() print("Starting: " + str(now)) mongo_connector = MongoConnector.MongoConnector() dns_manager = DNSManager.DNSManager(mongo_connector) jobs_manager = JobsManager.JobsManager(mongo_connector, 'sonar_round_two') google_dns = GoogleDNS.GoogleDNS() jobs_manager.record_job_start() zones = ZoneManager.get_distinct_zones(mongo_connector) results = dns_manager.find_multiple({'type': 'cname'}, "sonar_dns") round_two = [] round_three = [] # Get all the CNAME values from all_dns and append them to round_two for result in results: if is_tracked_zone(result['value'], zones): round_two.append(result['value']) print("Round two pre-list: " + str(len(round_two))) dead_dns_collection = mongo_connector.get_dead_dns_connection() for value in round_two: is_present = dns_manager.find_count({'fqdn': value}, "sonar_dns") if is_present == 0: print(value + " not found") time.sleep(1) result = google_dns.fetch_DNS_records(value) if result == []: print("Unable to resolve") original_records = dns_manager.find_multiple({"value": value}, "sonar_dns") for record in original_records: check = dead_dns_collection.find({ 'fqdn': record['fqdn'] }).count() if check == 0: record.pop("_id") dead_dns_collection.insert(record) else: for entry in result: if is_tracked_zone(entry['fqdn'], zones): new_record = entry new_record['status'] = 'unconfirmed' new_record['zone'] = get_fld_from_value(value, '') new_record['created'] = datetime.now() if result[0]['type'] == "cname" and is_tracked_zone( entry['value'], zones): add_to_list(entry['value'], round_three) print("Found: " + value) if new_record['zone'] != '': dns_manager.insert_record(new_record, "marinus") # For each tracked CName result found in the first pass across Sonar DNS print("Round Three length: " + str(len(round_three))) for hostname in round_three: zone = get_fld_from_value(hostname, '') if zone != None and zone != '': ips = google_dns.fetch_DNS_records(hostname) time.sleep(1) if ips != []: for ip_addr in ips: if is_tracked_zone(ip_addr['fqdn'], zones): record = {"fqdn": ip_addr['fqdn']} record['zone'] = get_fld_from_value( ip_addr['fqdn'], '') record['created'] = datetime.now() record['type'] = ip_addr['type'] record['value'] = ip_addr['value'] record['status'] = 'unconfirmed' dns_manager.insert_record(new_record, "marinus") else: original_record = dns_manager.find_one({"fqdn": hostname}, "marinus") if original_record != None: original_record.pop("_id") dead_dns_collection.insert(original_record) print("Failed IP Lookup for: " + hostname) else: print("Failed match on zone for: " + hostname) # Record status jobs_manager.record_job_complete() now = datetime.now() print("Ending: " + str(now))
def main(): """ Begin Main... """ logger = LoggingUtil.create_log(__name__) now = datetime.now() print("Starting: " + str(now)) logger.info("Starting...") mongo_connector = MongoConnector.MongoConnector() dns_manager = DNSManager.DNSManager(mongo_connector) jobs_manager = JobsManager.JobsManager(mongo_connector, "extract_vt_domains") google_dns = GoogleDNS.GoogleDNS() jobs_manager.record_job_start() round_two = [] zones = ZoneManager.get_distinct_zones(mongo_connector) vt_collection = mongo_connector.get_virustotal_connection() vt_results = vt_collection.find({ "subdomains": { "$exists": True } }, { "zone": 1, "subdomains": 1 }).batch_size(30) input_list = [] # For each result found in the first pass across VirusTotal for result in vt_results: # Pause to prevent DoS-ing of Google's HTTPS DNS Service time.sleep(1) for hostname in result["subdomains"]: ips = google_dns.fetch_DNS_records(hostname) if ips != []: for ip_addr in ips: temp_zone = get_tracked_zone(ip_addr["fqdn"], zones) if temp_zone is not None: record = {"fqdn": ip_addr["fqdn"]} record["zone"] = temp_zone record["created"] = datetime.now() record["type"] = ip_addr["type"] record["value"] = ip_addr["value"] record["status"] = "unknown" input_list.append(record) if ip_addr["type"] == "cname" and is_tracked_zone( ip_addr["value"], zones): add_to_list(ip_addr["value"], round_two) else: logger.warning("Failed IP Lookup for: " + hostname) dead_dns_collection = mongo_connector.get_dead_dns_connection() # For each tracked CName result found in the first pass across VirusTotal logger.info("Round Two length: " + str(len(round_two))) for hostname in round_two: zone = get_tracked_zone(hostname, zones) if zone != None: ips = google_dns.fetch_DNS_records(hostname) time.sleep(1) if ips != []: for ip_addr in ips: temp_zone = get_tracked_zone(ip_addr["fqdn"], zones) if temp_zone is not None: record = {"fqdn": ip_addr["fqdn"]} record["zone"] = temp_zone record["created"] = datetime.now() record["type"] = ip_addr["type"] record["value"] = ip_addr["value"] record["status"] = "unknown" input_list.append(record) else: original_record = dns_manager.find_one({"fqdn": hostname}, "virustotal") if original_record != None: original_record.pop("_id") dead_dns_collection.insert(original_record) logger.warning("Failed IP Lookup for: " + hostname) else: logger.warning("Failed match on zone for: " + hostname) # Update the database dns_manager.remove_by_source("virustotal") logger.info("List length: " + str(len(input_list))) for final_result in input_list: dns_manager.insert_record(final_result, "virustotal") # Record status jobs_manager.record_job_complete() now = datetime.now() print("Ending: " + str(now)) logger.info("Complete.")
def main(): """ Begin Main... """ now = datetime.now() print("Starting: " + str(now)) mongo_connector = MongoConnector.MongoConnector() dns_manager = DNSManager.DNSManager(mongo_connector) google_dns = GoogleDNS.GoogleDNS() jobs_collection = mongo_connector.get_jobs_connection() dns_names = [] round_two = [] zones = ZoneManager.get_distinct_zones(mongo_connector) # Collect the list of domains from the SSL Certificates extract_ct_certificate_names(dns_names, mongo_connector) # extract_censys_certificate_names(dns_names, mongo_connector) extract_zgrab_certificate_names(dns_names, mongo_connector) input_list = [] # Some SSL certificates are for multiple domains. # The tracked company may not own all domains. # Therefore, we filter to only the root domains that belong to the tracked company. print("Pre-filter list: " + str(len(dns_names))) for hostname in dns_names: if not hostname.startswith("*"): zone = get_tracked_zone(hostname, zones) if zone != None: ips = google_dns.fetch_DNS_records(hostname) # Pause to prevent DoS-ing of Google's HTTPS DNS Service time.sleep(1) if ips != []: for ip_addr in ips: temp_zone = get_tracked_zone(ip_addr['fqdn'], zones) if temp_zone is not None: record = {"fqdn": ip_addr['fqdn']} record['zone'] = temp_zone record['created'] = datetime.now() record['type'] = ip_addr['type'] record['value'] = ip_addr['value'] record['status'] = 'unknown' input_list.append(record) if ip_addr['type'] == "cname" and is_tracked_zone( ip_addr['value'], zones): add_to_round_two(ip_addr['value'], round_two) else: print("Failed IP Lookup for: " + hostname) else: print("Failed match on zone for: " + hostname) else: print("Skipping wildcard: " + hostname) dead_dns_collection = mongo_connector.get_dead_dns_connection() # Some DNS records will be CNAME records pointing to other tracked domains. # This is a single level recursion to lookup those domains. print("Round Two list: " + str(len(round_two))) for hostname in round_two: zone = get_tracked_zone(hostname, zones) if zone != None: ips = google_dns.fetch_DNS_records(hostname) time.sleep(1) if ips != []: for ip_addr in ips: temp_zone = get_tracked_zone(ip_addr['fqdn'], zones) if temp_zone is not None: record = {"fqdn": ip_addr['fqdn']} record['zone'] = temp_zone record['created'] = datetime.now() record['type'] = ip_addr['type'] record['value'] = ip_addr['value'] record['status'] = 'unknown' input_list.append(record) else: print("Failed IP Lookup for: " + hostname) original_record = dns_manager.find_one({"fqdn": hostname}, "ssl") if original_record != None: original_record.pop("_id") dead_dns_collection.insert(original_record) else: print("Failed match on zone for: " + hostname) # Record all the results. dns_manager.remove_by_source("ssl") print("List length: " + str(len(input_list))) for final_result in input_list: dns_manager.insert_record(final_result, "ssl") # Record status jobs_collection.update_one({'job_name': 'extract_ssl_domains'}, { '$currentDate': { "updated": True }, "$set": { 'status': 'COMPLETE' } }) now = datetime.now() print("Ending: " + str(now))
def main(): """ Begin Main... """ # The sources for which to remove expired entries # Infoblox is handled separately # Sonar RDNS is hard code below in a separate section # {"source_name": date_difference_in_months} sources = [{"name": "sonar_dns", "diff": -2}, {"name": "sonar_dns_saved", "diff": -2}, {"name": "ssl", "diff": -2}, {"name": "ssl_saved", "diff": -2}, {"name": "virustotal", "diff": -2}, {"name": "virustotal_saved", "diff": -2}, {"name": "UltraDNS", "diff": -2}, {"name": "UltraDNS_saved", "diff": -2}, {"name": "skms", "diff": -2}, {"name": "skms_saved", "diff": -2}, {"name": "marinus", "diff": -2}, {"name": "marinus_saved", "diff": -2}, {"name": "mx", "diff": -2}, {"name": "mx_saved", "diff": -2}, {"name": "common_crawl", "diff": -4}, {"name": "common_crawl_saved", "diff": -4}] now = datetime.now() print ("Starting: " + str(now)) mongo_connector = MongoConnector.MongoConnector() all_dns_collection = mongo_connector.get_all_dns_connection() dns_manager = DNSManager.DNSManager(mongo_connector) GDNS = GoogleDNS.GoogleDNS() zones = ZoneManager.get_distinct_zones(mongo_connector) jobs_collection = mongo_connector.get_jobs_connection() # Get the date for today minus two months d_minus_2m = monthdelta(datetime.now(), -2) print("Removing SRDNS as of: " + str(d_minus_2m)) # Remove the old records srdns_collection = mongo_connector.get_sonar_reverse_dns_connection() srdns_collection.remove({'updated': {"$lt": d_minus_2m}}) # Before completely removing old entries, make an attempt to see if they are still valid. # Occasionally, a host name will still be valid but, for whatever reason, is no longer tracked by a source. # Rather than throw away valid information, this will archive it. for entry in sources: removal_date = monthdelta(datetime.now(), entry['diff']) source = entry['name'] print("Removing " + source + " as of: " + str(removal_date)) last_domain = "" results = all_dns_collection.find({'sources': {"$size": 1}, 'sources.source': source, 'sources.updated': {"$lt": removal_date}}) for result in results: if result['fqdn'] != last_domain: last_domain = result['fqdn'] dns_result = GDNS.fetch_DNS_records(result['fqdn'], GDNS.DNS_TYPES[result['type']]) if dns_result != []: for dns_entry in dns_result: if is_tracked_zone(dns_entry['fqdn'], zones): new_entry={} new_entry['updated'] = datetime.now() new_entry['zone'] = result['zone'] new_entry['fqdn'] = dns_entry['fqdn'] new_entry['created'] = result['created'] new_entry['value'] = dns_entry['value'] new_entry['type'] = dns_entry['type'] new_entry['status'] = 'confirmed' if 'sonar_timestamp' in result: new_entry['sonar_timestamp'] = result['sonar_timestamp'] if source.endswith("_saved"): dns_manager.insert_record(new_entry, source) else: dns_manager.insert_record(new_entry, source + "_saved") dns_manager.remove_all_by_source_and_date(source, entry['diff']) # Record status jobs_collection.update_one({'job_name': 'remove_expired_entries'}, {'$currentDate': {"updated": True}, "$set": {'status': 'COMPLETE'}}) now = datetime.now() print("Complete: " + str(now))
def main(): """ Begin Main... """ logger = LoggingUtil.create_log(__name__) now = datetime.now() print("Starting: " + str(now)) logger.info("Starting...") mongo_connector = MongoConnector.MongoConnector() all_dns_collection = mongo_connector.get_all_dns_connection() dns_manager = DNSManager.DNSManager(mongo_connector) GDNS = GoogleDNS.GoogleDNS() ip_manager = IPManager.IPManager(mongo_connector) jobs_manager = JobsManager.JobsManager(mongo_connector, 'remove_expired_entries') jobs_manager.record_job_start() zones = ZoneManager.get_distinct_zones(mongo_connector) # The sources for which to remove expired entries results = mongo_connector.perform_distinct(all_dns_collection, 'sources.source') sources = [] for source in results: temp = {} temp['name'] = source if "common_crawl" in source: temp['diff'] = -4 else: temp['diff'] = -2 sources.append(temp) # Before completely removing old entries, make an attempt to see if they are still valid. # Occasionally, a host name will still be valid but, for whatever reason, is no longer tracked by a source. # Rather than throw away valid information, this will archive it. for entry in sources: removal_date = monthdelta(datetime.now(), entry['diff']) source = entry['name'] logger.debug("Removing " + source + " as of: " + str(removal_date)) last_domain = "" results = all_dns_collection.find({ 'sources': { "$size": 1 }, 'sources.source': source, 'sources.updated': { "$lt": removal_date } }) for result in results: if result['fqdn'] != last_domain: last_domain = result['fqdn'] lookup_int = get_lookup_int(logger, result, GDNS) dns_result = GDNS.fetch_DNS_records(result['fqdn'], lookup_int) if dns_result != []: insert_current_results(dns_result, dns_manager, zones, result, source) dns_manager.remove_all_by_source_and_date(source, entry['diff']) # Get the date for today minus two months d_minus_2m = monthdelta(datetime.now(), -2) logger.info("Removing SRDNS as of: " + str(d_minus_2m)) # Remove the old records srdns_collection = mongo_connector.get_sonar_reverse_dns_connection() srdns_collection.remove({'updated': {"$lt": d_minus_2m}}) ip_manager.delete_records_by_date(d_minus_2m) # Record status jobs_manager.record_job_complete() now = datetime.now() print("Complete: " + str(now)) logger.info("Complete")
def main(): """ Begin Main... """ # The sources for which to remove expired entries # Infoblox is handled separately # {"source_name": date_difference_in_months} sources = [{ "name": "sonar_dns", "diff": -2 }, { "name": "sonar_dns_saved", "diff": -2 }, { "name": "sonar_rdns", "diff": -2 }, { "name": "sonar_rdns_saved", "diff": -2 }, { "name": "ssl", "diff": -2 }, { "name": "ssl_saved", "diff": -2 }, { "name": "virustotal", "diff": -2 }, { "name": "virustotal_saved", "diff": -2 }, { "name": "UltraDNS", "diff": -2 }, { "name": "UltraDNS_saved", "diff": -2 }, { "name": "marinus", "diff": -2 }, { "name": "marinus_saved", "diff": -2 }, { "name": "mx", "diff": -2 }, { "name": "mx_saved", "diff": -2 }, { "name": "common_crawl", "diff": -4 }, { "name": "common_crawl_saved", "diff": -4 }] amass_diff = -2 now = datetime.now() print("Starting: " + str(now)) mongo_connector = MongoConnector.MongoConnector() all_dns_collection = mongo_connector.get_all_dns_connection() dns_manager = DNSManager.DNSManager(mongo_connector) GDNS = GoogleDNS.GoogleDNS() ip_manager = IPManager.IPManager(mongo_connector) jobs_manager = JobsManager.JobsManager(mongo_connector, 'remove_expired_entries') jobs_manager.record_job_start() zones = ZoneManager.get_distinct_zones(mongo_connector) # Get the date for today minus two months d_minus_2m = monthdelta(datetime.now(), -2) print("Removing SRDNS as of: " + str(d_minus_2m)) # Remove the old records srdns_collection = mongo_connector.get_sonar_reverse_dns_connection() srdns_collection.remove({'updated': {"$lt": d_minus_2m}}) ip_manager.delete_records_by_date(d_minus_2m) # Before completely removing old entries, make an attempt to see if they are still valid. # Occasionally, a host name will still be valid but, for whatever reason, is no longer tracked by a source. # Rather than throw away valid information, this will archive it. for entry in sources: removal_date = monthdelta(datetime.now(), entry['diff']) source = entry['name'] print("Removing " + source + " as of: " + str(removal_date)) last_domain = "" results = all_dns_collection.find({ 'sources': { "$size": 1 }, 'sources.source': source, 'sources.updated': { "$lt": removal_date } }) for result in results: if result['fqdn'] != last_domain: last_domain = result['fqdn'] lookup_int = get_lookup_int(result, GDNS) dns_result = GDNS.fetch_DNS_records(result['fqdn'], lookup_int) if dns_result != []: insert_current_results(dns_result, dns_manager, zones, result, source) dns_manager.remove_all_by_source_and_date(source, entry['diff']) # Process amass entries temp_sources = mongo_connector.perform_distinct(all_dns_collection, 'sources.source') amass_sources = [] for entry in temp_sources: if entry.startswith("amass:"): amass_sources.append(entry) for source in amass_sources: removal_date = monthdelta(datetime.now(), amass_diff) print("Removing " + source + " as of: " + str(removal_date)) last_domain = "" results = mongo_connector.perform_find( all_dns_collection, { 'sources': { "$size": 1 }, 'sources.source': source, 'sources.updated': { "$lt": removal_date } }) for result in results: if result['fqdn'] != last_domain: last_domain = result['fqdn'] lookup_int = get_lookup_int(result, GDNS) dns_result = GDNS.fetch_DNS_records(result['fqdn'], lookup_int) if dns_result != []: insert_current_results(dns_result, dns_manager, zones, result, source) dns_manager.remove_all_by_source_and_date(source, amass_diff) # Record status jobs_manager.record_job_complete() now = datetime.now() print("Complete: " + str(now))
def main(): """ Begin Main... """ now = datetime.now() print("Starting: " + str(now)) mongo_connector = MongoConnector.MongoConnector() dns_manager = DNSManager.DNSManager(mongo_connector) jobs_collection = mongo_connector.get_jobs_connection() google_dns = GoogleDNS.GoogleDNS() round_two = [] zones = ZoneManager.get_distinct_zones(mongo_connector) vt_collection = mongo_connector.get_virustotal_connection() vt_results = vt_collection.find({ 'subdomains': { "$exists": True } }, { 'zone': 1, 'subdomains': 1 }).batch_size(30) input_list = [] # For each result found in the first pass across VirusTotal for result in vt_results: # Pause to prevent DoS-ing of Google's HTTPS DNS Service time.sleep(1) for hostname in result['subdomains']: ips = google_dns.fetch_DNS_records(hostname) if ips != []: for ip_addr in ips: temp_zone = get_tracked_zone(ip_addr['fqdn'], zones) if temp_zone is not None: record = {"fqdn": ip_addr['fqdn']} record['zone'] = temp_zone record['created'] = datetime.now() record['type'] = ip_addr['type'] record['value'] = ip_addr['value'] record['status'] = 'unknown' input_list.append(record) if ip_addr['type'] == "cname" and is_tracked_zone( ip_addr['value'], zones): add_to_list(ip_addr['value'], round_two) else: print("Failed IP Lookup for: " + hostname) dead_dns_collection = mongo_connector.get_dead_dns_connection() # For each tracked CName result found in the first pass across VirusTotal print("Round Two length: " + str(len(round_two))) for hostname in round_two: zone = get_tracked_zone(hostname, zones) if zone != None: ips = google_dns.fetch_DNS_records(hostname) time.sleep(1) if ips != []: for ip_addr in ips: temp_zone = get_tracked_zone(ip_addr['fqdn'], zones) if temp_zone is not None: record = {"fqdn": ip_addr['fqdn']} record['zone'] = temp_zone record['created'] = datetime.now() record['type'] = ip_addr['type'] record['value'] = ip_addr['value'] record['status'] = 'unknown' input_list.append(record) else: original_record = dns_manager.find_one({"fqdn": hostname}, "virustotal") if original_record != None: original_record.pop("_id") dead_dns_collection.insert(original_record) print("Failed IP Lookup for: " + hostname) else: print("Failed match on zone for: " + hostname) # Update the database dns_manager.remove_by_source("virustotal") print("List length: " + str(len(input_list))) for final_result in input_list: dns_manager.insert_record(final_result, "virustotal") # Record status jobs_collection.update_one({'job_name': 'extract_vt_domains'}, { '$currentDate': { "updated": True }, "$set": { 'status': 'COMPLETE' } }) now = datetime.now() print("Ending: " + str(now))