def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print ("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = MongoConnector.MongoConnector()
    dead_dns_collection = mongo_connector.get_dead_dns_connection()
    jobs_manager = JobsManager.JobsManager(mongo_connector, 'dead_dns_cleanup')
    jobs_manager.record_job_start()

    google_dns = GoogleDNS.GoogleDNS()

    results = dead_dns_collection.find({})

    for result in results:
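        # Re-verify each dead DNS entry, pacing requests; records that still
        # return no results are assumed permanently dead and removed below.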
        time.sleep(1)
        lookup_result = google_dns.fetch_DNS_records(result['fqdn'])
        if lookup_result == []:
            logger.info("Removing " + result['fqdn'])
            dead_dns_collection.delete_one({"_id": result['_id']})

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print ("Ending: " + str(now))
    logger.info("Complete.")
Example 2

def main():
    """
    Begin main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))

    parser = argparse.ArgumentParser(
        description='Search Splunk logs for IP address')
    parser.add_argument('--collection_name',
                        choices=["http_80", "http_443"],
                        metavar="COLLECTION",
                        required=True,
                        help='The collection to upload to Splunk')
    args = parser.parse_args()

    mongo_connector = MongoConnector.MongoConnector()
    splunk_manager = SplunkHECManager.SplunkHECManager()

    jobs_manager = JobsManager.JobsManager(mongo_connector,
                                           "splunk_headers_upload")
    jobs_manager.record_job_start()

    if args.collection_name == "http_443":
        upload_zgrab_443(logger, splunk_manager, mongo_connector)
    elif args.collection_name == "http_80":
        upload_zgrab_80(logger, splunk_manager, mongo_connector)

    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example 3
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = MongoConnector.MongoConnector()
    umbrella = Umbrella.Umbrella()
    zi = ZoneIngestor.ZoneIngestor()

    # Obtain the list of known email addresses and name servers from the config collection
    config_collection = mongo_connector.get_config_connection()
    res = config_collection.find({})
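    # Only the first config document is used; it is expected to contain the
    # DNS_Admins, Whois_Name_Servers, and Whois_Orgs lists referenced below.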

    jobs_manager = JobsManager.JobsManager(mongo_connector, 'get_umbrella_whois')
    jobs_manager.record_job_start()

    # Perform a search for each email address
    for email_address in res[0]['DNS_Admins']:
        search_umbrella_by_email(logger, email_address, umbrella, zi, jobs_manager)

    # Perform a search based on each name server
    for name_server in res[0]['Whois_Name_Servers']:
        search_umbrella_by_nameserver(logger, name_server, res[0]['Whois_Orgs'], umbrella, zi, jobs_manager)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example 4
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    # Obtain the list of known email addresses from the config collection
    MC = MongoConnector.MongoConnector()
    PT = PassiveTotal.PassiveTotal()
    zi = ZoneIngestor.ZoneIngestor()
    config_collection = MC.get_config_connection()
    res = config_collection.find({})

    jobs_manager = JobsManager.JobsManager(MC, 'get_passivetotal_data')
    jobs_manager.record_job_start()

    # Perform a search for each email address
    for email_address in res[0]['DNS_Admins']:
        search_pt_email(logger, email_address, PT, zi, jobs_manager)

    for org in res[0]['Whois_Orgs']:
        search_pt_org(logger, org, PT, zi, jobs_manager)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example 5
def main():
    """
    Begin main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print ("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = MongoConnector.MongoConnector()
    jobs_manager = JobsManager.JobsManager(mongo_connector, 'get_azure_data')
    jobs_manager.record_job_start()

    # Download the XML file
    req = requests.get(XML_LOCATION)

    if req.status_code != 200:
        logger.error("Bad Request")
        jobs_manager.record_job_error()
        exit(1)

    parser = MyHTMLParser()
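    # MyHTMLParser scans the Microsoft download page for the failover link
    # that points at the actual XML data file.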
    parser.feed(req.text)

    if parser.URL == "":
        logger.error("Unable to identify URL in Microsoft HTML")
        jobs_manager.record_job_error()
        exit(1)

    req = requests.get(parser.URL)

    if req.status_code != 200:
        logger.error("Bad Request")
        jobs_manager.record_job_error()
        exit(1)

    root = ET.fromstring(req.text)

    insert_json = {}
    insert_json['created'] = datetime.now()
    insert_json['prefixes'] = []

    for region in root.findall('Region'):
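        # Flatten each region's IP ranges into {region, ip_prefix} entries.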
        region_name = region.get("Name")
        for iprange in region.findall('IpRange'):
            cidr = iprange.get("Subnet")
            insert_json['prefixes'].append({'region': region_name, 'ip_prefix': cidr})

    azure_ips = mongo_connector.get_azure_ips_connection()
    azure_ips.delete_many({})
    azure_ips.insert_one(insert_json)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example 6
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print ("Starting: " + str(now))
    logger.info("Starting...")

    dns_types = {"a": 1, "ns": 2, "cname": 5, "soa": 6, "ptr": 12, "hinfo": 13,
                 "mx": 15, "txt": 16, "aaaa": 28, "srv": 33, "naptr": 35,
                 "ds": 43, "rrsig": 46, "dnskey": 48}

    mongo_connector = MongoConnector.MongoConnector()
    all_dns_collection = mongo_connector.get_all_dns_connection()
    jobs_manager = JobsManager.JobsManager(mongo_connector, 'marinus_dns')
    jobs_manager.record_job_start()

    dns_manager = DNSManager.DNSManager(mongo_connector)

    zones = ZoneManager.get_distinct_zones(mongo_connector)

    google_dns = GoogleDNS.GoogleDNS()

    for zone in zones:
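        # Pause between zones, then query Google DNS for each record type.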
        time.sleep(1)
        for dtype, dnum in dns_types.items():
            result = google_dns.fetch_DNS_records(zone, dnum)

            if result == []:
                logger.debug("No records found for " + zone)
            else:
                new_record = result[0]
                new_record['status'] = 'confirmed'
                new_record['zone'] = zone
                new_record['created'] = datetime.now()
                logger.debug("Found " + dtype + " for: " + zone)
                dns_manager.insert_record(new_record, "marinus")

    logger.info("Starting SOA Search")

    soa_searches = find_sub_zones(all_dns_collection)
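    # Check whether known sub-domains publish their own SOA records.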
    for entry in soa_searches:
        time.sleep(1)
        result = google_dns.fetch_DNS_records(entry, dns_types['soa'])
        if result != []:
            new_record = result[0]
            new_record['status'] = 'confirmed'
            new_record['zone'] = get_fld_from_value(entry, '')
            new_record['created'] = datetime.now()
            logger.debug("Found SOA: " + entry)
            if new_record['zone'] != '':
                dns_manager.insert_record(new_record, "marinus")

    jobs_manager.record_job_complete()

    now = datetime.now()
    print ("Complete: " + str(now))
    logger.info("Complete.")
Example 7
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = RemoteMongoConnector.RemoteMongoConnector()
    jobs_manager = JobsManager.JobsManager(mongo_connector, 'whois_lookups')
    jobs_manager.record_job_start()

    # Collect the tracked zones...
    zones = get_zones(mongo_connector)

    whois_collection = mongo_connector.get_whois_connection()

    for zone in zones:
        # Ensure the zone contains at least one dot. This is left over from an old bug.
        if zone.find(".") > 0:

            logger.debug(zone)
            zone_result = whois_collection.find_one({'zone': zone})

            # If we haven't done a lookup in the past, try to collect the data.
            # A limit exists on the number of whois lookups you can perform so limit to new domains.
            if zone_result is None:
                do_whois_lookup(logger, zone, whois_collection)

    # The cap on the number of old entries to be updated.
    MAX_OLD_ENTRIES = 400

    # Grab entries that haven't been updated in three months
    cutoff_date = datetime.now() - timedelta(days=90, hours=1)
    zone_result = whois_collection.find({
        "updated": {
            "$lte": cutoff_date
        }
    }).batch_size(10)

    i = 0
    for result in zone_result:
        do_whois_lookup(logger, result["zone"], whois_collection)
        i = i + 1

        # Chances are that a lot of the entries were inserted on the same day.
        # This helps break updating old entries across different runs.
        if i > MAX_OLD_ENTRIES:
            break

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example 8
    def __init__(self, thread_id, q, port, command, zone, zgrab_collection):
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.port = port
        self.zgrab_collection = zgrab_collection
        self.zone = zone
        self.run_command = command
        self.q = q
        self.logger = LoggingUtil.create_log(__name__)
Example 9
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print ("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = MongoConnector.MongoConnector()
    dns_manager = DNSManager.DNSManager(mongo_connector)
    zone_ingestor = ZoneIngestor.ZoneIngestor()

    jobs_manager = JobsManager.JobsManager(mongo_connector, 'get_route53')
    jobs_manager.record_job_start()

    current_zones = ZoneManager.get_distinct_zones(mongo_connector)

    # For cases with multiple R53 accounts, include the account id for reference
    sts = boto3.client('sts')
    account_id = sts.get_caller_identity()["Arn"].split(':')[4]
    r53_source = "R53:" + str(account_id)

    r53_client = boto3.client('route53')

    r53_domains = r53_client.list_hosted_zones()
    r53_zone_list = []
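    # Page through the hosted zone listing until IsTruncated is no longer set.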
    while r53_domains != {}:
        for zone_data in r53_domains['HostedZones']:
            # Only add public zones
            if not zone_data['Config']['PrivateZone']:
                r53_zone_list.append(zone_data)

        if r53_domains['IsTruncated']:
            r53_domains = r53_client.list_hosted_zones(Marker=r53_domains['NextMarker'])
        else:
            r53_domains = {}


    for zone_data in r53_zone_list:
        # Double check that this is not a new zone
        zone_name = zone_data['Name'][:-1]
        if zone_name not in current_zones:
            logger.info("Creating zone: " + zone_name)
            zone_ingestor.add_zone(zone_data['Name'], r53_source)

        # Add hosts to the zone
        update_records(r53_client, dns_manager, zone_data, r53_source)


    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print ("Ending: " + str(now))
    logger.info("Complete.")
Example 10
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = MongoConnector.MongoConnector()
    dns_manager = DNSManager.DNSManager(mongo_connector)
    jobs_manager = JobsManager.JobsManager(mongo_connector,
                                           "get_external_cnames")
    jobs_manager.record_job_start()

    groups = {}

    # Collect zones
    zone_results = ZoneManager.get_distinct_zones(mongo_connector)

    zones = []
    for zone in zone_results:
        if "." in zone:
            zones.append(zone)

    # Collect the all_dns cnames.
    logger.info("Starting All DNS...")
    all_dns_recs = dns_manager.find_multiple({"type": "cname"}, None)

    for srec in all_dns_recs:
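        # Group CNAME records that point outside the tracked zones
        # by the third-party root domain that they reference.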
        if not is_tracked_zone(srec["value"], zones):
            add_to_list(
                get_fld_from_value(srec["value"], srec["zone"]),
                srec["fqdn"],
                srec["value"],
                srec["zone"],
                groups,
            )

    # Update the database
    tpds_collection = mongo_connector.get_tpds_connection()

    tpds_collection.delete_many({})
    for key in groups.keys():
        tpds_collection.insert_one(groups[key])

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example 11
def main():
    """
    This function extracts the IP address ranges from the TXT records
    and stores them in the gcp_ips collection within the database.
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = MongoConnector.MongoConnector()
    gcp_collection = mongo_connector.get_gcp_ips_connection()
    google_dns = GoogleDNS.GoogleDNS()
    jobs_manager = JobsManager.JobsManager(mongo_connector, 'get_gcp_ranges')
    jobs_manager.record_job_start()

    ip_ranges = recursive_search(logger,
                                 "_cloud-netblocks.googleusercontent.com",
                                 google_dns)

    ipv4_ranges = []
    ipv6_ranges = []

    for entry in ip_ranges:
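        # Each entry is an SPF-style token of the form "ip4:<prefix>" or "ip6:<prefix>".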
        parts = entry.split(":", 1)
        if parts[0] == "ip4" and parts[1] not in ipv4_ranges:
            ipv4_ranges.append({"ip_prefix": parts[1]})
        elif parts[0] == "ip6" and parts[1] not in ipv6_ranges:
            ipv6_ranges.append({"ipv6_prefix": parts[1]})
        else:
            logger.warning("Unrecognized data: " + entry)

    new_data = {}
    new_data['prefixes'] = ipv4_ranges
    new_data['ipv6_prefixes'] = ipv6_ranges
    new_data['created'] = now

    gcp_collection.delete_many({})
    gcp_collection.insert_one(new_data)

    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example 12
class MyHTMLParser(HTMLParser):
    """
    Create a subclass and override the handler methods.
    """
    URL = ""
    logger = LoggingUtil.create_log(__name__)

    def handle_starttag(self, tag, attrs):
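        # Look for an anchor tag whose class marks the failover download link,
        # then capture its href as the target URL.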
        found = False
        if tag == "a":
            for attr in attrs:
                if attr[0] == "class" and attr[1] == "mscom-link failoverLink":
                    found = True

            if found:
                for attr in attrs:
                    if attr[0] == "href":
                        self.logger.info(attr[1])
                        self.URL = attr[1]
Example 13
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = MongoConnector.MongoConnector()
    splunk_query_manager = SplunkQueryManager.SplunkQueryManager()
    splunk_collection = mongo_connector.get_splunk_connection()
    dns_manager = DNSManager.DNSManager(mongo_connector)

    jobs_manager = JobsManager.JobsManager(mongo_connector, "get_splunk_data")
    jobs_manager.record_job_start()

    logger.info ("Starting Splunk Query")

    results_per_page = 100

    # Put your custom Splunk search query here.
    results = splunk_query_manager.do_search('search index=...', results_per_page)

    logger.info ("Processing " + str(splunk_query_manager.RESULTCOUNT) + " results")

    parse_splunk_results(logger, results, dns_manager, splunk_collection)

    while True:
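        # Page through the remaining results until the query is exhausted.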
        results = splunk_query_manager.get_next_page()
        if results is None:
            break
        parse_splunk_results(logger, results, dns_manager, splunk_collection)


    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example 14
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    print("Starting: " + str(datetime.now()))
    logger.info("Starting...")

    # Make database connections
    mc = MongoConnector.MongoConnector()
    jobs_manager = JobsManager.JobsManager(mc, "get_iblox_txt")
    jobs_manager.record_job_start()

    idm = InfobloxDNSManager.InfobloxDNSManager("txt")
    idm.get_infoblox_dns()

    # Record status
    jobs_manager.record_job_complete()

    print("Ending: " + str(datetime.now()))
    logger.info("Complete.")
Example 15
    def get_ultradns_zones(self):
        """
        Extracts the zones listing from UltraDNS in a paginated manner.
        """
        self._logger = LoggingUtil.create_log(__name__)

        print("Starting: " + str(datetime.now()))
        self._logger.info("Starting...")
        self.UH.jobs_manager.record_job_start()

        # Part of clean_collection code.
        # self.UH.get_previous_zones()

        self.__paginated_ultradns_zones_request()
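        # Keep fetching pages while the UltraDNS handler reports a remaining offset.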
        while self.UH.offset:
            self.__paginated_ultradns_zones_request()

        # Record status
        self.UH.jobs_manager.record_job_complete()

        print("Ending: " + str(datetime.now()))
        self._logger.info("Complete.")
Example 16
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    print("Starting: " + str(datetime.now()))
    logger.info("Starting...")

    # Make database connections
    mc = MongoConnector.MongoConnector()
    jobs_manager = JobsManager.JobsManager(mc, 'get_infoblox_cname_extattrs')
    jobs_manager.record_job_start()

    iem = InfobloxExtattrManager.InfobloxExtattrManager('cname')
    iem.get_infoblox_extattr()

    # Record status
    jobs_manager.record_job_complete()

    print("Ending: " + str(datetime.now()))
    logger.info("Complete.")
Example 17
def main():
    """
    Begin main...
    """
    logger = LoggingUtil.create_log(__name__)

    # Make database connections
    mongo_connector = MongoConnector.MongoConnector()

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    jobs_manager = JobsManager.JobsManager(mongo_connector, 'get_aws_data')
    jobs_manager.record_job_start()

    # Download the JSON file
    req = requests.get(JSON_LOCATION)

    if req.status_code != 200:
        logger.error("Bad Request")
        jobs_manager.record_job_error()
        exit(1)

    # Convert the response to JSON
    json_data = json.loads(req.text)

    # Replace the old entries with the new entries
    aws_collection = mongo_connector.get_aws_ips_connection()
    aws_collection.delete_many({})
    aws_collection.insert_one(json_data)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example 18

def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    # Make the local and remote database connections
    mongo_connector = MongoConnector.MongoConnector()
    rm_connector = RemoteMongoConnector.RemoteMongoConnector()

    jobs_manager = JobsManager.JobsManager(mongo_connector, 'remote_download')
    jobs_manager.record_job_start()

    remote_jobs_collection = rm_connector.get_jobs_connection()

    # Check the status of the Censys job on the remote database
    status = remote_jobs_collection.find_one({'job_name': 'censys'})
    if status is not None and 'status' in status and status['status'] != jobs_manager.COMPLETE:
        logger.info("Censys scans status is not COMPLETE")
    elif status is not None and 'status' in status and status['status'] == jobs_manager.COMPLETE:
        # Get connections to the relevant collections.
        censys_collection = mongo_connector.get_zgrab_443_data_connection()
        remote_censys_collection = rm_connector.get_zgrab_443_data_connection()

        download_censys_scan_info(censys_collection, remote_censys_collection)

        # Tell the remote database that is safe to start processing the next Censys file
        remote_jobs_collection.update_one({'job_name': 'censys'}, {
            '$currentDate': {
                "updated": True
            },
            "$set": {
                'status': jobs_manager.READY
            }
        })

    # Get connections to the relevant HTTPS collections.
    zgrab_443_data_collection = mongo_connector.get_zgrab_443_data_connection()
    remote_zgrab_443_data_collection = rm_connector.get_zgrab_443_data_connection()

    download_zgrab_info(logger, zgrab_443_data_collection,
                        remote_zgrab_443_data_collection)

    # Get connections to the relevant HTTP collections.
    zgrab_80_data_collection = mongo_connector.get_zgrab_80_data_connection()
    remote_zgrab_80_data_collection = rm_connector.get_zgrab_80_data_connection()

    download_zgrab_info(logger, zgrab_80_data_collection,
                        remote_zgrab_80_data_collection)

    # Get connections to the relevant port collections.
    zgrab_port_data_collection = mongo_connector.get_zgrab_port_data_connection()
    remote_zgrab_port_data_collection = rm_connector.get_zgrab_port_data_connection()

    download_zgrab_port_info(logger, zgrab_port_data_collection,
                             remote_zgrab_port_data_collection)

    # Download latest whois information
    status = remote_jobs_collection.find_one({'job_name': 'whois_lookups'})
    if status is not None and status['status'] == jobs_manager.COMPLETE:
        whois_collection = mongo_connector.get_whois_connection()
        remote_whois_collection = rm_connector.get_whois_connection()
        download_whois_data(logger, whois_collection, remote_whois_collection)
        remote_jobs_collection.update_one({'job_name': 'whois'},
                                          {'$set': {
                                              'status': jobs_manager.READY
                                          }})

    # Download the status of the remote jobs
    download_jobs_status(logger, jobs_manager._jobs_collection,
                         remote_jobs_collection)

    # Update the local jobs database to done
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example 19
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = MongoConnector.MongoConnector()
    dns_manager = DNSManager.DNSManager(mongo_connector)
    google_dns = GoogleDNS.GoogleDNS()
    jobs_manager = JobsManager.JobsManager(mongo_connector,
                                           'extract_ssl_domains')
    jobs_manager.record_job_start()

    parser = argparse.ArgumentParser(
        description='Search TLS certificates for additional DNS names')
    parser.add_argument('--zgrab_version',
                        default=2,
                        type=int,
                        choices=[1, 2],
                        metavar="version",
                        help='The version of ZGrab used to collect data')
    args = parser.parse_args()

    dns_names = []
    round_two = []
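    # dns_names holds hostnames harvested from certificates; round_two collects
    # CNAME targets that require a second round of lookups.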

    zones = ZoneManager.get_distinct_zones(mongo_connector)

    # Collect the list of domains from the SSL Certificates
    extract_ct_certificate_names(dns_names, mongo_connector)
    # extract_censys_certificate_names(dns_names, mongo_connector)
    if args.zgrab_version == 1:
        extract_zgrab_certificate_names(logger, dns_names, mongo_connector)
    else:
        extract_zgrab2_certificate_names(logger, dns_names, mongo_connector)

    input_list = []

    # Some SSL certificates are for multiple domains.
    # The tracked company may not own all domains.
    # Therefore, we filter to only the root domains that belong to the tracked company.
    logger.info("Pre-filter list: " + str(len(dns_names)))
    for hostname in dns_names:
        if not hostname.startswith("*"):
            zone = get_tracked_zone(hostname, zones)
            if zone is not None:
                ips = google_dns.fetch_DNS_records(hostname)

                # Pause to prevent DoS-ing of Google's HTTPS DNS Service
                time.sleep(1)

                if ips != []:
                    for ip_addr in ips:
                        temp_zone = get_tracked_zone(ip_addr['fqdn'], zones)
                        if temp_zone is not None:
                            record = {"fqdn": ip_addr['fqdn']}
                            record['zone'] = temp_zone
                            record['created'] = datetime.now()
                            record['type'] = ip_addr['type']
                            record['value'] = ip_addr['value']
                            record['status'] = 'unknown'
                            input_list.append(record)

                        if ip_addr['type'] == "cname" and is_tracked_zone(
                                ip_addr['value'], zones):
                            add_to_round_two(ip_addr['value'], round_two)

                else:
                    logger.warning("Failed IP Lookup for: " + hostname)
            else:
                logger.warning("Failed match on zone for: " + hostname)
        else:
            logger.warning("Skipping wildcard: " + hostname)

    dead_dns_collection = mongo_connector.get_dead_dns_connection()

    # Some DNS records will be CNAME records pointing to other tracked domains.
    # This is a single level recursion to lookup those domains.
    logger.info("Round Two list: " + str(len(round_two)))
    for hostname in round_two:
        zone = get_tracked_zone(hostname, zones)
        if zone is not None:
            ips = google_dns.fetch_DNS_records(hostname)
            time.sleep(1)
            if ips != []:
                for ip_addr in ips:
                    temp_zone = get_tracked_zone(ip_addr['fqdn'], zones)
                    if temp_zone is not None:
                        record = {"fqdn": ip_addr['fqdn']}
                        record['zone'] = temp_zone
                        record['created'] = datetime.now()
                        record['type'] = ip_addr['type']
                        record['value'] = ip_addr['value']
                        record['status'] = 'unknown'
                        input_list.append(record)
            else:
                logger.warning("Failed IP Lookup for: " + hostname)
                original_record = dns_manager.find_one({"fqdn": hostname},
                                                       "ssl")
                if original_record is not None:
                    original_record.pop("_id")
                    dead_dns_collection.insert_one(original_record)
        else:
            logger.warning("Failed match on zone for: " + hostname)

    # Record all the results.
    dns_manager.remove_by_source("ssl")
    logger.info("List length: " + str(len(input_list)))
    for final_result in input_list:
        dns_manager.insert_record(final_result, "ssl")

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete")
Example 20
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    # Obtain the list of known email addresses from the config collection
    mongo_connector = MongoConnector.MongoConnector()
    whois_collection = mongo_connector.get_whois_connection()
    all_dns_collection = mongo_connector.get_all_dns_connection()
    zones_collection = mongo_connector.get_zone_connection()
    jobs_manager = JobsManager.JobsManager(mongo_connector, 'mark_expired')
    jobs_manager.record_job_start()

    # Grab all zones that are not expired or false positives.
    # Also exclude any that were recently created since they won't have data yet.
    date_delta = datetime.today() - timedelta(days=30)
    zones = zones_collection.distinct(
        'zone', {
            'created': {
                "$lt": date_delta
            },
            'status': {
                "$nin": [ZoneManager.EXPIRED, ZoneManager.FALSE_POSITIVE]
            }
        })

    # The Python Whois library is hit and miss with some international zones.
    # For now, this script focuses on the most popular TLDs.
    new_zones = get_primary_zones(logger, zones)

    expired_list = []
    for zone in new_zones:
        if whois_collection.count_documents({'zone': zone}) == 0:
            # Assume it is expired if there is no longer a whois record present
            expired_list.append(zone)

    # Iterate over a copy of the list since entries may be removed mid-loop.
    for zone in list(expired_list):
        if all_dns_collection.count_documents({'zone': zone}) > 0:
            # This may be a case where the Python Whois library failed
            # and the zone is still active.
            logger.debug("DNS records still exist for " + zone)
            expired_list.remove(zone)

    zone_manager = ZoneManager(mongo_connector)

    # Need to get this list before setting zones to expired in order to avoid a recursion problem.
    already_expired = zone_manager.get_zones_by_status(ZoneManager.EXPIRED)

    possibly_renewed = []
    for zone in already_expired:
        if whois_collection.count_documents({'zone': zone}) == 1:
            possibly_renewed.append(zone)

    for zone in expired_list:
        logger.debug("Expiring: " + zone)
        zone_manager.set_status(zone, ZoneManager.EXPIRED, "mark_expired.py")

    # Get the list of known registering entities.
    # This will only work for some whois lookups since Python Whois doesn't get
    # a valid org for all lookups and some have privacy enabled.
    config_collection = mongo_connector.get_config_connection()
    result = config_collection.find({}, {
        'Whois_Orgs': 1,
        'Whois_Name_Servers': 1
    })
    orgs = result[0]['Whois_Orgs']
    name_servers = []
    if 'Whois_Name_Servers' in result[0]:
        name_servers = result[0]['Whois_Name_Servers']

    logger.debug(str(name_servers))

    for zone in possibly_renewed:
        # We need to be careful of automatically marking something renewed
        # since it could have been registered by someone else.
        if whois_collection.count_documents({
                'zone': zone,
                'org': {
                    "$in": orgs
                }
        }) == 1:
            logger.warning("ATTENTION: " + zone +
                           " has been renewed based on org")
            zone_manager.set_status(zone, ZoneManager.UNCONFIRMED,
                                    "mark_expired.py")
        else:
            result = whois_collection.find({'zone': zone}, {
                'name_servers': 1,
                "_id": 0
            })
            found = 0
            if result is not None and 'name_servers' in result[0] and result[0]['name_servers'] is not None:
                for entry in result[0]['name_servers']:
                    if entry.lower() in name_servers:
                        logger.warning(
                            "ATTENTION: " + zone +
                            " has been renewed based on name servers")
                        zone_manager.set_status(zone, ZoneManager.UNCONFIRMED,
                                                "mark_expired.py")
                        found = 1
                        break
            if found == 0:
                result = whois_collection.find({'zone': zone}, {
                    'name_server_groups': 1,
                    "_id": 0
                })
                if result is not None and 'name_server_groups' in result[0] and result[0]['name_server_groups'] is not None:
                    for entry in result[0]['name_server_groups']:
                        if entry.lower() in name_servers:
                            logger.warning(
                                "ATTENTION: " + zone +
                                " has been renewed based on name server_groups"
                            )
                            zone_manager.set_status(zone,
                                                    ZoneManager.UNCONFIRMED,
                                                    "mark_expired.py")
                            found = 1
                            break
            if found == 0:
                logger.warning(zone + " has been renewed by an unknown entity")

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example 21
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    # Make database connections
    mongo_connector = MongoConnector.MongoConnector()
    ct_collection = mongo_connector.get_certificate_transparency_connection()
    config_collection = mongo_connector.get_config_connection()
    x509parser = X509Parser.X509Parser()

    zones = ZoneManager.get_distinct_zones(mongo_connector)
    result = config_collection.find_one({}, {'SSL_Orgs': 1, "_id": 0})
    ssl_orgs = result['SSL_Orgs']

    # Defaults
    save_location = '/mnt/workspace/'
    download_method = 'dbAndSave'
    save_type = "PEM"

    parser = argparse.ArgumentParser(
        description='Download certificate information from the provided CT Log.'
    )
    parser.add_argument(
        '--log_source',
        required=True,
        help=
        'Indicates which log to query based on values in the x509Parser library'
    )
    parser.add_argument(
        '--include_precerts',
        action="store_true",
        help='Include pre-certificates which are not finalized')
    parser.add_argument(
        '--download_methods',
        choices=['dbAndSave', 'dbOnly'],
        default=download_method,
        help=
        'Indicates whether to download the raw files or just save to the database'
    )
    parser.add_argument(
        '--starting_index',
        required=False,
        default=-1,
        type=int,
        help='Force the script to start at specific index within the log.')
    parser.add_argument(
        '--cert_save_location',
        required=False,
        default=save_location,
        help=
        'Indicates where to save the certificates on disk when choosing dbAndSave'
    )
    parser.add_argument(
        '--save_type',
        choices=['PEM', 'ASN1'],
        default=save_type,
        help='Indicates which format to use for the data. The default is PEM')
    args = parser.parse_args()

    source = args.log_source
    try:
        ct_log_map = x509parser.CT_LOG_MAP[source]
    except KeyError:
        logger.error("ERROR: UNKNOWN LOG SOURCE: " + source)
        exit(1)

    if args.cert_save_location:
        save_location = args.cert_save_location
        if not save_location.endswith("/"):
            save_location = save_location + "/"

    if args.download_methods:
        download_method = args.download_methods
        check_save_location(save_location, source)

    if args.save_type:
        save_type = args.save_type

    jobs_manager = JobsManager.JobsManager(mongo_connector, "ct_log-" + source)
    jobs_manager.record_job_start()

    if args.starting_index == -1:
        starting_index = fetch_starting_index(ct_collection, source)
    else:
        starting_index = args.starting_index
    logger.info("Starting Index: " + str(starting_index))

    sth_data = fetch_sth(logger, "https://" + ct_log_map['url'])
    logger.info("Tree size: " + str(sth_data['tree_size']))

    current_index = starting_index
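    # Walk the log in batches of up to 256 entries per get-entries request.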
    while current_index < sth_data['tree_size']:
        ending_index = current_index + 256
        if ending_index > sth_data['tree_size']:
            ending_index = sth_data['tree_size']

        logger.debug("Checking from index: " + str(current_index) +
                     " to index " + str(ending_index))
        certs = fetch_certificate_batch(logger, "https://" + ct_log_map['url'],
                                        current_index, ending_index)

        for entry in certs['entries']:
            der_cert, cert_type = get_cert_from_leaf(logger,
                                                     entry['leaf_input'])
            if der_cert is None and cert_type == 1 and not args.include_precerts:
                current_index = current_index + 1
                continue
            elif der_cert is None and cert_type == 0:
                current_index = current_index + 1
                continue
            elif der_cert is None and cert_type == 1:
                der_cert = get_cert_from_extra_data(entry['extra_data'])

            cert = x509parser.parse_data(der_cert, source)
            if cert is None:
                logger.warning("Skipping certificate index: " +
                               str(current_index))
                current_index = current_index + 1
                continue

            if cert_type == 1:
                cert['ct_log_type'] = "PRE-CERTIFICATE"
            else:
                cert['ct_log_type'] = "CERTIFICATE"

            cert_zones = check_zone_relevancy(cert, zones)

            if check_org_relevancy(cert, ssl_orgs) or cert_zones != []:
                cert[source + "_id"] = current_index
                cert['zones'] = cert_zones
                logger.info("Adding " + source + " id: " + str(current_index) +
                            " SHA256: " + cert['fingerprint_sha256'])
                insert_certificate(cert, source, ct_collection, cert_zones)

                if download_method == 'dbAndSave':
                    write_file(logger, cert, save_location, save_type, source)

            current_index = current_index + 1

    # Set isExpired for any entries that have recently expired.
    ct_collection.update_many(
        {
            "not_after": {
                "$lt": datetime.utcnow()
            },
            "isExpired": False
        }, {"$set": {
            "isExpired": True
        }})

    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example 22
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    parser = argparse.ArgumentParser(
        description=
        'Send specific collections to the remote MongoDB. If no arguments are provided, then all data is mirrored.'
    )
    parser.add_argument('--send_zones',
                        action="store_true",
                        required=False,
                        help='Send zones')
    parser.add_argument('--send_ip_zones',
                        action="store_true",
                        required=False,
                        help='Send IP zones')
    parser.add_argument('--send_third_party_zones',
                        action="store_true",
                        required=False,
                        help='Send AWS, Azure, etc.')
    parser.add_argument('--send_config',
                        action="store_true",
                        required=False,
                        help='Send configs')
    parser.add_argument('--send_dns_records',
                        action="store_true",
                        required=False,
                        help='Send DNS records')
    args = parser.parse_args()

    send_all = False
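    # With no command-line flags, every collection is mirrored.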
    if len(sys.argv) == 1:
        send_all = True

    mongo_connector = MongoConnector.MongoConnector()
    remote_mongo_connector = RemoteMongoConnector.RemoteMongoConnector()

    jobs_manager = JobsManager.JobsManager(mongo_connector,
                                           'send_remote_server')
    jobs_manager.record_job_start()

    if send_all or args.send_zones:
        zone_list = update_zones(logger, mongo_connector,
                                 remote_mongo_connector, True)
    else:
        zone_list = update_zones(logger, mongo_connector,
                                 remote_mongo_connector, False)

    if send_all or args.send_ip_zones:
        update_ip_zones(logger, mongo_connector, remote_mongo_connector)

    if send_all or args.send_third_party_zones:
        update_aws_cidrs(logger, mongo_connector, remote_mongo_connector)
        update_azure_cidrs(logger, mongo_connector, remote_mongo_connector)
        update_akamai_cidrs(logger, mongo_connector, remote_mongo_connector)
        update_gcp_cidrs(logger, mongo_connector, remote_mongo_connector)

    if send_all or args.send_config:
        update_config(logger, mongo_connector, remote_mongo_connector)

    if send_all or args.send_dns_records:
        update_all_dns(logger, mongo_connector, remote_mongo_connector,
                       zone_list)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example 23
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    parser = argparse.ArgumentParser(
        description="Send specific collections to the remote MongoDB. If no arguments are provided, then all data is mirrored."
    )
    parser.add_argument(
        "--send_zones", action="store_true", required=False, help="Send zones"
    )
    parser.add_argument(
        "--send_ip_zones", action="store_true", required=False, help="Send IP zones"
    )
    parser.add_argument(
        "--send_third_party_zones",
        action="store_true",
        required=False,
        help="Send AWS, Azure, etc.",
    )
    parser.add_argument(
        "--send_config", action="store_true", required=False, help="Send configs"
    )
    parser.add_argument(
        "--send_dns_records",
        action="store_true",
        required=False,
        help="Replace all DNS records",
    )
    parser.add_argument(
        "--send_dns_diff",
        action="store_true",
        required=False,
        help="Send new DNS records",
    )
    parser.add_argument(
        "--date_diff",
        default=2,
        type=int,
        help="The number of days used for identifying new records in send_dns_diff",
    )
    args = parser.parse_args()

    send_all = False
    if len(sys.argv) == 1:
        send_all = True

    mongo_connector = MongoConnector.MongoConnector()
    remote_mongo_connector = RemoteMongoConnector.RemoteMongoConnector()

    jobs_manager = JobsManager.JobsManager(mongo_connector, "send_remote_server")
    jobs_manager.record_job_start()

    if send_all or args.send_zones:
        try:
            zone_list = update_zones(
                logger, mongo_connector, remote_mongo_connector, True
            )
        except Exception:
            logger.error(
                "Could not communicate with the remote database when sending zones"
            )
            jobs_manager.record_job_error()
            exit(1)
    else:
        zone_list = update_zones(logger, mongo_connector, remote_mongo_connector, False)

    if send_all or args.send_ip_zones:
        try:
            update_ip_zones(logger, mongo_connector, remote_mongo_connector)
        except Exception:
            logger.error(
                "Could not communicate with the remote database when sending IP zones"
            )
            jobs_manager.record_job_error()
            exit(1)

    if send_all or args.send_third_party_zones:
        try:
            update_aws_cidrs(logger, mongo_connector, remote_mongo_connector)
            update_azure_cidrs(logger, mongo_connector, remote_mongo_connector)
            update_akamai_cidrs(logger, mongo_connector, remote_mongo_connector)
            update_gcp_cidrs(logger, mongo_connector, remote_mongo_connector)
        except Exception:
            logger.error(
                "Could not communicate with the remote database when sending third-party zones"
            )
            jobs_manager.record_job_error()
            exit(1)

    if send_all or args.send_config:
        try:
            update_config(logger, mongo_connector, remote_mongo_connector)
        except Exception:
            logger.error(
                "Could not communicate with the remote database when sending config data"
            )
            jobs_manager.record_job_error()
            exit(1)

    # This will completely repopulate the DNS records table.
    if args.send_dns_records:
        try:
            update_all_dns(logger, mongo_connector, remote_mongo_connector, zone_list)
        except Exception:
            logger.error(
                "Could not communicate with the remote database when sending DNS records"
            )
            jobs_manager.record_job_error()
            exit(1)

    # If you have a large data set, then you may only want to send updated records
    if send_all or args.send_dns_diff:
        try:
            update_all_dns_diff_mode(
                logger,
                mongo_connector,
                remote_mongo_connector,
                zone_list,
                args.date_diff,
            )
        except Exception:
            logger.error(
                "Could not communicate with the remote database when sending DNS diff records"
            )
            jobs_manager.record_job_error()
            exit(1)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example 24
def main():
    """
    Begin Main()
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = MongoConnector.MongoConnector()
    mongo_ct = mongo_connector.get_certificate_transparency_connection()
    cert_graphs_collection = mongo_connector.get_cert_graphs_connection()
    jobs_manager = JobsManager.JobsManager(mongo_connector,
                                           "create_cert_graphs")
    jobs_manager.record_job_start()

    zones = ZoneManager.get_distinct_zones(mongo_connector)

    parser = argparse.ArgumentParser(
        description=
        "Creates and stores certificate graphs in the database based on one or more sources."
    )
    parser.add_argument(
        "--check_censys",
        action="store_true",
        default=False,
        required=False,
        help="Whether to check the Censys collection in the database",
    )
    parser.add_argument(
        "--check_443_scans",
        action="store_true",
        default=False,
        required=False,
        help="Whether to check the zgrab collection in the database",
    )
    parser.add_argument(
        "--check_ct_scans",
        action="store_true",
        default=False,
        required=False,
        help="Whether to check the CT collection in the database",
    )
    parser.add_argument(
        "--zgrab_version",
        default=2,
        type=int,
        choices=[1, 2],
        metavar="version",
        help="The version of ZGrab used to collect data",
    )
    args = parser.parse_args()

    if args.check_censys is True:
        censys_collection = mongo_connector.get_censys_connection()

    if args.check_443_scans is True:
        zgrab_collection = mongo_connector.get_zgrab_443_data_connection()

    for zone in zones:
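        # Build a directed graph of the zone's certificates from the selected sources.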
        logger.info("Creating: " + zone)
        graph = nx.DiGraph()

        certs_list = {}

        if args.check_ct_scans:
            certs_list = get_current_ct_certificates(mongo_ct, zone)
        if args.check_censys:
            certs_list = add_censys_certificates(censys_collection, zone,
                                                 certs_list)
        if args.check_443_scans:
            if args.zgrab_version == 1:
                certs_list = add_terminal_zgrab_certificates(
                    zgrab_collection, zone, certs_list)
                certs_list = add_initial_zgrab_certificates(
                    zgrab_collection, zone, certs_list)
            else:
                certs_list = add_terminal_zgrab2_certificates(
                    zgrab_collection, zone, certs_list)
                certs_list = add_initial_zgrab2_certificates(
                    zgrab_collection, zone, certs_list)

        graph = create_nodes(graph, mongo_connector, zone, certs_list)
        data = json_graph.node_link_data(graph)

        my_data = {}
        my_data["links"] = data["links"]
        my_data["nodes"] = data["nodes"]
        my_data["zone"] = zone
        my_data["created"] = datetime.now()

        cert_graphs_collection.delete_one({"zone": zone})
        mongo_connector.perform_insert(cert_graphs_collection, my_data)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example 25
def main():
    """
    Begin main...
    """
    logger = LoggingUtil.create_log(__name__)

    if is_running("get_censys_files.py"):
        """
        Check to see if a download is in process...
        """
        logger.warning("Can't run due to get_files running. Goodbye!")
        exit(0)

    if is_running(os.path.basename(__file__)):
        """
        Check to see if a previous attempt to parse is still running...
        """
        logger.warning("I am already running! Goodbye!")
        exit(0)

    # Make the relevant database connections
    RMC = RemoteMongoConnector.RemoteMongoConnector()

    ip_manager = IPManager.IPManager(RMC)

    # Verify that the get_files script has a recent file in need of parsing.
    jobs_collection = RMC.get_jobs_connection()

    status = jobs_collection.find_one({"job_name": "censys"})
    if status["status"] != "DOWNLOADED":
        logger.warning("The status is not set to DOWNLOADED. Goodbye!")
        exit(0)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    # Collect the list of available zones
    zones = ZoneManager.get_distinct_zones(RMC)

    logger.info("Zones: " + str(len(zones)))

    # Get the current configuration information for Marinus.
    config_collection = RMC.get_config_connection()

    configs = config_collection.find({})
    orgs = []
    for org in configs[0]["SSL_Orgs"]:
        orgs.append(org)

    logger.info("Orgs: " + str(len(orgs)))

    # Obtain the name of the decompressed file.
    with open(FILENAME_FILE, "r") as filename_f:
        decompressed_file = filename_f.readline()

    # For manual testing: decompressed_file = "ipv4.json"

    logger.info("Beginning file processing...")

    # Remove old results from the database
    results_collection = RMC.get_results_connection()
    results_collection.delete_many({})
    all_dns_collection = RMC.get_all_dns_connection()

    try:
        with open(decompressed_file, "r") as dec_f:
            for line in dec_f:
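                # Each line of the decompressed file is one JSON document
                # describing a scanned IPv4 host.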
                try:
                    entry = json.loads(line)
                    """
                    Does the SSL certificate match a known organization?
                    Is the IP address in a known CIDR?
                    Is the IP address recorded in Splunk?
                    """
                    if (check_in_org(entry, orgs)
                            or ip_manager.is_tracked_ip(entry["ip"])
                            or ip_manager.find_splunk_data(entry["ip"], "AWS")
                            is not None or ip_manager.find_splunk_data(
                                entry["ip"], "AZURE") is not None):
                        entry["zones"] = check_in_zone(entry, zones)
                        entry["aws"] = ip_manager.is_aws_ip(entry["ip"])
                        entry["azure"] = ip_manager.is_azure_ip(entry["ip"])
                        (domains, matched_zones) = lookup_domain(
                            entry, zones, all_dns_collection)
                        if len(domains) > 0:
                            entry["domains"] = domains
                            if len(matched_zones) > 0:
                                for zone in matched_zones:
                                    if zone not in entry["zones"]:
                                        entry["zones"].append(zone)
                        insert_result(entry, results_collection)
                    # else:
                    #     #This will add days to the amount of time necessary to scan the file.
                    #     matched_zones = check_in_zone(entry, zones)
                    #     if matched_zones != []:
                    #         entry['zones'] = matched_zones
                    #         entry['aws'] = ip_manager.is_aws_ip(entry['ip'])
                    #         entry['azure'] = ip_manager.is_azure_ip(entry['ip'])
                    #         insert_result(entry, results_collection)
                except ValueError as err:
                    logger.error("Value Error!")
                    logger.error(str(err))
                except:
                    logger.error("Line unexpected error: " +
                                 str(sys.exc_info()[0]))
                    logger.error("Line unexpected error: " +
                                 str(sys.exc_info()[1]))
    except IOError as err:
        logger.error("I/O error({0}): {1}".format(err.errno, err.strerror))
        exit(1)
    except:
        logger.error("Unexpected error: " + str(sys.exc_info()[0]))
        logger.error("Unexpected error: " + str(sys.exc_info()[1]))
        exit(1)

    # Indicate that the processing of the job is complete and ready for download to Marinus
    jobs_collection.update_one(
        {"job_name": "censys"},
        {
            "$currentDate": {
                "updated": True
            },
            "$set": {
                "status": "COMPLETE"
            }
        },
    )

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example 26
    def __init__(self):
        self._logger = LoggingUtil.create_log(__name__)
        self.incorrect_response_json_allowed = self.APIH.INCORRECT_RESPONSE_JSON_ALLOWED
        self.get_infoblox_zones()
Example 27
        self._logger.info("Starting....")
        self.job_manager = JobsManager.JobsManager(self.MC,
                                                   'get_iblox_alpha_zones')
        self.job_manager.record_job_start()

        self.__get_previous_zones()
        for alphabet in self.alphabets:
            self.alphabet_queried = alphabet
            self.next_page_id = None
            self.__infoblox_paginated_request()
            while self.next_page_id:
                self.__infoblox_paginated_request()

        self.__clean_collection()

        # Record status
        self.job_manager.record_job_complete()

        print("Ending: " + str(datetime.now()))
        self._logger.info("Complete")

    def __init__(self):
        self._logger = LoggingUtil.create_log(__name__)
        self.incorrect_response_json_allowed = self.APIH.INCORRECT_RESPONSE_JSON_ALLOWED
        self.get_infoblox_zones()


if __name__ == '__main__':
    logger = LoggingUtil.create_log(__name__)
    IZ = InfobloxZone()
Example 28
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    # Set up the common objects
    mongo_connector = MongoConnector.MongoConnector()
    ct_collection = mongo_connector.get_certificate_transparency_connection()
    zones = ZoneManager.get_distinct_zones(mongo_connector)
    jobs_manager = JobsManager.JobsManager(mongo_connector, "get_crt_sh")
    jobs_manager.record_job_start()

    save_location = "/mnt/workspace/crt_sh"
    download_method = 'dbAndSave'

    parser = argparse.ArgumentParser(
        description='Download DNS and/or certificate information from crt.sh.')
    parser.add_argument(
        '--fetch_dns_records',
        action='store_true',
        help='Indicates whether to add DNS entries to the database')
    parser.add_argument(
        '--download_methods',
        choices=['dbAndSave', 'dbOnly'],
        default=download_method,
        help=
        'Indicates whether to download the raw files or just record in the database.'
    )
    parser.add_argument(
        '--cert_save_location',
        required=False,
        default=save_location,
        help=
        'Indicates where to save the certificates on disk when choosing dbAndSave'
    )
    args = parser.parse_args()

    if args.cert_save_location:
        save_location = args.cert_save_location
        if not save_location.endswith("/"):
            save_location = save_location + "/"

    if args.download_methods == 'dbAndSave':
        check_save_location(save_location)

    for zone in zones:
        # Pace out requests so as not to DoS crt.sh and Google DNS
        time.sleep(5)

        # This could be done with backoff but we don't want to be overly aggressive.
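        # Note: "%25" is the URL-encoded "%", so the request below asks crt.sh
        # for "%.<zone>", i.e. every certificate covering a subdomain of the zone.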
        json_result = make_https_request(
            logger, "https://crt.sh/?q=%25." + zone + "&output=json")
        if json_result is None:
            logger.warning("Can't find result for: " + zone)
            json_result = "{}"

        json_data = json.loads(json_result)

        new_names = []
        new_ids = []
        for entry in json_data:
            if entry['id'] not in new_ids:
                new_ids.append(entry['id'])

            if "*" not in entry["name_value"] and entry[
                    "name_value"] not in new_names:
                new_names.append(entry["name_value"])

        if args.fetch_dns_records:
            add_new_domain_names(new_names, zones, mongo_connector)

        if args.download_methods == "dbAndSave":
            add_new_certificate_values(logger, new_ids, ct_collection, zones,
                                       save_location)
        elif args.download_methods == "dbOnly":
            add_new_certificate_values(logger, new_ids, ct_collection, zones,
                                       None)

    # Set isExpired for any entries that have recently expired.
    ct_collection.update_many(
        {
            "not_after": {
                "$lt": datetime.utcnow()
            },
            "isExpired": False
        }, {"$set": {
            "isExpired": True
        }})

    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example n. 29
def main():
    """
    Begin Main...
    """
    global global_exit_flag
    global global_retest_list
    global global_sleep_time
    global global_queue_size
    global global_zgrab_path

    logger = LoggingUtil.create_log(__name__)

    global_retest_list = []

    parser = argparse.ArgumentParser(
        description="Launch zgrab against IPs using port 22, 25, 443, or 465.")
    parser.add_argument(
        "-p",
        choices=["22", "25", "443", "465"],
        metavar="port",
        help="The port to scan: 22, 25, 443, or 465",
    )
    parser.add_argument("-t",
                        default=5,
                        type=int,
                        metavar="threadCount",
                        help="The number of threads")
    parser.add_argument(
        "--mx",
        action="store_true",
        help="Scan only IPs from MX records. Useful for SMTP scans.",
    )
    parser.add_argument(
        "-s",
        default=0,
        type=int,
        metavar="sleepTime",
        help="Sleep time in order to spread out the batches",
    )
    parser.add_argument(
        "--qs",
        default=0,
        type=int,
        metavar="queueSize",
        help="How many hosts to scan in a batch",
    )
    parser.add_argument("--zones_only",
                        action="store_true",
                        help="Scan only IPs from IP zones.")
    parser.add_argument(
        "--zgrab_path",
        default=global_zgrab_path,
        metavar="zgrabVersion",
        help="The version of ZGrab to use",
    )
    args = parser.parse_args()

    if args.p is None:
        logger.error("A port value (22, 25, 443, or 465) must be provided.")
        exit(1)

    if is_running(os.path.basename(__file__)):
        # Check to see if a previous attempt to parse is still running...
        now = datetime.now()
        logger.warning(str(now) + ": I am already running! Goodbye!")
        exit(0)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    rm_connector = RemoteMongoConnector.RemoteMongoConnector()
    all_dns_collection = rm_connector.get_all_dns_connection()
    ip_manager = IPManager.IPManager(rm_connector, True)

    jobs_manager = JobsManager.JobsManager(rm_connector,
                                           "zgrab_port_ip-" + args.p)
    jobs_manager.record_job_start()

    zones_struct = {}
    zones_struct["zones"] = ZoneManager.get_distinct_zones(rm_connector)

    # Not pretty but works
    zones_struct["ip_manager"] = ip_manager

    if args.mx:
        (ips, ip_context) = get_mx_ips(zones_struct["zones"], ip_manager,
                                       all_dns_collection)
    elif args.zones_only:
        (ips, ip_context) = get_only_ipzones(ip_manager.Tracked_CIDRs)
    else:
        (ips, ip_context) = get_ips(ip_manager, all_dns_collection)

    if args.s and int(args.s) > 0:
        global_sleep_time = int(args.s)

    if args.qs and int(args.qs) > 0:
        global_queue_size = int(args.qs)

    logger.info("Got IPs: " + str(len(ips)))
    zones_struct["ip_context"] = ip_context

    zgrab_collection = rm_connector.get_zgrab_port_data_connection()
    if args.p == "443":
        run_command = run_port_443_command
    elif args.p == "22":
        run_command = run_port_22_command
    elif args.p == "25":
        run_command = run_port_25_command
    elif args.p == "465":
        run_command = run_port_465_command

    check_save_location("./json_p" + args.p)

    global_zgrab_path = args.zgrab_path

    threads = []

    logger.debug("Creating " + str(args.t) + " threads")
    for thread_id in range(1, args.t + 1):
        thread = ZgrabThread(
            thread_id,
            global_work_queue,
            args.p,
            run_command,
            zones_struct,
            zgrab_collection,
        )
        thread.start()
        threads.append(thread)

    logger.info("Populating Queue")
    global_queue_lock.acquire()
    for ip in ips:
        global_work_queue.put(ip)
    global_queue_lock.release()

    # Wait for queue to empty
    while not global_work_queue.empty():
        pass
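
    # Note: this spin-wait burns a CPU core; queue.Queue's task_done()/join()
    # pairing would block instead, at the cost of workers acknowledging items.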

    # Notify threads it's time to exit
    global_exit_flag = 1

    # Wait for all threads to complete
    for t in threads:
        t.join()

    logger.info("Exiting Main Thread")

    logger.info("Global retest list: " + str(len(global_retest_list)))

    # Retest any SMTP hosts that did not respond to the StartTLS handshake
    if args.p == "25" and len(global_retest_list) > 0:
        process_thread(
            logger,
            global_retest_list,
            args.p,
            run_port_25_no_tls_command,
            zones_struct,
            zgrab_collection,
            "retest",
        )

    # Remove old entries from before the scan. Each port stores its zgrab
    # results under a different key, so map the port to the key whose stale
    # entries should be cleared.
    if args.p == "443":
        stale_key = "data.tls"
    elif args.p == "22":
        stale_key = "data.ssh" if "zgrab2" in global_zgrab_path else "data.xssh"
    elif args.p == "25":
        stale_key = "data.smtp"
    else:
        stale_key = "data.smtps"

    other_results = zgrab_collection.find({
        stale_key: {
            "$exists": True
        },
        stale_key + ".timestamp": {
            "$lt": now
        }
    })
    for result in other_results:
        zgrab_collection.update_one({"_id": ObjectId(result["_id"])},
                                    {"$unset": {
                                        stale_key: ""
                                    }})

    # Remove any completely empty entries
    zgrab_collection.delete_many({"data": {}})

    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example n. 30
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    if is_running(os.path.basename(__file__)):
        logger.warning("Already running...")
        exit(0)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    r7 = Rapid7.Rapid7()

    mongo_connection = MongoConnector.MongoConnector()
    dns_manager = DNSManager.DNSManager(mongo_connection)
    ip_manager = IPManager.IPManager(mongo_connection)
    rdns_collection = mongo_connection.get_sonar_reverse_dns_connection()

    zones = ZoneManager.get_distinct_zones(mongo_connection)
    logger.info("Zone length: " + str(len(zones)))

    save_directory = "./files/"

    parser = argparse.ArgumentParser(
        description='Parse Sonar files based on CIDRs.')
    parser.add_argument('--sonar_file_type',
                        required=True,
                        help='Specify "dns" or "rdns"')
    args = parser.parse_args()

    check_save_location(save_directory)

    # A session is necessary for the multi-step log-in process
    s = requests.Session()

    if args.sonar_file_type == "rdns":
        jobs_manager = JobsManager.JobsManager(mongo_connection,
                                               'get_data_by_cidr_rdns')
        jobs_manager.record_job_start()

        try:
            html_parser = r7.find_file_locations(s, "rdns", jobs_manager)
            if html_parser.rdns_url == "":
                logger.error("Unknown Error")
                jobs_manager.record_job_error()
                exit(0)

            unzipped_rdns = download_remote_files(logger, s,
                                                  html_parser.rdns_url,
                                                  save_directory, jobs_manager)
            update_rdns(logger, unzipped_rdns, rdns_collection, dns_manager,
                        ip_manager, zones)
        except Exception as ex:
            logger.error("Unexpected error: " + str(ex))
            jobs_manager.record_job_error()
            exit(0)

        logger.info("RDNS Complete")
        jobs_manager.record_job_complete()

    elif args.sonar_file_type == "dns":
        jobs_manager = JobsManager.JobsManager(mongo_connection,
                                               'get_data_by_cidr_dns')
        jobs_manager.record_job_start()

        try:
            html_parser = r7.find_file_locations(s, "fdns", jobs_manager)
            if html_parser.any_url != "":
                unzipped_dns = download_remote_files(logger, s,
                                                     html_parser.any_url,
                                                     save_directory,
                                                     jobs_manager)
                update_dns(logger, unzipped_dns, dns_manager, ip_manager,
                           zones)
            if html_parser.a_url != "":
                unzipped_dns = download_remote_files(logger, s,
                                                     html_parser.a_url,
                                                     save_directory,
                                                     jobs_manager)
                update_dns(logger, unzipped_dns, dns_manager, ip_manager,
                           zones)
            if html_parser.aaaa_url != "":
                unzipped_dns = download_remote_files(logger, s,
                                                     html_parser.aaaa_url,
                                                     save_directory,
                                                     jobs_manager)
                update_dns(logger, unzipped_dns, dns_manager, ip_manager,
                           zones)
        except Exception as ex:
            logger.error("Unexpected error: " + str(ex))

            jobs_manager.record_job_error()
            exit(0)

        logger.info("DNS Complete")

        jobs_manager.record_job_complete()

    else:
        logger.error("Unrecognized sonar_file_type option. Exiting...")

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")