Example #1
def main():
    """
    Begin Main...
    """
    now = datetime.now()
    print("Starting: " + str(now))

    mongo_connector = MongoConnector.MongoConnector()
    remote_mongo_connector = RemoteMongoConnector.RemoteMongoConnector()

    jobs_collection = mongo_connector.get_jobs_connection()

    zone_list = update_zones(mongo_connector, remote_mongo_connector)
    update_ip_zones(mongo_connector, remote_mongo_connector)
    update_aws_cidrs(mongo_connector, remote_mongo_connector)
    update_azure_cidrs(mongo_connector, remote_mongo_connector)
    update_config(mongo_connector, remote_mongo_connector)
    update_braas(mongo_connector, remote_mongo_connector)
    update_all_dns(mongo_connector, remote_mongo_connector, zone_list)

    # Record status
    jobs_collection.update_one({'job_name': 'send_remote_server'}, {
        '$currentDate': {
            "updated": True
        },
        "$set": {
            'status': 'COMPLETE'
        }
    })

    now = datetime.now()
    print("Complete: " + str(now))
Example #2
def main():
    """
    Begin Main...
    """
    now = datetime.now()
    print("Starting: " + str(now))

    mongo_connector = RemoteMongoConnector.RemoteMongoConnector()
    jobs_collection = mongo_connector.get_jobs_connection()

    # Collect the tracked zones...
    zones = get_zones(mongo_connector)

    whois_collection = mongo_connector.get_whois_connection()

    for zone in zones:
        # Ensure the zone contains at least one dot. This is left over from an old bug.
        if zone.find(".") > 0:

            print(zone)
            zone_result = whois_collection.find_one({'zone': zone})

            # If we haven't done a lookup in the past, try to collect the data.
            # A limit exists on the number of whois lookups you can perform, so restrict lookups to new domains.
            if zone_result is None:
                do_whois_lookup(zone, whois_collection)

    # The cap on the number of old entries to be updated.
    MAX_OLD_ENTRIES = 400

    # Grab entries that haven't been updated in 3 months
    three_months_ago = datetime.now() - timedelta(days=90, hours=1)
    zone_result = whois_collection.find({
        "updated": {
            "$lte": three_months_ago
        }
    }).batch_size(10)

    i = 0
    for result in zone_result:
        do_whois_lookup(result["zone"], whois_collection)
        i = i + 1

        # Chances are that a lot of the entries were inserted on the same day.
        # This helps spread the updating of old entries across different runs.
        if i > MAX_OLD_ENTRIES:
            break

    # Record status
    jobs_collection.update_one({'job_name': 'whois_lookups'}, {
        '$currentDate': {
            "updated": True
        },
        "$set": {
            'status': 'COMPLETE'
        }
    })

    now = datetime.now()
    print("Ending: " + str(now))
Example #3
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = RemoteMongoConnector.RemoteMongoConnector()
    jobs_manager = JobsManager.JobsManager(mongo_connector, 'whois_lookups')
    jobs_manager.record_job_start()

    # Collect the tracked zones...
    zones = get_zones(mongo_connector)

    whois_collection = mongo_connector.get_whois_connection()

    for zone in zones:
        # Ensure the zone contains at least one dot. This is left over from an old bug.
        if zone.find(".") > 0:

            logger.debug(zone)
            zone_result = whois_collection.find_one({'zone': zone})

            # If we haven't done a lookup in the past, try to collect the data.
            # A limit exists on the number of whois lookups you can perform, so restrict lookups to new domains.
            if zone_result is None:
                do_whois_lookup(logger, zone, whois_collection)

    # The cap on the number of old entries to be updated.
    MAX_OLD_ENTRIES = 400

    # Grab entries that haven't been updated in 3 months
    three_months_ago = datetime.now() - timedelta(days=90, hours=1)
    zone_result = whois_collection.find({
        "updated": {
            "$lte": three_months_ago
        }
    }).batch_size(10)

    i = 0
    for result in zone_result:
        do_whois_lookup(logger, result["zone"], whois_collection)
        i = i + 1

        # Chances are that a lot of the entries were inserted on the same day.
        # This helps spread the updating of old entries across different runs.
        if i > MAX_OLD_ENTRIES:
            break

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example #4
def main():
    global global_exit_flag
    global global_retest_list
    global global_sleep_time
    global global_queue_size

    global_retest_list = []

    parser = argparse.ArgumentParser(description='Launch zgrab against IPs using port 22, 25, 443, or 465.')
    parser.add_argument('-p', choices=['22', '25', '443', '465'], metavar="port", help='The port to scan: 22, 25, 443, or 465')
    parser.add_argument('-t', default=5, type=int, metavar="threadCount", help='The number of threads')
    parser.add_argument('--mx', action="store_true", help='Scan only IPs from MX records. Useful for SMTP scans.')
    parser.add_argument('-s', default=0, type=int, metavar="sleepTime", help='Sleep time in order to spread out the batches')
    parser.add_argument('--qs', default=0, type=int, metavar="queueSize", help='How many hosts to scan in a batch')
    parser.add_argument('--zones_only', action="store_true", help='Scan only IPs from IP zones.')
    args = parser.parse_args()

    if args.p is None:
        print("A port value (22, 25, 443, or 465) must be provided.")
        exit(0)

    if is_running(os.path.basename(__file__)):
        """
        Check to see if a previous attempt to parse is still running...
        """
        now = datetime.now()
        print(str(now) + ": I am already running! Goodbye!")
        exit(0)

    now = datetime.now()
    print("Starting: " + str(now))

    rm_connector = RemoteMongoConnector.RemoteMongoConnector()
    all_dns_collection = rm_connector.get_all_dns_connection()

    zones_struct = {}
    zones_struct['zones'] = ZoneManager.get_distinct_zones(rm_connector)

    zones_struct['ip_zones'] = get_ip_zones(rm_connector)

    # Collect the list of AWS CIDRs
    zones_struct['aws_ips'] = get_aws_ips(rm_connector)

    # Collect the list of Azure CIDRs
    zones_struct['azure_ips'] = get_azure_ips(rm_connector)

    if args.mx:
        (ips, ip_context) = get_mx_ips(zones_struct['zones'], all_dns_collection)
    elif args.zones_only:
        (ips, ip_context) = get_only_ipzones(zones_struct['ip_zones'])
    else:
        (ips, ip_context) = get_ips(zones_struct['ip_zones'], all_dns_collection)

    if args.s and int(args.s) > 0:
        global_sleep_time = int(args.s)

    if args.qs and int(args.qs) > 0:
        global_queue_size = int(args.qs)

    print("Got IPs: " + str(len(ips)))
    zones_struct['ip_context'] = ip_context

    zgrab_collection = rm_connector.get_zgrab_port_data_connection()
    if args.p == "443":
        run_command = run_port_443_command
    elif args.p == "22":
        run_command = run_port_22_command
    elif args.p == "25":
        run_command = run_port_25_command
    elif args.p == "465":
        run_command = run_port_465_command

    threads = []

    print ("Creating " + str(args.t) + " threads")
    for thread_id in range (1, args.t + 1):
        thread = ZgrabThread(thread_id, global_work_queue, args.p, run_command, zones_struct, zgrab_collection)
        thread.start()
        threads.append(thread)
        thread_id += 1

    print("Populating Queue")
    global_queue_lock.acquire()
    for ip in ips:
        global_work_queue.put(ip)
    global_queue_lock.release()

    # Wait for queue to empty
    while not global_work_queue.empty():
        pass

    # Notify threads it's time to exit
    global_exit_flag = 1

    # Wait for all threads to complete
    for t in threads:
        t.join()

    print ("Exiting Main Thread")

    print("Global retest list: " + str(len(global_retest_list)))

    # Retest any SMTP hosts that did not respond to the StartTLS handshake
    if args.p == "25" and len(global_retest_list) > 0:
        process_thread(global_retest_list, args.p, run_port_25_no_tls_command, zones_struct, zgrab_collection, "retest")


    # Remove old entries from before the scan
    if args.p == "443":
        other_results = zgrab_collection.find({'data.tls': {"$exists": True}, 'data.tls.timestamp': {"$lt": now}})
        for result in other_results:
            zgrab_collection.update_one({"_id": ObjectId(result['_id'])}, {"$unset": {'data.tls': ""}})
    elif args.p == "22":
        other_results = zgrab_collection.find({'data.xssh': {"$exists": True}, 'data.xssh.timestamp': {"$lt": now}})
        for result in other_results:
            zgrab_collection.update_one({"_id": ObjectId(result['_id'])}, {"$unset": {'data.xssh': ""}})
    elif args.p == "25":
        other_results = zgrab_collection.find({'data.smtp': {"$exists": True}, 'data.smtp.timestamp': {"$lt": now}})
        for result in other_results:
            zgrab_collection.update_one({"_id": ObjectId(result['_id'])}, {"$unset": {'data.smtp': ""}})
    elif args.p == "465":
        other_results = zgrab_collection.find({'data.smtps': {"$exists": True}, 'data.smtps.timestamp': {"$lt": now}})
        for result in other_results:
            zgrab_collection.update_one({"_id": ObjectId(result['_id'])}, {"$unset": {'data.smtps': ""}})

    # Remove any completely empty entries
    zgrab_collection.delete_many({'data': {}})

    now = datetime.now()
    print("Complete: " + str(now))
Example #5
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    parser = argparse.ArgumentParser(
        description=
        'Send specific collections to the remote MongoDB. If no arguments are provided, then all data is mirrored.'
    )
    parser.add_argument('--send_zones',
                        action="store_true",
                        required=False,
                        help='Send zones')
    parser.add_argument('--send_ip_zones',
                        action="store_true",
                        required=False,
                        help='Send IP zones')
    parser.add_argument('--send_third_party_zones',
                        action="store_true",
                        required=False,
                        help='Send AWS, Azure, etc.')
    parser.add_argument('--send_config',
                        action="store_true",
                        required=False,
                        help='Send configs')
    parser.add_argument('--send_dns_records',
                        action="store_true",
                        required=False,
                        help='Send DNS records')
    args = parser.parse_args()

    send_all = False
    if len(sys.argv) == 1:
        send_all = True

    mongo_connector = MongoConnector.MongoConnector()
    remote_mongo_connector = RemoteMongoConnector.RemoteMongoConnector()

    jobs_manager = JobsManager.JobsManager(mongo_connector,
                                           'send_remote_server')
    jobs_manager.record_job_start()

    if send_all or args.send_zones:
        zone_list = update_zones(logger, mongo_connector,
                                 remote_mongo_connector, True)
    else:
        zone_list = update_zones(logger, mongo_connector,
                                 remote_mongo_connector, False)

    if send_all or args.send_ip_zones:
        update_ip_zones(logger, mongo_connector, remote_mongo_connector)

    if send_all or args.send_third_party_zones:
        update_aws_cidrs(logger, mongo_connector, remote_mongo_connector)
        update_azure_cidrs(logger, mongo_connector, remote_mongo_connector)
        update_akamai_cidrs(logger, mongo_connector, remote_mongo_connector)
        update_gcp_cidrs(logger, mongo_connector, remote_mongo_connector)

    if send_all or args.send_config:
        update_config(logger, mongo_connector, remote_mongo_connector)

    if send_all or args.send_dns_records:
        update_all_dns(logger, mongo_connector, remote_mongo_connector,
                       zone_list)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example #6
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    parser = argparse.ArgumentParser(
        description="Send specific collections to the remote MongoDB. If no arguments are provided, then all data is mirrored."
    )
    parser.add_argument(
        "--send_zones", action="store_true", required=False, help="Send zones"
    )
    parser.add_argument(
        "--send_ip_zones", action="store_true", required=False, help="Send IP zones"
    )
    parser.add_argument(
        "--send_third_party_zones",
        action="store_true",
        required=False,
        help="Send AWS, Azure, etc.",
    )
    parser.add_argument(
        "--send_config", action="store_true", required=False, help="Send configs"
    )
    parser.add_argument(
        "--send_dns_records",
        action="store_true",
        required=False,
        help="Replace all DNS records",
    )
    parser.add_argument(
        "--send_dns_diff",
        action="store_true",
        required=False,
        help="Send new DNS records",
    )
    parser.add_argument(
        "--date_diff",
        default=2,
        type=int,
        help="The number of days used for identifying new records in send_dns_diff",
    )
    args = parser.parse_args()

    send_all = False
    if len(sys.argv) == 1:
        send_all = True

    mongo_connector = MongoConnector.MongoConnector()
    remote_mongo_connector = RemoteMongoConnector.RemoteMongoConnector()

    jobs_manager = JobsManager.JobsManager(mongo_connector, "send_remote_server")
    jobs_manager.record_job_start()

    if send_all or args.send_zones:
        try:
            zone_list = update_zones(
                logger, mongo_connector, remote_mongo_connector, True
            )
        except:
            logger.error(
                "Could not communicate with the remote database when sending zones"
            )
            jobs_manager.record_job_error()
            exit(1)
    else:
        zone_list = update_zones(logger, mongo_connector, remote_mongo_connector, False)

    if send_all or args.send_ip_zones:
        try:
            update_ip_zones(logger, mongo_connector, remote_mongo_connector)
        except:
            logger.error(
                "Could not communicate with the remote database when sending IP zones"
            )
            jobs_manager.record_job_error()
            exit(1)

    if send_all or args.send_third_party_zones:
        try:
            update_aws_cidrs(logger, mongo_connector, remote_mongo_connector)
            update_azure_cidrs(logger, mongo_connector, remote_mongo_connector)
            update_akamai_cidrs(logger, mongo_connector, remote_mongo_connector)
            update_gcp_cidrs(logger, mongo_connector, remote_mongo_connector)
        except:
            logger.error(
                "Could not communicate with the remote database when sending third-party zones"
            )
            jobs_manager.record_job_error()
            exit(1)

    if send_all or args.send_config:
        try:
            update_config(logger, mongo_connector, remote_mongo_connector)
        except:
            logger.error(
                "Could not communicate with the remote database when sending config data"
            )
            jobs_manager.record_job_error()
            exit(1)

    # This will completely repopulate the DNS records table.
    if args.send_dns_records:
        try:
            update_all_dns(logger, mongo_connector, remote_mongo_connector, zone_list)
        except:
            logger.error(
                "Could not communicate with the remote database when sending DNS records"
            )
            jobs_manager.record_job_error()
            exit(1)

    # If you have a large data set, then you may only want to send updated records
    if send_all or args.send_dns_diff:
        try:
            update_all_dns_diff_mode(
                logger,
                mongo_connector,
                remote_mongo_connector,
                zone_list,
                args.date_diff,
            )
        except:
            logger.error(
                "Could not communicate with the remote database when sending DNS diff records"
            )
            jobs_manager.record_job_error()
            exit(1)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example #7
def main():
    """
    Begin main...
    """
    logger = LoggingUtil.create_log(__name__)

    if is_running("get_censys_files.py"):
        """
        Check to see if a download is in process...
        """
        logger.warning("Can't run due to get_files running. Goodbye!")
        exit(0)

    if is_running(os.path.basename(__file__)):
        """
        Check to see if a previous attempt to parse is still running...
        """
        logger.warning("I am already running! Goodbye!")
        exit(0)

    # Make the relevant database connections
    RMC = RemoteMongoConnector.RemoteMongoConnector()

    ip_manager = IPManager.IPManager(RMC)

    # Verify that the get_files script has a recent file in need of parsing.
    jobs_collection = RMC.get_jobs_connection()

    status = jobs_collection.find_one({"job_name": "censys"})
    if status["status"] != "DOWNLOADED":
        logger.warning("The status is not set to DOWNLOADED. Goodbye!")
        exit(0)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    # Collect the list of available zones
    zones = ZoneManager.get_distinct_zones(RMC)

    logger.info("Zones: " + str(len(zones)))

    # Get the current configuration information for Marinus.
    config_collection = RMC.get_config_connection()

    configs = config_collection.find({})
    orgs = []
    for org in configs[0]["SSL_Orgs"]:
        orgs.append(org)

    logger.info("Orgs: " + str(len(orgs)))

    # Obtain the name of the decompressed file.
    with open(FILENAME_FILE, "r") as filename_f:
        decompressed_file = filename_f.readline()

    # For manual testing: decompressed_file = "ipv4.json"

    logger.info("Beginning file processing...")

    # Remove old results from the database
    results_collection = RMC.get_results_connection()
    results_collection.delete_many({})
    all_dns_collection = RMC.get_all_dns_connection()

    try:
        with open(decompressed_file, "r") as dec_f:
            for line in dec_f:
                try:
                    entry = json.loads(line)
                    """
                    Does the SSL certificate match a known organization?
                    Is the IP address in a known CIDR?
                    Is the IP address recorded in Splunk?
                    """
                    if (check_in_org(entry, orgs)
                            or ip_manager.is_tracked_ip(entry["ip"])
                            or ip_manager.find_splunk_data(entry["ip"], "AWS") is not None
                            or ip_manager.find_splunk_data(entry["ip"], "AZURE") is not None):
                        entry["zones"] = check_in_zone(entry, zones)
                        entry["aws"] = ip_manager.is_aws_ip(entry["ip"])
                        entry["azure"] = ip_manager.is_azure_ip(entry["ip"])
                        (domains,
                         zones) = lookup_domain(entry, zones,
                                                all_dns_collection)
                        if len(domains) > 0:
                            entry["domains"] = domains
                            if len(zones) > 0:
                                for zone in zones:
                                    if zone not in entry["zones"]:
                                        entry["zones"].append(zone)
                        insert_result(entry, results_collection)
                    # else:
                    #     #This will add days to the amount of time necessary to scan the file.
                    #     matched_zones = check_in_zone(entry, zones)
                    #     if matched_zones != []:
                    #         entry['zones'] = matched_zones
                    #         entry['aws'] = ip_manager.is_aws_ip(entry['ip'])
                    #         entry['azure'] = ip_manager.is_azure_ip(entry['ip'])
                    #         insert_result(entry, results_collection)
                except ValueError as err:
                    logger.error("Value Error!")
                    logger.error(str(err))
                except:
                    logger.error("Line unexpected error: " +
                                 str(sys.exc_info()[0]))
                    logger.error("Line unexpected error: " +
                                 str(sys.exc_info()[1]))
    except IOError as err:
        logger.error("I/O error({0}): {1}".format(err.errno, err.strerror))
        exit(1)
    except:
        logger.error("Unexpected error: " + str(sys.exc_info()[0]))
        logger.error("Unexpected error: " + str(sys.exc_info()[1]))
        exit(1)

    # Indicate that the processing of the job is complete and ready for download to Marinus
    jobs_collection.update_one(
        {"job_name": "censys"},
        {
            "$currentDate": {
                "updated": True
            },
            "$set": {
                "status": "COMPLETE"
            }
        },
    )

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example #8
def main():
    """
    Begin main...
    """

    if is_running("get_censys_files.py"):
        """
        Check to see if a download is in process...
        """
        now = datetime.now()
        print(str(now) + ": Can't run due to get_files running. Goodbye!")
        exit(0)

    if is_running(os.path.basename(__file__)):
        """
        Check to see if a previous attempt to parse is still running...
        """
        now = datetime.now()
        print(str(now) + ": I am already running! Goodbye!")
        exit(0)

    # Make the relevant database connections
    RMC = RemoteMongoConnector.RemoteMongoConnector()

    # Verify that the get_files script has a recent file in need of parsing.
    jobs_collection = RMC.get_jobs_connection()

    status = jobs_collection.find_one({'job_name': 'censys'})
    if status is None or status['status'] != "DOWNLOADED":
        now = datetime.now()
        print(str(now) + ": The status is not set to DOWNLOADED. Goodbye!")
        exit(0)

    now = datetime.now()
    print("Starting: " + str(now))

    # Collect the list of available zones
    zones = ZoneManager.get_distinct_zones(RMC)

    print("Zones: " + str(len(zones)))

    # Collect the list of AWS CIDRs
    aws_ips = []
    get_aws_ips(RMC, aws_ips)

    print("AWS IPs: " + str(len(aws_ips)))

    # Collect the list of Azure CIDRs
    azure_ips = []
    get_azure_ips(RMC, azure_ips)

    print("Azure IPs: " + str(len(azure_ips)))

    # Collect the list of known CIDRs
    ip_zones_collection = RMC.get_ipzone_connection()

    results = ip_zones_collection.find({'status': {"$ne": "false_positive"}})
    cidrs = []
    for entry in results:
        cidrs.append(IPNetwork(entry['zone']))

    print("CIDRs: " + str(len(cidrs)))

    # Get the current configuration information for Marinus.
    config_collection = RMC.get_config_connection()

    configs = config_collection.find({})
    orgs = []
    for org in configs[0]['SSL_Orgs']:
        orgs.append(org)

    print("Orgs: " + str(len(orgs)))

    # Obtain the name of the decompressed file.
    with open(FILENAME_FILE, "r") as filename_f:
        decompressed_file = filename_f.readline()

    # For manual testing: decompressed_file = "ipv4.json"

    now = datetime.now()
    print(str(now) + ": Beginning file processing...")

    # Remove old results from the database
    results_collection = RMC.get_results_connection()
    results_collection.delete_many({})
    all_dns_collection = RMC.get_all_dns_connection()

    try:
        with open(decompressed_file, "r") as dec_f:
            for line in dec_f:
                try:
                    entry = json.loads(line)
                    """
                    Does the SSL certificate match a known organization?
                    Is the IP address in a known CIDR?
                    """
                    if check_in_org(entry, orgs) or \
                       check_in_cidr(entry['ip'], cidrs):
                        entry['zones'] = check_in_zone(entry, zones)
                        entry['aws'] = is_aws_ip(entry['ip'], aws_ips)
                        entry['azure'] = is_azure_ip(entry['ip'], azure_ips)
                        (domains,
                         zones) = lookup_domain(entry, zones,
                                                all_dns_collection)
                        if len(domains) > 0:
                            entry['domains'] = domains
                            if len(zones) > 0:
                                for zone in zones:
                                    if zone not in entry['zones']:
                                        entry['zones'].append(zone)
                        insert_result(entry, results_collection)
                    # else:
                    #     #This will add days to the amount of time necessary to scan the file.
                    #     matched_zones = check_in_zone(entry, zones)
                    #     if matched_zones != []:
                    #         entry['zones'] = matched_zones
                    #         entry['aws'] = is_aws_ip(entry['ip'], aws_ips)
                    #         entry['azure'] = is_azure_ip(entry['ip'], azure_ips)
                    #         insert_result(entry, results_collection)
                except ValueError as err:
                    print("Value Error!")
                    print(str(err))
                except:
                    print("Line unexpected error:", sys.exc_info()[0])
                    print("Line unexpected error:", sys.exc_info()[1])
    except IOError as err:
        print("I/O error({0}): {1}".format(err.errno, err.strerror))
        exit(0)
    except:
        print("Unexpected error:", sys.exc_info()[0])
        print("Unexpected error:", sys.exc_info()[1])
        exit(0)

    # Indicate that the processing of the job is complete and ready for download to Marinus
    jobs_collection.update_one({'job_name': 'censys'}, {
        '$currentDate': {
            "updated": True
        },
        "$set": {
            'status': 'COMPLETE'
        }
    })

    now = datetime.now()
    print("Ending: " + str(now))
Example #9
def main():
    """
    Begin Main...
    """
    global global_exit_flag
    global global_zgrab_path

    logger = LoggingUtil.create_log(__name__)

    parser = argparse.ArgumentParser(
        description="Launch zgrab against IPs using port 80 or 443.")
    parser.add_argument("-p",
                        choices=["443", "80"],
                        metavar="port",
                        help="The web port: 80 or 443")
    parser.add_argument("-t",
                        default=5,
                        type=int,
                        metavar="threadCount",
                        help="The number of threads")
    parser.add_argument(
        "--zgrab_path",
        default=global_zgrab_path,
        metavar="zgrabVersion",
        help="The version of ZGrab to use",
    )
    args = parser.parse_args()

    if args.p is None:
        logger.error("A port value (80 or 443) must be provided.")
        exit(1)

    if is_running(os.path.basename(__file__)):
        """
        Check to see if a previous attempt to parse is still running...
        """
        now = datetime.now()
        logger.warning(str(now) + ": I am already running! Goodbye!")
        exit(0)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    rm_connector = RemoteMongoConnector.RemoteMongoConnector()
    all_dns_collection = rm_connector.get_all_dns_connection()
    ip_manager = IPManager.IPManager(rm_connector, True)

    jobs_manager = JobsManager.JobsManager(rm_connector,
                                           "zgrab_http_ip-" + args.p)
    jobs_manager.record_job_start()

    zones_struct = {}
    zones_struct["zones"] = ZoneManager.get_distinct_zones(rm_connector)

    # Not pretty but cleaner than previous method
    zones_struct["ip_manager"] = ip_manager

    (ips, ip_context) = get_ips(ip_manager, all_dns_collection)
    logger.info("Got IPs: " + str(len(ips)))
    zones_struct["ip_context"] = ip_context

    if args.p == "443":
        zgrab_collection = rm_connector.get_zgrab_443_data_connection()
        run_command = run_port_443_command
    else:
        zgrab_collection = rm_connector.get_zgrab_80_data_connection()
        run_command = run_port_80_command

    check_save_location("./json_p" + args.p)

    global_zgrab_path = args.zgrab_path

    threads = []

    logger.debug("Creating " + str(args.t) + " threads")
    for thread_id in range(1, args.t + 1):
        thread = ZgrabThread(
            thread_id,
            global_work_queue,
            args.p,
            run_command,
            zones_struct,
            zgrab_collection,
        )
        thread.start()
        threads.append(thread)
        thread_id += 1

    logger.info("Populating Queue")
    global_queue_lock.acquire()
    for ip in ips:
        global_work_queue.put(ip)
    global_queue_lock.release()

    # Wait for queue to empty
    while not global_work_queue.empty():
        pass

    # Notify threads it's time to exit
    global_exit_flag = 1

    # Wait for all threads to complete
    for t in threads:
        t.join()

    logger.info("Exiting Main Thread")

    # Remove last week's old entries
    lastweek = datetime.now() - timedelta(days=7)
    zgrab_collection.delete_many({
        "ip": {
            "$ne": "<nil>"
        },
        "timestamp": {
            "$lt": lastweek
        }
    })

    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example #10
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    # Connect to the remote databases
    mongo_connector = MongoConnector.MongoConnector()
    rm_connector = RemoteMongoConnector.RemoteMongoConnector()
    dns_manager = DNSManager.DNSManager(mongo_connector)
    zones = ZoneManager.get_distinct_zones(mongo_connector)

    jobs_manager = JobsManager.JobsManager(mongo_connector, "remote_download")
    jobs_manager.record_job_start()

    remote_jobs_collection = rm_connector.get_jobs_connection()

    # Check the status of the Censys job on the remote database
    try:
        status = remote_jobs_collection.find_one({"job_name": "censys"})
    except:
        logger.error("Can not connect to remote database")
        jobs_manager.record_job_error()
        exit(1)

    if (status is not None and "status" in status
            and status["status"] != jobs_manager.COMPLETE):
        logger.info("Censys scans status is not COMPLETE")
    elif (status is not None and "status" in status
          and status["status"] == jobs_manager.COMPLETE):
        # Get connections to the relevant collections.
        censys_collection = mongo_connector.get_zgrab_443_data_connection()
        remote_censys_collection = rm_connector.get_zgrab_443_data_connection()

        download_censys_scan_info(logger, censys_collection,
                                  remote_censys_collection)

        # Tell the remote database that is safe to start processing the next Censys file
        remote_jobs_collection.update_one(
            {"job_name": "censys"},
            {
                "$currentDate": {
                    "updated": True
                },
                "$set": {
                    "status": jobs_manager.READY
                }
            },
        )

    # Get connections to the relevant HTTPS collections.
    zgrab_443_data_collection = mongo_connector.get_zgrab_443_data_connection()
    remote_zgrab_443_data_collection = rm_connector.get_zgrab_443_data_connection()

    download_zgrab_info(logger, zgrab_443_data_collection,
                        remote_zgrab_443_data_collection)

    # Get connections to the relevant HTTP collections.
    zgrab_80_data_collection = mongo_connector.get_zgrab_80_data_connection()
    remote_zgrab_80_data_collection = rm_connector.get_zgrab_80_data_connection()

    download_zgrab_info(logger, zgrab_80_data_collection,
                        remote_zgrab_80_data_collection)

    # Get connections to the relevant port collections.
    zgrab_port_data_collection = mongo_connector.get_zgrab_port_data_connection()
    remote_zgrab_port_data_collection = rm_connector.get_zgrab_port_data_connection()

    download_zgrab_port_info(logger, zgrab_port_data_collection,
                             remote_zgrab_port_data_collection)

    # Download latest whois information
    status = remote_jobs_collection.find_one({"job_name": "whois_lookups"})
    if status["status"] == jobs_manager.COMPLETE:
        whois_collection = mongo_connector.get_whois_connection()
        remote_whois_collection = rm_connector.get_whois_connection()
        download_whois_data(logger, whois_collection, remote_whois_collection)
        remote_jobs_collection.update_one(
            {"job_name": "whois_lookups"},
            {"$set": {"status": jobs_manager.READY}})

    # Download Amass results
    amass_collection = mongo_connector.get_owasp_amass_connection()
    remote_amass_collection = rm_connector.get_owasp_amass_connection()
    download_amass_data(logger, amass_collection, remote_amass_collection,
                        dns_manager, zones)

    # Download the status of the remote jobs
    download_jobs_status(logger, jobs_manager._jobs_collection,
                         remote_jobs_collection)

    # Download remote sonar DNS findings
    download_sonar_dns(logger, dns_manager, rm_connector)

    # Download remote sonar RDNS findings
    download_sonar_rdns(logger, mongo_connector, rm_connector)

    # Update the local jobs database to done
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example #11
def main():
    """
    Begin Main...
    """
    global global_exit_flag
    global global_zgrab_path

    logger = LoggingUtil.create_log(__name__)

    parser = argparse.ArgumentParser(
        description='Launch zgrab against domains using port 80 or 443.')
    parser.add_argument('-p',
                        choices=['443', '80'],
                        metavar="port",
                        help='The web port: 80 or 443')
    parser.add_argument('-t',
                        default=5,
                        type=int,
                        metavar="threadCount",
                        help='The number of threads')
    parser.add_argument('--zgrab_path',
                        default=global_zgrab_path,
                        metavar='zgrabVersion',
                        help='The version of ZGrab to use')
    args = parser.parse_args()

    if args.p is None:
        logger.error("A port value (80 or 443) must be provided.")
        exit(1)

    if is_running(os.path.basename(__file__)):
        """
        Check to see if a previous attempt to parse is still running...
        """
        now = datetime.now()
        logger.warning(str(now) + ": I am already running! Goodbye!")
        exit(0)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    rm_connector = RemoteMongoConnector.RemoteMongoConnector()
    all_dns_collection = rm_connector.get_all_dns_connection()
    jobs_manager = JobsManager.JobsManager(rm_connector,
                                           "zgrab_http_domain-" + args.p)
    jobs_manager.record_job_start()

    if args.p == "443":
        zgrab_collection = rm_connector.get_zgrab_443_data_connection()
        run_command = run_port_443_command
    else:
        zgrab_collection = rm_connector.get_zgrab_80_data_connection()
        run_command = run_port_80_command

    check_save_location("./json_p" + args.p)

    global_zgrab_path = args.zgrab_path

    zones = ZoneManager.get_distinct_zones(rm_connector)
    ip_manager = IPManager.IPManager(rm_connector)

    for zone in zones:
        global_exit_flag = 0

        domains = get_domains(all_dns_collection, ip_manager, zone)

        if len(domains) == 0:
            continue

        num_threads = args.t
        if len(domains) < args.t:
            num_threads = len(domains)

        logger.debug("Creating " + str(num_threads) + " threads")

        threads = []
        for thread_id in range(1, num_threads + 1):
            thread = ZgrabThread(thread_id, global_work_queue, args.p,
                                 run_command, zone, zgrab_collection)
            thread.start()
            threads.append(thread)
            thread_id += 1

        logger.debug(zone + " length: " + str(len(domains)))

        logger.info("Populating Queue")
        global_queue_lock.acquire()
        for domain in domains:
            global_work_queue.put(domain)
        global_queue_lock.release()

        # Wait for queue to empty
        while not global_work_queue.empty():
            pass

        logger.info("Queue empty")
        # Notify threads it's time to exit
        global_exit_flag = 1

        # Wait for all threads to complete
        for t in threads:
            t.join()

    # Remove last week's old entries
    lastweek = datetime.now() - timedelta(days=7)
    zgrab_collection.delete_many({
        'domain': {
            "$ne": "<nil>"
        },
        'timestamp': {
            "$lt": lastweek
        }
    })

    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example #12
def main():
    """
    Begin main...
    """

    # Don't run if the search files script is working on the existing file.
    if is_running("search_censys_files_new.py"):
        now = datetime.now()
        print("File search running: " + str(now))
        exit(0)

    # Record the start in the logs
    now = datetime.now()
    print("Starting: " + str(now))

    RMC = RemoteMongoConnector.RemoteMongoConnector()
    jobs_collection = RMC.get_jobs_connection()

    # Obtain the timestamp of the last file that was downloaded.
    last_timestamp = "0"
    try:
        f_time = open(TIMESTAMP_FILE, "r")
        last_timestamp = f_time.readline()
        f_time.close()
    except FileNotFoundError:
        last_timestamp = "0"

    # Get the meta data for the currently available file.
    req = requests.get(CENSYS_API + "data/ipv4",
                       auth=HTTPBasicAuth(CENSYS_APP_ID, CENSYS_SECRET))

    if req.status_code != 200:
        print("Error " + str(req.status_code) +
              ": Unable to query Censys Data API\n")
        print(req.text)

        time.sleep(60)
        req = requests.get(CENSYS_API + "data/ipv4",
                           auth=HTTPBasicAuth(CENSYS_APP_ID, CENSYS_SECRET))
        if req.status_code != 200:
            print("Error on IPv4 retry. Giving up...")
            exit(0)

    data_json = json.loads(req.text)

    # Get the timestamp for the currently available file
    timestamp = data_json['results']['latest']['timestamp']

    # If it is the same file as last time, then don't download again.
    if last_timestamp == timestamp:
        print("Already downloaded. Exiting...")
        exit(0)
    else:
        print("Old timestamp: " + last_timestamp)
        print("New timestamp: " + timestamp)

    # Get the location of the details for the new file
    details_url = data_json['results']['latest']['details_url']

    req = requests.get(details_url,
                       auth=HTTPBasicAuth(CENSYS_APP_ID, CENSYS_SECRET))

    if req.status_code != 200:
        print("Error " + str(req.status_code) +
              ": Unable to query Censys Details API\n")
        print(req.text)

        time.sleep(60)
        req = requests.get(details_url,
                           auth=HTTPBasicAuth(CENSYS_APP_ID, CENSYS_SECRET))
        if req.status_code != 200:
            print("Error on details retry. Giving up...")
            exit(0)

    data_json = json.loads(req.text)

    compressed_path = data_json['primary_file']['compressed_download_path']
    print(compressed_path)

    # Record the timestamp of the file that we are about to download.
    time_f = open(TIMESTAMP_FILE, "w")
    time_f.write(timestamp)
    time_f.close()

    # Remove any old files.
    subprocess.call(["rm", DECOMPRESSED_FILE])
    subprocess.call("rm *.lz4", shell=True)

    # Download the new file.
    filename = download_file(compressed_path)

    # Decompress the new file into the file indicated by "DECOMPRESSED_FILE"
    subprocess.check_call(["lz4", "-d", filename, DECOMPRESSED_FILE])

    # Record the name of the filename that has the output
    dec_f = open(FILENAME_FILE, "w")
    dec_f.write(DECOMPRESSED_FILE)
    dec_f.close()

    # Record that we successfully downloaded the file and that search files can start.
    jobs_collection.update_one({'job_name': 'censys'}, {
        '$currentDate': {
            "updated": True
        },
        "$set": {
            'status': 'DOWNLOADED'
        }
    })

    now = datetime.now()
    print("Complete: " + str(now))
Example #13
def main():
    """
    Begin main...
    """
    logger = LoggingUtil.create_log(__name__)

    mongo_connector = RemoteMongoConnector.RemoteMongoConnector()

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    jobs_manager = JobsManager.JobsManager(mongo_connector, "owasp_amass")

    amass_collection = mongo_connector.get_owasp_amass_connection()

    output_dir = "./amass_files/"

    arg_parser = argparse.ArgumentParser(
        description=
        "Run the OWASP Amass tool and store the results in the database.")
    arg_parser.add_argument(
        "--config_file",
        required=False,
        help="An optional Amass config file. Otherwise, defaults will be used.",
    )
    arg_parser.add_argument("--amass_path",
                            required=True,
                            help="The path to the amass binary")
    arg_parser.add_argument(
        "--output_dir",
        default=output_dir,
        help="The local path where to save Amass files.",
    )
    arg_parser.add_argument(
        "--docker_output_dir",
        default=output_dir,
        help="The path within Docker where to save Amass files.",
    )
    arg_parser.add_argument(
        "--amass_version",
        type=int,
        default=3,
        help="The version of OWASP Amass being used.",
    )
    arg_parser.add_argument(
        "--amass_mode",
        required=False,
        type=str,
        default="local",
        choices=["local", "docker"],
        help="The version of OWASP Amass being used.",
    )
    arg_parser.add_argument(
        "--amass_timeout",
        required=False,
        type=str,
        help="The timeout value for the Amass command line",
    )
    arg_parser.add_argument(
        "--exclude_zones",
        required=False,
        type=str,
        default="",
        help="A comma delimited list of sub-strings used to exclude zones",
    )
    arg_parser.add_argument(
        "--exclude_regex",
        required=False,
        type=str,
        default="",
        help="Exclude a list of domains containing a substring",
    )
    arg_parser.add_argument(
        "--created_within_last",
        required=False,
        type=int,
        default=0,
        help="Only process zones created within the last x days",
    )
    arg_parser.add_argument(
        "--if_list",
        required=False,
        type=str,
        default="",
        help=
        "The amass -if list of sources to include. Can't be used with -ef.",
    )
    arg_parser.add_argument(
        "--ef_list",
        required=False,
        type=str,
        default="",
        help=
        "The amass -ef list of sources to exclude. Can't be used with -if.",
    )
    arg_parser.add_argument(
        "--sleep",
        type=int,
        default=5,
        help=
        "Sleep time in seconds between amass runs so as not to overuse service limits.",
    )
    args = arg_parser.parse_args()

    if args.amass_mode == "local" and not os.path.isfile(args.amass_path):
        logger.error("Incorrect amass_path argument provided")
        exit(1)

    # In Docker mode, this would be relative to the Docker path and not the system path
    if (args.amass_mode == "local" and "config_file" in args
            and not os.path.isfile(args.config_file)):
        logger.error("Incorrect config_file location")
        exit(1)

    if "output_dir" in args:
        output_dir = args.output_dir
        if not output_dir.endswith("/"):
            output_dir = output_dir + "/"

    # In Docker mode, this would be relative to the Docker path and not the system path
    if args.amass_mode == "local":
        check_save_location(output_dir)

    jobs_manager.record_job_start()

    if args.created_within_last > 0:
        zone_collection = mongo_connector.get_zone_connection()
        past_create_date = datetime.now() - timedelta(
            days=args.created_within_last)
        results = mongo_connector.perform_find(
            zone_collection, {"created": {
                "$gt": past_create_date
            }})
        zones = []
        for entry in results:
            zones.append(entry["zone"])
    elif args.exclude_regex is not None and len(args.exclude_regex) > 0:
        exclude_re = re.compile(".*" + args.exclude_regex + ".*")
        zone_collection = mongo_connector.get_zone_connection()
        results = mongo_connector.perform_find(
            zone_collection,
            {
                "$and": [
                    {
                        "zone": {
                            "$not": exclude_re
                        }
                    },
                    {
                        "status": {
                            "$nin":
                            [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]
                        }
                    },
                ]
            },
        )
        zones = []
        for entry in results:
            zones.append(entry["zone"])
    else:
        zones = ZoneManager.get_distinct_zones(mongo_connector)

    # If the job died half way through, you can skip over domains that were already processed
    # when you restart the script.
    new_zones = []
    for zone in zones:
        if not os.path.isfile(output_dir + zone + "-do.json"):
            new_zones.append(zone)

    # The exclude_zones argument is documented as comma delimited, so split on commas
    # and drop empty entries.
    exclude_strings = [entry for entry in args.exclude_zones.split(",") if entry != ""]

    # If exclude_strings was specified, then remove any matching zones.
    # Build a new list rather than removing items from new_zones while iterating over it.
    if len(exclude_strings) > 0:
        new_zones = [
            zone for zone in new_zones
            if not any(entry in zone for entry in exclude_strings)
        ]

    # Recently updated zones
    # This helps reduce the number of redundant scans if you stop and restart
    all_dns_collection = mongo_connector.get_all_dns_connection()
    scrub_date = datetime.now() - timedelta(days=120, hours=9)
    recent_zones = mongo_connector.perform_distinct(
        all_dns_collection,
        "zone",
        {
            "sources.source": {
                "$regex": "amass:.*"
            },
            "sources.updated": {
                "$gt": scrub_date
            },
        },
    )
    for zone in recent_zones:
        if zone in new_zones:
            new_zones.remove(zone)

    logger.info("New Zones Length: " + str(len(new_zones)))

    for zone in new_zones:
        # Pace out calls to the Amass services
        time.sleep(args.sleep)

        if args.amass_mode == "local":
            command_line = []

            command_line.append(args.amass_path)
        else:
            command_line = args.amass_path.split()

        if int(args.amass_version) >= 3:
            command_line.append("enum")

        if args.config_file:
            command_line.append("-config")
            command_line.append(args.config_file)

        if args.amass_timeout:
            command_line.append("-timeout")
            command_line.append(args.amass_timeout)

        if args.if_list:
            command_line.append("-if")
            command_line.append(args.if_list)

        if args.ef_list:
            command_line.append("-ef")
            command_line.append(args.ef_list)

        command_line.append("-d")
        command_line.append(zone)
        command_line.append("-src")
        command_line.append("-ip")
        command_line.append("-nolocaldb")
        command_line.append("-json")
        command_line.append(args.docker_output_dir + zone + "-do.json")

        try:
            subprocess.check_call(command_line)
        except subprocess.CalledProcessError as e:
            # Even when there is an error, there will likely still be results.
            # We can continue with the data that was collected thus far.
            logger.warning("ERROR: Amass run exited with a non-zero status: " +
                           str(e))

        if os.path.isfile(output_dir + zone + "-do.json"):
            output = open(output_dir + zone + "-do.json", "r")
            json_data = []
            for line in output:
                try:
                    json_data.append(json.loads(line))
                except:
                    logger.warning("Amass wrote an incomplete line: " +
                                   str(line))
            output.close()

            for finding in json_data:
                finding["timestamp"] = datetime.now()
                """
                Results from amass squash the cname records and only provides the final IPs.
                Therefore, we have to re-do the DNS lookup in download_from_remote_database.
                This collection is just a recording of the original results
                """
                mongo_connector.perform_insert(amass_collection, finding)

    # Clear old findings
    amass_collection.delete_many({"timestamp": {"$lt": now}})

    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
"""
This script downloads updates from the remote MongoDB server that is used for larger jobs.
This script is only necessary if a remote MongoDB is set up.

This script can be run daily.
"""

import logging

from datetime import datetime, timedelta
from libs3 import MongoConnector, RemoteMongoConnector, JobsManager
from libs3.LoggingUtil import LoggingUtil

# Connect to the remote databases
mongo_connector = MongoConnector.MongoConnector()
rm_connector = RemoteMongoConnector.RemoteMongoConnector()


def download_censys_scan_info(censys_collection, remote_censys_collection):
    """
    Download the latest censys scan information
    """
    # Grab the new results from the remote server.
    results = remote_censys_collection.find({}, {"_id": 0})

    # Remove the previous results from the local Censys collection
    censys_collection.delete_many({})

    # Insert the new results from the remote server into the local server
    for result in results:
        censys_collection.insert_one(result)
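
The copy above inserts documents one at a time. A batched variant using insert_many (a sketch assuming the remote result set can be buffered in moderate batches) might look like this:

def download_censys_scan_info_batched(censys_collection, remote_censys_collection, batch_size=1000):
    # Mirror the remote Censys results locally, replacing the per-document
    # inserts above with batched writes.
    censys_collection.delete_many({})
    batch = []
    for result in remote_censys_collection.find({}, {"_id": 0}):
        batch.append(result)
        if len(batch) >= batch_size:
            censys_collection.insert_many(batch)
            batch = []
    if batch:
        censys_collection.insert_many(batch)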
Example #15
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    if is_running(os.path.basename(__file__)):
        logger.warning("Already running...")
        exit(0)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    parser = argparse.ArgumentParser(
        description="Parse Sonar files based on domain zones.")
    parser.add_argument(
        "--sonar_file_type",
        choices=["dns-any", "dns-a", "rdns"],
        required=True,
        help='Specify "dns-any", "dns-a", or "rdns"',
    )
    parser.add_argument(
        "--database",
        choices=["local", "remote"],
        required=False,
        default="local",
        help="Whether to use the local or remote DB",
    )
    args = parser.parse_args()

    r7 = Rapid7.Rapid7()

    if args.database == "remote":
        mongo_connector = RemoteMongoConnector.RemoteMongoConnector()
        dns_manager = DNSManager.DNSManager(mongo_connector,
                                            "get_sonar_data_dns")
    else:
        mongo_connector = MongoConnector.MongoConnector()
        dns_manager = DNSManager.DNSManager(mongo_connector)

    zones = ZoneManager.get_distinct_zones(mongo_connector)

    save_directory = "./files/"
    check_save_location(save_directory)

    # A session is necessary for the multi-step log-in process
    s = requests.Session()

    if args.sonar_file_type == "rdns":
        logger.info("Updating RDNS records")
        jobs_manager = JobsManager.JobsManager(mongo_connector,
                                               "get_sonar_data_rdns")
        jobs_manager.record_job_start()

        try:
            html_parser = r7.find_file_locations(s, "rdns", jobs_manager)
            if html_parser.rdns_url == "":
                logger.error("Unknown Error")
                jobs_manager.record_job_error()
                exit(0)

            unzipped_rdns = download_remote_files(logger, s,
                                                  html_parser.rdns_url,
                                                  save_directory, jobs_manager)
            update_rdns(logger, unzipped_rdns, zones, dns_manager,
                        mongo_connector)
        except Exception as ex:
            logger.error("Unexpected error: " + str(ex))
            jobs_manager.record_job_error()
            exit(0)

        jobs_manager.record_job_complete()
    elif args.sonar_file_type == "dns-any":
        logger.info("Updating DNS ANY records")

        jobs_manager = JobsManager.JobsManager(mongo_connector,
                                               "get_sonar_data_dns-any")
        jobs_manager.record_job_start()

        try:
            html_parser = r7.find_file_locations(s, "fdns", jobs_manager)
            if html_parser.any_url != "":
                unzipped_dns = download_remote_files(logger, s,
                                                     html_parser.any_url,
                                                     save_directory,
                                                     jobs_manager)
                update_dns(logger, unzipped_dns, zones, dns_manager)
        except Exception as ex:
            logger.error("Unexpected error: " + str(ex))
            jobs_manager.record_job_error()
            exit(0)

        jobs_manager.record_job_complete()
    elif args.sonar_file_type == "dns-a":
        logger.info("Updating DNS A, AAAA, and CNAME records")

        jobs_manager = JobsManager.JobsManager(mongo_connector,
                                               "get_sonar_data_dns-a")
        jobs_manager.record_job_start()

        try:
            html_parser = r7.find_file_locations(s, "fdns", jobs_manager)
            if html_parser.a_url != "":
                logger.info("Updating A records")
                unzipped_dns = download_remote_files(logger, s,
                                                     html_parser.a_url,
                                                     save_directory,
                                                     jobs_manager)
                update_dns(logger, unzipped_dns, zones, dns_manager)
            if html_parser.aaaa_url != "":
                logger.info("Updating AAAA records")
                unzipped_dns = download_remote_files(logger, s,
                                                     html_parser.aaaa_url,
                                                     save_directory,
                                                     jobs_manager)
                update_dns(logger, unzipped_dns, zones, dns_manager)
            if html_parser.cname_url != "":
                logger.info("Updating CNAME records")
                unzipped_dns = download_remote_files(logger, s,
                                                     html_parser.cname_url,
                                                     save_directory,
                                                     jobs_manager)
                update_dns(logger, unzipped_dns, zones, dns_manager)
        except Exception as ex:
            logger.error("Unexpected error: " + str(ex))
            jobs_manager.record_job_error()
            exit(0)

        jobs_manager.record_job_complete()
    else:
        logger.error("Unrecognized sonar_file_type option. Exiting...")

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Beispiel #16
0
def main():
    """
    Begin Main...
    """
    global global_exit_flag
    global global_retest_list
    global global_sleep_time
    global global_queue_size
    global global_zgrab_path

    logger = LoggingUtil.create_log(__name__)

    global_retest_list = []

    parser = argparse.ArgumentParser(
        description="Launch zgrab against IPs using port 22, 25, 443, or 465.")
    parser.add_argument(
        "-p",
        choices=["22", "25", "443", "465"],
        metavar="port",
        help="The port to scan: 22, 25, 443, or 465",
    )
    parser.add_argument("-t",
                        default=5,
                        type=int,
                        metavar="threadCount",
                        help="The number of threads")
    parser.add_argument(
        "--mx",
        action="store_true",
        help="Scan only IPs from MX records. Useful for SMTP scans.",
    )
    parser.add_argument(
        "-s",
        default=0,
        type=int,
        metavar="sleepTime",
        help="Sleep time in order to spread out the batches",
    )
    parser.add_argument(
        "--qs",
        default=0,
        type=int,
        metavar="queueSize",
        help="How many hosts to scan in a batch",
    )
    parser.add_argument("--zones_only",
                        action="store_true",
                        help="Scan only IPs from IP zones.")
    parser.add_argument(
        "--zgrab_path",
        default=global_zgrab_path,
        metavar="zgrabVersion",
        help="The version of ZGrab to use",
    )
    args = parser.parse_args()

    if args.p is None:
        logger.error("A port value (22, 25, 443, or 465) must be provided.")
        exit(1)

    # Check to see if a previous attempt to parse is still running...
    if is_running(os.path.basename(__file__)):
        now = datetime.now()
        logger.warning(str(now) + ": I am already running! Goodbye!")
        exit(0)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    rm_connector = RemoteMongoConnector.RemoteMongoConnector()
    all_dns_collection = rm_connector.get_all_dns_connection()
    ip_manager = IPManager.IPManager(rm_connector, True)

    jobs_manager = JobsManager.JobsManager(rm_connector,
                                           "zgrab_port_ip-" + args.p)
    jobs_manager.record_job_start()

    zones_struct = {}
    zones_struct["zones"] = ZoneManager.get_distinct_zones(rm_connector)

    # Not pretty but works
    zones_struct["ip_manager"] = ip_manager

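    # Choose the IP source: MX-record hosts (useful for SMTP scans), tracked CIDRs
    # only, or every IP known to the IP manager and the DNS records.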
    if args.mx:
        (ips, ip_context) = get_mx_ips(zones_struct["zones"], ip_manager,
                                       all_dns_collection)
    elif args.zones_only:
        (ips, ip_context) = get_only_ipzones(ip_manager.Tracked_CIDRs)
    else:
        (ips, ip_context) = get_ips(ip_manager, all_dns_collection)

    if args.s and int(args.s) > 0:
        global_sleep_time = int(args.s)

    if args.qs and int(args.qs) > 0:
        global_queue_size = int(args.qs)

    logger.info("Got IPs: " + str(len(ips)))
    zones_struct["ip_context"] = ip_context

    zgrab_collection = rm_connector.get_zgrab_port_data_connection()
    if args.p == "443":
        run_command = run_port_443_command
    elif args.p == "22":
        run_command = run_port_22_command
    elif args.p == "25":
        run_command = run_port_25_command
    elif args.p == "465":
        run_command = run_port_465_command
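    # argparse restricts -p to the four choices above, so run_command is always set.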

    check_save_location("./json_p" + args.p)

    global_zgrab_path = args.zgrab_path

    threads = []

    logger.debug("Creating " + str(args.t) + " threads")
    for thread_id in range(1, args.t + 1):
        thread = ZgrabThread(
            thread_id,
            global_work_queue,
            args.p,
            run_command,
            zones_struct,
            zgrab_collection,
        )
        thread.start()
        threads.append(thread)

    logger.info("Populating Queue")
    global_queue_lock.acquire()
    for ip in ips:
        global_work_queue.put(ip)
    global_queue_lock.release()

    # Wait for queue to empty
    while not global_work_queue.empty():
        pass

    # Notify threads it's time to exit
    global_exit_flag = 1

    # Wait for all threads to complete
    for t in threads:
        t.join()

    logger.info("Exiting Main Thread")

    logger.info("Global retest list: " + str(len(global_retest_list)))

    # Retest any SMTP hosts that did not respond to the StartTLS handshake
    if args.p == "25" and len(global_retest_list) > 0:
        process_thread(
            logger,
            global_retest_list,
            args.p,
            run_port_25_no_tls_command,
            zones_struct,
            zgrab_collection,
            "retest",
        )

    # Remove old entries from before the scan
    if args.p == "443":
        other_results = zgrab_collection.find({
            "data.tls": {
                "$exists": True
            },
            "data.tls.timestamp": {
                "$lt": now
            }
        })
        for result in other_results:
            zgrab_collection.update_one({"_id": ObjectId(result["_id"])},
                                        {"$unset": {
                                            "data.tls": ""
                                        }})
    elif args.p == "22":
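        # The original zgrab stores SSH results under "data.xssh" while zgrab2 uses
        # "data.ssh", so the cleanup key depends on which binary path is configured.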
        if "zgrab2" in global_zgrab_path:
            other_results = zgrab_collection.find({
                "data.ssh": {
                    "$exists": True
                },
                "data.ssh.timestamp": {
                    "$lt": now
                }
            })
            for result in other_results:
                zgrab_collection.update_one({"_id": ObjectId(result["_id"])},
                                            {"$unset": {
                                                "data.ssh": ""
                                            }})
        else:
            other_results = zgrab_collection.find({
                "data.xssh": {
                    "$exists": True
                },
                "data.xssh.timestamp": {
                    "$lt": now
                }
            })
            for result in other_results:
                zgrab_collection.update_one({"_id": ObjectId(result["_id"])},
                                            {"$unset": {
                                                "data.xssh": ""
                                            }})
    elif args.p == "25":
        other_results = zgrab_collection.find({
            "data.smtp": {
                "$exists": True
            },
            "data.smtp.timestamp": {
                "$lt": now
            }
        })
        for result in other_results:
            zgrab_collection.update_one({"_id": ObjectId(result["_id"])},
                                        {"$unset": {
                                            "data.smtp": ""
                                        }})
    elif args.p == "465":
        other_results = zgrab_collection.find({
            "data.smtps": {
                "$exists": True
            },
            "data.smtps.timestamp": {
                "$lt": now
            }
        })
        for result in other_results:
            zgrab_collection.update_one({"_id": ObjectId(result["_id"])},
                                        {"$unset": {
                                            "data.smtps": ""
                                        }})
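    # Note: each cleanup loop above could likely be collapsed into a single
    # update_many call, e.g.:
    #   zgrab_collection.update_many(
    #       {"data.smtps.timestamp": {"$lt": now}},
    #       {"$unset": {"data.smtps": ""}})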

    # Remove any completely empty entries
    zgrab_collection.delete_many({"data": {}})

    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Beispiel #17
0
def main():
    global global_exit_flag

    parser = argparse.ArgumentParser(
        description='Launch zgrab against IPs using port 80 or 443.')
    parser.add_argument('-p',
                        choices=['443', '80'],
                        metavar="port",
                        help='The web port: 80 or 443')
    parser.add_argument('-t',
                        default=5,
                        type=int,
                        metavar="threadCount",
                        help='The number of threads')
    args = parser.parse_args()

    if args.p is None:
        print("A port value (80 or 443) must be provided.")
        exit(1)

    # Check to see if a previous attempt to parse is still running...
    if is_running(os.path.basename(__file__)):
        now = datetime.now()
        print(str(now) + ": I am already running! Goodbye!")
        exit(0)

    now = datetime.now()
    print("Starting: " + str(now))

    rm_connector = RemoteMongoConnector.RemoteMongoConnector()
    all_dns_collection = rm_connector.get_all_dns_connection()

    zones_struct = {}
    zones_struct['zones'] = ZoneManager.get_distinct_zones(rm_connector)

    zones_struct['ip_zones'] = get_ip_zones(rm_connector)

    # Collect the list of AWS CIDRs
    zones_struct['aws_ips'] = get_aws_ips(rm_connector)

    # Collect the list of Azure CIDRs
    zones_struct['azure_ips'] = get_azure_ips(rm_connector)

    (ips, ip_context) = get_ips(zones_struct['ip_zones'], all_dns_collection)
    print("Got IPs: " + str(len(ips)))
    zones_struct['ip_context'] = ip_context

    if args.p == "443":
        zgrab_collection = rm_connector.get_zgrab_443_data_connection()
        run_command = run_port_443_command
    else:
        zgrab_collection = rm_connector.get_zgrab_80_data_connection()
        run_command = run_port_80_command
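    # zones_struct is handed to every worker thread, presumably so scan results can
    # be tied back to tracked zones, IP zones, and the AWS/Azure ranges.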

    threads = []

    print("Creating " + str(args.t) + " threads")
    for thread_id in range(1, args.t + 1):
        thread = ZgrabThread(thread_id, global_work_queue, args.p, run_command,
                             zones_struct, zgrab_collection)
        thread.start()
        threads.append(thread)

    print("Populating Queue")
    global_queue_lock.acquire()
    for ip in ips:
        global_work_queue.put(ip)
    global_queue_lock.release()

    # Wait for queue to empty
    while not global_work_queue.empty():
        pass

    # Notify threads it's time to exit
    global_exit_flag = 1

    # Wait for all threads to complete
    for t in threads:
        t.join()

    print("Exiting Main Thread")

    # Remove entries whose timestamp is more than a week old
    lastweek = datetime.now() - timedelta(days=7)
    zgrab_collection.delete_many({
        'ip': {
            "$ne": "<nil>"
        },
        'timestamp': {
            "$lt": lastweek
        }
    })

    now = datetime.now()
    print("Complete: " + str(now))