Example #1
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    # Obtain the list of known email addresses from the config collection
    MC = MongoConnector.MongoConnector()
    PT = PassiveTotal.PassiveTotal()
    zi = ZoneIngestor.ZoneIngestor()
    config_collection = MC.get_config_connection()
    res = config_collection.find({})

    jobs_manager = JobsManager.JobsManager(MC, 'get_passivetotal_data')
    jobs_manager.record_job_start()

    # Perform a search for each email address
    for admin_email in res[0]['DNS_Admins']:
        search_pt_email(logger, admin_email, PT, zi, jobs_manager)

    for org in res[0]['Whois_Orgs']:
        search_pt_org(logger, org, PT, zi, jobs_manager)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example #2
def main():
    """
    Begin Main...
    """
    now = datetime.now()
    print("Starting: " + str(now))

    # Obtain the list of known email addresses from the config collection
    MC = MongoConnector.MongoConnector()
    PT = PassiveTotal.PassiveTotal()
    zi = ZoneIngestor.ZoneIngestor()
    config_collection = MC.get_config_connection()
    res = config_collection.find({})

    jobs_collection = MC.get_jobs_connection()

    # Perform a search for each email address
    for admin_email in res[0]['DNS_Admins']:
        search_pt_email(admin_email, PT, zi, jobs_collection)

    for org in res[0]['Whois_Orgs']:
        search_pt_org(org, PT, zi, jobs_collection)

    # Record status
    jobs_collection.update_one({'job_name': 'get_passivetotal_data'}, {
        '$currentDate': {
            "updated": True
        },
        "$set": {
            'status': 'COMPLETE'
        }
    })

    now = datetime.now()
    print("Complete: " + str(now))
Example #3
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = MongoConnector.MongoConnector()
    umbrella = Umbrella.Umbrella()
    zi = ZoneIngestor.ZoneIngestor()

    # Obtain the list of known email addresses and name servers from the config collection
    config_collection = mongo_connector.get_config_connection()
    res = config_collection.find({})

    jobs_manager = JobsManager.JobsManager(mongo_connector, 'get_umbrella_whois')
    jobs_manager.record_job_start()

    # Perform a search for each email address
    for admin_email in res[0]['DNS_Admins']:
        search_umbrella_by_email(logger, admin_email, umbrella, zi, jobs_manager)

    # Perform a search based on each name server
    for name_server in res[0]['Whois_Name_Servers']:
        search_umbrella_by_nameserver(logger, name_server, res[0]['Whois_Orgs'], umbrella, zi, jobs_manager)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
Example #4
def main():
    """
    Begin Main...
    """
    now = datetime.now()
    print("Starting: " + str(now))

    mongo_connector = MongoConnector.MongoConnector()
    dns_manager = DNSManager.DNSManager(mongo_connector)
    jobs_collection = mongo_connector.get_jobs_connection()
    zone_ingestor = ZoneIngestor.ZoneIngestor()

    current_zones = ZoneManager.get_distinct_zones(mongo_connector)

    # For cases with multiple R53 accounts, include the account id for reference
    sts = boto3.client('sts')
    account_id = sts.get_caller_identity()["Arn"].split(':')[4]
    r53_source = "R53:" + str(account_id)

    r53_client = boto3.client('route53')

    r53_domains = r53_client.list_hosted_zones()
    r53_zone_list = []
    while r53_domains != {}:
        for zone_data in r53_domains['HostedZones']:
            # Only add public zones
            if not zone_data['Config']['PrivateZone']:
                r53_zone_list.append(zone_data)

        if r53_domains['IsTruncated']:
            # Continuation pages come from list_hosted_zones with a Marker;
            # the route53 client has no list_domains method.
            r53_domains = r53_client.list_hosted_zones(
                Marker=r53_domains['NextMarker'])
        else:
            r53_domains = {}

    for zone_data in r53_zone_list:
        # Double check that this is not a new zone
        zone_name = zone_data['Name'][:-1]
        if zone_name not in current_zones:
            print("Creating zone: " + zone_name)
            zone_ingestor.add_zone(zone_data['Name'], r53_source)

        # Add hosts to the zone
        update_records(r53_client, dns_manager, zone_data, r53_source)

    # Record status
    jobs_collection.update_one({'job_name': 'get_route53'}, {
        '$currentDate': {
            "updated": True
        },
        "$set": {
            'status': 'COMPLETE'
        }
    })

    now = datetime.now()
    print("Ending: " + str(now))
Example #5
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    mongo_connector = MongoConnector.MongoConnector()
    dns_manager = DNSManager.DNSManager(mongo_connector)
    zone_ingestor = ZoneIngestor.ZoneIngestor()

    jobs_manager = JobsManager.JobsManager(mongo_connector, "get_route53")
    jobs_manager.record_job_start()

    current_zones = ZoneManager.get_distinct_zones(mongo_connector)

    # For cases with multiple R53 accounts, include the account id for reference
    sts = boto3.client("sts")
    account_id = sts.get_caller_identity()["Arn"].split(":")[4]
    r53_source = "R53:" + str(account_id)

    r53_client = boto3.client("route53")

    r53_domains = r53_client.list_hosted_zones()
    r53_zone_list = []
    while r53_domains != {}:
        for zone_data in r53_domains["HostedZones"]:
            # Only add public zones
            if zone_data["Config"]["PrivateZone"] == False:
                r53_zone_list.append(zone_data)

        if r53_domains["IsTruncated"] == True:
            r53_domains = r53_client.list_domains(
                Marker=r53_domains["NextMarker"])
        else:
            r53_domains = {}

    for zone_data in r53_zone_list:
        # Double check that this is not a new zone
        zone_name = zone_data["Name"][:-1]
        if zone_name not in current_zones:
            logger.info("Creating zone: " + zone_name)
            zone_ingestor.add_zone(zone_data["Name"], r53_source)

        # Add hosts to the zone
        update_records(r53_client, dns_manager, zone_data, r53_source)

    # Record status
    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Ending: " + str(now))
    logger.info("Complete.")
Example #6
File: setup.py Project: Righard/Marinus
def create_zone(zone):
    """
    A zone refers to a root domain such as "example.org" or "example.net".
    It does not refer to fully qualified domain names (FQDNs) such as "www.example.org" or "images.example.net".
    Marinus correlates records from different sources based on their zone.
    This will add a new zone for Marinus to track in the zones collection.
    """
    ZI = ZoneIngestor.ZoneIngestor()

    print("Adding zone: " + zone)
    ZI.add_zone(zone)
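
A hypothetical invocation, illustrating the zone/FQDN distinction drawn in the docstring:

create_zone("example.org")        # a root domain (zone) - valid input
# create_zone("www.example.org")  # an FQDN, not a zone - not what this helper is for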
Example #7
class UltraDNSZone(object):

    UH = UltraDNSHelper.UltraDNSHelper('get_ultradns_zones')
    APIH = APIHelper.APIHelper()
    ZI = ZoneIngestor.ZoneIngestor()

    def __ultradns_zone_response_handler(self, response):
        """
        Handles the API response. Incorrect JSON parsing is allowed up to 20 times, after which the
        script exits. No action is performed when the zone name ends in "in-addr.arpa".
        :param response: Response object
        """
        try:
            response = response.json()
        except (ValueError, AttributeError) as err:
            if self.UH.incorrect_response_json_allowed > 0:
                print(
                    'Unable to parse response JSON for retrieving UltraDNS zones for the offset '
                    + str(self.UH.offset))
                self.UH.incorrect_response_json_allowed -= 1
            else:
                self.APIH.handle_api_error(
                    'Unable to parse response JSON for 20 zones: ' + repr(err),
                    self.UH.jobs_manager,
                )
        else:
            # The zone names end in '.'; remove the trailing dot before ingesting into the collection.
            for zone in response['zones']:
                zone_name = zone['properties']['name'][:-1]

                if not zone_name.endswith('in-addr.arpa'):
                    # Part of clean_collection code.
                    # if zone_name in self.UH.previous_zones:
                    #     del self.UH.previous_zones[zone_name]

                    # Add the zone to the zones collection
                    self.ZI.add_zone(zone_name, self.UH.source)

            self.UH.set_offset(response['resultInfo'])

    def __paginated_ultradns_zones_request(self):
        """
        Makes paginated API calls to UltraDNS. The API is retried 3 times in case of a ConnectionError
        exception before the script exits. The script exits on encountering an HTTPError or any other
        RequestException.
        The limit value is set as mentioned in the docs.
        In case a 401 is encountered along with the expected token-expiration message, another login
        call is made with grant_type set to 'refresh_token' to retrieve a valid access token.
        """
        url = self.UH.ULTRACONNECT.ZONES
        try:
            res = self.UH.backoff_api_retry(
                url,
                {
                    'limit': 1000,
                    'offset': self.UH.offset,
                    'q': 'zone_type:PRIMARY'
                },
                {'authorization': 'Bearer ' + self.UH.access_token},
            )
            res.raise_for_status()
        except requests.exceptions.HTTPError as herr:
            err_msg = json.loads(res.text)['errorMessage']
            if res.status_code == 401 and err_msg == self.UH.access_token_expiration_error:
                self.UH.login('refresh_token')
                self.__paginated_ultradns_zones_request()
            else:
                self.APIH.handle_api_error(herr, self.UH.jobs_manager)
        except requests.exceptions.RequestException as err:
            self.APIH.handle_api_error(err, self.UH.jobs_manager)
        else:
            self.__ultradns_zone_response_handler(res)

    def get_ultradns_zones(self):
        """
        Extracts the zones listing from UltraDNS in a paginated manner.
        """
        print("Starting: " + str(datetime.now()))
        self.UH.jobs_manager.record_job_start()

        # Part of clean_collection code.
        # self.UH.get_previous_zones()

        self.__paginated_ultradns_zones_request()
        while self.UH.offset:
            self.__paginated_ultradns_zones_request()

        # Record status
        self.UH.jobs_manager.record_job_complete()

        print("Ending: " + str(datetime.now()))

    def __init__(self):
        self.get_ultradns_zones()
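
Note that __init__ calls get_ultradns_zones(), so simply instantiating the class runs the whole job:

if __name__ == "__main__":
    # Instantiation triggers the paginated zone fetch and the job-status recording.
    UltraDNSZone()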
Example #8
class InfobloxZone(object):
    alphabets = list(string.ascii_lowercase + string.digits)
    alphabet_queried = None
    APIH = APIHelper.APIHelper()
    IH = InfobloxHelper.InfobloxHelper()

    _logger = None

    # Connect to the database
    MC = MongoConnector.MongoConnector()
    zone_collection = MC.get_zone_connection()
    ip_collection = MC.get_ipzone_connection()
    job_manager = None

    ZI = ZoneIngestor.ZoneIngestor()

    next_page_id = None
    source = 'Infoblox'

    def __get_base_url(self):
        """
        Returns the Infoblox zone API URL
        :return: Infoblox zone API URL
        """
        return 'https://' + self.IH.IBLOX_HOST + '/wapi/v' + self.IH.IBLOX_VERSION + '/zone_auth'

    def __get_previous_zones(self):
        """
        Fetches the zones/sub-zones currently present in the zone collection with source 'Infoblox'.
        The result is a dictionary with the zone names as keys. The value is True if the zone
        is a sub-zone.
        """
        zones = self.zone_collection.find(
            {
                '$or': [{
                    'reporting_sources.source': self.source
                }, {
                    'sub_zones.source': self.source
                }]
            }, {
                'reporting_sources': 1,
                'zone': 1,
                'sub_zones': 1
            })
        self.previous_zones = {}
        for zone in zones:
            for reporting_source in zone['reporting_sources']:
                if reporting_source['source'] == self.source:
                    self.previous_zones[zone['zone']] = False
            for sub_zone in zone['sub_zones']:
                if sub_zone['source'] == self.source:
                    self.previous_zones[sub_zone['sub_zone']] = True

    def __clean_collection(self):
        """
        Cleans the zone collection of the zones which were earlier seen in the Infoblox API
        but are not seen now. Such zones/sub-zones are marked with source 'Infoblox-Retired'.
        """
        parent_zones = []
        sub_zones = []
        for zone_name, is_sub_zone in self.previous_zones.items():
            if is_sub_zone:
                sub_zones.append(zone_name)
            else:
                parent_zones.append(zone_name)

        # Update the sub_zones from 'Infoblox' to 'Infoblox-Retired'
        self.zone_collection.update_many(
            {
                'sub_zones': {
                    '$elemMatch': {
                        'sub_zone': {
                            '$in': sub_zones
                        },
                        'source': self.source
                    }
                }
            }, {'$set': {
                'sub_zones.$.source': 'Infoblox-Retired'
            }})

        self.zone_collection.update_many(
            {
                'zone': {
                    '$in': parent_zones
                },
                'reporting_sources.source': self.source
            }, {'$set': {
                'reporting_sources.$.source': 'Infoblox-Retired'
            }})

    def __insert_zone(self, zone):
        """
        Inserts the zone into the zone collection or into ip_zones collection in case
        it is an IP.
        :param zone: Zone value to be inserted into collections. This is a dictionary
                     with keys 'fqdn' and 'parent'.
        """
        # Some zones are actually IP addresses.
        # If the IP address is new, add it.
        # Change the update date if it already exists
        utf8_zone = zone['fqdn'].encode('utf-8').decode('utf8')
        if re.match(r"^([0-9]{1,3}\.){3}[0-9]{1,3}\/\d\d$",
                    utf8_zone) is not None:
            cursor = self.ip_collection.find({"zone": zone['fqdn']})
            if cursor.count() == 0:
                insert_text = dict()
                insert_text['zone'] = utf8_zone
                insert_text['source'] = 'Infoblox'
                insert_text['status'] = 'unconfirmed'
                insert_text['created'] = datetime.now()
                insert_text['updated'] = datetime.now()
                self.ip_collection.insert_one(insert_text)
                self._logger.info("Added IP: " + utf8_zone)
            else:
                for _ in cursor:
                    self.ip_collection.update_one(
                        {'zone': zone['fqdn']},
                        {'$currentDate': {
                            "updated": True
                        }})
                    self._logger.info("Updated IP: " + utf8_zone)
        else:
            # Remove this zone from the previously seen zones. Whatever remains in
            # previous_zones afterwards will have its 'Infoblox' source retired.
            if zone['fqdn'] in self.previous_zones:
                del self.previous_zones[zone['fqdn']]
            self.ZI.add_zone(zone['fqdn'], self.source, zone['parent'])

    def __infoblox_response_handler(self, response):
        """
        Handles the API response. Incorrect JSON parsing is allowed up to 20 times, after which the
        script exits. If a 'next_page_id' is received in the response, it is stored as the
        identifier for the next page of the API to be queried.
        :param response: Response object
        """
        try:
            response_data = response.json()
            response_result = response_data['result']
        except (ValueError, AttributeError) as err:
            if self.incorrect_response_json_allowed > 0:
                self._logger.warning(
                    'Unable to parse response JSON for alphabet ' +
                    self.alphabet_queried)
                self.incorrect_response_json_allowed -= 1
            else:
                self.APIH.handle_api_error(
                    'Unable to parse response JSON for 20 alphabets: ' +
                    repr(err),
                    self.job_manager,
                )
        else:
            for entry in response_result:
                zone = dict()
                zone['fqdn'] = entry['fqdn']
                zone['parent'] = entry['parent']
                self.__insert_zone(zone)

            if 'next_page_id' in response_data:
                self.next_page_id = response_data['next_page_id']

    @backoff.on_exception(backoff.expo,
                          requests.exceptions.ConnectionError,
                          max_tries=4,
                          factor=10,
                          on_backoff=APIH.connection_error_retry)
    def __backoff_api_retry(self):
        """
        Makes API calls to Infoblox with exponential retry capabilities using 'backoff'. The API is
        retried 3 times in case of a ConnectionError exception before the script exits.
        """
        url = self.__get_base_url()
        params = {
            'view': 'External',
            'fqdn~': '.*' + self.alphabet_queried + '$',
            '_return_fields': 'parent,fqdn',
        }
        if not self.next_page_id:
            params.update({
                '_paging': '1',
                '_return_as_object': '1',
                '_max_results': '1500'
            })
        else:
            params.update({'_page_id': self.next_page_id})

        return requests.get(url,
                            params,
                            auth=HTTPBasicAuth(self.IH.IBLOX_UNAME,
                                               self.IH.IBLOX_PASSWD),
                            verify=False)

    def __infoblox_paginated_request(self):
        """
        Makes paginated API calls to Infoblox. The API is retried 3 times in case of a ConnectionError
        exception before the script exits. The script exits on encountering an HTTPError or any other
        RequestException.
        """
        try:
            response = self.__backoff_api_retry()
            response.raise_for_status()
        except requests.exceptions.HTTPError as herr:
            self.APIH.handle_api_error(herr, self.job_manager)
        except requests.exceptions.RequestException as err:
            self.APIH.handle_api_error(err, self.job_manager)
        else:
            self.next_page_id = None
            self.__infoblox_response_handler(response)

    def get_infoblox_zones(self):
        """
        Extracts the Infoblox zones using paginated requests.
        """
        print("Starting: " + str(datetime.now()))
        self._logger.info("Starting....")
        self.job_manager = JobsManager.JobsManager(self.MC,
                                                   'get_iblox_alpha_zones')
        self.job_manager.record_job_start()

        self.__get_previous_zones()
        for alphabet in self.alphabets:
            self.alphabet_queried = alphabet
            self.next_page_id = None
            self.__infoblox_paginated_request()
            while self.next_page_id:
                self.__infoblox_paginated_request()

        self.__clean_collection()

        # Record status
        self.job_manager.record_job_complete()

        print("Ending: " + str(datetime.now()))
        self._logger.info("Complete")

    def __init__(self):
        self._logger = LoggingUtil.create_log(__name__)
        self.incorrect_response_json_allowed = self.APIH.INCORRECT_RESPONSE_JSON_ALLOWED
        self.get_infoblox_zones()
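
The CIDR check in __insert_zone uses a regex that requires an exactly two-digit prefix length. A sketch of a more permissive equivalent using the standard ipaddress module (a hypothetical helper, not part of Marinus):

import ipaddress

def looks_like_cidr(value):
    """Return True if value parses as an IPv4 network such as '10.0.0.0/24'."""
    try:
        # strict=False tolerates host bits being set, e.g. '10.0.0.1/24';
        # unlike the regex, this also accepts a bare IP (treated as a /32).
        ipaddress.IPv4Network(value, strict=False)
        return True
    except ValueError:
        return False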
Example #9
def main():

    now = datetime.now()
    print("Starting: " + str(now))

    azure_connector = AzureConnector.AzureConnector()
    mongo_connector = MongoConnector.MongoConnector()
    dns_manager = DNSManager.DNSManager(mongo_connector)
    zone_ingestor = ZoneIngestor.ZoneIngestor()
    jobs_manager = JobsManager.JobsManager(mongo_connector, 'fetch_azure_dns')
    jobs_manager.record_job_start()

    current_zones = ZoneManager.get_distinct_zones(mongo_connector)

    resource_client = azure_connector.get_resources_client()
    resources = []

    # The resource list is not currently used.
    for item in resource_client.resource_groups.list():
        resources.append(item.name)

    dns_client = azure_connector.get_dns_client()

    zones = dns_client.zones.list()

    # The type of records the Azure DNS will let you configure
    record_types = {
        'A': 'arecords',
        'AAAA': 'aaaa_records',
        'MX': 'mx_records',
        'NS': 'ns_records',
        'PTR': 'ptr_records',
        'SRV': 'srv_records',
        'TXT': 'txt_records',
        'CNAME': 'cname_record',
        'SOA': 'soa_record'
    }

    for zone in zones:
        print("Zone: " + zone.name)
        data = split_id(zone.id)

        if zone.zone_type == ZoneType.public:
            print(zone.name + " is public:")

            if zone.name not in current_zones:
                print("Creating zone: " + zone.name)
                zone_ingestor.add_zone(zone.name,
                                       "azure:" + data["resourceGroups"])

            try:
                print("ResourceGroup: " + data["resourceGroups"])
                records = dns_client.record_sets.list_all_by_dns_zone(
                    data["resourceGroups"], zone.name)
                for entry in records:
                    # The record_data id value ends in rtype/rvalue so you must guess the rtype
                    record_data = split_id(entry.id)
                    for rtype in record_types:
                        if rtype in record_data:
                            results = extract_record_set_value(rtype, entry)
                            for result in results:
                                result['zone'] = zone.name
                                result['created'] = datetime.now()
                                result['status'] = 'confirmed'
                                dns_manager.insert_record(
                                    result, "azure:" + data["resourceGroups"])
            except Exception:
                print("No records found")

    jobs_manager.record_job_complete()
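
The record_types mapping pairs each DNS record type with its attribute name on the azure-mgmt-dns RecordSet model. The extract_record_set_value helper is not shown in this listing; a sketch of what such an extractor might do for A records (the output keys are assumptions for illustration):

def extract_a_records(entry):
    """Sketch: pull A-record values from an azure-mgmt-dns RecordSet object."""
    results = []
    for record in entry.arecords or []:
        results.append({
            'fqdn': entry.fqdn,            # RecordSet exposes the full name
            'type': 'a',                   # assumed key/value convention
            'value': record.ipv4_address,  # ARecord model attribute
        })
    return results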
Example #10
def main():
    """
    Begin Main...
    """
    logger = LoggingUtil.create_log(__name__)

    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")

    azure_connector = AzureConnector.AzureConnector()
    mongo_connector = MongoConnector.MongoConnector()
    dns_manager = DNSManager.DNSManager(mongo_connector)
    zone_ingestor = ZoneIngestor.ZoneIngestor()
    jobs_manager = JobsManager.JobsManager(mongo_connector, "fetch_azure_dns")
    jobs_manager.record_job_start()

    current_zones = ZoneManager.get_distinct_zones(mongo_connector)

    resource_client = azure_connector.get_resources_client()
    resources = []

    # The resource list is not currently used.
    for item in resource_client.resource_groups.list():
        resources.append(item.name)

    dns_client = azure_connector.get_dns_client()

    zones = dns_client.zones.list()

    # The type of records the Azure DNS will let you configure
    record_types = {
        "A": "arecords",
        "AAAA": "aaaa_records",
        "MX": "mx_records",
        "NS": "ns_records",
        "PTR": "ptr_records",
        "SRV": "srv_records",
        "TXT": "txt_records",
        "CNAME": "cname_record",
        "SOA": "soa_record",
    }

    for zone in zones:
        logger.info("Zone: " + zone.name)
        data = split_id(zone.id)

        if zone.zone_type == ZoneType.public:
            logger.info(zone.name + " is public:")

            if zone.name not in current_zones:
                logger.debug("Creating zone: " + zone.name)
                zone_ingestor.add_zone(zone.name, "azure:" + data["resourceGroups"])

            try:
                logger.info("ResourceGroup: " + data["resourceGroups"])
                records = dns_client.record_sets.list_all_by_dns_zone(
                    data["resourceGroups"], zone.name
                )
                for entry in records:
                    # The record_data id value ends in rtype/rvalue so you must guess the rtype
                    record_data = split_id(entry.id)
                    for rtype in record_types:
                        if rtype in record_data:
                            results = extract_record_set_value(logger, rtype, entry)
                            for result in results:
                                result["zone"] = zone.name
                                result["created"] = datetime.now()
                                result["status"] = "confirmed"
                                dns_manager.insert_record(
                                    result, "azure:" + data["resourceGroups"]
                                )
            except Exception:
                logger.warning("No records found")

    jobs_manager.record_job_complete()

    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")