Example #1
def main():
    '''
    Get aggregated statistics on incoming events
    to use in alerting/notices/queries about event patterns over time
    '''
    logger.debug('starting')
    logger.debug(options)
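    # options.esservers is expected to be a list of Elasticsearch server URLs from the config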
    es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
    index = options.index
    stats = esSearch(es)
    logger.debug(json.dumps(stats))
    sleepcycles = 0
    try:
        while not es.index_exists(index):
            sleep(3)
            if sleepcycles == 3:
                logger.debug("The index is not created. Terminating eventStats.py cron job.")
                exit(1)
            sleepcycles += 1
        if es.index_exists(index):
            # post to elastic search servers directly without going through
            # message queues in case there is an availability issue
            es.save_event(index=index, body=json.dumps(stats))

    except Exception as e:
        logger.error("Exception %r when gathering statistics " % e)

    logger.debug('finished')
Example #2
def clearESCache():
    es = esConnect(None)
    indexes = es.get_indices()
    # assumes index names like events-YYYYMMDD etc.
    # used to avoid operating on current indexes
    dtNow = datetime.utcnow()
    indexSuffix = date.strftime(dtNow, '%Y%m%d')
    previousSuffix = date.strftime(dtNow - timedelta(days=1), '%Y%m%d')
    for targetindex in sorted(indexes):
        if indexSuffix not in targetindex and previousSuffix not in targetindex:
            url = '{0}/{1}/_stats'.format(random.choice(options.esservers), targetindex)
            r = requests.get(url)
            if r.status_code == 200:
                indexstats = json.loads(r.text)
                if indexstats['_all']['total']['search']['query_current'] == 0:
                    fielddata = indexstats['_all']['total']['fielddata']['memory_size_in_bytes']
                    if fielddata > 0:
                        logger.info('target: {0}: field data {1}'.format(targetindex, indexstats['_all']['total']['fielddata']['memory_size_in_bytes']))
                        clearurl = '{0}/{1}/_cache/clear'.format(random.choice(options.esservers), targetindex)
                        clearRequest = requests.post(clearurl)
                        logger.info(clearRequest.text)
                        # stop at one?
                        if options.conservative:
                            return
                else:
                    logger.debug('{0}: <ignoring due to current search > field data {1}'.format(targetindex, indexstats['_all']['total']['fielddata']['memory_size_in_bytes']))
            else:
                logger.error('{0} returned {1}'.format(url, r.status_code))
Example #3
    def process_file(self, s3file):
        logger.debug("Fetching %s" % s3file.name)
        # CloudTrail log objects are gzip-compressed JSON; decompress in memory
        # and return the list of records
        compressedData = s3file.read()
        databuf = StringIO(compressedData)
        gzip_file = gzip.GzipFile(fileobj=databuf)
        json_logs = json.loads(gzip_file.read())
        return json_logs['Records']
Example #4
def esPruneIndexes():
    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
        # do the pruning
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
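                # a pruning value of '0' means this index is never pruned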
                if pruning != '0':
                    index_to_prune = index
                    if rotation == 'daily':
                        idate = date.strftime(toUTC(datetime.now()) - timedelta(days=int(pruning)), '%Y%m%d')
                        index_to_prune += '-%s' % idate
                    elif rotation == 'monthly':
                        idate = date.strftime(datetime.utcnow() - timedelta(days=31 * int(pruning)), '%Y%m')
                        index_to_prune += '-%s' % idate

                    if index_to_prune in indices:
                        logger.debug('Deleting index: %s' % index_to_prune)
                        es.delete_index(index_to_prune, True)
                    else:
                        logger.error('Error deleting index %s, index missing' % index_to_prune)
            except Exception as e:
                logger.error("Unhandled exception while deleting %s, terminating: %r" % (index_to_prune, e))

    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #5
def fetch_ip_list(aws_key_id, aws_secret_key, s3_bucket, ip_list_filename):
    logger.debug("Fetching ip list from s3")
    s3 = boto.connect_s3(
        aws_access_key_id=aws_key_id,
        aws_secret_access_key=aws_secret_key
    )
    bucket = s3.get_bucket(s3_bucket)
    ip_list_key = bucket.lookup(ip_list_filename)
    contents = ip_list_key.get_contents_as_string().rstrip()
    return contents.split("\n")
Example #6
def download_generic_alerts(repo_url, save_location, deploy_key):
    git_obj = cmd.Git(save_location)
    git_ssh_cmd = 'ssh -i %s' % deploy_key

    git_obj.update_environment(GIT_SSH_COMMAND=git_ssh_cmd)

    if not os.path.isdir(save_location):
        logger.debug("Cloning " + str(repo_url) + " into " + str(save_location))
        Repo.clone_from(repo_url, save_location, env={'GIT_SSH_COMMAND': git_ssh_cmd})
    else:
        logger.debug("Updating " + str(save_location))
        git_obj.pull()
Example #7
def main():
    logger.debug('starting')
    logger.debug(options)

    es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
    client = MongoClient(options.mongohost, options.mongoport)
    # use meteor db
    mongo = client.meteor
    writeFrontendStats(getFrontendStats(es), mongo)
    writeSqsStats(getSqsStats(es), mongo)
    writeEsClusterStats(es.get_cluster_health(), mongo)
    writeEsNodesStats(getEsNodesStats(), mongo)
    writeEsHotThreads(getEsHotThreads(), mongo)
Example #8
def main():
    logger.debug('starting')
    logger.debug(options)
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        client = MongoClient(options.mongohost, options.mongoport)
        mozdefdb = client.meteor
        ensureIndexes(mozdefdb)
        esResults = getESAlerts(es)
        updateMongo(mozdefdb, esResults)

    except Exception as e:
        logger.error("Exception %r sending health to mongo" % e)
Example #9
def main():
    logger.debug('starting')
    logger.debug(options)
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        client = MongoClient(options.mongohost, options.mongoport)
        # use meteor db
        mozdefdb = client.meteor
        esResults = searchESForBROAttackers(es, 100)
        updateMongoWithESEvents(mozdefdb, esResults)
        searchMongoAlerts(mozdefdb)

    except ValueError as e:
        logger.error("Exception %r collecting attackers to mongo" % e)
Example #10
def esCloseIndices():
    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
    except Exception as e:
        logger.error("Unhandled exception while connecting to ES, terminating: %r" % (e))

    # examine each index pulled from get_indice
    # to determine if it meets aging criteria
    month_ago_date = toUTC(datetime.now()) - timedelta(days=int(options.index_age))
    month_ago_date = month_ago_date.replace(tzinfo=None)
    for index in indices:
        if 'events' in index:
            index_date = index.rsplit('-', 1)[1]
            logger.debug("Checking to see if Index: %s can be closed." % (index))
            if len(index_date) == 8:
                index_date_obj = datetime.strptime(index_date, '%Y%m%d')
                try:
                    if month_ago_date > index_date_obj:
                        logger.debug("Index: %s will be closed." % (index))
                        es.close_index(index)
                    else:
                        logger.debug("Index: %s  does not meet aging criteria and will not be closed." % (index))
                except Exception as e:
                    logger.error("Unhandled exception while closing indices, terminating: %r" % (e))
Example #11
def download_generic_alerts(repo_url, save_location, deploy_key):
    git_obj = cmd.Git(save_location)
    git_ssh_cmd = 'ssh -i %s' % deploy_key

    git_obj.update_environment(GIT_SSH_COMMAND=git_ssh_cmd)

    if not os.path.isdir(save_location):
        logger.debug("Cloning " + str(repo_url) + " into " +
                     str(save_location))
        Repo.clone_from(repo_url,
                        save_location,
                        env={'GIT_SSH_COMMAND': git_ssh_cmd})
    else:
        logger.debug("Updating " + str(save_location))
        git_obj.pull()
Example #12
def main():
    logger.debug('starting')
    logger.debug(options)
    try:
        es = ElasticsearchClient(
            (list('{0}'.format(s) for s in options.esservers)))
        client = MongoClient(options.mongohost, options.mongoport)
        # use meteor db
        mozdefdb = client.meteor
        esResults = searchESForBROAttackers(es, 100)
        updateMongoWithESEvents(mozdefdb, esResults)
        searchMongoAlerts(mozdefdb)

    except ValueError as e:
        logger.error("Exception %r collecting attackers to mongo" % e)
Example #13
    def run(self):
        while True:
            try:
                records = self.sqs_queue.receive_messages(
                    MaxNumberOfMessages=options.prefetch)
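                # each message body is a JSON-encoded SNS notification; the CloudTrail payload is in its 'Message' field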
                for msg in records:
                    body_message = msg.body
                    event = json.loads(body_message)

                    if not event['Message']:
                        logger.error(
                            'Invalid message format for cloudtrail SQS messages'
                        )
                        logger.error('Malformed Message: %r' % body_message)
                        continue

                    if event['Message'] == 'CloudTrail validation message.':
                        # We don't care about these messages
                        continue

                    message_json = json.loads(event['Message'])

                    if 's3ObjectKey' not in message_json:
                        logger.error(
                            'Invalid message format, expecting an s3ObjectKey in Message'
                        )
                        logger.error('Malformed Message: %r' % body_message)
                        continue

                    s3_log_files = message_json['s3ObjectKey']
                    for log_file in s3_log_files:
                        logger.debug('Downloading and parsing ' + log_file)
                        s3_obj = self.s3_client.get_object(
                            Bucket=message_json['s3Bucket'], Key=log_file)
                        events = self.parse_s3_file(s3_obj)
                        for event in events:
                            self.on_message(event)

                    msg.delete()
            except (SSLEOFError, SSLError, socket.error):
                logger.info('Received network related error...reconnecting')
                time.sleep(5)
                self.sqs_queue = connect_sqs(
                    region_name=options.region,
                    aws_access_key_id=options.accesskey,
                    aws_secret_access_key=options.secretkey,
                    task_exchange=options.taskexchange)
            time.sleep(options.sleep_time)
Example #14
def updateAttackerGeoIP(mozdefdb, attackerID, eventDictionary):
    '''given an attacker ID and a dictionary of an elastic search event
       look for a valid geoIP in the dict and update the attacker's geo coordinates
    '''

    # geo ip should be in eventDictionary['details']['sourceipgeolocation']
    # "sourceipgeolocation": {
    #     "city": "Polska",
    #     "region_code": "73",
    #     "area_code": 0,
    #     "time_zone": "Europe/Warsaw",
    #     "dma_code": 0,
    #     "metro_code": null,
    #     "country_code3": "POL",
    #     "latitude": 52.59309999999999,
    #     "postal_code": null,
    #     "longitude": 19.089400000000012,
    #     "country_code": "PL",
    #     "country_name": "Poland",
    #     "continent": "EU"
    # }
    # logger.debug(eventDictionary)
    if 'details' in eventDictionary:
        if 'sourceipgeolocation' in eventDictionary['details']:
            attackers = mozdefdb['attackers']
            attacker = attackers.find_one({'_id': attackerID})
            if attacker is not None:
                attacker['geocoordinates'] = dict(countrycode='',
                                                  longitude=0,
                                                  latitude=0)
                if 'country_code' in eventDictionary['details'][
                        'sourceipgeolocation']:
                    attacker['geocoordinates'][
                        'countrycode'] = eventDictionary['details'][
                            'sourceipgeolocation']['country_code']
                if 'longitude' in eventDictionary['details'][
                        'sourceipgeolocation']:
                    attacker['geocoordinates']['longitude'] = eventDictionary[
                        'details']['sourceipgeolocation']['longitude']
                if 'latitude' in eventDictionary['details'][
                        'sourceipgeolocation']:
                    attacker['geocoordinates']['latitude'] = eventDictionary[
                        'details']['sourceipgeolocation']['latitude']
                attackers.save(attacker)
    else:
        logger.debug('no details in the dictionary')
        logger.debug(eventDictionary)
Example #15
    def run(self):
        self.taskQueue.set_message_class(RawMessage)
        while True:
            try:
                records = self.taskQueue.get_messages(options.prefetch)
                for msg in records:
                    body_message = msg.get_body()
                    event = json.loads(body_message)

                    if not event['Message']:
                        logger.error('Invalid message format for cloudtrail SQS messages')
                        logger.error('Malformed Message: %r' % body_message)
                        continue

                    if event['Message'] == 'CloudTrail validation message.':
                        # We don't care about these messages
                        continue

                    message_json = json.loads(event['Message'])

                    if 's3ObjectKey' not in message_json:
                        logger.error('Invalid message format, expecting an s3ObjectKey in Message')
                        logger.error('Malformed Message: %r' % body_message)
                        continue

                    s3_log_files = message_json['s3ObjectKey']
                    for log_file in s3_log_files:
                        logger.debug('Downloading and parsing ' + log_file)
                        bucket = self.s3_connection.get_bucket(message_json['s3Bucket'])

                        log_file_lookup = bucket.lookup(log_file)
                        events = self.process_file(log_file_lookup)
                        for event in events:
                            self.on_message(event)

                    self.taskQueue.delete_message(msg)
            except (SSLEOFError, SSLError, socket.error):
                logger.info('Received network related error...reconnecting')
                time.sleep(5)
                self.connection, self.taskQueue = connect_sqs(
                    task_exchange=options.taskexchange,
                    **get_aws_credentials(
                        options.region,
                        options.accesskey,
                        options.secretkey))
                self.taskQueue.set_message_class(RawMessage)
Example #16
def broadcastAttacker(attacker):
    '''
    send this attacker info to our message queue
    '''
    try:
        connString = 'amqp://{0}:{1}@{2}:{3}/{4}'.format(options.mquser,
                                                         options.mqpassword,
                                                         options.mqserver,
                                                         options.mqport,
                                                         options.mqvhost)
        if options.mqprotocol == 'amqps':
            mqSSL = True
        else:
            mqSSL = False
        mqConn = Connection(connString, ssl=mqSSL)

        alertExchange = Exchange(
            name=options.alertexchange,
            type='topic',
            durable=True)
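        # bind the exchange to the connection and declare it on the broker before publishing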
        alertExchange(mqConn).declare()
        mqproducer = mqConn.Producer(serializer='json')

        logger.debug('Kombu configured')
    except Exception as e:
        logger.error('Exception while configuring kombu for alerts: {0}'.format(e))
    try:
        # generate an 'alert' structure for this attacker:
        mqAlert = dict(severity='NOTICE', category='attacker')

        if 'datecreated' in attacker:
            mqAlert['utctimestamp'] = attacker['datecreated'].isoformat()

        mqAlert['summary'] = 'New Attacker: {0} events: {1}, alerts: {2}'.format(attacker['indicators'], attacker['eventscount'], attacker['alertscount'])
        logger.debug(mqAlert)
        ensurePublish = mqConn.ensure(
            mqproducer,
            mqproducer.publish,
            max_retries=10)
        ensurePublish(
            mqAlert,
            exchange=alertExchange,
            routing_key=options.routingkey
        )
    except Exception as e:
        logger.error('Exception while publishing attacker: {0}'.format(e))
Example #17
def updateMongoWithESEvents(mozdefdb, results):
    logger.debug('Looping through events identified as malicious from bro')
    attackers = mozdefdb['attackers']
    for r in results:
        if 'sourceipaddress' in r['_source']['details']:
            if netaddr.valid_ipv4(r['_source']['details']['sourceipaddress']):
                sourceIP = netaddr.IPNetwork(
                    r['_source']['details']['sourceipaddress'])
                # expand it to a /24 CIDR
                # todo: lookup ipwhois for asn_cidr value
                # potentially with a max mask value (i.e. asn is /8, limit attackers to /24)
                sourceIP.prefixlen = 24
                if not sourceIP.ip.is_loopback(
                ) and not sourceIP.ip.is_private(
                ) and not sourceIP.ip.is_reserved():
                    esrecord = dict(documentid=r['_id'],
                                    documenttype=r['_type'],
                                    documentindex=r['_index'],
                                    documentsource=r['_source'])

                    logger.debug('Trying to find existing attacker at ' +
                                 str(sourceIP))
                    attacker = attackers.find_one(
                        {'indicators.ipv4address': str(sourceIP)})
                    if attacker is None:
                        # new attacker
                        # generate a meteor-compatible ID
                        # save the ES document type, index, id
                        # and add a sub list for future events
                        logger.debug('Creating new attacker from ' +
                                     str(sourceIP))
                        newAttacker = genNewAttacker()

                        # expand the source ip to a /24 for the indicator match.
                        sourceIP.prefixlen = 24
                        # str sourceIP to get the ip/cidr rather than netblock cidr.
                        newAttacker['indicators'].append(
                            dict(ipv4address=str(sourceIP)))
                        newAttacker['eventscount'] = 1
                        newAttacker['lastseentimestamp'] = esrecord[
                            'documentsource']['utctimestamp']
                        attackers.insert(newAttacker)
                        updateAttackerGeoIP(mozdefdb, newAttacker['_id'],
                                            esrecord['documentsource'])
                    else:
                        logger.debug(
                            'Attacker found, increasing eventscount and modding geoip'
                        )
                        attacker['eventscount'] += 1
                        attacker['lastseentimestamp'] = esrecord[
                            'documentsource']['utctimestamp']
                        attackers.save(attacker)
                        # geo ip could have changed, update it
                        updateAttackerGeoIP(mozdefdb, attacker['_id'],
                                            esrecord['documentsource'])
Example #18
def broadcastAttacker(attacker):
    '''
    send this attacker info to our message queue
    '''
    try:
        connString = 'amqp://{0}:{1}@{2}:{3}/{4}'.format(
            options.mquser, options.mqpassword, options.mqserver,
            options.mqport, options.mqvhost)
        if options.mqprotocol == 'amqps':
            mqSSL = True
        else:
            mqSSL = False
        mqConn = Connection(connString, ssl=mqSSL)

        alertExchange = Exchange(name=options.alertexchange,
                                 type='topic',
                                 durable=True)
        alertExchange(mqConn).declare()
        mqproducer = mqConn.Producer(serializer='json')

        logger.debug('Kombu configured')
    except Exception as e:
        logger.error(
            'Exception while configuring kombu for alerts: {0}'.format(e))
    try:
        # generate an 'alert' structure for this attacker:
        mqAlert = dict(severity='NOTICE', category='attacker')

        if 'datecreated' in attacker:
            mqAlert['utctimestamp'] = attacker['datecreated'].isoformat()

        mqAlert[
            'summary'] = 'New Attacker: {0} events: {1}, alerts: {2}'.format(
                attacker['indicators'], attacker['eventscount'],
                attacker['alertscount'])
        logger.debug(mqAlert)
        ensurePublish = mqConn.ensure(mqproducer,
                                      mqproducer.publish,
                                      max_retries=10)
        ensurePublish(mqAlert,
                      exchange=alertExchange,
                      routing_key=options.routingkey)
    except Exception as e:
        logger.error('Exception while publishing attacker: {0}'.format(e))
Example #19
def isJVMMemoryHigh():
    url = "{0}/_nodes/stats?pretty=true".format(random.choice(options.esservers))
    r = requests.get(url)
    logger.debug(r)
    if r.status_code == 200:
        nodestats = r.json()

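        # walk the per-node OS and JVM figures returned by the _nodes/stats API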
        for node in nodestats['nodes']:
            loadaverage = nodestats['nodes'][node]['os']['cpu']['load_average']
            cpuusage = nodestats['nodes'][node]['os']['cpu']['percent']
            nodename = nodestats['nodes'][node]['name']
            jvmused = nodestats['nodes'][node]['jvm']['mem']['heap_used_percent']
            logger.debug('{0}: cpu {1}%  jvm {2}% load average: {3}'.format(nodename, cpuusage, jvmused, loadaverage))
            if jvmused > options.jvmlimit:
                logger.info('{0}: cpu {1}%  jvm {2}% load average: {3} recommending cache clear'.format(nodename, cpuusage, jvmused, loadaverage))
                return True
        return False
    else:
        logger.error(r)
        return False
Example #20
def isJVMMemoryHigh():
    url = "{0}/_nodes/stats?pretty=true".format(random.choice(options.esservers))
    r = requests.get(url)
    logger.debug(r)
    if r.status_code == 200:
        nodestats = r.json()

        for node in nodestats['nodes']:
            loadaverage = nodestats['nodes'][node]['os']['cpu']['load_average']
            cpuusage = nodestats['nodes'][node]['os']['cpu']['percent']
            nodename = nodestats['nodes'][node]['name']
            jvmused = nodestats['nodes'][node]['jvm']['mem']['heap_used_percent']
            logger.debug('{0}: cpu {1}%  jvm {2}% load average: {3}'.format(nodename, cpuusage, jvmused, loadaverage))
            if jvmused > options.jvmlimit:
                logger.info('{0}: cpu {1}%  jvm {2}% load average: {3} recommending cache clear'.format(nodename, cpuusage, jvmused, loadaverage))
                return True
        return False
    else:
        logger.error(r)
        return False
Example #21
def verify_events(options):
    es_client = ElasticsearchClient(options.esservers)
    for required_field in options.required_fields:
        logger.debug('Looking for events without ' + required_field)
        search_query = SearchQuery(hours=12)
        search_query.add_must_not(ExistsMatch(required_field))

        # Exclude all events that are mozdef related health and stats
        search_query.add_must_not(TermMatch('_type', 'mozdefstats'))
        search_query.add_must_not(TermMatch('_type', 'mozdefhealth'))

        search_query.add_aggregation(Aggregation('_type'))
        # We don't care about the actual events, we only want the numbers
        results = search_query.execute(es_client, size=1)
        for aggreg_term in results['aggregations']['_type']['terms']:
            count = aggreg_term['count']
            category = aggreg_term['key']
            logger.error(
                "Found {0} bad events of _type '{1}' missing '{2}' field".
                format(count, category, required_field))
Example #22
def update_alert_schedules():
    '''an endpoint to update alert schedules'''
    if not request.body:
        response.status = 503
        return response

    alert_schedules = json.loads(request.body.read())
    request.body.close()

    response.content_type = "application/json"
    mongoclient = MongoClient(options.mongohost, options.mongoport)
    schedulers_db = mongoclient.meteor['alertschedules']
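    # replace the stored schedules wholesale: clear the collection, then insert the incoming set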
    schedulers_db.remove()

    for alert_name, alert_schedule in alert_schedules.items():
        logger.debug(
            "Inserting schedule for {0} into mongodb".format(alert_name))
        schedulers_db.insert(alert_schedule)

    response.status = 200
    return response
Example #23
def aggregateAttackerIPs(attackers):
    iplist = []

    # Set the attacker age timestamp
    attackerage = datetime.now() - timedelta(days=options.attackerage)

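    # newest first, restricted to the configured category and age, keeping only attackers
    # with an ipv4 indicator, grouped per address and capped at options.iplimit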
    ips = attackers.aggregate([
        {"$sort": {"lastseentimestamp": -1}},
        {"$match": {"category": options.category}},
        {"$match": {"lastseentimestamp": {"$gte": attackerage}}},
        {"$match": {"indicators.ipv4address": {"$exists": True}}},
        {"$group": {"_id": {"ipv4address": "$indicators.ipv4address"}}},
        {"$unwind": "$_id.ipv4address"},
        {"$limit": options.iplimit}
    ])

    for i in ips:
        whitelisted = False
        logger.debug('working {0}'.format(i))
        ip = i['_id']['ipv4address']
        ipcidr = netaddr.IPNetwork(ip)
        if not ipcidr.ip.is_loopback() and not ipcidr.ip.is_private() and not ipcidr.ip.is_reserved():
            for whitelist_range in options.ipwhitelist:
                whitelist_network = netaddr.IPNetwork(whitelist_range)
                if ipcidr in whitelist_network:
                    logger.debug(str(ipcidr) + " is whitelisted as part of " + str(whitelist_network))
                    whitelisted = True

            # strip any host bits, e.g. 192.168.1.10/24 -> 192.168.1.0/24
            ipcidrnet = str(ipcidr.cidr)
            if ipcidrnet not in iplist and not whitelisted:
                iplist.append(ipcidrnet)
        else:
            logger.debug('invalid:' + ip)
    return iplist
Example #24
    def makerequest(self, query, stime, etime, maxid):
        payload = {
            'min_time': calendar.timegm(stime.utctimetuple()),
            'max_time': calendar.timegm(etime.utctimetuple()),
            'q': query
        }
        if maxid is not None:
            payload['max_id'] = maxid
        hdrs = {'X-Papertrail-Token': self._apikey}

        max_retries = 3
        total_retries = 0
        while True:
            logger.debug("Sending request to papertrail API")
            resp = requests.get(self._papertrail_api, headers=hdrs, params=payload)
            if resp.status_code == 200:
                break
            else:
                logger.debug("Received invalid status code: {0}: {1}".format(resp.status_code, resp.text))
                total_retries += 1
                if total_retries < max_retries:
                    logger.debug("Sleeping a bit then retrying")
                    time.sleep(2)
                else:
                    logger.error("Received too many error messages...exiting")
                    logger.error("Last malformed response: {0}: {1}".format(resp.status_code, resp.text))
                    sys.exit(1)

        return self.parse_events(resp.json())
Example #25
def getQueueSizes():
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(options.esservers)

    sqs_client = boto3.client("sqs",
                              region_name=options.region,
                              aws_access_key_id=options.accesskey,
                              aws_secret_access_key=options.secretkey)
    queues_stats = {
        'queues': [],
        'total_feeds': len(options.taskexchange),
        'total_messages_ready': 0,
        'username': '******'
    }
    for queue_name in options.taskexchange:
        logger.debug('Looking for sqs queue stats in queue' + queue_name)
        queue_url = sqs_client.get_queue_url(QueueName=queue_name)['QueueUrl']
        queue_attributes = sqs_client.get_queue_attributes(
            QueueUrl=queue_url, AttributeNames=['All'])['Attributes']
        queue_stats = {
            'queue': queue_name,
        }
        if 'ApproximateNumberOfMessages' in queue_attributes:
            queue_stats['messages_ready'] = int(
                queue_attributes['ApproximateNumberOfMessages'])
            queues_stats['total_messages_ready'] += queue_stats[
                'messages_ready']
        if 'ApproximateNumberOfMessagesNotVisible' in queue_attributes:
            queue_stats['messages_inflight'] = int(
                queue_attributes['ApproximateNumberOfMessagesNotVisible'])
        if 'ApproximateNumberOfMessagesDelayed' in queue_attributes:
            queue_stats['messages_delayed'] = int(
                queue_attributes['ApproximateNumberOfMessagesDelayed'])

        queues_stats['queues'].append(queue_stats)

    # setup a log entry for health/status.
    sqsid = '{0}-{1}'.format(options.account, options.region)
    healthlog = dict(utctimestamp=toUTC(datetime.now()).isoformat(),
                     hostname=sqsid,
                     processid=os.getpid(),
                     processname=sys.argv[0],
                     severity='INFO',
                     summary='mozdef health/status',
                     category='mozdef',
                     source='aws-sqs',
                     tags=[],
                     details=queues_stats)
    healthlog['tags'] = ['mozdef', 'status', 'sqs']
    healthlog['type'] = 'mozdefhealth'
    # post to elasticsearch servers directly without going through
    # message queues in case there is an availability issue
    es.save_event(index=options.index, body=json.dumps(healthlog))
    # post another doc with a static docid and tag
    # for use when querying for the latest sqs status
    healthlog['tags'] = ['mozdef', 'status', 'sqs-latest']
    es.save_event(index=options.index,
                  doc_id=getDocID(sqsid),
                  body=json.dumps(healthlog))
Example #26
    def makerequest(self, query, stime, etime, maxid):
        payload = {
            'min_time': calendar.timegm(stime.utctimetuple()),
            'max_time': calendar.timegm(etime.utctimetuple()),
            'q': query
        }
        if maxid is not None:
            payload['max_id'] = maxid
        hdrs = {'X-Papertrail-Token': self._apikey}

        max_retries = 3
        total_retries = 0
        while True:
            logger.debug("Sending request to papertrail API")
            resp = requests.get(self._papertrail_api,
                                headers=hdrs,
                                params=payload)
            if resp.status_code == 200:
                break
            else:
                logger.debug("Received invalid status code: {0}: {1}".format(
                    resp.status_code, resp.text))
                total_retries += 1
                if total_retries < max_retries:
                    logger.debug("Sleeping a bit then retrying")
                    time.sleep(2)
                else:
                    logger.error("Received too many error messages...exiting")
                    logger.error("Last malformed response: {0}: {1}".format(
                        resp.status_code, resp.text))
                    sys.exit(1)

        return self.parse_events(resp.json())
Example #27
def updateAttackerGeoIP(mozdefdb, attackerID, eventDictionary):
    '''given an attacker ID and a dictionary of an elastic search event
       look for a valid geoIP in the dict and update the attacker's geo coordinates
    '''

    # geo ip should be in eventDictionary['details']['sourceipgeolocation']
    # "sourceipgeolocation": {
    #     "city": "Polska",
    #     "region_code": "73",
    #     "area_code": 0,
    #     "time_zone": "Europe/Warsaw",
    #     "dma_code": 0,
    #     "metro_code": null,
    #     "country_code3": "POL",
    #     "latitude": 52.59309999999999,
    #     "postal_code": null,
    #     "longitude": 19.089400000000012,
    #     "country_code": "PL",
    #     "country_name": "Poland",
    #     "continent": "EU"
    # }
    # logger.debug(eventDictionary)
    if 'details' in eventDictionary:
        if 'sourceipgeolocation' in eventDictionary['details']:
            attackers = mozdefdb['attackers']
            attacker = attackers.find_one({'_id': attackerID})
            if attacker is not None:
                attacker['geocoordinates'] = dict(countrycode='',
                                                  longitude=0,
                                                  latitude=0)
                if 'country_code' in eventDictionary['details']['sourceipgeolocation']:
                    attacker['geocoordinates']['countrycode'] = eventDictionary['details']['sourceipgeolocation']['country_code']
                if 'longitude' in eventDictionary['details']['sourceipgeolocation']:
                    attacker['geocoordinates']['longitude'] = eventDictionary['details']['sourceipgeolocation']['longitude']
                if 'latitude' in eventDictionary['details']['sourceipgeolocation']:
                    attacker['geocoordinates']['latitude'] = eventDictionary['details']['sourceipgeolocation']['latitude']
                attackers.save(attacker)
    else:
        logger.debug('no details in the dictionary')
        logger.debug(eventDictionary)
Example #28
def aggregateAttackerIPs(attackers):
    iplist = []

    # Set the attacker age timestamp
    attackerage = datetime.now() - timedelta(days=options.attackerage)

    ips = attackers.aggregate([{
        "$sort": {
            "lastseentimestamp": -1
        }
    }, {
        "$match": {
            "category": options.category
        }
    }, {
        "$match": {
            "lastseentimestamp": {
                "$gte": attackerage
            }
        }
    }, {
        "$match": {
            "indicators.ipv4address": {
                "$exists": True
            }
        }
    }, {
        "$group": {
            "_id": {
                "ipv4address": "$indicators.ipv4address"
            }
        }
    }, {
        "$unwind": "$_id.ipv4address"
    }, {
        "$limit": options.iplimit
    }])

    for i in ips:
        whitelisted = False
        logger.debug('working {0}'.format(i))
        ip = i['_id']['ipv4address']
        ipcidr = netaddr.IPNetwork(ip)
        if not ipcidr.ip.is_loopback() and not ipcidr.ip.is_private(
        ) and not ipcidr.ip.is_reserved():
            for whitelist_range in options.ipwhitelist:
                whitelist_network = netaddr.IPNetwork(whitelist_range)
                if ipcidr in whitelist_network:
                    logger.debug(
                        str(ipcidr) + " is whitelisted as part of " +
                        str(whitelist_network))
                    whitelisted = True

            # strip any host bits, e.g. 192.168.1.10/24 -> 192.168.1.0/24
            ipcidrnet = str(ipcidr.cidr)
            if ipcidrnet not in iplist and not whitelisted:
                iplist.append(ipcidrnet)
        else:
            logger.debug('invalid:' + ip)
    return iplist
Example #29
def verify_events(options):
    es_client = ElasticsearchClient(options.esservers)
    for required_field in options.required_fields:
        logger.debug('Looking for events without ' + required_field)
        search_query = SearchQuery(hours=12)
        search_query.add_must_not(ExistsMatch(required_field))

        # Exclude all events that are mozdef related health and stats
        search_query.add_must_not(TermMatch('_type', 'mozdefstats'))
        search_query.add_must_not(TermMatch('_type', 'mozdefhealth'))

        search_query.add_aggregation(Aggregation('_type'))
        # We don't care about the actual events, we only want the numbers
        results = search_query.execute(es_client, size=1)
        for aggreg_term in results['aggregations']['_type']['terms']:
            count = aggreg_term['count']
            category = aggreg_term['key']
            logger.error("Found {0} bad events of _type '{1}' missing '{2}' field".format(
                count,
                category,
                required_field
            ))
Example #30
def esPruneIndexes():
    logger.debug('started')
    try:
        es = ElasticsearchClient(
            (list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
        # do the pruning
        for (index, dobackup, rotation,
             pruning) in zip(options.indices, options.dobackup,
                             options.rotation, options.pruning):
            try:
                if pruning != '0':
                    index_to_prune = index
                    if rotation == 'daily':
                        idate = date.strftime(
                            toUTC(datetime.now()) -
                            timedelta(days=int(pruning)), '%Y%m%d')
                        index_to_prune += '-%s' % idate
                    elif rotation == 'monthly':
                        idate = date.strftime(
                            datetime.utcnow() -
                            timedelta(days=31 * int(pruning)), '%Y%m')
                        index_to_prune += '-%s' % idate

                    if index_to_prune in indices:
                        logger.debug('Deleting index: %s' % index_to_prune)
                        es.delete_index(index_to_prune, True)
                    else:
                        logger.error('Error deleting index %s, index missing' %
                                     index_to_prune)
            except Exception as e:
                logger.error(
                    "Unhandled exception while deleting %s, terminating: %r" %
                    (index_to_prune, e))

    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #31
def save_ip_list(save_path, ips):
    ip_list_contents = '\n'.join(ips)
    logger.debug("Saving ip list")
    if os.path.isfile(save_path):
        logger.debug("Overwriting ip list file in " + str(save_path))
    else:
        logger.debug("Creating new ip list file at " + str(save_path))
    with open(save_path, "w+") as text_file:
        text_file.write(ip_list_contents)
Example #32
def save_ip_list(save_path, ips):
    ip_list_contents = '\n'.join(ips)
    logger.debug("Saving ip list")
    if os.path.isfile(save_path):
        logger.debug("Overwriting ip list file in " + str(save_path))
    else:
        logger.debug("Creating new ip list file at " + str(save_path))
    with open(save_path, "w+") as text_file:
        text_file.write(ip_list_contents)
Example #33
def clearESCache():
    es = esConnect(None)
    indexes = es.get_indices()
    # assumes index names like events-YYYYMMDD etc.
    # used to avoid operating on current indexes
    dtNow = datetime.utcnow()
    indexSuffix = date.strftime(dtNow, '%Y%m%d')
    previousSuffix = date.strftime(dtNow - timedelta(days=1), '%Y%m%d')
    for targetindex in sorted(indexes):
        if indexSuffix not in targetindex and previousSuffix not in targetindex:
            url = '{0}/{1}/_stats'.format(random.choice(options.esservers),
                                          targetindex)
            r = requests.get(url)
            if r.status_code == 200:
                indexstats = json.loads(r.text)
                if indexstats['_all']['total']['search']['query_current'] == 0:
                    fielddata = indexstats['_all']['total']['fielddata'][
                        'memory_size_in_bytes']
                    if fielddata > 0:
                        logger.info('target: {0}: field data {1}'.format(
                            targetindex, indexstats['_all']['total']
                            ['fielddata']['memory_size_in_bytes']))
                        clearurl = '{0}/{1}/_cache/clear'.format(
                            random.choice(options.esservers), targetindex)
                        clearRequest = requests.post(clearurl)
                        logger.info(clearRequest.text)
                        # stop at one?
                        if options.conservative:
                            return
                else:
                    logger.debug(
                        '{0}: <ignoring due to current search > field data {1}'
                        .format(
                            targetindex, indexstats['_all']['total']
                            ['fielddata']['memory_size_in_bytes']))
            else:
                logger.error('{0} returned {1}'.format(url, r.status_code))
Example #34
    def assume_role(self, role_arn, role_session_name='unknown', policy=None):
        '''Return a boto.sts.credential.Credential object given a role_arn.
        First check if a Credential object exists in the local self.credentials
        cache that is not expired. If there isn't one, assume the role of role_arn,
        store the Credential in the credentials cache and return it'''
        logger.debug("Connecting to sts")
        if role_arn in self.credentials:
            if not self.credentials[role_arn] or not self.credentials[role_arn].is_expired():
                # Return the cached value if it's False (indicating a permissions issue) or if
                # it hasn't expired.
                return self.credentials[role_arn]
        try:
            self.credentials[role_arn] = self.conn_sts.assume_role(
                role_arn=role_arn,
                role_session_name=role_session_name,
                policy=policy).credentials
            logger.debug("Assumed new role with credential %s" %
                         self.credentials[role_arn].to_dict())
        except Exception as e:
            print(e)
            logger.error("Unable to assume role %s due to exception %s" %
                         (role_arn, e))
            self.credentials[role_arn] = False
Example #35
    def assume_role(self,
                    role_arn,
                    role_session_name='unknown',
                    policy=None):
        '''Return a boto.sts.credential.Credential object given a role_arn.
        First check if a Credential object exists in the local self.credentials
        cache that is not expired. If there isn't one, assume the role of role_arn,
        store the Credential in the credentials cache and return it'''
        logger.debug("Connecting to sts")
        if role_arn in self.credentials:
            if not self.credentials[role_arn] or not self.credentials[role_arn].is_expired():
                # Return the cached value if it's False (indicating a permissions issue) or if
                # it hasn't expired.
                return self.credentials[role_arn]
        try:
            self.credentials[role_arn] = self.conn_sts.assume_role(
                role_arn=role_arn,
                role_session_name=role_session_name,
                policy=policy).credentials
            logger.debug("Assumed new role with credential %s" % self.credentials[role_arn].to_dict())
        except Exception as e:
            print(e)
            logger.error("Unable to assume role %s due to exception %s" % (role_arn, e))
            self.credentials[role_arn] = False
Example #36
def save_db_data(save_path, db_data):
    temp_save_path = save_path + ".tmp"
    logger.debug("Saving db data to " + temp_save_path)
    with open(temp_save_path, "wb+") as text_file:
        text_file.write(db_data)
    logger.debug("Testing temp geolite db file")
    geo_ip = GeoIP(temp_save_path)
    # Do a generic lookup to verify we don't get any errors (malformed data)
    geo_ip.lookup_ip('8.8.8.8')
    logger.debug("Moving temp file to " + save_path)
    os.rename(temp_save_path, save_path)
Example #37
def save_db_data(save_path, db_data):
    temp_save_path = save_path + ".tmp"
    logger.debug("Saving db data to " + temp_save_path)
    with open(temp_save_path, "wb+") as text_file:
        text_file.write(db_data)
    logger.debug("Testing temp geolite db file")
    geo_ip = GeoIP(temp_save_path)
    # Do a generic lookup to verify we don't get any errors (malformed data)
    geo_ip.lookup_ip('8.8.8.8')
    logger.debug("Moving temp file to " + save_path)
    os.rename(temp_save_path, save_path)
Example #38
def updateMongoWithESEvents(mozdefdb, results):
    logger.debug('Looping through events identified as malicious from bro')
    attackers = mozdefdb['attackers']
    for r in results:
        if 'sourceipaddress' in r['_source']['details']:
            if netaddr.valid_ipv4(r['_source']['details']['sourceipaddress']):
                sourceIP = netaddr.IPNetwork(r['_source']['details']['sourceipaddress'])
                # expand it to a /24 CIDR
                # todo: lookup ipwhois for asn_cidr value
                # potentially with a max mask value (i.e. asn is /8, limit attackers to /24)
                sourceIP.prefixlen = 24
                if not sourceIP.ip.is_loopback() and not sourceIP.ip.is_private() and not sourceIP.ip.is_reserved():
                    esrecord = dict(
                        documentid=r['_id'],
                        documentindex=r['_index'],
                        documentsource=r['_source']
                    )

                    logger.debug('Trying to find existing attacker at ' + str(sourceIP))
                    attacker = attackers.find_one({'indicators.ipv4address': str(sourceIP)})
                    if attacker is None:
                        # new attacker
                        # generate a meteor-compatible ID
                        # save the ES document type, index, id
                        # and add a sub list for future events
                        logger.debug('Creating new attacker from ' + str(sourceIP))
                        newAttacker = genNewAttacker()

                        # expand the source ip to a /24 for the indicator match.
                        sourceIP.prefixlen = 24
                        # str sourceIP to get the ip/cidr rather than netblock cidr.
                        newAttacker['indicators'].append(dict(ipv4address=str(sourceIP)))
                        newAttacker['eventscount'] = 1
                        newAttacker['lastseentimestamp'] = esrecord['documentsource']['utctimestamp']
                        attackers.insert(newAttacker)
                        updateAttackerGeoIP(mozdefdb, newAttacker['_id'], esrecord['documentsource'])
                    else:
                        logger.debug('Attacker found, increasing eventscount and modding geoip')
                        attacker['eventscount'] += 1
                        attacker['lastseentimestamp'] = esrecord['documentsource']['utctimestamp']
                        attackers.save(attacker)
                        # geo ip could have changed, update it
                        updateAttackerGeoIP(mozdefdb, attacker['_id'], esrecord['documentsource'])
Example #39
def fetch_db_data(db_download_location):
    logger.debug('Fetching db data from ' + db_download_location)
    auth_creds = None
    if options.account_id != '' and options.license_key != '':
        logger.debug('Using credentials for maxmind')
        auth_creds = (options.account_id, options.license_key)
    response = requests.get(db_download_location, auth=auth_creds)
    if not response.ok:
        raise Exception(
            "Received bad response from maxmind server: {0}".format(
                response.text))
    db_raw_data = response.content
    with tempfile.NamedTemporaryFile(mode='wb') as temp:
        logger.debug('Writing compressed gzip to temp file: ' + temp.name)
        temp.write(db_raw_data)
        temp.flush()
        logger.debug('Extracting gzip data from ' + temp.name)
        gfile = gzip.GzipFile(temp.name, "rb")
        data = gfile.read()
        return data
Example #40
def main():
    logger.debug('Starting')
    logger.debug(options)
    ips = fetch_ip_list(options.aws_access_key_id, options.aws_secret_access_key, options.aws_bucket_name, options.aws_document_key_name)

    for manual_addition in options.manual_additions:
        if manual_addition == '':
            continue
        logger.debug("Adding manual addition: " + manual_addition)
        ips.append(manual_addition)

    if len(ips) < options.ips_list_threshold:
        raise LookupError('IP List contains less than ' + str(options.ips_list_threshold) + ' entries...something is probably up here.')
    save_ip_list(options.local_ip_list_path, ips)
Example #41
def fetch_db_data(db_download_location):
    logger.debug('Fetching db data from ' + db_download_location)
    response = urllib2.urlopen(db_download_location)
    db_raw_data = response.read()
    with tempfile.NamedTemporaryFile(mode='wb') as temp:
        logger.debug('Writing compressed gzip to temp file: ' + temp.name)
        temp.write(db_raw_data)
        temp.flush()
        logger.debug('Extracting gzip data from ' + temp.name)
        tar = tarfile.open(temp.name)
        for tarinfo in tar:
            if tarinfo.name.endswith('GeoLite2-City.mmdb'):
                extracted_file = tar.extractfile(tarinfo.name)
                return extracted_file.read()
Example #42
def fetch_db_data(db_download_location):
    logger.debug('Fetching db data from ' + db_download_location)
    response = urllib.request.urlopen(db_download_location)
    db_raw_data = response.read()
    with tempfile.NamedTemporaryFile(mode='wb') as temp:
        logger.debug('Writing compressed gzip to temp file: ' + temp.name)
        temp.write(db_raw_data)
        temp.flush()
        logger.debug('Extracting gzip data from ' + temp.name)
        tar = tarfile.open(temp.name)
        for tarinfo in tar:
            if tarinfo.name.endswith('GeoLite2-City.mmdb'):
                extracted_file = tar.extractfile(tarinfo.name)
                return extracted_file.read()
Example #43
def save_db_data(db_file, db_data):
    save_path = path.join(options.db_store_location, db_file)
    fd, temp_path = mkstemp(suffix='.tmp',
                            prefix=db_file,
                            dir=options.db_store_location)
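    # write to a temp file in the same directory, validate it, then rename it over
    # the existing db so readers never see a partially written file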
    with open(temp_path, 'wb') as temp:
        logger.debug("Saving db data to " + temp_path)
        temp.write(db_data)
        fsync(temp.fileno())
        temp.flush()
        logger.debug("Testing temp geolite db file")
        geo_ip = GeoIP(temp_path)
        # Do a generic lookup to verify we don't get any errors (malformed data)
        geo_ip.lookup_ip('8.8.8.8')
        logger.debug("Moving temp file to " + save_path)
    close(fd)
    rename(temp_path, save_path)
Example #44
def main():
    '''
    Look for events that contain username and a mac address
    Add the correlation to the intelligence index.
    '''
    logger.debug('starting')
    logger.debug(options)

    es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
    # create intelligence index if it's not already there
    es.create_index('intelligence')

    # read in the OUI file for mac prefix to vendor dictionary
    macassignments = readOUIFile(options.ouifilename)

    # search ES for events containing username and mac address
    correlations = esSearch(es, macassignments=macassignments)

    # store the correlation in the intelligence index
    esStoreCorrelations(es, correlations)

    logger.debug('finished')
Example #45
def fetch_db_data(db_file):
    # db_file[:-5] strips the '.mmdb' extension to get the MaxMind edition ID
    db_download_location = 'https://updates.maxmind.com/geoip/databases/' + db_file[:-5] + '/update'
    logger.debug('Fetching db data from ' + db_download_location)
    auth_creds = (options.account_id, options.license_key)
    response = requests.get(db_download_location, auth=auth_creds)
    if not response.ok:
        raise Exception(
            "Received bad response from maxmind server: {0}".format(
                response.text))
    db_raw_data = response.content
    with tempfile.NamedTemporaryFile(mode='wb',
                                     prefix=db_file + '.zip.',
                                     suffix='.tmp',
                                     dir=options.db_store_location) as temp:
        logger.debug('Writing compressed gzip to temp file: ' + temp.name)
        temp.write(db_raw_data)
        temp.flush()
        logger.debug('Extracting gzip data from ' + temp.name)
        gfile = gzip.GzipFile(temp.name, "rb")
        data = gfile.read()
        return data
Example #46
def main():
    args_parser = argparse.ArgumentParser(
        description='Task to update IP Intel JSON')
    args_parser.add_argument('-c',
                             '--configfile',
                             help='Path to JSON configuration file to use.')

    args = args_parser.parse_args()

    cfg = Config.load(args.configfile)

    initLogger()

    logger.debug('Downloading IP intel JSON')

    ip_intel_json = download_intel_file(cfg.source_url)

    logger.debug('Writing intel JSON to file')

    with open(cfg.download_location, 'w') as download_location:
        json.dump(ip_intel_json, download_location)

    logger.debug('Terminating successfully')
Example #47
    def reauth_timer(self):
        while True:
            time.sleep(self.flush_wait_time)
            logger.debug('Recycling credentials and reassuming role')
            self.authenticate()
Example #48
def searchMongoAlerts(mozdefdb):
    attackers = mozdefdb['attackers']
    alerts = mozdefdb['alerts']
    # search the last X alerts for IP addresses
    # aggregated by CIDR mask/24

    # aggregate IPv4 addresses in the most recent alerts
    # to find common attackers.
    ipv4TopHits = alerts.aggregate([
        # reverse sort the current alerts
        {"$sort": {"utcepoch": -1}},
        # most recent 100
        {"$limit": 100},
        # must have an ip address
        {"$match": {"events.documentsource.details.sourceipaddress": {"$exists": True}}},
        # must not be already related to an attacker
        {"$match": {"attackerid": {"$exists": False}}},
        # make each event into its own doc
        {"$unwind": "$events"},
        {"$project": {
            "_id": 0,
            # emit the source ip only
            "sourceip": "$events.documentsource.details.sourceipaddress"
        }},
        # count by ip
        {"$group": {"_id": "$sourceip", "hitcount": {"$sum": 1}}},
        # limit to those with X observances
        {"$match": {"hitcount": {"$gt": options.ipv4attackerhitcount}}},
        # sort
        {"$sort": SON([("hitcount", -1), ("_id", -1)])},
        # top 10
        {"$limit": 10}
    ])
    for ip in ipv4TopHits:
        # sanity check ip['_id'] which should be the ipv4 address
        if isIPv4(ip['_id']) and ip['_id'] not in netaddr.IPSet(['0.0.0.0']):
            ipcidr = netaddr.IPNetwork(ip['_id'])
            # set CIDR
            # todo: lookup ipwhois for asn_cidr value
            # potentially with a max mask value (i.e. asn is /8, limit attackers to /24)
            ipcidr.prefixlen = options.ipv4attackerprefixlength

            # append to or create attacker.
            # does this match an existing attacker's indicators
            if not ipcidr.ip.is_loopback() and not ipcidr.ip.is_private() and not ipcidr.ip.is_reserved():
                logger.debug('Searching for existing attacker with ip ' + str(ipcidr))
                attacker = attackers.find_one({'indicators.ipv4address': str(ipcidr)})

                if attacker is None:
                    logger.debug('Attacker not found, creating new one')
                    # new attacker
                    # generate a meteor-compatible ID
                    # save the ES document type, index, id
                    newAttacker = genNewAttacker()

                    # str to get the ip/cidr rather than netblock cidr.
                    # i.e. '1.2.3.4/24' not '1.2.3.0/24'
                    newAttacker['indicators'].append(dict(ipv4address=str(ipcidr)))
                    matchingalerts = alerts.find(
                        {"events.documentsource.details.sourceipaddress":
                         str(ipcidr.ip),
                         })
                    total_events = 0
                    if matchingalerts is not None:
                        # update list of alerts this attacker matched.
                        for alert in matchingalerts:
                            newAttacker['alerts'].append(
                                dict(alertid=alert['_id'])
                            )
                            # update alert with attackerID
                            alert['attackerid'] = newAttacker['_id']
                            alerts.save(alert)

                            total_events += len(alert['events'])
                            if len(alert['events']) > 0:
                                newAttacker['lastseentimestamp'] = toUTC(alert['events'][-1]['documentsource']['utctimestamp'])
                    newAttacker['alertscount'] = len(newAttacker['alerts'])
                    newAttacker['eventscount'] = total_events
                    attackers.insert(newAttacker)
                    # update geoIP info
                    latestGeoIP = [a['events'] for a in alerts.find(
                        {"events.documentsource.details.sourceipaddress":
                         str(ipcidr.ip),
                         })][-1][0]['documentsource']
                    updateAttackerGeoIP(mozdefdb, newAttacker['_id'], latestGeoIP)

                    if options.broadcastattackers:
                        broadcastAttacker(newAttacker)

                else:
                    logger.debug('Found existing attacker')
                    # if alert not present in this attackers list
                    # append this to the list
                    # todo: trim the list at X (i.e. last 100)
                    # search alerts without attackerid
                    matchingalerts = alerts.find(
                        {"events.documentsource.details.sourceipaddress":
                         str(ipcidr.ip),
                         "attackerid":{"$exists": False}
                         })
                    if matchingalerts is not None:
                        logger.debug('Matched alert with attacker')

                        # update list of alerts this attacker matched.
                        for alert in matchingalerts:
                            attacker['alerts'].append(
                                dict(alertid=alert['_id'])
                            )
                            # update alert with attackerID
                            alert['attackerid'] = attacker['_id']
                            alerts.save(alert)

                            attacker['eventscount'] += len(alert['events'])
                            attacker['lastseentimestamp'] = toUTC(alert['events'][-1]['documentsource']['utctimestamp'])

                            # geo ip could have changed, update it to the latest
                            updateAttackerGeoIP(mozdefdb, attacker['_id'], alert['events'][-1]['documentsource'])

                        # update counts
                        attacker['alertscount'] = len(attacker['alerts'])
                        attackers.save(attacker)

                    # should we autocategorize the attacker
                    # based on their alerts?
                    if attacker['category'] == 'unknown' and options.autocategorize:
                        # take a look at recent alerts for this attacker
                        # and if they are all the same category
                        # auto-categorize the attacker
                        matchingalerts = alerts.find(
                            {"attackerid": attacker['_id']}
                        ).sort('utcepoch', -1).limit(50)
                        # summarize the alert categories
                        # returns list of tuples: [(u'bruteforce', 8)]
                        categoryCounts = mostCommon(matchingalerts, 'category')
                        # are the alerts all the same category?

                        if len(categoryCounts) == 1:
                            # is the alert category mapped to an attacker category?
                            for category in options.categorymapping:
                                if list(category.keys())[0] == categoryCounts[0][0]:
                                    attacker['category'] = category[list(category.keys())[0]]
                                    attackers.save(attacker)
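The mostCommon helper used for auto-categorization above is not part of this snippet. A rough equivalent, assuming it tallies the values of a given key across an iterable of documents and returns (value, count) tuples sorted by frequency, would be:

from collections import Counter


def mostCommon(documents, key):
    # tally the value of `key` in each document and return
    # [(value, count), ...] ordered most common first,
    # e.g. [(u'bruteforce', 8)] as in the comment above
    counts = Counter(doc[key] for doc in documents if key in doc)
    return counts.most_common()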
Esempio n. 49
0
def main():
    logger.debug('Starting')
    logger.debug(options)
    getQueueSizes()
Esempio n. 50
0
 def reauth_timer(self):
     while True:
         time.sleep(self.flush_wait_time)
         logger.debug('Recycling credentials and reassuming role')
         self.authenticate()
Esempio n. 51
0
def getQueueSizes():
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(options.esservers)
    sqslist = {}
    sqslist['queue_stats'] = {}
    qcount = len(options.taskexchange)
    qcounter = qcount - 1

    mqConn = boto.sqs.connect_to_region(
        options.region,
        aws_access_key_id=options.accesskey,
        aws_secret_access_key=options.secretkey
    )

    while qcounter >= 0:
        for exchange in options.taskexchange:
            logger.debug('Looking for sqs queue stats in queue ' + exchange)
            eventTaskQueue = mqConn.get_queue(exchange)
            # get queue stats
            taskQueueStats = eventTaskQueue.get_attributes('All')
            sqslist['queue_stats'][qcounter] = taskQueueStats
            sqslist['queue_stats'][qcounter]['name'] = exchange
            qcounter -= 1

    # setup a log entry for health/status.
    sqsid = '{0}-{1}'.format(options.account, options.region)
    healthlog = dict(
        utctimestamp=toUTC(datetime.now()).isoformat(),
        hostname=sqsid,
        processid=os.getpid(),
        processname=sys.argv[0],
        severity='INFO',
        summary='mozdef health/status',
        category='mozdef',
        source='aws-sqs',
        tags=[],
        details=[])
    healthlog['details'] = dict(username='******')
    healthlog['details']['queues'] = list()
    healthlog['details']['total_messages_ready'] = 0
    healthlog['details']['total_feeds'] = qcount
    healthlog['tags'] = ['mozdef', 'status', 'sqs']
    ready = 0
    qcounter = qcount - 1
    for q in sqslist['queue_stats'].keys():
        queuelist = sqslist['queue_stats'][qcounter]
        if 'ApproximateNumberOfMessages' in queuelist:
            messages = int(queuelist['ApproximateNumberOfMessages'])
            ready += messages
            healthlog['details']['total_messages_ready'] = ready
        if 'ApproximateNumberOfMessagesNotVisible' in queuelist:
            inflight = int(queuelist['ApproximateNumberOfMessagesNotVisible'])
        if 'ApproximateNumberOfMessagesDelayed' in queuelist:
            delayed = int(queuelist['ApproximateNumberOfMessagesDelayed'])
        if 'name' in queuelist:
            name = queuelist['name']
        queueinfo = dict(
            queue=name,
            messages_delayed=delayed,
            messages_ready=messages,
            messages_inflight=inflight)
        healthlog['details']['queues'].append(queueinfo)
        qcounter -= 1
    # post to elasticsearch servers directly without going through
    # message queues in case there is an availability issue
    es.save_event(index=options.index, doc_type='mozdefhealth', body=json.dumps(healthlog))
    # post another doc with a static docid and tag
    # for use when querying for the latest sqs status
    healthlog['tags'] = ['mozdef', 'status', 'sqs-latest']
    es.save_event(index=options.index, doc_type='mozdefhealth', doc_id=getDocID(sqsid), body=json.dumps(healthlog))
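getDocID is referenced but not defined here. Since the second save_event call reuses it to overwrite a single 'sqs-latest' status document, one plausible implementation is a stable hash of the account/region identifier; this is an assumption, not the snippet's actual helper.

import hashlib


def getDocID(sqsregionidentifier):
    # derive a repeatable document id so the 'sqs-latest' health doc
    # is updated in place rather than accumulating one doc per run
    docid = hashlib.md5()
    docid.update('{0}.mozdefhealth.latest'.format(sqsregionidentifier).encode('utf-8'))
    return docid.hexdigest()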
Esempio n. 52
0
    def run(self):
        while True:
            try:
                records = self.sqs_queue.receive_messages(
                    MaxNumberOfMessages=options.prefetch)
                for msg in records:
                    # msg.message_id is the id,
                    # msg.body should be json

                    # pre process the message a bit
                    tmp = msg.body
                    try:
                        msgbody = json.loads(tmp)
                    except ValueError:
                        # If Boto wrote to the queue, it might be base64 encoded, so let's decode that
                        try:
                            tmp = base64.b64decode(tmp)
                            msgbody = json.loads(tmp)
                        except Exception as e:
                            logger.error(
                                'Invalid message, not JSON <dropping message and continuing>: %r'
                                % msg.body)
                            msg.delete()
                            continue

                    # If this is still not a dict,
                    # let's just drop the message and move on
                    if type(msgbody) is not dict:
                        logger.debug(
                            "Message is not a dictionary, dropping message.")
                        msg.delete()
                        continue

                    event = msgbody

                    # Was this message sent by fluentd-sqs
                    fluentd_sqs_specific_fields = {
                        'az', 'instance_id', '__tag'
                    }
                    if fluentd_sqs_specific_fields.issubset(set(
                            msgbody.keys())):
                        # Until we can influence fluentd-sqs to set the
                        # 'customendpoint' key before submitting to SQS, we'll
                        # need to do it here
                        # TODO : Change nubis fluentd output to include
                        # 'customendpoint'
                        event['customendpoint'] = True

                    if 'tags' in event:
                        event['tags'].extend([options.taskexchange])
                    else:
                        event['tags'] = [options.taskexchange]

                    # process message
                    self.on_message(event, msg)

                    # delete message from queue
                    msg.delete()
                time.sleep(.1)

            except ValueError as e:
                logger.exception('Exception while handling message: %r' % e)
                msg.delete()
            except (SSLEOFError, SSLError, socket.error):
                logger.info('Received network related error...reconnecting')
                time.sleep(5)
                self.sqs_queue = connect_sqs(options.region, options.accesskey,
                                             options.secretkey,
                                             options.taskexchange)
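connect_sqs is only called in the reconnect path above; given that the queue object exposes receive_messages(MaxNumberOfMessages=...), it is presumably a boto3 Queue resource. A minimal sketch under that assumption (the parameter names are guesses based on the call site):

import boto3


def connect_sqs(region_name, aws_access_key_id, aws_secret_access_key, task_exchange):
    # build an SQS resource with explicit credentials and return the named queue
    sqs = boto3.resource(
        'sqs',
        region_name=region_name,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    return sqs.get_queue_by_name(QueueName=task_exchange)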
Esempio n. 53
0
def main():
    logger.debug('Starting')
    logger.debug(options)
    getQueueSizes()
Esempio n. 54
0
def main():
    logger.debug('Starting')
    logger.debug(options)
    verify_events(options)
Esempio n. 55
0
def main():
    logger.debug('Starting')
    logger.debug(options)
    download_generic_alerts(options.alert_repo_url, options.alert_data_location, options.deploy_key_location)
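download_generic_alerts is not reproduced here; it presumably clones or refreshes the alert repository over SSH using the deploy key. A rough sketch using plain git via subprocess follows; the real task may use a git library and handle options differently.

import os
import subprocess


def download_generic_alerts(repo_url, data_location, deploy_key):
    # clone the alerts repo on first run, otherwise pull the latest changes,
    # forcing git to authenticate with the supplied deploy key
    env = dict(os.environ)
    env['GIT_SSH_COMMAND'] = 'ssh -i {0} -o StrictHostKeyChecking=no'.format(deploy_key)
    if not os.path.exists(data_location):
        subprocess.check_call(['git', 'clone', repo_url, data_location], env=env)
    else:
        subprocess.check_call(['git', '-C', data_location, 'pull'], env=env)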
Esempio n. 56
0
def main():
    logger.debug('started')
    state = State(options.state_file_name)
    try:
        # capture the time we start running so next time we catch any events
        # created while we run.
        lastrun = toUTC(datetime.now()).isoformat()

        scope = [
            'https://www.googleapis.com/auth/admin.reports.audit.readonly',
            'https://www.googleapis.com/auth/admin.reports.usage.readonly'
        ]

        # get our credentials
        credentials = service_account.Credentials.from_service_account_file(
            options.jsoncredentialfile,
            scopes=scope,
            subject=options.impersonate)

        # build a request to the admin sdk
        api = googleapiclient.discovery.build('admin',
                                              'reports_v1',
                                              credentials=credentials)
        response = api.activities().list(
            userKey='all',
            applicationName='login',
            startTime=toUTC(
                state.data['lastrun']).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
            maxResults=options.recordlimit).execute()

        # fix up the event craziness to a flatter format
        events = []
        if 'items' in response:
            for i in response['items']:
                # flatten the sub dict/lists to pull out the good parts
                mozmsg = mozdef.MozDefEvent(options.url)
                mozmsg.category = 'google'
                mozmsg.tags = ['google', 'authentication']
                mozmsg.severity = 'INFO'
                mozmsg.summary = 'google authentication: '

                details = dict()
                for keyValue in flattenDict(i):
                    # change key/values like:
                    # actor.email=value
                    # to actor_email=value
                    try:
                        key, value = keyValue.split('=')
                    except ValueError as e:
                        continue
                    key = key.replace('.', '_').lower()
                    details[key] = value

                # find important keys
                # and adjust their location/name
                if 'ipaddress' in details:
                    # it's the source ip
                    details['sourceipaddress'] = details['ipaddress']
                    del details['ipaddress']

                if 'id_time' in details:
                    mozmsg.timestamp = details['id_time']
                    mozmsg.utctimestamp = details['id_time']
                if 'events_name' in details:
                    mozmsg.summary += details['events_name'] + ' '
                if 'actor_email' in details:
                    mozmsg.summary += details['actor_email'] + ' '

                mozmsg.details = details
                events.append(mozmsg)

        # post events to mozdef
        logger.debug('posting {0} google events to mozdef'.format(len(events)))
        for e in events:
            e.send()

        # record the time we started as
        # the start time for next time.
        state.data['lastrun'] = lastrun
        state.write_state_file()
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Esempio n. 57
0
def main():
    logger.debug('starting')
    logger.debug(options)
    try:
        # connect to mongo
        client = MongoClient(options.mongohost, options.mongoport)
        mozdefdb = client.meteor
        ipblocklist = mozdefdb['ipblocklist']
        attackers = mozdefdb['attackers']
        # ensure indexes
        ipblocklist.create_index([('dateExpiring', -1)])
        attackers.create_index([('lastseentimestamp', -1)])
        attackers.create_index([('category', 1)])

        # First, gather IP addresses from recent attackers and add to the block list
        attackerIPList = aggregateAttackerIPs(attackers)

        # add attacker IPs to the blocklist
        # first delete ones we've created from an attacker
        ipblocklist.delete_many({'creator': 'mozdef', 'reference': 'attacker'})

        # delete any that expired
        ipblocklist.delete_many({'dateExpiring': {"$lte": datetime.utcnow() - timedelta(days=options.expireage)}})

        # add the aggregations we've found recently
        for ip in attackerIPList:
            ipblocklist.insert_one(
                {'_id': genMeteorID(),
                 'address': ip,
                 'reference': 'attacker',
                 'creator': 'mozdef',
                 'dateAdded': datetime.utcnow()})

        # Lastly, export the combined blocklist
        ipCursor = mozdefdb['ipblocklist'].aggregate([
            {"$sort": {"dateAdded": -1}},
            {"$match": {"address": {"$exists": True}}},
            {"$match": {
                "$or": [
                    {"dateExpiring": {"$gte": datetime.utcnow()}},
                    {"dateExpiring": {"$exists": False}},
                ]},
             },
            {"$project": {"address": 1}},
            {"$limit": options.iplimit}
        ])
        ips = []
        for ip in ipCursor:
            ips.append(ip['address'])
        uniq_ranges = netaddr.cidr_merge(ips)
        # to text
        with open(options.outputfile, 'w') as outputfile:
            for ip in uniq_ranges:
                outputfile.write("{0}\n".format(ip))
        # to s3?
        if len(options.aws_bucket_name) > 0:
            s3_upload_file(options.outputfile, options.aws_bucket_name, options.aws_document_key_name)

    except ValueError as e:
        logger.error("Exception %r generating IP block list" % e)