Example #1
def main():
    '''
    Get aggregated statistics on incoming events
    to use in alerting/notices/queries about event patterns over time
    '''
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(
        (list('{0}'.format(s) for s in options.esservers)))
    index = options.index
    stats = esSearch(es)
    logger.debug(json.dumps(stats))
    sleepcycles = 0
    try:
        while not es.index_exists(index):
            sleep(3)
            if sleepcycles == 3:
                logger.debug(
                    "The index is not created. Terminating eventStats.py cron job."
                )
                exit(1)
            sleepcycles += 1
        if es.index_exists(index):
            # post to elastic search servers directly without going through
            # message queues in case there is an availability issue
            es.save_event(index=index,
                          body=json.dumps(stats),
                          doc_type='mozdefstats')

    except Exception as e:
        logger.error("Exception %r when gathering statistics " % e)

    logger.debug('finished')
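A minimal sketch of the same bounded wait-then-write pattern factored into a helper; it reuses only the index_exists/save_event calls shown above, and the helper name and retry/delay arguments are illustrative:

from time import sleep


def save_stats_when_index_ready(es, index, stats_json, retries=3, delay=3):
    '''Poll briefly for the index, then post the stats document directly to ES.'''
    for _ in range(retries + 1):
        if es.index_exists(index):
            # post directly, bypassing the message queues
            es.save_event(index=index, body=stats_json, doc_type='mozdefstats')
            return True
        sleep(delay)
    # the index never appeared within (retries + 1) * delay seconds
    return False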
Example #2
def main():
    '''
    Get aggregated statistics on incoming events
    to use in alerting/notices/queries about event patterns over time
    '''
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
    stats = esSearch(es)
    logger.debug(json.dumps(stats))
    try:
        # post to elastic search servers directly without going through
        # message queues in case there is an availability issue
        es.save_event(body=json.dumps(stats), doc_type='mozdefstats')

    except Exception as e:
        logger.error("Exception %r when gathering statistics " % e)

    logger.debug('finished')
Example #3
def main():
    '''
    Get health and status stats and post to ES
    Post both as a historical reference (for charts)
    and as a static docid (for realtime current health/EPS displays)
    '''
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(
        (list('{0}'.format(s) for s in options.esservers)))
    try:
        auth = HTTPBasicAuth(options.mquser, options.mqpassword)

        for server in options.mqservers:
            logger.debug('checking message queues on {0}'.format(server))
            r = requests.get('http://{0}:{1}/api/queues'.format(
                server, options.mqapiport),
                             auth=auth)

            mq = r.json()
            # setup a log entry for health/status.
            healthlog = dict(utctimestamp=toUTC(datetime.now()).isoformat(),
                             hostname=server,
                             processid=os.getpid(),
                             processname=sys.argv[0],
                             severity='INFO',
                             summary='mozdef health/status',
                             category='mozdef',
                             source='mozdef',
                             tags=[],
                             details=[])

            healthlog['details'] = dict(username='******')
            healthlog['details']['loadaverage'] = list(os.getloadavg())
            healthlog['details']['queues'] = list()
            healthlog['details']['total_deliver_eps'] = 0
            healthlog['details']['total_publish_eps'] = 0
            healthlog['details']['total_messages_ready'] = 0
            healthlog['tags'] = ['mozdef', 'status']
            for m in mq:
                if 'message_stats' in m.keys() and isinstance(
                        m['message_stats'], dict):
                    if 'messages_ready' in m.keys():
                        mready = m['messages_ready']
                        healthlog['details']['total_messages_ready'] += m[
                            'messages_ready']
                    else:
                        mready = 0
                    if 'messages_unacknowledged' in m.keys():
                        munack = m['messages_unacknowledged']
                    else:
                        munack = 0
                    queueinfo = dict(queue=m['name'],
                                     vhost=m['vhost'],
                                     messages_ready=mready,
                                     messages_unacknowledged=munack)

                    if 'deliver_details' in m['message_stats'].keys():
                        queueinfo['deliver_eps'] = round(
                            m['message_stats']['deliver_details']['rate'], 2)
                        healthlog['details']['total_deliver_eps'] += round(
                            m['message_stats']['deliver_details']['rate'], 2)
                    if 'deliver_no_ack_details' in m['message_stats'].keys():
                        queueinfo['deliver_eps'] = round(
                            m['message_stats']['deliver_no_ack_details']
                            ['rate'], 2)
                        healthlog['details']['total_deliver_eps'] += round(
                            m['message_stats']['deliver_no_ack_details']
                            ['rate'], 2)
                    if 'publish_details' in m['message_stats'].keys():
                        queueinfo['publish_eps'] = round(
                            m['message_stats']['publish_details']['rate'], 2)
                        healthlog['details']['total_publish_eps'] += round(
                            m['message_stats']['publish_details']['rate'], 2)
                    healthlog['details']['queues'].append(queueinfo)

            # post to elastic search servers directly without going through
            # message queues in case there is an availability issue
            es.save_event(doc_type='mozdefhealth', body=json.dumps(healthlog))
            # post another doc with a static docid and tag
            # for use when querying for the latest status
            healthlog['tags'] = ['mozdef', 'status', 'latest']
            es.save_event(doc_type='mozdefhealth',
                          doc_id=getDocID(server),
                          body=json.dumps(healthlog))
    except Exception as e:
        logger.error("Exception %r when gathering health and status " % e)
Example #4
class UnitTestSuite(object):
    def setup(self):
        current_date = datetime.now()
        self.event_index_name = current_date.strftime("events-%Y%m%d")
        self.previous_event_index_name = (
            current_date - timedelta(days=1)).strftime("events-%Y%m%d")
        self.alert_index_name = current_date.strftime("alerts-%Y%m")
        self.parse_config()

        # Elasticsearch
        self.es_client = ElasticsearchClient(
            list('{0}'.format(s) for s in self.options.esservers))

        # RabbitMQ
        mqConnString = 'amqp://{0}:{1}@{2}:{3}//'.format(
            self.options.mquser, self.options.mqpassword,
            self.options.mqalertserver, self.options.mqport)

        mqAlertConn = Connection(mqConnString)
        alertExchange = Exchange(name=self.options.alertExchange,
                                 type='topic',
                                 durable=True,
                                 delivery_mode=1)
        alertExchange(mqAlertConn).declare()

        alertQueue = Queue(self.options.queueName,
                           exchange=alertExchange,
                           routing_key=self.options.alerttopic,
                           durable=False,
                           no_ack=(not self.options.mqack))
        alertQueue(mqAlertConn).declare()

        self.rabbitmq_alerts_consumer = mqAlertConn.Consumer(alertQueue,
                                                             accept=['json'])

        if pytest.config.option.delete_indexes:
            self.reset_elasticsearch()
            self.setup_elasticsearch()

        if pytest.config.option.delete_queues:
            self.reset_rabbitmq()

    def parse_config(self):
        default_config = os.path.join(os.path.dirname(__file__), "config.conf")
        options = DotDict()
        options.configfile = default_config

        options.esservers = list(
            getConfig('esservers', 'http://localhost:9200',
                      options.configfile).split(','))

        options.alertExchange = getConfig('alertexchange', 'alerts',
                                          options.configfile)
        options.queueName = getConfig('alertqueuename', 'alertBot',
                                      options.configfile)
        options.alerttopic = getConfig('alerttopic', 'mozdef.*',
                                       options.configfile)

        options.mquser = getConfig('mquser', 'guest', options.configfile)
        options.mqalertserver = getConfig('mqalertserver', 'localhost',
                                          options.configfile)
        options.mqpassword = getConfig('mqpassword', 'guest',
                                       options.configfile)
        options.mqport = getConfig('mqport', 5672, options.configfile)
        options.mqack = getConfig('mqack', True, options.configfile)

        self.options = options

    def reset_rabbitmq(self):
        self.rabbitmq_alerts_consumer.channel.queue_purge()

    def teardown(self):
        if pytest.config.option.delete_indexes:
            self.reset_elasticsearch()
        if pytest.config.option.delete_queues:
            self.reset_rabbitmq()

        self.rabbitmq_alerts_consumer.connection.close()
        self.rabbitmq_alerts_consumer.close()

    def populate_test_event(self, event, event_type='event'):
        self.es_client.save_event(body=event, doc_type=event_type)

    def populate_test_object(self, event, event_type='event'):
        self.es_client.save_object(index='events',
                                   body=event,
                                   doc_type=event_type)

    def setup_elasticsearch(self):
        default_mapping_file = os.path.join(
            os.path.dirname(__file__), "../config/defaultMappingTemplate.json")
        mapping_str = ''
        with open(default_mapping_file) as data_file:
            mapping_str = data_file.read()

        self.es_client.create_index(self.event_index_name, mapping=mapping_str)
        self.es_client.create_alias('events', self.event_index_name)
        self.es_client.create_index(self.previous_event_index_name,
                                    mapping=mapping_str)
        self.es_client.create_alias('events-previous',
                                    self.previous_event_index_name)
        self.es_client.create_index(self.alert_index_name, mapping=mapping_str)
        self.es_client.create_alias('alerts', self.alert_index_name)

    def reset_elasticsearch(self):
        self.es_client.delete_index(self.event_index_name, True)
        self.es_client.delete_index('events', True)
        self.es_client.delete_index(self.previous_event_index_name, True)
        self.es_client.delete_index('events-previous', True)
        self.es_client.delete_index(self.alert_index_name, True)
        self.es_client.delete_index('alerts', True)

    def flush(self, index_name):
        self.es_client.flush(index_name)

    def random_ip(self):
        return '{0}.{1}.{2}.{3}'.format(random.randint(1, 255),
                                        random.randint(1, 255),
                                        random.randint(1, 255),
                                        random.randint(1, 255))

    def generate_default_event(self):
        current_timestamp = UnitTestSuite.current_timestamp_lambda()

        source_ip = self.random_ip()

        event = {
            "_index": "events",
            "_type": "event",
            "_source": {
                "category": "excategory",
                "utctimestamp": current_timestamp,
                "receivedtimestamp": current_timestamp,
                "mozdefhostname": "mozdefhost",
                "hostname": "exhostname",
                "severity": "NOTICE",
                "source": "exsource",
                "summary": "Example summary",
                "tags": ['tag1', 'tag2'],
                "details": {
                    "sourceipaddress": source_ip,
                    "hostname": "exhostname"
                }
            }
        }

        return event

    def verify_event(self, event, expected_event):
        assert sorted(event.keys()) == sorted(expected_event.keys())
        for key, value in expected_event.iteritems():
            if key == 'receivedtimestamp':
                assert type(event[key]) == unicode
            else:
                assert event[key] == value, \
                    'Incorrect match for {0}, expected: {1}'.format(key, value)

    @staticmethod
    def current_timestamp():
        return toUTC(datetime.now()).isoformat()

    @staticmethod
    def subtract_from_timestamp(date_timedelta, timestamp=None):
        if timestamp is None:
            timestamp = UnitTestSuite.current_timestamp()
        utc_time = parse(timestamp)
        custom_date = utc_time - timedelta(**date_timedelta)
        return custom_date.isoformat()

    @staticmethod
    def create_timestamp_from_now(hour, minute, second):
        return toUTC(datetime.now().replace(hour=hour,
                                            minute=minute,
                                            second=second).isoformat())

    @staticmethod
    def current_timestamp_lambda():
        return lambda: UnitTestSuite.current_timestamp()

    @staticmethod
    def subtract_from_timestamp_lambda(date_timedelta, timestamp=None):
        return lambda: UnitTestSuite.subtract_from_timestamp(
            date_timedelta, timestamp)

    @staticmethod
    def create_timestamp_from_now_lambda(hour, minute, second):
        return lambda: UnitTestSuite.create_timestamp_from_now(
            hour, minute, second)
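A sketch of how a test might build on this suite; the test class and its flow below are hypothetical and only reuse fixtures and helpers defined above:

class TestStoredEvent(UnitTestSuite):
    def test_stored_event_summary(self):
        event = {
            'summary': 'Example summary',
            'utctimestamp': UnitTestSuite.current_timestamp(),
        }
        # index the event and make it searchable
        self.populate_test_event(event)
        self.flush(self.event_index_name)
        # ...fetch the stored document back (e.g. via self.es_client) and then:
        # self.verify_event(stored_event['_source'], event)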
Example #5
class UnitTestSuite(object):
    def setup(self):
        self.event_index_name = datetime.now().strftime("events-%Y%m%d")
        self.previous_event_index_name = (
            datetime.now() - timedelta(days=1)).strftime("events-%Y%m%d")
        self.alert_index_name = datetime.now().strftime("alerts-%Y%m")
        self.es_client = ElasticsearchClient(ES['servers'])

        if pytest.config.option.delete_indexes:
            self.reset_elasticsearch()
            self.setup_elasticsearch()

    def teardown(self):
        if pytest.config.option.delete_indexes:
            self.reset_elasticsearch()

    def populate_test_event(self, event, event_type='event'):
        self.es_client.save_event(body=event, doc_type=event_type)
        self.es_client.flush(self.event_index_name)

    def setup_elasticsearch(self):
        self.es_client.create_index(self.event_index_name)
        self.es_client.create_alias('events', self.event_index_name)
        self.es_client.create_index(self.previous_event_index_name)
        self.es_client.create_alias('events-previous',
                                    self.previous_event_index_name)
        self.es_client.create_index(self.alert_index_name)
        self.es_client.create_alias('alerts', self.alert_index_name)

    def reset_elasticsearch(self):
        self.es_client.delete_index(self.event_index_name, True)
        self.es_client.delete_index('events', True)
        self.es_client.delete_index(self.previous_event_index_name, True)
        self.es_client.delete_index('events-previous', True)
        self.es_client.delete_index(self.alert_index_name, True)
        self.es_client.delete_index('alerts', True)

    def random_ip(self):
        return '{0}.{1}.{2}.{3}'.format(random.randint(1, 255),
                                        random.randint(1, 255),
                                        random.randint(1, 255),
                                        random.randint(1, 255))

    def generate_default_event(self):
        current_timestamp = UnitTestSuite.current_timestamp_lambda()

        source_ip = self.random_ip()

        event = {
            "_index": "events",
            "_type": "event",
            "_source": {
                "category": "excategory",
                "utctimestamp": current_timestamp,
                "hostname": "exhostname",
                "severity": "NOTICE",
                "source": "exsource",
                "summary": "Example summary",
                "tags": ['tag1', 'tag2'],
                "details": {
                    "sourceipaddress": source_ip,
                    "hostname": "exhostname"
                }
            }
        }

        return event

    def verify_event(self, event, expected_event):
        assert sorted(event.keys()) == sorted(expected_event.keys())
        for key, value in expected_event.iteritems():
            if key == 'receivedtimestamp':
                assert type(event[key]) == unicode
            else:
                assert event[key] == value, \
                    'Incorrect match for {0}, expected: {1}'.format(key, value)

    @staticmethod
    def current_timestamp():
        return toUTC(datetime.now()).isoformat()

    @staticmethod
    def subtract_from_timestamp(date_timedelta, timestamp=None):
        if timestamp is None:
            timestamp = UnitTestSuite.current_timestamp()
        utc_time = parse(timestamp)
        custom_date = utc_time - timedelta(**date_timedelta)
        return custom_date.isoformat()

    @staticmethod
    def current_timestamp_lambda():
        return lambda: UnitTestSuite.current_timestamp()

    @staticmethod
    def subtract_from_timestamp_lambda(date_timedelta, timestamp=None):
        return lambda: UnitTestSuite.subtract_from_timestamp(
            date_timedelta, timestamp)
Example #6
class AlertTask(Task):

    abstract = True

    def __init__(self):
        self.alert_name = self.__class__.__name__
        self.main_query = None

        # Used to store any alerts that were thrown
        self.alert_ids = []

        # List of events
        self.events = None
        # List of aggregations
        # e.g. when aggregField is email: [{value:'*****@*****.**',count:1337,events:[...]}, ...]
        self.aggregations = None

        self.log.debug('starting {0}'.format(self.alert_name))
        self.log.debug(RABBITMQ)
        self.log.debug(ES)

        self._configureKombu()
        self._configureES()

        self.event_indices = ['events', 'events-previous']

    def classname(self):
        return self.__class__.__name__

    @property
    def log(self):
        return get_task_logger('%s.%s' % (__name__, self.alert_name))

    def parse_config(self, config_filename, config_keys):
        myparser = OptionParser()
        self.config = None
        (self.config, args) = myparser.parse_args([])
        for config_key in config_keys:
            temp_value = getConfig(config_key, '', config_filename)
            setattr(self.config, config_key, temp_value)

    def _configureKombu(self):
        """
        Configure kombu for rabbitmq
        """
        try:
            connString = 'amqp://{0}:{1}@{2}:{3}//'.format(
                RABBITMQ['mquser'], RABBITMQ['mqpassword'],
                RABBITMQ['mqserver'], RABBITMQ['mqport'])
            self.mqConn = kombu.Connection(connString)

            self.alertExchange = kombu.Exchange(name=RABBITMQ['alertexchange'],
                                                type='topic',
                                                durable=True)
            self.alertExchange(self.mqConn).declare()
            alertQueue = kombu.Queue(RABBITMQ['alertqueue'],
                                     exchange=self.alertExchange)
            alertQueue(self.mqConn).declare()
            self.mqproducer = self.mqConn.Producer(serializer='json')
            self.log.debug('Kombu configured')
        except Exception as e:
            self.log.error(
                'Exception while configuring kombu for alerts: {0}'.format(e))

    def _configureES(self):
        """
        Configure elasticsearch client
        """
        try:
            self.es = ElasticsearchClient(ES['servers'])
            self.log.debug('ES configured')
        except Exception as e:
            self.log.error(
                'Exception while configuring ES for alerts: {0}'.format(e))

    def mostCommon(self, listofdicts, dictkeypath):
        """
            Given a list of dictionaries,
            return the most common entries
            along a key path separated by '.',
            e.g. dictkey.subkey.subkey,
            as a list of tuples:
            [(value, count), (value, count)]
        """
        inspectlist = list()
        path = list(dictpath(dictkeypath))
        for i in listofdicts:
            for k in list(keypaths(i)):
                if not (set(k[0]).symmetric_difference(path)):
                    inspectlist.append(k[1])

        return Counter(inspectlist).most_common()
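        # A usage sketch with hypothetical values (not from the original source):
        #   self.mostCommon(
        #       [{'details': {'sourceipaddress': '10.0.0.1'}},
        #        {'details': {'sourceipaddress': '10.0.0.1'}},
        #        {'details': {'sourceipaddress': '10.0.0.2'}}],
        #       'details.sourceipaddress')
        #   would return [('10.0.0.1', 2), ('10.0.0.2', 1)].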

    def alertToMessageQueue(self, alertDict):
        """
        Send alert to the rabbit message queue
        """
        try:
            # cherry pick items from the alertDict to send to the alerts messageQueue
            mqAlert = dict(severity='INFO', category='')
            if 'severity' in alertDict.keys():
                mqAlert['severity'] = alertDict['severity']
            if 'category' in alertDict.keys():
                mqAlert['category'] = alertDict['category']
            if 'utctimestamp' in alertDict.keys():
                mqAlert['utctimestamp'] = alertDict['utctimestamp']
            if 'eventtimestamp' in alertDict.keys():
                mqAlert['eventtimestamp'] = alertDict['eventtimestamp']
            mqAlert['summary'] = alertDict['summary']
            self.log.debug(mqAlert)
            ensurePublish = self.mqConn.ensure(self.mqproducer,
                                               self.mqproducer.publish,
                                               max_retries=10)
            ensurePublish(alertDict,
                          exchange=self.alertExchange,
                          routing_key=RABBITMQ['alertqueue'])
            self.log.debug('alert sent to the alert queue')
        except Exception as e:
            self.log.error(
                'Exception while sending alert to message queue: {0}'.format(
                    e))

    def alertToES(self, alertDict):
        """
        Send alert to elasticsearch
        """
        try:
            res = self.es.save_alert(body=alertDict)
            self.log.debug('alert sent to ES')
            self.log.debug(res)
            return res
        except Exception as e:
            self.log.error(
                'Exception while pushing alert to ES: {0}'.format(e))

    def tagBotNotify(self, alert):
        """
            Tag the alert with whether mozdefbot should be notified,
            excluding low-severity (NOTICE/INFO) alerts.
            If 'ircchannel' is set on an alert, we always notify mozdefbot.
        """
        alert['notify_mozdefbot'] = True
        if alert['severity'] == 'NOTICE' or alert['severity'] == 'INFO':
            alert['notify_mozdefbot'] = False

        # If an alert sets specific ircchannel, then we should probably always notify in mozdefbot
        if ('ircchannel' in alert and alert['ircchannel'] != '' and
                alert['ircchannel'] is not None):
            alert['notify_mozdefbot'] = True
        return alert

    def saveAlertID(self, saved_alert):
        """
        Save alert to self so we can analyze it later
        """
        self.alert_ids.append(saved_alert['_id'])

    def filtersManual(self, query):
        """
        Configure filters manually

        query is a search query object with date_timedelta populated

        """
        # Don't fire on already alerted events
        duplicate_matcher = TermMatch('alert_names', self.classname())
        if duplicate_matcher not in query.must_not:
            query.add_must_not(duplicate_matcher)

        self.main_query = query

    def searchEventsSimple(self):
        """
        Search events matching filters, store events in self.events
        """
        try:
            results = self.main_query.execute(self.es,
                                              indices=self.event_indices)
            self.events = results['hits']
            self.log.debug(self.events)
        except Exception as e:
            self.log.error('Error while searching events in ES: {0}'.format(e))

    def searchEventsAggregated(self, aggregationPath, samplesLimit=5):
        """
        Search events, aggregate matching ES filters by aggregationPath,
        store them in self.aggregations as a list of dictionaries
        keys:
          value: the text value that was found in the aggregationPath
          count: the hitcount of the text value
          events: the sampled list of events that matched
          allevents: the unsampled, full list of matching events
        aggregationPath can be key.subkey.subkey to specify a path to a dictionary value
        relative to the _source that's returned from elastic search.
        ex: details.sourceipaddress
        """
        try:
            esresults = self.main_query.execute(self.es,
                                                indices=self.event_indices)
            results = esresults['hits']

            # List of aggregation values that can be counted/summarized by Counter
            # Example: ['*****@*****.**','*****@*****.**', '*****@*****.**'] for an email aggregField
            aggregationValues = []
            for r in results:
                aggregationValues.append(
                    getValueByPath(r['_source'], aggregationPath))

            # [{value:'*****@*****.**',count:1337,events:[...]}, ...]
            aggregationList = []
            for i in Counter(aggregationValues).most_common():
                idict = {
                    'value': i[0],
                    'count': i[1],
                    'events': [],
                    'allevents': []
                }
                for r in results:
                    if getValueByPath(r['_source'], aggregationPath).encode(
                            'ascii', 'ignore') == i[0]:
                        # copy events detail into this aggregation up to our samples limit
                        if len(idict['events']) < samplesLimit:
                            idict['events'].append(r)
                        # also copy all events to a non-sampled list
                        # so we mark all events as alerted and don't re-alert
                        idict['allevents'].append(r)
                aggregationList.append(idict)

            self.aggregations = aggregationList
            self.log.debug(self.aggregations)
        except Exception as e:
            self.log.error('Error while searching events in ES: {0}'.format(e))
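        # Resulting shape of self.aggregations, with hypothetical values
        # (the keys mirror the docstring above):
        #   [{'value': '10.0.0.1',
        #     'count': 42,
        #     'events': [...up to samplesLimit matching hits...],
        #     'allevents': [...every matching hit...]},
        #    ...]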

    def walkEvents(self, **kwargs):
        """
        Walk through events, provide some methods to hook in alerts
        """
        if len(self.events) > 0:
            for i in self.events:
                alert = self.onEvent(i, **kwargs)
                if alert:
                    alert = self.tagBotNotify(alert)
                    self.log.debug(alert)
                    alertResultES = self.alertToES(alert)
                    self.tagEventsAlert([i], alertResultES)
                    self.alertToMessageQueue(alert)
                    self.hookAfterInsertion(alert)
                    self.saveAlertID(alertResultES)
        # did we not match anything?
        # can also be used as an alert trigger
        if len(self.events) == 0:
            alert = self.onNoEvent(**kwargs)
            if alert:
                alert = self.tagBotNotify(alert)
                self.log.debug(alert)
                alertResultES = self.alertToES(alert)
                self.alertToMessageQueue(alert)
                self.hookAfterInsertion(alert)
                self.saveAlertID(alertResultES)

    def walkAggregations(self, threshold, config=None):
        """
        Walk through aggregations, provide some methods to hook in alerts
        """
        if len(self.aggregations) > 0:
            for aggregation in self.aggregations:
                if aggregation['count'] >= threshold:
                    aggregation['config'] = config
                    alert = self.onAggregation(aggregation)
                    if alert:
                        alert = self.tagBotNotify(alert)
                        self.log.debug(alert)
                        alertResultES = self.alertToES(alert)
                        # even though we only sample events in the alert
                        # tag all events as alerted to avoid re-alerting
                        # on events we've already processed.
                        self.tagEventsAlert(aggregation['allevents'],
                                            alertResultES)
                        self.alertToMessageQueue(alert)
                        self.saveAlertID(alertResultES)

    def createAlertDict(self,
                        summary,
                        category,
                        tags,
                        events,
                        severity='NOTICE',
                        url=None,
                        ircchannel=None):
        """
        Create an alert dict
        """
        alert = {
            'utctimestamp': toUTC(datetime.now()).isoformat(),
            'severity': severity,
            'summary': summary,
            'category': category,
            'tags': tags,
            'events': [],
            'ircchannel': ircchannel,
        }
        if url:
            alert['url'] = url

        for e in events:
            alert['events'].append({
                'documentindex': e['_index'],
                'documenttype': e['_type'],
                'documentsource': e['_source'],
                'documentid': e['_id']
            })
        self.log.debug(alert)
        return alert

    def onEvent(self, event, *args, **kwargs):
        """
        To be overridden by children to run their code;
        used when creating an alert from an event.
        Must return an alert dict or None.
        """
        pass

    def onNoEvent(self, *args, **kwargs):
        """
        To be overridden by children to run their code
        when NOTHING matches a filter,
        which can be used to trigger on the absence of
        events, much like a dead man's switch.
        Used when creating an alert from the absence of events.
        Must return an alert dict or None.
        pass

    def onAggregation(self, aggregation):
        """
        To be overridden by children to run their code;
        used when creating an alert from an aggregation.
        Must return an alert dict or None.
        """
        pass

    def hookAfterInsertion(self, alert):
        """
        To be overridden by children to run their code
        after an alert has been inserted into ES.
        """
        pass

    def tagEventsAlert(self, events, alertResultES):
        """
        Update the event with the alertid/index
        and update the alert_names on the event itself so it's
        not re-alerted
        """
        try:
            for event in events:
                if 'alerts' not in event['_source'].keys():
                    event['_source']['alerts'] = []
                event['_source']['alerts'].append({
                    'index':
                    alertResultES['_index'],
                    'type':
                    alertResultES['_type'],
                    'id':
                    alertResultES['_id']
                })

                if 'alert_names' not in event['_source']:
                    event['_source']['alert_names'] = []
                event['_source']['alert_names'].append(self.classname())

                self.es.save_event(index=event['_index'],
                                   doc_type=event['_type'],
                                   body=event['_source'],
                                   doc_id=event['_id'])
        except Exception as e:
            self.log.error('Error while updating events in ES: {0}'.format(e))

    def main(self):
        """
        To be overridden by children to run their code
        """
        pass

    def run(self, *args, **kwargs):
        """
        Main method launched by celery periodically
        """
        try:
            self.main(*args, **kwargs)
            self.log.debug('finished')
        except Exception as e:
            self.log.error('Exception in main() method: {0}'.format(e))

    def parse_json_alert_config(self, config_file):
        """
        Helper function to parse an alert config file
        """
        alert_dir = os.path.join(os.path.dirname(__file__), '..')
        config_file_path = os.path.join(alert_dir, config_file)
        json_obj = {}
        with open(config_file_path, "r") as fd:
            try:
                json_obj = json.load(fd)
            except ValueError:
                sys.stderr.write("FAILED to open the configuration file\n")

        return json_obj
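A minimal sketch of a concrete alert built on this class, shown only to illustrate how the hooks above fit together. The alert name, category, and threshold are made up; TermMatch, filtersManual, searchEventsAggregated, walkAggregations and createAlertDict appear above, while SearchQuery and its add_must/minutes arguments are assumptions about MozDef's query helpers:

class AlertAuthBruteforceExample(AlertTask):
    def main(self):
        # time-bounded query; filtersManual expects a search query object
        # with date_timedelta populated
        search_query = SearchQuery(minutes=15)
        search_query.add_must(TermMatch('category', 'authentication'))
        self.filtersManual(search_query)
        # aggregate matching events per source IP and fire above a threshold
        self.searchEventsAggregated('details.sourceipaddress', samplesLimit=10)
        self.walkAggregations(threshold=10)

    def onAggregation(self, aggreg):
        summary = '{0} authentication events from {1}'.format(
            aggreg['count'], aggreg['value'])
        return self.createAlertDict(summary,
                                    'bruteforce',
                                    ['authentication'],
                                    aggreg['events'],
                                    severity='WARNING')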
Example #7
def main():
    if options.output=='syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport)))
    else:
        sh=logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    #logger.debug(options)
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        s = requests.Session()
        s.headers.update({'Accept': 'application/json'})
        s.headers.update({'Content-type': 'application/json'})
        s.headers.update({'Authorization':'SSWS {0}'.format(options.apikey)})

        #capture the time we start running so next time we catch any events created while we run.
        state = State(options.state_file)
        lastrun = toUTC(datetime.now()).isoformat()
        # in case we don't archive files, only look at today's and yesterday's files.
        yesterday=date.strftime(datetime.utcnow()-timedelta(days=1),'%Y/%m/%d')
        today = date.strftime(datetime.utcnow(),'%Y/%m/%d')

        r = s.get('https://{0}/api/v1/events?startDate={1}&limit={2}'.format(
            options.oktadomain,
            toUTC(state.data['lastrun']).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
            options.recordlimit
        ))

        if r.status_code == 200:
            oktaevents = json.loads(r.text)
            for event in oktaevents:
                if 'published' in event.keys():
                    if toUTC(event['published']) > toUTC(state.data['lastrun']):
                        try:
                            mozdefEvent = dict()
                            mozdefEvent['utctimestamp']=toUTC(event['published']).isoformat()
                            mozdefEvent['category'] = 'okta'
                            mozdefEvent['tags'] = ['okta']
                            if 'action' in event.keys() and 'message' in event['action'].keys():
                                mozdefEvent['summary'] = event['action']['message']
                            mozdefEvent['details'] = event
                            # Actor parsing
                            # While there are various objectTypes attributes, we just take any attribute that matches,
                            # in case Okta changes its structure around a bit.
                            # This means the last instance of each attribute across all actors will be recorded in mozdef
                            # while earlier ones are discarded,
                            # which ends up working out well in Okta's case.
                            if 'actors' in event.keys():
                                for actor in event['actors']:
                                    if 'ipAddress' in actor.keys():
                                        mozdefEvent['details']['sourceipaddress'] = actor['ipAddress']
                                    if 'login' in actor.keys():
                                        mozdefEvent['details']['username'] = actor['login']
                                    if 'requestUri' in actor.keys():
                                        mozdefEvent['details']['source_uri'] = actor['requestUri']

                            # We are renaming action to activity because there are
                            # currently mapping problems with the details.action field
                            mozdefEvent['details']['activity'] = mozdefEvent['details']['action']
                            mozdefEvent['details'].pop('action')

                            jbody=json.dumps(mozdefEvent)
                            res = es.save_event(doc_type='okta',body=jbody)
                            logger.debug(res)
                        except Exception as e:
                            logger.error('Error handling log record {0} {1}'.format(r, e))
                            continue
                else:
                    logger.error('Okta event does not contain published date: {0}'.format(event))
            state.data['lastrun'] = lastrun
            state.write_state_file()
        else:
            logger.error('Could not get Okta events HTTP error code {} reason {}'.format(r.status_code, r.reason))
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r"%e)
Example #8
def getQueueSizes():
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(options.esservers)
    sqslist = {}
    sqslist['queue_stats'] = {}
    qcount = len(options.taskexchange)
    qcounter = qcount - 1
    try:
        # meant only to talk to SQS using boto
        # and return queue attributes

        mqConn = boto.sqs.connect_to_region(
            options.region,
            aws_access_key_id=options.accesskey,
            aws_secret_access_key=options.secretkey)

        while qcounter >= 0:
            for exchange in options.taskexchange:
                logger.debug('Looking for sqs queue stats in queue ' + exchange)
                eventTaskQueue = mqConn.get_queue(exchange)
                # get queue stats
                taskQueueStats = eventTaskQueue.get_attributes('All')
                sqslist['queue_stats'][qcounter] = taskQueueStats
                sqslist['queue_stats'][qcounter]['name'] = exchange
                qcounter -= 1
    except Exception as e:
        logger.error("Exception %r when gathering health and status " % e)

    # setup a log entry for health/status.
    sqsid = '{0}-{1}'.format(options.account, options.region)
    healthlog = dict(utctimestamp=toUTC(datetime.now()).isoformat(),
                     hostname=sqsid,
                     processid=os.getpid(),
                     processname=sys.argv[0],
                     severity='INFO',
                     summary='mozdef health/status',
                     category='mozdef',
                     source='aws-sqs',
                     tags=[],
                     details=[])
    healthlog['details'] = dict(username='******')
    healthlog['details']['queues'] = list()
    healthlog['details']['total_messages_ready'] = 0
    healthlog['details']['total_feeds'] = qcount
    healthlog['tags'] = ['mozdef', 'status', 'sqs']
    ready = 0
    qcounter = qcount - 1
    for q in sqslist['queue_stats'].keys():
        queuelist = sqslist['queue_stats'][qcounter]
        if 'ApproximateNumberOfMessages' in queuelist:
            ready1 = int(queuelist['ApproximateNumberOfMessages'])
            ready = ready1 + ready
            healthlog['details']['total_messages_ready'] = ready
        if 'ApproximateNumberOfMessages' in queuelist:
            messages = int(queuelist['ApproximateNumberOfMessages'])
        if 'ApproximateNumberOfMessagesNotVisible' in queuelist:
            inflight = int(queuelist['ApproximateNumberOfMessagesNotVisible'])
        if 'ApproximateNumberOfMessagesDelayed' in queuelist:
            delayed = int(queuelist['ApproximateNumberOfMessagesDelayed'])
        if 'name' in queuelist:
            name = queuelist['name']
        queueinfo = dict(queue=name,
                         messages_delayed=delayed,
                         messages_ready=messages,
                         messages_inflight=inflight)
        healthlog['details']['queues'].append(queueinfo)
        qcounter -= 1
    # post to elasticsearch servers directly without going through
    # message queues in case there is an availability issue
    es.save_event(index=options.index,
                  doc_type='mozdefhealth',
                  body=json.dumps(healthlog))
    # post another doc with a static docid and tag
    # for use when querying for the latest sqs status
    healthlog['tags'] = ['mozdef', 'status', 'sqs-latest']
    es.save_event(index=options.index,
                  doc_type='mozdefhealth',
                  doc_id=getDocID(sqsid),
                  body=json.dumps(healthlog))
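The reverse qcounter bookkeeping in the collection loop above could be written more directly with enumerate; a behavior-equivalent sketch using the same boto calls (not part of the original script):

for position, exchange in enumerate(options.taskexchange):
    logger.debug('Looking for sqs queue stats in queue ' + exchange)
    eventTaskQueue = mqConn.get_queue(exchange)
    # queue attributes come back as a dict; remember which exchange they belong to
    taskQueueStats = eventTaskQueue.get_attributes('All')
    taskQueueStats['name'] = exchange
    sqslist['queue_stats'][position] = taskQueueStats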