Example #1
def esCloseIndices():
    logger.debug('started')
    try:
        es = ElasticsearchClient(
            (list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_open_indices()
    except Exception as e:
        logger.error(
            "Unhandled exception while connecting to ES, terminating: %r" %
            (e))

    # examine each index pulled from get_open_indices
    # to determine if it meets aging criteria
    month_ago_date = toUTC(
        datetime.now()) - timedelta(days=int(options.index_age))
    month_ago_date = month_ago_date.replace(tzinfo=None)
    for index in indices:
        if 'events' in index:
            index_date = index.rsplit('-', 1)[1]
            logger.debug("Checking to see if Index: %s can be closed." %
                         (index))
            if len(index_date) == 8:
                index_date_obj = datetime.strptime(index_date, '%Y%m%d')
                try:
                    if month_ago_date > index_date_obj:
                        logger.debug("Index: %s will be closed." % (index))
                        es.close_index(index)
                    else:
                        logger.debug(
                            "Index: %s  does not meet aging criteria and will not be closed."
                            % (index))
                except Exception as e:
                    logger.error(
                        "Unhandled exception while closing indices, terminating: %r"
                        % (e))
Example #2
def esPruneIndexes():
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
        # do the pruning
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
                if pruning != '0':
                    index_to_prune = index
                    if rotation == 'daily':
                        idate = date.strftime(toUTC(datetime.now()) - timedelta(days=int(pruning)), '%Y%m%d')
                        index_to_prune += '-%s' % idate
                    elif rotation == 'monthly':
                        idate = date.strftime(datetime.utcnow() - timedelta(days=31 * int(pruning)), '%Y%m')
                        index_to_prune += '-%s' % idate

                    if index_to_prune in indices:
                        logger.debug('Deleting index: %s' % index_to_prune)
                        es.delete_index(index_to_prune, True)
                    else:
                        logger.error('Error deleting index %s, index missing' % index_to_prune)
            except Exception as e:
                logger.error("Unhandled exception while deleting %s, terminating: %r" % (index_to_prune, e))

    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #3
def esCloseIndices():
    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
    except Exception as e:
        logger.error("Unhandled exception while connecting to ES, terminating: %r" % (e))

    # examine each index pulled from get_indices
    # to determine if it meets aging criteria
    month_ago_date = toUTC(datetime.now()) - timedelta(days=int(options.index_age))
    month_ago_date = month_ago_date.replace(tzinfo=None)
    for index in indices:
        if 'events' in index:
            index_date = index.rsplit('-', 1)[1]
            logger.debug("Checking to see if Index: %s can be closed." % (index))
            if len(index_date) == 8:
                index_date_obj = datetime.strptime(index_date, '%Y%m%d')
                try:
                    if month_ago_date > index_date_obj:
                        logger.debug("Index: %s will be closed." % (index))
                        es.close_index(index)
                    else:
                        logger.debug("Index: %s  does not meet aging criteria and will not be closed." % (index))
                except Exception as e:
                    logger.error("Unhandled exception while closing indices, terminating: %r" % (e))
Example #4
def main():
    '''
    Get aggregated statistics on incoming events
    to use in alerting/notices/queries about event patterns over time
    '''
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
    index = options.index
    stats = esSearch(es)
    logger.debug(json.dumps(stats))
    sleepcycles = 0
    try:
        while not es.index_exists(index):
            sleep(3)
            if sleepcycles == 3:
                logger.debug("The index is not created. Terminating eventStats.py cron job.")
                exit(1)
            sleepcycles += 1
        if es.index_exists(index):
            # post to elastic search servers directly without going through
            # message queues in case there is an availability issue
            es.save_event(index=index, body=json.dumps(stats))

    except Exception as e:
        logger.error("Exception %r when gathering statistics " % e)

    logger.debug('finished')
Example #5
 def _configureES(self):
     """
     Configure elasticsearch client
     """
     try:
         self.es = ElasticsearchClient(ES["servers"])
         self.log.debug("ES configured")
     except Exception as e:
         self.log.error("Exception while configuring ES for alerts: {0}".format(e))
Example #6
def main():
    logger.debug('starting')
    logger.debug(options)

    es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
    client = MongoClient(options.mongohost, options.mongoport)
    # use meteor db
    mongo = client.meteor
    writeFrontendStats(getFrontendStats(es), mongo)
    writeSqsStats(getSqsStats(es), mongo)
    writeEsClusterStats(es.get_cluster_health(), mongo)
    writeEsNodesStats(getEsNodesStats(), mongo)
    writeEsHotThreads(getEsHotThreads(), mongo)
Example #7
def main():
    logger.debug('starting')
    logger.debug(options)

    es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
    client = MongoClient(options.mongohost, options.mongoport)
    # use meteor db
    mongo = client.meteor
    writeFrontendStats(getFrontendStats(es), mongo)
    writeSqsStats(getSqsStats(es), mongo)
    writeEsClusterStats(es.get_cluster_health(), mongo)
    writeEsNodesStats(getEsNodesStats(), mongo)
    writeEsHotThreads(getEsHotThreads(), mongo)
Example #8
def kibanaDashboards():
    resultsList = []
    try:
        es_client = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        search_query = SearchQuery()
        search_query.add_must(TermMatch('_type', 'dashboard'))
        results = search_query.execute(es_client, indices=['.kibana'])

        for dashboard in results['hits']:
            resultsList.append({
                'name': dashboard['_source']['title'],
                'url': "%s#/%s/%s" % (
                    options.kibanaurl,
                    "dashboard",
                    dashboard['_id']
                )
            })

    except ElasticsearchInvalidIndex as e:
        sys.stderr.write('Kibana dashboard index not found: {0}\n'.format(e))

    except Exception as e:
        sys.stderr.write('Kibana dashboard received error: {0}\n'.format(e))

    return json.dumps(resultsList)
Example #9
def setup_es_client(options):
    global ES_CLIENT
    try:
        ES_CLIENT
    except NameError:
        ES_CLIENT = ElasticsearchClient(list('{0}'.format(s) for s in options.esservers))
    return ES_CLIENT
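
The helper above builds the client once and then reuses the module-level ES_CLIENT on later calls. A minimal sketch of the same idea, assuming the ElasticsearchClient constructor used throughout these examples but with a None sentinel instead of catching NameError (get_es_client below is an illustrative name, not from the original code):

_ES_CLIENT = None

def get_es_client(options):
    # Build the client on the first call only; reuse it afterwards.
    global _ES_CLIENT
    if _ES_CLIENT is None:
        _ES_CLIENT = ElasticsearchClient(list('{0}'.format(s) for s in options.esservers))
    return _ES_CLIENT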
Example #10
def esConnect():
    """open or re-open a connection to elastic search"""
    return ElasticsearchClient(
        (list("{0}".format(s) for s in options.esservers)),
        bulk_amount=options.esbulksize,
        bulk_refresh_time=options.esbulktimeout,
    )
Example #11
    def __init__(self):
        config = _load_config(CONFIG_FILE)

        try:
            es_address = config['elasticSearchAddress']
        except KeyError:
            raise KeyError(MISSING_REQUIRED_KEY_ERR_MSG)

        es_client = ElasticsearchClient(es_address)

        search_indices = config.get('searchIndices', [])

        self.max_connections = config.get('maxConnections', 0)
        self.match_tags = config.get('matchTags', [])
        self.search_window = config.get('searchWindow', {})

        if len(search_indices) == 0:
            search_indices = ['alerts']

        if self.max_connections == 0:
            self.max_connections = None

        if len(self.match_tags) == 0:
            self.match_tags = ['portscan']

        if len(self.search_window) == 0:
            self.search_window = {'hours': 24}

        # Store our ES client in a closure bound to the plugin object.
        # The intent behind this approach is to make the interface to
        # the `enrich` function require dependency injection for testing.
        def search_fn(query):
            return query.execute(es_client, indices=search_indices)

        self.search = search_fn
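
The closure noted in the comment above is what lets a test swap in a stub search without touching a live cluster. A hypothetical sketch, assuming the class defined above is importable as EnrichmentPlugin (the class name and the document shape returned by the stub are assumptions, not code from the original project):

def fake_search(query):
    # Stand-in for query.execute(es_client, ...); returns one canned hit.
    return {'hits': [{'_source': {'details': {'sourceipaddress': '10.1.2.3'}}}]}

plugin = EnrichmentPlugin()   # hypothetical name for the class above
plugin.search = fake_search   # inject the stub before exercising enrich()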
Example #12
def esLdapResults(begindateUTC=None, enddateUTC=None):
    '''an ES query/facet to count success/failed logins'''
    resultsList = list()
    if begindateUTC is None:
        begindateUTC = datetime.now() - timedelta(hours=1)
        begindateUTC = toUTC(begindateUTC)
    if enddateUTC is None:
        enddateUTC = datetime.now()
        enddateUTC = toUTC(enddateUTC)

    try:
        es_client = ElasticsearchClient(list('{0}'.format(s) for s in options.esservers))
        search_query = SearchQuery()
        range_match = RangeMatch('utctimestamp', begindateUTC, enddateUTC)

        search_query.add_must(range_match)
        search_query.add_must(TermMatch('tags', 'ldap'))

        search_query.add_must(TermMatch('details.result', 'LDAP_INVALID_CREDENTIALS'))

        search_query.add_aggregation(Aggregation('details.result'))
        search_query.add_aggregation(Aggregation('details.dn'))

        results = search_query.execute(es_client, indices=['events'])

        stoplist = ('o', 'mozilla', 'dc', 'com', 'mozilla.com', 'mozillafoundation.org', 'org', 'mozillafoundation')

        for t in results['aggregations']['details.dn']['terms']:
            if t['key'] in stoplist:
                continue
            failures = 0
            success = 0
            dn = t['key']

            details_query = SearchQuery()
            details_query.add_must(range_match)
            details_query.add_must(TermMatch('tags', 'ldap'))
            details_query.add_must(TermMatch('details.dn', dn))
            details_query.add_aggregation(Aggregation('details.result'))

            results = details_query.execute(es_client)

            for t in results['aggregations']['details.result']['terms']:
                if t['key'].upper() == 'LDAP_SUCCESS':
                    success = t['count']
                if t['key'].upper() == 'LDAP_INVALID_CREDENTIALS':
                    failures = t['count']
            resultsList.append(
                dict(
                    dn=dn,
                    failures=failures,
                    success=success,
                    begin=begindateUTC.isoformat(),
                    end=enddateUTC.isoformat()
                )
            )

        return(json.dumps(resultsList))
    except Exception as e:
        sys.stderr.write('Error trying to get ldap results: {0}\n'.format(e))
Example #13
    def __init__(self):
        # Run plugin on portscan alerts
        self.registration = ['portscan']

        config = _load_config(CONFIG_FILE)

        es_client = ElasticsearchClient(ES['servers'])

        search_indices = config.get('searchIndices', [])

        self.max_connections = config.get('maxConnections', 0)
        self.match_tags = config.get('matchTags', [])
        self.search_window = config.get('searchWindow', {})

        if len(search_indices) == 0:
            search_indices = ['alerts']

        if self.max_connections == 0:
            self.max_connections = None

        if len(self.match_tags) == 0:
            self.match_tags = ['portscan']

        if len(self.search_window) == 0:
            self.search_window = {'hours': 24}

        # Store our ES client in a closure bound to the plugin object.
        # The intent behind this approach is to make the interface to
        # the `enrich` function require dependency injection for testing.
        def search_fn(query):
            return query.execute(es_client, indices=search_indices)

        self.search = search_fn
Example #14
def kibanaDashboards():
    resultsList = []
    try:
        es_client = ElasticsearchClient(
            (list('{0}'.format(s) for s in options.esservers)))
        search_query = SearchQuery()
        search_query.add_must(TermMatch('type', 'dashboard'))
        results = search_query.execute(es_client, indices=['.kibana'])

        for dashboard in results['hits']:
            dashboard_id = dashboard['_id']
            if dashboard_id.startswith('dashboard:'):
                dashboard_id = dashboard_id.replace('dashboard:', '')

            resultsList.append({
                'name':
                dashboard['_source']['dashboard']['title'],
                'id':
                dashboard_id
            })

    except ElasticsearchInvalidIndex as e:
        logger.error('Kibana dashboard index not found: {0}\n'.format(e))

    except Exception as e:
        logger.error('Kibana dashboard received error: {0}\n'.format(e))

    return json.dumps(resultsList)
Example #15
 def _configureES(self):
     """
     Configure elasticsearch client
     """
     try:
         self.es = ElasticsearchClient(ES["servers"])
         self.log.debug("ES configured")
     except Exception as e:
         self.log.error("Exception while configuring ES for alerts: {0}".format(e))
Example #16
def getQueueSizes():
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(options.esservers)

    sqs_client = boto3.client("sqs",
                              region_name=options.region,
                              aws_access_key_id=options.accesskey,
                              aws_secret_access_key=options.secretkey)
    queues_stats = {
        'queues': [],
        'total_feeds': len(options.taskexchange),
        'total_messages_ready': 0,
        'username': '******'
    }
    for queue_name in options.taskexchange:
        logger.debug('Looking for sqs queue stats in queue ' + queue_name)
        queue_url = sqs_client.get_queue_url(QueueName=queue_name)['QueueUrl']
        queue_attributes = sqs_client.get_queue_attributes(
            QueueUrl=queue_url, AttributeNames=['All'])['Attributes']
        queue_stats = {
            'queue': queue_name,
        }
        if 'ApproximateNumberOfMessages' in queue_attributes:
            queue_stats['messages_ready'] = int(
                queue_attributes['ApproximateNumberOfMessages'])
            queues_stats['total_messages_ready'] += queue_stats[
                'messages_ready']
        if 'ApproximateNumberOfMessagesNotVisible' in queue_attributes:
            queue_stats['messages_inflight'] = int(
                queue_attributes['ApproximateNumberOfMessagesNotVisible'])
        if 'ApproximateNumberOfMessagesDelayed' in queue_attributes:
            queue_stats['messages_delayed'] = int(
                queue_attributes['ApproximateNumberOfMessagesDelayed'])

        queues_stats['queues'].append(queue_stats)

    # setup a log entry for health/status.
    sqsid = '{0}-{1}'.format(options.account, options.region)
    healthlog = dict(utctimestamp=toUTC(datetime.now()).isoformat(),
                     hostname=sqsid,
                     processid=os.getpid(),
                     processname=sys.argv[0],
                     severity='INFO',
                     summary='mozdef health/status',
                     category='mozdef',
                     source='aws-sqs',
                     tags=[],
                     details=queues_stats)
    healthlog['tags'] = ['mozdef', 'status', 'sqs']
    healthlog['type'] = 'mozdefhealth'
    # post to elasticsearch servers directly without going through
    # message queues in case there is an availability issue
    es.save_event(index=options.index, body=json.dumps(healthlog))
    # post another doc with a static docid and tag
    # for use when querying for the latest sqs status
    healthlog['tags'] = ['mozdef', 'status', 'sqs-latest']
    es.save_event(index=options.index,
                  doc_id=getDocID(sqsid),
                  body=json.dumps(healthlog))
Example #17
def main():
    '''
    Get aggregated statistics on incoming events
    to use in alerting/notices/queries about event patterns over time
    '''
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(
        (list('{0}'.format(s) for s in options.esservers)))
    index = options.index
    stats = esSearch(es)
    logger.debug(json.dumps(stats))
    sleepcycles = 0
    try:
        while not es.index_exists(index):
            sleep(3)
            if sleepcycles == 3:
                logger.debug(
                    "The index is not created. Terminating eventStats.py cron job."
                )
                exit(1)
            sleepcycles += 1
        if es.index_exists(index):
            # post to elastic search servers directly without going through
            # message queues in case there is an availability issue
            es.save_event(index=index, body=json.dumps(stats))

    except Exception as e:
        logger.error("Exception %r when gathering statistics " % e)

    logger.debug('finished')
Example #18
def main():
    logger.debug('starting')
    logger.debug(options)
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        client = MongoClient(options.mongohost, options.mongoport)
        mozdefdb = client.meteor
        ensureIndexes(mozdefdb)
        esResults = getESAlerts(es)
        updateMongo(mozdefdb, esResults)

    except Exception as e:
        logger.error("Exception %r sending health to mongo" % e)
Example #19
def main():
    '''
    Look for events that contain username and a mac address
    Add the correlation to the intelligence index.
    '''
    logger.debug('starting')
    logger.debug(options)

    es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
    # create intelligence index if it's not already there
    es.create_index('intelligence')

    # read in the OUI file for mac prefix to vendor dictionary
    macassignments = readOUIFile(options.ouifilename)

    # search ES for events containing username and mac address
    correlations = esSearch(es, macassignments=macassignments)

    # store the correlation in the intelligence index
    esStoreCorrelations(es, correlations)

    logger.debug('finished')
Example #20
def main():
    logger.debug('starting')
    logger.debug(options)
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        client = MongoClient(options.mongohost, options.mongoport)
        # use meteor db
        mozdefdb = client.meteor
        esResults = searchESForBROAttackers(es, 100)
        updateMongoWithESEvents(mozdefdb, esResults)
        searchMongoAlerts(mozdefdb)

    except ValueError as e:
        logger.error("Exception %r collecting attackers to mongo" % e)
Example #21
    def __init__(self):
        self.registration = ['portscan']

        self.config = Config.load(CONFIG_FILE)

        # Create a closure around an Elasticsearch client that can be invoked
        # with search terms to find events in the configured indices.
        es_client = ElasticsearchClient(ES['servers'])

        def search_fn(query):
            return query.execute(
                es_client, indices=self.config.indices_to_search)

        self.search = search_fn
Example #22
def verify_events(options):
    es_client = ElasticsearchClient(options.esservers)
    for required_field in options.required_fields:
        logger.debug('Looking for events without ' + required_field)
        search_query = SearchQuery(hours=12)
        search_query.add_must_not(ExistsMatch(required_field))

        # Exclude all events that are mozdef related health and stats
        search_query.add_must_not(TermMatch('_type', 'mozdefstats'))
        search_query.add_must_not(TermMatch('_type', 'mozdefhealth'))

        search_query.add_aggregation(Aggregation('_type'))
        # We don't care about the actual events, we only want the numbers
        results = search_query.execute(es_client, size=1)
        for aggreg_term in results['aggregations']['_type']['terms']:
            count = aggreg_term['count']
            category = aggreg_term['key']
            logger.error(
                "Found {0} bad events of _type '{1}' missing '{2}' field".
                format(count, category, required_field))
Example #23
parser.add_argument(
    'default_mapping_file',
    help=
    'The relative path to default mapping json file (ex: cron/defaultTemplateMapping.json)'
)
parser.add_argument(
    'backup_conf_file',
    help='The relative path to backup.conf file (ex: cron/backup.conf)')
args = parser.parse_args()

esserver = os.environ.get('OPTIONS_ESSERVERS')
if esserver is None:
    esserver = args.esserver
esserver = esserver.strip('/')
print "Connecting to " + esserver
client = ElasticsearchClient(esserver)

current_date = datetime.now()
event_index_name = current_date.strftime("events-%Y%m%d")
previous_event_index_name = (current_date -
                             timedelta(days=1)).strftime("events-%Y%m%d")
weekly_index_alias = 'events-weekly'
alert_index_name = current_date.strftime("alerts-%Y%m")
kibana_index_name = '.kibana'
kibana_version = '5.6.14'

index_settings_str = ''
with open(args.default_mapping_file) as data_file:
    index_settings_str = data_file.read()

index_settings = json.loads(index_settings_str)
Example #24
class AlertTask(Task):

    abstract = True

    def __init__(self):
        self.alert_name = self.__class__.__name__
        self.main_query = None

        # Used to store any alerts that were thrown
        self.alert_ids = []

        # List of events
        self.events = None
        # List of aggregations
        # e.g. when aggregField is email: [{value:'*****@*****.**',count:1337,events:[...]}, ...]
        self.aggregations = None

        self.log.debug('starting {0}'.format(self.alert_name))
        self.log.debug(RABBITMQ)
        self.log.debug(ES)

        self._configureKombu()
        self._configureES()

        self.event_indices = ['events', 'events-previous']

    def classname(self):
        return self.__class__.__name__

    @property
    def log(self):
        return get_task_logger('%s.%s' % (__name__, self.alert_name))

    def parse_config(self, config_filename, config_keys):
        myparser = OptionParser()
        self.config = None
        (self.config, args) = myparser.parse_args([])
        for config_key in config_keys:
            temp_value = getConfig(config_key, '', config_filename)
            setattr(self.config, config_key, temp_value)

    def _configureKombu(self):
        """
        Configure kombu for rabbitmq
        """
        try:
            connString = 'amqp://{0}:{1}@{2}:{3}//'.format(
                RABBITMQ['mquser'], RABBITMQ['mqpassword'],
                RABBITMQ['mqserver'], RABBITMQ['mqport'])
            self.mqConn = kombu.Connection(connString)

            self.alertExchange = kombu.Exchange(name=RABBITMQ['alertexchange'],
                                                type='topic',
                                                durable=True)
            self.alertExchange(self.mqConn).declare()
            alertQueue = kombu.Queue(RABBITMQ['alertqueue'],
                                     exchange=self.alertExchange)
            alertQueue(self.mqConn).declare()
            self.mqproducer = self.mqConn.Producer(serializer='json')
            self.log.debug('Kombu configured')
        except Exception as e:
            self.log.error(
                'Exception while configuring kombu for alerts: {0}'.format(e))

    def _configureES(self):
        """
        Configure elasticsearch client
        """
        try:
            self.es = ElasticsearchClient(ES['servers'])
            self.log.debug('ES configured')
        except Exception as e:
            self.log.error(
                'Exception while configuring ES for alerts: {0}'.format(e))

    def mostCommon(self, listofdicts, dictkeypath):
        """
            Given a list containing dictionaries,
            return the most common entries
            along a key path separated by .
            i.e. dictkey.subkey.subkey
            returned as a list of tuples
            [(value,count),(value,count)]
        """
        inspectlist = list()
        path = list(dictpath(dictkeypath))
        for i in listofdicts:
            for k in list(keypaths(i)):
                if not (set(k[0]).symmetric_difference(path)):
                    inspectlist.append(k[1])

        return Counter(inspectlist).most_common()

    def alertToMessageQueue(self, alertDict):
        """
        Send alert to the rabbit message queue
        """
        try:
            # cherry pick items from the alertDict to send to the alerts messageQueue
            mqAlert = dict(severity='INFO', category='')
            if 'severity' in alertDict:
                mqAlert['severity'] = alertDict['severity']
            if 'category' in alertDict:
                mqAlert['category'] = alertDict['category']
            if 'utctimestamp' in alertDict:
                mqAlert['utctimestamp'] = alertDict['utctimestamp']
            if 'eventtimestamp' in alertDict:
                mqAlert['eventtimestamp'] = alertDict['eventtimestamp']
            mqAlert['summary'] = alertDict['summary']
            self.log.debug(mqAlert)
            ensurePublish = self.mqConn.ensure(self.mqproducer,
                                               self.mqproducer.publish,
                                               max_retries=10)
            ensurePublish(alertDict,
                          exchange=self.alertExchange,
                          routing_key=RABBITMQ['alertqueue'])
            self.log.debug('alert sent to the alert queue')
        except Exception as e:
            self.log.error(
                'Exception while sending alert to message queue: {0}'.format(
                    e))

    def alertToES(self, alertDict):
        """
        Send alert to elasticsearch
        """
        try:
            res = self.es.save_alert(body=alertDict)
            self.log.debug('alert sent to ES')
            self.log.debug(res)
            return res
        except Exception as e:
            self.log.error(
                'Exception while pushing alert to ES: {0}'.format(e))

    def tagBotNotify(self, alert):
        """
            Tag alert to be excluded based on severity
            If 'ircchannel' is set in an alert, we automatically notify mozdefbot
        """
        alert['notify_mozdefbot'] = True
        if alert['severity'] == 'NOTICE' or alert['severity'] == 'INFO':
            alert['notify_mozdefbot'] = False

        # If an alert sets specific ircchannel, then we should probably always notify in mozdefbot
        if 'ircchannel' in alert and alert['ircchannel'] != '' and alert[
                'ircchannel'] is not None:
            alert['notify_mozdefbot'] = True
        return alert

    def saveAlertID(self, saved_alert):
        """
        Save alert to self so we can analyze it later
        """
        self.alert_ids.append(saved_alert['_id'])

    def filtersManual(self, query):
        """
        Configure filters manually

        query is a search query object with date_timedelta populated

        """
        # Don't fire on already alerted events
        duplicate_matcher = TermMatch('alert_names',
                                      self.determine_alert_classname())
        if duplicate_matcher not in query.must_not:
            query.add_must_not(duplicate_matcher)

        self.main_query = query

    def determine_alert_classname(self):
        alert_name = self.classname()
        # Allow alerts like the generic alerts (one python alert but represents many 'alerts')
        # can customize the alert name
        if hasattr(self, 'custom_alert_name'):
            alert_name = self.custom_alert_name
        return alert_name

    def executeSearchEventsSimple(self):
        """
        Execute the search for simple events
        """
        return self.main_query.execute(self.es, indices=self.event_indices)

    def searchEventsSimple(self):
        """
        Search events matching filters, store events in self.events
        """
        try:
            results = self.executeSearchEventsSimple()
            self.events = results['hits']
            self.log.debug(self.events)
        except Exception as e:
            self.log.error('Error while searching events in ES: {0}'.format(e))

    def searchEventsAggregated(self, aggregationPath, samplesLimit=5):
        """
        Search events, aggregate matching ES filters by aggregationPath,
        store them in self.aggregations as a list of dictionaries
        keys:
          value: the text value that was found in the aggregationPath
          count: the hitcount of the text value
          events: the sampled list of events that matched
          allevents: the unsampled, total list of matching events
        aggregationPath can be key.subkey.subkey to specify a path to a dictionary value
        relative to the _source that's returned from elastic search.
        ex: details.sourceipaddress
        """

        # We automatically add the key that we're matching on
        # for aggregation, as a query requirement
        aggreg_key_exists = ExistsMatch(aggregationPath)
        if aggreg_key_exists not in self.main_query.must:
            self.main_query.add_must(aggreg_key_exists)

        try:
            esresults = self.main_query.execute(self.es,
                                                indices=self.event_indices)
            results = esresults['hits']

            # List of aggregation values that can be counted/summarized by Counter
            # Example: ['*****@*****.**','*****@*****.**', '*****@*****.**'] for an email aggregField
            aggregationValues = []
            for r in results:
                aggregationValues.append(
                    getValueByPath(r['_source'], aggregationPath))

            # [{value:'*****@*****.**',count:1337,events:[...]}, ...]
            aggregationList = []
            for i in Counter(aggregationValues).most_common():
                idict = {
                    'value': i[0],
                    'count': i[1],
                    'events': [],
                    'allevents': []
                }
                for r in results:
                    if getValueByPath(r['_source'], aggregationPath).encode(
                            'ascii', 'ignore') == i[0]:
                        # copy events detail into this aggregation up to our samples limit
                        if len(idict['events']) < samplesLimit:
                            idict['events'].append(r)
                        # also copy all events to a non-sampled list
                        # so we mark all events as alerted and don't re-alert
                        idict['allevents'].append(r)
                aggregationList.append(idict)

            self.aggregations = aggregationList
            self.log.debug(self.aggregations)
        except Exception as e:
            self.log.error('Error while searching events in ES: {0}'.format(e))

    def walkEvents(self, **kwargs):
        """
        Walk through events, provide some methods to hook in alerts
        """
        if len(self.events) > 0:
            for i in self.events:
                alert = self.onEvent(i, **kwargs)
                if alert:
                    alert = self.tagBotNotify(alert)
                    self.log.debug(alert)
                    alert = self.alertPlugins(alert)
                    alertResultES = self.alertToES(alert)
                    self.tagEventsAlert([i], alertResultES)
                    self.alertToMessageQueue(alert)
                    self.hookAfterInsertion(alert)
                    self.saveAlertID(alertResultES)
        # did we not match anything?
        # can also be used as an alert trigger
        if len(self.events) == 0:
            alert = self.onNoEvent(**kwargs)
            if alert:
                alert = self.tagBotNotify(alert)
                self.log.debug(alert)
                alertResultES = self.alertToES(alert)
                self.alertToMessageQueue(alert)
                self.hookAfterInsertion(alert)
                self.saveAlertID(alertResultES)

    def walkAggregations(self, threshold, config=None):
        """
        Walk through aggregations, provide some methods to hook in alerts
        """
        if len(self.aggregations) > 0:
            for aggregation in self.aggregations:
                if aggregation['count'] >= threshold:
                    aggregation['config'] = config
                    alert = self.onAggregation(aggregation)
                    if alert:
                        alert = self.tagBotNotify(alert)
                        self.log.debug(alert)
                        alert = self.alertPlugins(alert)
                        alertResultES = self.alertToES(alert)
                        # even though we only sample events in the alert
                        # tag all events as alerted to avoid re-alerting
                        # on events we've already processed.
                        self.tagEventsAlert(aggregation['allevents'],
                                            alertResultES)
                        self.alertToMessageQueue(alert)
                        self.saveAlertID(alertResultES)

    def alertPlugins(self, alert):
        """
        Send alerts through a plugin system
        """

        plugin_dir = os.path.join(os.path.dirname(__file__), '../plugins')
        plugin_set = AlertPluginSet(plugin_dir, ALERT_PLUGINS)
        alertDict = plugin_set.run_plugins(alert)[0]

        return alertDict

    def createAlertDict(self,
                        summary,
                        category,
                        tags,
                        events,
                        severity='NOTICE',
                        url=None,
                        ircchannel=None):
        """
        Create an alert dict
        """
        alert = {
            'utctimestamp': toUTC(datetime.now()).isoformat(),
            'severity': severity,
            'summary': summary,
            'category': category,
            'tags': tags,
            'events': [],
            'ircchannel': ircchannel,
        }
        if url:
            alert['url'] = url

        for e in events:
            alert['events'].append({
                'documentindex': e['_index'],
                'documentsource': e['_source'],
                'documentid': e['_id']
            })
        self.log.debug(alert)
        return alert

    def onEvent(self, event, *args, **kwargs):
        """
        To be overridden by children to run their code
        to be used when creating an alert using an event
        must return an alert dict or None
        """
        pass

    def onNoEvent(self, *args, **kwargs):
        """
        To be overridden by children to run their code
        when NOTHING matches a filter
        which can be used to trigger on the absence of
        events much like a dead man switch.
        This is to be used when creating an alert using an event
        must return an alert dict or None
        """
        pass

    def onAggregation(self, aggregation):
        """
        To be overridden by children to run their code
        to be used when creating an alert using an aggregation
        must return an alert dict or None
        """
        pass

    def hookAfterInsertion(self, alert):
        """
        To be overridden by children to run their code
        to be used when creating an alert using an aggregation
        """
        pass

    def tagEventsAlert(self, events, alertResultES):
        """
        Update the event with the alertid/index
        and update the alert_names on the event itself so it's
        not re-alerted
        """
        try:
            for event in events:
                if 'alerts' not in event['_source']:
                    event['_source']['alerts'] = []
                event['_source']['alerts'].append({
                    'index':
                    alertResultES['_index'],
                    'id':
                    alertResultES['_id']
                })

                if 'alert_names' not in event['_source']:
                    event['_source']['alert_names'] = []
                event['_source']['alert_names'].append(
                    self.determine_alert_classname())

                self.es.save_event(index=event['_index'],
                                   body=event['_source'],
                                   doc_id=event['_id'])
            # We refresh here to ensure our changes to the events will show up for the next search query results
            self.es.refresh(event['_index'])
        except Exception as e:
            self.log.error('Error while updating events in ES: {0}'.format(e))

    def main(self):
        """
        To be overridden by children to run their code
        """
        pass

    def run(self, *args, **kwargs):
        """
        Main method launched by celery periodically
        """
        try:
            self.main(*args, **kwargs)
            self.log.debug('finished')
        except Exception as e:
            self.log.exception('Exception in main() method: {0}'.format(e))

    def parse_json_alert_config(self, config_file):
        """
        Helper function to parse an alert config file
        """
        alert_dir = os.path.join(os.path.dirname(__file__), '..')
        config_file_path = os.path.abspath(os.path.join(
            alert_dir, config_file))
        json_obj = {}
        with open(config_file_path, "r") as fd:
            try:
                json_obj = json.load(fd)
            except ValueError:
                sys.stderr.write("FAILED to open the configuration file\n")

        return json_obj
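
For context, here is a minimal sketch of how a child class might drive the AlertTask hooks shown above; the class name, tag, field path, and threshold are illustrative assumptions rather than code from the original project:

class AlertLdapFailureSketch(AlertTask):
    # Illustrative subclass: query, aggregate, and alert via the AlertTask hooks.
    def main(self):
        search_query = SearchQuery(hours=1)
        search_query.add_must(TermMatch('tags', 'ldap'))
        self.filtersManual(search_query)
        self.searchEventsAggregated('details.dn', samplesLimit=5)
        self.walkAggregations(threshold=10)

    def onAggregation(self, aggregation):
        summary = '{0} LDAP failures for {1}'.format(aggregation['count'], aggregation['value'])
        return self.createAlertDict(summary, 'ldap', ['ldap'], aggregation['events'], 'WARNING')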
Example #25
def esRotateIndexes():
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    with open(options.default_mapping_file, 'r') as mapping_file:
        default_mapping_contents = json.loads(mapping_file.read())

    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))

        indices = es.get_indices()

        # calc dates for use in index names events-YYYYMMDD, alerts-YYYYMM, etc.
        odate_day = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m%d')
        odate_month = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m')
        ndate_day = date.strftime(toUTC(datetime.now()), '%Y%m%d')
        ndate_month = date.strftime(toUTC(datetime.now()), '%Y%m')
        # examine each index in the .conf file
        # for rotation settings
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
                if rotation != 'none':
                    oldindex = index
                    newindex = index
                    if rotation == 'daily':
                        oldindex += '-%s' % odate_day
                        newindex += '-%s' % ndate_day
                    elif rotation == 'monthly':
                        oldindex += '-%s' % odate_month
                        newindex += '-%s' % ndate_month
                        # do not rotate before the month ends
                        if oldindex == newindex:
                            logger.debug('do not rotate %s index, month has not changed yet' % index)
                            continue
                    if newindex not in indices:
                        index_settings = {}
                        if 'events' in newindex:
                            index_settings = {
                                "index": {
                                    "refresh_interval": options.refresh_interval,
                                    "number_of_shards": options.number_of_shards,
                                    "number_of_replicas": options.number_of_replicas,
                                    "search.slowlog.threshold.query.warn": options.slowlog_threshold_query_warn,
                                    "search.slowlog.threshold.fetch.warn": options.slowlog_threshold_fetch_warn,
                                    "mapping.total_fields.limit": options.mapping_total_fields_limit
                                }
                            }
                        elif 'alerts' in newindex:
                            index_settings = {
                                "index": {
                                    "number_of_shards": 1
                                }
                            }
                        default_mapping_contents['settings'] = index_settings
                        logger.debug('Creating %s index' % newindex)
                        es.create_index(newindex, default_mapping_contents)
                    # set aliases: events to events-YYYYMMDD
                    # and events-previous to events-YYYYMMDD-1
                    logger.debug('Setting {0} alias to index: {1}'.format(index, newindex))
                    es.create_alias(index, newindex)
                    if oldindex in indices:
                        logger.debug('Setting {0}-previous alias to index: {1}'.format(index, oldindex))
                        es.create_alias('%s-previous' % index, oldindex)
                    else:
                        logger.debug('Old index %s is missing, do not change %s-previous alias' % (oldindex, index))
            except Exception as e:
                logger.error("Unhandled exception while rotating %s, terminating: %r" % (index, e))

        indices = es.get_indices()
        # Create weekly aliases for certain indices
        week_ago_date = toUTC(datetime.now()) - timedelta(weeks=1)
        week_ago_str = week_ago_date.strftime('%Y%m%d')
        current_date = toUTC(datetime.now())
        for index in options.weekly_rotation_indices:
            weekly_index_alias = '%s-weekly' % index
            logger.debug('Trying to re-alias {0} to indices since {1}'.format(weekly_index_alias, week_ago_str))
            existing_weekly_indices = []
            for day_obj in daterange(week_ago_date, current_date):
                day_str = day_obj.strftime('%Y%m%d')
                day_index = index + '-' + str(day_str)
                if day_index in indices:
                    existing_weekly_indices.append(day_index)
                else:
                    logger.debug('%s not found, so cannot assign weekly alias' % day_index)
            if existing_weekly_indices:
                logger.debug('Creating {0} alias for {1}'.format(weekly_index_alias, existing_weekly_indices))
                es.create_alias_multiple_indices(weekly_index_alias, existing_weekly_indices)
            else:
                logger.warning('No indices within the past week to assign events-weekly to')
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #26
def main():
    if options.output=='syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport)))
    else:
        sh=logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    # logger.debug(options)
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        s = requests.Session()
        s.headers.update({'Accept': 'application/json'})
        s.headers.update({'Content-type': 'application/json'})
        s.headers.update({'Authorization': 'SSWS {0}'.format(options.apikey)})

        # capture the time we start running so next time we catch any events created while we run.
        state = State(options.state_file)
        lastrun = toUTC(datetime.now()).isoformat()

        r = s.get('https://{0}/api/v1/events?startDate={1}&limit={2}'.format(
            options.oktadomain,
            toUTC(state.data['lastrun']).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
            options.recordlimit
        ))

        if r.status_code == 200:
            oktaevents = json.loads(r.text)
            for event in oktaevents:
                if 'published' in event:
                    if toUTC(event['published']) > toUTC(state.data['lastrun']):
                        try:
                            mozdefEvent = dict()
                            mozdefEvent['utctimestamp']=toUTC(event['published']).isoformat()
                            mozdefEvent['receivedtimestamp']=toUTC(datetime.now()).isoformat()
                            mozdefEvent['category'] = 'okta'
                            mozdefEvent['tags'] = ['okta']
                            if 'action' in event and 'message' in event['action']:
                                mozdefEvent['summary'] = event['action']['message']
                            mozdefEvent['details'] = event
                            # Actor parsing
                            # While there are various objectTypes attributes, we just take any attribute that matches
                            # in case Okta changes its structure around a bit.
                            # This means the last instance of each attribute in all actors will be recorded in mozdef
                            # while others will be discarded, which ends up working out well in Okta's case.
                            if 'actors' in event:
                                for actor in event['actors']:
                                    if 'ipAddress' in actor:
                                        if netaddr.valid_ipv4(actor['ipAddress']):
                                            mozdefEvent['details']['sourceipaddress'] = actor['ipAddress']
                                    if 'login' in actor:
                                        mozdefEvent['details']['username'] = actor['login']
                                    if 'requestUri' in actor:
                                        mozdefEvent['details']['source_uri'] = actor['requestUri']

                            # We are renaming action to activity because there are
                            # currently mapping problems with the details.action field
                            mozdefEvent['details']['activity'] = mozdefEvent['details']['action']
                            mozdefEvent['details'].pop('action')

                            jbody=json.dumps(mozdefEvent)
                            res = es.save_event(doc_type='okta',body=jbody)
                            logger.debug(res)
                        except Exception as e:
                            logger.error('Error handling log record {0} {1}'.format(r, e))
                            continue
                else:
                    logger.error('Okta event does not contain published date: {0}'.format(event))
            state.data['lastrun'] = lastrun
            state.write_state_file()
        else:
            logger.error('Could not get Okta events HTTP error code {} reason {}'.format(r.status_code, r.reason))
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #27
def main():
    if options.output=='syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport)))
    else:
        sh=logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    #logger.debug(options)
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        s = requests.Session()
        s.headers.update({'Accept': 'application/json'})
        s.headers.update({'Content-type': 'application/json'})
        s.headers.update({'Authorization':'SSWS {0}'.format(options.apikey)})

        # capture the time we start running so next time we catch any events created while we run.
        state = State(options.state_file)
        lastrun = toUTC(datetime.now()).isoformat()
        # in case we don't archive files... only look at today and yesterday's files.
        yesterday=date.strftime(datetime.utcnow()-timedelta(days=1),'%Y/%m/%d')
        today = date.strftime(datetime.utcnow(),'%Y/%m/%d')

        r = s.get('https://{0}/api/v1/events?startDate={1}&limit={2}'.format(
            options.oktadomain,
            toUTC(state.data['lastrun']).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
            options.recordlimit
        ))

        if r.status_code == 200:
            oktaevents = json.loads(r.text)
            for event in oktaevents:
                if 'published' in event.keys():
                    if toUTC(event['published']) > toUTC(state.data['lastrun']):
                        try:
                            mozdefEvent = dict()
                            mozdefEvent['utctimestamp']=toUTC(event['published']).isoformat()
                            mozdefEvent['receivedtimestamp']=toUTC(datetime.now()).isoformat()
                            mozdefEvent['category'] = 'okta'
                            mozdefEvent['tags'] = ['okta']
                            if 'action' in event.keys() and 'message' in event['action'].keys():
                                mozdefEvent['summary'] = event['action']['message']
                            mozdefEvent['details'] = event
                            # Actor parsing
                            # While there are various objectTypes attributes, we just take any attribute that matches
                            # in case Okta changes its structure around a bit.
                            # This means the last instance of each attribute in all actors will be recorded in mozdef
                            # while others will be discarded, which ends up working out well in Okta's case.
                            if 'actors' in event.keys():
                                for actor in event['actors']:
                                    if 'ipAddress' in actor.keys():
                                        if netaddr.valid_ipv4(actor['ipAddress']):
                                            mozdefEvent['details']['sourceipaddress'] = actor['ipAddress']
                                    if 'login' in actor.keys():
                                        mozdefEvent['details']['username'] = actor['login']
                                    if 'requestUri' in actor.keys():
                                        mozdefEvent['details']['source_uri'] = actor['requestUri']

                            # We are renaming action to activity because there are
                            # currently mapping problems with the details.action field
                            mozdefEvent['details']['activity'] = mozdefEvent['details']['action']
                            mozdefEvent['details'].pop('action')

                            jbody=json.dumps(mozdefEvent)
                            res = es.save_event(doc_type='okta',body=jbody)
                            logger.debug(res)
                        except Exception as e:
                            logger.error('Error handling log record {0} {1}'.format(r, e))
                            continue
                else:
                    logger.error('Okta event does not contain published date: {0}'.format(event))
            state.data['lastrun'] = lastrun
            state.write_state_file()
        else:
            logger.error('Could not get Okta events HTTP error code {} reason {}'.format(r.status_code, r.reason))
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r"%e)
Example #28
class AlertTask(Task):

    abstract = True

    def __init__(self):
        self.alert_name = self.__class__.__name__
        self.main_query = None

        # Used to store any alerts that were thrown
        self.alert_ids = []

        # List of events
        self.events = None
        # List of aggregations
        # e.g. when aggregField is email: [{value:'*****@*****.**',count:1337,events:[...]}, ...]
        self.aggregations = None

        self.log.debug('starting {0}'.format(self.alert_name))
        self.log.debug(RABBITMQ)
        self.log.debug(ES)

        self._configureKombu()
        self._configureES()

        self.event_indices = ['events', 'events-previous']

    def classname(self):
        return self.__class__.__name__

    @property
    def log(self):
        return get_task_logger('%s.%s' % (__name__, self.alert_name))

    def parse_config(self, config_filename, config_keys):
        myparser = OptionParser()
        self.config = None
        (self.config, args) = myparser.parse_args([])
        for config_key in config_keys:
            temp_value = getConfig(config_key, '', config_filename)
            setattr(self.config, config_key, temp_value)

    def _configureKombu(self):
        """
        Configure kombu for rabbitmq
        """
        try:
            connString = 'amqp://{0}:{1}@{2}:{3}//'.format(
                RABBITMQ['mquser'],
                RABBITMQ['mqpassword'],
                RABBITMQ['mqserver'],
                RABBITMQ['mqport'])
            self.mqConn = kombu.Connection(connString)

            self.alertExchange = kombu.Exchange(
                name=RABBITMQ['alertexchange'],
                type='topic',
                durable=True)
            self.alertExchange(self.mqConn).declare()
            alertQueue = kombu.Queue(RABBITMQ['alertqueue'], exchange=self.alertExchange)
            alertQueue(self.mqConn).declare()
            self.mqproducer = self.mqConn.Producer(serializer='json')
            self.log.debug('Kombu configured')
        except Exception as e:
            self.log.error('Exception while configuring kombu for alerts: {0}'.format(e))

    def _configureES(self):
        """
        Configure elasticsearch client
        """
        try:
            self.es = ElasticsearchClient(ES['servers'])
            self.log.debug('ES configured')
        except Exception as e:
            self.log.error('Exception while configuring ES for alerts: {0}'.format(e))

    def mostCommon(self, listofdicts, dictkeypath):
        """
            Given a list containing dictionaries,
            return the most common entries
            along a key path separated by .
            i.e. dictkey.subkey.subkey
            returned as a list of tuples
            [(value,count),(value,count)]
        """
        inspectlist=list()
        path=list(dictpath(dictkeypath))
        for i in listofdicts:
            for k in list(keypaths(i)):
                if not (set(k[0]).symmetric_difference(path)):
                    inspectlist.append(k[1])

        return Counter(inspectlist).most_common()

    def alertToMessageQueue(self, alertDict):
        """
        Send alert to the rabbit message queue
        """
        try:
            # cherry pick items from the alertDict to send to the alerts messageQueue
            mqAlert = dict(severity='INFO', category='')
            if 'severity' in alertDict:
                mqAlert['severity'] = alertDict['severity']
            if 'category' in alertDict:
                mqAlert['category'] = alertDict['category']
            if 'utctimestamp' in alertDict:
                mqAlert['utctimestamp'] = alertDict['utctimestamp']
            if 'eventtimestamp' in alertDict:
                mqAlert['eventtimestamp'] = alertDict['eventtimestamp']
            mqAlert['summary'] = alertDict['summary']
            self.log.debug(mqAlert)
            ensurePublish = self.mqConn.ensure(
                self.mqproducer,
                self.mqproducer.publish,
                max_retries=10)
            ensurePublish(
                alertDict,
                exchange=self.alertExchange,
                routing_key=RABBITMQ['alertqueue']
            )
            self.log.debug('alert sent to the alert queue')
        except Exception as e:
            self.log.error('Exception while sending alert to message queue: {0}'.format(e))

    def alertToES(self, alertDict):
        """
        Send alert to elasticsearch
        """
        try:
            res = self.es.save_alert(body=alertDict)
            self.log.debug('alert sent to ES')
            self.log.debug(res)
            return res
        except Exception as e:
            self.log.error('Exception while pushing alert to ES: {0}'.format(e))

    def tagBotNotify(self, alert):
        """
            Tag the alert with whether mozdefbot should be notified; low
            severities (NOTICE/INFO) are excluded by default.
            If 'ircchannel' is set in an alert, we automatically notify mozdefbot
        """
        alert['notify_mozdefbot'] = True
        if alert['severity'] == 'NOTICE' or alert['severity'] == 'INFO':
            alert['notify_mozdefbot'] = False

        # If an alert sets specific ircchannel, then we should probably always notify in mozdefbot
        if 'ircchannel' in alert and alert['ircchannel'] != '' and alert['ircchannel'] is not None:
            alert['notify_mozdefbot'] = True
        return alert

    def saveAlertID(self, saved_alert):
        """
        Save alert to self so we can analyze it later
        """
        self.alert_ids.append(saved_alert['_id'])

    def filtersManual(self, query):
        """
        Configure filters manually

        query is a search query object with date_timedelta populated

        """
        # Don't fire on already alerted events
        duplicate_matcher = TermMatch('alert_names', self.determine_alert_classname())
        if duplicate_matcher not in query.must_not:
            query.add_must_not(duplicate_matcher)

        self.main_query = query

    def determine_alert_classname(self):
        alert_name = self.classname()
        # Allow alerts like the generic alerts (one python class that represents many 'alerts')
        # to customize the alert name
        if hasattr(self, 'custom_alert_name'):
            alert_name = self.custom_alert_name
        return alert_name

    def executeSearchEventsSimple(self):
        """
        Execute the search for simple events
        """
        return self.main_query.execute(self.es, indices=self.event_indices)

    def searchEventsSimple(self):
        """
        Search events matching filters, store events in self.events
        """
        try:
            results = self.executeSearchEventsSimple()
            self.events = results['hits']
            self.log.debug(self.events)
        except Exception as e:
            self.log.error('Error while searching events in ES: {0}'.format(e))

    def searchEventsAggregated(self, aggregationPath, samplesLimit=5):
        """
        Search events, aggregate matching ES filters by aggregationPath,
        store them in self.aggregations as a list of dictionaries
        keys:
          value: the text value that was found in the aggregationPath
          count: the hitcount of the text value
          events: the sampled list of events that matched
          allevents: the unsampled, complete list of matching events
        aggregationPath can be key.subkey.subkey to specify a path to a dictionary value
        relative to the _source that's returned from elastic search.
        ex: details.sourceipaddress
        """

        # We automatically add the key that we're matching on
        # for aggregation, as a query requirement
        aggreg_key_exists = ExistsMatch(aggregationPath)
        if aggreg_key_exists not in self.main_query.must:
            self.main_query.add_must(aggreg_key_exists)

        try:
            esresults = self.main_query.execute(self.es, indices=self.event_indices)
            results = esresults['hits']

            # List of aggregation values that can be counted/summarized by Counter
            # Example: ['*****@*****.**','*****@*****.**', '*****@*****.**'] for an email aggregField
            aggregationValues = []
            for r in results:
                aggregationValues.append(getValueByPath(r['_source'], aggregationPath))

            # [{value:'*****@*****.**',count:1337,events:[...]}, ...]
            aggregationList = []
            for i in Counter(aggregationValues).most_common():
                idict = {
                    'value': i[0],
                    'count': i[1],
                    'events': [],
                    'allevents': []
                }
                for r in results:
                    if getValueByPath(r['_source'], aggregationPath).encode('ascii', 'ignore') == i[0]:
                        # copy events detail into this aggregation up to our samples limit
                        if len(idict['events']) < samplesLimit:
                            idict['events'].append(r)
                        # also copy all events to a non-sampled list
                        # so we mark all events as alerted and don't re-alert
                        idict['allevents'].append(r)
                aggregationList.append(idict)

            self.aggregations = aggregationList
            self.log.debug(self.aggregations)
        except Exception as e:
            self.log.error('Error while searching events in ES: {0}'.format(e))

    def walkEvents(self, **kwargs):
        """
        Walk through events, provide some methods to hook in alerts
        """
        if len(self.events) > 0:
            for i in self.events:
                alert = self.onEvent(i, **kwargs)
                if alert:
                    alert = self.tagBotNotify(alert)
                    self.log.debug(alert)
                    alert = self.alertPlugins(alert)
                    alertResultES = self.alertToES(alert)
                    self.tagEventsAlert([i], alertResultES)
                    self.alertToMessageQueue(alert)
                    self.hookAfterInsertion(alert)
                    self.saveAlertID(alertResultES)
        # did we not match anything?
        # can also be used as an alert trigger
        if len(self.events) == 0:
            alert = self.onNoEvent(**kwargs)
            if alert:
                alert = self.tagBotNotify(alert)
                self.log.debug(alert)
                alertResultES = self.alertToES(alert)
                self.alertToMessageQueue(alert)
                self.hookAfterInsertion(alert)
                self.saveAlertID(alertResultES)

    def walkAggregations(self, threshold, config=None):
        """
        Walk through aggregations, provide some methods to hook in alerts
        """
        if len(self.aggregations) > 0:
            for aggregation in self.aggregations:
                if aggregation['count'] >= threshold:
                    aggregation['config'] = config
                    alert = self.onAggregation(aggregation)
                    if alert:
                        alert = self.tagBotNotify(alert)
                        self.log.debug(alert)
                        alert = self.alertPlugins(alert)
                        alertResultES = self.alertToES(alert)
                        # even though we only sample events in the alert
                        # tag all events as alerted to avoid re-alerting
                        # on events we've already processed.
                        self.tagEventsAlert(aggregation['allevents'], alertResultES)
                        self.alertToMessageQueue(alert)
                        self.saveAlertID(alertResultES)

    def alertPlugins(self, alert):
        """
        Send alerts through a plugin system
        """

        plugin_dir = os.path.join(os.path.dirname(__file__), '../plugins')
        plugin_set = AlertPluginSet(plugin_dir, ALERT_PLUGINS)
        alertDict = plugin_set.run_plugins(alert)[0]

        return alertDict

    def createAlertDict(self, summary, category, tags, events, severity='NOTICE', url=None, ircchannel=None):
        """
        Create an alert dict
        """
        alert = {
            'utctimestamp': toUTC(datetime.now()).isoformat(),
            'severity': severity,
            'summary': summary,
            'category': category,
            'tags': tags,
            'events': [],
            'ircchannel': ircchannel,
        }
        if url:
            alert['url'] = url

        for e in events:
            alert['events'].append({
                'documentindex': e['_index'],
                'documentsource': e['_source'],
                'documentid': e['_id']})
        self.log.debug(alert)
        return alert

    def onEvent(self, event, *args, **kwargs):
        """
        To be overridden by children to run their code
        when creating an alert from a single event.
        Must return an alert dict or None.
        """
        pass

    def onNoEvent(self, *args, **kwargs):
        """
        To be overridden by children to run their code
        when NOTHING matches a filter,
        which can be used to trigger on the absence of
        events, much like a dead man's switch.
        Must return an alert dict or None.
        """
        pass

    def onAggregation(self, aggregation):
        """
        To be overridden by children to run their code
        when creating an alert from an aggregation.
        Must return an alert dict or None.
        """
        pass

    def hookAfterInsertion(self, alert):
        """
        To be overridden by children to run their code
        after an alert has been inserted into ES.
        """
        pass

    def tagEventsAlert(self, events, alertResultES):
        """
        Update the event with the alertid/index
        and update the alert_names on the event itself so it's
        not re-alerted
        """
        try:
            for event in events:
                if 'alerts' not in event['_source']:
                    event['_source']['alerts'] = []
                event['_source']['alerts'].append({
                    'index': alertResultES['_index'],
                    'id': alertResultES['_id']})

                if 'alert_names' not in event['_source']:
                    event['_source']['alert_names'] = []
                event['_source']['alert_names'].append(self.determine_alert_classname())

                self.es.save_event(index=event['_index'], body=event['_source'], doc_id=event['_id'])
            # We refresh here to ensure our changes to the events will show up for the next search query results
            self.es.refresh(event['_index'])
        except Exception as e:
            self.log.error('Error while updating events in ES: {0}'.format(e))

    def main(self):
        """
        To be overridden by children to run their code
        """
        pass

    def run(self, *args, **kwargs):
        """
        Main method launched by celery periodically
        """
        try:
            self.main(*args, **kwargs)
            self.log.debug('finished')
        except Exception as e:
            self.log.exception('Exception in main() method: {0}'.format(e))

    def parse_json_alert_config(self, config_file):
        """
        Helper function to parse an alert config file
        """
        alert_dir = os.path.join(os.path.dirname(__file__), '..')
        config_file_path = os.path.abspath(os.path.join(alert_dir, config_file))
        json_obj = {}
        with open(config_file_path, "r") as fd:
            try:
                json_obj = json.load(fd)
            except ValueError:
                sys.stderr.write("FAILED to parse the configuration file\n")

        return json_obj
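
A minimal sketch of an aggregation-based child alert built on the class above, assuming the usual MozDef layout (lib.alerttask.AlertTask, mozdef_util.query_models) and a SearchQuery that accepts a minutes= time window; the class name, field names and threshold are illustrative, not taken from a real alert.

# Sketch only: the import paths, 'details.username' and the threshold are assumptions.
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, PhraseMatch


class AlertFailedLoginsSketch(AlertTask):
    def main(self):
        # query the last 15 minutes of events for failed authentication
        search_query = SearchQuery(minutes=15)
        search_query.add_must(PhraseMatch('category', 'authentication'))
        search_query.add_must(PhraseMatch('details.success', 'false'))
        self.filtersManual(search_query)
        # aggregate matching events per username and fire at 10 or more
        self.searchEventsAggregated('details.username', samplesLimit=5)
        self.walkAggregations(threshold=10)

    def onAggregation(self, aggreg):
        # aggreg carries value, count, events and allevents (see searchEventsAggregated)
        summary = '{0} failed logins for {1}'.format(aggreg['count'], aggreg['value'])
        return self.createAlertDict(summary, 'authentication', ['login'],
                                    aggreg['events'], severity='WARNING')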
Example #29
0
class AlertTask(Task):

    abstract = True

    def __init__(self):
        self.alert_name = self.__class__.__name__
        self.main_query = None

        # Used to store any alerts that were thrown
        self.alert_ids = []

        # List of events
        self.events = None
        # List of aggregations
        # e.g. when aggregField is email: [{value:'*****@*****.**',count:1337,events:[...]}, ...]
        self.aggregations = None

        self.log.debug("starting {0}".format(self.alert_name))
        self.log.debug(RABBITMQ)
        self.log.debug(ES)

        self._configureKombu()
        self._configureES()

        self.event_indices = ['events', 'events-previous']
        plugin_dir = os.path.join(os.path.dirname(__file__), "../plugins")
        self.plugin_set = AlertPluginSet(plugin_dir, ALERT_PLUGINS)

    def classname(self):
        return self.__class__.__name__

    @property
    def log(self):
        return get_task_logger("%s.%s" % (__name__, self.alert_name))

    def parse_config(self, config_filename, config_keys):
        myparser = OptionParser()
        self.config = None
        (self.config, args) = myparser.parse_args([])
        full_config_filename = os.path.join(os.path.dirname(__file__), "../",
                                            config_filename)
        for config_key in config_keys:
            temp_value = getConfig(config_key, "", full_config_filename)
            setattr(self.config, config_key, temp_value)

    def close_connections(self):
        self.mqConn.release()

    def _discover_task_exchange(self):
        """Use configuration information to understand the message queue protocol.
        return: amqp, sqs
        """
        return getConfig("mqprotocol", "amqp", None)

    def __build_conn_string(self):
        exchange_protocol = self._discover_task_exchange()
        if exchange_protocol == "amqp":
            connString = "amqp://{0}:{1}@{2}:{3}//".format(
                RABBITMQ["mquser"],
                RABBITMQ["mqpassword"],
                RABBITMQ["mqserver"],
                RABBITMQ["mqport"],
            )
            return connString
        elif exchange_protocol == "sqs":
            connString = "sqs://{}".format(
                getConfig("alertSqsQueueUrl", None, None))
            if connString:
                connString = connString.replace('https://', '')
            return connString

    def _configureKombu(self):
        """
        Configure kombu for amqp or sqs
        """
        try:
            connString = self.__build_conn_string()
            self.mqConn = kombu.Connection(connString)
            if connString.find('sqs') == 0:
                self.mqConn.transport_options['region'] = os.getenv(
                    'DEFAULT_AWS_REGION', 'us-west-2')
                self.mqConn.transport_options['is_secure'] = True
                self.alertExchange = kombu.Exchange(
                    name=RABBITMQ["alertexchange"], type="topic", durable=True)
                self.alertExchange(self.mqConn).declare()
                alertQueue = kombu.Queue(
                    os.getenv('OPTIONS_ALERTSQSQUEUEURL').split('/')[4],
                    exchange=self.alertExchange)
            else:
                self.alertExchange = kombu.Exchange(
                    name=RABBITMQ["alertexchange"], type="topic", durable=True)
                self.alertExchange(self.mqConn).declare()
                alertQueue = kombu.Queue(RABBITMQ["alertqueue"],
                                         exchange=self.alertExchange)
            alertQueue(self.mqConn).declare()
            self.mqproducer = self.mqConn.Producer(serializer="json")
            self.log.debug("Kombu configured")
        except Exception as e:
            self.log.error(
                "Exception while configuring kombu for alerts: {0}".format(e))

    def _configureES(self):
        """
        Configure elasticsearch client
        """
        try:
            self.es = ElasticsearchClient(ES["servers"])
            self.log.debug("ES configured")
        except Exception as e:
            self.log.error(
                "Exception while configuring ES for alerts: {0}".format(e))

    def mostCommon(self, listofdicts, dictkeypath):
        """
            Given a list containing dictionaries,
            return the most common entries
            along a key path separated by .
            i.e. dictkey.subkey.subkey
            returned as a list of tuples
            [(value,count),(value,count)]
        """
        inspectlist = list()
        path = list(dictpath(dictkeypath))
        for i in listofdicts:
            for k in list(keypaths(i)):
                if not (set(k[0]).symmetric_difference(path)):
                    inspectlist.append(k[1])

        return Counter(inspectlist).most_common()

    def alertToMessageQueue(self, alertDict):
        """
        Send alert to the kombu based message queue.  The default is rabbitmq.
        """
        try:
            self.log.debug(alertDict)
            ensurePublish = self.mqConn.ensure(self.mqproducer,
                                               self.mqproducer.publish,
                                               max_retries=10)
            ensurePublish(
                alertDict,
                exchange=self.alertExchange,
                routing_key=RABBITMQ["alertqueue"],
            )
            self.log.debug("alert sent to the alert queue")
        except Exception as e:
            self.log.error(
                "Exception while sending alert to message queue: {0}".format(
                    e))

    def alertToES(self, alertDict):
        """
        Send alert to elasticsearch
        """
        try:
            res = self.es.save_alert(body=alertDict)
            self.log.debug("alert sent to ES")
            self.log.debug(res)
            return res
        except Exception as e:
            self.log.error(
                "Exception while pushing alert to ES: {0}".format(e))

    def tagBotNotify(self, alert):
        """
            Tag the alert with whether mozdefbot should be notified; low
            severities (NOTICE/INFO) are excluded by default.
            If 'channel' is set in an alert, we automatically notify mozdefbot
        """
        # If an alert code hasn't explicitly set notify_mozdefbot field
        if 'notify_mozdefbot' not in alert or alert['notify_mozdefbot'] is None:
            alert["notify_mozdefbot"] = True
            if alert["severity"] == "NOTICE" or alert["severity"] == "INFO":
                alert["notify_mozdefbot"] = False

            # If an alert sets specific channel, then we should probably always notify in mozdefbot
            if ("channel" in alert and alert["channel"] != ""
                    and alert["channel"] is not None):
                alert["notify_mozdefbot"] = True
        return alert

    def saveAlertID(self, saved_alert):
        """
        Save alert to self so we can analyze it later
        """
        self.alert_ids.append(saved_alert["_id"])

    def filtersManual(self, query):
        """
        Configure filters manually

        query is a search query object with date_timedelta populated

        """
        # Don't fire on already alerted events
        duplicate_matcher = TermMatch("alert_names",
                                      self.determine_alert_classname())
        if duplicate_matcher not in query.must_not:
            query.add_must_not(duplicate_matcher)

        self.main_query = query

    def determine_alert_classname(self):
        alert_name = self.classname()
        # Allow alerts like the generic alerts (one python class that represents many 'alerts')
        # to customize the alert name
        if hasattr(self, "custom_alert_name"):
            alert_name = self.custom_alert_name
        return alert_name

    def executeSearchEventsSimple(self):
        """
        Execute the search for simple events
        """
        return self.main_query.execute(self.es, indices=self.event_indices)

    def searchEventsSimple(self):
        """
        Search events matching filters, store events in self.events
        """
        try:
            results = self.executeSearchEventsSimple()
            self.events = results["hits"]
            self.log.debug(self.events)
        except Exception as e:
            self.log.error("Error while searching events in ES: {0}".format(e))

    def searchEventsAggregated(self, aggregationPath, samplesLimit=5):
        """
        Search events, aggregate matching ES filters by aggregationPath,
        store them in self.aggregations as a list of dictionaries
        keys:
          value: the text value that was found in the aggregationPath
          count: the hitcount of the text value
          events: the sampled list of events that matched
          allevents: the unsampled, complete list of matching events
        aggregationPath can be key.subkey.subkey to specify a path to a dictionary value
        relative to the _source that's returned from elastic search.
        ex: details.sourceipaddress
        """

        # We automatically add the key that we're matching on
        # for aggregation, as a query requirement
        aggreg_key_exists = ExistsMatch(aggregationPath)
        if aggreg_key_exists not in self.main_query.must:
            self.main_query.add_must(aggreg_key_exists)

        try:
            esresults = self.main_query.execute(self.es,
                                                indices=self.event_indices)
            results = esresults["hits"]

            # List of aggregation values that can be counted/summarized by Counter
            # Example: ['*****@*****.**','*****@*****.**', '*****@*****.**'] for an email aggregField
            aggregationValues = []
            for r in results:
                aggregationValues.append(
                    getValueByPath(r["_source"], aggregationPath))

            # [{value:'*****@*****.**',count:1337,events:[...]}, ...]
            aggregationList = []
            for i in Counter(aggregationValues).most_common():
                idict = {
                    "value": i[0],
                    "count": i[1],
                    "events": [],
                    "allevents": []
                }
                for r in results:
                    if getValueByPath(r["_source"], aggregationPath) == i[0]:
                        # copy events detail into this aggregation up to our samples limit
                        if len(idict["events"]) < samplesLimit:
                            idict["events"].append(r)
                        # also copy all events to a non-sampled list
                        # so we mark all events as alerted and don't re-alert
                        idict["allevents"].append(r)
                aggregationList.append(idict)

            self.aggregations = aggregationList
            self.log.debug(self.aggregations)
        except Exception as e:
            self.log.error("Error while searching events in ES: {0}".format(e))

    def walkEvents(self, **kwargs):
        """
        Walk through events, provide some methods to hook in alerts
        """
        if len(self.events) > 0:
            for i in self.events:
                alert = self.onEvent(i, **kwargs)
                if alert:
                    alert = self.tagBotNotify(alert)
                    self.log.debug(alert)
                    alert = self.alertPlugins(alert)
                    alertResultES = self.alertToES(alert)
                    self.tagEventsAlert([i], alertResultES)
                    full_alert_doc = self.generate_full_doc(
                        alert, alertResultES)
                    self.alertToMessageQueue(full_alert_doc)
                    self.hookAfterInsertion(alert)
                    self.saveAlertID(alertResultES)
        # did we not match anything?
        # can also be used as an alert trigger
        if len(self.events) == 0:
            alert = self.onNoEvent(**kwargs)
            if alert:
                alert = self.tagBotNotify(alert)
                self.log.debug(alert)
                alertResultES = self.alertToES(alert)
                full_alert_doc = self.generate_full_doc(alert, alertResultES)
                self.alertToMessageQueue(full_alert_doc)
                self.hookAfterInsertion(alert)
                self.saveAlertID(alertResultES)

    def walkAggregations(self, threshold, config=None):
        """
        Walk through aggregations, provide some methods to hook in alerts
        """
        if len(self.aggregations) > 0:
            for aggregation in self.aggregations:
                if aggregation["count"] >= threshold:
                    aggregation["config"] = config
                    alert = self.onAggregation(aggregation)
                    if alert:
                        alert = self.tagBotNotify(alert)
                        self.log.debug(alert)
                        alert = self.alertPlugins(alert)
                        alertResultES = self.alertToES(alert)
                        full_alert_doc = self.generate_full_doc(
                            alert, alertResultES)
                        # even though we only sample events in the alert
                        # tag all events as alerted to avoid re-alerting
                        # on events we've already processed.
                        self.tagEventsAlert(aggregation["allevents"],
                                            alertResultES)
                        self.alertToMessageQueue(full_alert_doc)
                        self.saveAlertID(alertResultES)

    def alertPlugins(self, alert):
        """
        Send alerts through a plugin system
        """
        alertDict = self.plugin_set.run_plugins(alert)[0]

        return alertDict

    def createAlertDict(
        self,
        summary,
        category,
        tags,
        events,
        severity="NOTICE",
        url=None,
        channel=None,
        notify_mozdefbot=None,
    ):
        """
        Create an alert dict
        """

        # Tag alert documents with alert classname
        # that was triggered
        classname = self.classname()
        # Handle generic alerts
        if classname == 'AlertGenericLoader':
            classname = self.custom_alert_name

        alert = {
            "utctimestamp": toUTC(datetime.now()).isoformat(),
            "severity": severity,
            "summary": summary,
            "category": category,
            "tags": tags,
            "events": [],
            "channel": channel,
            "notify_mozdefbot": notify_mozdefbot,
            "status": DEFAULT_STATUS,
            "classname": classname
        }
        if url:
            alert["url"] = url

        for e in events:
            alert["events"].append({
                "documentindex": e["_index"],
                "documentsource": e["_source"],
                "documentid": e["_id"],
            })
        self.log.debug(alert)
        return alert

    def onEvent(self, event, *args, **kwargs):
        """
        To be overridden by children to run their code
        when creating an alert from a single event.
        Must return an alert dict or None.
        """
        pass

    def onNoEvent(self, *args, **kwargs):
        """
        To be overridden by children to run their code
        when NOTHING matches a filter,
        which can be used to trigger on the absence of
        events, much like a dead man's switch.
        Must return an alert dict or None.
        """
        pass

    def onAggregation(self, aggregation):
        """
        To be overridden by children to run their code
        when creating an alert from an aggregation.
        Must return an alert dict or None.
        """
        pass

    def hookAfterInsertion(self, alert):
        """
        To be overridden by children to run their code
        after an alert has been inserted into ES.
        """
        pass

    def tagEventsAlert(self, events, alertResultES):
        """
        Update the event with the alertid/index
        and update the alert_names on the event itself so it's
        not re-alerted
        """
        try:
            for event in events:
                if "alerts" not in event["_source"]:
                    event["_source"]["alerts"] = []
                event["_source"]["alerts"].append({
                    "index":
                    alertResultES["_index"],
                    "id":
                    alertResultES["_id"]
                })

                if "alert_names" not in event["_source"]:
                    event["_source"]["alert_names"] = []
                event["_source"]["alert_names"].append(
                    self.determine_alert_classname())

                self.es.save_event(index=event["_index"],
                                   body=event["_source"],
                                   doc_id=event["_id"])
                # We refresh here to ensure our changes to the events will show up for the next search query results
                self.es.refresh(event["_index"])
        except Exception as e:
            self.log.error("Error while updating events in ES: {0}".format(e))

    def main(self):
        """
        To be overridden by children to run their code
        """
        pass

    def run(self, *args, **kwargs):
        """
        Main method launched by celery periodically
        """
        try:
            self.main(*args, **kwargs)
            self.log.debug("finished")
        except Exception as e:
            self.error_thrown = e
            self.log.exception("Exception in main() method: {0}".format(e))

    def parse_json_alert_config(self, config_file):
        """
        Helper function to parse an alert config file
        """
        alert_dir = os.path.join(os.path.dirname(__file__), "..")
        config_file_path = os.path.abspath(os.path.join(
            alert_dir, config_file))
        json_obj = {}
        with open(config_file_path, "r") as fd:
            try:
                json_obj = json.load(fd)
            except ValueError:
                logger.error("FAILED to open the configuration file\n")

        return json_obj

    def generate_full_doc(self, alert_body, alert_es):
        return {
            '_id': alert_es['_id'],
            '_index': alert_es['_index'],
            '_source': alert_body
        }
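
A per-event counterpart against this newer version of the class (channel and notify_mozdefbot instead of ircchannel); the same import-path assumptions apply, and the category and channel values are invented. Note that walkEvents here publishes the generate_full_doc wrapper (alert body under _source) rather than the bare alert dict.

# Sketch only: imports, category and channel are assumptions for illustration.
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch


class AlertBruteforcePerEventSketch(AlertTask):
    def main(self):
        search_query = SearchQuery(minutes=10)
        search_query.add_must(TermMatch('category', 'bruteforce'))
        self.filtersManual(search_query)
        # one alert per matching event
        self.searchEventsSimple()
        self.walkEvents()

    def onEvent(self, event):
        summary = 'Bruteforce activity: {0}'.format(event['_source']['summary'])
        return self.createAlertDict(summary, 'bruteforce', ['ssh'], [event],
                                    severity='WARNING', channel='#security',
                                    notify_mozdefbot=True)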
Example #30
0
# Fill in with events you want to write to elasticsearch
# NEED TO MODIFY
events = [{
    "category": "testcategory",
    "details": {
        "program": "sshd",
        "type": "Success Login",
        "username": "******",
        "sourceipaddress": random_ip(),
    },
    "hostname": "i-99999999",
    "mozdefhostname": socket.gethostname(),
    "processid": "1337",
    "processname": "auth0_cron",
    "severity": "INFO",
    "source": "auth0",
    "summary": "login invalid ldap_count_entries failed",
    "tags": ["auth0"],
}]

es_client = ElasticsearchClient(options.elasticsearch_host)

for event in events:
    timestamp = toUTC(datetime.now()).isoformat()
    event['utctimestamp'] = timestamp
    event['timestamp'] = timestamp
    event['receivedtimestamp'] = timestamp
    es_client.save_event(body=event)
    print("Wrote event to elasticsearch")
    time.sleep(0.2)
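
A quick read-back sketch for the test events just written, reusing es_client from above and assuming the mozdef_util query models (including a minutes= window on SearchQuery) behave as in the other examples here; the index may need a refresh before the documents become searchable.

# Read-back sketch; the SearchQuery/TermMatch import path is assumed.
from mozdef_util.query_models import SearchQuery, TermMatch

search_query = SearchQuery(minutes=5)
search_query.add_must(TermMatch('category', 'testcategory'))
results = search_query.execute(es_client, indices=['events'])
for hit in results['hits']:
    print(hit['_source']['summary'])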
Example #31
0
    help='The relative path to rotateIndexes.conf file (default: {0})'.format(
        default_file),
    default=default_file,
    nargs='?')

parser.add_argument(
    'kibana_url',
    help='The URL of the kibana endpoint (ex: http://kibana:5601)')
args = parser.parse_args()

esserver = os.environ.get('OPTIONS_ESSERVERS')
if esserver is None:
    esserver = args.esserver
esserver = esserver.strip('/')
print("Connecting to " + esserver)
client = ElasticsearchClient(esserver)

kibana_url = os.environ.get('OPTIONS_KIBANAURL', args.kibana_url)

current_date = datetime.now()
event_index_name = current_date.strftime("events-%Y%m%d")
previous_event_index_name = (current_date -
                             timedelta(days=1)).strftime("events-%Y%m%d")
weekly_index_alias = 'events-weekly'
alert_index_name = current_date.strftime("alerts-%Y%m")

kibana_index_name = '.kibana_1'
state_index_name = 'mozdefstate'

index_settings_str = ''
with open(args.default_mapping_file) as data_file:
Example #32
0
    def onMessage(self, request, response):
        '''
        request: http://bottlepy.org/docs/dev/api.html#the-request-object
        response: http://bottlepy.org/docs/dev/api.html#the-response-object

        '''
        # an ES query/facet to count success/failed logins
        # oriented to the data having
        # category: authentication
        # details.success marked true/false for success/failed auth
        # details.username as the user

        begindateUTC = None
        enddateUTC = None
        resultsList = list()
        if begindateUTC is None:
            begindateUTC = datetime.now() - timedelta(hours=12)
            begindateUTC = toUTC(begindateUTC)
        if enddateUTC is None:
            enddateUTC = datetime.now()
            enddateUTC = toUTC(enddateUTC)

        es_client = ElasticsearchClient(
            list('{0}'.format(s) for s in self.restoptions['esservers']))
        search_query = SearchQuery()
        # a query to tally users with failed logins
        date_range_match = RangeMatch('utctimestamp', begindateUTC, enddateUTC)
        search_query.add_must(date_range_match)
        search_query.add_must(PhraseMatch('category', 'authentication'))
        search_query.add_must(PhraseMatch('details.success', 'false'))
        search_query.add_must(ExistsMatch('details.username'))
        search_query.add_aggregation(Aggregation('details.success'))
        search_query.add_aggregation(Aggregation('details.username'))

        results = search_query.execute(es_client,
                                       indices=['events', 'events-previous'])

        # any usernames or words to ignore
        # especially useful if ES is analyzing the username field and breaking apart user@somewhere.com
        # into user somewhere and .com
        stoplist = self.options.ignoreusernames.split(',')
        # walk the aggregate failed users
        # and look for successes/failures
        for t in results['aggregations']['details.username']['terms']:
            if t['key'] in stoplist:
                continue
            failures = 0
            success = 0
            username = t['key']

            details_query = SearchQuery()
            details_query.add_must(date_range_match)
            details_query.add_must(PhraseMatch('category', 'authentication'))
            details_query.add_must(PhraseMatch('details.username', username))
            details_query.add_aggregation(Aggregation('details.success'))

            details_results = details_query.execute(es_client)
            # details.success is boolean; in the aggregation it shows up as an int key (0/1)
            for details_term in details_results['aggregations'][
                    'details.success']['terms']:
                if details_term['key'] == 1:
                    success = details_term['count']
                if details_term['key'] == 0:
                    failures = details_term['count']
            resultsList.append(
                dict(username=username,
                     failures=failures,
                     success=success,
                     begin=begindateUTC.isoformat(),
                     end=enddateUTC.isoformat()))

        response.body = json.dumps(resultsList)
        response.status = 200

        return (request, response)
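
For reference, the endpoint above returns a JSON list of per-user tallies; the entry below is hand-built for illustration and the counts are invented.

# Illustrative response entry only; real values come from the aggregations above.
example_response = [
    {
        "username": "someuser",
        "failures": 12,
        "success": 3,
        "begin": "2019-06-01T00:00:00+00:00",
        "end": "2019-06-01T12:00:00+00:00",
    },
]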
Example #33
0
def esConnect():
    '''open or re-open a connection to elastic search'''
    return ElasticsearchClient(
        (list('{0}'.format(s) for s in options.esservers)), options.esbulksize)
Example #34
0
def esConnect():
    """open or re-open a connection to elastic search"""
    return ElasticsearchClient(
        (list("{0}".format(s) for s in options.esservers)), options.esbulksize)
Example #35
0
def main():
    '''
    Get health and status stats and post to ES
    Post both as a historical reference (for charts)
    and as a static docid (for realtime current health/EPS displays)
    '''
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
    index = options.index

    with open(options.default_mapping_file, 'r') as mapping_file:
        default_mapping_contents = json.loads(mapping_file.read())

    if not es.index_exists(index):
        try:
            logger.debug('Creating %s index' % index)
            es.create_index(index, default_mapping_contents)
        except Exception as e:
            logger.error("Unhandled exception, terminating: %r" % e)

    auth = HTTPBasicAuth(options.mquser, options.mqpassword)

    for server in options.mqservers:
        logger.debug('checking message queues on {0}'.format(server))
        r = requests.get(
            'http://{0}:{1}/api/queues'.format(server,
                                               options.mqapiport),
            auth=auth)

        mq = r.json()
        # setup a log entry for health/status.
        healthlog = dict(
            utctimestamp=toUTC(datetime.now()).isoformat(),
            hostname=server,
            processid=os.getpid(),
            processname=sys.argv[0],
            severity='INFO',
            summary='mozdef health/status',
            category='mozdef',
            type='mozdefhealth',
            source='mozdef',
            tags=[],
            details=[])

        healthlog['details'] = dict(username='******')
        healthlog['details']['loadaverage'] = list(os.getloadavg())
        healthlog['details']['queues'] = list()
        healthlog['details']['total_deliver_eps'] = 0
        healthlog['details']['total_publish_eps'] = 0
        healthlog['details']['total_messages_ready'] = 0
        healthlog['tags'] = ['mozdef', 'status']
        for m in mq:
            if 'message_stats' in m and isinstance(m['message_stats'], dict):
                if 'messages_ready' in m:
                    mready = m['messages_ready']
                    healthlog['details']['total_messages_ready'] += m['messages_ready']
                else:
                    mready = 0
                if 'messages_unacknowledged' in m:
                    munack = m['messages_unacknowledged']
                else:
                    munack = 0
                queueinfo = dict(
                    queue=m['name'],
                    vhost=m['vhost'],
                    messages_ready=mready,
                    messages_unacknowledged=munack)

                if 'deliver_details' in m['message_stats']:
                    queueinfo['deliver_eps'] = round(m['message_stats']['deliver_details']['rate'], 2)
                    healthlog['details']['total_deliver_eps'] += round(m['message_stats']['deliver_details']['rate'], 2)
                if 'deliver_no_ack_details' in m['message_stats']:
                    queueinfo['deliver_eps'] = round(m['message_stats']['deliver_no_ack_details']['rate'], 2)
                    healthlog['details']['total_deliver_eps'] += round(m['message_stats']['deliver_no_ack_details']['rate'], 2)
                if 'publish_details' in m['message_stats']:
                    queueinfo['publish_eps'] = round(m['message_stats']['publish_details']['rate'], 2)
                    healthlog['details']['total_publish_eps'] += round(m['message_stats']['publish_details']['rate'], 2)
                healthlog['details']['queues'].append(queueinfo)

        # post to elastic search servers directly without going through
        # message queues in case there is an availability issue
        es.save_event(index=index, body=json.dumps(healthlog))
        # post another doc with a static docid and tag
        # for use when querying for the latest status
        healthlog['tags'] = ['mozdef', 'status', 'latest']
        es.save_event(index=index, doc_id=getDocID(server), body=json.dumps(healthlog))
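
For context, each element of the /api/queues response that the loop above walks looks roughly like the trimmed example below; only the fields the code actually reads are shown, and the counts and rates are invented.

# Trimmed illustration of one RabbitMQ management API queue entry.
queue_entry = {
    "name": "eventtask",
    "vhost": "/",
    "messages_ready": 42,
    "messages_unacknowledged": 0,
    "message_stats": {
        "deliver_details": {"rate": 123.4},
        "publish_details": {"rate": 125.0},
    },
}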
Example #36
0
    else:
        print("Running for {0} rounds".format(num_rounds))
        handle_events(sample_events, num_events, es_client)


if __name__ == '__main__':
    parser = optparse.OptionParser()
    parser.add_option(
        '--elasticsearch_host',
        help='Elasticsearch host (default: http://localhost:9200)',
        default='http://localhost:9200')
    parser.add_option(
        '--num_events',
        help='Number of random events to insert (default: 0 (run all))',
        default=0)
    parser.add_option(
        '--num_rounds',
        help='Number of rounds to insert events (default: 0 (run continuously))',
        default=0)
    parser.add_option(
        '--sleep_time',
        help='Number of seconds to sleep between rounds (default: 2)',
        default=2)
    options, arguments = parser.parse_args()
    es_client = ElasticsearchClient(options.elasticsearch_host)
    run(num_rounds=options.num_rounds,
        num_events=options.num_events,
        sleep_time=options.sleep_time,
        es_client=es_client)
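
An equivalent programmatic invocation of this entry point with finite, illustrative values; note that optparse hands back command-line values as strings, so non-default values may need coercion (for example int(options.num_events)) before use.

# Programmatic sketch; run() and handle_events() are defined earlier in the
# original file and are not shown in this excerpt.
es_client = ElasticsearchClient('http://localhost:9200')
run(num_rounds=5, num_events=10, sleep_time=2, es_client=es_client)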
Example #37
0
class AlertTask(Task):

    abstract = True

    def __init__(self):
        self.alert_name = self.__class__.__name__
        self.main_query = None

        # Used to store any alerts that were thrown
        self.alert_ids = []

        # List of events
        self.events = None
        # List of aggregations
        # e.g. when aggregField is email: [{value:'*****@*****.**',count:1337,events:[...]}, ...]
        self.aggregations = None

        self.log.debug("starting {0}".format(self.alert_name))
        self.log.debug(RABBITMQ)
        self.log.debug(ES)

        self._configureKombu()
        self._configureES()

        # We want to select all event indices
        # and let the search query's timestamp range
        # narrow the time window
        self.event_indices = ["events-*"]

    def classname(self):
        return self.__class__.__name__

    @property
    def log(self):
        return get_task_logger("%s.%s" % (__name__, self.alert_name))

    def parse_config(self, config_filename, config_keys):
        myparser = OptionParser()
        self.config = None
        (self.config, args) = myparser.parse_args([])
        for config_key in config_keys:
            temp_value = getConfig(config_key, "", config_filename)
            setattr(self.config, config_key, temp_value)

    def _discover_task_exchange(self):
        """Use configuration information to understand the message queue protocol.
        return: amqp, sqs
        """
        return getConfig("mqprotocol", "amqp", None)

    def __build_conn_string(self):
        exchange_protocol = self._discover_task_exchange()
        if exchange_protocol == "amqp":
            connString = "amqp://{0}:{1}@{2}:{3}//".format(
                RABBITMQ["mquser"],
                RABBITMQ["mqpassword"],
                RABBITMQ["mqserver"],
                RABBITMQ["mqport"],
            )
            return connString
        elif exchange_protocol == "sqs":
            connString = "sqs://{}".format(getConfig("alertSqsQueueUrl", None, None))
            if connString:
                connString = connString.replace('https://','')
            return connString

    def _configureKombu(self):
        """
        Configure kombu for amqp or sqs
        """
        try:
            connString = self.__build_conn_string()
            self.mqConn = kombu.Connection(connString)
            if connString.find('sqs') == 0:
                self.mqConn.transport_options['region'] = os.getenv('DEFAULT_AWS_REGION', 'us-west-2')
                self.alertExchange = kombu.Exchange(
                    name=RABBITMQ["alertexchange"], type="topic", durable=True
                )
                self.alertExchange(self.mqConn).declare()
                alertQueue = kombu.Queue(
                    os.getenv('OPTIONS_ALERTSQSQUEUEURL').split('/')[4], exchange=self.alertExchange
                )
            else:
                self.alertExchange = kombu.Exchange(
                    name=RABBITMQ["alertexchange"], type="topic", durable=True
                )
                self.alertExchange(self.mqConn).declare()
                alertQueue = kombu.Queue(
                    RABBITMQ["alertqueue"], exchange=self.alertExchange
                )
            alertQueue(self.mqConn).declare()
            self.mqproducer = self.mqConn.Producer(serializer="json")
            self.log.debug("Kombu configured")
        except Exception as e:
            self.log.error(
                "Exception while configuring kombu for alerts: {0}".format(e)
            )

    def _configureES(self):
        """
        Configure elasticsearch client
        """
        try:
            self.es = ElasticsearchClient(ES["servers"])
            self.log.debug("ES configured")
        except Exception as e:
            self.log.error("Exception while configuring ES for alerts: {0}".format(e))

    def mostCommon(self, listofdicts, dictkeypath):
        """
            Given a list containing dictionaries,
            return the most common entries
            along a key path separated by .
            i.e. dictkey.subkey.subkey
            returned as a list of tuples
            [(value,count),(value,count)]
        """
        inspectlist = list()
        path = list(dictpath(dictkeypath))
        for i in listofdicts:
            for k in list(keypaths(i)):
                if not (set(k[0]).symmetric_difference(path)):
                    inspectlist.append(k[1])

        return Counter(inspectlist).most_common()

    def alertToMessageQueue(self, alertDict):
        """
        Send alert to the kombu based message queue.  The default is rabbitmq.
        """
        try:
            # cherry pick items from the alertDict to send to the alerts messageQueue
            mqAlert = dict(severity="INFO", category="")
            if "severity" in alertDict:
                mqAlert["severity"] = alertDict["severity"]
            if "category" in alertDict:
                mqAlert["category"] = alertDict["category"]
            if "utctimestamp" in alertDict:
                mqAlert["utctimestamp"] = alertDict["utctimestamp"]
            if "eventtimestamp" in alertDict:
                mqAlert["eventtimestamp"] = alertDict["eventtimestamp"]
            mqAlert["summary"] = alertDict["summary"]
            self.log.debug(mqAlert)
            ensurePublish = self.mqConn.ensure(
                self.mqproducer, self.mqproducer.publish, max_retries=10
            )
            ensurePublish(
                alertDict,
                exchange=self.alertExchange,
                routing_key=RABBITMQ["alertqueue"],
            )
            self.log.debug("alert sent to the alert queue")
        except Exception as e:
            self.log.error(
                "Exception while sending alert to message queue: {0}".format(e)
            )

    def alertToES(self, alertDict):
        """
        Send alert to elasticsearch
        """
        try:
            res = self.es.save_alert(body=alertDict)
            self.log.debug("alert sent to ES")
            self.log.debug(res)
            return res
        except Exception as e:
            self.log.error("Exception while pushing alert to ES: {0}".format(e))

    def tagBotNotify(self, alert):
        """
            Tag the alert with whether mozdefbot should be notified; low
            severities (NOTICE/INFO) are excluded by default.
            If 'ircchannel' is set in an alert, we automatically notify mozdefbot
        """
        alert["notify_mozdefbot"] = True
        if alert["severity"] == "NOTICE" or alert["severity"] == "INFO":
            alert["notify_mozdefbot"] = False

        # If an alert sets specific ircchannel, then we should probably always notify in mozdefbot
        if (
            "ircchannel" in alert and alert["ircchannel"] != "" and alert["ircchannel"] is not None
        ):
            alert["notify_mozdefbot"] = True
        return alert

    def saveAlertID(self, saved_alert):
        """
        Save alert to self so we can analyze it later
        """
        self.alert_ids.append(saved_alert["_id"])

    def filtersManual(self, query):
        """
        Configure filters manually

        query is a search query object with date_timedelta populated

        """
        # Don't fire on already alerted events
        duplicate_matcher = TermMatch("alert_names", self.determine_alert_classname())
        if duplicate_matcher not in query.must_not:
            query.add_must_not(duplicate_matcher)

        self.main_query = query

    def determine_alert_classname(self):
        alert_name = self.classname()
        # Allow alerts like the generic alerts (one python class that represents many 'alerts')
        # to customize the alert name
        if hasattr(self, "custom_alert_name"):
            alert_name = self.custom_alert_name
        return alert_name

    def executeSearchEventsSimple(self):
        """
        Execute the search for simple events
        """
        return self.main_query.execute(self.es, indices=self.event_indices)

    def searchEventsSimple(self):
        """
        Search events matching filters, store events in self.events
        """
        try:
            results = self.executeSearchEventsSimple()
            self.events = results["hits"]
            self.log.debug(self.events)
        except Exception as e:
            self.log.error("Error while searching events in ES: {0}".format(e))

    def searchEventsAggregated(self, aggregationPath, samplesLimit=5):
        """
        Search events, aggregate matching ES filters by aggregationPath,
        store them in self.aggregations as a list of dictionaries
        keys:
          value: the text value that was found in the aggregationPath
          count: the hitcount of the text value
          events: the sampled list of events that matched
          allevents: the unsampled, complete list of matching events
        aggregationPath can be key.subkey.subkey to specify a path to a dictionary value
        relative to the _source that's returned from elastic search.
        ex: details.sourceipaddress
        """

        # We automatically add the key that we're matching on
        # for aggregation, as a query requirement
        aggreg_key_exists = ExistsMatch(aggregationPath)
        if aggreg_key_exists not in self.main_query.must:
            self.main_query.add_must(aggreg_key_exists)

        try:
            esresults = self.main_query.execute(self.es, indices=self.event_indices)
            results = esresults["hits"]

            # List of aggregation values that can be counted/summarized by Counter
            # Example: ['*****@*****.**','*****@*****.**', '*****@*****.**'] for an email aggregField
            aggregationValues = []
            for r in results:
                aggregationValues.append(getValueByPath(r["_source"], aggregationPath))

            # [{value:'*****@*****.**',count:1337,events:[...]}, ...]
            aggregationList = []
            for i in Counter(aggregationValues).most_common():
                idict = {"value": i[0], "count": i[1], "events": [], "allevents": []}
                for r in results:
                    if (
                        getValueByPath(r["_source"], aggregationPath).encode(
                            "ascii", "ignore"
                        ) == i[0]
                    ):
                        # copy events detail into this aggregation up to our samples limit
                        if len(idict["events"]) < samplesLimit:
                            idict["events"].append(r)
                        # also copy all events to a non-sampled list
                        # so we mark all events as alerted and don't re-alert
                        idict["allevents"].append(r)
                aggregationList.append(idict)

            self.aggregations = aggregationList
            self.log.debug(self.aggregations)
        except Exception as e:
            self.log.error("Error while searching events in ES: {0}".format(e))

    def walkEvents(self, **kwargs):
        """
        Walk through events, provide some methods to hook in alerts
        """
        if len(self.events) > 0:
            for i in self.events:
                alert = self.onEvent(i, **kwargs)
                if alert:
                    alert = self.tagBotNotify(alert)
                    self.log.debug(alert)
                    alert = self.alertPlugins(alert)
                    alertResultES = self.alertToES(alert)
                    self.tagEventsAlert([i], alertResultES)
                    self.alertToMessageQueue(alert)
                    self.hookAfterInsertion(alert)
                    self.saveAlertID(alertResultES)
        # did we not match anything?
        # can also be used as an alert trigger
        if len(self.events) == 0:
            alert = self.onNoEvent(**kwargs)
            if alert:
                alert = self.tagBotNotify(alert)
                self.log.debug(alert)
                alertResultES = self.alertToES(alert)
                self.alertToMessageQueue(alert)
                self.hookAfterInsertion(alert)
                self.saveAlertID(alertResultES)

    def walkAggregations(self, threshold, config=None):
        """
        Walk through aggregations, provide some methods to hook in alerts
        """
        if len(self.aggregations) > 0:
            for aggregation in self.aggregations:
                if aggregation["count"] >= threshold:
                    aggregation["config"] = config
                    alert = self.onAggregation(aggregation)
                    if alert:
                        alert = self.tagBotNotify(alert)
                        self.log.debug(alert)
                        alert = self.alertPlugins(alert)
                        alertResultES = self.alertToES(alert)
                        # even though we only sample events in the alert
                        # tag all events as alerted to avoid re-alerting
                        # on events we've already processed.
                        self.tagEventsAlert(aggregation["allevents"], alertResultES)
                        self.alertToMessageQueue(alert)
                        self.saveAlertID(alertResultES)

    def alertPlugins(self, alert):
        """
        Send alerts through a plugin system
        """

        plugin_dir = os.path.join(os.path.dirname(__file__), "../plugins")
        plugin_set = AlertPluginSet(plugin_dir, ALERT_PLUGINS)
        alertDict = plugin_set.run_plugins(alert)[0]

        return alertDict

    def createAlertDict(
        self,
        summary,
        category,
        tags,
        events,
        severity="NOTICE",
        url=None,
        ircchannel=None,
    ):
        """
        Create an alert dict
        """
        alert = {
            "utctimestamp": toUTC(datetime.now()).isoformat(),
            "severity": severity,
            "summary": summary,
            "category": category,
            "tags": tags,
            "events": [],
            "ircchannel": ircchannel,
        }
        if url:
            alert["url"] = url

        for e in events:
            alert["events"].append(
                {
                    "documentindex": e["_index"],
                    "documentsource": e["_source"],
                    "documentid": e["_id"],
                }
            )
        self.log.debug(alert)
        return alert

    def onEvent(self, event, *args, **kwargs):
        """
        To be overridden by children to run their code
        when creating an alert from a single event.
        Must return an alert dict or None.
        """
        pass

    def onNoEvent(self, *args, **kwargs):
        """
        To be overridden by children to run their code
        when NOTHING matches a filter,
        which can be used to trigger on the absence of
        events, much like a dead man's switch.
        Must return an alert dict or None.
        """
        pass

    def onAggregation(self, aggregation):
        """
        To be overridden by children to run their code
        when creating an alert from an aggregation.
        Must return an alert dict or None.
        """
        pass

    def hookAfterInsertion(self, alert):
        """
        To be overridden by children to run their code
        after an alert has been inserted into ES.
        """
        pass

    def tagEventsAlert(self, events, alertResultES):
        """
        Update each event with the alert id/index
        and append to the event's alert_names so it is
        not re-alerted
        """
        try:
            for event in events:
                if "alerts" not in event["_source"]:
                    event["_source"]["alerts"] = []
                event["_source"]["alerts"].append(
                    {"index": alertResultES["_index"], "id": alertResultES["_id"]}
                )

                if "alert_names" not in event["_source"]:
                    event["_source"]["alert_names"] = []
                event["_source"]["alert_names"].append(self.determine_alert_classname())

                self.es.save_event(
                    index=event["_index"], body=event["_source"], doc_id=event["_id"]
                )
            # Refresh so our changes to the events show up in the next search's results
            if events:
                self.es.refresh(events[-1]["_index"])
        except Exception as e:
            self.log.error("Error while updating events in ES: {0}".format(e))

    def main(self):
        """
        To be overridden by children to run their own code
        """
        pass

    def run(self, *args, **kwargs):
        """
        Main method launched by celery periodically
        """
        try:
            self.main(*args, **kwargs)
            self.log.debug("finished")
        except Exception as e:
            self.log.exception("Exception in main() method: {0}".format(e))

    def parse_json_alert_config(self, config_file):
        """
        Helper function to parse an alert config file
        """
        alert_dir = os.path.join(os.path.dirname(__file__), "..")
        config_file_path = os.path.abspath(os.path.join(alert_dir, config_file))
        json_obj = {}
        with open(config_file_path, "r") as fd:
            try:
                json_obj = json.load(fd)
            except ValueError:
                sys.stderr.write("FAILED to parse the configuration file\n")

        return json_obj
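The onEvent/onNoEvent/onAggregation/hookAfterInsertion methods above are hook points meant to be overridden. The sketch below shows one plausible child class; the base-class name AlertTask and the lib.alerttask import path are assumptions, since this snippet does not show the actual class declaration or its module.

# Minimal sketch of a child alert, assuming the base class above is
# importable as lib.alerttask.AlertTask (hypothetical path and name).
from lib.alerttask import AlertTask


class AlertExampleFailedLogins(AlertTask):
    def main(self):
        # A real alert would build and run its search here; elided.
        pass

    def onEvent(self, event):
        # Called per matching event; must return an alert dict or None.
        hostname = event["_source"].get("hostname", "unknown")
        summary = "Example failed-login activity on {0}".format(hostname)
        return self.createAlertDict(summary, "access", ["example"], [event])

    def onAggregation(self, aggregation):
        # Called per aggregation that met the walkAggregations() threshold.
        summary = "{0} example events aggregated".format(aggregation["count"])
        return self.createAlertDict(
            summary, "access", ["example"], aggregation["allevents"], "WARNING"
        )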
Example #38
0
def esRotateIndexes():
    if options.output == 'syslog':
        logger.addHandler(
            SysLogHandler(address=(options.sysloghostname,
                                   options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    with open(options.default_mapping_file, 'r') as mapping_file:
        default_mapping_contents = json.loads(mapping_file.read())

    try:
        es = ElasticsearchClient(
            (list('{0}'.format(s) for s in options.esservers)))

        indices = es.get_indices()

        # calc dates for use in index names events-YYYYMMDD, alerts-YYYYMM, etc.
        odate_day = date.strftime(
            toUTC(datetime.now()) - timedelta(days=1), '%Y%m%d')
        odate_month = date.strftime(
            toUTC(datetime.now()) - timedelta(days=1), '%Y%m')
        ndate_day = date.strftime(toUTC(datetime.now()), '%Y%m%d')
        ndate_month = date.strftime(toUTC(datetime.now()), '%Y%m')
        # examine each index in the .conf file
        # for rotation settings
        for (index, dobackup, rotation,
             pruning) in zip(options.indices, options.dobackup,
                             options.rotation, options.pruning):
            try:
                if rotation != 'none':
                    oldindex = index
                    newindex = index
                    if rotation == 'daily':
                        oldindex += '-%s' % odate_day
                        newindex += '-%s' % ndate_day
                    elif rotation == 'monthly':
                        oldindex += '-%s' % odate_month
                        newindex += '-%s' % ndate_month
                        # do not rotate before the month ends
                        if oldindex == newindex:
                            logger.debug(
                                'do not rotate %s index, month has not changed yet'
                                % index)
                            continue
                    if newindex not in indices:
                        index_settings = {}
                        if 'events' in newindex:
                            index_settings = {
                                "index": {
                                    "refresh_interval":
                                    options.refresh_interval,
                                    "number_of_shards":
                                    options.number_of_shards,
                                    "number_of_replicas":
                                    options.number_of_replicas,
                                    "search.slowlog.threshold.query.warn":
                                    options.slowlog_threshold_query_warn,
                                    "search.slowlog.threshold.fetch.warn":
                                    options.slowlog_threshold_fetch_warn,
                                    "mapping.total_fields.limit":
                                    options.mapping_total_fields_limit
                                }
                            }
                        default_mapping_contents['settings'] = index_settings
                        logger.debug('Creating %s index' % newindex)
                        es.create_index(newindex, default_mapping_contents)
                    # set aliases: events to events-YYYYMMDD
                    # and events-previous to events-YYYYMMDD-1
                    logger.debug('Setting {0} alias to index: {1}'.format(
                        index, newindex))
                    es.create_alias(index, newindex)
                    if oldindex in indices:
                        logger.debug(
                            'Setting {0}-previous alias to index: {1}'.format(
                                index, oldindex))
                        es.create_alias('%s-previous' % index, oldindex)
                    else:
                        logger.debug(
                            'Old index %s is missing, do not change %s-previous alias'
                            % (oldindex, index))
            except Exception as e:
                logger.error(
                    "Unhandled exception while rotating %s, terminating: %r" %
                    (index, e))

        indices = es.get_indices()
        # Create weekly aliases for certain indices
        week_ago_date = toUTC(datetime.now()) - timedelta(weeks=1)
        week_ago_str = week_ago_date.strftime('%Y%m%d')
        current_date = toUTC(datetime.now())
        for index in options.weekly_rotation_indices:
            weekly_index_alias = '%s-weekly' % index
            logger.debug('Trying to re-alias {0} to indices since {1}'.format(
                weekly_index_alias, week_ago_str))
            existing_weekly_indices = []
            for day_obj in daterange(week_ago_date, current_date):
                day_str = day_obj.strftime('%Y%m%d')
                day_index = index + '-' + str(day_str)
                if day_index in indices:
                    existing_weekly_indices.append(day_index)
                else:
                    logger.debug("%s not found, so can't assign weekly alias" %
                                 day_index)
            if existing_weekly_indices:
                logger.debug('Creating {0} alias for {1}'.format(
                    weekly_index_alias, existing_weekly_indices))
                es.create_alias_multiple_indices(weekly_index_alias,
                                                 existing_weekly_indices)
            else:
                logger.warning(
                    'No indices within the past week to assign %s to' %
                    weekly_index_alias)
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
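For reference, the old/new index names computed above follow a simple scheme: daily indices get a -YYYYMMDD suffix and monthly indices a -YYYYMM suffix, with yesterday's date used for the previous index. Below is a condensed, standalone sketch of that naming logic; the helper name is illustrative and not part of the original script, and it uses utcnow() instead of the script's toUTC(datetime.now()) to stay dependency-free.

from datetime import datetime, timedelta


def rotated_index_names(index, rotation, now=None):
    """Return (oldindex, newindex) using the naming scheme shown above."""
    now = now or datetime.utcnow()
    yesterday = now - timedelta(days=1)
    if rotation == 'daily':
        return (index + '-' + yesterday.strftime('%Y%m%d'),
                index + '-' + now.strftime('%Y%m%d'))
    if rotation == 'monthly':
        return (index + '-' + yesterday.strftime('%Y%m'),
                index + '-' + now.strftime('%Y%m'))
    return (index, index)


# e.g. rotated_index_names('events', 'daily')
#      -> ('events-20240101', 'events-20240102')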
Example #39
0

parser = argparse.ArgumentParser(description='Create the correct indexes and aliases in elasticsearch')
parser.add_argument('esserver', help='Elasticsearch server (ex: http://elasticsearch:9200)')
parser.add_argument('default_mapping_file', help='The relative path to default mapping json file (ex: cron/defaultMappingTemplate.json)')
parser.add_argument('backup_conf_file', help='The relative path to backup.conf file (ex: cron/backup.conf)')
parser.add_argument('kibana_url', help='The URL of the kibana endpoint (ex: http://kibana:5601)')
args = parser.parse_args()


esserver = os.environ.get('OPTIONS_ESSERVERS')
if esserver is None:
    esserver = args.esserver
esserver = esserver.strip('/')
print "Connecting to " + esserver
client = ElasticsearchClient(esserver)

kibana_url = os.environ.get('OPTIONS_KIBANAURL', args.kibana_url)

current_date = datetime.now()
event_index_name = current_date.strftime("events-%Y%m%d")
previous_event_index_name = (current_date - timedelta(days=1)).strftime("events-%Y%m%d")
weekly_index_alias = 'events-weekly'
alert_index_name = current_date.strftime("alerts-%Y%m")
kibana_index_name = '.kibana'

index_settings_str = ''
with open(args.default_mapping_file) as data_file:
    index_settings_str = data_file.read()

index_settings = json.loads(index_settings_str)
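The snippet ends after loading the mapping file. One plausible continuation, shown only as an illustration and not as the original script, would feed the computed names into the client methods already used elsewhere in this document (get_indices, create_index, create_alias):

# Illustrative continuation only; not part of the original snippet.
existing_indices = client.get_indices()

for index_name in (event_index_name, alert_index_name):
    if index_name not in existing_indices:
        print("Creating " + index_name)
        client.create_index(index_name, index_settings)

# Point the rolling aliases at the indices (assumes yesterday's index exists).
client.create_alias('events', event_index_name)
client.create_alias('events-previous', previous_event_index_name)
client.create_alias('alerts', alert_index_name)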
Example #40
0
    def setup(self):
        super(ElasticsearchClientTest, self).setup()
        self.es_client = ElasticsearchClient(self.options.esservers,
                                             bulk_refresh_time=3)
Example #41
0
def getQueueSizes():
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(options.esservers)
    sqslist = {}
    sqslist['queue_stats'] = {}
    qcount = len(options.taskexchange)
    qcounter = qcount - 1

    mqConn = boto.sqs.connect_to_region(
        options.region,
        aws_access_key_id=options.accesskey,
        aws_secret_access_key=options.secretkey
    )

    for exchange in options.taskexchange:
        logger.debug('Looking for sqs queue stats in queue ' + exchange)
        eventTaskQueue = mqConn.get_queue(exchange)
        # get queue stats
        taskQueueStats = eventTaskQueue.get_attributes('All')
        sqslist['queue_stats'][qcounter] = taskQueueStats
        sqslist['queue_stats'][qcounter]['name'] = exchange
        qcounter -= 1

    # setup a log entry for health/status.
    sqsid = '{0}-{1}'.format(options.account, options.region)
    healthlog = dict(
        utctimestamp=toUTC(datetime.now()).isoformat(),
        hostname=sqsid,
        processid=os.getpid(),
        processname=sys.argv[0],
        severity='INFO',
        summary='mozdef health/status',
        category='mozdef',
        source='aws-sqs',
        tags=[],
        details=[])
    healthlog['details'] = dict(username='******')
    healthlog['details']['queues'] = list()
    healthlog['details']['total_messages_ready'] = 0
    healthlog['details']['total_feeds'] = qcount
    healthlog['tags'] = ['mozdef', 'status', 'sqs']
    ready = 0
    qcounter = qcount - 1
    for q in sqslist['queue_stats'].keys():
        queuelist = sqslist['queue_stats'][qcounter]
        # default to safe values in case an attribute is missing
        name = ''
        messages = inflight = delayed = 0
        if 'ApproximateNumberOfMessages' in queuelist:
            messages = int(queuelist['ApproximateNumberOfMessages'])
            ready += messages
            healthlog['details']['total_messages_ready'] = ready
        if 'ApproximateNumberOfMessagesNotVisible' in queuelist:
            inflight = int(queuelist['ApproximateNumberOfMessagesNotVisible'])
        if 'ApproximateNumberOfMessagesDelayed' in queuelist:
            delayed = int(queuelist['ApproximateNumberOfMessagesDelayed'])
        if 'name' in queuelist:
            name = queuelist['name']
        queueinfo = dict(
            queue=name,
            messages_delayed=delayed,
            messages_ready=messages,
            messages_inflight=inflight)
        healthlog['details']['queues'].append(queueinfo)
        qcounter -= 1
    # post to elasticsearch servers directly without going through
    # message queues in case there is an availability issue
    es.save_event(index=options.index, doc_type='mozdefhealth', body=json.dumps(healthlog))
    # post another doc with a static docid and tag
    # for use when querying for the latest sqs status
    healthlog['tags'] = ['mozdef', 'status', 'sqs-latest']
    es.save_event(index=options.index, doc_type='mozdefhealth', doc_id=getDocID(sqsid), body=json.dumps(healthlog))
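
For reference, the health/status document saved above ends up with roughly the following shape; all values below are invented for illustration, and the username value is redacted in the source.

# Illustrative shape of the healthlog document written above (values invented):
example_healthlog = {
    "utctimestamp": "2024-01-02T03:04:05+00:00",
    "hostname": "123456789012-us-west-2",      # '{account}-{region}'
    "processid": 12345,
    "processname": "./getQueueSizes.py",
    "severity": "INFO",
    "summary": "mozdef health/status",
    "category": "mozdef",
    "source": "aws-sqs",
    "tags": ["mozdef", "status", "sqs"],
    "details": {
        "username": "******",                  # redacted in the source
        "total_feeds": 2,
        "total_messages_ready": 7,
        "queues": [
            {"queue": "example-queue",
             "messages_ready": 7,
             "messages_inflight": 1,
             "messages_delayed": 0},
        ],
    },
}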