def main():
    '''
    Look for events that contain username and a mac address
    Add the correlation to the intelligence index.
    '''
    logger.debug('starting')
    logger.debug(options)

    # Resolve configured ES servers into a plain list of URL strings.
    server_urls = ['{0}'.format(server) for server in options.esservers]
    es = ElasticsearchClient(server_urls)

    # Make sure the destination index exists before writing correlations.
    es.create_index('intelligence')

    # The OUI file maps mac-address prefixes to hardware vendors.
    macassignments = readOUIFile(options.ouifilename)

    # Find events that carry both a username and a mac address,
    # then persist the resulting correlations.
    correlations = esSearch(es, macassignments=macassignments)
    esStoreCorrelations(es, correlations)

    logger.debug('finished')
def esRotateIndexes():
    # Rotate dated Elasticsearch indices (events-YYYYMMDD daily,
    # alerts-YYYYMM monthly, per the .conf settings), repoint the bare
    # alias and the "-previous" alias, then rebuild the "-weekly" aliases
    # from whatever daily indices exist for the past week.
    # All configuration comes from the module-level `options`.
    if options.output == 'syslog':
        logger.addHandler(
            SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)
    logger.debug('started')
    # Default mapping applied to any index created below.
    with open(options.default_mapping_file, 'r') as mapping_file:
        default_mapping_contents = json.loads(mapping_file.read())
    try:
        es = ElasticsearchClient(
            (list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
        # calc dates for use in index names events-YYYYMMDD, alerts-YYYYMM, etc.
        # "odate" = yesterday (old index), "ndate" = today (new index).
        odate_day = date.strftime(
            toUTC(datetime.now()) - timedelta(days=1), '%Y%m%d')
        odate_month = date.strftime(
            toUTC(datetime.now()) - timedelta(days=1), '%Y%m')
        ndate_day = date.strftime(toUTC(datetime.now()), '%Y%m%d')
        ndate_month = date.strftime(toUTC(datetime.now()), '%Y%m')
        # examine each index in the .conf file
        # for rotation settings
        for (index, dobackup, rotation, pruning) in zip(options.indices,
                                                        options.dobackup,
                                                        options.rotation,
                                                        options.pruning):
            try:
                if rotation != 'none':
                    oldindex = index
                    newindex = index
                    if rotation == 'daily':
                        oldindex += '-%s' % odate_day
                        newindex += '-%s' % ndate_day
                    elif rotation == 'monthly':
                        oldindex += '-%s' % odate_month
                        newindex += '-%s' % ndate_month
                        # do not rotate before the month ends
                        if oldindex == newindex:
                            logger.debug(
                                'do not rotate %s index, month has not changed yet' % index)
                            continue
                    if newindex not in indices:
                        index_settings = {}
                        # only event indices get the tuned shard/replica/slowlog
                        # settings; everything else keeps ES defaults here
                        if 'events' in newindex:
                            index_settings = {
                                "index": {
                                    "refresh_interval": options.refresh_interval,
                                    "number_of_shards": options.number_of_shards,
                                    "number_of_replicas": options.number_of_replicas,
                                    "search.slowlog.threshold.query.warn": options.slowlog_threshold_query_warn,
                                    "search.slowlog.threshold.fetch.warn": options.slowlog_threshold_fetch_warn,
                                    "mapping.total_fields.limit": options.mapping_total_fields_limit
                                }
                            }
                        # NOTE: mutates the shared mapping dict each iteration
                        default_mapping_contents['settings'] = index_settings
                        logger.debug('Creating %s index' % newindex)
                        es.create_index(newindex, default_mapping_contents)
                    # set aliases: events to events-YYYYMMDD
                    # and events-previous to events-YYYYMMDD-1
                    logger.debug('Setting {0} alias to index: {1}'.format(
                        index, newindex))
                    es.create_alias(index, newindex)
                    if oldindex in indices:
                        logger.debug(
                            'Setting {0}-previous alias to index: {1}'.format(
                                index, oldindex))
                        es.create_alias('%s-previous' % index, oldindex)
                    else:
                        logger.debug(
                            'Old index %s is missing, do not change %s-previous alias' % (oldindex, index))
            except Exception as e:
                # per-index failure is logged and the loop continues with
                # the next configured index
                logger.error(
                    "Unhandled exception while rotating %s, terminating: %r" % (index, e))
        # refresh the index list so newly created indices are visible
        # to the weekly-alias pass below
        indices = es.get_indices()
        # Create weekly aliases for certain indices
        week_ago_date = toUTC(datetime.now()) - timedelta(weeks=1)
        week_ago_str = week_ago_date.strftime('%Y%m%d')
        current_date = toUTC(datetime.now())
        for index in options.weekly_rotation_indices:
            weekly_index_alias = '%s-weekly' % index
            logger.debug('Trying to re-alias {0} to indices since {1}'.format(
                weekly_index_alias, week_ago_str))
            existing_weekly_indices = []
            for day_obj in daterange(week_ago_date, current_date):
                day_str = day_obj.strftime('%Y%m%d')
                day_index = index + '-' + str(day_str)
                if day_index in indices:
                    existing_weekly_indices.append(day_index)
                else:
                    logger.debug('%s not found, so cant assign weekly alias' % day_index)
            if existing_weekly_indices:
                logger.debug('Creating {0} alias for {1}'.format(
                    weekly_index_alias, existing_weekly_indices))
                es.create_alias_multiple_indices(weekly_index_alias,
                                                 existing_weekly_indices)
            else:
                logger.warning(
                    'No indices within the past week to assign events-weekly to'
                )
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
index_settings['settings'] = { "index": { "refresh_interval": refresh_interval, "number_of_shards": number_of_shards, "number_of_replicas": number_of_replicas, "search.slowlog.threshold.query.warn": slowlog_threshold_query_warn, "search.slowlog.threshold.fetch.warn": slowlog_threshold_fetch_warn, "mapping.total_fields.limit": mapping_total_fields_limit } } # Create initial indices if event_index_name not in all_indices: print "Creating " + event_index_name client.create_index(event_index_name, index_config=index_settings) client.create_alias('events', event_index_name) if previous_event_index_name not in all_indices: print "Creating " + previous_event_index_name client.create_index(previous_event_index_name, index_config=index_settings) client.create_alias('events-previous', previous_event_index_name) if alert_index_name not in all_indices: print "Creating " + alert_index_name client.create_index(alert_index_name) client.create_alias('alerts', alert_index_name) if weekly_index_alias not in all_indices: print "Creating " + weekly_index_alias client.create_alias_multiple_indices(
# Excerpt from a larger setup routine: names such as refresh_interval,
# index_settings, client, all_indices and the *_index_name variables are
# defined earlier in the enclosing scope (not visible in this excerpt).
# Tuned index settings shared by every index created below.
index_options = {
    "index": {
        "refresh_interval": refresh_interval,
        "number_of_shards": number_of_shards,
        "number_of_replicas": number_of_replicas,
        "search.slowlog.threshold.query.warn": slowlog_threshold_query_warn,
        "search.slowlog.threshold.fetch.warn": slowlog_threshold_fetch_warn,
        "mapping.total_fields.limit": mapping_total_fields_limit
    }
}
index_settings['settings'] = index_options
# Create initial indices
# Each index is created only if absent, then its well-known alias is set.
if event_index_name not in all_indices:
    print "Creating " + event_index_name
    client.create_index(event_index_name, index_config=index_settings)
    client.create_alias('events', event_index_name)
if previous_event_index_name not in all_indices:
    print "Creating " + previous_event_index_name
    client.create_index(previous_event_index_name, index_config=index_settings)
    client.create_alias('events-previous', previous_event_index_name)
if alert_index_name not in all_indices:
    print "Creating " + alert_index_name
    client.create_index(alert_index_name, index_config=index_settings)
    client.create_alias('alerts', alert_index_name)
if weekly_index_alias not in all_indices:
    print "Creating " + weekly_index_alias
    # NOTE(review): call truncated in this excerpt; its arguments
    # continue beyond the visible source.
    client.create_alias_multiple_indices(
# Excerpt from a larger setup routine: names such as refresh_interval,
# index_settings, client, all_indices and the *_index_name variables are
# defined earlier in the enclosing scope (not visible in this excerpt).
# Tuned index settings shared by every index created below.
index_options = {
    "index": {
        "refresh_interval": refresh_interval,
        "number_of_shards": number_of_shards,
        "number_of_replicas": number_of_replicas,
        "search.slowlog.threshold.query.warn": slowlog_threshold_query_warn,
        "search.slowlog.threshold.fetch.warn": slowlog_threshold_fetch_warn,
        "mapping.total_fields.limit": mapping_total_fields_limit
    }
}
index_settings['settings'] = index_options
# Create initial indices
# Each index is created only if absent, then its well-known alias is set.
if event_index_name not in all_indices:
    print "Creating " + event_index_name
    client.create_index(event_index_name, index_config=index_settings)
    client.create_alias('events', event_index_name)
if previous_event_index_name not in all_indices:
    print "Creating " + previous_event_index_name
    client.create_index(previous_event_index_name, index_config=index_settings)
    client.create_alias('events-previous', previous_event_index_name)
if alert_index_name not in all_indices:
    print "Creating " + alert_index_name
    client.create_index(alert_index_name, index_config=index_settings)
    client.create_alias('alerts', alert_index_name)
if weekly_index_alias not in all_indices:
    print "Creating " + weekly_index_alias
    # the weekly alias initially spans the current and previous event indices
    client.create_alias_multiple_indices(weekly_index_alias, [event_index_name, previous_event_index_name])
def main():
    '''
    Get health and status stats and post to ES
    Post both as a historical reference (for charts)
    and as a static docid (for realtime current health/EPS displays)
    '''
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
    index = options.index
    # Mapping used only if the health index does not exist yet.
    with open(options.default_mapping_file, 'r') as mapping_file:
        default_mapping_contents = json.loads(mapping_file.read())
    if not es.index_exists(index):
        try:
            logger.debug('Creating %s index' % index)
            es.create_index(index, default_mapping_contents)
        except Exception as e:
            # best-effort: a create failure is logged and we continue,
            # letting save_event below fail loudly if the index is unusable
            logger.error("Unhandled exception, terminating: %r" % e)
    auth = HTTPBasicAuth(options.mquser, options.mqpassword)
    # Poll each RabbitMQ management API host for per-queue statistics.
    for server in options.mqservers:
        logger.debug('checking message queues on {0}'.format(server))
        r = requests.get(
            'http://{0}:{1}/api/queues'.format(server, options.mqapiport),
            auth=auth)
        mq = r.json()
        # setup a log entry for health/status.
        healthlog = dict(
            utctimestamp=toUTC(datetime.now()).isoformat(),
            hostname=server,
            processid=os.getpid(),
            processname=sys.argv[0],
            severity='INFO',
            summary='mozdef health/status',
            category='mozdef',
            type='mozdefhealth',
            source='mozdef',
            tags=[],
            details=[])
        # details is rebuilt as a dict; the username value is masked on purpose
        healthlog['details'] = dict(username='******')
        healthlog['details']['loadaverage'] = list(os.getloadavg())
        healthlog['details']['queues']=list()
        healthlog['details']['total_deliver_eps'] = 0
        healthlog['details']['total_publish_eps'] = 0
        healthlog['details']['total_messages_ready'] = 0
        healthlog['tags'] = ['mozdef', 'status']
        # Fold each queue's stats into the health log; queues without a
        # message_stats dict are skipped entirely.
        for m in mq:
            if 'message_stats' in m and isinstance(m['message_stats'], dict):
                if 'messages_ready' in m:
                    mready = m['messages_ready']
                    healthlog['details']['total_messages_ready'] += m['messages_ready']
                else:
                    mready = 0
                if 'messages_unacknowledged' in m:
                    munack = m['messages_unacknowledged']
                else:
                    munack = 0
                queueinfo=dict(
                    queue=m['name'],
                    vhost=m['vhost'],
                    messages_ready=mready,
                    messages_unacknowledged=munack)
                if 'deliver_details' in m['message_stats']:
                    queueinfo['deliver_eps'] = round(m['message_stats']['deliver_details']['rate'], 2)
                    healthlog['details']['total_deliver_eps'] += round(m['message_stats']['deliver_details']['rate'], 2)
                # NOTE(review): when both deliver_details and
                # deliver_no_ack_details are present, this overwrites
                # queueinfo['deliver_eps'] while the total accumulates both
                # rates — confirm whether the overwrite is intentional.
                if 'deliver_no_ack_details' in m['message_stats']:
                    queueinfo['deliver_eps'] = round(m['message_stats']['deliver_no_ack_details']['rate'], 2)
                    healthlog['details']['total_deliver_eps'] += round(m['message_stats']['deliver_no_ack_details']['rate'], 2)
                if 'publish_details' in m['message_stats']:
                    queueinfo['publish_eps'] = round(m['message_stats']['publish_details']['rate'], 2)
                    healthlog['details']['total_publish_eps'] += round(m['message_stats']['publish_details']['rate'], 2)
                healthlog['details']['queues'].append(queueinfo)
        # post to elastic search servers directly without going through
        # message queues in case there is an availability issue
        es.save_event(index=index, body=json.dumps(healthlog))
        # post another doc with a static docid and tag
        # for use when querying for the latest status
        healthlog['tags'] = ['mozdef', 'status', 'latest']
        es.save_event(index=index, doc_id=getDocID(server), body=json.dumps(healthlog))
def esRotateIndexes():
    # Rotate dated Elasticsearch indices (events-YYYYMMDD daily,
    # alerts-YYYYMM monthly, per the .conf settings), repoint the bare
    # alias and the "-previous" alias, then rebuild the "-weekly" aliases
    # from whatever daily indices exist for the past week.
    # All configuration comes from the module-level `options`.
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)
    logger.debug('started')
    # Default mapping applied to any index created below.
    with open(options.default_mapping_file, 'r') as mapping_file:
        default_mapping_contents = json.loads(mapping_file.read())
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
        # calc dates for use in index names events-YYYYMMDD, alerts-YYYYMM, etc.
        # "odate" = yesterday (old index), "ndate" = today (new index).
        odate_day = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m%d')
        odate_month = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m')
        ndate_day = date.strftime(toUTC(datetime.now()), '%Y%m%d')
        ndate_month = date.strftime(toUTC(datetime.now()), '%Y%m')
        # examine each index in the .conf file
        # for rotation settings
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
                if rotation != 'none':
                    oldindex = index
                    newindex = index
                    if rotation == 'daily':
                        oldindex += '-%s' % odate_day
                        newindex += '-%s' % ndate_day
                    elif rotation == 'monthly':
                        oldindex += '-%s' % odate_month
                        newindex += '-%s' % ndate_month
                        # do not rotate before the month ends
                        if oldindex == newindex:
                            logger.debug('do not rotate %s index, month has not changed yet' % index)
                            continue
                    if newindex not in indices:
                        index_settings = {}
                        # event indices get the tuned shard/replica/slowlog
                        # settings; alert indices are pinned to one shard
                        if 'events' in newindex:
                            index_settings = {
                                "index": {
                                    "refresh_interval": options.refresh_interval,
                                    "number_of_shards": options.number_of_shards,
                                    "number_of_replicas": options.number_of_replicas,
                                    "search.slowlog.threshold.query.warn": options.slowlog_threshold_query_warn,
                                    "search.slowlog.threshold.fetch.warn": options.slowlog_threshold_fetch_warn,
                                    "mapping.total_fields.limit": options.mapping_total_fields_limit
                                }
                            }
                        elif 'alerts' in newindex:
                            index_settings = {
                                "index": {
                                    "number_of_shards": 1
                                }
                            }
                        # NOTE: mutates the shared mapping dict each iteration
                        default_mapping_contents['settings'] = index_settings
                        logger.debug('Creating %s index' % newindex)
                        es.create_index(newindex, default_mapping_contents)
                    # set aliases: events to events-YYYYMMDD
                    # and events-previous to events-YYYYMMDD-1
                    logger.debug('Setting {0} alias to index: {1}'.format(index, newindex))
                    es.create_alias(index, newindex)
                    if oldindex in indices:
                        logger.debug('Setting {0}-previous alias to index: {1}'.format(index, oldindex))
                        es.create_alias('%s-previous' % index, oldindex)
                    else:
                        logger.debug('Old index %s is missing, do not change %s-previous alias' % (oldindex, index))
            except Exception as e:
                # per-index failure is logged and the loop continues with
                # the next configured index
                logger.error("Unhandled exception while rotating %s, terminating: %r" % (index, e))
        # refresh the index list so newly created indices are visible
        # to the weekly-alias pass below
        indices = es.get_indices()
        # Create weekly aliases for certain indices
        week_ago_date = toUTC(datetime.now()) - timedelta(weeks=1)
        week_ago_str = week_ago_date.strftime('%Y%m%d')
        current_date = toUTC(datetime.now())
        for index in options.weekly_rotation_indices:
            weekly_index_alias = '%s-weekly' % index
            logger.debug('Trying to re-alias {0} to indices since {1}'.format(weekly_index_alias, week_ago_str))
            existing_weekly_indices = []
            for day_obj in daterange(week_ago_date, current_date):
                day_str = day_obj.strftime('%Y%m%d')
                day_index = index + '-' + str(day_str)
                if day_index in indices:
                    existing_weekly_indices.append(day_index)
                else:
                    logger.debug('%s not found, so cant assign weekly alias' % day_index)
            if existing_weekly_indices:
                logger.debug('Creating {0} alias for {1}'.format(weekly_index_alias, existing_weekly_indices))
                es.create_alias_multiple_indices(weekly_index_alias, existing_weekly_indices)
            else:
                logger.warning('No indices within the past week to assign events-weekly to')
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)