Example #1
def esPruneIndexes():
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    try:
        es = ElasticsearchClient(['{0}'.format(s) for s in options.esservers])
        indices = es.get_indices()
        # do the pruning
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
                if pruning != '0':
                    index_to_prune = index
                    if rotation == 'daily':
                        idate = date.strftime(toUTC(datetime.now()) - timedelta(days=int(pruning)), '%Y%m%d')
                        index_to_prune += '-%s' % idate
                    elif rotation == 'monthly':
                        idate = date.strftime(datetime.utcnow() - timedelta(days=31 * int(pruning)), '%Y%m')
                        index_to_prune += '-%s' % idate

                    if index_to_prune in indices:
                        logger.debug('Deleting index: %s' % index_to_prune)
                        es.delete_index(index_to_prune, True)
                    else:
                        logger.error('Error deleting index %s, index missing' % index_to_prune)
            except Exception as e:
                logger.error("Unhandled exception while deleting %s, terminating: %r" % (index_to_prune, e))

    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #2
def esCloseIndices():
    logger.debug('started')
    try:
        es = ElasticsearchClient(['{0}'.format(s) for s in options.esservers])
        indices = es.get_indices()
    except Exception as e:
        logger.error("Unhandled exception while connecting to ES, terminating: %r" % (e))
        return

    # examine each index pulled from get_indices
    # to determine if it meets aging criteria
    month_ago_date = toUTC(datetime.now()) - timedelta(days=int(options.index_age))
    month_ago_date = month_ago_date.replace(tzinfo=None)
    for index in indices:
        if 'events' in index:
            index_date = index.rsplit('-', 1)[1]
            logger.debug("Checking to see if Index: %s can be closed." % (index))
            if len(index_date) == 8:
                index_date_obj = datetime.strptime(index_date, '%Y%m%d')
                try:
                    if month_ago_date > index_date_obj:
                        logger.debug("Index: %s will be closed." % (index))
                        es.close_index(index)
                    else:
                        logger.debug("Index: %s  does not meet aging criteria and will not be closed." % (index))
                except Exception as e:
                    logger.error("Unhandled exception while closing indices, terminating: %r" % (e))
Example #3
def esCloseIndices():
    logger.debug('started')
    try:
        es = ElasticsearchClient(
            ['{0}'.format(s) for s in options.esservers])
        indices = es.get_indices()
    except Exception as e:
        logger.error(
            "Unhandled exception while connecting to ES, terminating: %r" %
            (e))
        return

    # examine each index pulled from get_indices
    # to determine if it meets aging criteria
    month_ago_date = toUTC(
        datetime.now()) - timedelta(days=int(options.index_age))
    month_ago_date = month_ago_date.replace(tzinfo=None)
    for index in indices:
        if 'events' in index:
            index_date = index.rsplit('-', 1)[1]
            logger.debug("Checking to see if Index: %s can be closed." %
                         (index))
            if len(index_date) == 8:
                index_date_obj = datetime.strptime(index_date, '%Y%m%d')
                try:
                    if month_ago_date > index_date_obj:
                        logger.debug("Index: %s will be closed." % (index))
                        es.close_index(index)
                    else:
                        logger.debug(
                            "Index: %s does not meet aging criteria and will not be closed."
                            % (index))
                except Exception as e:
                    logger.error(
                        "Unhandled exception while closing indices, terminating: %r"
                        % (e))
Example #4
def esRotateIndexes():
    if options.output == 'syslog':
        logger.addHandler(
            SysLogHandler(address=(options.sysloghostname,
                                   options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    with open(options.default_mapping_file, 'r') as mapping_file:
        default_mapping_contents = json.loads(mapping_file.read())

    try:
        es = ElasticsearchClient(
            ['{0}'.format(s) for s in options.esservers])

        indices = es.get_indices()

        # calc dates for use in index names events-YYYYMMDD, alerts-YYYYMM, etc.
        odate_day = date.strftime(
            toUTC(datetime.now()) - timedelta(days=1), '%Y%m%d')
        odate_month = date.strftime(
            toUTC(datetime.now()) - timedelta(days=1), '%Y%m')
        ndate_day = date.strftime(toUTC(datetime.now()), '%Y%m%d')
        ndate_month = date.strftime(toUTC(datetime.now()), '%Y%m')
        # examine each index in the .conf file
        # for rotation settings
        for (index, dobackup, rotation,
             pruning) in zip(options.indices, options.dobackup,
                             options.rotation, options.pruning):
            try:
                if rotation != 'none':
                    oldindex = index
                    newindex = index
                    if rotation == 'daily':
                        oldindex += '-%s' % odate_day
                        newindex += '-%s' % ndate_day
                    elif rotation == 'monthly':
                        oldindex += '-%s' % odate_month
                        newindex += '-%s' % ndate_month
                        # do not rotate before the month ends
                        if oldindex == newindex:
                            logger.debug(
                                'do not rotate %s index, month has not changed yet'
                                % index)
                            continue
                    if newindex not in indices:
                        index_settings = {}
                        if 'events' in newindex:
                            index_settings = {
                                "index": {
                                    "refresh_interval":
                                    options.refresh_interval,
                                    "number_of_shards":
                                    options.number_of_shards,
                                    "number_of_replicas":
                                    options.number_of_replicas,
                                    "search.slowlog.threshold.query.warn":
                                    options.slowlog_threshold_query_warn,
                                    "search.slowlog.threshold.fetch.warn":
                                    options.slowlog_threshold_fetch_warn,
                                    "mapping.total_fields.limit":
                                    options.mapping_total_fields_limit
                                }
                            }
                        default_mapping_contents['settings'] = index_settings
                        logger.debug('Creating %s index' % newindex)
                        es.create_index(newindex, default_mapping_contents)
                    # set aliases: events to events-YYYYMMDD
                    # and events-previous to events-YYYYMMDD-1
                    logger.debug('Setting {0} alias to index: {1}'.format(
                        index, newindex))
                    es.create_alias(index, newindex)
                    if oldindex in indices:
                        logger.debug(
                            'Setting {0}-previous alias to index: {1}'.format(
                                index, oldindex))
                        es.create_alias('%s-previous' % index, oldindex)
                    else:
                        logger.debug(
                            'Old index %s is missing, do not change %s-previous alias'
                            % (oldindex, index))
            except Exception as e:
                logger.error(
                    "Unhandled exception while rotating %s, terminating: %r" %
                    (index, e))

        indices = es.get_indices()
        # Create weekly aliases for certain indices
        week_ago_date = toUTC(datetime.now()) - timedelta(weeks=1)
        week_ago_str = week_ago_date.strftime('%Y%m%d')
        current_date = toUTC(datetime.now())
        for index in options.weekly_rotation_indices:
            weekly_index_alias = '%s-weekly' % index
            logger.debug('Trying to re-alias {0} to indices since {1}'.format(
                weekly_index_alias, week_ago_str))
            existing_weekly_indices = []
            for day_obj in daterange(week_ago_date, current_date):
                day_str = day_obj.strftime('%Y%m%d')
                day_index = index + '-' + str(day_str)
                if day_index in indices:
                    existing_weekly_indices.append(day_index)
                else:
                    logger.debug("%s not found, so can't assign weekly alias" %
                                 day_index)
            if existing_weekly_indices:
                logger.debug('Creating {0} alias for {1}'.format(
                    weekly_index_alias, existing_weekly_indices))
                es.create_alias_multiple_indices(weekly_index_alias,
                                                 existing_weekly_indices)
            else:
                logger.warning(
                    'No indices within the past week to assign events-weekly to'
                )
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #5
weekly_index_alias = 'events-weekly'
alert_index_name = current_date.strftime("alerts-%Y%m")
kibana_index_name = '.kibana'
kibana_version = '5.6.14'

index_settings_str = ''
with open(args.default_mapping_file) as data_file:
    index_settings_str = data_file.read()

index_settings = json.loads(index_settings_str)

all_indices = []
total_num_tries = 15
for attempt in range(total_num_tries):
    try:
        all_indices = client.get_indices()
    except ConnectionError:
        print('Unable to connect to Elasticsearch...retrying')
        sleep(5)
    else:
        break
else:
    print('Cannot connect to Elasticsearch after ' + str(total_num_tries) +
          ' tries, exiting script.')
    exit(1)
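
# Note: the bare else above is Python's for/else construct. The else branch
# runs only when the loop finishes without hitting break, i.e. when every
# connection attempt raised ConnectionError.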

refresh_interval = getConfig('refresh_interval', '1s', args.backup_conf_file)
number_of_shards = getConfig('number_of_shards', '1', args.backup_conf_file)
number_of_replicas = getConfig('number_of_replicas', '1',
                               args.backup_conf_file)
slowlog_threshold_query_warn = getConfig('slowlog_threshold_query_warn', '5s',
                                         args.backup_conf_file)
Example #6
with open(args.default_mapping_file) as data_file:
    index_settings_str = data_file.read()

index_settings = json.loads(index_settings_str)

state_index_settings_str = ''
with open(args.state_mapping_file) as data_file:
    state_index_settings_str = data_file.read()

state_index_settings = json.loads(state_index_settings_str)

all_indices = []
total_num_tries = 15
for attempt in range(total_num_tries):
    try:
        all_indices = client.get_indices()
    except ConnectionError:
        print('Unable to connect to Elasticsearch...retrying')
        sleep(5)
    else:
        break
else:
    print('Cannot connect to Elasticsearch after ' + str(total_num_tries) +
          ' tries, exiting script.')
    exit(1)

refresh_interval = getConfig('refresh_interval', '1s', args.backup_conf_file)
number_of_shards = getConfig('number_of_shards', '1', args.backup_conf_file)
number_of_replicas = getConfig('number_of_replicas', '1',
                               args.backup_conf_file)
slowlog_threshold_query_warn = getConfig('slowlog_threshold_query_warn', '5s',
                                         args.backup_conf_file)
Example #7
previous_event_index_name = (current_date - timedelta(days=1)).strftime("events-%Y%m%d")
weekly_index_alias = 'events-weekly'
alert_index_name = current_date.strftime("alerts-%Y%m")
kibana_index_name = '.kibana'

index_settings_str = ''
with open(args.default_mapping_file) as data_file:
    index_settings_str = data_file.read()

index_settings = json.loads(index_settings_str)

all_indices = []
total_num_tries = 15
for attempt in range(total_num_tries):
    try:
        all_indices = client.get_indices()
    except ConnectionError:
        print('Unable to connect to Elasticsearch...retrying')
        sleep(5)
    else:
        break
else:
    print('Cannot connect to Elasticsearch after ' + str(total_num_tries) + ' tries, exiting script.')
    exit(1)

refresh_interval = getConfig('refresh_interval', '1s', args.backup_conf_file)
number_of_shards = getConfig('number_of_shards', '1', args.backup_conf_file)
number_of_replicas = getConfig('number_of_replicas', '1', args.backup_conf_file)
slowlog_threshold_query_warn = getConfig('slowlog_threshold_query_warn', '5s', args.backup_conf_file)
slowlog_threshold_fetch_warn = getConfig('slowlog_threshold_fetch_warn', '5s', args.backup_conf_file)
mapping_total_fields_limit = getConfig('mapping_total_fields_limit', '1000', args.backup_conf_file)
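
Examples #5 through #7 read their tuning values through a getConfig(option, default, configfile) helper that the excerpts do not define. A hypothetical sketch, assuming INI-style config files and plain string fallbacks; the project's actual helper may behave differently:

import configparser

def getConfig(option, default, configfile, section='options'):
    # Hypothetical sketch of the getConfig helper: look up `option` in an
    # INI-style config file, falling back to `default` when it is absent.
    parser = configparser.ConfigParser()
    if parser.read(configfile) and parser.has_option(section, option):
        return parser.get(section, option)
    return default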
Example #8
def esRotateIndexes():
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    with open(options.default_mapping_file, 'r') as mapping_file:
        default_mapping_contents = json.loads(mapping_file.read())

    try:
        es = ElasticsearchClient(['{0}'.format(s) for s in options.esservers])

        indices = es.get_indices()

        # calc dates for use in index names events-YYYYMMDD, alerts-YYYYMM, etc.
        odate_day = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m%d')
        odate_month = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m')
        ndate_day = date.strftime(toUTC(datetime.now()), '%Y%m%d')
        ndate_month = date.strftime(toUTC(datetime.now()), '%Y%m')
        # examine each index in the .conf file
        # for rotation settings
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
                if rotation != 'none':
                    oldindex = index
                    newindex = index
                    if rotation == 'daily':
                        oldindex += '-%s' % odate_day
                        newindex += '-%s' % ndate_day
                    elif rotation == 'monthly':
                        oldindex += '-%s' % odate_month
                        newindex += '-%s' % ndate_month
                        # do not rotate before the month ends
                        if oldindex == newindex:
                            logger.debug('do not rotate %s index, month has not changed yet' % index)
                            continue
                    if newindex not in indices:
                        index_settings = {}
                        if 'events' in newindex:
                            index_settings = {
                                "index": {
                                    "refresh_interval": options.refresh_interval,
                                    "number_of_shards": options.number_of_shards,
                                    "number_of_replicas": options.number_of_replicas,
                                    "search.slowlog.threshold.query.warn": options.slowlog_threshold_query_warn,
                                    "search.slowlog.threshold.fetch.warn": options.slowlog_threshold_fetch_warn,
                                    "mapping.total_fields.limit": options.mapping_total_fields_limit
                                }
                            }
                        elif 'alerts' in newindex:
                            index_settings = {
                                "index": {
                                    "number_of_shards": 1
                                }
                            }
                        default_mapping_contents['settings'] = index_settings
                        logger.debug('Creating %s index' % newindex)
                        es.create_index(newindex, default_mapping_contents)
                    # set aliases: events to events-YYYYMMDD
                    # and events-previous to events-YYYYMMDD-1
                    logger.debug('Setting {0} alias to index: {1}'.format(index, newindex))
                    es.create_alias(index, newindex)
                    if oldindex in indices:
                        logger.debug('Setting {0}-previous alias to index: {1}'.format(index, oldindex))
                        es.create_alias('%s-previous' % index, oldindex)
                    else:
                        logger.debug('Old index %s is missing, do not change %s-previous alias' % (oldindex, index))
            except Exception as e:
                logger.error("Unhandled exception while rotating %s, terminating: %r" % (index, e))

        indices = es.get_indices()
        # Create weekly aliases for certain indices
        week_ago_date = toUTC(datetime.now()) - timedelta(weeks=1)
        week_ago_str = week_ago_date.strftime('%Y%m%d')
        current_date = toUTC(datetime.now())
        for index in options.weekly_rotation_indices:
            weekly_index_alias = '%s-weekly' % index
            logger.debug('Trying to re-alias {0} to indices since {1}'.format(weekly_index_alias, week_ago_str))
            existing_weekly_indices = []
            for day_obj in daterange(week_ago_date, current_date):
                day_str = day_obj.strftime('%Y%m%d')
                day_index = index + '-' + str(day_str)
                if day_index in indices:
                    existing_weekly_indices.append(day_index)
                else:
                    logger.debug("%s not found, so can't assign weekly alias" % day_index)
            if existing_weekly_indices:
                logger.debug('Creating {0} alias for {1}'.format(weekly_index_alias, existing_weekly_indices))
                es.create_alias_multiple_indices(weekly_index_alias, existing_weekly_indices)
            else:
                logger.warning('No indices within the past week to assign events-weekly to')
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)