def esPruneIndexes():
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
        # do the pruning
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
                if pruning != '0':
                    index_to_prune = index
                    if rotation == 'daily':
                        idate = date.strftime(toUTC(datetime.now()) - timedelta(days=int(pruning)), '%Y%m%d')
                        index_to_prune += '-%s' % idate
                    elif rotation == 'monthly':
                        idate = date.strftime(datetime.utcnow() - timedelta(days=31 * int(pruning)), '%Y%m')
                        index_to_prune += '-%s' % idate

                    if index_to_prune in indices:
                        logger.info('Deleting index: %s' % index_to_prune)
                        es.delete_index(index_to_prune, True)
                    else:
                        logger.error('Error deleting index %s, index missing' % index_to_prune)
            except Exception as e:
                logger.error("Unhandled exception while deleting %s, terminating: %r" % (index_to_prune, e))
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
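# Minimal sketch (illustrative only, not part of the original script): for a
# hypothetical config row of index='events', rotation='daily', pruning='20',
# the prune target computed above could be reproduced standalone as:
#
#     from datetime import datetime, timedelta
#     idate = (datetime.utcnow() - timedelta(days=20)).strftime('%Y%m%d')
#     index_to_prune = 'events-%s' % idate   # e.g. 'events-20200101'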
def esRotateIndexes():
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()

        # calc dates for use in index names events-YYYYMMDD, alerts-YYYYMM, etc.
        odate_day = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m%d')
        odate_month = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m')
        ndate_day = date.strftime(toUTC(datetime.now()), '%Y%m%d')
        ndate_month = date.strftime(toUTC(datetime.now()), '%Y%m')

        # examine each index in the .conf file
        # for rotation settings
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
                if rotation != 'none':
                    oldindex = index
                    newindex = index
                    if rotation == 'daily':
                        oldindex += '-%s' % odate_day
                        newindex += '-%s' % ndate_day
                    elif rotation == 'monthly':
                        oldindex += '-%s' % odate_month
                        newindex += '-%s' % ndate_month
                        # do not rotate before the month ends
                        if oldindex == newindex:
                            logger.debug('do not rotate %s index, month has not changed yet' % index)
                            continue

                    if newindex not in indices:
                        logger.debug('Creating %s index' % newindex)
                        es.create_index(newindex)

                    # set aliases: events to events-YYYYMMDD
                    # and events-previous to events-YYYYMMDD-1
                    logger.debug('Setting {0} alias to index: {1}'.format(index, newindex))
                    es.create_alias(index, newindex)
                    if oldindex in indices:
                        logger.debug('Setting {0}-previous alias to index: {1}'.format(index, oldindex))
                        es.create_alias('%s-previous' % index, oldindex)
                    else:
                        logger.debug('Old index %s is missing, do not change %s-previous alias' % (oldindex, index))
            except Exception as e:
                logger.error("Unhandled exception while rotating %s, terminating: %r" % (index, e))

        indices = es.get_indices()
        # Create weekly aliases for certain indices
        week_ago_date = toUTC(datetime.now()) - timedelta(weeks=1)
        week_ago_str = week_ago_date.strftime('%Y%m%d')
        current_date = toUTC(datetime.now())
        for index in options.weekly_rotation_indices:
            weekly_index_alias = '%s-weekly' % index
            logger.debug('Trying to re-alias {0} to indices since {1}'.format(weekly_index_alias, week_ago_str))
            existing_weekly_indices = []
            for day_obj in daterange(week_ago_date, current_date):
                day_str = day_obj.strftime('%Y%m%d')
                day_index = index + '-' + str(day_str)
                if day_index in indices:
                    existing_weekly_indices.append(day_index)
                else:
                    logger.debug('%s not found, so cant assign weekly alias' % day_index)

            if existing_weekly_indices:
                logger.debug('Creating {0} alias for {1}'.format(weekly_index_alias, existing_weekly_indices))
                es.create_alias_multiple_indices(weekly_index_alias, existing_weekly_indices)
            else:
                logger.warning('No indices within the past week to assign events-weekly to')
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
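# Illustrative summary (not part of the original script): for a hypothetical
# daily-rotated index named 'events', a run on 20200102 UTC would leave the
# aliases pointing as follows:
#
#     events          -> events-20200102   (created first if missing)
#     events-previous -> events-20200101   (only if that index still exists)
#     events-weekly   -> all events-YYYYMMDD indices found for the trailing week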
event_index_name = current_date.strftime("events-%Y%m%d")
previous_event_index_name = (current_date - timedelta(days=1)).strftime("events-%Y%m%d")
weekly_index_alias = 'events-weekly'
alert_index_name = current_date.strftime("alerts-%Y%m")

# Load the default index settings/mapping template from disk
index_settings_str = ''
with open(args.default_mapping_file) as data_file:
    index_settings_str = data_file.read()
index_settings = json.loads(index_settings_str)

# Retry the initial connection to Elasticsearch; the for/else 'else' branch
# only runs if every attempt failed (i.e. the loop never hit 'break')
all_indices = []
total_num_tries = 15
for attempt in range(total_num_tries):
    try:
        all_indices = client.get_indices()
    except ConnectionError:
        print 'Unable to connect to Elasticsearch...retrying'
        sleep(5)
    else:
        break
else:
    print 'Cannot connect to Elasticsearch after ' + str(total_num_tries) + ' tries, exiting script.'
    exit(1)

# Pull tunable index settings from the backup config file
# (the second argument is used as the default value)
refresh_interval = getConfig('refresh_interval', '1s', args.backup_conf_file)
number_of_shards = getConfig('number_of_shards', '1', args.backup_conf_file)
number_of_replicas = getConfig('number_of_replicas', '1', args.backup_conf_file)
slowlog_threshold_query_warn = getConfig('slowlog_threshold_query_warn', '5s', args.backup_conf_file)
slowlog_threshold_fetch_warn = getConfig('slowlog_threshold_fetch_warn', '5s', args.backup_conf_file)
mapping_total_fields_limit = getConfig('mapping_total_fields_limit', '1000', args.backup_conf_file)
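# Note (assumption, not from the original script): getConfig is expected to fall
# back to its second argument when the key is absent from args.backup_conf_file,
# so running without a backup config simply keeps the defaults shown above
# ('1s' refresh, 1 shard, 1 replica, 5s slowlog thresholds, 1000-field limit).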