def update_settings(cls, records):
    """
    Update the index settings on the Elasticsearch server.

    :param records: list containing exactly one configuration record;
        the record must provide ``get_es_connection``, ``index_name``
        and ``settings``.
    """
    config, = records
    conn = config.get_es_connection()
    # Guard against a missing connection, consistent with the other
    # update_settings implementation in this file.
    if conn is None:
        return
    indices = Indices(conn)
    logger = cls.get_logger()

    if indices.exists_index(config.index_name):
        # Updating an existing index requires closing it and updating
        # it, then reopening the index
        #
        # See: http://www.elasticsearch.org/guide/en/elasticsearch/
        # reference/current
        # /indices-update-settings.html#update-settings-analysis
        logger.info('Index %s already exists' % config.index_name)
        logger.info('Closing Index %s' % config.index_name)
        indices.close_index(config.index_name)
        logger.info('Updating existing Index %s' % config.index_name)
        indices.update_settings(config.index_name, config.settings)
        logger.info('Opening Index %s' % config.index_name)
        indices.open_index(config.index_name)
    else:
        # Create a brand new index
        logger.info(
            'Creating new index %s with settings' % config.index_name
        )
        indices.create_index(config.index_name, config.settings)

    # ``records`` is already a list; the previous ``[records]`` passed a
    # nested list to write().
    cls.write(records, {'settings_updated': True})
def clear_server(self):
    """
    Clear the elasticsearch server.

    Deletes the configured index (if it exists) from the server.
    """
    # Instantiate the configuration record once instead of twice.
    config = self.ElasticConfig(1)
    conn = config.get_es_connection()
    index_name = config.get_index_name(name=None)
    Indices(conn).delete_index_if_exists(index_name)
def find_indices_to_optimize(connection, days_to_optimize=None,
                             hours_to_optimize=None, separator='.',
                             prefix='logstash-', out=sys.stdout,
                             err=sys.stderr):
    """ Generator that yields indices to optimize.

    :return: Yields tuples on the format ``(index_name, age)`` where
        ``index_name`` is the name of the index to optimize and ``age``
        is the number of seconds (a float value) by which the index is
        older than the calculated cutoff.
    """
    utc_now_time = time.time() + time.altzone
    # A cutoff of None means "do not consider indices of that granularity".
    days_cutoff = utc_now_time - days_to_optimize * 24 * 60 * 60 if days_to_optimize is not None else None
    hours_cutoff = utc_now_time - hours_to_optimize * 60 * 60 if hours_to_optimize is not None else None

    for index_name in sorted(set(Indices(connection).get_indices().keys())):
        if not index_name.startswith(prefix):
            print >> out, 'Skipping index due to missing prefix {0}: {1}'.format(
                prefix, index_name)
            continue

        unprefixed_index_name = index_name[len(prefix):]

        # find the timestamp parts (i.e ['2011', '01', '05'] from
        # '2011.01.05') using the configured separator
        parts = unprefixed_index_name.split(separator)

        # perform some basic validation
        if len(parts) < 3 or len(parts) > 4 or not all(
                [item.isdigit() for item in parts]):
            print >> err, 'Could not find a valid timestamp from the index: {0}'.format(
                index_name)
            continue

        # find the cutoff. if we have more than 3 parts in the timestamp,
        # the timestamp includes the hours and we should compare it to the
        # hours_cutoff, otherwise, we should use the days_cutoff
        cutoff = hours_cutoff
        if len(parts) == 3:
            cutoff = days_cutoff

        # but the cutoff might be none, if the current index only has three
        # parts (year.month.day) and we're only optimizing hourly indices:
        if cutoff is None:
            print >> out, 'Skipping {0} because it is of a type (hourly or daily) that I\'m not asked to optimize.'.format(
                index_name)
            continue

        index_epoch = get_index_epoch(unprefixed_index_name)

        # BUG FIX: an index is *older* than the cutoff when its epoch is
        # smaller (earlier), not greater.  The previous ``>`` comparison
        # yielded only the indices newer than the cutoff (with a negative
        # age) and skipped the old ones; the else-branch message, which
        # formats ``index_epoch - cutoff`` as a positive "above the cutoff"
        # duration, confirms it is meant for the newer indices.
        if index_epoch <= cutoff:
            yield index_name, cutoff - index_epoch
        else:
            print >> out, '{0} is {1} above the cutoff.'.format(
                index_name, timedelta(seconds=index_epoch - cutoff))
def main(): start = time.time() parser = make_parser() arguments = parser.parse_args() if not arguments.hours_to_keep and not arguments.days_to_keep: print >> sys.stderr, 'Invalid arguments: You must specify either the number of hours or the number of days to keep.' parser.print_help() return connection = pyes.ES('{0}:{1}'.format(arguments.host, arguments.port), timeout=arguments.timeout) if arguments.days_to_keep: print 'Deleting daily indices older than {0} days.'.format(arguments.days_to_keep) if arguments.hours_to_keep: print 'Deleting hourly indices older than {0} hours.'.format(arguments.hours_to_keep) print '' for index_name, expired_by in find_expired_indices(connection, arguments.days_to_keep, arguments.hours_to_keep, arguments.separator, arguments.prefix): expiration = timedelta(seconds=expired_by) if arguments.dry_run: print 'Would have attempted deleting index {0} because it is {1} older than the calculated cutoff.'.format(index_name, expiration) continue print 'Deleting index {0} because it was {1} older than cutoff.'.format(index_name, expiration) deletion = Indices(connection).delete_index_if_exists(index_name) # ES returns a dict on the format {u'acknowledged': True, u'ok': True} on success. if deletion.get('ok'): print 'Successfully deleted index: {0}'.format(index_name) else: print 'Error deleting index: {0}. ({1})'.format(index_name, deletion) print '' print 'Done in {0}.'.format(timedelta(seconds=time.time()-start))
def update_settings(cls, records):
    """
    Update the index settings on the Elasticsearch server.

    :param records: list containing exactly one configuration record;
        the record must provide ``get_es_connection``, ``index_name``
        and ``settings``.
    """
    config, = records
    conn = config.get_es_connection()
    # No connection available: nothing to update.
    if conn is None:
        return
    indices = Indices(conn)
    logger = cls.get_logger()

    if indices.exists_index(config.index_name):
        # Updating an existing index requires closing it and updating
        # it, then reopening the index
        #
        # See: http://www.elasticsearch.org/guide/en/elasticsearch/
        # reference/current
        # /indices-update-settings.html#update-settings-analysis
        logger.info('Index %s already exists' % config.index_name)
        logger.info('Closing Index %s' % config.index_name)
        indices.close_index(config.index_name)
        logger.info('Updating existing Index %s' % config.index_name)
        indices.update_settings(config.index_name, config.settings)
        logger.info('Opening Index %s' % config.index_name)
        indices.open_index(config.index_name)
    else:
        # Create a brand new index
        logger.info('Creating new index %s with settings' % config.index_name)
        indices.create_index(config.index_name, config.settings)

    # ``records`` is already a list; the previous ``[records]`` passed a
    # nested list to write().
    cls.write(records, {'settings_updated': True})