def __delete_index_list(index_list=None, dry_run=True):
    """Delete (or dry-run delete) every index in *index_list*.

    Args:
        index_list: curator.IndexList of the indices to delete. The
            historical default of None is kept for interface compatibility
            but is now rejected explicitly.
        dry_run: when True (the default) only simulate the deletion.

    Raises:
        ValueError: if index_list is None.
    """
    if index_list is None:
        # BUG FIX: fail fast with a clear message instead of the
        # AttributeError that index_list.indices would raise below.
        raise ValueError("index_list must be a curator.IndexList, not None")
    if dry_run:
        logger.info(
            msg=f"deleting indices: {index_list.indices}, mode: DRY-RUN")
        curator.DeleteIndices(index_list).do_dry_run()
    else:
        logger.info(msg=f"deleting indices: {index_list.indices}")
        curator.DeleteIndices(index_list).do_action()
def handler(event, context):
    """Lambda entry point: delete date-named indices older than
    ``max_age_days``, skipping any index whose name ends in ``-reload``.
    Honors the module-level ``dry_run_only`` flag.
    """
    es = Elasticsearch(
        hosts=[{'host': elasticsearch_host, 'port': 443}],
        http_auth=aws_auth,
        use_ssl=True,
        verify_certs=True,
        connection_class=RequestsHttpConnection,
    )
    logger.info(f"Connected to Elasticsearch at https://{elasticsearch_host}")
    ilo = curator.IndexList(es)
    try:
        # Keep only indices whose date-stamped name is older than the
        # cutoff, then drop the "-reload" indices from the candidate set.
        ilo.filter_by_age(source='name', direction='older',
                          timestring='%Y%m%d', unit='days',
                          unit_count=max_age_days)
        ilo.filter_by_regex(kind='suffix', value='-reload', exclude=True)
        deleter = curator.DeleteIndices(ilo)
        logger.info(f"Got {len(ilo.indices)} indices : {ilo.indices}")
        if dry_run_only:
            logger.info("Performing dry run only")
            deleter.do_dry_run()
        else:
            logger.info("Deleting real indices:")
            deleter.do_action()
    except NoIndices:
        # Every candidate was filtered away - nothing to do.
        logger.info("Got 0 indices, shutting down")
def main():
    """Delete logstash-* indices older than DELETE_OLDER_THAN days on
    every configured host, authenticating with basic-auth in the URI."""
    for host in HOSTS:
        scheme, _, domain = host.rpartition('://')
        if not scheme:
            scheme = 'http'
        basic_auth_uri = '{}://{}:{}@{}'.format(
            scheme, USERNAME, PASSWORD, domain)
        client = elasticsearch.Elasticsearch(
            [basic_auth_uri], verify_certs=True, ca_certs=certifi.where())
        candidates = curator.IndexList(client)
        candidates.filter_by_regex(kind='prefix', value='logstash-')
        candidates.filter_by_age(source='name', direction='older',
                                 timestring='%Y.%m.%d', unit='days',
                                 unit_count=DELETE_OLDER_THAN)
        if candidates.indices:
            logging.info('Deleting indices: {}'.format(
                ', '.join(candidates.indices)))
            curator.DeleteIndices(candidates).do_action()
        else:
            logging.info('No indices to delete')
def main():
    """Delete indices older than NUM_OF_DAYS from an Elasticsearch host.

    Usage: ``main.py NUM_OF_DAYS http://HOSTNAME[:PORT]`` with auth/TLS
    options supplied via ES_* environment variables.
    """
    if len(sys.argv) != 3:
        print('USAGE: [INDEX_PREFIX=(default "")] [ARCHIVE=(default false)] ... {} NUM_OF_DAYS http://HOSTNAME[:PORT]'.format(sys.argv[0]))
        print('NUM_OF_DAYS ... delete indices that are older than the given number of days.')
        print('HOSTNAME ... specifies which Elasticsearch hosts URL to search and delete indices from.')
        # BUG FIX: the original string had no '{}' placeholder, so
        # .format(TIMEOUT) silently did nothing; show the default value.
        print('TIMEOUT ... number of seconds to wait for master node response (default {}).'.format(TIMEOUT))
        print('INDEX_PREFIX ... specifies index prefix.')
        print('ARCHIVE ... specifies whether to remove archive indices (default false).')
        print('ES_USERNAME ... The username required by Elasticsearch.')
        print('ES_PASSWORD ... The password required by Elasticsearch.')
        print('ES_TLS ... enable TLS (default false).')
        print('ES_TLS_CA ... Path to TLS CA file.')
        print('ES_TLS_CERT ... Path to TLS certificate file.')
        print('ES_TLS_KEY ... Path to TLS key file.')
        sys.exit(1)

    # Pick the client flavor: basic auth > mutual TLS > plain.
    username = os.getenv("ES_USERNAME")
    password = os.getenv("ES_PASSWORD")
    if username is not None and password is not None:
        client = elasticsearch.Elasticsearch(sys.argv[2:],
                                             http_auth=(username, password))
    elif str2bool(os.getenv("ES_TLS", 'false')):
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH,
                                             cafile=os.getenv("ES_TLS_CA"))
        context.load_cert_chain(certfile=os.getenv("ES_TLS_CERT"),
                                keyfile=os.getenv("ES_TLS_KEY"))
        client = elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context)
    else:
        client = elasticsearch.Elasticsearch(sys.argv[2:])

    ilo = curator.IndexList(client)
    empty_list(ilo, 'Elasticsearch has no indices')
    prefix = os.getenv("INDEX_PREFIX", '')
    if prefix != '':
        prefix += '-'
    if str2bool(os.getenv("ARCHIVE", 'false')):
        filter_archive_indices(ilo, prefix)
    else:
        filter_main_indices(ilo, prefix)
    empty_list(ilo, 'No indices to delete')
    for index in ilo.working_list():
        print("Removing", index)
    timeout = int(os.getenv("TIMEOUT", TIMEOUT))
    curator.DeleteIndices(ilo, master_timeout=timeout).do_action()
def lambda_handler(event, context):
    """Delete indices matching the module-level *prefix* whose creation
    date is more than 30 minutes old."""
    # es = Elasticsearch([host])
    es = Elasticsearch(
        hosts=[{'host': host, 'port': 443}],
        http_auth=awsauth,
        use_ssl=True,
        verify_certs=True,
        connection_class=RequestsHttpConnection)
    idx_list = curator.IndexList(es)
    print(f"Indices existentes: {idx_list.indices}")
    idx_list.filter_by_regex(kind='prefix', value=prefix)
    print(f'Indices que cumplen el prefijo: {prefix}: {idx_list.indices}')
    idx_list.filter_by_age(source='creation_date', direction='older',
                           unit='minutes', unit_count=30)
    print(f'Indices que finalmente se eliminaran: {idx_list.indices}')
    # BUG FIX: 'if idx_list:' tested the IndexList object itself, which is
    # always truthy, so the "nothing to delete" branch was unreachable.
    # Test the filtered index names instead.
    if idx_list.indices:
        curator.DeleteIndices(idx_list).do_action()
    else:
        print("No hay indices por eliminar")
def execute(self, index_prefix: str, month_period: int) -> Optional[NoReturn]:
    """Delete indices matching *index_prefix* older than *month_period*
    months, then post a summary to Slack.

    Raises:
        Exception: if the index list cannot be fetched (after notifying
            Slack with the underlying error info).
    """
    try:
        indexes = curator.IndexList(self.es)
    except Exception:
        # BUG FIX: narrowed from a bare 'except:', which would also
        # swallow SystemExit/KeyboardInterrupt.
        self._to_slack(
            channel=self.SLACK_CHANNEL,
            text=f">*Exception Occured*\n>{sys.exc_info()}",
            bot_name=self.SLACK_BOT_NAME,
        )
        raise Exception("IndexList exception")
    indexes.filter_by_regex(
        kind="prefix",
        value=index_prefix,
    )
    indexes.filter_by_age(
        source="name",
        direction="older",
        timestring="%Y.%m.%d",
        unit="months",
        unit_count=month_period,
    )
    deleted_count = len(indexes.indices)
    if deleted_count != 0:
        curator.DeleteIndices(indexes).do_action()
    # Always report, even when nothing was deleted.
    self._to_slack(
        channel=self.SLACK_CHANNEL,
        text=
        f">*Index*\n>{index_prefix}\n>*Count*\n>{deleted_count}\n>*Period*\n>{month_period}",
        bot_name=self.SLACK_BOT_NAME,
    )
def main():
    """Delete jaeger-* indices older than NUM_OF_DAYS (argv[1]) from the
    Elasticsearch hosts listed on the command line."""
    if len(sys.argv) == 1:
        print(
            'USAGE: [TIMEOUT=(default 120)] %s NUM_OF_DAYS HOSTNAME[:PORT] ...'
            % sys.argv[0])
        print(
            'Specify a NUM_OF_DAYS that will delete indices that are older than the given NUM_OF_DAYS.'
        )
        print(
            'HOSTNAME ... specifies which ElasticSearch hosts to search and delete indices from.'
        )
        sys.exit(1)
    es_client = elasticsearch.Elasticsearch(sys.argv[2:])
    ilo = curator.IndexList(es_client)
    empty_list(ilo, 'ElasticSearch has no indices')
    # Restrict to jaeger indices whose date-stamped name is old enough.
    ilo.filter_by_regex(kind='prefix', value='jaeger-')
    ilo.filter_by_age(source='name', direction='older',
                      timestring='%Y-%m-%d', unit='days',
                      unit_count=int(sys.argv[1]))
    empty_list(ilo, 'No indices to delete')
    for idx in ilo.working_list():
        print("Removing", idx)
    master_timeout = int(os.getenv("TIMEOUT", 120))
    curator.DeleteIndices(ilo, master_timeout=master_timeout).do_action()
def lambda_handler(event, context):
    # Lambda entry point: delete date-stamped indices older than 90 days.
    # Build the Elasticsearch client.
    es = Elasticsearch(hosts=[{'host': host, 'port': 443}],
                       http_auth=awsauth,
                       use_ssl=True,
                       verify_certs=True,
                       connection_class=RequestsHttpConnection)
    index_list = curator.IndexList(es)
    # Filters by age: anything with a time stamp in the index name older
    # than 90 days. (The original comment said 30 days, but unit_count=90.)
    index_list.filter_by_age(source='name',
                             direction='older',
                             timestring='%Y.%m.%d',
                             unit='days',
                             unit_count=90)
    print("Found %s indices to delete" % len(index_list.indices))
    # If our filtered list contains any indices, delete them.
    if index_list.indices:
        curator.DeleteIndices(index_list).do_action()
def delete_old_indices(host, port, unit_count, time_unit, module):
    """Delete indices older than *unit_count* *time_unit*s from an
    AWS-hosted Elasticsearch domain and return the deleted index names."""
    service = 'es'
    region = 'eu-west-1'
    credentials = boto3.Session().get_credentials()
    awsauth = AWS4Auth(credentials.access_key,
                       credentials.secret_key,
                       region,
                       service,
                       session_token=credentials.token)
    # Signed HTTPS client for the managed ES endpoint.
    client = elasticsearch.Elasticsearch(
        http_auth=awsauth,
        hosts=[{'host': host, 'port': port}],
        use_ssl=True,
        verify_certs=True,
        connection_class=elasticsearch.RequestsHttpConnection)
    candidates = curator.IndexList(client)
    candidates.filter_by_age(source='name',
                             direction='older',
                             timestring="%Y.%m.%d",
                             unit=time_unit,
                             unit_count=int(unit_count))
    candidates.empty_list_check()
    curator.DeleteIndices(candidates).do_action()
    return candidates.working_list()
def deleteIndices(es, repository, keep):
    """Delete indices beyond the *keep* newest, but only those whose
    snapshot in *repository* exists and is in SUCCESS state.

    Indices with a missing or in-progress snapshot are excluded from the
    deletion set so data is never lost before it is safely snapshotted.
    """
    indices = curator.IndexList(es)
    # Iterate over a copy: the filters below mutate the underlying list,
    # and iterating the live list would skip entries.
    snapshots = list(indices.indices)
    try:
        # Keep the newest `keep` date-stamped indices.
        # BUG FIX: use a raw string - '\d' in a plain literal is an
        # invalid escape sequence (DeprecationWarning, error in 3.12+).
        indices.filter_by_count(count=keep, reverse=True,
                                pattern=r"^(.*)-\d{8}.*$")
        for snap in snapshots:
            if snap in getAllSnapshots(es, repository):
                snapshot = es.snapshot.get(
                    repository=repository, snapshot=snap)["snapshots"][0]
                if snapshot["state"] != "SUCCESS":
                    # Snapshot still running/failed: keep the index.
                    indices.filter_by_regex(kind="prefix", value=snap,
                                            exclude=True)
                    logger.info(
                        f"Snapshot {snap} is not ready. It is in {snapshot['state']} state"
                    )
                else:
                    logger.info(f"Snapshot {snap} already exists")
            else:
                # No snapshot at all: keep the index.
                indices.filter_by_regex(kind="prefix", value=snap,
                                        exclude=True)
        # Note: the original bound this to a local named 'deleteIndices',
        # shadowing this function; avoided here.
        curator.DeleteIndices(indices).do_action()
    except NoIndices:
        logger.info("No indices to delete")
def lambda_handler(event, context):
    """Delete indices with the configured prefix once they are older than
    the configured retention period (all settings come from env vars)."""
    awsauth = AWS4Auth(credentials.access_key,
                       credentials.secret_key,
                       os.environ['region'],
                       service,
                       session_token=credentials.token)
    es = Elasticsearch(hosts=[{'host': os.environ['host'], 'port': 443}],
                       http_auth=awsauth,
                       use_ssl=True,
                       verify_certs=True,
                       connection_class=RequestsHttpConnection,
                       timeout=120)
    # Delete indices older than retention_days.
    try:
        candidates = curator.IndexList(es)
        candidates.filter_by_regex(kind='prefix',
                                   value=os.environ['index_prefix'])
        candidates.filter_by_age(
            source='name',
            direction='older',
            timestring=os.environ['date_string'],
            unit='days',
            unit_count=int(os.environ['retention_days']) + 1)
        curator.DeleteIndices(candidates, master_timeout=180).do_action()
    except curator.exceptions.NoIndices:
        logging.info("No indices to delete")
    except curator.exceptions.FailedExecution:
        logging.error("Failed to delete indices", exc_info=True)
def main(event, context):
    """Delete indices older than *days*, always excluding .kibana and
    optionally excluding an extra regex-defined set."""
    # Build the Elasticsearch client.
    es = Elasticsearch(
        hosts=[{'host': endpoint, 'port': 443}],
        http_auth=awsauth,
        use_ssl=True,
        verify_certs=True,
        connection_class=RequestsHttpConnection
    )
    # Create the index list and filter, excluding .kibana index.
    index_list = curator.IndexList(es)
    index_list.filter_kibana(exclude=True)
    # Optionally exclude indices matching the configured regex.
    # (The age filter used to be duplicated in both branches of this
    # if/else; it is applied unconditionally below instead.)
    if exclude != "" and regex != "":
        index_list.filter_by_regex(kind=regex, value=exclude, exclude=True)
    index_list.filter_by_age(source='creation_date',
                             direction='older',
                             unit='days',
                             unit_count=days)
    # Delete indexes.
    print("Found %s indexes to delete" % len(index_list.indices))
    if len(index_list.indices) > 0:
        print('Indexes found: {0}'.format(index_list.indices))
        curator.DeleteIndices(index_list).do_action()
        print('Indices deleted successfully.')
    else:
        print('No indexes to delete.')
def lambda_handler(event, context):
    """Delete date-named indices with INDEX_PREFIX older than RETENTION
    days and return {'deleted': [index names]}."""
    cluster_endpoint = 'https://%s' % ES_ENDPOINT
    index_prefix = os.environ['INDEX_PREFIX']
    retention = int(os.environ['RETENTION'])
    cluster_name = ES_ENDPOINT.split('.')[0]
    deleted_indices = []
    es = Elasticsearch(cluster_endpoint,
                       connection_class=RequestsHttpConnection,
                       http_auth=auth,
                       use_ssl=True,
                       verify_certs=True,
                       ca_certs=certifi.where())
    print('Checking "%s" indices on %s cluster.' %
          (index_prefix, cluster_name))
    matched = curator.IndexList(es)
    try:
        matched.filter_by_regex(kind='prefix', value=index_prefix)
        matched.filter_by_age(source='name',
                              direction='older',
                              timestring='%Y.%m.%d',
                              unit='days',
                              unit_count=retention)
        curator.DeleteIndices(matched).do_action()
    except NoIndices:
        # Nothing matched; an empty 'deleted' list is the right answer.
        pass
    deleted_indices.extend(matched.working_list())
    lambda_response = {'deleted': deleted_indices}
    print(lambda_response)
    return lambda_response
def lambda_handler(event, context):
    """Delete cflogs-* indices whose creation date is older than one day."""
    host = 'elasticsearch2.XXX.rocks'
    awsauth = AWS4Auth(os.environ['AK'], os.environ['SK'], 'us-east-1', 'es')
    client = Elasticsearch(
        hosts=[{'host': host, 'port': 443}],
        http_auth=awsauth,
        use_ssl=True,
        verify_certs=False,  # use custom domain name will fail on verify certs
        connection_class=RequestsHttpConnection)
    ilo = curator.IndexList(client)
    # BUG FIX: the original used Python 2 'print' statements, which are a
    # SyntaxError on Python 3 (the rest of this codebase is Python 3).
    if ilo.indices == []:
        print("No indices available in ES")
    else:
        ilo.filter_by_regex(kind='prefix', value='cflogs-')
        ilo.filter_by_age(source='creation_date',
                          direction='older',
                          unit='days',
                          unit_count=1)
        if ilo.indices == []:
            print("No matched indices in ES")
        else:
            print("Find matched indices in ES:")
            print(ilo.indices)
            delete_indices = curator.DeleteIndices(ilo)
            delete_indices.do_action()
    return
def lambda_handler(event, context):
    """Optionally snapshot, then optionally delete, filtered indices.

    Behavior is driven by the module-level SNAPSHOT_* / DELETE_* settings;
    TEST_MODE reports what would happen without performing any action.
    Returns a dict describing what was (or would be) snapshotted/deleted.
    """
    if not SNAPSHOT_INDEX_FILTERS and not DELETE_INDEX_FILTERS:
        raise ValueError(
            'No value for delete_index_filters or snapshot_index_filters found - aborting'
        )
    if SNAPSHOT_INDEX_FILTERS:
        if not SNAPSHOT_BUCKET or not SNAPSHOT_BUCKET_REGION or not SNAPSHOT_NAME:
            raise ValueError(
                'Some required snapshot parameters have no values - aborting')
    es = elasticsearch_client()
    result = {}

    if SNAPSHOT_INDEX_FILTERS:
        snapshot_name = datetime.now().strftime(SNAPSHOT_NAME)
        indices = filter_indices(es, SNAPSHOT_INDEX_FILTERS)
        if TEST_MODE:
            result['snapshots'] = indices.working_list()
            result['test_mode'] = True
        elif indices.working_list():
            # Register the S3 repository on first use.
            if not curator.repository_exists(es, repository=SNAPSHOT_BUCKET):
                print('Registering snapshot repository in s3://{}'.format(
                    SNAPSHOT_BUCKET))
                response = curator.create_repository(
                    client=es,
                    repository=SNAPSHOT_BUCKET,
                    repo_type='s3',
                    bucket=SNAPSHOT_BUCKET,
                    region=SNAPSHOT_BUCKET_REGION)
                print('Response: ' + str(response))
            print('Creating a snapshot of indices')
            curator.Snapshot(indices,
                             repository=SNAPSHOT_BUCKET,
                             name=snapshot_name).do_action()
            result['snapshot'] = indices.working_list()

    if DELETE_INDEX_FILTERS:
        indices = filter_indices(es, DELETE_INDEX_FILTERS)
        if TEST_MODE:
            result['deleted'] = indices.working_list()
            result['test_mode'] = True
        elif indices.working_list():
            print('Deleting indices')
            curator.DeleteIndices(indices).do_action()
            result['deleted'] = indices.working_list()
    return result
def main():
    """Delete indices older than NUM_OF_DAYS days; supports plain,
    rollover, and archive index layouts, with TLS / basic auth taken from
    ES_* environment variables."""
    if len(sys.argv) != 3:
        print('USAGE: [INDEX_PREFIX=(default "")] [ARCHIVE=(default false)] ... {} NUM_OF_DAYS http://HOSTNAME[:PORT]'.format(sys.argv[0]))
        print('NUM_OF_DAYS ... delete indices that are older than the given number of days.')
        print('HOSTNAME ... specifies which Elasticsearch hosts URL to search and delete indices from.')
        # BUG FIX: the original string had no '{}' placeholder, so
        # .format(TIMEOUT) silently did nothing; show the default value.
        print('TIMEOUT ... number of seconds to wait for master node response (default {}).'.format(TIMEOUT))
        print('INDEX_PREFIX ... specifies index prefix.')
        print('ARCHIVE ... specifies whether to remove archive indices (only works for rollover) (default false).')
        print('ROLLOVER ... specifies whether to remove indices created by rollover (default false).')
        print('ES_USERNAME ... The username required by Elasticsearch.')
        print('ES_PASSWORD ... The password required by Elasticsearch.')
        print('ES_TLS ... enable TLS (default false).')
        print('ES_TLS_CA ... Path to TLS CA file.')
        print('ES_TLS_CERT ... Path to TLS certificate file.')
        print('ES_TLS_KEY ... Path to TLS key file.')
        print('ES_TLS_SKIP_HOST_VERIFY ... (insecure) Skip server\'s certificate chain and host name verification.')
        sys.exit(1)

    client = create_client(
        os.getenv("ES_USERNAME"), os.getenv("ES_PASSWORD"),
        str2bool(os.getenv("ES_TLS", 'false')), os.getenv("ES_TLS_CA"),
        os.getenv("ES_TLS_CERT"), os.getenv("ES_TLS_KEY"),
        str2bool(os.getenv("ES_TLS_SKIP_HOST_VERIFY", 'false')))
    ilo = curator.IndexList(client)
    empty_list(ilo, 'Elasticsearch has no indices')
    prefix = os.getenv("INDEX_PREFIX", '')
    if prefix != '':
        prefix += '-'
    # Pick the filter matching the index layout (flattened from the
    # original nested else/if for readability; same behavior).
    if str2bool(os.getenv("ARCHIVE", 'false')):
        filter_archive_indices_rollover(ilo, prefix)
    elif str2bool(os.getenv("ROLLOVER", 'false')):
        filter_main_indices_rollover(ilo, prefix)
    else:
        filter_main_indices(ilo, prefix)
    empty_list(ilo, 'No indices to delete')
    for index in ilo.working_list():
        print("Removing", index)
    timeout = int(os.getenv("TIMEOUT", TIMEOUT))
    curator.DeleteIndices(ilo, master_timeout=timeout).do_action()
def test_verify_result_positive(self):
    """_verify_result reports success for an empty failure list."""
    client = Mock()
    client.indices.get_settings.return_value = testvars.settings_four
    client.cluster.state.return_value = testvars.clu_state_four
    client.indices.stats.return_value = testvars.stats_four
    client.indices.delete.return_value = None
    deleter = curator.DeleteIndices(curator.IndexList(client))
    self.assertTrue(deleter._verify_result([], 2))
def test_do_action_not_successful(self):
    """do_action still returns None when the delete is a no-op."""
    client = Mock()
    client.indices.get_settings.return_value = testvars.settings_four
    client.cluster.state.return_value = testvars.clu_state_four
    client.indices.stats.return_value = testvars.stats_four
    client.indices.delete.return_value = None
    deleter = curator.DeleteIndices(curator.IndexList(client))
    self.assertIsNone(deleter.do_action())
def test_init(self):
    """The constructor stores both the index list and its client."""
    client = Mock()
    client.indices.get_settings.return_value = testvars.settings_one
    client.cluster.state.return_value = testvars.clu_state_one
    client.indices.stats.return_value = testvars.stats_one
    ilo = curator.IndexList(client)
    deleter = curator.DeleteIndices(ilo)
    self.assertEqual(ilo, deleter.index_list)
    self.assertEqual(client, deleter.client)
def test_do_action_raises_exception(self):
    """A failing delete call surfaces as curator.FailedExecution."""
    client = Mock()
    client.indices.get_settings.return_value = testvars.settings_four
    client.cluster.state.return_value = testvars.clu_state_four
    client.indices.stats.return_value = testvars.stats_four
    client.indices.delete.return_value = None
    client.indices.delete.side_effect = testvars.fake_fail
    deleter = curator.DeleteIndices(curator.IndexList(client))
    self.assertRaises(curator.FailedExecution, deleter.do_action)
def test_do_action(self):
    """do_action completes and returns None against a healthy 5.x mock."""
    client = Mock()
    client.info.return_value = {'version': {'number': '5.0.0'}}
    client.indices.get_settings.return_value = testvars.settings_four
    client.cluster.state.return_value = testvars.clu_state_four
    client.indices.stats.return_value = testvars.stats_four
    client.indices.delete.return_value = None
    deleter = curator.DeleteIndices(curator.IndexList(client))
    self.assertIsNone(deleter.do_action())
def cleanup_indices(es_host, es_port, indices):
    """Best-effort deletion of all indices whose name starts with *indices*.

    Errors (e.g. cluster unreachable) are reported but deliberately not
    raised - this is a cleanup helper.
    """
    try:
        client = elasticsearch.Elasticsearch(hosts=es_host, port=es_port)
        ilo = curator.IndexList(client)
        ilo.filter_by_regex(kind='prefix', value=indices)
        if ilo.indices:
            curator.DeleteIndices(ilo).do_action()
    except Exception as e:
        # BUG FIX: narrowed from a bare 'except:' (which also caught
        # SystemExit/KeyboardInterrupt), fixed the "ingnore" typo, and
        # report the actual error instead of swallowing it silently.
        print("ignore if not available: {}".format(e))
def handler(event, context):
    """Delete aged time-series indices on every configured cluster.

    Reads serverless-curator.yaml and returns
    {'deleted': {cluster_name: [deleted index names]}}.
    'event' and 'context' are unused but required by the Lambda signature.
    """
    with open('serverless-curator.yaml') as config_file:
        # SECURITY/BUG FIX: yaml.load() without an explicit Loader is
        # deprecated and can execute arbitrary constructors; safe_load is
        # the correct call for plain configuration data.
        config = yaml.safe_load(config_file)
    # Create a place to track any indices that are deleted.
    deleted_indices = {}
    # We can define multiple Elasticsearch clusters to manage, so we'll
    # have an outer loop for working through them.
    for cluster_config in config:
        cluster_name = cluster_config['name']
        deleted_indices[cluster_name] = []
        # Create a connection to the cluster. For managed clusters in
        # Elastic Cloud, SSL security can be enabled:
        # es = Elasticsearch(cluster_config['endpoint'], use_ssl=True,
        #                    verify_certs=True, ca_certs=certifi.where())
        es = Elasticsearch(cluster_config['endpoint'])
        # Work through each set of time-series indices defined in our
        # config for this cluster.
        for index in cluster_config['indices']:
            prefix = index['prefix']
            print('Checking "%s" indices on %s cluster.' %
                  (prefix, cluster_name))
            # Fetch all the index names.
            index_list = curator.IndexList(es)
            try:
                # Reduce the list to those that match the prefix, then by age.
                index_list.filter_by_regex(kind='prefix', value=prefix)
                index_list.filter_by_age(source='name',
                                         direction='older',
                                         timestring='%Y.%m.%d',
                                         unit='days',
                                         unit_count=index['days'])
                curator.DeleteIndices(index_list).do_action()
            except NoIndices:
                # Nothing left in the list after filtering - that's OK.
                pass
            # Record the names of any indices we removed.
            deleted_indices[cluster_name].extend(index_list.working_list())
    lambda_response = {'deleted': deleted_indices}
    print(lambda_response)
    return lambda_response
def delete_index(self, index_prefix, days):
    '''Delete indices with *index_prefix* equal to or older than *days*.

    Returns:
        True on success, False if anything went wrong.
    '''
    try:
        ilo = curator.IndexList(self.es)
        ilo.filter_by_regex(kind='prefix', value=index_prefix)
        ilo.filter_by_age(source='name', direction='older',
                          timestring='%Y.%m.%d', unit='days',
                          unit_count=days)
        curator.DeleteIndices(ilo).do_action()
        # BUG FIX: the original fell through and returned None on success
        # while returning False on failure; return True explicitly so
        # callers can distinguish the outcomes.
        return True
    except Exception as e:
        # TODO: log exception
        return False
def purge_index(index_name, days_to_keep):
    """Delete indices prefixed with *index_name* that are older than
    *days_to_keep* days; silently does nothing when nothing matches."""
    try:
        candidates = curator.IndexList(client)
        candidates.filter_by_regex(kind='prefix', value=index_name)
        candidates.filter_by_age(source='name',
                                 direction='older',
                                 timestring='%Y.%m.%d',
                                 unit='days',
                                 unit_count=days_to_keep)
        curator.DeleteIndices(candidates).do_action()
    except NoIndices:
        # An empty candidate set is expected and fine.
        pass
def lambda_handler(event, context):
    """Delete indices older than event['count'] event['unit']s on the ES
    domain described by the event, with a guard timer that fires just
    before the Lambda deadline."""
    print(event)
    # To catch lambda timeout: fire shortly before the deadline.
    timer = threading.Timer(
        (context.get_remaining_time_in_millis() / 1000.00) - 0.5,
        timeout,
        args=[event, context])
    timer.start()
    try:
        awsauth = AWS4Auth(credentials.access_key,
                           credentials.secret_key,
                           event['region'],
                           service,
                           session_token=credentials.token)
        # Build the Elasticsearch client.
        es = Elasticsearch(
            hosts=[{'host': event['host'], 'port': event['port']}],
            http_auth=awsauth,
            use_ssl=True,
            verify_certs=True,
            connection_class=RequestsHttpConnection)
        index_list = curator.IndexList(es)
        # Filters by age, anything created more than x weeks ago.
        index_list.filter_by_age(source='creation_date',
                                 direction='older',
                                 unit=event['unit'],
                                 unit_count=event['count'])
        # Delete all indices in the filtered list. NOTE: do_action()
        # returns None on success, so 'result' is informational only.
        result = curator.DeleteIndices(index_list).do_action()
        return result
    except Exception as e:
        print('Function failed due to exception:')
        print(e)
        # BUG FIX: chain the original exception so the real cause appears
        # in the traceback instead of being discarded.
        raise Exception('unexpected error (check the lambda logs)') from e
    finally:
        timer.cancel()
def cleaner(event, context):
    """Remove indices that are older than X days.

    Uses the LEVELS mapping of prefix -> max age, unless the event
    overrides it with a single prefix/age pair.
    """
    todo = LEVELS
    if event and AGE_KEY in event and PREFIX_KEY in event:
        todo = {event[PREFIX_KEY]: event[AGE_KEY]}
    for prefix, age in todo.items():
        # Indices exist under both dotted and dashed date formats; the
        # original duplicated this block verbatim for each format.
        for timestring in ('%Y.%m.%d', '%Y-%m-%d'):
            ilo = filter(prefix, timestring, age)
            logger.info(f'Indices to delete: {ilo.working_list()}')
            if ilo.working_list():
                curator.DeleteIndices(ilo).do_action()
                logger.info("Deleted indices")
def lambda_handler(event, context):
    """Delete jaeger-* indices created more than ES_RETENTION_DAYS days
    ago; each failure mode logs an error and returns early."""
    try:
        retention_days = int(os.environ['ES_RETENTION_DAYS'])
    except (KeyError, ValueError):
        # BUG FIX: a missing variable raises KeyError, which the original
        # did not catch despite this being exactly the advertised failure.
        logger.error("ES_RETENTION_DAYS must be set as environment variable")
        return
    try:
        index_list = curator.IndexList(client)
    except Exception as e:
        logger.error("Failed to connect to ES DB with error: {}".format(e))
        return
    try:
        index_list.filter_by_regex(kind='prefix', value='jaeger-')
    except Exception as e:
        logger.error(
            "Filtering by jeager preffix failed, received error: {}".format(e))
        return
    try:
        index_list.filter_by_age(source='creation_date',
                                 direction='older',
                                 unit='days',
                                 unit_count=retention_days)
    except Exception as e:
        logger.error("Filtering by age failed with error: {}".format(e))
        return
    indexes_to_delete = index_list.working_list()
    if len(indexes_to_delete) > 0:
        try:
            # BUG FIX: list.sort() sorts in place and returns None, so the
            # original always logged "Deleting indexes: None"; sorted()
            # returns the sorted copy we actually want to log.
            logger.info("Deleting indexes: {}".format(
                sorted(indexes_to_delete)))
            curator.DeleteIndices(index_list).do_action()
        except Exception as e:
            logger.error(
                "Got error {} when trying to delete indexes".format(e))
            return
    return {
        'statusCode': 200,
        'body':
        json.dumps('Removed indexes for last {} days'.format(retention_days))
    }
def main():
    """Run a curator dry run over jaeger indices older than NUM_OF_DAYS
    (argv[1]) on the hosts given on the command line."""
    if len(sys.argv) == 1:
        print(
            'USAGE: [TIMEOUT=(default 120)] [INDEX_PREFIX=(default "")] [ARCHIVE=(default false)] {} NUM_OF_DAYS HOSTNAME[:PORT]'
            .format(sys.argv[0]))
        print(
            'Specify a NUM_OF_DAYS that will delete indices that are older than the given NUM_OF_DAYS.'
        )
        print(
            'HOSTNAME ... specifies which ElasticSearch hosts to search and delete indices from.'
        )
        print('INDEX_PREFIX ... specifies index prefix.')
        print(
            'ARCHIVE ... specifies whether to remove archive indices. Use true or false'
        )
        sys.exit(1)
    username = os.getenv("ES_USERNAME")
    password = os.getenv("ES_PASSWORD")
    if username is not None and password is not None:
        es_client = elasticsearch.Elasticsearch(
            sys.argv[2:], http_auth=(username, password))
    else:
        es_client = elasticsearch.Elasticsearch(sys.argv[2:])
    ilo = curator.IndexList(es_client)
    empty_list(ilo, 'ElasticSearch has no indices')
    prefix = os.getenv("INDEX_PREFIX", '')
    if prefix != '':
        prefix += '-'
    prefix += 'jaeger'
    if str2bool(os.getenv("ARCHIVE", 'false')):
        filter_archive_indices(ilo, prefix)
    else:
        filter_main_indices(ilo, prefix)
    empty_list(ilo, 'No indices to delete')
    for idx in ilo.working_list():
        print("Removing", idx)
    master_timeout = int(os.getenv("TIMEOUT", 120))
    # NOTE(review): this is a curator dry run only - indices are listed
    # but never actually deleted, despite the "Removing" output above.
    # Looks intentional (safety default); confirm before changing.
    curator.DeleteIndices(ilo, master_timeout=master_timeout).do_dry_run()
def lambda_handler(event, context):
    """Snapshot all indices into the 's3' repository, report the outcome
    to Slack, then delete indices older than one month.

    Prior to running snapshots, we're checking if we can connect, the
    current status of the cluster, and that the repository exists
    (recreating it if necessary).
    """
    try:
        health = es.cat.health().split(" ")[3]
        repo = es.cat.repositories()
        if health == 'red':
            slackpost('danger', 'Cluster status is RED! Investigate ASAP!! ')
            # BUG FIX: the original bare 'exit' was a no-op expression
            # (the builtin was referenced but never called); actually stop
            # instead of snapshotting a red cluster.
            return
        if repo is None:
            slackpost(
                'warning', 'Missing repository for snapshot.'
                'Trying to recreate the repository...')
            try:
                es.snapshot.create_repository(repository='s3',
                                              body=repository_body)
                es.snapshot.get_repository('s3')
                slackpost('good', 'Successfully re-created the repository.')
            except exceptions.NotFoundError:
                slackpost('danger',
                          'Failed to create the repository. Please check!')
        # Create the snapshot
        ilo = curator.IndexList(es)
        create_snapshot = curator.Snapshot(ilo=ilo,
                                           repository='s3',
                                           name=snapshot_name)
        create_snapshot.do_action()
        create_snapshot_status = create_snapshot.get_state()
        if create_snapshot_status == 'SUCCESS':
            msg = "Successfully created new snapshot:\n" + snapshot_name
            color = 'good'
            slackpost(color, msg)
        else:
            msg = "Failed to create new snapshot"
            color = 'danger'
            slackpost(color, msg)
        # Deleting old indices
        ilo.filter_by_age(source='creation_date',
                          direction='older',
                          unit='months',
                          unit_count=1)
        # BUG FIX: the original constructed DeleteIndices but never called
        # do_action(), so old indices were never actually deleted.
        curator.DeleteIndices(ilo).do_action()
    except exceptions.ConnectionError as e:
        slackpost('danger', e)