def main():
    """Ansible module entry point.

    Builds the argument spec, verifies optional dependencies, then deletes
    every Elasticsearch index matching the configured prefix/age filters via
    elasticsearch-curator.  Exits through module.exit_json (changed=True only
    when indices were actually removed) or module.fail_json on any error.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        host=dict(required=True),
        port=dict(default=80),
        prefix=dict(default="logstash"),
        older_than=dict(default=7),
        time_unit=dict(default="days",
                       choices=["hours", "days", "weeks", "months"]),
        timestring=dict(default="%Y.%m.%d"),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_DEPENDENCIES:
        module.fail_json(msg='requests_aws4auth, logging, elasticsearch and elasticsearch-curator are required for this module, install via pip or your package manager')

    try:
        client = get_elasticsearch_client(module)
        indices = filter_indices(module, client)
        if indices:
            curator.delete_indices(client, indices)
            module.exit_json(changed=True,
                             msg="The following indices have been removed: " + ", ".join(indices))
        else:
            module.exit_json(msg="No indices matching the filters were found. No indices removed")
    # 'except X as e' replaces the deprecated 'except (X), e' comma form,
    # which is a syntax error on Python 3; 'as' works from Python 2.6 on.
    except StandardError as e:
        module.fail_json(msg=str(e))
def delete_indices(esclient, indices):
    """Delete every index in *indices* (a mapping of name -> settings),
    logging each successful deletion.  No-op when *indices* is empty."""
    if not indices:
        return
    names = indices.keys()
    logger.debug("try to delete %s" % ','.join(names))
    for name in names:
        removed = curator.delete_indices(esclient, [name])
        if removed:
            logger.info('%s deleted' % name)
def prune_es_indices():
    """Prune ES indices older than the age defined in settings.PRUNE_OLDER_THAN."""
    # One entry per index family: the name prefix to match and the date
    # format embedded in that family's index names.
    curation_params = [
        {"prefix": "events_", "time_string": "%Y-%m-%d"},
        {"prefix": "logstash-", "time_string": "%Y.%m.%d"},
        {"prefix": "goldstone-", "time_string": "%Y.%m.%d"},
        {"prefix": "goldstone_metrics-", "time_string": "%Y.%m.%d"},
        {"prefix": "api_stats-", "time_string": "%Y.%m.%d"},
        {"prefix": "internal-", "time_string": "%Y.%m.%d"},
    ]

    client = es_conn()
    all_indices = curator.get_indices(client)
    deleted_indices = []

    for index_set in curation_params:
        # Each pass starts from the full index list and narrows it down,
        # first by name prefix, then by age.
        candidates = curator.apply_filter(
            all_indices,
            **curator.build_filter(kindOf='prefix',
                                   value=index_set['prefix']))
        candidates = curator.apply_filter(
            candidates,
            **curator.build_filter(kindOf='older_than',
                                   time_unit='days',
                                   timestring=index_set['time_string'],
                                   value=settings.PRUNE_OLDER_THAN))

        if candidates:
            try:
                curator.delete_indices(client, candidates)
                deleted_indices += candidates
            except Exception:
                # Best-effort: log the failure and continue with the
                # remaining index families.
                logger.exception("curator.delete_indices raised an exception")

    return deleted_indices
def main():
    """Ansible module entry point.

    Collects the module arguments, checks that the optional Python
    dependencies are importable, and removes all Elasticsearch indices that
    pass the prefix/age filters.  Terminates via module.exit_json or
    module.fail_json in every path.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            host=dict(required=True),
            port=dict(default=80),
            prefix=dict(default="logstash"),
            older_than=dict(default=7),
            time_unit=dict(default="days",
                           choices=["hours", "days", "weeks", "months"]),
            timestring=dict(default="%Y.%m.%d"),
        ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_DEPENDENCIES:
        module.fail_json(
            msg=
            'requests_aws4auth, logging, elasticsearch and elasticsearch-curator are required for this module, install via pip or your package manager'
        )

    try:
        client = get_elasticsearch_client(module)
        indices = filter_indices(module, client)
        if indices:
            curator.delete_indices(client, indices)
            module.exit_json(changed=True,
                             msg="The following indices have been removed: " +
                             ", ".join(indices))
        else:
            module.exit_json(
                msg=
                "No indices matching the filters were found. No indices removed"
            )
    # Fixed: 'except (StandardError), e' uses the removed comma syntax;
    # the 'as' form is correct on Python 2.6+ and required on Python 3.
    except StandardError as e:
        module.fail_json(msg=str(e))
def delete_indices(esclient, indices, settings):
    """Delete the given indices one at a time under the shared lock,
    recording every attempt in the module-level ``_delete`` list and
    reporting each outcome to the logger and to ``dopey_summary``.

    :type esclient: elasticsearch.Elasticsearch
    :type indices: list of (indexname, index_settings) tuples
    :type settings: dict, not used
    :rtype: None
    """
    if not indices:
        return
    # Keep only the index names; remember everything we attempt to delete.
    indices = [e[0] for e in indices]
    _delete.extend(indices)
    logger.debug("try to delete %s" % ",".join(indices))
    # NOTE: the original 'global lock' declaration was redundant — the lock
    # is only acquired here, never rebound — so it has been removed.
    with lock:
        for index in indices:
            if curator.delete_indices(esclient, [index], master_timeout="300s"):
                logger.info("%s deleted" % index)
                dopey_summary.add(u"%s 己删除" % index)
            else:
                # logger.warning replaces the deprecated logger.warn alias.
                logger.warning("%s deleted failed" % index)
                dopey_summary.add(u"%s 删除失败" % index)
def delete_indices(esclient, indices, settings):
    """Delete each named index while holding the shared lock, tracking the
    attempts in ``_delete`` and logging/summarizing every result.

    :type esclient: elasticsearch.Elasticsearch
    :type indices: list of (indexname, index_settings) tuples
    :type settings: dict, not used
    :rtype: None
    """
    if not indices:
        return
    names = [pair[0] for pair in indices]
    _delete.extend(names)
    logger.debug("try to delete %s" % ','.join(names))
    global lock
    with lock:
        for name in names:
            ok = curator.delete_indices(esclient, [name],
                                        master_timeout='300s')
            if ok:
                logger.info('%s deleted' % name)
                dopey_summary.add(u'%s 己删除' % name)
            else:
                logger.warn('%s deleted failed' % name)
                dopey_summary.add(u'%s 删除失败' % name)
def delete(self):
    """Delete the indices selected by the model's filters and return how
    many were actually removed."""
    candidates = select_indices(
        self.es,
        self.model.index_name_prefix,
        self.model.index_timestring,
        self.model.index_timestring_interval,
        self.model.delete.exec_offset + 1,
    )
    count = 0
    for name in candidates:
        try:
            if curator.delete_indices(self.es, name) is True:
                count += 1
            # A False return means the index no longer exists; not counted.
        except elasticsearch.exceptions.ConnectionTimeout as exc:
            raise CanNotDeleteIndex(str(exc))
    return count
def delete(self):
    """Remove the indices chosen for deletion; return the success count."""
    selection = select_indices(self.es,
                               self.model.index_name_prefix,
                               self.model.index_timestring,
                               self.model.index_timestring_interval,
                               self.model.delete.exec_offset + 1)
    deleted = 0
    for index in selection:
        try:
            result = curator.delete_indices(self.es, index)
        except elasticsearch.exceptions.ConnectionTimeout as err:
            raise CanNotDeleteIndex(str(err))
        if result is True:
            deleted += 1
        # result is False when the index did not exist; skip it silently.
    return deleted
def indices_delete(connection, indices):
    """Delete *indices* (one name or a list/tuple of names) and return a
    human-readable confirmation string."""
    curator.delete_indices(connection, indices)
    if isinstance(indices, (list, tuple)):
        names = ', '.join(indices)
    else:
        names = indices
    return names + ' deleted'