def reindex(self, cluster_name, from_index, to_index):
    """
    Kick off an asynchronous reindex from one index to another.

    :param cluster_name: cluster to run the reindex on
    :param from_index: source index name
    :param to_index: destination index name
    :return: None — the task runs server-side (wait_for_completion=False)
    """
    # Build the body as a dict rather than %-interpolating into a JSON
    # string: index names containing quotes/backslashes would previously
    # have produced invalid JSON. The client serializes dicts itself.
    body = {
        "source": {"index": from_index},
        "dest": {"index": to_index},
    }
    connection = ConnectionService().get_connection(cluster_name)
    connection.reindex(body=body, wait_for_completion=False)
    return
def get_indices_summary(self, cluster_name, indices_names=None):
    """
    Returns a formatted representation of one/many indices.

    :param cluster_name: cluster to inspect
    :param indices_names: optional index name(s) to restrict the summary to
    :return: list of per-index dicts (name, health, docs, size, fielddata,
             settings, state)
    """
    connection = ConnectionService().get_connection(cluster_name)
    indices_stats = connection.indices.stats(index=indices_names, request_timeout=REQUEST_TIMEOUT)

    # get shard info
    cluster_state = ClusterService().get_cluster_state(cluster_name, metric="metadata", indices=indices_names)
    state_indices = jmespath.search("metadata.indices", cluster_state)
    cat = connection.cat.indices(format='json')
    # Build the health lookup once instead of scanning the cat output for
    # every index (was O(n*m) and raised IndexError for a missing index).
    health_by_index = {row['index']: row.get('health') for row in cat} if cat else {}

    indices = []
    if state_indices:
        # Guard: stats may legitimately lack "indices"; previously this
        # crashed calling .keys() on None.
        the_indices = indices_stats.get("indices") or {}
        for key, one_index in the_indices.items():
            index = {"index_name": key}
            index['health'] = health_by_index.get(key)
            index['docs'] = jmespath.search("primaries.docs.count", one_index)
            index['docs_deleted'] = jmespath.search("primaries.docs.deleted", one_index)
            index['size_in_bytes'] = jmespath.search("primaries.store.size_in_bytes", one_index)
            index['fielddata'] = {
                'memory_size_in_bytes': jmespath.search("total.fielddata.memory_size_in_bytes", one_index)}
            index_state = state_indices.get(key) or {}
            # int(None) raised TypeError when settings were missing; keep
            # None in that case instead of crashing the whole summary.
            shards = jmespath.search("settings.index.number_of_shards", index_state)
            replicas = jmespath.search("settings.index.number_of_replicas", index_state)
            index['settings'] = {
                'number_of_shards': int(shards) if shards is not None else None,
                "number_of_replicas": int(replicas) if replicas is not None else None}
            index['state'] = index_state.get("state", None)
            indices.append(index)
    return indices
def expunge_deleted(self, cluster_name, index_name):
    """
    Force-merge an index, expunging only deleted documents.

    Best-effort: large indices will time out, so failures are ignored.

    :param cluster_name: cluster to operate on
    :param index_name: index to expunge deletes from
    :return: the forcemerge response, or None on failure
    """
    connection = ConnectionService().get_connection(cluster_name)
    try:
        return connection.indices.forcemerge(index=index_name,
                                             params={"only_expunge_deletes": 1},
                                             request_timeout=REQUEST_TIMEOUT)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate. This will time out on large indices, so ignore.
        return
def get_closed_indices(self, cluster_name):
    """
    Return the cat rows for every index whose status is closed.

    :param cluster_name: cluster to inspect
    :return: list of cat-index dicts (empty when the cluster has none)
    """
    cat_rows = ConnectionService().get_connection(cluster_name).cat.indices(format='json')
    if not cat_rows:
        return []
    return [row for row in cat_rows if row.get('status', "").startswith('close')]
def get_repositories(self, cluster_name):
    """
    List the cluster's snapshot repositories.

    :param cluster_name: cluster to inspect
    :return: list of {'repository_name': ..., 'repository_type': ...} dicts
    """
    connection = ConnectionService().get_connection(cluster_name)
    repos = connection.snapshot.get_repository()
    return [
        {'repository_name': name, 'repository_type': details.get('type', None)}
        for name, details in repos.items()
    ]
def task_procesor(room_name, cluster_name, metric):
    """
    This will dispatch to the appropriate task/room
    """
    # NOTE(review): name misspells "processor"; kept as-is because callers
    # reference it by this name.
    # DO NOT REMOVE:
    # This is necessary to initialize SQLAlchemy in case the very first request is made directly to this socket endpoint.
    ConnectionService().get_connection(cluster_name)

    metric_task = Task(room_name=room_name, cluster_name=cluster_name, metric=metric)
    taskPool.create_task(task=metric_task, sid=request.sid)
def copy_mapping(self, cluster_name, from_index, to_index):
    """
    Copy the (first) mapping type of one index onto an index with no mapping.

    :param cluster_name: cluster to operate on
    :param from_index: index whose mapping is copied
    :param to_index: index receiving the mapping (must have none)
    :raises BadRequest: if the destination already has a mapping, or the
                        source index has no mapping to copy
    :return: the put_mapping response
    """
    # check that destination does NOT contain a mapping
    dest_mapping_exists = IndicesService().get_mapping(cluster_name, to_index)
    if dest_mapping_exists.get(to_index).get('mappings', None):
        raise BadRequest(message='Index already contains a mapping!')

    source_mapping = IndicesService().get_mapping(cluster_name, from_index)
    root_mapping = source_mapping[from_index]
    doc_types = list(root_mapping['mappings'].keys())
    if not doc_types:
        # Previously this raised a bare IndexError; fail with an explicit
        # message instead.
        raise BadRequest(message='Source index does not contain a mapping!')
    doc_type = doc_types[0]
    mapping_body = root_mapping['mappings'].get(doc_type, {})
    connection = ConnectionService().get_connection(cluster_name)
    return connection.indices.put_mapping(doc_type=doc_type, body=mapping_body, index=to_index)
def get_alias(self, cluster_name, index_name):
    """
    Fetches alias definitions for an index, if passed in.
    For now, we ignore nested data inside of the alias payload, like filter terms.
    TODO: https://www.elastic.co/guide/en/elasticsearch/reference/2.0/indices-aliases.html#_examples_2
    :param cluster_name:
    :param index_name:
    :return: list of {'index_name': ..., 'alias': ...} rows
    """
    connection = ConnectionService().get_connection(cluster_name)
    alias_defs = connection.indices.get_alias(index=index_name, request_timeout=REQUEST_TIMEOUT)
    aliases = []
    # Loop variable renamed: it previously shadowed the `index_name` parameter.
    for aliased_index in alias_defs:
        alias_map = alias_defs[aliased_index].get('aliases', None)
        # Guard: a missing/None `aliases` entry previously crashed in list(None).
        for alias_key in (alias_map or {}):
            aliases.append({'index_name': aliased_index, 'alias': alias_key})
    return aliases
def get(self, cluster_name, command):
    """
    Endpoint for generic GET requests on a cluster.

    Simply does a pass-thru call to the actual cluster endpoint.

    :type cluster_name: string
    :param cluster_name:
    :type command: string
    :param command:
    :returns:
    :resheader Content-Type: application/json
    :status 200: OK
    :status 500: server error
    """
    # `response` was previously left unbound when `command` was None or not
    # recognized, raising UnboundLocalError at return time; default to None.
    response = None
    if command is not None:
        if command.startswith('_cat'):
            response = self._dispatch_cat(cluster_name, command)
        else:
            response = self._dispatch_service(cluster_name, command)
    return APIResponse(response, HTTP_Status.OK, None)

def _dispatch_service(self, cluster_name, command):
    """Route a non-cat command to the matching service call; None if unknown."""
    dispatch = {
        '_cluster_status': lambda: ClusterService().get_cluster_status(cluster_name),
        '_cluster_settings': lambda: ClusterService().get_cluster_settings(cluster_name),
        '_cluster_tasks': lambda: ClusterService().get_cluster_tasks(cluster_name),
        '_cluster_state': lambda: ClusterService().get_cluster_state(cluster_name),
        '_cluster_stats': lambda: ClusterService().get_cluster_stats(cluster_name),
        '_cluster_health': lambda: ClusterService().get_cluster_health(cluster_name),
        '_nodes': lambda: NodeService().get_node_info(cluster_name),
        '_nodes_stats': lambda: NodeService().get_node_stats(cluster_name),
        '_indices_info': lambda: IndicesService().get_indices(cluster_name),
        '_indices_mappings': lambda: IndicesService().get_mapping(cluster_name),
        '_indices_aliases': lambda: IndicesService().get_alias(cluster_name),
        '_indices_stats': lambda: IndicesService().get_indices_stats(cluster_name),
        '_indices_templates': lambda: IndicesService().get_indices_templates(cluster_name),
        '_indices_segments': lambda: IndicesService().get_indices_segments(cluster_name),
        '_indices_shard_stores': lambda: IndicesService().get_indices_shard_stores(cluster_name),
        '_indices_recovery': lambda: IndicesService().get_indices_recovery(cluster_name),
        '_hq_status': lambda: HQService().get_status(),
        '_hq_cluster_summary': lambda: ClusterService().get_cluster_summary(cluster_name),
        '_hq_cluster_list': self._hq_cluster_list,
    }
    handler = dispatch.get(command)
    return handler() if handler is not None else None

def _hq_cluster_list(self):
    """Serialize the registered-cluster list through the ClusterDTO schema."""
    res = ClusterService().get_clusters()
    schema = ClusterDTO(many=True)
    return schema.dump(res)

def _dispatch_cat(self, cluster_name, command):
    """Route a `_cat_*` command straight to the client; the cat api is pretty
    safe and does not currently have a Service interface. None if unknown."""
    connection = ConnectionService().get_connection(cluster_name)
    fmt = 'json'  # renamed from `format` to avoid shadowing the builtin
    dispatch = {
        '_cat_aliases': lambda: connection.cat.aliases(format=fmt, h="*"),
        '_cat_allocation': lambda: connection.cat.allocation(format=fmt, h="*"),
        '_cat_count': lambda: connection.cat.count(format=fmt, h="*"),
        '_cat_fielddata': lambda: connection.cat.fielddata(format=fmt, h="*"),
        '_cat_health': lambda: connection.cat.health(format=fmt, h="*"),
        '_cat_indices': lambda: connection.cat.indices(format=fmt, h="*"),
        '_cat_master': lambda: connection.cat.master(format=fmt, h="*"),
        '_cat_nodeattrs': lambda: connection.cat.nodeattrs(format=fmt, h="*"),
        '_cat_nodes': lambda: connection.cat.nodes(format=fmt, full_id=True, h="*"),
        '_cat_pending_tasks': lambda: connection.cat.pending_tasks(format=fmt, h="*"),
        '_cat_plugins': lambda: connection.cat.plugins(format=fmt, h="*"),
        '_cat_recovery': lambda: connection.cat.recovery(format=fmt, h="*"),
        '_cat_thread_pool': lambda: connection.cat.thread_pool(format=fmt, h="*"),
        '_cat_shards': lambda: connection.cat.shards(format=fmt, h="*"),
        '_cat_segments': lambda: connection.cat.segments(format=fmt, h="*"),
    }
    handler = dispatch.get(command)
    return handler() if handler is not None else None
def get_indices_recovery(self, cluster_name):
    """Pass-through to the indices recovery API for the given cluster."""
    return ConnectionService().get_connection(cluster_name).indices.recovery()
def get_indices_shard_stores(self, cluster_name):
    """Pass-through to the indices shard-stores API for the given cluster."""
    return ConnectionService().get_connection(cluster_name).indices.shard_stores()
def get_indices_segments(self, cluster_name):
    """Pass-through to the indices segments API for the given cluster."""
    return ConnectionService().get_connection(cluster_name).indices.segments()
def get_snapshots(self, cluster_name, repository_name):
    """
    Fetch every snapshot held in the given repository.

    :param cluster_name: cluster to query
    :param repository_name: snapshot repository name
    :return: list of snapshot dicts, or None when the response has no
             'snapshots' key
    """
    conn = ConnectionService().get_connection(cluster_name)
    # Snapshot listings can be slow, hence the longer explicit timeout.
    result = conn.snapshot.get(repository=repository_name, snapshot='_all', request_timeout=120)
    return result.get('snapshots', None)
def force_merge(self, cluster_name, index_name):
    """Force-merge the segments of the given index."""
    conn = ConnectionService().get_connection(cluster_name)
    return conn.indices.forcemerge(index=index_name, request_timeout=REQUEST_TIMEOUT)
def remove_alias(self, cluster_name, index_name, alias_name):
    """Delete an alias from the given index."""
    conn = ConnectionService().get_connection(cluster_name)
    return conn.indices.delete_alias(index_name, name=alias_name)
def refresh_index(self, cluster_name, index_name):
    """Refresh the given index so recent writes become searchable."""
    conn = ConnectionService().get_connection(cluster_name)
    return conn.indices.refresh(index=index_name, request_timeout=REQUEST_TIMEOUT)
def create_index(self, cluster_name, index_name, settings=None):
    """
    Create an index, optionally with a settings/mappings body.

    :param cluster_name: cluster to create the index on
    :param index_name: name of the new index
    :param settings: optional creation body passed through to the client
    """
    conn = ConnectionService().get_connection(cluster_name)
    return conn.indices.create(index=index_name, body=settings, request_timeout=REQUEST_TIMEOUT)
def get_indices_stats(self, cluster_name, indices_names=None):
    """Return index stats, optionally restricted to the given index name(s)."""
    conn = ConnectionService().get_connection(cluster_name)
    return conn.indices.stats(index=indices_names, request_timeout=REQUEST_TIMEOUT)
def clear_cache(self, cluster_name, index_name):
    """Clear the caches of the given index."""
    conn = ConnectionService().get_connection(cluster_name)
    return conn.indices.clear_cache(index=index_name, request_timeout=REQUEST_TIMEOUT)
def get_indices(self, cluster_name, index_name=None):
    """
    Fetch one index's definition, or all indices when no name is given.

    :param cluster_name: cluster to query
    :param index_name: optional index name; defaults to the "_all" wildcard
    """
    conn = ConnectionService().get_connection(cluster_name)
    target = index_name or "_all"
    return conn.indices.get(index=target, request_timeout=REQUEST_TIMEOUT)
def create_alias(self, cluster_name, index_name, alias_name):
    """Add an alias to the given index."""
    conn = ConnectionService().get_connection(cluster_name)
    return conn.indices.put_alias(index_name, name=alias_name)
def get_shards(self, cluster_name, index_name):
    """Return the cat shards rows (as JSON) for the given index."""
    conn = ConnectionService().get_connection(cluster_name)
    return conn.cat.shards(index=index_name, format='json')
def get_mapping(self, cluster_name, index_name, mapping_name=None):
    """
    Fetch the mapping of an index, optionally narrowed to one doc type.

    TODO: add options here, per: https://www.elastic.co/guide/en/elasticsearch/reference/6.x/indices-get-mapping.html#indices-get-mapping

    :param cluster_name: cluster to query
    :param index_name: index whose mapping is fetched
    :param mapping_name: optional doc type to narrow the result
    """
    conn = ConnectionService().get_connection(cluster_name)
    return conn.indices.get_mapping(index=index_name, doc_type=mapping_name,
                                    request_timeout=REQUEST_TIMEOUT)
def get_indices_templates(self, cluster_name):
    """Return all index templates defined on the cluster."""
    return ConnectionService().get_connection(cluster_name).indices.get_template()