def start(self):
    hosts = self.cfg.opts("client", "hosts")
    client_options = self.cfg.opts("client", "options")
    es = self.client_factory(hosts, client_options).create()

    t = telemetry.Telemetry(devices=[
        telemetry.ClusterMetaDataInfo(es),
        telemetry.ClusterEnvironmentInfo(es, self.metrics_store),
        telemetry.NodeStats(es, self.metrics_store),
        telemetry.IndexStats(es, self.metrics_store),
        telemetry.MlBucketProcessingTime(es, self.metrics_store)
    ])

    # The list of nodes will be populated by ClusterMetaDataInfo, so no need to do it here
    c = cluster.Cluster(hosts, [], t)

    logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
    if wait_for_rest_layer(es, max_attempts=20):
        logger.info("REST API is available. Attaching telemetry devices to cluster.")
        t.attach_to_cluster(c)
        logger.info("Telemetry devices are now attached to the cluster.")
    else:
        # Just stop the cluster here and raise. The caller is responsible for terminating individual nodes.
        logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
        self.stop(c)
        raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")
    return c
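The readiness probe delegated to wait_for_rest_layer is not shown in this section. The sketch below is a minimal, hypothetical version of such a check, matching only the call signature used above (an elasticsearch-py client plus max_attempts); the retry endpoints, backoff, and error handling are assumptions, not the actual helper.

import time

import elasticsearch


def wait_for_rest_layer(es, max_attempts=20):
    # Hypothetical sketch: poll cluster health until the REST layer answers,
    # retrying on connection/transport errors with a short sleep in between.
    for _ in range(max_attempts):
        try:
            es.cluster.health(wait_for_nodes=">0")
            return True
        except elasticsearch.ConnectionError:
            # Node is not reachable yet.
            time.sleep(1)
        except elasticsearch.TransportError as e:
            # 502/503/504 mean the HTTP layer is up but the cluster is not ready yet.
            if e.status_code in (502, 503, 504):
                time.sleep(1)
            else:
                raise
    return False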
def start(self): """ Performs final startup tasks. Precondition: All cluster nodes have been started. Postcondition: The cluster is ready to receive HTTP requests or a ``LaunchError`` is raised. :return: A representation of the launched cluster. """ enabled_devices = self.cfg.opts("mechanic", "telemetry.devices") telemetry_params = self.cfg.opts("mechanic", "telemetry.params") all_hosts = self.cfg.opts("client", "hosts").all_hosts default_hosts = self.cfg.opts("client", "hosts").default preserve = self.cfg.opts("mechanic", "preserve.install") skip_rest_api_check = self.cfg.opts("mechanic", "skip.rest.api.check") es = {} for cluster_name, cluster_hosts in all_hosts.items(): all_client_options = self.cfg.opts("client", "options").all_client_options cluster_client_options = dict(all_client_options[cluster_name]) # Use retries to avoid aborts on long living connections for telemetry devices cluster_client_options["retry-on-timeout"] = True es[cluster_name] = self.client_factory(cluster_hosts, cluster_client_options).create() es_default = es["default"] t = telemetry.Telemetry(enabled_devices, devices=[ telemetry.NodeStats(telemetry_params, es, self.metrics_store), telemetry.ClusterMetaDataInfo(es_default), telemetry.ClusterEnvironmentInfo(es_default, self.metrics_store), telemetry.JvmStatsSummary(es_default, self.metrics_store), telemetry.IndexStats(es_default, self.metrics_store), telemetry.MlBucketProcessingTime(es_default, self.metrics_store), telemetry.CcrStats(telemetry_params, es, self.metrics_store), telemetry.RecoveryStats(telemetry_params, es, self.metrics_store) ]) # The list of nodes will be populated by ClusterMetaDataInfo, so no need to do it here c = cluster.Cluster(default_hosts, [], t, preserve) if skip_rest_api_check: self.logger.info("Skipping REST API check and attaching telemetry devices to cluster.") t.attach_to_cluster(c) self.logger.info("Telemetry devices are now attached to the cluster.") else: self.logger.info("All cluster nodes have successfully started. Checking if REST API is available.") if wait_for_rest_layer(es_default, max_attempts=40): self.logger.info("REST API is available. Attaching telemetry devices to cluster.") t.attach_to_cluster(c) self.logger.info("Telemetry devices are now attached to the cluster.") else: # Just stop the cluster here and raise. The caller is responsible for terminating individual nodes. self.logger.error("REST API layer is not yet available. Forcefully terminating cluster.") self.stop(c) raise exceptions.LaunchError( "Elasticsearch REST API layer is not available. Forcefully terminated cluster.") return c