def start(self, car, binary, data_paths):
    """Launch a local Elasticsearch cluster for the given car and attach telemetry.

    Kills any leftover benchmark candidate processes first, starts one node per
    ``car.nodes``, waits for the REST layer and only then attaches the cluster-level
    telemetry devices. Raises ``exceptions.LaunchError`` (after force-stopping the
    cluster) if the REST API never becomes available.
    """
    hosts = self.cfg.opts("client", "hosts")
    client_options = self.cfg.opts("client", "options")
    es = client.EsClientFactory(hosts, client_options).create()

    # we're very specific which nodes we kill as there is potentially also an
    # Elasticsearch based metrics store running on this machine
    prefix = self.cfg.opts("provisioning", "node.name.prefix")
    process.kill_running_es_instances(prefix)

    logger.info("Starting a cluster based on car [%s] with [%d] nodes." % (car, car.nodes))

    # TODO dm: Get rid of these...
    devices = self.cfg.opts("mechanic", "telemetry.devices")
    cluster_devices = [
        # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
        telemetry.MergeParts(self.metrics_store, self.node_log_dir),
        telemetry.EnvironmentInfo(es, self.metrics_store),
        telemetry.NodeStats(es, self.metrics_store),
        telemetry.IndexStats(es, self.metrics_store),
        # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
        telemetry.IndexSize(data_paths, self.metrics_store),
    ]
    tele = telemetry.Telemetry(devices, devices=cluster_devices)

    nodes = [self._start_node(node, car, es, binary) for node in range(car.nodes)]
    es_cluster = cluster.Cluster(hosts, nodes, tele)

    logger.info("All cluster nodes have successfully started. Checking if REST API is available.")
    # Guard clause: bail out (and clean up) if the REST layer never comes up.
    if not wait_for_rest_layer(es):
        logger.error("REST API layer is not yet available. Forcefully terminating cluster.")
        self.stop(es_cluster)
        raise exceptions.LaunchError("Elasticsearch REST API layer is not available. Forcefully terminated cluster.")

    logger.info("REST API is available. Attaching telemetry devices to cluster.")
    tele.attach_to_cluster(es_cluster)
    logger.info("Telemetry devices are now attached to the cluster.")
    return es_cluster
def start(self, car):
    """Start a local cluster for *car* and return it with telemetry already attached.

    Publishes the unified client configuration (hosts/options) into the benchmark
    scope, clears out any stray benchmark candidate processes, then boots one node
    per ``car.nodes``.
    """
    port = self.cfg.opts("provisioning", "node.http.port")
    hosts = [{"host": "localhost", "port": port}]
    client_options = self.cfg.opts("launcher", "client.options")

    # unified client config
    self.cfg.add(config.Scope.benchmark, "client", "hosts", hosts)
    self.cfg.add(config.Scope.benchmark, "client", "options", client_options)

    es_client = client.EsClientFactory(hosts, client_options).create()

    # we're very specific which nodes we kill as there is potentially also an
    # Elasticsearch based metrics store running on this machine
    prefix = self.cfg.opts("provisioning", "node.name.prefix")
    process.kill_running_es_instances(prefix)

    logger.info("Starting a cluster based on car [%s] with [%d] nodes." % (car, car.nodes))

    devices = [
        # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
        telemetry.MergeParts(self.cfg, self.metrics_store),
        telemetry.EnvironmentInfo(self.cfg, es_client, self.metrics_store),
        telemetry.NodeStats(self.cfg, es_client, self.metrics_store),
        telemetry.IndexStats(self.cfg, es_client, self.metrics_store),
        # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
        telemetry.IndexSize(self.cfg, self.metrics_store),
    ]
    attached_telemetry = telemetry.Telemetry(self.cfg, devices=devices)

    launched_cluster = cluster.Cluster(
        [self._start_node(node, car, es_client) for node in range(car.nodes)],
        attached_telemetry)
    attached_telemetry.attach_to_cluster(launched_cluster)
    return launched_cluster
def start(self, car):
    """Boot one Elasticsearch node per ``car.nodes`` locally and return the cluster.

    Side effects: registers the client host/option configuration in the benchmark
    scope and kills previously running benchmark candidate processes.
    """
    http_port = self.cfg.opts("provisioning", "node.http.port")
    hosts = [{"host": "localhost", "port": http_port}]
    client_options = self.cfg.opts("launcher", "client.options")

    # unified client config
    self.cfg.add(config.Scope.benchmark, "client", "hosts", hosts)
    self.cfg.add(config.Scope.benchmark, "client", "options", client_options)

    es = client.EsClientFactory(hosts, client_options).create()

    # we're very specific which nodes we kill as there is potentially also an
    # Elasticsearch based metrics store running on this machine
    node_prefix = self.cfg.opts("provisioning", "node.name.prefix")
    process.kill_running_es_instances(node_prefix)

    logger.info("Starting a cluster based on car [%s] with [%d] nodes." % (car, car.nodes))

    cluster_telemetry = [
        # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
        telemetry.MergeParts(self.cfg, self.metrics_store),
        telemetry.EnvironmentInfo(self.cfg, es, self.metrics_store),
        telemetry.NodeStats(self.cfg, es, self.metrics_store),
        telemetry.IndexStats(self.cfg, es, self.metrics_store),
        # TODO dm: Once we do distributed launching, this needs to be done per node not per cluster
        telemetry.IndexSize(self.cfg, self.metrics_store),
    ]
    tele = telemetry.Telemetry(self.cfg, devices=cluster_telemetry)

    # Start all nodes first, then wrap them in the cluster abstraction.
    started_nodes = []
    for node in range(car.nodes):
        started_nodes.append(self._start_node(node, car, es))
    started_cluster = cluster.Cluster(started_nodes, tele)

    tele.attach_to_cluster(started_cluster)
    return started_cluster
def start(self, node_configurations):
    """Start one node per configuration and return the list of started nodes.

    Existing Rally-launched Elasticsearch instances are terminated first.
    """
    # we're very specific which nodes we kill as there is potentially also an
    # Elasticsearch based metrics store running on this machine.
    # The only specific trait of a Rally-related process is that is started
    # "somewhere" in the races root directory.
    #
    # We also do this only once per host otherwise we would kill instances
    # that we've just launched.
    process.kill_running_es_instances(self.races_root_dir)

    per_host_count = len(node_configurations)
    return [self._start_node(nc, per_host_count) for nc in node_configurations]
def start(self, node_configurations):
    """Start one node per configuration and return the list of started nodes.

    Existing Rally-launched Elasticsearch instances are terminated first, and
    the JVM major version (needed for per-version startup flags) is detected
    once for all nodes on this host.

    :param node_configurations: Configurations of all nodes to start on this host.
    :return: A list with one started node per configuration, in input order.
    """
    # we're very specific which nodes we kill as there is potentially also an
    # Elasticsearch based metrics store running on this machine.
    # The only specific trait of a Rally-related process is that is started
    # "somewhere" in the races root directory.
    #
    # We also do this only once per host otherwise we would kill instances
    # that we've just launched.
    process.kill_running_es_instances(self.races_root_dir)

    java_major_version = jvm.major_version(self.java_home)
    # Use lazy %-style arguments so formatting only happens if the record is emitted.
    logger.info("Detected Java major version [%s].", java_major_version)

    node_count_on_host = len(node_configurations)
    return [self._start_node(node_configuration, node_count_on_host, java_major_version)
            for node_configuration in node_configurations]
def test_kills_only_rally_es_processes(self, process_iter):
    """Only ES nodes launched from the races root directory may be killed.

    The metrics-store ES instance, unrelated Java processes, and Rally's own
    (variously named) Python processes must survive.
    """
    es_in_races_dir_e_flag = ProcessTests.Process(100, "java", [
        "/usr/lib/jvm/java-8-oracle/bin/java", "-Xms2g", "-Xmx2g",
        "-Ees.path.home=~/.rally/benchmarks/races/20170101",
        "org.elasticsearch.bootstrap.Elasticsearch"
    ])
    es_in_races_dir_d_flag = ProcessTests.Process(101, "java", [
        "/usr/lib/jvm/java-8-oracle/bin/java", "-Xms2g", "-Xmx2g",
        "-Des.path.home=~/.rally/benchmarks/races/20170101",
        "org.elasticsearch.bootstrap.Elasticsearch"
    ])
    es_metrics_store = ProcessTests.Process(102, "java", [
        "/usr/lib/jvm/java-8-oracle/bin/java", "-Xms2g", "-Xmx2g",
        "-Des.path.home=~/rally/metrics/",
        "org.elasticsearch.bootstrap.Elasticsearch"
    ])
    unrelated_java = ProcessTests.Process(103, "java", [
        "/usr/lib/jvm/java-8-oracle/bin/java", "-Xms2g", "-Xmx2g", "jenkins.main"
    ])
    non_java = ProcessTests.Process(104, "init", ["/usr/sbin/init"])
    rally_as_python3 = ProcessTests.Process(105, "python3", ["/usr/bin/python3", "~/.local/bin/esrally"])
    rally_as_rally = ProcessTests.Process(106, "rally", ["/usr/bin/python3", "~/.local/bin/esrally"])
    rally_as_esrally = ProcessTests.Process(107, "esrally", ["/usr/bin/python3", "~/.local/bin/esrally"])
    rally_on_macos = ProcessTests.Process(108, "Python", ["/Python.app/Contents/MacOS/Python", "~/.local/bin/esrally"])

    process_iter.return_value = [
        es_in_races_dir_d_flag, es_in_races_dir_e_flag, es_metrics_store, unrelated_java,
        non_java, rally_as_python3, rally_as_rally, rally_as_esrally, rally_on_macos
    ]

    process.kill_running_es_instances("~/.rally/benchmarks/races")

    for victim in (es_in_races_dir_e_flag, es_in_races_dir_d_flag):
        self.assertTrue(victim.killed)
    for survivor in (es_metrics_store, unrelated_java, non_java,
                     rally_as_python3, rally_as_rally, rally_as_esrally, rally_on_macos):
        self.assertFalse(survivor.killed)
def test_kills_only_rally_es_processes(self, process_iter):
    """Verify the kill filter: ES nodes under the races root die, everything else lives."""
    es5 = ProcessTests.Process(100, "java",
                               ["/usr/lib/jvm/java-8-oracle/bin/java", "-Xms2g", "-Xmx2g",
                                "-Ees.path.home=~/.rally/benchmarks/races/20170101",
                                "org.elasticsearch.bootstrap.Elasticsearch"])
    es1 = ProcessTests.Process(101, "java",
                               ["/usr/lib/jvm/java-8-oracle/bin/java", "-Xms2g", "-Xmx2g",
                                "-Des.path.home=~/.rally/benchmarks/races/20170101",
                                "org.elasticsearch.bootstrap.Elasticsearch"])
    metrics_store = ProcessTests.Process(102, "java",
                                         ["/usr/lib/jvm/java-8-oracle/bin/java", "-Xms2g", "-Xmx2g",
                                          "-Des.path.home=~/rally/metrics/",
                                          "org.elasticsearch.bootstrap.Elasticsearch"])
    jenkins = ProcessTests.Process(103, "java",
                                   ["/usr/lib/jvm/java-8-oracle/bin/java", "-Xms2g", "-Xmx2g", "jenkins.main"])
    init_proc = ProcessTests.Process(104, "init", ["/usr/sbin/init"])
    rally_python3 = ProcessTests.Process(105, "python3", ["/usr/bin/python3", "~/.local/bin/esrally"])
    rally_rally = ProcessTests.Process(106, "rally", ["/usr/bin/python3", "~/.local/bin/esrally"])
    rally_esrally = ProcessTests.Process(107, "esrally", ["/usr/bin/python3", "~/.local/bin/esrally"])
    rally_mac = ProcessTests.Process(108, "Python", ["/Python.app/Contents/MacOS/Python", "~/.local/bin/esrally"])

    process_iter.return_value = [
        es1, es5, metrics_store, jenkins, init_proc,
        rally_python3, rally_rally, rally_esrally, rally_mac
    ]

    process.kill_running_es_instances("~/.rally/benchmarks/races")

    # Only the two candidate nodes launched from the races directory are killed.
    self.assertTrue(es5.killed)
    self.assertTrue(es1.killed)
    self.assertFalse(metrics_store.killed)
    self.assertFalse(jenkins.killed)
    self.assertFalse(init_proc.killed)
    self.assertFalse(rally_python3.killed)
    self.assertFalse(rally_rally.killed)
    self.assertFalse(rally_esrally.killed)
    self.assertFalse(rally_mac.killed)
def kill(ctx):
    """Terminate running benchmark candidate nodes identified by their name prefix.

    We're very specific which nodes we kill as there is potentially also an
    Elasticsearch based metrics store running on this machine.
    """
    prefix = ctx.config.opts("provisioning", "node.name.prefix")
    process.kill_running_es_instances(prefix)