def test_launches_cluster(self):
    """ClusterLauncher.start() should resolve all configured hosts and attach telemetry."""
    cfg = config.Config()
    # Table-driven config setup keeps the launch preconditions in one place.
    for section, key, value in [
        ("client", "hosts", self.test_host),
        ("client", "options", self.client_options),
        ("mechanic", "telemetry.devices", []),
        ("mechanic", "telemetry.params", {}),
        ("mechanic", "preserve.install", False),
        ("mechanic", "skip.rest.api.check", False),
        ("system", "env.name", "test"),
    ]:
        cfg.add(config.Scope.application, section, key, value)

    metrics_store = get_metrics_store(cfg)
    cluster = launcher.ClusterLauncher(cfg, metrics_store, client_factory_class=MockClientFactory).start()

    expected_hosts = [
        {"host": "10.0.0.10", "port": 9200},
        {"host": "10.0.0.11", "port": 9200},
    ]
    self.assertEqual(expected_hosts, cluster.hosts)
    self.assertIsNotNone(cluster.telemetry)
def test_launches_cluster_with_telemetry_client_timeout_enabled(self):
    """All telemetry clients must be created with timeout retries enabled."""
    cfg = config.Config()
    for section, key, value in [
        ("client", "hosts", self.test_host),
        ("client", "options", self.client_options),
        ("mechanic", "telemetry.devices", []),
        ("mechanic", "telemetry.params", {}),
        ("mechanic", "preserve.install", False),
        ("mechanic", "skip.rest.api.check", False),
        ("system", "env.name", "test"),
    ]:
        cfg.add(config.Scope.application, section, key, value)

    metrics_store = get_metrics_store(cfg)
    cluster = launcher.ClusterLauncher(cfg, metrics_store, client_factory_class=MockClientFactory).start()

    expected_options = {"retry-on-timeout": True, "timeout": 60}
    for device in cluster.telemetry.devices:
        if hasattr(device, "clients"):
            # Multi-cluster aware devices (e.g. CcrStats) keep one client per cluster;
            # check the options of each of them.
            for es_client in device.clients.values():
                self.assertDictEqual(expected_options, es_client.client_options)
        else:
            self.assertDictEqual(expected_options, device.client.client_options)
def test_launches_cluster_with_post_launch_handler(self):
    """The on_post_launch callback must be invoked exactly once after startup."""
    on_post_launch = mock.Mock()

    cfg = config.Config()
    for section, key, value in [
        ("client", "hosts", self.test_host),
        ("client", "options", self.client_options),
        ("mechanic", "telemetry.devices", []),
        ("mechanic", "telemetry.params", {}),
    ]:
        cfg.add(config.Scope.application, section, key, value)

    cluster = launcher.ClusterLauncher(
        cfg, MockMetricsStore(), on_post_launch=on_post_launch,
        client_factory_class=MockClientFactory).start()

    self.assertEqual(
        [{"host": "10.0.0.10", "port": 9200}, {"host": "10.0.0.11", "port": 9200}],
        cluster.hosts)
    self.assertIsNotNone(cluster.telemetry)
    # mock's assert_called_once() requires at least Python 3.6; checking the
    # call count directly keeps this test working on older interpreters.
    self.assertEqual(1, on_post_launch.call_count)
def on_all_nodes_started(self):
    """Launch the cluster-level infrastructure once every node actor has
    reported readiness, then redistribute metrics meta-data to all children."""
    self.cluster_launcher = launcher.ClusterLauncher(self.cfg, self.metrics_store)
    self.cluster = self.cluster_launcher.start()
    # Push all meta-data down again so children see the latest state.
    meta_info_msg = ApplyMetricsMetaInfo(self.metrics_store.meta_info)
    self.send_to_children_and_transition(self.myAddress, meta_info_msg, "nodes_started", "apply_meta_info")
def test_error_on_cluster_launch(self, sleep):
    """A failing REST API check during startup must surface as a LaunchError."""
    cfg = config.Config()
    cfg.add(config.Scope.application, "client", "hosts", self.test_host)
    # The mocked client raises as soon as its info() API is invoked.
    cfg.add(config.Scope.application, "client", "options", opts.ClientOptions("raise-error-on-info:true"))
    for key, value in [
        ("telemetry.devices", []),
        ("telemetry.params", {}),
        ("preserve.install", False),
    ]:
        cfg.add(config.Scope.application, "mechanic", key, value)

    cluster_launcher = launcher.ClusterLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory)
    with self.assertRaisesRegex(
            exceptions.LaunchError,
            "Elasticsearch REST API layer is not available. Forcefully terminated cluster."):
        cluster_launcher.start()
def on_all_nodes_started(self):
    """Start the cluster after all node actors reported readiness and
    propagate metrics meta-data down to the child actors."""
    self.cluster_launcher = launcher.ClusterLauncher(self.cfg, self.metrics_store)
    try:
        self.cluster = self.cluster_launcher.start()
    except BaseException as ex:
        # Don't let a LaunchError propagate: thespian would redeliver the failed
        # message and the retry would then trip a RallyAssertionError because the
        # status has already moved on from "nodes_starting" to "nodes_started".
        # Report the failure to race control instead.
        self.send(self.race_control, actor.BenchmarkFailure("Could not launch cluster", ex))
    else:
        # Push all meta-data down again so children see the latest state.
        self.send_to_children_and_transition(
            self.myAddress,
            ApplyMetricsMetaInfo(self.metrics_store.meta_info),
            "nodes_started",
            "apply_meta_info")
def test_launches_cluster(self):
    """ClusterLauncher.start() should discover all hosts and attach telemetry."""
    cfg = config.Config()
    for section, key, value in [
        ("client", "hosts", self.test_host),
        ("client", "options", self.client_options),
        ("mechanic", "telemetry.devices", []),
        ("mechanic", "telemetry.params", {}),
    ]:
        cfg.add(config.Scope.application, section, key, value)

    cluster = launcher.ClusterLauncher(cfg, MockMetricsStore(), client_factory_class=MockClientFactory).start()

    self.assertEqual(
        [{"host": "10.0.0.10", "port": 9200}, {"host": "10.0.0.11", "port": 9200}],
        cluster.hosts)
    self.assertIsNotNone(cluster.telemetry)