def test_stores_cluster_level_metrics_on_attach(self, metrics_store_add_meta_info):
    """EnvironmentInfo pushes cluster-level and node-level meta-data to the
    metrics store when the telemetry device is attached to the cluster.

    ``metrics_store_add_meta_info`` is the patched-in mock of the metrics
    store's ``add_meta_info`` method (injected by a mock.patch decorator).
    """
    # OrderedDict keeps the two nodes in insertion order so that the
    # expected mock call sequence below is deterministic.
    nodes_info = {"nodes": collections.OrderedDict()}
    nodes_info["nodes"]["FCFjozkeTiOpN-SI88YEcg"] = {
        "name": "rally0",
        "host": "127.0.0.1",
        "attributes": {
            "group": "cold_nodes"
        },
        "os": {
            "name": "Mac OS X",
            "version": "10.11.4",
            "available_processors": 8
        },
        "jvm": {
            "version": "1.8.0_74",
            "vm_vendor": "Oracle Corporation"
        }
    }
    nodes_info["nodes"]["EEEjozkeTiOpN-SI88YEcg"] = {
        "name": "rally1",
        "host": "127.0.0.1",
        "attributes": {
            "group": "hot_nodes"
        },
        "os": {
            "name": "Mac OS X",
            "version": "10.11.5",
            "available_processors": 8
        },
        "jvm": {
            "version": "1.8.0_102",
            "vm_vendor": "Oracle Corporation"
        }
    }
    cluster_info = {
        "version": {
            "build_hash": "abc123",
            "number": "6.0.0-alpha1"
        }
    }
    client = Client(nodes=SubClient(info=nodes_info), info=cluster_info)
    metrics_store = metrics.EsMetricsStore(self.cfg)
    env_device = telemetry.EnvironmentInfo(client, metrics_store)
    t = telemetry.Telemetry(self.cfg, devices=[env_device])
    t.attach_to_cluster(cluster.Cluster([], [], t))
    calls = [
        # cluster-level meta-data taken from the cluster info response
        mock.call(metrics.MetaInfoScope.cluster, None, "source_revision", "abc123"),
        mock.call(metrics.MetaInfoScope.cluster, None, "distribution_version", "6.0.0-alpha1"),
        # node-level meta-data taken from the nodes info response
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_vendor", "Oracle Corporation"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_version", "1.8.0_74"),
        mock.call(metrics.MetaInfoScope.node, "rally1", "jvm_vendor", "Oracle Corporation"),
        mock.call(metrics.MetaInfoScope.node, "rally1", "jvm_version", "1.8.0_102"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "attribute_group", "cold_nodes"),
        mock.call(metrics.MetaInfoScope.node, "rally1", "attribute_group", "hot_nodes")
    ]
    metrics_store_add_meta_info.assert_has_calls(calls)
def test_stores_node_level_metrics_on_attach(self, cpu_model, physical_cpu_cores, logical_cpu_cores, os_version, os_name, metrics_store_add_meta_info):
    """Attaching NodeEnvironmentInfo to a node records OS and CPU meta-data
    for that node (the system probes are patched in as mocks)."""
    # program the patched system probes
    os_name.return_value = "Linux"
    os_version.return_value = "4.2.0-18-generic"
    logical_cpu_cores.return_value = 8
    physical_cpu_cores.return_value = 4
    cpu_model.return_value = "Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz"

    store = metrics.EsMetricsStore(create_config())
    target_node = cluster.Node(None, "io", "rally0", None)
    device = telemetry.NodeEnvironmentInfo(store)
    device.attach_to_node(target_node)

    expected = [
        mock.call(metrics.MetaInfoScope.node, "rally0", "os_name", "Linux"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "os_version", "4.2.0-18-generic"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "cpu_logical_cores", 8),
        mock.call(metrics.MetaInfoScope.node, "rally0", "cpu_physical_cores", 4),
        mock.call(metrics.MetaInfoScope.node, "rally0", "cpu_model", "Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "node_name", "rally0"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "host_name", "io"),
    ]
    metrics_store_add_meta_info.assert_has_calls(expected)
def test_store_calculated_metrics(self, listdir_mock, open_mock, metrics_store_put_value, metrics_store_put_count):
    """MergeParts sums per-category merge times and document counts parsed
    from a node's log file and stores them on benchmark stop.

    Two matching "merge doc values" lines -> expected totals of
    100 + 250 ms and 500 + 1350 docs; non-matching lines must be ignored.
    """
    # NOTE(review): exact line indentation inside this fixture string was
    # lost in a reformat; the parser is assumed to tolerate it — confirm.
    log_file = '''
INFO: System starting up
INFO: 100 msec to merge doc values [500 docs]
INFO: Something unrelated
INFO: 250 msec to merge doc values [1350 docs]
INFO: System shutting down
'''
    listdir_mock.return_value = [open_mock]
    open_mock.side_effect = [
        mock.mock_open(read_data=log_file).return_value
    ]
    metrics_store = metrics.EsMetricsStore(self.cfg)
    node = cluster.Node(None, "io", "rally0", None)
    merge_parts_device = telemetry.MergeParts(metrics_store, node_log_dir="/var/log")
    merge_parts_device.attach_to_node(node)
    merge_parts_device.on_benchmark_stop()
    metrics_store_put_value.assert_called_with(
        "rally0", "merge_parts_total_time_doc_values", 350, "ms")
    metrics_store_put_count.assert_called_with(
        "rally0", "merge_parts_total_docs_doc_values", 1850)
def start_metrics(self, track, setup):
    """Create and open the metrics store for the current invocation of the
    given track and track setup."""
    started_at = self._config.opts("meta", "time.start")
    self._metrics_store = metrics.EsMetricsStore(self._config)
    self._metrics_store.open(started_at, track.name, setup.name, create=True)
def test_stores_index_size_for_data_paths(self, run_subprocess, metrics_store_node_count, get_size):
    """IndexSize reports the summed size of all configured data paths when
    the node is finally detached (not running) and lists the index files."""
    # first data path: 2048 bytes, second one: 16384 bytes -> 18432 total
    get_size.side_effect = [2048, 16384]
    store = metrics.EsMetricsStore(create_config())
    device = telemetry.IndexSize(["/var/elasticsearch/data/1", "/var/elasticsearch/data/2"], store)
    tele = telemetry.Telemetry(enabled_devices=[], devices=[device])
    target_node = cluster.Node(process=None, host_name="localhost", node_name="rally-node-0", telemetry=tele)
    tele.attach_to_node(target_node)
    tele.on_benchmark_start()
    tele.on_benchmark_stop()
    tele.detach_from_node(target_node, running=True)
    tele.detach_from_node(target_node, running=False)

    metrics_store_node_count.assert_has_calls([
        mock.call("rally-node-0", "final_index_size_bytes", 18432, "byte")
    ])
    run_subprocess.assert_has_calls([
        mock.call("find /var/elasticsearch/data/1 -ls", header="index files:"),
        mock.call("find /var/elasticsearch/data/2 -ls", header="index files:")
    ])
def test_stores_cluster_level_metrics_on_attach(self, metrics_store_add_meta_info):
    """ExternalEnvironmentInfo reads cluster info, node stats and node info
    from the stubbed client and records them as meta-data; single-node
    attributes are additionally promoted to cluster level."""
    nodes_stats = {
        "nodes": {
            "FCFjozkeTiOpN-SI88YEcg": {
                "name": "rally0",
                "host": "127.0.0.1"
            }
        }
    }
    nodes_info = {
        "nodes": {
            "FCFjozkeTiOpN-SI88YEcg": {
                "name": "rally0",
                "host": "127.0.0.1",
                "attributes": {
                    "az": "us_east1"
                },
                "os": {
                    "name": "Mac OS X",
                    "version": "10.11.4",
                    "available_processors": 8
                },
                "jvm": {
                    "version": "1.8.0_74",
                    "vm_vendor": "Oracle Corporation"
                }
            }
        }
    }
    cluster_info = {
        "version": {
            "build_hash": "253032b",
            "number": "5.0.0"
        }
    }
    client = Client(nodes=SubClient(stats=nodes_stats, info=nodes_info), info=cluster_info)
    metrics_store = metrics.EsMetricsStore(self.cfg)
    env_device = telemetry.ExternalEnvironmentInfo(client, metrics_store)
    t = telemetry.Telemetry(devices=[env_device])
    t.attach_to_cluster(cluster.Cluster([], [], t))
    calls = [
        mock.call(metrics.MetaInfoScope.cluster, None, "source_revision", "253032b"),
        mock.call(metrics.MetaInfoScope.cluster, None, "distribution_version", "5.0.0"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "node_name", "rally0"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "host_name", "127.0.0.1"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "os_name", "Mac OS X"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "os_version", "10.11.4"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "cpu_logical_cores", 8),
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_vendor", "Oracle Corporation"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_version", "1.8.0_74"),
        # the attribute is stored per node and also at cluster scope
        mock.call(metrics.MetaInfoScope.node, "rally0", "attribute_az", "us_east1"),
        mock.call(metrics.MetaInfoScope.cluster, None, "attribute_az", "us_east1")
    ]
    metrics_store_add_meta_info.assert_has_calls(calls)
def setUp(self):
    """Create a metrics store that is wired to a mocked ES client and a
    static clock so tests run deterministically and offline."""
    cfg = config.Config()
    cfg.add(config.Scope.application, "system", "env.name", "unittest")
    self.cfg = cfg
    self.metrics_store = metrics.EsMetricsStore(
        self.cfg,
        client_factory_class=MockClientFactory,
        index_template_provider_class=DummyIndexTemplateProvider,
        clock=StaticClock)
    # grab the mocked client so individual tests can program / inspect it
    self.es_mock = self.metrics_store._client
    self.es_mock.exists.return_value = False
def test_fallback_when_host_not_available(self, metrics_store_add_meta_info):
    """If the node stats response contains no "host" entry, the host name
    meta-data falls back to "unknown" instead of raising."""
    # deliberately no "host" key for this node
    nodes_stats = {
        "nodes": {
            "FCFjozkeTiOpN-SI88YEcg": {
                "name": "rally0",
            }
        }
    }
    nodes_info = {
        "nodes": {
            "FCFjozkeTiOpN-SI88YEcg": {
                "name": "rally0",
                "os": {
                    "name": "Mac OS X",
                    "version": "10.11.4",
                    "available_processors": 8
                },
                "jvm": {
                    "version": "1.8.0_74",
                    "vm_vendor": "Oracle Corporation"
                }
            }
        }
    }
    cluster_info = {"version": {"build_hash": "abc123"}}
    client = Client(cluster=SubClient(nodes_stats), nodes=SubClient(nodes_info), info=cluster_info)
    metrics_store = metrics.EsMetricsStore(self.cfg)
    env_device = telemetry.ExternalEnvironmentInfo(self.cfg, client, metrics_store)
    t = telemetry.Telemetry(self.cfg, devices=[env_device])
    t.attach_to_cluster(cluster.Cluster([], t))
    calls = [
        mock.call(metrics.MetaInfoScope.cluster, None, "source_revision", "abc123"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "node_name", "rally0"),
        # fallback value because the stats response had no host
        mock.call(metrics.MetaInfoScope.node, "rally0", "host_name", "unknown"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "os_name", "Mac OS X"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "os_version", "10.11.4"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "cpu_logical_cores", 8),
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_vendor", "Oracle Corporation"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_version", "1.8.0_74")
    ]
    metrics_store_add_meta_info.assert_has_calls(calls)
def test_stores_available_index_stats(self, metrics_store_cluster_count, metrics_store_cluster_value):
    """IndexStats stores every index metric that is present in the indices
    stats response; metrics missing from the response (here: norms memory)
    are simply not reported."""
    indices_stats = {
        "_all": {
            "primaries": {
                "segments": {
                    "count": 5,
                    "memory_in_bytes": 2048,
                    "stored_fields_memory_in_bytes": 1024,
                    "doc_values_memory_in_bytes": 128,
                    "terms_memory_in_bytes": 256,
                    "points_memory_in_bytes": 512
                },
                "merges": {
                    "total_time_in_millis": 300,
                    "total_throttled_time_in_millis": 120
                },
                "indexing": {
                    "index_time_in_millis": 2000
                },
                "refresh": {
                    "total_time_in_millis": 200
                },
                "flush": {
                    "total_time_in_millis": 100
                }
            }
        }
    }
    client = Client(indices=SubClient(indices_stats))
    cfg = create_config()
    metrics_store = metrics.EsMetricsStore(cfg)
    device = telemetry.IndexStats(cfg, client, metrics_store)
    t = telemetry.Telemetry(cfg, devices=[device])
    t.on_benchmark_start()
    t.on_benchmark_stop()
    metrics_store_cluster_count.assert_has_calls(
        [mock.call("segments_count", 5)])
    metrics_store_cluster_value.assert_has_calls([
        mock.call("segments_memory_in_bytes", 2048, "byte"),
        mock.call("segments_doc_values_memory_in_bytes", 128, "byte"),
        mock.call("segments_stored_fields_memory_in_bytes", 1024, "byte"),
        mock.call("segments_terms_memory_in_bytes", 256, "byte"),
        # we don't have norms, so nothing should have been called
        mock.call("segments_points_memory_in_bytes", 512, "byte"),
        mock.call("merges_total_time", 300, "ms"),
        mock.call("merges_total_throttled_time", 120, "ms"),
        mock.call("indexing_total_time", 2000, "ms"),
        mock.call("refresh_total_time", 200, "ms"),
        mock.call("flush_total_time", 100, "ms"),
    ])
def report(self, t):
    """Print the benchmark report for track ``t``: per-track-setup metrics
    for every selected track setup, grouped into throughput / latency /
    system / index sections depending on the benchmark phases that ran.

    NOTE(review): the banner strings below appear whitespace-mangled by a
    reformat (originally a multi-line "Final Score" ASCII-art banner);
    they are preserved as-is since they are runtime output.
    """
    self.print_header(
        "------------------------------------------------------")
    self.print_header(
        " _______ __ _____ ")
    self.print_header(
        " / ____(_)___ ____ _/ / / ___/_________ ________ ")
    self.print_header(
        " / /_ / / __ \/ __ `/ / \__ \/ ___/ __ \/ ___/ _ \\")
    self.print_header(
        " / __/ / / / / / /_/ / / ___/ / /__/ /_/ / / / __/")
    self.print_header(
        "/_/ /_/_/ /_/\__,_/_/ /____/\___/\____/_/ \___/ ")
    self.print_header(
        "------------------------------------------------------")
    selected_setups = self._config.opts("benchmarks", "tracksetups.selected")
    invocation = self._config.opts("meta", "time.start")
    for track_setup in t.track_setups:
        # only report setups the user actually selected for this race
        if track_setup.name in selected_setups:
            if len(selected_setups) > 1:
                self.print_header("*** Track setup %s ***\n" % track_setup.name)
            # open the metrics store read-only for this invocation / setup
            store = metrics.EsMetricsStore(self._config)
            store.open(invocation, t.name, track_setup.name)
            if track.BenchmarkPhase.index in track_setup.benchmark:
                self.report_index_throughput(store)
                print("")
                self.report_total_times(store)
                self.report_merge_part_times(store)
            if track.BenchmarkPhase.search in track_setup.benchmark:
                self.report_search_latency(
                    store, t, track_setup.benchmark[
                        track.BenchmarkPhase.search].iteration_count)
            self.print_header("System Metrics")
            self.report_cpu_usage(store)
            self.report_gc_times(store)
            print("")
            self.print_header("Index Metrics")
            self.report_disk_usage(store)
            self.report_segment_memory(store)
            self.report_segment_counts(store)
            print("")
            if track.BenchmarkPhase.stats in track_setup.benchmark:
                self.report_stats_times(
                    store, track_setup.benchmark[
                        track.BenchmarkPhase.stats].iteration_count)
def test_store_nothing_if_no_metrics_present(self, listdir_mock, open_mock, metrics_store_put_value, metrics_store_put_count):
    """A log file without any merge-part lines must not produce any metrics."""
    listdir_mock.return_value = [open_mock]
    unparseable_log = mock.mock_open(read_data="no data to parse").return_value
    open_mock.side_effect = [unparseable_log]
    device = telemetry.MergeParts(self.cfg, metrics.EsMetricsStore(self.cfg))
    device.on_benchmark_stop()
    metrics_store_put_value.assert_not_called()
    metrics_store_put_count.assert_not_called()
def test_store_calculated_metrics(self, metrics_store_put_value, stop_watch):
    """StartupTime measures between on_pre_node_start and attach_to_node and
    reports the elapsed time in seconds for that node."""
    stop_watch.total_time.return_value = 2
    store = metrics.EsMetricsStore(create_config())
    target_node = cluster.Node(None, "io", "rally0", None)
    device = telemetry.StartupTime(store)
    # swap the real timer for the stubbed stop watch
    device.timer = stop_watch
    device.on_pre_node_start(target_node.node_name)
    # ... node starts up ...
    device.attach_to_node(target_node)
    metrics_store_put_value.assert_called_with("rally0", "node_startup_time", 2, "s")
def test_stores_nothing_if_no_data_path(self, run_subprocess, metrics_store_cluster_count, get_size):
    """Without configured data paths, IndexSize must stay completely silent
    over a full attach / benchmark / detach cycle."""
    get_size.return_value = 2048
    store = metrics.EsMetricsStore(create_config())
    device = telemetry.IndexSize(data_paths=[], metrics_store=store)
    tele = telemetry.Telemetry(devices=[device])
    tele.attach_to_cluster(None)
    tele.on_benchmark_start()
    tele.on_benchmark_stop()
    tele.detach_from_cluster(None)
    get_size.assert_not_called()
    run_subprocess.assert_not_called()
    metrics_store_cluster_count.assert_not_called()
def test_stores_cluster_level_metrics_on_attach(
        self, cluster_info, metrics_store_add_meta_info):
    """On attach, the cluster's source revision is stored as cluster-level
    meta-data (cluster info API is patched in as a mock)."""
    cluster_info.return_value = {"version": {"build_hash": "abc123"}}
    cfg = self.create_config()
    store = metrics.EsMetricsStore(cfg)
    device = telemetry.EnvironmentInfo(cfg, store)
    tele = telemetry.Telemetry(cfg, store, devices=[device])
    hosts = [{"host": "::1:9200"}]
    tele.attach_to_cluster(
        cluster.Cluster(hosts, [], store, tele, client_factory_class=MockClientFactory))
    metrics_store_add_meta_info.assert_called_with(
        metrics.MetaInfoScope.cluster, None, "source_revision", "abc123")
def test_stores_nothing_if_no_data_path(self, run_subprocess, metrics_store_cluster_count, get_size):
    """Without data paths, IndexSize records nothing across the whole node
    lifecycle (attach, benchmark, detach while running and stopped)."""
    get_size.return_value = 2048
    store = metrics.EsMetricsStore(create_config())
    device = telemetry.IndexSize(data_paths=[], metrics_store=store)
    tele = telemetry.Telemetry(devices=[device])
    target_node = cluster.Node(process=None, host_name="localhost", node_name="rally-node-0", telemetry=tele)
    tele.attach_to_node(target_node)
    tele.on_benchmark_start()
    tele.on_benchmark_stop()
    tele.detach_from_node(target_node, running=True)
    tele.detach_from_node(target_node, running=False)
    get_size.assert_not_called()
    run_subprocess.assert_not_called()
    metrics_store_cluster_count.assert_not_called()
def test_store_calculated_metrics(self, listdir_mock, open_mock, metrics_store_put_value, metrics_store_put_count):
    """MergeParts sums merge times and doc counts parsed from the log file
    and stores the totals on benchmark stop.

    Fix: the local variable previously named ``config`` shadowed the
    imported ``config`` module (used elsewhere in this file); renamed to
    ``cfg``.
    """
    # Two matching lines -> 100 + 250 ms and 500 + 1350 docs; the rest is
    # ignored. NOTE(review): the fixture's original line indentation was
    # lost in a reformat — the parser is assumed to tolerate it.
    log_file = '''
INFO: System starting up
INFO: 100 msec to merge doc values [500 docs]
INFO: Something unrelated
INFO: 250 msec to merge doc values [1350 docs]
INFO: System shutting down
'''
    listdir_mock.return_value = [open_mock]
    open_mock.side_effect = [
        mock.mock_open(read_data=log_file).return_value
    ]
    cfg = self.create_config()
    metrics_store = metrics.EsMetricsStore(cfg)
    merge_parts_device = telemetry.MergeParts(cfg, metrics_store)
    merge_parts_device.on_benchmark_stop()
    metrics_store_put_value.assert_called_with("merge_parts_total_time_doc_values", 350, "ms")
    metrics_store_put_count.assert_called_with("merge_parts_total_docs_doc_values", 1850)
def test_stores_index_size_for_data_path(self, run_subprocess, metrics_store_cluster_count, get_size):
    """IndexSize reports the final index size of a single data path and
    lists its index files on detach."""
    get_size.return_value = 2048
    store = metrics.EsMetricsStore(create_config())
    device = telemetry.IndexSize(["/var/elasticsearch/data"], store)
    tele = telemetry.Telemetry(enabled_devices=[], devices=[device])
    tele.attach_to_cluster(None)
    tele.on_benchmark_start()
    tele.on_benchmark_stop()
    tele.detach_from_cluster(None)
    metrics_store_cluster_count.assert_has_calls([
        mock.call("final_index_size_bytes", 2048, "byte")
    ])
    run_subprocess.assert_has_calls([
        mock.call("find /var/elasticsearch/data -ls", header="index files:")
    ])
def test_stores_cluster_level_metrics_on_attach(
        self, nodes_info, cluster_info, metrics_store_add_meta_info):
    """EnvironmentInfo queries node and cluster info through the patched-in
    mocks and stores the source revision plus per-node JVM meta-data."""
    nodes_info.return_value = {
        "nodes": {
            "FCFjozkeTiOpN-SI88YEcg": {
                "name": "rally0",
                "host": "127.0.0.1",
                "os": {
                    "name": "Mac OS X",
                    "version": "10.11.4",
                    "available_processors": 8
                },
                "jvm": {
                    "version": "1.8.0_74",
                    "vm_vendor": "Oracle Corporation"
                }
            }
        }
    }
    cluster_info.return_value = {"version": {"build_hash": "abc123"}}
    cfg = self.create_config()
    metrics_store = metrics.EsMetricsStore(cfg)
    env_device = telemetry.EnvironmentInfo(cfg, metrics_store)
    t = telemetry.Telemetry(cfg, metrics_store, devices=[env_device])
    t.attach_to_cluster(
        cluster.Cluster([{
            "host": "::1:9200"
        }], [], {}, metrics_store, t, client_factory_class=MockClientFactory))
    calls = [
        mock.call(metrics.MetaInfoScope.cluster, None, "source_revision", "abc123"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_vendor", "Oracle Corporation"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_version", "1.8.0_74")
    ]
    metrics_store_add_meta_info.assert_has_calls(calls)
def test_index_stats_are_per_lap(self, metrics_store_cluster_count, metrics_store_cluster_value):
    """IndexStats reports per-lap deltas: the values stored at each
    benchmark stop are relative to the readings taken at the matching
    benchmark start, not cumulative across laps."""
    # all counters start at zero
    client = Client(indices=SubClient({
        "_all": {
            "primaries": {
                "segments": {
                    "count": 0
                },
                "merges": {
                    "total_time_in_millis": 0,
                    "total_throttled_time_in_millis": 0
                },
                "indexing": {
                    "index_time_in_millis": 0
                },
                "refresh": {
                    "total_time_in_millis": 0
                },
                "flush": {
                    "total_time_in_millis": 0
                }
            }
        }
    }))
    cfg = create_config()
    metrics_store = metrics.EsMetricsStore(cfg)
    device = telemetry.IndexStats(client, metrics_store)
    t = telemetry.Telemetry(cfg, devices=[device])
    # lap 1
    t.on_benchmark_start()
    # swap in the stats as they look at the end of the first lap
    client.indices = SubClient({
        "_all": {
            "primaries": {
                "segments": {
                    "count": 5,
                    "memory_in_bytes": 2048,
                    "stored_fields_memory_in_bytes": 1024,
                    "doc_values_memory_in_bytes": 128,
                    "terms_memory_in_bytes": 256
                },
                "merges": {
                    "total_time_in_millis": 300,
                    "total_throttled_time_in_millis": 120
                },
                "indexing": {
                    "index_time_in_millis": 2000
                },
                "refresh": {
                    "total_time_in_millis": 200
                },
                "flush": {
                    "total_time_in_millis": 100
                }
            }
        }
    })
    t.on_benchmark_stop()
    # lap 2
    t.on_benchmark_start()
    # stats at the end of the second lap (cumulative counters grew further)
    client.indices = SubClient({
        "_all": {
            "primaries": {
                "segments": {
                    "count": 7,
                    "memory_in_bytes": 2048,
                    "stored_fields_memory_in_bytes": 1024,
                    "doc_values_memory_in_bytes": 128,
                    "terms_memory_in_bytes": 256
                },
                "merges": {
                    "total_time_in_millis": 900,
                    "total_throttled_time_in_millis": 120
                },
                "indexing": {
                    "index_time_in_millis": 8000
                },
                "refresh": {
                    "total_time_in_millis": 500
                },
                "flush": {
                    "total_time_in_millis": 300
                }
            }
        }
    })
    t.on_benchmark_stop()
    metrics_store_cluster_value.assert_has_calls(
        [
            # 1st lap
            mock.call("segments_memory_in_bytes", 2048, "byte"),
            mock.call("merges_total_time", 300, "ms"),
            mock.call("merges_total_throttled_time", 120, "ms"),
            mock.call("indexing_total_time", 2000, "ms"),
            mock.call("refresh_total_time", 200, "ms"),
            mock.call("flush_total_time", 100, "ms"),
            mock.call("segments_doc_values_memory_in_bytes", 128, "byte"),
            mock.call("segments_stored_fields_memory_in_bytes", 1024, "byte"),
            mock.call("segments_terms_memory_in_bytes", 256, "byte"),
            # we don't have norms or points, so nothing should have been called
            # 2nd lap
            mock.call("segments_memory_in_bytes", 2048, "byte"),
            mock.call("merges_total_time", 900, "ms"),
            mock.call("merges_total_throttled_time", 120, "ms"),
            mock.call("indexing_total_time", 8000, "ms"),
            mock.call("refresh_total_time", 500, "ms"),
            mock.call("flush_total_time", 300, "ms"),
            mock.call("segments_doc_values_memory_in_bytes", 128, "byte"),
            mock.call("segments_stored_fields_memory_in_bytes", 1024, "byte"),
            mock.call("segments_terms_memory_in_bytes", 256, "byte"),
        ], any_order=True)
def test_stores_only_diff_of_gc_times(self, metrics_store_node_level, metrics_store_cluster_level):
    """NodeStats stores only the GC time accumulated during the benchmark,
    i.e. the difference between the node stats at start and at stop."""
    nodes_stats_at_start = {
        "nodes": {
            "FCFjozkeTiOpN-SI88YEcg": {
                "name": "rally0",
                "host": "127.0.0.1",
                "jvm": {
                    "gc": {
                        "collectors": {
                            "old": {
                                "collection_time_in_millis": 1000
                            },
                            "young": {
                                "collection_time_in_millis": 500
                            }
                        }
                    }
                }
            }
        }
    }
    client = Client(nodes=SubClient(nodes_stats_at_start))
    cfg = create_config()
    metrics_store = metrics.EsMetricsStore(cfg)
    device = telemetry.NodeStats(client, metrics_store)
    t = telemetry.Telemetry(cfg, devices=[device])
    t.on_benchmark_start()
    # now we'd need to change the node stats response
    nodes_stats_at_end = {
        "nodes": {
            "FCFjozkeTiOpN-SI88YEcg": {
                "name": "rally0",
                "host": "127.0.0.1",
                "jvm": {
                    "gc": {
                        "collectors": {
                            "old": {
                                "collection_time_in_millis": 2500
                            },
                            "young": {
                                "collection_time_in_millis": 1200
                            }
                        }
                    }
                }
            }
        }
    }
    client.nodes = SubClient(nodes_stats_at_end)
    t.on_benchmark_stop()
    # young: 1200 - 500 = 700, old: 2500 - 1000 = 1500
    metrics_store_node_level.assert_has_calls([
        mock.call("rally0", "node_young_gen_gc_time", 700, "ms"),
        mock.call("rally0", "node_old_gen_gc_time", 1500, "ms")
    ])
    metrics_store_cluster_level.assert_has_calls([
        mock.call("node_total_young_gen_gc_time", 700, "ms"),
        mock.call("node_total_old_gen_gc_time", 1500, "ms")
    ])
def test_stores_all_node_metrics_on_attach(self, metrics_store_add_meta_info):
    """ExternalEnvironmentInfo records all node-level meta-data, including
    the installed plugin names; plugin names and attributes common to all
    nodes are additionally stored at cluster level."""
    nodes_stats = {
        "nodes": {
            "FCFjozkeTiOpN-SI88YEcg": {
                "name": "rally0",
                "host": "127.0.0.1"
            }
        }
    }
    nodes_info = {
        "nodes": {
            "FCFjozkeTiOpN-SI88YEcg": {
                "name": "rally0",
                "host": "127.0.0.1",
                "attributes": {
                    "az": "us_east1"
                },
                "os": {
                    "name": "Mac OS X",
                    "version": "10.11.4",
                    "available_processors": 8
                },
                "jvm": {
                    "version": "1.8.0_74",
                    "vm_vendor": "Oracle Corporation"
                },
                "plugins": [{
                    "name": "ingest-geoip",
                    "version": "5.0.0",
                    "description": "Ingest processor that uses looksup geo data ...",
                    "classname": "org.elasticsearch.ingest.geoip.IngestGeoIpPlugin",
                    "has_native_controller": False
                }]
            }
        }
    }
    cluster_info = {
        "version": {
            "build_hash": "253032b",
            "number": "5.0.0"
        }
    }
    client = Client(nodes=SubClient(stats=nodes_stats, info=nodes_info), info=cluster_info)
    metrics_store = metrics.EsMetricsStore(self.cfg)
    env_device = telemetry.ExternalEnvironmentInfo(client, metrics_store)
    t = telemetry.Telemetry(devices=[env_device])
    t.attach_to_cluster(cluster.Cluster([], [], t))
    calls = [
        mock.call(metrics.MetaInfoScope.node, "rally0", "node_name", "rally0"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "host_name", "127.0.0.1"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "os_name", "Mac OS X"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "os_version", "10.11.4"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "cpu_logical_cores", 8),
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_vendor", "Oracle Corporation"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_version", "1.8.0_74"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "plugins", ["ingest-geoip"]),
        # these are automatically pushed up to cluster level (additionally) if all nodes match
        mock.call(metrics.MetaInfoScope.cluster, None, "plugins", ["ingest-geoip"]),
        mock.call(metrics.MetaInfoScope.node, "rally0", "attribute_az", "us_east1"),
        mock.call(metrics.MetaInfoScope.cluster, None, "attribute_az", "us_east1"),
    ]
    metrics_store_add_meta_info.assert_has_calls(calls)
def test_stores_cluster_level_metrics_on_attach(
        self, metrics_store_add_meta_info):
    """ClusterEnvironmentInfo stores cluster-wide meta-data plus per-node JVM,
    plugin and attribute meta-data; since both nodes carry the same plugin,
    the plugin list is also pushed up to cluster level."""
    # OrderedDict keeps the nodes in insertion order so the expected call
    # sequence below is deterministic.
    nodes_info = {"nodes": collections.OrderedDict()}
    nodes_info["nodes"]["FCFjozkeTiOpN-SI88YEcg"] = {
        "name": "rally0",
        "host": "127.0.0.1",
        "attributes": {
            "group": "cold_nodes"
        },
        "os": {
            "name": "Mac OS X",
            "version": "10.11.4",
            "available_processors": 8
        },
        "jvm": {
            "version": "1.8.0_74",
            "vm_vendor": "Oracle Corporation"
        },
        "plugins": [{
            "name": "ingest-geoip",
            "version": "5.0.0",
            "description": "Ingest processor that uses looksup geo data ...",
            "classname": "org.elasticsearch.ingest.geoip.IngestGeoIpPlugin",
            "has_native_controller": False
        }]
    }
    nodes_info["nodes"]["EEEjozkeTiOpN-SI88YEcg"] = {
        "name": "rally1",
        "host": "127.0.0.1",
        "attributes": {
            "group": "hot_nodes"
        },
        "os": {
            "name": "Mac OS X",
            "version": "10.11.5",
            "available_processors": 8
        },
        "jvm": {
            "version": "1.8.0_102",
            "vm_vendor": "Oracle Corporation"
        },
        "plugins": [{
            "name": "ingest-geoip",
            "version": "5.0.0",
            "description": "Ingest processor that uses looksup geo data ...",
            "classname": "org.elasticsearch.ingest.geoip.IngestGeoIpPlugin",
            "has_native_controller": False
        }]
    }
    cluster_info = {
        "version": {
            "build_hash": "abc123",
            "number": "6.0.0-alpha1"
        }
    }
    cfg = create_config()
    client = Client(nodes=SubClient(info=nodes_info), info=cluster_info)
    metrics_store = metrics.EsMetricsStore(cfg)
    env_device = telemetry.ClusterEnvironmentInfo(client, metrics_store)
    t = telemetry.Telemetry(cfg, devices=[env_device])
    t.attach_to_cluster(cluster.Cluster([], [], t))
    calls = [
        mock.call(metrics.MetaInfoScope.cluster, None, "source_revision", "abc123"),
        mock.call(metrics.MetaInfoScope.cluster, None, "distribution_version", "6.0.0-alpha1"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_vendor", "Oracle Corporation"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "jvm_version", "1.8.0_74"),
        mock.call(metrics.MetaInfoScope.node, "rally1", "jvm_vendor", "Oracle Corporation"),
        mock.call(metrics.MetaInfoScope.node, "rally1", "jvm_version", "1.8.0_102"),
        mock.call(metrics.MetaInfoScope.node, "rally0", "plugins", ["ingest-geoip"]),
        mock.call(metrics.MetaInfoScope.node, "rally1", "plugins", ["ingest-geoip"]),
        # can push up to cluster level as all nodes have the same plugins installed
        mock.call(metrics.MetaInfoScope.cluster, None, "plugins", ["ingest-geoip"]),
        mock.call(metrics.MetaInfoScope.node, "rally0", "attribute_group", "cold_nodes"),
        mock.call(metrics.MetaInfoScope.node, "rally1", "attribute_group", "hot_nodes"),
    ]
    metrics_store_add_meta_info.assert_has_calls(calls)
def test_stores_available_index_stats(self, metrics_store_cluster_count, metrics_store_cluster_value):
    """IndexStats stores every metric present in the indices stats response,
    including one "<ext>_size_in_bytes" metric per entry of the per-file-type
    ``file_sizes`` breakdown."""
    indices_stats = {
        "_all": {
            "primaries": {
                "segments": {
                    "count": 5,
                    "memory_in_bytes": 2048,
                    "stored_fields_memory_in_bytes": 1024,
                    "doc_values_memory_in_bytes": 128,
                    "terms_memory_in_bytes": 256,
                    "points_memory_in_bytes": 512,
                    # on-disk size per Lucene file extension
                    "file_sizes": {
                        "dii": {
                            "size_in_bytes": 8552,
                            "description": "Points"
                        },
                        "doc": {
                            "size_in_bytes": 236429758,
                            "description": "Frequencies"
                        },
                        "fdx": {
                            "size_in_bytes": 636858,
                            "description": "Field Index"
                        },
                        "dim": {
                            "size_in_bytes": 199771717,
                            "description": "Points"
                        },
                        "fdt": {
                            "size_in_bytes": 812786379,
                            "description": "Field Data"
                        },
                        "fnm": {
                            "size_in_bytes": 487464,
                            "description": "Fields"
                        },
                        "dvd": {
                            "size_in_bytes": 692513616,
                            "description": "DocValues"
                        },
                        "dvm": {
                            "size_in_bytes": 197706,
                            "description": "DocValues"
                        },
                        "tip": {
                            "size_in_bytes": 11887500,
                            "description": "Term Index"
                        },
                        "tim": {
                            "size_in_bytes": 658631045,
                            "description": "Term Dictionary"
                        },
                        "si": {
                            "size_in_bytes": 5736,
                            "description": "Segment Info"
                        },
                        "nvd": {
                            "size_in_bytes": 94717780,
                            "description": "Norms"
                        },
                        "nvm": {
                            "size_in_bytes": 18834,
                            "description": "Norms"
                        },
                        "pos": {
                            "size_in_bytes": 51762724,
                            "description": "Positions"
                        }
                    }
                },
                "merges": {
                    "total_time_in_millis": 300,
                    "total_throttled_time_in_millis": 120
                },
                "indexing": {
                    "index_time_in_millis": 2000
                },
                "refresh": {
                    "total_time_in_millis": 200
                },
                "flush": {
                    "total_time_in_millis": 100
                }
            }
        }
    }
    client = Client(indices=SubClient(indices_stats))
    cfg = create_config()
    metrics_store = metrics.EsMetricsStore(cfg)
    device = telemetry.IndexStats(cfg, client, metrics_store)
    t = telemetry.Telemetry(cfg, devices=[device])
    t.on_benchmark_start()
    t.on_benchmark_stop()
    metrics_store_cluster_count.assert_has_calls(
        [mock.call("segments_count", 5)])
    metrics_store_cluster_value.assert_has_calls([
        mock.call("segments_memory_in_bytes", 2048, "byte"),
        mock.call("segments_doc_values_memory_in_bytes", 128, "byte"),
        mock.call("segments_stored_fields_memory_in_bytes", 1024, "byte"),
        mock.call("segments_terms_memory_in_bytes", 256, "byte"),
        # no segments norms memory in the response, so that metric is skipped
        mock.call("segments_points_memory_in_bytes", 512, "byte"),
        mock.call("merges_total_time", 300, "ms"),
        mock.call("merges_total_throttled_time", 120, "ms"),
        mock.call("indexing_total_time", 2000, "ms"),
        mock.call("refresh_total_time", 200, "ms"),
        mock.call("flush_total_time", 100, "ms"),
        # one metric per file_sizes entry
        mock.call("dii_size_in_bytes", 8552, "byte"),
        mock.call("doc_size_in_bytes", 236429758, "byte"),
        mock.call("fdx_size_in_bytes", 636858, "byte"),
        mock.call("dim_size_in_bytes", 199771717, "byte"),
        mock.call("fdt_size_in_bytes", 812786379, "byte"),
        mock.call("fnm_size_in_bytes", 487464, "byte"),
        mock.call("dvd_size_in_bytes", 692513616, "byte"),
        mock.call("dvm_size_in_bytes", 197706, "byte"),
        mock.call("tip_size_in_bytes", 11887500, "byte"),
        mock.call("tim_size_in_bytes", 658631045, "byte"),
        mock.call("si_size_in_bytes", 5736, "byte"),
        mock.call("nvd_size_in_bytes", 94717780, "byte"),
        mock.call("nvm_size_in_bytes", 18834, "byte"),
        mock.call("pos_size_in_bytes", 51762724, "byte"),
    ])