def test_calculate_simple_index_stats(self):
    """Stats should aggregate throughput as (min, median, max, unit) and report latency /
    service_time percentiles while ignoring warmup samples (2800 / 250 below)."""
    cfg = config.Config()
    cfg.add(config.Scope.application, "system", "env.name", "unittest")
    store = metrics.InMemoryMetricsStore(config=cfg, clear=True)
    store.open(datetime.datetime.now(), "test", "unittest", "unittest_car")

    # three regular throughput samples
    for sample in (500, 1000, 2000):
        store.put_value_cluster_level("throughput", sample, unit="docs/s", operation="index",
                                      operation_type=track.OperationType.Index)
    # one warmup sample, then three regular latency samples
    store.put_value_cluster_level("latency", 2800, unit="ms", operation="index",
                                  operation_type=track.OperationType.Index,
                                  sample_type=metrics.SampleType.Warmup)
    for sample in (200, 220, 225):
        store.put_value_cluster_level("latency", sample, unit="ms", operation="index",
                                      operation_type=track.OperationType.Index)
    # one warmup sample, then three regular service_time samples
    store.put_value_cluster_level("service_time", 250, unit="ms", operation="index",
                                  operation_type=track.OperationType.Index,
                                  sample_type=metrics.SampleType.Warmup)
    for sample in (190, 200, 215):
        store.put_value_cluster_level("service_time", sample, unit="ms", operation="index",
                                      operation_type=track.OperationType.Index)

    index = track.Task(operation=track.Operation(name="index",
                                                 operation_type=track.OperationType.Index,
                                                 granularity_unit="docs/s"))
    challenge = track.Challenge(name="unittest", description="", index_settings=None, schedule=[index])

    stats = reporter.Stats(store, challenge)

    self.assertEqual((500, 1000, 2000, "docs/s"), stats.op_metrics["index"]["throughput"])
    self.assertEqual(collections.OrderedDict([(50.0, 220), (100, 225)]),
                     stats.op_metrics["index"]["latency"])
    self.assertEqual(collections.OrderedDict([(50.0, 200), (100, 215)]),
                     stats.op_metrics["index"]["service_time"])
def test_store_results(self):
    """Storing race results should bulk-index one flat document per metric, sharing the
    race-level metadata fields."""
    # here we need the real thing
    from esrally import reporter
    from esrally.mechanic import cluster

    schedule = [track.Task(track.Operation("index", track.OperationType.Index))]
    t = track.Track(name="unittest-track",
                    short_description="unittest track",
                    source_root_url="http://example.org",
                    indices=[track.Index(name="tests", auto_managed=True,
                                         types=[track.Type(name="test-type", mapping_file=None)])],
                    challenges=[track.Challenge(name="index", description="Index", default=True,
                                                index_settings=None, schedule=schedule)])
    c = cluster.Cluster([], [], None)
    c.distribution_version = "5.0.0"
    node = c.add_node("localhost", "rally-node-0")
    node.plugins.append("x-pack")

    race = metrics.Race(rally_version="0.4.4",
                        environment_name="unittest",
                        trial_timestamp=EsResultsStoreTests.TRIAL_TIMESTAMP,
                        pipeline="from-sources",
                        user_tag="let-me-test",
                        track=t,
                        challenge=t.default_challenge,
                        car="4gheap",
                        total_laps=12,
                        cluster=c,
                        lap_results=[],
                        results=reporter.Stats({
                            "young_gc_time": 100,
                            "old_gc_time": 5,
                            "op_metrics": [{
                                "operation": "index",
                                "throughput": {
                                    "min": 1000,
                                    "median": 1250,
                                    "max": 1500,
                                    "unit": "docs/s"
                                }
                            }]
                        }))

    self.race_store.store_results(race)

    # metadata repeated on every result document; dict equality ignores key order,
    # so merging with dict(common, ...) matches the originally expected documents
    common = {
        "environment": "unittest",
        "trial-timestamp": "20160131T000000Z",
        "distribution-version": "5.0.0",
        "distribution-major-version": 5,
        "user-tag": "let-me-test",
        "track": "unittest-track",
        "challenge": "index",
        "car": "4gheap",
        "node-count": 1,
        "plugins": ["x-pack"],
        "active": True
    }
    expected_docs = [
        dict(common, name="old_gc_time", value={"single": 5}),
        dict(common, name="throughput", operation="index",
             value={"min": 1000, "median": 1250, "max": 1500, "unit": "docs/s"}),
        dict(common, name="young_gc_time", value={"single": 100})
    ]
    self.es_mock.bulk_index.assert_called_with(index="rally-results-2016-01",
                                               doc_type="results",
                                               items=expected_docs)
def test_as_flat_list(self):
    """as_flat_list() should flatten nested stats into one record per metric; empty or None
    metrics (flush_time, flush_time_per_shard) must not produce a record at all."""
    d = {
        "op_metrics": [
            {
                "task": "index #1",
                "operation": "index",
                "throughput": {"min": 450, "median": 450, "max": 452, "unit": "docs/s"},
                "latency": {"50": 340, "100": 376},
                "service_time": {"50": 341, "100": 376},
                "error_rate": 0.0
            }
        ],
        "node_metrics": [
            {"node": "rally-node-0", "startup_time": 3.4},
            {"node": "rally-node-1", "startup_time": 4.2}
        ],
        "ml_processing_time": [
            {"job": "job_1", "min": 3.3, "mean": 5.2, "median": 5.8, "max": 12.34},
            {"job": "job_2", "min": 3.55, "mean": 4.2, "median": 4.9, "max": 9.4}
        ],
        "young_gc_time": 68,
        "old_gc_time": 0,
        "merge_time": 3702,
        "merge_time_per_shard": {"min": 40, "median": 3702, "max": 3900, "unit": "ms"},
        "merge_count": 2,
        "refresh_time": 596,
        "refresh_time_per_shard": {"min": 48, "median": 89, "max": 204, "unit": "ms"},
        "refresh_count": 10,
        "flush_time": None,
        "flush_time_per_shard": {},
        "flush_count": 0
    }

    metric_list = reporter.Stats(d).as_flat_list()

    # per-task operation metrics
    self.assertEqual({"name": "throughput", "task": "index #1", "operation": "index",
                      "value": {"min": 450, "median": 450, "max": 452, "unit": "docs/s"}},
                     select(metric_list, "throughput", operation="index"))
    self.assertEqual({"name": "service_time", "task": "index #1", "operation": "index",
                      "value": {"50": 341, "100": 376}},
                     select(metric_list, "service_time", operation="index"))
    self.assertEqual({"name": "latency", "task": "index #1", "operation": "index",
                      "value": {"50": 340, "100": 376}},
                     select(metric_list, "latency", operation="index"))
    self.assertEqual({"name": "error_rate", "task": "index #1", "operation": "index",
                      "value": {"single": 0.0}},
                     select(metric_list, "error_rate", operation="index"))
    # per-node metrics
    self.assertEqual({"node": "rally-node-0", "name": "startup_time", "value": {"single": 3.4}},
                     select(metric_list, "startup_time", node="rally-node-0"))
    self.assertEqual({"node": "rally-node-1", "name": "startup_time", "value": {"single": 4.2}},
                     select(metric_list, "startup_time", node="rally-node-1"))
    # per-job ML metrics
    self.assertEqual({"name": "ml_processing_time", "job": "job_1",
                      "value": {"min": 3.3, "mean": 5.2, "median": 5.8, "max": 12.34}},
                     select(metric_list, "ml_processing_time", job="job_1"))
    self.assertEqual({"name": "ml_processing_time", "job": "job_2",
                      "value": {"min": 3.55, "mean": 4.2, "median": 4.9, "max": 9.4}},
                     select(metric_list, "ml_processing_time", job="job_2"))
    # cluster-wide scalar and per-shard metrics
    self.assertEqual({"name": "young_gc_time", "value": {"single": 68}},
                     select(metric_list, "young_gc_time"))
    self.assertEqual({"name": "old_gc_time", "value": {"single": 0}},
                     select(metric_list, "old_gc_time"))
    self.assertEqual({"name": "merge_time", "value": {"single": 3702}},
                     select(metric_list, "merge_time"))
    self.assertEqual({"name": "merge_time_per_shard",
                      "value": {"min": 40, "median": 3702, "max": 3900, "unit": "ms"}},
                     select(metric_list, "merge_time_per_shard"))
    self.assertEqual({"name": "merge_count", "value": {"single": 2}},
                     select(metric_list, "merge_count"))
    self.assertEqual({"name": "refresh_time", "value": {"single": 596}},
                     select(metric_list, "refresh_time"))
    self.assertEqual({"name": "refresh_time_per_shard",
                      "value": {"min": 48, "median": 89, "max": 204, "unit": "ms"}},
                     select(metric_list, "refresh_time_per_shard"))
    self.assertEqual({"name": "refresh_count", "value": {"single": 10}},
                     select(metric_list, "refresh_count"))
    # None / empty metrics are skipped entirely
    self.assertIsNone(select(metric_list, "flush_time"))
    self.assertIsNone(select(metric_list, "flush_time_per_shard"))
    self.assertEqual({"name": "flush_count", "value": {"single": 0}},
                     select(metric_list, "flush_count"))
def test_as_flat_list(self):
    """as_flat_list() should emit one flat record per metric, carrying task/operation or
    node context where applicable."""
    d = {
        "op_metrics": [
            {
                "task": "index #1",
                "operation": "index",
                "throughput": {"min": 450, "median": 450, "max": 452, "unit": "docs/s"},
                "latency": {"50": 340, "100": 376},
                "service_time": {"50": 341, "100": 376},
                "error_rate": 0.0
            }
        ],
        "node_metrics": [
            {"node": "rally-node-0", "startup_time": 3.4},
            {"node": "rally-node-1", "startup_time": 4.2}
        ],
        "young_gc_time": 68,
        "old_gc_time": 0
    }

    metric_list = reporter.Stats(d).as_flat_list()

    # per-task operation metrics
    self.assertEqual({"name": "throughput", "task": "index #1", "operation": "index",
                      "value": {"min": 450, "median": 450, "max": 452, "unit": "docs/s"}},
                     select(metric_list, "throughput", operation="index"))
    self.assertEqual({"name": "service_time", "task": "index #1", "operation": "index",
                      "value": {"50": 341, "100": 376}},
                     select(metric_list, "service_time", operation="index"))
    self.assertEqual({"name": "latency", "task": "index #1", "operation": "index",
                      "value": {"50": 340, "100": 376}},
                     select(metric_list, "latency", operation="index"))
    self.assertEqual({"name": "error_rate", "task": "index #1", "operation": "index",
                      "value": {"single": 0.0}},
                     select(metric_list, "error_rate", operation="index"))
    # per-node metrics
    self.assertEqual({"node": "rally-node-0", "name": "startup_time", "value": {"single": 3.4}},
                     select(metric_list, "startup_time", node="rally-node-0"))
    self.assertEqual({"node": "rally-node-1", "name": "startup_time", "value": {"single": 4.2}},
                     select(metric_list, "startup_time", node="rally-node-1"))
    # cluster-wide scalars
    self.assertEqual({"name": "young_gc_time", "value": {"single": 68}},
                     select(metric_list, "young_gc_time"))
    self.assertEqual({"name": "old_gc_time", "value": {"single": 0}},
                     select(metric_list, "old_gc_time"))
def test_store_results(self):
    """Storing race results should bulk-index one flat document per metric, each carrying
    the full race metadata (trial id, user tags, car/plugin params, ...)."""
    # here we need the real thing
    from esrally import reporter
    from esrally.mechanic import cluster

    schedule = [track.Task("index #1", track.Operation("index", track.OperationType.Bulk))]
    t = track.Track(name="unittest-track",
                    indices=[track.Index(name="tests", types=["test-type"])],
                    challenges=[track.Challenge(name="index", default=True, schedule=schedule)])
    c = cluster.Cluster([], [], None)
    c.distribution_version = "5.0.0"
    node = c.add_node("localhost", "rally-node-0")
    node.plugins.append("x-pack")

    race = metrics.Race(rally_version="0.4.4",
                        environment_name="unittest",
                        trial_id=EsResultsStoreTests.TRIAL_ID,
                        trial_timestamp=EsResultsStoreTests.TRIAL_TIMESTAMP,
                        pipeline="from-sources",
                        user_tags={"os": "Linux"},
                        track=t,
                        track_params=None,
                        challenge=t.default_challenge,
                        car="4gheap",
                        car_params=None,
                        plugin_params={"some-param": True},
                        total_laps=12,
                        cluster=c,
                        lap_results=[],
                        results=reporter.Stats({
                            "young_gc_time": 100,
                            "old_gc_time": 5,
                            "op_metrics": [
                                {
                                    "task": "index #1",
                                    "operation": "index",
                                    "throughput": {
                                        "min": 1000,
                                        "median": 1250,
                                        "max": 1500,
                                        "unit": "docs/s"
                                    }
                                }
                            ],
                            "node_metrics": [
                                {"node": "rally-node-0", "startup_time": 3.4}
                            ]
                        }))

    self.race_store.store_results(race)

    # metadata repeated on every result document; dict equality ignores key order,
    # so merging with dict(common, ...) matches the originally expected documents
    common = {
        "rally-version": "0.4.4",
        "environment": "unittest",
        "trial-id": EsResultsStoreTests.TRIAL_ID,
        "trial-timestamp": "20160131T000000Z",
        "distribution-version": "5.0.0",
        "distribution-major-version": 5,
        "user-tags": {"os": "Linux"},
        "track": "unittest-track",
        "challenge": "index",
        "car": "4gheap",
        "plugin-params": {"some-param": True},
        "node-count": 1,
        "plugins": ["x-pack"],
        "active": True
    }
    expected_docs = [
        dict(common, name="old_gc_time", value={"single": 5}),
        dict(common, node="rally-node-0", name="startup_time", value={"single": 3.4}),
        dict(common, name="throughput", task="index #1", operation="index",
             value={"min": 1000, "median": 1250, "max": 1500, "unit": "docs/s"}),
        dict(common, name="young_gc_time", value={"single": 100})
    ]
    self.es_mock.bulk_index.assert_called_with(index="rally-results-2016-01",
                                               doc_type="results",
                                               items=expected_docs)