Example #1
    def test_execute_schedule_throughput_throttled(self, es):
        es.bulk.return_value = {"errors": False}

        params.register_param_source_for_name("driver-test-param-source",
                                              DriverTestParamSource)
        test_track = track.Track(name="unittest",
                                 description="unittest track",
                                 indices=None,
                                 challenges=None)

        # in one second (0.5 warmup + 0.5 measurement) at 1000 [ops/s] shared by 4 [clients] we should get 250 samples per client
        for target_throughput, bounds in {
                10: [2, 4],
                100: [24, 26],
                1000: [245, 255]
        }.items():
            task = track.Task("time-based",
                              track.Operation(
                                  "time-based",
                                  track.OperationType.Bulk.name,
                                  params={
                                      "body":
                                      ["action_metadata_line", "index_line"],
                                      "action_metadata_present":
                                      True,
                                      "bulk-size":
                                      1
                                  },
                                  param_source="driver-test-param-source"),
                              warmup_time_period=0.5,
                              time_period=0.5,
                              clients=4,
                              params={
                                  "target-throughput": target_throughput,
                                  "clients": 4
                              },
                              completes_parent=True)
            schedule = driver.schedule_for(test_track, task, 0)
            sampler = driver.Sampler(client_id=0, task=task, start_timestamp=0)

            cancel = threading.Event()
            complete = threading.Event()

            execute_schedule = driver.Executor(task, schedule, es, sampler,
                                               cancel, complete)
            execute_schedule()

            samples = sampler.samples

            sample_size = len(samples)
            lower_bound = bounds[0]
            upper_bound = bounds[1]
            self.assertTrue(
                lower_bound <= sample_size <= upper_bound,
                msg="Expected sample size to be between %d and %d but was %d" %
                (lower_bound, upper_bound, sample_size))
            self.assertTrue(
                complete.is_set(),
                "Executor should auto-complete a task that terminates its parent"
            )
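
A quick standalone check of the arithmetic in the comment above (illustrative only, not part of the test suite): the target throughput is shared across all clients, so over the 1 s window a single client at a 1000 ops/s target should produce about 250 samples, which is why the bounds are [245, 255].

# Illustrative sketch; the variable names below are hypothetical.
target_throughput = 1000      # ops/s across the whole task
clients = 4
window_s = 0.5 + 0.5          # warmup + measurement

expected_samples_per_client = target_throughput / clients * window_s
assert expected_samples_per_client == 250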
Example #2
    def test_cancel_execute_schedule(self, es):
        es.bulk.return_value = {
            "errors": False
        }

        params.register_param_source_for_name("driver-test-param-source", DriverTestParamSource)
        test_track = track.Track(name="unittest", short_description="unittest track",
                                 source_root_url="http://example.org",
                                 indices=None,
                                 challenges=None)

        # in one second (0.5 warmup + 0.5 measurement) at 1000 [ops/s] shared by 4 [clients] we should get 250 samples per client
        for target_throughput, bounds in {10: [2, 4], 100: [24, 26], 1000: [245, 255]}.items():
            task = track.Task(track.Operation("time-based", track.OperationType.Index.name, params={
                "body": ["action_metadata_line", "index_line"],
                "action_metadata_present": True,
                "bulk-size": 1
            },
                                              param_source="driver-test-param-source"),
                              warmup_time_period=0.5, time_period=0.5, clients=4,
                              params={"target-throughput": target_throughput, "clients": 4})
            schedule = driver.schedule_for(test_track, task, 0)
            sampler = driver.Sampler(client_id=0, task=task, start_timestamp=0)

            cancel = threading.Event()
            complete = threading.Event()
            execute_schedule = driver.Executor(task, schedule, es, sampler, cancel, complete)

            cancel.set()
            execute_schedule()

            samples = sampler.samples

            sample_size = len(samples)
            self.assertEqual(0, sample_size)
Example #3
    def setUp(self):
        params.register_param_source_for_name("driver-test-param-source",
                                              DriverTestParamSource)
        params.register_param_source_for_name(
            "driver-test-param-source-with-progress",
            DriverTestParamSourceWithProgress)
        self.test_track = track.Track(name="unittest")
Example #4
    def test_execute_schedule_throughput_throttled(self, es):
        es.bulk.return_value = {
            "errors": False
        }

        params.register_param_source_for_name("driver-test-param-source", DriverTestParamSource)
        test_track = track.Track(name="unittest", short_description="unittest track", description="unittest track",
                                 source_root_url="http://example.org",
                                 indices=None,
                                 challenges=None)

        # in one second (0.5 warmup + 0.5 measurement) at 1000 [ops/s] shared by 4 [clients] we should get 250 samples per client
        for target_throughput, bounds in {10: [2, 4], 100: [24, 26], 1000: [245, 255]}.items():
            task = track.Task(track.Operation("time-based", track.OperationType.Index.name, params={
                "body": ["action_metadata_line", "index_line"],
                "action_metadata_present": True
            },
                                              param_source="driver-test-param-source"),
                              warmup_time_period=0.5, time_period=0.5, clients=4, target_throughput=target_throughput)
            schedule = driver.schedule_for(test_track, task, 0)
            sampler = driver.Sampler(client_id=0, operation=task.operation, start_timestamp=0)

            driver.execute_schedule(schedule, es, sampler)

            samples = sampler.samples

            sample_size = len(samples)
            lower_bound = bounds[0]
            upper_bound = bounds[1]
            self.assertTrue(lower_bound <= sample_size <= upper_bound,
                            msg="Expected sample size to be between %d and %d but was %d" % (lower_bound, upper_bound, sample_size))
Example #5
    def setUp(self):
        params.register_param_source_for_name("driver-test-param-source",
                                              DriverTestParamSource)
        self.test_track = track.Track(name="unittest",
                                      description="unittest track",
                                      source_root_url="http://example.org",
                                      indices=None,
                                      challenges=None)
Example #6
    def test_execute_schedule_in_throughput_mode(self, es):
        es.bulk.return_value = {"errors": False}

        params.register_param_source_for_name("driver-test-param-source",
                                              DriverTestParamSource)
        test_track = track.Track(name="unittest",
                                 description="unittest track",
                                 indices=None,
                                 challenges=None)

        task = track.Task(
            "time-based",
            track.Operation(
                "time-based",
                track.OperationType.Bulk.name,
                params={
                    "body": ["action_metadata_line", "index_line"],
                    "action-metadata-present": True,
                    "bulk-size": 1,
                    # we need this because DriverTestParamSource does not know that there is only one bulk operation, so size() would otherwise return incorrect results
                    "size": 1
                },
                param_source="driver-test-param-source"),
            warmup_time_period=0,
            clients=4)
        schedule = driver.schedule_for(test_track, task, 0)

        sampler = driver.Sampler(client_id=2,
                                 task=task,
                                 start_timestamp=time.perf_counter())
        cancel = threading.Event()
        complete = threading.Event()

        execute_schedule = driver.Executor(task, schedule, es, sampler, cancel,
                                           complete)
        execute_schedule()

        samples = sampler.samples

        self.assertTrue(len(samples) > 0)
        self.assertFalse(complete.is_set(),
                         "Executor should not auto-complete a normal task")
        previous_absolute_time = -1.0
        previous_relative_time = -1.0
        for sample in samples:
            self.assertEqual(2, sample.client_id)
            self.assertEqual(task, sample.task)
            self.assertLess(previous_absolute_time, sample.absolute_time)
            previous_absolute_time = sample.absolute_time
            self.assertLess(previous_relative_time, sample.relative_time)
            previous_relative_time = sample.relative_time
            # we don't have any warmup time period
            self.assertEqual(metrics.SampleType.Normal, sample.sample_type)
            # latency equals service time in throughput mode
            self.assertEqual(sample.latency_ms, sample.service_time_ms)
            self.assertEqual(1, sample.total_ops)
            self.assertEqual("docs", sample.total_ops_unit)
            self.assertEqual(1, sample.request_meta_data["bulk-size"])
Example #7
    def test_store_race(self):
        from esrally import time
        schedule = [
            track.Task("index #1",
                       track.Operation("index", track.OperationType.Bulk))
        ]

        t = track.Track(
            name="unittest",
            indices=[track.Index(name="tests", types=["test-type"])],
            challenges=[
                track.Challenge(name="index", default=True, schedule=schedule)
            ])

        race = metrics.Race(rally_version="0.4.4",
                            environment_name="unittest",
                            trial_id=FileRaceStoreTests.TRIAL_ID,
                            trial_timestamp=FileRaceStoreTests.TRIAL_TIMESTAMP,
                            pipeline="from-sources",
                            user_tags={"os": "Linux"},
                            track=t,
                            track_params={"clients": 12},
                            challenge=t.default_challenge,
                            car="4gheap",
                            total_laps=12,
                            cluster=FileRaceStoreTests.DictHolder({
                                "distribution-version":
                                "5.0.0",
                                "nodes": [{
                                    "node_name": "node0",
                                    "ip": "127.0.0.1"
                                }]
                            }),
                            lap_results=[],
                            results=FileRaceStoreTests.DictHolder({
                                "young_gc_time":
                                100,
                                "old_gc_time":
                                5,
                                "op_metrics": [{
                                    "task": "index #1",
                                    "operation": "index",
                                    "throughput": {
                                        "min": 1000,
                                        "median": 1250,
                                        "max": 1500,
                                        "unit": "docs/s"
                                    }
                                }]
                            }))

        self.race_store.store_race(race)

        retrieved_race = self.race_store.find_by_timestamp(
            timestamp=time.to_iso8601(FileRaceStoreTests.TRIAL_TIMESTAMP))
        self.assertEqual(race.trial_timestamp, retrieved_race.trial_timestamp)
        self.assertEqual(1, len(self.race_store.list()))
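
FileRaceStoreTests.DictHolder is a small test helper that wraps a plain dict so it can stand in for objects the race store serializes. A plausible sketch, assuming the store calls an as_dict()-style hook (the method name is an assumption):

class DictHolder:
    """Sketch of the test helper: wraps a dict and exposes it via
    as_dict(), presumably the hook the race store calls when
    serializing cluster info and results."""

    def __init__(self, d):
        self.d = d

    def as_dict(self):
        return self.d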
Example #8
    def test_calculate_simple_index_stats(self):
        cfg = config.Config()
        cfg.add(config.Scope.application, "system", "env.name", "unittest")
        cfg.add(config.Scope.application, "system", "time.start", datetime.datetime.now())
        cfg.add(config.Scope.application, "system", "trial.id", "6ebc6e53-ee20-4b0c-99b4-09697987e9f4")
        cfg.add(config.Scope.application, "reporting", "datastore.type", "in-memory")
        cfg.add(config.Scope.application, "mechanic", "car.names", ["unittest_car"])
        cfg.add(config.Scope.application, "mechanic", "car.params", {})
        cfg.add(config.Scope.application, "mechanic", "plugin.params", {})
        cfg.add(config.Scope.application, "race", "laps", 1)
        cfg.add(config.Scope.application, "race", "user.tag", "")
        cfg.add(config.Scope.application, "race", "pipeline", "from-sources-skip-build")
        cfg.add(config.Scope.application, "track", "params", {})

        index = track.Task(name="index #1", operation=track.Operation(name="index", operation_type=track.OperationType.Bulk, params=None))
        challenge = track.Challenge(name="unittest", schedule=[index], default=True)
        t = track.Track("unittest", "unittest-track", challenges=[challenge])

        store = metrics.metrics_store(cfg, read_only=False, track=t, challenge=challenge)
        store.lap = 1

        store.put_value_cluster_level("throughput", 500, unit="docs/s", task="index #1", operation_type=track.OperationType.Bulk)
        store.put_value_cluster_level("throughput", 1000, unit="docs/s", task="index #1", operation_type=track.OperationType.Bulk)
        store.put_value_cluster_level("throughput", 2000, unit="docs/s", task="index #1", operation_type=track.OperationType.Bulk)

        store.put_value_cluster_level("latency", 2800, unit="ms", task="index #1", operation_type=track.OperationType.Bulk,
                                      sample_type=metrics.SampleType.Warmup)
        store.put_value_cluster_level("latency", 200, unit="ms", task="index #1", operation_type=track.OperationType.Bulk)
        store.put_value_cluster_level("latency", 220, unit="ms", task="index #1", operation_type=track.OperationType.Bulk)
        store.put_value_cluster_level("latency", 225, unit="ms", task="index #1", operation_type=track.OperationType.Bulk)

        store.put_value_cluster_level("service_time", 250, unit="ms", task="index #1", operation_type=track.OperationType.Bulk,
                                      sample_type=metrics.SampleType.Warmup, meta_data={"success": False})
        store.put_value_cluster_level("service_time", 190, unit="ms", task="index #1", operation_type=track.OperationType.Bulk,
                                      meta_data={"success": True})
        store.put_value_cluster_level("service_time", 200, unit="ms", task="index #1", operation_type=track.OperationType.Bulk,
                                      meta_data={"success": False})
        store.put_value_cluster_level("service_time", 215, unit="ms", task="index #1", operation_type=track.OperationType.Bulk,
                                      meta_data={"success": True})
        store.put_count_node_level("rally-node-0", "final_index_size_bytes", 2048, unit="bytes")
        store.put_count_node_level("rally-node-1", "final_index_size_bytes", 4096, unit="bytes")

        stats = reporter.calculate_results(store, metrics.create_race(cfg, t, challenge))

        del store

        opm = stats.metrics("index #1")
        self.assertEqual(collections.OrderedDict([("min", 500), ("median", 1000), ("max", 2000), ("unit", "docs/s")]), opm["throughput"])
        self.assertEqual(collections.OrderedDict([("50_0", 220), ("100_0", 225)]), opm["latency"])
        self.assertEqual(collections.OrderedDict([("50_0", 200), ("100_0", 215)]), opm["service_time"])
        self.assertAlmostEqual(0.3333333333333333, opm["error_rate"])

        self.assertEqual(6144, stats.index_size)
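
The error_rate assertion follows directly from the service_time samples above: the warmup sample is excluded, leaving three measurement samples whose success flags are True, False, True. A standalone recomputation (illustrative only):

# Warmup samples do not count towards error_rate, so only the three
# measurement-phase service_time samples matter here.
measurement_success = [True, False, True]
error_rate = measurement_success.count(False) / len(measurement_success)
assert abs(error_rate - 1 / 3) < 1e-9  # matches assertAlmostEqual above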
Example #9
    def test_execute_schedule_in_throughput_mode(self, es):
        es.bulk.return_value = {"errors": False}

        params.register_param_source_for_name("driver-test-param-source",
                                              DriverTestParamSource)
        test_track = track.Track(name="unittest",
                                 short_description="unittest track",
                                 description="unittest track",
                                 source_root_url="http://example.org",
                                 indices=None,
                                 challenges=None)

        task = track.Task(track.Operation(
            "time-based",
            track.OperationType.Index.name,
            params={
                "body": ["action_metadata_line", "index_line"],
                "action_metadata_present": True
            },
            param_source="driver-test-param-source"),
                          warmup_time_period=0,
                          clients=4,
                          target_throughput=None)
        schedule = driver.schedule_for(test_track, task, 0)

        sampler = driver.Sampler(client_id=2, task=task, start_timestamp=100)
        cancel = threading.Event()
        driver.execute_schedule(cancel, 0, task.operation, schedule, es,
                                sampler)

        samples = sampler.samples

        self.assertTrue(len(samples) > 0)
        previous_absolute_time = -1.0
        previous_relative_time = -1.0
        for sample in samples:
            self.assertEqual(2, sample.client_id)
            self.assertEqual(task, sample.task)
            self.assertTrue(previous_absolute_time < sample.absolute_time)
            previous_absolute_time = sample.absolute_time
            self.assertTrue(previous_relative_time < sample.relative_time)
            previous_relative_time = sample.relative_time
            # we don't have any warmup time period
            self.assertEqual(metrics.SampleType.Normal, sample.sample_type)
            # latency equals service time in throughput mode
            self.assertEqual(sample.latency_ms, sample.service_time_ms)
            self.assertEqual(1, sample.total_ops)
            self.assertEqual("docs", sample.total_ops_unit)
            self.assertEqual(1, sample.request_meta_data["bulk-size"])
Example #10
    def test_store_race(self):
        self.cfg.add(config.Scope.application, "system", "pipeline", "unittest-pipeline")
        self.cfg.add(config.Scope.application, "system", "user.tag", "")
        self.cfg.add(config.Scope.application, "benchmarks", "challenge", "index-and-search")
        self.cfg.add(config.Scope.application, "benchmarks", "car", "defaults")
        self.cfg.add(config.Scope.application, "benchmarks", "laps", 1)
        self.cfg.add(config.Scope.application, "launcher", "external.target.hosts", [{"host": "localhost", "port": "9200"}])
        self.cfg.add(config.Scope.application, "source", "revision", "latest")
        self.cfg.add(config.Scope.application, "source", "distribution.version", "5.0.0")

        index = "tests"
        type = "test-type"

        schedule = [
            track.Task(track.Operation("index", track.OperationType.Index, None)),
            track.Task(track.Operation("search-all", track.OperationType.Search, None)),
        ]

        t = track.Track(name="unittest", short_description="unittest track", description="unittest track",
                        source_root_url="http://example.org",
                        indices=[track.Index(name=index, types=[track.Type(name=type, mapping_file=None)])],
                        challenges=[
                            track.Challenge(name="index-and-search", description="Index & Search", index_settings=None, schedule=schedule)
                        ])
        self.race_store.store_race(t)

        expected_doc = {
            "environment": "unittest-env",
            "trial-timestamp": "20160131T000000Z",
            "pipeline": "unittest-pipeline",
            "revision": "latest",
            "distribution-version": "5.0.0",
            "track": "unittest",
            "laps": 1,
            "selected-challenge": {
                "name": "index-and-search",
                "operations": [
                    "index",
                    "search-all"
                ]
            },
            "car": "defaults",
            "target-hosts": ["localhost:9200"],
            "user-tag": ""
        }

        self.es_mock.index.assert_called_with(index="rally-2016", doc_type="races", item=expected_doc)
Example #11
    def setUp(self):
        self.cfg = config.Config()
        self.cfg.add(config.Scope.application, "system", "env.name", "unittest")
        self.cfg.add(config.Scope.application, "system", "time.start", datetime(year=2017, month=8, day=20, hour=1, minute=0, second=0))
        self.cfg.add(config.Scope.application, "track", "challenge.name", "default")
        self.cfg.add(config.Scope.application, "track", "test.mode.enabled", True)
        self.cfg.add(config.Scope.application, "mechanic", "car.names", ["default"])
        self.cfg.add(config.Scope.application, "client", "hosts", ["localhost:9200"])
        self.cfg.add(config.Scope.application, "client", "options", {})
        self.cfg.add(config.Scope.application, "driver", "cluster.health", "green")
        self.cfg.add(config.Scope.application, "driver", "load_driver_hosts", ["localhost"])

        default_challenge = track.Challenge("default", description="default challenge", default=True, schedule=[
            track.Task(operation=track.Operation("index", operation_type=track.OperationType.Index), clients=4)
        ])
        another_challenge = track.Challenge("other", description="non-default challenge", default=False)
        self.track = track.Track(name="unittest", short_description="unittest track", challenges=[another_challenge, default_challenge])
Example #12
    def test_execute_schedule_with_progress_determined_by_runner(self, es):
        es.bulk.return_value = {
            "errors": False
        }

        params.register_param_source_for_name("driver-test-param-source", DriverTestParamSource)
        test_track = track.Track(name="unittest", description="unittest track",
                                 indices=None,
                                 challenges=None)

        task = track.Task("time-based", track.Operation("time-based", operation_type="unit-test-recovery", params={
            "indices-to-restore": "*",
            # The runner will determine progress
            "size": None
        }, param_source="driver-test-param-source"), warmup_time_period=0, clients=4)
        schedule = driver.schedule_for(test_track, task, 0)

        sampler = driver.Sampler(client_id=2, task=task, start_timestamp=time.perf_counter())
        cancel = threading.Event()
        complete = threading.Event()

        execute_schedule = driver.Executor(task, schedule, es, sampler, cancel, complete)
        execute_schedule()

        samples = sampler.samples

        self.assertEqual(5, len(samples))
        self.assertTrue(self.runner_with_progress.completed)
        self.assertEqual(1.0, self.runner_with_progress.percent_completed)
        self.assertFalse(complete.is_set(), "Executor should not auto-complete a normal task")
        previous_absolute_time = -1.0
        previous_relative_time = -1.0
        for sample in samples:
            self.assertEqual(2, sample.client_id)
            self.assertEqual(task, sample.task)
            self.assertLess(previous_absolute_time, sample.absolute_time)
            previous_absolute_time = sample.absolute_time
            self.assertLess(previous_relative_time, sample.relative_time)
            previous_relative_time = sample.relative_time
            # we don't have any warmup time period
            self.assertEqual(metrics.SampleType.Normal, sample.sample_type)
            # latency equals service time in throughput mode
            self.assertEqual(sample.latency_ms, sample.service_time_ms)
            self.assertEqual(1, sample.total_ops)
            self.assertEqual("ops", sample.total_ops_unit)
Example #13
    def setUp(self):
        self.cfg = config.Config()
        self.cfg.add(config.Scope.application, "system", "env.name", "unittest")
        self.cfg.add(config.Scope.application, "system", "time.start", datetime(year=2017, month=8, day=20, hour=1, minute=0, second=0))
        self.cfg.add(config.Scope.application, "system", "trial.id", "6ebc6e53-ee20-4b0c-99b4-09697987e9f4")
        self.cfg.add(config.Scope.application, "track", "challenge.name", "default")
        self.cfg.add(config.Scope.application, "track", "params", {})
        self.cfg.add(config.Scope.application, "track", "test.mode.enabled", True)
        self.cfg.add(config.Scope.application, "mechanic", "car.names", ["default"])
        self.cfg.add(config.Scope.application, "client", "hosts", ["localhost:9200"])
        self.cfg.add(config.Scope.application, "client", "options", {})
        self.cfg.add(config.Scope.application, "driver", "load_driver_hosts", ["localhost"])
        self.cfg.add(config.Scope.application, "reporting", "datastore.type", "in-memory")

        default_challenge = track.Challenge("default", default=True, schedule=[
            track.Task(name="index", operation=track.Operation("index", operation_type=track.OperationType.Bulk), clients=4)
        ])
        another_challenge = track.Challenge("other", default=False)
        self.track = track.Track(name="unittest", description="unittest track", challenges=[another_challenge, default_challenge])
Example #14
    def test_store_race(self):
        schedule = [
            track.Task("index #1",
                       track.Operation("index", track.OperationType.Bulk))
        ]

        t = track.Track(
            name="unittest",
            indices=[track.Index(name="tests", types=["test-type"])],
            challenges=[
                track.Challenge(name="index", default=True, schedule=schedule)
            ])

        race = metrics.Race(rally_version="0.4.4",
                            environment_name="unittest",
                            trial_id=EsRaceStoreTests.TRIAL_ID,
                            trial_timestamp=EsRaceStoreTests.TRIAL_TIMESTAMP,
                            pipeline="from-sources",
                            user_tags={"os": "Linux"},
                            track=t,
                            track_params={"shard-count": 3},
                            challenge=t.default_challenge,
                            car="4gheap",
                            total_laps=12,
                            cluster=EsRaceStoreTests.DictHolder({
                                "distribution-version":
                                "5.0.0",
                                "nodes": [{
                                    "node_name": "node0",
                                    "ip": "127.0.0.1",
                                    "plugins": ["analysis-icu", "x-pack"]
                                }]
                            }),
                            lap_results=[],
                            results=EsRaceStoreTests.DictHolder({
                                "young_gc_time":
                                100,
                                "old_gc_time":
                                5,
                                "op_metrics": [{
                                    "task": "index #1",
                                    "operation": "index",
                                    "throughput": {
                                        "min": 1000,
                                        "median": 1250,
                                        "max": 1500,
                                        "unit": "docs/s"
                                    }
                                }]
                            }))

        self.race_store.store_race(race)

        expected_doc = {
            "rally-version": "0.4.4",
            "environment": "unittest",
            "trial-id": EsRaceStoreTests.TRIAL_ID,
            "trial-timestamp": "20160131T000000Z",
            "pipeline": "from-sources",
            "user-tags": {
                "os": "Linux"
            },
            "track": "unittest",
            "track-params": {
                "shard-count": 3
            },
            "challenge": "index",
            "car": "4gheap",
            "total-laps": 12,
            "cluster": {
                "distribution-version":
                "5.0.0",
                "nodes": [{
                    "node_name": "node0",
                    "ip": "127.0.0.1",
                    "plugins": ["analysis-icu", "x-pack"]
                }]
            },
            "results": {
                "young_gc_time":
                100,
                "old_gc_time":
                5,
                "op_metrics": [{
                    "task": "index #1",
                    "operation": "index",
                    "throughput": {
                        "min": 1000,
                        "median": 1250,
                        "max": 1500,
                        "unit": "docs/s"
                    }
                }]
            }
        }
        self.es_mock.index.assert_called_with(index="rally-races-2016-01",
                                              doc_type="races",
                                              item=expected_doc)
Example #15
    def test_store_race(self):
        from esrally import time
        schedule = [
            track.Task(track.Operation("index", track.OperationType.Index))
        ]

        t = track.Track(name="unittest",
                        short_description="unittest track",
                        source_root_url="http://example.org",
                        indices=[
                            track.Index(name="tests",
                                        auto_managed=True,
                                        types=[
                                            track.Type(name="test-type",
                                                       mapping_file=None)
                                        ])
                        ],
                        challenges=[
                            track.Challenge(name="index",
                                            description="Index",
                                            default=True,
                                            index_settings=None,
                                            schedule=schedule)
                        ])

        race = metrics.Race(rally_version="0.4.4",
                            environment_name="unittest",
                            trial_timestamp=FileRaceStoreTests.TRIAL_TIMESTAMP,
                            pipeline="from-sources",
                            user_tag="let-me-test",
                            track=t,
                            challenge=t.default_challenge,
                            car="4gheap",
                            total_laps=12,
                            cluster=FileRaceStoreTests.DictHolder({
                                "distribution-version":
                                "5.0.0",
                                "nodes": [{
                                    "node_name": "node0",
                                    "ip": "127.0.0.1"
                                }]
                            }),
                            lap_results=[],
                            results=FileRaceStoreTests.DictHolder({
                                "young_gc_time":
                                100,
                                "old_gc_time":
                                5,
                                "op_metrics": [{
                                    "operation": "index",
                                    "throughput": {
                                        "min": 1000,
                                        "median": 1250,
                                        "max": 1500,
                                        "unit": "docs/s"
                                    }
                                }]
                            }))

        self.race_store.store_race(race)

        retrieved_race = self.race_store.find_by_timestamp(
            timestamp=time.to_iso8601(FileRaceStoreTests.TRIAL_TIMESTAMP))
        self.assertEqual(race.trial_timestamp, retrieved_race.trial_timestamp)
        self.assertEqual(1, len(self.race_store.list()))
Example #16
    def test_store_race(self):
        self.cfg.add(config.Scope.application, "system", "pipeline",
                     "unittest-pipeline")
        self.cfg.add(config.Scope.application, "system", "user.tag", "")
        self.cfg.add(config.Scope.application, "benchmarks", "challenge",
                     "index-and-search")
        self.cfg.add(config.Scope.application, "benchmarks", "car", "defaults")
        self.cfg.add(config.Scope.application, "benchmarks", "rounds", 1)
        self.cfg.add(config.Scope.application, "launcher",
                     "external.target.hosts", "")
        self.cfg.add(config.Scope.application, "source", "revision", "latest")
        self.cfg.add(config.Scope.application, "source",
                     "distribution.version", "5.0.0")

        index = "tests"
        type = "test-type"

        benchmarks = {
            track.BenchmarkPhase.index: track.IndexBenchmarkSettings(),
            track.BenchmarkPhase.search: track.LatencyBenchmarkSettings(queries=[
                track.DefaultQuery(index=index,
                                   type=type,
                                   name="default",
                                   body={"query": {
                                       "match_all": {}
                                   }})
            ])
        }

        t = track.Track(
            name="unittest",
            short_description="unittest track",
            description="unittest track",
            source_root_url="http://example.org",
            indices=[
                track.Index(name=index,
                            types=[track.Type(name=type, mapping_file=None)])
            ],
            challenges=[
                track.Challenge(name="index-and-search",
                                description="Index and Search Challenge",
                                benchmark=benchmarks)
            ])
        self.race_store.store_race(t)

        expected_doc = {
            "environment": "unittest-env",
            "trial-timestamp": "20160131T000000Z",
            "pipeline": "unittest-pipeline",
            "revision": "latest",
            "distribution-version": "5.0.0",
            "track": "unittest",
            "selected-challenge": {
                "name": "index-and-search",
                "benchmark-phase-index": True,
                "benchmark-phase-search": {
                    "sample-size": 1000,
                    "queries": ["default"]
                }
            },
            "car": "defaults",
            "rounds": 1,
            "target-hosts": "",
            "user-tag": ""
        }

        self.es_mock.index.assert_called_with(index="rally-2016",
                                              doc_type="races",
                                              item=expected_doc)
Example #17
    def test_calculate_simple_index_stats(self):
        cfg = config.Config()
        cfg.add(config.Scope.application, "system", "env.name", "unittest")
        cfg.add(config.Scope.application, "system", "time.start",
                datetime.datetime.now())
        cfg.add(config.Scope.application, "reporting", "datastore.type",
                "in-memory")
        cfg.add(config.Scope.application, "mechanic", "car.name",
                "unittest_car")
        cfg.add(config.Scope.application, "race", "laps", 1)
        cfg.add(config.Scope.application, "race", "user.tag", "")
        cfg.add(config.Scope.application, "race", "pipeline",
                "from-sources-skip-build")

        index = track.Task(
            operation=track.Operation(name="index",
                                      operation_type=track.OperationType.Index,
                                      params=None))
        challenge = track.Challenge(name="unittest",
                                    description="",
                                    index_settings=None,
                                    schedule=[index],
                                    default=True)
        t = track.Track("unittest", "unittest-track", challenges=[challenge])

        store = metrics.metrics_store(cfg,
                                      read_only=False,
                                      track=t,
                                      challenge=challenge)
        store.lap = 1

        store.put_value_cluster_level("throughput",
                                      500,
                                      unit="docs/s",
                                      operation="index",
                                      operation_type=track.OperationType.Index)
        store.put_value_cluster_level("throughput",
                                      1000,
                                      unit="docs/s",
                                      operation="index",
                                      operation_type=track.OperationType.Index)
        store.put_value_cluster_level("throughput",
                                      2000,
                                      unit="docs/s",
                                      operation="index",
                                      operation_type=track.OperationType.Index)

        store.put_value_cluster_level("latency",
                                      2800,
                                      unit="ms",
                                      operation="index",
                                      operation_type=track.OperationType.Index,
                                      sample_type=metrics.SampleType.Warmup)
        store.put_value_cluster_level("latency",
                                      200,
                                      unit="ms",
                                      operation="index",
                                      operation_type=track.OperationType.Index)
        store.put_value_cluster_level("latency",
                                      220,
                                      unit="ms",
                                      operation="index",
                                      operation_type=track.OperationType.Index)
        store.put_value_cluster_level("latency",
                                      225,
                                      unit="ms",
                                      operation="index",
                                      operation_type=track.OperationType.Index)

        store.put_value_cluster_level("service_time",
                                      250,
                                      unit="ms",
                                      operation="index",
                                      operation_type=track.OperationType.Index,
                                      sample_type=metrics.SampleType.Warmup,
                                      meta_data={"success": False})
        store.put_value_cluster_level("service_time",
                                      190,
                                      unit="ms",
                                      operation="index",
                                      operation_type=track.OperationType.Index,
                                      meta_data={"success": True})
        store.put_value_cluster_level("service_time",
                                      200,
                                      unit="ms",
                                      operation="index",
                                      operation_type=track.OperationType.Index,
                                      meta_data={"success": False})
        store.put_value_cluster_level("service_time",
                                      215,
                                      unit="ms",
                                      operation="index",
                                      operation_type=track.OperationType.Index,
                                      meta_data={"success": True})

        stats = reporter.calculate_results(
            store, metrics.create_race(cfg, t, challenge))

        del store

        opm = stats.metrics("index")
        self.assertEqual(
            collections.OrderedDict([("min", 500), ("median", 1000),
                                     ("max", 2000), ("unit", "docs/s")]),
            opm["throughput"])
        self.assertEqual(collections.OrderedDict([("50", 220), ("100", 225)]),
                         opm["latency"])
        self.assertEqual(collections.OrderedDict([("50", 200), ("100", 215)]),
                         opm["service_time"])
        self.assertAlmostEqual(0.3333333333333333, opm["error_rate"])
Example #18
    def setUp(self):
        params.register_param_source_for_name("driver-test-param-source", DriverTestParamSource)
        runner.register_default_runners()
        self.test_track = track.Track(name="unittest")
Example #19
    def test_store_race(self):
        schedule = [
            track.Task(track.Operation("index", track.OperationType.Index))
        ]

        t = track.Track(name="unittest",
                        short_description="unittest track",
                        source_root_url="http://example.org",
                        indices=[
                            track.Index(name="tests",
                                        auto_managed=True,
                                        types=[
                                            track.Type(name="test-type",
                                                       mapping_file=None)
                                        ])
                        ],
                        challenges=[
                            track.Challenge(name="index",
                                            description="Index",
                                            default=True,
                                            index_settings=None,
                                            schedule=schedule)
                        ])

        race = metrics.Race(rally_version="0.4.4",
                            environment_name="unittest",
                            trial_timestamp=EsRaceStoreTests.TRIAL_TIMESTAMP,
                            pipeline="from-sources",
                            user_tag="let-me-test",
                            track=t,
                            challenge=t.default_challenge,
                            car="4gheap",
                            total_laps=12,
                            cluster=EsRaceStoreTests.DictHolder({
                                "distribution-version":
                                "5.0.0",
                                "nodes": [{
                                    "node_name": "node0",
                                    "ip": "127.0.0.1",
                                    "plugins": ["analysis-icu", "x-pack"]
                                }]
                            }),
                            lap_results=[],
                            results=EsRaceStoreTests.DictHolder({
                                "young_gc_time":
                                100,
                                "old_gc_time":
                                5,
                                "op_metrics": [{
                                    "operation": "index",
                                    "throughput": {
                                        "min": 1000,
                                        "median": 1250,
                                        "max": 1500,
                                        "unit": "docs/s"
                                    }
                                }]
                            }))

        self.race_store.store_race(race)

        expected_doc = {
            "rally-version": "0.4.4",
            "environment": "unittest",
            "trial-timestamp": "20160131T000000Z",
            "pipeline": "from-sources",
            "user-tag": "let-me-test",
            "track": "unittest",
            "challenge": "index",
            "car": "4gheap",
            "total-laps": 12,
            "cluster": {
                "distribution-version":
                "5.0.0",
                "nodes": [{
                    "node_name": "node0",
                    "ip": "127.0.0.1",
                    "plugins": ["analysis-icu", "x-pack"]
                }]
            },
            "results": {
                "young_gc_time":
                100,
                "old_gc_time":
                5,
                "op_metrics": [{
                    "operation": "index",
                    "throughput": {
                        "min": 1000,
                        "median": 1250,
                        "max": 1500,
                        "unit": "docs/s"
                    }
                }]
            }
        }
        self.es_mock.index.assert_called_with(index="rally-races-2016-01",
                                              doc_type="races",
                                              item=expected_doc)
Example #20
    def setUp(self):
        self.test_track = track.Track(name="unittest")
        self.runner_with_progress = SchedulerTests.RunnerWithProgress()
        params.register_param_source_for_name("driver-test-param-source", DriverTestParamSource)
        runner.register_default_runners()
        runner.register_runner("driver-test-runner-with-completion", self.runner_with_progress)
Example #21
    def test_store_results(self):
        # here we need the real thing
        from esrally import reporter
        from esrally.mechanic import cluster

        schedule = [
            track.Task(track.Operation("index", track.OperationType.Index))
        ]

        t = track.Track(name="unittest-track",
                        short_description="unittest track",
                        source_root_url="http://example.org",
                        indices=[
                            track.Index(name="tests",
                                        auto_managed=True,
                                        types=[
                                            track.Type(name="test-type",
                                                       mapping_file=None)
                                        ])
                        ],
                        challenges=[
                            track.Challenge(name="index",
                                            description="Index",
                                            default=True,
                                            index_settings=None,
                                            schedule=schedule)
                        ])

        c = cluster.Cluster([], [], None)
        c.distribution_version = "5.0.0"
        node = c.add_node("localhost", "rally-node-0")
        node.plugins.append("x-pack")

        race = metrics.Race(
            rally_version="0.4.4",
            environment_name="unittest",
            trial_timestamp=EsResultsStoreTests.TRIAL_TIMESTAMP,
            pipeline="from-sources",
            user_tag="let-me-test",
            track=t,
            challenge=t.default_challenge,
            car="4gheap",
            total_laps=12,
            cluster=c,
            lap_results=[],
            results=reporter.Stats({
                "young_gc_time":
                100,
                "old_gc_time":
                5,
                "op_metrics": [{
                    "operation": "index",
                    "throughput": {
                        "min": 1000,
                        "median": 1250,
                        "max": 1500,
                        "unit": "docs/s"
                    }
                }]
            }))

        self.race_store.store_results(race)

        expected_docs = [{
            "environment": "unittest",
            "trial-timestamp": "20160131T000000Z",
            "distribution-version": "5.0.0",
            "distribution-major-version": 5,
            "user-tag": "let-me-test",
            "track": "unittest-track",
            "challenge": "index",
            "car": "4gheap",
            "node-count": 1,
            "plugins": ["x-pack"],
            "active": True,
            "name": "old_gc_time",
            "value": {
                "single": 5
            }
        }, {
            "environment": "unittest",
            "trial-timestamp": "20160131T000000Z",
            "distribution-version": "5.0.0",
            "distribution-major-version": 5,
            "user-tag": "let-me-test",
            "track": "unittest-track",
            "challenge": "index",
            "car": "4gheap",
            "node-count": 1,
            "plugins": ["x-pack"],
            "active": True,
            "name": "throughput",
            "operation": "index",
            "value": {
                "min": 1000,
                "median": 1250,
                "max": 1500,
                "unit": "docs/s"
            }
        }, {
            "environment": "unittest",
            "trial-timestamp": "20160131T000000Z",
            "distribution-version": "5.0.0",
            "distribution-major-version": 5,
            "user-tag": "let-me-test",
            "track": "unittest-track",
            "challenge": "index",
            "car": "4gheap",
            "node-count": 1,
            "plugins": ["x-pack"],
            "active": True,
            "name": "young_gc_time",
            "value": {
                "single": 100
            }
        }]
        self.es_mock.bulk_index.assert_called_with(
            index="rally-results-2016-01",
            doc_type="results",
            items=expected_docs)
Example #22
    def test_store_results(self):
        # here we need the real thing
        from esrally import reporter
        from esrally.mechanic import cluster

        schedule = [
            track.Task("index #1", track.Operation("index", track.OperationType.Bulk))
        ]

        t = track.Track(name="unittest-track",
                        indices=[track.Index(name="tests", types=["test-type"])],
                        challenges=[track.Challenge(name="index", default=True, schedule=schedule)])

        c = cluster.Cluster([], [], None)
        c.distribution_version = "5.0.0"
        node = c.add_node("localhost", "rally-node-0")
        node.plugins.append("x-pack")

        race = metrics.Race(rally_version="0.4.4", environment_name="unittest", trial_id=EsResultsStoreTests.TRIAL_ID,
                            trial_timestamp=EsResultsStoreTests.TRIAL_TIMESTAMP,
                            pipeline="from-sources", user_tags={"os": "Linux"}, track=t, track_params=None,
                            challenge=t.default_challenge, car="4gheap", car_params=None, plugin_params={"some-param": True},
                            total_laps=12,
                            cluster=c,
                            lap_results=[],
                            results=reporter.Stats(
                                {
                                    "young_gc_time": 100,
                                    "old_gc_time": 5,
                                    "op_metrics": [
                                        {
                                            "task": "index #1",
                                            "operation": "index",
                                            "throughput": {
                                                "min": 1000,
                                                "median": 1250,
                                                "max": 1500,
                                                "unit": "docs/s"
                                            }
                                        }
                                    ],
                                    "node_metrics": [
                                        {
                                            "node": "rally-node-0",
                                            "startup_time": 3.4
                                        }
                                    ]
                                })
                            )

        self.race_store.store_results(race)

        expected_docs = [
            {
                "rally-version": "0.4.4",
                "environment": "unittest",
                "trial-id": EsResultsStoreTests.TRIAL_ID,
                "trial-timestamp": "20160131T000000Z",
                "distribution-version": "5.0.0",
                "distribution-major-version": 5,
                "user-tags": {
                    "os": "Linux"
                },
                "track": "unittest-track",
                "challenge": "index",
                "car": "4gheap",
                "plugin-params": {
                    "some-param": True
                },
                "node-count": 1,
                "plugins": ["x-pack"],
                "active": True,
                "name": "old_gc_time",
                "value": {
                    "single": 5
                }
            },
            {
                "rally-version": "0.4.4",
                "environment": "unittest",
                "trial-id": EsResultsStoreTests.TRIAL_ID,
                "trial-timestamp": "20160131T000000Z",
                "distribution-version": "5.0.0",
                "distribution-major-version": 5,
                "user-tags": {
                    "os": "Linux"
                },
                "track": "unittest-track",
                "challenge": "index",
                "car": "4gheap",
                "plugin-params": {
                    "some-param": True
                },
                "node-count": 1,
                "plugins": ["x-pack"],
                "active": True,
                "node": "rally-node-0",
                "name": "startup_time",
                "value": {
                    "single": 3.4
                },
            },
            {
                "rally-version": "0.4.4",
                "environment": "unittest",
                "trial-id": EsResultsStoreTests.TRIAL_ID,
                "trial-timestamp": "20160131T000000Z",
                "distribution-version": "5.0.0",
                "distribution-major-version": 5,
                "user-tags": {
                    "os": "Linux"
                },
                "track": "unittest-track",
                "challenge": "index",
                "car": "4gheap",
                "plugin-params": {
                    "some-param": True
                },
                "node-count": 1,
                "plugins": ["x-pack"],
                "active": True,
                "name": "throughput",
                "task": "index #1",
                "operation": "index",
                "value": {
                    "min": 1000,
                    "median": 1250,
                    "max": 1500,
                    "unit": "docs/s"
                }
            },
            {
                "rally-version": "0.4.4",
                "environment": "unittest",
                "trial-id": EsResultsStoreTests.TRIAL_ID,
                "trial-timestamp": "20160131T000000Z",
                "distribution-version": "5.0.0",
                "distribution-major-version": 5,
                "user-tags": {
                    "os": "Linux"
                },
                "track": "unittest-track",
                "challenge": "index",
                "car": "4gheap",
                "plugin-params": {
                    "some-param": True
                },
                "node-count": 1,
                "plugins": ["x-pack"],
                "active": True,
                "name": "young_gc_time",
                "value": {
                    "single": 100
                }
            }
        ]
        self.es_mock.bulk_index.assert_called_with(index="rally-results-2016-01", doc_type="results", items=expected_docs)