Example #1
    def test_execute_schedule_throughput_throttled(self, es):
        es.bulk.return_value = {
            "errors": False
        }

        params.register_param_source_for_name("driver-test-param-source", DriverTestParamSource)
        test_track = track.Track(name="unittest", short_description="unittest track", description="unittest track",
                                 source_root_url="http://example.org",
                                 indices=None,
                                 challenges=None)

        # in one second (0.5 warmup + 0.5 measurement) we should get 1000 [ops/s] / 4 [clients] = 250 samples
        for target_throughput, bounds in {10: [2, 4], 100: [24, 26], 1000: [245, 255]}.items():
            task = track.Task(track.Operation("time-based", track.OperationType.Index.name, params={
                "body": ["action_metadata_line", "index_line"],
                "action_metadata_present": True
            },
                                              param_source="driver-test-param-source"),
                              warmup_time_period=0.5, time_period=0.5, clients=4, target_throughput=target_throughput)
            schedule = driver.schedule_for(test_track, task, 0)
            sampler = driver.Sampler(client_id=0, operation=task.operation, start_timestamp=0)

            driver.execute_schedule(schedule, es, sampler)

            samples = sampler.samples

            sample_size = len(samples)
            lower_bound = bounds[0]
            upper_bound = bounds[1]
            self.assertTrue(lower_bound <= sample_size <= upper_bound,
                            msg="Expected sample size to be between %d and %d but was %d" % (lower_bound, upper_bound, sample_size))
Example #2
    def test_search_task_one_client(self):
        task = track.Task("search",
                          track.Operation(
                              "search",
                              track.OperationType.Search.name,
                              param_source="driver-test-param-source"),
                          warmup_iterations=3,
                          iterations=5,
                          clients=1,
                          params={
                              "target-throughput": 10,
                              "clients": 1
                          })
        schedule = driver.schedule_for(self.test_track, task, 0)

        expected_schedule = [
            (0, metrics.SampleType.Warmup, 1 / 8, {}),
            (0.1, metrics.SampleType.Warmup, 2 / 8, {}),
            (0.2, metrics.SampleType.Warmup, 3 / 8, {}),
            (0.3, metrics.SampleType.Normal, 4 / 8, {}),
            (0.4, metrics.SampleType.Normal, 5 / 8, {}),
            (0.5, metrics.SampleType.Normal, 6 / 8, {}),
            (0.6, metrics.SampleType.Normal, 7 / 8, {}),
            (0.7, metrics.SampleType.Normal, 8 / 8, {}),
        ]
        self.assert_schedule(expected_schedule, schedule)
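
The expected tuples can be derived by hand: a target throughput of 10 ops/s shared by one client means one invocation every 0.1 s, and 3 warmup plus 5 measurement iterations give 8 samples with linear progress i/8. A minimal sketch of that arithmetic (plain Python, illustrative names only):

    warmup, iterations, clients, throughput = 3, 5, 1, 10
    total = warmup + iterations
    interval = clients / throughput  # 0.1 s between invocations per client
    for i in range(total):
        sample_type = "Warmup" if i < warmup else "Normal"
        print(round(i * interval, 1), sample_type, (i + 1) / total)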
Example #3
    def test_cancel_execute_schedule(self, es):
        es.bulk.return_value = {
            "errors": False
        }

        params.register_param_source_for_name("driver-test-param-source", DriverTestParamSource)
        test_track = track.Track(name="unittest", short_description="unittest track",
                                 source_root_url="http://example.org",
                                 indices=None,
                                 challenges=None)

        # in one second (0.5 warmup + 0.5 measurement) we should get 1000 [ops/s] / 4 [clients] = 250 samples
        for target_throughput, bounds in {10: [2, 4], 100: [24, 26], 1000: [245, 255]}.items():
            task = track.Task(track.Operation("time-based", track.OperationType.Index.name, params={
                "body": ["action_metadata_line", "index_line"],
                "action_metadata_present": True,
                "bulk-size": 1
            },
                                              param_source="driver-test-param-source"),
                              warmup_time_period=0.5, time_period=0.5, clients=4,
                              params={"target-throughput": target_throughput, "clients": 4})
            schedule = driver.schedule_for(test_track, task, 0)
            sampler = driver.Sampler(client_id=0, task=task, start_timestamp=0)

            cancel = threading.Event()
            complete = threading.Event()
            execute_schedule = driver.Executor(task, schedule, es, sampler, cancel, complete)

            cancel.set()
            execute_schedule()

            samples = sampler.samples

            sample_size = len(samples)
            self.assertEqual(0, sample_size)
Example #4
    def test_schedule_param_source_determines_iterations_no_warmup(self):
        # we define neither a time period nor an iteration count on the task.
        task = track.Task("bulk-index",
                          track.Operation(
                              "bulk-index",
                              track.OperationType.Bulk.name,
                              params={
                                  "body": ["a"],
                                  "size": 3
                              },
                              param_source="driver-test-param-source"),
                          clients=1,
                          params={
                              "target-throughput": 4,
                              "clients": 4
                          })

        invocations = driver.schedule_for(self.test_track, task, 0)

        self.assert_schedule([
            (0.0, metrics.SampleType.Normal, 1 / 3, {
                "body": ["a"],
                "size": 3
            }),
            (1.0, metrics.SampleType.Normal, 2 / 3, {
                "body": ["a"],
                "size": 3
            }),
            (2.0, metrics.SampleType.Normal, 3 / 3, {
                "body": ["a"],
                "size": 3
            }),
        ], list(invocations))
Example #5
    def test_eternal_schedule(self):
        task = track.Task("time-based",
                          track.Operation(
                              "time-based",
                              track.OperationType.Bulk.name,
                              params={"body": ["a"]},
                              param_source="driver-test-param-source"),
                          warmup_time_period=0,
                          clients=4,
                          params={
                              "target-throughput": 4,
                              "clients": 4
                          })

        invocations = driver.schedule_for(self.test_track, task, 0)

        self.assert_schedule([
            (0.0, metrics.SampleType.Normal, None, {
                "body": ["a"]
            }),
            (1.0, metrics.SampleType.Normal, None, {
                "body": ["a"]
            }),
            (2.0, metrics.SampleType.Normal, None, {
                "body": ["a"]
            }),
            (3.0, metrics.SampleType.Normal, None, {
                "body": ["a"]
            }),
            (4.0, metrics.SampleType.Normal, None, {
                "body": ["a"]
            }),
        ],
                             invocations,
                             eternal_schedule=True)
Example #6
    def test_schedule_for_time_based(self):
        task = track.Task("time-based", track.Operation("time-based", track.OperationType.Bulk.name, params={"body": ["a"], "size": 11},
                                                        param_source="driver-test-param-source"), warmup_time_period=0.1, time_period=0.1,
                          clients=1)

        schedule_handle = driver.schedule_for(self.test_track, task, 0)
        schedule = list(schedule_handle())

        self.assertTrue(len(schedule) > 0)

        last_progress = -1

        for invocation_time, sample_type, progress_percent, runner, params in schedule:
            # we're not throughput throttled
            self.assertEqual(0, invocation_time)
            if progress_percent <= 0.5:
                self.assertEqual(metrics.SampleType.Warmup, sample_type)
            else:
                self.assertEqual(metrics.SampleType.Normal, sample_type)
            self.assertTrue(last_progress < progress_percent)
            last_progress = progress_percent
            self.assertTrue(round(progress_percent, 2) >= 0.0, "progress should be >= 0.0 but was [%f]" % progress_percent)
            self.assertTrue(round(progress_percent, 2) <= 1.0, "progress should be <= 1.0 but was [%f]" % progress_percent)
            self.assertIsNotNone(runner, "runner must be defined")
            self.assertEqual({"body": ["a"], "size": 11}, params)
Example #7
    def test_execute_schedule_throughput_throttled(self, es):
        es.bulk.return_value = {"errors": False}

        params.register_param_source_for_name("driver-test-param-source",
                                              DriverTestParamSource)
        test_track = track.Track(name="unittest",
                                 description="unittest track",
                                 indices=None,
                                 challenges=None)

        # in one second (0.5 warmup + 0.5 measurement) we should get 1000 [ops/s] / 4 [clients] = 250 samples
        for target_throughput, bounds in {
                10: [2, 4],
                100: [24, 26],
                1000: [245, 255]
        }.items():
            task = track.Task("time-based",
                              track.Operation(
                                  "time-based",
                                  track.OperationType.Bulk.name,
                                   params={
                                       "body": ["action_metadata_line", "index_line"],
                                       "action_metadata_present": True,
                                       "bulk-size": 1
                                   },
                                  param_source="driver-test-param-source"),
                              warmup_time_period=0.5,
                              time_period=0.5,
                              clients=4,
                              params={
                                  "target-throughput": target_throughput,
                                  "clients": 4
                              },
                              completes_parent=True)
            schedule = driver.schedule_for(test_track, task, 0)
            sampler = driver.Sampler(client_id=0, task=task, start_timestamp=0)

            cancel = threading.Event()
            complete = threading.Event()

            execute_schedule = driver.Executor(task, schedule, es, sampler,
                                               cancel, complete)
            execute_schedule()

            samples = sampler.samples

            sample_size = len(samples)
            lower_bound = bounds[0]
            upper_bound = bounds[1]
            self.assertTrue(
                lower_bound <= sample_size <= upper_bound,
                msg="Expected sample size to be between %d and %d but was %d" %
                (lower_bound, upper_bound, sample_size))
            self.assertTrue(
                complete.is_set(),
                "Executor should auto-complete a task that terminates its parent"
            )
Example #8
    def test_execute_schedule_in_throughput_mode(self, es):
        es.bulk.return_value = {"errors": False}

        params.register_param_source_for_name("driver-test-param-source",
                                              DriverTestParamSource)
        test_track = track.Track(name="unittest",
                                 description="unittest track",
                                 indices=None,
                                 challenges=None)

        task = track.Task(
            "time-based",
            track.Operation(
                "time-based",
                track.OperationType.Bulk.name,
                params={
                    "body": ["action_metadata_line", "index_line"],
                    "action-metadata-present": True,
                    "bulk-size": 1,
                    # we need this because DriverTestParamSource does not know that we only have one bulk and hence size() returns incorrect results
                    "size": 1
                },
                param_source="driver-test-param-source"),
            warmup_time_period=0,
            clients=4)
        schedule = driver.schedule_for(test_track, task, 0)

        sampler = driver.Sampler(client_id=2,
                                 task=task,
                                 start_timestamp=time.perf_counter())
        cancel = threading.Event()
        complete = threading.Event()

        execute_schedule = driver.Executor(task, schedule, es, sampler, cancel,
                                           complete)
        execute_schedule()

        samples = sampler.samples

        self.assertTrue(len(samples) > 0)
        self.assertFalse(complete.is_set(),
                         "Executor should not auto-complete a normal task")
        previous_absolute_time = -1.0
        previous_relative_time = -1.0
        for sample in samples:
            self.assertEqual(2, sample.client_id)
            self.assertEqual(task, sample.task)
            self.assertLess(previous_absolute_time, sample.absolute_time)
            previous_absolute_time = sample.absolute_time
            self.assertLess(previous_relative_time, sample.relative_time)
            previous_relative_time = sample.relative_time
            # we don't have any warmup time period
            self.assertEqual(metrics.SampleType.Normal, sample.sample_type)
            # latency equals service time in throughput mode
            self.assertEqual(sample.latency_ms, sample.service_time_ms)
            self.assertEqual(1, sample.total_ops)
            self.assertEqual("docs", sample.total_ops_unit)
            self.assertEqual(1, sample.request_meta_data["bulk-size"])
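
The assertion that latency equals service time follows from the timing model when no target throughput is set: the schedule never asks a client to wait, so the expected start time coincides with the actual start time and both metrics span the same interval. A hedged sketch with made-up numbers:

    expected_start = 0.0                  # unthrottled: "go immediately"
    actual_start = expected_start         # nothing to wait for
    stop = 0.025                          # response arrives after 25 ms
    service_time_ms = (stop - actual_start) * 1000
    latency_ms = (stop - expected_start) * 1000
    assert latency_ms == service_time_ms  # both 25.0 ms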
Example #9
    def test_schedule_defaults_to_iteration_based(self):
        # no time-period and no iterations specified on the task. Also, the parameter source does not define a size.
        task = track.Task("bulk-index", track.Operation("bulk-index", track.OperationType.Bulk.name, params={"body": ["a"]},
                                                        param_source="driver-test-param-source"),
                          clients=1, params={"target-throughput": 4, "clients": 4})

        invocations = driver.schedule_for(self.test_track, task, 0)

        self.assert_schedule([
            (0.0, metrics.SampleType.Normal, 1 / 1, {"body": ["a"]}),
        ], list(invocations))
Example #10
    def test_search_task_two_clients(self):
        task = track.Task(track.Operation("search", track.OperationType.Search.name, param_source="driver-test-param-source"),
                          warmup_iterations=2, iterations=10, clients=2, params={"target-throughput": 10, "clients": 2})
        schedule = driver.schedule_for(self.test_track, task, 0)

        expected_schedule = [
            (0, metrics.SampleType.Warmup, 1 / 6, {}),
            (0.2, metrics.SampleType.Normal, 2 / 6, {}),
            (0.4, metrics.SampleType.Normal, 3 / 6, {}),
            (0.6, metrics.SampleType.Normal, 4 / 6, {}),
            (0.8, metrics.SampleType.Normal, 5 / 6, {}),
            (1.0, metrics.SampleType.Normal, 6 / 6, {}),
        ]
        self.assert_schedule(expected_schedule, schedule)
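
With two clients, both the iteration counts and the target throughput are split evenly: each client runs 1 warmup plus 5 normal iterations at 10/2 = 5 ops/s, i.e. one invocation every 0.2 s. A short sketch of that split (plain Python, not Rally code):

    warmup_total, iterations_total, clients, throughput = 2, 10, 2, 10
    per_client_warmup = warmup_total // clients                      # 1
    per_client_total = (warmup_total + iterations_total) // clients  # 6
    interval = clients / throughput                                  # 0.2 s
    for i in range(per_client_total):
        kind = "Warmup" if i < per_client_warmup else "Normal"
        print(round(i * interval, 1), kind, (i + 1) / per_client_total)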
Example #11
    def test_schedule_param_source_determines_iterations_including_warmup(self):
        task = track.Task("bulk-index", track.Operation("bulk-index", track.OperationType.Bulk.name, params={"body": ["a"], "size": 5},
                                                        param_source="driver-test-param-source"),
                          warmup_iterations=2, clients=1, params={"target-throughput": 4, "clients": 4})

        invocations = driver.schedule_for(self.test_track, task, 0)

        self.assert_schedule([
            (0.0, metrics.SampleType.Warmup, 1 / 5, {"body": ["a"], "size": 5}),
            (1.0, metrics.SampleType.Warmup, 2 / 5, {"body": ["a"], "size": 5}),
            (2.0, metrics.SampleType.Normal, 3 / 5, {"body": ["a"], "size": 5}),
            (3.0, metrics.SampleType.Normal, 4 / 5, {"body": ["a"], "size": 5}),
            (4.0, metrics.SampleType.Normal, 5 / 5, {"body": ["a"], "size": 5}),
        ], list(invocations))
Example #12
    def test_finite_schedule_with_progress_indication(self):
        task = track.Task("time-based", track.Operation("time-based", track.OperationType.Bulk.name, params={"body": ["a"], "size": 5},
                                                        param_source="driver-test-param-source"),
                          warmup_time_period=0, clients=4, params={"target-throughput": 4, "clients": 4})

        invocations = driver.schedule_for(self.test_track, task, 0)

        self.assert_schedule([
            (0.0, metrics.SampleType.Normal, 1 / 5, {"body": ["a"], "size": 5}),
            (1.0, metrics.SampleType.Normal, 2 / 5, {"body": ["a"], "size": 5}),
            (2.0, metrics.SampleType.Normal, 3 / 5, {"body": ["a"], "size": 5}),
            (3.0, metrics.SampleType.Normal, 4 / 5, {"body": ["a"], "size": 5}),
            (4.0, metrics.SampleType.Normal, 5 / 5, {"body": ["a"], "size": 5}),
        ], list(invocations), infinite_schedule=False)
Example #13
    def test_search_task_two_clients(self):
        task = track.Task(track.Operation("search", track.OperationType.Search.name, param_source="driver-test-param-source"),
                          warmup_iterations=2, iterations=10, clients=2, target_throughput=10)
        schedule = driver.schedule_for(self.test_track, task, 0)

        expected_schedule = [
            (0, metrics.SampleType.Warmup, 0, 6, None, {}),
            (0.2, metrics.SampleType.Normal, 0, 6, None, {}),
            (0.4, metrics.SampleType.Normal, 1, 6, None, {}),
            (0.6, metrics.SampleType.Normal, 2, 6, None, {}),
            (0.8, metrics.SampleType.Normal, 3, 6, None, {}),
            (1.0, metrics.SampleType.Normal, 4, 6, None, {}),
        ]
        self.assert_schedule(expected_schedule, schedule)
Example #14
    def test_eternal_schedule(self):
        task = track.Task(track.Operation("time-based", track.OperationType.Index.name, params={"body": ["a"]},
                                          param_source="driver-test-param-source"),
                          warmup_time_period=0, clients=4, params={"target-throughput": 4, "clients": 4})

        invocations = driver.schedule_for(self.test_track, task, 0)

        self.assert_schedule([
            (0.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (1.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (2.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (3.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (4.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
        ], invocations, eternal_schedule=True)
Example #15
    def test_execute_schedule_in_throughput_mode(self, es):
        es.bulk.return_value = {
            "errors": False
        }

        params.register_param_source_for_name("driver-test-param-source", DriverTestParamSource)
        test_track = track.Track(name="unittest", short_description="unittest track",
                                 source_root_url="http://example.org",
                                 indices=None,
                                 challenges=None)

        task = track.Task(track.Operation("time-based", track.OperationType.Index.name, params={
            "body": ["action_metadata_line", "index_line"],
            "action_metadata_present": True,
            "bulk-size": 1,
            # we need this because DriverTestParamSource does not know that we only have one bulk and hence size() returns incorrect results
            "size": 1
        },
                                          param_source="driver-test-param-source"),
                          warmup_time_period=0, clients=4)
        schedule = driver.schedule_for(test_track, task, 0)

        sampler = driver.Sampler(client_id=2, task=task, start_timestamp=100)
        cancel = threading.Event()
        complete = threading.Event()

        execute_schedule = driver.Executor(task, schedule, es, sampler, cancel, complete)
        execute_schedule()

        samples = sampler.samples

        self.assertTrue(len(samples) > 0)
        self.assertFalse(complete.is_set(), "Executor should not auto-complete a normal task")
        previous_absolute_time = -1.0
        previous_relative_time = -1.0
        for sample in samples:
            self.assertEqual(2, sample.client_id)
            self.assertEqual(task, sample.task)
            self.assertTrue(previous_absolute_time < sample.absolute_time)
            previous_absolute_time = sample.absolute_time
            self.assertTrue(previous_relative_time < sample.relative_time)
            previous_relative_time = sample.relative_time
            # we don't have any warmup time period
            self.assertEqual(metrics.SampleType.Normal, sample.sample_type)
            # latency equals service time in throughput mode
            self.assertEqual(sample.latency_ms, sample.service_time_ms)
            self.assertEqual(1, sample.total_ops)
            self.assertEqual("docs", sample.total_ops_unit)
            self.assertEqual(1, sample.request_meta_data["bulk-size"])
Example #16
    def test_execute_schedule_in_throughput_mode(self, es):
        es.bulk.return_value = {"errors": False}

        params.register_param_source_for_name("driver-test-param-source",
                                              DriverTestParamSource)
        test_track = track.Track(name="unittest",
                                 short_description="unittest track",
                                 description="unittest track",
                                 source_root_url="http://example.org",
                                 indices=None,
                                 challenges=None)

        task = track.Task(track.Operation(
            "time-based",
            track.OperationType.Index.name,
            params={
                "body": ["action_metadata_line", "index_line"],
                "action_metadata_present": True
            },
            param_source="driver-test-param-source"),
                          warmup_time_period=0,
                          clients=4,
                          target_throughput=None)
        schedule = driver.schedule_for(test_track, task, 0)

        sampler = driver.Sampler(client_id=2, task=task, start_timestamp=100)
        cancel = threading.Event()
        driver.execute_schedule(cancel, 0, task.operation, schedule, es,
                                sampler)

        samples = sampler.samples

        self.assertTrue(len(samples) > 0)
        previous_absolute_time = -1.0
        previous_relative_time = -1.0
        for sample in samples:
            self.assertEqual(2, sample.client_id)
            self.assertEqual(task, sample.task)
            self.assertTrue(previous_absolute_time < sample.absolute_time)
            previous_absolute_time = sample.absolute_time
            self.assertTrue(previous_relative_time < sample.relative_time)
            previous_relative_time = sample.relative_time
            # we don't have any warmup time period
            self.assertEqual(metrics.SampleType.Normal, sample.sample_type)
            # latency equals service time in throughput mode
            self.assertEqual(sample.latency_ms, sample.service_time_ms)
            self.assertEqual(1, sample.total_ops)
            self.assertEqual("docs", sample.total_ops_unit)
            self.assertEqual(1, sample.request_meta_data["bulk-size"])
Example #17
    def test_infinite_schedule_without_progress_indication(self):
        task = track.Task("time-based", track.Operation("time-based", track.OperationType.Bulk.name, params={"body": ["a"]},
                                                        param_source="driver-test-param-source"),
                          warmup_time_period=0, clients=4, params={"target-throughput": 4, "clients": 4})

        schedule_handle = driver.schedule_for(self.test_track, task, 0)
        schedule = schedule_handle()

        self.assert_schedule([
            (0.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (1.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (2.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (3.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (4.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
        ], schedule, infinite_schedule=True)
Example #18
    def test_search_task_one_client(self):
        task = track.Task(track.Operation("search", track.OperationType.Search.name, param_source="driver-test-param-source"),
                          warmup_iterations=3, iterations=5, clients=1, params={"target-throughput": 10, "clients": 1})
        schedule = driver.schedule_for(self.test_track, task, 0)

        expected_schedule = [
            (0, metrics.SampleType.Warmup, 1 / 8, {}),
            (0.1, metrics.SampleType.Warmup, 2 / 8, {}),
            (0.2, metrics.SampleType.Warmup, 3 / 8, {}),
            (0.3, metrics.SampleType.Normal, 4 / 8, {}),
            (0.4, metrics.SampleType.Normal, 5 / 8, {}),
            (0.5, metrics.SampleType.Normal, 6 / 8, {}),
            (0.6, metrics.SampleType.Normal, 7 / 8, {}),
            (0.7, metrics.SampleType.Normal, 8 / 8, {}),
        ]
        self.assert_schedule(expected_schedule, schedule)
Example #19
    def test_schedule_with_progress_determined_by_runner(self):
        task = track.Task("time-based", track.Operation("time-based", "driver-test-runner-with-completion",
                                                        params={"body": ["a"]},
                                                        param_source="driver-test-param-source"),
                          clients=1,
                          params={"target-throughput": 1, "clients": 1})

        schedule_handle = driver.schedule_for(self.test_track, task, 0)
        schedule = schedule_handle()

        self.assert_schedule([
            (0.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (1.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (2.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (3.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
            (4.0, metrics.SampleType.Normal, None, {"body": ["a"]}),
        ], schedule, infinite_schedule=True)
Example #20
    def test_execute_schedule_with_progress_determined_by_runner(self, es):
        es.bulk.return_value = {
            "errors": False
        }

        params.register_param_source_for_name("driver-test-param-source", DriverTestParamSource)
        test_track = track.Track(name="unittest", description="unittest track",
                                 indices=None,
                                 challenges=None)

        task = track.Task("time-based", track.Operation("time-based", operation_type="unit-test-recovery", params={
            "indices-to-restore": "*",
            # The runner will determine progress
            "size": None
        }, param_source="driver-test-param-source"), warmup_time_period=0, clients=4)
        schedule = driver.schedule_for(test_track, task, 0)

        sampler = driver.Sampler(client_id=2, task=task, start_timestamp=time.perf_counter())
        cancel = threading.Event()
        complete = threading.Event()

        execute_schedule = driver.Executor(task, schedule, es, sampler, cancel, complete)
        execute_schedule()

        samples = sampler.samples

        self.assertEqual(5, len(samples))
        self.assertTrue(self.runner_with_progress.completed)
        self.assertEqual(1.0, self.runner_with_progress.percent_completed)
        self.assertFalse(complete.is_set(), "Executor should not auto-complete a normal task")
        previous_absolute_time = -1.0
        previous_relative_time = -1.0
        for sample in samples:
            self.assertEqual(2, sample.client_id)
            self.assertEqual(task, sample.task)
            self.assertLess(previous_absolute_time, sample.absolute_time)
            previous_absolute_time = sample.absolute_time
            self.assertLess(previous_relative_time, sample.relative_time)
            previous_relative_time = sample.relative_time
            # we don't have any warmup time period
            self.assertEqual(metrics.SampleType.Normal, sample.sample_type)
            # latency equals service time in throughput mode
            self.assertEqual(sample.latency_ms, sample.service_time_ms)
            self.assertEqual(1, sample.total_ops)
            self.assertEqual("ops", sample.total_ops_unit)
Example #21
    def test_execute_schedule_in_throughput_mode(self, es):
        es.bulk.return_value = {
            "errors": False
        }

        params.register_param_source_for_name("driver-test-param-source", DriverTestParamSource)
        test_track = track.Track(name="unittest", short_description="unittest track", description="unittest track",
                                 source_root_url="http://example.org",
                                 indices=None,
                                 challenges=None)

        task = track.Task(track.Operation("time-based", track.OperationType.Index.name, params={
            "body": ["action_metadata_line", "index_line"],
            "action_metadata_present": True
        },
                                          param_source="driver-test-param-source"),
                          warmup_time_period=0, clients=4, target_throughput=None)
        schedule = driver.schedule_for(test_track, task, 0)

        sampler = driver.Sampler(client_id=2, operation=task.operation, start_timestamp=100)

        driver.execute_schedule(schedule, es, sampler)

        samples = sampler.samples

        self.assertTrue(len(samples) > 0)
        previous_absolute_time = -1.0
        previous_relative_time = -1.0
        for sample in samples:
            self.assertEqual(2, sample.client_id)
            self.assertEqual(task.operation, sample.operation)
            self.assertTrue(previous_absolute_time < sample.absolute_time)
            previous_absolute_time = sample.absolute_time
            self.assertTrue(previous_relative_time < sample.relative_time)
            previous_relative_time = sample.relative_time
            # we don't have any warmup time period
            self.assertEqual(metrics.SampleType.Normal, sample.sample_type)
            # latency equals service time in throughput mode
            self.assertEqual(sample.latency_ms, sample.service_time_ms)
            self.assertEqual(1, sample.total_ops)
            self.assertEqual("docs", sample.total_ops_unit)
            self.assertEqual(1, sample.request_meta_data["bulk-size"])
Example #22
    def test_schedule_for_warmup_time_based(self):
        task = track.Task(track.Operation("time-based", track.OperationType.Index.name, params={"body": ["a"], "size": 11},
                                          param_source="driver-test-param-source"),
                          warmup_time_period=0, clients=4, target_throughput=4)

        invocations = driver.schedule_for(self.test_track, task, 0)

        self.assert_schedule([
            (0.0, metrics.SampleType.Normal, 0, 11, "runner", {"body": ["a"], "size": 11}),
            (1.0, metrics.SampleType.Normal, 1, 11, "runner", {"body": ["a"], "size": 11}),
            (2.0, metrics.SampleType.Normal, 2, 11, "runner", {"body": ["a"], "size": 11}),
            (3.0, metrics.SampleType.Normal, 3, 11, "runner", {"body": ["a"], "size": 11}),
            (4.0, metrics.SampleType.Normal, 4, 11, "runner", {"body": ["a"], "size": 11}),
            (5.0, metrics.SampleType.Normal, 5, 11, "runner", {"body": ["a"], "size": 11}),
            (6.0, metrics.SampleType.Normal, 6, 11, "runner", {"body": ["a"], "size": 11}),
            (7.0, metrics.SampleType.Normal, 7, 11, "runner", {"body": ["a"], "size": 11}),
            (8.0, metrics.SampleType.Normal, 8, 11, "runner", {"body": ["a"], "size": 11}),
            (9.0, metrics.SampleType.Normal, 9, 11, "runner", {"body": ["a"], "size": 11}),
            (10.0, metrics.SampleType.Normal, 10, 11, "runner", {"body": ["a"], "size": 11}),
        ], list(invocations))
Example #23
    def test_schedule_for_warmup_time_based(self):
        task = track.Task(track.Operation("time-based", track.OperationType.Index.name, params={"body": ["a"], "size": 11},
                                          param_source="driver-test-param-source"),
                          warmup_time_period=0, clients=4, params={"target-throughput": 4, "clients": 4})

        invocations = driver.schedule_for(self.test_track, task, 0)

        self.assert_schedule([
            (0.0, metrics.SampleType.Normal, 1 / 11, {"body": ["a"], "size": 11}),
            (1.0, metrics.SampleType.Normal, 2 / 11, {"body": ["a"], "size": 11}),
            (2.0, metrics.SampleType.Normal, 3 / 11, {"body": ["a"], "size": 11}),
            (3.0, metrics.SampleType.Normal, 4 / 11, {"body": ["a"], "size": 11}),
            (4.0, metrics.SampleType.Normal, 5 / 11, {"body": ["a"], "size": 11}),
            (5.0, metrics.SampleType.Normal, 6 / 11, {"body": ["a"], "size": 11}),
            (6.0, metrics.SampleType.Normal, 7 / 11, {"body": ["a"], "size": 11}),
            (7.0, metrics.SampleType.Normal, 8 / 11, {"body": ["a"], "size": 11}),
            (8.0, metrics.SampleType.Normal, 9 / 11, {"body": ["a"], "size": 11}),
            (9.0, metrics.SampleType.Normal, 10 / 11, {"body": ["a"], "size": 11}),
            (10.0, metrics.SampleType.Normal, 11 / 11, {"body": ["a"], "size": 11}),
        ], list(invocations))
Example #24
    async def _run_benchmark(self):
        # avoid the aiohttp.internal warning "The object should be created from async function"
        es = self.create_es_clients(sync=False)
        try:
            cancel = threading.Event()
            # allow buffering more events than the default as we expect far more clients.
            self.sampler = driver.Sampler(start_timestamp=time.perf_counter(),
                                          buffer_size=65536)

            for task in self.challenge.schedule:
                self.current_tasks = []
                aws = []
                for sub_task in task:
                    self.current_tasks.append(sub_task)
                    self.logger.info("Running task [%s] with [%d] clients...",
                                     sub_task.name, sub_task.clients)
                    for client_id in range(sub_task.clients):
                        schedule = driver.schedule_for(self.track, sub_task,
                                                       client_id)
                        # used to indicate that this task should be considered completed prematurely. This is *not*
                        # due to cancellation but a regular event in a benchmark, used to model dependencies between parallel tasks.
                        complete = threading.Event()
                        e = driver.AsyncExecutor(client_id, sub_task, schedule,
                                                 es, self.sampler, cancel,
                                                 complete, self.abort_on_error)
                        aws.append(e())
                # join point
                _ = await asyncio.gather(*aws)
                self.logger.info("All clients have finished running task [%s]",
                                 task.name)
                # drain the active samples before we move on to the next task
                self.update_samples()
                self.post_process_samples()
                self.reset_relative_time()
                self.update_progress_message(task_finished=True)
        finally:
            await asyncio.get_event_loop().shutdown_asyncgens()
            for e in es.values():
                await e.transport.close()
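
The await asyncio.gather(*aws) call above acts as the join point: every client coroutine of the current task group must finish before the next task starts. A minimal, self-contained sketch of the same pattern (hypothetical names, not Rally code):

    import asyncio

    async def client(client_id: int, delay: float) -> str:
        await asyncio.sleep(delay)            # stands in for executing a schedule
        return f"client {client_id} done"

    async def run_task_group() -> None:
        aws = [client(i, delay=0.1 * i) for i in range(4)]
        results = await asyncio.gather(*aws)  # join point: waits for all clients
        print(results)

    asyncio.run(run_task_group())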
Example #25
    def test_schedule_for_time_based(self):
        task = track.Task(track.Operation("time-based", track.OperationType.Index.name, params={"body": ["a"], "size": 11},
                                          param_source="driver-test-param-source"), warmup_time_period=0.1, time_period=0.1, clients=1)

        invocations = list(driver.schedule_for(self.test_track, task, 0))

        self.assertTrue(len(invocations) > 0)

        last_progress = -1

        for invocation_time, sample_type, progress_percent, runner, params in invocations:
            # we're not throughput throttled
            self.assertEqual(0, invocation_time)
            if progress_percent <= 0.5:
                self.assertEqual(metrics.SampleType.Warmup, sample_type)
            else:
                self.assertEqual(metrics.SampleType.Normal, sample_type)
            self.assertTrue(last_progress < progress_percent)
            last_progress = progress_percent
            self.assertTrue(round(progress_percent, 2) >= 0.0, "progress should be >= 0.0 but was [%f]" % progress_percent)
            self.assertTrue(round(progress_percent, 2) <= 1.0, "progress should be <= 1.0 but was [%f]" % progress_percent)
            self.assertIsNotNone(runner, "runner must be defined")
            self.assertEqual({"body": ["a"], "size": 11}, params)