Example 1
    def test_scheduler_adapts_to_changed_weights(self):
        task = track.Task(name="bulk-index",
                          operation=track.Operation(
                              name="bulk-index",
                              operation_type=track.OperationType.Bulk.name),
                          clients=4,
                          params={"target-throughput": "5000 docs/s"})

        s = scheduler.UnitAwareScheduler(
            task=task, scheduler_class=scheduler.DeterministicScheduler)
        # first request is unthrottled
        self.assertEqual(0, s.next(0))
        # we'll start with bulks of 1,000 docs, which corresponds to 5 requests per second for all clients
        s.after_request(now=None,
                        weight=1000,
                        unit="docs",
                        request_meta_data=None)
        self.assertEqual(1 / 5 * task.clients, s.next(0))

        # bulk size changes to 10,000 docs, which means one request every two seconds for all clients
        s.after_request(now=None,
                        weight=10000,
                        unit="docs",
                        request_meta_data=None)
        self.assertEqual(2 * task.clients, s.next(0))
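
The assertions above encode a simple relationship: each client waits weight / target throughput * clients seconds between requests. A minimal standalone sketch of that arithmetic (a hypothetical helper, not Rally's UnitAwareScheduler internals):

def expected_wait_seconds(weight, target_throughput, clients):
    # e.g. 1000-doc bulks at 5000 docs/s shared by 4 clients:
    # 5 requests/s in total, so each client fires every 0.8 s
    return weight / target_throughput * clients

assert expected_wait_seconds(weight=1000, target_throughput=5000, clients=4) == 1 / 5 * 4
assert expected_wait_seconds(weight=10000, target_throughput=5000, clients=4) == 2 * 4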
Example 2
    def test_scheduler_adapts_to_changed_weights(self):
        task = track.Task(
            name="bulk-index",
            operation=track.Operation(name="bulk-index",
                                      operation_type=track.OperationType.Bulk.
                                      to_hyphenated_string()),
            clients=4,
            params={"target-throughput": "5000 docs/s"},
        )

        s = scheduler.UnitAwareScheduler(
            task=task, scheduler_class=scheduler.DeterministicScheduler)
        # first request is unthrottled
        # suppress pylint false positive
        # pylint: disable=not-callable
        assert s.next(0) == 0
        # we'll start with bulks of 1,000 docs, which corresponds to 5 requests per second for all clients
        s.after_request(now=None,
                        weight=1000,
                        unit="docs",
                        request_meta_data=None)
        # suppress pylint false positive
        # pylint: disable=not-callable
        assert s.next(0) == 1 / 5 * task.clients

        # bulk size changes to 10,000 docs, which means one request every two seconds for all clients
        s.after_request(now=None,
                        weight=10000,
                        unit="docs",
                        request_meta_data=None)
        # suppress pylint false positive
        # pylint: disable=not-callable
        assert s.next(0) == 2 * task.clients
Example 3
    def test_scheduler_does_not_change_throughput_for_empty_requests(self):
        task = track.Task(
            name="match-all-query",
            operation=track.Operation(name="query",
                                      operation_type=track.OperationType.
                                      Search.to_hyphenated_string()),
            clients=1,
            params={
                # implicitly: ops/s
                "target-throughput": 10
            },
        )

        s = scheduler.UnitAwareScheduler(
            task=task, scheduler_class=scheduler.DeterministicScheduler)
        # first request is unthrottled...
        s.before_request(now=0)
        # suppress pylint false positive
        # pylint: disable=not-callable
        assert s.next(0) == 0
        # ... but it also produced an error (zero ops)
        s.after_request(now=1, weight=0, unit="ops", request_meta_data=None)
        # next request is still unthrottled
        s.before_request(now=1)
        # suppress pylint false positive
        # pylint: disable=not-callable
        assert s.next(0) == 0
        s.after_request(now=2, weight=1, unit="ops", request_meta_data=None)
        # now we throttle
        s.before_request(now=2)
        # suppress pylint false positive
        # pylint: disable=not-callable
        assert s.next(0) == 0.1 * task.clients
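
The test above relies on a zero-weight response carrying no throughput information: the scheduler keeps the previous (unthrottled) wait instead of recomputing it. A hedged sketch of that behaviour, assuming the rate update is simply skipped for zero weights (hypothetical helper, not Rally's implementation):

def next_wait(current_wait, weight, target_throughput, clients):
    if weight <= 0:
        # empty/erroneous response: keep the previous wait time
        return current_wait
    return weight / target_throughput * clients

wait = 0.0                        # first request is unthrottled
wait = next_wait(wait, 0, 10, 1)  # zero ops -> still unthrottled
wait = next_wait(wait, 1, 10, 1)  # 1 op at 10 ops/s -> throttle to 0.1 s
assert wait == 0.1 * 1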
Example 4
    def test_scheduler_accepts_differing_units_pages_and_ops(self):
        task = track.Task(
            name="scroll-query",
            operation=track.Operation(name="scroll-query",
                                      operation_type=track.OperationType.
                                      Search.to_hyphenated_string()),
            clients=1,
            params={
                # implicitly: ops/s
                "target-throughput": 10
            },
        )

        s = scheduler.UnitAwareScheduler(
            task=task, scheduler_class=scheduler.DeterministicScheduler)
        # first request is unthrottled
        # suppress pylint false positive
        # pylint: disable=not-callable
        assert s.next(0) == 0
        # no exception despite differing units ...
        s.after_request(now=None,
                        weight=20,
                        unit="pages",
                        request_meta_data=None)
        # ... and it is still throttled in ops/s
        # suppress pylint false positive
        # pylint: disable=not-callable
        assert s.next(0) == 0.1 * task.clients
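
The expected value follows from the target being expressed in ops/s: each request counts as one operation, so a response reported in "pages" neither raises nor changes the throttling unit. A rough sketch under that assumption (hypothetical helper, not Rally's API):

def wait_for_ops_target(target_ops_per_second, clients, response_unit="ops"):
    # the response unit is tolerated but ignored for an ops/s target
    return 1 / target_ops_per_second * clients

assert wait_for_ops_target(10, clients=1, response_unit="pages") == 0.1 * 1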
Example 5
    def test_scheduler_rejects_differing_throughput_units(self):
        task = track.Task(
            name="bulk-index",
            operation=track.Operation(name="bulk-index", operation_type=track.OperationType.Bulk.to_hyphenated_string()),
            clients=4,
            params={"target-throughput": "5000 MB/s"},
        )

        s = scheduler.UnitAwareScheduler(task=task, scheduler_class=scheduler.DeterministicScheduler)
        with self.assertRaises(exceptions.RallyAssertionError) as ex:
            s.after_request(now=None, weight=1000, unit="docs", request_meta_data=None)
        self.assertEqual(
            "Target throughput for [bulk-index] is specified in [MB/s] but the task throughput is measured in [docs/s].",
            ex.exception.args[0],
        )
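
The asserted message points to a straightforward unit-consistency check between the configured target and the unit reported by responses. A hedged sketch of such a check (hypothetical helper; Rally raises exceptions.RallyAssertionError here, a plain ValueError stands in to keep the snippet self-contained):

def check_units(task_name, target_unit, response_unit):
    if target_unit != f"{response_unit}/s":
        raise ValueError(
            f"Target throughput for [{task_name}] is specified in [{target_unit}] "
            f"but the task throughput is measured in [{response_unit}/s].")

# check_units("bulk-index", "MB/s", "docs") raises with the message asserted above.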
Example 6
    def test_scheduler_accepts_differing_units_pages_and_ops(self):
        task = track.Task(
            name="scroll-query",
            operation=track.Operation(
                name="scroll-query",
                operation_type=track.OperationType.Search.name),
            clients=1,
            params={
                # implicitly: ops/s
                "target-throughput": 10
            })

        s = scheduler.UnitAwareScheduler(
            task=task, scheduler_class=scheduler.DeterministicScheduler)
        # first request is unthrottled
        self.assertEqual(0, s.next(0))
        # no exception despite differing units ...
        s.after_request(now=None,
                        weight=20,
                        unit="pages",
                        request_meta_data=None)
        # ... and it is still throttled in ops/s
        self.assertEqual(0.1 * task.clients, s.next(0))