Ejemplo n.º 1
0
    def _run_scenario(self, cls, method_name, context, args):
        """Run the scenario ``times`` times, starting one iteration per period.

        Each iteration is launched in its own single-worker thread pool so
        that a new iteration can start every ``period`` seconds regardless
        of how long earlier iterations take.

        :param cls: Scenario class that implements the benchmark
        :param method_name: name of the scenario method to run
        :param context: benchmark context dict passed to each iteration
        :param args: arguments for the scenario method

        :returns: base.ScenarioRunnerResult with one result dict per iteration
        """
        times = self.config["times"]
        period = self.config["period"]
        timeout = self.config.get("timeout", 600)

        async_results = []
        pools = []

        for i in range(times):
            pool = multiprocessing_pool.ThreadPool(processes=1)
            scenario_args = ((i, cls, method_name,
                              base._get_scenario_context(context), args),)
            async_result = pool.apply_async(base._run_scenario_once,
                                            scenario_args)
            async_results.append(async_result)

            # Close the pool so its worker thread can be joined once its
            # single task finishes (fixes a thread leak: the original never
            # closed/joined the pools it created).
            pool.close()
            pools.append(pool)

            if i < times - 1:
                time.sleep(period)

        results = []
        for async_result in async_results:
            try:
                result = async_result.get(timeout=timeout)
            except multiprocessing.TimeoutError as e:
                result = {"duration": timeout, "idle_duration": 0,
                          "error": utils.format_exc(e)}
            results.append(result)

        # Reap the worker threads now that all results are collected.
        for pool in pools:
            pool.join()

        return base.ScenarioRunnerResult(results)
Ejemplo n.º 2
0
    def test_run_scenario_internal_logic(self, mock_time, mock_mp,
                                         mock_result):
        """Check that the runner drives the process pool as expected."""
        context = fakes.FakeUserContext({}).context
        config = {"times": 4, "period": 0, "timeout": 5}
        runner = periodic.PeriodicScenarioRunner(
                        None, [context["admin"]["endpoint"]], config)

        pool_instance = mock.MagicMock()
        mock_mp.Pool.return_value = pool_instance

        runner._run_scenario(fakes.FakeScenario, "do_it", context, {})

        expected_calls = []
        for i in range(config["times"]):
            scenario_args = ((i, fakes.FakeScenario, "do_it",
                              base._get_scenario_context(context), {}),)
            expected_calls.append(
                mock.call.apply_async(base._run_scenario_once, scenario_args))
            expected_calls.append(mock.call.close())

        expected_calls.extend(
            mock.call.apply_async().get(timeout=5)
            for _ in range(config["times"]))

        mock_mp.assert_has_calls([mock.call.Pool(1)])
        pool_instance.assert_has_calls(expected_calls)
        mock_time.assert_has_calls([])
Ejemplo n.º 3
0
    def test_run_scenario_internal_logic(self, mock_time, mock_pool,
                                         mock_result):
        """Verify the ThreadPool interactions performed by the runner."""
        context = fakes.FakeUserContext({}).context
        config = {"times": 4, "period": 0, "timeout": 5}
        runner = periodic.PeriodicScenarioRunner(
                        None, [context["admin"]["endpoint"]], config)

        pool_instance = mock.MagicMock()
        mock_pool.ThreadPool.return_value = pool_instance

        runner._run_scenario(fakes.FakeScenario, "do_it", context, {})

        expected_calls = [
            mock.call.apply_async(
                base._run_scenario_once,
                ((i, fakes.FakeScenario, "do_it",
                  base._get_scenario_context(context), {}),))
            for i in range(config["times"])
        ]
        expected_calls += [mock.call.apply_async().get(timeout=5)
                           for _ in range(config["times"])]

        mock_pool.assert_has_calls([mock.call.ThreadPool(processes=1)])
        pool_instance.assert_has_calls(expected_calls)
        mock_time.assert_has_calls([])
Ejemplo n.º 4
0
    def _run_scenario(self, cls, method_name, context, args):
        """Start one scenario iteration per ``period`` seconds.

        Every iteration runs in its own single-thread pool so a slow
        iteration cannot delay the start of the next one.

        :param cls: Scenario class implementing the benchmark scenario
        :param method_name: name of the scenario method to execute
        :param context: benchmark context passed to every iteration
        :param args: arguments for the scenario method

        :returns: base.ScenarioRunnerResult with one dict per iteration
        """
        times = self.config["times"]
        period = self.config["period"]
        timeout = self.config.get("timeout", 600)

        async_results = []
        pools = []

        for i in range(times):
            pool = multiprocessing_pool.ThreadPool(processes=1)
            scenario_args = ((i, cls, method_name,
                              base._get_scenario_context(context), args), )
            async_result = pool.apply_async(base._run_scenario_once,
                                            scenario_args)
            async_results.append(async_result)

            # Close the pool so its worker thread can be joined later;
            # the original leaked one live pool per iteration.
            pool.close()
            pools.append(pool)

            if i < times - 1:
                time.sleep(period)

        results = []
        for async_result in async_results:
            try:
                result = async_result.get(timeout=timeout)
            except multiprocessing.TimeoutError as e:
                result = {
                    "duration": timeout,
                    "idle_duration": 0,
                    "error": utils.format_exc(e)
                }
            results.append(result)

        # Join the worker threads now that every result has been consumed.
        for pool in pools:
            pool.join()

        return base.ScenarioRunnerResult(results)
Ejemplo n.º 5
0
    def test_get_scenario_context(self, mock_random):
        """_get_scenario_context should pick user #1 and that user's tenant."""
        users = []
        tenants = {}

        for t in range(2):
            tenants[str(t)] = {"name": str(t)}
            users.extend({"id": "%s_%s" % (t, u),
                          "tenant_id": str(t),
                          "endpoint": "endpoint"} for u in range(3))

        context = {
            "admin": mock.MagicMock(),
            "users": users,
            "tenants": tenants,
            "some_random_key": {"nested": mock.MagicMock(), "one_more": 10},
        }
        user = context["users"][1]
        expected = {
            "admin": context["admin"],
            "user": user,
            "tenant": context["tenants"][user["tenant_id"]],
            "some_random_key": context["some_random_key"],
        }

        self.assertEqual(expected, base._get_scenario_context(context))
Ejemplo n.º 6
0
    def test_run_scenario_internal_logic(self, mock_time, mock_pool,
                                         mock_result):
        """Verify the pool calls issued when config is passed to the run."""
        context = fakes.FakeUserContext({}).context
        runner = periodic.PeriodicScenarioRunner(
                        None, [context["admin"]["endpoint"]])
        times, period = 4, 0

        pool_instance = mock.MagicMock()
        mock_pool.ThreadPool.return_value = pool_instance

        runner._run_scenario(fakes.FakeScenario, "do_it", context, {},
                             {"times": times, "period": period, "timeout": 5})

        expected_calls = [
            mock.call.apply_async(
                base._run_scenario_once,
                ((i, fakes.FakeScenario, "do_it",
                  base._get_scenario_context(context), {}),))
            for i in range(times)
        ]
        expected_calls += [mock.call.apply_async().get(timeout=5)
                           for _ in range(times)]

        mock_pool.assert_has_calls([mock.call.ThreadPool(processes=1)])
        pool_instance.assert_has_calls(expected_calls)
        mock_time.assert_has_calls([])
Ejemplo n.º 7
0
    def _run_scenario(self, cls, method_name, context, args):
        """Launch one iteration per period, each in its own process pool.

        Results are forwarded through self._send_result; iterations that
        exceed ``timeout`` are converted into timeout result records.
        """
        times = self.config["times"]
        period = self.config["period"]
        timeout = self.config.get("timeout", 600)

        async_results = []
        pools = []

        for iteration in range(times):
            pool = multiprocessing.Pool(1)
            run_args = ((iteration, cls, method_name,
                         base._get_scenario_context(context), args),)
            async_results.append(
                pool.apply_async(base._run_scenario_once, run_args))
            pool.close()
            pools.append(pool)

            # No wait is needed once the final iteration has been started.
            if iteration != times - 1:
                time.sleep(period)

        for async_result in async_results:
            try:
                result = async_result.get(timeout=timeout)
            except multiprocessing.TimeoutError as e:
                result = base.format_result_on_timeout(e, timeout)
            self._send_result(result)

        for pool in pools:
            pool.join()
Ejemplo n.º 8
0
    def _run_scenario(self, cls, method_name, context, args):
        """Run the benchmark scenario serially, in-process.

        Iterations execute one-by-one inside the same Python interpreter
        as Rally itself, which keeps the load free of any concurrency and
        makes it possible to debug the scenario interactively from the
        very command used to start Rally.

        Each iteration's result dict is published via self._send_result.

        :param cls: Scenario class where the scenario is implemented
        :param method_name: name of the method that implements the scenario
        :param context: benchmark context with users, admin & other info,
                        created before the benchmark started
        :param args: arguments to call the scenario method with
        """
        for iteration in range(self.config.get("times", 1)):
            if self.aborted.is_set():
                break
            self._send_result(base._run_scenario_once(
                (iteration, cls, method_name,
                 base._get_scenario_context(context), args)))
Ejemplo n.º 9
0
    def _run_scenario(self, cls, method_name, context, args):
        """Spread ``times`` iterations over worker processes at a fixed rps."""
        times = self.config["times"]
        timeout = self.config.get("timeout", 600)
        # Never start more workers than there are iterations to run.
        workers = min(multiprocessing.cpu_count(), times)
        worker_rps = float(self.config["rps"]) / workers

        queue = multiprocessing.Queue()
        scenario_context = base._get_scenario_context(context)
        processes = []

        # Distribute iterations as evenly as possible: the first `extra`
        # workers each take one additional iteration.
        per_worker, extra = divmod(times, workers)

        for worker_id in range(workers):
            share = per_worker + (1 if worker_id < extra else 0)
            process = multiprocessing.Process(
                target=worker_process,
                args=(worker_rps, share, queue, scenario_context,
                      timeout, worker_id, cls, method_name, args))
            process.start()
            processes.append(process)

        while processes:
            for process in processes:
                process.join(SEND_RESULT_DELAY)
                if not process.is_alive():
                    process.join()
                    processes.remove(process)

            while not queue.empty():
                self._send_result(queue.get())

        queue.close()
Ejemplo n.º 10
0
    def test_get_scenario_context(self, mock_random):
        """The context handed to a scenario contains user #1's data."""
        users = []
        tenants = {}

        for tenant_idx in range(2):
            tenant_id = str(tenant_idx)
            tenants[tenant_id] = {"name": tenant_id}
            for user_idx in range(3):
                users.append({"id": "%s_%s" % (tenant_idx, user_idx),
                              "tenant_id": tenant_id,
                              "endpoint": "endpoint"})

        context = {
            "admin": mock.MagicMock(),
            "users": users,
            "tenants": tenants,
            "some_random_key": {"nested": mock.MagicMock(), "one_more": 10},
        }
        selected_user = context["users"][1]
        expected_context = {
            "admin": context["admin"],
            "user": selected_user,
            "tenant": context["tenants"][selected_user["tenant_id"]],
            "some_random_key": context["some_random_key"],
        }

        self.assertEqual(expected_context,
                         base._get_scenario_context(context))
Ejemplo n.º 11
0
    def _run_scenario(self, cls, method_name, context, args):
        """Execute the scenario iterations sequentially in this process.

        Running everything in Rally's own interpreter avoids concurrency
        entirely, which is convenient for interactive debugging of the
        scenario from the same command used to start Rally.

        :param cls: Scenario class implementing the scenario
        :param method_name: scenario method to invoke
        :param context: benchmark context (users, admin, ...) built before
                        the benchmark started
        :param args: arguments for the scenario method
        """
        times = self.config.get("times", 1)

        iteration = 0
        while iteration < times and not self.aborted.is_set():
            result = base._run_scenario_once(
                (iteration, cls, method_name,
                 base._get_scenario_context(context), args))
            self._send_result(result)
            iteration += 1
Ejemplo n.º 12
0
def _worker_process(rps, times, queue, context, timeout,
                    worker_id, workers, cls, method_name, args):
    """Start scenario within threads.

    Spawn N threads per second. Each thread runs the scenario once, and
    appends its result to the queue.

    :param rps: runs per second
    :param times: number of threads to be run
    :param queue: queue object to append results
    :param context: scenario context object
    :param timeout: timeout operation
    :param worker_id: id of worker process
    :param workers: number of total workers
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    """

    pool = []
    i = 0
    start = time.time()
    sleep = 1.0 / rps

    # Injecting timeout to exclude situations, where start time and
    # actual time are negligibly close

    randsleep_delay = random.randint(int(sleep / 2 * 100), int(sleep * 100))
    time.sleep(randsleep_delay / 100.0)

    while times > i:
        scenario_context = base._get_scenario_context(context)
        i += 1
        # Iteration numbers are interleaved across the worker processes.
        scenario_args = (queue, (worker_id + workers * (i - 1), cls,
                         method_name, scenario_context, args),)
        thread = threading.Thread(target=_worker_thread,
                                  args=scenario_args)
        thread.start()
        pool.append(thread)

        time_gap = time.time() - start
        real_rps = i / time_gap if time_gap else "Infinity"

        LOG.debug("Worker: %s rps: %s (requested rps: %s)" % (
            worker_id, real_rps, rps))

        # try to join latest thread(s) until it finished, or until time to
        # start new thread
        while i / (time.time() - start) > rps:
            if pool:
                pool[0].join(sleep)
                # Thread.isAlive() was removed in Python 3.9; is_alive()
                # is the supported spelling.
                if not pool[0].is_alive():
                    pool.pop(0)
            else:
                time.sleep(sleep)

    while pool:
        thr = pool.pop(0)
        thr.join()
Ejemplo n.º 13
0
    def _run_scenario(self, cls, method_name, context, args):
        """Run every iteration one after another and publish each result."""
        for iteration in range(self.config.get('times', 1)):
            iteration_args = (iteration, cls, method_name,
                              base._get_scenario_context(context), args)
            self._send_result(base._run_scenario_once(iteration_args))
Ejemplo n.º 14
0
    def _run_scenario(self, cls, method_name, context, args):
        """Serially execute iterations until done or an abort is requested."""
        total = self.config.get("times", 1)

        for iteration in range(total):
            # Stop producing load as soon as the abort flag is raised.
            if self.aborted.is_set():
                break
            run_args = (iteration, cls, method_name,
                        base._get_scenario_context(context), args)
            self._send_result(base._run_scenario_once(run_args))
Ejemplo n.º 15
0
    def _run_scenario(self, cls, method_name, context, args):
        """Run the scenario a random number of times in [min_times, max_times].

        Runner settings are stored in self.config.

        :param cls: Scenario class implementing the scenario
        :param method_name: scenario method to invoke
        :param context: benchmark context passed to each iteration
        :param args: arguments for the scenario method
        """
        min_times = self.config.get("min_times", 1)
        max_times = self.config.get("max_times", 1)

        # random.randint is inclusive on both ends; the previous
        # randrange(min_times, max_times) excluded max_times and raised
        # ValueError with the default config (min_times == max_times == 1).
        for i in range(random.randint(min_times, max_times)):
            run_args = (i, cls, method_name,
                        base._get_scenario_context(context), args)
            result = base._run_scenario_once(run_args)
            # use self._send_result for the result of each iteration
            self._send_result(result)
Ejemplo n.º 16
0
    def _run_scenario(self, cls, method_name, context, args):
        """Collect one result per iteration and wrap them in a runner result."""
        times = self.config.get('times', 1)

        results = [
            base._run_scenario_once((i, cls, method_name,
                                     base._get_scenario_context(context),
                                     args))
            for i in range(times)
        ]

        return base.ScenarioRunnerResult(results)
Ejemplo n.º 17
0
 def test_run_scenario_once_exception(self, mock_clients, mock_rtimer):
     """A failing scenario still yields a result dict plus an error entry."""
     user_ctx = fakes.FakeUserContext({}).context
     context = base._get_scenario_context(user_ctx)
     result = base._run_scenario_once(
         (1, fakes.FakeScenario, "something_went_wrong", context, {}))
     error = result.pop("error")
     expected_result = {
         "duration": fakes.FakeTimer().duration(),
         "timestamp": fakes.FakeTimer().timestamp(),
         "idle_duration": 0,
         "scenario_output": {"errors": "", "data": {}},
         "atomic_actions": {},
     }
     self.assertEqual(expected_result, result)
     self.assertEqual(error[:2], ["Exception", "Something went wrong"])
Ejemplo n.º 18
0
    def test_run_scenario_once_without_scenario_output(self, mock_clients,
                                                       mock_rtimer):
        """A plain scenario run produces an empty-output result dict."""
        context = base._get_scenario_context(fakes.FakeUserContext({}).context)
        result = base._run_scenario_once(
            (1, fakes.FakeScenario, "do_it", context, {}))

        timer = fakes.FakeTimer()
        expected = {
            "duration": timer.duration(),
            "timestamp": timer.timestamp(),
            "idle_duration": 0,
            "error": [],
            "scenario_output": {"errors": "", "data": {}},
            "atomic_actions": {},
        }
        self.assertEqual(expected, result)
Ejemplo n.º 19
0
    def test_run_scenario_once_with_scenario_output(self, mock_clients,
                                                    mock_rtimer):
        """The scenario's own output is propagated into the result dict."""
        context = base._get_scenario_context(fakes.FakeUserContext({}).context)
        args = (1, fakes.FakeScenario, "with_output", context, {})
        result = base._run_scenario_once(args)

        timer = fakes.FakeTimer()
        self.assertEqual(
            {
                "duration": timer.duration(),
                "timestamp": timer.timestamp(),
                "idle_duration": 0,
                "error": [],
                "scenario_output": fakes.FakeScenario().with_output(),
                "atomic_actions": {},
            },
            result)
Ejemplo n.º 20
0
    def test_run_scenario_once_without_scenario_output(self, mock_clients,
                                                       mock_rutils):
        """A successful run without output yields an empty-output dict."""
        mock_rutils.Timer = fakes.FakeTimer
        context = base._get_scenario_context(fakes.FakeUserContext({}).context)
        result = base._run_scenario_once(
            (1, fakes.FakeScenario, "do_it", context, {}))

        expected = {
            "duration": fakes.FakeTimer().duration(),
            "idle_duration": 0,
            "error": [],
            "scenario_output": {"errors": "", "data": {}},
            "atomic_actions": [],
        }
        self.assertEqual(expected, result)
Ejemplo n.º 21
0
 def test_run_scenario_once_exception(self, mock_clients, mock_rutils):
     """Exceptions raised by the scenario land in the result's "error"."""
     mock_rutils.Timer = fakes.FakeTimer
     context = base._get_scenario_context(fakes.FakeUserContext({}).context)
     result = base._run_scenario_once(
         (1, fakes.FakeScenario, "something_went_wrong", context, {}))
     error = result.pop("error")
     expected = {
         "duration": fakes.FakeTimer().duration(),
         "idle_duration": 0,
         "scenario_output": {"errors": "", "data": {}},
         "atomic_actions": [],
     }
     self.assertEqual(expected, result)
     self.assertEqual([str(Exception), "Something went wrong"], error[:2])
Ejemplo n.º 22
0
    def test_run_scenario_once_with_scenario_output(self, mock_clients,
                                                    mock_rutils):
        """Scenario output returned by the method ends up in the result."""
        mock_rutils.Timer = fakes.FakeTimer
        context = base._get_scenario_context(fakes.FakeUserContext({}).context)
        result = base._run_scenario_once(
            (1, fakes.FakeScenario, "with_output", context, {}))

        self.assertEqual(
            {
                "duration": fakes.FakeTimer().duration(),
                "idle_duration": 0,
                "error": [],
                "scenario_output": fakes.FakeScenario().with_output(),
                "atomic_actions": [],
            },
            result)
Ejemplo n.º 23
0
 def test_run_scenario_once_exception(self, mock_clients, mock_rutils):
     """A raising scenario produces a result dict plus a captured error."""
     mock_rutils.Timer = fakes.FakeTimer
     context = base._get_scenario_context(fakes.FakeUserContext({}).context)
     args = (1, fakes.FakeScenario, "something_went_wrong", context, {})
     result = base._run_scenario_once(args)
     expected_error = result.pop("error")
     # Fixed local-variable typo: "expected_reuslt" -> "expected_result".
     expected_result = {
         "duration": fakes.FakeTimer().duration(),
         "idle_duration": 0,
         "scenario_output": {},
         "atomic_actions": []
     }
     self.assertEqual(expected_result, result)
     self.assertEqual(expected_error[:2],
                      [str(Exception), "Something went wrong"])
Ejemplo n.º 24
0
    def test_run_scenario_once_internal_logic(self, mock_clients):
        """The scenario class is instantiated and driven as expected."""
        mock_clients.Clients.return_value = "cl"

        context = base._get_scenario_context(fakes.FakeUserContext({}).context)
        scenario_cls = mock.MagicMock()
        base._run_scenario_once((2, scenario_cls, "test", context, {}))

        scenario_cls.assert_has_calls(
            [
                mock.call(context=context, admin_clients="cl", clients="cl"),
                mock.call().test(),
                mock.call().idle_duration(),
                mock.call().idle_duration(),
                mock.call().atomic_actions(),
            ],
            any_order=True)
Ejemplo n.º 25
0
    def test_run_scenario_once_internal_logic(self, mock_clients):
        """Instantiation, method call and bookkeeping calls all happen."""
        mock_clients.Clients.return_value = "cl"

        context = base._get_scenario_context(fakes.FakeUserContext({}).context)
        scenario_cls = mock.MagicMock()
        args = (2, scenario_cls, "test", context, {})
        base._run_scenario_once(args)

        calls = [mock.call(context=context, admin_clients="cl", clients="cl")]
        calls.append(mock.call().test())
        calls += [mock.call().idle_duration()] * 2
        calls.append(mock.call().atomic_actions())
        scenario_cls.assert_has_calls(calls, any_order=True)
Ejemplo n.º 26
0
    def test_get_scenario_context(self, mock_random):
        """random.choice is used to pick the user placed into the context."""
        mock_random.choice = lambda seq: seq[1]

        users = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]
        context = {
            "admin": mock.MagicMock(),
            "users": users,
            "some_random_key": {"nested": mock.MagicMock(), "one_more": 10},
        }
        expected = {
            "admin": context["admin"],
            "user": users[1],
            "some_random_key": context["some_random_key"],
        }

        self.assertEqual(expected, base._get_scenario_context(context))
Ejemplo n.º 27
0
def _worker_process(queue, iteration_gen, timeout, rps, times,
                    max_concurrent, context, cls, method_name,
                    args, aborted):
    """Start scenario within threads.

    Spawn N threads per second. Each thread runs the scenario once, and appends
    result to queue. A maximum of max_concurrent threads will be ran
    concurrently.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param rps: number of scenario iterations to be run per one second
    :param times: total number of scenario iterations to be run
    :param max_concurrent: maximum worker concurrency
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    """

    pool = collections.deque()
    start = time.time()
    sleep = 1.0 / rps

    base._log_worker_info(times=times, rps=rps, timeout=timeout,
                          cls=cls, method_name=method_name, args=args)

    # Injecting timeout to exclude situations, where start time and
    # actual time are negligible close

    randsleep_delay = random.randint(int(sleep / 2 * 100), int(sleep * 100))
    time.sleep(randsleep_delay / 100.0)

    i = 0
    while i < times and not aborted.is_set():
        scenario_context = base._get_scenario_context(context)
        scenario_args = (next(iteration_gen), cls, method_name,
                         scenario_context, args)
        worker_args = (queue, scenario_args)
        thread = threading.Thread(target=base._worker_thread,
                                  args=worker_args)
        i += 1
        thread.start()
        pool.append(thread)

        time_gap = time.time() - start
        real_rps = i / time_gap if time_gap else "Infinity"

        LOG.debug("Worker: %s rps: %s (requested rps: %s)" %
                  (i, real_rps, rps))

        # try to join latest thread(s) until it finished, or until time to
        # start new thread (if we have concurrent slots available)
        while i / (time.time() - start) > rps or len(pool) >= max_concurrent:
            if pool:
                pool[0].join(sleep)
                # Thread.isAlive() was removed in Python 3.9; is_alive()
                # is the supported spelling.
                if not pool[0].is_alive():
                    pool.popleft()
            else:
                time.sleep(sleep)

    while pool:
        thr = pool.popleft()
        thr.join()
Ejemplo n.º 28
0
 def _iter_scenario_args(cls, method, ctx, args, times):
     """Yield one (iteration, cls, method, context, args) tuple per run."""
     # range() works on both Python 2 and 3; xrange was removed in Python 3.
     for i in range(times):
         yield (i, cls, method, base._get_scenario_context(ctx), args)
Ejemplo n.º 29
0
 def _scenario_args(i):
     """Build the argument tuple for scenario iteration *i*."""
     scenario_context = base._get_scenario_context(ctx)
     return (i, cls, method, scenario_context, args)
Ejemplo n.º 30
0
 def _scenario_args(i):
     # Build the args tuple for iteration *i*; bail out once an abort has
     # been requested.
     # NOTE(review): raising StopIteration from a plain callable only stops
     # consumers that honor the iterator protocol; inside a generator
     # (PEP 479, Python 3.7+) it would surface as RuntimeError instead —
     # confirm how the caller consumes this before relying on it.
     if aborted.is_set():
         raise StopIteration()
     return (i, cls, method, base._get_scenario_context(ctx), args)
Ejemplo n.º 31
0
def _worker_process(queue, iteration_gen, timeout, concurrency, times, context,
                    cls, method_name, args, aborted):
    """Start the scenario within threads.

    Spawn threads to support scenario execution for a fixed number of times.
    This generates a constant load on the cloud under test by executing each
    scenario iteration without pausing between iterations. Each thread runs
    the scenario method once with passed scenario arguments and context.
    After execution the result is appended to the queue.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param concurrency: number of concurrently running scenario iterations
    :param times: total number of scenario iterations to be run
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    """

    pool = collections.deque()
    alive_threads_in_pool = 0
    finished_threads_in_pool = 0

    base._log_worker_info(times=times, concurrency=concurrency,
                          timeout=timeout, cls=cls, method_name=method_name,
                          args=args)

    iteration = next(iteration_gen)
    while iteration < times and not aborted.is_set():
        scenario_context = base._get_scenario_context(context)
        scenario_args = (iteration, cls, method_name, scenario_context, args)
        worker_args = (queue, scenario_args)

        thread = threading.Thread(target=base._worker_thread,
                                  args=worker_args)
        thread.start()
        pool.append((thread, time.time()))
        alive_threads_in_pool += 1

        while alive_threads_in_pool == concurrency:
            prev_finished_threads_in_pool = finished_threads_in_pool
            finished_threads_in_pool = 0
            for t in pool:
                # Thread.isAlive() was removed in Python 3.9; is_alive()
                # is the supported spelling.
                if not t[0].is_alive():
                    finished_threads_in_pool += 1

            alive_threads_in_pool -= finished_threads_in_pool
            alive_threads_in_pool += prev_finished_threads_in_pool

            if alive_threads_in_pool < concurrency:
                # NOTE(boris-42): cleanup pool array. This is required because
                # in other case array length will be equal to times which
                # is unlimited big
                while pool and not pool[0][0].is_alive():
                    pool.popleft()[0].join()
                    finished_threads_in_pool -= 1
                break

            # we should wait to not create big noise with these checks
            time.sleep(0.001)
        iteration = next(iteration_gen)

    # Wait until all threads are done
    while pool:
        pool.popleft()[0].join()
Ejemplo n.º 32
0
 def _iter_scenario_args(cls, method, ctx, args, times):
     """Generate the per-iteration scenario argument tuples."""
     # xrange is Python-2-only; range behaves the same here and runs on
     # Python 3 as well.
     for i in range(times):
         yield (i, cls, method, base._get_scenario_context(ctx), args)
Ejemplo n.º 33
0
 def _scenario_args(i):
     """Assemble the per-iteration argument tuple for iteration *i*."""
     iteration_context = base._get_scenario_context(ctx)
     return (i, cls, method, iteration_context, args)
Ejemplo n.º 34
0
def _worker_process(queue, iteration_gen, timeout, rps, times, context, cls,
                    method_name, args, aborted):
    """Start scenario within threads.

    Spawn N threads per second. Each thread runs the scenario once, and appends
    the result to the queue.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param rps: number of scenario iterations to be run per one second
    :param times: total number of scenario iterations to be run
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    """

    pool = collections.deque()
    start = time.time()
    sleep = 1.0 / rps

    base._log_worker_info(times=times,
                          rps=rps,
                          timeout=timeout,
                          cls=cls,
                          method_name=method_name,
                          args=args)

    # Injecting timeout to exclude situations, where start time and
    # actual time are negligibly close

    randsleep_delay = random.randint(int(sleep / 2 * 100), int(sleep * 100))
    time.sleep(randsleep_delay / 100.0)

    i = 0
    while i < times and not aborted.is_set():
        scenario_context = base._get_scenario_context(context)
        scenario_args = (next(iteration_gen), cls, method_name,
                         scenario_context, args)
        worker_args = (queue, scenario_args)
        thread = threading.Thread(target=base._worker_thread, args=worker_args)
        i += 1
        thread.start()
        pool.append(thread)

        time_gap = time.time() - start
        real_rps = i / time_gap if time_gap else "Infinity"

        LOG.debug("Worker: %s rps: %s (requested rps: %s)" %
                  (i, real_rps, rps))

        # try to join latest thread(s) until it finished, or until time to
        # start new thread
        while i / (time.time() - start) > rps:
            if pool:
                pool[0].join(sleep)
                # Thread.isAlive() was removed in Python 3.9; is_alive()
                # is the supported spelling.
                if not pool[0].is_alive():
                    pool.popleft()
            else:
                time.sleep(sleep)

    while pool:
        thr = pool.popleft()
        thr.join()
Ejemplo n.º 35
0
def _worker_process(queue, iteration_gen, timeout, concurrency, times, context,
                    cls, method_name, args, aborted):
    """Start the scenario within threads.

    Spawn threads to support scenario execution for a fixed number of times.
    This generates a constant load on the cloud under test by executing each
    scenario iteration without pausing between iterations. Each thread runs
    the scenario method once with passed scenario arguments and context.
    After execution the result is appended to the queue.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param concurrency: number of concurrently running scenario iterations
    :param times: total number of scenario iterations to be run
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    """

    pool = collections.deque()
    alive_threads_in_pool = 0
    finished_threads_in_pool = 0

    base._log_worker_info(times=times,
                          concurrency=concurrency,
                          timeout=timeout,
                          cls=cls,
                          method_name=method_name,
                          args=args)

    iteration = next(iteration_gen)
    while iteration < times and not aborted.is_set():
        scenario_context = base._get_scenario_context(context)
        scenario_args = (iteration, cls, method_name, scenario_context, args)
        worker_args = (queue, scenario_args)

        thread = threading.Thread(target=base._worker_thread, args=worker_args)
        thread.start()
        # Track (thread, start_time) so stale entries can be pruned later.
        pool.append((thread, time.time()))
        alive_threads_in_pool += 1

        # Throttle: spin here while the pool is saturated, updating the
        # alive/finished bookkeeping until a slot frees up.
        while alive_threads_in_pool == concurrency:
            prev_finished_threads_in_pool = finished_threads_in_pool
            finished_threads_in_pool = 0
            for t in pool:
                # NOTE: is_alive() is the portable spelling; the camelCase
                # isAlive() alias was removed in Python 3.9.
                if not t[0].is_alive():
                    finished_threads_in_pool += 1

            # Only threads that finished since the previous scan free slots.
            alive_threads_in_pool -= finished_threads_in_pool
            alive_threads_in_pool += prev_finished_threads_in_pool

            if alive_threads_in_pool < concurrency:
                # NOTE(boris-42): cleanup pool array. This is required because
                # in other case array length will be equal to times which
                # is unlimited big
                while pool and not pool[0][0].is_alive():
                    pool.popleft()[0].join()
                    finished_threads_in_pool -= 1
                break

            # we should wait to not create big noise with these checks
            time.sleep(0.001)
        iteration = next(iteration_gen)

    # Wait until all threads are done
    while pool:
        pool.popleft()[0].join()
Ejemplo n.º 36
0
Archivo: rps.py Proyecto: congto/rally
    def _run_scenario(self, cls, method_name, context, args):
        """Run the scenario at a fixed requests-per-second rate.

        Spreads ``times`` iterations over up to ``cpu_count`` worker
        processes; each process spawns one thread per iteration, pacing
        thread starts to its share of the requested rps.

        :param cls: scenario class
        :param method_name: scenario method name
        :param context: scenario context object
        :param args: scenario args
        """
        times = self.config["times"]
        rps = self.config["rps"]
        timeout = self.config.get("timeout", 600)
        cpu_count = multiprocessing.cpu_count()

        queue = multiprocessing.Queue()
        addition_args = args

        class WorkerProcess(multiprocessing.Process):

            def __init__(self, rps, times, queue, scenario_context, timeout,
                         process_id, args):
                self.rps = rps
                self.times = times
                self.timeout = timeout
                self.pool = []
                self.scenario_context = scenario_context
                self.id = process_id
                self.args = args
                self.queue = queue
                super(WorkerProcess, self).__init__()

            def _th_worker(self, args):
                result = base._run_scenario_once(args)
                self.queue.put(result)

            def run(self):
                for i in range(self.times):
                    scenario_args = (("%d:%d" % (self.id, i), cls, method_name,
                                     self.scenario_context, self.args),)
                    thread = threading.Thread(target=self._th_worker,
                                              args=scenario_args)
                    thread.start()
                    self.pool.append(thread)
                    # Pace to this process's share of the total rate.
                    # (Using the outer ``rps`` here would make every process
                    # generate the full requested rate.)
                    time.sleep(1.0 / self.rps)

                while len(self.pool):
                    thr = self.pool.pop()
                    thr.join(self.timeout)

        process_pool = []
        scenario_context = base._get_scenario_context(context)

        processes2start = min(times, cpu_count)

        # Distribute iterations evenly; the first ``extra`` processes take
        # one more so that no remainder iterations are silently dropped.
        times_per_proc, extra = divmod(times, processes2start)

        for i in range(processes2start):
            worker_times = times_per_proc + (1 if i < extra else 0)
            process = WorkerProcess(rps / float(processes2start),
                                    worker_times,
                                    queue, scenario_context, timeout,
                                    i, addition_args)
            process.start()
            process_pool.append(process)

        # Drain results while worker processes wind down.
        while len(process_pool):
            for process in process_pool:
                if not process.is_alive():
                    process.join(timeout)
                    process_pool.remove(process)
            if not queue.empty():
                self._send_result(queue.get(timeout=timeout))
            time.sleep(1.0 / rps)

        # Flush any results still buffered after all processes exited.
        while not queue.empty():
            result = queue.get(timeout=timeout)
            self._send_result(result)

        queue.close()
Ejemplo n.º 37
0
 def _iter_scenario_args(cls, method, ctx, args, times, aborted):
     """Yield one argument tuple per scenario iteration.

     Produces ``(iteration, cls, method, scenario_context, args)`` tuples
     up to ``times`` entries, stopping early as soon as the abort event
     is set.
     """
     iteration = 0
     while iteration < times:
         if aborted.is_set():
             break
         yield (iteration, cls, method,
                base._get_scenario_context(ctx), args)
         iteration += 1
Ejemplo n.º 38
0
 def _scenario_args(i):
     """Build the positional-args tuple for scenario iteration *i*.

     Relies on closure variables from the enclosing scope (not visible
     here): ``aborted``, ``cls``, ``method``, ``ctx`` and ``args``.

     :raises StopIteration: once the abort event is set
     """
     if aborted.is_set():
         # NOTE(review): raising StopIteration to cut iteration short is a
         # pre-PEP 479 idiom; if this escapes inside a generator on
         # Python 3.7+ it turns into RuntimeError — confirm intended use.
         raise StopIteration()
     return (i, cls, method, base._get_scenario_context(ctx), args)