def _run_scenario(self, cls, method_name, context, args):
    """Execute the scenario iterations sequentially in-process.

    Every iteration runs inside the same Python interpreter as Rally
    itself, so there is no concurrency at all — which makes it possible
    to interactively debug a scenario from the very command used to
    launch Rally.

    :param cls: Scenario class where the scenario is implemented
    :param method_name: name of the method that implements the scenario
    :param context: context dict (users, admin & other information)
                    created before scenario execution starts
    :param args: arguments to call the scenario method with
    :returns: list of per-iteration results, each one a dictionary
    """
    total_iterations = self.config.get("times", 1)
    events = rutils.DequeAsQueue(self.event_queue)

    for iteration in range(total_iterations):
        # Honor an abort request between iterations.
        if self.aborted.is_set():
            break
        iteration_context = runner._get_scenario_context(iteration, context)
        self._send_result(runner._run_scenario_once(
            cls, method_name, iteration_context, args, events))

    self._flush_results()
def test_get_scenario_context(self, mock_choice):
    """Context is narrowed to the (mock-)chosen user and its tenant."""
    tenants = {str(t): dict(name=str(t)) for t in range(2)}
    users = [{"id": "%s_%s" % (t, u),
              "tenant_id": str(t),
              "endpoint": "endpoint"}
             for t in range(2) for u in range(3)]

    context = {
        "admin": mock.MagicMock(),
        "users": users,
        "tenants": tenants,
        "some_random_key": {
            "nested": mock.MagicMock(),
            "one_more": 10
        }
    }

    # mock_choice presumably forces selection of users[1] — the
    # expectations below are built around that user.
    picked_user = context["users"][1]
    expected_context = {
        "admin": context["admin"],
        "user": picked_user,
        "tenant": context["tenants"][picked_user["tenant_id"]],
        "some_random_key": context["some_random_key"]
    }

    self.assertEqual(expected_context,
                     runner._get_scenario_context(context))
def test_get_scenario_context(self, mock_context_manager):
    """Iteration 13 is forwarded to ContextManager as 1-based value 14."""
    source_context = {}
    map_for_scenario = mock_context_manager.return_value.map_for_scenario

    returned = runner._get_scenario_context(13, source_context)

    self.assertEqual(map_for_scenario.return_value, returned)
    mock_context_manager.assert_called_once_with({"iteration": 14})
    map_for_scenario.assert_called_once_with()
def test_get_scenario_context(self, mock_context_manager):
    """The context object is passed to ContextManager unchanged."""
    ctx = mock.MagicMock()
    scenario_map = mock_context_manager.return_value.map_for_scenario

    self.assertEqual(scenario_map.return_value,
                     runner._get_scenario_context(ctx))

    mock_context_manager.assert_called_once_with(ctx)
    scenario_map.assert_called_once_with()
def _run_scenario(self, cls, method_name, context, args):
    """Run the scenario a random number of times, one-by-one.

    The iteration count is drawn uniformly from the inclusive range
    [min_times, max_times] taken from the runner configuration.

    :param cls: Scenario class where the scenario is implemented
    :param method_name: name of the scenario method to call
    :param context: context dict (users, admin, ...) created before
                    scenario execution starts
    :param args: arguments to call the scenario method with
    """
    # runners settings are stored in self.config
    min_times = self.config.get("min_times", 1)
    max_times = self.config.get("max_times", 1)

    # BUGFIX: random.randint is inclusive on both ends, so the
    # configured max_times can actually be selected, and the default
    # config (min_times == max_times == 1) no longer raises
    # "ValueError: empty range" the way random.randrange(1, 1) did.
    for i in range(random.randint(min_times, max_times)):
        run_args = (i, cls, method_name,
                    runner._get_scenario_context(context), args)
        result = runner._run_scenario_once(run_args)
        # report each iteration's result via self._send_result
        self._send_result(result)
def test_get_scenario_context(self, mock_context_manager):
    """ContextManager receives the 1-based iteration and maps the scenario."""
    ctx = {}
    mapper = mock_context_manager.return_value.map_for_scenario

    produced = runner._get_scenario_context(13, ctx)
    self.assertEqual(mapper.return_value, produced)

    # 0-based iteration 13 must show up as 1-based iteration 14.
    mock_context_manager.assert_called_once_with({"iteration": 14})
    mapper.assert_called_once_with()
def test_run_scenario_once_internal_logic(self):
    """_run_scenario_once instantiates the scenario class and drives it."""
    ctx = runner._get_scenario_context(fakes.FakeContext({}).context)
    fake_cls = mock.MagicMock()

    runner._run_scenario_once((2, fake_cls, "test", ctx, {}))

    fake_cls.assert_has_calls(
        [mock.call(ctx),
         mock.call().test()]
        + [mock.call().idle_duration()] * 2
        + [mock.call().atomic_actions()],
        any_order=True)
def test_run_scenario_once_without_scenario_output(self, mock_timer):
    """A plain scenario run yields empty scenario_output and no errors."""
    ctx = runner._get_scenario_context(fakes.FakeUserContext({}).context)

    run_result = runner._run_scenario_once(
        (1, fakes.FakeScenario, "do_it", ctx, {}))

    self.assertEqual(
        {"duration": fakes.FakeTimer().duration(),
         "timestamp": fakes.FakeTimer().timestamp(),
         "idle_duration": 0,
         "error": [],
         "scenario_output": {"errors": "", "data": {}},
         "atomic_actions": {}},
        run_result)
def test_run_scenario_once_internal_logic(self):
    """_run_scenario_once instantiates the class and calls its hooks."""
    iteration_context = runner._get_scenario_context(
        12, fakes.FakeContext({}).context)
    fake_cls = mock.MagicMock()

    runner._run_scenario_once(fake_cls, "test", iteration_context, {})

    fake_cls.assert_has_calls(
        [mock.call(iteration_context),
         mock.call().test()]
        + [mock.call().idle_duration()] * 2
        + [mock.call().atomic_actions()],
        any_order=True)
def test_run_scenario_once_exception(self, mock_timer):
    """Scenario failures are captured in the result's "error" field."""
    ctx = runner._get_scenario_context(fakes.FakeUserContext({}).context)

    run_result = runner._run_scenario_once(
        (1, fakes.FakeScenario, "something_went_wrong", ctx, {}))

    captured_error = run_result.pop("error")
    self.assertEqual(
        {"duration": fakes.FakeTimer().duration(),
         "timestamp": fakes.FakeTimer().timestamp(),
         "idle_duration": 0,
         "scenario_output": {"errors": "", "data": {}},
         "atomic_actions": {}},
        run_result)
    self.assertEqual(captured_error[:2],
                     ["Exception", "Something went wrong"])
def test_run_scenario_once_internal_logic(self):
    """The scenario hooks run and an iteration event is emitted."""
    ctx = runner._get_scenario_context(12, fakes.FakeContext({}).context)
    fake_cls = mock.MagicMock()
    events = mock.MagicMock()

    runner._run_scenario_once(fake_cls, "test", ctx, {}, events)

    fake_cls.assert_has_calls(
        [mock.call(ctx),
         mock.call().test()]
        + [mock.call().idle_duration()] * 2
        + [mock.call().atomic_actions()],
        any_order=True)
    # 0-based iteration 12 is announced as 1-based value 13.
    events.put.assert_called_once_with(
        {"type": "iteration", "value": 13})
def _worker_process(queue, iteration_gen, timeout, rps, times,
                    max_concurrent, context, cls, method_name,
                    args, aborted, info):
    """Start scenario within threads.

    Spawn N threads per second. Each thread runs the scenario once, and
    appends result to queue. A maximum of max_concurrent threads will
    be ran concurrently.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param rps: number of scenario iterations to be run per one second
    :param times: total number of scenario iterations to be run
    :param max_concurrent: maximum worker concurrency
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param info: info about all processes count and counter of runned
                 process
    """
    pool = collections.deque()
    # Ideal delay between two thread launches at the requested rate.
    sleep = 1.0 / rps

    runner._log_worker_info(times=times, rps=rps, timeout=timeout,
                            cls=cls, method_name=method_name, args=args)

    # Stagger the start of this worker relative to its siblings so all
    # processes together approximate a smooth `rps` ramp-up.
    time.sleep(
        (sleep * info["processes_counter"]) / info["processes_to_start"])

    start = time.time()
    i = 0
    while i < times and not aborted.is_set():
        scenario_context = runner._get_scenario_context(context)
        scenario_args = (next(iteration_gen), cls, method_name,
                         scenario_context, args)
        worker_args = (queue, scenario_args)
        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)
        i += 1
        thread.start()
        pool.append(thread)

        time_gap = time.time() - start
        # real_rps may be the *string* "Infinity" when no time has
        # elapsed yet; it is only interpolated into the debug log below.
        real_rps = i / time_gap if time_gap else "Infinity"

        LOG.debug("Worker: %s rps: %s (requested rps: %s)" %
                  (i, real_rps, rps))

        # try to join latest thread(s) until it finished, or until time to
        # start new thread (if we have concurrent slots available)
        while i / (time.time() - start) > rps or len(pool) >= max_concurrent:
            if pool:
                # Join the oldest thread briefly; isAlive() is the
                # Python-2-era alias of is_alive() (removed in 3.9).
                pool[0].join(0.001)
                if not pool[0].isAlive():
                    pool.popleft()
            else:
                time.sleep(0.001)

    # Drain: wait for every still-running iteration to finish.
    while pool:
        thr = pool.popleft()
        thr.join()
def _worker_process(queue, iteration_gen, timeout, rps, times,
                    max_concurrent, context, cls, method_name,
                    args, aborted, info):
    """Start scenario within threads.

    Spawn N threads per second. Each thread runs the scenario once, and
    appends result to queue. A maximum of max_concurrent threads will
    be ran concurrently.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param rps: number of scenario iterations to be run per one second
    :param times: total number of scenario iterations to be run
    :param max_concurrent: maximum worker concurrency
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param info: info about all processes count and counter of runned
                 process
    """
    pool = collections.deque()
    # Ideal delay between two thread launches at the requested rate.
    sleep = 1.0 / rps

    runner._log_worker_info(times=times, rps=rps, timeout=timeout,
                            cls=cls, method_name=method_name, args=args)

    # Stagger this worker's start relative to its sibling processes so
    # the combined load ramps up smoothly.
    time.sleep(
        (sleep * info["processes_counter"]) / info["processes_to_start"])

    start = time.time()
    timeout_queue = Queue.Queue()

    if timeout:
        # Background collector that force-expires threads past their
        # deadline; fed (thread, deadline) pairs via timeout_queue.
        collector_thr_by_timeout = threading.Thread(
            target=utils.timeout_thread,
            args=(timeout_queue, )
        )
        collector_thr_by_timeout.start()

    i = 0
    while i < times and not aborted.is_set():
        scenario_context = runner._get_scenario_context(next(iteration_gen),
                                                        context)
        worker_args = (queue, cls, method_name, scenario_context, args)

        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)

        i += 1
        thread.start()
        if timeout:
            timeout_queue.put((thread, time.time() + timeout))
        pool.append(thread)

        time_gap = time.time() - start
        # May be the *string* "Infinity" before any time has elapsed;
        # only used in the debug log below.
        real_rps = i / time_gap if time_gap else "Infinity"

        LOG.debug("Worker: %s rps: %s (requested rps: %s)" %
                  (i, real_rps, rps))

        # try to join latest thread(s) until it finished, or until time to
        # start new thread (if we have concurrent slots available)
        while i / (time.time() - start) > rps or len(pool) >= max_concurrent:
            if pool:
                # isAlive() is the Python-2-era alias of is_alive()
                # (removed in Python 3.9).
                pool[0].join(0.001)
                if not pool[0].isAlive():
                    pool.popleft()
            else:
                time.sleep(0.001)

    # Drain remaining iterations, then shut the collector down with the
    # (None, None) sentinel.
    while pool:
        pool.popleft().join()

    if timeout:
        timeout_queue.put((None, None,))
        collector_thr_by_timeout.join()
def _scenario_args(i):
    """Build the argument tuple for iteration *i*, or stop on abort."""
    if not aborted.is_set():
        return (cls, method, runner._get_scenario_context(i, ctx),
                args, event_queue)
    # Abort requested: signal the consuming iterator to stop.
    raise StopIteration()
def _worker_process(queue, iteration_gen, timeout, concurrency, times,
                    context, cls, method_name, args, event_queue,
                    aborted, info):
    """Start the scenario within threads.

    Spawn threads to support scenario execution for a fixed number of
    times. This generates a constant load on the cloud under test by
    executing each scenario iteration without pausing between iterations.
    Each thread runs the scenario method once with passed scenario
    arguments and context. After execution the result is appended to
    the queue.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param concurrency: number of concurrently running scenario iterations
    :param times: total number of scenario iterations to be run
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param event_queue: queue object to append events
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param info: info about all processes count and counter of launched
                 process
    """
    pool = collections.deque()
    # Bookkeeping counters; kept in sync with `pool` by the inner loop.
    alive_threads_in_pool = 0
    finished_threads_in_pool = 0

    runner._log_worker_info(times=times, concurrency=concurrency,
                            timeout=timeout, cls=cls,
                            method_name=method_name, args=args)

    if timeout:
        # Background collector that expires threads past their deadline,
        # fed (thread, deadline) pairs through timeout_queue.
        timeout_queue = Queue.Queue()
        collector_thr_by_timeout = threading.Thread(
            target=utils.timeout_thread,
            args=(timeout_queue, ))
        collector_thr_by_timeout.start()

    iteration = next(iteration_gen)
    while iteration < times and not aborted.is_set():
        scenario_context = runner._get_scenario_context(iteration, context)
        worker_args = (queue, cls, method_name, scenario_context,
                       args, event_queue)

        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)

        thread.start()
        if timeout:
            timeout_queue.put((thread, time.time() + timeout))
        pool.append(thread)
        alive_threads_in_pool += 1

        # Block while the pool is saturated; recount finished threads
        # each pass (isAlive() is the py2-era alias of is_alive()).
        while alive_threads_in_pool == concurrency:
            prev_finished_threads_in_pool = finished_threads_in_pool
            finished_threads_in_pool = 0
            for t in pool:
                if not t.isAlive():
                    finished_threads_in_pool += 1

            alive_threads_in_pool -= finished_threads_in_pool
            alive_threads_in_pool += prev_finished_threads_in_pool

            if alive_threads_in_pool < concurrency:
                # NOTE(boris-42): cleanup pool array. This is required
                #                 because in other case array length will
                #                 be equal to times which is unlimited big
                while pool and not pool[0].isAlive():
                    pool.popleft().join()
                    finished_threads_in_pool -= 1
                break

            # we should wait to not create big noise with these checks
            time.sleep(0.001)

        iteration = next(iteration_gen)

    # Wait until all threads are done
    while pool:
        pool.popleft().join()

    if timeout:
        # (None, None) is the collector's shutdown sentinel.
        timeout_queue.put((
            None,
            None,
        ))
        collector_thr_by_timeout.join()
def _worker_process(queue, iteration_gen, timeout, concurrency, times,
                    duration, context, cls, method_name, args, event_queue,
                    aborted, info):
    """Start the scenario within threads.

    Spawn threads to support scenario execution.
    Scenario is ran for a fixed number of times if times is specified.
    Scenario is ran for fixed duration if duration is specified.
    This generates a constant load on the cloud under test by executing
    each scenario iteration without pausing between iterations. Each
    thread runs the scenario method once with passed scenario arguments
    and context. After execution the result is appended to the queue.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param concurrency: number of concurrently running scenario iterations
    :param times: total number of scenario iterations to be run
    :param duration: total duration in seconds of the run
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param event_queue: queue object to append events
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param info: info about all processes count and counter of launched
                 process
    :raises ValueError: if neither times nor duration is specified
    """

    def _to_be_continued(iteration, current_duration, aborted, times=None,
                         duration=None):
        # Decide whether another iteration should start; `times` takes
        # precedence over `duration` when both are given.
        if times is not None:
            return iteration < times and not aborted.is_set()
        elif duration is not None:
            return current_duration < duration and not aborted.is_set()
        else:
            return False

    if times is None and duration is None:
        raise ValueError("times or duration must be specified")

    pool = collections.deque()
    # Bookkeeping counters; kept in sync with `pool` by the inner loop.
    alive_threads_in_pool = 0
    finished_threads_in_pool = 0

    runner._log_worker_info(times=times, duration=duration,
                            concurrency=concurrency, timeout=timeout,
                            cls=cls, method_name=method_name, args=args)

    if timeout:
        # Background collector that expires threads past their deadline,
        # fed (thread, deadline) pairs through timeout_queue.
        timeout_queue = Queue.Queue()
        collector_thr_by_timeout = threading.Thread(
            target=utils.timeout_thread,
            args=(timeout_queue, )
        )
        collector_thr_by_timeout.start()

    iteration = next(iteration_gen)
    start_time = time.time()
    # NOTE(msimonin): keep the previous behaviour
    # > when duration is 0, scenario executes exactly 1 time
    current_duration = -1
    while _to_be_continued(iteration, current_duration, aborted,
                           times=times, duration=duration):

        scenario_context = runner._get_scenario_context(iteration, context)
        worker_args = (
            queue, cls, method_name, scenario_context, args, event_queue)

        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)

        thread.start()
        if timeout:
            timeout_queue.put((thread, time.time() + timeout))
        pool.append(thread)
        alive_threads_in_pool += 1

        # Block while the pool is saturated; recount finished threads
        # each pass (isAlive() is the py2-era alias of is_alive()).
        while alive_threads_in_pool == concurrency:
            prev_finished_threads_in_pool = finished_threads_in_pool
            finished_threads_in_pool = 0
            for t in pool:
                if not t.isAlive():
                    finished_threads_in_pool += 1

            alive_threads_in_pool -= finished_threads_in_pool
            alive_threads_in_pool += prev_finished_threads_in_pool

            if alive_threads_in_pool < concurrency:
                # NOTE(boris-42): cleanup pool array. This is required
                #                 because in other case array length will
                #                 be equal to times which is unlimited big
                while pool and not pool[0].isAlive():
                    pool.popleft().join()
                    finished_threads_in_pool -= 1
                break

            # we should wait to not create big noise with these checks
            time.sleep(0.001)

        iteration = next(iteration_gen)
        current_duration = time.time() - start_time

    # Wait until all threads are done
    while pool:
        pool.popleft().join()

    if timeout:
        # (None, None) is the collector's shutdown sentinel.
        timeout_queue.put((None, None,))
        collector_thr_by_timeout.join()
def _worker_process(queue, iteration_gen, timeout, concurrency, times,
                    context, cls, method_name, args, aborted, info):
    """Start the scenario within threads.

    Spawn threads to support scenario execution for a fixed number of
    times. This generates a constant load on the cloud under test by
    executing each scenario iteration without pausing between iterations.
    Each thread runs the scenario method once with passed scenario
    arguments and context. After execution the result is appended to
    the queue.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param concurrency: number of concurrently running scenario iterations
    :param times: total number of scenario iterations to be run
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param info: info about all processes count and counter of launched
                 process
    """
    pool = collections.deque()
    # Bookkeeping counters; kept in sync with `pool` by the inner loop.
    alive_threads_in_pool = 0
    finished_threads_in_pool = 0

    runner._log_worker_info(times=times, concurrency=concurrency,
                            timeout=timeout, cls=cls,
                            method_name=method_name, args=args)

    if timeout:
        # Background collector that expires work past its deadline.
        timeout_queue = Queue.Queue()
        collector_thr_by_timeout = threading.Thread(
            target=utils.timeout_thread,
            args=(timeout_queue, )
        )
        collector_thr_by_timeout.start()

    iteration = next(iteration_gen)
    while iteration < times and not aborted.is_set():
        scenario_context = runner._get_scenario_context(iteration, context)
        worker_args = (queue, cls, method_name, scenario_context, args)

        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)

        thread.start()
        if timeout:
            # NOTE(review): this variant enqueues thread.ident (an int)
            # rather than the Thread object itself — confirm that
            # utils.timeout_thread expects the ident here.
            timeout_queue.put((thread.ident, time.time() + timeout))
        # Pool entries are (thread, start_timestamp) pairs.
        pool.append((thread, time.time()))
        alive_threads_in_pool += 1

        # Block while the pool is saturated; recount finished threads
        # each pass (isAlive() is the py2-era alias of is_alive()).
        while alive_threads_in_pool == concurrency:
            prev_finished_threads_in_pool = finished_threads_in_pool
            finished_threads_in_pool = 0
            for t in pool:
                if not t[0].isAlive():
                    finished_threads_in_pool += 1

            alive_threads_in_pool -= finished_threads_in_pool
            alive_threads_in_pool += prev_finished_threads_in_pool

            if alive_threads_in_pool < concurrency:
                # NOTE(boris-42): cleanup pool array. This is required
                #                 because in other case array length will
                #                 be equal to times which is unlimited big
                while pool and not pool[0][0].isAlive():
                    pool.popleft()[0].join()
                    finished_threads_in_pool -= 1
                break

            # we should wait to not create big noise with these checks
            time.sleep(0.001)

        iteration = next(iteration_gen)

    # Wait until all threads are done
    while pool:
        pool.popleft()[0].join()

    if timeout:
        # (None, None) is the collector's shutdown sentinel.
        timeout_queue.put((None, None,))
        collector_thr_by_timeout.join()
def _scenario_args(i):
    """Build the argument tuple for iteration *i*, or stop on abort."""
    if not aborted.is_set():
        return (cls, method, runner._get_scenario_context(i, ctx), args)
    # Abort requested: signal the consuming iterator to stop.
    raise StopIteration()
def test_get_scenario_context(self):
    """The context is returned with a 1-based "iteration" key added."""
    source_context = {"foo": "bar"}
    enriched = runner._get_scenario_context(13, source_context)
    self.assertEqual({"foo": "bar", "iteration": 14}, enriched)
def _worker_process(queue, iteration_gen, timeout, times, max_concurrent,
                    context, cls, method_name, args, event_queue, aborted,
                    runs_per_second, rps_cfg, processes_to_start, info):
    """Start scenario within threads.

    Spawn N threads per second. Each thread runs the scenario once, and
    appends result to queue. A maximum of max_concurrent threads will
    be ran concurrently.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param times: total number of scenario iterations to be run
    :param max_concurrent: maximum worker concurrency
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param event_queue: queue object to append events
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param runs_per_second: function that should return desired rps value
    :param rps_cfg: rps section from task config
    :param processes_to_start: int, number of started processes for
                               scenario execution
    :param info: info about all processes count and counter of runned
                 process
    """
    pool = collections.deque()

    # rps_cfg may be a plain number or a dict describing a ramp; in the
    # dict form only the "start" value is used for the initial pacing.
    if isinstance(rps_cfg, dict):
        rps = rps_cfg["start"]
    else:
        rps = rps_cfg
    # Ideal delay between two thread launches at the initial rate.
    sleep = 1.0 / rps

    runner._log_worker_info(times=times, rps=rps, timeout=timeout,
                            cls=cls, method_name=method_name, args=args)

    # Stagger this worker's start relative to its sibling processes so
    # the combined load ramps up smoothly.
    time.sleep(
        (sleep * info["processes_counter"]) / info["processes_to_start"])

    start = time.time()
    timeout_queue = Queue.Queue()

    if timeout:
        # Background collector that expires threads past their deadline,
        # fed (thread, deadline) pairs through timeout_queue.
        collector_thr_by_timeout = threading.Thread(
            target=utils.timeout_thread,
            args=(timeout_queue, ))
        collector_thr_by_timeout.start()

    i = 0
    while i < times and not aborted.is_set():
        scenario_context = runner._get_scenario_context(
            next(iteration_gen), context)
        worker_args = (queue, cls, method_name, scenario_context,
                       args, event_queue)

        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)

        i += 1
        thread.start()
        if timeout:
            timeout_queue.put((thread, time.time() + timeout))
        pool.append(thread)

        time_gap = time.time() - start
        # May be the *string* "Infinity" before any time has elapsed;
        # only used in the debug log below.
        real_rps = i / time_gap if time_gap else "Infinity"

        LOG.debug(
            "Worker: %s rps: %s (requested rps: %s)" %
            (i, real_rps,
             runs_per_second(rps_cfg, start, processes_to_start)))

        # try to join latest thread(s) until it finished, or until time to
        # start new thread (if we have concurrent slots available);
        # runs_per_second() is re-evaluated each pass so the target rate
        # can change over time.
        while i / (time.time() - start) > runs_per_second(
                rps_cfg, start, processes_to_start) or (
                len(pool) >= max_concurrent):
            if pool:
                # isAlive() is the Python-2-era alias of is_alive()
                # (removed in Python 3.9).
                pool[0].join(0.001)
                if not pool[0].isAlive():
                    pool.popleft()
            else:
                time.sleep(0.001)

    # Drain remaining iterations, then shut the collector down with the
    # (None, None) sentinel.
    while pool:
        pool.popleft().join()

    if timeout:
        timeout_queue.put((
            None,
            None,
        ))
        collector_thr_by_timeout.join()