def _run_scenario(self, cls, method, context, args):
    """Run the scenario a fixed number of times on a process pool.

    :returns: base.ScenarioRunnerResult with one entry per iteration
    """
    timeout = self.config.get("timeout", 600)
    concurrency = self.config.get("concurrency", 1)
    # NOTE(msdubov): If not specified, perform single scenario run.
    times = self.config.get("times", 1)

    workers = multiprocessing.Pool(concurrency)
    outcome_iter = workers.imap(
        base._run_scenario_once,
        self._iter_scenario_args(cls, method, context, args, times))

    collected = []
    for _ in range(times):
        try:
            outcome = outcome_iter.next(timeout)
        except multiprocessing.TimeoutError as exc:
            # Record a synthetic result for the iteration that hung.
            outcome = {"duration": timeout,
                       "idle_duration": 0,
                       "error": utils.format_exc(exc)}
        collected.append(outcome)

    workers.close()
    workers.join()
    return base.ScenarioRunnerResult(collected)
def _run_scenario_once(args):
    """Execute one scenario iteration inside a worker process.

    `args` is the tuple (iteration, cls, method_name, context, kwargs)
    produced by the runner.

    :returns: dict with duration, timestamp, idle_duration, error,
              scenario_output and atomic_actions for this iteration
    """
    iteration, cls, method_name, context, kwargs = args

    LOG.info("Task %(task)s | ITER: %(iteration)s START" %
             {"task": context["task"]["uuid"], "iteration": iteration})

    # Make the iteration number visible to the scenario via its context.
    context["iteration"] = iteration
    scenario = cls(
        context=context,
        admin_clients=osclients.Clients(context["admin"]["endpoint"]),
        clients=osclients.Clients(context["user"]["endpoint"]))

    error = []
    scenario_output = {"errors": "", "data": {}}
    try:
        with rutils.Timer() as timer:
            # The scenario method may return its own output dict; keep
            # the empty default when it returns None.
            scenario_output = getattr(scenario,
                                      method_name)(**kwargs) or scenario_output
    except Exception as e:
        error = utils.format_exc(e)
        if logging.is_debug():
            LOG.exception(e)
    finally:
        # NOTE(review): error[0:2] assumes format_exc() yields a sequence
        # whose first two items are type and message -- TODO confirm.
        status = "Error %s: %s" % tuple(error[0:2]) if error else "OK"
        LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s" %
                 {"task": context["task"]["uuid"], "iteration": iteration,
                  "status": status})

    # Reported duration excludes time the scenario deliberately slept.
    return {"duration": timer.duration() - scenario.idle_duration(),
            "timestamp": timer.timestamp(),
            "idle_duration": scenario.idle_duration(),
            "error": error,
            "scenario_output": scenario_output,
            "atomic_actions": scenario.atomic_actions()}
def _run_scenario(self, cls, method, context, args):
    """Execute the scenario a fixed number of iterations on a worker pool.

    :returns: base.ScenarioRunnerResult with one entry per iteration
    """
    timeout = self.config.get("timeout", 600)
    concurrency = self.config.get("concurrency", 1)
    # NOTE(msdubov): If not specified, perform single scenario run.
    times = self.config.get("times", 1)

    pool = multiprocessing.Pool(concurrency)
    scenario_args = self._iter_scenario_args(cls, method, context, args,
                                             times)
    result_iter = pool.imap(base._run_scenario_once, scenario_args)

    results = []
    iteration = 0
    while iteration < times:
        try:
            res = result_iter.next(timeout)
        except multiprocessing.TimeoutError as e:
            # Synthetic result so the timed-out iteration is still counted.
            res = {"duration": timeout,
                   "idle_duration": 0,
                   "scenario_output": {},
                   "atomic_actions": [],
                   "error": utils.format_exc(e)}
        results.append(res)
        iteration += 1

    pool.close()
    pool.join()
    return base.ScenarioRunnerResult(results)
def _run_scenario(self, cls, method_name, context, args):
    """Run the scenario `times` times, starting one run every `period`
    seconds.

    Each iteration is launched in its own single-thread pool so the next
    run can start while earlier ones are still in flight.

    :returns: base.ScenarioRunnerResult with one entry per iteration
    """
    times = self.config["times"]
    period = self.config["period"]
    timeout = self.config.get("timeout", 600)

    pools = []
    async_results = []
    for i in range(times):
        pool = multiprocessing_pool.ThreadPool(processes=1)
        pools.append(pool)
        scenario_args = ((i, cls, method_name,
                          base._get_scenario_context(context), args),)
        async_results.append(pool.apply_async(base._run_scenario_once,
                                              scenario_args))
        # No sleep after the final iteration has been launched.
        if i < times - 1:
            time.sleep(period)

    results = []
    for async_result in async_results:
        try:
            result = async_result.get(timeout=timeout)
        except multiprocessing.TimeoutError as e:
            result = {"duration": timeout, "idle_duration": 0,
                      "error": utils.format_exc(e)}
        results.append(result)

    # BUG FIX: the per-iteration thread pools were never closed/joined
    # and leaked their worker threads.
    for pool in pools:
        pool.close()
        pool.join()

    return base.ScenarioRunnerResult(results)
def _run_scenario_once(args):
    """Execute one scenario iteration; `args` is the tuple
    (i, cls, method_name, admin, user, kwargs).

    :returns: dict with time, idle_time, error, scenario_output and
              atomic_actions_time for this iteration
    """
    i, cls, method_name, admin, user, kwargs = args
    LOG.info("ITER: %s START" % i)
    # TODO(boris-42): remove context
    scenario = cls(
        context={},
        admin_clients=osclients.Clients(admin["endpoint"]),
        clients=osclients.Clients(user["endpoint"])
    )
    try:
        scenario_output = {}
        with rutils.Timer() as timer:
            # The scenario may return its own output dict; default to {}.
            scenario_output = getattr(scenario, method_name)(**kwargs) or {}
        error = None
    except Exception as e:
        error = utils.format_exc(e)
        if cfg.CONF.debug:
            LOG.exception(e)
    finally:
        # NOTE(review): error[0:2] assumes format_exc() yields a sequence
        # whose first two items are type and message; if it returns a plain
        # string this slices two characters -- TODO confirm.
        status = "Error %s: %s" % tuple(error[0:2]) if error else "OK"
        LOG.info("ITER: %(i)s END: %(status)s" % {"i": i, "status": status})
    # Reported time excludes deliberate sleeps inside the scenario.
    return {
        "time": timer.duration() - scenario.idle_time(),
        "idle_time": scenario.idle_time(),
        "error": error,
        "scenario_output": scenario_output,
        "atomic_actions_time": scenario.atomic_actions_time(),
    }
def _run_scenario(self, cls, method_name, context, args, config):
    """Run the scenario `times` times, one start every `period` minutes,
    each acting as a randomly chosen user.

    :returns: list of per-iteration result dicts
    """
    times = config["times"]
    period = config["period"]
    timeout = config.get("timeout", 600)

    pools = []
    async_results = []
    for i in range(times):
        thread = multiprocessing_pool.ThreadPool(processes=1)
        pools.append(thread)
        scenario_args = ((i, cls, method_name, context["admin"],
                          random.choice(context["users"]), args),)
        async_results.append(thread.apply_async(base._run_scenario_once,
                                                scenario_args))
        if i != times - 1:
            # `period` is expressed in minutes.
            time.sleep(period * 60)

    results = []
    for async_result in async_results:
        try:
            # BUG FIX: get() was called without a timeout, so the
            # TimeoutError handler below was unreachable and a hung
            # iteration blocked forever.
            result = async_result.get(timeout=timeout)
        except multiprocessing.TimeoutError as e:
            result = {"time": timeout, "idle_time": 0,
                      "error": utils.format_exc(e)}
        results.append(result)

    # Release per-iteration worker threads (previously leaked).
    for pool in pools:
        pool.close()
        pool.join()

    return results
def _run_scenario(self, cls, method, context, args):
    """Run the scenario repeatedly on a process pool for a fixed duration.

    Results are collected until more than `duration` seconds have
    elapsed, then the pool is terminated (in-flight runs are killed,
    not drained).

    :returns: base.ScenarioRunnerResult with every collected result
    """
    timeout = self.config.get("timeout", 600)
    concurrency = self.config.get("concurrency", 1)
    # NOTE(review): no default here -- if "duration" is absent this is
    # None and the comparison below misbehaves; presumably the config
    # schema guarantees the key exists. TODO confirm.
    duration = self.config.get("duration")
    pool = multiprocessing.Pool(concurrency)
    # Infinite argument stream: the loop below decides when to stop.
    run_args = utils.infinite_run_args_generator(
        self._iter_scenario_args(cls, method, context, args))
    iter_result = pool.imap(base._run_scenario_once, run_args)
    results = []
    start = time.time()
    while True:
        try:
            result = iter_result.next(timeout)
        except multiprocessing.TimeoutError as e:
            # Synthetic result so a hung iteration is still recorded.
            result = {"duration": timeout, "idle_duration": 0,
                      "scenario_output": {}, "atomic_actions": [],
                      "error": utils.format_exc(e)}
        results.append(result)
        if time.time() - start > duration:
            break
    # terminate() (not close()) because the argument stream is infinite.
    pool.terminate()
    pool.join()
    return base.ScenarioRunnerResult(results)
def _run_scenario(self, cls, method, context, args):
    """Run the scenario repeatedly until `duration` seconds have elapsed.

    Only the most recent `concurrency` results survive: they are kept in
    a bounded deque that silently drops older entries.

    :returns: base.ScenarioRunnerResult with the retained results
    """
    timeout = self.config.get("timeout", 600)
    concurrency = self.config.get("concurrency", 1)
    duration = self.config.get("duration")

    pool = multiprocessing.Pool(concurrency)
    run_args = utils.infinite_run_args_generator(
        self._iter_scenario_args(cls, method, context, args))
    outcome_iter = pool.imap(base._run_scenario_once, run_args)

    recent = collections.deque([], maxlen=concurrency)
    started_at = time.time()
    while True:
        try:
            outcome = outcome_iter.next(timeout)
        except multiprocessing.TimeoutError as exc:
            outcome = {
                "duration": timeout,
                "idle_duration": 0,
                "error": utils.format_exc(exc)
            }
        recent.append(outcome)
        if time.time() - started_at > duration:
            break

    retained = list(recent)
    # terminate() because the argument stream is infinite.
    pool.terminate()
    pool.join()
    return base.ScenarioRunnerResult(retained)
def _run_scenario_continuously_for_duration(self, cls, method, args,
                                            duration, concurrent,
                                            timeout):
    """Run the scenario on a process pool for `duration` minutes.

    Only the most recent `concurrent` results are kept (bounded deque).

    :returns: list of per-iteration result dicts
    """
    pool = multiprocessing.Pool(concurrent)
    run_args = utils.infinite_run_args((cls, method, args))
    iter_result = pool.imap(_run_scenario_loop, run_args)
    start = time.time()
    # Bounded: older results are silently discarded once full.
    results_queue = collections.deque([], maxlen=concurrent)
    while True:
        # `duration` is expressed in minutes.
        if time.time() - start > duration * 60:
            break
        try:
            result = iter_result.next(timeout)
        except multiprocessing.TimeoutError as e:
            # NOTE(review): cls.idle_time is read off the class itself;
            # presumably set by _run_scenario_loop -- TODO confirm.
            result = {"time": timeout, "idle_time": cls.idle_time,
                      "error": utils.format_exc(e)}
        results_queue.append(result)
    results = list(results_queue)
    # terminate() because the argument stream is infinite.
    pool.terminate()
    pool.join()
    return results
def _run_scenario(self, cls, method_name, context, args):
    """Run the scenario `times` times, starting one run every `period`
    seconds.

    Each iteration runs in its own single-thread pool so later runs can
    start while earlier ones are still executing.

    :returns: base.ScenarioRunnerResult with one entry per iteration
    """
    times = self.config["times"]
    period = self.config["period"]
    timeout = self.config.get("timeout", 600)

    pools = []
    async_results = []
    for i in range(times):
        pool = multiprocessing_pool.ThreadPool(processes=1)
        pools.append(pool)
        scenario_args = ((i, cls, method_name,
                          base._get_scenario_context(context), args), )
        async_results.append(pool.apply_async(base._run_scenario_once,
                                              scenario_args))
        # No sleep after launching the final iteration.
        if i < times - 1:
            time.sleep(period)

    results = []
    for async_result in async_results:
        try:
            result = async_result.get(timeout=timeout)
        except multiprocessing.TimeoutError as e:
            result = {
                "duration": timeout,
                "idle_duration": 0,
                "error": utils.format_exc(e)
            }
        results.append(result)

    # BUG FIX: the per-iteration thread pools were never closed/joined
    # and leaked their worker threads.
    for pool in pools:
        pool.close()
        pool.join()

    return base.ScenarioRunnerResult(results)
def _run_scenario_continuously_for_duration(self, cls, method, context,
                                            args, duration, concurrent,
                                            timeout):
    """Keep running the scenario on `concurrent` workers until `duration`
    seconds have passed; return only the last `concurrent` results.
    """
    pool = multiprocessing.Pool(concurrent)

    def _scenario_args(i):
        return (i, cls, method, base._get_scenario_context(context), args)

    outcome_iter = pool.imap(
        base._run_scenario_once,
        utils.infinite_run_args_generator(_scenario_args))

    latest = collections.deque([], maxlen=concurrent)
    deadline = time.time() + duration
    while True:
        try:
            outcome = outcome_iter.next(timeout)
        except multiprocessing.TimeoutError as exc:
            outcome = {"time": timeout, "idle_time": 0,
                       "error": utils.format_exc(exc)}
        latest.append(outcome)
        if time.time() > deadline:
            break

    gathered = list(latest)
    # terminate() because the argument stream never ends on its own.
    pool.terminate()
    pool.join()
    return gathered
def _run_scenario_once(args):
    """Execute one scenario iteration inside a worker process.

    `args` is the tuple (iteration, cls, method_name, context, kwargs)
    produced by the runner.

    :returns: dict with duration, idle_duration, error, scenario_output
              and atomic_actions for this iteration
    """
    iteration, cls, method_name, context, kwargs = args

    LOG.info("Task %(task)s | ITER: %(iteration)s START" %
             {"task": context["task"]["uuid"], "iteration": iteration})

    scenario = cls(
        context=context,
        admin_clients=osclients.Clients(context["admin"]["endpoint"]),
        clients=osclients.Clients(context["user"]["endpoint"]))

    error = []
    scenario_output = {}
    try:
        with rutils.Timer() as timer:
            # The scenario method may return its own output dict; fall
            # back to {} when it returns None.
            scenario_output = getattr(scenario,
                                      method_name)(**kwargs) or {}
    except Exception as e:
        error = utils.format_exc(e)
        if cfg.CONF.debug:
            LOG.exception(e)
    finally:
        # NOTE(review): error[0:2] assumes format_exc() yields a sequence
        # whose first two items are type and message -- TODO confirm.
        status = "Error %s: %s" % tuple(error[0:2]) if error else "OK"
        LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s" %
                 {"task": context["task"]["uuid"], "iteration": iteration,
                  "status": status})

    # Reported duration excludes time the scenario deliberately slept.
    return {"duration": timer.duration() - scenario.idle_duration(),
            "idle_duration": scenario.idle_duration(),
            "error": error,
            "scenario_output": scenario_output,
            "atomic_actions": scenario.atomic_actions()}
def format_result_on_timeout(exc, timeout):
    """Return the result dict recorded when a scenario iteration times out."""
    empty_output = {"errors": "", "data": {}}
    return dict(duration=timeout,
                idle_duration=0,
                scenario_output=empty_output,
                atomic_actions={},
                error=utils.format_exc(exc))
def format_result_on_timeout(exc, timeout):
    """Shape a timed-out iteration like a normal scenario result dict."""
    result = {}
    result["duration"] = timeout
    result["idle_duration"] = 0
    result["scenario_output"] = {"errors": "", "data": {}}
    result["atomic_actions"] = {}
    result["error"] = utils.format_exc(exc)
    return result
def _run_scenario_once(args):
    """Execute one scenario iteration inside a worker process.

    `args` is the tuple (iteration, cls, method_name, context, kwargs).
    If the scenario method accepts **kwargs, the runtime context is
    passed through under the "my_context" key.

    :returns: dict with duration, idle_duration, error, scenario_output
              and atomic_actions for this iteration
    """
    iteration, cls, method_name, context, kwargs = args

    LOG.info("Task %(task)s | ITER: %(iteration)s START" %
             {"task": context["task"]["uuid"], "iteration": iteration})

    # Make the iteration number visible to the scenario via its context.
    context["iteration"] = iteration
    scenario = cls(
        context=context,
        admin_clients=osclients.Clients(context["admin"]["endpoint"]),
        clients=osclients.Clients(context["user"]["endpoint"]))

    error = []
    scenario_output = {"errors": "", "data": {}}
    try:
        with rutils.Timer() as timer:
            scenario_fn = getattr(scenario, method_name)
            # Inject the context only when the method declares **kwargs.
            fargs, fvargs, fkws, fdefaults = inspect.getargspec(scenario_fn)
            # BUG FIX: was `fkws != None`; None must be tested by identity.
            # Also removed leftover debug print() calls and the duplicate
            # getattr() lookup.
            if fkws is not None:
                kwargs["my_context"] = context
            scenario_output = scenario_fn(**kwargs) or scenario_output
    except Exception as e:
        error = utils.format_exc(e)
        if cfg.CONF.debug:
            LOG.exception(e)
    finally:
        # NOTE(review): error[0:2] assumes format_exc() yields a sequence
        # whose first two items are type and message -- TODO confirm.
        status = "Error %s: %s" % tuple(error[0:2]) if error else "OK"
        LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s" %
                 {"task": context["task"]["uuid"], "iteration": iteration,
                  "status": status})

    # Reported duration excludes time the scenario deliberately slept.
    return {"duration": timer.duration() - scenario.idle_duration(),
            "idle_duration": scenario.idle_duration(),
            "error": error,
            "scenario_output": scenario_output,
            "atomic_actions": scenario.atomic_actions()}
def _run_scenario_continuously_for_times(self, cls, method, context,
                                         args, times, concurrent,
                                         timeout):
    """Run exactly `times` scenario iterations on `concurrent` workers."""
    pool = multiprocessing.Pool(concurrent)
    scenario_args = self._iter_scenario_args(cls, method, context, args,
                                             times)
    outcome_iter = pool.imap(base._run_scenario_once, scenario_args)

    collected = []
    for _ in range(times):
        try:
            outcome = outcome_iter.next(timeout)
        except multiprocessing.TimeoutError as exc:
            outcome = {"time": timeout, "idle_time": 0,
                       "error": utils.format_exc(exc)}
        collected.append(outcome)

    pool.close()
    pool.join()
    return collected
def _run_scenario_continuously_for_times(self, cls, method, args, times,
                                         concurrent, timeout):
    """Run exactly `times` iterations of `method` on a worker pool."""
    per_run_args = [(idx, cls, method, args) for idx in xrange(times)]
    pool = multiprocessing.Pool(concurrent)
    outcome_iter = pool.imap(_run_scenario_loop, per_run_args)

    outcomes = []
    for _ in range(len(per_run_args)):
        try:
            outcome = outcome_iter.next(timeout)
        except multiprocessing.TimeoutError as exc:
            outcome = {"time": timeout,
                       "idle_time": cls.idle_time,
                       "error": utils.format_exc(exc)}
        outcomes.append(outcome)

    pool.close()
    pool.join()
    return outcomes
def _run_scenario_loop(args):
    """Worker-side body for one scenario iteration.

    `args` is the tuple (i, cls, method_name, kwargs).  Relies on the
    module-level globals __openstack_clients__, __admin_clients__ and
    __scenario_context__ being set up before the pool workers run.

    :returns: dict with time, idle_time and error for this iteration
    """
    i, cls, method_name, kwargs = args
    LOG.info("ITER: %s" % i)
    # NOTE(msdubov): Each scenario run uses a random openstack client
    # from a predefined set to act from different users.
    cls._clients = random.choice(__openstack_clients__)
    cls._admin_clients = __admin_clients__
    cls._context = __scenario_context__
    cls.idle_time = 0
    try:
        with rutils.Timer() as timer:
            getattr(cls, method_name)(**kwargs)
        error = None
    except Exception as e:
        error = utils.format_exc(e)
    finally:
        # NOTE(review): returning from `finally` suppresses any exception
        # still propagating (e.g. KeyboardInterrupt) -- confirm this is
        # deliberate.  Reported time excludes the scenario's idle time.
        return {"time": timer.duration() - cls.idle_time,
                "idle_time": cls.idle_time,
                "error": error}
def _run_scenario_continuously_for_times(self, cls, method, context,
                                         args, times, concurrent,
                                         timeout):
    """Run `times` iterations, each acting as a randomly chosen user."""
    per_run_args = [(idx, cls, method, context["admin"],
                     random.choice(context["users"]), args)
                    for idx in range(times)]
    pool = multiprocessing.Pool(concurrent)
    outcome_iter = pool.imap(base._run_scenario_once, per_run_args)

    outcomes = []
    for _ in range(len(per_run_args)):
        try:
            outcome = outcome_iter.next(timeout)
        except multiprocessing.TimeoutError as exc:
            outcome = {"time": timeout, "idle_time": 0,
                       "error": utils.format_exc(exc)}
        outcomes.append(outcome)

    pool.close()
    pool.join()
    return outcomes
def _run_scenario_periodically(self, cls, method, args, times, period,
                               timeout):
    """Run the scenario `times` times, one start every `period` minutes.

    :returns: list of per-iteration result dicts
    """
    pools = []
    async_results = []
    for i in xrange(times):
        thread = multiprocessing_pool.ThreadPool(processes=1)
        pools.append(thread)
        async_result = thread.apply_async(_run_scenario_loop,
                                          ((i, cls, method, args),))
        async_results.append(async_result)
        if i != times - 1:
            # `period` is expressed in minutes.
            time.sleep(period * 60)

    results = []
    for async_result in async_results:
        try:
            # BUG FIX: get() had no timeout argument, so the TimeoutError
            # handler below was unreachable and a hung run blocked forever.
            result = async_result.get(timeout=timeout)
        except multiprocessing.TimeoutError as e:
            result = {"time": timeout,
                      "idle_time": cls.idle_time,
                      "error": utils.format_exc(e)}
        results.append(result)

    # Release the per-iteration worker threads (previously leaked).
    for pool in pools:
        pool.close()
        pool.join()

    return results
def boot_runcommand_delete_server(cls, image_id, flavor_id, script,
                                  interpreter, network='private',
                                  username='******', ip_version=4,
                                  retries=60, port=22, **kwargs):
    """Boot server, run a script that outputs JSON, delete server.

    Parameters:
        script: script to run on the server, must output JSON mapping
                metric names to values. See sample script below.
        network: Network to choose address to connect to instance from
        username: User to SSH to instance as
        ip_version: Version of ip protocol to use for connection

    returns: Dictionary containing two keys, data and errors. Data is
             JSON data output by the script. Errors is raw data from the
             script's standard error stream.

    Example Script:
        #!/bin/bash
        time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; }
        file=/tmp/test.img
        c=1000 #1GB
        write_seq_1gb=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c")
        read_seq_1gb=$(time_seconds "dd if=$file of=/dev/null bs=1M")
        [[ -f $file ]] && rm $file
        echo "{ \\"write_seq_1gb\\": $write_seq_1gb, \\"read_seq_1gb\\": $read_seq_1gb }"
    """
    server_name = cls._generate_random_name(16)
    server = cls._boot_server(server_name, image_id, flavor_id,
                              key_name='rally_ssh_key', **kwargs)
    # First address on the requested network matching the ip version.
    server_ip = [ip for ip in server.addresses[network]
                 if ip['version'] == ip_version][0]['addr']
    ssh = sshutils.SSH(ip=server_ip, port=port, user=username,
                       key=cls.clients('ssh_key_pair')['private'],
                       key_type='string')

    for retry in range(retries):
        try:
            LOG.debug(_('Execute script on server attempt '
                        '%(retry)i/%(retries)i') % dict(retry=retry,
                                                        retries=retries))
            streams = list(ssh.execute_script(script=script,
                                              interpreter=interpreter,
                                              get_stdout=True,
                                              get_stderr=True))

            # NOTE(hughsaunders): Decode JSON script output
            streams[sshutils.SSH.STDOUT_INDEX] \
                = json.loads(streams[sshutils.SSH.STDOUT_INDEX])
            break
        except (rally_exceptions.SSHError,
                rally_exceptions.TimeoutException, IOError) as e:
            LOG.debug(_('Error running script on instance via SSH. '
                        '%(id)s/%(ip)s Attempt:%(retry)i, '
                        'Error: %(error)s') % dict(
                            id=server.id, ip=server_ip, retry=retry,
                            error=benchmark_utils.format_exc(e)))
            cls.sleep_between(5, 5)
        except ValueError:
            LOG.error(_('Script %(script)s did not output valid JSON. ')
                      % dict(script=script))

    cls._delete_server(server)
    # NOTE(review): if every retry fails, `streams` is unbound here and a
    # NameError is raised -- confirm whether the loop can exhaust retries.
    # BUG FIX: the stderr placeholder used shell-style "$(stderr)s" and was
    # never substituted by %-formatting; it must be "%(stderr)s".
    LOG.debug(_('Output streams from in-instance script execution: '
                'stdout: %(stdout)s, stderr: %(stderr)s') % dict(
                    stdout=str(streams[sshutils.SSH.STDOUT_INDEX]),
                    stderr=str(streams[sshutils.SSH.STDERR_INDEX])))
    return dict(data=streams[sshutils.SSH.STDOUT_INDEX],
                errors=streams[sshutils.SSH.STDERR_INDEX])