def setup(self):
    """This method is called before the task start.

    Creates (or reuses) EC2 credentials for every context user and
    stores ready-to-use connection arguments in ``user['ec2args']``.
    When networking is backed by Neutron, also lifts the router /
    floating-ip quotas and creates one routed network per tenant.
    Failures are logged, not raised.
    """
    try:
        for user in self.context['users']:
            osclient = osclients.Clients(user['credential'])
            keystone = osclient.keystone()
            # Reuse existing EC2 credentials when present, otherwise
            # create a fresh pair for this user/tenant.
            creds = keystone.ec2.list(user['id'])
            if not creds:
                creds = keystone.ec2.create(user['id'], user['tenant_id'])
            else:
                creds = creds[0]
            url = keystone.service_catalog.url_for(service_type='ec2')
            user['ec2args'] = {'region': 'RegionOne',
                               'url': url,
                               'access': creds.access,
                               'secret': creds.secret}

        if self.net_wrapper.SERVICE_IMPL == consts.Service.NEUTRON:
            for user, tenant_id in rutils.iterate_per_tenants(
                    self.context["users"]):
                # -1 means "unlimited" for Neutron quotas.
                body = {"quota": {"router": -1, "floatingip": -1}}
                self.net_wrapper.client.update_quota(tenant_id, body)
                network = self.net_wrapper.create_network(tenant_id,
                                                          add_router=True,
                                                          subnets_num=1)
                self.context["tenants"][tenant_id]["network"] = network
    except Exception as e:
        # BUG FIX: Exception has no ``.message`` attribute on Python 3
        # (and it was deprecated on 2.6+); format the exception itself.
        msg = "Can't prepare ec2 client: %s" % e
        if logging.is_debug():
            LOG.exception(msg)
        else:
            LOG.warning(msg)
def _consumer(consume, queue, is_published): """Infinity worker that consumes tasks from queue. This finishes it's work only in case if is_published.isSet(). :param consume: method that consumes an object removed from the queue :param queue: deque object to popleft() objects from :param is_published: threading.Event that is used to stop the consumer when the queue is empty """ cache = {} while True: if queue: try: consume(cache, queue.popleft()) except IndexError: # NOTE(boris-42): queue is accessed from multiple threads so # it's quite possible to have 2 queue accessing # at the same point queue with only 1 element pass except Exception as e: LOG.warning(_("Failed to consume a task from the queue: " "%s") % e) if logging.is_debug(): LOG.exception(e) elif is_published.isSet(): break else: time.sleep(0.1)
def _consumer(consume, queue, is_published): """Infinity worker that consumes tasks from queue. This finishes it's work only in case if is_published.isSet(). :param consume: method that consumes an object removed from the queue :param queue: deque object to popleft() objects from :param is_published: threading.Event that is used to stop the consumer when the queue is empty """ cache = {} while True: if not queue: if is_published.isSet(): break time.sleep(0.1) continue else: try: args = queue.popleft() except IndexError: # consumed by other thread continue try: consume(cache, args) except Exception as e: LOG.warning(_("Failed to consume a task from the queue: %s") % e) if logging.is_debug(): LOG.exception(e)
def setup(self):
    """This method is called before the task start.

    Prepares EC2 credentials and connection arguments for every context
    user; with Neutron networking it also unlocks router/floating-ip
    quotas and builds one routed network per tenant.  Any failure is
    logged rather than raised.
    """
    try:
        for user in self.context['users']:
            osclient = osclients.Clients(user['credential'])
            keystone = osclient.keystone()
            # Prefer an existing EC2 credential pair; create one only
            # when the user has none.
            creds = keystone.ec2.list(user['id'])
            if not creds:
                creds = keystone.ec2.create(user['id'], user['tenant_id'])
            else:
                creds = creds[0]
            url = keystone.service_catalog.url_for(service_type='ec2')
            user['ec2args'] = {'region': 'RegionOne',
                               'url': url,
                               'access': creds.access,
                               'secret': creds.secret}

        if self.net_wrapper.SERVICE_IMPL == consts.Service.NEUTRON:
            for user, tenant_id in rutils.iterate_per_tenants(
                    self.context["users"]):
                # -1 means "unlimited" for Neutron quotas.
                body = {"quota": {"router": -1, "floatingip": -1}}
                self.net_wrapper.client.update_quota(tenant_id, body)
                network = self.net_wrapper.create_network(tenant_id,
                                                          add_router=True,
                                                          subnets_num=1)
                self.context["tenants"][tenant_id]["network"] = network
    except Exception as e:
        # BUG FIX: Exception has no ``.message`` attribute on Python 3;
        # format the exception object itself instead.
        msg = "Can't prepare ec2 client: %s" % e
        if logging.is_debug():
            LOG.exception(msg)
        else:
            LOG.warning(msg)
def create(cls, config, name):
    """Create a deployment.

    :param config: a dict with deployment configuration
    :param name: a str represents a name of the deployment
    :returns: Deployment object
    """
    # Persist the deployment record first; duplicate names surface as
    # DeploymentNameExists and are re-raised to the caller.
    try:
        deployment = objects.Deployment(name=name, config=config)
    except exceptions.DeploymentNameExists as err:
        if logging.is_debug():
            LOG.exception(err)
        raise

    engine = deploy_engine.Engine.get_engine(
        deployment["config"]["type"], deployment)

    # Validate the configuration against the engine schema before any
    # resources are touched; mark the deployment failed on error.
    try:
        engine.validate()
    except jsonschema.ValidationError:
        LOG.error(_("Deployment %s: Schema validation error.")
                  % deployment["uuid"])
        deployment.update_status(consts.DeployStatus.DEPLOY_FAILED)
        raise

    with engine:
        deployment.update_endpoints(engine.make_deploy())
        return deployment
def _consumer(consume, queue, is_published): """Infinity worker that consumes tasks from queue. This finishes it's work only in case if is_published.isSet(). :param consume: method that consumes an object removed from the queue :param queue: deque object to popleft() objects from :param is_published: threading.Event that is used to stop the consumer when the queue is empty """ cache = {} while True: if queue: try: consume(cache, queue.popleft()) except IndexError: # NOTE(boris-42): queue is accessed from multiple threads so # it's quite possible to have 2 queue accessing # at the same point queue with only 1 element pass except Exception as e: LOG.warning( _("Failed to consume a task from the queue: " "%s") % e) if logging.is_debug(): LOG.exception(e) elif is_published.isSet(): break else: time.sleep(0.1)
def _run_scenario_once(args):
    # Execute a single benchmark iteration and return its measurements.
    #
    # ``args`` is a packed tuple:
    #   (iteration number, scenario class, scenario method name,
    #    per-iteration context dict, scenario keyword arguments)
    iteration, cls, method_name, context_obj, kwargs = args

    LOG.info("Task %(task)s | ITER: %(iteration)s START"
             % {"task": context_obj["task"]["uuid"],
                "iteration": iteration})

    context_obj["iteration"] = iteration
    scenario_inst = cls(context_obj)

    error = []
    scenario_output = {"errors": "", "data": {}}
    try:
        with rutils.Timer() as timer:
            # Scenario methods may return None; fall back to the default
            # output dict in that case.
            scenario_output = getattr(
                scenario_inst, method_name)(**kwargs) or scenario_output
    except Exception as e:
        # Errors are captured, not propagated — the runner records them.
        error = utils.format_exc(e)
        if logging.is_debug():
            LOG.exception(e)
    finally:
        status = "Error %s: %s" % tuple(error[0:2]) if error else "OK"
        LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s"
                 % {"task": context_obj["task"]["uuid"],
                    "iteration": iteration, "status": status})

    # Idle time spent in sleeps inside the scenario is excluded from
    # the reported duration.
    return {"duration": timer.duration() - scenario_inst.idle_duration(),
            "timestamp": timer.timestamp(),
            "idle_duration": scenario_inst.idle_duration(),
            "error": error,
            "scenario_output": scenario_output,
            "atomic_actions": scenario_inst.atomic_actions()}
def _run_scenario_once(args):
    # Execute a single benchmark iteration and return its measurements.
    #
    # ``args`` is a packed tuple:
    #   (iteration number, scenario class, scenario method name,
    #    context dict, scenario keyword arguments)
    iteration, cls, method_name, context, kwargs = args

    LOG.info("Task %(task)s | ITER: %(iteration)s START"
             % {"task": context["task"]["uuid"], "iteration": iteration})

    context["iteration"] = iteration
    # Admin and regular user clients are built from the endpoints stored
    # in the context.
    scenario = cls(
        context=context,
        admin_clients=osclients.Clients(context["admin"]["endpoint"]),
        clients=osclients.Clients(context["user"]["endpoint"]))

    error = []
    scenario_output = {"errors": "", "data": {}}
    try:
        with rutils.Timer() as timer:
            # A scenario method may return None; keep the default dict.
            scenario_output = getattr(
                scenario, method_name)(**kwargs) or scenario_output
    except Exception as e:
        # Errors are captured, not propagated — the runner records them.
        error = utils.format_exc(e)
        if logging.is_debug():
            LOG.exception(e)
    finally:
        status = "Error %s: %s" % tuple(error[0:2]) if error else "OK"
        LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s"
                 % {"task": context["task"]["uuid"],
                    "iteration": iteration, "status": status})

    # Idle (sleep) time inside the scenario is excluded from duration.
    return {"duration": timer.duration() - scenario.idle_duration(),
            "timestamp": timer.timestamp(),
            "idle_duration": scenario.idle_duration(),
            "error": error,
            "scenario_output": scenario_output,
            "atomic_actions": scenario.atomic_actions()}
def create(cls, config, name):
    """Create a deployment.

    :param config: a dict with deployment configuration
    :param name: a str represents a name of the deployment
    :returns: Deployment object
    """
    # Persist the record first; a duplicate name raises
    # DeploymentNameExists, which is re-raised to the caller.
    try:
        deployment = objects.Deployment(name=name, config=config)
    except exceptions.DeploymentNameExists as e:
        if logging.is_debug():
            LOG.exception(e)
        raise

    deployer = deploy_engine.Engine.get_engine(
        deployment["config"]["type"], deployment)

    # Validate against the engine schema before touching resources;
    # on error the deployment is marked failed and the error re-raised.
    try:
        deployer.validate()
    except jsonschema.ValidationError:
        LOG.error(
            _("Deployment %s: Schema validation error.")
            % deployment["uuid"])
        deployment.update_status(consts.DeployStatus.DEPLOY_FAILED)
        raise

    with deployer:
        endpoints = deployer.make_deploy()
        deployment.update_endpoints(endpoints)
        return deployment
def load_plugins(dir_or_file):
    """Import plugin modules from a directory tree or a single file.

    Accepts either a directory (searched recursively for ``*.py``) or a
    single Python file.  Import failures are logged and skipped so one
    broken plugin cannot prevent the rest from loading.

    :param dir_or_file: path to a directory of plugins or one plugin file
    """
    if os.path.isdir(dir_or_file):
        directory = dir_or_file
        LOG.info(_("Loading plugins from directories %s/*") % directory)

        to_load = []
        for root, dirs, files in os.walk(directory):
            to_load.extend((plugin[:-3], root)
                           for plugin in files if plugin.endswith(".py"))
        for plugin, directory in to_load:
            if directory not in sys.path:
                sys.path.append(directory)

            fullpath = os.path.join(directory, plugin)
            try:
                fp, pathname, descr = imp.find_module(plugin, [directory])
                # BUG FIX: the imp docs require the caller to close fp
                # even when load_module() raises — use try/finally so a
                # failed import does not leak the file handle.
                try:
                    imp.load_module(plugin, fp, pathname, descr)
                finally:
                    if fp:
                        fp.close()
                LOG.info(_("\t Loaded module with plugins: %s.py") % fullpath)
            except Exception as e:
                # Consistency fix: translate this warning with _() like
                # the single-file branch below does.
                LOG.warning(_(
                    "\t Failed to load module with plugins %(path)s.py: "
                    "%(e)s") % {"path": fullpath, "e": e})
                if logging.is_debug():
                    LOG.exception(e)
    elif os.path.isfile(dir_or_file):
        plugin_file = dir_or_file
        LOG.info(_("Loading plugins from file %s") % plugin_file)
        if plugin_file not in sys.path:
            sys.path.append(plugin_file)
        try:
            plugin_name = os.path.splitext(plugin_file.split("/")[-1])[0]
            imp.load_source(plugin_name, plugin_file)
            LOG.info(_("\t Loaded module with plugins: %s.py") % plugin_name)
        except Exception as e:
            LOG.warning(_(
                "\t Failed to load module with plugins %(path)s: %(e)s")
                % {"path": plugin_file, "e": e})
            if logging.is_debug():
                LOG.exception(e)
def _delete_single_resource(self, resource):
    """Safe resource deletion with retries and timeouts.

    Send request to delete resource, in case of failures
    repeat it few times. After that pull status of resource until it's
    deleted.

    Writes in LOG warning with UUID of resource that wasn't deleted

    :param resource: instance of resource manager initiated with resource
                     that should be deleted.
    """
    msg_kw = {"uuid": resource.id(),
              "service": resource._service,
              "resource": resource._resource}

    LOG.debug("Deleting %(service)s %(resource)s object %(uuid)s" % msg_kw)

    # Phase 1: issue the delete call, retrying up to _max_attempts times.
    try:
        rutils.retry(resource._max_attempts, resource.delete)
    except Exception as e:
        msg_kw["reason"] = e
        LOG.warning(
            _("Resource deletion failed, max retries exceeded for "
              "%(service)s.%(resource)s: %(uuid)s. Reason: %(reason)s")
            % msg_kw)
        if logging.is_debug():
            LOG.exception(e)
    else:
        # Phase 2: the delete call succeeded — poll is_deleted() until
        # the resource disappears or _timeout seconds elapse.
        started = time.time()
        failures_count = 0
        while time.time() - started < resource._timeout:
            try:
                if resource.is_deleted():
                    return
            except Exception as e:
                LOG.warning(
                    _("Seems like %s.%s.is_deleted(self) method is broken "
                      "It shouldn't raise any exceptions.")
                    % (resource.__module__, type(resource).__name__))
                LOG.exception(e)

                # NOTE(boris-42): Avoid LOG spamming in case of bad
                #                 is_deleted() method
                failures_count += 1
                if failures_count > resource._max_attempts:
                    break
            finally:
                # Pause between polls regardless of outcome.
                time.sleep(resource._interval)

        LOG.warning(_("Resource deletion failed, timeout occurred for "
                      "%(service)s.%(resource)s: %(uuid)s.") % msg_kw)
def check_output(*args, **kwargs):
    """Run a command and return its combined stdout/stderr output.

    Thin wrapper over ``subprocess.check_output`` that forces stderr to
    be captured into stdout, logs the failing command and its output on
    error, and echoes the output when debug logging is enabled.

    :returns: the command output (as returned by subprocess.check_output)
    :raises subprocess.CalledProcessError: if the command exits non-zero
    """
    kwargs["stderr"] = subprocess.STDOUT
    try:
        output = subprocess.check_output(*args, **kwargs)
    except subprocess.CalledProcessError as e:
        LOG.debug("failed cmd: '%s'" % e.cmd)
        LOG.debug("error output: '%s'" % e.output)
        raise

    if logging.is_debug():
        print(output)
    # BUG FIX: the captured output was never returned, so every caller
    # received None despite the function's name promising the output.
    return output
def load_plugins(dir_or_file):
    """Import plugin modules from a directory tree or one file.

    Directories are walked recursively (following symlinks) for ``*.py``
    files; a single file is imported via imp.load_source.  Import
    failures are logged and skipped.
    """
    if os.path.isdir(dir_or_file):
        directory = dir_or_file
        LOG.info(_("Loading plugins from directories %s/*")
                 % directory.rstrip("/"))

        to_load = []
        for root, dirs, files in os.walk(directory, followlinks=True):
            to_load.extend((plugin[:-3], root)
                           for plugin in files if plugin.endswith(".py"))
        for plugin, directory in to_load:
            if directory not in sys.path:
                sys.path.append(directory)

            fullpath = os.path.join(directory, plugin)
            # NOTE(review): fp is leaked if load_module() raises; the imp
            # docs recommend closing it in a finally block — confirm and
            # fix separately.
            try:
                fp, pathname, descr = imp.find_module(plugin, [directory])
                imp.load_module(plugin, fp, pathname, descr)
                fp.close()
                LOG.info(_("\t Loaded module with plugins: %s.py")
                         % fullpath)
            except Exception as e:
                LOG.warning(
                    "\t Failed to load module with plugins %(path)s.py: "
                    "%(e)s" % {"path": fullpath, "e": e})
                if logging.is_debug():
                    LOG.exception(e)
    elif os.path.isfile(dir_or_file):
        plugin_file = dir_or_file
        LOG.info(_("Loading plugins from file %s") % plugin_file)
        # NOTE(review): this appends a *file* path to sys.path; it looks
        # like the containing directory was intended — confirm.
        if plugin_file not in sys.path:
            sys.path.append(plugin_file)
        try:
            plugin_name = os.path.splitext(plugin_file.split("/")[-1])[0]
            imp.load_source(plugin_name, plugin_file)
            LOG.info(_("\t Loaded module with plugins: %s.py")
                     % plugin_name)
        except Exception as e:
            LOG.warning(_(
                "\t Failed to load module with plugins %(path)s: %(e)s")
                % {"path": plugin_file, "e": e})
            if logging.is_debug():
                LOG.exception(e)
def _publisher(publish, queue): """Calls a publish method that fills queue with jobs. :param publish: method that fills the queue :param queue: deque object to be filled by the publish() method """ try: publish(queue) except Exception as e: LOG.warning(_("Failed to publish a task to the queue: %s") % e) if logging.is_debug(): LOG.exception(e)
def cleanup(self):
    """This method is called after the task finish.

    Deletes the flavor created by setup(); failures are logged instead
    of raised so cleanup never aborts the run.
    """
    try:
        nova = osclients.Clients(self.context["admin"]["endpoint"]).nova()
        nova.flavors.delete(self.context["flavor"]["id"])
        LOG.debug("Flavor '%s' deleted" % self.context["flavor"]["id"])
    except Exception as e:
        # BUG FIX: Exception has no ``.message`` attribute on Python 3;
        # format the exception object itself instead.
        msg = "Can't delete flavor: %s" % e
        if logging.is_debug():
            LOG.exception(msg)
        else:
            LOG.warning(msg)
def cleanup(self):
    """Delete the per-tenant networks created by setup().

    Only relevant when Neutron backs the networking service; failures
    are logged instead of raised so cleanup never aborts the run.
    """
    try:
        if self.net_wrapper.SERVICE_IMPL == consts.Service.NEUTRON:
            for user, tenant_id in rutils.iterate_per_tenants(
                    self.context["users"]):
                network = self.context["tenants"][tenant_id]["network"]
                self.net_wrapper.delete_network(network)
    except Exception as e:
        # BUG FIX: Exception has no ``.message`` attribute on Python 3;
        # format the exception object itself instead.
        msg = "Can't cleanup ec2 client: %s" % e
        if logging.is_debug():
            LOG.exception(msg)
        else:
            LOG.warning(msg)
def nova(self, version="2"):
    """Return nova client.

    The client authenticates by reusing the current keystone token and
    is pointed at the compute endpoint from the service catalog.
    """
    kc = self.keystone()
    client = nova.Client(version,
                         auth_token=kc.auth_token,
                         http_log_debug=logging.is_debug(),
                         timeout=CONF.openstack_client_http_timeout,
                         insecure=CONF.https_insecure,
                         cacert=CONF.https_cacert)
    # Resolve the compute endpoint for this region/endpoint type and
    # wire it into the client.
    client.set_management_url(
        kc.service_catalog.url_for(
            service_type="compute",
            endpoint_type=self.endpoint.endpoint_type,
            region_name=self.endpoint.region_name))
    return client
def cinder(self, version="1"):
    """Return cinder client.

    The client is constructed without username/password (None, None)
    and then has the keystone token and volume endpoint injected
    directly, skipping a second authentication round-trip.
    """
    client = cinder.Client(version, None, None,
                           http_log_debug=logging.is_debug(),
                           timeout=CONF.openstack_client_http_timeout,
                           insecure=CONF.https_insecure,
                           cacert=CONF.https_cacert)
    kc = self.keystone()
    volume_api_url = kc.service_catalog.url_for(
        service_type="volume",
        endpoint_type=self.endpoint.endpoint_type,
        region_name=self.endpoint.region_name)
    # Reuse the existing keystone session token and endpoint.
    client.client.management_url = volume_api_url
    client.client.auth_token = kc.auth_token
    return client
def cinder(self, version="1"):
    """Return cinder client.

    Credentials come from this Clients instance's endpoint (password is
    passed under the ``api_key`` key expected by cinderclient); the
    volume endpoint and token are then injected from keystone.
    """
    from cinderclient import client as cinder
    client = cinder.Client(version,
                           http_log_debug=logging.is_debug(),
                           timeout=CONF.openstack_client_http_timeout,
                           insecure=self.endpoint.insecure,
                           cacert=self.endpoint.cacert,
                           **self._get_auth_info(password_key="api_key"))
    kc = self.keystone()
    volume_api_url = kc.service_catalog.url_for(
        service_type="volume",
        endpoint_type=self.endpoint.endpoint_type,
        region_name=self.endpoint.region_name)
    # Reuse the keystone token instead of a second auth round-trip.
    client.client.management_url = volume_api_url
    client.client.auth_token = kc.auth_token
    return client
def nova(self, version="2"):
    """Return nova client.

    Authenticates by reusing the current keystone token and points the
    client at the compute endpoint from the service catalog (password
    is passed under the ``api_key`` key expected by novaclient).
    """
    from novaclient import client as nova
    kc = self.keystone()
    compute_api_url = kc.service_catalog.url_for(
        service_type="compute",
        endpoint_type=self.endpoint.endpoint_type,
        region_name=self.endpoint.region_name)
    client = nova.Client(version,
                         auth_token=kc.auth_token,
                         http_log_debug=logging.is_debug(),
                         timeout=CONF.openstack_client_http_timeout,
                         insecure=self.endpoint.insecure,
                         cacert=self.endpoint.cacert,
                         **self._get_auth_info(password_key="api_key"))
    client.set_management_url(compute_api_url)
    return client
def manila(self, version="1"):
    """Return manila client.

    The share-service endpoint and keystone token are injected manually
    after construction, avoiding a second authentication round-trip.
    """
    from manilaclient import client as manila
    manila_client = manila.Client(
        version,
        region_name=self.endpoint.region_name,
        http_log_debug=logging.is_debug(),
        timeout=CONF.openstack_client_http_timeout,
        insecure=self.endpoint.insecure,
        cacert=self.endpoint.cacert,
        **self._get_auth_info(password_key="api_key",
                              project_name_key="project_name"))
    kc = self.keystone()
    manila_client.client.management_url = kc.service_catalog.url_for(
        service_type="share",
        endpoint_type=self.endpoint.endpoint_type,
        region_name=self.endpoint.region_name)
    manila_client.client.auth_token = kc.auth_token
    return manila_client
def _publisher(publish, queue, is_published): """Calls a publish method that fills queue with jobs. After running publish method it sets is_published variable, that is used to stop workers (consumers). :param publish: method that fills the queue :param queue: deque object to be filled by the publish() method :param is_published: threading.Event that is used to stop consumers and finish task """ try: publish(queue) except Exception as e: LOG.warning(_("Failed to publish a task to the queue: %s") % e) if logging.is_debug(): LOG.exception(e) finally: is_published.set()
def setup(self):
    """This method is called before the task start.

    Creates a benchmark flavor from the context config (falling back to
    minimal defaults) and stores it in the context for scenarios to use.
    Failures are logged, not raised.
    """
    try:
        # use rally.osclients to get necessary client instance
        nova = osclients.Clients(self.context["admin"]["endpoint"]).nova()
        # and than do what you need with this client
        self.context["flavor"] = nova.flavors.create(
            # context settings are stored in self.config
            name=self.config.get("flavor_name", "rally_test_flavor"),
            ram=self.config.get("ram", 1),
            vcpus=self.config.get("vcpus", 1),
            disk=self.config.get("disk", 1)).to_dict()
        LOG.debug("Flavor with id '%s'" % self.context["flavor"]["id"])
    except Exception as e:
        # BUG FIX: Exception has no ``.message`` attribute on Python 3;
        # format the exception object itself instead.
        msg = "Can't create flavor: %s" % e
        if logging.is_debug():
            LOG.exception(msg)
        else:
            LOG.warning(msg)
def create_client(self, version=None):
    """Return nova client.

    Reuses the keystone token for authentication, resolves the compute
    endpoint from the service catalog, and applies the
    boot-from-volume enforcement hook before handing the client out.
    """
    from novaclient import client as nova
    kc = self.keystone()
    compute_api_url = kc.service_catalog.url_for(
        service_type=self.get_service_type(),
        endpoint_type=self.endpoint.endpoint_type,
        region_name=self.endpoint.region_name)
    # TODO(VISHNU KUMAR): access key or ? (original "TBD" note kept)
    client = nova.Client(self.choose_version(version),
                         auth_token=kc.auth_token,
                         http_log_debug=logging.is_debug(),
                         timeout=CONF.openstack_client_http_timeout,
                         insecure=self.endpoint.insecure,
                         cacert=self.endpoint.cacert,
                         **self._get_auth_info(password_key="api_key"))
    # NOTE(review): presumably patches the client so instance boots use
    # volumes — confirm against the enforce_boot_from_volume helper.
    enforce_boot_from_volume(client)
    client.set_management_url(compute_api_url)
    return client
def _print_task_info(task):
    """Print a status header for *task* and failure details if any.

    :param task: task dict with "uuid", "status" and, for failed tasks,
                 a "verification_log" YAML payload
    :returns: False when the task FAILED (details are printed),
              True otherwise
    """
    print()
    print("-" * 80)
    # BUG FIX: the original referenced an undefined name ``task_id``;
    # the identifier is available as task["uuid"] (used below already).
    print(_("Task %(task_id)s: %(status)s")
          % {"task_id": task["uuid"], "status": task["status"]})

    if task["status"] == consts.TaskStatus.FAILED:
        print("-" * 80)
        verification = yaml.safe_load(task["verification_log"])
        if not logging.is_debug():
            print(verification[0])
            print(verification[1])
            print()
            print(_("For more details run:\n"
                    "rally -vd task detailed %s") % task["uuid"])
        else:
            print(yaml.safe_load(verification[2]))
        return False
    return True
def load_plugins(directory):
    """Recursively import every ``*.py`` module under *directory*.

    Import failures are logged and skipped so one broken plugin cannot
    prevent the rest from loading.  Nothing happens when the directory
    does not exist.
    """
    if os.path.exists(directory):
        LOG.info("Loading plugins from directories %s/*" % directory)

        to_load = []
        for root, dirs, files in os.walk(directory):
            to_load.extend((plugin[:-3], root)
                           for plugin in files if plugin.endswith(".py"))
        for plugin, directory in to_load:
            fullpath = os.path.join(directory, plugin)
            # NOTE(review): fp leaks if load_module() raises; the imp
            # docs recommend closing it in a finally block.
            try:
                fp, pathname, descr = imp.find_module(plugin, [directory])
                imp.load_module(plugin, fp, pathname, descr)
                fp.close()
                LOG.info("\t Loaded module with plugins: %s.py" % fullpath)
            except Exception as e:
                LOG.warning(
                    "\t Failed to load module with plugins %(path)s.py: "
                    "%(e)s" % {"path": fullpath, "e": e})
                if logging.is_debug():
                    LOG.exception(e)
def _consumer(consume, queue): """Infinity worker that consumes tasks from queue. :param consume: method that consumes an object removed from the queue :param queue: deque object to popleft() objects from """ cache = {} while True: if not queue: break else: try: args = queue.popleft() except IndexError: # consumed by other thread continue try: consume(cache, args) except Exception as e: LOG.warning(_("Failed to consume a task from the queue: %s") % e) if logging.is_debug(): LOG.exception(e)
def load_plugins(directory):
    """Recursively import every ``*.py`` module under *directory*.

    Import errors are logged and skipped; a missing directory is a
    silent no-op.
    """
    if os.path.exists(directory):
        LOG.info("Loading plugins from directories %s/*" % directory)

        to_load = []
        for root, dirs, files in os.walk(directory):
            to_load.extend((plugin[:-3], root)
                           for plugin in files if plugin.endswith(".py"))
        for plugin, directory in to_load:
            fullpath = os.path.join(directory, plugin)
            # NOTE(review): fp leaks if load_module() raises; the imp
            # docs recommend closing it in a finally block.
            try:
                fp, pathname, descr = imp.find_module(plugin, [directory])
                imp.load_module(plugin, fp, pathname, descr)
                fp.close()
                LOG.info("\t Loaded module with plugins: %s.py" % fullpath)
            except Exception as e:
                LOG.warning(
                    "\t Failed to load module with plugins %(path)s.py: "
                    "%(e)s" % {"path": fullpath, "e": e})
                if logging.is_debug():
                    LOG.exception(e)
def setup(self):
    """Create list of flavors."""
    self.context["flavors"] = {}

    clients = osclients.Clients(self.context["admin"]["endpoint"])
    for flavor_config in self.config:
        extra_specs = flavor_config.get("extra_specs")

        # NOTE(review): FlavorConfig must be dict-like — it is unpacked
        # with ** below and subscripted with ["name"] afterwards; confirm
        # against the FlavorConfig definition.
        flavor_config = FlavorConfig(**flavor_config)
        try:
            flavor = clients.nova().flavors.create(**flavor_config)
        except nova_exceptions.Conflict as e:
            # A flavor with this name already exists — reuse it.
            LOG.warning("Using already existing flavor %s"
                        % flavor_config["name"])
            if logging.is_debug():
                LOG.exception(e)
            continue

        if extra_specs:
            flavor.set_keys(extra_specs)

        self.context["flavors"][flavor_config["name"]] = flavor.to_dict()
        LOG.debug("Created flavor with id '%s'" % flavor.id)
def _print_task_info(task):
    """Print a status header for *task* and failure details if any.

    :param task: task dict with "uuid", "status" and, for failed tasks,
                 a "verification_log" YAML payload
    :returns: False when the task FAILED (details are printed),
              True otherwise
    """
    print()
    print("-" * 80)
    # BUG FIX: the original referenced an undefined name ``task_id``;
    # the identifier is available as task["uuid"] (used below already).
    print(
        _("Task %(task_id)s: %(status)s")
        % {"task_id": task["uuid"], "status": task["status"]})

    if task["status"] == consts.TaskStatus.FAILED:
        print("-" * 80)
        verification = yaml.safe_load(task["verification_log"])
        if not logging.is_debug():
            print(verification[0])
            print(verification[1])
            print()
            print(
                _("For more details run:\n"
                  "rally -vd task detailed %s") % task["uuid"])
        else:
            print(yaml.safe_load(verification[2]))
        return False
    return True
def report(self, tasks=None, out=None, open_it=False):
    """Generate HTML report file for specified task.

    :param tasks: list, UUIDs od tasks or pathes files with tasks results
    :param out: str, output html file name
    :param open_it: bool, whether to open output file in web browser
    """
    # Accept a single UUID/path as well as a list of them.
    tasks = isinstance(tasks, list) and tasks or [tasks]

    results = list()
    processed_names = dict()
    for task_file_or_uuid in tasks:
        if os.path.exists(os.path.expanduser(task_file_or_uuid)):
            # A file on disk: load JSON results and validate each entry
            # against the task-result schema.
            with open(os.path.expanduser(task_file_or_uuid),
                      "r") as inp_js:
                tasks_results = json.load(inp_js)
                for result in tasks_results:
                    try:
                        jsonschema.validate(
                            result,
                            objects.task.TASK_RESULT_SCHEMA)
                    except jsonschema.ValidationError as e:
                        print(_("ERROR: Invalid task result format in %s")
                              % task_file_or_uuid, file=sys.stderr)
                        if logging.is_debug():
                            print(e, file=sys.stderr)
                        else:
                            print(e.message, file=sys.stderr)
                        return 1
        elif uuidutils.is_uuid_like(task_file_or_uuid):
            # A stored task: reshape DB rows into the report format.
            tasks_results = map(
                lambda x: {"key": x["key"],
                           "sla": x["data"]["sla"],
                           "result": x["data"]["raw"],
                           "load_duration": x["data"]["load_duration"],
                           "full_duration": x["data"]["full_duration"]},
                objects.Task.get(task_file_or_uuid).get_results())
        else:
            print(_("ERROR: Invalid UUID or file name passed: %s")
                  % task_file_or_uuid, file=sys.stderr)
            return 1

        # Scenarios sharing a name get increasing "pos" indexes so the
        # report can distinguish repeated runs.
        for task_result in tasks_results:
            if task_result["key"]["name"] in processed_names:
                processed_names[task_result["key"]["name"]] += 1
                task_result["key"]["pos"] = processed_names[
                    task_result["key"]["name"]]
            else:
                processed_names[task_result["key"]["name"]] = 0
            results.append(task_result)

    output_file = os.path.expanduser(out)
    with open(output_file, "w+") as f:
        f.write(plot.plot(results))

    if open_it:
        webbrowser.open_new_tab("file://" + os.path.realpath(out))
def run(argv, categories):
    # CLI entry point: register options, parse argv, dispatch to the
    # selected category/action function and translate exceptions into
    # process exit codes.
    parser = lambda subparsers: _add_command_parsers(categories, subparsers)
    category_opt = cfg.SubCommandOpt("category",
                                     title="Command categories",
                                     help="Available categories",
                                     handler=parser)
    CONF.register_cli_opt(category_opt)
    help_msg = ("Additional custom plugin locations. Multiple files or "
                "directories may be specified. All plugins in the specified"
                " directories and subdirectories will be imported. Plugins in"
                " /opt/rally/plugins and ~/.rally/plugins will always be "
                "imported.")
    CONF.register_cli_opt(cfg.ListOpt("plugin-paths",
                                      default=os.environ.get(
                                          "RALLY_PLUGIN_PATHS"),
                                      help=help_msg))

    try:
        CONF(argv[1:], project="rally", version=version.version_string(),
             default_config_files=find_config_files(CONFIG_SEARCH_PATHS))
        logging.setup("rally")
        if not CONF.get("log_config_append"):
            # The below two lines are to disable noise from request module.
            # The standard way should be we make such lots of settings on
            # the root rally. However current oslo codes doesn't support
            # such interface. So I choose to use a 'hacking' way to avoid
            # INFO logs from request module where user didn't give specific
            # log configuration. And we could remove this hacking after
            # oslo.log has such interface.
            LOG.debug("INFO logs from urllib3 and requests module are hide.")
            requests_log = logging.getLogger("requests").logger
            requests_log.setLevel(logging.WARNING)
            urllib3_log = logging.getLogger("urllib3").logger
            urllib3_log.setLevel(logging.WARNING)

            # NOTE(wtakase): This is for suppressing boto error logging.
            LOG.debug("ERROR log from boto module is hide.")
            boto_log = logging.getLogger("boto").logger
            boto_log.setLevel(logging.CRITICAL)
    except cfg.ConfigFilesNotFoundError:
        # Config file exists but is unreadable: retry the whole command
        # under sudo as the file's owner, otherwise ask for root.
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp("sudo", ["sudo", "-u", "#%s" % st.st_uid]
                          + sys.argv)
            except Exception:
                print(_("sudo failed, continuing as if nothing happened"))

        print(_("Please re-run %s as root.") % argv[0])
        return(2)

    if CONF.category.name == "version":
        print(version.version_string())
        return(0)

    if CONF.category.name == "bash-completion":
        print(_generate_bash_completion_script())
        return(0)

    fn = CONF.category.action_fn
    fn_args = [encodeutils.safe_decode(arg)
               for arg in CONF.category.action_args]
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, "action_kwarg_" + k)
        if v is None:
            continue
        if isinstance(v, six.string_types):
            v = encodeutils.safe_decode(v)
        fn_kwargs[k] = v

    # call the action with the remaining arguments
    # check arguments
    try:
        validate_args(fn, *fn_args, **fn_kwargs)
    except MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to
        # know if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print("Missing arguments:")
        for missing in e.missing:
            for arg in fn.args:
                if arg[1].get("dest", "").endswith(missing):
                    print(" " + arg[0][0])
                    break
        return(1)

    try:
        # Load any extra plugin paths before running the action.
        for path in CONF.plugin_paths or []:
            discover.load_plugins(path)

        validate_deprecated_args(argv, fn)

        if getattr(fn, "_suppress_warnings", False):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                ret = fn(*fn_args, **fn_kwargs)
        else:
            ret = fn(*fn_args, **fn_kwargs)
        return(ret)
    except (IOError, TypeError, ValueError, exceptions.DeploymentNotFound,
            exceptions.TaskNotFound, jsonschema.ValidationError) as e:
        # Known user-level errors: print and exit 1 without a traceback
        # (unless debug logging is on).
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        return 1
    except Exception:
        print(_("Command failed, please check log for more info"))
        raise
def detailed(self, task_id=None, iterations_data=False):
    """Display results table.

    :param task_id: Task uuid
    :param iterations_data: print detailed results for each iteration
    Prints detailed information of task.
    """

    def _print_iterations_data(raw_data):
        # Per-iteration table: one row per iteration, one column per
        # atomic action (taken from the first error-free iteration).
        headers = ["iteration", "full duration"]
        float_cols = ["full duration"]
        atomic_actions = []
        for row in raw_data:
            # find first non-error result to get atomic actions names
            if not row["error"] and "atomic_actions" in row:
                atomic_actions = row["atomic_actions"].keys()
        for row in raw_data:
            if row["atomic_actions"]:
                for (c, a) in enumerate(atomic_actions, 1):
                    action = "%(no)i. %(action)s" % {"no": c, "action": a}
                    headers.append(action)
                    float_cols.append(action)
                break
        table_rows = []
        formatters = dict(zip(float_cols,
                              [cliutils.pretty_float_formatter(col, 3)
                               for col in float_cols]))
        # NOTE(review): ``dlist`` is built per iteration but never
        # appended to ``table_rows``, so an empty table is printed —
        # looks like a dropped ``table_rows.append(...)`` line; confirm.
        for (c, r) in enumerate(raw_data, 1):
            dlist = [c]
            dlist.append(r["duration"])
            if r["atomic_actions"]:
                for action in atomic_actions:
                    dlist.append(r["atomic_actions"].get(action) or 0)
        cliutils.print_list(table_rows,
                            fields=headers,
                            formatters=formatters)
        print()

    task = db.task_get_detailed(task_id)

    if task is None:
        print("The task %s can not be found" % task_id)
        return(1)

    print()
    print("-" * 80)
    print(_("Task %(task_id)s: %(status)s")
          % {"task_id": task_id, "status": task["status"]})

    if task["status"] == consts.TaskStatus.FAILED:
        # Failed tasks carry a YAML verification log instead of results.
        print("-" * 80)
        verification = yaml.safe_load(task["verification_log"])
        if not logging.is_debug():
            print(verification[0])
            print(verification[1])
            print()
            print(_("For more details run:\nrally -vd task detailed %s")
                  % task["uuid"])
        else:
            print(yaml.safe_load(verification[2]))
        return

    for result in task["results"]:
        key = result["key"]
        print("-" * 80)
        print()
        print("test scenario %s" % key["name"])
        print("args position %s" % key["pos"])
        print("args values:")
        print(json.dumps(key["kw"], indent=2))

        # Aggregate per-atomic-action statistics across all iterations.
        raw = result["data"]["raw"]
        table_cols = ["action", "min (sec)", "avg (sec)", "max (sec)",
                      "90 percentile", "95 percentile", "success", "count"]
        float_cols = ["min (sec)", "avg (sec)", "max (sec)",
                      "90 percentile", "95 percentile"]
        formatters = dict(zip(float_cols,
                              [cliutils.pretty_float_formatter(col, 3)
                               for col in float_cols]))
        table_rows = []

        actions_data = utils.get_atomic_actions_data(raw)
        for action in actions_data:
            durations = actions_data[action]
            if durations:
                data = [action,
                        min(durations),
                        utils.mean(durations),
                        max(durations),
                        utils.percentile(durations, 0.90),
                        utils.percentile(durations, 0.95),
                        "%.1f%%" % (len(durations) * 100.0 / len(raw)),
                        len(raw)]
            else:
                data = [action, None, None, None, None, None,
                        "0.0%", len(raw)]
            table_rows.append(rutils.Struct(**dict(zip(table_cols, data))))

        cliutils.print_list(table_rows, fields=table_cols,
                            formatters=formatters)

        if iterations_data:
            _print_iterations_data(raw)

        print(_("Load duration: %s") % result["data"]["load_duration"])
        print(_("Full duration: %s") % result["data"]["full_duration"])

        # NOTE(hughsaunders): ssrs=scenario specific results
        ssrs = []
        for result in raw:
            data = result["scenario_output"].get("data")
            if data:
                ssrs.append(data)
        if ssrs:
            # Aggregate scenario-output values across iterations; keys
            # missing in some iterations are simply skipped.
            keys = set()
            for ssr in ssrs:
                keys.update(ssr.keys())
            headers = ["key", "max", "avg", "min",
                       "90 pecentile", "95 pecentile"]
            float_cols = ["max", "avg", "min",
                          "90 pecentile", "95 pecentile"]
            formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))
            table_rows = []
            for key in keys:
                values = [float(ssr[key]) for ssr in ssrs if key in ssr]
                if values:
                    row = [str(key),
                           max(values),
                           utils.mean(values),
                           min(values),
                           utils.percentile(values, 0.90),
                           utils.percentile(values, 0.95)]
                else:
                    row = [str(key)] + ["n/a"] * 5
                table_rows.append(rutils.Struct(**dict(zip(headers, row))))
            print("\nScenario Specific Results\n")
            cliutils.print_list(table_rows,
                                fields=headers,
                                formatters=formatters)

            for result in raw:
                errors = result["scenario_output"].get("errors")
                if errors:
                    print(errors)

    print()
    print("HINTS:")
    print(_("* To plot HTML graphics with this data, run:"))
    print("\trally task report %s --out output.html" % task["uuid"])
    print()
    print(_("* To get raw JSON output of task results, run:"))
    print("\trally task results %s\n" % task["uuid"])
def report(self, tasks=None, out=None, open_it=False, out_format="html"):
    """Generate report file for specified task.

    :param tasks: list, UUIDs of tasks or paths to files with task
                  results; a single UUID/path is also accepted
    :param out: str, output file name
    :param open_it: bool, whether to open output file in web browser
                    (html format only)
    :param out_format: output format (junit or html)
    :returns: 1 on error, None on success
    """
    # Normalize to a list. The previous `and/or` idiom turned an empty
    # list into [[]], which crashed later in os.path.expanduser().
    tasks = tasks if isinstance(tasks, list) else [tasks]

    results = []
    processed_names = {}
    for task_file_or_uuid in tasks:
        if os.path.exists(os.path.expanduser(task_file_or_uuid)):
            # Results supplied as a JSON file: load and validate every
            # entry against the task-result schema before rendering.
            with open(os.path.expanduser(task_file_or_uuid),
                      "r") as inp_js:
                tasks_results = json.load(inp_js)
                for result in tasks_results:
                    try:
                        jsonschema.validate(
                            result,
                            objects.task.TASK_RESULT_SCHEMA)
                    except jsonschema.ValidationError as e:
                        print(_("ERROR: Invalid task result format in %s")
                              % task_file_or_uuid, file=sys.stderr)
                        if logging.is_debug():
                            print(e, file=sys.stderr)
                        else:
                            print(e.message, file=sys.stderr)
                        return 1
        elif uuidutils.is_uuid_like(task_file_or_uuid):
            # Results stored in the database: fetch by task UUID and
            # reshape into the same structure as the file format.
            tasks_results = map(
                lambda x: {"key": x["key"],
                           "sla": x["data"]["sla"],
                           "result": x["data"]["raw"],
                           "load_duration": x["data"]["load_duration"],
                           "full_duration": x["data"]["full_duration"]},
                objects.Task.get(task_file_or_uuid).get_results())
        else:
            print(_("ERROR: Invalid UUID or file name passed: %s")
                  % task_file_or_uuid,
                  file=sys.stderr)
            return 1

        for task_result in tasks_results:
            # Disambiguate repeated scenario names with a running
            # position index stored in key["pos"].
            if task_result["key"]["name"] in processed_names:
                processed_names[task_result["key"]["name"]] += 1
                task_result["key"]["pos"] = processed_names[
                    task_result["key"]["name"]]
            else:
                processed_names[task_result["key"]["name"]] = 0
            results.append(task_result)

    output_file = os.path.expanduser(out)
    if out_format == "html":
        with open(output_file, "w+") as f:
            f.write(plot.plot(results))
        if open_it:
            # Use the tilde-expanded path so "--out ~/x.html" opens the
            # file actually written (realpath alone does not expand "~").
            webbrowser.open_new_tab(
                "file://" + os.path.realpath(output_file))
    elif out_format == "junit":
        test_suite = junit.JUnit("Rally test suite")
        for result in results:
            # Reset per result: previously a failure message computed for
            # one result leaked into following results whose "sla" was
            # not a list, mislabeling them as failures.
            message = ""
            if isinstance(result["sla"], list):
                message = ",".join([sla["detail"]
                                    for sla in result["sla"]
                                    if not sla["success"]])
            if message:
                outcome = junit.JUnit.FAILURE
            else:
                outcome = junit.JUnit.SUCCESS
            test_suite.add_test(result["key"]["name"],
                                result["full_duration"], outcome, message)
        with open(output_file, "w+") as f:
            f.write(test_suite.to_xml())
    else:
        print(_("Invalid output format: %s") % out_format,
              file=sys.stderr)
        return 1
def run(argv, categories):
    """Parse the command line and dispatch to the selected CLI action.

    :param argv: full process argument vector (argv[0] is the program name)
    :param categories: mapping used by _add_command_parsers to build the
                       subcommand parsers
    :returns: the action's return value on success; 0 for the built-in
              "version"/"bash-completion" categories; 1 for argument or
              known operational errors; 2 when config files are unreadable
    """
    # Register the subcommand option so oslo.config parses the category
    # and its action arguments from the command line.
    parser = lambda subparsers: _add_command_parsers(categories, subparsers)
    category_opt = cfg.SubCommandOpt("category",
                                     title="Command categories",
                                     help="Available categories",
                                     handler=parser)
    CONF.register_cli_opt(category_opt)

    try:
        CONF(argv[1:], project="rally", version=version.version_string())
        logging.setup("rally")
        if not CONF.get("log_config_append"):
            # The below two lines are to disable noise from request module. The
            # standard way should be we make such lots of settings on the root
            # rally. However current oslo codes doesn't support such interface.
            # So I choose to use a 'hacking' way to avoid INFO logs from
            # request module where user didn't give specific log configuration.
            # And we could remove this hacking after oslo.log has such
            # interface.
            LOG.debug("INFO logs from urllib3 and requests module are hide.")
            requests_log = logging.getLogger("requests").logger
            requests_log.setLevel(logging.WARNING)
            urllib3_log = logging.getLogger("urllib3").logger
            urllib3_log.setLevel(logging.WARNING)

            # NOTE(wtakase): This is for suppressing boto error logging.
            LOG.debug("ERROR log from boto module is hide.")
            boto_log = logging.getLogger("boto").logger
            boto_log.setLevel(logging.CRITICAL)
    except cfg.ConfigFilesNotFoundError:
        # Config file exists but is unreadable: try to re-exec the whole
        # process via sudo as the file's owner. os.execvp replaces this
        # process on success, so code below it runs only on failure.
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp("sudo", ["sudo", "-u", "#%s" % st.st_uid] + sys.argv)
            except Exception:
                print(_("sudo failed, continuing as if nothing happened"))

        print(_("Please re-run %s as root.") % argv[0])
        return (2)

    if CONF.category.name == "version":
        print(version.version_string())
        return (0)

    if CONF.category.name == "bash-completion":
        print(_generate_bash_completion_script())
        return (0)

    # Collect the action callable plus its positional and keyword
    # arguments as parsed by the subcommand handler.
    fn = CONF.category.action_fn
    fn_args = [encodeutils.safe_decode(arg)
               for arg in CONF.category.action_args]
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, "action_kwarg_" + k)
        if v is None:
            continue
        if isinstance(v, six.string_types):
            v = encodeutils.safe_decode(v)
        fn_kwargs[k] = v

    # call the action with the remaining arguments
    # check arguments
    try:
        validate_args(fn, *fn_args, **fn_kwargs)
    except MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to know
        # if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print("Missing arguments:")
        for missing in e.missing:
            for arg in fn.args:
                if arg[1].get("dest", "").endswith(missing):
                    print(" " + arg[0][0])
                    break
        return (1)

    try:
        # Plugins are loaded from the system dir first, then the user dir.
        utils.load_plugins("/opt/rally/plugins/")
        utils.load_plugins(os.path.expanduser("~/.rally/plugins/"))
        validate_deprecated_args(argv, fn)

        # Actions marked _suppress_warnings run with warnings silenced.
        if getattr(fn, "_suppress_warnings", False):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                ret = fn(*fn_args, **fn_kwargs)
        else:
            ret = fn(*fn_args, **fn_kwargs)
        return (ret)
    except (IOError, TypeError, ValueError, exceptions.DeploymentNotFound,
            exceptions.TaskNotFound, jsonschema.ValidationError) as e:
        # Known operational errors: print briefly, full trace only in debug.
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        return 1
    except Exception:
        print(_("Command failed, please check log for more info"))
        raise
def report(self, tasks=None, out=None, open_it=False, out_format="html"):
    """Generate report file for specified task.

    :param tasks: list, UUIDs of tasks or paths to files with task
                  results; a single UUID/path is also accepted
    :param out: str, output file name
    :param open_it: bool, whether to open output file in web browser
                    (html format only)
    :param out_format: output format (junit or html)
    :returns: 1 on error, None on success
    """
    # Normalize to a list. The previous `and/or` idiom turned an empty
    # list into [[]], which crashed later in os.path.expanduser().
    tasks = tasks if isinstance(tasks, list) else [tasks]

    results = []
    processed_names = {}
    for task_file_or_uuid in tasks:
        if os.path.exists(os.path.expanduser(task_file_or_uuid)):
            # Results supplied as a JSON file: load and validate every
            # entry against the task-result schema before rendering.
            with open(os.path.expanduser(task_file_or_uuid),
                      "r") as inp_js:
                tasks_results = json.load(inp_js)
                for result in tasks_results:
                    try:
                        jsonschema.validate(
                            result,
                            objects.task.TASK_RESULT_SCHEMA)
                    except jsonschema.ValidationError as e:
                        print(_("ERROR: Invalid task result format in %s")
                              % task_file_or_uuid, file=sys.stderr)
                        if logging.is_debug():
                            print(e, file=sys.stderr)
                        else:
                            print(e.message, file=sys.stderr)
                        return 1
        elif uuidutils.is_uuid_like(task_file_or_uuid):
            # Results stored in the database: fetch by task UUID and
            # reshape into the same structure as the file format.
            tasks_results = map(
                lambda x: {"key": x["key"],
                           "sla": x["data"]["sla"],
                           "result": x["data"]["raw"],
                           "load_duration": x["data"]["load_duration"],
                           "full_duration": x["data"]["full_duration"]},
                objects.Task.get(task_file_or_uuid).get_results())
        else:
            print(_("ERROR: Invalid UUID or file name passed: %s"
                    ) % task_file_or_uuid, file=sys.stderr)
            return 1

        for task_result in tasks_results:
            # Disambiguate repeated scenario names with a running
            # position index stored in key["pos"].
            if task_result["key"]["name"] in processed_names:
                processed_names[task_result["key"]["name"]] += 1
                task_result["key"]["pos"] = processed_names[
                    task_result["key"]["name"]]
            else:
                processed_names[task_result["key"]["name"]] = 0
            results.append(task_result)

    output_file = os.path.expanduser(out)
    if out_format == "html":
        with open(output_file, "w+") as f:
            f.write(plot.plot(results))
        if open_it:
            # Use the tilde-expanded path so "--out ~/x.html" opens the
            # file actually written (realpath alone does not expand "~").
            webbrowser.open_new_tab(
                "file://" + os.path.realpath(output_file))
    elif out_format == "junit":
        test_suite = junit.JUnit("Rally test suite")
        for result in results:
            # Reset per result: previously a failure message computed for
            # one result leaked into following results whose "sla" was
            # not a list, mislabeling them as failures.
            message = ""
            if isinstance(result["sla"], list):
                message = ",".join([sla["detail"]
                                    for sla in result["sla"]
                                    if not sla["success"]])
            if message:
                outcome = junit.JUnit.FAILURE
            else:
                outcome = junit.JUnit.SUCCESS
            test_suite.add_test(result["key"]["name"],
                                result["full_duration"], outcome, message)
        with open(output_file, "w+") as f:
            f.write(test_suite.to_xml())
    else:
        print(_("Invalid output format: %s") % out_format,
              file=sys.stderr)
        return 1
def detailed(self, task_id=None, iterations_data=False):
    """Display results table.

    :param task_id: Task uuid
    :param iterations_data: print detailed results for each iteration

    Prints detailed information of task.
    :returns: 1 when the task cannot be found, None otherwise
    """

    def _print_iterations_data(raw_data):
        # Per-iteration table: one row per iteration with full duration
        # and one column per atomic action.
        headers = ["iteration", "full duration"]
        float_cols = ["full duration"]
        atomic_actions = []
        for row in raw_data:
            # find first non-error result to get atomic actions names
            if not row["error"] and "atomic_actions" in row:
                atomic_actions = row["atomic_actions"].keys()
        for row in raw_data:
            if row["atomic_actions"]:
                for (c, a) in enumerate(atomic_actions, 1):
                    action = "%(no)i. %(action)s" % {"no": c, "action": a}
                    headers.append(action)
                    float_cols.append(action)
                break
        table_rows = []
        formatters = dict(zip(float_cols,
                              [cliutils.pretty_float_formatter(col, 3)
                               for col in float_cols]))
        for (c, r) in enumerate(raw_data, 1):
            dlist = [c]
            dlist.append(r["duration"])
            if r["atomic_actions"]:
                for action in atomic_actions:
                    dlist.append(r["atomic_actions"].get(action) or 0)
            # BUG FIX: rows were built but never collected, so the
            # iterations table always printed empty.
            table_rows.append(rutils.Struct(**dict(zip(headers, dlist))))
        cliutils.print_list(table_rows,
                            fields=headers,
                            formatters=formatters)
        print()

    task = db.task_get_detailed(task_id)

    if task is None:
        print("The task %s can not be found" % task_id)
        return 1

    print()
    print("-" * 80)
    print(_("Task %(task_id)s: %(status)s")
          % {"task_id": task_id, "status": task["status"]})

    if task["status"] == consts.TaskStatus.FAILED:
        # Failed tasks carry a YAML-encoded verification log instead of
        # results; show a short form unless debug logging is enabled.
        print("-" * 80)
        verification = yaml.safe_load(task["verification_log"])
        if not logging.is_debug():
            print(verification[0])
            print(verification[1])
            print()
            print(_("For more details run:\nrally -vd task detailed %s")
                  % task["uuid"])
        else:
            print(yaml.safe_load(verification[2]))
        return

    for result in task["results"]:
        key = result["key"]
        print("-" * 80)
        print()
        print("test scenario %s" % key["name"])
        print("args position %s" % key["pos"])
        print("args values:")
        print(json.dumps(key["kw"], indent=2))

        raw = result["data"]["raw"]
        # Aggregated per-atomic-action statistics table.
        table_cols = ["action", "min (sec)", "avg (sec)", "max (sec)",
                      "90 percentile", "95 percentile",
                      "success", "count"]
        float_cols = ["min (sec)", "avg (sec)", "max (sec)",
                      "90 percentile", "95 percentile"]
        formatters = dict(zip(float_cols,
                              [cliutils.pretty_float_formatter(col, 3)
                               for col in float_cols]))
        table_rows = []

        actions_data = utils.get_atomic_actions_data(raw)
        for action in actions_data:
            durations = actions_data[action]
            if durations:
                data = [action,
                        min(durations),
                        utils.mean(durations),
                        max(durations),
                        utils.percentile(durations, 0.90),
                        utils.percentile(durations, 0.95),
                        "%.1f%%" % (len(durations) * 100.0 / len(raw)),
                        len(raw)]
            else:
                data = [action, None, None, None, None, None,
                        "0.0%", len(raw)]
            table_rows.append(rutils.Struct(**dict(zip(table_cols, data))))

        cliutils.print_list(table_rows,
                            fields=table_cols,
                            formatters=formatters)

        if iterations_data:
            _print_iterations_data(raw)

        print(_("Load duration: %s") % result["data"]["load_duration"])
        print(_("Full duration: %s") % result["data"]["full_duration"])

        # NOTE(hughsaunders): ssrs=scenario specific results
        # (renamed inner loop variable so it no longer shadows the outer
        # `result` from task["results"])
        ssrs = []
        for iter_result in raw:
            data = iter_result["scenario_output"].get("data")
            if data:
                ssrs.append(data)
        if ssrs:
            keys = set()
            for ssr in ssrs:
                keys.update(ssr.keys())
            headers = ["key", "max", "avg", "min",
                       "90 pecentile", "95 pecentile"]
            float_cols = ["max", "avg", "min",
                          "90 pecentile", "95 pecentile"]
            formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))
            table_rows = []
            for key in keys:
                values = [float(ssr[key]) for ssr in ssrs if key in ssr]
                if values:
                    row = [str(key),
                           max(values),
                           utils.mean(values),
                           min(values),
                           utils.percentile(values, 0.90),
                           utils.percentile(values, 0.95)]
                else:
                    row = [str(key)] + ["n/a"] * 5
                table_rows.append(rutils.Struct(**dict(zip(headers, row))))
            print("\nScenario Specific Results\n")
            cliutils.print_list(table_rows,
                                fields=headers,
                                formatters=formatters)

            for iter_result in raw:
                errors = iter_result["scenario_output"].get("errors")
                if errors:
                    print(errors)

    print()
    print("HINTS:")
    print(_("* To plot HTML graphics with this data, run:"))
    print("\trally task report %s --out output.html" % task["uuid"])
    print()
    print(_("* To get raw JSON output of task results, run:"))
    print("\trally task results %s\n" % task["uuid"])
def report(self, tasks=None, out=None, open_it=False):
    """Generate HTML report file for specified task.

    :param tasks: list, UUIDs od tasks or pathes files with tasks results
    :param out: str, output html file name
    :param open_it: bool, whether to open output file in web browser
    """
    # Coerce the argument to a list; a non-list (or a falsy value)
    # is wrapped in a single-element list, matching the original
    # `and/or` normalization exactly.
    tasks = tasks if (isinstance(tasks, list) and tasks) else [tasks]

    results = []
    name_counters = {}
    for source in tasks:
        expanded = os.path.expanduser(source)
        if os.path.exists(expanded):
            # JSON file on disk: every entry must pass schema validation.
            with open(expanded, "r") as source_file:
                tasks_results = json.load(source_file)
                for result in tasks_results:
                    try:
                        jsonschema.validate(
                            result,
                            objects.task.TASK_RESULT_SCHEMA)
                    except jsonschema.ValidationError as e:
                        print(_("ERROR: Invalid task result format in %s")
                              % source, file=sys.stderr)
                        # Full exception in debug mode, short form otherwise.
                        print(e if logging.is_debug() else e.message,
                              file=sys.stderr)
                        return 1
        elif uuidutils.is_uuid_like(source):
            # Looks like a task UUID: pull results from the database and
            # reshape them to match the file layout.
            tasks_results = map(
                lambda x: {"key": x["key"],
                           "sla": x["data"]["sla"],
                           "result": x["data"]["raw"],
                           "load_duration": x["data"]["load_duration"],
                           "full_duration": x["data"]["full_duration"]},
                objects.Task.get(source).get_results())
        else:
            print(_("ERROR: Invalid UUID or file name passed: %s"
                    ) % source, file=sys.stderr)
            return 1

        for task_result in tasks_results:
            # Scenarios sharing a name get increasing "pos" indices.
            name = task_result["key"]["name"]
            if name in name_counters:
                name_counters[name] += 1
                task_result["key"]["pos"] = name_counters[name]
            else:
                name_counters[name] = 0
            results.append(task_result)

    output_file = os.path.expanduser(out)
    with open(output_file, "w+") as f:
        f.write(plot.plot(results))

    if open_it:
        webbrowser.open_new_tab("file://" + os.path.realpath(out))