def required_openstack(config, clients, deployment, admin=False, users=False):
    """Validator that requires OpenStack admin and/or users.

    This allows us to create four kinds of benchmarks:
    1) not OpenStack related (validator is not specified)
    2) requires OpenStack admin
    3) requires OpenStack admin + users
    4) requires OpenStack users

    :param admin: requires OpenStack admin
    :param users: requires OpenStack users
    """
    if not (admin or users):
        return ValidationResult(
            False, _("You should specify admin=True or users=True or both."))

    if deployment["admin"] and deployment["users"]:
        return ValidationResult(True)

    if deployment["admin"]:
        if users and not config.get("context", {}).get("users"):
            return ValidationResult(False,
                                    _("You should specify 'users' context"))
        return ValidationResult(True)

    if deployment["users"] and admin:
        return ValidationResult(False, _("Admin credentials required"))
def setup(self):
    # FIXME(andreykurilin): move all checks to validate method.

    # do not use admin, if we have users...
    user = random.choice(self.context.get("users", [self.context["admin"]]))
    clients = osclients.Clients(user["endpoint"])
    services = clients.services()

    for client_name, conf in six.iteritems(self.config):
        if "service_type" in conf and conf["service_type"] not in services:
            raise exceptions.ValidationError(_(
                "There is no service with '%s' type in your environment.")
                % conf["service_type"])
        elif "service_name" in conf:
            if conf["service_name"] not in services.values():
                raise exceptions.ValidationError(
                    _("There is no '%s' service in your environment")
                    % conf["service_name"])
            service_types = [
                key for key in services
                if services[key] == conf["service_name"]]
            if len(service_types) > 1:
                # NOTE(andreykurilin): is this even possible?
                raise exceptions.ValidationError(
                    _("There are several services with name '%s'. Try to "
                      "specify service_type property instead.")
                    % conf["service_name"])
            self.context["config"]["api_versions"][client_name][
                "service_type"] = service_types[0]
def render_template(cls, task_template, **kwargs):
    """Render a Jinja2 task template to a Rally input task.

    :param task_template: String that contains the template
    :param kwargs: Dict with template arguments
    :returns: rendered template str
    """

    # NOTE(boris-42): We have to import builtins to get the full list of
    #                 builtin functions (e.g. range()). Unfortunately,
    #                 __builtins__ doesn't return them (when it is not
    #                 the main module).
    from six.moves import builtins

    ast = jinja2.Environment().parse(task_template)
    required_kwargs = jinja2.meta.find_undeclared_variables(ast)

    missing = set(required_kwargs) - set(kwargs) - set(dir(builtins))
    # NOTE(boris-42): Removing variables that have default values from
    #                 missing. A construction that won't be properly
    #                 checked is {% set x = x or 1 %}.
    real_missing = []
    for mis in missing:
        if not re.search(mis.join([r"{%\s*set\s+", r"\s*=\s*", r"[^\w]+"]),
                         task_template):
            real_missing.append(mis)

    if real_missing:
        multi_msg = _("Please specify the following template task "
                      "arguments: %s")
        single_msg = _("Please specify the template task argument: %s")
        raise TypeError((len(real_missing) > 1 and multi_msg or single_msg)
                        % ", ".join(real_missing))

    return jinja2.Template(task_template).render(**kwargs)
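# A minimal, self-contained sketch of the technique render_template() relies
# on: jinja2.meta reports every undeclared variable, and variables that only
# have "{% set x = x or ... %}" style defaults must be filtered out separately.
# The template string and argument names below are illustrative, not Rally's.
import jinja2
import jinja2.meta
from six.moves import builtins

template = ("{% set times = times or 1 %}"
            "scenario: {{ scenario }}, times: {{ times }}")
kwargs = {"scenario": "NovaServers.boot_server"}

ast = jinja2.Environment().parse(template)
undeclared = jinja2.meta.find_undeclared_variables(ast)  # {"scenario", "times"}
missing = set(undeclared) - set(kwargs) - set(dir(builtins))
print(missing)  # {"times"} -- render_template() would drop it, since the
                # "{% set times = times or 1 %}" construction gives a default

print(jinja2.Template(template).render(**kwargs))  # "times" falls back to 1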
def _get_validated_image(config, clients, param_name): image_context = config.get("context", {}).get("images", {}) image_args = config.get("args", {}).get(param_name) image_ctx_name = image_context.get("image_name") if not image_args: msg = _("Parameter %s is not specified.") % param_name return (ValidationResult(False, msg), None) if "image_name" in image_context: # NOTE(rvasilets) check string is "exactly equal to" a regex # or image name from context equal to image name from args if "regex" in image_args: match = re.match(image_args.get("regex"), image_ctx_name) if image_ctx_name == image_args.get("name") or ( "regex" in image_args and match): image = { "size": image_context.get("min_disk", 0), "min_ram": image_context.get("min_ram", 0), "min_disk": image_context.get("min_disk", 0) } return (ValidationResult(True), image) try: image_id = types.ImageResourceType.transform( clients=clients, resource_config=image_args) image = clients.glance().images.get(image=image_id).to_dict() return (ValidationResult(True), image) except (glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument): message = _("Image '%s' not found") % image_args return (ValidationResult(False, message), None)
def image_valid_on_flavor(config, clients, deployment, flavor_name,
                          image_name):
    """Validates that the image can be used with the given flavor.

    :param flavor_name: defines which variable should be used
                        to get the flavor id value.
    :param image_name: defines which variable should be used
                       to get the image id value.
    """
    valid_result, flavor = _get_validated_flavor(config, clients, flavor_name)
    if not valid_result.is_valid:
        return valid_result

    valid_result, image = _get_validated_image(config, clients, image_name)
    if not valid_result.is_valid:
        return valid_result

    if flavor.ram < (image["min_ram"] or 0):
        message = _("The memory size for flavor '%s' is too small "
                    "for requested image '%s'") % (flavor.id, image["id"])
        return ValidationResult(False, message)

    if flavor.disk:
        if (image["size"] or 0) > flavor.disk * (1024 ** 3):
            message = _("The disk size for flavor '%s' is too small "
                        "for requested image '%s'") % (flavor.id, image["id"])
            return ValidationResult(False, message)

        if (image["min_disk"] or 0) > flavor.disk:
            message = _("The disk size for flavor '%s' is too small "
                        "for requested image '%s'") % (flavor.id, image["id"])
            return ValidationResult(False, message)
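# The disk checks above mix units: flavor.disk is expressed in GiB while
# image["size"] is in bytes, hence the 1024 ** 3 factor. A tiny sketch with
# made-up numbers (not taken from any real flavor or image):
flavor_disk_gib = 10                 # flavor.disk
image_size_bytes = 2.5 * 10 ** 9     # image["size"]
image_min_disk_gib = 3               # image["min_disk"]

assert image_size_bytes <= flavor_disk_gib * (1024 ** 3)  # raw image fits
assert image_min_disk_gib <= flavor_disk_gib              # declared minimum fits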
def install(self): """Creates local Tempest repo and virtualenv for deployment.""" if not self.is_installed(): LOG.info(_("Tempest is not installed " "for deployment: %s") % self.deployment) LOG.info(_("Installing Tempest " "for deployment: %s") % self.deployment) try: if not os.path.exists(self.path()): if not self._is_git_repo(self.base_repo): self._clone() shutil.copytree(self.base_repo, self.path()) if self.version: check_output(["git", "checkout", self.version], cwd=self.path()) if not self._system_wide: self._install_venv() self._initialize_testr() except subprocess.CalledProcessError as e: self.uninstall() raise TempestSetupFailure("Failed cmd: '%s'" % e.cmd) else: LOG.info(_("Tempest has been successfully installed!")) else: LOG.info(_("Tempest is already installed."))
def _download_image(image_path, image=None): if image: LOG.debug("Downloading image '%s' " "from Glance to %s" % (image.name, image_path)) with open(image_path, "wb") as image_file: for chunk in image.data(): image_file.write(chunk) else: LOG.debug("Downloading image from %s " "to %s" % (CONF.tempest.img_url, image_path)) try: response = requests.get(CONF.tempest.img_url, stream=True) except requests.ConnectionError as err: msg = _("Failed to download image. " "Possibly there is no connection to Internet. " "Error: %s.") % (str(err) or "unknown") raise exceptions.TempestConfigCreationFailure(msg) if response.status_code == 200: with open(image_path, "wb") as image_file: for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks image_file.write(chunk) image_file.flush() else: if response.status_code == 404: msg = _("Failed to download image. Image was not found.") else: msg = _("Failed to download image. " "HTTP error code %d.") % response.status_code raise exceptions.TempestConfigCreationFailure(msg) LOG.debug("The image has been successfully downloaded!")
def _download_cirros_image(self): img_path = os.path.join(self.data_dir, IMAGE_NAME) if os.path.isfile(img_path): return try: response = requests.get(CONF.image.cirros_img_url, stream=True) except requests.ConnectionError as err: msg = _("Failed to download CirrOS image. " "Possibly there is no connection to Internet. " "Error: %s.") % (str(err) or "unknown") raise exceptions.TempestConfigCreationFailure(msg) if response.status_code == 200: with open(img_path + ".tmp", "wb") as img_file: for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks img_file.write(chunk) img_file.flush() os.rename(img_path + ".tmp", img_path) else: if response.status_code == 404: msg = _("Failed to download CirrOS image. " "Image was not found.") else: msg = _("Failed to download CirrOS image. " "HTTP error code %d.") % response.status_code raise exceptions.TempestConfigCreationFailure(msg)
def install_plugin(self): """Install Tempest plugin for local Tempest repo.""" LOG.info(_("Installing Tempest plugin from %s for " "deployment: %s") % (self.plugin_source, self.deployment)) version = self.plugin_version or "master" egg = re.sub("\.git$", "", os.path.basename(self.plugin_source.strip("/"))) cmd = ["pip", "install", "--no-deps", "--src", self.path("plugins/system-wide"), "-e", "git+{0}@{1}#egg={2}".format(self.plugin_source, version, egg)] # Very often Tempest plugins are inside projects and requirements # for plugins are listed in the test-requirements.txt file. test_reqs_path = self.path("plugins/system-wide/" "%s/test-requirements.txt" % egg) if not self._system_wide: cmd.remove("--no-deps") cmd.remove(self.path("plugins/system-wide")) cmd.insert(0, self.path("tools/with_venv.sh")) cmd.insert(4, self.path("plugins")) test_reqs_path = self.path("plugins/" "%s/test-requirements.txt" % egg) check_output(cmd, cwd=self.path()) if os.path.exists(test_reqs_path): cmd = ["pip", "install", "-r", test_reqs_path] if not self._system_wide: cmd.insert(0, self.path("tools/with_venv.sh")) check_output(cmd, cwd=self.path()) LOG.info(_("Tempest plugin has been successfully installed!"))
def validate_heat_template(config, clients, deployment, *param_names):
    """Validates heat template.

    :param param_names: list of parameters to be validated.
    """
    if not param_names:
        return ValidationResult(False, _(
            "validate_heat_template validator accepts non empty arguments "
            "in form of `validate_heat_template(\"foo\", \"bar\")`"))
    for param_name in param_names:
        template_path = config.get("args", {}).get(param_name)
        if not template_path:
            return ValidationResult(False, _(
                "Path to heat template is not specified. It's needed for "
                "heat template validation. Please check the content of `%s` "
                "scenario argument.") % param_name)
        template_path = os.path.expanduser(template_path)
        if not os.path.exists(template_path):
            return ValidationResult(False,
                                    _("No file found by the given path "
                                      "%s") % template_path)
        with open(template_path, "r") as f:
            try:
                clients.heat().stacks.validate(template=f.read())
            except Exception as e:
                dct = {
                    "path": template_path,
                    "msg": str(e),
                }
                msg = (_("Heat template validation failed on %(path)s. "
                         "Original error message: %(msg)s.") % dct)
                return ValidationResult(False, msg)
def setup(self): # FIXME(andreykurilin): move all checks to validate method. # use admin only when `service_name` is presented admin_clients = osclients.Clients( self.context.get("admin", {}).get("credential")) clients = osclients.Clients(random.choice( self.context["users"])["credential"]) services = clients.keystone().service_catalog.get_endpoints() services_from_admin = None for client_name, conf in six.iteritems(self.config): if "service_type" in conf and conf["service_type"] not in services: raise exceptions.ValidationError(_( "There is no service with '%s' type in your environment.") % conf["service_type"]) elif "service_name" in conf: if not self.context.get("admin", {}).get("credential"): raise exceptions.BenchmarkSetupFailure(_( "Setting 'service_name' is allowed only for 'admin' " "user.")) if not services_from_admin: services_from_admin = dict( [(s.name, s.type) for s in admin_clients.keystone().services.list()]) if conf["service_name"] not in services_from_admin: raise exceptions.ValidationError( _("There is no '%s' service in your environment") % conf["service_name"]) self.context["config"]["api_versions"][client_name][ "service_type"] = services_from_admin[conf["service_name"]]
def tempest_tests_exists(config, clients, deployment): """Validator checks that specified test exists.""" args = config.get("args", {}) if "test_name" in args: tests = [args["test_name"]] else: tests = args.get("test_names", []) if not tests: return ValidationResult(False, _("Parameter 'test_name' or 'test_names' " "should be specified.")) verifier = tempest.Tempest(deployment["uuid"], source=config.get("context", {}).get("tempest", {}).get("source")) if not verifier.is_installed(): try: verifier.install() except tempest.TempestSetupFailure as e: return ValidationResult(False, e) if not verifier.is_configured(): verifier.generate_config_file() allowed_tests = verifier.discover_tests() for i, test in enumerate(tests): if not test.startswith("tempest.api."): tests[i] = "tempest.api." + test wrong_tests = set(tests) - allowed_tests if wrong_tests: message = _("One or more tests not found: '%s'") % "', '".join(sorted(wrong_tests)) return ValidationResult(False, message)
def required_api_versions(config, clients, deployment, component, versions): """Validator checks component API versions.""" versions = [str(v) for v in versions] versions_str = ", ".join(versions) msg = _("Task was designed to be used with %(component)s " "V%(version)s, but V%(found_version)s is " "selected.") if component == "keystone": if "2.0" not in versions and hasattr(clients.keystone(), "tenants"): return ValidationResult(False, msg % {"component": component, "version": versions_str, "found_version": "2.0"}) if "3" not in versions and hasattr(clients.keystone(), "projects"): return ValidationResult(False, msg % {"component": component, "version": versions_str, "found_version": "3"}) else: used_version = config.get("context", {}).get("api_versions", {}).get( component, {}).get("version", getattr(clients, component).choose_version()) if not used_version: return ValidationResult( False, _("Unable to determine the API version.")) if str(used_version) not in versions: return ValidationResult( False, msg % {"component": component, "version": versions_str, "found_version": used_version})
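# For non-keystone components the validator above reads the selected version
# from the task's "api_versions" context. A hypothetical config fragment for
# that lookup (key names follow the .get() chain in the code above):
config = {"context": {"api_versions": {"cinder": {"version": "2"}}}}

used_version = config.get("context", {}).get("api_versions", {}).get(
    "cinder", {}).get("version")
assert used_version == "2"   # would satisfy required_api_versions(..., "cinder", [2])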
def setup(self): """Create tenants and users, using the broker pattern.""" self.context["users"] = [] self.context["tenants"] = {} threads = self.config["resource_management_workers"] LOG.debug("Creating %(tenants)d tenants using %(threads)s threads" % {"tenants": self.config["tenants"], "threads": threads}) self.context["tenants"] = self._create_tenants() if len(self.context["tenants"]) < self.config["tenants"]: raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), msg=_("Failed to create the requested number of tenants.")) users_num = self.config["users_per_tenant"] * self.config["tenants"] LOG.debug("Creating %(users)d users using %(threads)s threads" % {"users": users_num, "threads": threads}) self.context["users"] = self._create_users() for user in self.context["users"]: self.context["tenants"][user["tenant_id"]]["users"].append(user) if len(self.context["users"]) < users_num: raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), msg=_("Failed to create the requested number of users."))
def check(self, deployment=None): """Check keystone authentication and list all available services. :param deployment: a UUID or name of the deployment """ headers = ["services", "type", "status"] table_rows = [] try: deployment = api.Deployment.get(deployment) except exceptions.DeploymentNotFound: print(_("Deployment %s is not found.") % deployment) return(1) try: services = api.Deployment.check(deployment) except keystone_exceptions.ConnectionRefused: print(_("Unable to connect %s.") % deployment["admin"]["auth_url"]) return(1) except exceptions.InvalidArgumentsException: data = ["keystone", "identity", "Error"] table_rows.append(utils.Struct(**dict(zip(headers, data)))) print(_("Authentication Issues: %s.") % sys.exc_info()[1]) return(1) for serv_type, serv in services.items(): data = [serv, serv_type, "Available"] table_rows.append(utils.Struct(**dict(zip(headers, data)))) print(_("keystone endpoints are valid and following" " services are available:")) cliutils.print_list(table_rows, headers)
def _load_img(self):
    cirros_url = ("%s/%s/%s" %
                  (CONF.image.cirros_base_url,
                   CONF.image.cirros_version,
                   CONF.image.cirros_image))
    try:
        response = requests.get(cirros_url, stream=True)
    except requests.ConnectionError as err:
        msg = _("Error on downloading cirros image, possibly"
                " no connection to Internet with message %s") % str(err)
        raise TempestConfigCreationFailure(msg)
    if response.status_code == 200:
        with open(self.img_path + ".tmp", "wb") as img_file:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:   # filter out keep-alive new chunks
                    img_file.write(chunk)
                    img_file.flush()
        os.rename(self.img_path + ".tmp", self.img_path)
    else:
        if response.status_code == 404:
            msg = _("Error on downloading cirros image, possibly "
                    "invalid cirros_version or cirros_image in rally.conf")
        else:
            msg = _("Error on downloading cirros image, "
                    "HTTP error code %s") % response.status_code
        raise TempestConfigCreationFailure(msg)
def check(self, deployment=None): """Check keystone authentication and list all available services. :param deployment: a UUID or name of the deployment """ headers = ["services", "type", "status"] table_rows = [] try: deployment = db.deployment_get(deployment) admin = deployment.get("admin") clients = osclients.Clients(objects.Endpoint(**admin)) client = clients.verified_keystone() for service in client.services.list(): data = [service.name, service.type, "Available"] table_rows.append(utils.Struct(**dict(zip(headers, data)))) users = deployment.get("users") for endpoint_dict in users: osclients.Clients(objects.Endpoint(**endpoint_dict)).keystone() print(_("keystone endpoints are valid and following" " services are available:")) except exceptions.InvalidArgumentsException: data = ["keystone", "identity", "Error"] table_rows.append(utils.Struct(**dict(zip(headers, data)))) print(_("Authentication Issues: %s.") % sys.exc_info()[1]) return(1) cliutils.print_list(table_rows, headers)
def _install_venv(self): path_to_venv = self.path(".venv") if not os.path.isdir(path_to_venv): print("No virtual environment found...Install the virtualenv.") LOG.debug("Virtual environment directory: %s" % path_to_venv) required_vers = (2, 7) if sys.version_info[:2] != required_vers: # NOTE(andreykurilin): let's try to find a suitable python # interpreter for Tempest python_interpreter = costilius.get_interpreter(required_vers) if not python_interpreter: raise exceptions.IncompatiblePythonVersion( version=sys.version, required_version=required_vers) LOG.info( _("Tempest requires Python %(required)s, '%(found)s' was " "found in your system and it will be used for installing" " virtual environment.") % {"required": required_vers, "found": python_interpreter}) else: python_interpreter = sys.executable try: check_output("%s ./tools/install_venv.py" % python_interpreter, shell=True, cwd=self.path()) check_output("%s python setup.py install" % self.venv_wrapper, shell=True, cwd=self.path()) except subprocess.CalledProcessError: if os.path.exists(self.path(".venv")): shutil.rmtree(self.path(".venv")) raise TempestSetupFailure(_("failed to install virtualenv"))
def _delete_single_resource(self, resource): """Safe resource deletion with retries and timeouts. Send request to delete resource, in case of failures repeat it few times. After that pull status of resource until it's deleted. Writes in LOG warning with UUID of resource that wasn't deleted :param resource: instance of resource manager initiated with resource that should be deleted. """ msg_kw = { "uuid": resource.id(), "name": resource.name() or "", "service": resource._service, "resource": resource._resource, } LOG.debug("Deleting %(service)s %(resource)s object %(name)s (%(uuid)s)" % msg_kw) try: rutils.retry(resource._max_attempts, resource.delete) except Exception as e: msg_kw["reason"] = e LOG.warning( _( "Resource deletion failed, max retries exceeded for " "%(service)s.%(resource)s: %(uuid)s. Reason: %(reason)s" ) % msg_kw ) if logging.is_debug(): LOG.exception(e) else: started = time.time() failures_count = 0 while time.time() - started < resource._timeout: try: if resource.is_deleted(): return except Exception as e: LOG.warning( _("Seems like %s.%s.is_deleted(self) method is broken " "It shouldn't raise any exceptions.") % (resource.__module__, type(resource).__name__) ) LOG.exception(e) # NOTE(boris-42): Avoid LOG spamming in case of bad # is_deleted() method failures_count += 1 if failures_count > resource._max_attempts: break finally: time.sleep(resource._interval) LOG.warning( _("Resource deletion failed, timeout occurred for " "%(service)s.%(resource)s: %(uuid)s.") % msg_kw )
def show(self, verification=None, sort_by="name", detailed=False): """Display results table of a verification. :param verification: UUID of a verification :param sort_by: Sort results by 'name' or 'duration' :param detailed: Display detailed errors of failed tests """ try: verification = api.Verification.get(verification) tests = verification.get_results() except exceptions.NotFoundException as e: print(six.text_type(e)) return 1 print(_("Total results of verification:\n")) total_fields = ["UUID", "Deployment UUID", "Set name", "Tests", "Failures", "Created at", "Status"] cliutils.print_list([verification], fields=total_fields) print(_("\nTests:\n")) fields = ["name", "time", "status"] results = tests["test_cases"] values = [utils.Struct(**results[test_name]) for test_name in results] sortby_index = ("name", "duration").index(sort_by) cliutils.print_list(values, fields, sortby_index=sortby_index) if detailed: for test in six.itervalues(tests["test_cases"]): if test["status"] == "fail": header = cliutils.make_header( "FAIL: %(name)s\n" "Time: %(time)s" % {"name": test["name"], "time": test["time"]} ) formatted_test = "%(header)s%(log)s\n" % {"header": header, "log": test["traceback"]} print(formatted_test)
def start(self, deployment=None, set_name="", regex=None, tests_file=None, tempest_config=None, xfails_file=None, do_use=True, system_wide=False, concur=0): """Start verification (run Tempest tests). :param deployment: UUID or name of a deployment :param set_name: Name of a Tempest test set :param regex: Regular expression of test :param tests_file: Path to a file with a list of Tempest tests :param tempest_config: User specified Tempest config file location :param xfails_file: Path to a file in YAML format with a list of Tempest tests that are expected to fail :param do_use: Use new task as default for future operations :param system_wide: Whether or not to create a virtual env when installing Tempest; whether or not to use the local env instead of the Tempest virtual env when running the tests :param concur: How many processes to use to run Tempest tests. The default value (0) auto-detects CPU count """ msg = _("Arguments '%s' and '%s' are not compatible. " "You can use only one of the mentioned arguments.") if regex and set_name: print(msg % ("regex", "set")) return 1 if tests_file and set_name: print(msg % ("tests_file", "set")) return 1 if tests_file and regex: print(msg % ("tests_file", "regex")) return 1 if not (regex or set_name or tests_file): set_name = "full" if set_name and set_name not in AVAILABLE_SETS: print(_("Tempest test set '%s' not found " "in available test sets. Available sets are %s.") % (set_name, ", ".join(AVAILABLE_SETS))) return 1 if tests_file and not os.path.exists(tests_file): print(_("File '%s' not found.") % tests_file) return 1 expected_failures = None if xfails_file: if os.path.exists(xfails_file): with open(os.path.abspath(xfails_file), "rb") as f: expected_failures = yaml.load(f) else: print(_("File '%s' not found.") % xfails_file) return 1 verification = api.Verification.verify( deployment, set_name=set_name, regex=regex, tests_file=tests_file, tempest_config=tempest_config, expected_failures=expected_failures, system_wide=system_wide, concur=concur) if do_use: self.use(verification["uuid"])
def _check_tempest_tree_existence(verifier):
    if not os.path.exists(verifier.path()):
        msg = _("Tempest tree for "
                "deployment '%s' not found! ") % verifier.deployment
        LOG.error(
            msg + _("Use `rally verify install` for Tempest installation"))
        raise exceptions.NotFoundException(message=msg)
def _install_venv(self): path_to_venv = self.path(".venv") if not os.path.isdir(path_to_venv): LOG.debug("No virtual environment for Tempest found.") LOG.info(_("Installing the virtual environment for Tempest.")) LOG.debug("Virtual environment directory: %s" % path_to_venv) try: check_output(["virtualenv", "-p", sys.executable, ".venv"], cwd=self.path()) # NOTE(kun): Using develop mode installation is for run # multiple tempest instance. However, dependency # from tempest(os-testr) has issues here, before # https://review.openstack.org/#/c/207691/ being # merged, we have to install dependency manually and # run setup.py with -N(install package without # dependency) check_output([self.venv_wrapper, "pip", "install", "-r", "requirements.txt", "-r", "test-requirements.txt"], cwd=self.path()) check_output([self.venv_wrapper, "pip", "install", "-e", "./"], cwd=self.path()) except subprocess.CalledProcessError: if os.path.exists(self.path(".venv")): shutil.rmtree(self.path(".venv")) raise TempestSetupFailure(_("failed to install virtualenv"))
def create(self, name, fromenv=False, filename=None, do_use=False): """Create new deployment. This command will create a new deployment record in rally database. In the case of ExistingCloud deployment engine it will use the cloud represented in the configuration. If the cloud doesn't exist, Rally can deploy a new one for you with Devstack or Fuel. Different deployment engines exist for these cases. If you use the ExistingCloud deployment engine you can pass a deployment config by environment variables with ``--fromenv``: OS_USERNAME OS_PASSWORD OS_AUTH_URL OS_TENANT_NAME OS_ENDPOINT_TYPE or OS_INTERFACE OS_ENDPOINT OS_REGION_NAME OS_CACERT OS_INSECURE All other deployment engines need more complex configuration data, so it should be stored in a configuration file. You can use physical servers, LXC containers, KVM virtual machines or virtual machines in OpenStack for deploying the cloud. Except physical servers, Rally can create cluster nodes for you. Interaction with virtualization software, OpenStack cloud or physical servers is provided by server providers. :param fromenv: boolean, read environment instead of config file :param filename: path to the configuration file :param name: name of the deployment """ if fromenv: config = {"type": "ExistingCloud"} config.update(envutils.get_creds_from_env_vars()) else: if not filename: print("Either --filename or --fromenv is required.") return(1) filename = os.path.expanduser(filename) with open(filename, "rb") as deploy_file: config = yaml.safe_load(deploy_file.read()) try: deployment = api.Deployment.create(config, name) except jsonschema.ValidationError: print(_("Config schema validation error: %s.") % sys.exc_info()[1]) return(1) except exceptions.DeploymentNameExists: print(_("Error: %s") % sys.exc_info()[1]) return(1) self.list(deployment_list=[deployment]) if do_use: self.use(deployment["uuid"])
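# Illustrative only: roughly what the --fromenv path collects. The exact key
# names used by Rally's envutils helper may differ; this is a sketch of the
# idea (reading the documented OS_* variables), not its implementation.
import os

def creds_from_env():
    return {
        "auth_url": os.environ.get("OS_AUTH_URL"),
        "region_name": os.environ.get("OS_REGION_NAME"),
        "endpoint_type": (os.environ.get("OS_ENDPOINT_TYPE")
                          or os.environ.get("OS_INTERFACE")),
        "admin": {
            "username": os.environ.get("OS_USERNAME"),
            "password": os.environ.get("OS_PASSWORD"),
            "tenant_name": os.environ.get("OS_TENANT_NAME"),
        },
        "https_cacert": os.environ.get("OS_CACERT", ""),
        "https_insecure": bool(os.environ.get("OS_INSECURE")),
    }

config = {"type": "ExistingCloud"}
config.update(creds_from_env())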
def details(self): strs = [_("Action: '%s'. %.2fs <= %.2fs") % (atom, self.avg_by_action[atom], val) for atom, val in self.criterion_items] head = _("Average duration of one iteration for atomic actions:") end = _("Status: %s") % self.status() return "\n".join([head] + strs + [end])
def get_image_uuid(self): """Get image uuid. Download image if necessary.""" image_uuid = self.config["image"].get("uuid", None) if image_uuid: return image_uuid else: if not self.glance: raise exceptions.InvalidConfigException( "If glance is not available in the service catalog" " obtained by the openstack server provider, then" " images cannot be uploaded so the uuid of an" " existing image must be specified in the" " deployment config." ) for image in self.glance.images.list(): if image.checksum == self.config["image"]["checksum"]: LOG.info(_("Found image with appropriate checksum. Using it.")) return image.id LOG.info(_("Downloading new image %s") % self.config["image"]["url"]) image = self.glance.images.create( name=self.config["image"]["name"], copy_from=self.config["image"]["url"], disk_format=self.config["image"]["format"], container_format="bare") image.get() if image.checksum != self.config["image"]["checksum"]: raise exceptions.ChecksumMismatch(url=self.config["image"]["url"]) return image.id
def setup(self): """Create containers and objects, using the broker pattern.""" threads = self.config["resource_management_workers"] containers_per_tenant = self.config["containers_per_tenant"] containers_num = len(self.context["tenants"]) * containers_per_tenant LOG.debug("Creating %d containers using %d threads." % (containers_num, threads)) containers_count = len(self._create_containers(self.context, containers_per_tenant, threads)) if containers_count != containers_num: raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), msg=_("Failed to create the requested number of containers, " "expected %(expected)s but got %(actual)s.") % {"expected": containers_num, "actual": containers_count}) objects_per_container = self.config["objects_per_container"] objects_num = containers_num * objects_per_container LOG.debug("Creating %d objects using %d threads." % (objects_num, threads)) objects_count = len(self._create_objects(self.context, objects_per_container, self.config["object_size"], threads)) if objects_count != objects_num: raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), msg=_("Failed to create the requested number of objects, " "expected %(expected)s but got %(actual)s.") % {"expected": objects_num, "actual": objects_count})
def create(self, name, filename, do_use=False): """Create new deployment. This command will create a new deployment record in rally ovs database. """ filename = os.path.expanduser(filename) print("file:" + filename) with open(filename, "rb") as deploy_file: config = yaml.safe_load(deploy_file.read()) try: deployment = api.Deployment.create(config, name) except jsonschema.ValidationError: print(_("Config schema validation error: %s.") % sys.exc_info()[1]) return(1) except exceptions.DeploymentNameExists: print(_("Error: %s") % sys.exc_info()[1]) return(1) self.list(deployment_list=[deployment]) if do_use: self.use(deployment["uuid"])
def _setup_for_existing_users(self): if (self.config["use_share_networks"] and not self.config["share_networks"]): msg = _("Usage of share networks was enabled but for deployment " "with existing users share networks also should be " "specified via arg 'share_networks'") raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), msg=msg) # Set flag that says we will not delete/cleanup share networks self.context[CONTEXT_NAME]["delete_share_networks"] = False for tenant_name_or_id, share_networks in self.config[ "share_networks"].items(): # Verify project existence for tenant in self.context["tenants"].values(): if tenant_name_or_id in (tenant["id"], tenant["name"]): tenant_id = tenant["id"] existing_user = None for user in self.context["users"]: if user["tenant_id"] == tenant_id: existing_user = user break break else: msg = _("Provided tenant Name or ID '%s' was not found in " "existing tenants.") % tenant_name_or_id raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), msg=msg) self.context["tenants"][tenant_id][CONTEXT_NAME] = {} self.context["tenants"][tenant_id][CONTEXT_NAME][ "share_networks"] = [] manila_scenario = manila_utils.ManilaScenario( {"user": existing_user}) existing_sns = manila_scenario._list_share_networks( detailed=False, search_opts={"project_id": tenant_id}) for sn_name_or_id in share_networks: # Verify share network existence for sn in existing_sns: if sn_name_or_id in (sn.id, sn.name): break else: msg = _("Specified share network '%(sn)s' does not " "exist for tenant '%(tenant_id)s'") % { "sn": sn_name_or_id, "tenant_id": tenant_id} raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), msg=msg) # Set share network for project self.context["tenants"][tenant_id][CONTEXT_NAME][ "share_networks"].append(sn) # Add shared integer var per project that will be used as index # for list with share networks. It is required for balancing. self.context["tenants"][tenant_id][CONTEXT_NAME]["sn_iterator"] = ( utils.RAMInt())
def wrapper(self, *args, **kwargs): params = {"msg": msg % kw, "obj_name": obj.title(), "uuid": getattr(self, obj)["uuid"]} log_function(_("%(obj_name)s %(uuid)s | Starting: %(msg)s") % params) result = f(self, *args, **kwargs) log_function(_("%(obj_name)s %(uuid)s | Completed: %(msg)s") % params) return result
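# The wrapper above is only the inner function of a logging decorator. For
# context, a minimal self-contained sketch of the surrounding factory; the
# names here are illustrative, not Rally's exact log_task_wrapper helper.
import functools
import logging

LOG = logging.getLogger(__name__)

def log_wrapper(obj, msg, log_function=LOG.info, **kw):
    """Log start/end around a method of `self.<obj>`.

    `obj` names an attribute of `self` (e.g. "task") that is a dict with a
    "uuid" key, mirroring how the wrapper above builds its log params.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            params = {"msg": msg % kw,
                      "obj_name": obj.title(),
                      "uuid": getattr(self, obj)["uuid"]}
            log_function("%(obj_name)s %(uuid)s | Starting: %(msg)s" % params)
            result = f(self, *args, **kwargs)
            log_function("%(obj_name)s %(uuid)s | Completed: %(msg)s" % params)
            return result
        return wrapper
    return decorator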
class InvalidConfigException(RallyException): msg_fmt = _("This config has invalid schema: `%(message)s`")
class InvalidTaskConfig(InvalidTaskException):
    msg_fmt = _("Input task is invalid!\n\n"
                "Subtask %(name)s[%(pos)s] has wrong configuration"
                "\nSubtask configuration:\n%(config)s\n"
                "\nReason:\n %(reason)s")
class NotFoundScenarios(InvalidTaskException): msg_fmt = _("There are no benchmark scenarios with names: `%(names)s`.")
class ThreadTimeoutException(RallyException): msg_fmt = _("Iteration interrupted due to timeout.")
class NotFoundException(RallyException): msg_fmt = _("The resource can not be found: %(message)s")
class PluginWithSuchNameExists(RallyException):
    msg_fmt = _("Plugin with such name: %(name)s already exists in "
                "%(namespace)s namespace. Its module is located at "
                "%(existing_path)s. You are trying to add a plugin whose "
                "module is located at %(new_path)s.")
class PluginNotFound(NotFoundException): msg_fmt = _("There is no plugin with name: `%(name)s` in " "%(namespace)s namespace.")
class NoSuchRole(NotFoundException): msg_fmt = _("There is no role with name `%(role)s`.")
class NoSuchConfigField(NotFoundException): msg_fmt = _("There is no field in the task config with name `%(name)s`.")
class BaseCustomImageGenerator(context.Context): """Base class for the contexts providing customized image with. Every context class for the specific customization must implement the method `_customize_image` that is able to connect to the server using SSH and e.g. install applications inside it. This is used e.g. to install the benchmark application using SSH access. This base context class provides a way to prepare an image with custom preinstalled applications. Basically, this code boots a VM, calls the `_customize_image` and then snapshots the VM disk, removing the VM afterwards. The image UUID is stored in the user["custom_image"]["id"] and can be used afterwards by scenario. """ CONFIG_SCHEMA = { "type": "object", "$schema": consts.JSON_SCHEMA, "properties": { "image": { "type": "object", "properties": { "name": { "type": "string" } } }, "flavor": { "type": "object", "properties": { "name": { "type": "string" } } }, "username": { "type": "string" }, "password": { "type": "string" }, "floating_network": { "type": "string" }, "internal_network": { "type": "string" }, "port": { "type": "integer", "minimum": 1, "maximum": 65535 }, "userdata": { "type": "string" }, "workers": { "type": "integer", "minimum": 1, } }, "required": ["image", "flavor"], "additionalProperties": False } DEFAULT_CONFIG = {"username": "******", "port": 22, "workers": 1} @utils.log_task_wrapper(LOG.info, _("Enter context: `custom_image`")) def setup(self): """Creates custom image(s) with preinstalled applications. When admin is present creates one public image that is usable from all the tenants and users. Otherwise create one image per user and tenant. """ if "admin" in self.context: # NOTE(pboldin): Create by first user and make it public by # the admin user = self.context["users"][0] tenant = self.context["tenants"][user["tenant_id"]] nics = None if "networks" in tenant: nics = [{"net-id": tenant["networks"][0]["id"]}] custom_image = self.create_one_image(user, nics=nics) self.make_image_public(custom_image) for tenant in self.context["tenants"].values(): tenant["custom_image"] = custom_image else: def publish(queue): users = self.context.get("users", []) for user, tenant_id in utils.iterate_per_tenants(users): queue.append((user, tenant_id)) def consume(cache, args): user, tenant_id = args tenant = self.context["tenants"][tenant_id] tenant["custom_image"] = self.create_one_image(user) broker.run(publish, consume, self.config["workers"]) def create_one_image(self, user, **kwargs): """Create one image for the user.""" clients = osclients.Clients(user["endpoint"]) image_id = types.ImageResourceType.transform( clients=clients, resource_config=self.config["image"]) flavor_id = types.FlavorResourceType.transform( clients=clients, resource_config=self.config["flavor"]) vm_scenario = vmtasks.VMTasks(self.context, clients=clients) server, fip = vm_scenario._boot_server_with_fip( name=vm_scenario._generate_random_name("rally_ctx_custom_image_"), image=image_id, flavor=flavor_id, floating_network=self.config.get("floating_network"), userdata=self.config.get("userdata"), key_name=user["keypair"]["name"], security_groups=[user["secgroup"]["name"]], **kwargs) LOG.debug("Installing benchmark on %r %s", server, fip["ip"]) self.customize_image(server, fip, user) LOG.debug("Stopping server %r", server) vm_scenario._stop_server(server) LOG.debug("Creating snapshot for %r", server) custom_image = vm_scenario._create_image(server).to_dict() vm_scenario._delete_server_with_fip(server, fip) return custom_image def 
make_image_public(self, custom_image): """Make the image available publicly.""" admin_clients = osclients.Clients(self.context["admin"]["endpoint"]) LOG.debug("Making image %r public", custom_image["id"]) admin_clients.glance().images.get( custom_image["id"]).update(is_public=True) @utils.log_task_wrapper(LOG.info, _("Exit context: `custom_image`")) def cleanup(self): """Delete created custom image(s).""" if "admin" in self.context: user = self.context["users"][0] tenant = self.context["tenants"][user["tenant_id"]] if "custom_image" in tenant: self.delete_one_image(user, tenant["custom_image"]) tenant.pop("custom_image") else: def publish(queue): users = self.context.get("users", []) for user, tenant_id in utils.iterate_per_tenants(users): queue.append((user, tenant_id)) def consume(cache, args): user, tenant_id = args tenant = self.context["tenants"][tenant_id] if "custom_image" in tenant: self.delete_one_image(user, tenant["custom_image"]) tenant.pop("custom_image") broker.run(publish, consume, self.config["workers"]) def delete_one_image(self, user, custom_image): """Delete the image created for the user and tenant.""" clients = osclients.Clients(user["endpoint"]) nova_scenario = nova_utils.NovaScenario(context=self.context, clients=clients) with logging.ExceptionLogger( LOG, _("Unable to delete image %s") % custom_image["id"]): custom_image = nova_scenario.clients("nova").images.get( custom_image["id"]) nova_scenario._delete_image(custom_image) @utils.log_task_wrapper(LOG.info, _("Custom image context: customizing")) def customize_image(self, server, ip, user): return self._customize_image(server, ip, user) @abc.abstractmethod def _customize_image(self, server, ip, user): """Override this method with one that customizes image. Basically, code can simply call `VMScenario._run_command` function specifying an installation script and interpreter. This script will be then executed using SSH. :param server: nova.Server instance :param ip: dict with server IP details :param user: user who started a VM instance. Used to extract keypair """ pass
class CeilometerSampleGenerator(context.Context): """Context for creating samples and collecting resources for benchmarks.""" CONFIG_SCHEMA = { "type": "object", "$schema": consts.JSON_SCHEMA, "properties": { "counter_name": { "type": "string" }, "counter_type": { "type": "string" }, "counter_unit": { "type": "string" }, "counter_volume": { "type": "number", "minimum": 0 }, "resources_per_tenant": { "type": "integer", "minimum": 1 }, "samples_per_resource": { "type": "integer", "minimum": 1 }, }, "required": ["counter_name", "counter_type", "counter_unit", "counter_volume"], "additionalProperties": False } DEFAULT_CONFIG = {"resources_per_tenant": 5, "samples_per_resource": 5} @rutils.log_task_wrapper(LOG.info, _("Enter context: `Ceilometer`")) def setup(self): counter_name = self.config["counter_name"] counter_type = self.config["counter_type"] counter_unit = self.config["counter_unit"] counter_volume = self.config["counter_volume"] for user, tenant_id in rutils.iterate_per_tenants( self.context["users"]): self.context["tenants"][tenant_id]["samples"] = [] self.context["tenants"][tenant_id]["resources"] = [] scenario = ceilo_utils.CeilometerScenario({"user": user}) for i in range(self.config["resources_per_tenant"]): for j in range(self.config["samples_per_resource"]): sample = scenario._create_sample(counter_name, counter_type, counter_unit, counter_volume) self.context["tenants"][tenant_id]["samples"].append( sample[0].to_dict()) self.context["tenants"][tenant_id]["resources"].append( sample[0].resource_id) @rutils.log_task_wrapper(LOG.info, _("Exit context: `Ceilometer`")) def cleanup(self): # We don't have API for removal of samples and resources pass
class SaharaOutputDataSources(context.Context): """Context class for setting up Output Data Sources for an EDP job.""" CONFIG_SCHEMA = { "type": "object", "$schema": consts.JSON_SCHEMA, "properties": { "output_type": { "enum": ["swift", "hdfs"], }, "output_url_prefix": { "type": "string", } }, "additionalProperties": False, "required": ["output_type", "output_url_prefix"] } @logging.log_task_wrapper(LOG.info, _("Enter context: `Sahara Output Data Sources`")) def setup(self): utils.init_sahara_context(self) for user, tenant_id in rutils.iterate_per_tenants( self.context["users"]): clients = osclients.Clients(user["credential"]) sahara = clients.sahara() if self.config["output_type"] == "swift": swift = swift_utils.SwiftScenario(clients=clients, context=self.context) container_name = self.generate_random_name() self.context["tenants"][tenant_id]["sahara"]["container"] = { "name": swift._create_container(container_name=container_name), "output_swift_objects": [] } self.setup_outputs_swift(swift, sahara, tenant_id, container_name, user["credential"].username, user["credential"].password) else: self.setup_outputs_hdfs(sahara, tenant_id, self.config["output_url_prefix"]) def setup_outputs_hdfs(self, sahara, tenant_id, output_url): output_ds = sahara.data_sources.create( name=self.generate_random_name(), description="", data_source_type="hdfs", url=output_url) self.context["tenants"][tenant_id]["sahara"]["output"] = output_ds.id def setup_outputs_swift(self, swift, sahara, tenant_id, container_name, username, password): output_ds_swift = sahara.data_sources.create( name=self.generate_random_name(), description="", data_source_type="swift", url="swift://" + container_name + ".sahara/", credential_user=username, credential_pass=password) self.context["tenants"][tenant_id]["sahara"]["output"] = ( output_ds_swift.id) @logging.log_task_wrapper(LOG.info, _("Exit context: `Sahara Output Data Sources`")) def cleanup(self): for user, tenant_id in rutils.iterate_per_tenants( self.context["users"]): if self.context["tenants"][tenant_id].get("sahara", {}).get( "container", {}).get("name") is not None: for swift_object in ( self.context["tenants"][tenant_id]["sahara"] ["container"]["output_swift_objects"]): res_cleanup.SwiftObject(swift_object[1]) res_cleanup.SwiftContainer(self.context["tenants"][tenant_id].get( "sahara", {}).get("container", {}).get("name")) resources = ["data_sources"] resource_manager.cleanup( names=["sahara.%s" % res for res in resources], users=self.context.get("users", []))
class InvalidTaskException(InvalidConfigException): msg_fmt = _("Task config is invalid: `%(message)s`")
class DevstackEngine(engine.Engine): """Deploy Devstack cloud. Sample configuration: { "type": "DevstackEngine", "devstack_repo": "https://example.com/devstack/", "localrc": { "ADMIN_PASSWORD": "******" }, "provider": { "type": "ExistingServers", "credentials": [{"user": "******", "host": "10.2.0.8"}] } } """ CONFIG_SCHEMA = { "type": "object", "properties": { "type": { "type": "string" }, "provider": { "type": "object" }, "localrc": { "type": "object" }, "devstack_repo": { "type": "string" }, "devstack_branch": { "type": "string" }, }, "required": ["type", "provider"] } def __init__(self, deployment): super(DevstackEngine, self).__init__(deployment) self.localrc = { "DATABASE_PASSWORD": "******", "RABBIT_PASSWORD": "******", "SERVICE_TOKEN": "rally", "SERVICE_PASSWORD": "******", "ADMIN_PASSWORD": "******", "RECLONE": "yes", "SYSLOG": "yes", } if "localrc" in self.config: self.localrc.update(self.config["localrc"]) @utils.log_deploy_wrapper(LOG.info, _("Prepare server for devstack")) def prepare_server(self, server): script_path = os.path.abspath( os.path.join(os.path.dirname(__file__), "devstack", "install.sh")) server.ssh.run("/bin/sh -e", stdin=open(script_path, "rb")) if server.password: server.ssh.run("chpasswd", stdin="rally:%s" % server.password) @utils.log_deploy_wrapper(LOG.info, _("Deploy devstack")) def deploy(self): self.servers = self.get_provider().create_servers() devstack_repo = self.config.get("devstack_repo", DEVSTACK_REPO) devstack_branch = self.config.get("devstack_branch", DEVSTACK_BRANCH) localrc = "" for k, v in six.iteritems(self.localrc): localrc += "%s=%s\n" % (k, v) for server in self.servers: self.deployment.add_resource(provider_name="DevstackEngine", type="credentials", info=server.get_credentials()) cmd = "/bin/sh -e -s %s %s" % (devstack_repo, devstack_branch) server.ssh.run(cmd, stdin=get_script("install.sh")) devstack_server = get_updated_server(server, user=DEVSTACK_USER) devstack_server.ssh.run("cat > ~/devstack/localrc", stdin=localrc) devstack_server.ssh.run("~/devstack/stack.sh") admin_endpoint = objects.Endpoint( "http://%s:5000/v2.0/" % self.servers[0].host, "admin", self.localrc["ADMIN_PASSWORD"], "admin", consts.EndpointPermission.ADMIN) return {"admin": admin_endpoint} def cleanup(self): for resource in self.deployment.get_resources(type="credentials"): server = provider.Server.from_credentials(resource.info) devstack_server = get_updated_server(server, user=DEVSTACK_USER) devstack_server.ssh.run("~/devstack/unstack.sh") self.deployment.delete_resource(resource.id)
class ImmutableException(RallyException): msg_fmt = _("This object is immutable.")
class InvalidRunnerResult(RallyException): msg_fmt = _("Type of result of `%(name)s` runner should be" " `base.ScenarioRunnerResult`. Got: `%(results_type)s`")
class InvalidConnectionString(RallyException): msg_fmt = _("The connection string is not valid: %(message)s. Please " "check your connection string.")
class InvalidArgumentsException(RallyException): msg_fmt = _("Invalid arguments: '%(message)s'")
class MigrateException(RallyException): msg_fmt = _("Migration failed: %(message)s")
class DowngradeNotSupported(RallyException): msg_fmt = _("Database schema downgrade is not supported.")
class WorkerAlreadyRegistered(RallyException): msg_fmt = _("Worker %(worker)s already registered")
class InvalidHostException(RallyException): msg_fmt = _("Live Migration failed: %(message)s")
class IncompatiblePythonVersion(RallyException): msg_fmt = _("Incompatible python version found '%(version)s', " "required '%(required_version)s'")
class SaharaClusterFailure(RallyException): msg_fmt = _("Sahara cluster %(name)s has failed to %(action)s. " "Reason: '%(reason)s'")
class ImageCleanUpException(CleanUpException): msg_fmt = _("Image Deletion Failed")
class WorkerNotFound(NotFoundException): msg_fmt = _("Worker %(worker)s could not be found")
def start(self, deployment=None, set_name="", regex=None, tests_file=None, tests_file_to_skip=None, tempest_config=None, xfails_file=None, do_use=True, system_wide=False, concur=0, failing=False): """Start verification (run Tempest tests). :param deployment: UUID or name of a deployment :param set_name: Name of a Tempest test set :param regex: Regular expression of test :param tests_file: Path to a file with a list of Tempest tests to run them :param tests_file_to_skip: Path to a file with a list of Tempest tests to skip them :param tempest_config: User specified Tempest config file location :param xfails_file: Path to a YAML file with a list of Tempest tests that are expected to fail :param do_use: Use new task as default for future operations :param system_wide: Whether or not to create a virtual env when installing Tempest; whether or not to use the local env instead of the Tempest virtual env when running the tests :param concur: How many processes to use to run Tempest tests. The default value (0) auto-detects CPU count :param failing: Re-run tests that failed during the last execution """ msg = _("Arguments '%s' and '%s' are incompatible. " "You can use only one of the mentioned arguments.") incompatible_args_map = [{ "regex": regex, "set": set_name }, { "tests-file": tests_file, "set": set_name }, { "tests-file": tests_file, "regex": regex }, { "tests-file": tests_file, "skip-list": tests_file_to_skip }, { "failing": failing, "set": set_name }, { "failing": failing, "regex": regex }, { "failing": failing, "tests-file": tests_file }, { "failing": failing, "skip-list": tests_file_to_skip }] for args in incompatible_args_map: arg_keys = list(args) if args[arg_keys[0]] and args[arg_keys[1]]: print(msg % (arg_keys[0], arg_keys[1])) return 1 if not (regex or set_name or tests_file or failing): set_name = "full" if set_name and set_name not in AVAILABLE_SETS: print( _("Tempest test set '%s' not found " "in available test sets. Available sets are %s.") % (set_name, ", ".join(AVAILABLE_SETS))) return 1 if tests_file and not os.path.exists(tests_file): print(_("File '%s' not found.") % tests_file) return 1 if tests_file_to_skip and not os.path.exists(tests_file_to_skip): print(_("File '%s' not found.") % tests_file_to_skip) return 1 expected_failures = None if xfails_file: if os.path.exists(xfails_file): with open(os.path.abspath(xfails_file), "rb") as f: expected_failures = yaml.load(f) else: print(_("File '%s' not found.") % xfails_file) return 1 verification = api.Verification.verify( deployment, set_name=set_name, regex=regex, tests_file=tests_file, tests_file_to_skip=tests_file_to_skip, tempest_config=tempest_config, expected_failures=expected_failures, system_wide=system_wide, concur=concur, failing=failing) if do_use: self.use(verification["uuid"]) else: print(_("Verification UUID: %s") % verification["uuid"])
class EncryptionTypeDeleteException(CleanUpException): msg_fmt = _("EncryptionType Deletion Failed")
class EC2ServerGenerator(context.Context): """Context class for adding temporary servers for benchmarks. Servers are added for each tenant. """ CONFIG_SCHEMA = { "type": "object", "$schema": consts.JSON_SCHEMA, "properties": { "image": { "type": "object", "properties": { "name": { "type": "string" } } }, "flavor": { "type": "object", "properties": { "name": { "type": "string" } } }, "servers_per_tenant": { "type": "integer", "minimum": 1 } }, "required": ["image", "flavor", "servers_per_tenant"], "additionalProperties": False } @logging.log_task_wrapper(LOG.info, _("Enter context: `EC2 Servers`")) def setup(self): image = self.config["image"] flavor = self.config["flavor"] clients = osclients.Clients(self.context["users"][0]["credential"]) image_id = types.EC2Image.transform(clients=clients, resource_config=image) for user, tenant_id in rutils.iterate_per_tenants( self.context["users"]): LOG.debug("Booting servers for tenant %s ", (user["tenant_id"])) ec2_scenario = ec2_utils.EC2Scenario({ "user": user, "task": self.context["task"], "owner_id": self.context["owner_id"] }) LOG.debug( "Calling _boot_servers with " "image_id={image_id} flavor_name={flavor_name} " "servers_per_tenant={servers_per_tenant}".format( image_id=image_id, flavor_name=flavor["name"], servers_per_tenant=self.config["servers_per_tenant"])) servers = ec2_scenario._boot_servers( image_id, flavor["name"], self.config["servers_per_tenant"]) current_servers = [server.id for server in servers] self.context["tenants"][tenant_id]["ec2_servers"] = current_servers @logging.log_task_wrapper(LOG.info, _("Exit context: `EC2 Servers`")) def cleanup(self): resource_manager.cleanup(names=["ec2.servers"], users=self.context.get("users", []), superclass=ec2_utils.EC2Scenario, task_id=self.get_owner_id())
class CleanUpException(RallyException): msg_fmt = _("Cleanup failed.")