def wrapper(self, *args, **kwargs):
    """Log a "Starting" line, call the wrapped function, log "Completed".

    The log lines identify the object by the title-cased attribute name
    ``obj`` and the uuid stored under that attribute of *self*.
    """
    log_args = dict(
        msg=msg % kw,
        obj_name=obj.title(),
        uuid=getattr(self, obj)["uuid"],
    )
    log(_("%(obj_name)s %(uuid)s | Starting: %(msg)s") % log_args)
    result = f(self, *args, **kwargs)
    log(_("%(obj_name)s %(uuid)s | Completed: %(msg)s") % log_args)
    return result
def image_valid_on_flavor_validator(config, clients, task):
    """Validate that the configured image can boot on the configured flavor.

    Checks that both the flavor and the image exist, that the flavor has
    enough RAM for the image's ``min_ram``, and (when the flavor declares a
    disk) that the image's size and ``min_disk`` fit on that disk.

    :param config: benchmark config; flavor/image ids are read from
        ``config["args"]`` under the closure names ``flavor_name`` /
        ``image_name`` — assumed captured from an enclosing scope,
        TODO confirm.
    :param clients: OpenStack clients factory (``nova()``, ``glance()``)
    :param task: unused here
    :returns: ValidationResult; failures carry an explanatory message
    """
    flavor_id = types.FlavorResourceType.transform(
        clients=clients,
        resource_config=config.get("args", {}).get(flavor_name))
    try:
        flavor = clients.nova().flavors.get(flavor=flavor_id)
    except nova_exc.NotFound:
        message = _("Flavor with id '%s' not found") % flavor_id
        return ValidationResult(False, message)
    image_id = types.ImageResourceType.transform(
        clients=clients,
        resource_config=config.get("args", {}).get(image_name))
    try:
        image = clients.glance().images.get(image=image_id)
    except glance_exc.HTTPNotFound:
        message = _("Image with id '%s' not found") % image_id
        return ValidationResult(False, message)
    # min_ram/size/min_disk may be None on the image; treat as 0.
    if flavor.ram < (image.min_ram or 0):
        message = _("The memory size for flavor '%s' is too small "
                    "for requested image '%s'") % (flavor_id, image_id)
        return ValidationResult(False, message)
    if flavor.disk:
        # flavor.disk is in GB; image.size is in bytes.
        if (image.size or 0) > flavor.disk * (1024**3):
            message = _("The disk size for flavor '%s' is too small "
                        "for requested image '%s'") % (flavor_id, image_id)
            return ValidationResult(False, message)
        if (image.min_disk or 0) > flavor.disk:
            message = _("The disk size for flavor '%s' is too small "
                        "for requested image '%s'") % (flavor_id, image_id)
            return ValidationResult(False, message)
    return ValidationResult()
def get_image_uuid(self): """Get image uuid. Download image if necessary.""" image_uuid = self.config['image'].get('uuid', None) if image_uuid: return image_uuid for image in self.glance.images.list(): if image.checksum == self.config['image']['checksum']: LOG.info(_('Found image with appropriate checksum. Using it.')) return image.id LOG.info(_('Downloading new image %s') % self.config['image']['url']) image = self.glance.images.create(name=self.config['image']['name']) try: image.update(data=urllib2.urlopen(self.config['image']['url']), disk_format=self.config['image']['format'], container_format='bare') except urllib2.URLError: LOG.error(_('Unable to retrieve %s') % self.config['image']['url']) raise image.get() if image.checksum != self.config['image']['checksum']: raise exceptions.ChecksumMismatch(url=self.config['image']['url']) return image.id
class AllowSSH(base.Context):
    """Context that opens a security group allowing SSH access.

    One open security group is created per tenant used by the benchmark
    users; all created groups are deleted again on cleanup.
    """

    __ctx_name__ = "allow_ssh"
    __ctx_order__ = 301
    __ctx_hidden__ = True

    def __init__(self, context):
        super(AllowSSH, self).__init__(context)
        # Expose the group name so scenarios can reference it.
        self.context["allow_ssh"] = SSH_GROUP_NAME
        self.secgroup = []

    # BUG FIX: this wrapper previously logged "Exit context: `allow_ssh`"
    # on setup; setup must announce *entering* the context (compare the
    # Enter/Exit pairs used by the other context classes).
    @utils.log_task_wrapper(LOG.info, _("Enter context: `allow_ssh`"))
    def setup(self):
        """Create one open secgroup per distinct tenant among the users."""
        used_tenants = []
        for user in self.context['users']:
            endpoint = user['endpoint']
            tenant = endpoint.tenant_name
            if tenant not in used_tenants:
                secgroup = _prepare_open_secgroup(endpoint)
                self.secgroup.append(secgroup)
                used_tenants.append(tenant)

    @utils.log_task_wrapper(LOG.info, _("Exit context: `allow_ssh`"))
    def cleanup(self):
        """Delete the created secgroups; failures are logged, not raised."""
        for secgroup in self.secgroup:
            try:
                secgroup.delete()
            except Exception as ex:
                LOG.warning("Unable to delete secgroup: %(group_id)s. "
                            "Exception: %(ex)s" %
                            {"group_id": secgroup.id, "ex": ex})
def _load_img(self):
    """Download the configured cirros image into ``self.img_path``.

    The image is streamed into ``<img_path>.tmp`` first and then renamed,
    so a partially downloaded file never sits at the final path.

    :raises TempestConfigCreationFailure: on connection errors or a
        non-200 HTTP response.
    """
    cirros_url = ('http://download.cirros-cloud.net/%s/%s' %
                  (CONF.image.cirros_version,
                   CONF.image.cirros_image))
    try:
        response = requests.get(cirros_url, stream=True)
    except requests.ConnectionError as err:
        msg = _('Error on downloading cirros image, possibly'
                ' no connection to Internet with message %s') % str(err)
        raise TempestConfigCreationFailure(msg)
    if response.status_code == 200:
        with open(self.img_path + '.tmp', 'wb') as img_file:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    img_file.write(chunk)
                    img_file.flush()
        os.rename(self.img_path + '.tmp', self.img_path)
    else:
        if response.status_code == 404:
            # BUG FIX: the adjacent literals joined without a space
            # ("possiblyinvalid"); add the missing space.
            msg = _('Error on downloading cirros image, possibly '
                    'invalid cirros_version or cirros_image in rally.conf')
        else:
            # BUG FIX: requests.Response has no getcode() (that is a
            # urllib2 API); use status_code.
            msg = _('Error on downloading cirros image, '
                    'HTTP error code %s') % response.status_code
        raise TempestConfigCreationFailure(msg)
def to_bytes(text, default=0):
    """Converts a string into an integer of bytes.

    Looks at the last characters of the text to determine
    what conversion is needed to turn the input text into a byte number.
    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)

    :param text: String input for bytes size conversion.
    :param default: Default return value when text is blank.
    """
    match = BYTE_REGEX.search(text)
    if not match:
        # Blank input falls back to the default; anything else is invalid.
        if text:
            raise TypeError(_("Invalid string format: %s") % text)
        return default
    magnitude = int(match.group(1))
    suffix = match.group(2)
    if not suffix:
        # Bare number: already a byte count.
        return magnitude
    multiplier = BYTE_MULTIPLIERS.get(suffix.lower().replace("b", "", 1))
    if multiplier is None:
        raise TypeError(_("Unknown byte multiplier: %s") % suffix)
    return magnitude * multiplier
def to_bytes(text, default=0):
    """Converts a string into an integer of bytes.

    Looks at the last characters of the text to determine
    what conversion is needed to turn the input text into a byte number.
    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)

    :param text: String input for bytes size conversion.
    :param default: Default return value when text is blank.
    """
    found = BYTE_REGEX.search(text)
    if found is None:
        if not text:
            return default
        raise TypeError(_('Invalid string format: %s') % text)
    amount = int(found.group(1))
    unit = found.group(2)
    if not unit:
        return amount
    # Strip a trailing 'b' so "kb" and "k" map to the same multiplier key.
    factor = BYTE_MULTIPLIERS.get(unit.lower().replace('b', '', 1))
    if factor is None:
        raise TypeError(_('Unknown byte multiplier: %s') % unit)
    return amount * factor
def inner(*args, **kwargs):
    """Run *f* under the named interprocess lock, logging acquire/release."""
    log_ctx = {"function": f.__name__}
    try:
        with lock(name, lock_file_prefix, external, lock_path):
            LOG.debug(_('Got semaphore / lock "%(function)s"'), log_ctx)
            return f(*args, **kwargs)
    finally:
        LOG.debug(_('Semaphore / lock released "%(function)s"'), log_ctx)
def setup(self):
    """Install/configure a Tempest verifier and prepare a results dir.

    Stores the verifier and the temporary results directory in the
    benchmark context under ``verifier`` and ``tmp_results_dir``.

    :raises exceptions.BenchmarkSetupFailure: if installing or
        configuring tempest fails.
    """
    self.verifier = tempest.Tempest(self.task.task.deployment_uuid)
    # Suppress the verifier's own log output.
    self.verifier.log_file = "/dev/null"
    try:
        if not self.verifier.is_installed():
            self.verifier.install()
        if not self.verifier.is_configured():
            self.verifier.generate_config_file()
    except exceptions.TempestSetupFailure:
        msg = _("Failing to install tempest.")
        LOG.error(msg)
        raise exceptions.BenchmarkSetupFailure(msg)
    except exceptions.TempestConfigCreationFailure:
        msg = _("Failing to configure tempest.")
        LOG.error(msg)
        raise exceptions.BenchmarkSetupFailure(msg)
    self.context["verifier"] = self.verifier
    # Create temporary directory for xml-results.
    self.results_dir = os.path.join(
        tempfile.gettempdir(), "%s-results" % self.task.task.uuid)
    os.mkdir(self.results_dir)
    self.context["tmp_results_dir"] = self.results_dir
def _cleanup_users_resources(self):
    """Delete nova/glance/cinder resources owned by each benchmark user.

    If the scenario method declares a ``cleanup_services`` attribute,
    only those services are cleaned; otherwise all known services are.
    Failures are logged and do not stop the remaining cleanup.
    """
    def _init_services_to_cleanup(cleanup_methods):
        # Resolve the scenario method by its dotted name and honor an
        # optional per-scenario "cleanup_services" attribute.
        scenario_name = self.context.get('scenario_name')
        if scenario_name:
            cls_name, method_name = scenario_name.split(".", 1)
            scenario = scenario_base.Scenario.get_by_name(cls_name)()
            scenario_method = getattr(scenario, method_name)
            if hasattr(scenario_method, "cleanup_services"):
                return getattr(scenario_method, "cleanup_services")
        # Default: clean every service we know how to clean.
        return cleanup_methods.keys()

    if not self.users:
        return

    for user in self.users:
        clients = osclients.Clients(user)
        cleanup_methods = {
            "nova": functools.partial(utils.delete_nova_resources,
                                      clients.nova()),
            "glance": functools.partial(utils.delete_glance_resources,
                                        clients.glance(),
                                        clients.keystone()),
            "cinder": functools.partial(utils.delete_cinder_resources,
                                        clients.cinder())
        }
        for service in _init_services_to_cleanup(cleanup_methods):
            try:
                cleanup_methods[service]()
            except Exception as e:
                # Best effort: log and continue with the next service.
                LOG.debug(_("Not all resources were cleaned."),
                          exc_info=sys.exc_info())
                LOG.warning(_('Unable to fully cleanup the cloud: %s') %
                            (e.message))
def get_image_uuid(self): """Get image uuid. Download image if necessary.""" image_uuid = self.config['image'].get('uuid', None) if image_uuid: return image_uuid else: if not self.glance: raise exceptions.InvalidConfigException( 'If glance is not available in the service catalog' ' obtained by the openstack server provider, then' ' images cannot be uploaded so the uuid of an' ' existing image must be specified in the' ' deployment config.') for image in self.glance.images.list(): if image.checksum == self.config['image']['checksum']: LOG.info(_('Found image with appropriate checksum. Using it.')) return image.id LOG.info(_('Downloading new image %s') % self.config['image']['url']) image = self.glance.images.create(name=self.config['image']['name']) try: image.update(data=urllib2.urlopen(self.config['image']['url']), disk_format=self.config['image']['format'], container_format='bare') except urllib2.URLError: LOG.error(_('Unable to retrieve %s') % self.config['image']['url']) raise image.get() if image.checksum != self.config['image']['checksum']: raise exceptions.ChecksumMismatch(url=self.config['image']['url']) return image.id
def _cleanup_users_resources(self):
    """Delete resources owned by each user for every configured service.

    ``self.config`` lists the services to clean ("nova", "glance",
    "cinder", "quotas"); failures are logged but never raised so the
    remaining users/services still get cleaned.
    """
    # The admin clients do not depend on the user being cleaned, so
    # build them once instead of once per user (loop-invariant hoist).
    admin_clients = osclients.Clients(self.admin)
    for user in self.users:
        clients = osclients.Clients(user)
        cleanup_methods = {
            "nova": functools.partial(utils.delete_nova_resources,
                                      clients.nova()),
            "glance": functools.partial(utils.delete_glance_resources,
                                        clients.glance(),
                                        clients.keystone().tenant_id),
            "cinder": functools.partial(utils.delete_cinder_resources,
                                        clients.cinder()),
            "quotas": functools.partial(utils.delete_quotas,
                                        admin_clients,
                                        clients.keystone().tenant_id)
        }
        for service in self.config:
            try:
                cleanup_methods[service]()
            except Exception as e:
                LOG.debug(_("Not all resources were cleaned."),
                          exc_info=sys.exc_info())
                LOG.warning(_('Unable to fully cleanup the cloud: %s') %
                            (e.message))
def required_openstack(config, clients, task, admin=False, users=False):
    """Validator that requires OpenStack admin or (and) users.

    This allows us to create 4 kind of benchmarks:
    1) not OpenStack related (validator is not specified)
    2) requires OpenStack admin
    3) requires OpenStack admin + users
    4) requires OpenStack users

    :param admin: requires OpenStack admin
    :param users: requires OpenStack users
    """
    if not admin and not users:
        return ValidationResult(
            False,
            _("You should specify admin=True or users=True or both."))

    deployment = objects.Deployment.get(task["deployment_uuid"])
    has_admin = bool(deployment["admin"])
    has_users = bool(deployment["users"])

    # Both kinds of credentials present: nothing more to check.
    if has_admin and has_users:
        return ValidationResult()

    if has_admin:
        # Admin-only deployment: users can still be supplied via context.
        if users and not config.get("context", {}).get("users"):
            return ValidationResult(
                False, _("You should specify 'users' context"))
        return ValidationResult()

    if has_users and admin:
        return ValidationResult(False, _("Admin credentials required"))

    return ValidationResult()
def image_valid_on_flavor_validator(config, clients, task):
    """Validate that the configured image can boot on the configured flavor.

    Checks existence of both resources, RAM versus the image's ``min_ram``,
    and (when the flavor declares a disk) image size and ``min_disk``
    against that disk.

    :param config: benchmark config; ids are read from ``config["args"]``
        under the closure names ``flavor_name`` / ``image_name`` —
        assumed captured from an enclosing scope, TODO confirm.
    :param clients: OpenStack clients factory (``nova()``, ``glance()``)
    :param task: unused here
    :returns: ValidationResult; failures carry an explanatory message
    """
    flavor_id = types.FlavorResourceType.transform(
        clients=clients,
        resource_config=config.get("args", {}).get(flavor_name))
    try:
        flavor = clients.nova().flavors.get(flavor=flavor_id)
    except nova_exc.NotFound:
        message = _("Flavor with id '%s' not found") % flavor_id
        return ValidationResult(False, message)
    image_id = types.ImageResourceType.transform(
        clients=clients,
        resource_config=config.get("args", {}).get(image_name))
    try:
        image = clients.glance().images.get(image=image_id)
    except glance_exc.HTTPNotFound:
        message = _("Image with id '%s' not found") % image_id
        return ValidationResult(False, message)
    # min_ram/size/min_disk may be None on the image; treat as 0.
    if flavor.ram < (image.min_ram or 0):
        message = _("The memory size for flavor '%s' is too small "
                    "for requested image '%s'") % (flavor_id, image_id)
        return ValidationResult(False, message)
    if flavor.disk:
        # flavor.disk is in GB; image.size is in bytes.
        if (image.size or 0) > flavor.disk * (1024 ** 3):
            message = _("The disk size for flavor '%s' is too small "
                        "for requested image '%s'") % (flavor_id, image_id)
            return ValidationResult(False, message)
        if (image.min_disk or 0) > flavor.disk:
            message = _("The disk size for flavor '%s' is too small "
                        "for requested image '%s'") % (flavor_id, image_id)
            return ValidationResult(False, message)
    return ValidationResult()
def _cleanup_users_resources(self):
    """Delete resources owned by each user for every configured service.

    Services come from ``self.config``; per-service failures are logged
    and do not abort the cleanup of remaining users/services.
    """
    # Hoisted out of the loop: the admin clients never change per user.
    admin_clients = osclients.Clients(self.admin)
    for user in self.users:
        clients = osclients.Clients(user)
        cleanup_methods = {
            "nova": functools.partial(utils.delete_nova_resources,
                                      clients.nova()),
            "glance": functools.partial(utils.delete_glance_resources,
                                        clients.glance(),
                                        clients.keystone().tenant_id),
            "cinder": functools.partial(utils.delete_cinder_resources,
                                        clients.cinder()),
            "quotas": functools.partial(utils.delete_quotas,
                                        admin_clients,
                                        clients.keystone().tenant_id)
        }
        for service in self.config:
            try:
                cleanup_methods[service]()
            except Exception as e:
                LOG.debug(_("Not all resources were cleaned."),
                          exc_info=sys.exc_info())
                LOG.warning(_('Unable to fully cleanup the cloud: %s') %
                            (e.message))
def _validate_test_config(self, test_config):
    """Checks whether the given test config is valid and can be used during
    verification and benchmarking tests.

    :param test_config: Dictionary in the same format as for the __init__
                        method.

    :raises: Exception if the test config is not valid
    """
    task_uuid = self.task['uuid']
    # Perform schema validation
    try:
        jsonschema.validate(test_config, config.test_config_schema)
    except jsonschema.ValidationError as e:
        LOG.exception(_('Task %s: Error: %s') % (task_uuid, e.message))
        raise exceptions.InvalidConfigException(message=e.message)

    # Check for verification test names
    for test in test_config['verify']:
        if test not in self.verification_tests:
            LOG.exception(_('Task %s: Error: the specified '
                            'verification test does not exist: %s') %
                          (task_uuid, test))
            raise exceptions.NoSuchVerificationTest(test_name=test)

    # Check for benchmark scenario names
    # (a set makes the per-scenario membership test O(1))
    benchmark_scenarios_set = set(self.benchmark_scenarios)
    for scenario in test_config['benchmark']:
        if scenario not in benchmark_scenarios_set:
            LOG.exception(_('Task %s: Error: the specified '
                            'benchmark scenario does not exist: %s') %
                          (task_uuid, scenario))
            raise exceptions.NoSuchScenario(name=scenario)
def setup(self):
    """Install/configure a Tempest verifier and prepare a results dir.

    Publishes the verifier and the temporary results directory in the
    benchmark context under ``verifier`` and ``tmp_results_dir``.

    :raises exceptions.BenchmarkSetupFailure: if installing or
        configuring tempest fails.
    """
    self.verifier = tempest.Tempest(self.task.task.deployment_uuid)
    # Suppress the verifier's own log output.
    self.verifier.log_file = "/dev/null"
    try:
        if not self.verifier.is_installed():
            self.verifier.install()
        if not self.verifier.is_configured():
            self.verifier.generate_config_file()
    except exceptions.TempestSetupFailure:
        msg = _("Failing to install tempest.")
        LOG.error(msg)
        raise exceptions.BenchmarkSetupFailure(msg)
    except exceptions.TempestConfigCreationFailure:
        msg = _("Failing to configure tempest.")
        LOG.error(msg)
        raise exceptions.BenchmarkSetupFailure(msg)
    self.context["verifier"] = self.verifier
    # Create temporary directory for xml-results.
    self.results_dir = os.path.join(tempfile.gettempdir(),
                                    "%s-results" % self.task.task.uuid)
    os.mkdir(self.results_dir)
    self.context["tmp_results_dir"] = self.results_dir
def image_valid_on_flavor(config, clients, task, flavor_name, image_name):
    """Returns validator for image could be used for current flavor

    :param flavor_name: defines which variable should be used
                        to get flavor id value.
    :param image_name: defines which variable should be used
                       to get image id value.
    """
    valid_result, flavor = _get_validated_flavor(config, clients,
                                                 flavor_name)
    if not valid_result.is_valid:
        return valid_result
    valid_result, image = _get_validated_image(config, clients, image_name)
    if not valid_result.is_valid:
        return valid_result

    # The image must fit into the flavor's RAM (min_ram may be None).
    if flavor.ram < (image.min_ram or 0):
        return ValidationResult(
            False,
            _("The memory size for flavor '%s' is too small "
              "for requested image '%s'") % (flavor.id, image.id))

    if flavor.disk:
        # Both disk checks report the same message, so build it once.
        disk_message = _("The disk size for flavor '%s' is too small "
                         "for requested image '%s'") % (flavor.id, image.id)
        # flavor.disk is in GB; image.size is in bytes.
        if (image.size or 0) > flavor.disk * (1024 ** 3):
            return ValidationResult(False, disk_message)
        if (image.min_disk or 0) > flavor.disk:
            return ValidationResult(False, disk_message)

    return ValidationResult()
def _initialize_testr(self):
    """Run ``testr init`` in the tempest tree unless already initialized."""
    repo_dir = os.path.join(self.tempest_path, ".testrepository")
    if os.path.isdir(repo_dir):
        return
    msg = _("Test Repository initialization.")
    LOG.info(_("Starting: ") + msg)
    subprocess.check_call("%s testr init" % self.venv_wrapper,
                          shell=True, cwd=self.tempest_path)
    LOG.info(_("Completed: ") + msg)
def wrapper(*args, **kwargs):
    """Run *f* while holding a file lock under the configured lock path.

    The lock directory comes from the closure's ``lock_path`` or, when
    unset, the ``RALLY_LOCK_PATH`` environment variable.
    """
    try:
        # NOTE(review): if both lock_path and RALLY_LOCK_PATH are unset,
        # `path` is None and os.path.join raises TypeError — confirm
        # callers always provide one of them.
        path = lock_path or os.environ.get("RALLY_LOCK_PATH")
        lock = lockfile.FileLock(os.path.join(path, lock_prefix))
        with lock:
            LOG.debug(_('Got lock "%s"') % f.__name__)
            return f(*args, **kwargs)
    finally:
        # NOTE(review): this runs even when acquiring the lock failed,
        # so "Lock released" may be logged without a matching "Got lock".
        LOG.debug(_('Lock released "%s"') % f.__name__)
def inner(*args, **kwargs):
    """Execute *f* inside the configured semaphore/lock with debug logs."""
    func_name = f.__name__
    try:
        with lock(name, lock_file_prefix, external, lock_path):
            LOG.debug(_('Got semaphore / lock "%(function)s"'),
                      {'function': func_name})
            return f(*args, **kwargs)
    finally:
        LOG.debug(_('Semaphore / lock released "%(function)s"'),
                  {'function': func_name})
def _cleanup_admin_resources(self):
    """Best-effort removal of keystone resources owned by the admin."""
    try:
        utils.delete_keystone_resources(
            osclients.Clients(self.admin).keystone())
    except Exception as e:
        # Log and continue — cleanup must never abort the run.
        LOG.debug(_("Not all resources were cleaned."),
                  exc_info=sys.exc_info())
        LOG.warning(_('Unable to fully cleanup keystone service: %s')
                    % (e.message))
def boot_runcommand_delete_server(self, image_id, flavor_id, script,
                                  interpreter, network="private",
                                  username="******", ip_version=4,
                                  retries=60, port=22, **kwargs):
    """Boot server, run a script that outputs JSON, delete server.

    Parameters:
    script: script to run on the server, must output JSON mapping metric
            names to values. See sample script below.
    network: Network to choose address to connect to instance from
    username: User to SSH to instance as
    ip_version: Version of ip protocol to use for connection

    returns: Dictionary containing two keys, data and errors. Data is JSON
             data output by the script. Errors is raw data from the
             script's standard error stream.

    Example Script in doc/samples/support/instance_dd_test.sh
    """
    server_name = self._generate_random_name(16)
    server = self._boot_server(server_name, image_id, flavor_id,
                               key_name="rally_ssh_key", **kwargs)
    if network not in server.addresses:
        raise ValueError(
            "Can't find cloud network %(network)s, so cannot boot "
            "instance for Rally scenario boot-runcommand-delete. "
            "Available networks: %(networks)s" % (
                dict(network=network,
                     networks=server.addresses.keys())))
    # Pick the first address of the requested IP version on the network.
    server_ip = [ip for ip in server.addresses[network]
                 if ip["version"] == ip_version][0]["addr"]
    ssh = sshutils.SSH(username, server_ip, port=port,
                       pkey=self.clients("ssh_key_pair")["private"])
    ssh.wait()
    code, out, err = ssh.execute(interpreter, stdin=open(script, "rb"))
    if code:
        LOG.error(_("Error running script on instance via SSH. "
                    "Error: %s") % err)
    try:
        out = json.loads(out)
    except ValueError:
        LOG.warning(_("Script %s did not output valid JSON. ") % script)
    self._delete_server(server)
    # BUG FIX: the debug message used "$(stderr)s" — a literal '$' typo —
    # so stderr was never interpolated; use the %-style key.
    LOG.debug(_("Output streams from in-instance script execution: "
                "stdout: %(stdout)s, stderr: %(stderr)s") %
              dict(stdout=out, stderr=err))
    return {"data": out, "errors": err}
def _cleanup_admin_resources(self):
    """Delete admin keystone resources, logging (not raising) failures."""
    try:
        admin_clients = osclients.Clients(self.admin)
        keystone_client = admin_clients.keystone()
        utils.delete_keystone_resources(keystone_client)
    except Exception as e:
        LOG.debug(_("Not all resources were cleaned."),
                  exc_info=sys.exc_info())
        LOG.warning(
            _('Unable to fully cleanup keystone service: %s') % (e.message))
def wrapper(self, *args, **kwargs):
    """Emit Starting/Completed log lines around the wrapped method call."""
    params = {}
    params["msg"] = msg % kw
    params["obj_name"] = obj.title()
    params["uuid"] = getattr(self, obj)["uuid"]
    log(_("%(obj_name)s %(uuid)s | Starting: %(msg)s") % params)
    result = f(self, *args, **kwargs)
    log(_("%(obj_name)s %(uuid)s | Completed: %(msg)s") % params)
    return result
def wait(self, timeout=120, interval=1):
    """Wait for the host will be available via ssh."""
    # Pre-compute the absolute deadline once.
    deadline = time.time() + timeout
    while True:
        try:
            return self.execute('uname')
        except (socket.error, SSHError) as e:
            LOG.debug(_('Ssh is still unavailable: %r') % e)
            time.sleep(interval)
            if time.time() > deadline:
                raise SSHTimeout(_('Timeout waiting for "%s"') % self.host)
def generate_config_file(self): """Generate configuration file of tempest for current deployment.""" LOG.debug("Tempest config file: %s " % self.config_file) if not self.is_configured(): msg = _("Creation of configuration file for tempest.") LOG.info(_("Starting: ") + msg) config.TempestConf(self.deploy_id).generate(self.config_file) LOG.info(_("Completed: ") + msg) else: LOG.info("Tempest is already configured.")
def required_services(config, clients, task, *required_services):
    """Check if specified services are available.

    :param required_services: names of the services the benchmark needs
    :returns: ValidationResult — failure names the unknown or
        unavailable service; success carries no message.
    """
    available_services = clients.services().values()
    for service in required_services:
        if service not in consts.Service:
            return ValidationResult(False,
                                    _("Unknown service: %s") % service)
        if service not in available_services:
            return ValidationResult(
                False, _("Service is not available: %s") % service)
    # BUG FIX: previously fell off the end and implicitly returned None
    # when every service was present; callers expect a ValidationResult
    # (cf. the other validators in this file).
    return ValidationResult()
def required_services(*args, **kwargs):
    """Check if specified services are available.

    :param args: names of the services the benchmark needs; the clients
        factory is taken from ``kwargs["clients"]``
    :returns: ValidationResult — failure names the unknown or
        unavailable service; success carries no message.
    """
    available_services = kwargs.get("clients").services().values()
    for service in args:
        if service not in consts.Service:
            return ValidationResult(False,
                                    _("Unknown service: %s") % service)
        if service not in available_services:
            return ValidationResult(
                False, _("Service is not available: %s") % service)
    # BUG FIX: previously returned None implicitly when all services
    # were present; return a successful ValidationResult instead.
    return ValidationResult()
def _cleanup_admin_resources(self):
    """Clean up keystone resources when admin credentials are present."""
    if not self.admin:
        return
    try:
        clients = utils.create_openstack_clients(self.admin)
        utils.delete_keystone_resources(clients["keystone"])
    except Exception as e:
        LOG.debug(_("Not all resources were cleaned."),
                  exc_info=sys.exc_info())
        LOG.warning(_('Unable to fully cleanup keystone service: %s')
                    % (e.message))
class Quotas(base.Context):
    """Context class for updating benchmarks' tenants quotas."""

    __ctx_name__ = "quotas"
    __ctx_order__ = 210
    __ctx_hidden__ = False

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": utils.JSON_SCHEMA,
        "additionalProperties": False,
        "properties": {
            "nova": nova_quotas.NovaQuotas.QUOTAS_SCHEMA,
            "cinder": cinder_quotas.CinderQuotas.QUOTAS_SCHEMA,
            "neutron": neutron_quotas.NeutronQuotas.QUOTAS_SCHEMA
        }
    }

    def __init__(self, context):
        """Bind per-service quota managers to the admin clients."""
        super(Quotas, self).__init__(context)
        self.clients = osclients.Clients(context["admin"]["endpoint"])
        self.manager = {
            "nova": nova_quotas.NovaQuotas(self.clients),
            "cinder": cinder_quotas.CinderQuotas(self.clients),
            "neutron": neutron_quotas.NeutronQuotas(self.clients)
        }

    def _service_has_quotas(self, service):
        # True when the context config defines at least one quota value
        # for this service.
        return len(self.config.get(service, {})) > 0

    @utils.log_task_wrapper(LOG.info, _("Enter context: `quotas`"))
    def setup(self):
        """Apply the configured quotas to every benchmark tenant."""
        for tenant in self.context["tenants"]:
            for service in self.manager:
                if self._service_has_quotas(service):
                    self.manager[service].update(tenant["id"],
                                                 **self.config[service])

    @utils.log_task_wrapper(LOG.info, _("Exit context: `quotas`"))
    def cleanup(self):
        """Reset quotas for every tenant; failures are logged, not raised."""
        for service in self.manager:
            if self._service_has_quotas(service):
                for tenant in self.context["tenants"]:
                    try:
                        self.manager[service].delete(tenant["id"])
                    except Exception as e:
                        LOG.warning("Failed to remove quotas for tenant "
                                    "%(tenant_id)s in service %(service)s "
                                    "\n reason: %(exc)s" %
                                    {"tenant_id": tenant["id"],
                                     "service": service, "exc": e})
def status(self, task_id): """Get status of task :param task_uuid: Task uuid Returns current status of task """ try: task = db.task_get_by_uuid(task_id) print(_("Task %(task_id)s is %(status)s.") % {"task_id": task_id, "status": task["status"]}) except exceptions.TaskNotFound as e: print(e) except Exception as e: print(_("Something went wrong %s") % e)
def _cleanup_with_clients(cls, indexes):
    """Delete nova/glance/cinder resources for the given client indexes.

    :param indexes: indexes into the module-level client registry
        ``__openstack_clients__`` — presumably one entry per benchmark
        user; confirm against the caller.
    """
    for index in indexes:
        clients = __openstack_clients__[index]
        try:
            cls._delete_nova_resources(clients["nova"])
            cls._delete_glance_resources(clients["glance"],
                                         clients["keystone"].project_id)
            cls._delete_cinder_resources(clients["cinder"])
        except Exception as e:
            # Best effort: log and move on to the next set of clients.
            LOG.debug(_("Not all resources were cleaned."),
                      exc_info=sys.exc_info())
            LOG.warning(_('Unable to fully cleanup the cloud: %s') %
                        (e.message))
def boot_runcommand_delete(self, image_id, flavor_id,
                           script, interpreter, network='private',
                           username='******', ip_version=4,
                           port=22, **kwargs):
    """Boot server, run a script that outputs JSON, delete server.

    Parameters:
    script: script to run on the server, must output JSON mapping metric
            names to values. See sample script below.
    network: Network to choose address to connect to instance from
    username: User to SSH to instance as
    ip_version: Version of ip protocol to use for connection

    returns: Dictionary containing two keys, data and errors. Data is JSON
             data output by the script. Errors is raw data from the
             script's standard error stream.

    Example Script in doc/samples/support/instance_dd_test.sh
    """
    server_name = self._generate_random_name(16)
    server = self._boot_server(server_name, image_id, flavor_id,
                               key_name='rally_ssh_key', **kwargs)

    code, out, err = self.run_command(server, username, network, port,
                                      ip_version, interpreter, script)
    if code:
        LOG.error(
            _("Error running script on instance via SSH. "
              "Error: %s") % err)
    try:
        out = json.loads(out)
    except ValueError:
        LOG.warning(_("Script %s did not output valid JSON.") % script)

    self._delete_server(server)
    # BUG FIX: the debug message used "$(stderr)s" — a literal '$' typo —
    # so stderr was never interpolated; use the %-style key.
    LOG.debug(_("Output streams from in-instance script execution: "
                "stdout: %(stdout)s, stderr: %(stderr)s") %
              dict(stdout=out, stderr=err))
    return {"data": out, "errors": err}
class Tempest(base.Context):
    # Hidden context that installs/configures Tempest for benchmark runs.

    __ctx_name__ = "tempest"
    __ctx_order__ = 666
    __ctx_hidden__ = True

    @utils.log_task_wrapper(LOG.info, _("Enter context: `tempest`"))
    def setup(self):
        """Prepare a Tempest verifier and a temp dir for subunit results.

        Publishes them in the context as ``verifier`` and
        ``tmp_results_dir``.

        :raises exceptions.BenchmarkSetupFailure: if installing or
            configuring tempest fails.
        """
        self.verifier = tempest.Tempest(self.task.task.deployment_uuid)
        self.verifier.log_file_raw = "/dev/null"
        # Create temporary directory for subunit-results.
        self.results_dir = os.path.join(
            tempfile.gettempdir(), "%s-results" % self.task.task.uuid)
        os.mkdir(self.results_dir)
        self.context["tmp_results_dir"] = self.results_dir

        try:
            if not self.verifier.is_installed():
                self.verifier.install()
            if not self.verifier.is_configured():
                self.verifier.generate_config_file()
        except exceptions.TempestSetupFailure:
            msg = _("Failing to install tempest.")
            LOG.error(msg)
            raise exceptions.BenchmarkSetupFailure(msg)
        except exceptions.TempestConfigCreationFailure:
            msg = _("Failing to configure tempest.")
            LOG.error(msg)
            raise exceptions.BenchmarkSetupFailure(msg)

        self.context["verifier"] = self.verifier

    @utils.log_task_wrapper(LOG.info, _("Exit context: `tempest`"))
    def cleanup(self):
        """Run tempest's stress cleanup tool and remove the results dir."""
        try:
            cmd = ("cd %(tempest_dir)s "
                   "&& %(venv)s python tempest/stress/tools/cleanup.py" %
                   {
                       "tempest_dir": self.verifier.tempest_path,
                       "venv": self.verifier.venv_wrapper
                   })
            LOG.debug("Cleanup started by the command: %s" % cmd)
            subprocess.check_call(cmd, shell=True, env=self.verifier.env,
                                  cwd=self.verifier.tempest_path)
        except subprocess.CalledProcessError:
            # Cleanup is best effort; still remove the results directory.
            LOG.error("Tempest cleanup failed.")
        if os.path.exists(self.results_dir):
            shutil.rmtree(self.results_dir)
class AdminCleanup(base.Context):
    """Context class for admin resource cleanup."""

    __ctx_name__ = "admin_cleanup"
    __ctx_order__ = 200
    __ctx_hidden__ = True

    CONFIG_SCHEMA = {
        "type": "array",
        "$schema": rutils.JSON_SCHEMA,
        "items": {
            "type": "string",
            "enum": ["keystone", "quotas"]
        },
        "uniqueItems": True
    }

    def __init__(self, context):
        super(AdminCleanup, self).__init__(context)
        # Filled in setup() from the context's admin credentials.
        self.endpoint = None

    def _cleanup_resources(self):
        """Invoke the matching cleanup helper for each configured service.

        Each registry entry is (callable, first arg, *extra args).
        """
        client = osclients.Clients(self.endpoint)
        cleanup_methods = {
            "keystone": (utils.delete_keystone_resources,
                         client.keystone()),
            "quotas": (utils.delete_admin_quotas, client,
                       self.context.get("tenants", [])),
        }
        for service_name in self.config:
            cleanup_method = cleanup_methods[service_name]
            # NOTE(review): this rebinding shadows the Clients object
            # named `client` above — works, but is easy to misread.
            method, client = cleanup_method[:2]
            try:
                method(client, *cleanup_method[2:])
            except Exception as e:
                # Best effort: log and continue with the next service.
                LOG.debug("Not all admin resources were cleaned.",
                          exc_info=sys.exc_info())
                LOG.warning(
                    _('Unable to fully cleanup the cloud: %s') %
                    (six.text_type(e)))

    @rutils.log_task_wrapper(LOG.info, _("Enter context: `admin cleanup`"))
    def setup(self):
        self.endpoint = self.context["admin"]["endpoint"]

    @rutils.log_task_wrapper(LOG.info, _("Exit context: `admin cleanup`"))
    def cleanup(self):
        self._cleanup_resources()
def _load_img(self):
    """Download the configured cirros image into ``self.img_path``.

    :raises exceptions.TempestConfigCreationFailure: if the HTTP
        response code is not 200 OK.
    """
    cirros_url = ('http://download.cirros-cloud.net/%s/%s' %
                  (CONF.image.cirros_version,
                   CONF.image.cirros_image))
    # NOTE(review): urllib2.urlopen raises HTTPError for 4xx/5xx, so the
    # non-OK branches below are rarely reached — confirm intended.
    response = urllib2.urlopen(cirros_url)
    if response.getcode() == httplib.OK:
        with open(self.img_path, 'wb') as img_file:
            img_file.write(response.read())
    else:
        if response.getcode() == httplib.NOT_FOUND:
            # BUG FIX: the adjacent literals joined without a space
            # ("possiblyinvalid"); add the missing space.
            msg = _('Error on downloading cirros image, possibly '
                    'invalid cirros_version or cirros_image in rally.conf')
        else:
            msg = _('Error on downloading cirros image, '
                    'HTTP error code %s') % response.getcode()
        raise exceptions.TempestConfigCreationFailure(message=msg)
def _get_not_supported_column(col_name_col_instance, column_name):
    """Look up the Column instance for a column whose type sqlite cannot
    handle natively.

    :param col_name_col_instance: mapping of column name -> sqlalchemy
        Column instance
    :param column_name: name of the column to fetch
    :returns: the sqlalchemy Column instance
    :raises ColumnError: if the name is missing from the mapping or the
        mapped value is not a Column
    """
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        # MESSAGE FIX: dropped the stray unmatched ')' at the end.
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because column has unsupported "
                "type by sqlite.")
        raise ColumnError(msg % column_name)
    if not isinstance(column, Column):
        # MESSAGE FIX: added the missing sentence break after '%s'.
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s. It should be instance "
                "of sqlalchemy.Column.")
        raise ColumnError(msg % column_name)
    return column
class Keypair(base.Context):
    # Hidden context that provisions an SSH keypair for every benchmark user.

    __ctx_name__ = "keypair"
    __ctx_order__ = 300
    __ctx_hidden__ = True

    KEYPAIR_NAME = "rally_ssh_key"

    def _get_nova_client(self, endpoint):
        # Build a nova client bound to the given user endpoint.
        return osclients.Clients(endpoint).nova()

    def _keypair_safe_remove(self, nova):
        # Delete the keypair, ignoring the case where it does not exist.
        try:
            nova.keypairs.delete(self.KEYPAIR_NAME)
        except novaclient.exceptions.NotFound:
            pass

    def _generate_keypair(self, endpoint):
        """Create a fresh keypair and return its key material."""
        nova = self._get_nova_client(endpoint)

        # NOTE(hughsaunders): If keypair exists, it must be deleted as we
        # can't retrieve the private key
        self._keypair_safe_remove(nova)

        keypair = nova.keypairs.create(self.KEYPAIR_NAME)
        return {"private": keypair.private_key,
                "public": keypair.public_key}

    @utils.log_task_wrapper(LOG.info, _("Enter context: `keypair`"))
    def setup(self):
        """Attach a generated keypair to every user in the context."""
        for user in self.context["users"]:
            keypair = self._generate_keypair(user["endpoint"])
            user["keypair"] = keypair

    @utils.log_task_wrapper(LOG.info, _("Exit context: `keypair`"))
    def cleanup(self):
        """Remove the keypairs; failures are logged, not raised."""
        for user in self.context["users"]:
            endpoint = user['endpoint']
            try:
                nova = self._get_nova_client(endpoint)
                self._keypair_safe_remove(nova)
            except Exception as e:
                LOG.warning(
                    "Unable to delete keypair: %(kpname)s for user "
                    "%(tenant)s/%(user)s: %(message)s" %
                    {'kpname': self.KEYPAIR_NAME,
                     'tenant': endpoint.tenant_name,
                     'user': endpoint.username,
                     'message': e.message})
def deprecated(self, msg, *args, **kwargs):
    """Call this method when a deprecated feature is used.

    If the system is configured for fatal deprecations then the message
    is logged at the 'critical' level and :class:`DeprecatedConfig` will
    be raised.

    Otherwise, the message will be logged (once) at the 'warn' level.

    :raises: :class:`DeprecatedConfig` if the system is configured
             for fatal deprecations.
    """
    stdmsg = _("Deprecated: %s") % msg
    if CONF.fatal_deprecations:
        self.critical(stdmsg, *args, **kwargs)
        raise DeprecatedConfig(msg=stdmsg)

    # Using a list because a tuple with dict can't be stored in a set.
    seen_args = self._deprecated_messages_sent.setdefault(msg, [])
    if args not in seen_args:
        # First time this (message, args) combination appears — log it
        # and remember it so it is not logged again.
        seen_args.append(args)
        self.warn(stdmsg, *args, **kwargs)
def __exit__(self, exc_type, exc_val, exc_tb):
    """Release the lock and close the lock file on context exit.

    IOError from unlocking/closing is logged (with traceback) rather
    than propagated, so exiting the context never raises for it.
    """
    try:
        self.unlock()
        self.lockfile.close()
    except IOError:
        LOG.exception(_("Could not release the acquired lock `%s`"),
                      self.fname)
def __init__(self, message=None, **kwargs):
    """Build the exception message from the class's ``msg_fmt``.

    Python 2 code (``iteritems``, three-argument ``raise``). When no
    explicit *message* is given, ``msg_fmt % kwargs`` is used; a
    formatting failure is logged and either re-raised (when
    ``CONF.fatal_exception_format_errors`` is set) or degraded to the
    raw format string.
    """
    self.kwargs = kwargs

    # Default the 'code' kwarg from the class attribute when present.
    if 'code' not in self.kwargs:
        try:
            self.kwargs['code'] = self.code
        except AttributeError:
            pass

    if not message:
        try:
            message = self.msg_fmt % kwargs
        except Exception:
            exc_info = sys.exc_info()
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception(_('Exception in string format operation'))
            for name, value in kwargs.iteritems():
                LOG.error("%s: %s" % (name, value))
            if CONF.fatal_exception_format_errors:
                # Re-raise the original formatting error with its
                # traceback (Python 2 three-argument raise).
                raise exc_info[0], exc_info[1], exc_info[2]
            else:
                # at least get the core message out if something happened
                message = self.msg_fmt

    super(RallyException, self).__init__(message)
def tempest_tests_exists(config, clients, task):
    """Validator checks that specified test exists."""
    args = config.get("args", {})
    if "test_name" in args:
        tests = [args["test_name"]]
    else:
        tests = args.get("test_names", [])

    if not tests:
        return ValidationResult(False,
                                "Parameter 'test_name' or 'test_names' should "
                                "be specified.")

    # Make sure a verifier is available for this deployment before asking
    # it which tests exist.
    verifier = tempest.Tempest(task["deployment_uuid"])
    if not verifier.is_installed():
        verifier.install()
    if not verifier.is_configured():
        verifier.generate_config_file()

    allowed_tests = verifier.discover_tests()

    # Accept short names by qualifying them with the "tempest.api." prefix.
    tests = [t if t.startswith("tempest.api.") else "tempest.api." + t
             for t in tests]

    wrong_tests = set(tests) - allowed_tests
    if wrong_tests:
        message = _("One or more tests not found: '%s'") % "', '".join(
            sorted(wrong_tests))
        return ValidationResult(False, message)
    return ValidationResult()
def _db_schema_sanity_check(engine): """Ensure all database tables were created with required parameters. :param engine: SQLAlchemy engine instance for a given database """ if engine.name == 'mysql': onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' 'from information_schema.TABLES ' 'where TABLE_SCHEMA=%s and ' 'TABLE_COLLATION NOT LIKE "%%utf8%%"') # NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic # versioning tables from the tables we need to verify utf8 status on. # Non-standard table names are not supported. EXCLUDED_TABLES = ['migrate_version', 'alembic_version'] table_names = [res[0] for res in engine.execute(onlyutf8_sql, engine.url.database) if res[0].lower() not in EXCLUDED_TABLES] if len(table_names) > 0: raise ValueError(_('Tables "%s" have non utf8 collation, ' 'please make sure all tables are CHARSET=utf8' ) % ','.join(table_names))
def inner_func(scenario_obj, *args, **kwargs):
    # Only pick a temporary log file when the caller supplied none;
    # NamedTemporaryFile is used solely to obtain a unique basename.
    if "log_file" not in kwargs:
        kwargs["log_file"] = os.path.join(
            scenario_obj.context()["tmp_results_dir"],
            os.path.basename(tempfile.NamedTemporaryFile().name))

    # Run the wrapped scenario; a non-zero tempest exit code is expected
    # when tests fail and is handled via the parsed results below.
    try:
        func(scenario_obj, *args, **kwargs)
    except subprocess.CalledProcessError:
        pass

    summary, cases = scenario_obj.context()["verifier"].parse_results(
        kwargs["log_file"])

    if not (summary and cases):
        # Nothing could be parsed from the log file.
        raise exceptions.TempestBenchmarkFailure(_("No information"))

    scenario_obj._add_atomic_actions("test_execution", summary.get("time"))
    if summary.get("errors") or summary.get("failures"):
        raise exceptions.TempestBenchmarkFailure(
            [case for case in six.itervalues(cases)
             if case["status"] == "FAIL"])
def _set_compute_images(self, section_name='compute'):
    """Point the tempest config at two active cirros images.

    Looks up active glance images whose name contains 'cirros'; while
    fewer than two exist, uploads new ones from ``self.img_path``, then
    writes the first two ids as ``image_ref`` / ``image_ref_alt``.

    :param section_name: config section receiving the image ids
    :raises exceptions.TempestConfigCreationFailure: if a missing image
        cannot be created/uploaded
    """
    glanceclient = self.clients.glance()
    image_list = [
        img for img in glanceclient.images.list()
        if img.status.lower() == 'active' and
        img.name is not None and 'cirros' in img.name
    ]
    # Upload new images if there are no
    # necessary images in the cloud (cirros)
    while len(image_list) < 2:
        # Timestamp suffix keeps uploaded image names unique.
        now = (datetime.datetime.fromtimestamp(
            time.time()).strftime('%Y_%m_%d_%H_%M_%S'))
        try:
            image = glanceclient.images.create(name=('cirros_%s' % now),
                                               disk_format='qcow2',
                                               container_format='bare')
            # Close the image file deterministically instead of leaking
            # the file descriptor until garbage collection.
            with open(self.img_path, 'rb') as image_file:
                image.update(data=image_file)
            image_list.append(image)
        except Exception as e:
            msg = _('There are no desired images (cirros) or only one and '
                    'new image could not be created.\n'
                    'Reason: %s') % e.message
            raise exceptions.TempestConfigCreationFailure(message=msg)
    self.conf.set(section_name, 'image_ref', image_list[0].id)
    self.conf.set(section_name, 'image_ref_alt', image_list[1].id)
class InternalServerError(HttpServerError):
    """HTTP 500 - Internal Server Error.

    Generic catch-all raised when the server hit an unexpected condition
    and no more specific 5xx status applies.
    """
    http_status = 500
    message = _("Internal Server Error")
class ServiceUnavailable(HttpServerError):
    """HTTP 503 - Service Unavailable.

    Raised when the server is temporarily unable to handle the request.
    """
    http_status = 503
    message = _("Service Unavailable")
def check(self, deploy_id=None):
    """Check the deployment.

    Verifies keystone authentication and lists every available service.

    :param deploy_id: a UUID of the deployment
    """
    headers = ['services', 'type', 'status']
    table_rows = []
    try:
        for endpoint_dict in db.deployment_get(deploy_id)['endpoints']:
            clients = osclients.Clients(endpoint.Endpoint(**endpoint_dict))
            client = clients.verified_keystone()
            print("keystone endpoints are valid and following "
                  "services are available:")
            for service in client.services.list():
                row = dict(zip(headers,
                               [service.name, service.type, 'Available']))
                table_rows.append(utils.Struct(**row))
    except exceptions.InvalidArgumentsException:
        # Keystone rejected the credentials: report it as a single row
        # and signal failure to the shell.
        error_row = dict(zip(headers, ['keystone', 'identity', 'Error']))
        table_rows.append(utils.Struct(**error_row))
        print(_("Authentication Issues: %s.") % sys.exc_info()[1])
        return 1
    common_cliutils.print_list(table_rows, headers)
def _cleanup_resources(self):
    """Best-effort purge of resources created by each benchmark user."""
    for user in self.users_endpoints:
        clients = osclients.Clients(user)
        tenant_id = clients.keystone().tenant_id
        # service name -> (cleanup function, client factory, *extra args)
        registry = {
            "nova": (utils.delete_nova_resources, clients.nova),
            "glance": (utils.delete_glance_resources, clients.glance,
                       tenant_id),
            "cinder": (utils.delete_cinder_resources, clients.cinder),
            "neutron": (utils.delete_neutron_resources, clients.neutron,
                        tenant_id),
            "ceilometer": (utils.delete_ceilometer_resources,
                           clients.ceilometer, tenant_id),
            "heat": (utils.delete_heat_resources, clients.heat),
            "sahara": (utils.delete_sahara_resources, clients.sahara)
        }
        for service_name in self.config:
            spec = registry[service_name]
            delete_resources = spec[0]
            # Client construction happens outside the try block, so a
            # failure to build the client propagates to the caller.
            service_client = spec[1]()
            try:
                delete_resources(service_client, *spec[2:])
            except Exception as e:
                LOG.debug("Not all user resources were cleaned.",
                          exc_info=sys.exc_info())
                LOG.warning(
                    _('Unable to fully cleanup the cloud: %s') %
                    (six.text_type(e)))
def _cleanup_users_resources(self):
    """Best-effort purge of resources (and quotas) for each user."""
    for user in self.users:
        clients = osclients.Clients(user)
        admin_clients = functools.partial(osclients.Clients, self.admin)
        tenant_id = clients.keystone().tenant_id
        # service name -> (cleanup function, client factory, *extra args)
        registry = {
            "nova": (utils.delete_nova_resources, clients.nova),
            "glance": (utils.delete_glance_resources, clients.glance,
                       tenant_id),
            "cinder": (utils.delete_cinder_resources, clients.cinder),
            "quotas": (utils.delete_quotas, admin_clients,
                       tenant_id),
            "neutron": (utils.delete_neutron_resources, clients.neutron,
                        tenant_id),
            "ceilometer": (utils.delete_ceilometer_resources,
                           clients.ceilometer, tenant_id),
            "heat": (utils.delete_heat_resources, clients.heat),
            "sahara": (utils.delete_sahara_resources, clients.sahara)
        }
        for service_name in self.config:
            # The whole lookup-build-delete sequence is guarded: any
            # failure is logged and cleanup continues with the next
            # service.
            try:
                spec = registry[service_name]
                delete_resources = spec[0]
                delete_resources(spec[1](), *spec[2:])
            except Exception as e:
                LOG.debug("Not all resources were cleaned.",
                          exc_info=sys.exc_info())
                LOG.warning(_('Unable to fully cleanup the cloud: %s') %
                            (six.text_type(e)))
def _cleanup_resources(self):
    """Best-effort purge of resources created by each benchmark user."""
    for user in self.users_endpoints:
        clients = osclients.Clients(user)
        tenant_id = clients.keystone().tenant_id
        # service name -> (cleanup function, client factory, *extra args)
        registry = {
            "nova": (utils.delete_nova_resources, clients.nova),
            "glance": (utils.delete_glance_resources, clients.glance,
                       tenant_id),
            "cinder": (utils.delete_cinder_resources, clients.cinder),
            "neutron": (utils.delete_neutron_resources, clients.neutron,
                        tenant_id),
            "ceilometer": (utils.delete_ceilometer_resources,
                           clients.ceilometer, tenant_id),
            "heat": (utils.delete_heat_resources, clients.heat),
            "sahara": (utils.delete_sahara_resources, clients.sahara),
            "designate": (utils.delete_designate_resources,
                          clients.designate),
        }
        for service_name in self.config:
            spec = registry[service_name]
            delete_resources = spec[0]
            # Client construction happens outside the try block, so a
            # failure to build the client propagates to the caller.
            service_client = spec[1]()
            try:
                delete_resources(service_client, *spec[2:])
            except Exception as e:
                LOG.debug("Not all user resources were cleaned.",
                          exc_info=sys.exc_info())
                LOG.warning(_('Unable to fully cleanup the cloud: %s') %
                            (six.text_type(e)))
def db_sync(engine, abs_path, version=None, init_version=0,
            sanity_check=True):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change
    scripts.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository.
    :param version: Database will upgrade/downgrade until this version.
                    If None - database will update to the latest
                    available version.
    :param init_version: Initial database version
    :param sanity_check: Require schema sanity checking for all tables
    """
    if version is not None:
        # Normalize the requested version to an integer up front.
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    repository = _find_migrate_repo(abs_path)
    current_version = db_version(engine, abs_path, init_version)
    if sanity_check:
        _db_schema_sanity_check(engine)

    # None or a higher target means "move forward"; otherwise roll back.
    if version is None or version > current_version:
        return versioning_api.upgrade(engine, repository, version)
    return versioning_api.downgrade(engine, repository, version)
class HttpVersionNotSupported(HttpServerError):
    """HTTP 505 - HttpVersion Not Supported.

    Raised when the server does not support the HTTP protocol version
    used in the request.
    """
    http_status = 505
    message = _("HTTP Version Not Supported")