def _download_image_from_source(self, target_path, image=None):
    """Download an image to a local file.

    The image data is taken from Glance when ``image`` is given,
    otherwise it is fetched over HTTP from
    ``conf.CONF.openstack.img_url``.

    :param target_path: local file path to write the image data to
    :param image: optional Glance image object to download
    :raises RallyException: on connection failure or a non-200 HTTP
        response
    """
    if image:
        LOG.debug("Downloading image '%s' from Glance to %s."
                  % (image.name, target_path))
        with open(target_path, "wb") as image_file:
            for chunk in self.clients.glance().images.data(image.id):
                image_file.write(chunk)
    else:
        LOG.debug("Downloading image from %s to %s."
                  % (conf.CONF.openstack.img_url, target_path))
        try:
            response = requests.get(conf.CONF.openstack.img_url,
                                    stream=True)
        except requests.ConnectionError as err:
            msg = ("Failed to download image. Possibly there is no "
                   "connection to Internet. Error: %s."
                   % (str(err) or "unknown"))
            raise exceptions.RallyException(msg)

        # NOTE: always release the underlying connection, even when the
        # server replies with an error status (the original leaked it).
        try:
            if response.status_code == 200:
                with open(target_path, "wb") as image_file:
                    for chunk in response.iter_content(chunk_size=1024):
                        if chunk:   # filter out keep-alive new chunks
                            image_file.write(chunk)
                            image_file.flush()
            else:
                if response.status_code == 404:
                    msg = "Failed to download image. Image was not found."
                else:
                    msg = ("Failed to download image. HTTP error code %d."
                           % response.status_code)
                raise exceptions.RallyException(msg)
        finally:
            response.close()

    LOG.debug("The image has been successfully downloaded!")
def _migrate(self, server, skip_compute_nodes_check=False,
             skip_host_check=False):
    """Run migration of the given server.

    :param server: Server object
    :param skip_compute_nodes_check: Specifies whether to verify the
        number of compute nodes
    :param skip_host_check: Specifies whether to verify the targeted
        host availability
    """
    if not skip_compute_nodes_check:
        # A cold migration needs at least one other node to move to.
        if len(self._list_hypervisors()) < 2:
            raise exceptions.RallyException("Less than 2 compute nodes,"
                                            " skipping Migration")

    admin_server = self.admin_clients("nova").servers.get(server.id)
    source_host = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
    admin_server.migrate()

    utils.wait_for_status(
        server,
        ready_statuses=["VERIFY_RESIZE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.openstack.nova_server_migrate_timeout,
        check_interval=(CONF.openstack.nova_server_migrate_poll_interval))

    if skip_host_check:
        return
    # Re-fetch the server as admin to learn where it ended up.
    admin_server = self.admin_clients("nova").servers.get(server.id)
    target_host = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
    if source_host == target_host:
        raise exceptions.RallyException(
            "Migration failed: Migration complete but instance"
            " did not change host: %s" % source_host)
def abort(self, soft=False):
    """Move the task to an aborting status.

    :param soft: when True request a soft abort (the current iteration
        is allowed to finish), otherwise abort immediately
    :raises RallyException: if the current stage does not support abort
        or the task has already finished
    """
    current_status = self.get_status(self.task["uuid"])
    unsupported_stages = self.NOT_IMPLEMENTED_STAGES_FOR_ABORT

    if current_status in unsupported_stages:
        raise exceptions.RallyException(
            "Failed to abort task '%(uuid)s'. It doesn't implemented "
            "for '%(stages)s' stages. Current task status is "
            "'%(status)s'."
            % {"uuid": self.task["uuid"],
               "status": current_status,
               "stages": ", ".join(unsupported_stages)})

    finished_statuses = (consts.TaskStatus.FINISHED,
                         consts.TaskStatus.CRASHED,
                         consts.TaskStatus.ABORTED)
    if current_status in finished_statuses:
        raise exceptions.RallyException(
            "Failed to abort task '%s', since it already finished."
            % self.task["uuid"])

    if soft:
        new_status = consts.TaskStatus.SOFT_ABORTING
    else:
        new_status = consts.TaskStatus.ABORTING
    self.update_status(
        new_status,
        allowed_statuses=(consts.TaskStatus.RUNNING,
                          consts.TaskStatus.SOFT_ABORTING))
def wrapper(plugin):
    """Attach the named validator to the given plugin class.

    :param plugin: plugin class to decorate
    :raises RallyException: when validators are stacked in an
        unsupported way
    """
    if issubclass(plugin, RequiredPlatformValidator):
        raise exceptions.RallyException(
            "Cannot add a validator to RequiredPlatformValidator")
    if issubclass(plugin, Validator) and name != "required_platform":
        raise exceptions.RallyException(
            "Only RequiredPlatformValidator can be added "
            "to other validators as a validator")

    plugin._meta_setdefault("validators", [])
    plugin._meta_get("validators").append((name, (), kwargs,))
    return plugin
def export(self, uuid):
    """Export atomic-action metrics of a task to Elasticsearch.

    :param uuid: uuid of the task object
    :raises RallyException: if the task is not finished yet or the
        request to Elasticsearch fails
    """
    task = api.Task.get(uuid)
    LOG.debug("Got the task object by it's uuid %s. " % uuid)
    task_results = [{
        "key": x["key"],
        "result": x["data"]["raw"],
        "sla": x["data"]["sla"],
        "hooks": x["data"].get("hooks"),
        "load_duration": x["data"]["load_duration"],
        "full_duration": x["data"]["full_duration"]
    } for x in task.get_results()]

    if not task_results:
        msg = ("Task %s results would be available when it will "
               "finish." % uuid)
        raise exceptions.RallyException(msg)

    LOG.debug("Got the task %s results." % uuid)
    es_data_list = []
    for task_result in task_results:
        for result in task_result['result']:
            if result['error']:
                continue
            atomic_actions = result['atomic_actions']
            for k, v in six.iteritems(atomic_actions):
                # Each bulk item is an action line followed by its
                # document line.
                es_data_list.append({'index': {}})
                es_data_list.append({
                    'region': self.region,
                    'metric': k,
                    'value': v,
                    'timestamp': datetime.datetime.now().isoformat()
                })

    # NOTE: the Elasticsearch bulk API requires the NDJSON payload to
    # end with a newline; without it the last action/document pair is
    # rejected.
    es_request_data = '\n'.join(json.dumps(x) for x in es_data_list) + '\n'
    LOG.debug("ES Data: \n %s ", es_request_data)
    es_url = self.proto + self.url + '/performance/key_metrics/_bulk'
    try:
        r = requests.post(es_url, data=es_request_data)
        LOG.debug("Status code: %s", r.status_code)
        LOG.debug("Response: %s", r.json())
    except Exception as e:
        raise exceptions.RallyException(e)
def wrapper(cls):
    """Mark ``cls`` as a plugin base class.

    :param cls: class to mark; must be a direct Plugin subclass
    :raises RallyException: if ``cls`` is not a Plugin subclass or it
        already inherits from another plugin base
    """
    if not issubclass(cls, Plugin):
        raise exceptions.RallyException(_LE(
            "Plugin's Base can be only a subclass of Plugin class."))

    base = cls._get_base()
    if base != Plugin:
        details = {"plugin_cls": cls.__name__, "parent": base.__name__}
        raise exceptions.RallyException(_LE(
            "'%(plugin_cls)s' can not be marked as plugin base, since it "
            "inherits from '%(parent)s' which is also plugin base.")
            % details)

    cls.base_ref = cls
    return cls
def check_db_revision(self):
    """Ensure the database exists and is at the latest revision.

    :raises RallyException: if the database is missing or outdated
    """
    rev = rally_version.database_revision()

    # No revision at all means the database was never created.
    if rev["revision"] is None:
        raise exceptions.RallyException(
            _LE("Database is missing. Create database by command "
                "`rally-manage db create'"))

    # A revision behind the head means pending migrations.
    if rev["revision"] != rev["current_head"]:
        raise exceptions.RallyException(
            _LE("Database seems to be outdated. Run upgrade from "
                "revision %(revision)s to %(current_head)s by command "
                "`rally-manage db upgrade'") % rev)
def __init__(self, clients, name_generator=None, atomic_inst=None):
    """Initialize service class

    :param clients: an instance of rally.osclients.Clients
    :param name_generator: a method for generating random names. Usually
        it is generate_random_name method of RandomNameGeneratorMixin
        instance.
    :param atomic_inst: an object to store atomic actions. Usually, it
        is `_atomic_actions` property of ActionTimerMixin instance
    """
    super(UnifiedService, self).__init__(clients, name_generator,
                                         atomic_inst)

    if self._meta_is_inited(raise_exc=False):
        # This class is the compatibility layer for one specific
        # service implementation.
        impl_cls = self._meta_get("impl")
        self._impl = impl_cls(self._clients, self._name_generator,
                              self._atomic_actions)
        self.version = impl_cls._meta_get("version")
    else:
        # This class is a service base; pick an implementation
        # dynamically.
        impl_cls, _all_impls = self.discover_impl()
        if not impl_cls:
            raise exceptions.RallyException(
                "There is no proper implementation for %s."
                % self.__class__.__name__)
        self._impl = impl_cls(self._clients, self._name_generator,
                              self._atomic_actions)
        self.version = self._impl.version
def test_validate(self, mock_elastic_search_client):
    """Check validator behavior for supported and unsupported versions."""
    validator = elastic.Validator()
    client = mock_elastic_search_client.return_value

    # With an explicit destination no client should ever be created.
    validator.validate({}, {}, None, {"destination": "/home/foo"})
    self.assertFalse(mock_elastic_search_client.called)

    # Versions 2.x and 5.x are accepted.
    for supported_version in ("2.5.1", "5.6.2"):
        client.version.return_value = supported_version
        validator.validate({}, {}, None, {"destination": None})

    # A 1.x server is rejected.
    client.version.return_value = "1.1.1"
    e = self.assertRaises(elastic.validation.ValidationError,
                          validator.validate, {}, {}, None,
                          {"destination": None})
    self.assertEqual("The unsupported version detected 1.1.1.",
                     e.message)

    # A RallyException from the client is converted to a
    # ValidationError with the same message.
    exp_e = exceptions.RallyException("foo")
    client.version.side_effect = exp_e
    actual_e = self.assertRaises(elastic.validation.ValidationError,
                                 validator.validate, {}, {}, None,
                                 {"destination": None})
    self.assertEqual(exp_e.format_message(), actual_e.message)
def export(self, uuid):
    """Export results of the task to the file.

    :param uuid: uuid of the task object
    :raises RallyException: if the task is not finished yet or the
        export type is unknown
    :raises IOError: if the destination directory does not exist
    """
    task = api.Task.get(uuid)
    LOG.debug("Got the task object by it's uuid %s. " % uuid)
    task_results = [{"key": x["key"],
                     "result": x["data"]["raw"],
                     "sla": x["data"]["sla"],
                     "load_duration": x["data"]["load_duration"],
                     "full_duration": x["data"]["full_duration"]}
                    for x in task.get_results()]

    # NOTE: previously an unknown export type fell through and crashed
    # later with a NameError on `res`; fail fast with a clear message.
    if self.type != "json":
        raise exceptions.RallyException(
            "Invalid export type '%s'." % self.type)

    if not task_results:
        msg = ("Task %s results would be available when it will "
               "finish." % uuid)
        raise exceptions.RallyException(msg)
    res = json.dumps(task_results, sort_keys=True, indent=4)
    LOG.debug("Got the task %s results." % uuid)

    dirname = os.path.dirname(self.path)
    if dirname and not os.path.exists(dirname):
        raise IOError("There is no such directory: %s" % dirname)

    with open(self.path, "w") as f:
        LOG.debug("Writing task %s results to the %s." % (
            uuid, self.connection_string))
        f.write(res)
    LOG.debug("Task %s results was written to the %s." % (
        uuid, self.connection_string))
def __init__(self, *args, **kwargs):
    """Initialize the exporter and pick the single task to export.

    :raises RallyException: if more or less than one task result was
        passed to the exporter
    """
    super(OldJSONExporter, self).__init__(*args, **kwargs)
    if len(self.tasks_results) == 1:
        self.task = self.tasks_results[0]
    else:
        raise exceptions.RallyException(
            f"'{self.get_fullname()}' task exporter can be used only for "
            f"a one task.")
def __init__(cls, name, bases, namespaces):
    """Verify that a versioned service implements its base's public API.

    :param name: name of the class being created
    :param bases: tuple of base classes
    :param namespaces: class namespace dict
    :raises RallyException: if some public method of a service base
        class is not overridden by the new class
    """
    super(ServiceMeta, cls).__init__(name, bases, namespaces)

    service_bases = [c for c in cls.__bases__ if type(c) == ServiceMeta]
    if not service_bases:
        # nothing to check
        return

    # obtain all public apis of bases which should be implemented in
    # subclasses
    public_apis = set()
    for base in service_bases:
        # NOTE: the loop variable must not be called `name` -- the
        # original code shadowed the `name` parameter here.
        for member_name, field in inspect.getmembers(base):
            if not member_name.startswith("_") and callable(field):
                public_apis.add(member_name)

    not_implemented_apis = public_apis - set(namespaces)
    # NOTE(andreykurilin): there are three specific methods which should
    #   not be always overridden.
    not_implemented_apis -= {
        "generate_random_name", "is_applicable", "discover_impl"
    }

    if not_implemented_apis:
        raise exceptions.RallyException(
            "%s has wrong implementation. Implementation of specific "
            "version of API should override all public methods of base"
            " service class. Missed public method(s): %s."
            % (cls, ", ".join(not_implemented_apis)))
def deployment(self):
    """Deployment this verifier is linked to.

    :raises RallyException: if no deployment has been set yet
    """
    # TODO(andreykurilin): deprecate this property someday
    if self._deployment is not None:
        return self._deployment
    raise exceptions.RallyException(
        "Verifier is not linked to any deployment. Please, call "
        "`set_env` method.")
def recreate(cls, deployment, config=None):
    """Performs a cleanup and then makes a deployment again.

    :param deployment: UUID or name of the deployment
    :param config: an optional dict with deployment config to update
        before redeploy
    """
    deployment = objects.Deployment.get(deployment)
    deployer = deploy_engine.Engine.get_engine(
        deployment["config"]["type"], deployment)

    if config:
        # The engine type is immutable, so validate before touching
        # anything.
        if config["type"] != deployment["config"]["type"]:
            raise exceptions.RallyException(
                "Can't change deployment type.")
        try:
            deployer.validate(config)
        except jsonschema.ValidationError:
            LOG.error(_LE("Config schema validation error."))
            raise

    with deployer:
        deployer.make_cleanup()
        if config:
            deployment.update_config(config)
        credentials = deployer.make_deploy()
        deployment.update_credentials(credentials)
def get_credentials_for(self, namespace):
    """Return the first credential registered under ``namespace``.

    :param namespace: credentials namespace (e.g. "openstack")
    :raises RallyException: if no credentials exist for the namespace
    """
    all_credentials = self.deployment["credentials"]
    try:
        return all_credentials[namespace][0]
    except (KeyError, IndexError) as e:
        LOG.exception(e)
        raise exceptions.RallyException(_(
            "No credentials found for %s") % namespace)
def add_output(self, additive=None, complete=None):
    """Add iteration's custom output data.

    This saves custom output data to task results. The main way to get
    this data processed is to find it in HTML report ("Scenario Data"
    tab), where it is displayed by tables or various charts
    (StackedArea, Lines, Pie).

    Take a look at "Processing Output Charts" section of Rally Plugins
    Reference to find explanations and examples about additive and
    complete output types and how to display this output data by
    specific widgets.

    Here is a simple example how to add both additive and complete data
    and display them by StackedArea widget in HTML report:

    .. code-block:: python

        self.add_output(
            additive={"title": "Additive data in StackedArea",
                      "description": "Iterations trend for foo and bar",
                      "chart_plugin": "StackedArea",
                      "data": [["foo", 12], ["bar", 34]]},
            complete={"title": "Complete data as stacked area",
                      "description": "Data is shown as-is in StackedArea",
                      "chart_plugin": "StackedArea",
                      "data": [["foo", [[0, 5], [1, 42], [2, 15]]],
                               ["bar", [[0, 2], [1, 1.3], [2, 5]]]],
                      "label": "Y-axis label text",
                      "axis_label": "X-axis label text"})

    :param additive: dict with additive output
    :param complete: dict with complete output
    :raises RallyException: if output has wrong format
    """
    for kind, output in (("additive", additive), ("complete", complete)):
        if not output:
            continue
        # Validate before storing; an error message means bad format.
        message = charts.validate_output(kind, output)
        if message:
            raise exceptions.RallyException(message)
        self._output[kind].append(output)
def _live_migrate(self, server, target_host, block_migration=False,
                  disk_over_commit=False, skip_host_check=False):
    """Run live migration of the given server.

    :param server: Server object
    :param target_host: Specifies the target compute node to migrate
    :param block_migration: Specifies the migration type
    :param disk_over_commit: Specifies whether to overcommit migrated
        instance or not
    :param skip_host_check: Specifies whether to verify the targeted
        host availability
    """
    admin_server = self.admin_clients("nova").servers.get(server.id)
    source_host = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
    admin_server.live_migrate(target_host,
                              block_migration=block_migration,
                              disk_over_commit=disk_over_commit)

    utils.wait_for_status(
        server,
        ready_statuses=["ACTIVE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_live_migrate_timeout,
        check_interval=(
            CONF.benchmark.nova_server_live_migrate_poll_interval))

    # Re-read the server as admin to learn its post-migration host.
    admin_server = self.admin_clients("nova").servers.get(server.id)
    new_host = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
    if not skip_host_check and source_host == new_host:
        raise exceptions.RallyException(
            _("Live Migration failed: Migration complete "
              "but instance did not change host: %s") % source_host)
def test_make_exception(self):
    """make_exception returns Rally exceptions as-is and wraps others."""
    exc = exceptions.RallyException("exc")
    self.assertEqual(exc, exceptions.make_exception(exc))

    wrapped = exceptions.make_exception(mock.Mock())
    self.assertIsInstance(wrapped, exceptions.RallyException)
def rerun(cls, verification_uuid, deployment_id=None, failed=False):
    """Rerun tests from a verification.

    :param verification_uuid: Verification UUID
    :param deployment_id: Deployment name or UUID
    :param failed: Rerun only failed tests
    """
    # TODO(ylobankov): Improve this method in the future: put some
    #   information about re-run in run_args.
    verification = cls.get(verification_uuid)

    if failed:
        tests = [t for t, r in verification.tests.items()
                 if r["status"] == "fail"]
        if not tests:
            raise exceptions.RallyException(
                "There are no failed tests from verification (UUID=%s)."
                % verification_uuid)
    else:
        tests = verification.tests.keys()

    deployment = _Deployment.get(deployment_id
                                 or verification.deployment_uuid)
    LOG.info(
        "Re-running %stests from verification (UUID=%s) for "
        "deployment '%s' (UUID=%s).", "failed " if failed else "",
        verification.uuid, deployment["name"], deployment["uuid"])
    return cls.start(verification.verifier_uuid, deployment["uuid"],
                     load_list=tests)
def add_extension(cls, verifier_id, source, version=None,
                  extra_settings=None):
    """Add a verifier extension.

    :param verifier_id: Verifier name or UUID
    :param source: Path or URL to the repo to clone verifier extension
        from
    :param version: Branch, tag or commit ID to checkout before
        installation of the verifier extension
    :param extra_settings: Extra installation settings for verifier
        extension
    """
    verifier = cls.get(verifier_id)
    if verifier.status != consts.VerifierStatus.INSTALLED:
        raise exceptions.RallyException(
            "Failed to add verifier extension because verifier %s "
            "is in '%s' status, but should be in '%s'." % (
                verifier, verifier.status,
                consts.VerifierStatus.INSTALLED))

    LOG.info("Adding extension for verifier %s.", verifier)

    # store original status to rollback it after failure
    original_status = verifier.status
    verifier.update_status(consts.VerifierStatus.EXTENDING)
    try:
        verifier.manager.install_extension(
            source, version=version, extra_settings=extra_settings)
    finally:
        verifier.update_status(original_status)

    LOG.info("Extension for verifier %s has been successfully added!",
             verifier)
def override_configuration(cls, verifier_id, deployment_id,
                           new_configuration):
    """Override verifier configuration (e.g., rewrite the config file).

    :param verifier_id: Verifier name or UUID
    :param deployment_id: Deployment name or UUID
    :param new_configuration: New configuration for verifier
    """
    verifier = cls.get(verifier_id)
    # NOTE(review): the status check reads verifier.deployment before
    # set_deployment() is called -- kept as-is to preserve behavior.
    if verifier.status != consts.VerifierStatus.INSTALLED:
        raise exceptions.RallyException(
            "Failed to override verifier configuration for deployment "
            "'%s' (UUID=%s) because verifier %s is in '%s' status, but "
            "should be in '%s'." % (
                verifier.deployment["name"],
                verifier.deployment["uuid"], verifier, verifier.status,
                consts.VerifierStatus.INSTALLED))

    verifier.set_deployment(deployment_id)
    deployment_name = verifier.deployment["name"]
    deployment_uuid = verifier.deployment["uuid"]
    LOG.info(
        "Overriding configuration of verifier %s for deployment '%s' "
        "(UUID=%s).", verifier, deployment_name, deployment_uuid)
    verifier.manager.override_configuration(new_configuration)
    LOG.info(
        "Configuration of verifier %s has been successfully "
        "overridden for deployment '%s' (UUID=%s)!", verifier,
        deployment_name, deployment_uuid)
def run(self, image, metadata, availability_zone=None, ram=512,
        vcpus=1, disk=1, boot_server_kwargs=None):
    """Scenario to create and verify an aggregate

    This scenario creates an aggregate, adds a compute host and metadata
    to the aggregate, adds the same metadata to the flavor and creates
    an instance. Verifies that instance host is one of the hosts in the
    aggregate.

    :param image: The image ID to boot from
    :param metadata: The metadata to be set as flavor extra specs
    :param availability_zone: The availability zone of the aggregate
    :param ram: Memory in MB for the flavor
    :param vcpus: Number of VCPUs for the flavor
    :param disk: Size of local disk in GB
    :param boot_server_kwargs: Optional additional arguments to verify
        host aggregates
    :raises RallyException: if instance and aggregate hosts do not
        match
    """
    boot_server_kwargs = boot_server_kwargs or {}

    aggregate = self._create_aggregate(availability_zone)
    # Pick the first usable hypervisor (idiomatic iteration instead of
    # the original index-based loop).
    host_name = next(
        (h.service["host"] for h in self._list_hypervisors()
         if h.state == "up" and h.status == "enabled"),
        None)
    if not host_name:
        raise exceptions.RallyException("Could not find an available host")

    self._aggregate_set_metadata(aggregate, metadata)
    self._aggregate_add_host(aggregate, host_name)

    flavor = self._create_flavor(ram, vcpus, disk)
    flavor.set_keys(metadata)

    server = self._boot_server(image, flavor.id, **boot_server_kwargs)
    # NOTE: we need to get server object by admin user to obtain
    #   "hypervisor_hostname" attribute
    server = self.admin_clients("nova").servers.get(server.id)
    instance_hostname = getattr(server,
                                "OS-EXT-SRV-ATTR:hypervisor_hostname")
    if instance_hostname != host_name:
        raise exceptions.RallyException("Instance host and aggregate "
                                        "host are different")
def validate_version(cls, version):
    """Check that ``version`` is a valid nova API version string.

    :param version: nova API version string (e.g. "2.1")
    :raises RallyException: if novaclient does not support the version
    """
    from novaclient import api_versions
    from novaclient import exceptions as nova_exc

    try:
        api_versions.get_api_version(version)
    except nova_exc.UnsupportedVersion:
        msg = "Version string '%s' is unsupported." % version
        raise exceptions.RallyException(msg)
def _job_execution_is_finished(self, je_id):
    """Check whether a Sahara job execution has completed.

    :param je_id: job execution id
    :returns: True when the execution succeeded, False while it is
        still running
    :raises RallyException: when the execution failed or was killed
    """
    execution = self.clients("sahara").job_executions.get(je_id)
    status = execution.info["status"].lower()

    if status in ("success", "succeeded"):
        return True
    if status in ("failed", "killed"):
        raise exceptions.RallyException("Job execution %s has failed"
                                        % je_id)
    return False
def wrapper(cls):
    """Register ``cls`` as a scenario plugin with a default context."""
    # TODO(boris-42): Drop this check as soon as we refactor rally report
    if "." not in name.strip("."):
        raise exceptions.RallyException(
            _("Scenario name must include a dot: '%s'") % name)

    scenario_cls = plugin.configure(name=name, platform=platform)(cls)
    scenario_cls._meta_set("default_context", context or {})
    return scenario_cls
def create(self, name, vtype, namespace=None, source=None, version=None,
           system_wide=False, extra_settings=None):
    """Create a verifier.

    :param name: Verifier name
    :param vtype: Verifier plugin name
    :param namespace: Verifier plugin namespace. Should be specified
        when there are two verifier plugins with equal names but in
        different namespaces
    :param source: Path or URL to the repo to clone verifier from
    :param version: Branch, tag or commit ID to checkout before
        verifier installation
    :param system_wide: Whether or not to use the system-wide
        environment for verifier instead of a virtual environment
    :param extra_settings: Extra installation settings for verifier
    """
    # check that the specified verifier type exists
    vmanager.VerifierManager.get(vtype, namespace=namespace)

    LOG.info("Creating verifier '%s'.", name)

    try:
        existing = self._get(name)
    except exceptions.ResourceNotFound:
        verifier = objects.Verifier.create(
            name=name, source=source, system_wide=system_wide,
            version=version, vtype=vtype, namespace=namespace,
            extra_settings=extra_settings)
    else:
        raise exceptions.RallyException(
            "Verifier with name '%s' already exists! Please, specify "
            "another name for verifier and try again." % existing.name)

    # Fall back to the plugin defaults for properties the caller left
    # unspecified.
    properties = {}
    default_namespace = verifier.manager._meta_get("namespace")
    if not namespace and default_namespace:
        properties["namespace"] = default_namespace
    default_source = verifier.manager._meta_get("default_repo")
    if not source and default_source:
        properties["source"] = default_source
    if properties:
        verifier.update_properties(**properties)

    verifier.update_status(consts.VerifierStatus.INSTALLING)
    try:
        verifier.manager.install()
    except Exception:
        verifier.update_status(consts.VerifierStatus.FAILED)
        raise
    verifier.update_status(consts.VerifierStatus.INSTALLED)

    LOG.info("Verifier %s has been successfully created!", verifier)

    return verifier.uuid
def configure(cls, verifier, deployment_id, extra_options=None,
              reconfigure=False):
    """Configure a verifier.

    :param verifier: Verifier object or (name or UUID)
    :param deployment_id: Deployment name or UUID
    :param extra_options: Extend verifier configuration with extra
        options
    :param reconfigure: Reconfigure verifier
    """
    if not isinstance(verifier, objects.Verifier):
        verifier = cls.get(verifier)
    verifier.set_deployment(deployment_id)
    LOG.info("Configuring verifier %s for deployment '%s' (UUID=%s).",
             verifier, verifier.deployment["name"],
             verifier.deployment["uuid"])

    if verifier.status != consts.VerifierStatus.INSTALLED:
        raise exceptions.RallyException(
            "Failed to configure verifier %s for deployment '%s' "
            "(UUID=%s) because verifier is in '%s' status, but should be "
            "in '%s'." % (verifier, verifier.deployment["name"],
                          verifier.deployment["uuid"], verifier.status,
                          consts.VerifierStatus.INSTALLED))

    msg = ("Verifier %s has been successfully configured for deployment "
           "'%s' (UUID=%s)!" % (verifier, verifier.deployment["name"],
                                verifier.deployment["uuid"]))
    vm = verifier.manager

    if vm.is_configured():
        LOG.info("Verifier is already configured!")
        if not reconfigure:
            if not extra_options:
                return vm.get_configuration()
            # Just add extra options to the config file.
            if logging.is_debug():
                LOG.debug("Adding the following extra options: %s "
                          "to verifier configuration.", extra_options)
            else:
                LOG.info("Adding extra options to verifier "
                         "configuration.")
            vm.extend_configuration(extra_options)
            LOG.info(msg)
            return vm.get_configuration()

    LOG.info("Reconfiguring verifier.")
    raw_config = vm.configure(extra_options=extra_options)
    LOG.info(msg)
    return raw_config
def uninstall_extension(self, name):
    """Uninstall a Tempest plugin.

    :param name: extension name to remove
    :raises RallyException: if no installed extension with that name
        and an existing location is found
    """
    for ext in self.list_extensions():
        if ext["name"] != name:
            continue
        if os.path.exists(ext["location"]):
            shutil.rmtree(ext["location"])
            break
    else:
        raise exceptions.RallyException(
            "There is no Tempest plugin with name '%s'. "
            "Are you sure that it was installed?" % name)
def _delete_encryption_type(self, volume_type):
    """Delete the encryption type information for the specified volume
    type.

    :param volume_type: the volume type whose encryption type
                        information must be deleted
    """
    resp = self.admin_clients("cinder").volume_encryption_types.delete(
        volume_type)
    # Cinder replies with HTTP 202 (Accepted) on success.
    if resp[0].status_code != 202:
        raise exceptions.RallyException("EncryptionType Deletion Failed")
def create_keystone_client(args):
    """Create a keystone client for the discovered API version.

    :param args: keyword arguments for keystone discovery and client
        construction (expected to include ``auth_url``)
    :returns: a keystone v2 or v3 client instance
    :raises RallyException: if no supported keystone version can be
        discovered
    """
    discover = keystone_discover.Discover(**args)
    for version_data in discover.version_data():
        version = version_data["version"]
        if version[0] <= 2:
            return keystone_v2.Client(**args)
        elif version[0] == 3:
            return keystone_v3.Client(**args)
    # NOTE: the original passed the raw format string plus **args to
    # RallyException, so %(auth_url)s was never interpolated into the
    # message (and all client kwargs, credentials included, ended up in
    # the exception kwargs). Format the message explicitly instead.
    raise exceptions.RallyException(
        "Failed to discover keystone version for url %(auth_url)s."
        % {"auth_url": args.get("auth_url")})