def _load_img(self):
    """Download the cirros image and store it at ``self.img_path``.

    The image is first written to ``<img_path>.tmp`` and renamed on
    success, so a partially downloaded file never appears under the
    final name.

    :raises TempestConfigCreationFailure: if the download fails or the
        server responds with a non-200 status.
    """
    cirros_url = ('http://download.cirros-cloud.net/%s/%s' %
                  (CONF.image.cirros_version,
                   CONF.image.cirros_image))
    try:
        response = requests.get(cirros_url, stream=True)
    except requests.ConnectionError as err:
        msg = _('Error on downloading cirros image, possibly'
                ' no connection to Internet with message %s') % str(err)
        raise TempestConfigCreationFailure(msg)
    if response.status_code == 200:
        with open(self.img_path + '.tmp', 'wb') as img_file:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    img_file.write(chunk)
                    img_file.flush()
        os.rename(self.img_path + '.tmp', self.img_path)
    else:
        if response.status_code == 404:
            # BUG FIX: there was no space between 'possibly' and
            # 'invalid' in the user-visible message.
            msg = _('Error on downloading cirros image, possibly '
                    'invalid cirros_version or cirros_image in rally.conf')
        else:
            # BUG FIX: requests.Response has no getcode() (that is the
            # urllib API) -- use status_code.
            msg = _('Error on downloading cirros image, '
                    'HTTP error code %s') % response.status_code
        raise TempestConfigCreationFailure(msg)
def required_openstack(config, clients, task, admin=False, users=False):
    """Validator that requires OpenStack admin or (and) users.

    This allows us to create 4 kind of benchmarks:
    1) not OpenStack related (validator is not specified)
    2) requires OpenStack admin
    3) requires OpenStack admin + users
    4) requires OpenStack users

    :param admin: requires OpenStack admin
    :param users: requires OpenStack users
    """
    # At least one of the two requirements must be declared.
    if not admin and not users:
        return ValidationResult(
            False, _("You should specify admin=True or users=True or both."))

    deployment = objects.Deployment.get(task["deployment_uuid"])

    has_admin = bool(deployment["admin"])
    has_users = bool(deployment["users"])

    if has_admin and has_users:
        return ValidationResult()

    if has_admin:
        # Admin-only deployment: a users requirement can still be
        # satisfied via the "users" context in the benchmark config.
        if users and not config.get("context", {}).get("users"):
            return ValidationResult(False,
                                    _("You should specify 'users' context"))
        return ValidationResult()

    if has_users and admin:
        return ValidationResult(False, _("Admin credentials required"))

    return ValidationResult()
def get_image_uuid(self):
    """Get image uuid. Download image if necessary."""
    image_uuid = self.config['image'].get('uuid', None)
    if image_uuid:
        return image_uuid

    if not self.glance:
        raise exceptions.InvalidConfigException(
            'If glance is not available in the service catalog'
            ' obtained by the openstack server provider, then'
            ' images cannot be uploaded so the uuid of an'
            ' existing image must be specified in the'
            ' deployment config.')

    image_cfg = self.config['image']

    # Reuse an already uploaded image when its checksum matches.
    for image in self.glance.images.list():
        if image.checksum == image_cfg['checksum']:
            LOG.info(_('Found image with appropriate checksum. Using it.'))
            return image.id

    LOG.info(_('Downloading new image %s') % image_cfg['url'])
    image = self.glance.images.create(name=image_cfg['name'])
    try:
        image.update(data=urllib2.urlopen(image_cfg['url']),
                     disk_format=image_cfg['format'],
                     container_format='bare')
    except urllib2.URLError:
        LOG.error(_('Unable to retrieve %s') % image_cfg['url'])
        raise
    image.get()

    # Verify that the upload was not corrupted in transit.
    if image.checksum != image_cfg['checksum']:
        raise exceptions.ChecksumMismatch(url=image_cfg['url'])

    return image.id
def image_valid_on_flavor(config, clients, task, flavor_name, image_name):
    """Returns validator for image could be used for current flavor

    :param flavor_name: defines which variable should be used
                        to get flavor id value.
    :param image_name: defines which variable should be used
                       to get image id value.
    """
    valid_result, flavor = _get_validated_flavor(config, clients, flavor_name)
    if not valid_result.is_valid:
        return valid_result

    valid_result, image = _get_validated_image(config, clients, image_name)
    if not valid_result.is_valid:
        return valid_result

    # Image metadata fields may be unset; treat missing values as 0.
    if (image.min_ram or 0) > flavor.ram:
        return ValidationResult(
            False,
            _("The memory size for flavor '%s' is too small "
              "for requested image '%s'") % (flavor.id, image.id))

    if flavor.disk:
        bytes_per_gb = 1024 ** 3
        if (image.size or 0) > flavor.disk * bytes_per_gb:
            return ValidationResult(
                False,
                _("The disk size for flavor '%s' is too small "
                  "for requested image '%s'") % (flavor.id, image.id))
        if (image.min_disk or 0) > flavor.disk:
            return ValidationResult(
                False,
                _("The disk size for flavor '%s' is too small "
                  "for requested image '%s'") % (flavor.id, image.id))
    return ValidationResult()
def image_valid_on_flavor(config, clients, task, flavor_name, image_name):
    """Returns validator for image could be used for current flavor

    :param flavor_name: defines which variable should be used
                        to get flavor id value.
    :param image_name: defines which variable should be used
                       to get image id value.
    """
    result, flavor = _get_validated_flavor(config, clients, flavor_name)
    if not result.is_valid:
        return result
    result, image = _get_validated_image(config, clients, image_name)
    if not result.is_valid:
        return result

    error = None
    if flavor.ram < (image.min_ram or 0):
        error = _("The memory size for flavor '%s' is too small "
                  "for requested image '%s'") % (flavor.id, image.id)
    elif flavor.disk and ((image.size or 0) > flavor.disk * (1024 ** 3)
                          or (image.min_disk or 0) > flavor.disk):
        # Both disk checks share the same user-visible message.
        error = _("The disk size for flavor '%s' is too small "
                  "for requested image '%s'") % (flavor.id, image.id)

    if error:
        return ValidationResult(False, error)
    return ValidationResult()
class AllowSSH(base.Context):
    """Context that opens one permissive security group per tenant."""

    __ctx_name__ = "allow_ssh"
    __ctx_order__ = 301
    __ctx_hidden__ = True

    def __init__(self, context):
        super(AllowSSH, self).__init__(context)
        self.context["allow_ssh"] = SSH_GROUP_NAME
        self.secgroup = []

    @utils.log_task_wrapper(LOG.info, _("Enter context: `allow_ssh`"))
    def setup(self):
        # One security group per tenant is enough; skip repeated tenants.
        processed_tenants = set()
        for user in self.context["users"]:
            endpoint = user["endpoint"]
            tenant = endpoint.tenant_name
            if tenant in processed_tenants:
                continue
            processed_tenants.add(tenant)
            self.secgroup.append(_prepare_open_secgroup(endpoint))

    @utils.log_task_wrapper(LOG.info, _("Exit context: `allow_ssh`"))
    def cleanup(self):
        for secgroup in self.secgroup:
            try:
                secgroup.delete()
            except Exception as ex:
                # Best effort: log and continue with remaining groups.
                LOG.warning("Unable to delete secgroup: %(group_id)s. "
                            "Exception: %(ex)s"
                            % {"group_id": secgroup.id, "ex": ex})
def _initialize_testr(self):
    """Initialize the test repository unless it already exists."""
    if os.path.isdir(self.path(".testrepository")):
        return
    msg = _("Test Repository initialization.")
    LOG.info(_("Starting: ") + msg)
    subprocess.check_call("%s testr init" % self.venv_wrapper,
                          shell=True, cwd=self.path())
    LOG.info(_("Completed: ") + msg)
def wrapper(self, *args, **kwargs):
    """Call the wrapped function with Starting/Completed log lines."""
    params = {
        "obj_name": obj.title(),
        "uuid": getattr(self, obj)["uuid"],
        "msg": msg % kw,
    }
    log(_("%(obj_name)s %(uuid)s | Starting: %(msg)s") % params)
    outcome = f(self, *args, **kwargs)
    log(_("%(obj_name)s %(uuid)s | Completed: %(msg)s") % params)
    return outcome
def wrapper(self, *args, **kwargs):
    """Log a Starting marker, run the wrapped call, log Completed."""
    obj_uuid = getattr(self, obj)["uuid"]
    params = {"msg": msg % kw, "obj_name": obj.title(), "uuid": obj_uuid}
    log(_("%(obj_name)s %(uuid)s | Starting: %(msg)s") % params)
    result = f(self, *args, **kwargs)
    log(_("%(obj_name)s %(uuid)s | Completed: %(msg)s") % params)
    return result
def _initialize_testr(self):
    """Create the testr repository; clean up a partial one on failure."""
    if os.path.isdir(self.path(".testrepository")):
        return
    print(_("Test Repository initialization."))
    try:
        check_output("%s testr init" % self.venv_wrapper,
                     shell=True, cwd=self.path())
    except subprocess.CalledProcessError:
        # A half-created repository would break subsequent runs, so
        # remove it before reporting the failure.
        repo_path = self.path(".testrepository")
        if os.path.exists(repo_path):
            shutil.rmtree(repo_path)
        raise TempestSetupFailure(_("failed to initialize testr"))
class Quotas(base.Context):
    """Context class for updating benchmarks' tenants quotas."""

    __ctx_name__ = "quotas"
    __ctx_order__ = 210
    __ctx_hidden__ = False

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": utils.JSON_SCHEMA,
        "additionalProperties": False,
        "properties": {
            "nova": nova_quotas.NovaQuotas.QUOTAS_SCHEMA,
            "cinder": cinder_quotas.CinderQuotas.QUOTAS_SCHEMA,
            "designate": designate_quotas.DesignateQuotas.QUOTAS_SCHEMA,
            "neutron": neutron_quotas.NeutronQuotas.QUOTAS_SCHEMA
        }
    }

    def __init__(self, context):
        super(Quotas, self).__init__(context)
        self.clients = osclients.Clients(context["admin"]["endpoint"])
        # One quota manager per supported service, all sharing clients.
        self.manager = {
            "nova": nova_quotas.NovaQuotas(self.clients),
            "cinder": cinder_quotas.CinderQuotas(self.clients),
            "designate": designate_quotas.DesignateQuotas(self.clients),
            "neutron": neutron_quotas.NeutronQuotas(self.clients)
        }

    def _service_has_quotas(self, service):
        # True when the benchmark config defines quotas for this service.
        return bool(self.config.get(service, {}))

    @utils.log_task_wrapper(LOG.info, _("Enter context: `quotas`"))
    def setup(self):
        for tenant in self.context["tenants"]:
            for service, mgr in self.manager.items():
                if self._service_has_quotas(service):
                    mgr.update(tenant["id"], **self.config[service])

    @utils.log_task_wrapper(LOG.info, _("Exit context: `quotas`"))
    def cleanup(self):
        for service, mgr in self.manager.items():
            if not self._service_has_quotas(service):
                continue
            for tenant in self.context["tenants"]:
                try:
                    mgr.delete(tenant["id"])
                except Exception as e:
                    LOG.warning("Failed to remove quotas for tenant "
                                "%(tenant_id)s in service %(service)s "
                                "\n reason: %(exc)s"
                                % {"tenant_id": tenant["id"],
                                   "service": service, "exc": e})
def _delete_single_resource(self, resource):
    """Safe resource deletion with retries and timeouts.

    Send request to delete resource, in case of failures
    repeat it few times. After that pull status of resource until it's
    deleted.

    Writes in LOG warning with UUID of resource that wasn't deleted

    :param resource: instance of resource manager initiated with resource
                     that should be deleted.
    """
    msg_kw = {
        "uuid": resource.id(),
        "service": resource._service,
        "resource": resource._resource
    }
    try:
        # Issue the delete call itself, retrying transient failures.
        rutils.retry(resource._max_attempts, resource.delete)
    except Exception as e:
        msg_kw["reason"] = e
        LOG.warning(
            _("Resource deletion failed, max retries exceeded for "
              "%(service)s.%(resource)s: %(uuid)s. Reason: %(reason)s")
            % msg_kw)
        if CONF.debug:
            LOG.exception(e)
    else:
        # Delete request accepted: poll until the resource disappears or
        # the per-resource timeout expires.
        started = time.time()
        failures_count = 0
        while time.time() - started < resource._timeout:
            try:
                if resource.is_deleted():
                    return
            except Exception as e:
                LOG.warning(
                    _("Seems like %s.%s.is_deleted(self) method is broken "
                      "It shouldn't raise any exceptions.")
                    % (resource.__module__, type(resource).__name__))
                LOG.exception(e)

                # NOTE(boris-42): Avoid LOG spaming in case of bad
                #                 is_deleted() method
                failures_count += 1
                if failures_count > resource._max_attempts:
                    break
            finally:
                # Throttle polling between status checks.
                time.sleep(resource._interval)

        LOG.warning(_("Resource deletion failed, timeout occurred for "
                      "%(service)s.%(resource)s: %(uuid)s.")
                    % msg_kw)
def generate_config_file(self):
    """Generate configuration file of tempest for current deployment."""
    LOG.debug("Tempest config file: %s " % self.config_file)
    if self.is_configured():
        LOG.info("Tempest is already configured.")
        return
    msg = _("Creation of configuration file for tempest.")
    LOG.info(_("Starting: ") + msg)
    config.TempestConf(self.deploy_id).generate(self.config_file)
    LOG.info(_("Completed: ") + msg)
def required_services(config, clients, task, *required_services):
    """Validator checks if specified OpenStack services are available.

    :param *required_services: list of services names
    """
    deployed_services = clients.services().values()
    for name in required_services:
        if name not in consts.Service:
            # Not a service Rally knows about at all.
            return ValidationResult(False, _("Unknown service: %s") % name)
        if name not in deployed_services:
            # Valid service name, but this cloud does not provide it.
            return ValidationResult(
                False, _("Service is not available: %s") % name)
    return ValidationResult()
class Tempest(base.Context):
    # Context that prepares a Tempest verifier (installation, configuration
    # and a temporary results directory) for scenarios that run tempest
    # tests, and removes the created artifacts afterwards.

    __ctx_name__ = "tempest"
    __ctx_order__ = 666
    __ctx_hidden__ = True

    @utils.log_task_wrapper(LOG.info, _("Enter context: `tempest`"))
    def setup(self):
        """Install/configure tempest and expose it via the context dict."""
        self.verifier = tempest.Tempest(self.task.task.deployment_uuid)
        # Discard the raw log stream; scenarios consume parsed results.
        self.verifier.log_file_raw = "/dev/null"
        # Create temporary directory for subunit-results.
        self.results_dir = os.path.join(
            tempfile.gettempdir(), "%s-results" % self.task.task.uuid)
        os.mkdir(self.results_dir)
        self.context["tmp_results_dir"] = self.results_dir

        try:
            if not self.verifier.is_installed():
                self.verifier.install()
            if not self.verifier.is_configured():
                self.verifier.generate_config_file()
        except tempest.TempestSetupFailure:
            msg = _("Failing to install tempest.")
            LOG.error(msg)
            raise exceptions.BenchmarkSetupFailure(msg)
        except config.TempestConfigCreationFailure:
            msg = _("Failing to configure tempest.")
            LOG.error(msg)
            raise exceptions.BenchmarkSetupFailure(msg)

        self.context["verifier"] = self.verifier

    @utils.log_task_wrapper(LOG.info, _("Exit context: `tempest`"))
    def cleanup(self):
        """Run tempest's cleanup tool and drop the temp results dir."""
        try:
            # Run tempest's own stress cleanup script inside the
            # verifier's virtualenv so it sees tempest's configuration.
            cmd = ("cd %(tempest_dir)s "
                   "&& %(venv)s python tempest/stress/tools/cleanup.py"
                   % {
                       "tempest_dir": self.verifier.path,
                       "venv": self.verifier.venv_wrapper
                   })
            LOG.debug("Cleanup started by the command: %s" % cmd)
            subprocess.check_call(cmd, shell=True, env=self.verifier.env,
                                  cwd=self.verifier.path)
        except subprocess.CalledProcessError:
            # Best effort: a failed cleanup must not fail the task.
            LOG.error("Tempest cleanup failed.")

        if os.path.exists(self.results_dir):
            shutil.rmtree(self.results_dir)
class AdminCleanup(base.Context):
    """Context class for admin resource cleanup."""

    __ctx_name__ = "admin_cleanup"
    __ctx_order__ = 200
    __ctx_hidden__ = True

    CONFIG_SCHEMA = {
        "type": "array",
        "$schema": rutils.JSON_SCHEMA,
        "items": {
            "type": "string",
            "enum": ["keystone", "quotas"]
        },
        "uniqueItems": True
    }

    def __init__(self, context):
        super(AdminCleanup, self).__init__(context)
        self.endpoint = None

    def _cleanup_resources(self):
        client = osclients.Clients(self.endpoint)
        # service name -> (delete helper, client arg, *extra args)
        cleanup_methods = {
            "keystone": (utils.delete_keystone_resources,
                         client.keystone()),
            "quotas": (utils.delete_admin_quotas, client,
                       self.context.get("tenants", [])),
        }
        for service_name in self.config:
            entry = cleanup_methods[service_name]
            method, service_client = entry[0], entry[1]
            try:
                method(service_client, *entry[2:])
            except Exception as e:
                # Best effort: log and continue with remaining services.
                LOG.debug("Not all admin resources were cleaned.",
                          exc_info=sys.exc_info())
                LOG.warning(_('Unable to fully cleanup the cloud: %s')
                            % (six.text_type(e)))

    @rutils.log_task_wrapper(LOG.info, _("Enter context: `admin cleanup`"))
    def setup(self):
        self.endpoint = self.context["admin"]["endpoint"]

    @rutils.log_task_wrapper(LOG.info, _("Exit context: `admin cleanup`"))
    def cleanup(self):
        self._cleanup_resources()
class Keypair(base.Context):
    """Context that provisions an SSH keypair for every benchmark user."""

    __ctx_name__ = "keypair"
    __ctx_order__ = 300
    __ctx_hidden__ = True

    KEYPAIR_NAME = "rally_ssh_key"

    def _get_nova_client(self, endpoint):
        return osclients.Clients(endpoint).nova()

    def _keypair_safe_remove(self, nova):
        # Removing a keypair that does not exist is not an error.
        try:
            nova.keypairs.delete(self.KEYPAIR_NAME)
        except novaclient.exceptions.NotFound:
            pass

    def _generate_keypair(self, endpoint):
        nova = self._get_nova_client(endpoint)
        # NOTE(hughsaunders): If keypair exists, it must be deleted as we
        # can't retrieve the private key
        self._keypair_safe_remove(nova)
        keypair = nova.keypairs.create(self.KEYPAIR_NAME)
        return {"private": keypair.private_key,
                "public": keypair.public_key}

    @utils.log_task_wrapper(LOG.info, _("Enter context: `keypair`"))
    def setup(self):
        for user in self.context["users"]:
            user["keypair"] = self._generate_keypair(user["endpoint"])

    @utils.log_task_wrapper(LOG.info, _("Exit context: `keypair`"))
    def cleanup(self):
        for user in self.context["users"]:
            endpoint = user["endpoint"]
            try:
                self._keypair_safe_remove(self._get_nova_client(endpoint))
            except Exception as e:
                LOG.warning(
                    "Unable to delete keypair: %(kpname)s for user "
                    "%(tenant)s/%(user)s: %(message)s"
                    % {'kpname': self.KEYPAIR_NAME,
                       'tenant': endpoint.tenant_name,
                       'user': endpoint.username,
                       'message': six.text_type(e)})
def inner_func(scenario_obj, *args, **kwargs):
    """Run the wrapped tempest scenario and convert its results.

    Ensures a log file path is present in kwargs, runs the target
    scenario, then parses the produced results and raises
    TempestBenchmarkFailure when any test failed or when no results
    could be parsed at all.
    """
    if "log_file" not in kwargs:
        # set temporary log file
        kwargs["log_file"] = os.path.join(
            scenario_obj.context()["tmp_results_dir"],
            os.path.basename(tempfile.NamedTemporaryFile().name))

    # run target scenario
    try:
        func(scenario_obj, *args, **kwargs)
    except subprocess.CalledProcessError:
        # A non-zero exit code is expected when tests fail; the actual
        # failures are extracted from the parsed results below.
        pass

    # parse and save results
    total, tests = scenario_obj.context()["verifier"].parse_results(
        kwargs["log_file"])
    if total and tests:
        scenario_obj._add_atomic_actions("test_execution",
                                         total.get("time"))
        if total.get("errors") or total.get("failures"):
            raise TempestBenchmarkFailure(
                [test for test in six.itervalues(tests)
                 if test["status"] == "FAIL"])
    else:
        # Nothing could be parsed from the log file at all.
        raise TempestBenchmarkFailure(_("No information"))
def check(self, deploy_id=None):
    """Check keystone authentication and list all available services.

    :param deploy_id: a UUID of the deployment
    """
    headers = ['services', 'type', 'status']
    table_rows = []
    try:
        admin = db.deployment_get(deploy_id)['admin']
        # TODO(boris-42): make this work for users in future
        for endpoint_dict in [admin]:
            clients = osclients.Clients(endpoint.Endpoint(**endpoint_dict))
            client = clients.verified_keystone()
            print("keystone endpoints are valid and following "
                  "services are available:")
            for service in client.services.list():
                row = dict(zip(headers,
                               [service.name, service.type, 'Available']))
                table_rows.append(utils.Struct(**row))
    except exceptions.InvalidArgumentsException:
        row = dict(zip(headers, ['keystone', 'identity', 'Error']))
        table_rows.append(utils.Struct(**row))
        print(_("Authentication Issues: %s.") % sys.exc_info()[1])
        return 1
    common_cliutils.print_list(table_rows, headers)
def check(self, deployment=None):
    """Check keystone authentication and list all available services.

    :param deployment: a UUID or name of the deployment
    """
    headers = ['services', 'type', 'status']
    table_rows = []
    try:
        admin = db.deployment_get(deployment)['admin']
        # TODO(boris-42): make this work for users in future
        for endpoint_dict in [admin]:
            keystone = osclients.Clients(
                endpoint.Endpoint(**endpoint_dict)).verified_keystone()
            print("keystone endpoints are valid and following "
                  "services are available:")
            for service in keystone.services.list():
                data = [service.name, service.type, 'Available']
                table_rows.append(utils.Struct(**dict(zip(headers, data))))
    except exceptions.InvalidArgumentsException:
        table_rows.append(utils.Struct(**dict(
            zip(headers, ['keystone', 'identity', 'Error']))))
        print(_("Authentication Issues: %s.") % sys.exc_info()[1])
        return 1
    common_cliutils.print_list(table_rows, headers)
def create_deploy(config, name):
    """Create a deployment.

    :param config: a dict with deployment configuration
    :param name: a str represents a name of the deployment
    """
    try:
        deployment = objects.Deployment(name=name, config=config)
    except exceptions.DeploymentNameExists as e:
        if CONF.debug:
            LOG.exception(e)
        raise

    deployer = deploy.EngineFactory.get_engine(
        deployment['config']['type'], deployment)
    try:
        deployer.validate()
    except jsonschema.ValidationError:
        LOG.error(_('Deployment %(uuid)s: Schema validation error.')
                  % {'uuid': deployment['uuid']})
        deployment.update_status(consts.DeployStatus.DEPLOY_FAILED)
        raise

    with deployer:
        deployment.update_endpoints(deployer.make_deploy())
        return deployment
def flavors(self, deploy_id=None):
    """Display available flavors.

    :param deploy_id: the UUID of a deployment
    """
    headers = ['ID', 'Name', 'vCPUs', 'RAM (MB)', 'Swap (MB)', 'Disk (GB)']
    mixed_case_fields = ['ID', 'Name', 'vCPUs']
    float_cols = ['RAM (MB)', 'Swap (MB)', 'Disk (GB)']
    formatters = {col: cliutils.pretty_float_formatter(col)
                  for col in float_cols}
    table_rows = []
    try:
        for endpoint_dict in self._get_endpoints(deploy_id):
            nova_client = osclients.Clients(
                endpoint.Endpoint(**endpoint_dict)).nova()
            for flavor in nova_client.flavors.list():
                row = [flavor.id, flavor.name, flavor.vcpus,
                       flavor.ram, flavor.swap, flavor.disk]
                table_rows.append(utils.Struct(**dict(zip(headers, row))))
        common_cliutils.print_list(table_rows,
                                   fields=headers,
                                   formatters=formatters,
                                   mixed_case_fields=mixed_case_fields)
    except exceptions.InvalidArgumentsException as e:
        print(_("Authentication Issues: %s") % e)
        return 1
def start(self, task, deploy_id=None, tag=None, do_use=False):
    """Start benchmark task.

    :param task: a file with yaml/json configration
    :param deploy_id: a UUID of a deployment
    :param tag: optional tag for this task
    """
    task_path = os.path.expanduser(task)
    with open(task_path, 'rb') as task_file:
        config_dict = yaml.safe_load(task_file.read())

    try:
        task = api.create_task(deploy_id, tag)
        print("=" * 80)
        print(_("Task %(tag)s %(uuid)s is started")
              % {"uuid": task["uuid"], "tag": task["tag"]})
        print("-" * 80)
        api.start_task(deploy_id, config_dict, task=task)
        self.detailed(task_id=task['uuid'])
        if do_use:
            use.UseCommands().task(task['uuid'])
    except exceptions.InvalidConfigException:
        return 1
    except KeyboardInterrupt:
        # Allow the user to abort the running task with Ctrl-C.
        api.abort_task(task['uuid'])
        raise
def flavors(self, deployment=None):
    """Display available flavors.

    :param deployment: UUID or name of a deployment
    """
    headers = ['ID', 'Name', 'vCPUs', 'RAM (MB)', 'Swap (MB)', 'Disk (GB)']
    mixed_case_fields = ['ID', 'Name', 'vCPUs']
    float_cols = ['RAM (MB)', 'Swap (MB)', 'Disk (GB)']
    formatters = dict((col, cliutils.pretty_float_formatter(col))
                      for col in float_cols)
    table_rows = []
    try:
        for endpoint_dict in self._get_endpoints(deployment):
            clients = osclients.Clients(endpoint.Endpoint(**endpoint_dict))
            for flavor in clients.nova().flavors.list():
                values = [flavor.id, flavor.name, flavor.vcpus,
                          flavor.ram, flavor.swap, flavor.disk]
                table_rows.append(
                    utils.Struct(**dict(zip(headers, values))))
        common_cliutils.print_list(table_rows,
                                   fields=headers,
                                   formatters=formatters,
                                   mixed_case_fields=mixed_case_fields)
    except exceptions.InvalidArgumentsException as e:
        print(_("Authentication Issues: %s") % e)
        return 1
def check(criterion_value, result):
    """SLA check: mean iteration duration must stay below the limit."""
    durations = [r["duration"] for r in result if not r.get("error")]
    # NOTE(review): assumes putils.mean copes with an empty list -- confirm.
    avg = putils.mean(durations)
    passed = avg < criterion_value
    msg = (_("Maximum average duration per iteration %ss, found with %ss")
           % (criterion_value, avg))
    return SLAResult(passed, msg)
def images(self, deploy_id=None):
    """Display available images.

    :param deploy_id: the UUID of a deployment
    """
    headers = ['UUID', 'Name', 'Size (B)']
    mixed_case_fields = ['UUID', 'Name']
    float_cols = ["Size (B)"]
    formatters = {col: cliutils.pretty_float_formatter(col)
                  for col in float_cols}
    table_rows = []
    try:
        for endpoint_dict in self._get_endpoints(deploy_id):
            glance_client = osclients.Clients(
                endpoint.Endpoint(**endpoint_dict)).glance()
            for image in glance_client.images.list():
                row = dict(zip(headers,
                               [image.id, image.name, image.size]))
                table_rows.append(utils.Struct(**row))
        common_cliutils.print_list(table_rows,
                                   fields=headers,
                                   formatters=formatters,
                                   mixed_case_fields=mixed_case_fields)
    except exceptions.InvalidArgumentsException as e:
        print(_("Authentication Issues: %s") % e)
        return 1
def tempest_tests_exists(config, clients, task):
    """Validator checks that specified test exists."""
    args = config.get("args", {})
    if "test_name" in args:
        tests = [args["test_name"]]
    else:
        tests = args.get("test_names", [])
    if not tests:
        return ValidationResult(
            False, "Parameter 'test_name' or 'test_names' should "
                   "be specified.")

    verifier = tempest.Tempest(task["deployment_uuid"])
    if not verifier.is_installed():
        verifier.install()
    if not verifier.is_configured():
        verifier.generate_config_file()

    allowed_tests = verifier.discover_tests()

    # Allow short names: qualify anything missing the standard prefix.
    tests = [t if t.startswith("tempest.api.") else "tempest.api." + t
             for t in tests]

    wrong_tests = set(tests) - allowed_tests
    if wrong_tests:
        message = (_("One or more tests not found: '%s'")
                   % "', '".join(sorted(wrong_tests)))
        return ValidationResult(False, message)
    return ValidationResult()
def images(self, deployment=None):
    """Display available images.

    :param deployment: UUID or name of a deployment
    """
    headers = ['UUID', 'Name', 'Size (B)']
    mixed_case_fields = ['UUID', 'Name']
    float_cols = ["Size (B)"]
    formatters = dict((col, cliutils.pretty_float_formatter(col))
                      for col in float_cols)
    table_rows = []
    try:
        for endpoint_dict in self._get_endpoints(deployment):
            clients = osclients.Clients(endpoint.Endpoint(**endpoint_dict))
            for image in clients.glance().images.list():
                data = [image.id, image.name, image.size]
                table_rows.append(utils.Struct(**dict(zip(headers, data))))
        common_cliutils.print_list(table_rows,
                                   fields=headers,
                                   formatters=formatters,
                                   mixed_case_fields=mixed_case_fields)
    except exceptions.InvalidArgumentsException as e:
        print(_("Authentication Issues: %s") % e)
        return 1
def _consumer(consume, queue, is_published): """Infinity worker that consumes tasks from queue. This finishes it's work only in case if is_published.isSet(). :param consume: method that consumes an object removed from the queue :param queue: deque object to popleft() objects from :param is_published: threading.Event that is used to stop the consumer when the queue is empty """ cache = {} while True: if queue: try: consume(cache, queue.popleft()) except Exception as e: LOG.warning( _("Failed to consume a task from the queue: " "%s") % e) if CONF.debug: LOG.exception(e) elif is_published.isSet(): break else: time.sleep(0.1)
def start(self, task, deploy_id=None, tag=None, do_use=False):
    """Start benchmark task.

    :param task: a file with yaml/json configration
    :param deploy_id: a UUID of a deployment
    :param tag: optional tag for this task
    """
    config_path = os.path.expanduser(task)
    with open(config_path, 'rb') as task_file:
        config_dict = yaml.safe_load(task_file.read())

    try:
        task = api.create_task(deploy_id, tag)
        print("=" * 80)
        started_msg = _("Task %(tag)s %(uuid)s is started") % {
            "uuid": task["uuid"], "tag": task["tag"]}
        print(started_msg)
        print("-" * 80)
        api.start_task(deploy_id, config_dict, task=task)
        self.detailed(task_id=task['uuid'])
        if do_use:
            use.UseCommands().task(task['uuid'])
    except exceptions.InvalidConfigException:
        return 1
    except KeyboardInterrupt:
        # Ctrl-C aborts the running task before re-raising.
        api.abort_task(task['uuid'])
        raise
def _consumer(consume, queue, is_published): """Infinity worker that consumes tasks from queue. This finishes it's work only in case if is_published.isSet(). :param consume: method that consumes an object removed from the queue :param queue: deque object to popleft() objects from :param is_published: threading.Event that is used to stop the consumer when the queue is empty """ cache = {} while True: if queue: try: consume(cache, queue.popleft()) except IndexError: # NOTE(boris-42): queue is accessed from multiple threads so # it's quite possible to have 2 queue accessing # at the same point queue with only 1 element pass except Exception as e: LOG.warning(_("Failed to consume a task from the queue: " "%s") % e) if CONF.debug: LOG.exception(e) elif is_published.isSet(): break else: time.sleep(0.1)
def _cleanup_resources(self):
    """Delete leftover resources created by every benchmark user.

    For each user endpoint, builds service clients and dispatches to the
    per-service deletion helpers below; only services listed in
    ``self.config`` are cleaned. Failures are logged and swallowed so one
    broken service does not stop the remaining cleanup.
    """
    for user in self.users_endpoints:
        clients = osclients.Clients(user)
        tenant_id = clients.keystone().tenant_id
        # Map: service name -> (delete helper, client factory, *extra
        # args). The factory is called lazily below, so clients for
        # services absent from self.config are never constructed.
        cleanup_methods = {
            "nova": (utils.delete_nova_resources, clients.nova),
            "glance": (utils.delete_glance_resources, clients.glance,
                       tenant_id),
            "cinder": (utils.delete_cinder_resources, clients.cinder),
            "neutron": (utils.delete_neutron_resources, clients.neutron,
                        tenant_id),
            "ceilometer": (utils.delete_ceilometer_resources,
                           clients.ceilometer, tenant_id),
            "heat": (utils.delete_heat_resources, clients.heat),
            "sahara": (utils.delete_sahara_resources, clients.sahara),
            "designate": (utils.delete_designate_resources,
                          clients.designate),
            "zaqar": (utils.delete_zaqar_resources, clients.zaqar),
        }
        for service_name in self.config:
            cleanup_method = cleanup_methods[service_name]
            method = cleanup_method[0]
            # Instantiate the client only for services being cleaned.
            client = cleanup_method[1]()
            try:
                method(client, *cleanup_method[2:])
            except Exception as e:
                LOG.debug("Not all user resources were cleaned.",
                          exc_info=sys.exc_info())
                LOG.warning(_('Unable to fully cleanup the cloud: %s')
                            % (six.text_type(e)))
def main():
    """Entry point: parse config, set up logging and serve the REST API."""
    # Initialize configuration and logging.
    CONF(sys.argv[1:], project='rally')
    log.setup('rally')

    # Prepare application and bind to the service socket.
    app = rally_app.make_app()
    server = simple_server.make_server(CONF.rest.host, CONF.rest.port, app)

    # Start application.
    LOG.info(_('Starting server in PID %s') % os.getpid())
    LOG.info(_("Configuration:"))
    CONF.log_opt_values(LOG, logging.INFO)

    try:
        server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the server.
        pass
def check(criterion_value, result):
    """Check that the failure percentage does not exceed the criterion.

    :param criterion_value: maximum allowed percentage of failed iterations
    :param result: list of iteration results; an iteration with a truthy
                   'error' value counts as a failure
    :returns: SLAResult with the verdict and an explanatory message
    """
    # Use a list comprehension instead of len(filter(...)) so the code
    # also works on Python 3, where filter() returns an iterator.
    errors = len([x for x in result if x['error']])
    # BUG FIX: guard against an empty result list, which previously
    # raised ZeroDivisionError; treat "no iterations" as 100% failure
    # (same convention as the guarded variant of this SLA check).
    error_rate = errors * 100.0 / len(result) if result else 100.0
    success = criterion_value >= error_rate
    # NOTE(review): the message scales criterion_value by 100 while the
    # comparison treats it as a percentage already -- looks inconsistent,
    # preserved as-is; confirm the intended units.
    msg = (_("Maximum failure percent %s%% failures, actually %s%%") %
           (criterion_value * 100.0, error_rate))
    return SLAResult(success, msg)
def validate(self):
    """Ensure the LXC subnet can hold the requested container count."""
    super(LxcProvider, self).validate()
    if 'start_lxc_network' not in self.config:
        return
    lxc_net = netaddr.IPNetwork(self.config['start_lxc_network'])
    num_containers = self.config['containers_per_host']
    # Three addresses are set aside (presumably network/gateway/
    # broadcast -- confirm) before counting usable hosts.
    usable = lxc_net.size - 3
    if usable < num_containers:
        message = _("Network size is not enough for %d hosts.")
        raise exceptions.InvalidConfigException(message % num_containers)
def validate(self):
    """Check that the configured LXC network fits all containers."""
    super(LxcEngine, self).validate()
    if 'start_lxc_network' in self.config:
        net = netaddr.IPNetwork(self.config['start_lxc_network'])
        wanted = self.config['containers_per_host']
        # net.size - 3: a few addresses are set aside (presumably
        # network/gateway/broadcast -- confirm).
        if net.size - 3 < wanted:
            raise exceptions.InvalidConfigException(
                _("Network size is not enough for %d hosts.") % wanted)
def _publish(admin, user, manager):
    """Push every resource listed by ``manager`` onto the shared queue."""
    try:
        resources = rutils.retry(3, manager.list)
        for raw_resource in resources:
            queue.append((admin, user, raw_resource))
    except Exception as e:
        # A broken list() must not kill the publisher thread.
        LOG.warning(_("Seems like %s.%s.list(self) method is broken. "
                      "It shouldn't raise any exceptions.")
                    % (manager.__module__, type(manager).__name__))
        LOG.exception(e)
def check(criterion_value, result):
    """SLA check: share of failed iterations must not exceed the limit."""
    errors = sum(1 for x in result if x['error'])
    # An empty result list is treated as a total (100%) failure.
    if result:
        error_rate = errors * 100.0 / len(result)
    else:
        error_rate = 100.0
    success = error_rate <= criterion_value
    msg = (_("Maximum failure percent %s%% failures, actually %s%%") %
           (criterion_value * 100.0, error_rate))
    return SLAResult(success, msg)
def validate_semantic(cls, config, admin, users, task):
    """Check if the image service is available."""
    try:
        # limit=0 keeps the probe cheap; any response proves availability.
        images = users[0].glance().images.list(limit=0)
        list(images)
    except Exception as e:
        reason = six.text_type(e)
        message = _(
            "The image service is unavailable, Reason: %(reason)s") % {
            "reason": reason}
        raise exceptions.InvalidScenarioArgument(message)
def status(self, task_id=None):
    """Display current status of task.

    :param task_id: Task uuid
    Returns current status of task
    """
    task = db.task_get(task_id)
    print(_("Task %(task_id)s is %(status)s.")
          % {"task_id": task_id, "status": task["status"]})
def validate(cls, config, non_hidden=False):
    """Fail if config names cleanup resources that are not registered."""
    super(UserCleanup, cls).validate(config, non_hidden)
    available = manager.list_resource_names(admin_required=False)
    missing = ", ".join(set(config) - available)
    if missing:
        LOG.info(_("Couldn't find cleanup resource managers: %s")
                 % missing)
        raise NoSuchCleanupResources(missing)
def list(self, deployment=None, all_deployments=False, status=None):
    """List tasks, started and finished.

    Displayed tasks could be filtered by status or deployment.
    By default 'rally task list' will display tasks from active
    deployment without filtering by status.
    :param deployment: UUID or name of deployment
    :param status: task status to filter by.
        Available task statuses are in rally.consts.TaskStatus
    :param all_deployments: display tasks from all deployments
    """
    headers = ["uuid", "deployment_name", "created_at", "status",
               "failed", "tag"]

    filters = {}
    if status in consts.TaskStatus:
        filters["status"] = status
    elif status is not None:
        print(_("Error: Invalid task status '%s'.\n"
                "Available statuses: %s") % (
            status, ", ".join(consts.TaskStatus)))
        return 1
    if not all_deployments:
        filters["deployment"] = deployment

    task_list = objects.Task.list(**filters)
    if not task_list:
        if status:
            print(_("There are no tasks in '%s' status. "
                    "To run a new task, use:"
                    "\trally task start") % status)
        else:
            print(_("There are no tasks. To run a new task, use:"
                    "\trally task start"))
        return

    common_cliutils.print_list(
        map(lambda x: x.to_dict(), task_list), headers,
        sortby_index=headers.index('created_at'))
def check(criterion_value, result):
    """SLA check: no iteration may run longer than ``criterion_value``."""
    longest = 0
    success = True
    for iteration in result:
        if iteration['duration'] >= longest:
            longest = iteration['duration']
        if iteration['duration'] > criterion_value:
            success = False
    msg = (_("Maximum seconds per iteration %ss, found with %ss") %
           (criterion_value, longest))
    return SLAResult(success, msg)
def required_parameters(config, clients, task, *required_params):
    """Validator for checking required parameters are specified.

    :param config: benchmark config (its "args" section is inspected)
    :param required_params: names that must be present in config["args"]
    """
    missing = set(required_params) - set(config.get("args", {}))
    if missing:
        # Sort the names so the error message is deterministic (set
        # iteration order would otherwise vary between runs).
        message = _("%s parameters are not defined in "
                    "the benchmark config file") % ", ".join(sorted(missing))
        return ValidationResult(False, message)
    return ValidationResult()
def wait(self, timeout=120, interval=1):
    """Wait for the host will be available via ssh."""
    deadline = time.time() + timeout
    while True:
        try:
            return self.execute('uname')
        except (socket.error, SSHError) as e:
            LOG.debug('Ssh is still unavailable: %r' % e)
            time.sleep(interval)
        if time.time() > deadline:
            raise SSHTimeout(_('Timeout waiting for "%s"') % self.host)
def get_engine(name, deployment):
    """Returns instance of a deploy engine with corresponding name."""
    try:
        engine_cls = EngineFactory.get_by_name(name)
    except exceptions.NoSuchEngine:
        LOG.error(_('Deployment %(uuid)s: Deploy engine for %(name)s '
                    'does not exist.')
                  % {'uuid': deployment['uuid'], 'name': name})
        deployment.update_status(consts.DeployStatus.DEPLOY_FAILED)
        raise exceptions.NoSuchEngine(engine_name=name)
    return engine_cls(deployment)
def list(self):
    """Display all verifications table, started and finished."""
    fields = ["UUID", "Deployment UUID", "Set name", "Tests",
              "Failures", "Created at", "Status"]
    verifications = db.verification_list()
    if not verifications:
        print(_("There are no results from verifier. To run a verifier, "
                "use:\nrally verify start"))
        return
    common_cliutils.print_list(verifications, fields,
                               sortby_index=fields.index("Created at"))
def tempest_set_exists(config, clients, task):
    """Validator that check that tempest set_name is valid."""
    set_name = config.get("args", {}).get("set_name")
    if not set_name:
        return ValidationResult(False, "`set_name` is not specified.")
    if set_name in consts.TEMPEST_TEST_SETS:
        return ValidationResult()
    return ValidationResult(
        False, _("There is no tempest set with name '%s'.") % set_name)