def __init__(self, deployment):
    """Prepare a Tempest configuration helper for *deployment*.

    Verifies that the deployment's admin credential really has the admin
    role, loads the bundled ``config.ini`` template and makes sure the
    cirros image exists locally (downloading it if necessary).

    :param deployment: deployment UUID or name to configure Tempest for
    :raises TempestConfigCreationFailure: if the user lacks admin rights
    """
    self.endpoint = db.deployment_get(deployment)["admin"]
    self.clients = osclients.Clients(objects.Endpoint(**self.endpoint))
    try:
        self.keystoneclient = self.clients.verified_keystone()
    except exceptions.InvalidAdminException:
        message = (_("Admin permission is required to generate tempest "
                     "configuration file. User %s doesn't have admin role.")
                   % self.endpoint["username"])
        raise TempestConfigCreationFailure(message)
    self.available_services = self.clients.services().values()

    # Start from the config.ini template shipped next to this module.
    self.conf = configparser.ConfigParser()
    self.conf.read(os.path.join(os.path.dirname(__file__), "config.ini"))
    self.deployment = deployment

    # Local data dir (~/.rally/tempest/data) holds the cirros image.
    self.data_path = os.path.join(os.path.expanduser("~"), ".rally",
                                  "tempest", "data")
    if not os.path.exists(self.data_path):
        os.makedirs(self.data_path)
    self.img_path = os.path.join(self.data_path, CONF.image.cirros_image)
    if not os.path.isfile(self.img_path):
        self._load_img()
def test_cached(self):
    """Client instances are cached per distinct call arguments."""
    clients = osclients.Clients(mock.MagicMock())
    client_name = "CachedTestCase.test_cached"
    fake_client = osclients.configure(client_name)(
        osclients.OSClient(clients.credential, clients.api_info,
                           clients.cache))
    fake_client.create_client = mock.MagicMock()

    self.assertEqual({}, clients.cache)

    # First call populates the cache under the plain client name.
    fake_client()
    self.assertEqual({client_name: fake_client.create_client.return_value},
                     clients.cache)
    fake_client.create_client.assert_called_once_with()

    # Second identical call is served from the cache — no new creation.
    fake_client()
    fake_client.create_client.assert_called_once_with()

    # A call with arguments gets its own cache key.
    fake_client("2")
    self.assertEqual(
        {client_name: fake_client.create_client.return_value,
         "%s('2',)" % client_name: fake_client.create_client.return_value},
        clients.cache)

    # clear() empties the cache entirely.
    clients.clear()
    self.assertEqual({}, clients.cache)
def _user_setup():
    """Create a router for the first user and attach the user subnet.

    Failures are logged (not raised) so setup degrades gracefully.
    """
    try:
        neutron = osclients.Clients(
            self.context["users"][0]["credential"]).neutron()
        self.context["user_router"] = neutron.create_router(
            body={"router": self.config})["router"]["id"]
        sleep(10)  # crude wait for the router to become ready
        # if subnet has created, add_interface
        if self.context.get("user_subnet"):
            neutron.add_interface_router(
                self.context["user_router"],
                {"subnet_id": self.context["user_subnet"]})
            sleep(10)  # crude wait for the interface attachment
        LOG.debug("User Router with id {0}".format(
            self.context["user_router"]))
    except Exception as e:
        msg = "Can't create Route: %s" % e
        if logging.is_debug():
            LOG.exception(msg)
        else:
            LOG.warning(msg)
def preprocess(name, context, args):
    """Run preprocessor on scenario arguments.

    :param name: Plugin name
    :param context: dictionary object that must have admin and credential
                    entries
    :param args: args section of benchmark specification in rally task file
    :returns processed_args: dictionary object with additional client
                             and resource configuration
    """
    preprocessors = scenario.Scenario.get(name)._meta_get(
        "preprocessors", default={})
    clients = osclients.Clients(context["admin"]["credential"])
    processed_args = copy.deepcopy(args)

    # Replace each configured argument with its preprocessed form;
    # arguments without a value (or falsy) are left untouched.
    for arg_name, preproc in preprocessors.items():
        cfg = processed_args.get(arg_name)
        if cfg:
            processed_args[arg_name] = preproc.transform(
                clients=clients, resource_config=cfg)
    return processed_args
def _remove_associated_networks(self):
    """Delete associated Nova networks from tenants."""
    # NOTE(rmk): Ugly hack to deal with the fact that Nova Network
    # networks can only be disassociated in an admin context. Discussed
    # with boris-42 before taking this approach [LP-Bug #1350517].
    clients = osclients.Clients(self.endpoint)
    if consts.Service.NOVA not in clients.services().values():
        return

    nova_admin = clients.nova()
    if not utils.check_service_status(nova_admin, "nova-network"):
        return

    for network in nova_admin.networks.list():
        owner_tenant = nova_admin.networks.get(network).project_id
        if owner_tenant not in self.context["tenants"]:
            continue
        try:
            nova_admin.networks.disassociate(network)
        except Exception as ex:
            # Best-effort cleanup: log and keep going.
            LOG.warning("Failed disassociate net: %(tenant_id)s. "
                        "Exception: %(ex)s" % {"tenant_id": owner_tenant,
                                               "ex": ex})
def secgroups(self, deploy_id=None):
    """Display security groups."""
    headers = ['ID', 'Name', 'Description']
    mixed_case_fields = ['ID', 'Name', 'Description']
    table_rows = []
    try:
        for endpoint_dict in self._get_endpoints(deploy_id):
            nova_client = osclients.Clients(
                endpoint.Endpoint(**endpoint_dict)).nova()
            for secgroup in nova_client.security_groups.list():
                row = dict(zip(headers, [secgroup.id,
                                         secgroup.name,
                                         secgroup.description]))
                table_rows.append(utils.Struct(**row))
            common_cliutils.print_list(
                table_rows,
                fields=headers,
                mixed_case_fields=mixed_case_fields)
    except exceptions.InvalidArgumentsException as e:
        print(_("Authentication Issues: %s") % e)
        return(1)
def __init__(self, deploy_id):
    """Prepare a Tempest runner for the given deployment.

    Checks that the first endpoint has admin rights, collects the list
    of available services from the keystone catalog, loads the bundled
    ``config.ini`` and ensures the cirros image is downloaded locally.

    :param deploy_id: UUID of the deployment to verify
    :raises exceptions.TempestConfigCreationFailure: if the configured
        user does not have the admin role
    """
    self.endpoint = db.deployment_get(deploy_id)['endpoints'][0]
    self.clients = osclients.Clients(endpoint.Endpoint(**self.endpoint))
    try:
        self.keystoneclient = self.clients.verified_keystone()
    except exceptions.InvalidAdminException:
        message = _('Admin permission is required to run tempest. User %s '
                    'doesn\'t have admin role') % self.endpoint['username']
        raise exceptions.TempestConfigCreationFailure(message=message)

    # Service names come straight from the keystone service catalog.
    self.available_services = [
        service['name']
        for service in self.keystoneclient.service_catalog.get_data()
    ]

    self.conf = configparser.ConfigParser()
    self.conf.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
    self.deploy_id = deploy_id
    self.data_path = os.path.join(os.path.expanduser('~'), '.rally',
                                  'tempest', 'data')
    if not os.path.exists(self.data_path):
        os.makedirs(self.data_path)
    self.img_path = os.path.join(self.data_path, CONF.image.cirros_image)
    if not os.path.isfile(self.img_path):
        self._load_img()
def _create_tenant_users(cls, args):
    """Create tenant with users and their endpoints.

    This is suitable for using with pool of threads.

    :param args: tuple arguments, for Pool.imap()
    :returns: tuple (dict tenant, list users)
    """
    admin_endpoint, users_num, task_id, i = args
    users = []
    client = osclients.Clients(admin_endpoint).keystone()

    tenant = client.tenants.create(
        cls.PATTERN_TENANT % {"task_id": task_id, "iter": i})
    LOG.debug("Creating %d users for tenant %s" % (users_num, tenant.id))

    for uid in range(users_num):
        username = cls.PATTERN_USER % {"tenant_id": tenant.id, "uid": uid}
        user = client.users.create(username, "password",
                                   "*****@*****.**" % username, tenant.id)
        user_endpoint = endpoint.Endpoint(client.auth_url, user.name,
                                          "password", tenant.name,
                                          consts.EndpointPermission.USER,
                                          client.region_name)
        users.append({"id": user.id,
                      "endpoint": user_endpoint,
                      "tenant_id": tenant.id})

    return ({"id": tenant.id, "name": tenant.name}, users)
def setup(self):
    """Prepare Sahara inputs and job binaries, once per tenant."""
    input_type = self.config["input_type"]
    input_url = self.config["input_url"]
    mains = self.config.get("mains", [])
    libs = self.config.get("libs", [])

    ready_tenants = set()
    for user in self.context.get("users", []):
        tenant_id = user["tenant_id"]
        # Several users can share a tenant; set it up only once.
        if tenant_id in ready_tenants:
            continue
        ready_tenants.add(tenant_id)

        sahara = osclients.Clients(user["endpoint"]).sahara()
        self.setup_inputs(sahara, tenant_id, input_type, input_url)
        self.context["sahara_mains"][tenant_id] = []
        self.context["sahara_libs"][tenant_id] = []

        for main in mains:
            self.download_and_save_lib(
                sahara=sahara, lib_type="sahara_mains",
                name=main["name"],
                download_url=main["download_url"],
                tenant_id=tenant_id)
        for lib in libs:
            self.download_and_save_lib(
                sahara=sahara, lib_type="sahara_libs",
                name=lib["name"],
                download_url=lib["download_url"],
                tenant_id=tenant_id)
def check(self, deployment=None):
    """Check keystone authentication and list all available services.

    :param deployment: a UUID or name of the deployment
    """
    headers = ["services", "type", "status"]
    table_rows = []

    try:
        deployment = api.Deployment.get(deployment)
    except exceptions.DeploymentNotFound:
        print(_("Deployment %s is not found.") % deployment)
        return(1)

    try:
        services = api.Deployment.service_list(deployment)
        users = deployment["users"]
        # Authenticate every user endpoint against keystone.
        for endpoint_dict in users:
            osclients.Clients(objects.Endpoint(**endpoint_dict)).keystone()
    except keystone_exceptions.ConnectionRefused:
        print(_("Unable to connect %s.") % deployment["admin"]["auth_url"])
        return(1)
    except exceptions.InvalidArgumentsException:
        table_rows.append(utils.Struct(**dict(
            zip(headers, ["keystone", "identity", "Error"]))))
        print(_("Authentication Issues: %s.") % sys.exc_info()[1])
        return(1)

    for serv_type, serv in services.items():
        table_rows.append(utils.Struct(**dict(
            zip(headers, [serv, serv_type, "Available"]))))
    print(_("keystone endpoints are valid and following"
            " services are available:"))
    cliutils.print_list(table_rows, headers)
def setup(self):
    """Create Sahara input data sources and job binaries per tenant."""
    self.context["sahara_output_conf"] = {
        "output_type": self.config["output_type"],
        "output_url_prefix": self.config["output_url_prefix"]
    }
    self.context["sahara_mains"] = {}
    self.context["sahara_libs"] = {}

    input_type = self.config["input_type"]
    input_url = self.config["input_url"]
    mains = self.config.get("mains", [])
    libs = self.config.get("libs", [])

    for user, tenant_id in rutils.iterate_per_tenants(
            self.context["users"]):
        sahara = osclients.Clients(user["endpoint"]).sahara()
        self.setup_inputs(sahara, tenant_id, input_type, input_url)
        self.context["tenants"][tenant_id]["sahara_mains"] = []
        self.context["tenants"][tenant_id]["sahara_libs"] = []

        # Mains and libs are downloaded the same way; only the
        # destination list (lib_type) differs.
        for lib_type, entries in (("sahara_mains", mains),
                                  ("sahara_libs", libs)):
            for entry in entries:
                self.download_and_save_lib(
                    sahara=sahara,
                    lib_type=lib_type,
                    name=entry["name"],
                    download_url=entry["download_url"],
                    tenant_id=tenant_id)
def setup(self):
    """Populate users/tenants context from preconfigured credentials."""
    self.context["users"] = []
    self.context["tenants"] = {}

    for user_cfg in self.config:
        credential = objects.Credential(**user_cfg)
        kclient = osclients.Clients(credential).keystone()

        # Resolve names to ids via keystone.
        user_name = kclient.username
        tenant_name = kclient.project_name
        user_id = kclient.get_user_id(user_name)
        tenant_id = kclient.get_project_id(tenant_name)

        if tenant_id not in self.context["tenants"]:
            self.context["tenants"][tenant_id] = {"id": tenant_id,
                                                  "name": tenant_name}
        self.context["users"].append({"credential": credential,
                                      "id": user_id,
                                      "tenant_id": tenant_id})
def setup(self):
    """Fill the context with pre-existing users and their tenants."""
    super(ExistingUsers, self).setup()
    self.context["users"] = []
    self.context["tenants"] = {}
    self.context["user_choice_method"] = "random"

    for user_cfg in self.config:
        credential = objects.Credential(**user_cfg)
        user_clients = osclients.Clients(credential)

        # Ids come from the authenticated token, not from extra lookups.
        user_id = user_clients.keystone.auth_ref.user_id
        tenant_id = user_clients.keystone.auth_ref.project_id

        if tenant_id not in self.context["tenants"]:
            self.context["tenants"][tenant_id] = {
                "id": tenant_id,
                "name": credential.tenant_name
            }
        self.context["users"].append({"credential": credential,
                                      "id": user_id,
                                      "tenant_id": tenant_id})
def setup(self):
    """Download per-tenant Sahara main and lib job binaries."""
    utils.init_sahara_context(self)
    for user, tenant_id in rutils.iterate_per_tenants(
            self.context["users"]):
        sahara = osclients.Clients(user["credential"]).sahara()
        self.context["tenants"][tenant_id]["sahara"]["mains"] = []
        self.context["tenants"][tenant_id]["sahara"]["libs"] = []

        # "mains" and "libs" config keys map directly onto lib_type.
        for lib_type in ("mains", "libs"):
            for entry in self.config.get(lib_type, []):
                self.download_and_save_lib(
                    sahara=sahara,
                    lib_type=lib_type,
                    name=entry["name"],
                    download_url=entry["download_url"],
                    tenant_id=tenant_id)
def setup(self):
    """This method is called before the task start.

    Ensures every user has EC2 credentials (creating them when missing)
    and stores the connection arguments in ``user['ec2args']``. When the
    deployment uses Neutron, it additionally lifts router/floating-ip
    quotas and creates one routed network per tenant.

    Any failure is logged and swallowed so the task can proceed.
    """
    try:
        for user in self.context['users']:
            clients = osclients.Clients(user['credential'])
            keystone = clients.keystone
            creds = keystone().ec2.list(user['id'])
            if not creds:
                creds = keystone().ec2.create(user['id'],
                                              user['tenant_id'])
            else:
                creds = creds[0]
            url = keystone.service_catalog.url_for(service_type='ec2')
            user['ec2args'] = {
                'region': 'RegionOne',
                'url': url,
                'access': creds.access,
                'secret': creds.secret
            }

        if self.net_wrapper.SERVICE_IMPL == consts.Service.NEUTRON:
            for user, tenant_id in rutils.iterate_per_tenants(
                    self.context["users"]):
                # Unlimited routers/floating IPs for each tenant.
                body = {"quota": {"router": -1, "floatingip": -1}}
                self.net_wrapper.client.update_quota(tenant_id, body)
                network = self.net_wrapper.create_network(tenant_id,
                                                          add_router=True,
                                                          subnets_num=1)
                self.context["tenants"][tenant_id]["network"] = network
    except Exception as e:
        # BUG FIX: the original used "e.message", which does not exist on
        # Python 3 and would raise AttributeError inside the handler;
        # formatting the exception itself works on both Python 2 and 3.
        msg = "Can't prepare ec2 client: %s" % e
        if logging.is_debug():
            LOG.exception(msg)
        else:
            LOG.warning(msg)
def setup(self):
    """Give every tenant a Sahara image id (existing or freshly built)."""
    self.context["sahara_images"] = {}

    # The user may want to use the existing image. In this case he should
    # make sure that the image is public and has all required metadata.
    image_uuid = self.config.get("image_uuid")
    self.context["need_sahara_image_cleanup"] = not image_uuid

    if image_uuid:
        # Using the first user to check the existing image.
        first_user = self.context["users"][0]
        glance = osclients.Clients(first_user["endpoint"]).glance()
        image = glance.images.get(image_uuid)
        if not image.is_public:
            raise exceptions.BenchmarkSetupFailure(
                "Image provided in the Sahara context should be public.")
        for user, tenant_id in rutils.iterate_per_tenants(
                self.context["users"]):
            self.context["tenants"][tenant_id]["sahara_image"] = image_uuid
    else:
        for user, tenant_id in rutils.iterate_per_tenants(
                self.context["users"]):
            image_id = self._create_image(
                hadoop_version=self.config["hadoop_version"],
                image_url=self.config["image_url"],
                plugin_name=self.config["plugin_name"],
                user=user,
                user_name=self.config["username"])
            self.context["tenants"][tenant_id]["sahara_image"] = image_id
def setup(self):
    """Create a v1 LBaaS pool on every subnet built by the network ctx."""
    net_wrapper = network_wrapper.wrap(
        osclients.Clients(self.context["admin"]["credential"]),
        self, config=self.config)

    use_lb, msg = net_wrapper.supports_extension("lbaas")
    if not use_lb:
        LOG.info(msg)
        return

    # Creates a lb-pool for every subnet created in network context.
    for user, tenant_id in (utils.iterate_per_tenants(
            self.context.get("users", []))):
        for network in self.context["tenants"][tenant_id]["networks"]:
            for subnet in network.get("subnets", []):
                if self.config["lbaas_version"] != 1:
                    # Only LBaaS v1 is supported by this context.
                    raise NotImplementedError(
                        "Context for LBaaS version %s not implemented."
                        % self.config["lbaas_version"])
                network.setdefault("lb_pools", []).append(
                    net_wrapper.create_v1_pool(tenant_id, subnet,
                                               **self.config["pool"]))
def consume(cache, args):
    """Create one user plus its endpoint, reusing a cached keystone client."""
    username, password, project_dom, user_dom, tenant_id = args
    if "client" not in cache:
        # One wrapped keystone client per worker, built lazily.
        cache["client"] = keystone.wrap(
            osclients.Clients(self.endpoint).keystone())
    client = cache["client"]

    user = client.create_user(username, password,
                              "*****@*****.**" % username,
                              tenant_id, user_dom)
    user_endpoint = objects.Endpoint(
        client.auth_url, user.name, password,
        self.context["tenants"][tenant_id]["name"],
        consts.EndpointPermission.USER, client.region_name,
        project_domain_name=project_dom,
        user_domain_name=user_dom,
        endpoint_type=self.endpoint.endpoint_type)
    users.append({"id": user.id,
                  "endpoint": user_endpoint,
                  "tenant_id": tenant_id})
def setup(self):
    """Create a neutron LB pool for the first user; log-and-skip on error."""
    try:
        neutron = osclients.Clients(
            self.context['users'][0]['credential']).neutron()
        pool_spec = {
            "name": self.config.get('name', 'test-pool'),
            "lb_method": self.config.get('lb_method', 'ROUND_ROBIN'),
            "protocol": self.config.get('protocol', "HTTP"),
            "subnet_id": self.config.get('subnet_id',
                                         self.context['user_subnet'])
        }
        self.context['pool'] = neutron.create_pool(
            body={"pool": pool_spec})['pool']['id']
        LOG.debug("LbPool with id '%s'" % self.context['pool'])
    except Exception as e:
        msg = "Can't create pool: %s" % e
        if logging.is_debug():
            LOG.exception(msg)
        else:
            LOG.warning(msg)
def setup(self):
    """Create list of flavors."""
    self.context["flavors"] = {}
    clients = osclients.Clients(self.context["admin"]["endpoint"])

    for raw_config in self.config:
        extra_specs = raw_config.get("extra_specs")
        flavor_config = FlavorConfig(**raw_config)
        try:
            flavor = clients.nova().flavors.create(**flavor_config)
        except nova_exceptions.Conflict as e:
            # A same-named flavor already exists — reuse it silently.
            LOG.warning("Using already existing flavor %s"
                        % flavor_config["name"])
            if logging.is_debug():
                LOG.exception(e)
            continue

        if extra_specs:
            flavor.set_keys(extra_specs)
        self.context["flavors"][flavor_config["name"]] = flavor.to_dict()
        LOG.debug("Created flavor with id '%s'" % flavor.id)
def setup(self):
    """Prepare the Sahara output destination (Swift container or HDFS)."""
    utils.init_sahara_context(self)
    for user, tenant_id in rutils.iterate_per_tenants(
            self.context["users"]):
        clients = osclients.Clients(user["credential"])
        sahara = clients.sahara()

        if self.config["output_type"] != "swift":
            self.setup_outputs_hdfs(sahara, tenant_id,
                                    self.config["output_url_prefix"])
            continue

        # Swift output: one container per tenant.
        swift = swift_utils.SwiftScenario(clients=clients,
                                          context=self.context)
        container_name = self.generate_random_name()
        self.context["tenants"][tenant_id]["sahara"]["container"] = {
            "name": swift._create_container(
                container_name=container_name),
            "output_swift_objects": []
        }
        self.setup_outputs_swift(swift, sahara, tenant_id,
                                 container_name,
                                 user["credential"].username,
                                 user["credential"].password)
def _get_public_network_id(self):
    """Return the id of the first external ("public") neutron network."""
    neutron = osclients.Clients(
        self.context["admin"]["credential"]).neutron()
    externals = neutron.list_networks(
        **{"router:external": True})["networks"]
    return externals[0]["id"]
def consume(cache, user_id):
    """Delete one user, building the wrapped keystone client lazily."""
    if "client" not in cache:
        cache["client"] = keystone.wrap(
            osclients.Clients(self.endpoint).keystone())
    cache["client"].delete_user(user_id)
def consume(cache, tenant_id):
    """Delete one project, building the wrapped keystone client lazily."""
    if "client" not in cache:
        cache["client"] = keystone.wrap(
            osclients.Clients(self.endpoint).keystone())
    cache["client"].delete_project(tenant_id)
def consume(cache, resource_id):
    """Apply the deletion function named by func_name to one resource."""
    if "client" not in cache:
        cache["client"] = identity.Identity(
            osclients.Clients(self.credential))
    getattr(cache["client"], func_name)(resource_id)
def clients(self, api_info=None):
    """Build an ``osclients.Clients`` bound to this credential.

    :param api_info: optional per-service API version information
    :returns: a Clients instance sharing this object's client cache
    """
    return osclients.Clients(self, api_info=api_info,
                             cache=self._clients_cache)
def _validate_config_semantic(self, config):
    """Semantically validate the task config against real credentials.

    Groups every workload by the platform of its scenario plugin, then
    for each platform builds a "users" (or "existing_users") context and
    runs the semantic validation helper over the workloads in it.
    """
    # map workloads to platforms
    platforms = collections.defaultdict(list)
    for subtask in config.subtasks:
        for workload in subtask.workloads:
            # TODO(astudenov): We need to use a platform validator
            # in future to identify what kind of users workload
            # requires (regular users or admin)
            scenario_cls = scenario.Scenario.get(workload.name)
            namespace = scenario_cls.get_namespace()
            platforms[namespace].append(workload)
    # FIXME(astudenov): currently there is no credentials for
    # namespace 'default', thus 'openstack' is used as a workaround
    if "default" in platforms:
        default_workloads = platforms.pop("default")
        platforms["openstack"].extend(default_workloads)
    for platform, workloads in platforms.items():
        creds = self.deployment.get_credentials_for(platform)
        admin = objects.Credential(**creds["admin"])
        # TODO(astudenov): move this check to validator of Credential
        if platform == "openstack":
            # Local import avoids pulling osclients for non-openstack runs.
            from rally import osclients
            clients = osclients.Clients(admin)
            clients.verified_keystone()
        # Workloads that can run on pre-existing users are validated with
        # the "existing_users" context; the rest get a fresh "users" ctx.
        workloads_with_users = []
        workloads_with_existing_users = []
        for workload in workloads:
            if creds["users"] and "users" not in workload.context:
                workloads_with_existing_users.append(workload)
            else:
                workloads_with_users.append(workload)
        if workloads_with_users:
            ctx_conf = {"task": self.task,
                        "admin": {"credential": admin}}
            user_context = context.Context.get(
                "users", namespace=platform)(ctx_conf)
            self._validate_config_semantic_helper(
                admin, user_context, workloads_with_users,
                self.deployment)
        if workloads_with_existing_users:
            ctx_conf = {"task": self.task,
                        "config": {"existing_users": creds["users"]}}
            # NOTE(astudenov): allow_hidden=True is required
            # for openstack existing_users context
            user_context = context.Context.get(
                "existing_users", namespace=platform,
                allow_hidden=True)(ctx_conf)
            self._validate_config_semantic_helper(
                admin, user_context, workloads_with_existing_users,
                self.deployment)
def main():
    """Run the rally-verify gate job end to end and render its report."""
    parser = argparse.ArgumentParser(description="Launch rally-verify job.")
    parser.add_argument(
        "--mode",
        type=str,
        default="light",
        help="Mode of job. The 'full' mode corresponds to the full set of "
             "Tempest tests. The 'light' mode corresponds to the smoke set "
             "of Tempest tests.",
        choices=MODES_PARAMETERS.keys())
    parser.add_argument("--compare", action="store_true",
                        help="Launch 2 verifications and compare them.")
    parser.add_argument(
        "--ctx-create-resources", action="store_true",
        help="Make Tempest context create needed resources for the tests.")
    args = parser.parse_args()

    if not os.path.exists("%s/extra" % BASE_DIR):
        os.makedirs("%s/extra" % BASE_DIR)

    # Check deployment
    call_rally("deployment use --deployment devstack", print_output=True)
    call_rally("deployment check", print_output=True)

    # Flatten the deployment config into plain credential kwargs.
    config = json.loads(
        subprocess.check_output(["rally", "deployment", "config"]))
    config.update(config.pop("admin"))
    del config["type"]
    clients = osclients.Clients(objects.Credential(**config))

    if args.ctx_create_resources:
        # If the 'ctx-create-resources' arg is provided, delete images and
        # flavors, and also create a shared network to make Tempest context
        # create needed resources.
        LOG.info("The 'ctx-create-resources' arg is provided. Deleting "
                 "images and flavors, and also creating a shared network "
                 "to make Tempest context create needed resources.")
        LOG.info("Deleting images.")
        for image in clients.glance().images.list():
            clients.glance().images.delete(image.id)
        LOG.info("Deleting flavors.")
        for flavor in clients.nova().flavors.list():
            clients.nova().flavors.delete(flavor.id)
        LOG.info("Creating a shared network.")
        tenant_name = clients.keystone().tenant_name
        tenant_id = clients.keystone().get_project_id(tenant_name)
        net_body = {
            "network": {
                "name": "shared-net-%s" % str(uuid.uuid4()),
                "tenant_id": tenant_id,
                "shared": True
            }
        }
        clients.neutron().create_network(net_body)
    else:
        # Otherwise, just in case create only flavors with the following
        # properties: RAM = 64MB and 128MB, VCPUs = 1, disk = 0GB to make
        # Tempest context discover them.
        LOG.info("The 'ctx-create-resources' arg is not provided. "
                 "Creating flavors to make Tempest context discover them.")
        for flv_ram in [64, 128]:
            params = {
                "name": "flavor-%s" % str(uuid.uuid4()),
                "ram": flv_ram,
                "vcpus": 1,
                "disk": 0
            }
            LOG.info("Creating flavor '%s' with the following properties: "
                     "RAM = %dMB, VCPUs = 1, disk = 0GB"
                     % (params["name"], flv_ram))
            clients.nova().flavors.create(**params)

    render_vars = {"verifications": []}

    # Install the latest Tempest version
    render_vars["install"] = call_rally("verify install")
    # Get Rally deployment ID
    rally_deployment_id = subprocess.check_output(
        "rally deployment list | awk '/devstack/ {print $2}'",
        shell=True, stderr=subprocess.STDOUT)
    # Get the penultimate Tempest commit ID
    # NOTE(review): the two shell fragments below concatenate with no
    # separator between "for-deployment-%s " and "git log" — this looks
    # like a missing "&&" or ";"; verify against the original job script.
    tempest_commit_id = subprocess.check_output(
        "cd /home/jenkins/.rally/tempest/for-deployment-%s "
        "git log --skip 1 -n 1 | awk '/commit/ {print $2}' | head -1"
        % rally_deployment_id,
        shell=True, stderr=subprocess.STDOUT).strip()
    # Install the penultimate Tempest version
    render_vars["reinstall"] = call_rally("verify reinstall --version %s"
                                          % tempest_commit_id)
    # Install a simple Tempest plugin
    render_vars["installplugin"] = call_rally(
        "verify installplugin --source %s" % TEMPEST_PLUGIN)
    # List installed Tempest plugins
    render_vars["listplugins"] = call_rally("verify listplugins")

    # Discover tests depending on Tempest suite
    discover_cmd = "verify discover"
    if args.mode == "light":
        discover_cmd += " --pattern smoke"
    render_vars["discover"] = call_rally(discover_cmd)

    # Generate and show Tempest config file
    render_vars["genconfig"] = call_rally("verify genconfig")
    render_vars["showconfig"] = call_rally("verify showconfig")

    # Create a file with a list of tests that are expected to fail
    xfails_file_path = create_file_with_xfails()

    # Launch verification
    launch_params = "%s --xfails-file %s" % (MODES_PARAMETERS[args.mode],
                                             xfails_file_path)
    render_vars["verifications"].append(
        launch_verification_once(launch_params))

    if args.compare:
        render_vars["verifications"].append(
            launch_verification_once(launch_params))
        render_vars["compare"] = do_compare(
            render_vars["verifications"][-2]["uuid"],
            render_vars["verifications"][-1]["uuid"])

    render_vars["list"] = call_rally("verify list")

    render_page(**render_vars)

    return _return_status
def _check_cloud(self):
    """Fail unless the admin credential authenticates with admin rights."""
    osclients.Clients(self.admin).verified_keystone()
def setUp(self):
    """Build a fixture endpoint and a Clients instance for each test."""
    super(OSClientsTestCase, self).setUp()
    self.endpoint = endpoint.Endpoint("http://auth_url", "use",
                                      "pass", "tenant")
    self.clients = osclients.Clients(self.endpoint)