def setUp(self):
    super(BaseTestCase, self).setUp()
    self.servers_keypairs = {}
    self.servers = {}
    self.members = []
    self.floating_ips = {}
    self.servers_floating_ips = {}
    self.server_ips = {}
    self.start_port = 80
    self.num = 50
    self.server_fixed_ips = {}

    self._create_security_group_for_test()
    self._set_net_and_subnet()

    mgr = self.get_client_manager()
    auth_provider = mgr.auth_provider
    region = config.network.region or config.identity.region
    self.client_args = [auth_provider, 'load-balancer', region]
    self.load_balancers_client = (
        load_balancers_client.LoadBalancersClient(*self.client_args))
    self.listeners_client = (
        listeners_client.ListenersClient(*self.client_args))
    self.pools_client = pools_client.PoolsClient(*self.client_args)
    self.members_client = members_client.MembersClient(*self.client_args)
    self.health_monitors_client = (
        health_monitors_client.HealthMonitorsClient(*self.client_args))

    # An admin network client is needed to assign the octavia port
    # to the floating IP.
    admin_manager = credentials_factory.AdminManager()
    admin_manager.auth_provider.fill_credentials()
    self.floating_ips_client_admin = admin_manager.floating_ips_client
    self.ports_client_admin = admin_manager.ports_client
def _remove_admin_role(self, tenant_id):
    LOG.debug("Remove admin user role for tenant: %s" % tenant_id)
    # Must initialize a fresh AdminManager for each user role;
    # otherwise an authentication exception is thrown.
    id_cl = credentials.AdminManager().identity_client
    if self._tenant_exists(tenant_id):
        try:
            id_cl.delete_user_role(tenant_id, self.admin_id,
                                   self.admin_role_id)
        except Exception as ex:
            LOG.exception("Failed removing role from tenant which "
                          "still exists, exception: %s" % ex)
def _get_network_id(net_name, project_name):
    am = credentials.AdminManager()
    net_cl = am.networks_client
    tn_cl = am.tenants_client

    networks = net_cl.list_networks()
    tenant = identity.get_tenant_by_name(tn_cl, project_name)
    t_id = tenant['id']
    n_id = None
    for net in networks['networks']:
        if net['tenant_id'] == t_id and net['name'] == net_name:
            n_id = net['id']
            break
    return n_id
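# A minimal usage sketch for _get_network_id above; the network and
# project names are hypothetical placeholders. None is returned when
# the project owns no network with the given name.
net_id = _get_network_id('private-net', 'demo-project')
if net_id is None:
    LOG.warning("No network 'private-net' owned by 'demo-project'")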
def __init__(self, project_id, user_id, service, extra_target_data=None):
    """Initialization of the RBAC Policy Parser.

    Parses a policy file to create a dictionary mapping policy actions
    to roles. If a policy file does not exist, checks whether the
    policy file is registered as a namespace under
    oslo.policy.policies. Nova, for example, doesn't use a policy.json
    file by default; its policy is implemented in code and registered
    as 'nova' under oslo.policy.policies.

    If the policy file is not found in either place, raises an
    exception.

    Additionally, if the policy file exists both in code and as a
    policy.json (for example, by creating a custom nova policy.json
    file), the custom policy file is prioritized over the default
    policy implementation.

    :param project_id: type uuid
    :param user_id: type uuid
    :param service: type string
    :param extra_target_data: type dictionary
    """
    if extra_target_data is None:
        extra_target_data = {}

    # First check if the service is valid.
    service = service.lower().strip() if service else None
    self.admin_mgr = credentials.AdminManager()
    services = self.admin_mgr.identity_services_v3_client.\
        list_services()['services']
    service_names = [s['name'] for s in services]
    if not service or not any(service in name for name in service_names):
        LOG.debug("%s is NOT a valid service.", str(service))
        raise rbac_exceptions.RbacInvalidService

    # Use the default path /etc/<service_name>/policy.json if no path
    # is provided.
    path = getattr(CONF.rbac, '%s_policy_file' % str(service), None)
    if not path:
        LOG.info("No config option found for %s, using default path",
                 str(service))
        path = os.path.join('/etc', service, 'policy.json')
    self.path = path

    self.rules = policy.Rules.load(self._get_policy_data(service),
                                   'default')
    self.project_id = project_id
    self.user_id = user_id
    self.extra_target_data = extra_target_data
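# A minimal usage sketch, assuming the class defining __init__ above
# is named RbacPolicyParser (the class name is not visible in this
# snippet) and that no <service>_policy_file config option is set, so
# the parser falls back to /etc/nova/policy.json.
parser = RbacPolicyParser(
    project_id='c9b6a3f0-...',   # hypothetical project uuid
    user_id='f2d4e1b8-...',      # hypothetical user uuid
    service='nova')
# parser.rules now maps policy actions to the oslo.policy checks
# loaded from the resolved policy file.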
def init(self, parsed_args):
    cleanup_service.init_conf()
    self.options = parsed_args
    self.admin_mgr = credentials.AdminManager()
    self.dry_run_data = {}
    self.json_data = {}

    self.admin_id = ""
    self.admin_role_id = ""
    self.admin_tenant_id = ""
    self._init_admin_ids()

    self.admin_role_added = []

    # Available services
    self.tenant_services = cleanup_service.get_tenant_cleanup_services()
    self.global_services = cleanup_service.get_global_cleanup_services()

    if parsed_args.init_saved_state:
        self._init_state()
        return

    self._load_json()
def _init_state(self):
    print("Initializing saved state.")
    data = {}
    self.global_services = cleanup_service.get_global_cleanup_services()
    self.admin_mgr = credentials.AdminManager()
    admin_mgr = self.admin_mgr
    kwargs = {'data': data,
              'is_dry_run': False,
              'saved_state_json': data,
              'is_preserve': False,
              'is_save_state': True}
    for service in self.global_services:
        svc = service(admin_mgr, **kwargs)
        svc.run()

    with open(SAVED_STATE_JSON, 'w+') as f:
        f.write(json.dumps(data, sort_keys=True,
                           indent=2, separators=(',', ': ')))
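# A hedged usage sketch for init() above: only the init_saved_state
# attribute is read in this snippet, so a bare namespace is enough to
# exercise the saved-state branch. The CleanupService name is a
# hypothetical stand-in for the class that defines init().
import argparse

parsed_args = argparse.Namespace(init_saved_state=True)
cleanup_cmd = CleanupService()   # hypothetical enclosing class
cleanup_cmd.init(parsed_args)    # writes SAVED_STATE_JSON and returns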
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """Workload driver. Executes an action function against a nova-cluster."""
    admin_manager = credentials.AdminManager()

    ssh_user = CONF.stress.target_ssh_user
    ssh_key = CONF.stress.target_private_key_path
    logfiles = CONF.stress.target_logfiles
    log_check_interval = int(CONF.stress.log_check_interval)
    default_thread_num = int(CONF.stress.default_thread_number_per_action)
    if logfiles:
        controller = CONF.stress.target_controller
        computes = _get_compute_nodes(controller, ssh_user, ssh_key)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
    skip = False
    for test in tests:
        for service in test.get('required_services', []):
            if not CONF.service_available.get(service):
                skip = True
                break
        if skip:
            break
        # TODO(andreaf) This has to be reworked to use the credential
        # provider interface. For now only tests marked as 'use_admin'
        # will work.
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            raise NotImplementedError('Non admin tests are not supported')
        for p_number in moves.xrange(test.get('threads',
                                              default_thread_num)):
            if test.get('use_isolated_tenants', False):
                username = data_utils.rand_name("stress_user")
                tenant_name = data_utils.rand_name("stress_tenant")
                password = "******"
                if CONF.identity.auth_version == 'v2':
                    identity_client = admin_manager.identity_client
                    projects_client = admin_manager.tenants_client
                    roles_client = admin_manager.roles_client
                    users_client = admin_manager.users_client
                    domains_client = None
                else:
                    identity_client = admin_manager.identity_v3_client
                    projects_client = admin_manager.projects_client
                    roles_client = admin_manager.roles_v3_client
                    users_client = admin_manager.users_v3_client
                    domains_client = admin_manager.domains_client
                domain = (identity_client.auth_provider.credentials.
                          get('project_domain_name', 'Default'))
                credentials_client = cred_client.get_creds_client(
                    identity_client, projects_client, users_client,
                    roles_client, domains_client,
                    project_domain_name=domain)
                project = credentials_client.create_project(
                    name=tenant_name, description=tenant_name)
                user = credentials_client.create_user(username, password,
                                                      project, "email")
                # Add roles specified in config file
                for conf_role in CONF.auth.tempest_roles:
                    credentials_client.assign_user_role(user, project,
                                                        conf_role)
                creds = credentials_client.get_credentials(user, project,
                                                           password)
                manager = clients.Manager(credentials=creds)

            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, max_runs, stop_on_error)

            kwargs = test.get('kwargs', {})
            test_run.setUp(**dict(six.iteritems(kwargs)))

            LOG.debug("calling Target Object %s" %
                      test_run.__class__.__name__)

            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0

            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic,))

            process = {'process': p,
                       'p_number': p_number,
                       'action': test_run.action,
                       'statistic': shared_statistic}

            processes.append(process)
            p.start()

    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    try:
        while True:
            if max_runs is None:
                remaining = end_time - time.time()
                if remaining <= 0:
                    break
            else:
                remaining = log_check_interval
                all_proc_term = True
                for process in processes:
                    if process['process'].is_alive():
                        all_proc_term = False
                        break
                if all_proc_term:
                    break

            time.sleep(min(remaining, log_check_interval))
            if stop_on_error:
                if any([True for proc in processes
                        if proc['statistic']['fails'] > 0]):
                    break
            if not logfiles:
                continue
            if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
                                  stop_on_error):
                had_errors = True
                break
    except KeyboardInterrupt:
        LOG.warning("Interrupted, going to print statistics and exit ...")

    if stop_on_error:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    terminate_all_processes()

    sum_fails = 0
    sum_runs = 0

    LOG.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        print("Process %d (%s): Run %d actions (%d failed)" % (
            process['p_number'],
            process['action'],
            process['statistic']['runs'],
            process['statistic']['fails']))
    print("Summary:")
    print("Run %d actions (%d failed)" % (sum_runs, sum_fails))

    if not had_errors and CONF.stress.full_clean_stack:
        LOG.info("cleaning up")
        cleanup.cleanup()
    if had_errors:
        return 1
    else:
        return 0
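# A hedged example of the test description consumed by
# stress_openstack() above. Only keys the driver actually reads are
# shown ('action', 'threads', 'use_admin', 'use_isolated_tenants',
# 'required_services', 'kwargs'); the dotted action path is an
# assumption modeled on tempest's stress actions.
example_tests = [{
    'action': 'tempest.stress.actions.server_create_destroy'
              '.ServerCreateDestroyTest',
    'threads': 4,
    'use_admin': True,   # non-admin tests raise NotImplementedError
    'use_isolated_tenants': False,
    'required_services': ['nova'],
    'kwargs': {},
}]
# Run the workload for five minutes, aborting on the first failure.
exit_code = stress_openstack(example_tests, duration=300,
                             stop_on_error=True)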
def cleanup():
    admin_manager = credentials.AdminManager()

    body = admin_manager.servers_client.list_servers(all_tenants=True)
    LOG.info("Cleanup::remove %s servers" % len(body['servers']))
    for s in body['servers']:
        try:
            admin_manager.servers_client.delete_server(s['id'])
        except Exception:
            pass

    for s in body['servers']:
        try:
            waiters.wait_for_server_termination(admin_manager.servers_client,
                                                s['id'])
        except Exception:
            pass

    keypairs = admin_manager.keypairs_client.list_keypairs()['keypairs']
    LOG.info("Cleanup::remove %s keypairs" % len(keypairs))
    for k in keypairs:
        try:
            admin_manager.keypairs_client.delete_keypair(k['name'])
        except Exception:
            pass

    secgrp_client = admin_manager.compute_security_groups_client
    secgrp = (secgrp_client.list_security_groups(all_tenants=True)
              ['security_groups'])
    secgrp_del = [grp for grp in secgrp if grp['name'] != 'default']
    LOG.info("Cleanup::remove %s security groups" % len(secgrp_del))
    for g in secgrp_del:
        try:
            secgrp_client.delete_security_group(g['id'])
        except Exception:
            pass

    admin_floating_ips_client = admin_manager.compute_floating_ips_client
    floating_ips = (admin_floating_ips_client.list_floating_ips()
                    ['floating_ips'])
    LOG.info("Cleanup::remove %s floating ips" % len(floating_ips))
    for f in floating_ips:
        try:
            admin_floating_ips_client.delete_floating_ip(f['id'])
        except Exception:
            pass

    users = admin_manager.users_client.list_users()['users']
    LOG.info("Cleanup::remove %s users" % len(users))
    for user in users:
        if user['name'].startswith("stress_user"):
            admin_manager.users_client.delete_user(user['id'])

    tenants = admin_manager.tenants_client.list_tenants()['tenants']
    LOG.info("Cleanup::remove %s tenants" % len(tenants))
    for tenant in tenants:
        if tenant['name'].startswith("stress_tenant"):
            admin_manager.tenants_client.delete_tenant(tenant['id'])

    # We have to delete snapshots first or volume deletion may block.
    snaps = admin_manager.snapshots_client.list_snapshots(
        all_tenants=True)['snapshots']
    LOG.info("Cleanup::remove %s snapshots" % len(snaps))
    for v in snaps:
        try:
            admin_manager.snapshots_client.\
                wait_for_snapshot_status(v['id'], 'available')
            admin_manager.snapshots_client.delete_snapshot(v['id'])
        except Exception:
            pass

    for v in snaps:
        try:
            admin_manager.snapshots_client.wait_for_resource_deletion(v['id'])
        except Exception:
            pass

    vols = admin_manager.volumes_client.list_volumes(
        params={"all_tenants": True})
    LOG.info("Cleanup::remove %s volumes" % len(vols))
    for v in vols:
        try:
            admin_manager.volumes_client.\
                wait_for_volume_status(v['id'], 'available')
            admin_manager.volumes_client.delete_volume(v['id'])
        except Exception:
            pass

    for v in vols:
        try:
            admin_manager.volumes_client.wait_for_resource_deletion(v['id'])
        except Exception:
            pass