def setUp(self):
    """Prepare one functional test run.

    Installs timeout / stdout / stderr / log-capture fixtures driven by
    the OS_* environment variables, resolves admin credentials through
    os-client-config, then builds the nova, glance, keystone and cinder
    clients plus a CLI client.  Flavor, image, network and
    neutron-detection results are memoized in the module-level CACHE so
    they are computed once per test run, not once per test.
    """
    super(ClientTestBase, self).setUp()

    # Optional per-test timeout from the environment; non-numeric or
    # non-positive values disable it.
    test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
    try:
        test_timeout = int(test_timeout)
    except ValueError:
        test_timeout = 0
    if test_timeout > 0:
        self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

    # stdout/stderr capture is opt-in ('True' or '1').
    if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
            os.environ.get('OS_STDOUT_CAPTURE') == '1'):
        stdout = self.useFixture(fixtures.StringStream('stdout')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
    if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
            os.environ.get('OS_STDERR_CAPTURE') == '1'):
        stderr = self.useFixture(fixtures.StringStream('stderr')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

    # Log capture is opt-out: enabled unless explicitly 'False'/'0'.
    if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
            os.environ.get('OS_LOG_CAPTURE') != '0'):
        self.useFixture(
            fixtures.LoggerFixture(nuke_handlers=False,
                                   format=self.log_format,
                                   level=None))

    # Collecting of credentials:
    #
    # Grab the cloud config from a user's clouds.yaml file.
    # First look for a functional_admin cloud, as this is a cloud
    # that the user may have defined for functional testing that has
    # admin credentials.
    # If that is not found, get the devstack config and override the
    # username and project_name to be admin so that admin credentials
    # will be used.
    #
    # Finally, fall back to looking for environment variables to support
    # existing users running these the old way. We should deprecate that
    # as tox 2.0 blanks out environment.
    #
    # TODO(sdague): while we collect this information in
    # tempest-lib, we do it in a way that's not available for top
    # level tests. Long term this probably needs to be in the base
    # class.
    openstack_config = os_client_config.config.OpenStackConfig()
    try:
        cloud_config = openstack_config.get_one_cloud('functional_admin')
    except os_client_config.exceptions.OpenStackConfigException:
        try:
            cloud_config = openstack_config.get_one_cloud(
                'devstack', auth=dict(username='******',
                                      project_name='admin'))
        except os_client_config.exceptions.OpenStackConfigException:
            try:
                cloud_config = openstack_config.get_one_cloud('envvars')
            except os_client_config.exceptions.OpenStackConfigException:
                cloud_config = None

    if cloud_config is None:
        raise NoCloudConfigException(
            "Could not find a cloud named functional_admin or a cloud"
            " named devstack. Please check your clouds.yaml file and"
            " try again.")
    auth_info = cloud_config.config['auth']

    user = auth_info['username']
    passwd = auth_info['password']
    self.project_name = auth_info['project_name']
    auth_url = auth_info['auth_url']
    user_domain_id = auth_info['user_domain_id']
    self.project_domain_id = auth_info['project_domain_id']

    # 'insecure' is optional in clouds.yaml; default to verifying TLS.
    if 'insecure' in cloud_config.config:
        self.insecure = cloud_config.config['insecure']
    else:
        self.insecure = False

    auth = identity.Password(username=user,
                             password=passwd,
                             project_name=self.project_name,
                             auth_url=auth_url,
                             project_domain_id=self.project_domain_id,
                             user_domain_id=user_domain_id)
    session = ksession.Session(auth=auth, verify=(not self.insecure))

    self.client = self._get_novaclient(session)

    self.glance = glanceclient.Client('2', session=session)

    # pick some reasonable flavor / image combo
    if "flavor" not in CACHE:
        CACHE["flavor"] = pick_flavor(self.client.flavors.list())
    if "image" not in CACHE:
        CACHE["image"] = pick_image(self.glance.images.list())
    self.flavor = CACHE["flavor"]
    self.image = CACHE["image"]

    if "network" not in CACHE:
        # The network listing below goes through a nova proxy API, so
        # temporarily clamp the client to microversion 2.35 if the
        # version under test is newer, and restore it afterwards.
        tested_api_version = self.client.api_version
        proxy_api_version = novaclient.api_versions.APIVersion('2.35')
        if tested_api_version > proxy_api_version:
            self.client.api_version = proxy_api_version
        try:
            # TODO(mriedem): Get the networks from neutron if using neutron
            networks = self.client.networks.list()
            # Keep track of whether or not there are multiple networks
            # available to the given tenant because if so, a specific
            # network ID has to be passed in on server create requests
            # otherwise the server POST will fail with a 409.
            CACHE['multiple_networks'] = len(networks) > 1
            CACHE["network"] = pick_network(networks)
        finally:
            self.client.api_version = tested_api_version
    self.network = CACHE["network"]
    self.multiple_networks = CACHE['multiple_networks']

    # create a CLI client in case we'd like to do CLI
    # testing. tempest.lib does this really weird thing where it
    # builds a giant factory of all the CLIs that it knows
    # about. Eventually that should really be unwound into
    # something more sensible.
    cli_dir = os.environ.get(
        'OS_NOVACLIENT_EXEC_DIR',
        os.path.join(os.path.abspath('.'), '.tox/functional/bin'))

    self.cli_clients = tempest.lib.cli.base.CLIClient(
        username=user,
        password=passwd,
        tenant_name=self.project_name,
        uri=auth_url,
        cli_dir=cli_dir,
        insecure=self.insecure)

    self.keystone = keystoneclient.Client(session=session,
                                          username=user,
                                          password=passwd)
    self.cinder = cinderclient.Client(auth=auth, session=session)

    if "use_neutron" not in CACHE:
        # check to see if we're running with neutron or not
        for service in self.keystone.services.list():
            if service.type == 'network':
                CACHE["use_neutron"] = True
                break
        else:
            # for/else: no 'network' service type found in the catalog
            CACHE["use_neutron"] = False
loader = loading.get_plugin_loader('password') auth = loader.load_from_options( auth_url=config.get('neutron', 'auth_url'), username=config.get('neutron', 'username'), password=config.get('neutron', 'password'), project_name=config.get('neutron', 'project_name'), project_domain_name=config.get('neutron', 'project_domain_name'), user_domain_name=config.get('neutron', 'user_domain_name')) sess = session.Session(auth=auth, verify=options.insecure) nova = client.Client('2.11', session=sess, endpoint_type='internal') # Wait until this host is listed in the service list for i in range(iterations): try: service_list = nova.services.list(binary='nova-compute') for entry in service_list: host = getattr(entry, 'host', '') zone = getattr(entry, 'zone', '') if host == my_host and zone != 'internal': LOG.info('Nova-compute service registered') sys.exit(0) LOG.info('Waiting for nova-compute service to register') except Exception: LOG.exception(
def test_not_found(self):
    """A stubbed 404 response must surface as exceptions.NotFound."""
    sess = client_session.Session()
    self.stub_url('GET', status_code=404)
    self.assertRaises(exceptions.NotFound, sess.get, self.TEST_URL)
def _get_keystone_session(**kwargs):
    """Build a keystone session, picking v2 or v3 auth by discovery.

    Consumes credential kwargs (cacert, cert/key, insecure, auth_url,
    project/user identifiers, token, password, timeout) and returns a
    keystone ``Session`` with a suitable auth plugin attached.

    NOTE(review): ``kwargs['token']`` is a direct lookup, so callers
    must always supply a ``token`` key (possibly None) -- a missing key
    raises KeyError here.
    """
    # TODO(fabgia): the heavy lifting here should be really done by Keystone.
    # Unfortunately Keystone does not support a richer method to perform
    # discovery and return a single viable URL. A bug against Keystone has
    # been filed: https://bugs.launchpad.net/python-keystoneclient/+bug/1330677

    # first create a Keystone session
    cacert = kwargs.pop('cacert', None)
    cert = kwargs.pop('cert', None)
    key = kwargs.pop('key', None)
    insecure = kwargs.pop('insecure', False)
    auth_url = kwargs.pop('auth_url', None)
    project_id = kwargs.pop('project_id', None)
    project_name = kwargs.pop('project_name', None)
    token = kwargs['token']
    timeout = kwargs.get('timeout')

    # insecure wins over any CA cert; otherwise verify against the CA
    # cert when given, or the default trust store.
    if insecure:
        verify = False
    else:
        verify = cacert or True

    if cert and key:
        # passing cert and key together is deprecated in favour of the
        # requests lib form of having the cert and key as a tuple
        cert = (cert, key)

    # create the keystone client session
    ks_session = session.Session(verify=verify, cert=cert, timeout=timeout)
    v2_auth_url, v3_auth_url = _discover_auth_versions(ks_session, auth_url)

    username = kwargs.pop('username', None)
    user_id = kwargs.pop('user_id', None)
    user_domain_name = kwargs.pop('user_domain_name', None)
    user_domain_id = kwargs.pop('user_domain_id', None)
    project_domain_name = kwargs.pop('project_domain_name', None)
    project_domain_id = kwargs.pop('project_domain_id', None)

    # When a v3 endpoint exists, fill in 'default' for any domain field
    # the caller omitted.  Because use_domain is computed *after* this
    # defaulting, a v3 endpoint always forces the v3 code paths below.
    if v3_auth_url:
        if not user_domain_id:
            user_domain_id = 'default'
        if not user_domain_name:
            user_domain_name = 'default'
        if not project_domain_id:
            project_domain_id = 'default'
        if not project_domain_name:
            project_domain_name = 'default'

    auth = None
    use_domain = (user_domain_id or
                  user_domain_name or
                  project_domain_id or
                  project_domain_name)
    use_v3 = v3_auth_url and (use_domain or (not v2_auth_url))
    use_v2 = v2_auth_url and not use_domain

    if use_v3 and token:
        auth = v3_auth.Token(
            v3_auth_url,
            token=token,
            project_name=project_name,
            project_id=project_id,
            project_domain_name=project_domain_name,
            project_domain_id=project_domain_id)
    elif use_v2 and token:
        auth = v2_auth.Token(
            v2_auth_url,
            token=token,
            tenant_id=project_id,
            tenant_name=project_name)
    elif use_v3:
        # the auth_url as v3 specified
        # e.g. http://no.where:5000/v3
        # Keystone will return only v3 as viable option
        auth = v3_auth.Password(
            v3_auth_url,
            username=username,
            password=kwargs.pop('password', None),
            user_id=user_id,
            user_domain_name=user_domain_name,
            user_domain_id=user_domain_id,
            project_name=project_name,
            project_id=project_id,
            project_domain_name=project_domain_name,
            project_domain_id=project_domain_id)
    elif use_v2:
        # the auth_url as v2 specified
        # e.g. http://no.where:5000/v2.0
        # Keystone will return only v2 as viable option
        auth = v2_auth.Password(
            v2_auth_url,
            username,
            kwargs.pop('password', None),
            tenant_id=project_id,
            tenant_name=project_name)
    else:
        raise exc.CommandError('Unable to determine the Keystone version '
                               'to authenticate with using the given '
                               'auth_url.')

    ks_session.auth = auth
    return ks_session
def user_dom_sync(auth, user_domain, dry_run=False, endpoint_override=None):
    """Synchronize keystone domains/projects/groups/users with a spec.

    ``user_domain`` maps domain names to dicts that may contain
    'projects', 'groups' and 'users' sub-dicts plus role assignments.
    Domains, projects and groups are reconciled serially; users are then
    reconciled in parallel by forking one child process per batch of 4
    users (max 32 in-flight children).

    NOTE(review): the ``dry_run`` parameter is accepted but never
    consulted in this function body -- confirm whether dry-run support
    was intended here.  Also note that missing 'users' keys are added to
    the caller's ``user_domain`` dict in place.
    """
    start = time.time()
    sess = keystone_session.Session(auth=auth)
    keystone = keystone_client.Client(session=sess,
                                      endpoint_override=endpoint_override)
    # The keystone remembers to the names, but in compares it is
    # case insensitive
    domain_to_id = requests.structures.CaseInsensitiveDict()
    # TODO: create tuple friendly insensitive dict,
    # or add lower attr to the thing we are adding
    domain_project_to_id = dict()  # lc enfornced on all attribute
    domain_group_to_id = dict()
    role_to_id = requests.structures.CaseInsensitiveDict()

    def get_role_id(role):
        # Create-on-miss cache for role name -> role id.
        if role not in role_to_id:
            r = keystone.roles.create(role)
            role_to_id[role] = r.id
            return r.id
        return role_to_id[role]

    # We do not expect high number of roles, so list all
    # BTW, failed to find the right api call to just see one named role
    initial_roles = keystone.roles.list()
    for role in initial_roles:
        role_to_id[role.name] = role.id

    # Pass 1 (serial): reconcile domains, projects and groups, and make
    # sure every role referenced by any user exists before forking.
    for domain_name, domain in user_domain.items():
        dom_lc_name = domain_name.lower()  # case insestive name
        assert domain_name not in domain_to_id
        dl = keystone.domains.list(name=domain_name)
        le = len(dl)
        desc = domain.get('Description', '')
        assert le < 2
        if le == 1:
            dom = dl[0]
            # Update when description differs or only the case of the
            # name differs (keystone compares names case-insensitively).
            if dom.description != desc or dom.name != domain_name:  # case
                keystone.domains.update(dom.id, name=domain_name,
                                        description=desc)
        else:
            dom = keystone.domains.create(name=domain_name,
                                          description=desc)
        dom_id = dom.id
        domain_to_id[dom_lc_name] = dom_id

        if 'projects' in domain:
            for project_name, project in domain['projects'].items():
                # TODO: parent_id respect
                desc = project.get('description', '')
                lc_proj_name = project_name.lower()
                assert (dom_lc_name,
                        lc_proj_name) not in domain_project_to_id
                pl = keystone.projects.list(domain=dom_id,
                                            name=project_name)
                le = len(pl)
                assert le < 2
                if le == 0:
                    p = keystone.projects.create(project_name, dom_id,
                                                 description=desc)
                else:
                    p = pl[0]
                    if p.description != desc or p.name != project_name:
                        keystone.projects.update(p.id, name=project_name,
                                                 domain=dom_id,
                                                 description=desc)
                domain_project_to_id[(dom_lc_name, lc_proj_name)] = p.id

        if 'groups' in domain:
            # TODO: group roles , after _all_ project is ready
            # (after this loop)
            # NOTE(review): this reads the leaked ``project`` loop
            # variable from the projects loop above; the value is dead
            # (overwritten by the group desc below) -- confirm and drop.
            desc = project.get('description', '')
            for group_name, group in domain['groups'].items():
                desc = group.get('description', '')
                lc_grp_name = group_name.lower()
                assert (dom_lc_name, lc_grp_name) not in domain_group_to_id
                gl = keystone.groups.list(domain=dom_id, name=group_name)
                le = len(gl)
                assert le < 2
                if le == 0:
                    g = keystone.groups.create(group_name, dom_id,
                                               description=desc)
                else:
                    g = gl[0]
                    if g.description != desc or g.name != group_name:
                        keystone.groups.update(g.id, name=group_name,
                                               domain=dom_id,
                                               description=desc)
                domain_group_to_id[(dom_lc_name, lc_grp_name)] = g.id

        if 'users' not in domain:
            domain['users'] = {}  # changes original arg

        # ensure we have all rule id before switching to parallel
        # ensure we have all main keys
        role_set = set(())
        for user_name, user in domain['users'].items():
            if 'project_roles' not in user:
                user['project_roles'] = {}
            else:
                for role_list in list(user['project_roles'].values()):
                    role_set |= set(role_list)
            if 'domain_roles' not in user:
                user['domain_roles'] = {}
            else:
                for role_list in list(user['domain_roles'].values()):
                    role_set |= set(role_list)
            if 'member_of' not in user:
                user['member_of'] = {}
        for role in role_set:
            get_role_id(role)

    # TODO: group roles
    # Pass 2 (parallel): fork children to reconcile users.
    processes = []
    for domain_name, domain in user_domain.items():
        dom_id = domain_to_id[domain_name]
        jobs = list(domain['users'].items())
        le = len(jobs)
        # creating new fork for each 4 user
        # this was the easiest to add without huge reformating
        f = le
        while (f > 0):
            s = f - 4
            if s < 0:
                s = 0
            items = jobs[s:f]
            f = s
            p = os.fork()
            if not p:
                # Child process: build fresh session/client (sockets must
                # not be shared across fork), sync its slice of users,
                # then _exit without running parent cleanup.
                sess = keystone_session.Session(auth=auth)
                e = endpoint_override
                keystone = keystone_client.Client(session=sess,
                                                  endpoint_override=e)
                for user_name, user in items:
                    ul = keystone.users.list(domain=dom_id, name=user_name)
                    le = len(ul)
                    assert le < 2
                    user_rec = {}
                    relevant_args = ('email', 'description', 'password')
                    for arg in relevant_args:
                        user_rec[arg] = user.get(arg, None)
                    if 'default_project' in user:
                        # default_project is a (domain, project) pair.
                        default_project = user['default_project']
                        dpid = domain_project_to_id[(
                            default_project[0].lower(),
                            default_project[1].lower())]
                        user_rec['default_project'] = dpid
                    else:
                        user_rec['default_project'] = None
                    if 'enabled' not in user:
                        user_rec['enabled'] = True
                    existing_project_roles = {}
                    existing_domain_roles = {}
                    member_of = set()
                    if le == 0:
                        # TODO: check is the inherited roles have any
                        # relavant effect
                        u = keystone.users.create(user_name, domain=dom_id,
                                                  **user_rec)
                        u_id = u.id
                    else:
                        # Existing user: update if any tracked attribute
                        # differs, then snapshot current role assignments
                        # and group memberships for diffing below.
                        u = ul[0]
                        u_id = u.id
                        u_email = u.email if hasattr(u, 'email') else None
                        u_desc = (u.description
                                  if hasattr(u, 'description') else None)
                        u_default_project = (u.default_project
                                             if hasattr(u,
                                                        'default_project')
                                             else None)
                        if (u.name != user_name or
                                u_email != user_rec['email'] or
                                u_desc != user_rec.get('description') or
                                u.enabled != user_rec.get('enabled') or
                                u_default_project != user_rec[
                                    'default_project']):
                            u = keystone.users.update(u.id, name=user_name,
                                                      **user_rec)
                        assigments = keystone.role_assignments.list(
                            user=u.id)
                        for a in assigments:
                            # strange looking api reponse ..
                            role_id = a.role['id']
                            if 'project' in a.scope:
                                project_id = a.scope['project']['id']
                                if project_id not in existing_project_roles:
                                    existing_project_roles[
                                        project_id] = set((role_id,))
                                else:
                                    existing_project_roles[
                                        project_id].add(role_id)
                                continue
                            if 'domain' in a.scope:
                                d_id = a.scope['domain']['id']
                                if d_id not in existing_domain_roles:
                                    existing_domain_roles[
                                        d_id] = set((role_id,))
                                else:
                                    existing_domain_roles[d_id].add(role_id)
                        grps = keystone.groups.list(user=u_id)
                        for grp in grps:
                            member_of.add(grp.id)

                    # update grp member
                    target_groups = set()
                    for dom, groups in user['member_of'].items():
                        l_d = dom.lower()
                        for group in groups:
                            target_groups.add(
                                domain_group_to_id[(l_d, group.lower())])
                    grp_mem_del = member_of - target_groups
                    grp_mem_add = target_groups - member_of
                    for grp in grp_mem_del:
                        keystone.users.remove_from_group(u_id, grp)
                    for grp in grp_mem_add:
                        keystone.users.add_to_group(u_id, grp)

                    # Resolve the desired role sets into ids.
                    target_domain_roles = {}
                    target_project_roles = {}
                    for dom, roles in user['domain_roles'].items():
                        d_roles = set()
                        for role in roles:
                            d_roles.add(get_role_id(role))
                        target_domain_roles[
                            domain_to_id[dom.lower()]] = d_roles
                    for (dom, proj), roles in list(
                            user['project_roles'].items()):
                        p_roles = set()
                        for role in roles:
                            p_roles.add(get_role_id(role))
                        proj_id = domain_project_to_id[(dom.lower(),
                                                        proj.lower())]
                        target_project_roles[proj_id] = p_roles
                    related_projects = set(target_project_roles.keys())
                    related_projects |= set(existing_project_roles.keys())
                    related_domains = set(target_domain_roles.keys())
                    related_domains |= set(existing_domain_roles.keys())

                    # update project roles
                    for proj_id in related_projects:
                        target_roles = target_project_roles.get(proj_id,
                                                                set())
                        existing_roles = existing_project_roles.get(proj_id,
                                                                    set())
                        roles_to_add = target_roles - existing_roles
                        roles_to_del = existing_roles - target_roles
                        for role_id in roles_to_del:
                            keystone.roles.revoke(user=u_id,
                                                  project=proj_id,
                                                  role=role_id)
                        for role_id in roles_to_add:
                            keystone.roles.grant(user=u_id,
                                                 project=proj_id,
                                                 role=role_id)
                    # update domain roles
                    for d_id in related_domains:
                        target_roles = target_domain_roles.get(d_id, set())
                        existing_roles = existing_domain_roles.get(d_id,
                                                                   set())
                        roles_to_add = target_roles - existing_roles
                        roles_to_del = existing_roles - target_roles
                        for role_id in roles_to_del:
                            keystone.roles.revoke(user=u_id, domain=d_id,
                                                  role=role_id)
                        for role_id in roles_to_add:
                            keystone.roles.grant(user=u_id, domain=d_id,
                                                 role=role_id)
                    # validate password if not new user
                    # TODO
                os._exit(0)
            # Parent: track the child and cap concurrency at 32.
            processes.append(p)
            if (len(processes) > 32):
                p = processes.pop(0)
                assert (p, 0) == os.waitpid(p, 0)

    # Reap all remaining children; a non-zero status fails the assert.
    for p in processes:
        assert (p, 0) == os.waitpid(p, 0)
    LOG.info("Managing keystone accounts took: %f" % (time.time() - start))
def create_tenant(sess, new_project, resource_list):
    # Clone a tenant: create the project (granting the admin user a role
    # on it), then re-authenticate scoped to the new project and recreate
    # the networks, subnets and routers described in resource_list,
    # finally handing off to deploy_app().
    # NOTE(review): Python 2 code (print statements).

    # Let's check do we already project with this name
    keystone = keystone_client.Client(session=sess)
    project_exists = check_project(keystone, new_project)
    # Creating project unless it exists
    if not project_exists:
        prj = keystone.projects.create(
            name=new_project, domain=OS_PROJECT_DOMAIN_ID,
            description="Automatically created project")
        print "New project " + prj.name + " was created"
        admin_user_object_list = keystone.users.list(
            name=OS_USERNAME, domain=OS_USER_DOMAIN_NAME)
        # NOTE(review): this searches for a *role* named after the admin
        # user name (OS_USERNAME) -- confirm this is intended and not a
        # role name such as 'admin'.
        admin_role_object_list = keystone.roles.list(
            name=OS_USERNAME, domain=OS_USER_DOMAIN_NAME)
        keystone.roles.grant(role=admin_role_object_list[0].id,
                             user=admin_user_object_list[0].id,
                             project=prj.id)
        print "User " + OS_USERNAME + " was granted role with id " + admin_role_object_list[0].id +\
              " on a project " + prj.name

    #################################
    # Authenticating on a new tenant
    # NOTE(review): user_domain_id is fed from OS_USER_DOMAIN_NAME (a
    # name, not an id) -- confirm against the globals' definitions.
    auth = v3.Password(username=OS_USERNAME,
                       password=OS_PASSWORD,
                       project_name=new_project,
                       project_domain_id=OS_PROJECT_DOMAIN_ID,
                       user_domain_id=OS_USER_DOMAIN_NAME,
                       auth_url=OS_AUTH_URL)
    new_sess = session.Session(auth=auth)
    neutron = neutron_client.Client(session=new_sess)
    alter_security_group(new_sess, keystone, new_project, neutron)

    # Creating networks, then subnets and then routers
    # This map will store newly created old <-> new net id mapping
    old_network_ids = {}
    # Creating networks from source tenant
    for network in resource_list['networks']:
        new_net = neutron.create_network(
            {'network': {
                'name': network['name']
            }})
        old_network_ids[network['id']] = new_net['network']['id']
        print "Network " + network['name'] + " was created"

    # Creating subnets from the source tenant and attaching them to the
    # newly created networks
    # This map will store newly created old <-> new subnet id mapping
    old_subnet_ids = {}
    for subnet in resource_list['subnets']:
        new_subnet = neutron.create_subnet({
            'subnet': {
                'network_id': old_network_ids[subnet['network_id']],
                'cidr': subnet['cidr'],
                'allocation_pools': subnet['allocation_pools'],
                'dns_nameservers': subnet['dns_nameservers'],
                'enable_dhcp': subnet['enable_dhcp'],
                'gateway_ip': subnet['gateway_ip'],
                'name': subnet['name'],
                'ip_version': subnet['ip_version']
            }
        })
        print "Subnet " + subnet['name'] + " was created and attached to network " + \
              old_network_ids[subnet['network_id']]
        old_subnet_ids[subnet['id']] = new_subnet['subnet']['id']

    # Creating Tenant Routers
    for router in resource_list['routers']:
        new_router = neutron.create_router(
            {'router': {
                'name': router['name'],
                'admin_state_up': True
            }})
        print "Router " + router['name'] + " was created"
        # Attaching router to subnets
        original_router_id = router['id']
        for port in resource_list[original_router_id]['ports']:
            # We might have either internal or exernal/gw port
            if port['device_owner'] == 'network:router_gateway':
                neutron.add_gateway_router(
                    new_router['router']['id'],
                    {'network_id': port['network_id']})
                print "Router gateway interface was added"
            elif port['device_owner'] == 'network:router_interface':
                # Extracting new subnet id from the mapping and
                # resource_list
                old_port_subnet_id = port['fixed_ips'][0]['subnet_id']
                new_subnet_id = old_subnet_ids[old_port_subnet_id]
                neutron.add_interface_router(
                    new_router['router']['id'],
                    {'subnet_id': new_subnet_id})
                print "Router internal interface was added"

    # Modify default security group to enable App traffic
    # Calling App redeployment
    deploy_app(new_sess, neutron, keystone, new_project)
    return
def _authenticate(self, reqs, session=None):
    """Performs authentication via Keystone.

    :param reqs: Pair of request dicts: reqs[0] holds the parameters
        for the primary Keystone authentication, reqs[1] optionally
        holds parameters for a *target* cloud (cross-cloud actions).
    :param session: Optional pre-built keystone session; when given,
        no primary auth plugin is constructed.

    :return: Auth response dict (empty dict when nothing authenticated)
    """
    if not isinstance(reqs[0], dict):
        raise TypeError('The input "req" is not typeof dict.')
    if not isinstance(reqs[1], dict):
        raise TypeError('The input "req" is not typeof dict.')

    auth_response = {}
    req = reqs[0]

    cacert = req.get('cacert')
    endpoint_type = req.get('endpoint_type', 'publicURL')
    insecure = req.get('insecure')
    mistral_url = req.get('mistral_url')
    region_name = req.get('region_name')
    service_type = req.get('service_type', 'workflowv2')

    verify = self._verification_needed(cacert, insecure)

    if not session:
        auth = self._get_auth(**req)
        if auth:
            session = ks_session.Session(auth=auth, verify=verify)

    if session:
        if not mistral_url:
            # Best-effort endpoint discovery; a missing catalog entry
            # leaves mistral_url as None rather than failing auth.
            try:
                mistral_url = session.get_endpoint(
                    service_type=service_type,
                    endpoint_type=endpoint_type,
                    region_name=region_name)
            except Exception:
                mistral_url = None

        auth_response['mistral_url'] = mistral_url
        auth_response['session'] = session

    target_req = reqs[1]

    if "auth_url" in target_req:
        target_auth = self._get_auth(**target_req)

        if target_auth:
            # target cacert and insecure
            cacert = target_req.get('cacert')
            insecure = target_req.get('insecure')

            verify = self._verification_needed(cacert, insecure)

            target_session = ks_session.Session(auth=target_auth,
                                                verify=verify)

            target_auth_headers = target_session.get_auth_headers() or {}
            target_auth_token = target_auth_headers.get('X-Auth-Token')

            auth_response.update({
                api.TARGET_AUTH_TOKEN: target_auth_token,
                api.TARGET_PROJECT_ID: target_session.get_project_id(),
                api.TARGET_USER_ID: target_session.get_user_id(),
                api.TARGET_AUTH_URI: target_auth._plugin.auth_url,
            })

            access = target_auth.get_access(target_session)
            service_catalog = access.service_catalog

            if self._is_service_catalog_v2(service_catalog):
                access_data = access._data["access"]
                if not len(access_data['serviceCatalog']):
                    # BUG FIX: the adjacent string literals previously
                    # had no separating spaces, producing a garbled
                    # message ("authenticationcredentials", ...).
                    LOG.warning(
                        "Service Catalog empty, some authentication "
                        "credentials may be missing. This can cause "
                        "malfunction in the Mistral action executions.")
                sc_json = jsonutils.dumps(access_data)
                auth_response[api.TARGET_SERVICE_CATALOG] = sc_json

    if not auth_response:
        LOG.debug("No valid token or password + user provided. "
                  "Continuing without authentication")
        return {}

    return auth_response
def test_get_endpoint(self):
    """The plugin reports back the endpoint it was constructed with."""
    plugin = http_basic.HTTPBasicAuth(endpoint=self.TEST_URL)
    sess = session.Session(auth=plugin)
    self.assertEqual(self.TEST_URL, plugin.get_endpoint(sess))
def test_get_endpoint_with_override(self):
    """An explicit endpoint_override beats the configured endpoint."""
    plugin = http_basic.HTTPBasicAuth(endpoint=self.TEST_URL)
    sess = session.Session(auth=plugin)
    self.assertEqual('foo', plugin.get_endpoint(sess, endpoint_override='foo'))
def setUp(self):
    """Build an object-store APIv1 client and a requests mock fixture."""
    super(TestObjectAPIv1, self).setUp()
    ks_session = session.Session()
    self.api = object_store.APIv1(session=ks_session, endpoint=FAKE_URL)
    self.requests_mock = self.useFixture(fixture.Fixture())
def _create_session_client(self):
    """Return an http.SessionClient authenticated by a static token."""
    token_auth = token_endpoint.Token(self.endpoint, self.token)
    ks_session = session.Session(auth=token_auth)
    return http.SessionClient(ks_session)
def __init__(self, *args, **kwargs):
    # Build a single password-authenticated session and share it across
    # the keystone, neutron and nova clients.  All positional/keyword
    # arguments are forwarded verbatim to v3.Password.
    self.auth = v3.Password(*args, **kwargs)
    self.session = session.Session(auth=self.auth)
    self.keystone = client.Client(session=self.session)
    # NOTE(review): 'neutrnoclient' looks like a misspelling of
    # 'neutronclient' -- confirm it matches the import alias at the top
    # of this file before renaming.
    self.neutron = neutrnoclient.Client(session=self.session)
    self.nova = novaclient.Client('2.0', session=self.session)
def get_session(self, auth_plugin, verify):
    """Return a keystone session for the given auth plugin.

    :param auth_plugin: keystoneauth auth plugin to attach.
    :param verify: TLS verification flag/CA bundle path.
    """
    return session.Session(auth=auth_plugin, verify=verify)
def _set_session(self):
    """Create self.session and attach password auth loaded from options."""
    loader = loading.get_plugin_loader('password')
    auth = loader.load_from_options(auth_url=self.identity_url,
                                    username='******',
                                    password='******')
    self.session = session.Session()
    self.session.auth = auth
def __init__(self):
    """Create a session from the [placement] conf auth options."""
    placement_auth = ks_loading.load_auth_from_conf_options(cfg.CONF,
                                                            'placement')
    self._client = session.Session(auth=placement_auth)
    # Flipped to True elsewhere when the placement service is unusable.
    self._disabled = False
def _get_keystone_session(self, **kwargs):
    """Build a keystone session, picking v2 or v3 auth by discovery.

    Consumes credential kwargs (cacert, cert/key, insecure, auth_url,
    project/user identifiers, token, password) and returns a keystone
    ``Session`` with a suitable auth plugin attached.  Unlike the
    v3-defaulting variants of this helper, domain values are used
    exactly as given: if any domain kwarg is set a v3 plugin is chosen,
    otherwise v2 is preferred when available.
    """
    cacert = kwargs.pop('cacert', None)
    cert = kwargs.pop('cert', None)
    key = kwargs.pop('key', None)
    insecure = kwargs.pop('insecure', False)
    auth_url = kwargs.pop('auth_url', None)
    project_id = kwargs.pop('project_id', None)
    project_name = kwargs.pop('project_name', None)
    token = kwargs.get('token')

    # insecure wins over any CA cert; otherwise verify against the CA
    # cert when given, or the default trust store.
    if insecure:
        verify = False
    else:
        verify = cacert or True

    if cert and key:
        # passing cert and key together is deprecated in favour of the
        # requests lib form of having the cert and key as a tuple
        cert = (cert, key)

    # create the keystone client session
    ks_session = session.Session(verify=verify, cert=cert)
    v2_auth_url, v3_auth_url = self._discover_auth_versions(
        ks_session, auth_url)

    username = kwargs.pop('username', None)
    user_id = kwargs.pop('user_id', None)
    user_domain_name = kwargs.pop('user_domain_name', None)
    user_domain_id = kwargs.pop('user_domain_id', None)
    project_domain_name = kwargs.pop('project_domain_name', None)
    project_domain_id = kwargs.pop('project_domain_id', None)

    auth = None

    use_domain = (user_domain_id or
                  user_domain_name or
                  project_domain_id or
                  project_domain_name)
    use_v3 = v3_auth_url and (use_domain or (not v2_auth_url))
    use_v2 = v2_auth_url and not use_domain

    if use_v3 and token:
        auth = v3_auth.Token(
            v3_auth_url,
            token=token,
            project_name=project_name,
            project_id=project_id,
            project_domain_name=project_domain_name,
            project_domain_id=project_domain_id)
    elif use_v2 and token:
        auth = v2_auth.Token(
            v2_auth_url,
            token=token,
            tenant_id=project_id,
            tenant_name=project_name)
    elif use_v3:
        # The auth_url as v3 specified
        # e.g. http://no.where:5000/v3
        # Keystone will return only v3 as viable option
        auth = v3_auth.Password(
            v3_auth_url,
            username=username,
            password=kwargs.pop('password', None),
            user_id=user_id,
            user_domain_name=user_domain_name,
            user_domain_id=user_domain_id,
            project_name=project_name,
            project_id=project_id,
            project_domain_name=project_domain_name,
            project_domain_id=project_domain_id)
    elif use_v2:
        # The auth_url as v2 specified
        # e.g. http://no.where:5000/v2.0
        # Keystone will return only v2 as viable option
        auth = v2_auth.Password(
            v2_auth_url,
            username,
            kwargs.pop('password', None),
            tenant_id=project_id,
            tenant_name=project_name)
    else:
        raise errors.ZaqarError('Unable to determine the Keystone version '
                                'to authenticate with using the given '
                                'auth_url.')

    ks_session.auth = auth
    return ks_session
# Credentials are required: a missing variable raises KeyError at import.
USERNAME = os.environ['OS_USERNAME']
PASSWORD = os.environ['OS_PASSWORD']
AUTH_URL = os.environ['OS_AUTH_URL']
PROJECT_NAME = os.environ['OS_PROJECT_NAME']
VERSION = '2.30'  # nova api version

logging.basicConfig(filename='novaops.log', level=logging.DEBUG,
                    format='%(asctime)s %(message)s')

# Module-level session/client shared by the helpers below.
# BUG FIX: the project name was hard-coded to 'demo' even though
# OS_PROJECT_NAME is read (and required) above; honor the environment
# variable so the script works against any project.
auth = v3.Password(auth_url=AUTH_URL,
                   username=USERNAME,
                   password=PASSWORD,
                   project_name=PROJECT_NAME,
                   user_domain_id='default',
                   project_domain_id='default')
sess = session.Session(auth=auth)
nova = client.Client(VERSION, session=sess)


def GetAggregates():
    """Return the ids of all host aggregates visible to this project."""
    return [aggregate.id for aggregate in nova.aggregates.list()]


def GetHostsInAggregate(aggregate):
    """Return the list of host names belonging to the given aggregate id.

    :param aggregate: aggregate id accepted by aggregates.get_details.
    """
    return list(getattr(nova.aggregates.get_details(aggregate), 'hosts'))
def setup_http_client():
    """Set `http_client` to a valid instance of `HTTPClient` and pass
    it as parameter to initialize the client library.

    Sets http_client to an object which makes HTTP requests with
    authentication. It chooses an authentication backend as follows:

    1. If the environment variables HIL_USERNAME and HIL_PASSWORD
       are defined, it will use HTTP basic auth, with the corresponding
       user name and password.

    2. If the `python-keystoneclient` library is installed, and the
       environment variables:

           * OS_AUTH_URL
           * OS_USERNAME
           * OS_PASSWORD
           * OS_PROJECT_NAME

       are defined, Keystone is used.

    3. Oterwise, do not supply authentication information.

    This may be extended with other backends in the future.

    `http_client` is also passed as a parameter to the client library.
    Until all calls are moved to client library, this will support
    both ways of intereacting with HIL.
    """
    global http_client
    global C  # initiating the client library

    # First try basic auth:
    # NOTE(review): if HIL_ENDPOINT is unset, `ep` becomes the return
    # value of sys.stdout.write() (not an endpoint URL) and execution
    # continues -- confirm whether this should abort instead.
    ep = (os.environ.get('HIL_ENDPOINT') or
          sys.stdout.write("Error: HIL_ENDPOINT not set \n"))
    basic_username = os.getenv('HIL_USERNAME')
    basic_password = os.getenv('HIL_PASSWORD')
    if basic_username is not None and basic_password is not None:
        # For calls with no client library support yet.
        # Includes all headnode calls; registration of nodes and switches.
        http_client = RequestsHTTPClient()
        http_client.auth = (basic_username, basic_password)
        # For calls using the client library
        C = Client(ep, http_client)
        return

    # Next try keystone:
    # Imported lazily so the basic-auth and no-auth paths work without
    # keystoneauth1 installed; ImportError falls through to no-auth.
    try:
        from keystoneauth1.identity import v3
        from keystoneauth1 import session
        os_auth_url = os.getenv('OS_AUTH_URL')
        os_password = os.getenv('OS_PASSWORD')
        os_username = os.getenv('OS_USERNAME')
        os_user_domain_id = os.getenv('OS_USER_DOMAIN_ID') or 'default'
        os_project_name = os.getenv('OS_PROJECT_NAME')
        os_project_domain_id = os.getenv('OS_PROJECT_DOMAIN_ID') or 'default'
        if None in (os_auth_url, os_username, os_password,
                    os_project_name):
            # Raised (and caught below) to fall back to no-auth.
            raise KeyError("Required openstack environment variable not "
                           "set.")
        auth = v3.Password(auth_url=os_auth_url,
                           username=os_username,
                           password=os_password,
                           project_name=os_project_name,
                           user_domain_id=os_user_domain_id,
                           project_domain_id=os_project_domain_id)
        sess = session.Session(auth=auth)
        http_client = KeystoneHTTPClient(sess)
        # For calls using the client library
        C = Client(ep, http_client)
        return
    except (ImportError, KeyError):
        pass

    # Finally, fall back to no authentication:
    http_client = requests.Session()
    C = Client(ep, http_client)
def get_session(self, **kwargs):
    """Return a keystone session with TLS certificate checks disabled.

    All keyword arguments are forwarded to ks_session.Session.
    Note that verify=False skips server certificate verification for
    every request made through the returned session.
    """
    insecure_session = ks_session.Session(verify=False, **kwargs)
    return insecure_session
def __init__(self, auth_attr):
    """Create a neutron client from password-auth attributes.

    :param auth_attr: mapping of keyword arguments for identity.Password.
    """
    password_auth = identity.Password(**auth_attr)
    auth_session = session.Session(auth=password_auth)
    self.client = neutron_client.Client(session=auth_session)
def keystoneclient(request, admin=False):
    """Returns a client connected to the Keystone backend.

    Several forms of authentication are supported:

        * Username + password -> Unscoped authentication
        * Username + password + tenant id -> Scoped authentication
        * Unscoped token -> Unscoped authentication
        * Unscoped token + tenant id -> Scoped authentication
        * Scoped token -> Scoped authentication

    Available services and data from the backend will vary depending on
    whether the authentication was scoped or unscoped.

    Lazy authentication if an ``endpoint`` parameter is provided.

    Calls requiring the admin endpoint should have ``admin=True`` passed
    in as a keyword argument.

    The client is cached so that subsequent API calls during the same
    request/response cycle don't have to be re-authenticated.
    """
    client_version = VERSIONS.get_active_version()
    user = request.user
    token_id = user.token.id

    if is_multi_domain_enabled():
        # Cloud Admin, Domain Admin or Mixed Domain Admin
        # Prefer the domain-scoped token stored in the session, if any.
        if is_domain_admin(request):
            domain_token = request.session.get('domain_token')
            if domain_token:
                token_id = getattr(domain_token, 'auth_token', None)

    if admin:
        # Admin access is policy-gated and uses the admin endpoint.
        if not policy.check((("identity", "admin_required"), ), request):
            raise exceptions.NotAuthorized
        endpoint_type = 'adminURL'
    else:
        endpoint_type = getattr(settings,
                                'OPENSTACK_ENDPOINT_TYPE',
                                'publicURL')

    # Take care of client connection caching/fetching a new client.
    # Admin vs. non-admin clients are cached separately for token matching.
    cache_attr = "_keystoneclient_admin" if admin \
        else backend.KEYSTONE_CLIENT_ATTR
    # Reuse the cached client only if its token still matches the
    # request user's token.
    if (hasattr(request, cache_attr) and
        (not user.token.id or
         getattr(request, cache_attr).auth_token == user.token.id)):
        conn = getattr(request, cache_attr)
    else:
        endpoint = _get_endpoint_url(request, endpoint_type)
        verify = not getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
        cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
        # NOTE(review): when verification is enabled but no CA cert is
        # configured, this leaves verify=None rather than True --
        # confirm keystoneauth treats None as "verify with defaults".
        verify = verify and cacert
        LOG.debug("Creating a new keystoneclient connection to %s.",
                  endpoint)
        remote_addr = request.environ.get('REMOTE_ADDR', '')
        token_auth = token_endpoint.Token(endpoint=endpoint,
                                          token=token_id)
        keystone_session = session.Session(auth=token_auth,
                                           original_ip=remote_addr,
                                           verify=verify)
        conn = client_version['client'].Client(session=keystone_session,
                                               debug=settings.DEBUG)
        setattr(request, cache_attr, conn)
    return conn
config = SafeConfigParser() config.read(nova_cfg) else: LOG.error('Nova configuration file %s does not exist', nova_cfg) sys.exit(1) # get keystone client with details from [placement] section auth = v3.Password(user_domain_name=config.get('placement', 'user_domain_name'), username=config.get('placement', 'username'), password=config.get('placement', 'password'), project_name=config.get('placement', 'project_name'), project_domain_name=config.get('placement', 'user_domain_name'), auth_url=config.get('placement', 'auth_url') + '/v3') sess = session.Session(auth=auth, verify=False) keystone = client.Client(session=sess, interface='internal') iterations_endpoint = iterations placement_endpoint_url = None while iterations_endpoint > 1: iterations_endpoint -= 1 try: # get placement service id placement_service_id = keystone.services.list( name='placement')[0].id # get placement endpoint (os_interface) placement_endpoint_url = keystone.endpoints.list( service=placement_service_id, interface=config.get('placement', 'os_interface'))[0].url
def endpoint_sync(auth, regions, endpoint_override=None, dry_run=False):
    """Reconcile keystone regions, services and endpoints with a spec.

    :param auth: keystoneauth plugin used to build the session.
    :param regions: dict mapping region name -> region spec. A spec may
        hold 'parent_region_id', 'description' and a 'services' list; each
        service holds 'name', 'type', optional 'description' and an
        'endpoints' dict of interface -> url.
    :param endpoint_override: optional keystone endpoint to talk to
        directly instead of the catalog one.
    :param dry_run: when True, return True at the first change that WOULD
        be made, without touching keystone.
    :returns: True if anything changed (or would change under dry_run),
        False otherwise.
    :raises RuntimeError: (via bare ``raise`` outside an except block) on
        malformed input: unknown parent region, missing service name/type,
        duplicate (name, type) within a region, or a too-deep/circular
        region parent chain.
    """
    # TODO: figure out one valid endpoint_override from the regions
    # TODO: why python-request has the idea he needs to lookup the .netrc
    # file on every request
    start = time.time()
    session = keystone_session.Session(auth=auth)
    keystone = keystone_client.Client(session=session,
                                      endpoint_override=endpoint_override)
    changed = False
    existing_regions = keystone.regions.list()
    existing_services = keystone.services.list()
    existing_endpoints = keystone.endpoints.list()

    # Desired state:
    #   final_srv_set: {(name, type, description)}
    #   final_endpoint_dict: {(name, type, desc):
    #                         {region: [(region, interface, url), ...]}}
    final_endpoint_dict = {}
    final_srv_set = set()
    for reg_name, reg in regions.items():
        if 'parent_region_id' not in reg:
            reg['parent_region_id'] = None
        if (reg['parent_region_id'] is not None and
                reg['parent_region_id'] not in regions):
            raise  # ref undef region
        region_uniq_sanity_check = []
        if 'services' in reg:
            for srv in reg['services']:
                if 'name' not in srv:
                    raise
                if 'type' not in srv:
                    raise
                if 'description' not in srv:
                    srv['description'] = None  # edit origin allowed
                # a (name, type) pair may appear only once per region
                key_tup = (srv['name'], srv['type'])
                if key_tup in region_uniq_sanity_check:
                    raise
                region_uniq_sanity_check.append(key_tup)
                srv_set_key = (srv['name'], srv['type'], srv['description'])
                final_srv_set.add(srv_set_key)
                if srv_set_key not in final_endpoint_dict:
                    final_endpoint_dict[srv_set_key] = {}
                if 'endpoints' in srv:
                    for enp_name, enp_value in srv['endpoints'].items():
                        rec = (reg_name, enp_name, enp_value)
                        if reg_name not in final_endpoint_dict[srv_set_key]:
                            # BUG FIX: the original assigned
                            # ``final_endpoint_dict[srv_set_key] =
                            # {reg_name: [rec]}`` here, replacing the whole
                            # per-service dict and silently dropping the
                            # endpoints already collected for other regions.
                            final_endpoint_dict[srv_set_key][reg_name] = [rec]
                        else:
                            k = srv_set_key
                            final_endpoint_dict[k][reg_name].append(rec)

    # Index the existing state.
    existing_srv_dict = dict()      # service id -> service object
    srv_dedup_list = dict()         # (name, type, desc) -> [service, ...]
    # (name, type) -> duplicate service object, or False when the
    # (name, type) pair was seen only once (i.e. truthy only when the same
    # (name, type) exists with a differing description).
    is_srv_desc_diff_only = dict()
    srv_endp = dict()               # service id -> [endpoint, ...]
    for srv in existing_services:
        srv_set_key = (srv.name, srv.type,
                       srv.description if hasattr(srv, 'description')
                       else None)
        if srv_set_key in srv_dedup_list:
            srv_dedup_list[srv_set_key].append(srv)
        else:
            srv_dedup_list[srv_set_key] = [srv]
        srv_set_only_key = (srv.name, srv.type)
        if srv_set_only_key in is_srv_desc_diff_only:
            is_srv_desc_diff_only[srv_set_only_key] = srv
        else:
            is_srv_desc_diff_only[srv_set_only_key] = False
        existing_srv_dict[srv.id] = srv

    endp_dict = dict()  # faster del by id
    for endp in existing_endpoints:
        sid = endp.service_id
        if sid not in srv_endp:
            srv_endp[sid] = []
        srv_endp[sid].append(endp)
        endp_dict[endp.id] = endp

    # Deduplicate services sharing (name, type, description): keep the one
    # with the most endpoints, delete the rest with their endpoints.
    srv_dict = {}  # deduplicated, using new dict to be less confusing
    for key, srvL in srv_dedup_list.items():
        if len(srvL) > 0:  # possible it has 0 endpoints, not in srv_endp
            def nr_endps(srv):
                # endpoint count used as the "keep this one" ranking key
                if srv.id not in srv_endp:  # consider populating it
                    return 0
                return len(srv_endp[srv.id])

            ordered = sorted(srvL, key=nr_endps, reverse=True)
            for srv in ordered[1:]:
                if srv.id in srv_endp:
                    enpL = srv_endp[srv.id]
                    for enp in enpL:
                        if dry_run:
                            return True
                        keystone.endpoints.delete(enp.id)
                        # BUG FIX: the original deleted endp_dict[endp.id]
                        # — a stale variable from the previous loop — not
                        # the endpoint actually removed here.
                        del endp_dict[enp.id]
                        changed = True
                    del srv_endp[srv.id]
                if dry_run:
                    return True
                keystone.services.delete(srv.id)
                changed = True
            srv_dict[key] = ordered[0]
        else:
            # unreachable in practice: every bucket is created non-empty
            srv_dict[key] = srvL[0]

    # srv name,type,desc is now uniq
    # srv_dedup_list invalid, use srv_dict
    # existing endpoint / service list invalid
    # srv_endp has already deleted records

    # Create missing services; when only the description differs from an
    # existing service, update it instead of creating a duplicate.
    for (srv_name, srv_type, srv_desc) in final_srv_set:
        srv_key = (srv_name, srv_type, srv_desc)
        if srv_key not in srv_dict:
            srv_sh_key = (srv_name, srv_type)
            if dry_run:
                return True
            if (srv_sh_key in is_srv_desc_diff_only and
                    is_srv_desc_diff_only[srv_sh_key]):
                srv = is_srv_desc_diff_only[srv_sh_key]
                # re-key under the new description; mutating ``srv`` also
                # updates existing_srv_dict (shared reference, relied on
                # when building srv_enp_dictL below)
                del srv_dict[(srv_name, srv_type, srv.description)]
                srv.description = srv_desc
                srv.enabled = True
                n = keystone.services.update(srv, name=srv_name,
                                             type=srv_type, enabled=True,
                                             description=srv_desc)
                srv_dict[srv_key] = n
            else:
                n = keystone.services.create(name=srv_name, type=srv_type,
                                             enabled=True,
                                             description=srv_desc)
                srv_dict[srv_key] = n
                srv_endp[n.id] = []
            changed = True

    # Create missing regions, update description / parent.
    reg_dict = {}
    for reg in existing_regions:
        reg_dict[reg.id] = reg
    reg_to_del = reg_dict.copy()

    regs_to_process = regions.copy()  # simpler than constructing a tree
    tmp_stack = []
    while regs_to_process:
        (name, reg) = regs_to_process.popitem()
        # Walk up the parent chain, stacking children so that parents are
        # created first.
        while (reg['parent_region_id'] is not None and
                reg['parent_region_id'] not in reg_dict):
            tmp_stack.append((name, reg))
            (name, reg) = (reg['parent_region_id'],
                           regs_to_process.pop(reg['parent_region_id']))
            # circular graph loop ?
            if len(tmp_stack) > 64:
                raise  # too long or loop
        tmp_stack.append((name, reg))
        while tmp_stack:
            (name, reg) = tmp_stack.pop()  # root region(s) first
            reg_to_del.pop(name, None)
            if 'description' not in reg:
                reg['description'] = ''
            # if you write none to keystone you get ''
            if reg['description'] is None:
                reg['description'] = ''
            if name not in reg_dict:
                if dry_run:
                    return True
                prid = reg['parent_region_id']
                r = keystone.regions.create(name,
                                            description=reg['description'],
                                            enabled=True,
                                            parent_region=prid)
                reg_dict[r.id] = r
                changed = True
                continue
            existing = reg_dict[name]
            if (existing.parent_region_id != reg['parent_region_id'] or
                    existing.description != reg['description']):
                if dry_run:
                    return True
                keystone.regions.update(
                    name, description=reg['description'], enabled=True,
                    parent_region=reg['parent_region_id'])
                changed = True

    # Create / update / delete endpoints.
    # TODO: jump to multithread api calls for endp
    # Map existing endpoints the same way as final_endpoint_dict:
    # (name, type, desc) -> {region: {endpoint_id: endpoint}}.
    # Built only now, after the service description updates above (the
    # service objects are shared references).
    srv_enp_dictL = {}
    for enp in list(endp_dict.values()):
        srv = existing_srv_dict[enp.service_id]
        srv_set_key = (srv.name, srv.type,
                       srv.description if hasattr(srv, 'description')
                       else None)
        # BUG FIX: the original indexed srv_enp_dictL[key][region] directly
        # in its else branch and raised KeyError on a service's second
        # region.
        srv_enp_dictL.setdefault(srv_set_key, {}).setdefault(
            enp.region, {})[enp.id] = enp

    all_srv = set(srv_enp_dictL.keys())
    all_srv.update(list(final_endpoint_dict.keys()))
    for srv_key in all_srv:
        if srv_key not in final_endpoint_dict:
            # service no longer wanted: delete all of its endpoints
            if dry_run:
                return True
            # duplicate delete required
            for reg, enp in srv_enp_dictL[srv_key].items():
                for enp_id in list(enp.keys()):
                    keystone.endpoints.delete(enp_id)
                    changed = True
            continue
        if srv_key not in srv_enp_dictL:
            # brand new service: create everything
            if dry_run:
                return True
            srv = srv_dict[srv_key]
            srv_id = srv.id
            for reg, enps in final_endpoint_dict[srv_key].items():
                for (reg_name, enp_interface, enp_value) in enps:
                    keystone.endpoints.create(srv_id, enp_value,
                                              enp_interface,
                                              region=reg_name, enabled=True)
                    changed = True
            continue
        # TODO: a desired region with no existing endpoints for this
        # service is never visited by this loop, so its endpoints are not
        # created here — confirm whether that is intended.
        for reg, enp in srv_enp_dictL[srv_key].items():
            # duplicate delete
            enp_to_delete = enp.copy()
            if reg in final_endpoint_dict[srv_key]:
                regl = final_endpoint_dict[srv_key][reg]
                for (reg_name, enp_interface, enp_value) in regl:
                    assert reg_name == reg
                    cands = []
                    match = None
                    for cand in list(srv_enp_dictL[srv_key][reg].values()):
                        if cand.interface == enp_interface:
                            cands.append(cand)
                            if cand.url == enp_value:
                                match = cand
                    if not match and dry_run:
                        return True
                    if cands:
                        if match:  # 1 keep others will be deleted
                            del enp_to_delete[match.id]
                            continue
                        # just url change in one all others will be deleted
                        keep = cands[0]
                        del enp_to_delete[keep.id]
                        # BUG FIX: the original called
                        # ``keystone.endpoints.update(enp.id, ...)`` where
                        # ``enp`` was a stale loop variable, updating the
                        # wrong endpoint instead of the kept candidate.
                        keystone.endpoints.update(keep.id, keep.service_id,
                                                  enp_value,
                                                  region=reg_name,
                                                  enabled=True)
                    else:
                        srv = srv_dict[srv_key]
                        keystone.endpoints.create(srv.id, enp_value,
                                                  enp_interface,
                                                  region=reg_name,
                                                  enabled=True)
                    changed = True
            del_ids = list(enp_to_delete.keys())
            if del_ids and dry_run:
                return True
            for enp_id in del_ids:
                keystone.endpoints.delete(enp_id)
                changed = True

    # delete extra services
    for srv_key, srv_rec in srv_dict.items():
        if srv_key not in final_srv_set:
            if dry_run:
                return True
            keystone.services.delete(srv_rec.id)
            changed = True

    # delete extra regions
    if reg_to_del:
        if dry_run:
            return True
        for reg_name in list(reg_to_del.keys()):
            keystone.regions.delete(reg_name)
            changed = True

    LOG.info("Managing keystone endpoints took: %f" % (time.time() - start))
    return changed
def test_no_reauthenticate(self): a = self._create_expired_auth_plugin(reauthenticate=False) expired_auth_ref = a.auth_ref s = session.Session(auth=a) self.assertIs(expired_auth_ref, a.get_access(s))
def create_nova_connection(options):
    """Build and sanity-check a novaclient connection from CLI options.

    Authenticates against keystone (v2 or v3, auto-detected), then tries a
    list of nova microversions until ``hypervisors.list()`` succeeds.

    :param options: dict of command-line options ("--auth-url",
        "--username", "--password", "--tenant-name", "--user-domain",
        "--project-domain", "--region-name", "--endpoint-type",
        "--insecure", "--verbose").
    :returns: a working novaclient Client, or None if no supported
        connection could be established.
    """
    nova = None
    try:
        from novaclient import client
        from novaclient.exceptions import NotAcceptable
    except ImportError:
        fail_usage("Nova not found or not accessible")

    from keystoneauth1 import loading
    from keystoneauth1 import session
    from keystoneclient import discover

    # Prefer the oldest and strip the leading 'v'
    keystone_versions = discover.available_versions(options["--auth-url"])
    keystone_version = keystone_versions[0]['id'][1:]
    kwargs = dict(auth_url=options["--auth-url"],
                  username=options["--username"],
                  password=options["--password"])
    if discover.version_match("2", keystone_version):
        kwargs["tenant_name"] = options["--tenant-name"]
    elif discover.version_match("3", keystone_version):
        kwargs["project_name"] = options["--tenant-name"]
        kwargs["user_domain_name"] = options["--user-domain"]
        kwargs["project_domain_name"] = options["--project-domain"]

    loader = loading.get_plugin_loader('password')
    keystone_auth = loader.load_from_options(**kwargs)
    keystone_session = session.Session(auth=keystone_auth,
                                       verify=(not options["--insecure"]))

    # Some versions of Openstack prior to Ocata only supported positional
    # arguments for username, password, and tenant; versions since Ocata
    # only support named arguments. Use introspection to figure out which
    # Client signature we are dealing with.
    #
    # BUG FIX: inspect.getargspec() was removed in Python 3.11 — prefer
    # getfullargspec() when available. Also hoisted out of the loop: the
    # signature of client.Client does not change between iterations.
    argspec_fn = getattr(inspect, 'getfullargspec', None) or \
        inspect.getargspec
    clientargs = argspec_fn(client.Client).varargs

    nova_versions = ["2.11", "2"]
    for version in nova_versions:
        if clientargs:
            # OSP < 11
            # ArgSpec(args=['version', 'username', 'password',
            #               'project_id', 'auth_url'],
            #         varargs=None, keywords='kwargs',
            #         defaults=(None, None, None, None))
            nova = client.Client(
                version,
                None,  # User
                None,  # Password
                None,  # Tenant
                None,  # Auth URL
                insecure=options["--insecure"],
                region_name=options["--region-name"],
                endpoint_type=options["--endpoint-type"],
                session=keystone_session,
                auth=keystone_auth,
                http_log_debug="--verbose" in options)
        else:
            # OSP >= 11
            # ArgSpec(args=['version'], varargs='args',
            #         keywords='kwargs', defaults=None)
            nova = client.Client(
                version,
                region_name=options["--region-name"],
                endpoint_type=options["--endpoint-type"],
                session=keystone_session,
                auth=keystone_auth,
                http_log_debug="--verbose" in options)

        try:
            # Cheap API call to prove the connection actually works.
            nova.hypervisors.list()
            return nova
        except NotAcceptable as e:
            # Microversion rejected — fall through to the next candidate.
            logging.warning(e)
        except Exception as e:
            logging.warning("Nova connection failed. %s: %s"
                            % (e.__class__.__name__, e))

    logging.warning(
        "Couldn't obtain a supported connection to nova, tried: %s\n"
        % repr(nova_versions))
    return None
def test_get_auth_properties(self): a = self.create_auth_plugin() s = session.Session() self.assertEqual(self.user_id, a.get_user_id(s)) self.assertEqual(self.project_id, a.get_project_id(s))
def __init__(self, username=None, password=None, aws_creds=None, auth_url=None, roles=None, is_admin=None, read_only=False, show_deleted=False, overwrite=True, trust_id=None, trustor_user_id=None, request_id=None, auth_token_info=None, region_name=None, auth_plugin=None, trusts_auth_plugin=None, user_domain_id=None, project_domain_id=None, project_name=None, **kwargs): """Initialisation of the request context. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. """ if user_domain_id: kwargs['user_domain'] = user_domain_id if project_domain_id: kwargs['project_domain'] = project_domain_id super(RequestContext, self).__init__(is_admin=is_admin, read_only=read_only, show_deleted=show_deleted, request_id=request_id, roles=roles, overwrite=overwrite, **kwargs) self.username = username self.password = password self.region_name = region_name self.aws_creds = aws_creds self.project_name = project_name self.auth_token_info = auth_token_info self.auth_url = auth_url self._session = None self._clients = None self._keystone_session = session.Session( **config.get_ssl_options('keystone')) self.trust_id = trust_id self.trustor_user_id = trustor_user_id self.policy = policy.get_enforcer() self._auth_plugin = auth_plugin self._trusts_auth_plugin = trusts_auth_plugin if is_admin is None: self.is_admin = self.policy.check_is_admin(self) else: self.is_admin = is_admin # context scoped cache dict where the key is a class of the type of # object being cached and the value is the cache implementation class self._object_cache = {}
def setUp(self): super(GenericAuthPluginTests, self).setUp() self.auth = GenericPlugin() self.session = session.Session(auth=self.auth)
def test_server_error(self): session = client_session.Session() self.stub_url('GET', status_code=500) self.assertRaises(exceptions.InternalServerError, session.get, self.TEST_URL)
if not args.pw_prompt: password = clouds_yaml["clouds"]["openstack"]["auth"][ "password"] credentials = { "auth_url": args.auth_url, "username": args.username, "password": password, "project_id": args.project_id, "user_domain_name": "Default" } print("Running with arguments {}".format(credentials)) print(args.download) # Connect to OpenStack API. loader = loading.get_plugin_loader("password") auth = loader.load_from_options(**credentials) nova_sess = session.Session(auth=auth) nova = novaclient.client.Client(2.1, session=nova_sess) glance_sess = session.Session(auth=auth) glance = glanceclient.Client(2, session=glance_sess) # Override endpoints. openstack_url = urlparse(args.auth_url).hostname glance_default_endpoint = urlparse( glance.images.http_client.get_endpoint()) glance.images.http_client.endpoint_override = replace_netloc( glance_default_endpoint, hostname=openstack_url).geturl() nova_default_endpoint = urlparse(nova.servers.api.client.get_endpoint()) nova.servers.client.endpoint_override = replace_netloc( nova_default_endpoint, hostname=openstack_url).geturl() # Backup the specified instance. servers = search_servers(nova.servers, name="^{}$".format(re.escape(args.name)))