def __init__(self, username=None, password=None, tenantid=None, baseurl=None):
    """Record the admin credentials and wire up the Keystone v3 managers.

    Args:
        username (str): OpenStack administrator username.
        password (str): OpenStack administrator password.
        tenantid (str): OpenStack tenant for the administrator account.
        baseurl (str): OpenStack environment URI.
    """
    self._AUTH_USERNAME = username
    self._AUTH_PASSWORD = password
    self._AUTH_TENANTID = tenantid
    self._BASE_URL = baseurl

    # One authenticated identity client is shared by all managers.
    identity_api = keystoneClient.Client(
        username=self._AUTH_USERNAME,
        password=self._AUTH_PASSWORD,
        project_name=self._AUTH_TENANTID,
        auth_url='%s:5001/v3' % self._BASE_URL)

    self._roleManager = RoleManager(identity_api)
    self._groupManager = GroupManager(identity_api)
    self._domainManager = DomainManager(identity_api)
    self._projectManager = ProjectManager(identity_api)
    self._roleAssignmentManager = RoleAssignmentManager(identity_api)
def generate_test_data():
    """Build a set of test data as returned by Keystone V3.

    Populates a TestDataContainer with an identity and a compute service
    catalog entry, a domain, a user, two projects, a role, scoped and
    unscoped token payloads wrapped as AccessInfo, and a standalone
    ServiceCatalog.

    Returns:
        TestDataContainer: the populated fixture container.
    """
    test_data = TestDataContainer()
    # Identity service, V3 format: one endpoint dict per interface.
    keystone_service = {
        'type': 'identity',
        'id': uuid.uuid4().hex,
        'endpoints': [{
            'url': 'http://admin.localhost:35357/v3',
            'region': 'RegionOne',
            'interface': 'admin',
            'id': uuid.uuid4().hex,
        }, {
            'url': 'http://internal.localhost:5000/v3',
            'region': 'RegionOne',
            'interface': 'internal',
            'id': uuid.uuid4().hex
        }, {
            'url': 'http://public.localhost:5000/v3',
            'region': 'RegionOne',
            'interface': 'public',
            'id': uuid.uuid4().hex
        }]
    }

    # Domains
    domain_dict = {'id': uuid.uuid4().hex,
                   'name': 'domain',
                   'description': '',
                   'enabled': True}
    test_data.domain = Domain(DomainManager(None), domain_dict, loaded=True)

    # Users
    user_dict = {'id': uuid.uuid4().hex,
                 'name': 'gabriel',
                 'email': '*****@*****.**',
                 'password': '******',
                 'domain_id': domain_dict['id'],
                 'token': '',
                 'enabled': True}
    test_data.user = User(UserManager(None), user_dict, loaded=True)

    # Projects
    project_dict_1 = {'id': uuid.uuid4().hex,
                      'name': 'tenant_one',
                      'description': '',
                      'domain_id': domain_dict['id'],
                      'enabled': True}
    project_dict_2 = {'id': uuid.uuid4().hex,
                      'name': '',
                      'description': '',
                      'domain_id': domain_dict['id'],
                      'enabled': False}
    test_data.project_one = Project(ProjectManager(None), project_dict_1,
                                    loaded=True)
    test_data.project_two = Project(ProjectManager(None), project_dict_2,
                                    loaded=True)

    # Roles
    role_dict = {'id': uuid.uuid4().hex, 'name': 'Member'}
    # Fix: pass a manager *instance* (and mark loaded) like every other
    # resource above; previously the RoleManager class itself was passed.
    test_data.role = Role(RoleManager(None), role_dict, loaded=True)

    # Compute service with endpoints in two regions.
    nova_service = {
        'type': 'compute',
        'id': uuid.uuid4().hex,
        'endpoints': [{
            'url': 'http://nova-admin.localhost:8774/v2.0/%s'
                   % (project_dict_1['id']),
            'region': 'RegionOne',
            'interface': 'admin',
            'id': uuid.uuid4().hex,
        }, {
            'url': 'http://nova-internal.localhost:8774/v2.0/%s'
                   % (project_dict_1['id']),
            'region': 'RegionOne',
            'interface': 'internal',
            'id': uuid.uuid4().hex
        }, {
            'url': 'http://nova-public.localhost:8774/v2.0/%s'
                   % (project_dict_1['id']),
            'region': 'RegionOne',
            'interface': 'public',
            'id': uuid.uuid4().hex
        }, {
            'url': 'http://nova2-admin.localhost:8774/v2.0/%s'
                   % (project_dict_1['id']),
            'region': 'RegionTwo',
            'interface': 'admin',
            'id': uuid.uuid4().hex,
        }, {
            'url': 'http://nova2-internal.localhost:8774/v2.0/%s'
                   % (project_dict_1['id']),
            'region': 'RegionTwo',
            'interface': 'internal',
            'id': uuid.uuid4().hex
        }, {
            'url': 'http://nova2-public.localhost:8774/v2.0/%s'
                   % (project_dict_1['id']),
            'region': 'RegionTwo',
            'interface': 'public',
            'id': uuid.uuid4().hex
        }]
    }

    # Tokens: expire tomorrow; the token id is delivered via the
    # X-Subject-Token response header (V3 convention).
    tomorrow = datetime_safe.datetime.now() + timedelta(days=1)
    expiration = datetime_safe.datetime.isoformat(tomorrow)
    auth_token = uuid.uuid4().hex
    auth_response_headers = {'X-Subject-Token': auth_token}
    auth_response = TestResponse({"headers": auth_response_headers})

    scoped_token_dict = {
        'token': {
            'methods': ['password'],
            'expires_at': expiration,
            'project': {
                'id': project_dict_1['id'],
                'name': project_dict_1['name'],
                'domain': {
                    'id': domain_dict['id'],
                    'name': domain_dict['name']
                }
            },
            'user': {
                'id': user_dict['id'],
                'name': user_dict['name'],
                'domain': {
                    'id': domain_dict['id'],
                    'name': domain_dict['name']
                }
            },
            'roles': [role_dict],
            'catalog': [keystone_service, nova_service]
        }
    }
    test_data.scoped_access_info = AccessInfo.factory(
        resp=auth_response,
        body=scoped_token_dict)

    # Unscoped token: no project, identity-only catalog.
    unscoped_token_dict = {
        'token': {
            'methods': ['password'],
            'expires_at': expiration,
            'user': {
                'id': user_dict['id'],
                'name': user_dict['name'],
                'domain': {
                    'id': domain_dict['id'],
                    'name': domain_dict['name']
                }
            },
            'roles': [role_dict],
            'catalog': [keystone_service]
        }
    }
    test_data.unscoped_access_info = AccessInfo.factory(
        resp=auth_response,
        body=unscoped_token_dict)

    # Service Catalog
    test_data.service_catalog = ServiceCatalog.factory({
        'methods': ['password'],
        'user': {},
        'catalog': [keystone_service, nova_service],
    }, token=auth_token)

    return test_data
class QuotaChecker:
    """Check and enforce OpenStack tenant quota.

    Verifies that a given tenant does have its correct allocated quota.

    Attributes:
        DEFAULT_QUOTA (dict): The default quota for a service developer.
        PARTNER_QUOTA (dict): The default quota for a partner with CRA.
        BIGDATA_QUOTA (dict): The quota for big data enabled projects.
    """
    # CRM (Insightly) custom-field values that select which quota applies.
    _DEFAULT_QUOTA_NAME = 'Default CRA quota'
    _BIGDATA_QUOTA_NAME = 'Bigdata CRA quota'
    # 'ram' is in MB and 'swift_bytes' in bytes -- presumably; the units
    # follow the nova/swift APIs they are fed to. TODO confirm.
    DEFAULT_QUOTA = {
        'instances': 16,
        'cores': 16,
        'ram': 32 * 1024,
        'floating_ips': 5,
        'cinder_GB': 1024,
        'swift_bytes': 1024 * 1024 * 1024 * 1024,
        'flavors': ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.x-large']
    }
    PARTNER_QUOTA = {
        'instances': 1,
        'cores': 1,
        'ram': 1024,
        'floating_ips': 1,
        'cinder_GB': 40,
        'swift_bytes': 40 * 1024 * 1024 * 1024,
        'flavors': ['m1.tiny']
    }
    BIGDATA_QUOTA = {
        'instances': 16,
        'cores': 46,
        'ram': 400 * 1024,
        'floating_ips': 15,
        'cinder_GB': 1024,
        'swift_bytes': 1024 * 1024 * 1024 * 1024,
        'flavors': ['m1.tiny', 'm1.small', 'hadoop.small', 'hadoop.medium', 'hadoop.large']
    }

    def __init__(self, username=None, password=None, tenantid=None, baseurl=None):
        """Set instance authentication constants.

        Args:
            username (str): OpenStack administrator username.
            password (str): OpenStack administrator password.
            tenantid (str): OpenStack tenant for the administrator account.
            baseurl (str): OpenStack environment URI.
        """
        self._AUTH_USERNAME = username
        self._AUTH_PASSWORD = password
        self._AUTH_TENANTID = tenantid
        self._BASE_URL = baseurl
        # Single Keystone v3 client shared by all the identity managers below.
        keystone = keystoneClient.Client(username=self._AUTH_USERNAME,
                                         password=self._AUTH_PASSWORD,
                                         project_name=self._AUTH_TENANTID,
                                         auth_url='%s:5001/v3' % self._BASE_URL)
        self._roleManager = RoleManager(keystone)
        self._groupManager = GroupManager(keystone)
        self._domainManager = DomainManager(keystone)
        self._projectManager = ProjectManager(keystone)
        self._roleAssignmentManager = RoleAssignmentManager(keystone)

    def _getOpenstackGroup(self, group):
        """Find a Keystone group by name; None when the lookup fails.

        NOTE(review): the bare except also swallows auth/connectivity
        errors, not just "not found".
        """
        try:
            os_group = self._groupManager.find(name=group)
        except:
            return None
        return os_group

    def _getTenantId(self, tenant):
        """Map a group to the id of the project it has a role on.

        Builds {group_id: project_id} from all group-scoped role
        assignments, then looks up `tenant` in it.
        NOTE(review): keys are Keystone group *ids* while callers pass
        the LDAP tenant name -- this only matches if ids equal names in
        this deployment; confirm.
        Relies on Python 2 map/filter returning lists.
        """
        projectMap = dict(map(lambda assignment: (assignment.group['id'],
                                                  assignment.scope['project']['id']),
                              filter(lambda a: 'group' in a._info.keys(),
                                     self._roleAssignmentManager.list())))
        return projectMap[tenant].strip() if tenant in projectMap.keys() else None

    def _ensureTenantNetwork(self, tenant):
        """Ensure the tenant has a default network/subnet/router.

        If the tenant owns no network: create one, carve the next free
        192.168.x.y/27 slice, create the subnet, then a router gatewayed
        to the external 'public' network. Polling loops wait until each
        freshly created resource becomes listable.
        """
        neutron = neutronClient.Client(username=self._AUTH_USERNAME,
                                       password=self._AUTH_PASSWORD,
                                       tenant_id=self._AUTH_TENANTID,
                                       auth_url='%s:5001/v2.0' % self._BASE_URL)
        # Only act when the tenant has no network at all (py2 filter -> list).
        if not filter(lambda network: network['tenant_id'] == tenant,
                      neutron.list_networks()['networks']):
            network = neutron.create_network({'network': {'name': 'default',
                                                          'tenant_id': tenant}})['network']
            while not neutron.list_networks(id=network['id'])['networks']:
                sleep(1)
            # Collect the (3rd, 4th) octet pairs of every existing /27 subnet.
            allocated_cidrs = map(lambda chunk: (int(chunk[0]), int(chunk[1])),
                                  map(lambda cidr: cidr['cidr'].split('/')[0].split('.')[-2:],
                                      filter(lambda subnet: subnet['cidr'].endswith('/27'),
                                             neutron.list_subnets()['subnets'])))
            # 192.168.192.0/27 is excluded from the allocation scan --
            # presumably reserved; confirm.
            if (192, 0) in allocated_cidrs:
                allocated_cidrs.remove((192, 0))
            if allocated_cidrs:
                # Next slice after the highest allocated one; /27 => step 32,
                # last slice in an octet starts at .224.
                max_bigchunk = max(map(lambda chunk: chunk[0], allocated_cidrs))
                max_smlchunk = max(map(lambda chunk: chunk[1],
                                       filter(lambda c: c[0] == max_bigchunk, allocated_cidrs)))
                if max_bigchunk == 191 and max_smlchunk == 224:
                    # Skip over the reserved 192.168.192.0/27 block.
                    max_bigchunk = 192
                    max_smlchunk = 0
                if max_smlchunk == 224:
                    cidr = '.'.join([str(chunk) for chunk in [192, 168, max_bigchunk + 1, 0]]) + '/27'
                else:
                    cidr = '.'.join([str(chunk) for chunk in [192, 168, max_bigchunk, max_smlchunk + 32]]) + '/27'
            else:
                cidr = '192.168.0.0/27'
            subnet = neutron.create_subnet({'subnet': {'name': 'default-subnet',
                                                       'cidr': cidr,
                                                       'dns_nameservers': ['193.166.4.24', '193.166.4.25'],
                                                       'tenant_id': tenant,
                                                       'network_id': network['id'],
                                                       'ip_version': '4'}})['subnet']
            while not neutron.list_subnets(id=subnet['id'])['subnets']:
                sleep(1)
            router = neutron.create_router({'router': {'tenant_id': tenant,
                                                       'name': 'default-router'}})['router']
            while not neutron.list_routers(id=router['id'])['routers']:
                sleep(1)
            public_net_id = filter(lambda n: n['router:external'],
                                   neutron.list_networks(name='public')['networks'])[0]['id']
            neutron.add_gateway_router(router['id'], {'network_id': public_net_id})
            neutron.add_interface_router(router['id'], {'subnet_id': subnet['id']})

    def _getTenantQuota(self, tenant, tenantType):
        """Resolve the quota dict for a CRM tenant record.

        Args:
            tenant (dict): CRM record; the quota name is read from the
                PROJECT_FIELD_1 custom field.
            tenantType (str): one of the LDAPUpdater tenant type constants.

        Returns:
            dict or None: one of the *_QUOTA class attributes, or None
            when no rule matches (caller then skips quota updates).
        """
        quota = None
        # py2 map/filter -> list; raises IndexError if the field is absent.
        statedQuota = map(lambda q: q['FIELD_VALUE'],
                          filter(lambda f: f['CUSTOM_FIELD_ID'] == 'PROJECT_FIELD_1',
                                 tenant['CUSTOMFIELDS']))[0]
        if statedQuota == self._BIGDATA_QUOTA_NAME:
            quota = self.BIGDATA_QUOTA
        else:
            if statedQuota == self._DEFAULT_QUOTA_NAME:
                if tenantType == LDAPUpdater.FPA_CRA:
                    quota = self.PARTNER_QUOTA
                if tenantType == LDAPUpdater.SDA:
                    quota = self.DEFAULT_QUOTA
        return quota

    def _grantAccess(self, client, flavor, tenant):
        """Grant the tenant access to a flavor; already-granted is OK."""
        try:
            client.flavor_access.add_tenant_access(flavor, tenant)
        except Conflict:
            pass

    def _revokeAccess(self, client, flavor, tenant):
        """Revoke the tenant's access to a flavor; not-granted is OK."""
        try:
            client.flavor_access.remove_tenant_access(flavor, tenant)
        except NotFound:
            pass

    def _enforceQuota(self, ldap_tenant, quotaDefinition, ldap_conn=None):
        """Apply the given quota to one LDAP tenant.

        Ensures the matching OpenStack project exists (creating it and
        granting the LDAP group the 'member' role when needed), optionally
        opens SSH for platform projects, guarantees default networking,
        then pushes swift/cinder/nova/neutron quotas and flavor access.

        Args:
            ldap_tenant (str): tenant (group) name from LDAP.
            quotaDefinition (dict): one of the *_QUOTA dicts, or None to
                skip quota updates.
            ldap_conn: optional LDAP connection used to detect platform
                projects.
        """
        openstackGroup = self._getOpenstackGroup(ldap_tenant)
        if openstackGroup:
            tenant = self._getTenantId(ldap_tenant)
            if not tenant:
                # Create or map tenant in openstack
                project = self._projectManager.list(name=ldap_tenant)
                if not project:
                    project = self._projectManager.create(ldap_tenant,
                                                          self._domainManager.find(id='default'))
                self._roleManager.grant(self._roleManager.find(name='member').id,
                                        group=openstackGroup.id,
                                        project=project.id)
                tenant = project.id
            # Platform projects (children of cn=digile.platform) get an SSH
            # rule for a fixed management host added to their default group.
            # NOTE(review): backslash continues the DN string literal; any
            # leading whitespace on the next line would become part of the DN.
            if ldap_conn and ldap_tenant in map(lambda t: t[0].split(',')[0].split('=')[1],
                                                ldap_conn.ldap_search('cn=digile.platform,ou=projects,\
dc=forgeservicelab,dc=fi', SCOPE_SUBORDINATE, attrsonly=1)):
                with novaClient.Client(username=self._AUTH_USERNAME,
                                       api_key=self._AUTH_PASSWORD,
                                       tenant_id=tenant,
                                       auth_url='%s:5001/v2.0' % self._BASE_URL) as nova:
                    try:
                        nova.security_group_rules.create(nova.security_groups.find(name='default').id,
                                                         ip_protocol='tcp',
                                                         from_port=22,
                                                         to_port=22,
                                                         cidr='86.50.27.230/32')
                    except Unauthorized:
                        # butler.service not yet part of the tenant, wait for next round.
                        pass
                    except BadRequest:
                        # Rule already exists, that's OK.
                        pass
            self._ensureTenantNetwork(tenant)
            if quotaDefinition:
                # Swift account quota is set via account metadata POST.
                service_opts = {
                    'meta': ['quota-bytes:%s' % quotaDefinition['swift_bytes']],
                    'os_username': self._AUTH_USERNAME,
                    'os_password': self._AUTH_PASSWORD,
                    'os_auth_url': '%s:5001/v2.0' % self._BASE_URL,
                    'os_storage_url': '%s:8081/v1/AUTH_%s' % (self._BASE_URL,
                                                              self._projectManager.get(tenant).name),
                    'os_tenant_name': self._AUTH_TENANTID
                }
                swift = swiftService.SwiftService(options=service_opts)
                swift.post()
                del swift
                cinder = cinderClient.Client(username=self._AUTH_USERNAME,
                                             api_key=self._AUTH_PASSWORD,
                                             tenant_id=self._AUTH_TENANTID,
                                             auth_url=service_opts['os_auth_url'])
                cinder.quotas.update(tenant, gigabytes=quotaDefinition['cinder_GB'])
                del cinder
                with novaClient.Client(username=self._AUTH_USERNAME,
                                       api_key=self._AUTH_PASSWORD,
                                       tenant_id=self._AUTH_TENANTID,
                                       auth_url=service_opts['os_auth_url']) as nova:
                    nova.quotas.update(tenant,
                                       instances=quotaDefinition['instances'],
                                       cores=quotaDefinition['cores'],
                                       ram=quotaDefinition['ram'],
                                       floating_ips=quotaDefinition['floating_ips'])
                    # Sync flavor access to exactly the allowed list
                    # (py2: map() runs the side effects eagerly).
                    allFlavors = nova.flavors.findall(is_public=None)
                    map(lambda f: self._grantAccess(nova, f, tenant),
                        filter(lambda f: f.name.encode() in quotaDefinition['flavors'], allFlavors))
                    map(lambda f: self._revokeAccess(nova, f, tenant),
                        filter(lambda f: f.name.encode() not in quotaDefinition['flavors'], allFlavors))
                neutron = neutronClient.Client(username=self._AUTH_USERNAME,
                                               password=self._AUTH_PASSWORD,
                                               tenant_id=self._AUTH_TENANTID,
                                               auth_url=service_opts['os_auth_url'])
                neutron.update_quota(tenant, {'quota': {'floatingip': quotaDefinition['floating_ips']}})
                del neutron
            # Every managed tenant keeps access to the smallest flavor.
            with novaClient.Client(username=self._AUTH_USERNAME,
                                   api_key=self._AUTH_PASSWORD,
                                   tenant_id=self._AUTH_TENANTID,
                                   auth_url='%s:5001/v2.0' % self._BASE_URL) as nova:
                self._grantAccess(nova, nova.flavors.find(name='m1.tiny', is_public=None), tenant)

    def enforceQuotas(self, tenantList, tenantsType, ldap_conn=None):
        """Enforce the quota for each tenant on the list.

        Args:
            tenantList (List): A list of tenants as JSON from Insightly.
            tenantsType (str): A description of the type of tenant, one of
                'SDA', 'FPA' or 'FPA (CRA)'.
        """
        # py2 map() evaluates eagerly, so the side effects run here.
        map(lambda t: self._enforceQuota(sanitize(t['PROJECT_NAME']),
                                         self._getTenantQuota(t, tenantsType),
                                         ldap_conn),
            tenantList)