def test_get_swiftclient(self):
    """test_get_swiftclient check that we could retrieve a Session
    client to work with swift using keystone v3"""
    clients = OpenStackClients(modules="swift")
    swift = clients.get_swiftclient()
    self.assertIsInstance(swift, Connection)
def test_get_cinderclient(self):
    """test_get_cinderclient check that we could retrieve a Session
    client to work with cinder"""
    clients = OpenStackClients(modules="cinder")
    cinder = clients.get_cinderclient()
    # api_version = cinder.get_volume_api_version_from_endpoint()
    # --> This should return "2" against a server
    self.assertIsInstance(cinder, cinderclient.v2.client.Client)
def test_get_keystoneclient_v2(self):
    """test_get_keystoneclient_v2 check that we could retrieve a Session
    client to work with keystone v2"""
    clients = OpenStackClients()
    clients.use_v3 = False
    keystone = clients.get_keystoneclient()
    self.assertIsInstance(keystone, keystoneclient.v2_0.client.Client)
def test_get_cinderclient_unknown_module(self):
    """test_get_cinderclient_unknown_module check that we could not
    retrieve a Session client to work with cinder if there is no modules
    defined"""
    # Bug fix: the original wrapped the call in try/except and called
    # self.assertRaises(ex) with an exception *instance*, which asserts
    # nothing — the test silently passed even when no exception was
    # raised. The context-manager form makes the expectation real.
    osclients = OpenStackClients(modules="")
    with self.assertRaises(Exception):
        osclients.get_cinderclient()
def test_get_neutronclient_with_all_modules(self):
    """test_get_neutronclient_with_all_modules check that we could
    retrieve a Session client to work with neutron if osclients is
    created with all modules"""
    clients = OpenStackClients(modules="auto")
    neutron = clients.get_neutronclient()
    self.assertIsInstance(neutron, neutronclient.v2_0.client.Client)
def test_get_glanceclient(self):
    """test_get_glanceclient check that we could retrieve a Session
    client to work with glance"""
    clients = OpenStackClients(modules="glance")
    clients.set_keystone_version(use_v3=False)
    glance = clients.get_glanceclient()
    self.assertIsInstance(glance, glanceclient.v1.client.Client)
def __init__(self):
    """constructor"""
    self.osclients = OpenStackClients()
    self.keystone = self.osclients.get_keystoneclient()
    self.change_domain_name()
    self.password_changer = PasswordChanger(self.osclients)
    # Propagate the deployment region to the variable the clients read.
    region = os.environ['REGION']
    os.environ['OS_REGION_NAME'] = region
    self.region_exists(region)
def test_get_session_using_token(self):
    """test creating a session using a token instead of a password"""
    osclients = OpenStackClients()
    osclients.set_token('faketoken')
    session = osclients.get_session()
    self.assertIsInstance(session, keystoneclient.session.Session)
    # assertIs on the exact type keeps the original strict check (no
    # subclasses accepted) but reports both types on failure, unlike
    # assertTrue(type(...) == ...), which only says "False is not true".
    self.assertIs(type(session.auth),
                  keystoneclient.auth.identity.v3.token.Token)
def test_get_keystoneclient_v3_with_trust_id(self):
    """test_get_keystoneclient_v3_with_trust_id check that we could
    retrieve a Session client to work with keystone v3 and using
    trust_id"""
    clients = OpenStackClients()
    fake_trust = "randomid0000000000000000000000001"
    clients.set_credential(self.OS_USERNAME, self.OS_PASSWORD,
                           trust_id=fake_trust)
    keystone = clients.get_keystoneclient()
    self.assertIsInstance(keystone, keystoneclient.v3.client.Client)
def test_get_keystoneclient_v2_with_tenant_id(self):
    """test_get_keystoneclient_v2_with_tenant_id check that we could
    retrieve a Session client to work with keystone v2 and using
    tenant_id"""
    clients = OpenStackClients()
    clients.use_v3 = False
    clients.set_credential(self.OS_USERNAME, self.OS_PASSWORD,
                           tenant_id=self.OS_TENANT_ID)
    keystone = clients.get_keystoneclient()
    self.assertIsInstance(keystone, keystoneclient.v2_0.client.Client)
def test_set_region(self):
    """test_set_region check that we could change the region after
    create the client"""
    clients = OpenStackClients()
    # FIRST CHECK: Region is recovered from ENV
    self.assertEqual(clients.region, self.OS_REGION_NAME)
    # Check that region is updated to a new Value.
    new_region = "Budapest"
    clients.set_region(new_region)
    self.assertEqual(clients.region, new_region)
def get_credentials(self):
    """Return a keystone client for the configured region; if the
    'default' domain cannot be found, fall back to the 'Spain2' region
    with default domain settings and return a client built there.

    :return: a keystone client object.
    """
    os.environ['OS_REGION_NAME'] = self.get_region()
    osclients = OpenStackClients()
    keystone = osclients.get_keystoneclient()
    try:
        # Bug fix: the original probed self.keystone (a possibly stale
        # attribute) instead of the client created just above, so the
        # returned client was never the one actually checked.
        keystone.domains.find(name="default")
        return keystone
    except Exception:
        # Domain lookup failed: retry with explicit default-domain
        # settings against the Spain2 region.
        os.environ['OS_USER_DOMAIN_NAME'] = "Default"
        os.environ['OS_PROJECT_DOMAIN_ID'] = "default"
        os.environ['OS_REGION_NAME'] = "Spain2"
        osclients = OpenStackClients()
        return osclients.get_keystoneclient()
def __init__(self):
    """constructor, build the list of basic users included in
    users_to_delete.txt"""
    basic_type = settings.BASIC_ROLE_ID
    # Bug fix: the original called open() without ever closing the file
    # handle; a context manager guarantees it is released.
    with open('users_to_delete.txt') as ids_file:
        self.ids = set(line.strip() for line in ids_file)
    osclients = OpenStackClients()
    keystone = osclients.get_keystoneclientv3()
    # Users that both appear in the file and hold the basic role in the
    # default domain.
    self.users_basic = set(
        asig.user['id'] for asig in
        keystone.role_assignments.list(domain='default')
        if asig.role['id'] == basic_type and asig.user['id'] in self.ids)
def __init__(self):
    """constructor, build the list of basic users included in
    users_to_delete.txt"""
    basic_type = settings.BASIC_ROLE_ID
    # Bug fix: the original called open() without ever closing the file
    # handle; a context manager guarantees it is released.
    with open('users_to_delete.txt') as ids_file:
        self.ids = set(line.strip() for line in ids_file)
    osclients = OpenStackClients()
    keystone = osclients.get_keystoneclientv3()
    # Users that both appear in the file and hold the basic role in the
    # default domain.
    self.users_basic = set(
        asig.user['id'] for asig in
        keystone.role_assignments.list(domain='default')
        if asig.role['id'] == basic_type and asig.user['id'] in self.ids)
def setUp(self):
    """prepare a fake access catalog and a fresh OpenStackClients"""
    catalog = defaultdict(list)
    catalog['catalog'].append(service)
    self.access = catalog
    self.osclients = OpenStackClients()
    self.url = 'http://fake.org:9090'
    self.original_url = service['endpoints'][1]['url']
def __init__(self, target):
    """Create a new Facade for the specified target (a target is shared
    between regions using the same credential)"""
    self.osclients = OpenStackClients(target['keystone_url'])
    self.osclients.set_credential(target['user'], target['password'],
                                  target['tenant'])
    # Keystone v3 is opt-in via the target configuration.
    self.osclients.set_keystone_version(
        bool(target.get('use_keystone_v3', False)))
    self.session = self.osclients.get_session()
    self.target = target
    # This is a default value
    self.images_dir = '/var/lib/glance/images'
    self.logger = logger_cli
def generate_trust_ids(users_to_delete):
    """
    From a list of users to delete, generate a file with a trustid for
    each user. The user is acting as the trustor, delegating in a
    trustee, which will impersonate it to delete its resources.

    :param users_to_delete: a list of trustors.
    :return: this function does not return anything. It creates a file.
    """
    global logger
    osclients = OpenStackClients()
    check_users = CheckUsers()
    # Use an alternative URL that allow direct access to the keystone admin
    # endpoint, because the registered one uses an internal IP address.
    osclients.override_endpoint(
        'identity', osclients.region, 'admin', KEYSTONE_ENDPOINT)
    trust_factory = TrustFactory(osclients)
    lines = users_to_delete.readlines()
    total = len(lines)
    count = 0
    if 'TRUSTEE_USER' in env:
        trustee = env['TRUSTEE_USER']
    else:
        trustee = TRUSTEE
    # Bug fixes: the output file was never closed (risking lost buffered
    # writes), and "except Exception, e" is Python-2-only syntax; "as e"
    # works on both Python 2.6+ and Python 3.
    users_trusted_ids = open('users_trusted_ids.txt', 'w')
    try:
        for user in lines:
            user = user.strip()
            if user == '':
                continue
            try:
                count += 1
                (username, trust_id) = trust_factory.create_trust_admin(
                    user, trustee)
                users_trusted_ids.write(username + ',' + trust_id + '\n')
                msg = 'Generated trustid for user {0} ({1}/{2})'
                logger.info(msg.format(user, count, total))
            except Exception as e:
                msg = 'Failed getting trust-id from trustor {0}. Reason: {1}'
                logger.error(msg.format(user, str(e)))
    finally:
        users_trusted_ids.close()
def __init__(self, target):
    """Create a new Facade for the specified target (a target is shared
    between regions using the same credential)"""
    self.osclients = OpenStackClients(target['keystone_url'])
    self.osclients.set_credential(target['user'], target['password'],
                                  target['tenant'])
    # Keystone v3 is opt-in via the target configuration.
    self.osclients.set_keystone_version(
        bool(target.get('use_keystone_v3', False)))
    self.session = self.osclients.get_session()
    self.target = target
    # This is a default value
    self.images_dir = '/var/lib/glance/images'
    self.logger = logger_cli
def test_get_session_without_username_nor_token(self):
    """test_get_session_without_username check that we could not
    retrieve a session without username"""
    # Bug fix: the original try/except called self.assertRaises(ex) with
    # an exception instance, which asserts nothing — the test passed
    # even when get_session() succeeded. The context-manager form fails
    # when no exception is raised.
    osclients = OpenStackClients()
    osclients.set_credential("", self.OS_PASSWORD,
                             tenant_id=self.OS_TENANT_ID)
    # Checking v3
    with self.assertRaises(Exception):
        osclients.get_session()
    # Checking v2
    osclients.use_v3 = False
    with self.assertRaises(Exception):
        osclients.get_session()
def test_get_session_without_auth_url(self):
    """test_get_session_without_auth_url check that we could not
    retrieve a session without auth_url"""
    # Bug fix: the original try/except called self.assertRaises(ex) with
    # an exception instance, which asserts nothing — the test passed
    # even when get_session() succeeded. The context-manager form fails
    # when no exception is raised.
    osclients = OpenStackClients()
    osclients.auth_url = None
    # Checking v3
    with self.assertRaises(Exception):
        osclients.get_session()
    # Checking v2
    osclients.use_v3 = False
    with self.assertRaises(Exception):
        osclients.get_session()
def __init__(self, username, password, tenant_id=None, tenant_name=None,
             trust_id=None):
    """
    Constructor of the class. Tenant_id or tenant_name or trust_id must
    be provided.

    There are two ways of using this class:
    -passing the user, password, tenant_id or tenant_name of the user
    whose resources are being deleted
    -passing the user and password of the trustee, and the trust_id
    generated to impersonate the trustor.

    :param username: the user name whose resources are deleted
        or the user name of the trustee
    :param password: the password of the user whose resources are
        deleted or the password of the trustee
    :param tenant_id: the tenant id of the user whose resources must be
        deleted.
    :param tenant_name: the tenant name of the user whose resources must
        be deleted
    :param trust_id: the trust_id used to impersonate the user whose
        resources must be deleted
    :return: nothing
    """
    self.logger = logging.getLogger(__name__)
    self.clients = OpenStackClients()
    if tenant_id:
        self.clients.set_credential(username, password,
                                    tenant_id=tenant_id)
    elif tenant_name:
        self.clients.set_credential(username, password,
                                    tenant_name=tenant_name)
    elif trust_id:
        self.clients.set_credential(username, password,
                                    trust_id=trust_id)
        self.trust_id = trust_id
    else:
        # Bug fix: the original was raise('...'), which raises a plain
        # string and produces a confusing TypeError at runtime instead
        # of the intended message.
        raise ValueError(
            'Either tenant_id or tenant_name or trust_id must be provided')
    region = self.clients.region
    self.clients.override_endpoint(
        'identity', region, 'admin', settings.KEYSTONE_ENDPOINT)
    self.user_id = self.clients.get_session().get_user_id()
    self.user_name = username
    self.nova = NovaResources(self.clients)
    self.cinder = CinderResources(self.clients)
    self.glance = GlanceResources(self.clients)
    try:
        self.neutron = NeutronResources(self.clients)
    except Exception:
        # The region does not support Neutron
        # It would be better to check the endpoint
        self.neutron = None
    try:
        self.blueprints = BluePrintResources(self.clients)
    except Exception:
        # The region does not support PaaS Manager
        # It would be better to check the endpoint
        self.blueprints = None
    try:
        self.swift = SwiftResources(self.clients)
    except Exception:
        # The region does not support Swift
        # It would be better to check the endpoint
        self.swift = None
    # Images in use is a set used to avoid deleting formerly glance images
    # in use by other tenants
    self.imagesinuse = set()
    # Regions the user has access
    self.regions_available = set()
    self.regions_available.update(self.clients.get_regions('compute'))
class UserResources(object):
    """Class to list, delete user resources. Also provides a method to
    stop all the VM of the tenant.

    This class works creating a instance with the credential of the user
    owner of the resources. It does not use an admin credential!!!"""

    def __init__(self, username, password, tenant_id=None,
                 tenant_name=None, trust_id=None):
        """
        Constructor of the class. Tenant_id or tenant_name or trust_id
        must be provided.

        There are two ways of using this class:
        -passing the user, password, tenant_id or tenant_name of the
        user whose resources are being deleted
        -passing the user and password of the trustee, and the trust_id
        generated to impersonate the trustor.

        :param username: the user name whose resources are deleted
            or the user name of the trustee
        :param password: the password of the user whose resources are
            deleted or the password of the trustee
        :param tenant_id: the tenant id of the user whose resources must
            be deleted.
        :param tenant_name: the tenant name of the user whose resources
            must be deleted
        :param trust_id: the trust_id used to impersonate the user whose
            resources must be deleted
        :return: nothing
        """
        self.logger = logging.getLogger(__name__)
        self.clients = OpenStackClients()
        if tenant_id:
            self.clients.set_credential(username, password,
                                        tenant_id=tenant_id)
        elif tenant_name:
            self.clients.set_credential(username, password,
                                        tenant_name=tenant_name)
        elif trust_id:
            self.clients.set_credential(username, password,
                                        trust_id=trust_id)
            self.trust_id = trust_id
        else:
            # Bug fix: the original was raise('...'), which raises a
            # plain string and produces a TypeError instead of the
            # intended message.
            raise ValueError(
                'Either tenant_id or tenant_name or trust_id must be '
                'provided')
        region = self.clients.region
        self.clients.override_endpoint(
            'identity', region, 'admin', settings.KEYSTONE_ENDPOINT)
        self.user_id = self.clients.get_session().get_user_id()
        self.user_name = username
        self.nova = NovaResources(self.clients)
        self.cinder = CinderResources(self.clients)
        self.glance = GlanceResources(self.clients)
        try:
            self.neutron = NeutronResources(self.clients)
        except Exception:
            # The region does not support Neutron
            # It would be better to check the endpoint
            self.neutron = None
        try:
            self.blueprints = BluePrintResources(self.clients)
        except Exception:
            # The region does not support PaaS Manager
            # It would be better to check the endpoint
            self.blueprints = None
        try:
            self.swift = SwiftResources(self.clients)
        except Exception:
            # The region does not support Swift
            # It would be better to check the endpoint
            self.swift = None
        # Images in use is a set used to avoid deleting formerly glance
        # images in use by other tenants
        self.imagesinuse = set()
        # Regions the user has access
        self.regions_available = set()
        self.regions_available.update(self.clients.get_regions('compute'))

    def change_region(self, region):
        """
        change the region. All the clients need to be updated, but the
        session does not.
        :param region: the name of the region
        :return: nothing.
        """
        self.clients.set_region(region)
        self.clients.override_endpoint(
            'identity', region, 'admin', settings.KEYSTONE_ENDPOINT)
        self.nova.on_region_changed()
        self.glance.on_region_changed()
        try:
            if self.swift:
                self.swift.on_region_changed()
            else:
                self.swift = SwiftResources(self.clients)
        except Exception:
            # The region does not support swift
            self.swift = None
        self.cinder.on_region_changed()
        try:
            if self.blueprints:
                # Bug fix: the original called self.blueprint (typo);
                # the AttributeError was silently swallowed and
                # blueprints was reset to None on every region change.
                self.blueprints.on_region_changed()
            else:
                self.blueprints = BluePrintResources(self.clients)
        except Exception:
            # The region has not configured paas manager
            self.blueprints = None
        try:
            if self.neutron:
                self.neutron.on_region_changed()
            else:
                self.neutron = NeutronResources(self.clients)
        except Exception:
            # The region does not support neutron
            self.neutron = None

    def delete_tenant_resources_pri_1(self):
        """Delete here all the elements that do not depend of others are
        deleted first"""
        try:
            self.nova.delete_user_keypairs()
        except Exception as e:
            msg = 'Deletion of keypairs failed. Reason: '
            self.logger.error(msg + str(e))
        # Snapshots must be deleted before the volumes, because a
        # snapshot depends of a volume.
        try:
            self.cinder.delete_tenant_volume_snapshots()
        except Exception as e:
            msg = 'Deletion of volume snaphosts failed. Reason: '
            self.logger.error(msg + str(e))
class ServersFacade(object):
    """Facade to access the image (glance) services of a target, one
    shared instance per credential."""

    def __init__(self, target):
        """Create a new Facade for the specified target (a target is
        shared between regions using the same credential)"""
        self.osclients = OpenStackClients(target['keystone_url'])
        self.osclients.set_credential(target['user'], target['password'],
                                      target['tenant'])
        if target.get('use_keystone_v3', False):
            self.osclients.set_keystone_version(True)
        else:
            self.osclients.set_keystone_version(False)
        self.session = self.osclients.get_session()
        self.target = target
        # This is a default value
        self.images_dir = '/var/lib/glance/images'
        self.logger = logger_cli

    def _get_glanceclient(self, region):
        """helper method, to get a glanceclient for the region"""
        self.osclients.set_region(region)
        return self.osclients.get_glanceclient()

    def get_regions(self):
        """It returns the list of regions on the specified target.
        :return: a list of region names.
        """
        return self.osclients.get_regions('image')

    def get_imagelist(self, regionobj):
        """return a image list from the glance of the specified region

        :param regionobj: The GlanceSyncRegion object of the region to
            list
        :return: a list of GlanceSyncImage objects
        """
        client = self._get_glanceclient(regionobj.region)
        try:
            target = regionobj.target
            # We need a Pool to implement a timeout. Unfortunately
            # setting client.images.client.timeout does nothing.
            if 'list_images_timeout' in target:
                timeout = target['list_images_timeout']
            else:
                timeout = _default_timeout
            pool = Pool(1)
            try:
                result = pool.apply_async(_getrawimagelist, (client,))
                images = result.get(timeout=timeout)
            finally:
                # Bug fix: the pool was never released; on a timeout the
                # stuck worker process leaked. terminate() kills it.
                pool.terminate()
            image_list = list()
            for image in images:
                i = GlanceSyncImage(
                    image['name'], image['id'], regionobj.fullname,
                    image['owner'], image['is_public'],
                    image['checksum'], image['size'], image['status'],
                    image['properties'], image)
                image_list.append(i)
        except TimeoutError:
            msg = regionobj.fullname + \
                ': Timeout while retrieving image list.'
            self.logger.error(msg)
            raise GlanceFacadeException(msg)
        except Exception as e:
            cause = str(e)
            if not cause:
                cause = repr(e)
            msg = regionobj.fullname + \
                ': Error retrieving image list. Cause: ' + cause
            self.logger.error(msg)
            raise GlanceFacadeException(msg)
        return image_list
def test_get_novaclient(self):
    """test_get_novaclient check that we could retrieve a Session
    client to work with nova"""
    clients = OpenStackClients(modules="nova")
    nova = clients.get_novaclient()
    self.assertIsInstance(nova, novaclient.v2.client.Client)
class TestOSClientsOverrideEndpoint(TestCase):
    """Class to test the endpoint override feature"""

    def setUp(self):
        d = defaultdict(list)
        d['catalog'].append(service)
        self.access = d
        self.osclients = OpenStackClients()
        self.url = 'http://fake.org:9090'
        self.original_url = service['endpoints'][1]['url']

    def restore_catalog(self):
        """restore catalog"""
        service['endpoints'][1]['url'] = self.original_url

    def tearDown(self):
        """restore objects"""
        self.restore_catalog()

    def override_endpoint(self):
        """method that override the endpoint"""
        self.osclients.override_endpoint('object-store', 'Spain2', 'admin',
                                         self.url)

    def assertOverrideEndpoint(self):
        """check that the override has been done"""
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(
            self.osclients.get_admin_endpoint('object-store', 'Spain2'),
            self.url)

    def test_override_endpoint_session(self):
        """test that invoking override endpoint does not create a
        session"""
        self.override_endpoint()
        self.assertFalse(self.osclients._session_v2)
        self.assertFalse(self.osclients._session_v3)

    def test_override_endpoint(self):
        """check that a session catalog is overriden"""
        mock = MagicMock()
        config = {'auth.get_access.return_value': self.access}
        mock.configure_mock(**config)
        self.osclients._session_v3 = mock
        self.override_endpoint()
        self.assertOverrideEndpoint()

    @patch('utils.osclients.session')
    def test_override_endpoint_multiple(self, mock):
        """test that override works with an already created session and
        then with a new one without invoking the method again"""
        config = {
            'Session.return_value.auth.get_access.return_value':
                self.access}
        mock.configure_mock(**config)
        session = self.osclients.get_session()
        self.override_endpoint()
        self.assertOverrideEndpoint()
        # invalidate and create a new session; ensure than catalog is
        # again the original. Setting a new token invalidate the
        # session. The new one is created at the invocation of
        # get_admin_endpoint.
        self.restore_catalog()
        self.osclients.set_token('faketoken')
        # check again
        self.assertOverrideEndpoint()
etckeystone_path = '/home/ubuntu/idm/keystone/etc/keystone.conf' # reset the password p2 = Popen(["curl", "http://169.254.169.254/openstack/latest/meta_data.json"], stdout=PIPE) metadatajson, err = p2.communicate() meta = json.loads(metadatajson)["meta"] keystone_ip = meta["keystone_ip"] region = meta["Region"] region2 = meta["region_keystone"] if region2: os.environ['OS_REGION_NAME'] = region2 wait_net_service(keystone_ip, 5000, timeout=720) osclients = OpenStackClients('http://{0}:5000/v3/'.format(keystone_ip)) osclients.set_credential('idm', 'idm', 'idm') # create idm region user password_changer = PasswordChanger(osclients) idm = password_changer.get_user_byname("idm") idm = password_changer.get_user_byname('idm') # new_password = password_changer.reset_password(idm) new_password = '******' credential = """export OS_AUTH_URL=http://{0}:5000/v3/ export OS_AUTH_URL_V2=http://{0}:5000/v2.0/ export OS_USERNAME={2} export OS_TENANT_NAME=idm
class OpenStackMap(object): """ This class build a map from the resources (VMs, networks, images, volumes, users, tenants, roles...) in an OpenStack infrastructure. This map is optionally cached on disk and can be accessed offline or can be used as a metadata snapshot of an OpenStack infrastructure (e.g. to analyse resource use and build statistics) """ # objects strategy see the __init__ method documentation DIRECT_OBJECTS, NO_CACHE_OBJECTS, REFRESH_OBJECTS, USE_CACHE_OBJECTS, USE_CACHE_OBJECTS_ONLY = \ range(5) # If use_wrapper is True, dictionaries are wrapped to allow access to # resource['field'] also as resource.field. This is not used when # objects_strategy is DIRECT_OBJECTS use_wrapper = True load_filters = True resources_region = ['vms', 'images', 'routers', 'networks', 'subnets', 'ports', 'floatingips', 'security_groups', 'volumes', 'volume_backups', 'volume_snapshots'] def __init__( self, persistence_dir='~/openstackmap', region=None, auth_url=None, objects_strategy=USE_CACHE_OBJECTS, auto_load=True): """ Constructor :param persistence_dir: The path where the data is saved. Ignored if objects_strategy is DIRECT_OBJECTS or NO_CACHE_OBJECTS :param region: the initial region (if undefined, use OS_REGION_NAME) :param auth_url: the keystone URI :param objects_strategy: sets the strategy about the object maps contained here. It can be: * DIRECT_OBJECTS is using the objects as they are got from the API. Be careful because methods as delete are available! The objects are not cached. * NO_CACHE_OBJECTS is using the objects converted to dictionaries, so methods to do operations are not available. The objects are not cached. * REFRESH_OBJECTS is using the objects converted to dictionaries. The objects are cached: the new version replace the old one. * USE_CACHE_OBJECTS is using the objects converted to dictionaries. If a cached copy of the objects are available, it is used. * USE_CACHE_OBJECTS_ONLY is using the objects converted to dictionaries. 
This strategy used cached objects only. It never contacts with the servers, even when the object is not available in the local cache. :param auto_load: if True, invoke self.load_all() Note that neutron objects returned by the API are already dictionaries """ self.logger = logging.getLogger(__name__) if auth_url: self.osclients = OpenStackClients(auth_url=auth_url) else: self.osclients = OpenStackClients() if region: self.osclients.set_region(region) else: if 'OS_REGION_NAME' not in env: raise Exception('Region parameter must be provided or ' 'OS_REGION_NAME variable must be defined.') else: region = env['OS_REGION_NAME'] if 'KEYSTONE_ADMIN_ENDPOINT' in os.environ: self.osclients.override_endpoint( 'identity', self.osclients.region, 'admin', os.environ['KEYSTONE_ADMIN_ENDPOINT']) self.objects_strategy = objects_strategy self.persistence_dir = os.path.expanduser(persistence_dir) self.pers_region = self.persistence_dir + '/' + region self.pers_keystone = self.persistence_dir + '/keystone' if objects_strategy not in (OpenStackMap.DIRECT_OBJECTS, OpenStackMap.NO_CACHE_OBJECTS): if not os.path.exists(self.persistence_dir): os.mkdir(self.persistence_dir) if not os.path.exists(self.pers_keystone): os.mkdir(self.pers_keystone) if not os.path.exists(self.pers_region): os.mkdir(self.pers_region) self._init_resource_maps() if auto_load: self.load_all() self.region_map = dict() def _init_resource_maps(self): """init all the resources that will be available as empty dictionaries""" # Keystone resources self.users = dict() self.users_by_name = dict() self.roles = dict() self.tenants = dict() self.tenants_by_name = dict() self.roles_a = list() self.roles_by_project = dict() self.roles_by_user = dict() self.filters = dict() self.filters_by_project = dict() # Glance resources self.images = dict() # Neutron resources self.networks = dict() self.subnets = dict() self.routers = dict() self.floatingips = dict() self.floatingips_by_ip = dict() self.ports = dict() self.security_groups = 
dict() # Nova resources self.vms = dict() self.flavors = dict() # Cinder resources self.volumes = dict() self.volume_backups = dict() self.volume_snapshots = dict() def _load(self, name): """Load the resources persisted with pickle. This resources (e.g. networks, vms, images...) are saved independently for each region. This method is called for load_cinder, load_nova, load_glance, load_neutron... :param name: the resource name :return: a dictionary of objects (dictionaries) indexed by id """ objects = pickle.load(open(self.pers_region + '/' + name + '.pickle', 'rb')) return self._convert(objects) def _load_fkeystone(self, name): """Load the keystone objects persisted with pickle. This resources are shared among the regions. This method is used in load_keystone :param name: the resource name :return: a list/dictionary of objects (dictionaries) """ objects = pickle.load(open(self.pers_keystone + '/' + name + '.pickle', 'rb')) return self._convert(objects) def _convert(self, objects): """if use_wrapper, convert objects from dictionary to __E class; this class allows accessing a['key'] also as a.key""" if self.use_wrapper and len(objects) > 0: class __E(dict): def __init__(self, d=dict()): self.__dict__ = self dict.__init__(self, d) if isinstance(objects, dict): if not isinstance(objects.values()[0], dict): return objects return dict((key, __E(objects[key])) for key in objects) else: return list(__E(object) for object in objects) else: return objects def _get_keystone_data(self): """get data from keystone server""" dict_object = self.objects_strategy != OpenStackMap.DIRECT_OBJECTS save = dict_object and \ self.objects_strategy != OpenStackMap.NO_CACHE_OBJECTS keystone = self.osclients.get_keystoneclientv3() roles = keystone.roles.list() users = keystone.users.list() tenants = keystone.projects.list() roles_a = keystone.role_assignments.list() if OpenStackMap.load_filters: ef = {'service_type': 'identity', 'interface': 'public'} resp = 
keystone.session.get('/OS-EP-FILTER/endpoint_groups', endpoint_filter=ef) filters = resp.json()['endpoint_groups'] projects_by_filter = dict() filters_by_project = dict() for f in filters: filter_id = f['id'] resp = keystone.session.get( '/OS-EP-FILTER/endpoint_groups/' + filter_id + '/projects', endpoint_filter=ef) projects = resp.json()['projects'] projects_by_filter[filter_id] = projects for project in projects: if project['id'] not in filters_by_project: filters_by_project[project['id']] = list() filters_by_project[project['id']].append(filter_id) else: filters = list() filters_by_project = dict() roles_by_user = dict() roles_by_project = dict() for roleasig in roles_a: try: userid = roleasig.user['id'] if 'project' in roleasig.scope: projectid = roleasig.scope['project']['id'] else: projectid = str(roleasig.scope) roleid = roleasig.role['id'] if userid not in roles_by_user: roles_by_user[userid] = list() except AttributeError: # Discard roles not assigned to users continue if projectid not in roles_by_project: roles_by_project[projectid] = list() roles_by_user[userid].append((roleid, projectid)) roles_by_project[projectid].append((roleid, userid)) self.roles_by_user = roles_by_user self.roles_by_project = roles_by_project tenants_by_name = dict() users_by_name = dict() filters = dict((f['id'], f) for f in filters) if dict_object: roles = dict((role.id, role.to_dict()) for role in roles) users = dict((user.id, user.to_dict()) for user in users) tenants = dict((tenant.id, tenant.to_dict()) for tenant in tenants) roles_a = list(asig.to_dict() for asig in roles_a) for tenant in tenants.values(): tenants_by_name[tenant['name']] = tenant for user in users.values(): users_by_name[user['name']] = user else: self.roles = dict((role.id, role) for role in roles) self.users = dict((user.id, user) for user in users) self.tenants = dict((tenant.id, tenant) for tenant in tenants) self.roles_a = roles_a tenants_by_name = dict() for tenant in tenants: if getattr(tenant, 
'name', None):
                    tenants_by_name[tenant.name] = tenant
            for user in users:
                if getattr(user, 'name', None):
                    users_by_name[user.name] = user
            self.tenants_by_name = tenants_by_name
            self.users_by_name = users_by_name
            self.filters = filters
            self.filters_by_project = filters_by_project
            # DIRECT_OBJECTS strategy: keep the live API objects; nothing is
            # cached on disk and no dict conversion is done.
            return
        if save:
            # protocol=-1 selects the highest pickle protocol available.
            with open(self.pers_keystone + '/roles.pickle', 'wb') as f:
                pickle.dump(roles, f, protocol=-1)
            with open(self.pers_keystone + '/users.pickle', 'wb') as f:
                pickle.dump(users, f, protocol=-1)
            with open(self.pers_keystone + '/users_by_name.pickle', 'wb') as f:
                pickle.dump(users_by_name, f, protocol=-1)
            with open(self.pers_keystone + '/tenants.pickle', 'wb') as f:
                pickle.dump(tenants, f, protocol=-1)
            # NOTE(review): this is the only dump without protocol=-1 —
            # presumably an oversight; confirm before normalizing.
            with open(self.pers_keystone + '/tenants_by_name.pickle', 'wb') as\
                    f:
                pickle.dump(tenants_by_name, f)
            with open(self.pers_keystone + '/roles_a.pickle', 'wb') as f:
                pickle.dump(roles_a, f, protocol=-1)
            with open(self.pers_keystone + '/roles_by_user.pickle', 'wb') as f:
                pickle.dump(roles_by_user, f, protocol=-1)
            with open(self.pers_keystone + '/roles_by_project.pickle', 'wb') as\
                    f:
                pickle.dump(roles_by_project, f, protocol=-1)
            with open(self.pers_keystone + '/filters.pickle', 'wb') as f:
                pickle.dump(filters, f, protocol=-1)
            with open(self.pers_keystone + '/filters_by_project.pickle', 'wb') \
                    as f:
                pickle.dump(filters_by_project, f, protocol=-1)
        # Optionally wrap the plain dicts so fields are reachable both as
        # resource['field'] and resource.field (see _convert).
        self.roles = self._convert(roles)
        self.users = self._convert(users)
        self.users_by_name = self._convert(users_by_name)
        self.tenants = self._convert(tenants)
        self.tenants_by_name = self._convert(tenants_by_name)
        self.roles_a = self._convert(roles_a)
        self.filters = self._convert(filters)
        self.filters_by_project = self._convert(filters_by_project)

    def _get_nova_data(self):
        """ get data from nova"""
        # dict_object: work with plain dicts instead of live API objects.
        # save: additionally persist the dicts on disk with pickle.
        dict_object = self.objects_strategy != OpenStackMap.DIRECT_OBJECTS
        save = dict_object and \
            self.objects_strategy != OpenStackMap.NO_CACHE_OBJECTS
        nova = self.osclients.get_novaclient()
        # all_tenants: list the servers of every tenant, not only ours
        # (requires an admin-capable credential).
        vms = nova.servers.list(search_opts={'all_tenants': 1})
        if dict_object:
            vms = dict((vm.id, vm.to_dict()) for vm in vms)
        else:
            self.vms = dict((vm.id, vm) for vm in vms)
            return
        if save:
            with open(self.pers_region + '/vms.pickle', 'wb') as f:
                pickle.dump(vms, f, protocol=-1)
        self.vms = self._convert(vms)

    def _get_cinder_data(self):
        """get data from cinder"""
        dict_object = self.objects_strategy != OpenStackMap.DIRECT_OBJECTS
        save = dict_object and \
            self.objects_strategy != OpenStackMap.NO_CACHE_OBJECTS
        cinder = self.osclients.get_cinderclientv1()
        volumes = cinder.volumes.list(search_opts={'all_tenants': 1})
        snapshots = cinder.volume_snapshots.list(
            search_opts={'all_tenants': 1})
        backups = cinder.backups.list(search_opts={'all_tenants': 1})
        if dict_object:
            # cinder v1 objects have no to_dict(); use __dict__ instead.
            volumes = dict((volume.id, volume.__dict__) for volume in volumes)
            snapshots = dict((snapshot.id, snapshot.__dict__)
                             for snapshot in snapshots)
            backups = dict((backup.id, backup.__dict__) for backup in backups)
        else:
            self.volumes = dict((volume.id, volume) for volume in volumes)
            self.volume_snapshots = dict((snapshot.id, snapshot)
                                         for snapshot in snapshots)
            self.volume_backups = dict((backup.id, backup)
                                       for backup in backups)
            return
        if save:
            with open(self.pers_region + '/volumes.pickle', 'wb') as f:
                pickle.dump(volumes, f, protocol=-1)
            with open(self.pers_region + '/volume_snapshots.pickle', 'wb') as \
                    f:
                pickle.dump(snapshots, f, protocol=-1)
            with open(self.pers_region + '/volume_backups.pickle', 'wb') as f:
                pickle.dump(backups, f, protocol=-1)
        self.volumes = self._convert(volumes)
        self.volume_snapshots = self._convert(snapshots)
        self.volume_backups = self._convert(backups)

    def _get_glance_data(self):
        """get data from glance"""
        dict_object = self.objects_strategy != OpenStackMap.DIRECT_OBJECTS
        save = dict_object and \
            self.objects_strategy != OpenStackMap.NO_CACHE_OBJECTS
        glance = self.osclients.get_glanceclient()
        images = glance.images.findall()
        if dict_object:
            # NOTE(review): findall() is invoked a second time here instead of
            # reusing `images` — confirm whether that is intentional.
            images = dict((image.id, image.to_dict())
                          for image in glance.images.findall())
        else:
            self.images = dict((image.id, image) for image in images)
            return
        if save:
            with open(self.pers_region + '/images.pickle', 'wb') as f:
                pickle.dump(images, f, protocol=-1)
        self.images = self._convert(images)

    def _get_neutron_data(self):
        """get network data from neutron"""
        dict_object = self.objects_strategy != OpenStackMap.DIRECT_OBJECTS
        save = dict_object and \
            self.objects_strategy != OpenStackMap.NO_CACHE_OBJECTS
        neutron = self.osclients.get_neutronclient()
        networks = neutron.list_networks()['networks']
        subnets = neutron.list_subnets()['subnets']
        routers = neutron.list_routers()['routers']
        sec_grps = neutron.list_security_groups()['security_groups']
        floatingips = neutron.list_floatingips()['floatingips']
        ports = neutron.list_ports()['ports']
        # index each resource list by its id
        nets = dict((network['id'], network) for network in networks)
        snets = dict((subnet['id'], subnet) for subnet in subnets)
        routers = dict((router['id'], router) for router in routers)
        floatingips = dict((floatingip['id'], floatingip)
                           for floatingip in floatingips)
        sec_grps = dict((sg['id'], sg) for sg in sec_grps)
        ports = dict((port['id'], port) for port in ports)
        if dict_object:
            # neutron objects are already dicts, so they do not need conversion
            pass
        else:
            self.networks = nets
            self.subnets = snets
            self.routers = routers
            self.floatingips = floatingips
            self.security_groups = sec_grps
            self.ports = ports
            return
        if save:
            with open(self.pers_region + '/networks.pickle', 'wb') as f:
                pickle.dump(nets, f, protocol=-1)
            with open(self.pers_region + '/subnets.pickle', 'wb') as f:
                pickle.dump(snets, f, protocol=-1)
            with open(self.pers_region + '/routers.pickle', 'wb') as f:
                pickle.dump(routers, f, protocol=-1)
            with open(self.pers_region + '/floatingips.pickle', 'wb') as f:
                pickle.dump(floatingips, f, protocol=-1)
            with open(self.pers_region + '/security_groups.pickle', 'wb') as f:
                pickle.dump(sec_grps, f, protocol=-1)
            with open(self.pers_region + '/ports.pickle', 'wb') as f:
                pickle.dump(ports, f, protocol=-1)
        self.networks = self._convert(nets)
        self.subnets = self._convert(snets)
        self.routers = self._convert(routers)
        self.floatingips = self._convert(floatingips)
        self.security_groups = self._convert(sec_grps)
        self.ports = self._convert(ports)

    def load_nova(self):
        """load nova data: vms"""
        # Use the on-disk cache when the strategy allows it and it exists;
        # otherwise fetch from the server (or fail for cache-only strategy).
        if (self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS or
                self.objects_strategy ==
                OpenStackMap.USE_CACHE_OBJECTS_ONLY) and \
                os.path.exists(self.pers_region + '/vms.pickle'):
            self.vms = self._load('vms')
        else:
            if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
                raise Exception(
                    'Strategy is USE_CACHE_OBJECTS_ONLY but there are not cached data about nova')
            self._get_nova_data()

    def load_glance(self):
        """load glance data: images"""
        if (self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS or
                self.objects_strategy ==
                OpenStackMap.USE_CACHE_OBJECTS_ONLY) and \
                os.path.exists(self.pers_region + '/images.pickle'):
            self.images = self._load('images')
        else:
            if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
                raise Exception(
                    'Strategy is USE_CACHE_OBJECTS_ONLY but there are not cached data about glance')
            self._get_glance_data()

    def load_neutron(self):
        """load neutron (network) data: networks, subnets, routers,
        floatingips, security_groups, ports"""
        if (self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS or
                self.objects_strategy ==
                OpenStackMap.USE_CACHE_OBJECTS_ONLY) and \
                os.path.exists(self.pers_region + '/networks.pickle'):
            self.networks = self._load('networks')
            # legacy
            # (older caches used the file name 'subnetworks.pickle')
            if os.path.exists(self.pers_region + '/subnetworks.pickle'):
                self.subnets = self._load('subnetworks')
            else:
                self.subnets = self._load('subnets')
            self.routers = self._load('routers')
            self.floatingips = self._load('floatingips')
            # legacy
            # (older caches used the file name 'securitygroups.pickle')
            if os.path.exists(self.pers_region + '/securitygroups.pickle'):
                self.security_groups = self._load('securitygroups')
            else:
                self.security_groups = self._load('security_groups')
            self.ports = self._load('ports')
        else:
            if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
                raise Exception(
                    'Strategy is USE_CACHE_OBJECTS_ONLY but there are not cached data about neutron')
            self._get_neutron_data()
        # make indexes
        self.floatingips_by_ip = dict((f['floating_ip_address'], f)
                                      for f in self.floatingips.values()
                                      if 'floating_ip_address' in f)

    def load_keystone(self):
        """load keystone data: users, tenants, roles, roles_a, users_by_name,
        tenants_by_name, roles_by_project, roles_by_user """
        if (self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS or
                self.objects_strategy ==
                OpenStackMap.USE_CACHE_OBJECTS_ONLY) and \
                os.path.exists(self.pers_keystone + '/users.pickle'):
            self.users = self._load_fkeystone('users')
            self.users_by_name = self._load_fkeystone('users_by_name')
            self.tenants = self._load_fkeystone('tenants')
            self.tenants_by_name = self._load_fkeystone('tenants_by_name')
            # legacy code
            # (older caches stored the assignments as 'asignments.pickle')
            if os.path.exists(self.pers_keystone + '/asignments.pickle'):
                self.roles_a = self._load_fkeystone('asignments')
            else:
                self.roles_a = self._load_fkeystone('roles_a')
            self.roles = self._load_fkeystone('roles')
            self.roles_by_project = self._load_fkeystone('roles_by_project')
            self.roles_by_user = self._load_fkeystone('roles_by_user')
            self.filters = self._load_fkeystone('filters')
            # legacy code
            if os.path.exists(self.pers_keystone + '/filters_byproject.pickle'):
                self.filters_by_project = self._load_fkeystone(
                    'filters_byproject')
            else:
                self.filters_by_project = self._load_fkeystone(
                    'filters_by_project')
        else:
            if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
                raise Exception(
                    'Strategy is USE_CACHE_OBJECTS_ONLY but there are not cached data about keystone')
            self._get_keystone_data()

    def load_cinder(self):
        """load cinder data: volumes, volume_backups, volume_snapshots """
        if (self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS or
                self.objects_strategy ==
                OpenStackMap.USE_CACHE_OBJECTS_ONLY) and \
                os.path.exists(self.pers_region + '/volumes.pickle'):
            self.volumes = self._load('volumes')
            self.volume_backups = self._load('volume_backups')
            self.volume_snapshots = self._load('volume_snapshots')
        else:
            if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
                raise Exception(
                    'Strategy is USE_CACHE_OBJECTS_ONLY but there are not cached data about cinder')
            self._get_cinder_data()

    def load_all(self):
        """load all data"""
        region = self.osclients.region
        if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
            # offline: only what is present in the per-region cache directory
            if os.path.exists(self.pers_region + '/vms.pickle'):
                self.load_nova()
            if os.path.exists(self.pers_region + '/networks.pickle'):
                self.load_neutron()
            if os.path.exists(self.pers_region + '/images.pickle'):
                self.load_glance()
            if os.path.exists(self.pers_region + '/volumes.pickle'):
                self.load_cinder()
        else:
            # online: only the services the region actually exposes
            if region in self.osclients.get_regions('compute'):
                self.load_nova()
            if region in self.osclients.get_regions('network'):
                self.load_neutron()
            if region in self.osclients.get_regions('image'):
                self.load_glance()
            if region in self.osclients.get_regions('volume'):
                self.load_cinder()
        self.load_keystone()

    def change_region(self, region, auto_load=True):
        """change region and clean maps. Optionally load the maps.
        :param region: the new region
        :param auto_load: True to invoke load_all
        :return: nothing
        """
        self.pers_region = self.persistence_dir + '/' + region
        if not os.path.exists(self.pers_region) and self.objects_strategy\
                not in (OpenStackMap.DIRECT_OBJECTS,
                        OpenStackMap.NO_CACHE_OBJECTS):
            os.mkdir(self.pers_region)
        self.osclients.set_region(region)
        self._init_resource_maps()
        if auto_load:
            self.load_all()

    def preload_regions(self, regions=None, all_regions_excluded=None):
        """Method to preload the data of the specified regions. If regions is
        None, use all the available regions in the federation, but the
        specified in all_regions_excluded.
        The data for each region will be available at the region_map
        dictionary. It must be noted that this method could affect the current
        region and the values of the direct maps (vms, networks...). This is
        because it calls change_region/load_<service_name> methods.
        If regions is provided, then the last region in the list will be the
        new current region"""
        if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
            # offline: deduce per-service region availability from the cache
            # layout on disk ('keystone' is a shared dir, not a region).
            regions_in_disk = set(os.listdir(self.persistence_dir))
            if 'keystone' in regions_in_disk:
                regions_in_disk.remove('keystone')
            regions_compute = set()
            regions_network = set()
            regions_image = set()
            regions_volume = set()
            for region in regions_in_disk:
                path = self.persistence_dir + os.path.sep + region
                if os.path.exists(path + os.path.sep + 'vms.pickle'):
                    regions_compute.add(region)
                if os.path.exists(path + os.path.sep + 'networks.pickle'):
                    regions_network.add(region)
                if os.path.exists(path + os.path.sep + 'images.pickle'):
                    regions_image.add(region)
                if os.path.exists(path + os.path.sep + 'volumes.pickle'):
                    regions_volume.add(region)
        else:
            regions_compute = self.osclients.get_regions('compute')
            regions_network = self.osclients.get_regions('network')
            regions_image = self.osclients.get_regions('image')
            regions_volume = self.osclients.get_regions('volume')
        if not regions:
            """ not regions specified, so all existing regions with some
            resource (compute, network, image or volume) are considered. If
            all_regions_excluded is defined, then these regions are
            excluded """
            all_regions = regions_compute.union(regions_network).union(
                regions_image).union(regions_volume)
            if all_regions_excluded:
                all_regions.difference_update(all_regions_excluded)
            # Move the current region to the end of the list.
            # This is because the direct map dictionaries (vms, networks, etc.)
            # and the region field are updated with the change_region() call.
            if self.osclients.region in all_regions:
                all_regions.remove(self.osclients.region)
            regions = list(all_regions)
            regions.append(self.osclients.region)
        for region in regions:
            try:
                self.logger.info('Creating map of region ' + region)
                self.change_region(region, False)
                if region in regions_compute:
                    self.load_nova()
                if region in regions_network:
                    self.load_neutron()
                if region in regions_image:
                    self.load_glance()
                if region in regions_volume:
                    self.load_cinder()
                # snapshot the direct maps for this region
                region_map = dict()
                for resource in self.resources_region:
                    region_map[resource] = getattr(self, resource)
                self.region_map[region] = region_map
            except Exception, e:
                # Python 2 except syntax; a failing region is logged and
                # skipped so the remaining regions are still mapped.
                msg = 'Failed the creation of the map of {0}. Cause: {1}'
                self.logger.error(msg.format(region, str(e)))
                # Remove dir if it exists and is empty.
                dir = os.path.join(self.persistence_dir, region)
                if os.path.isdir(dir) and len(os.listdir(dir)) == 0:
                    os.rmdir(dir)
        self.load_keystone()
def test_get_session_with_different_auth_url(self):
        """test_get_session_with_different_auth_url check that we could
        retrieve a session whatever the format of auth_url is (v2.0 or v3
        suffix, with or without a trailing slash)"""
        v2_style_urls = ["http://cloud.lab.fi-ware.org:4731/v2.0",
                         "http://cloud.lab.fi-ware.org:4731/v2.0/"]
        v3_style_urls = ["http://cloud.lab.fi-ware.org:4731/v3",
                         "http://cloud.lab.fi-ware.org:4731/v3/"]
        osclients = OpenStackClients()
        # Checking v3 sessions (built from the v2.0-style URLs)
        for url in v2_style_urls:
            osclients.auth_url = url
            session = osclients.get_session()
            self.assertIsInstance(session, keystoneclient.session.Session)
            session.invalidate()
            osclients._session_v3 = None
        # Checking v2 sessions (built from the v3-style URLs)
        osclients.use_v3 = False
        for url in v3_style_urls:
            osclients.auth_url = url
            session = osclients.get_session()
            self.assertIsInstance(session, keystoneclient.session.Session)
            session.invalidate()
            osclients._session_v2 = None
file_path = '/home/ubuntu/idm/conf/settings.py' etckeystone_path = '/home/ubuntu/idm/keystone/etc/keystone.conf' # reset the password p2 = Popen(["curl", "http://169.254.169.254/openstack/latest/meta_data.json"], stdout=PIPE) metadatajson, err = p2.communicate() meta = json.loads(metadatajson)["meta"] keystone_ip = meta["keystone_ip"] region = meta["Region"] region2 = meta["region_keystone"] if region2: os.environ['OS_REGION_NAME'] = region2 wait_net_service(keystone_ip, 5000, timeout=720) osclients = OpenStackClients('http://{0}:5000/v3/'.format(keystone_ip)) osclients.set_credential('idm', 'idm', 'idm') # create idm region user password_changer = PasswordChanger(osclients) idm = password_changer.get_user_byname("idm") idm = password_changer.get_user_byname('idm') # new_password = password_changer.reset_password(idm) new_password = '******' credential = """export OS_AUTH_URL=http://{0}:5000/v3/ export OS_AUTH_URL_V2=http://{0}:5000/v2.0/ export OS_USERNAME={2}
def test_set_credential_to_osclients(self):
        """test_set_credential_to_osclients check that we could set
        credentials using method set_credential"""
        username = "******"
        password = "******"
        tenant_name = "new_user cloud"
        tenant_id = "00000000000000000000000000000002"
        trust_id = "randomid0000000000000000000000001"
        # FIRST CHECK: credentials are initially taken from the environment.
        osclients = OpenStackClients()
        self.assertEqual(osclients._OpenStackClients__username,
                         self.OS_USERNAME)
        self.assertEqual(osclients._OpenStackClients__tenant_id,
                         self.OS_TENANT_ID)
        # SECOND CHECK: updating credentials with an explicit tenant_id
        osclients.set_credential(username, password, tenant_id=tenant_id)
        self.assertEqual(osclients._OpenStackClients__tenant_id, tenant_id)
        # THIRD CHECK: updating credentials with an explicit tenant_name
        osclients.set_credential(username, password, tenant_name=tenant_name)
        self.assertEqual(osclients._OpenStackClients__tenant_name, tenant_name)
        # FOURTH CHECK: updating credentials with an explicit trust_id
        osclients.set_credential(username, password, trust_id=trust_id)
        self.assertEqual(osclients._OpenStackClients__trust_id, trust_id)
        # FIFTH CHECK: without a scope, all three scope fields are cleared
        osclients.set_credential(username, password)
        self.assertIsNone(osclients._OpenStackClients__trust_id)
        self.assertIsNone(osclients._OpenStackClients__tenant_name)
        self.assertIsNone(osclients._OpenStackClients__tenant_id)
        # Creating a client forces a v3 session; set_credential must drop it.
        osclients.get_novaclient()
        self.assertIsNotNone(osclients._session_v3)
        osclients.set_credential(username, password)
        self.assertIsNone(osclients._session_v3)
        # Same check with a keystone v2 session.
        osclients.use_v3 = False
        osclients.get_novaclient()
        self.assertIsNotNone(osclients._session_v2)
        osclients.set_credential(username, password)
        self.assertIsNone(osclients._session_v2)
def return_credentails(self):
        """Prepare the environment (default domains and the region taken from
        the REGION variable) and return a keystone client built from it.

        NOTE(review): the name keeps the historical typo ('credentails')
        because external callers may rely on it."""
        environ = os.environ
        environ['OS_USER_DOMAIN_NAME'] = "default"
        environ['OS_PROJECT_DOMAIN_NAME'] = "default"
        environ['OS_REGION_NAME'] = environ['REGION']
        return OpenStackClients().get_keystoneclient()
def test_get_keystoneclient_v3(self):
        """test_get_keystoneclient_v3 check that we could retrieve a Session
        client to work with keystone v3"""
        clients = OpenStackClients()
        keystone_client = clients.get_keystoneclient()
        self.assertIsInstance(keystone_client, keystoneclient.v3.client.Client)
class OpenStackMap(object):
    """ This class build a map from the resources (VMs, networks, images,
    volumes, users, tenants, roles...) in an OpenStack infrastructure. This
    map is optionally cached on disk and can be accessed offline or can be
    used as a metadata snapshot of an OpenStack infrastructure (e.g. to
    analyse resource use and build statistics) """
    # objects strategy see the __init__ method documentation
    DIRECT_OBJECTS, NO_CACHE_OBJECTS, REFRESH_OBJECTS, USE_CACHE_OBJECTS, \
        USE_CACHE_OBJECTS_ONLY = range(5)
    # If use_wrapper is True, dictionaries are wrapped to allow access to
    # resource['field'] also as resource.field. This is not used when
    # objects_strategy is DIRECT_OBJECTS
    use_wrapper = True
    # If True, also fetch the OS-EP-FILTER endpoint groups from keystone.
    load_filters = True
    # Names of the per-region resource maps (see preload_regions/region_map).
    resources_region = [
        'vms', 'images', 'routers', 'networks', 'subnets', 'ports',
        'floatingips', 'security_groups', 'volumes', 'volume_backups',
        'volume_snapshots'
    ]

    def __init__(self, persistence_dir='~/openstackmap', region=None,
                 auth_url=None, objects_strategy=USE_CACHE_OBJECTS,
                 auto_load=True):
        """ Constructor
        :param persistence_dir: The path where the data is saved. Ignored if
        objects_strategy is DIRECT_OBJECTS or NO_CACHE_OBJECTS
        :param region: the initial region (if undefined, use OS_REGION_NAME)
        :param auth_url: the keystone URI
        :param objects_strategy: sets the strategy about the object maps
        contained here. It can be:
         * DIRECT_OBJECTS is using the objects as they are got from the API.
           Be careful because methods as delete are available! The objects are
           not cached.
         * NO_CACHE_OBJECTS is using the objects converted to dictionaries,
           so methods to do operations are not available. The objects are not
           cached.
         * REFRESH_OBJECTS is using the objects converted to dictionaries.
           The objects are cached: the new version replace the old one.
         * USE_CACHE_OBJECTS is using the objects converted to dictionaries.
           If a cached copy of the objects are available, it is used.
         * USE_CACHE_OBJECTS_ONLY is using the objects converted to
           dictionaries. This strategy used cached objects only. It never
           contacts with the servers, even when the object is not available in
           the local cache.
        :param auto_load: if True, invoke self.load_all()
        Note that neutron objects returned by the API are already dictionaries
        """
        self.logger = logging.getLogger(__name__)
        if auth_url:
            self.osclients = OpenStackClients(auth_url=auth_url)
        else:
            self.osclients = OpenStackClients()
        if region:
            self.osclients.set_region(region)
        else:
            if 'OS_REGION_NAME' not in env:
                raise Exception('Region parameter must be provided or '
                                'OS_REGION_NAME variable must be defined.')
            else:
                region = env['OS_REGION_NAME']
        # Allow overriding the keystone admin endpoint from the environment.
        if 'KEYSTONE_ADMIN_ENDPOINT' in os.environ:
            self.osclients.override_endpoint(
                'identity', self.osclients.region, 'admin',
                os.environ['KEYSTONE_ADMIN_ENDPOINT'])
        self.objects_strategy = objects_strategy
        self.persistence_dir = os.path.expanduser(persistence_dir)
        # per-region cache dir and the shared (cross-region) keystone dir
        self.pers_region = self.persistence_dir + '/' + region
        self.pers_keystone = self.persistence_dir + '/keystone'
        if objects_strategy not in (OpenStackMap.DIRECT_OBJECTS,
                                    OpenStackMap.NO_CACHE_OBJECTS):
            if not os.path.exists(self.persistence_dir):
                os.mkdir(self.persistence_dir)
            if not os.path.exists(self.pers_keystone):
                os.mkdir(self.pers_keystone)
            if not os.path.exists(self.pers_region):
                os.mkdir(self.pers_region)
        self._init_resource_maps()
        if auto_load:
            self.load_all()
        self.region_map = dict()

    def _init_resource_maps(self):
        """init all the resources that will be available as empty
        dictionaries"""
        # Keystone resources
        self.users = dict()
        self.users_by_name = dict()
        self.roles = dict()
        self.tenants = dict()
        self.tenants_by_name = dict()
        self.roles_a = list()
        self.roles_by_project = dict()
        self.roles_by_user = dict()
        self.filters = dict()
        self.filters_by_project = dict()
        # Glance resources
        self.images = dict()
        # Neutron resources
        self.networks = dict()
        self.subnets = dict()
        self.routers = dict()
        self.floatingips = dict()
        self.floatingips_by_ip = dict()
        self.ports = dict()
        self.security_groups = dict()
        # Nova resources
        self.vms = dict()
        self.flavors = dict()
        # Cinder resources
        self.volumes = dict()
        self.volume_backups = dict()
        self.volume_snapshots = dict()

    def _load(self, name):
        """Load the resources persisted with pickle. This resources (e.g.
        networks, vms, images...) are saved independently for each region.
        This method is called for load_cinder, load_nova, load_glance,
        load_neutron...
        :param name: the resource name
        :return: a dictionary of objects (dictionaries) indexed by id
        """
        objects = pickle.load(
            open(self.pers_region + '/' + name + '.pickle', 'rb'))
        return self._convert(objects)

    def _load_fkeystone(self, name):
        """Load the keystone objects persisted with pickle. This resources
        are shared among the regions. This method is used in load_keystone
        :param name: the resource name
        :return: a list/dictionary of objects (dictionaries)
        """
        objects = pickle.load(
            open(self.pers_keystone + '/' + name + '.pickle', 'rb'))
        return self._convert(objects)

    def _convert(self, objects):
        """if use_wrapper, convert objects from dictionary to __E class; this
        class allows accessing a['key'] also as a.key"""
        if self.use_wrapper and len(objects) > 0:
            class __E(dict):
                # NOTE(review): d=dict() is a mutable default, but it is only
                # read here, so it is harmless in practice.
                def __init__(self, d=dict()):
                    # aliasing __dict__ to the dict itself makes keys
                    # reachable as attributes
                    self.__dict__ = self
                    dict.__init__(self, d)
            if isinstance(objects, dict):
                # values that are not dicts are left untouched
                # (Python 2: values() returns a list, so [0] indexing works)
                if not isinstance(objects.values()[0], dict):
                    return objects
                return dict((key, __E(objects[key])) for key in objects)
            else:
                return list(__E(object) for object in objects)
        else:
            return objects

    def _get_keystone_data(self):
        """get data from keystone server"""
        # dict_object: work with plain dicts instead of live API objects.
        # save: additionally persist the dicts on disk with pickle.
        dict_object = self.objects_strategy != OpenStackMap.DIRECT_OBJECTS
        save = dict_object and \
            self.objects_strategy != OpenStackMap.NO_CACHE_OBJECTS
        keystone = self.osclients.get_keystoneclientv3()
        roles = keystone.roles.list()
        users = keystone.users.list()
        tenants = keystone.projects.list()
        roles_a = keystone.role_assignments.list()
        if OpenStackMap.load_filters:
            # endpoint groups (OS-EP-FILTER extension) and the projects each
            # one is associated to
            ef = {'service_type': 'identity', 'interface': 'public'}
            resp = keystone.session.get('/OS-EP-FILTER/endpoint_groups',
                                        endpoint_filter=ef)
            filters = resp.json()['endpoint_groups']
            projects_by_filter = dict()
            filters_by_project = dict()
            for f in filters:
                filter_id = f['id']
                resp = keystone.session.get(
                    '/OS-EP-FILTER/endpoint_groups/' + filter_id +
                    '/projects', endpoint_filter=ef)
                projects = resp.json()['projects']
                projects_by_filter[filter_id] = projects
                for project in projects:
                    if project['id'] not in filters_by_project:
                        filters_by_project[project['id']] = list()
                    filters_by_project[project['id']].append(filter_id)
        else:
            filters = list()
            filters_by_project = dict()
        # Build the role indexes (by user and by project) from the raw
        # assignment list.
        roles_by_user = dict()
        roles_by_project = dict()
        for roleasig in roles_a:
            try:
                userid = roleasig.user['id']
                if 'project' in roleasig.scope:
                    projectid = roleasig.scope['project']['id']
                else:
                    # non-project scope (e.g. domain): keep its repr as key
                    projectid = str(roleasig.scope)
                roleid = roleasig.role['id']
                if userid not in roles_by_user:
                    roles_by_user[userid] = list()
            except AttributeError:
                # Discard roles not assigned to users
                continue
            if projectid not in roles_by_project:
                roles_by_project[projectid] = list()
            roles_by_user[userid].append((roleid, projectid))
            roles_by_project[projectid].append((roleid, userid))
        self.roles_by_user = roles_by_user
        self.roles_by_project = roles_by_project
        tenants_by_name = dict()
        users_by_name = dict()
        filters = dict((f['id'], f) for f in filters)
        if dict_object:
            roles = dict((role.id, role.to_dict()) for role in roles)
            users = dict((user.id, user.to_dict()) for user in users)
            tenants = dict((tenant.id, tenant.to_dict())
                           for tenant in tenants)
            roles_a = list(asig.to_dict() for asig in roles_a)
            for tenant in tenants.values():
                tenants_by_name[tenant['name']] = tenant
            for user in users.values():
                users_by_name[user['name']] = user
        else:
            # DIRECT_OBJECTS strategy: keep live API objects, no caching
            self.roles = dict((role.id, role) for role in roles)
            self.users = dict((user.id, user) for user in users)
            self.tenants = dict((tenant.id, tenant) for tenant in tenants)
            self.roles_a = roles_a
            tenants_by_name = dict()
            for tenant in tenants:
                if getattr(tenant, 'name', None):
                    tenants_by_name[tenant.name] = tenant
            for user in users:
                if getattr(user, 'name', None):
                    users_by_name[user.name] = user
            self.tenants_by_name = tenants_by_name
            self.users_by_name = users_by_name
            self.filters = filters
            self.filters_by_project = filters_by_project
            return
        if save:
            # protocol=-1 selects the highest pickle protocol available.
            with open(self.pers_keystone + '/roles.pickle', 'wb') as f:
                pickle.dump(roles, f, protocol=-1)
            with open(self.pers_keystone + '/users.pickle', 'wb') as f:
                pickle.dump(users, f, protocol=-1)
            with open(self.pers_keystone + '/users_by_name.pickle', 'wb') as f:
                pickle.dump(users_by_name, f, protocol=-1)
            with open(self.pers_keystone + '/tenants.pickle', 'wb') as f:
                pickle.dump(tenants, f, protocol=-1)
            # NOTE(review): the only dump without protocol=-1 — presumably an
            # oversight; confirm before normalizing.
            with open(self.pers_keystone + '/tenants_by_name.pickle', 'wb') as\
                    f:
                pickle.dump(tenants_by_name, f)
            with open(self.pers_keystone + '/roles_a.pickle', 'wb') as f:
                pickle.dump(roles_a, f, protocol=-1)
            with open(self.pers_keystone + '/roles_by_user.pickle', 'wb') as f:
                pickle.dump(roles_by_user, f, protocol=-1)
            with open(self.pers_keystone + '/roles_by_project.pickle', 'wb') as\
                    f:
                pickle.dump(roles_by_project, f, protocol=-1)
            with open(self.pers_keystone + '/filters.pickle', 'wb') as f:
                pickle.dump(filters, f, protocol=-1)
            with open(self.pers_keystone + '/filters_by_project.pickle', 'wb') \
                    as f:
                pickle.dump(filters_by_project, f, protocol=-1)
        # Optionally wrap the dicts so fields are reachable as attributes too.
        self.roles = self._convert(roles)
        self.users = self._convert(users)
        self.users_by_name = self._convert(users_by_name)
        self.tenants = self._convert(tenants)
        self.tenants_by_name = self._convert(tenants_by_name)
        self.roles_a = self._convert(roles_a)
        self.filters = self._convert(filters)
        self.filters_by_project = self._convert(filters_by_project)

    def _get_nova_data(self):
        """ get data from nova"""
        dict_object = self.objects_strategy != OpenStackMap.DIRECT_OBJECTS
        save = dict_object and \
            self.objects_strategy != OpenStackMap.NO_CACHE_OBJECTS
        nova = self.osclients.get_novaclient()
        # all_tenants: list the servers of every tenant, not only ours
        vms = nova.servers.list(search_opts={'all_tenants': 1})
        if dict_object:
            vms = dict((vm.id, vm.to_dict()) for vm in vms)
        else:
            self.vms = dict((vm.id, vm) for vm in vms)
            return
        if save:
            with open(self.pers_region + '/vms.pickle', 'wb') as f:
                pickle.dump(vms, f, protocol=-1)
        self.vms = self._convert(vms)

    def _get_cinder_data(self):
        """get data from cinder"""
        dict_object = self.objects_strategy != OpenStackMap.DIRECT_OBJECTS
        save = dict_object and \
            self.objects_strategy != OpenStackMap.NO_CACHE_OBJECTS
        cinder = self.osclients.get_cinderclientv1()
        volumes = cinder.volumes.list(search_opts={'all_tenants': 1})
        snapshots = cinder.volume_snapshots.list(
            search_opts={'all_tenants': 1})
        backups = cinder.backups.list(search_opts={'all_tenants': 1})
        if dict_object:
            # cinder v1 objects have no to_dict(); use __dict__ instead.
            volumes = dict((volume.id, volume.__dict__) for volume in volumes)
            snapshots = dict(
                (snapshot.id, snapshot.__dict__) for snapshot in snapshots)
            backups = dict((backup.id, backup.__dict__) for backup in backups)
        else:
            self.volumes = dict((volume.id, volume) for volume in volumes)
            self.volume_snapshots = dict(
                (snapshot.id, snapshot) for snapshot in snapshots)
            self.volume_backups = dict(
                (backup.id, backup) for backup in backups)
            return
        if save:
            with open(self.pers_region + '/volumes.pickle', 'wb') as f:
                pickle.dump(volumes, f, protocol=-1)
            with open(self.pers_region + '/volume_snapshots.pickle', 'wb') as \
                    f:
                pickle.dump(snapshots, f, protocol=-1)
            with open(self.pers_region + '/volume_backups.pickle', 'wb') as f:
                pickle.dump(backups, f, protocol=-1)
        self.volumes = self._convert(volumes)
        self.volume_snapshots = self._convert(snapshots)
        self.volume_backups = self._convert(backups)

    def _get_glance_data(self):
        """get data from glance"""
        dict_object = self.objects_strategy != OpenStackMap.DIRECT_OBJECTS
        save = dict_object and \
            self.objects_strategy != OpenStackMap.NO_CACHE_OBJECTS
        glance = self.osclients.get_glanceclient()
        images = glance.images.findall()
        if dict_object:
            # NOTE(review): findall() is invoked a second time here instead
            # of reusing `images` — confirm whether that is intentional.
            images = dict((image.id, image.to_dict())
                          for image in glance.images.findall())
        else:
            self.images = dict((image.id, image) for image in images)
            return
        if save:
            with open(self.pers_region + '/images.pickle', 'wb') as f:
                pickle.dump(images, f, protocol=-1)
        self.images = self._convert(images)

    def _get_neutron_data(self):
        """get network data from neutron"""
        dict_object = self.objects_strategy != OpenStackMap.DIRECT_OBJECTS
        save = dict_object and \
            self.objects_strategy != OpenStackMap.NO_CACHE_OBJECTS
        neutron = self.osclients.get_neutronclient()
        networks = neutron.list_networks()['networks']
        subnets = neutron.list_subnets()['subnets']
        routers = neutron.list_routers()['routers']
        sec_grps = neutron.list_security_groups()['security_groups']
        floatingips = neutron.list_floatingips()['floatingips']
        ports = neutron.list_ports()['ports']
        # index each resource list by its id
        nets = dict((network['id'], network) for network in networks)
        snets = dict((subnet['id'], subnet) for subnet in subnets)
        routers = dict((router['id'], router) for router in routers)
        floatingips = dict(
            (floatingip['id'], floatingip) for floatingip in floatingips)
        sec_grps = dict((sg['id'], sg) for sg in sec_grps)
        ports = dict((port['id'], port) for port in ports)
        if dict_object:
            # neutron objects are already dicts, so they do not need conversion
            pass
        else:
            self.networks = nets
            self.subnets = snets
            self.routers = routers
            self.floatingips = floatingips
            self.security_groups = sec_grps
            self.ports = ports
            return
        if save:
            with open(self.pers_region + '/networks.pickle', 'wb') as f:
                pickle.dump(nets, f, protocol=-1)
            with open(self.pers_region + '/subnets.pickle', 'wb') as f:
                pickle.dump(snets, f, protocol=-1)
            with open(self.pers_region + '/routers.pickle', 'wb') as f:
                pickle.dump(routers, f, protocol=-1)
            with open(self.pers_region + '/floatingips.pickle', 'wb') as f:
                pickle.dump(floatingips, f, protocol=-1)
            with open(self.pers_region + '/security_groups.pickle', 'wb') as f:
                pickle.dump(sec_grps, f, protocol=-1)
            with open(self.pers_region + '/ports.pickle', 'wb') as f:
                pickle.dump(ports, f, protocol=-1)
        self.networks = self._convert(nets)
        self.subnets = self._convert(snets)
        self.routers = self._convert(routers)
        self.floatingips = self._convert(floatingips)
        self.security_groups = self._convert(sec_grps)
        self.ports = self._convert(ports)

    def load_nova(self):
        """load nova data: vms"""
        # Use the on-disk cache when allowed and present; otherwise fetch
        # from the server (or fail for the cache-only strategy).
        if (self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS or
                self.objects_strategy ==
                OpenStackMap.USE_CACHE_OBJECTS_ONLY) and \
                os.path.exists(self.pers_region + '/vms.pickle'):
            self.vms = self._load('vms')
        else:
            if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
                raise Exception(
                    'Strategy is USE_CACHE_OBJECTS_ONLY but there are not cached data about nova'
                )
            self._get_nova_data()

    def load_glance(self):
        """load glance data: images"""
        if (self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS or
                self.objects_strategy ==
                OpenStackMap.USE_CACHE_OBJECTS_ONLY) and \
                os.path.exists(self.pers_region + '/images.pickle'):
            self.images = self._load('images')
        else:
            if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
                raise Exception(
                    'Strategy is USE_CACHE_OBJECTS_ONLY but there are not cached data about glance'
                )
            self._get_glance_data()

    def load_neutron(self):
        """load neutron (network) data: networks, subnets, routers,
        floatingips, security_groups, ports"""
        if (self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS or
                self.objects_strategy ==
                OpenStackMap.USE_CACHE_OBJECTS_ONLY) and \
                os.path.exists(self.pers_region + '/networks.pickle'):
            self.networks = self._load('networks')
            # legacy
            # (older caches used the file name 'subnetworks.pickle')
            if os.path.exists(self.pers_region + '/subnetworks.pickle'):
                self.subnets = self._load('subnetworks')
            else:
                self.subnets = self._load('subnets')
            self.routers = self._load('routers')
            self.floatingips = self._load('floatingips')
            # legacy
            # (older caches used the file name 'securitygroups.pickle')
            if os.path.exists(self.pers_region + '/securitygroups.pickle'):
                self.security_groups = self._load('securitygroups')
            else:
                self.security_groups = self._load('security_groups')
            self.ports = self._load('ports')
        else:
            if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
                raise Exception(
                    'Strategy is USE_CACHE_OBJECTS_ONLY but there are not cached data about neutron'
                )
            self._get_neutron_data()
        # make indexes
        self.floatingips_by_ip = dict((f['floating_ip_address'], f)
                                      for f in self.floatingips.values()
                                      if 'floating_ip_address' in f)

    def load_keystone(self):
        """load keystone data: users, tenants, roles, roles_a, users_by_name,
        tenants_by_name, roles_by_project, roles_by_user """
        if (self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS or
                self.objects_strategy ==
                OpenStackMap.USE_CACHE_OBJECTS_ONLY) and \
                os.path.exists(self.pers_keystone + '/users.pickle'):
            self.users = self._load_fkeystone('users')
            self.users_by_name = self._load_fkeystone('users_by_name')
            self.tenants = self._load_fkeystone('tenants')
            self.tenants_by_name = self._load_fkeystone('tenants_by_name')
            # legacy code
            # (older caches stored the assignments as 'asignments.pickle')
            if os.path.exists(self.pers_keystone + '/asignments.pickle'):
                self.roles_a = self._load_fkeystone('asignments')
            else:
                self.roles_a = self._load_fkeystone('roles_a')
            self.roles = self._load_fkeystone('roles')
            self.roles_by_project = self._load_fkeystone('roles_by_project')
            self.roles_by_user = self._load_fkeystone('roles_by_user')
            self.filters = self._load_fkeystone('filters')
            # legacy code
            if os.path.exists(self.pers_keystone + '/filters_byproject.pickle'):
                self.filters_by_project = self._load_fkeystone(
                    'filters_byproject')
            else:
                self.filters_by_project = self._load_fkeystone(
                    'filters_by_project')
        else:
            if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
                raise Exception(
                    'Strategy is USE_CACHE_OBJECTS_ONLY but there are not cached data about keystone'
                )
            self._get_keystone_data()

    def load_cinder(self):
        """load cinder data: volumes, volume_backups, volume_snapshots """
        if (self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS or
                self.objects_strategy ==
                OpenStackMap.USE_CACHE_OBJECTS_ONLY) and \
                os.path.exists(self.pers_region + '/volumes.pickle'):
            self.volumes = self._load('volumes')
            self.volume_backups = self._load('volume_backups')
            self.volume_snapshots = self._load('volume_snapshots')
        else:
            if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
                raise Exception(
                    'Strategy is USE_CACHE_OBJECTS_ONLY but there are not cached data about cinder'
                )
            self._get_cinder_data()

    def load_all(self):
        """load all data"""
        region = self.osclients.region
        if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
            # offline: only what is present in the per-region cache directory
            if os.path.exists(self.pers_region + '/vms.pickle'):
                self.load_nova()
            if os.path.exists(self.pers_region + '/networks.pickle'):
                self.load_neutron()
            if os.path.exists(self.pers_region + '/images.pickle'):
                self.load_glance()
            if os.path.exists(self.pers_region + '/volumes.pickle'):
                self.load_cinder()
        else:
            # online: only the services the region actually exposes
            if region in self.osclients.get_regions('compute'):
                self.load_nova()
            if region in self.osclients.get_regions('network'):
                self.load_neutron()
            if region in self.osclients.get_regions('image'):
                self.load_glance()
            if region in self.osclients.get_regions('volume'):
                self.load_cinder()
        self.load_keystone()

    def change_region(self, region, auto_load=True):
        """change region and clean maps. Optionally load the maps.
        :param region: the new region
        :param auto_load: True to invoke load_all
        :return: nothing
        """
        self.pers_region = self.persistence_dir + '/' + region
        if not os.path.exists(self.pers_region) and self.objects_strategy\
                not in (OpenStackMap.DIRECT_OBJECTS,
                        OpenStackMap.NO_CACHE_OBJECTS):
            os.mkdir(self.pers_region)
        self.osclients.set_region(region)
        self._init_resource_maps()
        if auto_load:
            self.load_all()

    def preload_regions(self, regions=None, all_regions_excluded=None):
        """Method to preload the data of the specified regions. If regions is
        None, use all the available regions in the federation, but the
        specified in all_regions_excluded.
        The data for each region will be available at the region_map
        dictionary. It must be noted that this method could affect the current
        region and the values of the direct maps (vms, networks...). This is
        because it calls change_region/load_<service_name> methods.
        If regions is provided, then the last region in the list will be the
        new current region"""
        if self.objects_strategy == OpenStackMap.USE_CACHE_OBJECTS_ONLY:
            # offline: deduce per-service region availability from the cache
            # layout on disk ('keystone' is a shared dir, not a region).
            regions_in_disk = set(os.listdir(self.persistence_dir))
            if 'keystone' in regions_in_disk:
                regions_in_disk.remove('keystone')
            regions_compute = set()
            regions_network = set()
            regions_image = set()
            regions_volume = set()
            for region in regions_in_disk:
                path = self.persistence_dir + os.path.sep + region
                if os.path.exists(path + os.path.sep + 'vms.pickle'):
                    regions_compute.add(region)
                if os.path.exists(path + os.path.sep + 'networks.pickle'):
                    regions_network.add(region)
                if os.path.exists(path + os.path.sep + 'images.pickle'):
                    regions_image.add(region)
                if os.path.exists(path + os.path.sep + 'volumes.pickle'):
                    regions_volume.add(region)
        else:
            regions_compute = self.osclients.get_regions('compute')
            regions_network = self.osclients.get_regions('network')
            regions_image = self.osclients.get_regions('image')
            regions_volume = self.osclients.get_regions('volume')
        if not regions:
            """ not regions specified, so all existing regions with some
            resource (compute, network, image or volume) are considered. If
            all_regions_excluded is defined, then these regions are
            excluded """
            all_regions = regions_compute.union(regions_network).union(
                regions_image).union(regions_volume)
            if all_regions_excluded:
                all_regions.difference_update(all_regions_excluded)
            # Move the current region to the end of the list.
            # This is because the direct map dictionaries (vms, networks, etc.)
            # and the region field are updated with the change_region() call.
            
if self.osclients.region in all_regions: all_regions.remove(self.osclients.region) regions = list(all_regions) regions.append(self.osclients.region) for region in regions: try: self.logger.info('Creating map of region ' + region) self.change_region(region, False) if region in regions_compute: self.load_nova() if region in regions_network: self.load_neutron() if region in regions_image: self.load_glance() if region in regions_volume: self.load_cinder() region_map = dict() for resource in self.resources_region: region_map[resource] = getattr(self, resource) self.region_map[region] = region_map except Exception, e: msg = 'Failed the creation of the map of {0}. Cause: {1}' self.logger.error(msg.format(region, str(e))) # Remove dir if it exists and is empty. dir = os.path.join(self.persistence_dir, region) if os.path.isdir(dir) and len(os.listdir(dir)) == 0: os.rmdir(dir) self.load_keystone()
def __init__(self, persistence_dir='~/openstackmap', region=None,
             auth_url=None, objects_strategy=USE_CACHE_OBJECTS,
             auto_load=True):
    """Constructor.

    :param persistence_dir: The path where the data is saved. Ignored if
        objects_strategy is DIRECT_OBJECTS or NO_CACHE_OBJECTS
    :param region: the initial region (if undefined, use OS_REGION_NAME)
    :param auth_url: the keystone URI
    :param objects_strategy: sets the strategy about the object maps
        contained here. It can be:
        * DIRECT_OBJECTS: objects as they are got from the API. Be
          careful because methods as delete are available! The objects
          are not cached.
        * NO_CACHE_OBJECTS: objects converted to dictionaries, so methods
          to do operations are not available. The objects are not cached.
        * REFRESH_OBJECTS: objects converted to dictionaries. The objects
          are cached: the new version replaces the old one.
        * USE_CACHE_OBJECTS: objects converted to dictionaries. If a
          cached copy of the objects is available, it is used.
        * USE_CACHE_OBJECTS_ONLY: objects converted to dictionaries,
          using cached objects only. It never contacts the servers, even
          when the object is not available in the local cache.
    :param auto_load: if True, invoke self.load_all()

    Note that neutron objects returned by the API are already dictionaries
    """
    self.logger = logging.getLogger(__name__)
    # build the client factory, passing the keystone URI when given
    self.osclients = (OpenStackClients(auth_url=auth_url) if auth_url
                      else OpenStackClients())
    if region:
        self.osclients.set_region(region)
    elif 'OS_REGION_NAME' in env:
        region = env['OS_REGION_NAME']
    else:
        raise Exception('Region parameter must be provided or '
                        'OS_REGION_NAME variable must be defined.')
    # optionally override the admin identity endpoint from the environment
    admin_endpoint = os.environ.get('KEYSTONE_ADMIN_ENDPOINT')
    if admin_endpoint is not None:
        self.osclients.override_endpoint(
            'identity', self.osclients.region, 'admin', admin_endpoint)
    self.objects_strategy = objects_strategy
    self.persistence_dir = os.path.expanduser(persistence_dir)
    self.pers_region = self.persistence_dir + '/' + region
    self.pers_keystone = self.persistence_dir + '/keystone'
    # cache directories are only needed for the caching strategies
    if objects_strategy not in (OpenStackMap.DIRECT_OBJECTS,
                                OpenStackMap.NO_CACHE_OBJECTS):
        for directory in (self.persistence_dir, self.pers_keystone,
                          self.pers_region):
            if not os.path.exists(directory):
                os.mkdir(directory)
    self._init_resource_maps()
    if auto_load:
        self.load_all()
    self.region_map = dict()
class ServersFacade(object):
    """Facade over the glance service of a target: lists the images of the
    regions reachable with one shared credential."""

    def __init__(self, target):
        """Create a new Facade for the specified target (a target is
        shared between regions using the same credential)

        :param target: dict with at least keystone_url, user, password and
            tenant; optionally use_keystone_v3 and list_images_timeout.
        """
        self.osclients = OpenStackClients(target['keystone_url'])
        self.osclients.set_credential(target['user'], target['password'],
                                      target['tenant'])
        # pass the boolean directly (the original used an if/else pair)
        self.osclients.set_keystone_version(
            target.get('use_keystone_v3', False))
        self.session = self.osclients.get_session()
        self.target = target
        # This is a default value
        self.images_dir = '/var/lib/glance/images'
        self.logger = logger_cli

    def _get_glanceclient(self, region):
        """helper method, to get a glanceclient for the region"""
        self.osclients.set_region(region)
        return self.osclients.get_glanceclient()

    def get_regions(self):
        """It returns the list of regions on the specified target.

        :return: a list of region names.
        """
        return self.osclients.get_regions('image')

    def get_imagelist(self, regionobj):
        """return a image list from the glance of the specified region

        :param regionobj: The GlanceSyncRegion object of the region to list
        :return: a list of GlanceSyncImage objects
        :raise GlanceFacadeException: on timeout or any listing error
        """
        client = self._get_glanceclient(regionobj.region)
        pool = None
        try:
            target = regionobj.target
            # We need a Pool to implement a timeout. Unfortunately
            # setting client.images.client.timeout does nothing.
            timeout = target.get('list_images_timeout', _default_timeout)
            pool = Pool(1)
            result = pool.apply_async(_getrawimagelist, (client, ))
            images = result.get(timeout=timeout)
            image_list = list()
            for image in images:
                i = GlanceSyncImage(
                    image['name'], image['id'], regionobj.fullname,
                    image['owner'], image['is_public'], image['checksum'],
                    image['size'], image['status'], image['properties'],
                    image)
                image_list.append(i)
        except TimeoutError:
            msg = regionobj.fullname + \
                ': Timeout while retrieving image list.'
            self.logger.error(msg)
            raise GlanceFacadeException(msg)
        except Exception as e:
            # fixed py2-only "except Exception, e" syntax
            cause = str(e)
            if not cause:
                cause = repr(e)
            msg = regionobj.fullname + \
                ': Error retrieving image list. Cause: ' + cause
            self.logger.error(msg)
            raise GlanceFacadeException(msg)
        finally:
            # The original leaked the worker process (never closed the
            # pool, notably after a timeout when the worker may be stuck).
            if pool is not None:
                pool.terminate()
        return image_list
def __init__(
        self, persistence_dir='~/openstackmap', region=None, auth_url=None,
        objects_strategy=USE_CACHE_OBJECTS, auto_load=True):
    """ Constructor

    NOTE(review): this __init__ is a byte-for-byte duplicate of another
    __init__ earlier in this file — presumably the same class pasted from
    two revisions; confirm which one the class actually uses.

    :param persistence_dir: The path where the data is saved.
       Ignored if objects_strategy is DIRECT_OBJECTS or NO_CACHE_OBJECTS
    :param region: the initial region (if undefined, use OS_REGION_NAME)
    :param auth_url: the keystone URI
    :param objects_strategy: sets the strategy about the object maps
       contained here. It can be:
       * DIRECT_OBJECTS is using the objects as they are got from the
         API. Be careful because methods as delete are available! The
         objects are not cached.
       * NO_CACHE_OBJECTS is using the objects converted to dictionaries,
         so methods to do operations are not available. The objects are
         not cached.
       * REFRESH_OBJECTS is using the objects converted to dictionaries.
         The objects are cached: the new version replace the old one.
       * USE_CACHE_OBJECTS is using the objects converted to
         dictionaries. If a cached copy of the objects are available, it
         is used.
       * USE_CACHE_OBJECTS_ONLY is using the objects converted to
         dictionaries. This strategy used cached objects only. It never
         contacts with the servers, even when the object is not available
         in the local cache.
    :param auto_load: if True, invoke self.load_all()

    Note that neutron objects returned by the API are already dictionaries
    """
    self.logger = logging.getLogger(__name__)
    # build the client factory, passing the keystone URI when provided
    if auth_url:
        self.osclients = OpenStackClients(auth_url=auth_url)
    else:
        self.osclients = OpenStackClients()
    # a region must come either from the parameter or the environment
    if region:
        self.osclients.set_region(region)
    else:
        if 'OS_REGION_NAME' not in env:
            raise Exception('Region parameter must be provided or '
                            'OS_REGION_NAME variable must be defined.')
        else:
            region = env['OS_REGION_NAME']
    # optional override of the admin identity endpoint
    if 'KEYSTONE_ADMIN_ENDPOINT' in os.environ:
        self.osclients.override_endpoint(
            'identity', self.osclients.region, 'admin',
            os.environ['KEYSTONE_ADMIN_ENDPOINT'])
    self.objects_strategy = objects_strategy
    self.persistence_dir = os.path.expanduser(persistence_dir)
    self.pers_region = self.persistence_dir + '/' + region
    self.pers_keystone = self.persistence_dir + '/keystone'
    # cache directories are only created for the caching strategies
    if objects_strategy not in (OpenStackMap.DIRECT_OBJECTS,
                                OpenStackMap.NO_CACHE_OBJECTS):
        if not os.path.exists(self.persistence_dir):
            os.mkdir(self.persistence_dir)
        if not os.path.exists(self.pers_keystone):
            os.mkdir(self.pers_keystone)
        if not os.path.exists(self.pers_region):
            os.mkdir(self.pers_region)
    self._init_resource_maps()
    if auto_load:
        self.load_all()
    self.region_map = dict()
class RegisterRegion(object):
    """Class to register users with role assignments, services and
    endpoints"""

    def __init__(self):
        """constructor"""
        self.osclients = OpenStackClients()
        self.keystone = self.osclients.get_keystoneclient()
        self.password_changer = PasswordChanger(self.osclients)

    def service_exists(self, service_name, service_type):
        """Ensure that the service exists: create if it does not.

        :param service_name: the service name (e.g. nova)
        :param service_type: the service type (e.g. compute)
        :return: the service id
        """
        try:
            service = self.keystone.services.find(name=service_name)
        except NotFound:
            service = self.keystone.services.create(name=service_name,
                                                    type=service_type)
        return service.id

    def region_exists(self, region_id):
        """ Ensure that the region exists: create if it does not.

        :param region_id: the region id (the region name)
        :return: Nothing
        """
        try:
            self.keystone.regions.find(id=region_id)
        except NotFound:
            self.keystone.regions.create(region_id)

    def project_exists(self, tenant_name, domain_id='default'):
        """Ensure that the project exists: create if it does not.

        :param tenant_name: the tenant (aka project)
        :param domain_id: the domain-id (or default)
        :return: the project (a.k.a. tenant) id
        """
        try:
            project = self.keystone.projects.find(name=tenant_name)
        except NotFound:
            project = self.keystone.projects.create(tenant_name, domain_id)
        return project.id

    def user_exists(self, username, password, set_passwords=False):
        """check that user exists, create him/her otherwise. If the user
        exists and set_password is True, it sets the password.

        :param username: the username of the user
        :param password: the password of the user
        :param set_passwords: if True and the user exists, change the
            password
        :return: the user object
        """
        try:
            user = self.keystone.users.find(name=username)
            if set_passwords:
                self.password_changer.change_password(user, password)
        except NotFound:
            user = self.keystone.users.create(name=username,
                                              password=password)
        return user

    def endpoint_exists(self, service_id, interface, url, region):
        """check that enpoint exists. Otherwise, create it. Also check
        that the URLs are the same; if they are different, update.

        :param service_id: the service id
        :param interface: interface may be public, internal, admin.
        :param url: the URL of the endpoint
        :param region: the region id.
        :return: the endpoint id.
        """
        result = self.keystone.endpoints.list(service=service_id,
                                              interface=interface,
                                              region=region)
        if not result:
            result = self.keystone.endpoints.create(service=service_id,
                                                    interface=interface,
                                                    url=url, region=region)
        else:
            result = result[0]
            if result.url != url:
                self.keystone.endpoints.update(result.id, url=url)
        return result.id

    def register_region(self, region, set_passwords=False):
        """Register the region data. It is intended to create all the
        users and services required to add a region to a federation. This
        method is idempotent, that is, the effect of invoking it multiple
        times is the same that invoking only once.

        It ensure that the region region['region'] is registered. It
        ensure that the users region['users'] exist and have the role
        admin in the service project. It ensure that the services
        region['services'] and its endpoints exist and the URLs are
        correct.

        :param region: a dictionary extracted from a JSON with the
            structure of default_region_json
        :param set_passwords: if true, override the passwords when the
            user exists. If false, passwords are only used when the users
            are created.
        :return: nothing
        """
        region_name = region['region']
        self.region_exists(region_name)
        # the role lookup is loop-invariant: hoisted out of the user loop
        admin_role = self.keystone.roles.find(name='admin')
        for user in region['users']:
            # BUG fix: set_passwords was accepted but never forwarded, so
            # existing users never had their password updated.
            userobj = self.user_exists(user['username'], user['password'],
                                       set_passwords)
            if user['username'].startswith('admin-'):
                # admin users use their own tenant instead of the service
                # one
                project = self.project_exists(user['username'])
            else:
                project = self.project_exists('service')
            self.keystone.roles.grant(admin_role, user=userobj,
                                      project=project)

        for s in region['services']:
            service_id = self.service_exists(s['name'], s['type'])
            self.endpoint_exists(service_id, 'public', s['public'],
                                 region_name)
            self.endpoint_exists(service_id, 'admin', s['admin'],
                                 region_name)
            self.endpoint_exists(service_id, 'internal', s['internal'],
                                 region_name)

    @staticmethod
    def transform_json(data, env):
        """Utility method, to expand ${VAR} and $VAR in data, using env
        variables

        :param data: the template to process
        :param env: array with the variables
        :return: the template with the variables expanded
        """
        var_shell_pattern_c = re.compile(r'\${{(\w+)}}')
        var_shell_pattern = re.compile(r'\$(\w+)')
        data = data.replace('{', '{{')
        data = data.replace('}', '}}')
        data = var_shell_pattern_c.sub(r'{\1}', data)
        data = var_shell_pattern.sub(r'{\1}', data)
        return data.format(**env)

    def register_regions(self, regions_json=default_region_json,
                         env=os.environ):
        """This is a front-end of the method register region, that
        receives as parameter the JSON and the environment to override the
        ${VAR} and $VAR expressions.

        It admits a JSON with an only region (with the structure of
        default_region_json) or a JSON with multiple regions. This last
        has a 'regions' fields that is an array of regions.

        :param regions_json: a JSON
        :param env: an environment (array of variables)
        :return: nothing
        """
        regions_json = self.transform_json(regions_json, env)
        region = json.loads(regions_json)
        set_passwords = 'SET_OPENSTACK_PASSWORDS' in env
        if 'regions' in region:
            # This is an array of regions.
            # BUG fix: the original iterated the dict itself ("for r in
            # region"), which yields its string keys, not the region
            # dictionaries under the 'regions' key.
            for r in region['regions']:
                self.register_region(r, set_passwords)
        else:
            # This is an only region
            self.register_region(region, set_passwords)
def __init__(self):
    """constructor"""
    # Client factory — presumably configured from the OS_* environment
    # variables; confirm in OpenStackClients.
    self.osclients = OpenStackClients()
    # keystone client reused by all the registration operations
    self.keystone = self.osclients.get_keystoneclient()
    # helper that resets user passwords through the same clients
    self.password_changer = PasswordChanger(self.osclients)
class RegisterRegion(object):
    """Class to register users with role assignments, services and
    endpoints"""

    def __init__(self):
        """constructor"""
        self.osclients = OpenStackClients()
        self.keystone = self.osclients.get_keystoneclient()
        self.change_domain_name()
        self.password_changer = PasswordChanger(self.osclients)
        os.environ['OS_REGION_NAME'] = os.environ['REGION']
        self.region_exists(os.environ['REGION'])

    def get_region(self):
        """Obtain the region name from the metadata service.

        Reads http://169.254.169.254/openstack/latest/meta_data.json via
        curl; 'region_keystone' takes precedence over 'Region' when set.

        :return: the region name
        """
        p2 = Popen(
            ["curl",
             "http://169.254.169.254/openstack/latest/meta_data.json"],
            stdout=PIPE)
        metadatajson, err = p2.communicate()
        meta = json.loads(metadatajson)["meta"]
        region = meta["Region"]
        region2 = meta["region_keystone"]
        if region2:
            return region2
        return region

    def service_exists(self, service_name, service_type):
        """Ensure that the service exists: create if it does not.

        :param service_name: the service name (e.g. nova)
        :param service_type: the service type (e.g. compute)
        :return: the service id
        """
        try:
            service = self.keystone.services.find(name=service_name)
        except NotFound:
            service = self.keystone.services.create(name=service_name,
                                                    type=service_type)
        return service.id

    def region_exists(self, region_id):
        """ Ensure that the region exists: create if it does not.

        :param region_id: the region id (the region name)
        :return: Nothing
        """
        if not self.is_region(region_id):
            self.keystone.regions.create(region_id)

    def change_domain_name(self):
        """ It change the domain name to default, which is the one used
        in FIWARE Lab.

        :return: nothing
        """
        self.keystone = self.get_credentials()
        domain = None
        try:
            domain = self.keystone.domains.find(name="default")
        except Exception:
            try:
                domain = self.keystone.domains.find(name="Default")
            except Exception as e:
                print(e)
        # BUG fix: the original called update() unconditionally; when both
        # lookups failed, 'domain' was unbound and a NameError was raised.
        if domain is not None:
            self.keystone.domains.update(domain, name="default")
        self.keystone = self.return_credentails()

    def get_credentials(self):
        """Get a keystone client for the current region; when the
        'default' domain is not reachable, retry with the Default domain
        settings against the Spain2 region.

        :return: a keystone client
        """
        os.environ['OS_REGION_NAME'] = self.get_region()
        osclients = OpenStackClients()
        keystone = osclients.get_keystoneclient()
        try:
            self.keystone.domains.find(name="default")
            return keystone
        except Exception:
            os.environ['OS_USER_DOMAIN_NAME'] = "Default"
            os.environ['OS_PROJECT_DOMAIN_ID'] = "default"
            os.environ['OS_REGION_NAME'] = "Spain2"
            osclients = OpenStackClients()
            return osclients.get_keystoneclient()

    def return_credentails(self):
        """Restore the default domain/region settings and return a fresh
        keystone client. NOTE(review): the method name keeps the original
        typo ('credentails') to avoid breaking external callers.

        :return: a keystone client
        """
        os.environ['OS_USER_DOMAIN_NAME'] = "default"
        os.environ['OS_PROJECT_DOMAIN_NAME'] = "default"
        os.environ['OS_REGION_NAME'] = os.environ['REGION']
        osclients = OpenStackClients()
        return osclients.get_keystoneclient()

    def is_region(self, region_id):
        """ It checks if the region exists.

        :param region_id: the region id
        :return: True/False
        """
        return any(region.id == region_id
                   for region in self.keystone.regions.list())

    def project_exists(self, tenant_name, domain_id='default'):
        """Ensure that the project exists: create if it does not.

        :param tenant_name: the tenant (aka project)
        :param domain_id: the domain-id (or default)
        :return: the project (a.k.a. tenant) id
        """
        try:
            project = self.keystone.projects.find(name=tenant_name)
        except NotFound:
            project = self.keystone.projects.create(tenant_name, domain_id)
        return project.id

    def user_exists(self, username, password, set_passwords=False):
        """check that user exists, create him/her otherwise. If the user
        exists and set_password is True, it sets the password.

        :param username: the username of the user
        :param password: the password of the user
        :param set_passwords: if True and the user exists, change the
            password
        :return: the user object
        """
        try:
            user = self.keystone.users.find(name=username)
            if set_passwords:
                self.password_changer.change_password(user, password)
        except NotFound:
            user = self.keystone.users.create(name=username,
                                              password=password)
        return user

    def delete_spain2_regions(self):
        """Best-effort removal of the keystone endpoints and endpoint
        groups registered for the 'Spain2' region."""
        service_id = self.keystone.services.find(name="keystone")
        # the three identical try blocks of the original are collapsed
        for interface in ('public', 'admin', 'internal'):
            try:
                endpoint = self.keystone.endpoints.find(
                    service=service_id, interface=interface,
                    region='Spain2')
                self.keystone.endpoints.delete(endpoint)
            except Exception:
                # best-effort: the endpoint may not exist (original used
                # a bare except; narrowed to Exception)
                pass
        for endpoint_group in self.keystone.endpoint_groups.list():
            filters = endpoint_group.filters
            if filters and filters.get('region_id') == "Spain2":
                self.keystone.endpoint_groups.delete(endpoint_group)

    def create_endpoint_group(self, region):
        """ It create the endpoint group for the region.

        :param region: the region
        :return: nothing
        """
        self.keystone.endpoint_groups.create(
            "Region Group", filters={"region_id": region})

    def endpoint_exists(self, service_id, interface, url, region):
        """check that enpoint exists. Otherwise, create it. Also check
        that the URLs are the same; if they are different, update.

        :param service_id: the service id
        :param interface: interface may be public, internal, admin.
        :param url: the URL of the endpoint
        :param region: the region id.
        :return: the endpoint id.
        """
        endpoint = self.get_endpoint(service_id, interface, region)
        if endpoint:
            result = endpoint
            if result.url != url:
                self.keystone.endpoints.update(result.id, url=url)
        else:
            result = self.keystone.endpoints.create(service=service_id,
                                                    interface=interface,
                                                    url=url, region=region)
        return result.id

    def get_endpoint(self, service_id, interface, region_id):
        """ It obtains the endpoint

        :param service_id: the service associated
        :param interface: the interface
        :param region_id: the region
        :return: the endpoint, or None when it does not exist
        """
        endpoints = self.keystone.endpoints.list(service=service_id,
                                                 interface=interface)
        for endpoint in endpoints:
            if endpoint.region == region_id:
                return endpoint
        return None

    def register_region(self, region, set_passwords=False):
        """Register the region data. It is intended to create all the
        users and services required to add a region to a federation. This
        method is idempotent, that is, the effect of invoking it multiple
        times is the same that invoking only once.

        It ensure that the region region['region'] is registered. It
        ensure that the users region['users'] exist and have the role
        admin in the service project. It ensure that the services
        region['services'] and its endpoints exist and the URLs are
        correct.

        :param region: a dictionary extracted from a JSON with the
            structure of default_region_json
        :param set_passwords: if true, override the passwords when the
            user exists. If false, passwords are only used when the users
            are created.
        :return: nothing
        """
        region_name = region['region']
        # the role lookup is loop-invariant: hoisted out of the user loop
        admin_role = self.keystone.roles.find(name='admin')
        for user in region['users']:
            # BUG fix: set_passwords was accepted but never forwarded, so
            # existing users never had their password updated.
            userobj = self.user_exists(user['username'], user['password'],
                                       set_passwords)
            if user['username'].startswith('admin-'):
                # admin users use their own tenant instead of the service
                # one
                project = self.project_exists(user['username'])
            else:
                project = self.project_exists('service')
            self.keystone.roles.grant(admin_role, user=userobj,
                                      project=project)

        for s in region['services']:
            service_id = self.service_exists(s['name'], s['type'])
            self.endpoint_exists(service_id, 'public', s['public'],
                                 region_name)
            self.endpoint_exists(service_id, 'admin', s['admin'],
                                 region_name)
            self.endpoint_exists(service_id, 'internal', s['internal'],
                                 region_name)
        self.create_endpoint_group(region_name)

    @staticmethod
    def transform_json(data, env):
        """Utility method, to expand ${VAR} and $VAR in data, using env
        variables

        :param data: the template to process
        :param env: array with the variables
        :return: the template with the variables expanded
        """
        var_shell_pattern_c = re.compile(r'\${{(\w+)}}')
        var_shell_pattern = re.compile(r'\$(\w+)')
        data = data.replace('{', '{{')
        data = data.replace('}', '}}')
        data = var_shell_pattern_c.sub(r'{\1}', data)
        data = var_shell_pattern.sub(r'{\1}', data)
        return data.format(**env)

    def register_regions(self, regions_json=default_region_json,
                         env=os.environ):
        """This is a front-end of the method register region, that
        receives as parameter the JSON and the environment to override the
        ${VAR} and $VAR expressions.

        It admits a JSON with an only region (with the structure of
        default_region_json) or a JSON with multiple regions. This last
        has a 'regions' fields that is an array of regions.

        :param regions_json: a JSON
        :param env: an environment (array of variables)
        :return: nothing
        """
        regions_json = self.transform_json(regions_json, env)
        region = json.loads(regions_json)
        set_passwords = 'SET_OPENSTACK_PASSWORDS' in env
        if 'regions' in region:
            # This is an array of regions.
            # BUG fix: the original iterated the dict itself ("for r in
            # region"), which yields its string keys, not the region
            # dictionaries under the 'regions' key.
            for r in region['regions']:
                self.register_region(r, set_passwords)
        else:
            # This is an only region
            self.register_region(region, set_passwords)