def get_dst_user_from_src_user_id(src_keystone, dst_keystone, src_user_id,
                                  fallback_to_admin=True):
    """Returns user from destination with the same name as on source.

    None if user does not exist.
    """
    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            src_user = src_keystone.keystone_client.users.get(src_user_id)
            src_user_name = src_user.name
    except ks_exceptions.NotFound:
        LOG.warning("User '%s' not found on SRC!", src_user_id)
        if not fallback_to_admin:
            return
        LOG.warning("Replacing user '%s' with SRC admin", src_user_id)
        src_user_name = cfglib.CONF.src.user

    # When falling back is allowed, use the configured DST admin as default.
    default_user_name = cfglib.CONF.dst.user if fallback_to_admin else None

    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            return dst_keystone.try_get_user_by_name(src_user_name,
                                                     default_user_name)
    except ks_exceptions.NotFound:
        return None
def get_dst_user_from_src_user_id(src_keystone, dst_keystone, src_user_id,
                                  fallback_to_admin=True):
    """Returns user from destination with the same name as on source.

    None if user does not exist.
    """
    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            src_user = src_keystone.keystone_client.users.get(src_user_id)
            src_user_name = src_user.name
    except ks_exceptions.NotFound:
        LOG.warning("User '%s' not found on SRC!", src_user_id)
        if fallback_to_admin:
            LOG.warning("Replacing user '%s' with SRC admin", src_user_id)
            src_user_name = cfglib.CONF.src.user
        else:
            return

    if fallback_to_admin:
        default_user_name = cfglib.CONF.dst.user
    else:
        default_user_name = None

    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            return dst_keystone.try_get_user_by_name(
                src_user_name, default_user_name)
    except ks_exceptions.NotFound:
        return None
def create_volume(self, size, **kwargs):
    """Creates volume of given size

    :raises: OverLimit in case quota exceeds for tenant
    """
    cinder = self.cinder_client
    tenant_id = kwargs.get('project_id')

    # If migrated volume needs to be created on non default volume_type
    kwargs['volume_type'] = self.attr_override.get_attr(kwargs, 'volume_type')

    # if volume needs to be created in non-admin tenant, re-auth is
    # required in that tenant
    if tenant_id:
        identity = self.cloud.resources[utils.IDENTITY_RESOURCE]
        ks = identity.keystone_client
        user = self.config.cloud.user
        with keystone.AddAdminUserToNonAdminTenant(ks, user, tenant_id):
            tenant = ks.tenants.get(tenant_id)
            cinder = self.proxy(self.get_client(tenant=tenant.name),
                                self.config)
            with proxy_client.expect_exception(cinder_exc.OverLimit):
                return cinder.volumes.create(size, **kwargs)
    else:
        with proxy_client.expect_exception(cinder_exc.OverLimit):
            return cinder.volumes.create(size, **kwargs)
def get_dst_tenant_from_src_tenant_id(src_keystone, dst_keystone,
                                      src_tenant_id):
    """Return the DST tenant whose name matches the SRC tenant's name.

    Returns None when the tenant is missing on either cloud.
    """
    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            client = src_keystone.keystone_client
            src_tenant = client.tenants.find(id=src_tenant_id)
    except ks_exceptions.NotFound:
        return None

    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            client = dst_keystone.keystone_client
            return client.tenants.find(name=src_tenant.name)
    except ks_exceptions.NotFound:
        return None
def get_dst_tenant_from_src_tenant_id(src_keystone, dst_keystone,
                                      src_tenant_id):
    """Return the DST tenant mapped from the SRC tenant's name, or None.

    The SRC tenant name is translated through the configured tenant name
    map before it is looked up on the destination.
    """
    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            client = src_keystone.keystone_client
            src_tenant = client.tenants.find(id=src_tenant_id)
    except ks_exceptions.NotFound:
        return None

    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            client = dst_keystone.keystone_client
            dst_tenant_name = src_keystone.tenant_name_map.map(
                src_tenant.name)
            return find_by_name("tenant", client.tenants.list(),
                                dst_tenant_name)
    except ks_exceptions.NotFound:
        return None
def get_availability_zone(self, az_name):
    """Return ``az_name`` if such availability zone exists, else None."""
    try:
        with proxy_client.expect_exception(nova_exc.NotFound):
            self.nova_client.availability_zones.find(zoneName=az_name)
            return az_name
    except nova_exc.NotFound:
        return None
def image_exists(self, image_id):
    """Return True when an image with the given ID exists in glance."""
    with proxy_client.expect_exception(glance_exceptions.HTTPNotFound):
        try:
            self.get_image_raw(image_id)
            return True
        except glance_exceptions.HTTPNotFound:
            return False
def _upload_user_tenant_roles(self, user_tenants_roles, users, tenants):
    """Grant per-tenant roles to migrated users on the destination.

    Already-assigned roles are skipped; Conflict responses from keystone
    are treated as "role already exists".
    """
    roles_id = {role.name.lower(): role.id
                for role in self.get_roles_list()}
    dst_users = {user.name.lower(): user.id
                 for user in self.get_users_list()}
    dst_roles = {role.id: role.name.lower()
                 for role in self.get_roles_list()}
    get_user_roles = self._get_user_roles_cached()

    for _user in users:
        user = _user["user"]
        # NOTE(review): dst_users keys are lower-cased but this membership
        # test uses the raw name — confirm mixed-case names are intended
        # to be skipped here.
        if user["name"] not in dst_users:
            continue
        for _tenant in tenants:
            tenant = _tenant["tenant"]
            user_roles_objs = get_user_roles(_user["meta"]["new_id"],
                                             _tenant["meta"]["new_id"])
            # Role entries may be plain IDs or role objects with a name.
            exists_roles = [dst_roles[role]
                            if not hasattr(role, "name")
                            else role.name.lower()
                            for role in user_roles_objs]
            user_roles = user_tenants_roles[user["name"].lower()]
            for _role in user_roles[tenant["name"].lower()]:
                role = _role["role"]
                if role["name"].lower() in exists_roles:
                    continue
                try:
                    with proxy_client.expect_exception(
                            ks_exceptions.Conflict):
                        self.keystone_client.roles.add_user_role(
                            _user["meta"]["new_id"],
                            roles_id[role["name"].lower()],
                            _tenant["meta"]["new_id"])
                except ks_exceptions.Conflict:
                    LOG.info("Role '%s' for user '%s' in tenant '%s' "
                             "already exists, skipping",
                             role["name"], user["name"], tenant["name"])
def _read_info_resources(self):
    """
    Read info about compute resources except instances from the cloud.
    """
    info = {'flavors': {},
            'default_quotas': {},
            'user_quotas': [],
            'project_quotas': []}

    for flavor in self.get_flavor_list(is_public=None):
        with proxy_client.expect_exception(nova_exc.NotFound):
            internal_flavor = self.convert(flavor, cloud=self.cloud)
        if internal_flavor is None:
            continue
        info['flavors'][flavor.id] = internal_flavor
        LOG.info("Got flavor '%s'", flavor.name)
        LOG.debug("%s", pprint.pformat(internal_flavor))

    if self.config.migrate.migrate_quotas:
        info['default_quotas'] = self.get_default_quotas()
        info['project_quotas'], info['user_quotas'] = \
            self._read_info_quotas()

    return info
def _ensure_instance_flavor_exists(self, instance):
    """Context-manager body: make sure the instance's flavor exists.

    If the flavor is missing or its characteristics differ from the
    recorded ones, a temporary replacement flavor is created and the
    instance's flavor_id is pointed at it; the temporary flavor is
    removed on exit.
    """
    flavor_id = instance['flavor_id']
    flavor_details = instance['flav_details']
    new_flavor_id = None

    try:
        with proxy_client.expect_exception(nova_exc.NotFound):
            flavor = self.get_flavor_from_id(flavor_id)
            # Recreate only when any stored characteristic diverges.
            need_new_flavor = (
                flavor.vcpus != flavor_details['vcpus'] or
                flavor.ram != flavor_details['memory_mb'] or
                flavor.disk != flavor_details['root_gb'] or
                flavor.ephemeral != flavor_details['ephemeral_gb'])
    except nova_exc.NotFound:
        need_new_flavor = True

    if need_new_flavor:
        new_flavor_id = str(uuid.uuid4())
        instance['flavor_id'] = new_flavor_id
        self.create_flavor(name='deleted_' + flavor_id,
                           flavorid=new_flavor_id,
                           ram=flavor_details['memory_mb'],
                           vcpus=flavor_details['vcpus'],
                           disk=flavor_details['root_gb'],
                           ephemeral=flavor_details['ephemeral_gb'])

    try:
        yield
    finally:
        if new_flavor_id is not None:
            self.delete_flavor(new_flavor_id)
def image_exists(self, image_id):
    """Return True when the image exists and is not marked deleted."""
    with proxy_client.expect_exception(glance_exceptions.HTTPNotFound):
        try:
            img = self.get_image_raw(image_id)
            return not img.deleted
        except glance_exceptions.HTTPNotFound:
            return False
def is_image_id_occupied(self, image_id):
    """Return True when some image (even a deleted one) holds this ID."""
    with proxy_client.expect_exception(glance_exceptions.HTTPNotFound):
        try:
            self.get_image_raw(image_id)
            return True
        except glance_exceptions.HTTPNotFound:
            return False
def _read_info_resources(self):
    """
    Read info about compute resources except instances from the cloud.
    """
    info = {
        'flavors': {},
        'default_quotas': {},
        'user_quotas': [],
        'project_quotas': []
    }

    for flavor in self.get_flavor_list(is_public=None):
        with proxy_client.expect_exception(nova_exc.NotFound):
            internal_flavor = self.convert(flavor, cloud=self.cloud)
        if internal_flavor is None:
            continue
        info['flavors'][flavor.id] = internal_flavor
        LOG.info("Got flavor '%s'", flavor.name)
        LOG.debug("%s", pprint.pformat(internal_flavor))

    if self.config.migrate.migrate_quotas:
        info['default_quotas'] = self.get_default_quotas()
        info['project_quotas'], info['user_quotas'] = \
            self._read_info_quotas()

    return info
def run(self, info, **kwargs):
    """Attach migrated volumes to their instances on the destination.

    Volumes that are missing, already in use, or failing to attach are
    reported through the state notifier and skipped.
    """
    info = copy.deepcopy(info)
    compute_res = self.cloud.resources[utils.COMPUTE_RESOURCE]
    storage_res = self.cloud.resources[utils.STORAGE_RESOURCE]

    for instance in info[utils.INSTANCES_TYPE].values():
        if not instance[utils.META_INFO].get(utils.VOLUME_BODY):
            continue
        for vol in instance[utils.META_INFO][utils.VOLUME_BODY]:
            volume = vol['volume']
            volume_id = volume['id']
            status = None
            with proxy_client.expect_exception(cinder_exceptions.NotFound):
                try:
                    status = storage_res.get_status(volume_id)
                except cinder_exceptions.NotFound:
                    # Volume may already have been migrated; look up its
                    # destination counterpart instead.
                    dst_volume = storage_res.get_migrated_volume(volume_id)
                    if dst_volume is not None:
                        volume_id = dst_volume.id
                        status = dst_volume.status

            inst = instance['instance']
            source_instance = self._try_get_source_instance(instance)

            if status is None:
                msg = ("Cannot attach volume '{vol}' to VM '{vm}': volume "
                       "does not exist").format(vol=volume_id,
                                                vm=inst['name'])
                self.state_notifier.incomplete(
                    objects.MigrationObjectType.VM, source_instance, msg)
                continue

            if status == 'available':
                nova_client = compute_res.nova_client
                try:
                    nova_client.volumes.create_server_volume(
                        inst['id'], volume_id, volume['device'])
                    timeout = self.cfg.migrate.storage_backend_timeout
                    storage_res.wait_for_status(volume_id,
                                                storage_res.get_status,
                                                'in-use',
                                                timeout=timeout)
                except (cinder_exceptions.ClientException,
                        nova_exceptions.ClientException,
                        exception.TimeoutException) as e:
                    msg = ("Failed to attach volume '%s' to instance "
                           "'%s': %s. Skipping" %
                           (volume_id, inst['id'], e.message))
                    LOG.warning(msg)
                    self.state_notifier.incomplete(
                        objects.MigrationObjectType.VM, source_instance,
                        msg)
            else:
                msg = ("Cannot attach volume '%s' to instance '%s' since "
                       "it's status is '%s'" %
                       (volume_id, inst['id'], status))
                LOG.warning(msg)
                self.state_notifier.incomplete(
                    objects.MigrationObjectType.VM, source_instance, msg)

    return {}
def get_instances_list(self, search_opts=None, tenant_ids=None,
                       detailed=True):
    """
    Get a list of servers.

    :param search_opts: Search options to filter out servers.
    :param tenant_ids: The list of ids of tenants to filter out servers.
    :param detailed: Whether to return detailed server info.

    :rtype: list of :class:`Server`
    """
    if search_opts is None:
        search_opts = {}
    ids = search_opts.get('id')

    if not ids:
        search_opts.update(all_tenants=True)
        if not tenant_ids:
            servers = self.nova_client.servers.list(
                detailed=detailed, search_opts=search_opts)
        else:
            servers = []
            for t in tenant_ids:
                search_opts.update(tenant_id=t)
                servers.extend(
                    self.nova_client.servers.list(
                        detailed=detailed, search_opts=search_opts))
    else:
        ids = ids if isinstance(ids, list) else [ids]
        servers = []
        for i in ids:
            try:
                with proxy_client.expect_exception(nova_exc.NotFound):
                    servers.append(self.nova_client.servers.get(i))
            except nova_exc.NotFound:
                LOG.warning("No server with ID of '%s' exists.", i)

    active_computes = self.get_compute_hosts()
    active_servers = []
    for server in servers:
        if server.status not in ALLOWED_VM_STATUSES:
            msg = ("Instance '%s' has been excluded from VMs list, "
                   "because the status '%s' is not allowed." %
                   (server.id, server.status))
            self.state_notifier.skip(objects.MigrationObjectType.VM,
                                     server, msg)
            continue
        server_host = getattr(server, INSTANCE_HOST_ATTRIBUTE)
        if server_host not in active_computes:
            msg = (
                "Instance '%s' has been excluded from VMs list, "
                "because it is booted on non-active compute host '%s'." %
                (server.id, server_host))
            self.state_notifier.skip(objects.MigrationObjectType.VM,
                                     server, msg)
            continue
        active_servers.append(server)
    return active_servers
def get_dst_tenant_from_src_tenant_id(src_keystone, dst_keystone,
                                      src_tenant_id):
    """Return the DST tenant mapped from the SRC tenant's name, or None."""
    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            client = src_keystone.keystone_client
            src_tenant = client.tenants.find(id=src_tenant_id)
    except ks_exceptions.NotFound:
        return None

    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            client = dst_keystone.keystone_client
            dst_tenant_name = src_keystone.tenant_name_map.map(
                src_tenant.name)
            return find_by_name('tenant', client.tenants.list(),
                                dst_tenant_name)
    except ks_exceptions.NotFound:
        return None
def missing_vlan_physnets(self, dst_info, dst_neutron_client, ext_net_map):
    """
    Get list of missing physical networks for VLAN network type.

    :param dst_info: NetworkInfo instance of DST cloud
    :param dst_neutron_client: DST neutron client
    :param ext_net_map: External networks mapping dictionary.
                        Format: {<src_external_network>:
                                     <dst_external_network>, ...}

    :return: List of missing VLAN physnets.
    """
    missing_vlan_physnets = []
    dst_vlan_physnets = [net.physnet
                         for net in dst_info.get_networks()
                         if net.network_type == 'vlan']

    # We need to specify segmentation ID in case of VLAN network creation
    # in OpenStack versions earlier than Juno (f.e. Icehouse, Grizzly etc.)
    dst_seg_ids = neutron.get_segmentation_ids_from_net_list(
        dst_info.networks_info)

    # We do not care about free segmentation ID on source cloud, we only
    # need to have destination one for checking purpose
    free_seg_id = neutron.generate_new_segmentation_id(
        dst_seg_ids, dst_seg_ids, 'vlan')

    for network in self.get_networks():
        if network.network_type != 'vlan':
            continue
        if network.physnet in dst_vlan_physnets:
            continue
        if network.external and network.id in ext_net_map:
            LOG.debug(
                "Network '%s' is external and specified in the "
                "external networks mapping. Skipping network...",
                network.id)
            continue

        # Probe by actually creating a throwaway network: BadRequest
        # (a NeutronClientException subclass) means the physnet is absent.
        with proxy_client.expect_exception(neutron_exc.BadRequest):
            try:
                network_info = {
                    'network': {
                        'provider:physical_network': network.physnet,
                        'provider:network_type': 'vlan',
                        'provider:segmentation_id': free_seg_id
                    }
                }
                new_net = dst_neutron_client.create_network(network_info)
            except neutron_exc.NeutronClientException:
                missing_vlan_physnets.append(network.physnet)
            else:
                dst_neutron_client.delete_network(new_net['network']['id'])

    return missing_vlan_physnets
def try_get_tenant_name_by_id(self, tenant_id, default=None):
    """
    Same as `get_tenant_by_id` but returns `default` in case tenant ID
    is not present
    """
    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            return self.keystone_client.tenants.get(tenant_id).name
    except ks_exceptions.NotFound:
        LOG.warning("Tenant '%s' not found, returning default value = "
                    "'%s'", tenant_id, default)
        return default
def check_affinity_api(cloud):
    """Abort the migration if the cloud lacks the Nova server-groups API."""
    compute_resource = cloud.resources[utils.COMPUTE_RESOURCE]
    with proxy_client.expect_exception(nova_exceptions.NotFound):
        try:
            compute_resource.nova_client.server_groups.list()
        except nova_exceptions.NotFound:
            raise cf_exceptions.AbortMigrationError(
                "'%s' cloud does not support affinity/anti-affinity "
                "(Nova server groups) API." % cloud.position)
def get_ref_image(self, image_id):
    """Return a reference to the raw image data.

    :raises: ImageDownloadError when the image cannot be downloaded.
    """
    try:
        # ssl.ZeroReturnError happens because a size of an image is zero
        with proxy_client.expect_exception(ssl.ZeroReturnError,
                                           glance_exceptions.HTTPException,
                                           IOError):
            return self.glance_client.images.data(image_id)
    except (ssl.ZeroReturnError,
            glance_exceptions.HTTPException,
            IOError):
        raise exception.ImageDownloadError
def create_tenant(self, tenant_name, description=None, enabled=True):
    """ Create new tenant in keystone. """
    try:
        with proxy_client.expect_exception(ks_exceptions.Conflict):
            return self.keystone_client.tenants.create(
                tenant_name=tenant_name,
                description=description,
                enabled=enabled)
    except ks_exceptions.Conflict:
        # Tenant already exists; hand back the existing one.
        return self.get_tenant_by_name(tenant_name)
def is_vm_deleted(client, instance_id):
    """
    Returns True when there is no VM with ID provided in first argument.
    """
    try:
        with proxy_client.expect_exception(nova_exc.NotFound):
            client.servers.get(instance_id)
        return False
    except nova_exc.NotFound:
        return True
def try_get_user_by_id(self, user_id, default=None):
    """Return the user with ``user_id``; fall back to ``default``.

    When ``default`` is not given, the configured admin user's ID is
    used as the fallback.
    """
    if default is None:
        admin_usr = self.try_get_user_by_name(self.config.cloud.user)
        default = admin_usr.id
    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            return self.keystone_client.users.find(id=user_id)
    except ks_exceptions.NotFound:
        LOG.warning("User '%s' has not been found, returning default "
                    "value = '%s'", user_id, default)
        return self.keystone_client.users.find(id=default)
def _add_flavor_access_for_tenants(self, flavor_id, tenant_ids):
    """Grant each tenant access to the flavor, ignoring existing grants."""
    for t in tenant_ids:
        LOG.debug("Adding access for tenant '%s' to flavor '%s'",
                  t, flavor_id)
        try:
            with proxy_client.expect_exception(nova_exc.Conflict):
                self.add_flavor_access(flavor_id, t)
        except nova_exc.Conflict:
            LOG.debug("Tenant '%s' already has access to flavor '%s'",
                      t, flavor_id)
def missing_vlan_physnets(self, dst_info, dst_neutron_client, ext_net_map):
    """
    Get list of missing physical networks for VLAN network type.

    :param dst_info: NetworkInfo instance of DST cloud
    :param dst_neutron_client: DST neutron client
    :param ext_net_map: External networks mapping dictionary.
                        Format: {<src_external_network>:
                                     <dst_external_network>, ...}

    :return: List of missing VLAN physnets.
    """
    missing_vlan_physnets = []
    dst_vlan_physnets = [net.physnet
                        for net in dst_info.get_networks()
                        if net.network_type == 'vlan']

    # We need to specify segmentation ID in case of VLAN network creation
    # in OpenStack versions earlier than Juno (f.e. Icehouse, Grizzly etc.)
    dst_seg_ids = neutron.get_segmentation_ids_from_net_list(
        dst_info.networks_info)

    # We do not care about free segmentation ID on source cloud, we only
    # need to have destination one for checking purpose
    free_seg_id = neutron.generate_new_segmentation_id(dst_seg_ids,
                                                       dst_seg_ids,
                                                       'vlan')

    for network in self.get_networks():
        if network.network_type != 'vlan':
            continue
        if network.physnet in dst_vlan_physnets:
            continue
        if network.external and network.id in ext_net_map:
            LOG.debug("Network '%s' is external and specified in the "
                      "external networks mapping. Skipping network...",
                      network.id)
            continue

        with proxy_client.expect_exception(neutron_exc.BadRequest):
            try:
                network_info = {
                    'network': {
                        'provider:physical_network': network.physnet,
                        'provider:network_type': 'vlan',
                        'provider:segmentation_id': free_seg_id
                    }
                }
                new_net = dst_neutron_client.create_network(network_info)
            except neutron_exc.NeutronClientException:
                missing_vlan_physnets.append(network.physnet)
            else:
                dst_neutron_client.delete_network(new_net['network']['id'])

    return missing_vlan_physnets
def try_get_tenant_name_by_id(self, tenant_id, default=None):
    """
    Same as `get_tenant_by_id` but returns `default` in case tenant ID
    is not present
    """
    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            return self.keystone_client.tenants.get(tenant_id).name
    except ks_exceptions.NotFound:
        LOG.warning(
            "Tenant '%s' not found, returning default value = "
            "'%s'", tenant_id, default)
        return default
def create_user(self, name, password=None, email=None, tenant_id=None,
                enabled=True):
    """ Create new user in keystone. """
    try:
        with proxy_client.expect_exception(ks_exceptions.Conflict):
            return self.keystone_client.users.create(
                name=name,
                password=password,
                email=email,
                tenant_id=tenant_id,
                enabled=enabled)
    except ks_exceptions.Conflict:
        # User already exists; log and return the existing one.
        LOG.warning("Conflict creating user %s", name, exc_info=True)
        return self.try_get_user_by_name(name)
def try_get_user_by_id(self, user_id, default=None):
    """Return the user with ``user_id``; fall back to ``default``.

    When ``default`` is not given, the configured admin user's ID is
    used as the fallback.
    """
    if default is None:
        admin_usr = self.try_get_user_by_name(self.config.cloud.user)
        default = admin_usr.id
    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            return self.keystone_client.users.find(id=user_id)
    except ks_exceptions.NotFound:
        LOG.warning(
            "User '%s' has not been found, returning default "
            "value = '%s'", user_id, default)
        return self.keystone_client.users.find(id=default)
def wait_for_instance_to_be_deleted(nv_client, instance):
    """Poll nova until the instance disappears (NotFound) or we time out."""
    retryer = retrying.Retry(max_time=300,
                             retry_on_return_value=True,
                             return_value=instance,
                             expected_exceptions=[nova_exc.NotFound],
                             retry_message="Instance still exists")
    try:
        with proxy_client.expect_exception(nova_exc.NotFound):
            retryer.run(nv_client.nova_client.servers.get, instance.id)
    except nova_exc.NotFound:
        LOG.info("Instance '%s' has been successfully deleted.",
                 instance.name)
def check_quotas(self, cloud):
    """Abort the migration if the cloud does not support Nova quotas."""
    compute_resource = cloud.resources[utils.COMPUTE_RESOURCE]
    keystone_resource = cloud.resources[utils.IDENTITY_RESOURCE]
    tenant = cloud.cloud_config['cloud']['tenant']
    ten_id = keystone_resource.get_tenant_id_by_name(tenant)
    with proxy_client.expect_exception(nova_exceptions.ClientException):
        try:
            compute_resource.nova_client.quotas.update(ten_id)
        except nova_exceptions.ClientException:
            raise cf_exceptions.AbortMigrationError(
                "'%s' cloud does not support quotas "
                "(Nova quotas)." % cloud.position)
def force_delete_vm_by_id(self, vm_id):
    """
    Reset state of VM and delete it.

    :param vm_id: ID of instance
    """
    with proxy_client.expect_exception(nova_exc.NotFound):
        try:
            self.reset_state(vm_id)
            self.delete_vm_by_id(vm_id)
        except nova_exc.NotFound:
            # VM is already gone; nothing to do.
            pass
def get_server_groups(self):
    """
    Return list of dictionaries containing server group details

    Returns:
        list: Empty if no server groups exist or server groups are
        not supported

        [
            {
                "user": "******",
                "tenant": "<tenant name>",
                "uuid": "<group uuid>",
                "name": "<group name>",
                "policies": [<policy_name>, ...]
            }
        ]
    """
    groups = []
    try:
        # Probe API support first; NotFound means no server-groups API.
        with proxy_client.expect_exception(nova_exc.NotFound):
            self._nova_client.server_groups.list()

        for row in self._execute(SQL_SELECT_ALL_GROUPS).fetchall():
            LOG.debug("Resulting row: %s", row)
            sql = SQL_SELECT_POLICY % row[4]
            policies = [policy[0]
                        for policy in self._execute(sql).fetchall()]

            tenant_name = self.identity.try_get_tenant_name_by_id(row[1])
            if tenant_name is None:
                LOG.info(
                    "Tenant '%s' does not exist on the SRC. Skipping "
                    "server group '%s'...", row[1], row[3])
                continue

            groups.append({
                "user": self.identity.try_get_username_by_id(row[0]),
                "tenant": tenant_name,
                "uuid": row[2],
                "name": row[3],
                "policies": policies
            })
    except nova_exc.NotFound:
        LOG.info("Cloud does not support server_groups")
    return groups
def wait_for_instance_to_be_deleted(nv_client, instance):
    """Poll nova until the instance disappears (NotFound) or we time out."""
    retryer = retrying.Retry(
        max_time=300,
        retry_on_return_value=True,
        return_value=instance,
        expected_exceptions=[nova_exc.NotFound],
        retry_message="Instance still exists")
    try:
        with proxy_client.expect_exception(nova_exc.NotFound):
            retryer.run(nv_client.nova_client.servers.get, instance.id)
    except nova_exc.NotFound:
        LOG.info("Instance '%s' has been successfully deleted.",
                 instance.name)
def finish(self, vol):
    """Set the volume's bootable flag, falling back to direct DB access."""
    try:
        with proxy_client.expect_exception(cinder_exc.BadRequest):
            self.cinder_client.volumes.set_bootable(
                vol[utils.VOLUME_BODY]['id'],
                vol[utils.VOLUME_BODY]['bootable'])
    except cinder_exc.BadRequest:
        # Older cinder APIs reject set_bootable; patch the DB directly.
        LOG.info(
            "Can't update bootable flag of volume with id = %s "
            "using API, trying to use DB...",
            vol[utils.VOLUME_BODY]['id'])
        self.__patch_option_bootable_of_volume(
            vol[utils.VOLUME_BODY]['id'],
            vol[utils.VOLUME_BODY]['bootable'])
def finish(self, vol):
    """Set the volume's bootable flag, falling back to direct DB access."""
    try:
        with proxy_client.expect_exception(cinder_exc.BadRequest):
            self.cinder_client.volumes.set_bootable(
                vol[utils.VOLUME_BODY]['id'],
                vol[utils.VOLUME_BODY]['bootable'])
    except cinder_exc.BadRequest:
        LOG.info("Can't update bootable flag of volume with id = %s "
                 "using API, trying to use DB...",
                 vol[utils.VOLUME_BODY]['id'])
        self.__patch_option_bootable_of_volume(
            vol[utils.VOLUME_BODY]['id'],
            vol[utils.VOLUME_BODY]['bootable'])
def get_ref_image(self, image_id):
    """Return a reference to the raw image data.

    :raises: ImageDownloadError when the image cannot be downloaded.
    """
    try:
        # ssl.ZeroReturnError happens because a size of an image is zero
        with proxy_client.expect_exception(
                ssl.ZeroReturnError,
                glance_exceptions.HTTPException,
                IOError):
            return self.glance_client.images.data(image_id)
    except (ssl.ZeroReturnError,
            glance_exceptions.HTTPException,
            IOError):
        raise exception.ImageDownloadError
def get_instances_list(self, search_opts=None, tenant_ids=None,
                       detailed=True):
    """
    Get a list of servers.

    :param search_opts: Search options to filter out servers.
    :param tenant_ids: The list of ids of tenants to filter out servers.
    :param detailed: Whether to return detailed server info.

    :rtype: list of :class:`Server`
    """
    if search_opts is None:
        search_opts = {}
    ids = search_opts.get('id')

    if not ids:
        search_opts.update(all_tenants=True)
        if not tenant_ids:
            servers = self.nova_client.servers.list(
                detailed=detailed, search_opts=search_opts)
        else:
            servers = []
            for t in tenant_ids:
                search_opts.update(tenant_id=t)
                servers.extend(self.nova_client.servers.list(
                    detailed=detailed, search_opts=search_opts))
    else:
        ids = ids if isinstance(ids, list) else [ids]
        servers = []
        for i in ids:
            try:
                with proxy_client.expect_exception(nova_exc.NotFound):
                    servers.append(self.nova_client.servers.get(i))
            except nova_exc.NotFound:
                LOG.warning("No server with ID of '%s' exists.", i)

    active_computes = self.get_compute_hosts()
    active_servers = []
    for server in servers:
        if server.status not in ALLOWED_VM_STATUSES:
            LOG.debug("Instance '%s' has been excluded from VMs list, "
                      "because the status '%s' is not allowed.",
                      server.id, server.status)
            continue
        server_host = getattr(server, INSTANCE_HOST_ATTRIBUTE)
        if server_host not in active_computes:
            LOG.debug("Instance '%s' has been excluded from VMs list, "
                      "because it is booted on non-active compute host "
                      "'%s'.", server.id, server_host)
            continue
        active_servers.append(server)
    return active_servers
def delete_existing_ports_on_dst(network_resource, dst_net, ip_addresses,
                                 mac_address):
    """Remove DST ports matching the given MAC/IPs on the target network."""
    for ip_address in ip_addresses:
        port_dict = network_resource.check_existing_port(
            dst_net['id'], mac_address, ip_address)
        if not port_dict:
            continue
        # port_dict can be DHCP port, so there could be race condition
        # deleting this port
        with proxy_client.expect_exception(neutron_exc.NotFound):
            try:
                network_resource.delete_port(port_dict['id'])
            except neutron_exc.NotFound:
                pass  # Ignore ports that were deleted by neutron
def __init__(self, keystone, admin_user, tenant, member_role="admin"):
    """
    :tenant: can be either tenant name or tenant ID
    """
    self.keystone = keystone
    try:
        # Try the argument as a tenant name first; fall back to ID lookup.
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            self.tenant = find_by_name("tenant",
                                       self.keystone.tenants.list(),
                                       tenant)
    except ks_exceptions.NotFound:
        self.tenant = self.keystone.tenants.get(tenant)
    self.user = find_by_name("user", self.keystone.users.list(),
                             admin_user)
    self.role = find_by_name("role", self.keystone.roles.list(),
                             member_role)
    self.already_member = False
def try_get_tenant_by_id(self, tenant_id, default=None):
    """Returns `keystoneclient.tenants.Tenant` object based on tenant ID
    provided. If not found - returns :arg default: tenant. If
    :arg default: is not specified - returns `config.cloud.tenant`"""
    tenants = self.keystone_client.tenants
    try:
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            return tenants.get(tenant_id)
    except ks_exceptions.NotFound:
        if default is None:
            return self.get_tenant_by_name(self.config.cloud.tenant)
        return tenants.get(default)
def is_nova_instance(self, object_id):
    """
    Define OpenStack Nova Server instance by id.

    :param object_id: ID of supposed Nova Server instance
    :return: True - if it is Nova Server instance, False - if it is not
    """
    try:
        with proxy_client.expect_exception(nova_exc.NotFound):
            self.get_instance(object_id)
    except nova_exc.NotFound:
        LOG.error("%s is not a Nova Server instance", object_id)
        return False
    return True
def is_vm_status_in(client, instance_id, statuses):
    """
    Returns True when nova instance with ID that is equal to instance_id
    argument have status that is equal to status argument.
    """
    wanted = [s.lower() for s in statuses]
    try:
        with proxy_client.expect_exception(nova_exc.NotFound):
            instance = client.servers.get(instance_id)
            status = instance.status.lower()
            # An errored VM can never reach any of the wanted states.
            if status == ERROR:
                raise RuntimeError("VM in error status")
            return status in wanted
    except nova_exc.NotFound:
        return False
def get_server_groups(self):
    """
    Return list of dictionaries containing server group details

    Returns:
        list: Empty if no server groups exist or server groups are
        not supported

        [
            {
                "user": "******",
                "tenant": "<tenant name>",
                "uuid": "<group uuid>",
                "name": "<group name>",
                "policies": [<policy_name>, ...]
            }
        ]
    """
    groups = []
    try:
        # Probe API support first; NotFound means no server-groups API.
        with proxy_client.expect_exception(nova_exc.NotFound):
            self._nova_client.server_groups.list()

        for row in self._execute(SQL_SELECT_ALL_GROUPS).fetchall():
            LOG.debug("Resulting row: %s", row)
            sql = SQL_SELECT_POLICY % row[4]
            policies = [policy[0]
                        for policy in self._execute(sql).fetchall()]

            tenant_name = self.identity.try_get_tenant_name_by_id(row[1])
            if tenant_name is None:
                LOG.info("Tenant '%s' does not exist on the SRC. Skipping "
                         "server group '%s'...", row[1], row[3])
                continue

            groups.append({
                "user": self.identity.try_get_username_by_id(row[0]),
                "tenant": self.tenant_name_map.map(tenant_name),
                "uuid": row[2],
                "name": row[3],
                "policies": policies,
            })
    except nova_exc.NotFound:
        LOG.info("Cloud does not support server_groups")
    return groups
def __init__(self, keystone, admin_user, tenant, member_role='admin'):
    """
    :tenant: can be either tenant name or tenant ID
    """
    self.keystone = keystone
    try:
        # Try the argument as a tenant name first; fall back to ID lookup.
        with proxy_client.expect_exception(ks_exceptions.NotFound):
            self.tenant = find_by_name('tenant',
                                       self.keystone.tenants.list(),
                                       tenant)
    except ks_exceptions.NotFound:
        self.tenant = self.keystone.tenants.get(tenant)
    self.user = find_by_name('user', self.keystone.users.list(),
                             admin_user)
    self.role = find_by_name('role', self.keystone.roles.list(),
                             member_role)
    self.already_member = False
def create_user(self, name, password=None, email=None, tenant_id=None,
                enabled=True):
    """ Create new user in keystone. """
    try:
        with proxy_client.expect_exception(ks_exceptions.Conflict):
            return self.keystone_client.users.create(name=name,
                                                     password=password,
                                                     email=email,
                                                     tenant_id=tenant_id,
                                                     enabled=enabled)
    except ks_exceptions.Conflict:
        LOG.warning('Conflict creating user %s', name, exc_info=True)
        return self.try_get_user_by_name(name)