def _delete_vm(cloud, vm_id):
    """Delete VM *vm_id* from *cloud*, escalating to a state reset on retry.

    The first pass deletes the server as-is; if that does not complete, the
    second pass calls ``reset_state`` before deleting (helps with servers
    stuck in a transitional state).

    :returns: True once the server is confirmed gone (or was already gone),
              False if deletion could not be confirmed on either pass.
    """
    nova_client = clients.compute_client(cloud)
    for do_reset in (False, True):
        try:
            if do_reset:
                # Second pass: force the server into a deletable state first.
                clients.retry(nova_client.servers.reset_state, vm_id,
                              expected_exceptions=[nova_exceptions.NotFound])
            try:
                clients.retry(nova_client.servers.delete, vm_id,
                              expected_exceptions=[nova_exceptions.NotFound])
            except nova_exceptions.NotFound:
                # Already gone -- re-raise so the outer handler returns True.
                raise
            except nova_exceptions.ClientException:
                # Delete call failed outright; retry the loop with a reset.
                LOG.error('Failed to delete VM %s from cloud %s', vm_id,
                          cloud.name, exc_info=True)
                continue
            if clients.wait_for(_object_is_deleted, nova_client, 'servers',
                                vm_id, nova_exceptions.NotFound):
                return True
        except nova_exceptions.NotFound:
            # Server vanished at some point during this attempt: success.
            return True
        except clients.Timeout:
            # Deletion didn't complete in time; try again with reset_state.
            continue
    LOG.error('Timeout waiting for VM %s from cloud %s to be deleted',
              vm_id, cloud.name, exc_info=True)
    return False
def _set_quotas(client, tenant_id, **kwargs):
    """Apply quota values from *kwargs* for *tenant_id*.

    :returns: a dict of the previous values for every quota item that
              actually changed, suitable for restoring later.
    """
    current = clients.retry(client.quotas.get, tenant_id)._info
    # Remember only the values we are about to change.
    original = {name: current[name]
                for name, new_value in kwargs.items()
                if current[name] != new_value}
    clients.retry(client.quotas.update, tenant_id, **kwargs)
    return original
def _object_is_deleted(client, manager, obj_id, expected_exception):
    """Return True when fetching *obj_id* raises *expected_exception*.

    Used as a ``clients.wait_for`` predicate: a successful GET means the
    object still exists (False); the expected "not found" exception means
    it has been deleted (True).
    """
    api = getattr(client, manager)
    try:
        clients.retry(api.get, obj_id,
                      expected_exceptions=[expected_exception])
    except expected_exception:
        return True
    return False
def run(self, cfg, migration):
    """Revoke the previously granted role; a missing grant is fine."""
    target_cloud = cfg.clouds[getattr(migration, self.location)]
    keystone = clients.identity_client(target_cloud)
    try:
        clients.retry(keystone.roles.remove_user_role,
                      user=self.user_id,
                      role=self.role_id,
                      tenant=self.tenant_id,
                      expected_exceptions=[keystone_exceptions.NotFound])
    except keystone_exceptions.NotFound:
        # Role assignment is already gone -- nothing to undo.
        pass
def run(self, cfg, migration):
    """Remove the flavor from the local model and from the cloud."""
    target_cloud = cfg.clouds[getattr(migration, self.location)]
    nova = clients.compute_client(target_cloud)
    try:
        with model.Session() as session:
            session.delete(compute.Flavor, object_id=self.object_id)
            clients.retry(nova.flavors.delete, self.flavor_id,
                          expected_exceptions=[nova_exceptions.NotFound])
    except nova_exceptions.NotFound:
        # Flavor already deleted on the cloud side -- treat as success.
        pass
def run(self, cfg, migration):
    """Remove the image from the local model and from the cloud."""
    target_cloud = cfg.clouds[getattr(migration, self.location)]
    glance = clients.image_client(target_cloud)
    try:
        with model.Session() as session:
            session.delete(
                image.Image,
                object_id=model.ObjectId(self.image_id, target_cloud.name))
            clients.retry(glance.images.delete, self.image_id,
                          expected_exceptions=[glance_exceptions.NotFound])
    except glance_exceptions.NotFound:
        # Image already deleted on the cloud side -- treat as success.
        pass
def run(self, cfg, migration):
    """Remove the network and its subnet from the model and the cloud."""
    target_cloud = cfg.clouds[getattr(migration, self.location)]
    neutron = clients.network_client(target_cloud)
    try:
        with model.Session() as session:
            network_obj_id = model.ObjectId(self.network_id,
                                            target_cloud.name)
            subnet_object_id = model.ObjectId(self.subnet_id,
                                              target_cloud.name)
            session.delete(network.Network, object_id=network_obj_id)
            session.delete(network.Subnet, object_id=subnet_object_id)
            # Deleting the network also removes its subnets server-side.
            clients.retry(neutron.delete_network, self.network_id,
                          expected_exceptions=[neutron_exceptions.NotFound])
    except neutron_exceptions.NotFound:
        # Network already deleted on the cloud side -- treat as success.
        pass
def _create_flavor(self):
    """Create a temporary private flavor for the volume transfer VM.

    :returns: the discovered model object for the freshly created flavor.
    """
    nova = clients.compute_client(self.cloud)
    new_flavor_id = str(uuid.uuid4())
    # 64 MB RAM, 1 vCPU, no disk -- the bare minimum for the helper VM.
    clients.retry(nova.flavors.create, 'tmp.vol_tx', 64, 1, 0,
                  flavorid=new_flavor_id, is_public=False)
    discoverer = discover.get_discoverer(self.config, self.cloud,
                                         compute.Flavor)
    return discoverer.discover_by_flavor_id(new_flavor_id)
def run(self, cfg, migration):
    """Restore the network, compute and storage quotas saved earlier."""
    target_cloud = cfg.clouds[getattr(migration, self.location)]
    neutron = clients.network_client(target_cloud)
    nova = clients.compute_client(target_cloud)
    cinder = clients.volume_client(target_cloud)
    try:
        if self.net_quota is None:
            # No saved values: drop the per-tenant override entirely.
            clients.retry(neutron.delete_quota, self.admin_tenant_id)
        else:
            restored = {
                'network': self.net_quota['network'],
                'subnet': self.net_quota['subnet'],
                'port': self.net_quota['port'],
            }
            clients.retry(neutron.update_quota, self.admin_tenant_id,
                          {'quota': restored})
    except neutron_exceptions.NotFound:
        pass
    if self.compute_quota:
        clients.retry(nova.quotas.update, self.admin_tenant_id,
                      **self.compute_quota)
    if self.storage_quota:
        # NOTE(review): storage quotas are restored for obj_tenant_id, not
        # admin_tenant_id -- presumably intentional; verify against setter.
        clients.retry(cinder.quotas.update, self.obj_tenant_id,
                      **self.storage_quota)
def migrate(self, source_obj, *args, **kwargs):
    """Grant the admin role to the service user on the object's tenant.

    Records user/role/tenant ids on ``self`` and installs an
    ``EnsureAdminRoleDestructor`` to undo the grant later.  A Conflict from
    keystone means the role was already assigned and is silently ignored.

    :returns: dict mapping the configured destructor variable name to the
              destructor (note: hedged -- if Conflict fires before the
              destructor is assigned, ``self.destructor`` must already have
              a default on the class; confirm against class definition).
    """
    cloud = self.cloud
    identity_client = clients.identity_client(cloud)
    destructor_var = self.destructor_var
    try:
        self.user_id = self._user_id(cloud.credential.username)
        self.role_id = self._role_id(cloud.admin_role)
        self.tenant_id = _get_object_tenant_id(self.cloud, source_obj)
        clients.retry(identity_client.roles.add_user_role,
                      user=self.user_id, role=self.role_id,
                      tenant=self.tenant_id,
                      expected_exceptions=[keystone_exceptions.Conflict])
        self.destructor = EnsureAdminRoleDestructor(
            self.location, self.user_id, self.role_id, self.tenant_id)
    except keystone_exceptions.Conflict:
        # Role already granted -- nothing to do.
        pass
    return {destructor_var: self.destructor}
def migrate(self, source_obj, *args, **kwargs):
    """Create the tenant on the destination, reusing it if it exists.

    On a name conflict the existing tenant is located by case-insensitive
    name; if it cannot be found the migration is aborted.
    """
    keystone = clients.identity_client(self.dst_cloud)
    try:
        destination_obj = clients.retry(
            keystone.tenants.create, source_obj.name,
            description=source_obj.description,
            enabled=source_obj.enabled,
            expected_exceptions=[exceptions.Conflict])
        self.created_object = destination_obj
    except exceptions.Conflict:
        # Tenant already exists: find it by (case-insensitive) name.
        wanted_name = source_obj.name.lower()
        for candidate in clients.retry(keystone.tenants.list):
            if candidate.name.lower() == wanted_name:
                destination_obj = candidate
                break
        else:
            raise base.AbortMigration('Invalid state')
    destination = self.load_from_cloud(identity.Tenant, self.dst_cloud,
                                       destination_obj)
    return dict(dst_object=destination)
def _delete_volume(self, vol):
    """Delete *vol* on the destination, resetting stuck states first."""
    tenant_id = getattr(vol, 'os-vol-tenant-attr:tenant_id')
    cinder = clients.volume_client(self.dst_cloud, _scope(tenant_id))
    # Statuses cinder will accept a delete from without a reset.
    deletable = ('available', 'in-use', 'error', 'error_restoring')
    try:
        volume = clients.retry(
            cinder.volumes.get, vol.id,
            expected_exceptions=[cinder_exceptions.NotFound])
        if volume.status not in deletable:
            # Force the volume into a deletable state.
            clients.retry(cinder.volumes.reset_state, volume, 'error',
                          expected_exceptions=[cinder_exceptions.NotFound])
        clients.retry(cinder.volumes.delete, volume,
                      expected_exceptions=[cinder_exceptions.NotFound])
    except cinder_exceptions.NotFound:
        LOG.warning('Can not delete cinder volume: already deleted')
def _object_status_is(client, manager_name, obj_id, status):
    """``clients.wait_for`` predicate: object reached *status*?

    :returns: the object once its status matches (case-insensitive),
              None while still waiting.
    :raises: ``base.AbortMigration`` if the object lands in ERROR state.
    """
    obj = clients.retry(getattr(client, manager_name).get, obj_id)
    actual = obj.status.lower()
    wanted = status.lower()
    LOG.debug('Checking object %s is in status \'%s\': actual status \'%s\'',
              obj_id, wanted, actual)
    if actual == wanted:
        return obj
    if actual == 'error':
        raise base.AbortMigration('Object %s ended up in ERROR state', obj_id)
    return None
def restore_internal_state(self, internal_state):
    """Re-attach to a previously created VM after a task restart."""
    vm_id = internal_state['vm_id']
    self.created_object = None
    if vm_id is None:
        return
    nova = clients.compute_client(self.cloud)
    try:
        self.created_object = clients.retry(
            nova.servers.get, vm_id,
            expected_exceptions=[nova_exceptions.NotFound])
    except nova_exceptions.NotFound:
        LOG.warning(
            'Failed to find VM with id %s when restoring '
            'task state', vm_id)
def restore_internal_state(self, internal_state):
    """Re-attach to a previously created volume after a task restart."""
    tenant_id = internal_state['tenant_id']
    volume_id = internal_state['volume_id']
    self.created_object = None
    if tenant_id is None or volume_id is None:
        return
    cinder = clients.volume_client(self.dst_cloud, _scope(tenant_id))
    try:
        self.created_object = clients.retry(
            cinder.volumes.get, volume_id,
            expected_exceptions=[cinder_exceptions.NotFound])
    except cinder_exceptions.NotFound:
        LOG.warning(
            'Failed to find volume with id %s when restoring '
            'task state', volume_id)
def migrate(self, source_obj, *args, **kwargs):
    """Detach the source volume from its VM, shutting the VM down first.

    :returns: dict with the id of the VM the volume was attached to (None
              if it was not attached), so it can be re-attached later.
    """
    cinder = clients.volume_client(
        self.src_cloud, _scope(source_obj.tenant.object_id.id))
    raw_volume = clients.retry(cinder.volumes.get,
                               source_obj.object_id.id)
    if raw_volume.attachments:
        # A cinder volume can have at most one attachment here.
        assert len(raw_volume.attachments) == 1
        vm_id = raw_volume.attachments[0]['server_id']
        nova = clients.compute_client(self.src_cloud)
        shutoff_vm(nova, vm_id)
        self._detach_volume(self.src_cloud, source_obj, vm_id,
                            abort_migration=True)
        self.detached_vm_id = vm_id
    return dict(attached_vm_id=self.detached_vm_id)
def migrate(self, source_obj, *args, **kwargs):
    """Create the destination volume and wait until it is available.

    On timeout the half-created volume is deleted and the migration is
    aborted.
    """
    dst_tenant_id = _get_object_tenant_id(self.dst_cloud, source_obj)
    cinder = clients.volume_client(self.dst_cloud, _scope(dst_tenant_id))
    overridden = self.override(source_obj)
    vol = clients.retry(
        cinder.volumes.create,
        size=overridden.size,
        display_name=overridden.name,
        display_description=overridden.description,
        volume_type=overridden.volume_type,
        availability_zone=overridden.availability_zone,
        metadata=overridden.metadata)
    try:
        self.created_object = clients.wait_for(
            _object_status_is, cinder, 'volumes', vol.id, 'available')
    except clients.Timeout:
        self._delete_volume(vol)
        raise base.AbortMigration('Volume didn\'t become active')
    result = self.load_from_cloud(storage.Volume, self.dst_cloud,
                                  self.created_object)
    return dict(dst_object=result)
def shutoff_vm(nova_client, instace_id):
    """Bring the given server to SHUTOFF, whatever state it is in now.

    Paused/suspended/resizing servers are first transitioned back to
    ACTIVE, then stopped.  A timeout while waiting is logged and swallowed
    (best effort); an unrecognised state aborts the migration.
    """
    # TODO: make general-purpose utility function
    # NOTE(review): parameter name 'instace_id' is misspelled but kept for
    # keyword-call compatibility.
    instance = clients.retry(nova_client.servers.get, instace_id)
    current = instance.status.lower()

    def wait_status(status):
        return clients.wait_for(_object_status_is, nova_client, 'servers',
                                instace_id, status)

    # States needing an intermediate transition to ACTIVE before stopping.
    back_to_active = {
        'paused': nova_client.servers.unpause,
        'suspended': nova_client.servers.resume,
        'verify_resize': nova_client.servers.confirm_resize,
    }
    try:
        if current in back_to_active:
            back_to_active[current](instance)
            wait_status('active')
            nova_client.servers.stop(instance)
            wait_status('shutoff')
        elif current == 'active':
            nova_client.servers.stop(instance)
            wait_status('shutoff')
        elif current != 'shutoff':
            raise base.AbortMigration('Invalid state change: %s -> shutoff',
                                      current)
    except clients.Timeout:
        LOG.debug(
            "Failed to change state from '%s' to 'shutoff' for VM "
            "'%s'", current, instace_id)
def _get(fn, uuid, envelope):
    """Fetch one neutron resource via *fn* and unwrap its *envelope* key."""
    response = clients.retry(
        fn, uuid, expected_exceptions=[neutron_exceptions.NotFound])
    return response[envelope]
def revert(self, *args, **kwargs):
    """Delete the tenant created by this task (if any), then chain up."""
    if self.created_object is not None:
        keystone = clients.identity_client(self.dst_cloud)
        clients.retry(keystone.tenants.delete, self.created_object)
    super(CreateTenant, self).revert(*args, **kwargs)
def _list(fn, envelope):
    """List neutron resources via *fn* and unwrap the *envelope* key."""
    response = clients.retry(fn)
    return response[envelope]