def run(self, cfg, migration):
    cloud = cfg.clouds[getattr(migration, self.location)]
    network_client = clients.network_client(cloud)
    compute_client = clients.compute_client(cloud)
    storage_client = clients.volume_client(cloud)
    try:
        if self.net_quota is None:
            clients.retry(network_client.delete_quota, self.admin_tenant_id)
        else:
            clients.retry(
                network_client.update_quota, self.admin_tenant_id, {
                    'quota': {
                        'network': self.net_quota['network'],
                        'subnet': self.net_quota['subnet'],
                        'port': self.net_quota['port'],
                    }
                })
    except neutron_exceptions.NotFound:
        pass
    if self.compute_quota:
        clients.retry(compute_client.quotas.update, self.admin_tenant_id,
                      **self.compute_quota)
    if self.storage_quota:
        clients.retry(storage_client.quotas.update, self.obj_tenant_id,
                      **self.storage_quota)
def _delete_vm(cloud, vm_id):
    nova_client = clients.compute_client(cloud)
    # First try a plain delete; if that fails or times out, reset the VM
    # state and try once more.
    for do_reset in (False, True):
        try:
            if do_reset:
                clients.retry(nova_client.servers.reset_state, vm_id,
                              expected_exceptions=[nova_exceptions.NotFound])
            try:
                clients.retry(nova_client.servers.delete, vm_id,
                              expected_exceptions=[nova_exceptions.NotFound])
            except nova_exceptions.NotFound:
                raise
            except nova_exceptions.ClientException:
                LOG.error('Failed to delete VM %s from cloud %s',
                          vm_id, cloud.name, exc_info=True)
                continue
            if clients.wait_for(_object_is_deleted, nova_client, 'servers',
                                vm_id, nova_exceptions.NotFound):
                return True
        except nova_exceptions.NotFound:
            return True
        except clients.Timeout:
            continue
    LOG.error('Timeout waiting for VM %s from cloud %s to be deleted',
              vm_id, cloud.name, exc_info=True)
    return False
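# A minimal sketch of the _object_is_deleted predicate passed to
# clients.wait_for above. Its implementation is not shown in this section;
# the signature is inferred from the call sites (client, manager name,
# object id, client-specific NotFound exception), so treat the body as an
# assumption rather than the original helper.
def _object_is_deleted(client, manager_name, obj_id, not_found_exception):
    # The object counts as deleted once fetching it raises NotFound.
    try:
        getattr(client, manager_name).get(obj_id)
        return False
    except not_found_exception:
        return True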
def discover_one(self, uuid):
    compute_client = clients.compute_client(self.cloud)
    raw_flavor = self.retry(compute_client.flavors.get, uuid)
    # TODO: implement
    return compute.Flavor.load({
        'object_id': self.make_id(raw_flavor.id),
    })
def discover_one(self, uuid):
    compute_client = clients.compute_client(self.cloud)
    try:
        raw_server = self.retry(compute_client.servers.get, uuid,
                                expected_exceptions=[exceptions.NotFound])
    except exceptions.NotFound:
        raise discover.NotFound()

    # Check if server host is available
    avail_hosts = self._list_available_compute_hosts(compute_client)
    host = getattr(raw_server, EXT_ATTR_HOST)
    if host not in avail_hosts:
        LOG.warning('Skipping server %s, host not available.', host)
        return None

    # Convert server data to model conforming format
    server = self.load_from_cloud(raw_server)
    with remote.RemoteExecutor(self.cloud,
                               server.hypervisor_host) as remote_executor:
        _populate_ephemeral_disks(remote_executor, server)

    # Store server
    with model.Session() as session:
        session.store(server)
        if _need_image_membership(server):
            image_member_uuid = image.ImageMember.make_uuid(
                server.image, server.tenant)
            server.image_membership = self.find_obj(
                image.ImageMember, image_member_uuid)
    return server
def migrate(self, source_obj, *args, **kwargs):
    int_ip_address = _allocate_ip_address(self.cloud)
    nova_client = clients.compute_client(self.cloud)
    self.created_object = nova_client.servers.create(
        image=kwargs[self.image_var_name],
        flavor=kwargs[self.flavor_var_name].flavor_id,
        name='trans_vol_{}'.format(source_obj.object_id.id),
        config_drive=True,
        nics=[{'net-id': kwargs[self.net_var_name].object_id.id}],
        meta=dict(cidr=str(int_ip_address),
                  internal_address=str(int_ip_address.ip),
                  access_key=RSA1024_KEY.get_base64()))
    try:
        self.created_object = clients.wait_for(
            _object_status_is, nova_client, 'servers',
            self.created_object.id, 'active')
    except clients.Timeout:
        self._delete_vm()
        raise base.AbortMigration(
            'Timeout waiting for VM %s to start on %s',
            self.created_object.id, self.location)
    result = self.load_from_cloud(compute.Server, self.cloud,
                                  self.created_object)
    return {self.var_name: result}
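# A minimal sketch of the _object_status_is predicate used with
# clients.wait_for above and in the attach/detach helpers further down.
# As with _object_is_deleted, the body is an assumption inferred from the
# call sites; the original helper may differ (it appears to return the
# refreshed object so wait_for can hand it back to the caller).
def _object_status_is(client, manager_name, obj_id, status):
    # Refresh the object and compare its status case-insensitively.
    obj = getattr(client, manager_name).get(obj_id)
    if obj.status.lower() == status.lower():
        return obj
    return None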
def _set_compute_quotas(self, tenant_id):
    compute_client = clients.compute_client(self.cloud)
    return self._set_quotas(compute_client, tenant_id, cores=-1, ram=-1,
                            injected_file_content_bytes=-1, instances=-1,
                            fixed_ips=-1)
def run(self, cfg, migration):
    cloud = cfg.clouds[getattr(migration, self.location)]
    nova_client = clients.compute_client(cloud)
    try:
        with model.Session() as session:
            session.delete(compute.Flavor, object_id=self.object_id)
        clients.retry(nova_client.flavors.delete, self.flavor_id,
                      expected_exceptions=[nova_exceptions.NotFound])
    except nova_exceptions.NotFound:
        pass
def discover_one(self, uuid):
    # Attachment identifiers are composite: '<server_id>:<volume_id>'
    server_id, volume_id = uuid.split(':')
    compute_client = clients.compute_client(self.cloud)
    try:
        raw_attachment = self.retry(
            compute_client.volumes.get_server_volume, server_id, volume_id,
            expected_exceptions=[nova_exceptions.NotFound])
        attachment = self.load_from_cloud(raw_attachment)
        with model.Session() as session:
            session.store(attachment)
        return attachment
    except nova_exceptions.NotFound:
        raise discover.NotFound()
def _create_flavor(self):
    nova_client = clients.compute_client(self.cloud)
    flavor_id = str(uuid.uuid4())
    # Temporary private flavor (64 MB RAM, 1 vCPU, no disk) used for the
    # volume transfer VM.
    clients.retry(nova_client.flavors.create, 'tmp.vol_tx', 64, 1, 0,
                  flavorid=flavor_id, is_public=False)
    flavor_discoverer = discover.get_discoverer(self.config, self.cloud,
                                                compute.Flavor)
    flavor = flavor_discoverer.discover_by_flavor_id(flavor_id)
    return flavor
def discover_all(self):
    compute_client = clients.compute_client(self.cloud)
    avail_hosts = self._list_available_compute_hosts(compute_client)
    servers = {}

    # Go through each tenant since nova doesn't return more items than
    # specified in the osapi_max_limit configuration option (1000 by
    # default) in a single API call
    for tenant in self._get_tenants():
        LOG.debug('Discovering servers from cloud "%s" tenant "%s"',
                  self.cloud.name, tenant.name)
        tenant_id = tenant.id
        raw_server_list = self.retry(compute_client.servers.list,
                                     search_opts={
                                         'all_tenants': True,
                                         'tenant_id': tenant_id,
                                     },
                                     returns_iterable=True)
        for raw_server in raw_server_list:
            host = getattr(raw_server, EXT_ATTR_HOST)
            if host not in avail_hosts:
                LOG.warning('Skipping server %s in tenant %s, host not '
                            'available.', host, tenant.name)
                continue
            # Convert server data to model conforming format
            server = self.load_from_cloud(raw_server)
            hyper_host = getattr(raw_server, EXT_ATTR_HYPER_HOST)
            servers.setdefault(hyper_host, []).append(server)

    # Collect information about ephemeral disks
    # TODO: work with different servers in parallel
    for host, host_servers in servers.items():
        LOG.debug('Getting ephemeral disks information from cloud %s '
                  'host %s', self.cloud.name, host)
        with remote.RemoteExecutor(self.cloud, host) as remote_executor:
            for server in host_servers:
                _populate_ephemeral_disks(remote_executor, server)

    # Store data to local database
    with model.Session() as session:
        for host_servers in servers.values():
            for server in host_servers:
                session.store(server)
                if _need_image_membership(server):
                    image_member_uuid = image.ImageMember.make_uuid(
                        server.image, server.tenant)
                    server.image_membership = self.find_obj(
                        image.ImageMember, image_member_uuid)
def restore_internal_state(self, internal_state):
    vm_id = internal_state['vm_id']
    self.created_object = None
    if vm_id is not None:
        compute_client = clients.compute_client(self.cloud)
        try:
            self.created_object = clients.retry(
                compute_client.servers.get, vm_id,
                expected_exceptions=[nova_exceptions.NotFound])
        except nova_exceptions.NotFound:
            LOG.warning('Failed to find VM with id %s when restoring '
                        'task state', vm_id)
def load_from_cloud(self, data):
    compute_client = clients.compute_client(self.cloud)
    # Workaround for grizzly lacking EXT_ATTR_VOL_ATTACHMENTS
    if hasattr(data, EXT_ATTR_VOL_ATTACHMENTS):
        raw_attachments = [
            '{0}:{1}'.format(data.id, attachment['id'])
            for attachment in getattr(data, EXT_ATTR_VOL_ATTACHMENTS)]
    else:
        raw_attachments = [
            '{0}:{1}'.format(attachment.serverId, attachment.volumeId)
            for attachment in self.retry(
                compute_client.volumes.get_server_volumes, data.id,
                returns_iterable=True)]

    server_image = None
    if data.image:
        server_image = data.image['id']
    attached_volumes = [self.find_ref(storage.Attachment, attachment)
                        for attachment in raw_attachments]
    with cloud_db.connection(self.cloud.nova_db) as db:
        flavor_id = self._get_flavor(db, data.id)

    hypervisor_host = getattr(data, EXT_ATTR_HYPER_HOST)
    server_dict = {
        'object_id': self.make_id(data.id),
        'security_groups': [],  # TODO: implement security groups
        'tenant': self.find_ref(identity.Tenant, data.tenant_id),
        'image': self.find_ref(image.Image, server_image),
        'image_membership': None,
        'flavor': self.find_ref(compute.Flavor, flavor_id),
        'availability_zone': getattr(data, EXT_ATTR_AZ),
        'host': getattr(data, EXT_ATTR_HOST),
        'hypervisor_hostname': hypervisor_host,
        'instance_name': getattr(data, EXT_ATTR_INSTANCE_NAME),
        'attached_volumes': [av for av in attached_volumes if av],
        'ephemeral_disks': [],  # Ephemeral disks will be filled later
        'compute_node': self.find_ref(compute.ComputeNode, hypervisor_host),
    }
    for attr_name in ('name', 'status', 'user_id', 'key_name',
                      'config_drive', 'metadata'):
        if hasattr(data, attr_name):
            server_dict[attr_name] = getattr(data, attr_name)
    return compute.Server.load(server_dict)
def migrate(self, source_obj, *args, **kwargs):
    cinder_client = clients.volume_client(
        self.src_cloud, _scope(source_obj.tenant.object_id.id))
    raw_volume = clients.retry(cinder_client.volumes.get,
                               source_obj.object_id.id)
    if raw_volume.attachments:
        nova_client = clients.compute_client(self.src_cloud)
        assert len(raw_volume.attachments) == 1
        detached_vm_id = raw_volume.attachments[0]['server_id']
        shutoff_vm(nova_client, detached_vm_id)
        self._detach_volume(self.src_cloud, source_obj, detached_vm_id,
                            abort_migration=True)
        self.detached_vm_id = detached_vm_id
    return dict(attached_vm_id=self.detached_vm_id)
def _detach_volume(self, cloud, volume, vm_id, abort_migration=False):
    volume_id = volume.object_id.id
    nova_client = clients.compute_client(cloud)
    cinder_client = clients.volume_client(
        cloud, _scope(volume.tenant.object_id.id))
    if _object_is_deleted(cinder_client, 'volumes', volume_id,
                          cinder_exceptions.NotFound):
        return
    if _object_status_is(cinder_client, 'volumes', volume_id, 'in-use'):
        nova_client.volumes.delete_server_volume(vm_id, volume_id)
        try:
            clients.wait_for(_object_status_is, cinder_client, 'volumes',
                             volume_id, 'available')
        except clients.Timeout:
            if abort_migration:
                raise base.AbortMigration(
                    'Volume %s in cloud %s couldn\'t detach',
                    volume_id, cloud.name)
def _attach_volume(self, cloud, volume, vm_id):
    volume_id = volume.object_id.id
    nova_client = clients.compute_client(cloud)
    cinder_client = clients.volume_client(
        cloud, _scope(volume.tenant.object_id.id))
    if _object_status_is(cinder_client, 'volumes', volume_id, 'available'):
        nova_client.volumes.create_server_volume(vm_id, volume_id,
                                                 '/dev/vdb')
        try:
            clients.wait_for(_object_status_is, cinder_client, 'volumes',
                             volume_id, 'in-use')
        except clients.Timeout:
            raise base.AbortMigration(
                'Volume %s in cloud %s couldn\'t attach',
                volume_id, cloud.name)
    else:
        raise base.AbortMigration(
            'Volume %s in cloud %s is not available for attachment',
            volume_id, cloud.name)
def load_from_cloud(self, data):
    compute_client = clients.compute_client(self.cloud)
    # Workaround for grizzly lacking EXT_ATTR_VOL_ATTACHMENTS
    if hasattr(data, EXT_ATTR_VOL_ATTACHMENTS):
        raw_attachments = [
            '{0}:{1}'.format(data.id, attachment['id'])
            for attachment in getattr(data, EXT_ATTR_VOL_ATTACHMENTS)]
    else:
        raw_attachments = [
            '{0}:{1}'.format(attachment.serverId, attachment.volumeId)
            for attachment in self.retry(
                compute_client.volumes.get_server_volumes, data.id,
                returns_iterable=True)]

    server_image = None
    if data.image:
        server_image = data.image['id']
    attached_volumes = [self.find_ref(storage.Attachment, attachment)
                        for attachment in raw_attachments]
    server_dict = {
        'object_id': self.make_id(data.id),
        'security_groups': [],  # TODO: implement security groups
        'tenant': self.find_ref(identity.Tenant, data.tenant_id),
        'image': self.find_ref(image.Image, server_image),
        'image_membership': None,
        'flavor': self.find_ref(compute.Flavor, data.flavor['id']),
        'availability_zone': getattr(data, EXT_ATTR_AZ),
        'host': getattr(data, EXT_ATTR_HOST),
        'hypervisor_hostname': getattr(data, EXT_ATTR_HYPER_HOST),
        'instance_name': getattr(data, EXT_ATTR_INSTANCE_NAME),
        'attached_volumes': [av for av in attached_volumes if av],
        'ephemeral_disks': [],  # Ephemeral disks will be filled later
    }
    for attr_name in ('name', 'status', 'user_id', 'key_name',
                      'config_drive', 'metadata'):
        if hasattr(data, attr_name):
            server_dict[attr_name] = getattr(data, attr_name)
    return compute.Server.load(server_dict)
def compute_client(self, scope=None):
    # pylint: disable=no-member
    return clients.compute_client(self.credential, scope or self.scope)
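# Usage sketch for the wrapper above (all names besides compute_client are
# illustrative assumptions): callers get a nova client bound to the cloud's
# own credential and default scope, or pass an explicit scope to act on
# behalf of another tenant, e.g.
#
#     nova = cloud.compute_client()              # default scope
#     nova = cloud.compute_client(tenant_scope)  # explicit tenant scope
#     servers = nova.servers.list()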