def _user_id(self, username):
    """Return the ID of the user named *username* in ``self.cloud``.

    The comparison is case-insensitive.

    :param username: user name to look up
    :returns: the user's object ID string
    :raises base.AbortMigration: if no such user exists in the cloud
    """
    with model.Session() as session:
        for user in session.list(identity.User, self.cloud):
            if user.name.lower() == username.lower():
                return user.object_id.id
        # BUG FIX: placeholder was a bare '%' instead of '%s', which would
        # have produced a garbled abort message.
        raise base.AbortMigration('User %s not found in cloud %s',
                                  username, self.cloud.name)
def _role_id(self, rolename):
    """Return the ID of the role named *rolename* in ``self.cloud``.

    The comparison is case-insensitive.

    :param rolename: role name to look up
    :returns: the role's object ID string
    :raises base.AbortMigration: if no such role exists in the cloud
    """
    with model.Session() as session:
        for role in session.list(identity.Role, self.cloud):
            if role.name.lower() == rolename.lower():
                return role.object_id.id
        # BUG FIX: placeholder was a bare '%' instead of '%s', which would
        # have produced a garbled abort message.
        raise base.AbortMigration('Role %s not found in cloud %s',
                                  rolename, self.cloud.name)
def migrate(self, source_obj, *args, **kwargs):
    """Boot the transfer VM used to copy a volume's data.

    Creates a server from the image/flavor/network passed through
    ``kwargs`` (keyed by ``self.image_var_name`` etc.), injects the
    allocated internal address and the public SSH key via metadata and
    config-drive, then waits for the server to become ACTIVE.

    :param source_obj: volume being migrated; its ID names the VM
    :returns: dict mapping ``self.var_name`` to the loaded server model
    :raises base.AbortMigration: if the VM does not reach ACTIVE in time
        (the half-created VM is deleted first)
    """
    # Reserve an internal IP for the transfer VM up front.
    int_ip_address = _allocate_ip_address(self.cloud)
    nova_client = clients.compute_client(self.cloud)
    self.created_object = nova_client.servers.create(
        image=kwargs[self.image_var_name],
        flavor=kwargs[self.flavor_var_name].flavor_id,
        name='trans_vol_{}'.format(source_obj.object_id.id),
        # config_drive lets the guest read the metadata below without a
        # metadata service.
        config_drive=True,
        nics=[{'net-id': kwargs[self.net_var_name].object_id.id}],
        meta=dict(cidr=str(int_ip_address),
                  internal_address=str(int_ip_address.ip),
                  access_key=RSA1024_KEY.get_base64()))
    try:
        # Re-fetch so self.created_object reflects the ACTIVE server.
        self.created_object = clients.wait_for(
            _object_status_is, nova_client, 'servers',
            self.created_object.id, 'active')
    except clients.Timeout:
        # Clean up the stuck VM before aborting the migration.
        self._delete_vm()
        raise base.AbortMigration(
            'Timeout waiting for VM %s to start on %s',
            self.created_object.id, self.location)
    result = self.load_from_cloud(compute.Server, self.cloud,
                                  self.created_object)
    return {self.var_name: result}
def _attach_volume(self, cloud, volume, vm_id):
    """Attach *volume* to server *vm_id* as ``/dev/vdb`` and wait.

    :param cloud: cloud the volume and server live in
    :param volume: volume model object to attach
    :param vm_id: ID of the server to attach to
    :raises base.AbortMigration: if the volume is not available, or if it
        fails to reach the ``in-use`` state in time
    """
    vol_id = volume.object_id.id
    compute_api = clients.compute_client(cloud)
    volume_api = clients.volume_client(
        cloud, _scope(volume.tenant.object_id.id))
    # Guard clause: refuse to attach anything that isn't AVAILABLE.
    if not _object_status_is(volume_api, 'volumes', vol_id, 'available'):
        raise base.AbortMigration(
            'Volume %s in cloud %s is not available for attachment',
            vol_id, cloud.name)
    compute_api.volumes.create_server_volume(vm_id, vol_id, '/dev/vdb')
    try:
        clients.wait_for(_object_status_is, volume_api, 'volumes',
                         vol_id, 'in-use')
    except clients.Timeout:
        raise base.AbortMigration(
            'Volume %s in cloud %s couldn\'t attach', vol_id, cloud.name)
def _get_admin_tenant_id(cloud, session): scope = cloud.scope project_name = scope.project_name if scope.project_id is not None: return scope.project_id elif project_name is not None: for tenant in session.list(identity.Tenant, cloud): if tenant.name.lower() == project_name.lower(): return tenant.object_id.id raise base.AbortMigration('Unable to upload image: no admin tenant.')
def _object_status_is(client, manager_name, obj_id, status):
    """Check whether object *obj_id* has reached *status*.

    Intended as a predicate for ``clients.wait_for``.

    :param client: OpenStack client exposing *manager_name*
    :param manager_name: attribute name of the manager (e.g. ``'servers'``)
    :returns: the refreshed object when its status matches (truthy), or
        ``None`` when it does not match yet
    :raises base.AbortMigration: if the object is in ERROR state
    """
    manager = getattr(client, manager_name)
    obj = clients.retry(manager.get, obj_id)
    actual = obj.status.lower()
    wanted = status.lower()
    LOG.debug('Checking object %s is in status \'%s\': actual status \'%s\'',
              obj_id, wanted, actual)
    if actual == wanted:
        return obj
    if actual == 'error':
        # Object will never reach the desired state - give up loudly.
        raise base.AbortMigration('Object %s ended up in ERROR state',
                                  obj_id)
    return None
def migrate(self, dst_object, need_restore_deleted, *args, **kwargs):
    """Re-create a deleted image from a server's boot disk, if requested.

    No-op unless *need_restore_deleted* is truthy. Tries every known boot
    disk location until one upload succeeds.

    :param dst_object: destination image model whose ID is being restored
    :raises base.AbortMigration: when no server provides the image data
    """
    if not need_restore_deleted:
        return
    dst_image_id = dst_object.object_id.id
    with model.Session() as session:
        # Try candidates one by one; first successful upload wins.
        for candidate in self._get_boot_disk_locations(session):
            if self.upload_server_image(candidate, dst_image_id):
                return
        raise base.AbortMigration(
            'Unable to restore deleted image %s: no servers found',
            dst_image_id)
def _wait_ip_accessible(cloud, rexec, ip_address): waited = 0.0 while waited <= cloud.operation_timeout: before = time.time() try: rexec.run('ping -c 1 -W 1 {ip_address}', ip_address=ip_address) return except remote.RemoteFailure: after = time.time() delta = after - before if delta < 1.0: delta = 1.0 time.sleep(1.0) waited += delta raise base.AbortMigration('VM couldn\'t be reached through %s', ip_address)
def migrate(self, *args, **kwargs):
    """Ensure a cirros transfer image exists in the cloud.

    Reuses an already-uploaded supported cirros image when one is found;
    otherwise uploads one and records it in the session. Also creates an
    ``ImageDestructor`` so the image can be cleaned up later.

    :returns: dict with the image ID (``self.var_name``) and the
        destructor (``self.destructor_var``)
    :raises base.AbortMigration: if uploading the image times out
    """
    with model.Session() as session:
        image_id = self._find_supported_cirros_image(session)
        if image_id is None:
            try:
                uploaded = self._upload_cirros_image(session)
            except clients.Timeout:
                raise base.AbortMigration(
                    'Failed to upload transfer VM image')
            # Record the freshly uploaded image in the model session.
            session.store(self.load_from_cloud(image.Image, self.cloud,
                                               uploaded))
            image_id = uploaded.id
    self.destructor = ImageDestructor(self.location, image_id)
    return {
        self.var_name: image_id,
        self.destructor_var: self.destructor,
    }
def _detach_volume(self, cloud, volume, vm_id, abort_migration=False):
    """Detach *volume* from server *vm_id* and wait for it to free up.

    Silently returns if the volume is already deleted or not attached.

    :param cloud: cloud the volume and server live in
    :param volume: volume model object to detach
    :param vm_id: ID of the server the volume is attached to
    :param abort_migration: when True, a detach timeout aborts the
        migration; otherwise the timeout is ignored (best-effort cleanup)
    :raises base.AbortMigration: only when *abort_migration* is True and
        the volume fails to become available in time
    """
    volume_id = volume.object_id.id
    nova_client = clients.compute_client(cloud)
    cinder_client = clients.volume_client(
        cloud, _scope(volume.tenant.object_id.id))
    # Nothing to do if the volume no longer exists.
    if _object_is_deleted(cinder_client, 'volumes', volume_id,
                          cinder_exceptions.NotFound):
        return
    if _object_status_is(cinder_client, 'volumes', volume_id, 'in-use'):
        nova_client.volumes.delete_server_volume(vm_id, volume_id)
        try:
            clients.wait_for(_object_status_is, cinder_client, 'volumes',
                             volume_id, 'available')
        except clients.Timeout:
            if abort_migration:
                # BUG FIX: message previously said "couldn't attach",
                # copy-pasted from _attach_volume - this is a detach.
                raise base.AbortMigration(
                    'Volume %s in cloud %s couldn\'t detach',
                    volume_id, cloud.name)
def migrate(self, source_obj, *args, **kwargs):
    """Create the source tenant in the destination cloud.

    If a tenant with the same (case-insensitive) name already exists, it
    is reused instead of created.

    :param source_obj: source-cloud tenant model to replicate
    :returns: ``dict(dst_object=...)`` with the destination tenant model
    :raises base.AbortMigration: if creation conflicts but no matching
        tenant can be found (inconsistent keystone state)
    """
    identity_client = clients.identity_client(self.dst_cloud)
    try:
        destination_obj = clients.retry(
            identity_client.tenants.create, source_obj.name,
            description=source_obj.description,
            enabled=source_obj.enabled,
            expected_exceptions=[exceptions.Conflict])
        self.created_object = destination_obj
    except exceptions.Conflict:
        # Name already taken: adopt the existing tenant instead.
        wanted = source_obj.name.lower()
        for candidate in clients.retry(identity_client.tenants.list):
            if candidate.name.lower() == wanted:
                destination_obj = candidate
                break
        else:
            raise base.AbortMigration('Invalid state')
    destination = self.load_from_cloud(identity.Tenant, self.dst_cloud,
                                       destination_obj)
    return dict(dst_object=destination)
def migrate(self, source_obj, *args, **kwargs):
    """Create the destination volume matching *source_obj*.

    Applies any configured overrides, creates the volume in the tenant's
    scope, and waits for it to become AVAILABLE.

    :param source_obj: source-cloud volume model to replicate
    :returns: ``dict(dst_object=...)`` with the destination volume model
    :raises base.AbortMigration: if the volume does not become available
        in time (the half-created volume is deleted first)
    """
    dst_tenant_id = _get_object_tenant_id(self.dst_cloud, source_obj)
    volume_client = clients.volume_client(self.dst_cloud,
                                          _scope(dst_tenant_id))
    spec = self.override(source_obj)
    new_volume = clients.retry(
        volume_client.volumes.create,
        size=spec.size,
        display_name=spec.name,
        display_description=spec.description,
        volume_type=spec.volume_type,
        availability_zone=spec.availability_zone,
        metadata=spec.metadata)
    try:
        self.created_object = clients.wait_for(
            _object_status_is, volume_client, 'volumes',
            new_volume.id, 'available')
    except clients.Timeout:
        # Don't leak the stuck volume before aborting.
        self._delete_volume(new_volume)
        raise base.AbortMigration('Volume didn\'t become active')
    result = self.load_from_cloud(storage.Volume, self.dst_cloud,
                                  self.created_object)
    return dict(dst_object=result)
def shutoff_vm(nova_client, instace_id):
    """Drive the VM *instace_id* to the SHUTOFF state.

    PAUSED/SUSPENDED/VERIFY_RESIZE VMs are first brought back to ACTIVE
    (unpause/resume/confirm_resize respectively), then stopped. Already
    SHUTOFF VMs are left alone. A wait timeout is logged, not raised.

    NOTE: the parameter name 'instace_id' (sic) is kept as-is to preserve
    the public interface for keyword callers.

    :raises base.AbortMigration: for states with no path to SHUTOFF
    """
    # TODO: make general-purpose utility function
    instance = clients.retry(nova_client.servers.get, instace_id)
    current = instance.status.lower()

    def wait_status(status):
        return clients.wait_for(_object_status_is, nova_client,
                                'servers', instace_id, status)

    # State -> call that brings the VM back to ACTIVE before stopping.
    revive = {
        'paused': nova_client.servers.unpause,
        'suspended': nova_client.servers.resume,
        'verify_resize': nova_client.servers.confirm_resize,
    }
    try:
        if current == 'shutoff':
            return
        if current in revive:
            revive[current](instance)
            wait_status('active')
        elif current != 'active':
            raise base.AbortMigration('Invalid state change: %s -> shutoff',
                                      current)
        nova_client.servers.stop(instance)
        wait_status('shutoff')
    except clients.Timeout:
        LOG.debug(
            "Failed to change state from '%s' to 'shutoff' for VM "
            "'%s'", current, instace_id)