def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
                                              vm_shutdown=True):
    instance = {'name': 'foo', 'task_state': task_states.RESIZE_MIGRATING}

    self.mox.StubOutWithMock(vm_utils, 'lookup')
    self.mox.StubOutWithMock(self._vmops, '_destroy')
    self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
    self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
    self.mox.StubOutWithMock(self._vmops, '_start')
    self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')

    vm_utils.lookup(self._session, 'foo-orig').AndReturn(
        backup_made and 'foo' or None)
    vm_utils.lookup(self._session, 'foo').AndReturn(
        (not backup_made or new_made) and 'foo' or None)
    if backup_made:
        if new_made:
            self._vmops._destroy(instance, 'foo')
        vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
        self._vmops._attach_mapped_block_devices(instance, [])

    vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
    if vm_shutdown:
        self._vmops._start(instance, 'foo')

    self.mox.ReplayAll()

    self._vmops.finish_revert_migration(instance, [])
def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
                                              vm_shutdown=True):
    instance = {'name': 'foo', 'task_state': task_states.RESIZE_MIGRATING}
    context = 'fake_context'

    self.mox.StubOutWithMock(vm_utils, 'lookup')
    self.mox.StubOutWithMock(self._vmops, '_destroy')
    self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
    self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
    self.mox.StubOutWithMock(self._vmops, '_start')
    self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')

    vm_utils.lookup(self._session, 'foo-orig').AndReturn(
        backup_made and 'foo' or None)
    vm_utils.lookup(self._session, 'foo').AndReturn(
        (not backup_made or new_made) and 'foo' or None)
    if backup_made:
        if new_made:
            self._vmops._destroy(instance, 'foo')
        vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
        self._vmops._attach_mapped_block_devices(instance, [])

    vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
    if vm_shutdown:
        self._vmops._start(instance, 'foo')

    self.mox.ReplayAll()

    self._vmops.finish_revert_migration(context, instance, [])
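# A hedged sketch (not taken from the source) of how individual test cases
# might drive the helper above: each one pins down a different crash point by
# choosing whether the 'foo-orig' backup and/or the new 'foo' VM already
# existed when the host went down. The test method names are illustrative
# assumptions, not the project's actual test names.
def test_finish_revert_migration_after_crash(self):
    self._test_finish_revert_migration_after_crash(backup_made=True,
                                                   new_made=True)

def test_finish_revert_migration_after_crash_before_new(self):
    self._test_finish_revert_migration_after_crash(backup_made=True,
                                                   new_made=False)

def test_finish_revert_migration_after_crash_before_backup(self):
    self._test_finish_revert_migration_after_crash(backup_made=False,
                                                   new_made=False)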
def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self):
    self.mock = mox.Mox()
    session = FakeSession()
    instance = "instance"
    vm_ref = "vm-ref"
    self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
    vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
    self.mock.StubOutWithMock(vm_utils, 'LOG')
    self.assertTrue(vm_utils.clean_shutdown_vm(session, instance, vm_ref))
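# For context, a minimal sketch of what vm_utils.is_vm_shutdown could look
# like. This is an assumption, not the actual nova implementation; it only
# relies on the standard XenAPI VM.get_power_state call, which reports
# 'Halted' for a VM that is shut down.
def is_vm_shutdown(session, vm_ref):
    return session.call_xenapi("VM.get_power_state", vm_ref) == "Halted"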
def _attach_volume_to_vm(self, vdi_ref, vm_ref, instance_name, mountpoint,
                         hotplug):
    LOG.debug('Attach_volume vdi: %(vdi_ref)s vm: %(vm_ref)s',
              {'vdi_ref': vdi_ref, 'vm_ref': vm_ref})

    dev_number = volume_utils.get_device_number(mountpoint)
    # osvol is added to the vbd so we can spot which vbds are volumes
    vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
                                  dev_number, bootable=False,
                                  osvol=True)
    if hotplug:
        # NOTE(johngarbutt) can only call VBD.plug on a running vm
        running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
        if running:
            LOG.debug("Plugging VBD: %s", vbd_ref)
            self._session.VBD.plug(vbd_ref, vm_ref)

    LOG.info(_LI('Dev %(dev_number)s attached to'
                 ' instance %(instance_name)s'),
             {'instance_name': instance_name, 'dev_number': dev_number})
def detach_volume(self, connection_info, instance_name, mountpoint):
    """Detach volume storage from VM instance."""
    LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s"),
              {'instance_name': instance_name, 'mountpoint': mountpoint})

    device_number = volume_utils.get_device_number(mountpoint)
    vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)

    try:
        vbd_ref = vm_utils.find_vbd_by_number(self._session, vm_ref,
                                              device_number)
    except exception.StorageError:
        # NOTE(sirp): If we don't find the VBD then it must have been
        # detached previously.
        LOG.warn(_('Skipping detach because VBD for %s was'
                   ' not found'), instance_name)
        return

    # Unplug VBD if we're NOT shutdown
    unplug = not vm_utils.is_vm_shutdown(self._session, vm_ref)
    self._detach_vbd(vbd_ref, unplug, vm_ref)

    LOG.info(_('Mountpoint %(mountpoint)s detached from instance'
               ' %(instance_name)s'),
             {'instance_name': instance_name, 'mountpoint': mountpoint})
def _connect_volume(self, connection_info, dev_number=None,
                    instance_name=None, vm_ref=None, hotplug=True):
    driver_type = connection_info['driver_volume_type']
    if driver_type not in ['iscsi', 'xensm']:
        raise exception.VolumeDriverNotFound(driver_type=driver_type)

    connection_data = connection_info['data']
    sr_uuid, sr_label, sr_params = volume_utils.parse_sr_info(
        connection_data, 'Disk-for:%s' % instance_name)

    # Introduce SR if not already present
    sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
    if not sr_ref:
        sr_ref = volume_utils.introduce_sr(self._session, sr_uuid,
                                           sr_label, sr_params)

    try:
        # Introduce VDI
        if 'vdi_uuid' in connection_data:
            vdi_ref = volume_utils.introduce_vdi(
                self._session, sr_ref,
                vdi_uuid=connection_data['vdi_uuid'])
        elif 'target_lun' in connection_data:
            vdi_ref = volume_utils.introduce_vdi(
                self._session, sr_ref,
                target_lun=connection_data['target_lun'])
        else:
            # NOTE(sirp): This will introduce the first VDI in the SR
            vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref)

        # Attach
        if vm_ref:
            vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
                                          dev_number, bootable=False,
                                          osvol=True)

            running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
            if hotplug and running:
                volume_utils.vbd_plug(self._session, vbd_ref, vm_ref)

        vdi_uuid = self._session.call_xenapi("VDI.get_uuid", vdi_ref)
        return (sr_uuid, vdi_uuid)
    except Exception:
        with excutils.save_and_reraise_exception():
            # NOTE(sirp): Forgetting the SR will have the effect of
            # cleaning up the VDI and VBD records, so no need to handle
            # that explicitly.
            volume_utils.forget_sr(self._session, sr_ref)
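# Illustrative only: the general shape of an iSCSI connection_info dict that
# _connect_volume above consumes. The values are made up and the exact keys
# inside 'data' depend on the Cinder backend; _connect_volume itself only
# relies on 'driver_volume_type', 'data', and (optionally) 'vdi_uuid' or
# 'target_lun'.
example_connection_info = {
    'driver_volume_type': 'iscsi',
    'data': {
        'target_iqn': 'iqn.2010-10.org.openstack:volume-0001',
        'target_portal': '192.0.2.10:3260',
        'target_lun': 1,
        'volume_id': 'volume-0001',
    },
}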
def detach_all(self, vm_ref):
    """Detach any external nova/cinder volumes and purge the SRs."""
    # Generally speaking, detach_all will be called with VM already
    # shutdown; however if it's still running, we can still perform the
    # operation by unplugging the VBD first.
    unplug = not vm_utils.is_vm_shutdown(self._session, vm_ref)

    vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
    for vbd_ref in vbd_refs:
        self._detach_vbd(vbd_ref, unplug, vm_ref)
def _detach_vbds_and_srs(self, vm_ref, vbd_refs):
    is_vm_shutdown = vm_utils.is_vm_shutdown(self._session, vm_ref)

    for vbd_ref in vbd_refs:
        # find sr before we destroy the vbd
        sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)

        if not is_vm_shutdown:
            vm_utils.unplug_vbd(self._session, vbd_ref, vm_ref)

        vm_utils.destroy_vbd(self._session, vbd_ref)
        # Forget (i.e. disconnect) SR only if not in use
        volume_utils.purge_sr(self._session, sr_ref)
def _connect_volume(self, connection_info, dev_number=None,
                    instance_name=None, vm_ref=None, hotplug=True):
    driver_type = connection_info['driver_volume_type']
    if driver_type not in ['iscsi', 'xensm']:
        raise exception.VolumeDriverNotFound(driver_type=driver_type)

    connection_data = connection_info['data']
    sr_uuid, sr_label, sr_params = volume_utils.parse_sr_info(
        connection_data, 'Disk-for:%s' % instance_name)

    # Introduce SR if not already present
    sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
    if not sr_ref:
        sr_ref = volume_utils.introduce_sr(
            self._session, sr_uuid, sr_label, sr_params)

    try:
        # Introduce VDI
        if 'vdi_uuid' in connection_data:
            vdi_ref = volume_utils.introduce_vdi(
                self._session, sr_ref,
                vdi_uuid=connection_data['vdi_uuid'])
        elif 'target_lun' in connection_data:
            vdi_ref = volume_utils.introduce_vdi(
                self._session, sr_ref,
                target_lun=connection_data['target_lun'])
        else:
            # NOTE(sirp): This will introduce the first VDI in the SR
            vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref)

        # Attach
        if vm_ref:
            vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
                                          dev_number, bootable=False,
                                          osvol=True)

            running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
            if hotplug and running:
                self._session.VBD.plug(vbd_ref, vm_ref)

        vdi_uuid = self._session.call_xenapi("VDI.get_uuid", vdi_ref)
        return (sr_uuid, vdi_uuid)
    except Exception:
        with excutils.save_and_reraise_exception():
            # NOTE(sirp): Forgetting the SR will have the effect of
            # cleaning up the VDI and VBD records, so no need to handle
            # that explicitly.
            volume_utils.forget_sr(self._session, sr_ref)
def _attach_volume_to_vm(self, vdi_ref, vm_ref, instance_name, dev_number,
                         hotplug):
    msg = _("Attach_volume vdi: %(vdi_ref)s vm: %(vm_ref)s")
    LOG.debug(msg, {"vdi_ref": vdi_ref, "vm_ref": vm_ref})

    # osvol is added to the vbd so we can spot which vbds are volumes
    vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
                                  dev_number, bootable=False,
                                  osvol=True)
    if hotplug:
        # NOTE(johngarbutt) can only call VBD.plug on a running vm
        running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
        if running:
            LOG.debug("Plugging VBD: %s", vbd_ref)
            self._session.VBD.plug(vbd_ref, vm_ref)

    LOG.info(_("Dev %(dev_number)s attached to"
               " instance %(instance_name)s"),
             {"instance_name": instance_name, "dev_number": dev_number})
def _attach_volume_to_vm(self, vdi_ref, vm_ref, instance_name, dev_number,
                         hotplug):
    LOG.debug('Attach_volume vdi: %(vdi_ref)s vm: %(vm_ref)s',
              {'vdi_ref': vdi_ref, 'vm_ref': vm_ref})

    # osvol is added to the vbd so we can spot which vbds are volumes
    vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
                                  dev_number, bootable=False,
                                  osvol=True)
    if hotplug:
        # NOTE(johngarbutt) can only call VBD.plug on a running vm
        running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
        if running:
            LOG.debug("Plugging VBD: %s", vbd_ref)
            self._session.VBD.plug(vbd_ref, vm_ref)

    LOG.info(_LI('Dev %(dev_number)s attached to'
                 ' instance %(instance_name)s'),
             {'instance_name': instance_name, 'dev_number': dev_number})
def detach_volume(self, connection_info, instance_name, mountpoint):
    """Detach volume storage from VM instance."""
    LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s"),
              {'instance_name': instance_name, 'mountpoint': mountpoint})

    device_number = volume_utils.get_device_number(mountpoint)
    vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)

    try:
        vbd_ref = vm_utils.find_vbd_by_number(
            self._session, vm_ref, device_number)
    except volume_utils.StorageError:
        # NOTE(sirp): If we don't find the VBD then it must have been
        # detached previously.
        LOG.warn(_('Skipping detach because VBD for %s was'
                   ' not found'), instance_name)
        return

    # Unplug VBD if we're NOT shutdown
    unplug = not vm_utils.is_vm_shutdown(self._session, vm_ref)
    self._detach_vbd(vbd_ref, unplug, vm_ref)

    LOG.info(_('Mountpoint %(mountpoint)s detached from instance'
               ' %(instance_name)s'),
             {'instance_name': instance_name, 'mountpoint': mountpoint})