Example #1
    def on_block_threshold(self, target, path, threshold, excess):
        """
        Callback to be executed in the libvirt event handler when
        a BLOCK_THRESHOLD event is delivered.

        Args:
            target: device name (vda) or indexed name (vda[7])
            path: device path
            threshold: the threshold (in bytes) that was exceeded
                       causing the event to trigger
            excess: amount (in bytes) written past the threshold
        """
        self._log.info('Block threshold %s exceeded by %s for drive %r (%s)',
                       threshold, excess, target, path)

        drive_name, index = parse_target(target)

        # We register only the indexed name (vda[7]), but libvirt also
        # reports an event for the top volume (vda).
        # See https://bugzilla.redhat.com/1983429
        # TODO: Remove when bug is fixed.
        if index is None:
            self._log.debug('Ignoring unexpected event for drive %r',
                            drive_name)
            return

        try:
            drive = lookup.drive_by_name(self._vm.getDiskDevices()[:],
                                         drive_name)
        except LookupError:
            self._log.warning(
                'Unknown drive %r for vm %s - ignored block threshold event',
                drive_name, self._vm.id)
        else:
            drive.on_block_threshold(path)
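
The helper parse_target used in Example #1 is not shown on this page. Based only on the docstring above (a plain name such as vda carries no index, an indexed name such as vda[7] carries the volume index), a minimal sketch of its contract could look like the following; the real vdsm helper may differ in detail.

    import re

    # Hypothetical stand-in for parse_target, inferred from Example #1:
    # 'vda' -> ('vda', None), 'vda[7]' -> ('vda', 7).
    _TARGET = re.compile(r"^(?P<name>[a-z]+)(\[(?P<index>\d+)\])?$")

    def parse_target(target):
        """Split a libvirt target into (device name, optional index)."""
        match = _TARGET.match(target)
        if match is None:
            raise ValueError("Unexpected block threshold target: %r" % target)
        index = match.group("index")
        return match.group("name"), None if index is None else int(index)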
Example #2
    def _extend_replica_completed(self, volInfo):
        clock = volInfo["clock"]
        clock.stop("extend-replica")

        with clock.run("refresh-replica"):
            self._vm.refresh_volume(volInfo)

        self._verify_volume_extension(volInfo)
        vmDrive = lookup.drive_by_name(self._vm.getDiskDevices()[:],
                                       volInfo['name'])
        if not vmDrive.chunked:
            # This was a replica-only extension; we are done.
            clock.stop("total")
            self._log.info("Extend replica %s completed %s",
                           volInfo["volumeID"], clock)
            return

        self._log.debug(
            "Requesting extension for the original drive: %s (domainID: %s, "
            "volumeID: %s)", vmDrive.name, vmDrive.domainID, vmDrive.volumeID)
        self._extend_volume(vmDrive,
                            vmDrive.volumeID,
                            volInfo['newSize'],
                            clock,
                            callback=volInfo["callback"])
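
The clock object carried in volInfo records how long each phase of the extend flow takes ("extend-replica", "refresh-replica", "total") and is logged when the operation completes. The class itself is not shown on this page; a minimal stand-in supporting the two calls used above, stop() and the run() context manager, might look like this (an assumed interface, not necessarily vdsm's).

    import time
    from contextlib import contextmanager

    class Clock(object):
        """Minimal named-phase stopwatch (assumed interface)."""

        def __init__(self):
            self._started = {}
            self._elapsed = {}

        def start(self, name):
            self._started[name] = time.monotonic()

        def stop(self, name):
            self._elapsed[name] = time.monotonic() - self._started.pop(name)

        @contextmanager
        def run(self, name):
            # Time the enclosed block under the given phase name.
            self.start(name)
            try:
                yield
            finally:
                self.stop(name)

        def __repr__(self):
            return "<Clock %s>" % ", ".join(
                "%s=%.2f" % (name, value)
                for name, value in self._elapsed.items())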
Example #3
    def teardown(self, memory_vol_path, memory_vol, new_drives, vm_drives):
        self.vm.log.info('Starting snapshot teardown')
        result = True

        def pad_memory_volume(memory_vol_path, sd_uuid):
            sd_type = sd.name2type(
                self.vm.cif.irs.getStorageDomainInfo(sd_uuid)['info']['type'])
            if sd_type in sd.FILE_DOMAIN_TYPES:
                iop = oop.getProcessPool(sd_uuid)
                iop.fileUtils.padToBlockSize(memory_vol_path)

        try:
            # Must always thaw, even if freeze failed, in case the guest
            # did freeze the filesystems but failed to reply in time.
            # Libvirt uses the same logic (see src/qemu/qemu_driver.c).
            if self.should_freeze:
                self.vm.thaw()

            # We pad the memory volume with a block size of zeroes
            # because qemu-img truncates files so that their size is rounded
            # down to the closest multiple of the block size (bz 970559).
            # This code should be removed once qemu-img handles files whose
            # size is not a multiple of the block size correctly.
            if self.memory_params:
                pad_memory_volume(memory_vol_path, memory_vol['domainID'])

            for drive in new_drives.values():
                # Update the drive information
                _, old_volume_id = vm_drives[drive["name"]]
                try:
                    self.vm.updateDriveParameters(drive)
                except Exception:
                    # Here it is too late to fail: the switch already
                    # happened and there is nothing we can do, so we must
                    # proceed anyway and report the live snapshot success.
                    self.vm.log.exception(
                        "Failed to update drive information"
                        " for '%s'", drive)

                drive_obj = lookup.drive_by_name(self.vm.getDiskDevices()[:],
                                                 drive["name"])
                self.vm.clear_drive_threshold(drive_obj, old_volume_id)

                try:
                    self.vm.updateDriveVolume(drive_obj)
                except vdsm.virt.vm.StorageUnavailableError as e:
                    # Will be recovered on the next monitoring cycle
                    self.vm.log.error(
                        "Unable to update drive %r "
                        "volume size: %s", drive["name"], e)
        except Exception as e:
            self.vm.log.error(
                "Snapshot teardown error: %s, "
                "trying to continue teardown", e)
            result = False
        finally:
            self._finalize_vm(memory_vol)
        return result
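
The padding step in Example #3 exists because qemu-img rounds a file's size down to the nearest multiple of the block size, so a memory volume that is not block-aligned would lose its tail (bz 970559). The rounding itself is simple; a rough standalone equivalent, assuming a 4096-byte block size (the real fileUtils.padToBlockSize call runs through the ioprocess pool and takes its block size from the storage layer), is sketched below.

    import os

    BLOCK_SIZE = 4096  # assumed block size for this sketch

    def pad_to_block_size(path, block_size=BLOCK_SIZE):
        """Grow the file at path with zeroes up to the next block boundary."""
        size = os.stat(path).st_size
        padding = (block_size - size % block_size) % block_size
        if padding:
            with open(path, "rb+") as f:
                f.truncate(size + padding)  # extends with (sparse) zeroes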
Example #4
    def test_not_managed_device_parameter(self):
        drives = vmdevices.common.storage_device_params_from_domain_xml(
            'TESTING', self.dom_desc, self.md_desc, self.log
        )

        disk_objs = [
            vmdevices.storage.Drive(self.log, **params)
            for params in drives
        ]

        drive = lookup.drive_by_name(disk_objs, 'sdd')
        assert not drive.managed
Example #5
    def test_correct_rbd_disk_metadata(self):
        drives = vmdevices.common.storage_device_params_from_domain_xml(
            'TESTING', self.dom_desc, self.md_desc, self.log
        )

        disk_objs = [
            vmdevices.storage.Drive(self.log, **params)
            for params in drives
        ]

        rbd_drive = lookup.drive_by_name(disk_objs, 'sda')

        assert getattr(rbd_drive, 'RBD') == '/dev/rbd/pool/volume-uuid'
Example #6
    def _extend_volume_completed(self, volInfo):
        callback = None
        error = None
        try:
            callback = volInfo["callback"]
            clock = volInfo["clock"]
            clock.stop("extend-volume")

            if self._vm.should_refresh_destination_volume():
                with clock.run("refresh-destination-volume"):
                    self._vm.refresh_destination_volume(volInfo)

            # After a volume is refreshed, the monitor thread may wake up
            # and trigger an unwanted extend to the next chunk size. Lock the
            # drive during refresh until we set a new threshold. If taking the
            # lock times out, we abort this attempt without refreshing the
            # volume. The next extend attempt will try to take the lock and
            # refresh the drive.

            drive = lookup.drive_by_name(self._vm.getDiskDevices()[:],
                                         volInfo['name'])
            timeout = config.getfloat("thinp", "refresh_timeout")

            with drive.monitor_lock(timeout):
                with clock.run("refresh-volume"):
                    self._vm.refresh_volume(volInfo)
                volSize = self._verify_volume_extension(volInfo)

                clock.stop("total")
                self._log.info("Extend volume %s completed %s",
                               volInfo["volumeID"], clock)

                if not volInfo['internal']:
                    self.update_drive_volume_size(drive, volSize)

            self._vm.extend_volume_completed()

        except exception.DiskRefreshNotSupported as e:
            self._log.warning(
                "Migration destination host does not support "
                "extending disk during migration, disabling disk "
                "extension during migration")
            self.disable()
            error = e
        except virdomain.NotConnectedError as e:
            self._log.debug("VM not running, aborting extend completion")
            error = e
        finally:
            if callback:
                callback(error=sys.exc_info()[1] or error)
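
drive.monitor_lock used in Example #6 is not shown here. The comment describes its contract: take a per-drive lock with a timeout so that the monitor thread cannot trigger another extend while the volume is being refreshed, and give up on the current attempt if the lock cannot be taken in time. A hypothetical context manager with that behaviour, offered only as an assumption of the pattern, could be written as follows.

    import threading
    from contextlib import contextmanager

    _monitor_lock = threading.Lock()  # one lock per drive in the real code

    @contextmanager
    def monitor_lock(timeout):
        """Hold the drive's monitor lock while the enclosed block runs."""
        if not _monitor_lock.acquire(timeout=timeout):
            # Abort this attempt; the next extend cycle will retry the
            # lock and refresh the drive.
            raise TimeoutError("Timed out waiting for the monitor lock")
        try:
            yield
        finally:
            _monitor_lock.release()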
Example #7
    def _extend_volume_completed(self, volInfo):
        callback = None
        error = None
        try:
            callback = volInfo["callback"]
            clock = volInfo["clock"]
            clock.stop("extend-volume")

            if self._vm.should_refresh_destination_volume():
                with clock.run("refresh-destination-volume"):
                    self._vm.refresh_destination_volume(volInfo)

            with clock.run("refresh-volume"):
                self._vm.refresh_volume(volInfo)

            # Check if the extension succeeded.  On failure an exception is
            # raised.
            # TODO: Report failure to the engine.
            volSize = self._verify_volume_extension(volInfo)

            # This was a volume extension or replica and volume extension.
            clock.stop("total")
            self._log.info("Extend volume %s completed %s",
                           volInfo["volumeID"], clock)

            # Only update apparentsize and truesize if we've resized the leaf
            if not volInfo['internal']:
                drive = lookup.drive_by_name(self._vm.getDiskDevices()[:],
                                             volInfo['name'])
                self.update_drive_volume_size(drive, volSize)

            self._vm.extend_volume_completed()

        except exception.DiskRefreshNotSupported as e:
            self._log.warning(
                "Migration destination host does not support "
                "extending disk during migration, disabling disk "
                "extension during migration")
            self.disable()
            error = e
        except virdomain.NotConnectedError as e:
            self._log.debug("VM not running, aborting extend completion")
            error = e
        finally:
            if callback:
                callback(error=sys.exc_info()[1] or error)
Example #8
    def on_block_threshold(self, dev, path, threshold, excess):
        """
        Callback to be executed in the libvirt event handler when
        a BLOCK_THRESHOLD event is delivered.

        Args:
            dev: device name (e.g. vda, sdb)
            path: device path
            threshold: the threshold (in bytes) that was exceeded
                       causing the event to trigger
            excess: amount (in bytes) written past the threshold
        """
        self._log.info('block threshold %d exceeded on %r (%s)',
                       threshold, dev, path)
        try:
            drive = lookup.drive_by_name(self._vm.getDiskDevices()[:], dev)
        except LookupError:
            self._log.warning(
                'Unknown drive %r for vm %s - ignored block threshold event',
                dev, self._vm.id)
        else:
            drive.on_block_threshold(path)
Example #9
    def test_lookup_drive_by_name_found(self):
        drive = lookup.drive_by_name(self.drives, 'sda')
        assert drive is self.drives[0]
Example #10
    def test_lookup_drive_by_name_missing(self):
        with pytest.raises(LookupError):
            lookup.drive_by_name(self.drives, 'hdd')
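
The two tests above pin down the contract of lookup.drive_by_name: return the device whose name attribute matches, and raise LookupError when there is no match. A minimal implementation consistent with that contract (the real vdsm helper may differ) is:

    def drive_by_name(devices, name):
        """Return the first drive whose .name equals name, else raise."""
        for device in devices:
            if device.name == name:
                return device
        raise LookupError("No such drive: %r" % name)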