Example #1
class ServerConfig(properties.Owner):

    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)
    readonly = properties.Boolean(default=False)
    discard = properties.Boolean(default=False)
    detect_zeroes = properties.Boolean(default=False)
    backing_chain = properties.Boolean(default=True)
    bitmap = properties.UUID()

    def __init__(self, config):
        self.sd_id = config.get("sd_id")
        self.img_id = config.get("img_id")
        self.vol_id = config.get("vol_id")
        self.readonly = config.get("readonly")
        self.discard = config.get("discard")
        self.detect_zeroes = config.get("detect_zeroes")

        # Setting to None overrides the default value.
        # See https://bugzilla.redhat.com/1892403
        self.backing_chain = config.get("backing_chain", True)

        self.bitmap = config.get("bitmap")

        if not self.backing_chain and self.bitmap:
            # When exporting a bitmap we always export the entire chain.
            raise se.UnsupportedOperation(
                "Cannot export bitmap with backing_chain=False")

        if self.bitmap and not self.readonly:
            # Exporting bitmaps makes sense only for incremental backup.
            raise se.UnsupportedOperation("Cannot export bitmap for writing")
Example #2
class DiskConfig(properties.Owner):
    vol_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    dom_id = properties.UUID(required=True)
    checkpoint = properties.Boolean(required=True)
    backup_mode = properties.Enum(values=("full", "incremental"))

    def __init__(self, disk_config):
        self.vol_id = disk_config.get("volumeID")
        self.img_id = disk_config.get("imageID")
        self.dom_id = disk_config.get("domainID")
        # Mark if the disk is included in the checkpoint.
        self.checkpoint = disk_config.get("checkpoint")
        self.backup_mode = disk_config.get("backup_mode")
        # Initialized when the engine creates the scratch
        # disk on shared storage.
        if "scratch_disk" in disk_config:
            scratch_disk = disk_config.get("scratch_disk")
            self.scratch_disk = ScratchDiskConfig(
                path=scratch_disk.get("path"),
                type=scratch_disk.get("type"),
                sd_id=scratch_disk.get("domainID"),
                img_id=scratch_disk.get("imageID"),
                vol_id=scratch_disk.get("volumeID"))
        else:
            self.scratch_disk = None
Example #3
class BackupConfig(properties.Owner):

    backup_id = properties.UUID(required=True)
    from_checkpoint_id = properties.UUID(required=False)
    to_checkpoint_id = properties.UUID(default='')
    parent_checkpoint_id = properties.UUID(default='')

    def __init__(self, backup_config):
        self.backup_id = backup_config.get("backup_id")
        self.from_checkpoint_id = backup_config.get("from_checkpoint_id")
        self.to_checkpoint_id = backup_config.get("to_checkpoint_id")
        self.parent_checkpoint_id = backup_config.get("parent_checkpoint_id")

        if (self.from_checkpoint_id is not None
                and self.parent_checkpoint_id is None):
            raise exception.BackupError(
                reason="Cannot start an incremental backup without "
                "parent_checkpoint_id",
                backup=self.backup_id)

        self.disks = [DiskConfig(d) for d in backup_config.get("disks", ())]
        if len(self.disks) == 0:
            raise exception.BackupError(
                reason="Cannot start a backup without disks",
                backup=self.backup_id)
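A hedged sketch of the request dictionary the class above expects; the keys mirror what __init__ reads, the disk entries follow the DiskConfig examples shown elsewhere on this page, and all UUIDs are placeholders:

# Hypothetical incremental backup request for the BackupConfig class above.
backup_config = {
    "backup_id": "55555555-5555-5555-5555-555555555555",
    "from_checkpoint_id": "66666666-6666-6666-6666-666666666666",
    "parent_checkpoint_id": "66666666-6666-6666-6666-666666666666",
    "disks": [
        {
            "domainID": "11111111-1111-1111-1111-111111111111",
            "imageID": "22222222-2222-2222-2222-222222222222",
            "volumeID": "33333333-3333-3333-3333-333333333333",
            "checkpoint": True,
        },
    ],
}

backup = BackupConfig(backup_config)

# Omitting "parent_checkpoint_id" while "from_checkpoint_id" is set, or
# passing an empty "disks" list, is expected to raise exception.BackupError.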
Example #4
class DiskConfig(properties.Owner):
    vol_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    dom_id = properties.UUID(required=True)

    def __init__(self, disk_config):
        self.vol_id = disk_config.get("volumeID")
        self.img_id = disk_config.get("imageID")
        self.dom_id = disk_config.get("domainID")
Example #5
class Lease(properties.Owner):
    """
    External sanlock lease.
    """
    sd_id = properties.UUID(required=True)
    lease_id = properties.UUID(required=True)

    def __init__(self, params):
        self.sd_id = params.get("sd_id")
        self.lease_id = params.get("lease_id")
Example #6
class VolumeInfo(properties.Owner):
    """
    VolumeInfo should be used for performing operations on any volume in a
    chain except a shared volume.
    The volume is prepared in read-write mode.
    While performing operations, the volume is not set as illegal.
    """
    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)
    generation = properties.Integer(required=False, minval=0,
                                    maxval=sc.MAX_GENERATION)

    def __init__(self, params, host_id):
        self.sd_id = params.get('sd_id')
        self.img_id = params.get('img_id')
        self.vol_id = params.get('vol_id')
        self.generation = params.get('generation')
        self._host_id = host_id
        self._vol = None

    @property
    def locks(self):
        img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
        ret = [rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
               rm.ResourceManagerLock(img_ns, self.img_id, rm.EXCLUSIVE)]
        dom = sdCache.produce_manifest(self.sd_id)
        if dom.hasVolumeLeases():
            ret.append(volume.VolumeLease(self._host_id, self.sd_id,
                                          self.img_id, self.vol_id))
        return ret

    @property
    def path(self):
        return self.volume.getVolumePath()

    @property
    def volume(self):
        if self._vol is None:
            dom = sdCache.produce_manifest(self.sd_id)
            self._vol = dom.produceVolume(self.img_id, self.vol_id)
        return self._vol

    def volume_operation(self):
        return self.volume.operation(requested_gen=self.generation,
                                     set_illegal=False)

    @contextmanager
    def prepare(self):
        self.volume.prepare(rw=True, justme=False)
        try:
            yield
        finally:
            self.volume.teardown(self.sd_id, self.vol_id, justme=False)
Example #7
class DiskConfig(properties.Owner):
    vol_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    dom_id = properties.UUID(required=True)
    checkpoint = properties.Boolean(required=True)

    def __init__(self, disk_config):
        self.vol_id = disk_config.get("volumeID")
        self.img_id = disk_config.get("imageID")
        self.dom_id = disk_config.get("domainID")
        # Mark if the disk is included in the checkpoint.
        self.checkpoint = disk_config.get("checkpoint")
Example #8
class ServerConfig(properties.Owner):

    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)
    readonly = properties.Boolean(default=False)
    discard = properties.Boolean(default=False)

    def __init__(self, config):
        self.sd_id = config.get("sd_id")
        self.img_id = config.get("img_id")
        self.vol_id = config.get("vol_id")
        self.readonly = config.get("readonly")
        self.discard = config.get("discard")
Example #9
class ScratchDiskConfig(properties.Owner):
    path = properties.String(required=True)
    type = properties.Enum(required=True,
                           values=[DISK_TYPE.FILE, DISK_TYPE.BLOCK])
    sd_id = properties.UUID(required=False)
    img_id = properties.UUID(required=False)
    vol_id = properties.UUID(required=False)

    def __init__(self, **kw):
        self.path = kw.get("path")
        self.type = kw.get("type")
        self.sd_id = kw.get("sd_id")
        self.img_id = kw.get("img_id")
        self.vol_id = kw.get("vol_id")
Example #10
class DiskConfig(properties.Owner):
    vol_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    dom_id = properties.UUID(required=True)
    checkpoint = properties.Boolean(required=True)
    backup_mode = properties.Enum(values=("full", "incremental"))

    def __init__(self, disk_config):
        self.vol_id = disk_config.get("volumeID")
        self.img_id = disk_config.get("imageID")
        self.dom_id = disk_config.get("domainID")
        # Mark if the disk is included in the checkpoint.
        self.checkpoint = disk_config.get("checkpoint")
        self.backup_mode = disk_config.get("backup_mode")
Example #11
class CheckpointConfig(properties.Owner):
    id = properties.UUID(required=True)
    xml = properties.String(required=True)

    def __init__(self, checkpoint_config):
        self.id = checkpoint_config.get("id")
        self.xml = checkpoint_config.get("xml")
Example #12
class StorageDomainReduceParams(properties.Owner):
    sd_id = properties.UUID(required=True)
    guid = properties.String(required=True)

    def __init__(self, params):
        self.sd_id = params.get('sd_id')
        self.guid = params.get('guid')
Example #13
class SealImageInfo(properties.Owner):
    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)

    def __init__(self, params, sp_id, irs):
        self.sd_id = params.get('sd_id')
        self.img_id = params.get('img_id')
        self.vol_id = params.get('vol_id')
        self._sp_id = sp_id
        self._irs = irs
        self._path = None

    @property
    def path(self):
        return self._path

    def prepare(self):
        res = self._irs.prepareImage(self.sd_id,
                                     self._sp_id,
                                     self.img_id,
                                     self.vol_id,
                                     allowIllegal=True)
        if res['status']['code']:
            raise ImagePreparingError("Cannot prepare image %s: %s" %
                                      (self, res['status']['message']))

        self._path = res['path']

    def teardown(self):
        res = self._irs.teardownImage(self.sd_id,
                                      self._sp_id,
                                      self.img_id)
        if res['status']['code']:
            raise ImageTearingDownError("Cannot tear down image %s: %s" %
                                        (self, res['status']['message']))

    def __repr__(self):
        return ("<%s sd_id=%s img_id=%s vol_id=%s at 0x%s>" %
                (self.__class__.__name__, self.sd_id, self.img_id, self.vol_id,
                 id(self)))
Example #14
class BackupConfig(properties.Owner):

    backup_id = properties.UUID(required=True)
    from_checkpoint_id = properties.UUID(required=False)
    to_checkpoint_id = properties.UUID(default='')
    require_consistency = properties.Boolean()
    creation_time = properties.Integer(minval=0)

    def __init__(self, backup_config):
        self.backup_id = backup_config.get("backup_id")
        self.from_checkpoint_id = backup_config.get("from_checkpoint_id")
        self.to_checkpoint_id = backup_config.get("to_checkpoint_id")
        self.require_consistency = backup_config.get("require_consistency")
        self.creation_time = backup_config.get("creation_time")

        self.disks = [DiskConfig(d) for d in backup_config.get("disks", ())]
        for disk in self.disks:
            if (self.from_checkpoint_id is None
                    and disk.backup_mode == MODE_INCREMENTAL):
                raise exception.BackupError(
                    reason="Cannot start an incremental backup for disk, "
                    "full backup is requested",
                    backup=self.backup_id,
                    disk=disk)
Example #15
class CheckpointConfig(properties.Owner):
    id = properties.UUID(required=True)
    xml = properties.String()

    def __init__(self, checkpoint_config):
        self.id = checkpoint_config.get("id")
        self.xml = checkpoint_config.get("xml")
        if "config" in checkpoint_config:
            self.config = BackupConfig(checkpoint_config["config"])
        else:
            self.config = None

        if self.config is None and self.xml is None:
            raise exception.CheckpointError(
                reason="Cannot redefine checkpoint without "
                "checkpoint XML or backup config",
                checkpoint_id=self.id)
Example #16
class JobMetadata(properties.Owner):
    """
    JobMetadata - stored on external leases
    """
    type = properties.Enum(required=True, values=("JOB",))
    generation = properties.Integer(required=True,
                                    minval=0,
                                    maxval=sc.MAX_GENERATION)
    job_id = properties.UUID(required=True)
    job_status = properties.Enum(required=True,
                                 values=("PENDING", "FAILED", "SUCCEEDED",
                                         "FENCED"))

    def __init__(self, params):
        self.type = params.get("type")
        self.generation = params.get("generation")
        self.job_id = params.get("job_id")
        self.job_status = params.get("job_status")
Example #17
class StorageDomainDeviceMoveParams(properties.Owner):
    sd_id = properties.UUID(required=True)
    src_guid = properties.String(required=True)

    def __init__(self, params):
        self.sd_id = params.get('sd_id')
        self.src_guid = params.get('src_guid')
        dst_guids = params.get('dst_guids') or []

        if type(dst_guids) is not list:
            raise ValueError("dst_guids is not a list")

        # TODO: use properties.List for dst_guids when it is available.
        self.dst_guids = frozenset(dst_guids)

        for item in dst_guids:
            if not isinstance(item, six.string_types):
                raise ValueError("dst_guids item %s isn't a string" % item)

        if len(self.dst_guids) != len(dst_guids):
            raise ValueError("dst_guids contains duplicate values")

        if self.src_guid in self.dst_guids:
            raise ValueError("src_guid is in dst_guids")
Example #18
class CopyDataDivEndpoint(properties.Owner):
    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)
    generation = properties.Integer(required=False, minval=0,
                                    maxval=sc.MAX_GENERATION)

    def __init__(self, params, host_id, writable):
        self.sd_id = params.get('sd_id')
        self.img_id = params.get('img_id')
        self.vol_id = params.get('vol_id')
        self.generation = params.get('generation')
        self._host_id = host_id
        self._writable = writable
        self._vol = None

    @property
    def locks(self):
        img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
        mode = rm.EXCLUSIVE if self._writable else rm.SHARED
        ret = [rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
               rm.ResourceManagerLock(img_ns, self.img_id, mode)]
        if self._writable:
            dom = sdCache.produce_manifest(self.sd_id)
            if dom.hasVolumeLeases():
                ret.append(volume.VolumeLease(self._host_id, self.sd_id,
                                              self.img_id, self.vol_id))
        return ret

    @property
    def path(self):
        return self.volume.getVolumePath()

    def is_invalid_vm_conf_disk(self):
        return workarounds.invalid_vm_conf_disk(self.volume)

    @property
    def qemu_format(self):
        return sc.fmt2str(self.volume.getFormat())

    @property
    def backing_path(self):
        parent_vol = self.volume.getParentVolume()
        if not parent_vol:
            return None
        return volume.getBackingVolumePath(self.img_id, parent_vol.volUUID)

    @property
    def qcow2_compat(self):
        dom = sdCache.produce_manifest(self.sd_id)
        return dom.qcow2_compat()

    @property
    def backing_qemu_format(self):
        parent_vol = self.volume.getParentVolume()
        if not parent_vol:
            return None
        return sc.fmt2str(parent_vol.getFormat())

    @property
    def preallocation(self):
        dom = sdCache.produce_manifest(self.sd_id)
        if (dom.supportsSparseness and
                self.volume.getType() == sc.PREALLOCATED_VOL):
            return qemuimg.PREALLOCATION.FALLOC
        return None

    @property
    def volume(self):
        if self._vol is None:
            dom = sdCache.produce_manifest(self.sd_id)
            self._vol = dom.produceVolume(self.img_id, self.vol_id)
        return self._vol

    def volume_operation(self):
        return self.volume.operation(self.generation)

    @contextmanager
    def prepare(self):
        self.volume.prepare(rw=self._writable, justme=False)
        try:
            yield
        finally:
            self.volume.teardown(self.sd_id, self.vol_id, justme=False)
Example #19
class SubchainInfo(properties.Owner):
    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    top_id = properties.UUID(required=True)
    base_id = properties.UUID(required=True)
    base_generation = properties.Integer(required=False,
                                         minval=0,
                                         maxval=sc.MAX_GENERATION)

    def __init__(self, params, host_id):
        self.sd_id = params.get('sd_id')
        self.img_id = params.get('img_id')
        self.top_id = params.get('top_id')
        self.base_id = params.get('base_id')
        self.base_generation = params.get('base_generation')
        self.host_id = host_id
        self._base_vol = None
        self._top_vol = None
        self._chain = None

    @property
    def base_vol(self):
        if self._base_vol is None:
            dom = sdCache.produce_manifest(self.sd_id)
            self._base_vol = dom.produceVolume(self.img_id, self.base_id)
        return self._base_vol

    @property
    def top_vol(self):
        if self._top_vol is None:
            dom = sdCache.produce_manifest(self.sd_id)
            self._top_vol = dom.produceVolume(self.img_id, self.top_id)
        return self._top_vol

    @property
    def chain(self):
        if self._chain is None:
            dom = sdCache.produce_manifest(self.sd_id)
            repoPath = dom.getRepoPath()
            image_repo = image.Image(repoPath)
            chain = image_repo.getChain(self.sd_id, self.img_id)
            # When the VM is cloned from a template, the root volume of the
            # volume chain is a shared volume. Shared volumes are not returned
            # in the volume list when calling Image.getChain; hence, we have
            # to add that volume manually.
            template = chain[0].getParentVolume()
            if template is not None:
                if not template.isShared():
                    raise se.UnexpectedVolumeState(template.volUUID, "Shared",
                                                   "Not Shared")
                chain.insert(0, template)
            self._chain = [vol.volUUID for vol in chain]
        return self._chain

    @property
    def locks(self):
        img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
        ret = [
            rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
            rm.ResourceManagerLock(img_ns, self.img_id, rm.EXCLUSIVE)
        ]
        dom = sdCache.produce_manifest(self.sd_id)
        if dom.hasVolumeLeases():
            # We take only the base lease since no other volumes are modified
            ret.append(
                volume.VolumeLease(self.host_id, self.sd_id, self.img_id,
                                   self.base_id))
        return ret

    def validate(self):
        if self.base_id not in self.chain:
            raise se.VolumeIsNotInChain(self.sd_id, self.img_id, self.base_id)

        if self.top_id not in self.chain:
            raise se.VolumeIsNotInChain(self.sd_id, self.img_id, self.top_id)

        # Validate that top volume is the parent of the base.
        if self.top_vol.getParent() != self.base_id:
            raise se.WrongParentVolume(self.base_id, self.top_id)

        if self.base_vol.isShared():
            raise se.SharedVolumeNonWritable(self.base_vol)

        if self.top_vol.isShared():
            raise se.SharedVolumeNonWritable(self.top_vol)

    def volume_operation(self):
        return self.base_vol.operation(requested_gen=self.base_generation,
                                       set_illegal=False)

    @contextmanager
    def prepare(self):
        top_index = self.chain.index(self.top_id)
        chain_to_prepare = self.chain[:top_index + 1]
        dom = sdCache.produce_manifest(self.sd_id)
        for vol_id in chain_to_prepare:
            vol = dom.produceVolume(self.img_id, vol_id)
            rw = vol_id == self.base_id
            # TODO: improve this later to use subchain.top_vol and
            # subchain.base_vol.
            vol.prepare(rw=rw, justme=True)
        try:
            yield
        finally:
            self.top_vol.teardown(self.sd_id, self.top_id)

    def __repr__(self):
        return ("<SubchainInfo sd_id=%s, img_id=%s, top_id=%s, base_id=%s "
                "base_generation=%s at 0x%x>") % (
                    self.sd_id,
                    self.img_id,
                    self.top_id,
                    self.base_id,
                    self.base_generation,  # May be None
                    id(self),
                )
Example #20
class Cls(properties.Owner):
    value = properties.UUID(required=True)
Example #21
class Cls(properties.Owner):
    value = properties.UUID(default="00000000-0000-0000-0000-000000000000")
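The two minimal classes above show bare property declarations. Below is a short sketch of how such a class is typically exercised, assuming the descriptors live in vdsm.common.properties and raise ValueError on assignment for a missing required value or a malformed UUID string:

# Sketch only: assumes properties.UUID validates values on assignment.
from vdsm.common import properties


class Cls(properties.Owner):
    value = properties.UUID(required=True)

    def __init__(self, params):
        self.value = params.get("value")


# A well-formed UUID string is accepted.
ok = Cls({"value": "12345678-1234-1234-1234-123456789012"})

# A missing required value (or a non-UUID string) is expected to fail
# validation with ValueError.
try:
    Cls({"value": None})
except ValueError as exc:
    print("rejected:", exc)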
Example #22
class Snapshot(properties.Owner):
    job_uuid = properties.UUID(required=True)

    def __init__(self, vm, snap_drives, memory_params, frozen, job_uuid):
        self.vm = vm
        self.snap_drives = snap_drives
        self.memory_params = memory_params
        self.frozen = frozen
        self.job_uuid = job_uuid
        # When creating a memory snapshot, libvirt will pause the VM.
        self.should_freeze = not (self.memory_params or self.frozen)
        self._snapshot_job = {
            "jobUUID": job_uuid,
            "frozen": self.frozen,
            "memoryParams": self.memory_params
        }
        if self.snap_drives is not None:
            # Regular flow, not recovery
            self.vm.update_snapshot_metadata(self._snapshot_job)

    def _finalize_vm(self, memory_vol):
        try:
            self.vm.drive_monitor.enable()
            if self.memory_params:
                self.vm.cif.teardownVolumePath(memory_vol)
            if config.getboolean('vars', 'time_sync_snapshot_enable'):
                self.vm.syncGuestTime()
        finally:
            # Cleaning snapshot job metadata
            self._snapshot_job = None
            self.vm.update_snapshot_metadata(self._snapshot_job)

    def teardown(self, memory_vol_path, memory_vol, new_drives, vm_drives):
        self.vm.log.info('Starting snapshot teardown')
        result = True

        def pad_memory_volume(memory_vol_path, sd_uuid):
            sd_type = sd.name2type(
                self.vm.cif.irs.getStorageDomainInfo(sd_uuid)['info']['type'])
            if sd_type in sd.FILE_DOMAIN_TYPES:
                iop = oop.getProcessPool(sd_uuid)
                iop.fileUtils.padToBlockSize(memory_vol_path)

        try:
            # Must always thaw, even if freeze failed, in case the guest
            # did freeze the filesystems but failed to reply in time.
            # Libvirt uses the same logic (see src/qemu/qemu_driver.c).
            if self.should_freeze:
                self.vm.thaw()

            # We are padding the memory volume with a block size of zeroes
            # because qemu-img truncates files such that their size is
            # rounded down to the closest multiple of the block size
            # (bz 970559). This code should be removed once qemu-img handles
            # files whose size is not a multiple of the block size correctly.
            if self.memory_params:
                pad_memory_volume(memory_vol_path, memory_vol['domainID'])

            for drive in new_drives.values():
                # Update the drive information
                _, old_volume_id = vm_drives[drive["name"]]
                try:
                    self.vm.updateDriveParameters(drive)
                except Exception:
                    # Here it's too late to fail; the switch already happened
                    # and there's nothing we can do, we must proceed anyway
                    # to report the live snapshot success.
                    self.vm.log.exception(
                        "Failed to update drive information"
                        " for '%s'", drive)

                drive_obj = lookup.drive_by_name(self.vm.getDiskDevices()[:],
                                                 drive["name"])
                self.vm.clear_drive_threshold(drive_obj, old_volume_id)

                try:
                    self.vm.updateDriveVolume(drive_obj)
                except vdsm.virt.vm.StorageUnavailableError as e:
                    # Will be recovered on the next monitoring cycle
                    self.vm.log.error(
                        "Unable to update drive %r "
                        "volume size: %s", drive["name"], e)
        except Exception as e:
            self.vm.log.error(
                "Snapshot teardown error: %s, "
                "trying to continue teardown", e)
            result = False
        finally:
            self._finalize_vm(memory_vol)
        return result

    def __repr__(self):
        return ("<%s vm=%s job=%s 0x%s>" %
                (self.__class__.__name__, self.vm.id, self.job_uuid, id(self)))

    def snapshot(self):
        """Live snapshot command"""
        def norm_snap_drive_params(drive):
            """Normalize snapshot parameters"""

            if "baseVolumeID" in drive:
                base_drv = {
                    "device": "disk",
                    "domainID": drive["domainID"],
                    "imageID": drive["imageID"],
                    "volumeID": drive["baseVolumeID"]
                }
                target_drv = base_drv.copy()
                target_drv["volumeID"] = drive["volumeID"]

            elif "baseGUID" in drive:
                base_drv = {"GUID": drive["baseGUID"]}
                target_drv = {"GUID": drive["GUID"]}

            elif "baseUUID" in drive:
                base_drv = {"UUID": drive["baseUUID"]}
                target_drv = {"UUID": drive["UUID"]}

            else:
                base_drv, target_drv = (None, None)

            return base_drv, target_drv

        def rollback_drives(new_drives):
            """Rollback the prepared volumes for the snapshot"""

            for vm_dev_name, drive in new_drives.items():
                try:
                    self.vm.cif.teardownVolumePath(drive)
                except Exception:
                    self.vm.log.exception("Unable to teardown drive: %s",
                                          vm_dev_name)

        def memory_snapshot(memory_volume_path):
            """Libvirt snapshot XML"""

            return vmxml.Element('memory',
                                 snapshot='external',
                                 file=memory_volume_path)

        def vm_conf_for_memory_snapshot():
            """Returns the needed vm configuration with the memory snapshot"""

            return {
                'restoreFromSnapshot': True,
                '_srcDomXML': self.vm.migratable_domain_xml(),
                'elapsedTimeOffset': time.time() - self.vm.start_time
            }

        snap = vmxml.Element('domainsnapshot')
        disks = vmxml.Element('disks')
        new_drives = {}
        vm_drives = {}

        for drive in self.snap_drives:
            base_drv, tget_drv = norm_snap_drive_params(drive)

            try:
                self.vm.findDriveByUUIDs(tget_drv)
            except LookupError:
                # The vm is not already using the requested volume for the
                # snapshot, continuing.
                pass
            else:
                # The snapshot volume is the current one, skipping
                self.vm.log.debug("The volume is already in use: %s", tget_drv)
                continue  # Next drive

            try:
                vm_drive = self.vm.findDriveByUUIDs(base_drv)
            except LookupError:
                # The volume we want to snapshot doesn't exist
                self.vm.log.error("The base volume doesn't exist: %s",
                                  base_drv)
                return response.error('snapshotErr')

            if vm_drive.hasVolumeLeases:
                self.vm.log.error('disk %s has volume leases', vm_drive.name)
                return response.error('noimpl')

            if vm_drive.transientDisk:
                self.vm.log.error('disk %s is a transient disk', vm_drive.name)
                return response.error('transientErr')

            vm_dev_name = vm_drive.name

            new_drives[vm_dev_name] = tget_drv.copy()
            new_drives[vm_dev_name]["type"] = "disk"
            new_drives[vm_dev_name]["diskType"] = vm_drive.diskType
            new_drives[vm_dev_name]["poolID"] = vm_drive.poolID
            new_drives[vm_dev_name]["name"] = vm_dev_name
            new_drives[vm_dev_name]["format"] = "cow"

            # We need to keep track of the drive object because it keeps the
            # original data and is used to generate the snapshot element.
            # We keep the old volume ID so we can clear the block threshold.
            vm_drives[vm_dev_name] = (vm_drive, base_drv["volumeID"])

        prepared_drives = {}

        for vm_dev_name, vm_device in new_drives.items():
            # Add the device before requesting to prepare it, as we want to
            # be sure to tear it down even when prepareVolumePath fails for
            # some unknown issue that leaves the volume active.
            prepared_drives[vm_dev_name] = vm_device
            try:
                new_drives[vm_dev_name]["path"] = \
                    self.vm.cif.prepareVolumePath(new_drives[vm_dev_name])
            except Exception:
                self.vm.log.exception(
                    'unable to prepare the volume path for '
                    'disk %s', vm_dev_name)
                rollback_drives(prepared_drives)
                return response.error('snapshotErr')

            drive, _ = vm_drives[vm_dev_name]
            snapelem = drive.get_snapshot_xml(vm_device)
            disks.appendChild(snapelem)

        snap.appendChild(disks)

        snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT
                      | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA)

        if self.memory_params:
            # Save the needed vm configuration
            # TODO: this, like other places that use pickle.dump
            # directly to files, should be done with outOfProcess
            vm_conf_vol = self.memory_params['dstparams']
            vm_conf_vol_path = self.vm.cif.prepareVolumePath(vm_conf_vol)
            try:
                with open(vm_conf_vol_path, "rb+") as f:
                    vm_conf = vm_conf_for_memory_snapshot()
                    # protocol=2 is needed for clusters < 4.4
                    # (for Python 2 host compatibility)
                    data = pickle.dumps(vm_conf, protocol=2)

                    # Ensure that the volume is aligned; qemu-img may segfault
                    # when converting unaligned images.
                    # https://bugzilla.redhat.com/1649788
                    aligned_length = utils.round(len(data), 4096)
                    data = data.ljust(aligned_length, b"\0")

                    f.write(data)
                    f.flush()
                    os.fsync(f.fileno())
            finally:
                self.vm.cif.teardownVolumePath(vm_conf_vol)

            # Adding the memory volume to the snapshot xml
            memory_vol = self.memory_params['dst']
            memory_vol_path = self.vm.cif.prepareVolumePath(memory_vol)
            snap.appendChild(memory_snapshot(memory_vol_path))
        else:
            memory_vol = memory_vol_path = None
            snap_flags |= libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY

        snapxml = xmlutils.tostring(snap)
        # TODO: this is debug information. For 3.6.x we still need to
        # see the XML even with 'info' as default level.
        self.vm.log.info("%s", snapxml)

        self._snapshot_job['memoryVolPath'] = memory_vol_path
        self._snapshot_job['memoryVol'] = memory_vol
        self._snapshot_job['newDrives'] = new_drives
        vm_drives_serialized = {}
        for k, v in vm_drives.items():
            vm_drives_serialized[k] = [xmlutils.tostring(v[0].getXML()), v[1]]
        self._snapshot_job['vmDrives'] = vm_drives_serialized
        self.vm.update_snapshot_metadata(self._snapshot_job)

        # We need to stop drive monitoring for two reasons: to prevent
        # spurious libvirt errors about missing drive paths (since we're
        # changing them), and to prevent triggering a drive extension for the
        # new volume with the apparent size of the old one (the apparentsize
        # is updated as the last step in updateDriveParameters).
        self.vm.drive_monitor.disable()

        try:
            if self.should_freeze:
                self.vm.freeze()
            try:
                self.vm.log.info(
                    "Taking a live snapshot (drives=%s,"
                    "memory=%s)",
                    ', '.join(drive["name"] for drive in new_drives.values()),
                    self.memory_params is not None)
                self.vm.run_dom_snapshot(snapxml, snap_flags)
                self.vm.log.info("Completed live snapshot")
            except libvirt.libvirtError:
                self.vm.log.exception("Unable to take snapshot")
                if self.should_freeze:
                    self.vm.thaw()
                return response.error('snapshotErr')
        except:
            # In case the VM was shutdown in the middle of the snapshot
            # operation we keep doing the finalizing and reporting the failure.
            self._finalize_vm(memory_vol)
            res = False
        else:
            res = self.teardown(memory_vol_path, memory_vol, new_drives,
                                vm_drives)
        if not res:
            raise RuntimeError("Failed to execute snapshot, "
                               "considering the operation as failure")
Example #23
class CopyDataDivEndpoint(properties.Owner):
    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)
    generation = properties.Integer(required=False,
                                    minval=0,
                                    maxval=sc.MAX_GENERATION)
    prepared = properties.Boolean(default=False)

    def __init__(self,
                 params,
                 host_id,
                 writable,
                 is_destination=False,
                 lock_image=True):
        self.sd_id = params.get('sd_id')
        self.img_id = params.get('img_id')
        self.vol_id = params.get('vol_id')
        self.generation = params.get('generation')
        self.prepared = params.get('prepared')
        self.is_destination = is_destination
        self.lock_image = lock_image
        self._host_id = host_id
        self._writable = writable
        self._vol = None

    @property
    def locks(self):
        # A shared lock is always required
        ret = [rm.Lock(sc.STORAGE, self.sd_id, rm.SHARED)]

        # The image lock is taken only when the source and destination
        # images are not the same; otherwise there would be a deadlock.
        if self.lock_image:
            img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
            mode = rm.EXCLUSIVE if self._writable else rm.SHARED
            ret.append(rm.Lock(img_ns, self.img_id, mode))

        if self._writable:
            dom = sdCache.produce_manifest(self.sd_id)
            if dom.hasVolumeLeases():
                ret.append(
                    volume.VolumeLease(self._host_id, self.sd_id, self.img_id,
                                       self.vol_id))
        return ret

    @property
    def path(self):
        return self.volume.getVolumePath()

    def is_invalid_vm_conf_disk(self):
        return workarounds.invalid_vm_conf_disk(self.volume)

    @property
    def qemu_format(self):
        return sc.fmt2str(self.volume.getFormat())

    @property
    def backing_path(self):
        parent_vol = self.volume.getParentVolume()
        if not parent_vol:
            return None
        return volume.getBackingVolumePath(self.img_id, parent_vol.volUUID)

    @property
    def qcow2_compat(self):
        dom = sdCache.produce_manifest(self.sd_id)
        return dom.qcow2_compat()

    @property
    def backing_qemu_format(self):
        parent_vol = self.volume.getParentVolume()
        if not parent_vol:
            return None
        return sc.fmt2str(parent_vol.getFormat())

    @property
    def recommends_unordered_writes(self):
        dom = sdCache.produce_manifest(self.sd_id)
        return dom.recommends_unordered_writes(self.volume.getFormat())

    @property
    def requires_create(self):
        return self.volume.requires_create()

    @property
    def zero_initialized(self):
        return self.volume.zero_initialized()

    @property
    def volume(self):
        if self._vol is None:
            dom = sdCache.produce_manifest(self.sd_id)
            self._vol = dom.produceVolume(self.img_id, self.vol_id)
        return self._vol

    def volume_operation(self):
        return self.volume.operation(self.generation)

    @contextmanager
    def prepare(self):
        if self.prepared:
            yield
        else:
            self.volume.prepare(rw=self._writable,
                                justme=False,
                                allow_illegal=self.is_destination)
            try:
                yield
            finally:
                self.volume.teardown(self.sd_id, self.vol_id, justme=False)