def __init__(self, backup_config):
    self.backup_id = backup_config.get("backup_id")
    self.from_checkpoint_id = backup_config.get("from_checkpoint_id")
    self.to_checkpoint_id = backup_config.get("to_checkpoint_id")
    self.parent_checkpoint_id = backup_config.get("parent_checkpoint_id")

    if (self.from_checkpoint_id is not None
            and self.parent_checkpoint_id is None):
        raise exception.BackupError(
            reason="Cannot start an incremental backup without "
                   "parent_checkpoint_id",
            backup=self.backup_id)

    self.disks = [DiskConfig(d) for d in backup_config.get("disks", ())]
    if not self.disks:
        raise exception.BackupError(
            reason="Cannot start a backup without disks",
            backup=self.backup_id)

    for disk in self.disks:
        if (self.from_checkpoint_id is None
                and disk.backup_mode == MODE_INCREMENTAL):
            raise exception.BackupError(
                reason="Cannot start an incremental backup for disk, "
                       "full backup is requested",
                backup=self.backup_id,
                disk=disk)
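# Illustrative sketch (not part of the module): a backup_config dict that
# passes the validation in the constructor above. Only the top-level keys
# read by __init__ are taken from the code; the contents of each disk dict
# fed to DiskConfig are hypothetical.
#
#   config = {
#       "backup_id": "b1",
#       "from_checkpoint_id": "cp1",     # incremental backup
#       "parent_checkpoint_id": "cp1",   # required when from_checkpoint_id is set
#       "to_checkpoint_id": "cp2",
#       "disks": [{...}],                # hypothetical DiskConfig input
#   }
#   backup_cfg = BackupConfig(config)
#
# Omitting "parent_checkpoint_id" while setting "from_checkpoint_id", or
# passing an empty "disks" list, raises exception.BackupError with the
# corresponding reason.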
def start_backup(vm, dom, config):
    backup_cfg = BackupConfig(config)

    if not backup_cfg.disks:
        raise exception.BackupError(
            reason="Cannot start a backup without disks",
            backup=backup_cfg.backup_id)

    drives = _get_disks_drives(vm, backup_cfg)

    path = socket_path(backup_cfg.backup_id)
    nbd_addr = nbdutils.UnixAddress(path)

    # Create scratch disk for each drive.
    _create_scratch_disks(vm, dom, backup_cfg.backup_id, drives)

    try:
        res = vm.freeze()
        if response.is_error(res) and backup_cfg.require_consistency:
            raise exception.BackupError(
                reason="Failed to freeze VM: {}".format(
                    res["status"]["message"]),
                vm_id=vm.id,
                backup=backup_cfg)

        backup_xml = create_backup_xml(
            nbd_addr, drives, backup_cfg.from_checkpoint_id)
        checkpoint_xml = create_checkpoint_xml(backup_cfg, drives)

        vm.log.info(
            "Starting backup for backup_id: %r, "
            "backup xml: %s\ncheckpoint xml: %s",
            backup_cfg.backup_id, backup_xml, checkpoint_xml)

        _begin_backup(vm, dom, backup_cfg, backup_xml, checkpoint_xml)
    except:
        # Remove all the created scratch disks.
        _remove_scratch_disks(vm, backup_cfg.backup_id)
        raise
    finally:
        # Must always thaw, even if freeze failed; in case the guest
        # did freeze the filesystems, but failed to reply in time.
        # Libvirt is using same logic (see src/qemu/qemu_driver.c).
        vm.thaw()

    disks_urls = {
        img_id: nbd_addr.url(drive.name)
        for img_id, drive in six.iteritems(drives)
    }

    result = {'disks': disks_urls}

    if backup_cfg.to_checkpoint_id is not None:
        _add_checkpoint_xml(
            vm, dom, backup_cfg.backup_id, backup_cfg.to_checkpoint_id,
            result)

    return dict(result=result)
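# Illustrative sketch (not part of the module): the shape of the value
# returned by start_backup() above. The image IDs and URL placeholders are
# hypothetical; the code only guarantees a mapping of image id to
# nbd_addr.url(drive.name) under result["disks"]. The key added by
# _add_checkpoint_xml() when to_checkpoint_id is set is not shown here,
# since its name is not visible in this code.
#
#   {
#       "result": {
#           "disks": {
#               "<img-id-1>": "<NBD unix-socket URL for drive 1>",
#               "<img-id-2>": "<NBD unix-socket URL for drive 2>",
#           }
#       }
#   }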
def start_backup(vm, dom, config):
    backup_cfg = BackupConfig(config)

    if (backup_cfg.from_checkpoint_id is not None or
            backup_cfg.to_checkpoint_id is not None):
        raise exception.BackupError(
            reason="Incremental backup not supported yet",
            vm_id=vm.id,
            backup=backup_cfg)

    try:
        drives = _get_disks_drives(vm, backup_cfg.disks)
    except LookupError as e:
        raise exception.BackupError(
            reason="Failed to find one of the backup disks: {}".format(e),
            vm_id=vm.id,
            backup=backup_cfg)

    # TODO: We need to create a vm directory in /run/vdsm/backup for each
    # vm backup socket. This way we can prevent vms from accessing other
    # vms backup socket with selinux.
    socket_path = os.path.join(P_BACKUP, backup_cfg.backup_id)
    nbd_addr = nbdutils.UnixAddress(socket_path)

    # Create scratch disk for each drive.
    scratch_disks = _create_scratch_disks(
        vm, dom, backup_cfg.backup_id, drives)

    backup_xml = create_backup_xml(nbd_addr, drives, scratch_disks)
    vm.log.debug("VM backup XML request: %s", backup_xml)
    vm.log.info("Starting backup for backup_id: %r", backup_cfg.backup_id)

    checkpoint_xml = None
    # pylint: disable=no-member
    flags = libvirt.VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL
    try:
        dom.backupBegin(backup_xml, checkpoint_xml, flags=flags)
    except libvirt.libvirtError as e:
        # Remove all the created scratch disks.
        _remove_scratch_disks(vm, backup_cfg.backup_id)
        raise exception.BackupError(
            reason="Error starting backup: {}".format(e),
            vm_id=vm.id,
            backup=backup_cfg)

    disks_urls = {
        img_id: nbd_addr.url(drive.name)
        for img_id, drive in six.iteritems(drives)}

    return {'result': {'disks': disks_urls}}
def start_backup(vm, dom, config):
    backup_cfg = BackupConfig(config)

    if (backup_cfg.from_checkpoint_id is not None or
            backup_cfg.to_checkpoint_id is not None):
        raise exception.BackupError(
            reason="Incremental backup not supported yet",
            vm_id=vm.id,
            backup=backup_cfg)

    try:
        drives = _get_disks_drives(vm, backup_cfg.disks)
    except LookupError as e:
        raise exception.BackupError(
            reason="Failed to find one of the backup disks: {}".format(e),
            vm_id=vm.id,
            backup=backup_cfg)

    path = socket_path(backup_cfg.backup_id)
    nbd_addr = nbdutils.UnixAddress(path)

    # Create scratch disk for each drive.
    scratch_disks = _create_scratch_disks(
        vm, dom, backup_cfg.backup_id, drives)

    try:
        vm.freeze()
        backup_xml = create_backup_xml(nbd_addr, drives, scratch_disks)
        vm.log.info(
            "Starting backup for backup_id: %r, backup xml: %s",
            backup_cfg.backup_id, backup_xml)
        checkpoint_xml = None
        _begin_backup(vm, dom, backup_cfg, backup_xml, checkpoint_xml)
    except:
        # Remove all the created scratch disks.
        _remove_scratch_disks(vm, backup_cfg.backup_id)
        raise
    finally:
        # Must always thaw, even if freeze failed; in case the guest
        # did freeze the filesystems, but failed to reply in time.
        # Libvirt is using same logic (see src/qemu/qemu_driver.c).
        vm.thaw()

    disks_urls = {
        img_id: nbd_addr.url(drive.name)
        for img_id, drive in six.iteritems(drives)
    }

    return {'result': {'disks': disks_urls}}
def _get_drive_capacity(dom, drive):
    try:
        capacity, _, _ = dom.blockInfo(drive.path)
        return capacity
    except libvirt.libvirtError as e:
        raise exception.BackupError(
            reason="Failed to get drive {} capacity: {}".format(
                drive.name, e))
def __init__(self, backup_config):
    self.backup_id = backup_config.get("backup_id")
    self.from_checkpoint_id = backup_config.get("from_checkpoint_id")
    self.to_checkpoint_id = backup_config.get("to_checkpoint_id")
    self.parent_checkpoint_id = backup_config.get("parent_checkpoint_id")

    if (self.from_checkpoint_id is not None
            and self.parent_checkpoint_id is None):
        raise exception.BackupError(
            reason="Cannot start an incremental backup without "
                   "parent_checkpoint_id",
            backup=self.backup_id)

    self.disks = [DiskConfig(d) for d in backup_config.get("disks", ())]
    if not self.disks:
        raise exception.BackupError(
            reason="Cannot start a backup without disks",
            backup=self.backup_id)
def __init__(self, backup_config):
    self.backup_id = backup_config.get("backup_id")
    self.from_checkpoint_id = backup_config.get("from_checkpoint_id")
    self.to_checkpoint_id = backup_config.get("to_checkpoint_id")

    self.disks = [DiskConfig(d) for d in backup_config.get("disks", ())]
    if not self.disks:
        raise exception.BackupError(
            reason="Cannot start a backup without disks",
            backup=self.backup_id)
def _begin_backup(vm, dom, backup_cfg, backup_xml, checkpoint_xml):
    # pylint: disable=no-member
    flags = libvirt.VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL
    try:
        dom.backupBegin(backup_xml, checkpoint_xml, flags=flags)
    except libvirt.libvirtError as e:
        raise exception.BackupError(
            reason="Error starting backup: {}".format(e),
            vm_id=vm.id,
            backup=backup_cfg)
def stop_backup(vm, dom, backup_id):
    if _backup_exists(vm, dom, backup_id):
        try:
            dom.abortJob()
        except libvirt.libvirtError as e:
            if e.get_error_code() != libvirt.VIR_ERR_OPERATION_INVALID:
                raise exception.BackupError(
                    reason="Failed to end VM backup: {}".format(e),
                    vm_id=vm.id,
                    backup_id=backup_id)

    _remove_scratch_disks(vm, backup_id)
def _create_transient_disk(vm, dom, backup_id, drive):
    disk_name = "{}.{}".format(backup_id, drive.name)
    drive_size = _get_drive_capacity(dom, drive)

    res = vm.cif.irs.create_transient_disk(
        owner_name=vm.id, disk_name=disk_name, size=drive_size)
    if response.is_error(res):
        raise exception.BackupError(
            reason='Failed to create transient disk: {}'.format(res),
            vm_id=vm.id,
            backup_id=backup_id,
            drive_name=drive.name)

    return res['result']['path']
def __init__(self, backup_config):
    self.backup_id = backup_config.get("backup_id")
    self.from_checkpoint_id = backup_config.get("from_checkpoint_id")
    self.to_checkpoint_id = backup_config.get("to_checkpoint_id")
    self.require_consistency = backup_config.get("require_consistency")
    self.creation_time = backup_config.get("creation_time")
    self.disks = [DiskConfig(d) for d in backup_config.get("disks", ())]

    for disk in self.disks:
        if (self.from_checkpoint_id is None
                and disk.backup_mode == MODE_INCREMENTAL):
            raise exception.BackupError(
                reason="Cannot start an incremental backup for disk, "
                       "full backup is requested",
                backup=self.backup_id,
                disk=disk)
def _remove_scratch_disks(vm, backup_id):
    log.info("Removing scratch disks for backup id: %s", backup_id)

    res = vm.cif.irs.list_transient_disks(vm.id)
    if response.is_error(res):
        raise exception.BackupError(
            reason="Failed to fetch scratch disks: {}".format(res),
            vm_id=vm.id,
            backup_id=backup_id)

    for disk_name in res['result']:
        res = vm.cif.irs.remove_transient_disk(vm.id, disk_name)
        if response.is_error(res):
            log.error(
                "Failed to remove backup '%s' "
                "scratch disk for drive name: %s",
                backup_id, disk_name)
def _get_backup_xml(vm_id, dom, backup_id):
    try:
        backup_xml = dom.backupGetXMLDesc()
    except libvirt.libvirtError as e:
        if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN_BACKUP:
            raise exception.NoSuchBackupError(
                reason="VM backup does not exist: {}".format(e),
                vm_id=vm_id,
                backup_id=backup_id)

        raise exception.BackupError(
            reason="Failed to fetch VM backup info: {}".format(e),
            vm_id=vm_id,
            backup_id=backup_id)

    return backup_xml
def _begin_backup(vm, dom, backup_cfg, backup_xml, checkpoint_xml):
    flags = libvirt.VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL
    try:
        dom.backupBegin(backup_xml, checkpoint_xml, flags=flags)
    except libvirt.libvirtError as e:
        if e.get_error_code() == libvirt.VIR_ERR_CHECKPOINT_INCONSISTENT:
            raise exception.InconsistentCheckpointError(
                reason="Checkpoint can't be used: {}".format(e),
                vm_id=vm.id,
                backup=backup_cfg,
                checkpoint_xml=checkpoint_xml)

        raise exception.BackupError(
            reason="Error starting backup: {}".format(e),
            vm_id=vm.id,
            backup=backup_cfg)
def _get_backup_disks(vm, backup_cfg):
    backup_disks = {}
    try:
        for disk in backup_cfg.disks:
            drive = vm.findDriveByUUIDs({
                'domainID': disk.dom_id,
                'imageID': disk.img_id,
                'volumeID': disk.vol_id
            })
            backup_disks[disk.img_id] = BackupDisk(
                drive, disk.backup_mode, disk.scratch_disk)
    except LookupError as e:
        raise exception.BackupError(
            reason="Failed to find one of the backup disks: {}".format(e),
            vm_id=vm.id,
            backup=backup_cfg)

    return backup_disks
def stop_backup(vm, dom, backup_id):
    try:
        _get_backup_xml(vm.id, dom, backup_id)
    except exception.NoSuchBackupError:
        vm.log.info(
            "No backup with id '%s' found for vm '%s'", backup_id, vm.id)
        _remove_scratch_disks(vm, backup_id)
        return

    try:
        dom.abortJob()
    except libvirt.libvirtError as e:
        if e.get_error_code() != libvirt.VIR_ERR_OPERATION_INVALID:
            raise exception.BackupError(
                reason="Failed to end VM backup: {}".format(e),
                vm_id=vm.id,
                backup_id=backup_id)

    _remove_scratch_disks(vm, backup_id)
def _begin_backup(vm, dom, backup_cfg, backup_xml, checkpoint_xml):
    # pylint: disable=no-member
    flags = libvirt.VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL
    try:
        dom.backupBegin(backup_xml, checkpoint_xml, flags=flags)
    except libvirt.libvirtError as e:
        # TODO: Simplify when libvirt 6.6.0-9 is required on centos.
        if e.get_error_code() == getattr(
                libvirt, "VIR_ERR_CHECKPOINT_INCONSISTENT", None):
            raise exception.InconsistentCheckpointError(
                reason="Checkpoint can't be used: {}".format(e),
                vm_id=vm.id,
                backup=backup_cfg,
                checkpoint_xml=checkpoint_xml)

        raise exception.BackupError(
            reason="Error starting backup: {}".format(e),
            vm_id=vm.id,
            backup=backup_cfg)
def _raise_parse_error(vm_id, backup_id, backup_xml):
    raise exception.BackupError(
        reason="Failed to parse invalid libvirt backup XML: {}".format(
            backup_xml),
        vm_id=vm_id,
        backup_id=backup_id)