def start_server(server_id, config):
    cfg = ServerConfig(config)
    dom = sdCache.produce_manifest(cfg.sd_id)
    vol = dom.produceVolume(cfg.img_id, cfg.vol_id)

    if vol.isShared() and not cfg.readonly:
        raise se.SharedVolumeNonWritable(vol)

    _create_rundir()

    sock = _socket_path(server_id)
    log.info(
        "Starting transient service %s, serving volume %s/%s via unix "
        "socket %s",
        _service_name(server_id), cfg.sd_id, cfg.vol_id, sock)

    qemu_nbd_config = QemuNBDConfig(
        format=sc.fmt2str(vol.getFormat()),
        readonly=cfg.readonly,
        discard=cfg.discard,
        path=vol.getVolumePath())

    start_transient_service(server_id, qemu_nbd_config)

    if not _wait_for_socket(sock, 1.0):
        raise Timeout("Timeout starting NBD server {}: {}".format(
            server_id, config))

    unix_address = nbdutils.UnixAddress(sock)
    return unix_address.url()
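
# Example usage of start_server() above; a sketch only, the UUIDs and any
# config keys beyond the ServerConfig fields read by the function are
# hypothetical:
#
#   url = start_server("nbd-server-1", {
#       "sd_id": "sd-uuid",
#       "img_id": "img-uuid",
#       "vol_id": "vol-uuid",
#       "readonly": False,
#       "discard": True,
#   })
#   # url is a unix-socket NBD URL such as "nbd:unix:/path/to/socket",
#   # built by nbdutils.UnixAddress(sock).url().
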
def test_incremental_backup_xml(tmp_backupdir):
    # Drives must be sorted for the disks to appear
    # each time in the same order in the backup XML.
    drives = collections.OrderedDict()
    drives["img-id-1"] = FakeDrive("sda", "img-id-1")
    drives["img-id-2"] = FakeDrive("vda", "img-id-2")

    socket_path = backup.socket_path(BACKUP_ID)
    addr = nbdutils.UnixAddress(socket_path)

    backup_xml = backup.create_backup_xml(
        addr, drives, FAKE_SCRATCH_DISKS,
        from_checkpoint_id=FROM_CHECKPOINT_ID)

    expected_xml = """
        <domainbackup mode='pull'>
            <incremental>{}</incremental>
            <server transport='unix' socket='{}'/>
            <disks>
                <disk name='sda' type='file'>
                    <scratch file='/path/to/scratch_sda'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
                <disk name='vda' type='file'>
                    <scratch file='/path/to/scratch_vda'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """.format(FROM_CHECKPOINT_ID, socket_path)
    assert xmlutils.indented(expected_xml) == xmlutils.indented(backup_xml)
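
# FakeDrive is a test helper defined elsewhere in the test module; a minimal
# sketch consistent with how it is used here and in verify_backup_urls()
# below (only .name and .imageID are needed):
#
#   FakeDrive = collections.namedtuple("FakeDrive", ["name", "imageID"])
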
def test_backup_xml(tmp_backupdir):
    backup_id = 'backup_id'

    # Drives must be sorted for the disks to appear
    # each time in the same order in the backup XML.
    drives = collections.OrderedDict()
    drives["img-id-1"] = FakeDrive("sda", "img-id-1")
    drives["img-id-2"] = FakeDrive("vda", "img-id-2")

    socket_path = os.path.join(backup.P_BACKUP, backup_id)
    addr = nbdutils.UnixAddress(socket_path)

    backup_xml = backup.create_backup_xml(addr, drives, FAKE_SCRATCH_DISKS)

    expected_xml = """
        <domainbackup mode='pull'>
            <server transport='unix' socket='{}'/>
            <disks>
                <disk name='sda' type='file'>
                    <scratch file='/path/to/scratch_sda'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
                <disk name='vda' type='file'>
                    <scratch file='/path/to/scratch_vda'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """.format(socket_path)
    assert indented(expected_xml) == indented(backup_xml)
def start_backup(vm, dom, config):
    backup_cfg = BackupConfig(config)

    if not backup_cfg.disks:
        raise exception.BackupError(
            reason="Cannot start a backup without disks",
            backup=backup_cfg.backup_id)

    drives = _get_disks_drives(vm, backup_cfg)

    path = socket_path(backup_cfg.backup_id)
    nbd_addr = nbdutils.UnixAddress(path)

    # Create a scratch disk for each drive.
    _create_scratch_disks(vm, dom, backup_cfg.backup_id, drives)

    try:
        res = vm.freeze()
        if response.is_error(res) and backup_cfg.require_consistency:
            raise exception.BackupError(
                reason="Failed to freeze VM: {}".format(
                    res["status"]["message"]),
                vm_id=vm.id,
                backup=backup_cfg)

        backup_xml = create_backup_xml(
            nbd_addr, drives, backup_cfg.from_checkpoint_id)
        checkpoint_xml = create_checkpoint_xml(backup_cfg, drives)

        vm.log.info(
            "Starting backup for backup_id: %r, "
            "backup xml: %s\ncheckpoint xml: %s",
            backup_cfg.backup_id, backup_xml, checkpoint_xml)

        _begin_backup(vm, dom, backup_cfg, backup_xml, checkpoint_xml)
    except:
        # Remove all the created scratch disks.
        _remove_scratch_disks(vm, backup_cfg.backup_id)
        raise
    finally:
        # Must always thaw, even if freeze failed, in case the guest
        # did freeze the filesystems but failed to reply in time.
        # Libvirt uses the same logic (see src/qemu/qemu_driver.c).
        vm.thaw()

    disks_urls = {
        img_id: nbd_addr.url(drive.name)
        for img_id, drive in six.iteritems(drives)
    }

    result = {'disks': disks_urls}

    if backup_cfg.to_checkpoint_id is not None:
        _add_checkpoint_xml(
            vm, dom, backup_cfg.backup_id, backup_cfg.to_checkpoint_id,
            result)

    return dict(result=result)
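
# The value returned by start_backup() maps each backed-up image to its NBD
# export URL; a sketch of the shape, assuming the socket path produced by
# socket_path() looks like the one shown (UUIDs and paths are hypothetical):
#
#   {"result": {"disks": {
#       "img-id-1": "nbd:unix:/run/vdsm/backup/backup-id:exportname=sda",
#       "img-id-2": "nbd:unix:/run/vdsm/backup/backup-id:exportname=vda",
#   }}}
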
def start_server(server_id, config):
    cfg = ServerConfig(config)
    dom = sdCache.produce_manifest(cfg.sd_id)
    vol = dom.produceVolume(cfg.img_id, cfg.vol_id)

    if vol.isShared() and not cfg.readonly:
        raise se.SharedVolumeNonWritable(vol)

    if cfg.bitmap and vol.getFormat() != sc.COW_FORMAT:
        raise se.UnsupportedOperation("Cannot export bitmap from RAW volume")

    _create_rundir()

    using_overlay = cfg.bitmap and vol.getParent() != sc.BLANK_UUID
    if using_overlay:
        path = _create_overlay(server_id, vol.volumePath, cfg.bitmap)
        format = "qcow2"
        is_block = False
    else:
        path = vol.volumePath
        format = sc.fmt2str(vol.getFormat())
        is_block = vol.is_block()

    try:
        sock = _socket_path(server_id)
        log.info(
            "Starting transient service %s, serving %s via unix socket %s",
            _service_name(server_id), path, sock)

        qemu_nbd_config = QemuNBDConfig(
            format=format,
            readonly=cfg.readonly,
            discard=cfg.discard,
            detect_zeroes=cfg.detect_zeroes,
            path=path,
            backing_chain=cfg.backing_chain,
            is_block=is_block,
            bitmap=cfg.bitmap)

        start_transient_service(server_id, qemu_nbd_config)

        if not _wait_for_socket(sock, 10.0):
            raise Timeout("Timeout starting NBD server {}: {}".format(
                server_id, config))
    finally:
        if using_overlay:
            # When qemu-nbd is ready it has an open file descriptor, and it
            # does not need the overlay. Removing the overlay now simplifies
            # cleanup when stopping the service.
            _remove_overlay(server_id)

    os.chmod(sock, DEFAULT_SOCKET_MODE)

    unix_address = nbdutils.UnixAddress(sock)
    return unix_address.url()
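
# A sketch of the bitmap handling above (config values are hypothetical):
#
#   # Exporting a bitmap from a RAW volume is rejected up front:
#   start_server("srv-1", {..., "bitmap": "backup-1"})  # RAW volume
#   # -> raises se.UnsupportedOperation
#
#   # For a qcow2 volume with a parent, the bitmap is served through a
#   # transient qcow2 overlay, which is removed once qemu-nbd holds the
#   # image open.
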
def start_backup(vm, dom, config):
    backup_cfg = BackupConfig(config)

    if (backup_cfg.from_checkpoint_id is not None or
            backup_cfg.to_checkpoint_id is not None):
        raise exception.BackupError(
            reason="Incremental backup not supported yet",
            vm_id=vm.id,
            backup=backup_cfg)

    try:
        drives = _get_disks_drives(vm, backup_cfg.disks)
    except LookupError as e:
        raise exception.BackupError(
            reason="Failed to find one of the backup disks: {}".format(e),
            vm_id=vm.id,
            backup=backup_cfg)

    # TODO: We need to create a per-VM directory in /run/vdsm/backup for
    # each VM backup socket. This way we can use SELinux to prevent VMs
    # from accessing other VMs' backup sockets.
    socket_path = os.path.join(P_BACKUP, backup_cfg.backup_id)
    nbd_addr = nbdutils.UnixAddress(socket_path)

    # Create a scratch disk for each drive.
    scratch_disks = _create_scratch_disks(
        vm, dom, backup_cfg.backup_id, drives)

    backup_xml = create_backup_xml(nbd_addr, drives, scratch_disks)

    vm.log.debug("VM backup XML request: %s", backup_xml)
    vm.log.info("Starting backup for backup_id: %r", backup_cfg.backup_id)

    checkpoint_xml = None
    # pylint: disable=no-member
    flags = libvirt.VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL

    try:
        dom.backupBegin(backup_xml, checkpoint_xml, flags=flags)
    except libvirt.libvirtError as e:
        # Remove all the created scratch disks.
        _remove_scratch_disks(vm, backup_cfg.backup_id)
        raise exception.BackupError(
            reason="Error starting backup: {}".format(e),
            vm_id=vm.id,
            backup=backup_cfg)

    disks_urls = {
        img_id: nbd_addr.url(drive.name)
        for img_id, drive in six.iteritems(drives)}

    return {'result': {'disks': disks_urls}}
def start_backup(vm, dom, config):
    backup_cfg = BackupConfig(config)

    _validate_parent_id(vm, dom, backup_cfg.parent_checkpoint_id)

    try:
        drives = _get_disks_drives(vm, backup_cfg.disks)
    except LookupError as e:
        raise exception.BackupError(
            reason="Failed to find one of the backup disks: {}".format(e),
            vm_id=vm.id,
            backup=backup_cfg)

    path = socket_path(backup_cfg.backup_id)
    nbd_addr = nbdutils.UnixAddress(path)

    # Create a scratch disk for each drive.
    scratch_disks = _create_scratch_disks(
        vm, dom, backup_cfg.backup_id, drives)

    try:
        vm.freeze()
        backup_xml = create_backup_xml(
            nbd_addr, drives, scratch_disks, backup_cfg.from_checkpoint_id)
        checkpoint_xml = create_checkpoint_xml(backup_cfg, drives)

        vm.log.info(
            "Starting backup for backup_id: %r, "
            "backup xml: %s\ncheckpoint xml: %s",
            backup_cfg.backup_id, backup_xml, checkpoint_xml)

        _begin_backup(vm, dom, backup_cfg, backup_xml, checkpoint_xml)
    except:
        # Remove all the created scratch disks.
        _remove_scratch_disks(vm, backup_cfg.backup_id)
        raise
    finally:
        # Must always thaw, even if freeze failed, in case the guest
        # did freeze the filesystems but failed to reply in time.
        # Libvirt uses the same logic (see src/qemu/qemu_driver.c).
        vm.thaw()

    disks_urls = {
        img_id: nbd_addr.url(drive.name)
        for img_id, drive in six.iteritems(drives)
    }

    result = {'disks': disks_urls}

    if backup_cfg.to_checkpoint_id is not None:
        _add_checkpoint_xml(
            vm, dom, backup_cfg.backup_id, backup_cfg.to_checkpoint_id,
            result)

    return dict(result=result)
def _backup_info(vm, dom, backup_id, backup, checkpoint_id=None):
    nbd_addr = nbdutils.UnixAddress(backup["socket"])

    backup_urls = {}
    for name, disk in backup["disks"].items():
        drive = vm.find_device_by_name_or_path(name)
        backup_urls[drive.imageID] = nbd_addr.url(disk["exportname"])

    result = {"disks": backup_urls}

    # TODO: Remove this; engine >= 4.4.6 does not need the checkpoint xml,
    # and older engine did not support incremental backup.
    if checkpoint_id is not None:
        _add_checkpoint_xml(vm, dom, backup_id, checkpoint_id, result)

    return dict(result=result)
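
# A sketch of the "backup" argument, showing only the keys this helper
# actually reads above (paths and disk names are hypothetical):
#
#   backup = {
#       "socket": "/run/vdsm/backup/backup-id",
#       "disks": {
#           "sda": {"exportname": "sda"},
#           "vda": {"exportname": "vda"},
#       },
#   }
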
def _parse_backup_info(vm, backup_id, backup_xml):
    """
    Parse the backup info XML returned by libvirt.
    For example, using a Unix socket:

    <domainbackup mode='pull' id='1'>
        <server transport='unix' socket='/run/vdsm/backup-id'/>
        <disks>
            <disk name='vda' backup='yes' type='file'>
                <driver type='qcow2'/>
                <scratch file='/path/to/scratch/disk.qcow2'/>
            </disk>
            <disk name='sda' backup='yes' type='file'>
                <driver type='qcow2'/>
                <scratch file='/path/to/scratch/disk.qcow2'/>
            </disk>
        </disks>
    </domainbackup>
    """
    domainbackup = xmlutils.fromstring(backup_xml)

    server = domainbackup.find('./server')
    if server is None:
        _raise_parse_error(vm.id, backup_id, backup_xml)

    path = server.get('socket')
    if path is None:
        _raise_parse_error(vm.id, backup_id, backup_xml)

    address = nbdutils.UnixAddress(path)

    disks_urls = {}
    for disk in domainbackup.findall("./disks/disk[@backup='yes']"):
        disk_name = disk.get('name')
        if disk_name is None:
            _raise_parse_error(vm.id, backup_id, backup_xml)
        drive = vm.find_device_by_name_or_path(disk_name)
        disks_urls[drive.imageID] = address.url(disk_name)

    return disks_urls
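
# With the example XML from the docstring, the mapping returned by
# _parse_backup_info() would look like this (the image IDs are hypothetical;
# they come from the drives looked up on the VM):
#
#   {
#       "image-id-of-vda": "nbd:unix:/run/vdsm/backup-id:exportname=vda",
#       "image-id-of-sda": "nbd:unix:/run/vdsm/backup-id:exportname=sda",
#   }
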
from __future__ import absolute_import
from __future__ import division

import pytest

from vdsm.common import nbdutils

# This code is based on imageio -
# https://github.com/oVirt/ovirt-imageio/blob/
# e2fd416f026eee3b7b4acd4fc7c867ceb7ab87f1/
# common/test/nbd_test.py#L29


@pytest.mark.parametrize("addr,export,url", [
    (nbdutils.UnixAddress("/sock"), None, "nbd:unix:/sock"),
    (nbdutils.UnixAddress("/sock"), "", "nbd:unix:/sock"),
    (nbdutils.UnixAddress("/sock"), "sda", "nbd:unix:/sock:exportname=sda"),
    (nbdutils.TCPAddress("host", 0), None, "nbd:host:0"),
    (nbdutils.TCPAddress("host", 10900), "", "nbd:host:10900"),
    (nbdutils.TCPAddress("host", 65535), "sdb",
     "nbd:host:65535:exportname=sdb"),
])
def test_url(addr, export, url):
    assert addr.url(export) == url


@pytest.mark.parametrize("port", [-1, 65535 + 1])
def test_invalid_tcp_port(port):
    with pytest.raises(ValueError):
        nbdutils.TCPAddress("host", port)
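
# A minimal sketch of address classes consistent with the tests above; the
# real vdsm.common.nbdutils implementation may differ in detail:
#
#   from collections import namedtuple
#
#   class UnixAddress(namedtuple("UnixAddress", "path")):
#       def url(self, export=None):
#           s = "nbd:unix:" + self.path
#           if export:
#               s += ":exportname=" + export
#           return s
#
#   class TCPAddress(namedtuple("TCPAddress", "host port")):
#       def __new__(cls, host, port):
#           # Reject ports outside the valid TCP range, as exercised by
#           # test_invalid_tcp_port.
#           if not 0 <= port <= 65535:
#               raise ValueError("Invalid port: {}".format(port))
#           return super(TCPAddress, cls).__new__(cls, host, port)
#
#       def url(self, export=None):
#           s = "nbd:{}:{}".format(self.host, self.port)
#           if export:
#               s += ":exportname=" + export
#           return s
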
def verify_backup_urls(backup_id, result_disks):
    # The socket path depends only on the backup id, so compute it once
    # outside the loop.
    socket_path = backup.socket_path(backup_id)
    for image_id, drive in FAKE_DRIVES.items():
        exp_addr = nbdutils.UnixAddress(socket_path).url(drive.name)
        assert result_disks[image_id] == exp_addr