Example #1
    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Try to rebuild disk xml
        """
        # Delete existing disks first.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks_dev = vmxml.get_devices(device_type="disk")
        for disk in disks_dev:
            vmxml.del_device(disk)

        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
        if driver_iothread:
            driver_dict.update({"iothread": driver_iothread})
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vda", "bus": "virtio"}
        if default_pool:
            utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool,
                             "glusterfs")
            utils.run("setsebool virt_use_fusefs on")
            virsh.pool_refresh("default")
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {
                "protocol": "gluster",
                "name": "%s/%s" % (vol_name, disk_img)
            }
            host_dict = {"name": host_ip, "port": "24007"}
            if transport:
                host_dict.update({"transport": transport})
            disk_xml.source = disk_xml.new_disk_source(**{
                "attrs": source_dict,
                "hosts": [host_dict]
            })
        # set domain options
        if dom_iothreads:
            try:
                vmxml.iothreads = int(dom_iothreads)
            except ValueError:
                # 'iothreads' may be an invalid number in negative tests
                logging.debug("Can't convert '%s' to integer type" %
                              dom_iothreads)

        # Add the new disk xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()
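A hedged usage sketch: the helper above closes over test-scope variables (vm_name, default_pool, vol_name, driver_iothread, transport, dom_iothreads), so the enclosing run() would typically drive it like this (the image name and host IP are hypothetical):

build_disk_xml("disk.qcow2", "qcow2", "192.168.122.10")
# The guest definition now carries a single gluster-backed vda disk.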
Example #2
def create_disk(disk_type, disk_path, disk_format, disk_device_type,
                disk_device, disk_target, disk_bus):
    """
    Create another disk at a given path and customize some attributes.

    :param disk_type: the type of disk.
    :param disk_path: the path of disk.
    :param disk_format: the format of the disk image.
    :param disk_device_type: the disk device type.
    :param disk_device: the device of disk.
    :param disk_target: the target of disk.
    :param disk_bus: the target bus of disk.
    :return: disk object if created successfully.
    """
    disk_source = libvirt.create_local_disk(disk_type, disk_path, '1', disk_format)
    custom_disk = Disk(type_name=disk_device_type)
    custom_disk.device = disk_device
    source_dict = {'file': disk_source}
    custom_disk.source = custom_disk.new_disk_source(
        **{"attrs": source_dict})
    target_dict = {"dev": disk_target, "bus": disk_bus}
    custom_disk.target = target_dict
    driver_dict = {"name": "qemu", 'type': disk_format}
    custom_disk.driver = driver_dict
    return custom_disk
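A minimal usage sketch, assuming a test scope where vm_name is defined and virsh/libvirt are imported as in the surrounding examples (the image path and target are hypothetical):

new_disk = create_disk('file', '/var/lib/libvirt/images/extra.qcow2', 'qcow2',
                       'file', 'disk', 'vdb', 'virtio')
ret = virsh.attach_device(vm_name, new_disk.xml, debug=True)
libvirt.check_exit_status(ret)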
Example #3
        def test_device_update_boot_order(disk_type, disk_path, error_msg):
            """
            Wrap device update boot order steps here.
            First, create another disk at the given path.
            Then update the previous disk with the newly created disk and a modified boot order.
            Finally, eject the disk.
            :param disk_type: the type of disk.
            :param disk_path: the path of disk.
            :param error_msg: the expected error message.
            """

            additional_disk = create_addtional_disk(disk_type, disk_path, device_formats[0], device_types[0],
                                                    devices[0], device_targets[0],
                                                    device_bus[0])
            additional_disk.boot = '2'
            additional_disk.readonly = 'True'
            # Updating a cdrom/floppy disk with a modified boot order is expected to fail.
            ret = virsh.update_device(vm_name, additional_disk.xml, debug=True)
            if ret.exit_status == 0 or not ret.stderr.count("Operation not supported: " +
                                                            "cannot modify field 'boot order' of the disk"):
                test.fail(error_msg)
            # Force-eject the cdrom or floppy; this is expected to succeed.
            eject_disk = Disk(type_name='block')
            eject_disk.target = {"dev": device_targets[0], "bus": device_bus[0]}
            eject_disk.device = devices[0]
            ret = virsh.update_device(vm_name, eject_disk.xml, flagstr='--force', debug=True)
            libvirt.check_exit_status(ret)
Example #4
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk and scsi controller for guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {
                'dev': disk_path
            }})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()
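A hedged usage sketch, assuming a scsi_debug-backed block device has already been provisioned (the domain name and /dev path are hypothetical):

# Adds the lun disk and, if none is present yet, a virtio-scsi controller.
recompose_xml("avocado-vt-vm1", "/dev/sdb")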
Example #5
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """
        # Get disk path of scsi_disk
        path_cmd = "udevadm info --name %s | grep /dev/disk/by-path/ | " \
                   "cut -d' ' -f4" % scsi_disk
        disk_path = utils.run(path_cmd).stdout.strip()

        # Add qemu guest agent in guest xml
        vm_xml.VMXML.set_agent_channel(vm_name)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {
                'dev': disk_path
            }})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()
Example #6
 def build_disk_xml(disk_img, disk_format, host_ip):
     """
     Try to rebuild disk xml
     """
     if default_pool:
         disk_xml = Disk(type_name="file")
     else:
         disk_xml = Disk(type_name="network")
     disk_xml.device = "disk"
     driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
     if driver_iothread:
         driver_dict.update({"iothread": driver_iothread})
     disk_xml.driver = driver_dict
     disk_xml.target = {"dev": "vdb", "bus": "virtio"}
     if default_pool:
         utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool,
                          "glusterfs")
         process.run("setsebool virt_use_fusefs on", shell=True)
         source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
         disk_xml.source = disk_xml.new_disk_source(
             **{"attrs": source_dict})
     else:
         source_dict = {
             "protocol": "gluster",
             "name": "%s/%s" % (vol_name, disk_img)
         }
         host_dict = [{"name": host_ip, "port": "24007"}]
         # If mutiple_hosts is True, attempt to add multiple hosts.
         if multiple_hosts:
             host_dict.append({
                 "name": params.get("dummy_host1"),
                 "port": "24007"
             })
             host_dict.append({
                 "name": params.get("dummy_host2"),
                 "port": "24007"
             })
         if transport:
             host_dict[0]['transport'] = transport
         disk_xml.source = disk_xml.new_disk_source(**{
             "attrs": source_dict,
             "hosts": host_dict
         })
     return disk_xml
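Unlike Example #1, this variant returns the disk object instead of syncing it, so a caller in the same test scope would attach it roughly like this (a hedged sketch; the image name and host IP are hypothetical):

disk_xml = build_disk_xml("disk.qcow2", "qcow2", "192.168.122.10")
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
vmxml.add_device(disk_xml)
vmxml.sync()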
Example #7
    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Try to rebuild disk xml
        """
        # Delete existing disks first.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks_dev = vmxml.get_devices(device_type="disk")
        for disk in disks_dev:
            vmxml.del_device(disk)

        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vda", "bus": "virtio"}
        if default_pool:
            utils.run(
                "mount -t glusterfs %s:%s %s; setsebool virt_use_fusefs on" %
                (host_ip, vol_name, default_pool))
            virsh.pool_refresh("default")
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {
                "protocol": "gluster",
                "name": "%s/%s" % (vol_name, disk_img)
            }
            host_dict = {"name": host_ip, "port": "24007"}
            if transport:
                host_dict.update({"transport": transport})
            disk_xml.source = disk_xml.new_disk_source(**{
                "attrs": source_dict,
                "hosts": [host_dict]
            })
        # Add the new disk xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()
Example #8
def build_disk_xml(vm_name,
                   disk_format,
                   host_ip,
                   disk_src_protocol,
                   volume_name,
                   disk_img=None,
                   transport=None,
                   auth=None):
    """
    Try to build disk xml

    :param vm_name: specified VM name.
    :param disk_format: disk format, e.g. raw or qcow2
    :param host_ip: host IP address
    :param disk_src_protocol: disk access protocol, e.g. network or file
    :param volume_name: volume name
    :param disk_img: disk image name
    :param transport: transport type, e.g. tcp
    :param auth: dict containing ceph parameters
    """
    # Delete existing disks first.
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    disks_dev = vmxml.get_devices(device_type="disk")
    for disk in disks_dev:
        vmxml.del_device(disk)

    disk_xml = Disk(type_name="network")
    driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
    disk_xml.driver = driver_dict
    disk_xml.target = {"dev": "vda", "bus": "virtio"}
    # If the protocol is rbd, create a ceph disk xml.
    if disk_src_protocol == "rbd":
        disk_xml.device = "disk"
        vol_name = volume_name
        source_dict = {
            "protocol": disk_src_protocol,
            "name": "%s/%s" % (vol_name, disk_img)
        }
        host_dict = {"name": host_ip, "port": "6789"}
        if transport:
            host_dict.update({"transport": transport})
        if auth:
            disk_xml.auth = disk_xml.new_auth(**auth)
        disk_xml.source = disk_xml.new_disk_source(**{
            "attrs": source_dict,
            "hosts": [host_dict]
        })
    # Add the new disk xml.
    vmxml.add_device(disk_xml)
    vmxml.sync()
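A hedged call sketch for the rbd branch; auth_sec_uuid is assumed to be the uuid of a pre-created libvirt secret, and the monitor address, pool and image names are illustrative:

auth = {"auth_user": "admin",
        "secret_type": "ceph",
        "secret_uuid": auth_sec_uuid}
build_disk_xml("avocado-vt-vm1", "raw", "10.0.0.5", "rbd",
               "rbd_pool", disk_img="rbd.img", auth=auth)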
Example #9
    def get_vm_disk_xml(dev_type, dev_name, **options):
        """
        Create a disk xml object and return it.

        :param dev_type: Disk type.
        :param dev_name: Disk device name.
        :param options: Disk options.
        :return: Disk xml object.
        """
        # Create disk xml
        disk_xml = Disk(type_name=dev_type)
        disk_xml.device = options["disk_device"]
        if "sgio" in options and options["sgio"] != "":
            disk_xml.sgio = options["sgio"]
            disk_xml.device = "lun"
            disk_xml.rawio = "no"

        if dev_type == "block":
            disk_attr = "dev"
        else:
            disk_attr = "file"

        disk_xml.target = {'dev': options["target"], 'bus': options["bus"]}
        disk_xml.source = disk_xml.new_disk_source(
            **{'attrs': {
                disk_attr: dev_name
            }})

        # Add driver options from parameters.
        driver_dict = {"name": "qemu"}
        if "driver" in options:
            for driver_option in options["driver"].split(','):
                if driver_option != "":
                    d = driver_option.split('=')
                    logging.debug("disk driver option: %s=%s", d[0], d[1])
                    driver_dict.update({d[0].strip(): d[1].strip()})

        disk_xml.driver = driver_dict
        if "share" in options:
            if options["share"] == "shareable":
                disk_xml.share = True

        if "readonly" in options:
            if options["readonly"] == "readonly":
                disk_xml.readonly = True

        logging.debug("The disk xml is: %s" % disk_xml.xmltreefile)

        return disk_xml
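A hedged sketch of the options dict this helper expects (the block device path is hypothetical; as shown above, a non-empty sgio forces the device type to lun):

options = {"disk_device": "disk", "sgio": "unfiltered",
           "target": "sdb", "bus": "scsi",
           "driver": "cache=none,io=native", "share": "shareable"}
disk_xml = get_vm_disk_xml("block", "/dev/sdb", **options)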
Example #10
    def prepare_virt_disk_xml(virt_disk_device_target,
                              virt_disk_device_bus,
                              usb_bus=None,
                              virt_disk_bus=None,
                              virt_disk_slot=None):
        """
        Prepare the virt disk xml to be attached/detached.

        :param virt_disk_device_target: The target dev of the disk.
        :param virt_disk_device_bus: The target bus of the disk.
        :param usb_bus: The usb controller bus index (usb bus only).
        :param virt_disk_bus: The pci bus of the disk address (virtio bus only).
        :param virt_disk_slot: The pci slot of the disk address (virtio bus only).
        :return: The virtual disk xml.
        """
        image_filename = ''.join(
            random.choice(string.ascii_lowercase) for _ in range(8)) + ".qcow2"
        virt_disk_device = 'disk'
        virt_disk_device_type = 'file'
        virt_disk_device_format = 'qcow2'
        disk_xml = Disk(type_name=virt_disk_device_type)
        disk_xml.device = virt_disk_device
        disk_src_dict = {
            'attrs': {
                'file': prepare_local_image(image_filename),
                'type_name': 'file'
            }
        }
        disk_xml.source = disk_xml.new_disk_source(**disk_src_dict)
        driver_dict = {"name": "qemu", "type": virt_disk_device_format}
        disk_xml.driver = driver_dict
        disk_xml.target = {
            "dev": virt_disk_device_target,
            "bus": virt_disk_device_bus
        }
        if virt_disk_device_bus == 'usb':
            disk_addr_dict = {'bus': str(usb_bus), 'port': '1'}
            disk_xml.address = disk_xml.new_disk_address(
                type_name='usb', **{"attrs": disk_addr_dict})
        elif virt_disk_device_bus == 'virtio':
            disk_addr_dict = {
                'bus': virt_disk_bus,
                'slot': virt_disk_slot,
                'domain': '0x0000',
                'function': '0x0'
            }
            disk_xml.address = disk_xml.new_disk_address(
                type_name='pci', **{"attrs": disk_addr_dict})
        return disk_xml
Example #11
def add_cdrom_device(v_xml, iso_file, target_dev, device_bus):
    """
    Add cdrom disk in VM XML

    :param v_xml: The instance of VMXML class
    :param iso_file: The iso file path
    :param target_dev: The target dev in Disk XML
    :param device_bus: The target bus in Disk XML
    :return: The updated VMXML instance
    """
    disk_xml = Disk(type_name="file")
    disk_xml.device = "cdrom"
    disk_xml.target = {"dev": target_dev, "bus": device_bus}
    disk_xml.driver = {"name": "qemu", "type": "raw"}
    src_dict = {"file": iso_file}
    disk_xml.source = disk_xml.new_disk_source(**{"attrs": src_dict})
    disk_xml.readonly = False
    v_xml.add_device(disk_xml)
    return v_xml
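A minimal usage sketch, assuming an avocado-vt environment with a defined guest (the guest name and iso path are hypothetical):

vmxml = vm_xml.VMXML.new_from_dumpxml("avocado-vt-vm1")
vmxml = add_cdrom_device(vmxml, "/var/lib/libvirt/images/test.iso", "hdc", "ide")
vmxml.sync()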
Example #12
def create_custom_metadata_disk(disk_path,
                                disk_format,
                                disk_device,
                                device_target,
                                device_bus,
                                max_size,
                                disk_inst=None):
    """
    Create another disk at a given path and customize the driver metadata attribute

    :param disk_path: the path of disk
    :param disk_format: the format to disk image
    :param disk_device: the disk device type
    :param device_target: the target of disk
    :param device_bus: device bus
    :param max_size: metadata_cache max size
    :param disk_inst: disk instance
    :return: disk object if created or updated successfully
    """
    if disk_inst:
        custom_disk = disk_inst
    else:
        custom_disk = Disk(type_name='file')
    if disk_device:
        custom_disk.device = disk_device
    source_dict = {}
    if disk_path:
        source_dict.update({'file': disk_path})
    custom_disk.source = custom_disk.new_disk_source(**{"attrs": source_dict})
    if device_target:
        target_dict = {"dev": device_target, "bus": device_bus}
        custom_disk.target = target_dict
    driver_dict = {"name": "qemu", 'type': disk_format}
    # Create drivermetadata object
    new_one_drivermetadata = custom_disk.new_drivermetadata(
        **{"attrs": driver_dict})
    metadata_cache_dict = {"max_size": max_size, "max_size_unit": "bytes"}
    # Attach metadatacache into drivermetadata object
    new_one_drivermetadata.metadata_cache = custom_disk.DriverMetadata(
    ).new_metadatacache(**metadata_cache_dict)
    custom_disk.drivermetadata = new_one_drivermetadata
    logging.debug("disk xml in create_custom_metadata_disk: %s\n", custom_disk)
    return custom_disk
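A hedged usage sketch (the path, target and max_size are hypothetical; the <metadata_cache> element requires a reasonably recent libvirt/qemu):

meta_disk = create_custom_metadata_disk("/var/lib/libvirt/images/meta.qcow2",
                                        "qcow2", "disk", "vdc", "virtio",
                                        "102400")
ret = virsh.attach_device(vm_name, meta_disk.xml, debug=True)
libvirt.check_exit_status(ret)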
Example #13
    def prepare_secret_disk(self, image_path, secret_disk_dict=None):
        """
        Add secret disk for domain.

        :param image_path: image path for disk source file
        :param secret_disk_dict: secret disk dict to add new disk
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(self.vm.name)
        sec_dict = eval(self.params.get("sec_dict", '{}'))
        device_target = self.params.get("target_disk", "vdd")
        sec_passwd = self.params.get("private_key_password")
        if not secret_disk_dict:
            secret_disk_dict = {
                'type_name': "file",
                'target': {
                    "dev": device_target,
                    "bus": "virtio"
                },
                'driver': {
                    "name": "qemu",
                    "type": "qcow2"
                },
                'source': {
                    'encryption': {
                        "encryption": 'luks',
                        "secret": {
                            "type": "passphrase"
                        }
                    }
                }
            }
        # Create secret
        libvirt_secret.clean_up_secrets()
        sec_uuid = libvirt_secret.create_secret(sec_dict=sec_dict)
        virsh.secret_set_value(sec_uuid, sec_passwd, encode=True, debug=True)

        secret_disk_dict['source']['encryption']['secret']["uuid"] = sec_uuid
        secret_disk_dict['source']['attrs'] = {'file': image_path}
        new_disk = Disk()
        new_disk.setup_attrs(**secret_disk_dict)
        # Append the new disk to the device list and write the list back.
        new_devices = vmxml.devices
        new_devices.append(new_disk)
        vmxml.devices = new_devices
        vmxml.xmltreefile.write()
        vmxml.sync()
Example #14
    def add_luks_disk(secret_type, sec_encryption_uuid):
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=disk_type)
        disk_xml.device = "disk"
        disk_source = disk_xml.new_disk_source(
                **{"attrs": {'file': img_path}})
        disk_xml.driver = {"name": "qemu", "type": img_format,
                           "cache": "none"}
        disk_xml.target = {"dev": disk_target, "bus": disk_bus}
        encryption_dict = {"encryption": 'luks',
                           "secret": {"type": secret_type,
                                      "uuid": sec_encryption_uuid}}
        disk_source.encryption = disk_xml.new_encryption(**encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        vmxml.add_device(disk_xml)
        logging.debug("guest xml: %s", vmxml.xml)
        return vmxml
Example #15
    def prepare_virt_disk_xml(image_path):
        """
        Prepare the virtual disk xml to be attached/detached.

        :param image_path: The path to the local image.
        :return: The virtual disk xml.
        """
        virt_disk_device = params.get("virt_disk_device", "disk")
        virt_disk_device_type = params.get("virt_disk_device_type", "file")
        virt_disk_device_format = params.get("virt_disk_device_format", "raw")
        virt_disk_device_target = params.get("virt_disk_device_target", "sdb")
        virt_disk_device_bus = params.get("virt_disk_device_bus", "usb")
        disk_xml = Disk(type_name=virt_disk_device_type)
        disk_xml.device = virt_disk_device
        disk_src_dict = {'attrs': {'file': image_path, 'type_name': 'file'}}
        disk_xml.source = disk_xml.new_disk_source(**disk_src_dict)
        driver_dict = {"name": "qemu", "type": virt_disk_device_format}
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": virt_disk_device_target,
                           "bus": virt_disk_device_bus}
        return disk_xml
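A hedged sketch of the attach/detach cycle this helper supports, assuming vm_name and params come from the surrounding test (the image path is hypothetical):

disk_xml = prepare_virt_disk_xml("/var/lib/libvirt/images/usb.img")
ret = virsh.attach_device(vm_name, disk_xml.xml, debug=True)
libvirt.check_exit_status(ret)
ret = virsh.detach_device(vm_name, disk_xml.xml, debug=True)
libvirt.check_exit_status(ret)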
Example #16
def create_primitive_disk_xml(type_name, disk_device, device_target,
                              device_bus, device_format, disk_src_dict,
                              disk_auth_dict):
    """
    Creates primitive disk xml

    :param type_name: disk type
    :param disk_device: disk device
    :param device_target: target device
    :param device_bus: device bus
    :param device_format: device format
    :param disk_src_dict: disk source, dict format like below
           disk_src_dict = {"attrs": {"protocol": "rbd",
                                   "name": disk_name},
                            "hosts":  [{"name": host_ip,
                                     "port": host_port}]}
    :param disk_auth_dict: disk auth information, dict format like below
           disk_auth_dict = {"auth_user": auth_user,
                             "secret_type": auth_sec_usage_type,
                             "secret_uuid": auth_sec_uuid}
    :return: disk xml object
    """
    disk_xml = Disk(type_name=type_name)
    disk_xml.device = disk_device
    disk_xml.target = {"dev": device_target, "bus": device_bus}
    driver_dict = {"name": "qemu", "type": device_format}
    disk_xml.driver = driver_dict
    if disk_src_dict:
        logging.debug("disk src dict is: %s" % disk_src_dict)
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        disk_xml.source = disk_source
    if disk_auth_dict:
        logging.debug("disk auth dict is: %s" % disk_auth_dict)
        disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
    logging.debug("new disk xml in create_primitive_disk is: %s", disk_xml)
    return disk_xml
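The docstring above already shows the expected dict shapes; a hedged rbd example (the host, pool/image name and secret uuid variable are illustrative):

disk_src_dict = {"attrs": {"protocol": "rbd", "name": "rbd_pool/rbd.img"},
                 "hosts": [{"name": "10.0.0.5", "port": "6789"}]}
disk_auth_dict = {"auth_user": "admin",
                  "secret_type": "ceph",
                  "secret_uuid": auth_sec_uuid}
disk_xml = create_primitive_disk_xml("network", "disk", "vdb", "virtio",
                                     "raw", disk_src_dict, disk_auth_dict)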
Example #17
def run(test, params, env):
    """
    Test nbd disk option.

    1.Prepare backend storage
    2.Use nbd to export the backend storage with or without TLS
    3.Prepare a disk xml indicating to the backend storage
    4.Start VM with disk hotplug/coldplug
    5.Start snapshot or save/restore operations on nbd disk
    6.Check some behaviours on VM
    7.Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}

    def check_disk_save_restore(save_file):
        """
        Check domain save and restore operation.

        :param save_file: the path to saved file
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Check domain snapshot operations.
        """
        # Clean up dirty data if it exists
        if os.path.exists(snapshot_name1_file):
            os.remove(snapshot_name1_file)
        if os.path.exists(snapshot_name2_mem_file):
            os.remove(snapshot_name2_mem_file)
        if os.path.exists(snapshot_name2_disk_file):
            os.remove(snapshot_name2_disk_file)
        device_target = 'vda'
        snapshot_name1_option = "--diskspec %s,file=%s,snapshot=external --disk-only --atomic" % (
            device_target, snapshot_name1_file)
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s %s" %
                                       (snapshot_name1, snapshot_name1_option),
                                       debug=True)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name1 not in snap_lists:
            test.fail("Snapshot %s doesn't exist" % snapshot_name1)
        # Check file can be created after snapshot

        def _check_file_create(filename):
            """
            Check whether file with specified filename exists or not.

            :param filename: filename
            """
            session = None
            try:
                session = vm.wait_for_login()
                if platform.platform().count('ppc64'):
                    time.sleep(10)
                cmd = "echo teststring > /tmp/{0}".format(filename)
                status, output = session.cmd_status_output(cmd)
                if status != 0:
                    test.fail("Failed to touch one file on VM internal")
            except (remote.LoginError, virt_vm.VMError,
                    aexpect.ShellError) as e:
                logging.error(str(e))
                raise
            finally:
                if session:
                    session.close()

        _check_file_create("disk.txt")
        # Create memory snapshot.
        snapshot_name2_mem_option = "--memspec file=%s,snapshot=external" % (
            snapshot_name2_mem_file)
        snapshot_name2_disk_option = "--diskspec %s,file=%s,snapshot=external --atomic" % (
            device_target, snapshot_name2_disk_file)
        snapshot_name2_option = "%s %s" % (snapshot_name2_mem_option,
                                           snapshot_name2_disk_option)
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s %s" %
                                       (snapshot_name2, snapshot_name2_option),
                                       debug=True)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name2 not in snap_lists:
            test.fail("Snapshot: %s doesn't exist" % snapshot_name2)
        _check_file_create("mem.txt")

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdb")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")
    image_path = params.get("emulated_image")
    # Get config parameters
    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    tls_enabled = "yes" == params.get("enable_tls", "no")
    enable_private_key_encryption = "yes" == params.get(
        "enable_private_key_encryption", "no")
    private_key_encrypt_passphrase = params.get("private_key_password")
    domain_operation = params.get("domain_operation")
    secret_uuid = None

    # Get snapshot attributes.
    snapshot_name1 = params.get("snapshot_name1")
    snapshot_name1_file = params.get("snapshot_name1_file")
    snapshot_name2 = params.get("snapshot_name2")
    snapshot_name2_mem_file = params.get("snapshot_name2_mem_file")
    snapshot_name2_disk_file = params.get("snapshot_name2_disk_file")
    # Initialize one NbdExport object
    nbd = None

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Get server hostname.
        hostname = process.run('hostname',
                               ignore_status=False,
                               shell=True,
                               verbose=True).stdout_text.strip()
        # Setup backend storage
        nbd_server_host = hostname
        nbd_server_port = params.get("nbd_server_port")
        image_path = params.get("emulated_image",
                                "/var/lib/libvirt/images/nbdtest.img")
        export_name = params.get("export_name", None)
        deleteExisted = "yes" == params.get("deleteExisted", "yes")
        tls_bit = "no"
        if tls_enabled:
            tls_bit = "yes"

        # Create secret
        if enable_private_key_encryption:
            # This feature is supported since libvirt 6.6.0
            if not libvirt_version.version_compare(6, 6, 0):
                test.cancel(
                    "current libvirt version doesn't support client private key encryption"
                )
            utils_secret.clean_up_secrets()
            private_key_sec_uuid = libvirt.create_secret(params)
            logging.debug("A secret created with uuid = '%s'",
                          private_key_sec_uuid)
            private_key_sec_passwd = params.get("private_key_password",
                                                "redhat")
            ret = virsh.secret_set_value(private_key_sec_uuid,
                                         private_key_sec_passwd,
                                         encode=True,
                                         use_file=True,
                                         debug=True)
            libvirt.check_exit_status(ret)
            secret_uuid = private_key_sec_uuid

        # Initialize special test environment config for snapshot operations.
        if domain_operation == "snap_shot":
            first_disk = vm.get_first_disk_devices()
            image_path = first_disk['source']
            device_target = 'vda'
            # Remove previous xml
            disks = vmxml.get_devices(device_type="disk")
            for disk_ in disks:
                if disk_.target['dev'] == device_target:
                    vmxml.del_device(disk_)
                    break

        # Create NbdExport object
        nbd = NbdExport(
            image_path,
            image_format=device_format,
            port=nbd_server_port,
            export_name=export_name,
            tls=tls_enabled,
            deleteExisted=deleteExisted,
            private_key_encrypt_passphrase=private_key_encrypt_passphrase,
            secret_uuid=secret_uuid)
        nbd.start_nbd_server()
        # Prepare disk source xml
        source_attrs_dict = {"protocol": "nbd", "tls": "%s" % tls_bit}
        if export_name:
            source_attrs_dict.update({"name": "%s" % export_name})
        disk_src_dict = {}
        disk_src_dict.update({"attrs": source_attrs_dict})
        disk_src_dict.update(
            {"hosts": [{
                "name": nbd_server_host,
                "port": nbd_server_port
            }]})
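        # For illustration, the dict above maps onto source XML of this shape:
        #   <source protocol='nbd' tls='yes|no' name='EXPORT_NAME'>
        #     <host name='NBD_SERVER_HOST' port='NBD_SERVER_PORT'/>
        #   </source>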

        # Add disk xml.
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": 'raw'}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        try:
            vmxml.sync()
            vm.start()
            vm.wait_for_login()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % str(xml_error))
        except virt_vm.VMStartError as details:
            # When use wrong password in disk xml for cold plug cases,
            # VM cannot be started
            if status_error and not hotplug_disk:
                logging.info("VM failed to start as expected: %s" %
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        # Hotplug disk.
        if hotplug_disk:
            result = virsh.attach_device(vm_name,
                                         disk_xml.xml,
                                         ignore_status=True,
                                         debug=True)
            libvirt.check_exit_status(result, status_error)
        # Check save and restore operation and its result
        if domain_operation == 'save_restore':
            save_file = "/tmp/%s.save" % vm_name
            check_disk_save_restore(save_file)

        # Check attached nbd disk
        if check_partitions and not status_error:
            logging.debug("wait seconds for starting in checking vm part")
            time.sleep(2)
            if not check_in_vm(device_target, old_parts):
                test.fail("Check disk partitions in VM failed")
        # Check snapshot operation and its result
        if domain_operation == 'snap_shot':
            check_snapshot()

        # Unplug disk.
        if hotplug_disk:
            result = virsh.detach_device(vm_name,
                                         disk_xml.xml,
                                         ignore_status=True,
                                         debug=True,
                                         wait_for_event=True)
            libvirt.check_exit_status(result, status_error)
    finally:
        if enable_private_key_encryption:
            utils_secret.clean_up_secrets()
        # Clean up backend storage and TLS
        try:
            if nbd:
                nbd.cleanup()
            # Clean up snapshots if exist
            if domain_operation == 'snap_shot':
                snap_lists = virsh.snapshot_list(vm_name, debug=True)
                for snap_name in snap_lists:
                    virsh.snapshot_delete(vm_name,
                                          snap_name,
                                          "--metadata",
                                          debug=True,
                                          ignore_status=True)
                # Clean up dirty data if it exists
                if os.path.exists(snapshot_name1_file):
                    os.remove(snapshot_name1_file)
                if os.path.exists(snapshot_name2_mem_file):
                    os.remove(snapshot_name2_mem_file)
                if os.path.exists(snapshot_name2_disk_file):
                    os.remove(snapshot_name2_disk_file)
        except Exception as nbdEx:
            logging.info("Clean up nbd failed: %s" % str(nbdEx))

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")
Example #18
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare backend storage (blkdev/iscsi/gluster/ceph)
    2.Use luks format to encrypt the backend storage
    3.Prepare a disk xml indicating to the backend storage with valid/invalid
      luks password
    4.Start VM with disk hot/cold plugged
    5.Check some disk operations in VM
    6.Check backend storage is still in luks format
    7.Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def encrypt_dev(device, params):
        """
        Encrypt device with luks format

        :param device: Storage device to be encrypted.
        :param params: Dict from which to get the encryption password.
        """
        password = params.get("luks_encrypt_passwd", "password")
        size = params.get("luks_size", "500M")
        cmd = (
            "qemu-img create -f luks "
            "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
            "-o key-secret=sec0 %s %s" % (password, device, size))
        if process.system(cmd, shell=True):
            test.fail("Can't create a luks encrypted img by qemu-img")

    def check_dev_format(device, fmt="luks"):
        """
        Check if device is in luks format

        :param device: Storage device to be checked.
        :param fmt: Expected disk format.
        :return: If device's format equals to fmt, return True, else return False.
        """
        cmd_result = process.run("qemu-img" + ' -h',
                                 ignore_status=True,
                                 shell=True,
                                 verbose=False)
        if b'-U' in cmd_result.stdout:
            cmd = ("qemu-img info -U %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        else:
            cmd = ("qemu-img info %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        if cmd_result.exit_status:
            test.fail("device %s is not in %s format. err is: %s" %
                      (device, fmt, cmd_result.stderr))

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage options.
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")

    # Luks encryption info: luks_encrypt_passwd is the password used to encrypt
    # the luks image, and luks_secret_passwd is the password set on the luks
    # secret; set a wrong luks_secret_passwd for negative tests.
    luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
    luks_secret_passwd = params.get("luks_secret_passwd", "password")
    # Backend storage auth info
    use_auth_usage = "yes" == params.get("use_auth_usage")
    if use_auth_usage:
        use_auth_uuid = False
    else:
        use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")

    status_error = "yes" == params.get("status_error")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    encryption_in_source = "yes" == params.get("encryption_in_source", "no")
    auth_in_source = "yes" == params.get("auth_in_source", "no")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}
    pvt = None

    if ((encryption_in_source or auth_in_source)
            and not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot put <encryption> or <auth> inside disk <source> "
                    "in this libvirt version.")
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Setup backend storage
        if backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                if enable_auth:
                    chap_user = params.get("chap_user", "redhat")
                    chap_passwd = params.get("chap_passwd", "password")
                    auth_sec_usage = params.get("auth_sec_usage",
                                                "libvirtiscsi")
                    auth_sec_dict = {
                        "sec_usage": "iscsi",
                        "sec_target": auth_sec_usage
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    # Set password of auth secret (not luks encryption secret)
                    virsh.secret_set_value(auth_sec_uuid,
                                           chap_passwd,
                                           encode=True,
                                           debug=True)
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=False,
                        image_size=storage_size,
                        chap_user=chap_user,
                        chap_passwd=chap_passwd,
                        portal_ip=iscsi_host)
                    # ISCSI auth attributes for disk xml
                    if use_auth_uuid:
                        disk_auth_dict = {
                            "auth_user": chap_user,
                            "secret_type": auth_sec_usage_type,
                            "secret_uuid": auth_sec_uuid
                        }
                    elif use_auth_usage:
                        disk_auth_dict = {
                            "auth_user": chap_user,
                            "secret_type": auth_sec_usage_type,
                            "secret_usage": auth_sec_usage_target
                        }
                else:
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=False,
                        image_size=storage_size,
                        portal_ip=iscsi_host)
                device_source = "iscsi://%s:%s/%s/%s" % (
                    iscsi_host, iscsi_port, iscsi_target, lun_num)
                disk_src_dict = {
                    "attrs": {
                        "protocol": "iscsi",
                        "name": "%s/%s" % (iscsi_target, lun_num)
                    },
                    "hosts": [{
                        "name": iscsi_host,
                        "port": iscsi_port
                    }]
                }
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name",
                                           "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = libvirt.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            device_source = "gluster://%s/%s/%s" % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            disk_src_dict = {
                "attrs": {
                    "protocol": "gluster",
                    "name": "%s/%s" % (gluster_vol_name, gluster_img_name)
                },
                "hosts": [{
                    "name": gluster_host_ip,
                    "port": "24007"
                }]
            }
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Start with a blank config path so cleanup knows whether one was created
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {
                        "sec_usage": auth_sec_usage_type,
                        "sec_name": "ceph_auth_secret"
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid,
                                           ceph_auth_key,
                                           debug=True)
                    disk_auth_dict = {
                        "auth_user": ceph_auth_user,
                        "secret_type": auth_sec_usage_type,
                        "secret_uuid": auth_sec_uuid
                    }
                    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                           "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
                else:
                    test.error("No ceph client name/key provided.")
                device_source = "rbd:%s:mon_host=%s:keyring=%s" % (
                    ceph_disk_name, ceph_mon_ip, key_file)
            else:
                device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name,
                                                        ceph_mon_ip)
            disk_src_dict = {
                "attrs": {
                    "protocol": "rbd",
                    "name": ceph_disk_name
                },
                "hosts": [{
                    "name": ceph_host_ip,
                    "port": ceph_host_port
                }]
            }
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
        else:
            test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.")
        logging.debug("device source is: %s", device_source)
        luks_sec_uuid = libvirt.create_secret(params)
        logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
        ret = virsh.secret_set_value(luks_sec_uuid,
                                     luks_secret_passwd,
                                     encode=True,
                                     debug=True)
        encrypt_dev(device_source, params)
        libvirt.check_exit_status(ret)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            if auth_in_source:
                disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
            else:
                disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_encryption_dict = {
            "encryption": "luks",
            "secret": {
                "type": "passphrase",
                "uuid": luks_sec_uuid
            }
        }
        disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
        if encryption_in_source:
            disk_source.encryption = disk_encryption
        else:
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        vmxml.sync()
        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as details:
            # When use wrong password in disk xml for cold plug cases,
            # VM cannot be started
            if status_error and not hotplug_disk:
                logging.info("VM failed to start as expected: %s" %
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        if hotplug_disk:
            result = virsh.attach_device(vm_name,
                                         disk_xml.xml,
                                         ignore_status=True,
                                         debug=True)
            libvirt.check_exit_status(result, status_error)
        if check_partitions and not status_error:
            if not check_in_vm(device_target, old_parts):
                test.fail("Check disk partitions in VM failed")
        check_dev_format(device_source)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            libvirt.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)

        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
Example #19
def run(test, params, env):
    """
    Test virsh blockcopy --xml option.

    1.Prepare backend storage (file/block/iscsi/ceph/nbd)
    2.Start VM
    3.Prepare target xml
    4.Execute virsh blockcopy --xml command
    5.Check VM xml after operation accomplished
    6.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    ignore_check = False

    def check_blockcopy_xml(vm_name, source_image, ignore_check=False):
        """
        Check blockcopy xml in VM.

        :param vm_name: VM name
        :param source_image: source image name.
        :param ignore_check: default is False.
        """
        if ignore_check:
            return
        source_image_list = []
        blklist = virsh.domblklist(vm_name).stdout_text.splitlines()
        for line in blklist:
            if line.strip().startswith(('hd', 'vd', 'sd', 'xvd')):
                source_image_list.append(line.split()[-1])
        logging.debug('domblklist %s:\n%s', vm_name, source_image_list)
        if not any(source_image in s for s in source_image_list):
            test.fail("Cannot find expected source image: %s" % source_image)

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")
    blockcopy_option = params.get("blockcopy_option")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    disk_auth_dict = {}
    size = "1"

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name)
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up dirty secrets in test environments if there are.
        utils_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            if blockcopy_option in ['reuse_external']:
                device_source = libvirt.create_local_disk(
                    backend_storage_type, disk_path, storage_size,
                    device_format)
            else:
                device_source = disk_path
            disks_img.append({
                "format": device_format,
                "source": disk_path,
                "path": disk_path
            })
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
            checkout_device_source = image_filename
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
                checkout_device_source = device_source
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi")
                auth_sec_dict = {
                    "sec_usage": "iscsi",
                    "sec_target": auth_sec_usage
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret
                virsh.secret_set_value(auth_sec_uuid,
                                       chap_passwd,
                                       encode=True,
                                       debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True,
                    is_login=False,
                    image_size=storage_size,
                    chap_user=chap_user,
                    chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {
                    "auth_user": chap_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_usage": auth_sec_usage_target
                }
                device_source = "iscsi://%s:%s/%s/%s" % (
                    iscsi_host, iscsi_port, iscsi_target, lun_num)
                disk_src_dict = {
                    "attrs": {
                        "protocol": "iscsi",
                        "name": "%s/%s" % (iscsi_target, lun_num)
                    },
                    "hosts": [{
                        "name": iscsi_host,
                        "port": iscsi_port
                    }]
                }
                checkout_device_source = 'emulated-iscsi'
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            size = "0.15"

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Start with an empty config path so cleanup knows whether a
            # config file was created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If enable auth, prepare a local file to save key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {
                    "sec_usage": auth_sec_usage_type,
                    "sec_name": "ceph_auth_secret"
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid,
                                       ceph_auth_key,
                                       ignore_status=False,
                                       debug=True)
                disk_auth_dict = {
                    "auth_user": ceph_auth_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_uuid": auth_sec_uuid
                }
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (
                ceph_disk_name, ceph_mon_ip, key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            if blockcopy_option in ['reuse_external']:
                # Create a local image.
                libvirt.create_local_disk("file", img_file, storage_size,
                                          device_format)
                # Convert the image to remote storage
                disk_path = ("rbd:%s:mon_host=%s" %
                             (ceph_disk_name, ceph_mon_ip))
                if ceph_client_name and ceph_client_key:
                    disk_path += (":id=%s:key=%s" %
                                  (ceph_auth_user, ceph_auth_key))
                rbd_cmd = (
                    "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                    " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                                   device_format, img_file, disk_path))
                process.run(rbd_cmd, ignore_status=False, shell=True)
            disk_src_dict = {
                "attrs": {
                    "protocol": "rbd",
                    "name": ceph_disk_name
                },
                "hosts": [{
                    "name": ceph_host_ip,
                    "port": ceph_host_port
                }]
            }
            checkout_device_source = ceph_disk_name
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = socket.gethostname().strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image",
                                    "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path,
                            image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({
                "hosts": [{
                    "name": nbd_server_host,
                    "port": nbd_server_port
                }]
            })
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port, image_path)
            checkout_device_source = image_path
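            # domblklist output cannot be matched against the image path
            # after pivoting to the NBD-backed copy, so skip the xml check.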
            if blockcopy_option in ['pivot']:
                ignore_check = True

        logging.debug("device source is: %s", device_source)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s", disk_auth_dict)
            # Place the <auth> element inside <source> for this test.
            disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        device_source_path = os.path.join(data_dir.get_tmp_dir(), "source.raw")
        tmp_device_source = libvirt.create_local_disk("file",
                                                      path=device_source_path,
                                                      size=size,
                                                      disk_format="raw")
        s_attach = virsh.attach_disk(vm_name,
                                     tmp_device_source,
                                     device_target,
                                     "--config",
                                     debug=True)
        libvirt.check_exit_status(s_attach)
        try:
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        # Additional operations before set block threshold
        options = params.get("options",
                             "--pivot --transient-job --verbose --wait")
        result = virsh.blockcopy(vm_name,
                                 device_target,
                                 "--xml %s" % disk_xml.xml,
                                 options=options,
                                 debug=True)
        libvirt.check_exit_status(result)
        check_source_image = None
        if blockcopy_option in ['pivot']:
            check_source_image = checkout_device_source
        else:
            check_source_image = tmp_device_source
        check_blockcopy_xml(vm_name, check_source_image, ignore_check)
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the VM first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])
        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result.stdout_text)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbd_ex:
                    logging.error("Failed to clean up nbd: %s", str(nbd_ex))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
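
For reference, the heart of the test is a single wrapped virsh call. A minimal sketch, assuming a prepared disk XML file and the same virttest helpers imported by the test (the VM name, target and path are illustrative):

from virttest import virsh
from virttest.utils_test import libvirt

# Copy the active "vdd" image onto the destination described by the XML,
# then pivot the domain onto the copy.
result = virsh.blockcopy("avocado-vt-vm1", "vdd",
                         "--xml /tmp/new_disk.xml",
                         options="--pivot --transient-job --verbose --wait",
                         debug=True)
libvirt.check_exit_status(result)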
Example No. 20
def setup_auth_enabled_iscsi_disk(vm, params):
    """
    Set up an auth-enabled iSCSI disk and attach it to the VM.

    :param vm: VM
    :param params: config params
    """
    disk_type = params.get("disk_type", "file")
    disk_target = params.get("disk_target", 'vda')
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_format = params.get("disk_format", "qcow2")
    disk_device = params.get("disk_device", "lun")
    first_disk = vm.get_first_disk_devices()
    logging.debug("first disk is %s", first_disk)
    blk_source = first_disk['source']
    if vm.is_alive():
        vm.destroy(gracefully=False)
    image_size = params.get("image_size", "5G")
    chap_user = params.get("chap_user", "redhat")
    chap_passwd = params.get("chap_passwd", "password")
    auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi")
    auth_sec_dict = {"sec_usage": "iscsi", "sec_target": auth_sec_usage}
    auth_sec_uuid = utl.create_secret(auth_sec_dict)
    # Set password of auth secret
    virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True)
    emu_image = params.get("emulated_image", "emulated-iscsi")
    utl.setup_or_cleanup_iscsi(is_setup=False)
    iscsi_target, lun_num = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=False,
                                                       image_size=image_size,
                                                       chap_user=chap_user,
                                                       chap_passwd=chap_passwd)
    # Copy first disk to emulated backing store path
    tmp_dir = data_dir.get_tmp_dir()
    emulated_path = os.path.join(tmp_dir, emu_image)
    cmd = "qemu-img convert -f %s -O %s %s %s" % ('qcow2', disk_format,
                                                  blk_source, emulated_path)
    process.run(cmd, ignore_status=False, shell=True)

    # ISCSI auth attributes for disk xml
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    disk_auth_dict = {
        "auth_user": chap_user,
        "secret_type": auth_sec_usage_type,
        "secret_usage": auth_sec_usage_target
    }
    disk_src_dict = {
        "attrs": {
            "protocol": "iscsi",
            "name": "%s/%s" % (iscsi_target, lun_num)
        },
        "hosts": [{
            "name": '127.0.0.1',
            "port": '3260'
        }]
    }
    # Add disk xml.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
    # Delete disk elements
    disk_deleted = False
    disks = vmxml.get_devices(device_type="disk")
    for disk_ in disks:
        if disk_.target['dev'] == disk_target:
            vmxml.del_device(disk_)
            disk_deleted = True
    if disk_deleted:
        vmxml.sync()
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)

    disk_xml = Disk(type_name=disk_type)
    disk_xml.device = disk_device
    disk_xml.target = {"dev": disk_target, "bus": disk_target_bus}
    driver_dict = {"name": "qemu", "type": disk_format}
    disk_xml.driver = driver_dict
    disk_source = disk_xml.new_disk_source(**disk_src_dict)
    if disk_auth_dict:
        logging.debug("disk auth dict is: %s" % disk_auth_dict)
        auth_in_source = randint(0, 50) % 2 == 0
        if auth_in_source:
            disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
        else:
            disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
    disk_xml.source = disk_source
    logging.debug("new disk xml is: %s", disk_xml)
    vmxml.add_device(disk_xml)
    vmxml.sync()
    vm.start()
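
A minimal sketch of how this helper might be driven from an avocado-vt entry point (the test body below is illustrative, not part of the suite):

def run(test, params, env):
    # Rebuild the VM with an auth-enabled iSCSI disk, then verify it boots.
    vm = env.get_vm(params.get("main_vm"))
    setup_auth_enabled_iscsi_disk(vm, params)
    vm.wait_for_login().close()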
Example No. 21
def run(test, params, env):
    """
    Test virsh domblkthreshold option.

    1. Prepare backend storage (file/luks/iscsi/gluster/ceph/nbd)
    2. Start the VM
    3. Set domblkthreshold on the target device in the VM
    4. Trigger a threshold event
    5. Check that the threshold event is received as expected
    6. Clean up the test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    block_threshold_timeout = params.get("block_threshold_timeout", "120")
    event_type = params.get("event_type", "block-threshold")
    block_threshold_option = params.get("block_threshold_option", "--loop")

    def set_vm_block_domblkthreshold(vm_name, target_device, threshold, **dargs):
        """
        Set VM block threshold on specific target device.

        :param vm_name: VM name.
        :param target_device: target device in VM
        :param threshold: threshold value with specific unit such as 100M
        :param dargs: mutable parameter dict
        """
        ret = virsh.domblkthreshold(vm_name, target_device, threshold, **dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_threshold_event(vm_domain, target):
        """
        Trigger block threshold event.

        :param vm_domain: VM object
        :param target: Disk dev in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            time.sleep(10)
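            # Write 101 MiB, just past the default 100M threshold, so the
            # block-threshold event fires.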
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   " mount /dev/{0} /mnt && "
                   " dd if=/dev/urandom of=/mnt/bigfile bs=1M count=101"
                   .format(target))
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to mount and fill data in VM: %s" % output)
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def check_threshold_event(vm_name, event_type, event_timeout, options, **dargs):
        """
        Check threshold event.

        :param vm_name: VM name
        :param event_type: event type.
        :param event_timeout: event timeout value
        :param options: event option
        :param dargs: dynamic parameters.
        """
        ret = virsh.event(vm_name, event_type, event_timeout, options, **dargs)
        logging.debug(ret.stdout_text)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param vol_params: Volume parameters dict.
        """
        # Clean up dirty volumes if pool has.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_commit(vm_name, target, blockcommit_options, **virsh_dargs):
        """
        Trigger blockcommit.

        :param vm_name: VM name
        :param target: Disk dev in VM.
        :param blockcommit_options: blockcommit option
        :param virsh_dargs: additional parameters
        """
        virsh.blockcommit(vm_name, target, blockcommit_options,
                          ignore_status=False, **virsh_dargs)

    def trigger_block_copy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs):
        """
        Trigger blockcopy

        :param vm_name: string, VM name
        :param target: string, target disk
        :param dest_path: string, the path of copied disk
        :param blockcopy_options: string, some options applied
        :param virsh_dargs: additional options
        """
        result = virsh.blockcopy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs)
        libvirt.check_exit_status(result)

    def trigger_mirror_threshold_event(vm_domain, target):
        """
        Trigger mirror mode block threshold event.

        :param vm_domain: VM object
        :param target: Disk target in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            # Sleep 10 seconds so the event-waiting thread in the main
            # thread starts first
            time.sleep(10)
            cmd = ("dd if=/dev/urandom of=file bs=1G count=3")
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to fill data in VM target: %s with %s" % (target, output))
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def get_mirror_source_index(vm_name, dev_index=0):
        """
        Get mirror source index

        :param vm_name: VM name
        :param dev_index: Disk device index.
        :return: mirror source index as an integer
        """
        disk_list = vm_xml.VMXML.get_disk_source(vm_name)
        disk_mirror = disk_list[dev_index].find('mirror')
        if disk_mirror is None:
            test.fail("Failed to get disk mirror")
        disk_mirror_source = disk_mirror.find('source')
        return int(disk_mirror_source.get('index'))

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    mirror_mode_blockcommit = "yes" == params.get("mirror_mode_blockcommit", "no")
    mirror_mode_blockcopy = "yes" == params.get("mirror_mode_blockcopy", "no")
    default_snapshot_test = "yes" == params.get("default_snapshot_test", "no")
    block_threshold_value = params.get("block_threshold_value", "100M")
    snapshot_external_disks = []
    tmp_dir = data_dir.get_tmp_dir()
    dest_path = params.get("dest_path", "/var/lib/libvirt/images/newclone")

    pvt = None
    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    if ((backend_storage_type == "luks") and
            not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot support <encryption> inside disk in this libvirt version.")

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up dirty secrets in test environments if there are.
        utils_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            device_source = libvirt.create_local_disk(backend_storage_type, disk_path, storage_size, device_format)
            disks_img.append({"format": device_format,
                              "source": disk_path, "path": disk_path})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Setup backend storage
        elif backend_storage_type == "luks":
            luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
            luks_secret_passwd = params.get("luks_secret_passwd", "password")
            # Create secret
            luks_sec_uuid = libvirt.create_secret(params)
            logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
            virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd,
                                   encode=True, ignore_status=False, debug=True)
            image_filename = params.get("image_filename", "raw.img")
            device_source = os.path.join(data_dir.get_tmp_dir(), image_filename)

            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
            disk_encryption_dict = {"encryption": "luks",
                                    "secret": {"type": "passphrase",
                                               "uuid": luks_sec_uuid}}

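            # Create the LUKS-encrypted image, feeding the passphrase to
            # qemu-img as an inline base64 secret object.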
            cmd = ("qemu-img create -f luks "
                   "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
                   "-o key-secret=sec0 %s %s" % (luks_encrypt_passwd, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a luks encrypted img by qemu-img")
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage",
                                            "libvirtiscsi")
                auth_sec_dict = {"sec_usage": "iscsi",
                                 "sec_target": auth_sec_usage}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret (not luks encryption secret)
                virsh.secret_set_value(auth_sec_uuid, chap_passwd,
                                       encode=True, ignore_status=False, debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True, is_login=False, image_size=storage_size,
                    chap_user=chap_user, chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {"auth_user": chap_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_usage": auth_sec_usage_target}
                device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port,
                                                         iscsi_target, lun_num)
                disk_src_dict = {"attrs": {"protocol": "iscsi",
                                           "name": "%s/%s" % (iscsi_target, lun_num)},
                                 "hosts": [{"name": iscsi_host, "port": iscsi_port}]}
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                    is_setup=True,
                    vol_name=gluster_vol_name,
                    pool_name=gluster_pool_name,
                    **params)

            device_source = "gluster://%s/%s/%s" % (gluster_host_ip,
                                                    gluster_vol_name,
                                                    gluster_img_name)
            cmd = ("qemu-img create -f %s "
                   "%s %s" % (device_format, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a gluster type img by qemu-img")
            disk_src_dict = {"attrs": {"protocol": "gluster",
                                       "name": "%s/%s" % (gluster_vol_name,
                                                          gluster_img_name)},
                             "hosts":  [{"name": gluster_host_ip,
                                         "port": "24007"}]}
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Start with an empty config path so cleanup knows whether a
            # config file was created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If enable auth, prepare a local file to save key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                                 "sec_name": "ceph_auth_secret"}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                                       debug=True)
                disk_auth_dict = {"auth_user": ceph_auth_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_uuid": auth_sec_uuid}
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name,
                                                               ceph_mon_ip,
                                                               key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            # Create a local image.
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (device_format, img_file, storage_size))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_path = ("rbd:%s:mon_host=%s" %
                         (ceph_disk_name, ceph_mon_ip))
            if ceph_client_name and ceph_client_key:
                disk_path += (":id=%s:key=%s" %
                              (ceph_auth_user, ceph_auth_key))
            rbd_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                       " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                                      device_format, img_file, disk_path))
            process.run(rbd_cmd, ignore_status=False, shell=True)
            disk_src_dict = {"attrs": {"protocol": "rbd",
                                       "name": ceph_disk_name},
                             "hosts":  [{"name": ceph_host_ip,
                                         "port": ceph_host_port}]}
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            # Set virt_use_nfs
            virt_use_nfs = params.get("virt_use_nfs", "off")
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_nfs value")

            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            # Create one image on nfs server
            libvirt.create_local_disk("file", device_source, '1', "raw")
            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Create a dir-based pool, and then create one volume on it.
        elif backend_storage_type == "dir":
            pool_name = params.get("pool_name", "dir_pool")
            pool_target = params.get("pool_target")
            pool_type = params.get("pool_type")
            emulated_image = params.get("emulated_image")
            image_name = params.get("dir_image_name", "luks_1.img")
            # Create and start dir_based pool.
            pvt = libvirt.PoolVolumeTest(test, params)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            sp = libvirt_storage.StoragePool()
            if not sp.is_pool_active(pool_name):
                sp.set_pool_autostart(pool_name)
                sp.start_pool(pool_name)
            # Create one volume on the pool.
            volume_name = params.get("vol_name")
            volume_alloc = params.get("vol_alloc")
            volume_cap_unit = params.get("vol_cap_unit")
            volume_cap = params.get("vol_cap")
            volume_target_path = params.get("sec_volume")
            volume_target_format = params.get("target_format")
            volume_target_encypt = params.get("target_encypt", "")
            volume_target_label = params.get("target_label")
            vol_params = {"name": volume_name, "capacity": int(volume_cap),
                          "allocation": int(volume_alloc), "format":
                          volume_target_format, "path": volume_target_path,
                          "label": volume_target_label,
                          "capacity_unit": volume_cap_unit}
            try:
                # Creating a luks encrypted volume is not supported on
                # libvirt versions lower than 2.5.0, so skip it there.
                create_vol(pool_name, vol_params)
            except AssertionError as info:
                err_msg = "create: invalid option"
                if str(info).count(err_msg):
                    test.cancel("Creating luks encryption volume "
                                "is not supported on this libvirt version")
                else:
                    test.error("Failed to create volume."
                               "Error: %s" % str(info))
            disk_src_dict = {'attrs': {'file': volume_target_path}}
            device_source = volume_target_path
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path, image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({"hosts": [{"name": nbd_server_host, "port": nbd_server_port}]})
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port,
                                                image_path)

        logging.debug("device source is: %s", device_source)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        if disk_encryption_dict:
            disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml except mirror_mode_blockcommit or mirror_mode_blockcopy
        if (not mirror_mode_blockcommit and not mirror_mode_blockcopy):
            vmxml.add_device(disk_xml)
        try:
            vmxml.sync()
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # When use wrong password in disk xml for cold plug cases,
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s", str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        func_name = trigger_block_threshold_event
        # Additional operations before set block threshold
        if backend_storage_type == "file":
            logging.info("Create snapshot...")
            snap_opt = " %s --disk-only "
            snap_opt += "%s,snapshot=external,file=%s"
            if default_snapshot_test:
                for index in range(1, 5):
                    snapshot_name = "snapshot_%s" % index
                    snap_path = "%s/%s_%s.snap" % (tmp_dir, vm_name, index)
                    snapshot_external_disks.append(snap_path)
                    snap_option = snap_opt % (snapshot_name, device_target, snap_path)
                    virsh.snapshot_create_as(vm_name, snap_option,
                                             ignore_status=False, debug=True)

            if mirror_mode_blockcommit:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Set threshold for disk mirroring feature is not supported on current version")
                vmxml.del_device(disk_xml)
                virsh.snapshot_create_as(vm_name, "--disk-only --no-metadata",
                                         ignore_status=False, debug=True)
                # Do active blockcommit in background.
                blockcommit_options = "--active"
                mirror_blockcommit_thread = threading.Thread(target=trigger_block_commit,
                                                             args=(vm_name, 'vda', blockcommit_options,),
                                                             kwargs={'debug': True})
                mirror_blockcommit_thread.start()
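                # "vda[1]" addresses the disk's backing/mirror image by index
                # instead of the top-level target.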
                device_target = "vda[1]"
                func_name = trigger_mirror_threshold_event
            if mirror_mode_blockcopy:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Set threshold for disk mirroring feature is not supported on current version")
                # Do transient blockcopy in the background.
                blockcopy_options = "--transient-job "
                # Do cleanup
                if os.path.exists(dest_path):
                    libvirt.delete_local_disk("file", dest_path)
                mirror_blockcopy_thread = threading.Thread(target=trigger_block_copy,
                                                           args=(vm_name, 'vda', dest_path, blockcopy_options,),
                                                           kwargs={'debug': True})
                mirror_blockcopy_thread.start()
                mirror_blockcopy_thread.join(10)
                device_target = "vda[%d]" % get_mirror_source_index(vm_name)
                func_name = trigger_mirror_threshold_event
        set_vm_block_domblkthreshold(vm_name, device_target, block_threshold_value, **{"debug": True})
        cli_thread = threading.Thread(target=func_name,
                                      args=(vm, device_target))
        cli_thread.start()
        check_threshold_event(vm_name, event_type, block_threshold_timeout, block_threshold_option, **{"debug": True})
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the VM first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])

        for disk in snapshot_external_disks:
            libvirt.delete_local_disk('file', disk)

        if os.path.exists(dest_path):
            libvirt.delete_local_disk("file", dest_path)

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nfs":
            result = process.run("setsebool virt_use_nfs off",
                                 shell=True)
            if result.exit_status:
                logging.info("Failed to restore virt_use_nfs value")
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbd_ex:
                    logging.error("Failed to clean up nbd: %s", str(nbd_ex))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
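
Stripped of the storage setup, the threshold/event handshake this test automates comes down to two virsh calls. A minimal sketch using the same virttest wrappers (the VM name, device and values are illustrative):

from virttest import virsh
from virttest.utils_test import libvirt

# Arm a 100M write threshold on the target device ...
ret = virsh.domblkthreshold("avocado-vt-vm1", "vdd", "100M", debug=True)
libvirt.check_exit_status(ret)
# ... then block until a block-threshold event arrives or the timeout expires.
ret = virsh.event("avocado-vt-vm1", "block-threshold", "120", "--loop",
                  debug=True)
libvirt.check_exit_status(ret)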
Example No. 22
def set_domain_disk(vmxml, blk_source, params, test):
    """
    Replace the domain disk with a newly set up device or a downloaded image.

    :param vmxml: The instance of VMXML class
    :param blk_source: The domain disk image path
    :param params: Avocado params object
    :param test: Avocado test object
    """
    disk_type = params.get("disk_type", "file")
    boot_dev = params.get("boot_dev", "hd")
    target_dev = params.get("target_dev", "vdb")
    device_bus = params.get("device_bus", "virtio")
    disk_img = params.get("disk_img")
    image_size = params.get("image_size", "3G")
    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    driver_type = params.get("driver_type", "qcow2")
    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    source_protocol = params.get("source_protocol", "")
    boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso")
    non_release_os_url = params.get("non_release_os_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(),
                                      "non_released_os.qcow2")
    release_os_url = params.get("release_os_url", "")
    download_released_file_path = os.path.join(data_dir.get_tmp_dir(),
                                               "released_os.qcow2")
    brick_path = os.path.join(test.virtdir, "gluster-pool")
    usb_index = params.get("usb_index", "0")
    bus_controller = params.get("bus_controller", "")
    usb_controller = params.get("usb_controller", "")
    usb_model = params.get("usb_model", "")

    global cleanup_iscsi
    global cleanup_gluster
    disk_params = {
        'disk_type': disk_type,
        'target_dev': target_dev,
        'target_bus': device_bus,
        'driver_type': driver_type
    }
    if source_protocol == 'iscsi':
        if disk_type == 'block':
            if release_os_url:
                blk_source = download_released_file_path
            kwargs = {'image_size': image_size, 'disk_format': disk_format}
            iscsi_target = prepare_iscsi_disk(blk_source, **kwargs)
            if iscsi_target is None:
                test.error("Failed to create iscsi disk")
            else:
                cleanup_iscsi = True
                disk_params.update({'source_file': iscsi_target})
    elif source_protocol == 'usb':
        # Assemble the XML of the USB controllers.
        controllers = vmxml.get_devices(device_type="controller")
        for dev in controllers:
            if dev.type == "usb":
                vmxml.del_device(dev)

        for model in usb_model.split(','):
            controller = Controller("controller")
            controller.type = "usb"
            controller.index = usb_index
            controller.model = model
            vmxml.add_device(controller)

        # Prepare the virtual disk device.
        dir_name = os.path.dirname(blk_source)
        device_name = os.path.join(dir_name, "usb_virtual_disk.qcow2")
        cmd = ("qemu-img convert -O {} {} {}".format(disk_format, blk_source,
                                                     device_name))
        process.run(cmd, shell=True)
        disk_params.update({'source_file': device_name})

    elif source_protocol == 'gluster':
        if disk_type == 'network':
            if release_os_url:
                blk_source = download_released_file_path
            host_ip = prepare_gluster_disk(blk_source,
                                           test,
                                           brick_path=brick_path,
                                           **params)
            if host_ip is None:
                test.error("Failed to create glusterfs disk")
            else:
                cleanup_gluster = True
            source_name = "%s/%s" % (vol_name, disk_img)
            disk_params.update({
                'source_name': source_name,
                'source_host_name': host_ip,
                'source_host_port': '24007',
                'source_protocol': source_protocol
            })
    elif source_protocol == 'rbd':
        if disk_type == 'network':
            if release_os_url:
                blk_source = download_released_file_path
            disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
            disk_cmd = ("qemu-img convert -O %s %s %s" %
                        (disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False)
            disk_params.update({
                'source_name': disk_src_name,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port,
                'source_protocol': source_protocol
            })
    elif non_release_os_url:
        disk_params.update({'source_file': download_file_path})
    elif boot_dev == "cdrom":
        disk_params.update({
            'device_type': 'cdrom',
            'source_file': boot_iso_file
        })
    elif release_os_url:
        disk_params.update({'source_file': download_released_file_path})
    else:
        disk_params.update({'source_file': blk_source})

    new_disk = Disk(type_name=disk_type)
    with open(create_disk_xml(disk_params)) as xml_file:
        new_disk.xml = xml_file.read()
    vmxml.remove_all_disk()
    vmxml.add_device(new_disk)
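
The tail of set_domain_disk() shows the generic pattern for swapping in a domain disk: build a parameter dict, render it to a disk XML file, and load that file into a Disk device object. A minimal sketch, assuming the virttest helpers used above (the source path is illustrative):

from virttest.libvirt_xml.devices.disk import Disk
from virttest.utils_test.libvirt import create_disk_xml

disk_params = {'disk_type': 'file', 'target_dev': 'vdb',
               'target_bus': 'virtio', 'driver_type': 'qcow2',
               'source_file': '/var/lib/libvirt/images/extra.qcow2'}
# create_disk_xml() writes the rendered disk XML to a file and returns its path.
new_disk = Disk(type_name='file')
with open(create_disk_xml(disk_params)) as xml_file:
    new_disk.xml = xml_file.read()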
Example No. 23
def set_domain_disk(vmxml, blk_source, params, test):
    """
    Replace the domain disk with a newly set up device or a downloaded image.

    :param vmxml: The instance of VMXML class
    :param blk_source: The domain disk image path
    :param params: Avocado params object
    :param test: Avocado test object
    """
    disk_type = params.get("disk_type", "file")
    boot_dev = params.get("boot_dev", "hd")
    target_dev = params.get("target_dev", "vdb")
    device_bus = params.get("device_bus", "virtio")
    disk_img = params.get("disk_img")
    image_size = params.get("image_size", "3G")
    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    driver_type = params.get("driver_type", "qcow2")
    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    source_protocol = params.get("source_protocol", "")
    boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso")
    non_release_os_url = params.get("non_release_os_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(),
                                      "non_released_os.qcow2")
    brick_path = os.path.join(test.virtdir, "gluster-pool")

    global cleanup_iscsi
    global cleanup_gluster
    disk_params = {
        'disk_type': disk_type,
        'target_dev': target_dev,
        'target_bus': device_bus,
        'driver_type': driver_type
    }
    if source_protocol == 'iscsi':
        if disk_type == 'block':
            kwargs = {'image_size': image_size, 'disk_format': disk_format}
            iscsi_target = prepare_iscsi_disk(blk_source, **kwargs)
            if iscsi_target is None:
                test.error("Failed to create iscsi disk")
            else:
                cleanup_iscsi = True
                disk_params.update({'source_file': iscsi_target})
    elif source_protocol == 'gluster':
        if disk_type == 'network':
            kwargs = {
                'vol_name': vol_name,
                'brick_path': brick_path,
                'disk_img': disk_img,
                'disk_format': disk_format
            }
            host_ip = prepare_gluster_disk(blk_source, test, **kwargs)
            if host_ip is None:
                test.error("Failed to create glusterfs disk")
            else:
                cleanup_gluster = True
            source_name = "%s/%s" % (vol_name, disk_img)
            disk_params.update({
                'source_name': source_name,
                'source_host_name': host_ip,
                'source_host_port': '24007',
                'source_protocol': source_protocol
            })
    elif source_protocol == 'rbd':
        if disk_type == 'network':
            disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
            disk_cmd = ("qemu-img convert -O %s %s %s" %
                        (disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False)
            disk_params.update({
                'source_name': disk_src_name,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port,
                'source_protocol': source_protocol
            })
    elif non_release_os_url:
        disk_params.update({'source_file': download_file_path})
    elif boot_dev == "cdrom":
        disk_params.update({
            'device_type': 'cdrom',
            'source_file': boot_iso_file
        })
    else:
        disk_params.update({'source_file': blk_source})

    new_disk = Disk(type_name=disk_type)
    with open(create_disk_xml(disk_params)) as xml_file:
        new_disk.xml = xml_file.read()
    vmxml.remove_all_disk()
    vmxml.add_device(new_disk)


def run(test, params, env):
    """
    Test SCSI3 Persistent Reservation functions.

    1. Prepare iscsi backend storage.
    2. Prepare disk xml.
    3. Hot/cold plug the disk to the VM.
    4. Check if SCSI3 Persistent Reservation commands can be issued to that disk.
    5. Recover the test environment.
    6. Confirm the test result.
    """
    def get_delta_parts(vm, old_parts):
        """
        Get the newly added partitions/blockdevs in vm.
        :param vm: The vm to be operated.
        :param old_parts: The original partitions/blockdevs in vm.
        :return: Newly added partitions/blockdevs.
        """
        session = vm.wait_for_login()
        new_parts = utils_disk.get_parts_list(session)
        new_parts = list(set(new_parts).difference(set(old_parts)))
        session.close()
        return new_parts

    def check_pr_cmds(vm, blk_dev):
        """
        Check if SCSI3 Persistent Reservation commands can be used in vm.
        :param vm: The vm to be checked.
        :param blk_dev: The block device in vm to be checked.
        """
        session = vm.wait_for_login()
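        # The chained sg_persist calls below walk the full persistent
        # reservation lifecycle: register a key, read the registered keys,
        # take a write-exclusive-registrants-only reservation
        # (--prout-type 5), read it back, release it, and finally drop the
        # registration again.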
        cmd = ("sg_persist --no-inquiry -v --out --register-ignore --param-sark 123aaa /dev/{0} &&"
               "sg_persist --no-inquiry --in -k /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --reserve --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -r /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --release --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -r /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --register --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -k /dev/{0}"
               .format(blk_dev))
        cmd_status, cmd_output = session.cmd_status_output(cmd)
        session.close()
        if cmd_status == 127:
            test.error("sg3_utils not installed in test image")
        elif cmd_status != 0:
            test.fail("persistent reservation failed for /dev/%s" % blk_dev)
        else:
            logging.info("persistent reservation successful for /dev/%s" % blk_dev)

    def start_or_stop_qemu_pr_helper(is_start=True, path_to_sock="/var/run/qemu-pr-helper.sock"):
        """
        Start or stop the qemu-pr-helper daemon.
        :param is_start: Set True to start, False to stop.
        :param path_to_sock: Path to the qemu-pr-helper socket.
        """
        service_mgr = service.ServiceManager()
        if is_start:
            service_mgr.start('qemu-pr-helper')
            time.sleep(2)
            shutil.chown(path_to_sock, "qemu", "qemu")
        else:
            service_mgr.stop('qemu-pr-helper')

    def ppc_controller_update():
        """
        Update the controller of a ppc vm to 'virtio-scsi' to support the
        'scsi' disk bus.
        """
        if params.get('machine_type') == 'pseries' and device_bus == 'scsi':
            if not vmxml.get_controllers(device_bus, 'virtio-scsi'):
                vmxml.del_controller(device_bus)
                ppc_controller = Controller('controller')
                ppc_controller.type = device_bus
                ppc_controller.index = '0'
                ppc_controller.model = 'virtio-scsi'
                vmxml.add_device(ppc_controller)
                vmxml.sync()

    # Check if SCSI3 Persistent Reservations are supported by
    # the current libvirt version.
    if not libvirt_version.version_compare(4, 4, 0):
        test.cancel("The <reservations> tag is only supported by libvirt "
                    "from version 4.4.0")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    # Disk specific attributes
    device = params.get("virt_disk_device", "lun")
    device_target = params.get("virt_disk_device_target", "sdb")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "block")
    device_bus = params.get("virt_disk_device_bus", "scsi")
    # Iscsi options
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1G")
    auth_uuid = "yes" == params.get("auth_uuid")
    auth_usage = "yes" == params.get("auth_usage")
    # SCSI3 PR options
    reservations_managed = "yes" == params.get("reservations_managed", "yes")
    reservations_source_type = params.get("reservations_source_type", "unix")
    reservations_source_path = params.get("reservations_source_path",
                                          "/var/run/qemu-pr-helper.sock")
    reservations_source_mode = params.get("reservations_source_mode", "client")
    secret_uuid = ""
    # Case step options
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")

    # Start vm and get all partitions in vm
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_in_source = "yes" == params.get("auth_in_source", "no")
            if auth_in_source and not libvirt_version.version_compare(3, 9, 0):
                test.cancel("place auth in source is not supported in "
                            "current libvirt version.")
            auth_type = params.get("auth_type", "chap")
            secret_usage_target = params.get("secret_usage_target",
                                             "libvirtiscsi")
            secret_usage_type = params.get("secret_usage_type", "iscsi")
            chap_user = params.get("iscsi_user", "redhat")
            chap_passwd = params.get("iscsi_password", "redhat")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
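            # The regex above pulls the UUID column out of virsh's
            # "Secret <uuid> created" message; an equivalent, more readable
            # form would be re.search(r"Secret (\S+) created", ...).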
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(str(chap_passwd).encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        blk_dev = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                 is_login=True,
                                                 image_size=emulated_size,
                                                 chap_user=chap_user,
                                                 chap_passwd=chap_passwd,
                                                 portal_ip=iscsi_host)

        # Add disk xml
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        auth_dict = {}
        if auth_uuid:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_uuid": secret_uuid}
        elif auth_usage:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_usage": secret_usage_target}
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {"dev": blk_dev}})
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if auth_in_source:
                disk_source.auth = disk_auth
            else:
                disk_xml.auth = disk_auth
        if reservations_managed:
            reservations_dict = {"reservations_managed": "yes"}
        else:
            start_or_stop_qemu_pr_helper(path_to_sock=reservations_source_path)
            reservations_dict = {"reservations_managed": "no",
                                 "reservations_source_type": reservations_source_type,
                                 "reservations_source_path": reservations_source_path,
                                 "reservations_source_mode": reservations_source_mode}
        disk_source.reservations = disk_xml.new_reservations(**reservations_dict)
        disk_xml.source = disk_source

        # Update controller of ppc vms
        ppc_controller_update()

        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        try:
            # Start the VM and check status
            vmxml.sync()
            vm.start()
            vm.wait_for_login().close()
            time.sleep(5)
            if hotplug_disk:
                result = virsh.attach_device(vm_name, disk_xml.xml,
                                             ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            new_parts = get_delta_parts(vm, old_parts)
            if len(new_parts) != 1:
                logging.error("Expected 1 dev added but has %s" % len(new_parts))
            new_part = new_parts[0]
            check_pr_cmds(vm, new_part)
            result = virsh.detach_device(vm_name, disk_xml.xml,
                                         ignore_status=True, debug=True, wait_remove_event=True)
            libvirt.check_exit_status(result)
        except virt_vm.VMStartError as e:
            test.fail("VM failed to start."
                      "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            test.fail("Failed to define VM:\n%s" % xml_error)

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")
        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
        # Stop qemu-pr-helper daemon
        start_or_stop_qemu_pr_helper(is_start=False)
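
For reference, a hand-written sketch (not captured output) of the <source> element the unmanaged branch above builds; the device path is a placeholder and the socket path is the test's default. With managed reservations the element collapses to a bare <reservations managed='yes'/>:

example_unmanaged_source = """
<source dev='/dev/sdb'>
  <reservations managed='no'>
    <source type='unix' path='/var/run/qemu-pr-helper.sock' mode='client'/>
  </reservations>
</source>
"""
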
Ejemplo n.º 25
0
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare tgtd and secret config.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def check_save_restore(save_file):
        """
        Test domain save and restore.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Test domain snapshot operation.
        """
        snapshot1 = "s1"
        snapshot2 = "s2"

        ret = virsh.snapshot_create_as(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(
            vm_name, "%s --disk-only --diskspec vda,"
            "file=/tmp/testvm-snap1" % snapshot2)
        libvirt.check_exit_status(ret, True)

        ret = virsh.snapshot_create_as(
            vm_name, "%s --memspec file=%s,snapshot=external"
            " --diskspec vda,file=/tmp/testvm-snap2" % (snapshot2, snapshot2))
        libvirt.check_exit_status(ret, True)

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param target: Disk dev in VM.
        :param old_parts: Original partitions in VM.
        :return: True if check succeeds.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                added_part = added_parts[0]
            if not added_part:
                logging.error("Cann't see added partition in VM")
                return False

            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_qemu_cmd():
        """
        Check qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if driver_iothread:
            cmd += " | grep iothread=iothread%s" % driver_iothread

        if process.system(cmd, ignore_status=True, shell=True):
            test.fail("Can't see disk option '%s' " "in command line" % cmd)

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")

    # Controller specific attributes.
    cntlr_type = params.get('controller_type', None)
    cntlr_model = params.get('controller_model', None)
    cntlr_index = params.get('controller_index', None)
    controller_addr_options = params.get('controller_addr_options', None)

    driver_iothread = params.get("driver_iothread")

    # iscsi options.
    iscsi_target = params.get("iscsi_target")
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1")
    uuid = params.get("uuid", "")
    auth_uuid = "yes" == params.get("auth_uuid", "")
    auth_usage = "yes" == params.get("auth_usage", "")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    test_save_snapshot = "yes" == params.get("test_save_snapshot", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")

    secret_uuid = ""

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_place_in_location = params.get("auth_place_in_location")
            if 'source' in auth_place_in_location and not libvirt_version.version_compare(
                    3, 9, 0):
                test.cancel(
                    "place auth in source is not supported in current libvirt version"
                )
            auth_type = params.get("auth_type")
            secret_usage_target = params.get("secret_usage_target")
            secret_usage_type = params.get("secret_usage_type")
            chap_user = params.get("iscsi_user")
            chap_passwd = params.get("iscsi_password")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip=iscsi_host)

        # If we use qcow2 disk format, we should format the iscsi disk first.
        if device_format == "qcow2":
            cmd = (
                "qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s" %
                (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size))
            process.run(cmd, shell=True)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}

        # For lun type devices, the iothread attribute needs to be set on the
        # controller instead of the disk driver.
        if driver_iothread:
            if device != "lun":
                driver_dict.update({"iothread": driver_iothread})
            vmxml.iothreads = int(driver_iothread)

        disk_xml.driver = driver_dict
        # Check if we want to use a faked uuid.
        if not uuid:
            uuid = secret_uuid
        auth_dict = {}
        if auth_uuid:
            auth_dict = {
                "auth_user": chap_user,
                "secret_type": secret_usage_type,
                "secret_uuid": uuid
            }
        elif auth_usage:
            auth_dict = {
                "auth_user": chap_user,
                "secret_type": secret_usage_type,
                "secret_usage": secret_usage_target
            }
        disk_source = disk_xml.new_disk_source(
            **{
                "attrs": {
                    "protocol": "iscsi",
                    "name": "%s/%s" % (iscsi_target, lun_num)
                },
                "hosts": [{
                    "name": iscsi_host,
                    "port": iscsi_port
                }]
            })
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if 'source' in auth_place_in_location:
                disk_source.auth = disk_auth
            if 'disk' in auth_place_in_location:
                disk_xml.auth = disk_auth

        disk_xml.source = disk_source
        # Sync VM xml.
        vmxml.add_device(disk_xml)

        # After virtio 1.0 is enabled, lun type devices need to use
        # virtio-scsi instead of virtio, so an additional controller is
        # needed. Add the controller.
        if device == "lun":
            ctrl = Controller(type_name=cntlr_type)
            if cntlr_model is not None:
                ctrl.model = cntlr_model
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            ctrl_addr_dict = {}
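            # controller_addr_options is a comma-separated attr=value list,
            # e.g. "type=pci,bus=0x00,slot=0x09" (illustrative value).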
            for addr_option in controller_addr_options.split(','):
                if addr_option != "":
                    addr_part = addr_option.split('=')
                    ctrl_addr_dict.update(
                        {addr_part[0].strip(): addr_part[1].strip()})
            ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict)

            # If driver_iothread is set, we need to add the iothread attribute to the controller.
            if driver_iothread:
                ctrl_driver_dict = {}
                ctrl_driver_dict.update({"iothread": driver_iothread})
                ctrl.driver = ctrl_driver_dict
            logging.debug("Controller XML is:%s", ctrl)
            if cntlr_type:
                vmxml.del_controller(cntlr_type)
            else:
                vmxml.del_controller("scsi")
            vmxml.add_device(ctrl)

        try:
            # Start the VM and check status.
            vmxml.sync()
            vm.start()
            if status_error:
                test.fail("VM started unexpectedly.")

            # Check Qemu command line
            if test_qemu_cmd:
                check_qemu_cmd()

        except virt_vm.VMStartError as e:
            if status_error:
                if re.search(uuid, str(e)):
                    pass
            else:
                test.fail("VM failed to start." "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            # Check partitions in VM.
            if check_partitions:
                if not check_in_vm(device_target, old_parts):
                    test.fail("Check disk partitions in VM failed")
            # Test domain save/restore/snapshot.
            if test_save_snapshot:
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         "%.save" % vm_name)
                check_save_restore(save_file)
                check_snapshot()
                if os.path.exists(save_file):
                    os.remove(save_file)

    finally:
        # Delete snapshots.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
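
For orientation, a hand-written sketch of the network-type disk the test above assembles (assuming device_type='network'; the IQN, host, target and auth values are placeholders, and <auth> may sit under <disk> or under <source> depending on auth_place_in_location):

example_iscsi_disk = """
<disk type='network' device='disk'>
  <driver name='qemu' type='raw'/>
  <source protocol='iscsi' name='iqn.2016-06.example:target/0'>
    <host name='127.0.0.1' port='3260'/>
  </source>
  <auth username='redhat'>
    <secret type='iscsi' usage='libvirtiscsi'/>
  </auth>
  <target dev='vdd' bus='virtio'/>
</disk>
"""
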
Ejemplo n.º 26
0
                    img["disk_dev"].cleanup()
            else:
                if img["format"] == "iscsi":
                    libvirt.setup_or_cleanup_iscsi(is_setup=False)
                if img["format"] not in ["dir", "scsi"]:
                    os.remove(img["source"])
        raise error.TestNAError("Creating disk failed")

    # Build disks xml.
    disks_xml = []
    # Additional disk images.
    disks_img = []
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    try:
        for i in range(len(disks)):
            disk_xml = Disk(type_name=device_types[i])
            # If we are testing image file on iscsi disk,
            # mount the disk and then create the image.
            if test_file_img_on_disk:
                mount_path = "/tmp/diskimg"
                if utils.run("mkdir -p %s && mount %s %s" %
                             (mount_path, disks[i]["source"], mount_path),
                             ignore_status=True).exit_status:
                    raise error.TestNAError("Prepare disk failed")
                disk_path = "%s/%s.qcow2" % (mount_path,
                                             device_source_names[i])
                disk_source = libvirt.create_local_disk("file",
                                                        disk_path,
                                                        "1",
                                                        disk_format="qcow2")
                disks_img.append({
Ejemplo n.º 27
0
def run(test, params, env):
    """
    Test virsh domblkerror in 2 types error
    1. unspecified error
    2. no space
    """

    if not virsh.has_help_command('domblkerror'):
        test.cancel("This version of libvirt does not support domblkerror "
                    "test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    error_type = params.get("domblkerror_error_type")
    timeout = params.get("domblkerror_timeout", 240)
    mnt_dir = params.get("domblkerror_mnt_dir", "/home/test")
    export_file = params.get("nfs_export_file", "/etc/exports")
    img_name = params.get("domblkerror_img_name", "libvirt-disk")
    img_size = params.get("domblkerror_img_size")
    target_dev = params.get("domblkerror_target_dev", "vdb")
    pool_name = params.get("domblkerror_pool_name", "fs_pool")
    vol_name = params.get("domblkerror_vol_name", "vol1")
    ubuntu = distro.detect().name == 'Ubuntu'
    rhel = distro.detect().name == 'rhel'
    nfs_service_package = params.get("nfs_service_package", "nfs-kernel-server")
    nfs_service = None
    selinux_bool = None
    session = None
    selinux_bak = ""

    vm = env.get_vm(vm_name)
    if error_type == "unspecified error":
        selinux_local = params.get("setup_selinux_local", "yes") == "yes"
        if not ubuntu and not rhel:
            nfs_service_package = "nfs"
        elif rhel:
            nfs_service_package = "nfs-server"
        if not rhel and not utils_package.package_install(nfs_service_package):
            test.cancel("NFS package not available in host to test")
        # backup /etc/exports
        shutil.copyfile(export_file, "%s.bak" % export_file)
    # backup xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Generate tmp dir
        tmp_dir = data_dir.get_tmp_dir()
        img_dir = os.path.join(tmp_dir, 'images')
        if not os.path.exists(img_dir):
            os.mkdir(img_dir)
        # Generate attached disk
        process.run("qemu-img create %s %s" %
                    (os.path.join(img_dir, img_name), img_size),
                    shell=True, verbose=True)

        # Get unspecified error
        if error_type == "unspecified error":
            # In this situation, the guest attaches a disk backed by nfs;
            # stopping the nfs service pauses the guest with an
            # "unspecified error"
            nfs_dir = os.path.join(tmp_dir, 'mnt')
            if not os.path.exists(nfs_dir):
                os.mkdir(nfs_dir)
            mount_opt = "rw,no_root_squash,async"
            res = libvirt.setup_or_cleanup_nfs(is_setup=True,
                                               mount_dir=nfs_dir,
                                               is_mount=False,
                                               export_options=mount_opt,
                                               export_dir=img_dir)
            if not ubuntu:
                selinux_bak = res["selinux_status_bak"]
            process.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 "
                        "127.0.0.1:%s %s" % (img_dir, nfs_dir), shell=True,
                        verbose=True)
            img_path = os.path.join(nfs_dir, img_name)
            nfs_service = Factory.create_service(nfs_service_package)
            if not ubuntu and selinux_local:
                params['set_sebool_local'] = "yes"
                params['local_boolean_varible'] = "virt_use_nfs"
                params['local_boolean_value'] = "on"
                selinux_bool = utils_misc.SELinuxBoolean(params)
                selinux_bool.setup()

        elif error_type == "no space":
            # Steps to generate no space block error:
            # 1. Prepare an iscsi disk and build a fs pool with it
            # 2. Create vol with larger capacity and 0 allocation
            # 3. Attach this disk in guest
            # 4. In guest, create a large image in the vol, which may cause
            # the guest to pause

            _pool_vol = None
            pool_target = os.path.join(tmp_dir, pool_name)
            _pool_vol = libvirt.PoolVolumeTest(test, params)
            _pool_vol.pre_pool(pool_name, "fs", pool_target, img_name,
                               image_size=img_size)
            _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name)
            img_path = os.path.join(pool_target, vol_name)

        # Generate disk xml
        # Guest will attach a disk with cache=none and error_policy=stop
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.driver = {'name': "qemu",
                           'type': "raw",
                           'cache': "none",
                           'error_policy': "stop"}
        img_disk.target = {'dev': target_dev,
                           'bus': "virtio"}
        logging.debug("disk xml is %s", img_disk.xml)

        # Start guest and get session
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Get disk list before operation
        get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2"
        bef_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk_list_debug = %s", bef_list)

        # Attach disk to guest
        ret = virsh.attach_device(vm_name, img_disk.xml)
        if ret.exit_status != 0:
            test.fail("Fail to attach device %s" % ret.stderr)
        time.sleep(2)
        logging.debug("domain xml is %s", virsh.dumpxml(vm_name))
        # get disk list after attach
        aft_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk list after attaching - %s", aft_list)
        # Find new disk after attach
        new_disk = "".join(list(set(bef_list) ^ set(aft_list)))
        logging.debug("new disk is %s", new_disk)

        def create_large_image():
            """
            Create large image in guest
            """
            # install dependent packages
            pkg_list = ["parted", "e2fsprogs"]
            for pkg in pkg_list:
                if not utils_package.package_install(pkg, session):
                    test.error("Failed to install dependent package %s" % pkg)

            # create partition and file system
            session.cmd("parted -s %s mklabel msdos" % new_disk)
            session.cmd("parted -s %s mkpart primary ext3 '0%%' '100%%'" %
                        new_disk)
            # mount disk and write file in it
            session.cmd("mkfs.ext3 %s1" % new_disk)
            session.cmd("mkdir -p %s && mount %s1 %s" %
                        (mnt_dir, new_disk, mnt_dir))

            # The following step may pause the guest before it returns
            try:
                session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 "
                            "count=51200 && sync" % mnt_dir)
            except Exception as err:
                logging.debug("Expected Fail %s", err)
            session.close()

        create_large_image()
        if error_type == "unspecified error":
            # umount nfs to trigger error after create large image
            if nfs_service is not None:
                nfs_service.stop()
                logging.debug("nfs status is %s", nfs_service.status())

        # wait and check the guest status with timeout
        def _check_state():
            """
            Check domain state
            """
            return (vm.state() == "paused")

        if not utils_misc.wait_for(_check_state, timeout):
            # If not paused, perform one more IO operation to the mnt disk
            session = vm.wait_for_login()
            session.cmd("echo 'one more write to big file' > %s/big_file" % mnt_dir)
            if not utils_misc.wait_for(_check_state, 60):
                test.fail("Guest does not paused, it is %s now" % vm.state())
        else:
            logging.info("Now domain state changed to paused status")
            output = virsh.domblkerror(vm_name)
            if output.exit_status == 0:
                expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
                if output.stdout.strip() == expect_result:
                    logging.info("Get expect result: %s", expect_result)
                else:
                    test.fail("Failed to get expect result, get %s" %
                              output.stdout.strip())
            else:
                test.fail("Fail to get domblkerror info:%s" % output.stderr)
    finally:
        logging.info("Do clean steps")
        if session:
            session.close()
        if error_type == "unspecified error":
            if nfs_service is not None:
                nfs_service.start()
            vm.destroy()
            if os.path.isfile("%s.bak" % export_file):
                shutil.move("%s.bak" % export_file, export_file)
            res = libvirt.setup_or_cleanup_nfs(is_setup=False,
                                               mount_dir=nfs_dir,
                                               export_dir=img_dir,
                                               restore_selinux=selinux_bak)
            if selinux_bool:
                selinux_bool.cleanup(keep_authorized_keys=True)
        elif error_type == "no space":
            vm.destroy()
            if _pool_vol:
                _pool_vol.cleanup_pool(pool_name, "fs", pool_target, img_name)
        vmxml_backup.sync()
        data_dir.clean_tmp_files()
Ejemplo n.º 28
0
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare pool, volume.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name: Pool name.
        :param p_type: Pool type.
        :param p_target: Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param target_encrypt_params: Encryption parameters in dict.
        :param vol_params: Volume parameters dict.
        :return: True if created successfully.
        """
        # Clean up dirty volumes if the pool has any.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_secret(vol_path):
        """
        Create secret.

        :param vol_path: Volume path.
        :return: Secret uuid if created successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        libvirt.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError:
            test.error("Failed to get newly created secret uuid")
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            secret_password_no_encoded.encode(encoding)).decode(encoding)
        ret = virsh.secret_set_value(encryption_uuid, secret_string,
                                     **virsh_dargs)
        libvirt.check_exit_status(ret)
        return encryption_uuid

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list = virsh.secret_list().stdout.strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # If secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace; the uuid is the first column
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Original partitions in VM.
        :return: True if check succeeds.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status(
                "rpm -q parted || "
                "yum install -y parted", 300)
            if rpm_stat != 0:
                test.fail("Failed to query/install parted, make sure"
                          " that you have usable repo in guest")

            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            device_source = os.path.join(os.sep, 'dev', added_part)
            libvirt.mk_label(device_source, session=session)
            libvirt.mk_part(device_source, size="10M", session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    encryption_in_source = "yes" == params.get("encryption_in_source")
    encryption_out_source = "yes" == params.get("encryption_out_source")
    if encryption_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Cannot put <encryption> inside disk <source> in "
                    "this libvirt version.")
    # Pool/Volume options.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    volume_name = params.get("vol_name")
    volume_alloc = params.get("vol_alloc")
    volume_cap_unit = params.get("vol_cap_unit")
    volume_cap = params.get("vol_cap")
    volume_target_path = params.get("target_path")
    volume_target_format = params.get("target_format")
    volume_target_encypt = params.get("target_encypt", "")
    volume_target_label = params.get("target_label")

    hotplug = "yes" == params.get("virt_disk_device_hotplug")
    status_error = "yes" == params.get("status_error")
    secret_type = params.get("secret_type", "passphrase")
    secret_password_no_encoded = params.get("secret_password_no_encoded",
                                            "redhat")
    virt_disk_qcow2_format = "yes" == params.get("virt_disk_qcow2_format")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    sec_encryption_uuid = None
    try:
        # Prepare the disk.
        sec_uuids = []
        # Clean up dirty secrets in the test environment if there are any.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {
            "name": volume_name,
            "capacity": int(volume_cap),
            "allocation": int(volume_alloc),
            "format": volume_target_format,
            "path": volume_target_path,
            "label": volume_target_label,
            "capacity_unit": volume_cap_unit
        }
        vol_encryption_params = {}
        vol_encryption_params.update({"format": volume_target_encypt})
        # For any disk format other than qcow2, we need to create the secret first.
        if not virt_disk_qcow2_format:
            # create secret.
            sec_encryption_uuid = create_secret(volume_target_path)
            sec_uuids.append(sec_encryption_uuid)
            vol_encryption_params.update(
                {"secret": {
                    "type": secret_type,
                    "uuid": sec_encryption_uuid
                }})
        try:
            # If the libvirt version is lower than 2.5.0, creating a luks
            # encryption volume is not supported, so skip it.
            create_vol(pool_name, vol_encryption_params, vol_params)
        except AssertionError as info:
            err_msgs = ("create: invalid option")
            if str(info).count(err_msgs):
                test.error("Creating luks encryption volume "
                           "is not supported on this libvirt version")
            else:
                test.error("Failed to create volume." "Error: %s" % str(info))
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {
                dev_attrs: volume_target_path
            }})
        disk_xml.driver = {
            "name": "qemu",
            "type": volume_target_format,
            "cache": "none"
        }
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuids.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format,
                          v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            encryption_dict = {
                "encryption": v_xml.encryption.format,
                "secret": {
                    "type": v_xml.encryption.secret["type"],
                    "uuid": v_xml.encryption.secret["uuid"]
                }
            }
            if encryption_in_source:
                disk_source.encryption = disk_xml.new_encryption(
                    **encryption_dict)
            if encryption_out_source:
                disk_xml.encryption = disk_xml.new_encryption(
                    **encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(disk_xml)
            vmxml.sync()

        try:
            # Start the VM and do disk hotplug if required,
            # then check disk status in vm.
            # Note that LUKS encrypted virtual disk without <encryption>
            # can be normally started or attached since qemu will just treat
            # it as RAW, so we don't test LUKS with status_error=TRUE.
            vm.start()
            vm.wait_for_login()
            if status_error:
                if hotplug:
                    logging.debug("attaching disk, expecting error...")
                    result = virsh.attach_device(vm_name, disk_xml.xml)
                    libvirt.check_exit_status(result, status_error)
                else:
                    test.fail("VM started unexpectedly.")
            else:
                if hotplug:
                    result = virsh.attach_device(vm_name,
                                                 disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
                    result = virsh.detach_device(vm_name,
                                                 disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                else:
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                if hotplug:
                    test.fail(
                        "In hotplug scenario, VM should "
                        "start successfully but did not. "
                        "Error: %s" % str(e))
                else:
                    logging.debug(
                        "VM failed to start as expected."
                        "Error: %s", str(e))
            else:
                # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images
                # are no longer supported.
                err_msgs = ("AES-CBC encrypted qcow2 images is"
                            " no longer supported in system emulators")
                if str(e).count(err_msgs):
                    test.cancel(err_msgs)
                else:
                    test.fail("VM failed to start." "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
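
For reference, a hand-written sketch of the <encryption> element the test above attaches either to the disk or to its <source>; the format follows the volume's target_encypt parameter (e.g. 'luks') and the uuid placeholder stands for whatever create_secret() returned:

example_encryption = """
<encryption format='luks'>
  <secret type='passphrase' uuid='...'/>
</encryption>
"""
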
Ejemplo n.º 29
0
def run(test, params, env):
    """
    Test vm backingchain, blockcopy
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')
    case = params.get('case', '')
    blockcommand = params.get('blockcommand', '')
    blk_top = int(params.get('top', 0))
    blk_base = int(params.get('base', 0))
    opts = params.get('opts', '--verbose --wait')
    check_func = params.get('check_func', '')
    disk_type = params.get('disk_type', '')
    disk_src = params.get('disk_src', '')
    driver_type = params.get('driver_type', 'qcow2')
    vol_name = params.get('vol_name', 'vol_blockpull')
    pool_name = params.get('pool_name', '')
    brick_path = os.path.join(data_dir.get_tmp_dir(), pool_name)
    vg_name = params.get('vg_name', 'HostVG')
    vol_size = params.get('vol_size', '10M')

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    # List to collect paths to delete after test
    file_to_del = []
    virsh_dargs = {'debug': True, 'ignore_status': False}

    try:
        all_disks = vmxml.get_disk_source(vm_name)
        if not all_disks:
            test.error('No disk file found in vm.')
        image_file = all_disks[0].find('source').get('file')
        logging.debug('Image file of vm: %s', image_file)

        # Get all dev of virtio disks to calculate the dev of new disk
        all_vdisks = [disk for disk in all_disks if disk.find('target').get('dev').startswith('vd')]
        disk_dev = all_vdisks[-1].find('target').get('dev')
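        # Bump the trailing letter to get the next free dev,
        # e.g. 'vda' -> 'vdb'.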
        new_dev = disk_dev[:-1] + chr(ord(disk_dev[-1]) + 1)

        # Setup iscsi target
        if disk_src == 'iscsi':
            disk_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target: %s', disk_target)

        # Setup lvm
        elif disk_src == 'lvm':
            # Stop multipathd to avoid vgcreate fail
            multipathd = service.Factory.create_service("multipathd")
            multipathd_status = multipathd.status()
            if multipathd_status:
                multipathd.stop()

            # Setup iscsi target
            device_name = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target for lvm: %s', device_name)

            # Create a volume group on the iscsi device
            logical_device = device_name
            lv_utils.vg_create(vg_name, logical_device)
            vg_created = True

            # Create logical volume as backing store
            vol_bk, vol_disk = 'vol1', 'vol2'
            lv_utils.lv_create(vg_name, vol_bk, vol_size)

            disk_target = '/dev/%s/%s' % (vg_name, vol_bk)
            src_vol = '/dev/%s/%s' % (vg_name, vol_disk)

        # Setup gluster
        elif disk_src == 'gluster':
            host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True, brick_path=brick_path, **params)
            logging.debug(host_ip)
            gluster_img = 'test.img'
            img_create_cmd = "qemu-img create -f raw /mnt/%s 10M" % gluster_img
            process.run("mount -t glusterfs %s:%s /mnt; %s; umount /mnt"
                        % (host_ip, vol_name, img_create_cmd), shell=True)
            disk_target = 'gluster://%s/%s/%s' % (host_ip, vol_name, gluster_img)

        else:
            test.error('Wrong disk source, unsupported by this test.')

        new_image = os.path.join(os.path.split(image_file)[0], 'test.img')
        params['snapshot_list'] = ['s%d' % i for i in range(1, 5)]

        if disk_src == 'lvm':
            new_image = src_vol
            if disk_type == 'block':
                new_image = disk_target
                for i in range(2, 6):
                    lv_utils.lv_create(vg_name, 'vol%s' % i, vol_size)
                snapshot_image_list = ['/dev/%s/vol%s' % (vg_name, i) for i in range(2, 6)]
        else:
            file_to_del.append(new_image)
            snapshot_image_list = [new_image.replace('img', i) for i in params['snapshot_list']]
        cmd_create_img = 'qemu-img create -f %s -b %s %s -F raw' % (driver_type, disk_target, new_image)
        if disk_type != 'block' or driver_type != 'raw':
            process.run(cmd_create_img, verbose=True, shell=True)
        info_new = utils_misc.get_image_info(new_image)
        logging.debug(info_new)

        # Create xml of new disk and add it to vmxml
        if disk_type:
            new_disk = Disk()
            new_disk.xml = libvirt.create_disk_xml({
                'type_name': disk_type,
                'driver_type': driver_type,
                'target_dev': new_dev,
                'source_file': new_image
            })

            logging.debug(new_disk.xml)

            vmxml.devices = vmxml.devices.append(new_disk)
            vmxml.xmltreefile.write()
            logging.debug(vmxml)
            vmxml.sync()

        vm.start()
        logging.debug(virsh.dumpxml(vm_name))

        # Create backing chain
        for i in range(len(params['snapshot_list'])):
            virsh.snapshot_create_as(
                vm_name,
                '%s --disk-only --diskspec %s,file=%s,stype=%s' %
                (params['snapshot_list'][i], new_dev, snapshot_image_list[i],
                 disk_type),
                **virsh_dargs
            )

            # Get path of each snapshot file
            snaps = virsh.domblklist(vm_name, debug=True).stdout.splitlines()
            for line in snaps:
                if line.lstrip().startswith(('hd', 'sd', 'vd')):
                    file_to_del.append(line.split()[-1])

        qemu_img_cmd = 'qemu-img info --backing-chain %s' % snapshot_image_list[-1]
        if libvirt_storage.check_qemu_image_lock_support():
            qemu_img_cmd += " -U"
        bc_info = process.run(qemu_img_cmd, verbose=True, shell=True).stdout_text

        if disk_type != 'block':
            bc_chain = snapshot_image_list[::-1] + [new_image, disk_target]
        else:
            bc_chain = snapshot_image_list[::-1] + [new_image]
        bc_result = check_backingchain(bc_chain, bc_info)
        if not bc_result:
            test.fail('qemu-img info output of backing chain is not correct: %s'
                      % bc_info)

        # Generate blockpull/blockcommit options
        virsh_blk_cmd = eval('virsh.%s' % blockcommand)
        if blockcommand == 'blockpull' and blk_base != 0:
            opts += '--base {dev}[{}]'.format(blk_base, dev=new_dev)
        elif blockcommand == 'blockcommit':
            opt_top = ' --top {dev}[{}]'.format(blk_top, dev=new_dev) if blk_top != 0 else ''
            opt_base = ' --base {dev}[{}]'.format(blk_base, dev=new_dev) if blk_base != 0 else ''
            opts += opt_top + opt_base + ' --active' if blk_top == 0 else ''

        # Do blockpull/blockcommit
        virsh_blk_cmd(vm_name, new_dev, opts, **virsh_dargs)
        if blockcommand == 'blockcommit':
            virsh.blockjob(vm_name, new_dev, '--pivot', **virsh_dargs)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("XML after %s: %s" % (blockcommand, vmxml))

        # Check backing chain after blockpull/blockcommit
        check_bc_func_name = 'check_bc_%s' % check_func
        if check_bc_func_name in globals():
            check_bc = eval(check_bc_func_name)
            if not callable(check_bc):
                logging.warning('Function "%s" is not callable.', check_bc_func_name)
            if not check_bc(blockcommand, vmxml, new_dev, bc_chain):
                test.fail('Backing chain check after %s failed' % blockcommand)
        else:
            logging.warning('Function "%s" is not implemented.', check_bc_func_name)

        virsh.dumpxml(vm_name, debug=True)

        # Check whether login is successful
        try:
            vm.wait_for_login().close()
        except Exception as e:
            test.fail('VM login failed: %s' % e)

    finally:
        logging.info('Start cleaning up.')
        for ss in params.get('snapshot_list', []):
            virsh.snapshot_delete(vm_name, '%s --metadata' % ss, debug=True)
        bkxml.sync()
        for path in file_to_del:
            logging.debug('Remove %s', path)
            if os.path.exists(path):
                os.remove(path)
        if disk_src == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'lvm':
            process.run('rm -rf /dev/%s/%s' % (vg_name, vol_disk), ignore_status=True)
            if 'vol_bk' in locals():
                lv_utils.lv_remove(vg_name, vol_bk)
            if 'vg_created' in locals() and vg_created:
                lv_utils.vg_remove(vg_name)
                cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
                pv_name = process.run(cmd, shell=True, verbose=True).stdout_text.strip()
                if pv_name:
                    process.run("pvremove %s" % pv_name, verbose=True, ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'gluster':
            gluster.setup_or_cleanup_gluster(
                is_setup=False, brick_path=brick_path, **params)
        if 'multipathd_status' in locals() and multipathd_status:
            multipathd.start()
Ejemplo n.º 30
0
def run(test, params, env):
    """
    Test virsh domblkerror with two types of error:
    1. unspecified error
    2. no space
    """

    if not virsh.has_help_command('domblkerror'):
        raise error.TestNAError("This version of libvirt does not support "
                                "domblkerror test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    error_type = params.get("domblkerror_error_type")
    timeout = int(params.get("domblkerror_timeout", 240))
    mnt_dir = params.get("domblkerror_mnt_dir", "/home/test")
    export_file = params.get("nfs_export_file", "/etc/exports")
    img_name = params.get("domblkerror_img_name", "libvirt-disk")
    img_size = params.get("domblkerror_img_size")
    target_dev = params.get("domblkerror_target_dev", "vdb")
    pool_name = params.get("domblkerror_pool_name", "fs_pool")
    vol_name = params.get("domblkerror_vol_name", "vol1")

    vm = env.get_vm(vm_name)
    # backup /etc/exports
    shutil.copyfile(export_file, "%s.bak" % export_file)
    selinux_bak = ""
    # backup xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Generate tmp dir
        tmp_dir = data_dir.get_tmp_dir()
        img_dir = os.path.join(tmp_dir, 'images')
        if not os.path.exists(img_dir):
            os.mkdir(img_dir)
        # Generate attached disk
        utils.run("qemu-img create %s %s" %
                  (os.path.join(img_dir, img_name), img_size))

        # Get unspecified error
        if error_type == "unspecified error":
            # In this case the guest attaches a disk backed by NFS; stopping
            # the NFS service pauses the guest with an unspecified error
            nfs_dir = os.path.join(tmp_dir, 'mnt')
            if not os.path.exists(nfs_dir):
                os.mkdir(nfs_dir)
            mount_opt = "rw,no_root_squash,async"
            res = utils_test.libvirt.setup_or_cleanup_nfs(
                is_setup=True,
                mount_dir=nfs_dir,
                is_mount=False,
                export_options=mount_opt,
                export_dir=img_dir)
            selinux_bak = res["selinux_status_bak"]
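            # Soft-mount with minimal timeout/retry so I/O errors surface
            # quickly once the NFS service stops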
            utils.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 "
                      "127.0.0.1:%s %s" % (img_dir, nfs_dir))
            img_path = os.path.join(nfs_dir, img_name)
            nfs_service = Factory.create_service("nfs")

        elif error_type == "no space":
            # Steps to generate a 'no space' block error:
            # 1. Prepare an iscsi disk and build a fs pool on it
            # 2. Create a vol with large capacity but 0 allocation
            # 3. Attach the disk to the guest
            # 4. In the guest, create a large image in the vol, which may
            #    pause the guest

            pool_target = os.path.join(tmp_dir, pool_name)
            _pool_vol = utils_test.libvirt.PoolVolumeTest(test, params)
            _pool_vol.pre_pool(pool_name,
                               "fs",
                               pool_target,
                               img_name,
                               image_size=img_size)
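            # A volume with 100M capacity but 0 allocation is sparse, so
            # guest writes can outgrow the pool's actual free space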
            _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name)
            img_path = os.path.join(pool_target, vol_name)

        # Generate disk xml
        # Guest will attach a disk with cache=none and error_policy=stop
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {
                'file': img_path
            }})
        img_disk.driver = {
            'name': "qemu",
            'type': "raw",
            'cache': "none",
            'error_policy': "stop"
        }
        img_disk.target = {'dev': target_dev, 'bus': "virtio"}
        logging.debug("disk xml is %s", img_disk.xml)

        # Start guest and get session
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Get disk list before operation
        get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2"
        bef_list = session.cmd_output(get_disks_cmd).split("\n")

        # Attach disk to guest
        ret = virsh.attach_device(domain_opt=vm_name, file_opt=img_disk.xml)
        if ret.exit_status != 0:
            raise error.TestFail("Fail to attach device %s" % ret.stderr)
        time.sleep(2)
        logging.debug("domain xml is %s", virsh.dumpxml(vm_name))
        # get disk list after attach
        aft_list = session.cmd_output(get_disks_cmd).split("\n")
        # Find new disk after attach
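        # (symmetric difference of the before/after device lists)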
        new_disk = "".join(list(set(bef_list) ^ set(aft_list)))
        logging.debug("new disk is %s", new_disk)

        def create_large_image():
            """
            Create large image in guest
            """
            # create partition and file system
            session.cmd("parted -s %s mklabel msdos" % new_disk)
            session.cmd("parted -s %s mkpart primary ext3 '0%%' '100%%'" %
                        new_disk)
            # mount disk and write file in it
            session.cmd("mkfs.ext3 %s1" % new_disk)
            session.cmd("mkdir -p %s && mount %s1 %s" %
                        (mnt_dir, new_disk, mnt_dir))

            # The following step may pause the guest before dd returns
            try:
                session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 "
                            "count=51200 && sync" % mnt_dir)
            except Exception as err:
                logging.debug("Expected failure: %s", err)
            session.close()

        create_large_image()
        if error_type == "unspecified error":
            # umount nfs to trigger error after create large image
            nfs_service.stop()
            logging.debug("nfs status is %s", nfs_service.status())

        # wait and check the guest status with timeout
        def _check_state():
            """
            Check domain state
            """
            return vm.state() == "paused"

        if not utils_misc.wait_for(_check_state, timeout):
            raise error.TestFail("Guest does not paused, it is %s now" %
                                 vm.state())
        else:
            logging.info("Now domain state changed to paused status")
            output = virsh.domblkerror(vm_name)
            if output.exit_status == 0:
                expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
                if output.stdout.strip() == expect_result:
                    logging.info("Get expect result: %s", expect_result)
                else:
                    raise error.TestFail(
                        "Failed to get expect result, get %s" %
                        output.stdout.strip())
            else:
                raise error.TestFail("Fail to get domblkerror info:%s" %
                                     output.stderr)