Code example #1
File: virsh_domfstrim.py  Project: xigao/virt-test
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """
        # Get disk path of scsi_disk
        path_cmd = "udevadm info --name %s | grep /dev/disk/by-path/ | " \
                   "cut -d' ' -f4" % scsi_disk
        disk_path = utils.run(path_cmd).stdout.strip()

        # Add qemu guest agent in guest xml
        vm_xml.VMXML.set_agent_channel(vm_name)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
                                            **{'attrs': {'dev': disk_path}})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()
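
A minimal, hypothetical call site for the helper above; both names are illustrative, with the scsi_debug device assumed to have been prepared earlier in the test:

    # scsi_debug_disk is the scsi_debug block device prepared by earlier test
    # setup, e.g. "/dev/sdb"; the domain name is illustrative.
    recompose_xml("avocado-vt-vm1", scsi_debug_disk)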
Code example #2
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk and, if not already present, a scsi controller for guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk path
        """

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {'dev': disk_path}})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()
Code example #3
        def test_device_update_boot_order(disk_type, disk_path, error_msg):
            """
            Wrap device update boot order steps here.
            First, create another disk for the given path.
            Update the previous disk with the newly created one and a modified boot order.
            Finally, eject the disk.
            :param disk_type: the type of disk.
            :param disk_path: the path of disk.
            :param error_msg: the expected error message.
            """

            additional_disk = create_addtional_disk(disk_type, disk_path, device_formats[0], device_types[0],
                                                    devices[0], device_targets[0],
                                                    device_bus[0])
            additional_disk.boot = '2'
            additional_disk.readonly = 'True'
            # Updating a cdrom/floppy disk with a modified boot order is expected to fail.
            ret = virsh.update_device(vm_name, additional_disk.xml, debug=True)
            if ret.exit_status == 0 or not ret.stderr.count("Operation not supported: " +
                                                            "cannot modify field 'boot order' of the disk"):
                test.fail(error_msg)
            # Force-eject the cdrom or floppy; this is expected to succeed.
            eject_disk = Disk(type_name='block')
            eject_disk.target = {"dev": device_targets[0], "bus": device_bus[0]}
            eject_disk.device = devices[0]
            ret = virsh.update_device(vm_name, eject_disk.xml, flagstr='--force', debug=True)
            libvirt.check_exit_status(ret)
Code example #4
File: migrate_ceph.py  Project: smitterl/tp-libvirt
def build_disk_xml(vm_name,
                   disk_format,
                   host_ip,
                   disk_src_protocol,
                   volume_name,
                   disk_img=None,
                   transport=None,
                   auth=None):
    """
    Try to build disk xml

    :param vm_name: specified VM name.
    :param disk_format: disk format, e.g. raw or qcow2
    :param host_ip: host ip address
    :param disk_src_protocol: disk access protocol, e.g. network or file
    :param volume_name: volume name
    :param disk_img: disk image name
    :param transport: transport type, e.g. TCP
    :param auth: dict containing ceph parameters
    """
    # Delete existing disks first.
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    disks_dev = vmxml.get_devices(device_type="disk")
    for disk in disks_dev:
        vmxml.del_device(disk)

    disk_xml = Disk(type_name="network")
    driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
    disk_xml.driver = driver_dict
    disk_xml.target = {"dev": "vda", "bus": "virtio"}
    # If protocol is rbd, create ceph disk xml.
    if disk_src_protocol == "rbd":
        disk_xml.device = "disk"
        vol_name = volume_name
        source_dict = {
            "protocol": disk_src_protocol,
            "name": "%s/%s" % (vol_name, disk_img)
        }
        host_dict = {"name": host_ip, "port": "6789"}
        if transport:
            host_dict.update({"transport": transport})
        if auth:
            disk_xml.auth = disk_xml.new_auth(**auth)
        disk_xml.source = disk_xml.new_disk_source(**{
            "attrs": source_dict,
            "hosts": [host_dict]
        })
    # Add the new disk xml.
    vmxml.add_device(disk_xml)
    vmxml.sync()
Code example #5
def create_disk(disk_type, disk_path, disk_format, disk_device_type,
                disk_device, disk_target, disk_bus):
    """
    Create another disk for a given path and customize some attributes.

    :param disk_type: the type of disk.
    :param disk_path: the path of disk.
    :param disk_format: the format of the disk image.
    :param disk_device_type: the disk device type.
    :param disk_device: the device of disk.
    :param disk_target: the target of disk.
    :param disk_bus: the target bus of disk.
    :return: disk object if created successfully.
    """
    disk_source = libvirt.create_local_disk(disk_type, disk_path, '1', disk_format)
    custom_disk = Disk(type_name=disk_device_type)
    custom_disk.device = disk_device
    source_dict = {'file': disk_source}
    custom_disk.source = custom_disk.new_disk_source(
        **{"attrs": source_dict})
    target_dict = {"dev": disk_target, "bus": disk_bus}
    custom_disk.target = target_dict
    driver_dict = {"name": "qemu", 'type': disk_format}
    custom_disk.driver = driver_dict
    return custom_disk
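
A hedged usage sketch for the helper above; the image path, domain name, and target are illustrative, while virsh.attach_device and libvirt.check_exit_status are the same bindings used elsewhere in these examples:

# Hypothetical usage: create a 1G qcow2 file-backed disk and hot-plug it.
new_disk = create_disk("file", "/var/lib/libvirt/images/extra.qcow2",
                       "qcow2", "file", "disk", "vdb", "virtio")
ret = virsh.attach_device("avocado-vt-vm1", new_disk.xml, debug=True)
libvirt.check_exit_status(ret)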
Code example #6
    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Try to rebuild disk xml
        """
        # Delete existing disks first.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks_dev = vmxml.get_devices(device_type="disk")
        for disk in disks_dev:
            vmxml.del_device(disk)

        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu",
                       "type": disk_format,
                       "cache": "none"}
        if driver_iothread:
            driver_dict.update({"iothread": driver_iothread})
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vda", "bus": "virtio"}
        if default_pool:
            utils_misc.mount("%s:%s" % (host_ip, vol_name),
                             default_pool, "glusterfs")
            process.run("setsebool virt_use_fusefs on", shell=True)
            virsh.pool_refresh("default")
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {"protocol": "gluster",
                           "name": "%s/%s" % (vol_name, disk_img)}
            host_dict = {"name": host_ip, "port": "24007"}
            if transport:
                host_dict.update({"transport": transport})
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict, "hosts": [host_dict]})
        # set domain options
        if dom_iothreads:
            try:
                vmxml.iothreads = int(dom_iothreads)
            except ValueError:
                # 'iothreads' may not be a valid number in negative tests
                logging.debug("Can't convert '%s' to integer type",
                              dom_iothreads)

        # Add the new disk xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()
Code example #7
    def prepare_secret_disk(self, image_path, secret_disk_dict=None):
        """
        Add secret disk for domain.

        :param image_path: image path for disk source file
        :param secret_disk_dict: secret disk dict to add new disk
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(self.vm.name)
        sec_dict = eval(self.params.get("sec_dict", '{}'))
        device_target = self.params.get("target_disk", "vdd")
        sec_passwd = self.params.get("private_key_password")
        if not secret_disk_dict:
            secret_disk_dict = {
                'type_name': "file",
                'target': {
                    "dev": device_target,
                    "bus": "virtio"
                },
                'driver': {
                    "name": "qemu",
                    "type": "qcow2"
                },
                'source': {
                    'encryption': {
                        "encryption": 'luks',
                        "secret": {
                            "type": "passphrase"
                        }
                    }
                }
            }
        # Create secret
        libvirt_secret.clean_up_secrets()
        sec_uuid = libvirt_secret.create_secret(sec_dict=sec_dict)
        virsh.secret_set_value(sec_uuid, sec_passwd, encode=True, debug=True)

        secret_disk_dict['source']['encryption']['secret']["uuid"] = sec_uuid
        secret_disk_dict['source']['attrs'] = {'file': image_path}
        new_disk = Disk()
        new_disk.setup_attrs(**secret_disk_dict)
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.xmltreefile.write()
        vmxml.sync()
Code example #8
    def get_vm_disk_xml(dev_type, dev_name, **options):
        """
        Create a disk xml object and return it.

        :param dev_type: Disk type.
        :param dev_name: Disk device name.
        :param options: Disk options.
        :return: Disk xml object.
        """
        # Create disk xml
        disk_xml = Disk(type_name=dev_type)
        disk_xml.device = "disk"
        if options.has_key("sgio") and options["sgio"] != "":
            disk_xml.sgio = options["sgio"]
            disk_xml.device = "lun"
            disk_xml.rawio = "no"

        if dev_type == "block":
            disk_attr = "dev"
        else:
            disk_attr = "file"

        disk_xml.target = {'dev': options["target"],
                           'bus': options["bus"]}
        disk_xml.source = disk_xml.new_disk_source(
            **{'attrs': {disk_attr: dev_name}})

        # Add driver options from parameters.
        driver_dict = {"name": "qemu"}
        if options.has_key("driver"):
            for driver_option in options["driver"].split(','):
                if driver_option != "":
                    d = driver_option.split('=')
                    logging.debug("disk driver option: %s=%s", d[0], d[1])
                    driver_dict.update({d[0].strip(): d[1].strip()})

        disk_xml.driver = driver_dict
        if options.has_key("share"):
            if options["share"] == "shareable":
                disk_xml.share = True

        return disk_xml
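
A hypothetical invocation of the helper above, assuming a block device and option keys in the shape the helper parses (target, bus, sgio, driver, share); the device path is illustrative:

    # Illustrative only: a shareable raw lun on a hypothetical block device.
    disk_options = {"target": "sdb", "bus": "scsi", "sgio": "unfiltered",
                    "driver": "type=raw", "share": "shareable"}
    disk_xml = get_vm_disk_xml("block", "/dev/sdX", **disk_options)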
Code example #9
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """
        # Get disk path of scsi_disk
        path_cmd = "udevadm info --name %s | grep /dev/disk/by-path/ | " \
                   "cut -d' ' -f4" % scsi_disk
        disk_path = utils.run(path_cmd).stdout.strip()

        # Add qemu guest agent in guest xml
        vm_xml.VMXML.set_agent_channel(vm_name)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {
                'dev': disk_path
            }})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()
Code example #10
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk and, if not already present, a scsi controller for guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk path
        """

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {
                'dev': disk_path
            }})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()
Code example #11
File: libvirt_disk.py  Project: peixiu/avocado-vt
def create_custom_metadata_disk(disk_path,
                                disk_format,
                                disk_device,
                                device_target,
                                device_bus,
                                max_size,
                                disk_inst=None):
    """
    Create another disk for a given path and customize the driver metadata attribute

    :param disk_path: the path of disk
    :param disk_format: the format of the disk image
    :param disk_device: the disk device type
    :param device_target: the target of disk
    :param device_bus: device bus
    :param max_size: metadata_cache max size
    :param disk_inst: disk instance
    :return: disk object if created or updated successfully
    """
    if disk_inst:
        custom_disk = disk_inst
    else:
        custom_disk = Disk(type_name='file')
    if disk_device:
        custom_disk.device = disk_device
    source_dict = {}
    if disk_path:
        source_dict.update({'file': disk_path})
    custom_disk.source = custom_disk.new_disk_source(**{"attrs": source_dict})
    if device_target:
        target_dict = {"dev": device_target, "bus": device_bus}
        custom_disk.target = target_dict
    driver_dict = {"name": "qemu", 'type': disk_format}
    # Create drivermetadata object
    new_one_drivermetadata = custom_disk.new_drivermetadata(
        **{"attrs": driver_dict})
    metadata_cache_dict = {"max_size": max_size, "max_size_unit": "bytes"}
    # Attach metadatacache into drivermetadata object
    new_one_drivermetadata.metadata_cache = custom_disk.DriverMetadata(
    ).new_metadatacache(**metadata_cache_dict)
    custom_disk.drivermetadata = new_one_drivermetadata
    logging.debug("disk xml in create_custom_metadata_disk: %s\n", custom_disk)
    return custom_disk
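
A minimal, hypothetical call to the helper above; the image path and cache size are illustrative:

# Hypothetical call: a qcow2 disk on vdb with a 1 MiB metadata cache.
meta_disk = create_custom_metadata_disk("/var/lib/libvirt/images/meta.qcow2",
                                        "qcow2", "disk", "vdb", "virtio",
                                        "1048576")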
Code example #12
File: virsh_boot.py  Project: uyvan/tp-libvirt
def add_cdrom_device(v_xml, iso_file, target_dev, device_bus):
    """
    Add cdrom disk in VM XML

    :param v_xml: The instance of VMXML class
    :param iso_file: The iso file path
    :param target_dev: The target dev in Disk XML
    :param device_bus: The target bus in Disk XML
    """
    disk_xml = Disk(type_name="file")
    disk_xml.device = "cdrom"
    disk_xml.target = {"dev": target_dev, "bus": device_bus}
    disk_xml.driver = {"name": "qemu", "type": "raw"}
    src_dict = {"file": iso_file}
    disk_xml.source = disk_xml.new_disk_source(**{"attrs": src_dict})
    disk_xml.readonly = False
    v_xml.add_device(disk_xml)
    return v_xml
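
A hypothetical call site for add_cdrom_device, reusing the VMXML API seen in the other examples; the domain name, ISO path, and target are illustrative:

# Illustrative usage: attach an ISO as an IDE cdrom, then redefine the guest.
vmxml = vm_xml.VMXML.new_from_dumpxml("avocado-vt-vm1")
vmxml = add_cdrom_device(vmxml, "/var/lib/libvirt/images/boot.iso",
                         "hdc", "ide")
vmxml.sync()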
Code example #13
    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Try to rebuild disk xml
        """
        # Delete existing disks first.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks_dev = vmxml.get_devices(device_type="disk")
        for disk in disks_dev:
            vmxml.del_device(disk)

        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu",
                       "type": disk_format,
                       "cache": "none"}
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vda", "bus": "virtio"}
        if default_pool:
            utils.run("mount -t glusterfs %s:%s %s; setsebool virt_use_fusefs on" %
                      (host_ip, vol_name, default_pool))
            virsh.pool_refresh("default")
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {"protocol": "gluster",
                           "name": "%s/%s" % (vol_name, disk_img)}
            host_dict = {"name": host_ip, "port": "24007"}
            if transport:
                host_dict.update({"transport": transport})
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict, "hosts": [host_dict]})
        # Add the new disk xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()
Code example #14
File: embedded_qemu.py  Project: uyvan/tp-libvirt
    def add_luks_disk(secret_type, sec_encryption_uuid):
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=disk_type)
        disk_xml.device = "disk"
        disk_source = disk_xml.new_disk_source(
                **{"attrs": {'file': img_path}})
        disk_xml.driver = {"name": "qemu", "type": img_format,
                           "cache": "none"}
        disk_xml.target = {"dev": disk_target, "bus": disk_bus}
        encryption_dict = {"encryption": 'luks',
                           "secret": {"type": secret_type,
                                      "uuid": sec_encryption_uuid}}
        disk_source.encryption = disk_xml.new_encryption(**encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        vmxml.add_device(disk_xml)
        logging.debug("guest xml: %s", vmxml.xml)
        return vmxml
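
A hypothetical call site for add_luks_disk, assuming a luks secret was already created and its value set (as the later examples do with libvirt.create_secret and virsh.secret_set_value):

    # sec_uuid would come from libvirt.create_secret() followed by
    # virsh.secret_set_value(); "passphrase" is the usual secret type.
    vmxml = add_luks_disk("passphrase", sec_uuid)
    vmxml.sync()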
Code example #15
 def build_disk_xml(disk_img, disk_format, host_ip):
     """
     Try to rebuild disk xml
     """
     if default_pool:
         disk_xml = Disk(type_name="file")
     else:
         disk_xml = Disk(type_name="network")
     disk_xml.device = "disk"
     driver_dict = {"name": "qemu",
                    "type": disk_format,
                    "cache": "none"}
     if driver_iothread:
         driver_dict.update({"iothread": driver_iothread})
     disk_xml.driver = driver_dict
     disk_xml.target = {"dev": "vdb", "bus": "virtio"}
     if default_pool:
         utils_misc.mount("%s:%s" % (host_ip, vol_name),
                          default_pool, "glusterfs")
         process.run("setsebool virt_use_fusefs on", shell=True)
         source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
         disk_xml.source = disk_xml.new_disk_source(
             **{"attrs": source_dict})
     else:
         source_dict = {"protocol": "gluster",
                        "name": "%s/%s" % (vol_name, disk_img)}
         host_dict = [{"name": host_ip, "port": "24007"}]
         # If multiple_hosts is True, attempt to add multiple hosts.
         if multiple_hosts:
             host_dict.append({"name": params.get("dummy_host1"), "port": "24007"})
             host_dict.append({"name": params.get("dummy_host2"), "port": "24007"})
         if transport:
             host_dict[0]['transport'] = transport
         disk_xml.source = disk_xml.new_disk_source(
             **{"attrs": source_dict, "hosts": host_dict})
     return disk_xml
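
Unlike the earlier variants that sync the XML themselves, this helper returns the disk xml, so a caller would (hypothetically) wire it into the guest:

 # Illustrative: vol_name and host_ip come from the gluster setup in the test.
 new_disk = build_disk_xml("gluster.img", "qcow2", host_ip)
 vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
 vmxml.add_device(new_disk)
 vmxml.sync()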
Code example #16
File: libvirt_disk.py  Project: peixiu/avocado-vt
def create_primitive_disk_xml(type_name, disk_device, device_target,
                              device_bus, device_format, disk_src_dict,
                              disk_auth_dict):
    """
    Creates primitive disk xml

    :param type_name: disk type
    :param disk_device: disk device
    :param device_target: target device
    :param device_bus: device bus
    :param device_format: device format
    :param disk_src_dict: disk source, dict format like below
           disk_src_dict = {"attrs": {"protocol": "rbd",
                                   "name": disk_name},
                            "hosts":  [{"name": host_ip,
                                     "port": host_port}]}
    :param disk_auth_dict: disk auth information, dict format like below
           disk_auth_dict = {"auth_user": auth_user,
                             "secret_type": auth_sec_usage_type,
                             "secret_uuid": auth_sec_uuid}
    :return: disk xml object
    """
    disk_xml = Disk(type_name=type_name)
    disk_xml.device = disk_device
    disk_xml.target = {"dev": device_target, "bus": device_bus}
    driver_dict = {"name": "qemu", "type": device_format}
    disk_xml.driver = driver_dict
    if disk_src_dict:
        logging.debug("disk src dict is: %s" % disk_src_dict)
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        disk_xml.source = disk_source
    if disk_auth_dict:
        logging.debug("disk auth dict is: %s" % disk_auth_dict)
        disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
    logging.debug("new disk xml in create_primitive_disk is: %s", disk_xml)
    return disk_xml
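
A hedged sketch of an rbd invocation of create_primitive_disk_xml, matching the dict shapes in the docstring; the pool/image name, monitor address, and secret uuid are illustrative:

# Hypothetical rbd disk with auth; auth_sec_uuid would come from
# libvirt.create_secret(), as in the other examples.
src_dict = {"attrs": {"protocol": "rbd", "name": "rbd_pool/rbd.img"},
            "hosts": [{"name": "192.168.0.10", "port": "6789"}]}
auth_dict = {"auth_user": "admin",
             "secret_type": "ceph",
             "secret_uuid": auth_sec_uuid}
rbd_disk = create_primitive_disk_xml("network", "disk", "vdb", "virtio",
                                     "raw", src_dict, auth_dict)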
Code example #17
File: virsh_boot.py  Project: balamuruhans/tp-libvirt
def add_cdrom_device(v_xml, iso_file, target_dev, device_bus):
    """
    Add cdrom disk in VM XML

    :param v_xml: The instance of VMXML class
    :param iso_file: The iso file path
    :param target_dev: The target dev in Disk XML
    :param device_bus: The target bus in Disk XML
    """
    disk_xml = Disk(type_name="file")
    disk_xml.device = "cdrom"
    disk_xml.target = {"dev": target_dev, "bus": device_bus}
    disk_xml.driver = {"name": "qemu", "type": "raw"}
    src_dict = {"file": iso_file}
    disk_xml.source = disk_xml.new_disk_source(
        **{"attrs": src_dict})
    disk_xml.readonly = False
    v_xml.add_device(disk_xml)
    return v_xml
Code example #18
    def prepare_virt_disk_xml(image_path):
        """
        Prepare the virtual disk xml to be attached/detached.

        :param image_path: The path to the local image.
        :return: The virtual disk xml.
        """
        virt_disk_device = params.get("virt_disk_device", "disk")
        virt_disk_device_type = params.get("virt_disk_device_type", "file")
        virt_disk_device_format = params.get("virt_disk_device_format", "raw")
        virt_disk_device_target = params.get("virt_disk_device_target", "sdb")
        virt_disk_device_bus = params.get("virt_disk_device_bus", "usb")
        disk_xml = Disk(type_name=virt_disk_device_type)
        disk_xml.device = virt_disk_device
        disk_src_dict = {'attrs': {'file': image_path, 'type_name': 'file'}}
        disk_xml.source = disk_xml.new_disk_source(**disk_src_dict)
        driver_dict = {"name": "qemu", "type": virt_disk_device_format}
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": virt_disk_device_target,
                           "bus": virt_disk_device_bus}
        return disk_xml
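
A hypothetical hot-plug round trip with prepare_virt_disk_xml; the image path is illustrative, and virsh.detach_device is assumed as the counterpart binding to attach_device:

    # Illustrative round trip: attach, then detach, the prepared disk.
    disk_xml = prepare_virt_disk_xml("/var/tmp/usb.img")
    ret = virsh.attach_device(vm_name, disk_xml.xml, debug=True)
    libvirt.check_exit_status(ret)
    ret = virsh.detach_device(vm_name, disk_xml.xml, debug=True)
    libvirt.check_exit_status(ret)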
Code example #19
    def prepare_virt_disk_xml(image_path):
        """
        Prepare the virtual disk xml to be attached/detached.

        :param image_path: The path to the local image.
        :return: The virtual disk xml.
        """
        virt_disk_device = params.get("virt_disk_device", "disk")
        virt_disk_device_type = params.get("virt_disk_device_type", "file")
        virt_disk_device_format = params.get("virt_disk_device_format", "raw")
        virt_disk_device_target = params.get("virt_disk_device_target", "sdb")
        virt_disk_device_bus = params.get("virt_disk_device_bus", "usb")
        disk_xml = Disk(type_name=virt_disk_device_type)
        disk_xml.device = virt_disk_device
        disk_src_dict = {'attrs': {'file': image_path, 'type_name': 'file'}}
        disk_xml.source = disk_xml.new_disk_source(**disk_src_dict)
        driver_dict = {"name": "qemu", "type": virt_disk_device_format}
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": virt_disk_device_target,
                           "bus": virt_disk_device_bus}
        return disk_xml
Code example #20
def run(test, params, env):
    """
    Test disk encryption option.

    1. Prepare backend storage (blkdev/iscsi/gluster/ceph)
    2. Use luks format to encrypt the backend storage
    3. Prepare a disk xml pointing to the backend storage with a valid/invalid
       luks password
    4. Start VM with disk hot/cold plugged
    5. Check some disk operations in VM
    6. Check backend storage is still in luks format
    7. Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def encrypt_dev(device, params):
        """
        Encrypt device with luks format

        :param device: Storage device to be encrypted.
        :param params: Dict from which to get the encryption password.
        """
        password = params.get("luks_encrypt_passwd", "password")
        size = params.get("luks_size", "500M")
        cmd = (
            "qemu-img create -f luks "
            "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
            "-o key-secret=sec0 %s %s" % (password, device, size))
        if process.system(cmd, shell=True):
            test.fail("Can't create a luks encrypted img by qemu-img")

    def check_dev_format(device, fmt="luks"):
        """
        Check if device is in luks format

        :param device: Storage device to be checked.
        :param fmt: Expected disk format.
        :return: None; fails the test if the device is not in the expected format.
        """
        cmd_result = process.run("qemu-img" + ' -h',
                                 ignore_status=True,
                                 shell=True,
                                 verbose=False)
        if b'-U' in cmd_result.stdout:
            cmd = ("qemu-img info -U %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        else:
            cmd = ("qemu-img info %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        if cmd_result.exit_status:
            test.fail("device %s is not in %s format. err is: %s" %
                      (device, fmt, cmd_result.stderr))

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if checked successfully.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage options.
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")

    # Luks encryption info: luks_encrypt_passwd is the password used to encrypt
    # the luks image, and luks_secret_passwd is the password set on the luks
    # secret; a wrong luks_secret_passwd can be set for negative tests
    luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
    luks_secret_passwd = params.get("luks_secret_passwd", "password")
    # Backend storage auth info
    use_auth_usage = "yes" == params.get("use_auth_usage")
    if use_auth_usage:
        use_auth_uuid = False
    else:
        use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")

    status_error = "yes" == params.get("status_error")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    encryption_in_source = "yes" == params.get("encryption_in_source", "no")
    auth_in_source = "yes" == params.get("auth_in_source", "no")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}
    pvt = None

    if ((encryption_in_source or auth_in_source)
            and not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot put <encryption> or <auth> inside disk <source> "
                    "in this libvirt version.")
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Setup backend storage
        if backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                if enable_auth:
                    chap_user = params.get("chap_user", "redhat")
                    chap_passwd = params.get("chap_passwd", "password")
                    auth_sec_usage = params.get("auth_sec_usage",
                                                "libvirtiscsi")
                    auth_sec_dict = {
                        "sec_usage": "iscsi",
                        "sec_target": auth_sec_usage
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    # Set password of auth secret (not luks encryption secret)
                    virsh.secret_set_value(auth_sec_uuid,
                                           chap_passwd,
                                           encode=True,
                                           debug=True)
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=False,
                        image_size=storage_size,
                        chap_user=chap_user,
                        chap_passwd=chap_passwd,
                        portal_ip=iscsi_host)
                    # ISCSI auth attributes for disk xml
                    if use_auth_uuid:
                        disk_auth_dict = {
                            "auth_user": chap_user,
                            "secret_type": auth_sec_usage_type,
                            "secret_uuid": auth_sec_uuid
                        }
                    elif use_auth_usage:
                        disk_auth_dict = {
                            "auth_user": chap_user,
                            "secret_type": auth_sec_usage_type,
                            "secret_usage": auth_sec_usage_target
                        }
                else:
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=False,
                        image_size=storage_size,
                        portal_ip=iscsi_host)
                device_source = "iscsi://%s:%s/%s/%s" % (
                    iscsi_host, iscsi_port, iscsi_target, lun_num)
                disk_src_dict = {
                    "attrs": {
                        "protocol": "iscsi",
                        "name": "%s/%s" % (iscsi_target, lun_num)
                    },
                    "hosts": [{
                        "name": iscsi_host,
                        "port": iscsi_port
                    }]
                }
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name",
                                           "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = libvirt.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            device_source = "gluster://%s/%s/%s" % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            disk_src_dict = {
                "attrs": {
                    "protocol": "gluster",
                    "name": "%s/%s" % (gluster_vol_name, gluster_img_name)
                },
                "hosts": [{
                    "name": gluster_host_ip,
                    "port": "24007"
                }]
            }
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Keep a blank config path so cleanup knows whether to delete it at the end of the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            if enable_auth:
                # If auth is enabled, prepare a local file to save the key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {
                        "sec_usage": auth_sec_usage_type,
                        "sec_name": "ceph_auth_secret"
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid,
                                           ceph_auth_key,
                                           debug=True)
                    disk_auth_dict = {
                        "auth_user": ceph_auth_user,
                        "secret_type": auth_sec_usage_type,
                        "secret_uuid": auth_sec_uuid
                    }
                    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                           "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
                else:
                    test.error("No ceph client name/key provided.")
                device_source = "rbd:%s:mon_host=%s:keyring=%s" % (
                    ceph_disk_name, ceph_mon_ip, key_file)
            else:
                device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name,
                                                        ceph_mon_ip)
            disk_src_dict = {
                "attrs": {
                    "protocol": "rbd",
                    "name": ceph_disk_name
                },
                "hosts": [{
                    "name": ceph_host_ip,
                    "port": ceph_host_port
                }]
            }
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
        else:
            test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.")
        logging.debug("device source is: %s", device_source)
        luks_sec_uuid = libvirt.create_secret(params)
        logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
        ret = virsh.secret_set_value(luks_sec_uuid,
                                     luks_secret_passwd,
                                     encode=True,
                                     debug=True)
        encrypt_dev(device_source, params)
        libvirt.check_exit_status(ret)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            if auth_in_source:
                disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
            else:
                disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_encryption_dict = {
            "encryption": "luks",
            "secret": {
                "type": "passphrase",
                "uuid": luks_sec_uuid
            }
        }
        disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
        if encryption_in_source:
            disk_source.encryption = disk_encryption
        else:
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        vmxml.sync()
        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as details:
            # When a wrong password is used in the disk xml for cold-plug
            # cases, the VM cannot be started
            if status_error and not hotplug_disk:
                logging.info("VM failed to start as expected: %s" %
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        if hotplug_disk:
            result = virsh.attach_device(vm_name,
                                         disk_xml.xml,
                                         ignore_status=True,
                                         debug=True)
            libvirt.check_exit_status(result, status_error)
        if check_partitions and not status_error:
            if not check_in_vm(device_target, old_parts):
                test.fail("Check disk partitions in VM failed")
        check_dev_format(device_source)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            libvirt.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)

        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
Code example #21
def setup_auth_enabled_iscsi_disk(vm, params):
    """
    Set up an auth-enabled iSCSI disk for the VM and start the guest from it

    :param vm: VM
    :param params: config params
    """
    disk_type = params.get("disk_type", "file")
    disk_target = params.get("disk_target", 'vda')
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_format = params.get("disk_format", "qcow2")
    disk_device = params.get("disk_device", "lun")
    first_disk = vm.get_first_disk_devices()
    logging.debug("first disk is %s", first_disk)
    blk_source = first_disk['source']
    if vm.is_alive():
        vm.destroy(gracefully=False)
    image_size = params.get("image_size", "5G")
    chap_user = params.get("chap_user", "redhat")
    chap_passwd = params.get("chap_passwd", "password")
    auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi")
    auth_sec_dict = {"sec_usage": "iscsi", "sec_target": auth_sec_usage}
    auth_sec_uuid = utl.create_secret(auth_sec_dict)
    # Set password of auth secret
    virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True)
    emu_image = params.get("emulated_image", "emulated-iscsi")
    utl.setup_or_cleanup_iscsi(is_setup=False)
    iscsi_target, lun_num = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=False,
                                                       image_size=image_size,
                                                       chap_user=chap_user,
                                                       chap_passwd=chap_passwd)
    # Copy first disk to emulated backing store path
    tmp_dir = data_dir.get_tmp_dir()
    emulated_path = os.path.join(tmp_dir, emu_image)
    cmd = "qemu-img convert -f %s -O %s %s %s" % ('qcow2', disk_format,
                                                  blk_source, emulated_path)
    process.run(cmd, ignore_status=False, shell=True)

    # ISCSI auth attributes for disk xml
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    disk_auth_dict = {
        "auth_user": chap_user,
        "secret_type": auth_sec_usage_type,
        "secret_usage": auth_sec_usage_target
    }
    disk_src_dict = {
        "attrs": {
            "protocol": "iscsi",
            "name": "%s/%s" % (iscsi_target, lun_num)
        },
        "hosts": [{
            "name": '127.0.0.1',
            "port": '3260'
        }]
    }
    # Add disk xml.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
    # Delete disk elements
    disk_deleted = False
    disks = vmxml.get_devices(device_type="disk")
    for disk_ in disks:
        if disk_.target['dev'] == disk_target:
            vmxml.del_device(disk_)
            disk_deleted = True
    if disk_deleted:
        vmxml.sync()
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)

    disk_xml = Disk(type_name=disk_type)
    disk_xml.device = disk_device
    disk_xml.target = {"dev": disk_target, "bus": disk_target_bus}
    driver_dict = {"name": "qemu", "type": disk_format}
    disk_xml.driver = driver_dict
    disk_source = disk_xml.new_disk_source(**disk_src_dict)
    if disk_auth_dict:
        logging.debug("disk auth dict is: %s" % disk_auth_dict)
        auth_in_source = randint(0, 50) % 2 == 0
        if auth_in_source:
            disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
        else:
            disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
    disk_xml.source = disk_source
    logging.debug("new disk xml is: %s", disk_xml)
    vmxml.add_device(disk_xml)
    vmxml.sync()
    vm.start()
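
A minimal, hypothetical call site for the function above inside a test, where vm and params are the usual avocado-vt objects:

# Illustrative: rebase the guest's first disk onto the auth-enabled iSCSI LUN
# and make sure the guest still boots.
setup_auth_enabled_iscsi_disk(vm, params)
vm.wait_for_login()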
Code example #22
def run(test, params, env):
    """
    Test disk encryption option.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare pool, volume.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name: Pool name.
        :param p_type: Pool type.
        :param p_target: Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param target_encrypt_params: encrypt parameters in dict.
        :param vol_params: Volume parameters dict.
        :return: True if created successfully.
        """
        # Clean up dirty volumes if the pool has any.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_secret(vol_path):
        """
        Create secret.

        :param vol_path: volume path.
        :return: secret uuid if created successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        libvirt.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError as e:
            test.error("Fail to get newly created secret uuid")
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            secret_password_no_encoded.encode(encoding)).decode(encoding)
        ret = virsh.secret_set_value(encryption_uuid, secret_string,
                                     **virsh_dargs)
        libvirt.check_exit_status(ret)
        return encryption_uuid

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list = virsh.secret_list().stdout.strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # If secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace and take the first column (the UUID)
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if checked successfully.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status(
                "rpm -q parted || "
                "yum install -y parted", 300)
            if rpm_stat != 0:
                test.fail("Failed to query/install parted, make sure"
                          " that you have usable repo in guest")

            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            device_source = os.path.join(os.sep, 'dev', added_part)
            libvirt.mk_label(device_source, session=session)
            libvirt.mk_part(device_source, size="10M", session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    encryption_in_source = "yes" == params.get("encryption_in_source")
    encryption_out_source = "yes" == params.get("encryption_out_source")
    if encryption_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Cannot put <encryption> inside disk <source> in "
                    "this libvirt version.")
    # Pool/Volume options.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    volume_name = params.get("vol_name")
    volume_alloc = params.get("vol_alloc")
    volume_cap_unit = params.get("vol_cap_unit")
    volume_cap = params.get("vol_cap")
    volume_target_path = params.get("target_path")
    volume_target_format = params.get("target_format")
    volume_target_encypt = params.get("target_encypt", "")
    volume_target_label = params.get("target_label")

    hotplug = "yes" == params.get("virt_disk_device_hotplug")
    status_error = "yes" == params.get("status_error")
    secret_type = params.get("secret_type", "passphrase")
    secret_password_no_encoded = params.get("secret_password_no_encoded",
                                            "redhat")
    virt_disk_qcow2_format = "yes" == params.get("virt_disk_qcow2_format")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    sec_encryption_uuid = None
    try:
        # Prepare the disk.
        sec_uuids = []
        # Clean up dirty secrets in the test environment if there are any.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {
            "name": volume_name,
            "capacity": int(volume_cap),
            "allocation": int(volume_alloc),
            "format": volume_target_format,
            "path": volume_target_path,
            "label": volume_target_label,
            "capacity_unit": volume_cap_unit
        }
        vol_encryption_params = {}
        vol_encryption_params.update({"format": volume_target_encypt})
        # For any disk format other than qcow2, a secret needs to be created first.
        if not virt_disk_qcow2_format:
            # create secret.
            sec_encryption_uuid = create_secret(volume_target_path)
            sec_uuids.append(sec_encryption_uuid)
            vol_encryption_params.update(
                {"secret": {
                    "type": secret_type,
                    "uuid": sec_encryption_uuid
                }})
        try:
            # If the libvirt version is lower than 2.5.0, creating a luks
            # encryption volume is not supported, so skip it.
            create_vol(pool_name, vol_encryption_params, vol_params)
        except AssertionError as info:
            err_msgs = ("create: invalid option")
            if str(info).count(err_msgs):
                test.error("Creating luks encryption volume "
                           "is not supported on this libvirt version")
            else:
                test.error("Failed to create volume." "Error: %s" % str(info))
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {
                dev_attrs: volume_target_path
            }})
        disk_xml.driver = {
            "name": "qemu",
            "type": volume_target_format,
            "cache": "none"
        }
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuids.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format,
                          v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            encryption_dict = {
                "encryption": v_xml.encryption.format,
                "secret": {
                    "type": v_xml.encryption.secret["type"],
                    "uuid": v_xml.encryption.secret["uuid"]
                }
            }
            if encryption_in_source:
                disk_source.encryption = disk_xml.new_encryption(
                    **encryption_dict)
            if encryption_out_source:
                disk_xml.encryption = disk_xml.new_encryption(
                    **encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(disk_xml)
            vmxml.sync()

        try:
            # Start the VM and do disk hotplug if required,
            # then check disk status in vm.
            # Note that LUKS encrypted virtual disk without <encryption>
            # can be normally started or attached since qemu will just treat
            # it as RAW, so we don't test LUKS with status_error=TRUE.
            vm.start()
            vm.wait_for_login()
            if status_error:
                if hotplug:
                    logging.debug("attaching disk, expecting error...")
                    result = virsh.attach_device(vm_name, disk_xml.xml)
                    libvirt.check_exit_status(result, status_error)
                else:
                    test.fail("VM started unexpectedly.")
            else:
                if hotplug:
                    result = virsh.attach_device(vm_name,
                                                 disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
                    result = virsh.detach_device(vm_name,
                                                 disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                else:
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                if hotplug:
                    test.fail(
                        "In hotplug scenario, VM should "
                        "start successfully but did not. "
                        "Error: %s" % str(e))
                else:
                    logging.debug(
                        "VM failed to start as expected. "
                        "Error: %s", str(e))
            else:
                # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images
                # are no longer supported.
                err_msgs = ("AES-CBC encrypted qcow2 images is"
                            " no longer supported in system emulators")
                if str(e).count(err_msgs):
                    test.cancel(err_msgs)
                else:
                    test.fail("VM failed to start." "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
        virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
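
The create_secret() helper referenced above is not shown in this excerpt. For reference, a minimal sketch (helper name hypothetical, assuming avocado-vt's virsh module) of the secret-definition step such a helper is expected to perform before the luks volume can be created:

import tempfile

from virttest import virsh


def define_volume_secret(volume_path, password):
    """Define a volume-usage secret and set its value (sketch)."""
    sec_xml = ("<secret ephemeral='no' private='yes'>"
               "<usage type='volume'><volume>%s</volume></usage>"
               "</secret>" % volume_path)
    with tempfile.NamedTemporaryFile(mode="w", suffix=".xml") as xml_file:
        xml_file.write(sec_xml)
        xml_file.flush()
        # virsh prints "Secret <uuid> created" on success
        result = virsh.secret_define(xml_file.name, debug=True)
    sec_uuid = result.stdout.strip().split()[1]
    virsh.secret_set_value(sec_uuid, password, encode=True, debug=True)
    return sec_uuid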
コード例 #23
0
                    if result.exit_status or (result.stdout.find("OK") == -1):
                        raise error.CmdError(result.command, result)

                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += ("id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s" % (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type, usb_type, i)

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise error.CmdError(result.command, result)
            else:
                attributes = {'type_name': "usb", 'bus': "1", 'port': "0"}
                if usb_type == "storage":
                    dev_xml = Disk(type_name="file")
                    dev_xml.device = "disk"
                    dev_xml.source = dev_xml.new_disk_source(**{"attrs": {'file': path}})
                    dev_xml.driver = {"name": "qemu", "type": 'qcow2', "cache": "none"}
                    dev_xml.target = {"dev": 'sdb', "bus": "usb"}
                    dev_xml.address = dev_xml.new_disk_address(**{"attrs": attributes})
                else:
                    if usb_type == "mouse":
                        dev_xml = Input("mouse")
                    elif usb_type == "tablet":
                        dev_xml = Input("tablet")
                    else:
                        dev_xml = Input("keyboard")

                    dev_xml.input_bus = "usb"
                    dev_xml.address = dev_xml.new_input_address(**{"attrs": attributes})
コード例 #24
0
    def get_vm_disk_xml(dev_type, dev_name, **options):
        """
        Create a disk xml object and return it.

        :param dev_type: Disk type.
        :param dev_name: Disk device name.
        :param options: Disk options.
        :return: Disk xml object.
        """
        # Create disk xml
        disk_xml = Disk(type_name=dev_type)
        disk_xml.device = options["disk_device"]
        if "sgio" in options and options["sgio"] != "":
            disk_xml.sgio = options["sgio"]
            disk_xml.device = "lun"
            disk_xml.rawio = "no"

        if dev_type == "block":
            disk_attr = "dev"
        else:
            disk_attr = "file"

        disk_xml.target = {'dev': options["target"],
                           'bus': options["bus"]}
        disk_xml.source = disk_xml.new_disk_source(
            **{'attrs': {disk_attr: dev_name}})

        # Add driver options from parameters.
        driver_dict = {"name": "qemu"}
        if "driver" in options:
            for driver_option in options["driver"].split(','):
                if driver_option != "":
                    d = driver_option.split('=')
                    logging.debug("disk driver option: %s=%s", d[0], d[1])
                    driver_dict.update({d[0].strip(): d[1].strip()})

        disk_xml.driver = driver_dict
        if "share" in options:
            if options["share"] == "shareable":
                disk_xml.share = True

        if "readonly" in options:
            if options["readonly"] == "readonly":
                disk_xml.readonly = True

        logging.debug("The disk xml is: %s" % disk_xml.xmltreefile)

        return disk_xml
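
A hypothetical invocation of the helper above (all option values illustrative), showing how the comma-separated driver string is expanded into the <driver> element:

disk = get_vm_disk_xml("block", "/dev/sdb",
                       disk_device="lun", sgio="unfiltered",
                       target="sdb", bus="scsi",
                       driver="discard=unmap,cache=none",
                       share="shareable", readonly="readonly")
# driver="discard=unmap,cache=none" yields:
#   <driver name='qemu' discard='unmap' cache='none'/>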
コード例 #25
0
ファイル: limit.py プロジェクト: lento-sun/tp-libvirt
def run(test, params, env):
    """
    cpu, memory, network, disk limit test:
    1) prepare the guest with given topology, memory and if any devices
    2) Start and login to the guest
    3) Check if the guest is functional
    4) If given, run some stress tests

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    TODO: 1. Add multiple pci-bridge and pci devices respectively.
          2. Get the number of VM interfaces at test start and use it for validation.
    """

    failures = {"Failed to allocate KVM HPT of order":
                "Host does no have enough cma to boot, try increasing \
                kvm_cma_resv_ratio in host boot",
                "unable to map backing store for guest RAM":
                "Not enough memory in the host to boot the guest"}
    vm_name = params.get("main_vm")
    max_vcpu = current_vcpu = int(params.get("max_vcpu", 240))
    vm_cores = int(params.get("limit_vcpu_cores", 240))
    vm_threads = int(params.get("limit_vcpu_threads", 1))
    vm_sockets = int(params.get("limit_vcpu_sockets", 1))
    usermaxmem = params.get("usermaxmem", '')
    default_mem = int(params.get("default_mem", 8))
    maxmem = params.get("maxmem", "no") == "yes"
    swap_setup = params.get("swap_setup", "yes") == "yes"
    blk_partition = params.get("blk_part", '')
    graphic = params.get("graphics", "no") == "yes"
    vm = env.get_vm(vm_name)
    guestmemory = None
    max_network = params.get("max_network", "no") == "yes"
    max_disk = params.get("max_disk", "no") == "yes"
    num_network = int(params.get("num_network", 16))
    num_disk = int(params.get("num_disk", 16))
    drive_format = params.get("drive_format", "scsi")
    disk_format = params.get("disk_format", "qcow2")
    netdst = params.get("netdst", "virbr0")
    memunit = params.get("memunit", 'G')
    failed = False
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    # Destroy the vm
    vm.destroy()
    try:
        # Setup swap
        if swap_setup:
            if not blk_partition:
                test.cancel("block partition is not given")
            # Check if valid partition or raise
            cmd = "blkid /dev/%s | awk '{print $2}'" % blk_partition
            output = process.system_output(cmd, shell=True).decode()
            if "UUID" not in output:
                test.cancel("Not a valid partition given for swap creation")
            # Create swap partition
            cmd = "mkswap /dev/%s" % blk_partition
            process.system(cmd, shell=True)
            cmd = "swapon /dev/%s" % blk_partition
            process.system(cmd, shell=True)

        # Check for host memory and cpu levels and validate against
        # requested limits; allow up to 10X for CPU and 2.5X for memory
        host_memory = int(memory.rounded_memtotal())
        if maxmem:
            if usermaxmem:
                guestmemory = usermaxmem
            else:
                # Normalize to GB
                guestmemory = int(2.5 * host_memory/(1024 * 1024))
        else:
            pass
        if not guestmemory:
            # assign default memory
            guestmemory = default_mem

        # Set the current and max memory params
        vmxml.current_mem_unit = memunit
        vmxml.max_mem_unit = memunit
        vmxml.current_mem = int(guestmemory)
        vmxml.max_mem = int(guestmemory)
        vmxml.sync()

        # Set vcpu and topology
        libvirt_xml.VMXML.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                                       vm_sockets, vm_cores, vm_threads)
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Set vnc display as needed
        graphics = vmxml.get_device_class('graphics')()
        if graphic:
            if not vmxml.get_graphics_devices("vnc"):
                graphics.add_graphic(vm_name, graphic="vnc")
        else:
            if vmxml.get_graphics_devices("vnc"):
                graphics.del_graphic(vm_name)
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        network_str = None
        disk_str = None
        # Set network devices
        if max_network:
            network_str = "ip link|grep ^[1-9]|wc -l"
            for idx in range(num_network):
                network = Interface(type_name="bridge")
                network.mac_address = utils_net.generate_mac_address_simple()
                network.source = {"bridge": netdst}
                vmxml.add_device(network)

        # Set disk devices
        if max_disk:
            for idx in range(num_disk):
                disk_str = "lsblk|grep ^[s,v]|grep 1G|wc -l"
                disk = Disk()
                disk_path = os.path.join(data_dir.get_data_dir(), "images", "%s.qcow2" % idx)
                if "scsi" in drive_format:
                    drive_format = "scsi"
                    disk_target = "sd%s" % letters[(idx % 51)+1]
                else:
                    drive_format = "virtio"
                    disk_target = "vd%s" % letters[(idx % 51)+1]
                disk_source = libvirt.create_local_disk("file", disk_path, '1', "qcow2")
                disk.device = "disk"
                disk.source = disk.new_disk_source(**{"attrs": {'file': disk_source}})
                disk.target = {"dev": disk_target, "bus": drive_format}
                disk.driver = {"name": "qemu", 'type': disk_format}
                vmxml.add_device(disk)
        vmxml.sync()

        # Start VM
        logging.debug("VM XML: \n%s", vmxml)
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            for err_msg, reason in failures.items():
                if err_msg in str(detail):
                    test.cancel(reason)
            test.fail("%s" % detail)

        # Check the memory and vcpu inside guest
        memtotal = vm.get_totalmem_sys()
        cpucount = vm.get_cpu_count()
        session = vm.wait_for_login()
        if network_str:
            guestnetworks = int(session.cmd_output(network_str))
            logging.debug("guestnet: %d", guestnetworks)
            if (guestnetworks - 2) != num_network:
                failed = True
                logging.error("mismatch in guest network devices: \n"
                              "Expected: %d\nActual: %d", num_network,
                              guestnetworks)
        if disk_str:
            guestdisks = int(session.cmd_output(disk_str))
            logging.debug("guestdisk: %d", guestdisks)
            if guestdisks != num_disk:
                failed = True
                logging.error("mismatch in guest disk devices: \n"
                              "Expected: %d\nActual: %s", num_disk, guestdisks)
        session.close()
        guestmem = utils_misc.normalize_data_size("%s G" % guestmemory)
        # TODO: 512 MB threshold deviation value, needs to be normalized
        if int(float(guestmem) - memtotal) > 512:
            failed = True
            logging.error("mismatch in guest memory: \nExpected: "
                          "%s\nActual: %s", float(guestmem), memtotal)
        if cpucount != current_vcpu:
            failed = True
            logging.error("mismatch in guest vcpu:\nExpected: %d\nActual: "
                          "%d", current_vcpu, cpucount)
        if failed:
            test.fail("Consult previous failures")
コード例 #26
0
    try:
        # Prepare the disk.
        sec_uuid = []
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {"name": volume_name, "capacity": int(volume_cap),
                      "allocation": int(volume_alloc), "format":
                      volume_target_format, "path": volume_target_path,
                      "label": volume_target_label,
                      "capacity_unit": volume_cap_unit}
        create_vol(pool_name, volume_target_encypt, vol_params)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_xml.source = disk_xml.new_disk_source(
            **{"attrs": {dev_attrs: volume_target_path}})
        disk_xml.driver = {"name": "qemu", "type": volume_target_format,
                           "cache": "none"}
        disk_xml.target = {"dev": device_target, "bus": device_bus}

        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuid.append(v_xml.encryption.secret["uuid"])
コード例 #27
0
def run(test, params, env):
    """
    Stress test for the hotplug feature of usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    keyboard = "yes" == params.get("usb_hotplug_keyboard", "no")
    mouse = "yes" == params.get("usb_hotplug_mouse", "no")
    tablet = "yes" == params.get("usb_hotplug_tablet", "no")
    disk = "yes" == params.get("usb_hotplug_disk", "no")

    attach_count = int(params.get("attach_count", "1"))
    attach_type = params.get("attach_type", "attach_device")
    bench_type = params.get("guest_bench", None)
    control_file = params.get("control_file", None)

    status_error = "yes" == params.get("status_error", "no")

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")

    if control_file is not None:
        params["test_control_file"] = control_file
        params["main_vm"] = vm_name
        control_path = os.path.join(test.virtdir, "control", control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True)
        session.cmd("%s &" % command)

        def _is_iozone_running():
            session_tmp = vm.wait_for_login()
            return not session_tmp.cmd_status("ps -ef|grep iozone|grep -v grep")

        def _is_stress_running():
            session_tmp = vm.wait_for_login()
            return not session_tmp.cmd_status("ps -ef|grep stress|grep -v grep")

        if bench_type == "stress":
            if not utils_misc.wait_for(_is_stress_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run stress in guest.\n"
                    "Since we need to run a autotest of iozone "
                    "in guest, so please make sure there are "
                    "some necessary packages in guest,"
                    "such as gcc, tar, bzip2"
                )
        elif bench_type == "iozone":
            if not utils_misc.wait_for(_is_iozone_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run iozone in guest.\n"
                    "Since we need to run a autotest of iozone "
                    "in guest, so please make sure there are "
                    "some necessary packages in guest,"
                    "such as gcc, tar, bzip2"
                )
        logging.debug("bench is already running in guest.")
    try:
        try:
            result = None
            disk_xml = None
            tablet_xml = None
            mouse_xml = None
            if not os.path.isdir(tmp_dir):
                os.mkdir(tmp_dir)
            for i in range(attach_count):
                path = os.path.join(tmp_dir, "%s.img" % i)
                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        utils_test.libvirt.create_local_disk("file", path, size="1M")
                        attach_cmd = "drive_add"
                        attach_cmd += " 0 id=drive-usb-disk%s,if=none,file=%s" % (i, path)

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-kdb,bus=usb1.0,id=kdb"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-mouse,bus=usb1.0,id=mouse"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-tablet,bus=usb1.0,id=tablet"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        utils_test.libvirt.create_local_disk("file", path, size="1M")
                        os.chmod(path, 0o666)
                        disk_xml = Disk(type_name="file")
                        disk_xml.device = "disk"
                        disk_xml.source = disk_xml.new_disk_source(**{"attrs": {"file": path}})
                        disk_xml.driver = {"name": "qemu", "type": "raw", "cache": "none"}
                        disk_xml.target = {"dev": "sdb", "bus": "usb"}

                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        disk_xml.address = disk_xml.new_disk_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        mouse_xml = Input("mouse")
                        mouse_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        mouse_xml.address = mouse_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        tablet_xml = Input("tablet")
                        tablet_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        tablet_xml.address = tablet_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        kbd_xml = Input("keyboard")
                        kbd_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        kbd_xml.address = kbd_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)

                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        attach_cmd = "drive_del"
                        attach_cmd += " drive-usb-disk"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_del"
                        attach_cmd += " mouse"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_del"
                        attach_cmd += " keyboard"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_del"
                        attach_cmd += " tablet"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        result = virsh.detach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        result = virsh.detach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        result = virsh.detach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        result = virsh.detach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
        except process.CmdError as e:
            if not status_error:
                raise error.TestFail("Failed to attach device.\n"
                                     "Detail: %s." % result)
    finally:
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        vm_xml_backup.sync()
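
For reference, the HMP hotplug sequence exercised above maps to standalone qemu-monitor-command calls like these (guest name, ids and paths illustrative):

from virttest import virsh

virsh.qemu_monitor_command(vm_name, "drive_add 0 id=drive-usb-disk0,"
                           "if=none,file=/tmp/0.img", options="--hmp")
virsh.qemu_monitor_command(vm_name, "device_add usb-mouse,bus=usb1.0,"
                           "id=mouse", options="--hmp")
virsh.qemu_monitor_command(vm_name, "device_del mouse", options="--hmp")
virsh.qemu_monitor_command(vm_name, "drive_del drive-usb-disk0",
                           options="--hmp")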
コード例 #28
0
ファイル: libvirt_scsi.py プロジェクト: spiceqa/virt-test
def run_libvirt_scsi(test, params, env):
    # Get variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    img_type = ('yes' == params.get("libvirt_scsi_img_type", "no"))
    cdrom_type = ('yes' == params.get("libvirt_scsi_cdrom_type", "no"))
    partition_type = ('yes' == params.get("libvirt_scsi_partition_type", "no"))
    partition = params.get("libvirt_scsi_partition",
                           "ENTER.YOUR.AVAILABLE.PARTITION")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    # Init a VM instance and a VMXML instance.
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a backup of xml to restore it in cleanup.
    backup_xml = vmxml.copy()
    # Add a scsi controller if there is not.
    controller_devices = vmxml.get_devices("controller")
    scsi_controller_exists = False
    for device in controller_devices:
        if device.type == "scsi":
            scsi_controller_exists = True
            break
    if not scsi_controller_exists:
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)
    # Add disk with bus of scsi into vmxml.
    if img_type:
        # Init a QemuImg instance.
        img_name = "libvirt_scsi"
        params['image_name'] = img_name
        image = qemu_storage.QemuImg(params, data_dir.get_tmp_dir(), img_name)
        # Create a image.
        img_path, _ = image.create(params)
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {
                'file': img_path
            }})
        img_disk.target = {'dev': "vde", 'bus': "scsi"}
        vmxml.add_device(img_disk)
    if cdrom_type:
        # Init a CdromDisk instance.
        cdrom_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_scsi")
        cdrom = CdromDisk(cdrom_path, data_dir.get_tmp_dir())
        cdrom.close()
        cdrom_disk = Disk(type_name="file")
        cdrom_disk.device = "cdrom"
        cdrom_disk.target = {'dev': "vdf", 'bus': "scsi"}
        cdrom_disk.source = cdrom_disk.new_disk_source(
            **{'attrs': {
                'file': cdrom_path
            }})
        vmxml.add_device(cdrom_disk)
    if partition_type:
        if partition.count("ENTER.YOUR"):
            raise error.TestNAError("Partition for partition test"
                                    "is not configured.")
        partition_disk = Disk(type_name="block")
        partition_disk.device = "disk"
        partition_disk.target = {'dev': "vdg", 'bus': "scsi"}
        partition_disk.source = partition_disk.new_disk_source(
            **{'attrs': {
                'dev': partition
            }})
        vmxml.add_device(partition_disk)
    # sync the vmxml with VM.
    vmxml.sync()
    # Check the result of scsi disk.
    try:
        try:
            vm.start()
            # Start VM successfully.
            if status_error:
                raise error.TestFail('Starting VM succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case. "
                                     "error: %s" % e)
    finally:
        # clean up.
        backup_xml.sync()
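
The controller added above renders in the domain XML (approximately) as:

    <controller type='scsi' index='0' model='virtio-scsi'/>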
コード例 #29
0
def run(test, params, env):
    """
    Test for hotplug usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)
        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError) as e:
        test.fail("Test failed: %s" % str(e))

    def is_hotplug_ok():
        try:
            output = session.cmd_output("fdisk -l | grep -c '^Disk /dev/.* 1 M'")
            if int(output.strip()) != attach_count:
                return False
            else:
                return True
        except ShellTimeoutError as detail:
            test.fail("Check for hotplugged disks failed: %s" % detail)

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    try:
        result = None
        dev_xml = None
        opt = "--hmp"
        for i in range(attach_count):
            if usb_type == "storage":
                path = os.path.join(tmp_dir, "%s.img" % i)
                libvirt.create_local_disk("file", path, size="1M", disk_format="qcow2")
                os.chmod(path, 0o666)

            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd = "drive_add"
                    attach_cmd += (" 0 id=drive-usb-%s,if=none,file=%s" % (i, path))

                    result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                    if result.exit_status or (result.stdout.strip().find("OK") == -1):
                        raise process.CmdError(result.command, result)

                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += ("id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s" % (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type, usb_type, i)

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                attributes = {'type_name': "usb", 'bus': "1", 'port': "0"}
                if usb_type == "storage":
                    dev_xml = Disk(type_name="file")
                    dev_xml.device = "disk"
                    dev_xml.source = dev_xml.new_disk_source(**{"attrs": {'file': path}})
                    dev_xml.driver = {"name": "qemu", "type": 'qcow2', "cache": "none"}
                    dev_xml.target = {"dev": 'sdb', "bus": "usb"}
                    dev_xml.address = dev_xml.new_disk_address(**{"attrs": attributes})
                else:
                    if usb_type == "mouse":
                        dev_xml = Input("mouse")
                    elif usb_type == "tablet":
                        dev_xml = Input("tablet")
                    else:
                        dev_xml = Input("keyboard")

                    dev_xml.input_bus = "usb"
                    dev_xml.address = dev_xml.new_input_address(**{"attrs": attributes})

                result = virsh.attach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)

        if status_error and usb_type == "storage":
            if utils_misc.wait_for(is_hotplug_ok, timeout=30):
                # Sometimes we meet an error but the ret in $? is 0.
                test.fail("\nAttach device successfully in negative case."
                          "\nExcept it fail when attach count exceed maximum."
                          "\nDetail: %s" % result)

        for i in range(attach_count):
            attach_cmd = "device_del"
            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd += (" drive-usb-%s" % i)
                else:
                    if usb_type == "mouse":
                        attach_cmd += " mouse"
                    elif usb_type == "tablet":
                        attach_cmd += " tablet"
                    else:
                        attach_cmd += " keyboard"

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                result = virsh.detach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
    except process.CmdError as e:
        if not status_error:
            # live attach of device 'input' is not supported
            ret = result.stderr.find("Operation not supported")
            if usb_type != "storage" and ret > -1:
                pass
            else:
                test.fail("failed to attach device.\nDetail: %s." % result)
    finally:
        session.close()
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        utils_selinux.set_status(backup_sestatus)
        vm_xml_backup.sync()
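
The USB address built for each attached device pins it to bus 1, port 0, and renders (approximately) as:

    <address type='usb' bus='1' port='0'/>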
コード例 #30
0
ファイル: virsh_boot.py プロジェクト: balamuruhans/tp-libvirt
def set_domain_disk(vmxml, blk_source, params, test):
    """
    Replace the domain disk with new setup device or download image

    :param vmxml: The instance of VMXML class
    :param blk_source: The domain disk image path
    :param params: Avocado params object
    :param test: Avocado test object
    """
    disk_type = params.get("disk_type", "file")
    boot_dev = params.get("boot_dev", "hd")
    target_dev = params.get("target_dev", "vdb")
    device_bus = params.get("device_bus", "virtio")
    disk_img = params.get("disk_img")
    image_size = params.get("image_size", "3G")
    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    driver_type = params.get("driver_type", "qcow2")
    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    source_protocol = params.get("source_protocol", "")
    boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso")
    non_release_os_url = params.get("non_release_os_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(), "non_released_os.qcow2")
    brick_path = os.path.join(test.virtdir, "gluster-pool")

    global cleanup_iscsi
    global cleanup_gluster
    disk_params = {'disk_type': disk_type,
                   'target_dev': target_dev,
                   'target_bus': device_bus,
                   'driver_type': driver_type}
    if source_protocol == 'iscsi':
        if disk_type == 'block':
            kwargs = {'image_size': image_size, 'disk_format': disk_format}
            iscsi_target = prepare_iscsi_disk(blk_source, **kwargs)
            if iscsi_target is None:
                test.error("Failed to create iscsi disk")
            else:
                cleanup_iscsi = True
                disk_params.update({'source_file': iscsi_target})
    elif source_protocol == 'gluster':
        if disk_type == 'network':
            kwargs = {'vol_name': vol_name,
                      'brick_path': brick_path,
                      'disk_img': disk_img,
                      'disk_format': disk_format}
            host_ip = prepare_gluster_disk(blk_source, test, **kwargs)
            if host_ip is None:
                test.error("Failed to create glusterfs disk")
            else:
                cleanup_gluster = True
            source_name = "%s/%s" % (vol_name, disk_img)
            disk_params.update({'source_name': source_name,
                                'source_host_name': host_ip,
                                'source_host_port': '24007',
                                'source_protocol': source_protocol})
    elif source_protocol == 'rbd':
        if disk_type == 'network':
            disk_path = ("rbd:%s:mon_host=%s" %
                         (disk_src_name, mon_host))
            disk_cmd = ("qemu-img convert -O %s %s %s"
                        % (disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False)
            disk_params.update({'source_name': disk_src_name,
                                'source_host_name': disk_src_host,
                                'source_host_port': disk_src_port,
                                'source_protocol': source_protocol})
    elif non_release_os_url:
        disk_params.update({'source_file': download_file_path})
    elif boot_dev == "cdrom":
        disk_params.update({'device_type': 'cdrom',
                            'source_file': boot_iso_file})
    else:
        disk_params.update({'source_file': blk_source})

    new_disk = Disk(type_name=disk_type)
    with open(create_disk_xml(disk_params)) as disk_xml_file:
        new_disk.xml = disk_xml_file.read()
    vmxml.remove_all_disk()
    vmxml.add_device(new_disk)
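
A hypothetical invocation (values illustrative): replace the guest's disks with the prepared source and redefine the domain, since the helper itself does not sync:

vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
set_domain_disk(vmxml, blk_source, params, test)
vmxml.sync()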
コード例 #31
0
ファイル: libvirt_scsi.py プロジェクト: ayiyaliing/virt-test
def run_libvirt_scsi(test, params, env):
    # Get variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    img_type = ('yes' == params.get("libvirt_scsi_img_type", "no"))
    cdrom_type = ('yes' == params.get("libvirt_scsi_cdrom_type", "no"))
    partition_type = ('yes' == params.get("libvirt_scsi_partition_type", "no"))
    partition = params.get("libvirt_scsi_partition",
                           "ENTER.YOUR.AVAILIBLE.PARTITION")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    # Init a VM instance and a VMXML instance.
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_dumpxml(vm_name)
    # Keep a backup of xml to restore it in cleanup.
    backup_xml = vmxml.copy()
    # Add a scsi controller if there is not.
    controller_devices = vmxml.get_devices("controller")
    scsi_controller_exists = False
    for device in controller_devices:
        if device.type == "scsi":
            scsi_controller_exists = True
            break
    if not scsi_controller_exists:
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)
    # Add disk with bus of scsi into vmxml.
    if img_type:
        # Init a QemuImg instance.
        img_name = "libvirt_scsi"
        params['image_name'] = img_name
        image = qemu_storage.QemuImg(params, data_dir.get_tmp_dir(), img_name)
        # Create a image.
        img_path, _ = image.create(params)
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.target = {'dev': "vde",
                           'bus': "scsi"}
        vmxml.add_device(img_disk)
    if cdrom_type:
        # Init a CdromDisk instance.
        cdrom_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_scsi")
        cdrom = CdromDisk(cdrom_path, data_dir.get_tmp_dir())
        cdrom.close()
        cdrom_disk = Disk(type_name="file")
        cdrom_disk.device = "cdrom"
        cdrom_disk.target = {'dev': "vdf",
                             'bus': "scsi"}
        cdrom_disk.source = cdrom_disk.new_disk_source(
            **{'attrs': {'file': cdrom_path}})
        vmxml.add_device(cdrom_disk)
    if partition_type:
        if partition.count("ENTER.YOUR"):
            raise error.TestNAError("Partition for partition test"
                                    "is not configured.")
        partition_disk = Disk(type_name="block")
        partition_disk.device = "disk"
        partition_disk.target = {'dev': "vdg",
                                 'bus': "scsi"}
        partition_disk.source = partition_disk.new_disk_source(
            **{'attrs': {'dev': partition}})
        vmxml.add_device(partition_disk)
    # sync the vmxml with VM.
    vmxml.sync()
    # Check the result of scsi disk.
    try:
        try:
            vm.start()
            # Start VM successfully.
            if status_error:
                raise error.TestFail('Starting VM succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case. "
                                     "error: %s" % e)
    finally:
        # clean up.
        backup_xml.sync()
コード例 #32
0
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare backend storage (blkdev/iscsi/gluster/ceph)
    2.Use luks format to encrypt the backend storage
    3.Prepare a disk xml indicating to the backend storage with valid/invalid
      luks password
    4.Start VM with disk hot/cold plugged
    5.Check some disk operations in VM
    6.Check backend storage is still in luks format
    7.Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def encrypt_dev(device, params):
        """
        Encrypt device with luks format

        :param device: Storage device to be encrypted.
        :param params: From the dict to get encryption password.
        """
        password = params.get("luks_encrypt_passwd", "password")
        size = params.get("luks_size", "500M")
        cmd = ("qemu-img create -f luks "
               "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
               "-o key-secret=sec0 %s %s" % (password, device, size))
        if process.system(cmd, shell=True):
            test.fail("Can't create a luks encrypted img by qemu-img")

    def check_dev_format(device, fmt="luks"):
        """
        Check if device is in luks format

        :param device: Storage device to be checked.
        :param fmt: Expected disk format.
        :return: None; the test fails if the device is not in the expected format.
        """
        cmd_result = process.run("qemu-img" + ' -h', ignore_status=True,
                                 shell=True, verbose=False)
        if b'-U' in cmd_result.stdout:
            cmd = ("qemu-img info -U %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        else:
            cmd = ("qemu-img info %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        if cmd_result.exit_status:
            test.fail("device %s is not in %s format. err is: %s" %
                      (device, fmt, cmd_result.stderr))
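        # Example of the `qemu-img info` line the grep above matches:
        #   file format: luks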

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(added_part))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage options.
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")

    # Luks encryption info: luks_encrypt_passwd is the password used to
    # encrypt the luks image, and luks_secret_passwd is the password set in
    # the luks secret; set a wrong luks_secret_passwd for negative tests
    luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
    luks_secret_passwd = params.get("luks_secret_passwd", "password")
    # Backend storage auth info
    use_auth_usage = "yes" == params.get("use_auth_usage")
    if use_auth_usage:
        use_auth_uuid = False
    else:
        use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")

    status_error = "yes" == params.get("status_error")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    encryption_in_source = "yes" == params.get("encryption_in_source", "no")
    auth_in_source = "yes" == params.get("auth_in_source", "no")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}
    pvt = None

    if ((encryption_in_source or auth_in_source) and
            not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot put <encryption> or <auth> inside disk <source> "
                    "in this libvirt version.")
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Setup backend storage
        if backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                if enable_auth:
                    chap_user = params.get("chap_user", "redhat")
                    chap_passwd = params.get("chap_passwd", "password")
                    auth_sec_usage = params.get("auth_sec_usage",
                                                "libvirtiscsi")
                    auth_sec_dict = {"sec_usage": "iscsi",
                                     "sec_target": auth_sec_usage}
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    # Set password of auth secret (not luks encryption secret)
                    virsh.secret_set_value(auth_sec_uuid, chap_passwd,
                                           encode=True, debug=True)
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True, is_login=False, image_size=storage_size,
                        chap_user=chap_user, chap_passwd=chap_passwd,
                        portal_ip=iscsi_host)
                    # ISCSI auth attributes for disk xml
                    if use_auth_uuid:
                        disk_auth_dict = {"auth_user": chap_user,
                                          "secret_type": auth_sec_usage_type,
                                          "secret_uuid": auth_sec_uuid}
                    elif use_auth_usage:
                        disk_auth_dict = {"auth_user": chap_user,
                                          "secret_type": auth_sec_usage_type,
                                          "secret_usage": auth_sec_usage_target}
                else:
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True, is_login=False, image_size=storage_size,
                        portal_ip=iscsi_host)
                device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port,
                                                         iscsi_target, lun_num)
                disk_src_dict = {"attrs": {"protocol": "iscsi",
                                           "name": "%s/%s" % (iscsi_target, lun_num)},
                                 "hosts": [{"name": iscsi_host, "port": iscsi_port}]}
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = libvirt.setup_or_cleanup_gluster(
                    is_setup=True,
                    vol_name=gluster_vol_name,
                    pool_name=gluster_pool_name)
            device_source = "gluster://%s/%s/%s" % (gluster_host_ip,
                                                    gluster_vol_name,
                                                    gluster_img_name)
            disk_src_dict = {"attrs": {"protocol": "gluster",
                                       "name": "%s/%s" % (gluster_vol_name,
                                                          gluster_img_name)},
                             "hosts":  [{"name": gluster_host_ip,
                                         "port": "24007"}]}
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Prepare a blank config path so cleanup can tell whether to delete it at the end of the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                                     "sec_name": "ceph_auth_secret"}
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                                           debug=True)
                    disk_auth_dict = {"auth_user": ceph_auth_user,
                                      "secret_type": auth_sec_usage_type,
                                      "secret_uuid": auth_sec_uuid}
                    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                           "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
                else:
                    test.error("No ceph client name/key provided.")
                device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name,
                                                                   ceph_mon_ip,
                                                                   key_file)
            else:
                device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)
            disk_src_dict = {"attrs": {"protocol": "rbd",
                                       "name": ceph_disk_name},
                             "hosts":  [{"name": ceph_host_ip,
                                         "port": ceph_host_port}]}
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        else:
            test.cancel("Only iscsi/gluster/ceph/nfs backends are covered for now.")
        logging.debug("device source is: %s", device_source)
        luks_sec_uuid = libvirt.create_secret(params)
        logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
        ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd,
                                     encode=True, debug=True)
        libvirt.check_exit_status(ret)
        encrypt_dev(device_source, params)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            if auth_in_source:
                disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
            else:
                disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_encryption_dict = {"encryption": "luks",
                                "secret": {"type": "passphrase",
                                           "uuid": luks_sec_uuid}}
        disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
        if encryption_in_source:
            disk_source.encryption = disk_encryption
        else:
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        vmxml.sync()
        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as details:
            # When a wrong password is used in the disk xml for coldplug
            # cases, the VM cannot be started
            if status_error and not hotplug_disk:
                logging.info("VM failed to start as expected: %s", str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        if hotplug_disk:
            result = virsh.attach_device(vm_name, disk_xml.xml,
                                         ignore_status=True, debug=True)
            libvirt.check_exit_status(result, status_error)
        if check_partitions and not status_error:
            if not check_in_vm(device_target, old_parts):
                test.fail("Check disk partitions in VM failed")
        check_dev_format(device_source)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            libvirt.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name)
        elif backend_storage_type == "ceph":
            # Remove ceph config file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)

        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
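The encrypt_dev() helper called in this example is not part of the listing. A minimal sketch of what it could look like, assuming it reuses the qemu-img luks approach shown in example #37 below and that the passphrase travels in params (the key names here are illustrative):

from avocado.utils import process

def encrypt_dev(device_source, params):
    """Sketch: format device_source as a LUKS image with qemu-img."""
    passwd = params.get("luks_encrypt_passwd", "password")
    size = params.get("storage_size", "1G")
    # qemu-img reads the passphrase from an inline base64-encoded secret
    cmd = ("qemu-img create -f luks "
           "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
           "-o key-secret=sec0 %s %s" % (passwd, device_source, size))
    process.run(cmd, shell=True, verbose=True)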
コード例 #33
0
ファイル: blockcommand.py プロジェクト: yalzhang/tp-libvirt
def run(test, params, env):
    """
    Test vm backingchain, blockcopy
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')
    case = params.get('case', '')
    blockcommand = params.get('blockcommand', '')
    blk_top = int(params.get('top', 0))
    blk_base = int(params.get('base', 0))
    opts = params.get('opts', '--verbose --wait')
    check_func = params.get('check_func', '')
    disk_type = params.get('disk_type', '')
    disk_src = params.get('disk_src', '')
    driver_type = params.get('driver_type', 'qcow2')
    vol_name = params.get('vol_name', 'vol_blockpull')
    pool_name = params.get('pool_name', '')
    brick_path = os.path.join(data_dir.get_tmp_dir(), pool_name)
    vg_name = params.get('vg_name', 'HostVG')
    vol_size = params.get('vol_size', '10M')

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    # List to collect paths to delete after test
    file_to_del = []
    virsh_dargs = {'debug': True, 'ignore_status': False}

    try:
        all_disks = vmxml.get_disk_source(vm_name)
        if not all_disks:
            test.error('No disk file found in the vm.')
        image_file = all_disks[0].find('source').get('file')
        logging.debug('Image file of vm: %s', image_file)

        # Get all dev of virtio disks to calculate the dev of new disk
        all_vdisks = [disk for disk in all_disks if disk.find('target').get('dev').startswith('vd')]
        disk_dev = all_vdisks[-1].find('target').get('dev')
        new_dev = disk_dev[:-1] + chr(ord(disk_dev[-1]) + 1)
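        # e.g. if the last virtio disk is 'vdb', new_dev becomes 'vdc';
        # this simple increment assumes the last dev letter is not 'z'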

        # Setup iscsi target
        if disk_src == 'iscsi':
            disk_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target: %s', disk_target)

        # Setup lvm
        elif disk_src == 'lvm':
            # Stop multipathd to avoid vgcreate failure
            multipathd = service.Factory.create_service("multipathd")
            multipathd_status = multipathd.status()
            if multipathd_status:
                multipathd.stop()

            # Setup iscsi target
            device_name = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True,
                image_size='1G')
            logging.debug('ISCSI target for lvm: %s', device_name)

            # Create logical device
            logical_device = device_name
            lv_utils.vg_create(vg_name, logical_device)
            vg_created = True

            # Create logical volume as backing store
            vol_bk, vol_disk = 'vol1', 'vol2'
            lv_utils.lv_create(vg_name, vol_bk, vol_size)

            disk_target = '/dev/%s/%s' % (vg_name, vol_bk)
            src_vol = '/dev/%s/%s' % (vg_name, vol_disk)

        # Setup gluster
        elif disk_src == 'gluster':
            host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True, brick_path=brick_path, **params)
            logging.debug(host_ip)
            gluster_img = 'test.img'
            img_create_cmd = "qemu-img create -f raw /mnt/%s 10M" % gluster_img
            process.run("mount -t glusterfs %s:%s /mnt; %s; umount /mnt"
                        % (host_ip, vol_name, img_create_cmd), shell=True)
            disk_target = 'gluster://%s/%s/%s' % (host_ip, vol_name, gluster_img)

        else:
            test.error('Unsupported disk source for this test.')

        new_image = os.path.join(os.path.split(image_file)[0], 'test.img')
        params['snapshot_list'] = ['s%d' % i for i in range(1, 5)]

        if disk_src == 'lvm':
            new_image = src_vol
            if disk_type == 'block':
                new_image = disk_target
                for i in range(2, 6):
                    lv_utils.lv_create(vg_name, 'vol%s' % i, vol_size)
                snapshot_image_list = ['/dev/%s/vol%s' % (vg_name, i) for i in range(2, 6)]
        else:
            file_to_del.append(new_image)
            snapshot_image_list = [new_image.replace('img', i) for i in params['snapshot_list']]
        cmd_create_img = 'qemu-img create -f %s -b %s %s -F raw' % (driver_type, disk_target, new_image)
        if not (disk_type == 'block' and driver_type == 'raw'):
            process.run(cmd_create_img, verbose=True, shell=True)
        info_new = utils_misc.get_image_info(new_image)
        logging.debug(info_new)

        # Create xml of new disk and add it to vmxml
        if disk_type:
            new_disk = Disk()
            new_disk.xml = libvirt.create_disk_xml({
                'type_name': disk_type,
                'driver_type': driver_type,
                'target_dev': new_dev,
                'source_file': new_image
            })

            logging.debug(new_disk.xml)

            # list.append() returns None, so append first and assign back
            devices = vmxml.devices
            devices.append(new_disk)
            vmxml.devices = devices
            vmxml.xmltreefile.write()
            logging.debug(vmxml)
            vmxml.sync()

        vm.start()
        logging.debug(virsh.dumpxml(vm_name))

        # Create backing chain
        for i in range(len(params['snapshot_list'])):
            virsh.snapshot_create_as(
                vm_name,
                '%s --disk-only --diskspec %s,file=%s,stype=%s' %
                (params['snapshot_list'][i], new_dev, snapshot_image_list[i],
                 disk_type),
                **virsh_dargs
            )

            # Get path of each snapshot file
            snaps = virsh.domblklist(vm_name, debug=True).stdout.splitlines()
            for line in snaps:
                if line.lstrip().startswith(('hd', 'sd', 'vd')):
                    file_to_del.append(line.split()[-1])

        qemu_img_cmd = 'qemu-img info --backing-chain %s' % snapshot_image_list[-1]
        if libvirt_storage.check_qemu_image_lock_support():
            qemu_img_cmd += " -U"
        bc_info = process.run(qemu_img_cmd, verbose=True, shell=True).stdout_text

        if disk_type != 'block':
            bc_chain = snapshot_image_list[::-1] + [new_image, disk_target]
        else:
            bc_chain = snapshot_image_list[::-1] + [new_image]
        bc_result = check_backingchain(bc_chain, bc_info)
        if not bc_result:
            test.fail('qemu-img info output of backing chain is not correct: %s'
                      % bc_info)

        # Generate blockpull/blockcommit options
        # getattr avoids eval() when dispatching to virsh.blockpull/blockcommit
        virsh_blk_cmd = getattr(virsh, blockcommand)
        if blockcommand == 'blockpull' and blk_base != 0:
            opts += ' --base {dev}[{}]'.format(blk_base, dev=new_dev)
        elif blockcommand == 'blockcommit':
            opt_top = ' --top {dev}[{}]'.format(blk_top, dev=new_dev) if blk_top != 0 else ''
            opt_base = ' --base {dev}[{}]'.format(blk_base, dev=new_dev) if blk_base != 0 else ''
            # parenthesize the conditional so opt_top/opt_base are not
            # dropped when blk_top != 0
            opts += opt_top + opt_base + (' --active' if blk_top == 0 else '')
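        # e.g. with new_dev == 'vdb', blk_top == 0 and blk_base == 2, the
        # resulting call is roughly:
        #   virsh blockcommit <vm> vdb --verbose --wait --base vdb[2] --active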

        # Do blockpull/blockcommit
        virsh_blk_cmd(vm_name, new_dev, opts, **virsh_dargs)
        if blockcommand == 'blockcommit':
            virsh.blockjob(vm_name, new_dev, '--pivot', **virsh_dargs)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("XML after %s: %s" % (blockcommand, vmxml))

        # Check backing chain after blockpull/blockcommit
        check_bc_func_name = 'check_bc_%s' % check_func
        check_bc = globals().get(check_bc_func_name)
        if callable(check_bc):
            if not check_bc(blockcommand, vmxml, new_dev, bc_chain):
                test.fail('Backing chain check after %s failed' % blockcommand)
        else:
            logging.warning('Function "%s" is not implemented.', check_bc_func_name)

        virsh.dumpxml(vm_name, debug=True)

        # Check whether login is successful
        try:
            vm.wait_for_login().close()
        except Exception as e:
            test.fail('VM login failed: %s' % e)

    finally:
        logging.info('Start cleaning up.')
        for ss in params.get('snapshot_list', []):
            virsh.snapshot_delete(vm_name, '%s --metadata' % ss, debug=True)
        bkxml.sync()
        for path in file_to_del:
            logging.debug('Remove %s', path)
            if os.path.exists(path):
                os.remove(path)
        if disk_src == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'lvm':
            process.run('rm -rf /dev/%s/%s' % (vg_name, vol_disk), ignore_status=True)
            if 'vol_bk' in locals():
                lv_utils.lv_remove(vg_name, vol_bk)
            if 'vg_created' in locals() and vg_created:
                lv_utils.vg_remove(vg_name)
                cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
                pv_name = process.system_output(cmd, shell=True, verbose=True).strip()
                if pv_name:
                    process.run("pvremove %s" % pv_name, verbose=True, ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src == 'gluster':
            gluster.setup_or_cleanup_gluster(
                is_setup=False, brick_path=brick_path, **params)
        if 'multipathd_status' in locals() and multipathd_status:
            multipathd.start()
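The check_backingchain() helper used above is also not included in this listing. A minimal sketch, assuming bc_chain is the ordered list of expected image paths (top to base) and bc_info is the text printed by 'qemu-img info --backing-chain':

def check_backingchain(bc_chain, bc_info):
    """Return True if every path in bc_chain occurs in bc_info, in order."""
    pos = 0
    for path in bc_chain:
        idx = bc_info.find(path, pos)
        if idx == -1:
            return False
        pos = idx + len(path)
    return True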
コード例 #34
0
def run(test, params, env):
    """
    Test virsh domblkerror in 2 types error
    1. unspecified error
    2. no space
    """

    if not virsh.has_help_command('domblkerror'):
        raise error.TestNAError("This version of libvirt does not support "
                                "domblkerror test")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    error_type = params.get("domblkerror_error_type")
    timeout = params.get("domblkerror_timeout", 240)
    mnt_dir = params.get("domblkerror_mnt_dir", "/home/test")
    tmp_file = params.get("domblkerror_tmp_file", "/tmp/fdisk-cmd")
    export_file = params.get("nfs_export_file", "/etc/exports")
    img_name = params.get("domblkerror_img_name", "libvirt-disk")
    img_size = params.get("domblkerror_img_size")
    target_dev = params.get("domblkerror_target_dev", "vdb")
    pool_name = params.get("domblkerror_pool_name", "fs_pool")
    vol_name = params.get("domblkerror_vol_name", "vol1")

    vm = env.get_vm(vm_name)
    # backup /etc/exports
    shutil.copyfile(export_file, "%s.bak" % export_file)
    selinux_bak = ""
    # backup xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Generate tmp dir
        tmp_dir = data_dir.get_tmp_dir()
        img_dir = os.path.join(tmp_dir, 'images')
        if not os.path.exists(img_dir):
            os.mkdir(img_dir)
        # Generate attached disk
        utils.run("qemu-img create %s %s" %
                  (os.path.join(img_dir, img_name), img_size))

        # Get unspecified error
        if error_type == "unspecified error":
            # In this situation, the guest attaches a disk on nfs; stopping
            # the nfs service pauses the guest with an unspecified error
            nfs_dir = os.path.join(tmp_dir, 'mnt')
            if not os.path.exists(nfs_dir):
                os.mkdir(nfs_dir)
            mount_opt = "rw,no_root_squash,async"
            res = utils_test.libvirt.setup_or_cleanup_nfs(
                is_setup=True, mount_dir=nfs_dir, is_mount=False,
                export_options=mount_opt, export_dir=img_dir)
            selinux_bak = res["selinux_status_bak"]
            utils.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 "
                      "127.0.0.1:%s %s" % (img_dir, nfs_dir))
            img_path = os.path.join(nfs_dir, img_name)
            nfs_service = Factory.create_service("nfs")

        elif error_type == "no space":
            # Steps to generate the no-space block error:
            # 1. Prepare an iscsi disk and build a fs pool on it
            # 2. Create a vol with larger capacity and 0 allocation
            # 3. Attach this disk to the guest
            # 4. In the guest, create a large image in the vol, which may
            #    pause the guest

            pool_target = os.path.join(tmp_dir, pool_name)
            _pool_vol = utils_test.libvirt.PoolVolumeTest(test, params)
            _pool_vol.pre_pool(pool_name, "fs", pool_target, img_name,
                               image_size=img_size)
            _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name)
            img_path = os.path.join(pool_target, vol_name)

        # Generate disk xml
        # Guest will attach a disk with cache=none and error_policy=stop
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.driver = {'name': "qemu",
                           'type': "raw",
                           'cache': "none",
                           'error_policy': "stop"}
        img_disk.target = {'dev': target_dev,
                           'bus': "virtio"}
        logging.debug("disk xml is %s", img_disk.xml)

        # Start guest and get session
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Get disk list before operation
        get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2"
        bef_list = session.cmd_output(get_disks_cmd).split("\n")

        # Attach disk to guest
        ret = virsh.attach_device(domain_opt=vm_name,
                                  file_opt=img_disk.xml)
        if ret.exit_status != 0:
            raise error.TestFail("Fail to attach device %s" % ret.stderr)
        time.sleep(2)
        logging.debug("domain xml is %s", virsh.dumpxml(vm_name))
        # get disk list after attach
        aft_list = session.cmd_output(get_disks_cmd).split("\n")
        # Find new disk after attach
        new_disk = "".join(list(set(bef_list) ^ set(aft_list)))
        logging.debug("new disk is %s", new_disk)

        def create_large_image():
            """
            Create large image in guest
            """
            # create partition and file system
            session.cmd("echo 'n\np\n\n\n\nw\n' > %s" % tmp_file)
            # mount disk and write file in it
            try:
                # The following steps may cause guest paused before it return
                session.cmd("fdisk %s < %s" % (new_disk, tmp_file))
                session.cmd("mkfs.ext3 %s1" % new_disk)
                session.cmd("mkdir -p %s && mount %s1 %s" %
                            (mnt_dir, new_disk, mnt_dir))
                session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 "
                            "count=51200 && sync" % mnt_dir)
            except Exception as err:
                logging.debug("Expected failure: %s", err)
            session.close()

        create_large_image()
        if error_type == "unspecified error":
            # umount nfs to trigger error after create large image
            nfs_service.stop()
            logging.debug("nfs status is %s", nfs_service.status())

        # wait and check the guest status with timeout
        def _check_state():
            """
            Check domain state
            """
            return (vm.state() == "paused")

        if not utils_misc.wait_for(_check_state, timeout):
            raise error.TestFail("Guest does not paused, it is %s now" %
                                 vm.state())
        else:
            logging.info("Now domain state changed to paused status")
            output = virsh.domblkerror(vm_name)
            if output.exit_status == 0:
                expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
                if output.stdout.strip() == expect_result:
                    logging.info("Get expect result: %s", expect_result)
                else:
                    raise error.TestFail("Failed to get expect result, get %s"
                                         % output.stdout.strip())
            else:
                raise error.TestFail("Fail to get domblkerror info:%s" %
                                     output.stderr)
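For reference, 'virsh domblkerror' prints one 'device: error' pair per line, which is how the expected result above is built. A small hypothetical helper to turn that output into a dict:

def parse_domblkerror(output):
    """Map lines like 'vdb: no space' to {'vdb': 'no space'}."""
    errors = {}
    for line in output.strip().splitlines():
        if ':' in line:
            dev, err = line.split(':', 1)
            errors[dev.strip()] = err.strip()
    return errors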
コード例 #35
0
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare pool, volume.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name. Pool name.
        :param p_type. Pool type.
        :param p_target. Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name. Pool name.
        :param target_encrypt_params. Encrypt parameters in dict.
        :param vol_params. Volume parameters dict.
        """
        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_secret(vol_path):
        """
        Create secret.

        :param vol_path. volume path.
        :return: secret uuid if created successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        libvirt.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError as e:
            test.error("Fail to get newly created secret uuid")
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(secret_password_no_encoded.encode(encoding)).decode(encoding)
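        # e.g. the default password 'redhat' encodes to 'cmVkaGF0'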
        ret = virsh.secret_set_value(encryption_uuid, secret_string,
                                     **virsh_dargs)
        libvirt.check_exit_status(ret)
        return encryption_uuid

    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm. VM guest.
        :param target. Disk dev in VM.
        :param old_parts. Partition list collected before the disk attach.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status("rpm -q parted || "
                                          "yum install -y parted", 300)
            if rpm_stat != 0:
                test.fail("Failed to query/install parted, make sure"
                          " that you have usable repo in guest")

            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            device_source = os.path.join(os.sep, 'dev', added_part)
            libvirt.mk_label(device_source, session=session)
            libvirt.mk_part(device_source, size="10M", session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    encryption_in_source = "yes" == params.get("encryption_in_source")
    encryption_out_source = "yes" == params.get("encryption_out_source")
    if encryption_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Cannot put <encryption> inside disk <source> in "
                    "this libvirt version.")
    # Pool/Volume options.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    volume_name = params.get("vol_name")
    volume_alloc = params.get("vol_alloc")
    volume_cap_unit = params.get("vol_cap_unit")
    volume_cap = params.get("vol_cap")
    volume_target_path = params.get("target_path")
    volume_target_format = params.get("target_format")
    volume_target_encypt = params.get("target_encypt", "")
    volume_target_label = params.get("target_label")

    hotplug = "yes" == params.get("virt_disk_device_hotplug")
    status_error = "yes" == params.get("status_error")
    secret_type = params.get("secret_type", "passphrase")
    secret_password_no_encoded = params.get("secret_password_no_encoded", "redhat")
    virt_disk_qcow2_format = "yes" == params.get("virt_disk_qcow2_format")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    sec_encryption_uuid = None
    try:
        # Prepare the disk.
        sec_uuids = []
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {"name": volume_name, "capacity": int(volume_cap),
                      "allocation": int(volume_alloc), "format":
                      volume_target_format, "path": volume_target_path,
                      "label": volume_target_label,
                      "capacity_unit": volume_cap_unit}
        vol_encryption_params = {}
        vol_encryption_params.update({"format": volume_target_encypt})
        # For any disk format other than qcow2, a secret needs to be created first.
        if not virt_disk_qcow2_format:
            # create secret.
            sec_encryption_uuid = create_secret(volume_target_path)
            sec_uuids.append(sec_encryption_uuid)
            vol_encryption_params.update({"secret": {"type": secret_type, "uuid": sec_encryption_uuid}})
        try:
            # On libvirt versions lower than 2.5.0, creating a luks
            # encryption volume is not supported, so skip it.
            create_vol(pool_name, vol_encryption_params, vol_params)
        except AssertionError as info:
            err_msgs = ("create: invalid option")
            if str(info).count(err_msgs):
                test.error("Creating luks encryption volume "
                           "is not supported on this libvirt version")
            else:
                test.error("Failed to create volume."
                           "Error: %s" % str(info))
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_source = disk_xml.new_disk_source(
                **{"attrs": {dev_attrs: volume_target_path}})
        disk_xml.driver = {"name": "qemu", "type": volume_target_format,
                           "cache": "none"}
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuids.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format,
                          v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            encryption_dict = {"encryption": v_xml.encryption.format,
                               "secret": {"type": v_xml.encryption.secret["type"],
                                          "uuid": v_xml.encryption.secret["uuid"]}}
            if encryption_in_source:
                disk_source.encryption = disk_xml.new_encryption(
                        **encryption_dict)
            if encryption_out_source:
                disk_xml.encryption = disk_xml.new_encryption(
                        **encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(disk_xml)
            vmxml.sync()

        try:
            # Start the VM and do disk hotplug if required,
            # then check disk status in vm.
            # Note that a LUKS encrypted virtual disk without <encryption>
            # can be normally started or attached since qemu just treats it
            # as raw, so we don't test LUKS with status_error=TRUE.
            vm.start()
            vm.wait_for_login()
            if status_error:
                if hotplug:
                    logging.debug("attaching disk, expecting error...")
                    result = virsh.attach_device(vm_name, disk_xml.xml)
                    libvirt.check_exit_status(result, status_error)
                else:
                    test.fail("VM started unexpectedly.")
            else:
                if hotplug:
                    result = virsh.attach_device(vm_name, disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
                    result = virsh.detach_device(vm_name, disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                else:
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                if hotplug:
                    test.fail("In hotplug scenario, VM should "
                              "start successfully but not."
                              "Error: %s", str(e))
                else:
                    logging.debug("VM failed to start as expected."
                                  "Error: %s", str(e))
            else:
                # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images
                # are no longer supported.
                err_msgs = ("AES-CBC encrypted qcow2 images is"
                            " no longer supported in system emulators")
                if str(e).count(err_msgs):
                    test.cancel(err_msgs)
                else:
                    test.fail("VM failed to start."
                              "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
        virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
コード例 #36
0
        iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                      is_login=False,
                                                      image_size=emulated_size,
                                                      chap_user=chap_user,
                                                      chap_passwd=chap_passwd)

        # If we use the qcow2 disk format, the iscsi disk should be formatted first.
        if device_format == "qcow2":
            cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/1 %s"
                   % (iscsi_host, iscsi_port, iscsi_target, emulated_size))
            utils.run(cmd)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.source = disk_xml.new_disk_source(
            **{"attrs": {"protocol": "iscsi", "name": "%s/1" % iscsi_target},
               "hosts": [{"name": iscsi_host, "port": iscsi_port}]})
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        disk_xml.driver = {"name": "qemu", "type": device_format}

        # Check if we want to use a faked uuid.
        if not uuid:
            uuid = secret_uuid
        auth_dict = {}
        if auth_uuid:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_uuid": uuid}
コード例 #37
0
def run(test, params, env):
    """
    Test virsh domblkthreshold option.

    1.Prepare backend storage (file/luks/iscsi/gluster/ceph/nbd)
    2.Start VM
    3.Set domblkthreshold on target device in VM
    4.Trigger one threshold event
    5.Check threshold event is received as expected
    6.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    block_threshold_timeout = params.get("block_threshold_timeout", "120")
    event_type = params.get("event_type", "block-threshold")
    block_threshold_option = params.get("block_threshold_option", "--loop")

    def set_vm_block_domblkthreshold(vm_name, target_device, threshold, **dargs):
        """
        Set VM block threshold on specific target device.

        :param vm_name: VM name.
        :param target_device: target device in VM
        :param threshold: threshold value with specific unit such as 100M
        :param dargs: mutable parameter dict
        """
        ret = virsh.domblkthreshold(vm_name, target_device, threshold, **dargs)
        libvirt.check_exit_status(ret)
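        # CLI equivalent of the call above, e.g.:
        #   virsh domblkthreshold <vm> vda 100M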

    def trigger_block_threshold_event(vm_domain, target):
        """
        Trigger block threshold event.

        :param vm_domain: VM name
        :param target: Disk dev in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            time.sleep(10)
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   " mount /dev/{0} /mnt && "
                   " dd if=/dev/urandom of=/mnt/bigfile bs=1M count=101"
                   .format(target))
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to mount and fill data in VM: %s" % output)
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def check_threshold_event(vm_name, event_type, event_timeout, options, **dargs):
        """
        Check threshold event.

        :param vm_name: VM name
        :param event_type: event type.
        :param event_timeout: event timeout value
        :param options: event option
        :dargs: dynamic parameters.
        """
        ret = virsh.event(vm_name, event_type, event_timeout, options, **dargs)
        logging.debug(ret.stdout_text)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param vol_params: Volume parameters dict.
        """
        # Clean up leftover volumes, if the pool has any.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_commit(vm_name, target, blockcommit_options, **virsh_dargs):
        """
        Trigger blockcommit.

        :param vm_name: VM name
        :param target: Disk dev in VM.
        :param blockcommit_options: blockcommit option
        :param virsh_dargs: additional parameters
        """
        result = virsh.blockcommit(vm_name, target,
                                   blockcommit_options, ignore_status=False, **virsh_dargs)

    def trigger_block_copy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs):
        """
        Trigger blockcopy

        :param vm_name: string, VM name
        :param target: string, target disk
        :param dest_path: string, the path of copied disk
        :param blockcopy_options: string, some options applied
        :param virsh_dargs: additional options
        """
        result = virsh.blockcopy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs)
        libvirt.check_exit_status(result)

    def trigger_mirror_threshold_event(vm_domain, target):
        """
        Trigger mirror mode block threshold event.

        :param vm_domain: VM name
        :param target: Disk target in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            # Sleep 10 seconds to let the event-waiting thread start first
            # in the main thread
            time.sleep(10)
            cmd = ("dd if=/dev/urandom of=file bs=1G count=3")
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to fill data in VM target: %s with %s" % (target, output))
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def get_mirror_source_index(vm_name, dev_index=0):
        """
        Get mirror source index

        :param vm_name: VM name
        :param dev_index: Disk device index.
        :return mirror source index in integer
        """
        disk_list = vm_xml.VMXML.get_disk_source(vm_name)
        disk_mirror = disk_list[dev_index].find('mirror')
        if disk_mirror is None:
            test.fail("Failed to get disk mirror")
        disk_mirror_source = disk_mirror.find('source')
        return int(disk_mirror_source.get('index'))
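        # e.g. during blockcopy the domain xml carries something like:
        #   <mirror ...><source file='...' index='2'/></mirror>
        # and that index is what vda[2]-style notation refers to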

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    mirror_mode_blockcommit = "yes" == params.get("mirror_mode_blockcommit", "no")
    mirror_mode_blockcopy = "yes" == params.get("mirror_mode_blockcopy", "no")
    default_snapshot_test = "yes" == params.get("default_snapshot_test", "no")
    block_threshold_value = params.get("block_threshold_value", "100M")
    snapshot_external_disks = []
    tmp_dir = data_dir.get_tmp_dir()
    dest_path = params.get("dest_path", "/var/lib/libvirt/images/newclone")

    pvt = None
    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    if ((backend_storage_type == "luks") and
            not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot support <encryption> inside disk in this libvirt version.")

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up dirty secrets in test environments if there are.
        utils_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            device_source = libvirt.create_local_disk(backend_storage_type, disk_path, storage_size, device_format)
            disks_img.append({"format": device_format,
                              "source": disk_path, "path": disk_path})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Setup backend storage
        elif backend_storage_type == "luks":
            luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
            luks_secret_passwd = params.get("luks_secret_passwd", "password")
            # Create secret
            luks_sec_uuid = libvirt.create_secret(params)
            logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
            virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd,
                                   encode=True, ignore_status=False, debug=True)
            image_filename = params.get("image_filename", "raw.img")
            device_source = os.path.join(data_dir.get_tmp_dir(), image_filename)

            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
            disk_encryption_dict = {"encryption": "luks",
                                    "secret": {"type": "passphrase",
                                               "uuid": luks_sec_uuid}}

            cmd = ("qemu-img create -f luks "
                   "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
                   "-o key-secret=sec0 %s %s" % (luks_encrypt_passwd, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a luks encrypted img by qemu-img")
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage",
                                            "libvirtiscsi")
                auth_sec_dict = {"sec_usage": "iscsi",
                                 "sec_target": auth_sec_usage}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret (not luks encryption secret)
                virsh.secret_set_value(auth_sec_uuid, chap_passwd,
                                       encode=True, ignore_status=False, debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True, is_login=False, image_size=storage_size,
                    chap_user=chap_user, chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {"auth_user": chap_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_usage": auth_sec_usage_target}
                device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port,
                                                         iscsi_target, lun_num)
                disk_src_dict = {"attrs": {"protocol": "iscsi",
                                           "name": "%s/%s" % (iscsi_target, lun_num)},
                                 "hosts": [{"name": iscsi_host, "port": iscsi_port}]}
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                    is_setup=True,
                    vol_name=gluster_vol_name,
                    pool_name=gluster_pool_name,
                    **params)

            device_source = "gluster://%s/%s/%s" % (gluster_host_ip,
                                                    gluster_vol_name,
                                                    gluster_img_name)
            cmd = ("qemu-img create -f %s "
                   "%s %s" % (device_format, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a gluster type img by qemu-img")
            disk_src_dict = {"attrs": {"protocol": "gluster",
                                       "name": "%s/%s" % (gluster_vol_name,
                                                          gluster_img_name)},
                             "hosts":  [{"name": gluster_host_ip,
                                         "port": "24007"}]}
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Start with an empty value so cleanup can tell whether a ceph
            # config file was created during the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If auth is enabled, prepare a local file to save the key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                                 "sec_name": "ceph_auth_secret"}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                                       debug=True)
                disk_auth_dict = {"auth_user": ceph_auth_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_uuid": auth_sec_uuid}
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name,
                                                               ceph_mon_ip,
                                                               key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            # Create an local image and make FS on it.
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (device_format, img_file, storage_size))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_path = ("rbd:%s:mon_host=%s" %
                         (ceph_disk_name, ceph_mon_ip))
            if ceph_client_name and ceph_client_key:
                disk_path += (":id=%s:key=%s" %
                              (ceph_auth_user, ceph_auth_key))
            rbd_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                       " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                                      device_format, img_file, disk_path))
            process.run(rbd_cmd, ignore_status=False, shell=True)
            disk_src_dict = {"attrs": {"protocol": "rbd",
                                       "name": ceph_disk_name},
                             "hosts":  [{"name": ceph_host_ip,
                                         "port": ceph_host_port}]}
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            # Set virt_use_nfs
            virt_use_nfs = params.get("virt_use_nfs", "off")
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_nfs value")

            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            # Create one image on nfs server
            libvirt.create_local_disk("file", device_source, '1', "raw")
            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Create a dir based pool, and then create one volume on it.
        elif backend_storage_type == "dir":
            pool_name = params.get("pool_name", "dir_pool")
            pool_target = params.get("pool_target")
            pool_type = params.get("pool_type")
            emulated_image = params.get("emulated_image")
            image_name = params.get("dir_image_name", "luks_1.img")
            # Create and start dir_based pool.
            pvt = libvirt.PoolVolumeTest(test, params)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            sp = libvirt_storage.StoragePool()
            if not sp.is_pool_active(pool_name):
                sp.set_pool_autostart(pool_name)
                sp.start_pool(pool_name)
            # Create one volume on the pool.
            volume_name = params.get("vol_name")
            volume_alloc = params.get("vol_alloc")
            volume_cap_unit = params.get("vol_cap_unit")
            volume_cap = params.get("vol_cap")
            volume_target_path = params.get("sec_volume")
            volume_target_format = params.get("target_format")
            volume_target_encypt = params.get("target_encypt", "")
            volume_target_label = params.get("target_label")
            vol_params = {"name": volume_name, "capacity": int(volume_cap),
                          "allocation": int(volume_alloc), "format":
                          volume_target_format, "path": volume_target_path,
                          "label": volume_target_label,
                          "capacity_unit": volume_cap_unit}
            try:
                # On libvirt versions lower than 2.5.0, creating a luks
                # encryption volume is not supported, so skip it.
                create_vol(pool_name, vol_params)
            except AssertionError as info:
                err_msgs = ("create: invalid option")
                if str(info).count(err_msgs):
                    test.cancel("Creating luks encryption volume "
                                "is not supported on this libvirt version")
                else:
                    test.error("Failed to create volume."
                               "Error: %s" % str(info))
            disk_src_dict = {'attrs': {'file': volume_target_path}}
            device_source = volume_target_path
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path, image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({"hosts": [{"name": nbd_server_host, "port": nbd_server_port}]})
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port,
                                                image_path)

        logging.debug("device source is: %s", device_source)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        if disk_encryption_dict:
            disk_encryption_dict = {"encryption": "luks",
                                    "secret": {"type": "passphrase",
                                               "uuid": luks_sec_uuid}}
            disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)

            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml except for mirror_mode_blockcommit or mirror_mode_blockcopy
        if not mirror_mode_blockcommit and not mirror_mode_blockcopy:
            vmxml.add_device(disk_xml)
        try:
            vmxml.sync()
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # When use wrong password in disk xml for cold plug cases,
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s", str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        func_name = trigger_block_threshold_event
        # Additional operations before set block threshold
        if backend_storage_type == "file":
            logging.info("Create snapshot...")
            snap_opt = " %s --disk-only "
            snap_opt += "%s,snapshot=external,file=%s"
            if default_snapshot_test:
                for index in range(1, 5):
                    snapshot_name = "snapshot_%s" % index
                    snap_path = "%s/%s_%s.snap" % (tmp_dir, vm_name, index)
                    snapshot_external_disks.append(snap_path)
                    snap_option = snap_opt % (snapshot_name, device_target, snap_path)
                    virsh.snapshot_create_as(vm_name, snap_option,
                                             ignore_status=False, debug=True)

            if mirror_mode_blockcommit:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Setting a threshold for the disk mirroring "
                                "feature is not supported on this libvirt version")
                vmxml.del_device(disk_xml)
                virsh.snapshot_create_as(vm_name, "--disk-only --no-metadata",
                                         ignore_status=False, debug=True)
                # Do active blockcommit in background.
                blockcommit_options = "--active"
                mirror_blockcommit_thread = threading.Thread(target=trigger_block_commit,
                                                             args=(vm_name, 'vda', blockcommit_options,),
                                                             kwargs={'debug': True})
                mirror_blockcommit_thread.start()
                device_target = "vda[1]"
                func_name = trigger_mirror_threshold_event
            if mirror_mode_blockcopy:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Setting a threshold for the disk mirroring "
                                "feature is not supported on this libvirt version")
                # Do transient blockcopy in background.
                blockcopy_options = "--transient-job "
                # Do cleanup
                if os.path.exists(dest_path):
                    libvirt.delete_local_disk("file", dest_path)
                mirror_blockcopy_thread = threading.Thread(target=trigger_block_copy,
                                                           args=(vm_name, 'vda', dest_path, blockcopy_options,),
                                                           kwargs={'debug': True})
                mirror_blockcopy_thread.start()
                mirror_blockcopy_thread.join(10)
                device_target = "vda[%d]" % get_mirror_source_index(vm_name)
                func_name = trigger_mirror_threshold_event
        set_vm_block_domblkthreshold(vm_name, device_target, block_threshold_value, **{"debug": True})
        cli_thread = threading.Thread(target=func_name,
                                      args=(vm, device_target))
        cli_thread.start()
        check_threshold_event(vm_name, event_type, block_threshold_timeout, block_threshold_option, **{"debug": True})
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the VM first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])

        for disk in snapshot_external_disks:
            libvirt.delete_local_disk('file', disk)

        if os.path.exists(dest_path):
            libvirt.delete_local_disk("file", dest_path)

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nfs":
            result = process.run("setsebool virt_use_nfs off",
                                 shell=True)
            if result.exit_status:
                logging.info("Failed to restore virt_use_nfs value")
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbd_ex:
                    logging.info("Clean up of nbd failed: %s" % str(nbd_ex))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
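
The heart of example #37, stripped of the avocado plumbing, is two virsh calls: arm a write threshold on a guest disk, then wait for the block-threshold event to fire. A minimal sketch, assuming a guest named demo-vm with a vda disk and libvirt >= 3.2.0 (where domblkthreshold was introduced); names and sizes are placeholders:

import subprocess

def watch_block_threshold(domain="demo-vm", dev="vda",
                          threshold="100M", timeout=60):
    # Arm the threshold; libvirt emits one event when guest writes cross it.
    subprocess.run(["virsh", "domblkthreshold", domain, dev, threshold],
                   check=True)
    # Block until the event arrives or the timeout expires.
    result = subprocess.run(
        ["virsh", "event", domain, "--event", "block-threshold",
         "--timeout", str(timeout)],
        capture_output=True, text=True)
    return "block-threshold" in result.stdout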
コード例 #38
0
ファイル: virsh_boot.py プロジェクト: sssst315/tp-libvirt
def set_domain_disk(vmxml, blk_source, params, test):
    """
    Replace the domain disk with new setup device or download image

    :param vmxml: The instance of VMXML class
    :param blk_source: The domain disk image path
    :param params: Avocado params object
    :param test: Avocado test object
    """
    disk_type = params.get("disk_type", "file")
    boot_dev = params.get("boot_dev", "hd")
    target_dev = params.get("target_dev", "vdb")
    device_bus = params.get("device_bus", "virtio")
    disk_img = params.get("disk_img")
    image_size = params.get("image_size", "3G")
    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    driver_type = params.get("driver_type", "qcow2")
    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    source_protocol = params.get("source_protocol", "")
    boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso")
    non_release_os_url = params.get("non_release_os_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(),
                                      "non_released_os.qcow2")
    brick_path = os.path.join(test.virtdir, "gluster-pool")

    global cleanup_iscsi
    global cleanup_gluster
    disk_params = {
        'disk_type': disk_type,
        'target_dev': target_dev,
        'target_bus': device_bus,
        'driver_type': driver_type
    }
    if source_protocol == 'iscsi':
        if disk_type == 'block':
            kwargs = {'image_size': image_size, 'disk_format': disk_format}
            iscsi_target = prepare_iscsi_disk(blk_source, **kwargs)
            if iscsi_target is None:
                test.error("Failed to create iscsi disk")
            else:
                cleanup_iscsi = True
                disk_params.update({'source_file': iscsi_target})
    elif source_protocol == 'gluster':
        if disk_type == 'network':
            kwargs = {
                'vol_name': vol_name,
                'brick_path': brick_path,
                'disk_img': disk_img,
                'disk_format': disk_format
            }
            host_ip = prepare_gluster_disk(blk_source, test, **kwargs)
            if host_ip is None:
                test.error("Failed to create glusterfs disk")
            else:
                cleanup_gluster = True
            source_name = "%s/%s" % (vol_name, disk_img)
            disk_params.update({
                'source_name': source_name,
                'source_host_name': host_ip,
                'source_host_port': '24007',
                'source_protocol': source_protocol
            })
    elif source_protocol == 'rbd':
        if disk_type == 'network':
            disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
            disk_cmd = ("qemu-img convert -O %s %s %s" %
                        (disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False)
            disk_params.update({
                'source_name': disk_src_name,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port,
                'source_protocol': source_protocol
            })
    elif non_release_os_url:
        disk_params.update({'source_file': download_file_path})
    elif boot_dev == "cdrom":
        disk_params.update({
            'device_type': 'cdrom',
            'source_file': boot_iso_file
        })
    else:
        disk_params.update({'source_file': blk_source})

    new_disk = Disk(type_name=disk_type)
    with open(create_disk_xml(disk_params)) as disk_xml_file:
        new_disk.xml = disk_xml_file.read()
    vmxml.remove_all_disk()
    vmxml.add_device(new_disk)
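
A hedged usage sketch for set_domain_disk() above: the guest name, params values, and disk path are placeholders, not part of the original test. The one step the helper leaves to its caller is syncing the modified XML back to libvirt:

from virttest.libvirt_xml import vm_xml

def rebuild_boot_disk(test, params):
    vm_name = params.get("main_vm", "demo-vm")     # hypothetical guest
    blk_source = params.get("blk_source",
                            "/var/lib/libvirt/images/demo.qcow2")
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    set_domain_disk(vmxml, blk_source, params, test)
    vmxml.sync()                                   # redefine the guest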
コード例 #39
0
    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Try to rebuild disk xml
        """
        # Delete existed disks first.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks_dev = vmxml.get_devices(device_type="disk")
        for disk in disks_dev:
            vmxml.del_device(disk)

        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
        if driver_iothread:
            driver_dict.update({"iothread": driver_iothread})
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vda", "bus": "virtio"}
        if default_pool:
            utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool,
                             "glusterfs")
            process.run("setsebool virt_use_fusefs on", shell=True)
            virsh.pool_refresh("default")
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {
                "protocol": "gluster",
                "name": "%s/%s" % (vol_name, disk_img)
            }
            host_dict = [{"name": host_ip, "port": "24007"}]
            # If multiple_hosts is True, attempt to add multiple hosts.
            if multiple_hosts:
                host_dict.append({
                    "name": params.get("dummy_host1"),
                    "port": "24007"
                })
                host_dict.append({
                    "name": params.get("dummy_host2"),
                    "port": "24007"
                })
            if transport:
                host_dict[0]['transport'] = transport
            disk_xml.source = disk_xml.new_disk_source(**{
                "attrs": source_dict,
                "hosts": host_dict
            })
        # set domain options
        if dom_iothreads:
            try:
                vmxml.iothreads = int(dom_iothreads)
            except ValueError:
                # 'iothreads' may not be a valid number in negative tests
                logging.debug("Can't convert '%s' to integer type" %
                              dom_iothreads)

        # Add the new disk xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()
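
For reference, the network branch of build_disk_xml() reduces to the self-contained snippet below, which assembles a gluster-backed <disk> element with one fallback host and prints the XML libvirt would receive. Volume, image, and host values are placeholders:

from virttest.libvirt_xml.devices.disk import Disk

disk_xml = Disk(type_name="network")
disk_xml.device = "disk"
disk_xml.driver = {"name": "qemu", "type": "qcow2", "cache": "none"}
disk_xml.target = {"dev": "vda", "bus": "virtio"}
disk_xml.source = disk_xml.new_disk_source(**{
    "attrs": {"protocol": "gluster", "name": "gluster-vol/guest.qcow2"},
    "hosts": [{"name": "192.0.2.10", "port": "24007"},
              {"name": "192.0.2.11", "port": "24007"}],
})
print(disk_xml)    # dump the generated <disk> element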
コード例 #40
0
ファイル: utils_stress.py プロジェクト: jcfaracco/avocado-vt
 def vm_stress_events(self, event, vm):
     """
     Stress events
     :param event: event name
     :param vm: vm object
     """
     dargs = {'ignore_status': True, 'debug': True}
     for itr in range(self.iterations):
         if "vcpupin" in event:
             for vcpu in range(int(self.current_vcpu)):
                 result = virsh.vcpupin(vm.name, vcpu,
                                        random.choice(self.host_cpu_list),
                                        **dargs)
                 if not self.ignore_status:
                     libvirt.check_exit_status(result)
         elif "emulatorpin" in event:
             for vcpu in range(int(self.current_vcpu)):
                 result = virsh.emulatorpin(vm.name,
                                            random.choice(
                                                self.host_cpu_list),
                                            **dargs)
                 if not self.ignore_status:
                     libvirt.check_exit_status(result)
         elif "suspend" in event:
             result = virsh.suspend(vm.name, **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
             time.sleep(self.event_sleep_time)
             result = virsh.resume(vm.name, **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
         elif "cpuhotplug" in event:
             result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                     **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
                 exp_vcpu = {'max_config': self.max_vcpu,
                             'max_live': self.max_vcpu,
                             'cur_config': self.current_vcpu,
                             'cur_live': self.max_vcpu,
                             'guest_live': self.max_vcpu}
                 utils_hotplug.check_vcpu_value(
                     vm, exp_vcpu, option="--live")
             time.sleep(self.event_sleep_time)
             result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                     **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
                 exp_vcpu = {'max_config': self.max_vcpu,
                             'max_live': self.max_vcpu,
                             'cur_config': self.current_vcpu,
                             'cur_live': self.current_vcpu,
                             'guest_live': self.current_vcpu}
                 utils_hotplug.check_vcpu_value(
                     vm, exp_vcpu, option="--live")
         elif "reboot" in event:
             vm.reboot()
         elif "nethotplug" in event:
             for iface_num in range(int(self.iface_num)):
                 logging.debug("Try to attach interface %d" % iface_num)
                 mac = utils_net.generate_mac_address_simple()
                 options = ("%s %s --model %s --mac %s %s" %
                            (self.iface_type, self.iface_source['network'],
                             self.iface_model, mac, self.attach_option))
                 logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options)
                 ret = virsh.attach_interface(vm.name, options,
                                              ignore_status=True)
                 time.sleep(self.event_sleep_time)
                 if not self.ignore_status:
                     libvirt.check_exit_status(ret)
                 if self.detach_option:
                     options = ("--type %s --mac %s %s" %
                                (self.iface_type, mac, self.detach_option))
                     logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options)
                     ret = virsh.detach_interface(vm.name, options,
                                                  ignore_status=True)
                     if not self.ignore_status:
                         libvirt.check_exit_status(ret)
         elif "diskhotplug" in event:
             for disk_num in range(len(self.device_source_names)):
                 disk = {}
                 disk_attach_error = False
                 disk_name = os.path.join(self.path, vm.name, self.device_source_names[disk_num])
                 device_source = libvirt.create_local_disk(
                     self.disk_type, disk_name, self.disk_size, disk_format=self.disk_format)
                 disk.update({"format": self.disk_format,
                              "source": device_source})
                 disk_xml = Disk(self.disk_type)
                 disk_xml.device = self.disk_device
                 disk_xml.driver = {"name": self.disk_driver, "type": self.disk_format}
                 ret = virsh.attach_disk(vm.name, disk["source"], self.device_target[disk_num], self.attach_option, debug=True)
                 if not self.ignore_status:
                     libvirt.check_exit_status(ret, disk_attach_error)
                 if self.detach_option:
                     ret = virsh.detach_disk(vm.name, self.device_target[disk_num], extra=self.detach_option)
                     if not self.ignore_status:
                         libvirt.check_exit_status(ret)
                     libvirt.delete_local_disk(self.disk_type, disk_name)
         else:
             raise NotImplementedError
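
vm_stress_events() is designed to be driven concurrently, one event type per thread against the same guest. A minimal driver sketch, assuming a stress object that exposes the method above (the owning class and its constructor are not shown in this snippet):

import threading

def run_stress_events(stress_obj, vm, events=("suspend", "reboot")):
    threads = [threading.Thread(target=stress_obj.vm_stress_events,
                                args=(event, vm))
               for event in events]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()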
コード例 #41
0
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare tgtd and secret config.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def check_save_restore(save_file):
        """
        Test domain save and restore.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Test domain snapshot operation.
        """
        snapshot1 = "s1"
        snapshot2 = "s2"

        ret = virsh.snapshot_create_as(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only --diskspec vda,"
                                       "file=/tmp/testvm-snap1"
                                       % snapshot2)
        libvirt.check_exit_status(ret, True)

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --memspec file=%s,snapshot=external"
                                       " --diskspec vda,file=/tmp/testvm-snap2"
                                       % (snapshot2, snapshot2))
        libvirt.check_exit_status(ret, True)

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Partitions in VM before the disk was added.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                added_part = added_parts[0]
            if not added_part:
                logging.error("Cann't see added partition in VM")
                return False

            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_qemu_cmd():
        """
        Check qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if driver_iothread:
            cmd += " | grep iothread=iothread%s" % driver_iothread

        if process.system(cmd, ignore_status=True, shell=True):
            test.fail("Can't see disk option '%s' "
                      "in command line" % cmd)

    def check_auth_plaintext(vm_name, password):
        """
        Check if libvirt passed the plaintext of the chap authentication
        password to qemu.
        :param vm_name: The name of vm to be checked.
        :param password: The plaintext of password used for chap authentication.
        :return: True if using plaintext, False if not.
        """
        cmd = ("ps -ef | grep -v grep | grep qemu-kvm | grep %s | grep %s"
               % (vm_name, password))
        return process.system(cmd, ignore_status=True, shell=True) == 0

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")

    # Controller specific attributes.
    cntlr_type = params.get('controller_type', None)
    cntlr_model = params.get('controller_model', None)
    cntlr_index = params.get('controller_index', None)
    controller_addr_options = params.get('controller_addr_options', None)

    driver_iothread = params.get("driver_iothread")

    # iscsi options.
    iscsi_target = params.get("iscsi_target")
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1")
    uuid = params.get("uuid", "")
    auth_uuid = "yes" == params.get("auth_uuid", "")
    auth_usage = "yes" == params.get("auth_usage", "")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    test_save_snapshot = "yes" == params.get("test_save_snapshot", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")

    secret_uuid = ""

    # Start vm and get all partions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_place_in_location = params.get("auth_place_in_location")
            if 'source' in auth_place_in_location and not libvirt_version.version_compare(3, 9, 0):
                test.cancel("place auth in source is not supported in current libvirt version")
            auth_type = params.get("auth_type")
            secret_usage_target = params.get("secret_usage_target")
            secret_usage_type = params.get("secret_usage_type")
            chap_user = params.get("iscsi_user")
            chap_passwd = params.get("iscsi_password")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size=emulated_size,
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=iscsi_host)

        # If we use the qcow2 disk format, we should format the iscsi disk first.
        if device_format == "qcow2":
            cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s"
                   % (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size))
            process.run(cmd, shell=True)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}

        # For lun type devices, the iothread attribute needs to be set on the controller.
        if driver_iothread and device != "lun":
            driver_dict.update({"iothread": driver_iothread})
            vmxml.iothreads = int(driver_iothread)
        elif driver_iothread:
            vmxml.iothreads = int(driver_iothread)

        disk_xml.driver = driver_dict
        # Check if we want to use a faked uuid.
        if not uuid:
            uuid = secret_uuid
        auth_dict = {}
        if auth_uuid:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_uuid": uuid}
        elif auth_usage:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_usage": secret_usage_target}
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {"protocol": "iscsi",
                         "name": "%s/%s" % (iscsi_target, lun_num)},
               "hosts": [{"name": iscsi_host, "port": iscsi_port}]})
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if 'source' in auth_place_in_location:
                disk_source.auth = disk_auth
            if 'disk' in auth_place_in_location:
                disk_xml.auth = disk_auth

        disk_xml.source = disk_source
        # Sync VM xml.
        vmxml.add_device(disk_xml)

        # After virtio 1.0 is enabled, lun type devices need to use virtio-scsi
        # instead of virtio, so an additional controller is needed.
        # Add controller.
        if device == "lun":
            ctrl = Controller(type_name=cntlr_type)
            if cntlr_model is not None:
                ctrl.model = cntlr_model
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            ctrl_addr_dict = {}
            for addr_option in controller_addr_options.split(','):
                if addr_option != "":
                    addr_part = addr_option.split('=')
                    ctrl_addr_dict.update({addr_part[0].strip(): addr_part[1].strip()})
            ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict)

            # If driver_iothread is true, we need to add the iothread
            # attribute to the controller.
            if driver_iothread:
                ctrl_driver_dict = {}
                ctrl_driver_dict.update({"iothread": driver_iothread})
                ctrl.driver = ctrl_driver_dict
            logging.debug("Controller XML is:%s", ctrl)
            if cntlr_type:
                vmxml.del_controller(cntlr_type)
            else:
                vmxml.del_controller("scsi")
            vmxml.add_device(ctrl)

        try:
            # Start the VM and check status.
            vmxml.sync()
            vm.start()
            if status_error:
                test.fail("VM started unexpectedly.")

            # Check Qemu command line
            if test_qemu_cmd:
                check_qemu_cmd()

        except virt_vm.VMStartError as e:
            if status_error:
                if re.search(uuid, str(e)):
                    pass
            else:
                test.fail("VM failed to start."
                          "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            # Check partitions in VM.
            if check_partitions:
                if not check_in_vm(device_target, old_parts):
                    test.fail("Check disk partitions in VM failed")
            # Test domain save/restore/snapshot.
            if test_save_snapshot:
                save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
                check_save_restore(save_file)
                check_snapshot()
                if os.path.exists(save_file):
                    os.remove(save_file)
            # Test libvirt doesn't pass the plaintext of chap password to qemu,
            # this function is implemented in libvirt 4.3.0-1.
            if (libvirt_version.version_compare(4, 3, 0) and
                    (auth_uuid or auth_usage) and
                    chap_passwd):
                if check_auth_plaintext(vm_name, chap_passwd):
                    test.fail("Libvirt should not pass plaintext of chap "
                              "password to qemu-kvm.")

    finally:
        # Delete snapshots.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
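
Outside the test framework, the secret handling above maps onto two virsh commands: define a usage-based iSCSI secret, then store the base64-encoded CHAP password under its UUID. A sketch with illustrative values; the usage target (libvirtiscsi) must match whatever the disk XML references:

import base64
import subprocess
import tempfile

SECRET_XML = """<secret ephemeral='no' private='yes'>
  <description>iSCSI secret</description>
  <usage type='iscsi'><target>libvirtiscsi</target></usage>
</secret>"""

with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as f:
    f.write(SECRET_XML)
    xml_path = f.name

# stdout looks like "Secret <uuid> created"; grab the UUID.
out = subprocess.run(["virsh", "secret-define", xml_path],
                     capture_output=True, text=True, check=True)
uuid = out.stdout.split()[1]
value = base64.b64encode(b"password").decode()
subprocess.run(["virsh", "secret-set-value", uuid, value], check=True)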
コード例 #42
0
 def build_disk_xml(disk_img, disk_format, host_ip):
     """
     Try to rebuild disk xml
     """
     if default_pool:
         disk_xml = Disk(type_name="file")
     else:
         disk_xml = Disk(type_name="network")
     disk_xml.device = "disk"
     driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
     if driver_iothread:
         driver_dict.update({"iothread": driver_iothread})
     disk_xml.driver = driver_dict
     disk_xml.target = {"dev": "vdb", "bus": "virtio"}
     if default_pool:
         utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool,
                          "glusterfs")
         process.run("setsebool virt_use_fusefs on", shell=True)
         source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
         disk_xml.source = disk_xml.new_disk_source(
             **{"attrs": source_dict})
     else:
         source_dict = {
             "protocol": "gluster",
             "name": "%s/%s" % (vol_name, disk_img)
         }
         host_dict = [{"name": host_ip, "port": "24007"}]
         # If mutiple_hosts is True, attempt to add multiple hosts.
         if multiple_hosts:
             host_dict.append({
                 "name": params.get("dummy_host1"),
                 "port": "24007"
             })
             host_dict.append({
                 "name": params.get("dummy_host2"),
                 "port": "24007"
             })
         if transport:
             host_dict[0]['transport'] = transport
         disk_xml.source = disk_xml.new_disk_source(**{
             "attrs": source_dict,
             "hosts": host_dict
         })
     return disk_xml
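
Note the design difference from the near-identical helper in example #39: that variant adds the disk and syncs the domain XML itself, while this one returns the Disk object and leaves attachment to the caller. A hedged sketch of such a call, with placeholder arguments and a live attach:

from virttest import virsh

disk = build_disk_xml("guest.qcow2", "qcow2", "192.0.2.10")
ret = virsh.attach_device("demo-vm", disk.xml, flagstr="--live", debug=True)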
コード例 #43
0
                    img["disk_dev"].cleanup()
            else:
                if img["format"] == "iscsi":
                    libvirt.setup_or_cleanup_iscsi(is_setup=False)
                if img["format"] not in ["dir", "scsi"]:
                    os.remove(img["source"])
        raise error.TestNAError("Creating disk failed")

    # Build disks xml.
    disks_xml = []
    # Additional disk images.
    disks_img = []
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    try:
        for i in range(len(disks)):
            disk_xml = Disk(type_name=device_types[i])
            # If we are testing image file on iscsi disk,
            # mount the disk and then create the image.
            if test_file_img_on_disk:
                mount_path = "/tmp/diskimg"
                if utils.run("mkdir -p %s && mount %s %s" %
                             (mount_path, disks[i]["source"], mount_path),
                             ignore_status=True).exit_status:
                    raise error.TestNAError("Prepare disk failed")
                disk_path = "%s/%s.qcow2" % (mount_path,
                                             device_source_names[i])
                disk_source = libvirt.create_local_disk("file",
                                                        disk_path,
                                                        "1",
                                                        disk_format="qcow2")
                disks_img.append({
コード例 #44
0
def run(test, params, env):
    """
    Test virsh blockcopy --xml option.

    1.Prepare backend storage (file/block/iscsi/ceph/nbd)
    2.Start VM
    3.Prepare target xml
    4.Execute virsh blockcopy --xml command
    5.Check VM xml after operation accomplished
    6.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    ignore_check = False

    def check_blockcopy_xml(vm_name, source_image, ignore_check=False):
        """
        Check blockcopy xml in VM.

        :param vm_name: VM name
        :param source_image: source image name.
        :param ignore_check: default is False.
        """
        if ignore_check:
            return
        source_image_list = []
        blklist = virsh.domblklist(vm_name).stdout_text.splitlines()
        for line in blklist:
            if line.strip().startswith(('hd', 'vd', 'sd', 'xvd')):
                source_image_list.append(line.split()[-1])
        logging.debug('domblklist %s:\n%s', vm_name, source_image_list)
        if not any(source_image in s for s in source_image_list):
            test.fail("Cannot find expected source image: %s" % source_image)

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")
    blockcopy_option = params.get("blockcopy_option")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    disk_auth_dict = {}
    size = "1"

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name)
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up dirty secrets in test environments if there are.
        utils_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            if blockcopy_option in ['reuse_external']:
                device_source = libvirt.create_local_disk(
                    backend_storage_type, disk_path, storage_size,
                    device_format)
            else:
                device_source = disk_path
            disks_img.append({
                "format": device_format,
                "source": disk_path,
                "path": disk_path
            })
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
            checkout_device_source = image_filename
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
                checkout_device_source = device_source
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi")
                auth_sec_dict = {
                    "sec_usage": "iscsi",
                    "sec_target": auth_sec_usage
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret
                virsh.secret_set_value(auth_sec_uuid,
                                       chap_passwd,
                                       encode=True,
                                       debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True,
                    is_login=False,
                    image_size=storage_size,
                    chap_user=chap_user,
                    chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {
                    "auth_user": chap_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_usage": auth_sec_usage_target
                }
                device_source = "iscsi://%s:%s/%s/%s" % (
                    iscsi_host, iscsi_port, iscsi_target, lun_num)
                disk_src_dict = {
                    "attrs": {
                        "protocol": "iscsi",
                        "name": "%s/%s" % (iscsi_target, lun_num)
                    },
                    "hosts": [{
                        "name": iscsi_host,
                        "port": iscsi_port
                    }]
                }
                checkout_device_source = 'emulated-iscsi'
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            size = "0.15"

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Keep ceph_cfg empty so cleanup can tell whether a
            # config file was created and needs to be deleted.
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If enable auth, prepare a local file to save key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {
                    "sec_usage": auth_sec_usage_type,
                    "sec_name": "ceph_auth_secret"
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid,
                                       ceph_auth_key,
                                       ignore_status=False,
                                       debug=True)
                disk_auth_dict = {
                    "auth_user": ceph_auth_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_uuid": auth_sec_uuid
                }
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (
                ceph_disk_name, ceph_mon_ip, key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            if blockcopy_option in ['reuse_external']:
                # Create a local image and make a FS on it.
                libvirt.create_local_disk("file", img_file, storage_size,
                                          device_format)
                # Convert the image to remote storage
                disk_path = ("rbd:%s:mon_host=%s" %
                             (ceph_disk_name, ceph_mon_ip))
                if ceph_client_name and ceph_client_key:
                    disk_path += (":id=%s:key=%s" %
                                  (ceph_auth_user, ceph_auth_key))
                rbd_cmd = (
                    "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                    " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                                   device_format, img_file, disk_path))
                process.run(rbd_cmd, ignore_status=False, shell=True)
            disk_src_dict = {
                "attrs": {
                    "protocol": "rbd",
                    "name": ceph_disk_name
                },
                "hosts": [{
                    "name": ceph_host_ip,
                    "port": ceph_host_port
                }]
            }
            checkout_device_source = ceph_disk_name
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = socket.gethostname().strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image",
                                    "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path,
                            image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({
                "hosts": [{
                    "name": nbd_server_host,
                    "port": nbd_server_port
                }]
            })
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port, image_path)
            checkout_device_source = image_path
            if blockcopy_option in ['pivot']:
                ignore_check = True

        logging.debug("device source is: %s", device_source)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        auth_in_source = True
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        device_source_path = os.path.join(data_dir.get_tmp_dir(), "source.raw")
        tmp_device_source = libvirt.create_local_disk("file",
                                                      path=device_source_path,
                                                      size=size,
                                                      disk_format="raw")
        s_attach = virsh.attach_disk(vm_name,
                                     tmp_device_source,
                                     device_target,
                                     "--config",
                                     debug=True)
        libvirt.check_exit_status(s_attach)
        try:
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        # Additional operations before set block threshold
        options = params.get("options",
                             "--pivot --transient-job --verbose --wait")
        result = virsh.blockcopy(vm_name,
                                 device_target,
                                 "--xml %s" % disk_xml.xml,
                                 options=options,
                                 debug=True)
        libvirt.check_exit_status(result)
        check_source_image = None
        if blockcopy_option in ['pivot']:
            check_source_image = checkout_device_source
        else:
            check_source_image = tmp_device_source
        check_blockcopy_xml(vm_name, check_source_image, ignore_check)
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the VM first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])
        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result.stdout_text)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbd_ex:
                    logging.error("Clean up of nbd failed: %s" % str(nbd_ex))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
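
Stripped of the storage preparation, the operation example #44 exercises is a single virsh invocation: copy a running guest's disk to the destination described by an XML file, then pivot to it once the mirror is in sync. A sketch with placeholder names, assuming a libvirt new enough to support blockcopy --xml:

import subprocess

subprocess.run(
    ["virsh", "blockcopy", "demo-vm", "vdd",
     "--xml", "/tmp/target-disk.xml",
     "--pivot", "--transient-job", "--verbose", "--wait"],
    check=True)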
コード例 #45
0
ファイル: virsh_boot.py プロジェクト: uyvan/tp-libvirt
def set_domain_disk(vmxml, blk_source, params, test):
    """
    Replace the domain disk with new setup device or download image

    :param vmxml: The instance of VMXML class
    :param blk_source: The domain disk image path
    :param params: Avocado params object
    :param test: Avocado test object
    """
    disk_type = params.get("disk_type", "file")
    boot_dev = params.get("boot_dev", "hd")
    target_dev = params.get("target_dev", "vdb")
    device_bus = params.get("device_bus", "virtio")
    disk_img = params.get("disk_img")
    image_size = params.get("image_size", "3G")
    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    driver_type = params.get("driver_type", "qcow2")
    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    source_protocol = params.get("source_protocol", "")
    boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso")
    non_release_os_url = params.get("non_release_os_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(),
                                      "non_released_os.qcow2")
    release_os_url = params.get("release_os_url", "")
    download_released_file_path = os.path.join(data_dir.get_tmp_dir(),
                                               "released_os.qcow2")
    brick_path = os.path.join(test.virtdir, "gluster-pool")
    usb_index = params.get("usb_index", "0")
    bus_controller = params.get("bus_controller", "")
    usb_controller = params.get("usb_controller", "")
    usb_model = params.get("usb_model", "")

    global cleanup_iscsi
    global cleanup_gluster
    disk_params = {
        'disk_type': disk_type,
        'target_dev': target_dev,
        'target_bus': device_bus,
        'driver_type': driver_type
    }
    if source_protocol == 'iscsi':
        if disk_type == 'block':
            if release_os_url:
                blk_source = download_released_file_path
            kwargs = {'image_size': image_size, 'disk_format': disk_format}
            iscsi_target = prepare_iscsi_disk(blk_source, **kwargs)
            if iscsi_target is None:
                test.error("Failed to create iscsi disk")
            else:
                cleanup_iscsi = True
                disk_params.update({'source_file': iscsi_target})
    elif source_protocol == 'usb':
        # Assemble the XML of the USB controllers
        controllers = vmxml.get_devices(device_type="controller")
        for dev in controllers:
            if dev.type == "usb":
                vmxml.del_device(dev)

        for model in usb_model.split(','):
            controller = Controller("controller")
            controller.type = "usb"
            controller.index = usb_index
            controller.model = model
            vmxml.add_device(controller)

        # prepare virtual disk device
        dir_name = os.path.dirname(blk_source)
        device_name = os.path.join(dir_name, "usb_virtual_disk.qcow2")
        cmd = ("qemu-img convert -O {} {} {}".format(disk_format, blk_source,
                                                     device_name))
        process.run(cmd, shell=True)
        disk_params.update({'source_file': device_name})

    elif source_protocol == 'gluster':
        if disk_type == 'network':
            if release_os_url:
                blk_source = download_released_file_path
            host_ip = prepare_gluster_disk(blk_source,
                                           test,
                                           brick_path=brick_path,
                                           **params)
            if host_ip is None:
                test.error("Failed to create glusterfs disk")
            else:
                cleanup_gluster = True
            source_name = "%s/%s" % (vol_name, disk_img)
            disk_params.update({
                'source_name': source_name,
                'source_host_name': host_ip,
                'source_host_port': '24007',
                'source_protocol': source_protocol
            })
    elif source_protocol == 'rbd':
        if disk_type == 'network':
            if release_os_url:
                blk_source = download_released_file_path
            disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
            disk_cmd = ("qemu-img convert -O %s %s %s" %
                        (disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False)
            disk_params.update({
                'source_name': disk_src_name,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port,
                'source_protocol': source_protocol
            })
    elif non_release_os_url:
        disk_params.update({'source_file': download_file_path})
    elif boot_dev == "cdrom":
        disk_params.update({
            'device_type': 'cdrom',
            'source_file': boot_iso_file
        })
    elif release_os_url:
        disk_params.update({'source_file': download_released_file_path})
    else:
        disk_params.update({'source_file': blk_source})

    new_disk = Disk(type_name=disk_type)
    with open(create_disk_xml(disk_params)) as disk_xml_file:
        new_disk.xml = disk_xml_file.read()
    vmxml.remove_all_disk()
    vmxml.add_device(new_disk)
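    # For illustration (not part of the original test): with the rbd branch
    # above, the disk XML generated from disk_params is expected to look
    # roughly like this (pool/image and monitor host are hypothetical):
    #   <disk type='network' device='disk'>
    #     <driver name='qemu' type='raw'/>
    #     <source protocol='rbd' name='rbd_pool/rbd_image'>
    #       <host name='mon.example.com' port='6789'/>
    #     </source>
    #     <target dev='vdb' bus='virtio'/>
    #   </disk>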
コード例 #46
0
def run(test, params, env):
    """
    Test nbd disk option.

    1.Prepare backend storage
    2.Use nbd to export the backend storage with or without TLS
    3.Prepare a disk xml pointing to the backend storage
    4.Start VM with disk hotplug/coldplug
    5.Start snapshot or save/restore operations on the nbd disk
    6.Check some behaviours on VM
    7.Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}

    def check_disk_save_restore(save_file):
        """
        Check domain save and restore operation.

        :param save_file: the path to saved file
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Check domain snapshot operations.
        """
        # Clean up dirty data if it exists
        if os.path.exists(snapshot_name1_file):
            os.remove(snapshot_name1_file)
        if os.path.exists(snapshot_name2_mem_file):
            os.remove(snapshot_name2_mem_file)
        if os.path.exists(snapshot_name2_disk_file):
            os.remove(snapshot_name2_disk_file)
        device_target = 'vda'
        snapshot_name1_option = "--diskspec %s,file=%s,snapshot=external --disk-only --atomic" % (
            device_target, snapshot_name1_file)
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s %s" %
                                       (snapshot_name1, snapshot_name1_option),
                                       debug=True)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name1 not in snap_lists:
            test.fail("Snapshot %s doesn't exist" % snapshot_name1)
        # Check file can be created after snapshot

        def _check_file_create(filename):
            """
            Create a file with the specified filename inside the VM and
            verify that the write succeeds.

            :param filename: filename to create under /tmp
            """
            session = None
            try:
                session = vm.wait_for_login()
                if platform.platform().count('ppc64'):
                    time.sleep(10)
                cmd = ("echo" " teststring > /tmp/{0}".format(filename))
                status, output = session.cmd_status_output(cmd)
                if status != 0:
                    test.fail("Failed to touch one file on VM internal")
            except (remote.LoginError, virt_vm.VMError,
                    aexpect.ShellError) as e:
                logging.error(str(e))
                raise
            finally:
                if session:
                    session.close()

        _check_file_create("disk.txt")
        # Create memory snapshot.
        snapshot_name2_mem_option = "--memspec file=%s,snapshot=external" % (
            snapshot_name2_mem_file)
        snapshot_name2_disk_option = "--diskspec %s,file=%s,snapshot=external --atomic" % (
            device_target, snapshot_name2_disk_file)
        snapshot_name2_option = "%s %s" % (snapshot_name2_mem_option,
                                           snapshot_name2_disk_option)
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s %s" %
                                       (snapshot_name2, snapshot_name2_option),
                                       debug=True)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name2 not in snap_lists:
            test.fail("Snapshot: %s doesn't exist" % snapshot_name2)
        _check_file_create("mem.txt")

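    # For illustration: the two snapshot_create_as calls above correspond
    # roughly to these virsh invocations (file paths come from test params):
    #   virsh snapshot-create-as <vm> <snap1> \
    #       --diskspec vda,file=<file1>,snapshot=external --disk-only --atomic
    #   virsh snapshot-create-as <vm> <snap2> \
    #       --memspec file=<mem_file>,snapshot=external \
    #       --diskspec vda,file=<disk_file>,snapshot=external --atomic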
    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdb")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")
    image_path = params.get("emulated_image")
    # Get config parameters
    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    tls_enabled = "yes" == params.get("enable_tls", "no")
    enable_private_key_encryption = "yes" == params.get(
        "enable_private_key_encryption", "no")
    private_key_encrypt_passphrase = params.get("private_key_password")
    domain_operation = params.get("domain_operation")
    secret_uuid = None

    # Get snapshot attributes.
    snapshot_name1 = params.get("snapshot_name1")
    snapshot_name1_file = params.get("snapshot_name1_file")
    snapshot_name2 = params.get("snapshot_name2")
    snapshot_name2_mem_file = params.get("snapshot_name2_mem_file")
    snapshot_name2_disk_file = params.get("snapshot_name2_disk_file")
    # Initialize one NbdExport object
    nbd = None

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Get server hostname.
        hostname = process.run('hostname',
                               ignore_status=False,
                               shell=True,
                               verbose=True).stdout_text.strip()
        # Setup backend storage
        nbd_server_host = hostname
        nbd_server_port = params.get("nbd_server_port")
        image_path = params.get("emulated_image",
                                "/var/lib/libvirt/images/nbdtest.img")
        export_name = params.get("export_name", None)
        deleteExisted = "yes" == params.get("deleteExisted", "yes")
        tls_bit = "no"
        if tls_enabled:
            tls_bit = "yes"

        # Create secret
        if enable_private_key_encryption:
            # This feature is available since libvirt 6.6.0
            if not libvirt_version.version_compare(6, 6, 0):
                test.cancel(
                    "current libvirt version doesn't support client private key encryption"
                )
            utils_secret.clean_up_secrets()
            private_key_sec_uuid = libvirt.create_secret(params)
            logging.debug("A secret created with uuid = '%s'",
                          private_key_sec_uuid)
            private_key_sec_passwd = params.get("private_key_password",
                                                "redhat")
            ret = virsh.secret_set_value(private_key_sec_uuid,
                                         private_key_sec_passwd,
                                         encode=True,
                                         use_file=True,
                                         debug=True)
            libvirt.check_exit_status(ret)
            secret_uuid = private_key_sec_uuid

        # Initialize special test environment config for snapshot operations.
        if domain_operation == "snap_shot":
            first_disk = vm.get_first_disk_devices()
            image_path = first_disk['source']
            device_target = 'vda'
            # Remove previous xml
            disks = vmxml.get_devices(device_type="disk")
            for disk_ in disks:
                if disk_.target['dev'] == device_target:
                    vmxml.del_device(disk_)
                    break

        # Create NbdExport object
        nbd = NbdExport(
            image_path,
            image_format=device_format,
            port=nbd_server_port,
            export_name=export_name,
            tls=tls_enabled,
            deleteExisted=deleteExisted,
            private_key_encrypt_passphrase=private_key_encrypt_passphrase,
            secret_uuid=secret_uuid)
        nbd.start_nbd_server()
        # Prepare disk source xml
        source_attrs_dict = {"protocol": "nbd", "tls": "%s" % tls_bit}
        if export_name:
            source_attrs_dict.update({"name": "%s" % export_name})
        disk_src_dict = {}
        disk_src_dict.update({"attrs": source_attrs_dict})
        disk_src_dict.update(
            {"hosts": [{
                "name": nbd_server_host,
                "port": nbd_server_port
            }]})

        # Add disk xml.
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": 'raw'}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        try:
            vmxml.sync()
            vm.start()
            vm.wait_for_login()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % str(xml_error))
        except virt_vm.VMStartError as details:
            # When a wrong password is used in the disk xml for cold plug
            # cases, the VM cannot be started
            if status_error and not hotplug_disk:
                logging.info("VM failed to start as expected: %s" %
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        # Hotplug disk.
        if hotplug_disk:
            result = virsh.attach_device(vm_name,
                                         disk_xml.xml,
                                         ignore_status=True,
                                         debug=True)
            libvirt.check_exit_status(result, status_error)
        # Check save and restore operation and its result
        if domain_operation == 'save_restore':
            save_file = "/tmp/%s.save" % vm_name
            check_disk_save_restore(save_file)

        # Check attached nbd disk
        if check_partitions and not status_error:
            logging.debug("wait seconds for starting in checking vm part")
            time.sleep(2)
            if not check_in_vm(device_target, old_parts):
                test.fail("Check disk partitions in VM failed")
        # Check snapshot operation and its result
        if domain_operation == 'snap_shot':
            check_snapshot()

        # Unplug disk.
        if hotplug_disk:
            result = virsh.detach_device(vm_name,
                                         disk_xml.xml,
                                         ignore_status=True,
                                         debug=True,
                                         wait_for_event=True)
            libvirt.check_exit_status(result, status_error)
    finally:
        if enable_private_key_encryption:
            utils_secret.clean_up_secrets()
        # Clean up backend storage and TLS
        try:
            if nbd:
                nbd.cleanup()
            # Clean up snapshots if exist
            if domain_operation == 'snap_shot':
                snap_lists = virsh.snapshot_list(vm_name, debug=True)
                for snap_name in snap_lists:
                    virsh.snapshot_delete(vm_name,
                                          snap_name,
                                          "--metadata",
                                          debug=True,
                                          ignore_status=True)
                # Clean up dirty data if it exists
                if os.path.exists(snapshot_name1_file):
                    os.remove(snapshot_name1_file)
                if os.path.exists(snapshot_name2_mem_file):
                    os.remove(snapshot_name2_mem_file)
                if os.path.exists(snapshot_name2_disk_file):
                    os.remove(snapshot_name2_disk_file)
        except Exception as nbdEx:
            logging.info("Clean up nbd failed: %s" % str(nbdEx))

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")
コード例 #47
0
                                                               is_login=False,
                                                               image_size=emulated_size,
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=iscsi_host)

        # If we use the qcow2 disk format, we should format the iscsi disk first.
        if device_format == "qcow2":
            cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s"
                   % (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size))
            process.run(cmd, shell=True)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.source = disk_xml.new_disk_source(
            **{"attrs": {"protocol": "iscsi",
                         "name": "%s/%s" % (iscsi_target, lun_num)},
               "hosts": [{"name": iscsi_host, "port": iscsi_port}]})
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}

        # For lun type devices, the iothread attribute needs to be set on the controller.
        if driver_iothread and device != "lun":
            driver_dict.update({"iothread": driver_iothread})
            vmxml.iothreads = int(driver_iothread)
        elif driver_iothread:
            vmxml.iothreads = int(driver_iothread)
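        # For illustration: the iscsi disk source assembled above is expected
        # to render roughly as (target name is hypothetical):
        #   <source protocol='iscsi' name='iqn.2019-01.com.example:t1/1'>
        #     <host name='127.0.0.1' port='3260'/>
        #   </source>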
コード例 #48
0
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    disk_seclabel = params.get("disk_seclabel", "no")
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format", "qcow2")
    device_target = params.get("disk_target")
    device_bus = params.get("disk_target_bus")
    device_type = params.get("device_type", "file")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    sec_disk_dict = {'model': sec_model, 'label': img_label, 'relabel': sec_relabel}
    enable_namespace = 'yes' == params.get('enable_namespace', 'no')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in list(disks.values()):
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    disk_xml = Disk(type_name=device_type)
    disk_xml.device = "disk"
    try:
        # set qemu conf
        if check_cap_rawio:
            qemu_conf.user = '******'
            qemu_conf.group = 'root'
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if with_pool_vol:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes via
                # libvirt; a logical pool can create volumes via libvirt, but
                # the volume format is not supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                vol_format = "raw"
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                vol_arg = {'name': vol_name, 'format': vol_format,
                           'capacity': 1073741824,
                           'allocation': 1048576, }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" % newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["iscsi", "disk"]:
                source_type = "dev"
                if pool_type == "iscsi":
                    disk_xml.device = "lun"
                    disk_xml.rawio = "yes"
                else:
                    if not enable_namespace:
                        qemu_conf.namespaces = ''
                        logging.debug("the qemu.conf content is: %s" % qemu_conf)
                        libvirtd.restart()
            else:
                source_type = "file"

            # Set host_sestatus again, as the nfs pool resets it
            utils_selinux.set_status(host_sestatus)
            # set virt_use_nfs
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                                 shell=True)
            if result.exit_status:
                test.cancel("Failed to set virt_use_nfs value")
        else:
            source_type = "file"
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create a image.
            img_path, result = image.create(params)
            # Set the context of the image.
            if sec_relabel == "no":
                utils_selinux.set_context_of_file(filename=img_path, context=img_label)

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        disk_xml.driver = {"name": "qemu", "type": vol_format}
        if disk_seclabel == "yes":
            source_seclabel = []
            sec_xml = seclabel.Seclabel()
            sec_xml.update(sec_disk_dict)
            source_seclabel.append(sec_xml)
            disk_source = disk_xml.new_disk_source(**{"attrs": {source_type: img_path},
                                                      "seclabels": source_seclabel})
        else:
            disk_source = disk_xml.new_disk_source(**{"attrs": {source_type: img_path}})
            # Set the context of the VM.
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()

        disk_xml.source = disk_source
        logging.debug(disk_xml)

        # Do the attach action.
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml.xml, flagstr='--persistent')
        libvirt.check_exit_status(cmd_result, expect_error=False)
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # VM started successfully.
            # A VM with the seclabel set can access the image with the
            # given context.
            if status_error:
                test.fail('Test succeeded in negative case.')

            if check_cap_rawio:
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(val_list[1].strip(), 16)

                # Bitwise AND with the rawio capability value to check that
                # cap_sys_rawio is set
                cap_rawio_val = 0x0000000000020000
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        err_msg += " lack cap_sys_rawio capabilities"
                        test.fail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        inf_msg += " have cap_sys_rawio capabilities"
                        logging.debug(inf_msg)
            if pool_type == "disk":
                if libvirt_version.version_compare(3, 1, 0) and enable_namespace:
                    vm_pid = vm.get_pid()
                    output = process.system_output(
                        "nsenter -t %d -m -- ls -Z %s" % (vm_pid, img_path))
                else:
                    output = process.system_output('ls -Z %s' % img_path)
                logging.debug("The default label is %s", default_label)
                logging.debug("The label after guest started is %s", to_text(output.strip().split()[-2]))
                if default_label not in to_text(output.strip().split()[-2]):
                    test.fail("The label is wrong after guest started\n")
        except virt_vm.VMStartError as e:
            # Starting the VM failed.
            # A VM with the seclabel set cannot access the image with the
            # given context.
            if not status_error:
                test.fail("Test failed in positive case."
                          "error: %s" % e)

        cmd_result = virsh.detach_device(domainarg=vm_name, filearg=disk_xml.xml)
        libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # clean up
        vm.destroy()
        if not with_pool_vol:
            image.remove()
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if check_cap_rawio:
            qemu_conf.restore()
            libvirtd.restart()
コード例 #49
0
                    img["disk_dev"].cleanup()
            else:
                if img["format"] == "iscsi":
                    libvirt.setup_or_cleanup_iscsi(is_setup=False)
                if img["format"] not in ["dir", "scsi"]:
                    os.remove(img["source"])
        raise error.TestNAError("Creating disk failed")

    # Build disks xml.
    disks_xml = []
    # Additional disk images.
    disks_img = []
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    try:
        for i in range(len(disks)):
            disk_xml = Disk(type_name=device_types[i])
            # If we are testing an image file on an iscsi disk,
            # mount the disk and then create the image.
            if test_file_img_on_disk:
                mount_path = "/tmp/diskimg"
                if utils.run("mkdir -p %s && mount %s %s"
                             % (mount_path, disks[i]["source"],
                                mount_path), ignore_status=True).exit_status:
                    raise error.TestNAError("Prepare disk failed")
                disk_path = "%s/%s.qcow2" % (mount_path, device_source_names[i])
                disk_source = libvirt.create_local_disk("file", disk_path, "1",
                                                        disk_format="qcow2")
                disks_img.append({"format": "qcow2",
                                  "source": disk_source, "path": mount_path})
            else:
                disk_source = disks[i]["source"]
コード例 #50
0
    try:
        # Prepare the disk.
        sec_uuid = []
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {"name": volume_name, "capacity": int(volume_cap),
                      "allocation": int(volume_alloc), "format":
                      volume_target_format, "path": volume_target_path,
                      "label": volume_target_label,
                      "capacity_unit": volume_cap_unit}
        create_vol(pool_name, volume_target_encypt, vol_params)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_xml.source = disk_xml.new_disk_source(
            **{"attrs": {dev_attrs: volume_target_path}})
        disk_xml.driver = {"name": "qemu", "type": volume_target_format,
                           "cache": "none"}
        disk_xml.target = {"dev": device_target, "bus": device_bus}

        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuid.append(v_xml.encryption.secret["uuid"])
コード例 #51
0
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare tgtd and secret config.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def check_save_restore(save_file):
        """
        Test domain save and restore.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Test domain snapshot operation.
        """
        snapshot1 = "s1"
        snapshot2 = "s2"

        ret = virsh.snapshot_create_as(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(
            vm_name, "%s --disk-only --diskspec vda,"
            "file=/tmp/testvm-snap1" % snapshot2)
        libvirt.check_exit_status(ret, True)

        ret = virsh.snapshot_create_as(
            vm_name, "%s --memspec file=%s,snapshot=external"
            " --diskspec vda,file=/tmp/testvm-snap2" % (snapshot2, snapshot2))
        libvirt.check_exit_status(ret, True)

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                added_part = added_parts[0]
            if not added_part:
                logging.error("Cann't see added partition in VM")
                return False

            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            return s == 0

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_qemu_cmd():
        """
        Check qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if driver_iothread:
            cmd += " | grep iothread=iothread%s" % driver_iothread

        if process.system(cmd, ignore_status=True, shell=True):
            test.fail("Can't see disk option '%s' " "in command line" % cmd)

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")

    # Controller specific attributes.
    cntlr_type = params.get('controller_type', None)
    cntlr_model = params.get('controller_model', None)
    cntlr_index = params.get('controller_index', None)
    controller_addr_options = params.get('controller_addr_options', None)

    driver_iothread = params.get("driver_iothread")

    # iscsi options.
    iscsi_target = params.get("iscsi_target")
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1")
    uuid = params.get("uuid", "")
    auth_uuid = "yes" == params.get("auth_uuid", "")
    auth_usage = "yes" == params.get("auth_usage", "")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    test_save_snapshot = "yes" == params.get("test_save_snapshot", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")

    secret_uuid = ""

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_place_in_location = params.get("auth_place_in_location")
            if 'source' in auth_place_in_location and not libvirt_version.version_compare(
                    3, 9, 0):
            test.cancel(
                "Placing auth in source is not supported by the current "
                "libvirt version")
            auth_type = params.get("auth_type")
            secret_usage_target = params.get("secret_usage_target")
            secret_usage_type = params.get("secret_usage_type")
            chap_user = params.get("iscsi_user")
            chap_passwd = params.get("iscsi_password")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)
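            # For illustration: the secret setup above corresponds roughly
            # to this CLI sequence:
            #   virsh secret-define sec.xml
            #   virsh secret-set-value <uuid> <base64-encoded-password>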

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip=iscsi_host)

        # If we use the qcow2 disk format, we should format the iscsi disk first.
        if device_format == "qcow2":
            cmd = (
                "qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s" %
                (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size))
            process.run(cmd, shell=True)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}

        # For lun type devices, the iothread attribute needs to be set on the controller.
        if driver_iothread and device != "lun":
            driver_dict.update({"iothread": driver_iothread})
            vmxml.iothreads = int(driver_iothread)
        elif driver_iothread:
            vmxml.iothreads = int(driver_iothread)

        disk_xml.driver = driver_dict
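        # For illustration: with a hypothetical driver_iothread of "1", the
        # driver element renders roughly as:
        #   <driver name='qemu' type='raw' iothread='1'/>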
        # Check if we want to use a faked uuid.
        if not uuid:
            uuid = secret_uuid
        auth_dict = {}
        if auth_uuid:
            auth_dict = {
                "auth_user": chap_user,
                "secret_type": secret_usage_type,
                "secret_uuid": uuid
            }
        elif auth_usage:
            auth_dict = {
                "auth_user": chap_user,
                "secret_type": secret_usage_type,
                "secret_usage": secret_usage_target
            }
        disk_source = disk_xml.new_disk_source(
            **{
                "attrs": {
                    "protocol": "iscsi",
                    "name": "%s/%s" % (iscsi_target, lun_num)
                },
                "hosts": [{
                    "name": iscsi_host,
                    "port": iscsi_port
                }]
            })
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if 'source' in auth_place_in_location:
                disk_source.auth = disk_auth
            if 'disk' in auth_place_in_location:
                disk_xml.auth = disk_auth

        disk_xml.source = disk_source
        # Sync VM xml.
        vmxml.add_device(disk_xml)

        # After virtio 1.0 is enabled, lun type devices need to use virtio-scsi
        # instead of virtio, so an additional controller is needed.
        # Add controller.
        if device == "lun":
            ctrl = Controller(type_name=cntlr_type)
            if cntlr_model is not None:
                ctrl.model = cntlr_model
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            ctrl_addr_dict = {}
            for addr_option in controller_addr_options.split(','):
                if addr_option != "":
                    addr_part = addr_option.split('=')
                    ctrl_addr_dict.update(
                        {addr_part[0].strip(): addr_part[1].strip()})
            ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict)
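            # For illustration: a hypothetical controller_addr_options of
            # "type=pci,bus=0x00,slot=0x09" yields ctrl_addr_dict
            # {'type': 'pci', 'bus': '0x00', 'slot': '0x09'} and roughly
            # an <address type='pci' bus='0x00' slot='0x09'/> element.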

            # If driver_iothread is set, the iothread attribute needs to be
            # added to the controller as well.
            if driver_iothread:
                ctrl_driver_dict = {}
                ctrl_driver_dict.update({"iothread": driver_iothread})
                ctrl.driver = ctrl_driver_dict
            logging.debug("Controller XML is:%s", ctrl)
            if cntlr_type:
                vmxml.del_controller(cntlr_type)
            else:
                vmxml.del_controller("scsi")
            vmxml.add_device(ctrl)

        try:
            # Start the VM and check status.
            vmxml.sync()
            vm.start()
            if status_error:
                test.fail("VM started unexpectedly.")

            # Check Qemu command line
            if test_qemu_cmd:
                check_qemu_cmd()

        except virt_vm.VMStartError as e:
            if status_error:
                if re.search(uuid, str(e)):
                    pass
            else:
                test.fail("VM failed to start." "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            # Check partitions in VM.
            if check_partitions:
                if not check_in_vm(device_target, old_parts):
                    test.fail("Check disk partitions in VM failed")
            # Test domain save/restore/snapshot.
            if test_save_snapshot:
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         "%.save" % vm_name)
                check_save_restore(save_file)
                check_snapshot()
                if os.path.exists(save_file):
                    os.remove(save_file)

    finally:
        # Delete snapshots.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
コード例 #52
0
def run(test, params, env):
    """
    Test virsh domblkerror with 2 types of errors:
    1. unspecified error
    2. no space
    """

    if not virsh.has_help_command('domblkerror'):
        test.cancel("This version of libvirt does not support domblkerror "
                    "test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    error_type = params.get("domblkerror_error_type")
    timeout = params.get("domblkerror_timeout", 240)
    mnt_dir = params.get("domblkerror_mnt_dir", "/home/test")
    export_file = params.get("nfs_export_file", "/etc/exports")
    img_name = params.get("domblkerror_img_name", "libvirt-disk")
    img_size = params.get("domblkerror_img_size")
    target_dev = params.get("domblkerror_target_dev", "vdb")
    pool_name = params.get("domblkerror_pool_name", "fs_pool")
    vol_name = params.get("domblkerror_vol_name", "vol1")
    ubuntu = distro.detect().name == 'Ubuntu'
    rhel = distro.detect().name == 'rhel'
    nfs_service_package = params.get("nfs_service_package", "nfs-kernel-server")
    nfs_service = None
    selinux_bool = None
    session = None
    selinux_bak = ""

    vm = env.get_vm(vm_name)
    if error_type == "unspecified error":
        selinux_local = params.get("setup_selinux_local", "yes") == "yes"
        if not ubuntu and not rhel:
            nfs_service_package = "nfs"
        elif rhel:
            nfs_service_package = "nfs-server"
        if not rhel and not utils_package.package_install(nfs_service_package):
            test.cancel("NFS package not available in host to test")
        # backup /etc/exports
        shutil.copyfile(export_file, "%s.bak" % export_file)
    # backup xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Generate tmp dir
        tmp_dir = data_dir.get_tmp_dir()
        img_dir = os.path.join(tmp_dir, 'images')
        if not os.path.exists(img_dir):
            os.mkdir(img_dir)
        # Generate attached disk
        process.run("qemu-img create %s %s" %
                    (os.path.join(img_dir, img_name), img_size),
                    shell=True, verbose=True)

        # Get unspecified error
        if error_type == "unspecified error":
            # In this situation, the guest attaches a disk backed by nfs;
            # stopping the nfs service causes the guest to pause with an
            # unspecified error
            nfs_dir = os.path.join(tmp_dir, 'mnt')
            if not os.path.exists(nfs_dir):
                os.mkdir(nfs_dir)
            mount_opt = "rw,no_root_squash,async"
            res = libvirt.setup_or_cleanup_nfs(is_setup=True,
                                               mount_dir=nfs_dir,
                                               is_mount=False,
                                               export_options=mount_opt,
                                               export_dir=img_dir)
            if not ubuntu:
                selinux_bak = res["selinux_status_bak"]
            process.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 "
                        "127.0.0.1:%s %s" % (img_dir, nfs_dir), shell=True,
                        verbose=True)
            img_path = os.path.join(nfs_dir, img_name)
            nfs_service = Factory.create_service(nfs_service_package)
            if not ubuntu and selinux_local:
                params['set_sebool_local'] = "yes"
                params['local_boolean_varible'] = "virt_use_nfs"
                params['local_boolean_value'] = "on"
                selinux_bool = utils_misc.SELinuxBoolean(params)
                selinux_bool.setup()

        elif error_type == "no space":
            # Steps to generate a no-space block error:
            # 1. Prepare an iscsi disk and build an fs pool with it
            # 2. Create a vol with a larger capacity and 0 allocation
            # 3. Attach this disk to the guest
            # 4. In the guest, create a large image in the vol, which may
            # cause the guest to pause

            _pool_vol = None
            pool_target = os.path.join(tmp_dir, pool_name)
            _pool_vol = libvirt.PoolVolumeTest(test, params)
            _pool_vol.pre_pool(pool_name, "fs", pool_target, img_name,
                               image_size=img_size)
            _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name)
            img_path = os.path.join(pool_target, vol_name)

        # Generate disk xml
        # Guest will attach a disk with cache=none and error_policy=stop
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.driver = {'name': "qemu",
                           'type': "raw",
                           'cache': "none",
                           'error_policy': "stop"}
        img_disk.target = {'dev': target_dev,
                           'bus': "virtio"}
        logging.debug("disk xml is %s", img_disk.xml)

        # Start guest and get session
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Get disk list before operation
        get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2"
        bef_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk_list_debug = %s", bef_list)

        # Attach disk to guest
        ret = virsh.attach_device(vm_name, img_disk.xml)
        if ret.exit_status != 0:
            test.fail("Fail to attach device %s" % ret.stderr)
        time.sleep(2)
        logging.debug("domain xml is %s", virsh.dumpxml(vm_name))
        # get disk list after attach
        aft_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk list after attaching - %s", aft_list)
        # Find new disk after attach
        new_disk = "".join(list(set(bef_list) ^ set(aft_list)))
        logging.debug("new disk is %s", new_disk)

        def create_large_image():
            """
            Create large image in guest
            """
            # install dependent packages
            pkg_list = ["parted", "e2fsprogs"]
            for pkg in pkg_list:
                if not utils_package.package_install(pkg, session):
                    test.error("Failed to install dependent package %s" % pkg)

            # create partition and file system
            session.cmd("parted -s %s mklabel msdos" % new_disk)
            session.cmd("parted -s %s mkpart primary ext3 '0%%' '100%%'" %
                        new_disk)
            # mount disk and write file in it
            session.cmd("mkfs.ext3 %s1" % new_disk)
            session.cmd("mkdir -p %s && mount %s1 %s" %
                        (mnt_dir, new_disk, mnt_dir))

            # The following step may cause the guest to pause before it returns
            try:
                session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 "
                            "count=51200 && sync" % mnt_dir)
            except Exception as err:
                logging.debug("Expected Fail %s", err)
            session.close()

        create_large_image()
        if error_type == "unspecified error":
            # Stop the nfs service to trigger the error after creating the large image
            if nfs_service is not None:
                nfs_service.stop()
                logging.debug("nfs status is %s", nfs_service.status())

        # wait and check the guest status with timeout
        def _check_state():
            """
            Check domain state
            """
            return (vm.state() == "paused")

        if not utils_misc.wait_for(_check_state, timeout):
            # If not paused, perform one more IO operation to the mnt disk
            session = vm.wait_for_login()
            session.cmd("echo 'one more write to big file' > %s/big_file" % mnt_dir)
            if not utils_misc.wait_for(_check_state, 60):
                test.fail("Guest does not paused, it is %s now" % vm.state())
        else:
            logging.info("Now domain state changed to paused status")
            output = virsh.domblkerror(vm_name)
            if output.exit_status == 0:
                expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
                if output.stdout.strip() == expect_result:
                    logging.info("Get expect result: %s", expect_result)
                else:
                    test.fail("Failed to get expect result, get %s" %
                              output.stdout.strip())
            else:
                test.fail("Fail to get domblkerror info:%s" % output.stderr)
    finally:
        logging.info("Do clean steps")
        if session:
            session.close()
        if error_type == "unspecified error":
            if nfs_service is not None:
                nfs_service.start()
            vm.destroy()
            if os.path.isfile("%s.bak" % export_file):
                shutil.move("%s.bak" % export_file, export_file)
            res = libvirt.setup_or_cleanup_nfs(is_setup=False,
                                               mount_dir=nfs_dir,
                                               export_dir=img_dir,
                                               restore_selinux=selinux_bak)
            if selinux_bool:
                selinux_bool.cleanup(keep_authorized_keys=True)
        elif error_type == "no space":
            vm.destroy()
            if _pool_vol:
                _pool_vol.cleanup_pool(pool_name, "fs", pool_target, img_name)
        vmxml_backup.sync()
        data_dir.clean_tmp_files()
コード例 #53
0
def run(test, params, env):
    """
    Test SCSI3 Persistent Reservation functions.

    1.Prepare iscsi backend storage.
    2.Prepare disk xml.
    3.Hot/cold plug the disk to vm.
    4.Check if SCSI3 Persistent Reservation commands can be issued to that disk.
    5.Recover test environment.
    6.Confirm the test result.
    """
    def get_delta_parts(vm, old_parts):
        """
        Get the newly added partitions/blockdevs in vm.
        :param vm: The vm to be operated.
        :param old_parts: The original partitions/blockdevs in vm.
        :return: Newly added partitions/blockdevs.
        """
        session = vm.wait_for_login()
        new_parts = utils_disk.get_parts_list(session)
        new_parts = list(set(new_parts).difference(set(old_parts)))
        session.close()
        return new_parts

    def check_pr_cmds(vm, blk_dev):
        """
        Check if SCSI3 Persistent Reservation commands can be used in vm.
        :param vm: The vm to be checked.
        :param blk_dev: The block device in vm to be checked.
        """
        session = vm.wait_for_login()
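        # The chained sg_persist calls below walk one full persistent
        # reservation cycle: register a key, read registered keys, take a
        # type-5 reservation (write exclusive, registrants only), read the
        # reservation back, release it, then re-register.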
        cmd = ("sg_persist --no-inquiry -v --out --register-ignore --param-sark 123aaa /dev/{0} &&"
               "sg_persist --no-inquiry --in -k /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --reserve --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -r /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --release --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -r /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --register --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -k /dev/{0}"
               .format(blk_dev))
        cmd_status, cmd_output = session.cmd_status_output(cmd)
        session.close()
        if cmd_status == 127:
            test.error("sg3_utils not installed in test image")
        elif cmd_status != 0:
            test.fail("persistent reservation failed for /dev/%s" % blk_dev)
        else:
            logging.info("persistent reservation successful for /dev/%s" % blk_dev)

    def start_or_stop_qemu_pr_helper(is_start=True, path_to_sock="/var/run/qemu-pr-helper.sock"):
        """
        Start or stop qemu-pr-helper daemon
        :param is_start: Set True to start, False to stop.
        :param path_to_sock: Path to the qemu-pr-helper socket file.
        """
        service_mgr = service.ServiceManager()
        if is_start:
            service_mgr.start('qemu-pr-helper')
            time.sleep(2)
            shutil.chown(path_to_sock, "qemu", "qemu")
        else:
            service_mgr.stop('qemu-pr-helper')

    def ppc_controller_update():
        """
        Update the controller of a ppc vm to 'virtio-scsi' to support the
        'scsi' bus type.
        """
        if params.get('machine_type') == 'pseries' and device_bus == 'scsi':
            if not vmxml.get_controllers(device_bus, 'virtio-scsi'):
                vmxml.del_controller(device_bus)
                ppc_controller = Controller('controller')
                ppc_controller.type = device_bus
                ppc_controller.index = '0'
                ppc_controller.model = 'virtio-scsi'
                vmxml.add_device(ppc_controller)
                vmxml.sync()

    # Check if SCSI3 Persistent Reservations are supported by
    # the current libvirt version.
    if not libvirt_version.version_compare(4, 4, 0):
        test.cancel("The <reservations> tag supported by libvirt from version "
                    "4.4.0")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    # Disk specific attributes
    device = params.get("virt_disk_device", "lun")
    device_target = params.get("virt_disk_device_target", "sdb")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "block")
    device_bus = params.get("virt_disk_device_bus", "scsi")
    # Iscsi options
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1G")
    auth_uuid = "yes" == params.get("auth_uuid")
    auth_usage = "yes" == params.get("auth_usage")
    # SCSI3 PR options
    reservations_managed = "yes" == params.get("reservations_managed", "yes")
    reservations_source_type = params.get("reservations_source_type", "unix")
    reservations_source_path = params.get("reservations_source_path",
                                          "/var/run/qemu-pr-helper.sock")
    reservations_source_mode = params.get("reservations_source_mode", "client")
    secret_uuid = ""
    # Case step options
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")

    # Start vm and get all partitions in vm
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_in_source = "yes" == params.get("auth_in_source", "no")
            if auth_in_source and not libvirt_version.version_compare(3, 9, 0):
                test.cancel("place auth in source is not supported in "
                            "current libvirt version.")
            auth_type = params.get("auth_type", "chap")
            secret_usage_target = params.get("secret_usage_target",
                                             "libvirtiscsi")
            secret_usage_type = params.get("secret_usage_type", "iscsi")
            chap_user = params.get("iscsi_user", "redhat")
            chap_passwd = params.get("iscsi_password", "redhat")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            uuid_list = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                   ret.stdout.strip())
            if not uuid_list:
                test.error("Failed to get secret uuid")
            secret_uuid = uuid_list[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)

            # Set secret value
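            # virsh secret-set-value expects the value base64-encoded,
            # hence the encode step below.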
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(str(chap_passwd).encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        blk_dev = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                 is_login=True,
                                                 image_size=emulated_size,
                                                 chap_user=chap_user,
                                                 chap_passwd=chap_passwd,
                                                 portal_ip=iscsi_host)
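        # blk_dev holds the local block device path of the logged-in
        # iSCSI LUN; it becomes the source of the LUN disk below.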

        # Add disk xml
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        auth_dict = {}
        if auth_uuid:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_uuid": secret_uuid}
        elif auth_usage:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_usage": secret_usage_target}
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {"dev": blk_dev}})
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if auth_in_source:
                disk_source.auth = disk_auth
            else:
                disk_xml.auth = disk_auth
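        # With managed reservations libvirt spawns its own qemu-pr-helper
        # for the disk; in unmanaged mode the disk must reference the
        # socket of a helper started externally, which is done here via
        # the system service.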
        if reservations_managed:
            reservations_dict = {"reservations_managed": "yes"}
        else:
            start_or_stop_qemu_pr_helper(path_to_sock=reservations_source_path)
            reservations_dict = {"reservations_managed": "no",
                                 "reservations_source_type": reservations_source_type,
                                 "reservations_source_path": reservations_source_path,
                                 "reservations_source_mode": reservations_source_mode}
        disk_source.reservations = disk_xml.new_reservations(**reservations_dict)
        disk_xml.source = disk_source

        # Update controller of ppc vms
        ppc_controller_update()

        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        try:
            # Start the VM and check status
            vmxml.sync()
            vm.start()
            vm.wait_for_login().close()
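            # Give the freshly booted guest a few seconds to settle
            # before hotplugging and comparing partitions.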
            time.sleep(5)
            if hotplug_disk:
                result = virsh.attach_device(vm_name, disk_xml.xml,
                                             ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            new_parts = get_delta_parts(vm, old_parts)
            if len(new_parts) != 1:
                logging.error("Expected 1 new device but found %s",
                              len(new_parts))
            new_part = new_parts[0]
            check_pr_cmds(vm, new_part)
            result = virsh.detach_device(vm_name, disk_xml.xml,
                                         ignore_status=True, debug=True, wait_remove_event=True)
            libvirt.check_exit_status(result)
        except virt_vm.VMStartError as e:
            test.fail("VM failed to start."
                      "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            test.fail("Failed to define VM:\n%s" % xml_error)

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
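        # Presumably --snapshots-metadata is passed through to undefine
        # so leftover snapshot metadata cannot block restoring the
        # original XML.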
        vmxml_backup.sync("--snapshots-metadata")
        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
        # Stop qemu-pr-helper daemon
        start_or_stop_qemu_pr_helper(is_start=False)
Code example #54
0
def run(test, params, env):
    """
    Test virsh domblkerror with two error types:
    1. unspecified error
    2. no space
    """

    if not virsh.has_help_command('domblkerror'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the domblkerror test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    error_type = params.get("domblkerror_error_type")
    timeout = int(params.get("domblkerror_timeout", 240))
    mnt_dir = params.get("domblkerror_mnt_dir", "/home/test")
    export_file = params.get("nfs_export_file", "/etc/exports")
    img_name = params.get("domblkerror_img_name", "libvirt-disk")
    img_size = params.get("domblkerror_img_size")
    target_dev = params.get("domblkerror_target_dev", "vdb")
    pool_name = params.get("domblkerror_pool_name", "fs_pool")
    vol_name = params.get("domblkerror_vol_name", "vol1")

    vm = env.get_vm(vm_name)
    # backup /etc/exports
    shutil.copyfile(export_file, "%s.bak" % export_file)
    selinux_bak = ""
    # backup xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Generate tmp dir
        tmp_dir = data_dir.get_tmp_dir()
        img_dir = os.path.join(tmp_dir, 'images')
        if not os.path.exists(img_dir):
            os.mkdir(img_dir)
        # Generate attached disk
        utils.run("qemu-img create %s %s" %
                  (os.path.join(img_dir, img_name), img_size))

        # Get unspecified error
        if error_type == "unspecified error":
            # In this case the guest attaches a disk backed by NFS;
            # stopping the NFS service pauses the guest and yields
            # "unspecified error"
            nfs_dir = os.path.join(tmp_dir, 'mnt')
            if not os.path.exists(nfs_dir):
                os.mkdir(nfs_dir)
            mount_opt = "rw,no_root_squash,async"
            res = utils_test.libvirt.setup_or_cleanup_nfs(
                is_setup=True,
                mount_dir=nfs_dir,
                is_mount=False,
                export_options=mount_opt,
                export_dir=img_dir)
            selinux_bak = res["selinux_status_bak"]
            utils.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 "
                      "127.0.0.1:%s %s" % (img_dir, nfs_dir))
            img_path = os.path.join(nfs_dir, img_name)
            nfs_service = Factory.create_service("nfs")

        elif error_type == "no space":
            # Steps to generate a "no space" block error:
            # 1. Prepare an iscsi disk and build a fs pool on it
            # 2. Create a vol with large capacity but 0 allocation
            # 3. Attach the disk to the guest
            # 4. In the guest, create a large image in the vol, which may
            #    pause the guest

            pool_target = os.path.join(tmp_dir, pool_name)
            _pool_vol = utils_test.libvirt.PoolVolumeTest(test, params)
            _pool_vol.pre_pool(pool_name,
                               "fs",
                               pool_target,
                               img_name,
                               image_size=img_size)
            _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name)
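            # The volume claims 100M capacity with 0 allocation, so
            # filling it from the guest can hit ENOSPC and pause the VM.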
            img_path = os.path.join(pool_target, vol_name)

        # Generate disk xml
        # Guest will attach a disk with cache=none and error_policy=stop
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {
                'file': img_path
            }})
        img_disk.driver = {
            'name': "qemu",
            'type': "raw",
            'cache': "none",
            'error_policy': "stop"
        }
        img_disk.target = {'dev': target_dev, 'bus': "virtio"}
        logging.debug("disk xml is %s", img_disk.xml)

        # Start guest and get session
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Get disk list before operation
        get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2"
        bef_list = session.cmd_output(get_disks_cmd).split("\n")

        # Attach disk to guest
        ret = virsh.attach_device(domain_opt=vm_name, file_opt=img_disk.xml)
        if ret.exit_status != 0:
            raise error.TestFail("Fail to attach device %s" % ret.stderr)
        time.sleep(2)
        logging.debug("domain xml is %s", virsh.dumpxml(vm_name))
        # get disk list after attach
        aft_list = session.cmd_output(get_disks_cmd).split("\n")
        # Find new disk after attach
        new_disk = "".join(list(set(bef_list) ^ set(aft_list)))
        logging.debug("new disk is %s", new_disk)

        def create_large_image():
            """
            Create large image in guest
            """
            # create partition and file system
            session.cmd("parted -s %s mklabel msdos" % new_disk)
            session.cmd("parted -s %s mkpart primary ext3 '0%%' '100%%'" %
                        new_disk)
            # mount disk and write file in it
            session.cmd("mkfs.ext3 %s1" % new_disk)
            session.cmd("mkdir -p %s && mount %s1 %s" %
                        (mnt_dir, new_disk, mnt_dir))

            # The following step may pause the guest before it returns
            try:
                session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 "
                            "count=51200 && sync" % mnt_dir)
            except Exception as err:
                logging.debug("Expected fail: %s", err)
            session.close()

        create_large_image()
        if error_type == "unspecified error":
            # Stop the NFS service to trigger the error after the large
            # image has been created
            nfs_service.stop()
            logging.debug("nfs status is %s", nfs_service.status())

        # wait and check the guest status with timeout
        def _check_state():
            """
            Check domain state
            """
            return (vm.state() == "paused")

        if not utils_misc.wait_for(_check_state, timeout):
            raise error.TestFail("Guest does not paused, it is %s now" %
                                 vm.state())
        else:
            logging.info("Now domain state changed to paused status")
            output = virsh.domblkerror(vm_name)
            if output.exit_status == 0:
                expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
                if output.stdout.strip() == expect_result:
                    logging.info("Get expect result: %s", expect_result)
                else:
                    raise error.TestFail(
                        "Failed to get expect result, get %s" %
                        output.stdout.strip())
            else:
                raise error.TestFail("Fail to get domblkerror info:%s" %
                                     output.stderr)
Code example #55
0
File: libvirt_scsi.py Project: CongLi/tp-libvirt
def run(test, params, env):
    # Get variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    img_type = ('yes' == params.get("libvirt_scsi_img_type", "no"))
    cdrom_type = ('yes' == params.get("libvirt_scsi_cdrom_type", "no"))
    partition_type = ('yes' == params.get("libvirt_scsi_partition_type", "no"))
    partition = params.get("libvirt_scsi_partition",
                           "ENTER.YOUR.AVAILABLE.PARTITION")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    # Init a VM instance and a VMXML instance.
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a backup of xml to restore it in cleanup.
    backup_xml = vmxml.copy()
    # Add a scsi controller if there is none.
    controller_devices = vmxml.get_devices("controller")
    scsi_controllers = []
    for device in controller_devices:
        if device.type == "scsi":
            scsi_controllers.append(device)

    if not scsi_controllers:
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

    # Add a disk with bus 'scsi' to vmxml.
    if partition_type:
        if partition.count("ENTER.YOUR"):
            raise error.TestNAError("Partition for partition test "
                                    "is not configured.")
        partition_disk = Disk(type_name="block")
        partition_disk.device = "disk"
        partition_disk.target = {'dev': "vdg",
                                 'bus': "scsi"}
        partition_disk.source = partition_disk.new_disk_source(
            **{'attrs': {'dev': partition}})
        vmxml.add_device(partition_disk)
    if img_type:
        # Init a QemuImg instance.
        img_name = "libvirt_scsi"
        params['image_name'] = img_name
        image = qemu_storage.QemuImg(params, data_dir.get_tmp_dir(), img_name)
        # Create an image.
        img_path, _ = image.create(params)
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.target = {'dev': "vde",
                           'bus': "scsi"}
        vmxml.add_device(img_disk)
    if cdrom_type:
        # Init a CdromDisk instance.
        cdrom_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_scsi")
        try:
            cdrom = CdromDisk(cdrom_path, data_dir.get_tmp_dir())
            cdrom.close()
        except process.CmdError as detail:
            raise error.TestNAError("Failed to create cdrom disk: %s" % detail)

        cdrom_disk = Disk(type_name="file")
        cdrom_disk.device = "cdrom"
        cdrom_disk.target = {'dev': "vdf",
                             'bus': "scsi"}
        cdrom_disk.source = cdrom_disk.new_disk_source(
            **{'attrs': {'file': cdrom_path}})
        vmxml.add_device(cdrom_disk)