Example #1
    def check_snapshot(bgjob=None):
        """
        Do snapshot operation and check the results
        """
        snapshot_name1 = "snap.s1"
        snapshot_name2 = "snap.s2"
        if not snapshot_vm_running:
            vm.destroy(gracefully=False)
        ret = virsh.snapshot_create_as(vm_name, snapshot_name1)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name1 not in snap_lists:
            test.fail("Snapshot %s doesn't exist"
                      % snapshot_name1)

        if snapshot_vm_running:
            options = "--force"
        else:
            options = ""
        ret = virsh.snapshot_revert(
            vm_name, ("%s %s" % (snapshot_name1, options)))
        libvirt.check_exit_status(ret)
        ret = virsh.dumpxml(vm_name)
        if ret.stdout.count("<rng model="):
            test.fail("Found rng device in xml")

        if snapshot_with_rng:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            if bgjob:
                bgjob.kill_func()
            modify_rng_xml(params, False)

        # Start the domain before disk-only snapshot
        if vm.is_dead():
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils.AsyncJob(cmd)
            vm.start()
            vm.wait_for_login().close()
        err_msgs = ("live disk snapshot not supported"
                    " with this QEMU binary")
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only"
                                       % snapshot_name2)
        if ret.exit_status:
            if ret.stderr.count(err_msgs):
                test.cancel(err_msgs)
            else:
                test.fail("Failed to create external snapshot")
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name2 not in snap_lists:
            test.fail("Failed to check snapshot list")

        ret = virsh.domblklist(vm_name)
        if not ret.stdout.count(snapshot_name2):
            test.fail("Failed to find snapshot disk")
Example #2
def test_with_label(vm, params, test):
    """
    Test nvdimm with label setting

    :param vm: vm object
    :param params: dict, test parameters
    :param test: test object
    :raises: test.fail if checkpoints fail
    """
    test_str = params.get('test_str')
    test_file = params.get('test_file')
    vm_name = params.get('main_vm')
    vm_session = vm.wait_for_login()
    # Create a file on the nvdimm device.
    create_file_within_nvdimm_disk(vm_session, test_file, test_str, test, block_size=4096)

    # Reboot the guest, and remount the nvdimm device in the guest.
    # Check that the file foo-label still exists
    vm_session.close()
    virsh.reboot(vm_name, debug=True)
    vm_session = vm.wait_for_login()

    vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
    if test_str not in vm_session.cmd('cat /mnt/foo-label'):
        test.fail('"%s" should be in output' % test_str)
    vm_session.close()
    if params.get('check_life_cycle', 'no') == 'yes':
        virsh.managedsave(vm_name, ignore_status=False, debug=True)
        vm.start()
        vm_session = vm.wait_for_login()
        check_nvdimm_file(test_str, test_file, vm_session, test)
        vm_session.close()
        vm_s1 = vm_name + ".s1"
        virsh.save(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.restore(vm_s1, ignore_status=False, debug=True)
        vm_session = vm.wait_for_login()
        check_nvdimm_file(test_str, test_file, vm_session, test)
        vm_session.close()
        virsh.snapshot_create_as(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.snapshot_revert(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.snapshot_delete(vm_name, vm_s1, ignore_status=False, debug=True)
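
check_nvdimm_file() and create_file_within_nvdimm_disk() are defined elsewhere in the real test module; a minimal sketch of the checker's assumed contract:

def check_nvdimm_file(test_str, test_file, vm_session, test):
    """Verify the file on the mounted nvdimm device still holds the string."""
    output = vm_session.cmd('cat %s' % test_file)
    if test_str not in output:
        test.fail('"%s" should be in the output of "cat %s"'
                  % (test_str, test_file))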
    def check_snapshot_startuppolicy(devices, device_targets):
        """
        Check disk snapshot with startup policy option.
        """
        ret = virsh.snapshot_create(vm_name, "")
        libvirt.check_exit_status(ret)

        snapshot_name = re.search(
            r"\d+", ret.stdout.strip()).group(0)
        vm.destroy(gracefully=False)

        # Remove the disk and revert snapshot.
        os.remove(disks[0]["source"])
        ret = virsh.snapshot_revert(vm_name, snapshot_name, "")
        libvirt.check_exit_status(ret)

        if vm.is_dead():
            raise error.TestError("Revert snapshot failed")

        # Check the disk partitions again, the disk should not exist.
        if not check_vm_partitions(devices, device_targets, False):
            raise error.TestError("Check partition failed after snapshot")
def run(test, params, env):
    """
    Test virsh snapshot command with disks of various types.

    (1). Init the variables from params.
    (2). Create an image in the specified format.
    (3). Attach the disk to the VM.
    (4). Create a snapshot.
    (5). Revert the snapshot.
    (6). Cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")

    # Get a tmp_dir.
    tmp_dir = data_dir.get_tmp_dir()
    # Create an image.
    params['image_name'] = "snapshot_test"
    params['image_format'] = image_format
    params['image_size'] = "1M"
    image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
    img_path, _ = image.create(params)
    # Do the attach action.
    result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                               extra="--persistent --subdriver %s" % image_format)
    if result.exit_status:
        raise error.TestNAError("Failed to attach disk %s to VM."
                                "Detail: %s." % (img_path, result.stderr))

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    try:
        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = ["<domainsnapshot>\n",
                     "<name>%s</name>\n" % snapshot_name,
                     "<description>Snapshot Test</description>\n"]
            if snapshot_memory == "external":
                memory_external = os.path.join(tmp_dir, "snapshot_memory")
                snapshot_external_disk.append(memory_external)
                lines.append("<memory snapshot=\'%s\' file='%s'/>\n" %
                             (snapshot_memory, memory_external))
            else:
                lines.append("<memory snapshot='%s'/>\n" % snapshot_memory)

            # Add all disks into xml file.
            disks = vm.get_disk_devices().values()
            lines.append("<disks>\n")
            for disk in disks:
                lines.append("<disk name='%s' snapshot='%s'>\n" %
                             (disk['source'], snapshot_disk))
                if snapshot_disk == "external":
                    disk_external = os.path.join(tmp_dir,
                                                 "%s.snap" % os.path.basename(disk['source']))
                    snapshot_external_disk.append(disk_external)
                    lines.append("<source file='%s'/>\n" % disk_external)
                lines.append("</disk>\n")
            lines.append("</disks>\n")
            lines.append("</domainsnapshot>")

            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            snapshot_result = virsh.snapshot_create(
                vm_name, ("--xmlfile %s" % snapshot_xml_path))
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
        else:
            options = ""
            snapshot_result = virsh.snapshot_create(vm_name, options)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)
            if snapshot_current:
                lines = ["<domainsnapshot>\n",
                         "<description>Snapshot Test</description>\n",
                         "<state>running</state>\n",
                         "<creationTime>%s</creationTime>" % snapshot_name,
                         "</domainsnapshot>"]
                snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
                snapshot_xml_file = open(snapshot_xml_path, "w")
                snapshot_xml_file.writelines(lines)
                snapshot_xml_file.close()
                options += "--redefine %s --current" % snapshot_xml_path
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            raise error.TestFail("Success to create snapshot in negative case\n"
                                 "Detail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()
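        # The NamedTemporaryFile is closed (and removed) right away: we only
        # need a unique /tmp path string; the file itself is touched inside
        # the guest below.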

        status, output = session.cmd_status_output("touch %s" % tmp_file_path)
        if status:
            raise error.TestFail("Touch file in vm failed. %s" % output)

        session.close()

        # Destroy vm for snapshot revert.
        virsh.destroy(vm_name)
        # Revert snapshot.
        revert_options = ""
        if snapshot_revert_paused:
            revert_options += " --paused"
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                              revert_options)
        if revert_result.exit_status:
            raise error.TestFail(
                "Revert snapshot failed. %s" % revert_result.stderr.strip())

        if vm.is_dead():
            raise error.TestFail("Revert snapshot failed.")

        if snapshot_revert_paused:
            if vm.is_paused():
                vm.resume()
            else:
                raise error.TestFail("Revert command successed, but VM is not "
                                     "paused after reverting with --paused option.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        image.remove()
        if snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi pool (for volume type disk testing)
    3. Create an iscsi network disk XML
    4. Attach disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in"
                                    + " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s", open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                      is_login=False,
                                                      chap_user=chap_user,
                                                      chap_passwd=chap_passwd)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))[1][0]
            except IndexError:
                raise error.TestError("Fail to get volume name")

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/1",
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'source_mode': disk_src_mode}
        else:
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        # Attach the iscsi network disk to domain
        logging.debug("Attach disk by XML: %s", open(disk_xml).read())
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         **virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_info(vm_name, snapshot_name1,
                                             **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            sn_create_op = "%s --disk_ony %s" % (snapshot_name2, disk_target)
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
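
For reference, the libvirt.check_exit_status() helper used throughout these tests lives in virttest.utils_test.libvirt and behaves roughly like this sketch:

from avocado.core import exceptions


def check_exit_status(result, expect_error=False):
    """Fail the test when the command result does not match the expectation."""
    if not expect_error:
        if result.exit_status != 0:
            raise exceptions.TestFail(result.stderr.strip())
    elif result.exit_status == 0:
        raise exceptions.TestFail("Command succeeded unexpectedly: %s"
                                  % result.command)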
Example #6
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi pool (for volume type disk testing)
    3. Create an iscsi network disk XML
    4. Attach disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
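            # (utils_misc.wait_for polls get_vol roughly once per second until
            # it returns a truthy value or the 10s timeout expires, returning
            # that value, or None on timeout.)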
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         **virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to stabilize
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(data_dir.get_tmp_dir(), snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
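
Note how this revision encodes the CHAP secret compared with the earlier Python 2 copy: base64.b64encode() operates on bytes under Python 3, while virsh secret-set-value needs a str. A standalone illustration (the password value is illustrative):

import base64
import locale

chap_passwd = "redhat"  # illustrative value
encoding = locale.getpreferredencoding()
# encode() -> bytes for b64encode, decode() -> str for the virsh command line
secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)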
Example #7
                       "Incorrect children count")
            check_info(infos["Descendants"], sni["Descendants"],
                       "Incorrect descendants count")

        except error.CmdError:
            handle_error("Failed getting snapshots info", vm_name)
        except error.TestFail as e:
            handle_error(str(e), vm_name)
        logging.info("Snapshot %s verified", sni["Name"])

    logging.info("Test snapshot switching")
    for sni in snapshot_info:
        try:
            # Assure VM is shut off before revert.
            virsh.destroy(vm_name)
            result = virsh.snapshot_revert(vm_name, sni["Name"])
            if result.exit_status:
                raise error.TestFail("Snapshot revert failed.\n"
                                     "Error: %s." % result.stderr)
            state = normalize_state(virsh.domstate(vm_name).stdout.strip())
            if state != sni["State"]:
                raise error.TestFail("Incorrect state after revert - %s"
                                     % (sni["Name"]))
            if state == normalize_state('shutoff'):
                vm.start()
            elif state == normalize_state('paused'):
                vm.resume()

            session = vm.wait_for_login()
            test_file(session, sni["to_create"], 0)
            test_file(session, sni["to_delete"], 2)
def run(test, params, env):
    """
    Test virsh snapshot command with disks of various types.

    (1). Init the variables from params.
    (2). Create an image in the specified format.
    (3). Attach the disk to the VM.
    (4). Create a snapshot.
    (5). Revert the snapshot.
    (6). Cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")

    # Set volume xml attribute dictionary, extracting all params starting
    # with 'vol_' (used for setting the volume xml), except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = ["dir", "fs", "netfs", "logical", "iscsi",
                           "disk", "gluster"]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            raise error.TestNAError("%s not in support list %s" %
                                    (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Get a tmp dir
    tmp_dir = data_dir.get_tmp_dir()
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
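    # libvirt keeps the per-snapshot metadata XML under this directory; the
    # delete test below checks whether <snapshot_name>.xml is removed.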
    try:
        if snapshot_with_pool:
            # Create a destination pool for the volume image to attach
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes via
                # libvirt; a logical pool can create volumes via libvirt, but
                # the volume format setting is not supported and defaults
                # to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = pv.list_volumes().keys()
                if vols:
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s", pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" % newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = utils.run(cmd, ignore_status=True)
                    if cmd_result.exit_status:
                        raise error.TestNAError("Failed to format volume, %s" %
                                                cmd_result.stdout.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create a image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        # Do the attach action.
        out = utils.run("qemu-img info %s" % img_path)
        logging.debug("The img info is:\n%s" % out.stdout.strip())
        result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                   extra=extra, debug=True)
        if result.exit_status:
            raise error.TestNAError("Failed to attach disk %s to VM."
                                    "Detail: %s." % (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = ["<domainsnapshot>\n",
                     "<name>%s</name>\n" % snapshot_name,
                     "<description>Snapshot Test</description>\n"]
            if snapshot_memory == "external":
                memory_external = os.path.join(tmp_dir, "snapshot_memory")
                snapshot_external_disk.append(memory_external)
                lines.append("<memory snapshot=\'%s\' file='%s'/>\n" %
                             (snapshot_memory, memory_external))
            else:
                lines.append("<memory snapshot='%s'/>\n" % snapshot_memory)

            # Add all disks into xml file.
            disks = vm.get_disk_devices().values()
            lines.append("<disks>\n")
            for disk in disks:
                lines.append("<disk name='%s' snapshot='%s'>\n" %
                             (disk['source'], snapshot_disk))
                if snapshot_disk == "external":
                    snap_path = "%s.snap" % os.path.basename(disk['source'])
                    disk_external = os.path.join(tmp_dir, snap_path)
                    snapshot_external_disk.append(disk_external)
                    lines.append("<source file='%s'/>\n" % disk_external)
                lines.append("</disk>\n")
            lines.append("</disks>\n")
            lines.append("</domainsnapshot>")

            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            logging.debug("The xml content for snapshot create is:")
            with open(snapshot_xml_path, 'r') as fin:
                logging.debug(fin.read())

            options += " --xmlfile %s " % snapshot_xml_path
            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search("live disk snapshot not supported with this QEMU binary", out_err):
                        raise error.TestNAError(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As commit d2e668e in 1.2.5, internal active snapshot
                        # without memory state is rejected. Handle it as SKIP
                        # for now. This could be supported in future by bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search("internal snapshot of a running VM" +
                                     " must include the memory state",
                                     out_err):
                            raise error.TestNAError("Check Bug #1083345, %s" %
                                                    out_err)
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name, options)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)
            if snapshot_current:
                lines = ["<domainsnapshot>\n",
                         "<description>Snapshot Test</description>\n",
                         "<state>running</state>\n",
                         "<creationTime>%s</creationTime>" % snapshot_name,
                         "</domainsnapshot>"]
                snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
                snapshot_xml_file = open(snapshot_xml_path, "w")
                snapshot_xml_file.writelines(lines)
                snapshot_xml_file.close()
                logging.debug("The xml content for snapshot create is:")
                with open(snapshot_xml_path, 'r') as fin:
                    logging.debug(fin.read())
                options += "--redefine %s --current" % snapshot_xml_path
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                raise error.TestFail("Snapshot creation succeeded in negative"
                                     " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            raise error.TestFail("'%s' run failed with '%s'" %
                                 (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # Destroy vm for snapshot revert (only needed before libvirt 1.2.3).
        if not libvirt_version.version_compare(1, 2, 3):
            virsh.destroy(vm_name)
        # Revert snapshot.
        revert_options = ""
        if snapshot_revert_paused:
            revert_options += " --paused"
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                              revert_options,
                                              debug=True)
        if revert_result.exit_status:
            # As commit d410e6f for libvirt 1.2.3, attempts to revert external
            # snapshots will FAIL with an error "revert to external snapshot
            # not supported yet". Thus, let's check for that and handle as a
            # SKIP for now. Check bug:
            # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
            if libvirt_version.version_compare(1, 2, 3):
                if re.search("revert to external snapshot not supported yet",
                             revert_result.stderr):
                    raise error.TestNAError(revert_result.stderr.strip())
            else:
                raise error.TestFail("Revert snapshot failed. %s" %
                                     revert_result.stderr.strip())

        if vm.is_dead():
            raise error.TestFail("Revert snapshot failed.")

        if snapshot_revert_paused:
            if vm.is_paused():
                vm.resume()
            else:
                raise error.TestFail("Revert command successed, but VM is not "
                                     "paused after reverting with --paused"
                                     "  option.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("After revert cat file output='%s'", output)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

        # Test delete snapshot without "--metadata", delete external disk
        # snapshot will fail for now.
        # Only do this when snapshot creation succeeds, which is filtered in
        # the cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name, snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        raise error.TestFail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s missing"
                                                 % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        raise error.TestFail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s still"
                                                 " exists" % snap_xml_path)

    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")
        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except error.TestFail as detail:
                logging.error(str(detail))
def run(test, params, env):
    """
    Test virsh snapshot command with disks of various types.

    (1). Init the variables from params.
    (2). Create an image in the specified format.
    (3). Attach the disk to the VM.
    (4). Create a snapshot.
    (5). Revert the snapshot.
    (6). Cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no")

    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image", "emulated-image")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")
    export_options = params.get("export_options", "rw,no_root_squash")

    # Set volume xml attribute dictionary, extracting all params starting
    # with 'vol_' (used for setting the volume xml), except 'lazy_refcounts'.
    vol_arg = {}
    for key in list(params.keys()):
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = ["dir", "fs", "netfs", "logical", "iscsi",
                           "disk", "gluster"]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            test.cancel("%s not in support list %s" %
                        (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")
    no_memory_snap = "yes" == params.get("no_memory_snap", "no")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed" or image_format == "qed":
            test.cancel("QED support changed, check bug: "
                        "https://bugzilla.redhat.com/show_bug.cgi"
                        "?id=731570")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 was closed as WONTFIX, the support only
        # exists in 1.2.7 and higher
        if disk_source_protocol == 'gluster':
            test.cancel("Snapshot on glusterfs not support in "
                        "current version. Check more info with "
                        "https://bugzilla.redhat.com/buglist.cgi?"
                        "bug_id=1017289,1032370")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Snapshot metadata directory for this VM
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
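    # e.g. the metadata of a snapshot named 'snap1' (hypothetical) would be
    # /var/lib/libvirt/qemu/snapshot/<vm_name>/snap1.xml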
    try:
        if replace_vm_disk:
            utlv.set_vm_disk(vm, params, tmp_dir)
            if multi_gluster_disks:
                new_params = params.copy()
                new_params["pool_name"] = "gluster-pool2"
                new_params["vol_name"] = "gluster-vol2"
                new_params["disk_target"] = "vdf"
                new_params["image_convert"] = 'no'
                utlv.set_vm_disk(vm, new_params, tmp_dir)

        if snapshot_with_pool:
            # Create a destination pool holding the volume image to attach
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, image_size="1G",
                         pre_disk_vol=["20M"],
                         source_name=vol_name,
                         export_options=export_options)

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes
                # through libvirt; a logical pool can create volumes, but
                # the volume format is not supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" % newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = process.run(cmd, ignore_status=True, shell=True)
                    if cmd_result.exit_status:
                        test.cancel("Failed to format volume, %s" %
                                    cmd_result.stdout_text.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create an image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        if not multi_gluster_disks:
            # Do the attach action.
            out = process.run("qemu-img info %s" % img_path, shell=True)
            logging.debug("The img info is:\n%s" % out.stdout.strip())
            result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                       extra=extra, debug=True)
            if result.exit_status:
                test.cancel("Failed to attach disk %s to VM."
                            "Detail: %s." % (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snap_xml = libvirt_xml.SnapshotXML()
            snapshot_name = "snapshot_test"
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test"
            if not no_memory_snap:
                if "--disk-only" not in options:
                    if snapshot_memory == "external":
                        memory_external = os.path.join(tmp_dir,
                                                       "snapshot_memory")
                        snap_xml.mem_snap_type = snapshot_memory
                        snap_xml.mem_file = memory_external
                        snapshot_external_disk.append(memory_external)
                    else:
                        snap_xml.mem_snap_type = snapshot_memory

            # Add all disks into xml file.
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            # Keep only storage disks, dropping devices such as 'cdrom'.
            # (Removing items while iterating the same list would skip
            # entries, so build a filtered list instead.)
            disks = [disk for disk in disks if disk.device == 'disk']
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = snapshot_disk
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as the external snapshot file format,
                # so update it here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                if snapshot_disk == 'external':
                    new_attrs = disk_xml.source.attrs
                    if 'file' in disk_xml.source.attrs:
                        new_file = "%s.snap" % disk_xml.source.attrs['file']
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None
                    elif 'name' in disk_xml.source.attrs:
                        new_name = "%s.snap" % disk_xml.source.attrs['name']
                        new_attrs.update({'name': new_name})
                        hosts = disk_xml.source.hosts
                    elif ('dev' in disk_xml.source.attrs and
                          disk_xml.type_name == 'block'):
                        # Use a local file as the external snapshot target
                        # for block type. As a block device is treated as
                        # raw format by default, it is not fit as an
                        # external disk snapshot target; as a workaround,
                        # switch the disk to a local file target.
                        disk_xml.type_name = 'file'
                        del new_attrs['dev']
                        new_file = "%s/blk_src_file.snap" % tmp_dir
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                    new_src_dict = {"attrs": new_attrs}
                    if hosts:
                        new_src_dict.update({"hosts": hosts})
                    disk_xml.source = disk_xml.new_disk_source(**new_src_dict)
                else:
                    del disk_xml.source

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options += " --xmlfile %s " % snapshot_xml_path

            if vm_state == "shut off":
                vm.destroy(gracefully=False)

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search("live disk snapshot not supported with this "
                                 "QEMU binary", out_err):
                        test.cancel(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As commit d2e668e in 1.2.5, internal active snapshot
                        # without memory state is rejected. Handle it as SKIP
                        # for now. This could be supported in future by bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search("internal snapshot of a running VM" +
                                     " must include the memory state",
                                     out_err):
                            test.cancel("Check Bug #1083345, %s" %
                                        out_err)

                    test.fail("Failed to create snapshot. Error:%s."
                              % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    test.fail("Failed to create snapshot. Error:%s."
                              % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                snap_xml = libvirt_xml.SnapshotXML()
                new_snap = snap_xml.new_from_snapshot_dumpxml(vm_name,
                                                              snapshot_name)
                # update an element
                new_snap.creation_time = snapshot_name
                snapshot_xml_path = new_snap.xml
                options += "--redefine %s --current" % snapshot_xml_path
                snapshot_result = virsh.snapshot_create(vm_name,
                                                        options, debug=True)
                if snapshot_result.exit_status:
                    test.fail("Failed to create snapshot --current."
                              "Error:%s." %
                              snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                test.fail("Success to create snapshot in negative"
                          " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            test.fail("'%s' failed with '%s'" %
                      (echo_cmd, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # As only internal snapshot revert works now, only do the revert
        # with internal snapshots and treat the skipped external cases as
        # pass. Once external revert is supported, move the following code
        # back out of this branch.
        if snapshot_disk == 'internal':
            # Destroy vm for snapshot revert.
            if not libvirt_version.version_compare(1, 2, 3):
                virsh.destroy(vm_name)
            # Revert snapshot.
            revert_options = ""
            if snapshot_revert_paused:
                revert_options += " --paused"
            revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                                  revert_options,
                                                  debug=True)
            if revert_result.exit_status:
                # Attempts to revert external snapshots will FAIL with an error
                # "revert to external disk snapshot not supported yet" or "revert
                # to external snapshot not supported yet" since d410e6f. Thus,
                # let's check for that and handle as a SKIP for now. Check bug:
                # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
                if re.search("revert to external \w* ?snapshot not supported yet",
                             revert_result.stderr):
                    test.cancel(revert_result.stderr.strip())
                else:
                    test.fail("Revert snapshot failed. %s" %
                              revert_result.stderr.strip())

            if vm.is_dead():
                test.fail("Revert snapshot failed.")

            if snapshot_revert_paused:
                if vm.is_paused():
                    vm.resume()
                else:
                    test.fail("Revert command successed, but VM is not "
                              "paused after reverting with --paused"
                              "  option.")
            # login vm.
            session = vm.wait_for_login()
            # Check the result of revert.
            status, output = session.cmd_status_output("cat %s" % tmp_file_path)
            logging.debug("After revert cat file output='%s'", output)
            if not status:
                test.fail("Tmp file exists, revert failed.")

            # Close the session.
            session.close()

        # Test delete snapshot without "--metadata", delete external disk
        # snapshot will fail for now.
        # Only do this when snapshot creat succeed which filtered in cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name, snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        test.fail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            test.fail("Snapshot xml file %s missing"
                                      % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        test.fail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            test.fail("Snapshot xml file %s still"
                                      % snap_xml_path + " exist")

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_source_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            if multi_gluster_disks:
                brick_path = os.path.join(tmp_dir, "gluster-pool2")
                utlv.setup_or_cleanup_gluster(False, "gluster-vol2", brick_path)
            libvirtd.restart()

        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image, source_name=vol_name)
            except exceptions.TestFail as detail:
                libvirtd.restart()
                logging.error(str(detail))
Example #10
def run_virsh_snapshot_disk(test, params, env):
    """
    Test virsh snapshot command when disk in all kinds of type.

    (1). Init the variables from params.
    (2). Create an image in the specified format.
    (3). Attach disk to vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). Clean up.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))

    # Get a tmp_dir.
    tmp_dir = test.tmpdir
    # Create an image.
    params['image_name'] = "snapshot_test"
    params['image_format'] = image_format
    image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
    img_path, _ = image.create(params)
    # Do the attach action.
    virsh.attach_disk(vm_name, source=img_path, target="vdf",
                      extra="--persistent --subdriver %s" % image_format)

    # Init snapshot_name
    snapshot_name = None
    try:
        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = ["<domainsnapshot>\n",
                     "<name>%s</name>\n" % snapshot_name,
                     "<description>Snapshot Test</description>\n",
                     "<memory snapshot=\'internal\'/>\n",
                     "</domainsnapshot>"]
            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            snapshot_result = virsh.snapshot_create(vm_name, ("--xmlfile %s" % snapshot_xml_path))
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
        else:
            snapshot_result = virsh.snapshot_create(vm_name)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search("\d+", snapshot_result.stdout.strip()).group(0)


        # Touch a file in VM.
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        status, output = session.cmd_status_output("touch %s" % tmp_file_path)
        if status:
            raise error.TestFail("Touch file in vm failed. %s" % output)

        session.close()

        # Destroy vm for snapshot revert.
        virsh.destroy(vm_name)
        # Revert snapshot.
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name)
        if revert_result.exit_status:
            raise error.TestFail("Revert snapshot failed. %s" % revert_result.stderr.strip())

        if not vm.is_alive():
            raise error.TestFail("Revert snapshot failed.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        image.remove()
        if snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name)
Example #12
def run(test, params, env):
    """
    1. prepare an fc lun with one of the following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of the following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. clean up env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                       replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir,
                            vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn,
                              first_online_hba, pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(
                set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            new_vhba_scsibus = re.sub("\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                raise exceptions.TestFail("block device not found with scsi_%s",
                                          new_vhba_scsibus)
            first_blk_dev = new_blks[0]
            utils_misc.wait_for(
                lambda: get_symbols_by_blk(first_blk_dev),
                timeout=_TIMEOUT)
            lun_sl = get_symbols_by_blk(first_blk_dev)
            if not lun_sl:
                raise exceptions.TestFail("lun symbolic links not found under "
                                          "/dev/disk/by-path/ for blk dev %s" %
                                          first_blk_dev)
            lun_dev = lun_sl[0]
            path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml(
                    {"nodedev_parent": first_online_hba,
                     "scsi_wwnn": wwnn,
                     "scsi_wwpn": wwpn})
            utils_misc.wait_for(
                lambda: utils_npiv.is_vhbas_added(old_vhbas),
                timeout=_TIMEOUT*2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not sucessfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT*5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(set(cur_mpath_devs).difference(
                    set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub("\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s",
                                              new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(
                    lambda: get_symbols_by_blk(first_blk_dev),
                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail("lun symbolic links not found in "
                                              "/dev/disk/by-path/ for %s" %
                                              first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass"
                                           "virtual disk to VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            except process.CmdError as detail:
                raise exceptions.TestFail("Fail to create qcow2 on blk dev: "
                                          "%s" % detail)
        else:
            raise exceptions.TestFail("Don't have a vaild path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                         timeout=_TIMEOUT*3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {'type_name': disk_type,
                           'target_dev': device_target,
                           'target_bus': target_bus,
                           'source_pool': pool_name,
                           'source_volume': test_vol,
                           'driver_type': driver_type}
        else:
            disk_params = {'type_name': disk_type,
                           'device': disk_device,
                           'driver_name': driver_name,
                           'driver_type': driver_type,
                           'source_file': path_to_blk,
                           'target_dev': device_target,
                           'target_bus': target_bus}
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        with open(utlv.create_disk_xml(disk_params)) as disk_xml_fd:
            new_disk.xml = disk_xml_fd.read()

        # start vm with the virtual disk
        # list.append() returns None, so append first and assign after
        devices = vmxml.devices
        devices.append(new_disk)
        vmxml.devices = devices
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
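        # With snapshot_disk_only, the final options string looks like
        # (hypothetical disk and path):
        #   "s1 --disk-only --diskspec vda,file=/tmp/vda.s1"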
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshot for now. so
        # only check this with internal snapshot.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s",
                          vm_name, snapshot_name)
            utlv.check_exit_status(virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(session,
                                                          "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(session,
                                                          "/mnt/before_snapshot.txt")
            logging.debug("file eixst = %s, file content = %s",
                          file_existence, file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # Delete snapshots:
        # - if disk-only, delete with --metadata and remove the files
        # - otherwise, delete the snapshot normally
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s",
                          snap, options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s", snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s", detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
Example #13
def run(test, params, env):
    """
    Test startupPolicy for CD-ROM/floppy/Volume disks.

    Steps:
    1. Prepare disk media image.
    2. Setup startupPolicy for a disk.
    3. Start the domain.
    4. Save the domain.
    5. Remove the disk source file and restore the domain.
    6. Update startupPolicy for a disk.
    7. Destroy the domain.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    startup_policy = params.get("policy")

    def create_iscsi_pool():
        """
        Set up an iSCSI target and create one iSCSI pool.
        """
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user="",
                                                               chap_passwd="",
                                                               portal_ip=disk_src_host)
        # Define an iSCSI pool xml to create it
        pool_src_xml = pool_xml.SourceXML()
        pool_src_xml.host_name = pool_src_host
        pool_src_xml.device_path = iscsi_target
        poolxml = pool_xml.PoolXML(pool_type=pool_type)
        poolxml.name = pool_name
        poolxml.set_source(pool_src_xml)
        poolxml.target_path = "/dev/disk/by-path"
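        # The resulting pool XML is roughly as below (the device path comes
        # from the iSCSI target created above; the IQN is hypothetical):
        #   <pool type='iscsi'>
        #     <name>iscsi_pool</name>
        #     <source>
        #       <host name='127.0.0.1'/>
        #       <device path='iqn.2019-01.com.example:target'/>
        #     </source>
        #     <target><path>/dev/disk/by-path</path></target>
        #   </pool>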

        # Create iSCSI pool.
        virsh.pool_destroy(pool_name, **virsh_dargs)
        cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    def create_volume(pvt, created_vol_name=None):
        """
        Create iSCSI volume.

        :param pvt: PoolVolumeTest object
        :param created_vol_name: Created volume name
        """
        try:
            if pool_type == "iscsi":
                create_iscsi_pool()
            else:
                pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
                pvt.pre_vol(vol_name=created_vol_name, vol_format=vol_format,
                            capacity=capacity, allocation=None,
                            pool_name=pool_name)
        except Exception as pool_exception:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **virsh_dargs)
            test.error("Error occurred when prepare" +
                       "pool xml with message %s:\n" % str(pool_exception))

        def get_vol():
            """Get the volume info"""
            # Refresh the pool
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(pool_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            vol_list = re.findall(r"(\S+)\ +(\S+)",
                                  str(cmd_result.stdout.strip()))
            try:
                return vol_list[1]
            except IndexError:
                return None
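        # Note: the regex above also matches the "Name Path" header line
        # of 'virsh vol-list', so vol_list[0] is the header pair and
        # vol_list[1] is the first real (name, path) tuple.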
        # Wait for a while so that we can get the volume info
        vol_info = utils_misc.wait_for(get_vol, 10)
        if vol_info:
            tmp_vol_name, tmp_vol_path = vol_info
        else:
            test.error("Failed to get volume info")
        process.run('qemu-img create -f qcow2 %s %s' % (tmp_vol_path, '100M'),
                    shell=True)
        return vol_info

    def check_disk_source(vm_name, target_dev, expect_value):
        """
        Check the disk source: file and startupPolicy.

        :param vm_name: Domain name
        :param target_dev: Disk's target device
        :param expect_value: Expect value of source file and source startupPolicy
        """
        logging.debug("Expect source file is '%s'", expect_value[0])
        logging.debug("Expect source startupPolicy is '%s'", expect_value[1])
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.get_disk_all()
        source_value = []
        try:
            disk_source = disks[target_dev].find('source')
            source_value.append(disk_source.get('file'))
            source_value.append(disk_source.get('startupPolicy'))
        except KeyError:
            test.error("No %s in domain %s" % (target_dev, vm_name))
        logging.debug("Actual source file is '%s'", source_value[0])
        logging.debug("Actual source startupPolicy is '%s'", source_value[1])
        if source_value == expect_value:
            logging.debug("Domain disk XML check passed")
        else:
            test.error("Domain disk XML check failed")

    def create_disk_xml():
        """
        Create a disk xml file for attaching to a domain.
        """
        if disk_type == "file":
            process.run("qemu-img create %s %s" % (media_file, image_size), shell=True)
        disk_params = {'device_type': device_type,
                       'type_name': disk_type,
                       'target_dev': target_dev,
                       'target_bus': target_bus}
        if disk_type == "file":
            disk_params_src = {'source_protocol': "file",
                               'source_file': media_file,
                               'source_startupPolicy': startup_policy}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': pool_name,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_startupPolicy': startup_policy}
            if pool_type == "iscsi":
                disk_params_src.update({'source_mode': "host"})
        disk_params.update(disk_params_src)
        disk_xml = libvirt.create_disk_xml(disk_params)
        shutil.copyfile(disk_xml, disk_xml_file)
        return disk_xml
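    # For a file-backed device, the XML produced above looks roughly like
    # this (device, target and policy values depend on params):
    #   <disk type='file' device='cdrom'>
    #     <source file='/tmp/<media_name>' startupPolicy='optional'/>
    #     <target dev='hdc' bus='ide'/>
    #   </disk>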

    def check_in_vm(old_parts):
        """
        Check mount/read/write disk in VM.

        :param old_parts: pre-operated partitions in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            logging.debug("new parted:%s", new_parts)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            added_part = added_parts[0]
            if not added_part:
                logging.error("Can't see added partition in VM")
                return False
            if 'sr' not in added_part and 'fd' not in added_part:
                cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                       "mkdir -p test && mount /dev/{0} test && echo"
                       " teststring > test/testfile && umount test"
                       .format(added_part))
                status, output = session.cmd_status_output(cmd)
                logging.info("Check disk operation in VM:\n%s", output)
                if status != 0:
                    return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_policy_update(origin_policy, policy_list, xml_policy_file, device_type, flag_str):
        """
        Check updated policy after executing virsh update-device.

        :param origin_policy: the inherit startup policy value.
        :param policy_list: updated policy list.
        :param xml_policy_file: xml file for startupPolicy.
        :param device_type: device type, cdrom or disk, etc.
        :param flag_str: it can be --config, --live or --persistent.
        """
        for policy in policy_list:
            xmltreefile = XMLTreeFile(xml_policy_file)
            try:
                policy_item = xmltreefile.find('/source')
                policy_item.set('startupPolicy', policy)
            except AttributeError as elem_attr:
                test.error("Fail to find startupPolicy attribute.%s", str(elem_attr))
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_str, debug=True)
            if all([device_type == "disk", policy == "requisite"]):
                libvirt.check_exit_status(ret, True)
                return
            else:
                libvirt.check_exit_status(ret)

            def check_policy_value(active_policy, inactive_policy):
                """
                Check policy value in dumpxml with active or inactive option

                :param active_policy: active policy attribute value
                :param inactive_policy: inactive policy attribute value
                """
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                disk_list = vmxml.devices.by_device_tag("disk")
                disk = disk_list[len(disk_list)-1]
                if not active_policy == disk.source.attrs["startupPolicy"]:
                    test.error("Actual policy:%s in active state is not equal to expected:%s"
                               % (active_policy, disk.source.attrs["startupPolicy"]))
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                disk_list = vmxml.devices.by_device_tag("disk")
                disk = disk_list[len(disk_list)-1]
                if not inactive_policy == disk.source.attrs["startupPolicy"]:
                    test.error("Actual policy:%s in inactive state is not equal to expected: %s"
                               % (inactive_policy, disk.source.attrs["startupPolicy"]))
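            # Flag semantics encoded below: --live changes only the active
            # XML, --config only the inactive XML, --persistent both.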
            if flag_str == "--live":
                check_policy_value(policy, origin_policy)
            elif flag_str == "--config":
                check_policy_value(origin_policy, policy)
            elif flag_str == "--persistent":
                check_policy_value(policy, policy)

    def check_source_update(xml_policy_file):
        """
        Update source and policy at the same time, then check those changes.

        :param xml_policy_file: VM xml policy file
        """
        xmltreefile = XMLTreeFile(xml_policy_file)
        policy_item = xmltreefile.find('/source')

        def configure_startup_policy(update=False, policy='optional'):
            """
            Configure startupPolicy attribute value.

            :param update: update value or not
            :param policy: policy value
            :return: flag_option and boolean value
            """
            if update:
                del policy_item.attrib["startupPolicy"]
            else:
                policy_item.set("startupPolicy", policy)
            flag_option = "--live"
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            return flag_option, False

        # Update source and startupPolicy attribute values.
        def update_source_policy(update=True, policy='optional'):
            """
            Update startupPolicy source value.

            :param update: update value or not
            :param policy: policy value
            :return: flag_option and boolean value
            """
            source_file = policy_item.get('file')
            if update:
                new_source_file = source_file+".empty"
            else:
                new_source_file = source_file+".new"
            shutil.copyfile(source_file, new_source_file)
            policy_item.set("file", new_source_file)
            policy_item.set("startupPolicy", policy)
            flag_option = "--persistent"
            xmltreefile.write(xml_policy_file, encoding="UTF-8")
            return flag_option, False

        function_list = [configure_startup_policy, update_source_policy,
                         configure_startup_policy, update_source_policy]
        function_parameter = [False, False, True, True]
        # Loop all above scenarios to update device.
        for index in list(range(len(function_list))):
            try:
                func = function_list[index]
                para = function_parameter[index]
                flag_option, update_error = func(para)
                ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_option, debug=True)
                libvirt.check_exit_status(ret, expect_error=update_error)
            except AttributeError as elem_attr:
                test.error("Fail to remove startupPolicy attribute:%s" % str(elem_attr))
            except Exception as update_device_exception:
                test.error("Fail to update device:%s" % str(update_device_exception))
            finally:
                source_file = policy_item.get('file')
                new_source_file = source_file+".new"
                if os.path.exists(new_source_file):
                    os.remove(new_source_file)

    def rename_file(source_file, target_file, revert=False):
        """
        Rename a file or revert it.

        :param source_file: The source file name.
        :param target_file: The target file name.
        :param revert: It can be True or False.
        """
        try:
            if not revert:
                os.rename(source_file, target_file)
                logging.debug("Rename %s to %s", source_file, target_file)
            else:
                os.rename(target_file, source_file)
                logging.debug("Rename %s to %s", target_file, source_file)
        except OSError as err:
            test.fail("Rename image failed: %s" % str(err))

    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Get start/restore configuration parameters.
    start_error = "yes" == params.get("start_error", "no")
    restore_error = "yes" == params.get("restore_error", "no")
    virsh_dargs = {'debug': True, 'ignore_status': True}
    attach_option = params.get("attach_option")

    # Create disk xml and attach it.
    device_type = params.get("device_type")
    disk_type = params.get("disk_type", "network")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    target_dev = params.get("target_dev")
    target_bus = params.get("disk_target_bus", "virtio")
    image_size = params.get("image_size", "1.44M")
    emulated_image = "emulated-iscsi"

    # Storage pool and volume related parameters.
    pool_name = params.get("pool_name", "iscsi_pool")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    vol_name = params.get("volume_name")
    capacity = params.get("volume_size", "1048576")
    vol_format = params.get("volume_format")

    # Source file parameters.
    media_name = params.get("media_name")
    media_file = os.path.join(data_dir.get_tmp_dir(), media_name)
    media_file_new = media_file + ".new"
    save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
    snapshot_name = "s1"

    # Policy related parameters.
    disk_xml_file = os.path.join(data_dir.get_tmp_dir(), "attach_disk.xml")
    disk_xml_policy_file = os.path.join(data_dir.get_tmp_dir(), "attach_policy_disk.xml")
    update_policy = "yes" == params.get("update_policy", "no")
    policy_only = "yes" == params.get("policy_only", "no")
    update_policy_list = params.get("update_policy_list").split()
    expect_value = [None, startup_policy]
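    # i.e. after the source file is removed, dumpxml is expected to show no
    # source file while keeping the startupPolicy attribute unchanged.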

    try:
        if disk_type == "volume":
            pvt = libvirt.PoolVolumeTest(test, params)
            vol_name, vol_path = create_volume(pvt, vol_name)
            vol_path_new = vol_path + ".new"

        # Create disk xml.
        create_disk_xml()
        if vm.is_alive():
            vm.destroy()
        try:
            # Backup disk xml file for policy update if update_policy=True.
            if update_policy:
                shutil.copyfile(disk_xml_file, disk_xml_policy_file)
            result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml_file,
                                         flagstr="--config", **virsh_dargs)
            # For an iSCSI pool volume, the startupPolicy attribute is not
            # valid. Moreover, setting 'requisite' on a disk is allowed only
            # for cdrom or floppy.
            if pool_type == "iscsi" or all([device_type == "disk", startup_policy == "requisite"]):
                libvirt.check_exit_status(result, expect_error=True)
                return
            else:
                libvirt.check_exit_status(result, expect_error=False)
        except Exception as attach_device_exception:
            logging.debug("Attach device throws exception:%s", str(attach_device_exception))
            os.remove(media_file)
            test.error("Attach %s fail" % device_type)
        # Check update policy operations.
        if disk_type == "file" and update_policy:
            vm.start()
            if policy_only:
                check_policy_update(startup_policy, update_policy_list,
                                    disk_xml_policy_file, device_type, attach_option)
            else:
                check_source_update(disk_xml_policy_file)
        elif disk_type == "file":
            # Step 1. Start domain and destroy it normally
            vm.start()
            vm.destroy()

            # Step 2. Remove the source_file then start the domain
            rename_file(media_file, media_file_new)
            result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=start_error)

            # For libvirt version >= 2.0.0, the feature was updated and the
            # startupPolicy attribute can no longer exist alone without the
            # source.
            if not start_error and not libvirt_version.version_compare(2, 0, 0):
                check_disk_source(vm_name, target_dev, expect_value)

            # Step 3. Move back the source file and start the domain(if needed).
            rename_file(media_file, media_file_new, revert=True)
            if not vm.is_alive():
                vm.start()

            # Step 4. Save the domain normally, then remove the source file
            # and restore it back
            vm.save_to_file(save_file)
            rename_file(media_file, media_file_new)
            result = virsh.restore(save_file, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=restore_error)
            if not restore_error and not libvirt_version.version_compare(2, 0, 0):
                check_disk_source(vm_name, target_dev, expect_value)

            # Step 5. Move back the source file and restore the domain(if needed)
            rename_file(media_file, media_file_new, revert=True)
            if not vm.is_alive():
                result = virsh.restore(save_file, **virsh_dargs)
                libvirt.check_exit_status(result, expect_error=False)
        elif disk_type == "volume":
            # Step 1. Start the domain and verify the new disk in the VM.
            vm.start()
            if not check_in_vm(old_parts):
                test.fail("Check disk partitions in VM failed")

            # Step 2. Move the volume to another place, refresh the pool,
            # then reboot the guest.
            rename_file(vol_path, vol_path_new)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            vm.destroy()
            result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=start_error)

            # Step 3 Move back the source file and start.
            rename_file(vol_path, vol_path_new, revert=True)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            if not vm.is_alive():
                vm.start()

            # Step 4. Save the domain normally, then remove the source
            # file, then restore the domain.
            vm.save_to_file(save_file)
            rename_file(vol_path, vol_path_new)
            cmd_result = virsh.pool_refresh(pool_name)
            libvirt.check_exit_status(cmd_result)
            result = virsh.restore(save_file, **virsh_dargs)
            libvirt.check_exit_status(result, expect_error=restore_error)

            # Step 5. Create a snapshot, move the source to another place,
            # then revert the snapshot.
            if device_type == "disk":
                rename_file(vol_path, vol_path_new, revert=True)
                cmd_result = virsh.pool_refresh(pool_name)
                libvirt.check_exit_status(cmd_result)
                if restore_error:
                    result = virsh.restore(save_file, **virsh_dargs)
                    libvirt.check_exit_status(result)
                ret = virsh.snapshot_create_as(vm_name, snapshot_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                rename_file(vol_path, vol_path_new)
                ret = virsh.snapshot_revert(vm_name, snapshot_name, **virsh_dargs)
                # Clean up snapshot.
                libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()

        if disk_type == "volume":
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **virsh_dargs)
        if os.path.exists(save_file):
            os.remove(save_file)
        if os.path.exists(disk_xml_file):
            os.remove(disk_xml_file)
        if os.path.exists(media_file):
            os.remove(media_file)
        if os.path.exists(disk_xml_policy_file):
            os.remove(disk_xml_policy_file)