Example #1
    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if status_error:
            if checkpoint == 'length_of_error':
                log_lines = output.split('\n')
                v2v_start = False
                for line in log_lines:
                    if line.startswith('virt-v2v:'):
                        v2v_start = True
                    if line.startswith('libvirt:'):
                        v2v_start = False
                    if v2v_start and len(line) > 72:
                        raise exceptions.TestFail(
                            'Error log longer than 72 '
                            'characters: %s' % line)
            else:
                error_map = {
                    'conflict_options': ['option used more than once'],
                    'xen_no_output_format':
                    ['The input metadata did not define'
                     ' the disk format']
                }
                if not utils_v2v.check_log(output, error_map[checkpoint]):
                    raise exceptions.TestFail('Expected error message not '
                                              'found: %s' % error_map[checkpoint])
        else:
            if output_mode == "rhev" and checkpoint != 'quiet':
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                check_ovf_snapshot_id(ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                img_path = get_img_path(output)

                def check_alloc():
                    try:
                        check_image(img_path, "allocation", expected_mode)
                        return True
                    except exceptions.TestFail:
                        pass

                if not utils_misc.wait_for(check_alloc, timeout=600,
                                           step=10.0):
                    raise exceptions.TestFail('Allocation check failed.')
            if '-of' in cmd and '--no-copy' not in cmd and checkpoint != 'quiet':
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                img_path = get_img_path(output)
                check_image(img_path, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    raise exceptions.TestFail("Import VM failed")
                else:
                    params['vmcheck_flag'] = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options:
                    virsh.start(vm_name, debug=True, ignore_status=False)
            if checkpoint == 'quiet':
                if len(output.strip()) != 0:
                    raise exceptions.TestFail(
                        'Output is not empty in quiet mode')
            if checkpoint == 'dependency':
                if 'libguestfs-winsupport' not in output:
                    raise exceptions.TestFail(
                        'libguestfs-winsupport not in dependency')
                if 'qemu-kvm-rhev' in output:
                    raise exceptions.TestFail('qemu-kvm-rhev is in dependency')
                win_img = params.get('win_image')
                command = 'guestfish -a %s -i'
                if process.run(command % win_img,
                               ignore_status=True).exit_status == 0:
                    raise exceptions.TestFail('Command "%s" succeeded '
                                              'unexpectedly' % (command % win_img))
            if checkpoint == 'no_dcpath':
                if not utils_v2v.check_log(output, ['--dcpath'], expect=False):
                    raise exceptions.TestFail('"--dcpath" is not removed')
Example #2
 def test_get_license(self):
     resp = self.client.get_license()
     if not resp:
         raise exceptions.TestFail('No license found!')
Example #3
 def test_query(self):
     if self.cluster_id is not None:
         resp = self.client.query(self.cluster_id)
      if not resp:
         raise exceptions.TestFail("Query clusters conf failed")
     LOG.info("Got all pools: %s" % resp)
Example #4
 def test_query(self):
     # Test query pools in a specified cluster
     resp = self.client.query()
      if not resp:
         raise exceptions.TestFail("Query pools failed")
     LOG.info("Got all pools: %s" % resp)
 def check_result(disk_source, disk_type, disk_target, flags, attach=True):
     """
     Check the test result of attach/detach-device command.
     """
     active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
     if not attach:
         utils_misc.wait_for(
             lambda: not is_attached(active_vmxml.devices, disk_type,
                                     disk_source, disk_target), 20)
     active_attached = is_attached(active_vmxml.devices, disk_type,
                                   disk_source, disk_target)
     vm_state = pre_vm_state
     if vm_state != "transient":
         inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(
             vm_name, options="--inactive")
         inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
                                         disk_source, disk_target)
     if flags.count("config") and not flags.count("live"):
         if vm_state != "transient":
             if attach:
                 if not inactive_attached:
                     raise exceptions.TestFail(
                         "Inactive domain XML not updated"
                         " when --config options used for"
                         " attachment")
                 if vm_state != "shutoff":
                     if active_attached:
                         raise exceptions.TestFail(
                             "Active domain XML updated "
                             "when --config options used "
                             "for attachment")
             else:
                 if inactive_attached:
                     raise exceptions.TestFail(
                         "Inactive domain XML not updated"
                         " when --config options used for"
                         " detachment")
     elif flags.count("live") and not flags.count("config"):
         if attach:
             if vm_state in ["paused", "running", "transient"]:
                 if not active_attached:
                     raise exceptions.TestFail(
                         "Active domain XML not updated"
                         " when --live options used for"
                         " attachment")
             if vm_state in ["paused", "running"]:
                 if inactive_attached:
                     raise exceptions.TestFail(
                         "Inactive domain XML updated "
                         "when --live options used for"
                         " attachment")
         else:
             if vm_state in ["paused", "running", "transient"]:
                 if active_attached:
                     raise exceptions.TestFail(
                         "Active domain XML not updated"
                         " when --live options used for"
                         " detachment")
     elif flags.count("live") and flags.count("config"):
         if attach:
             if vm_state in ["paused", "running"]:
                 if not active_attached:
                     raise exceptions.TestFail(
                         "Active domain XML not updated"
                         " when --live --config options"
                         " used for attachment")
                 if not inactive_attached:
                     raise exceptions.TestFail(
                         "Inactive domain XML not updated"
                         " when --live --config options "
                         "used for attachment")
         else:
             if vm_state in ["paused", "running"]:
                 if active_attached:
                     raise exceptions.TestFail(
                         "Active domain XML not updated "
                         "when --live --config options "
                         "used for detachment")
                 if inactive_attached:
                     raise exceptions.TestFail(
                         "Inactive domain XML updated "
                         "when --live --config options"
                         " used for detachment")
     elif flags.count("current") or flags == "":
         if attach:
             if vm_state in ["paused", "running", "transient"]:
                 if not active_attached:
                     raise exceptions.TestFail(
                         "Active domain XML not updated"
                         " when --current options used "
                         "for attachment")
             if vm_state in ["paused", "running"]:
                 if inactive_attached:
                     raise exceptions.TestFail(
                         "Inactive domain XML updated "
                         "when --current options used "
                         "for live attachment")
             if vm_state == "shutoff" and not inactive_attached:
                 raise exceptions.TestFail(
                     "Inactive domain XML not updated"
                     " when --current options used for"
                     " attachment")
         else:
             if vm_state in ["paused", "running", "transient"]:
                 if active_attached:
                     raise exceptions.TestFail(
                         "Active domain XML not updated"
                         " when --current options used "
                         "for detachment")
             if vm_state == "shutoff" and inactive_attached:
                 raise exceptions.TestFail(
                     "Inactive domain XML not updated"
                     " when --current options used for"
                     " detachment")
Example #6
    def run_debug(self):
        """
        Similar to run_normal, but it additionally stores the last n
        verified characters; in case of failure it quickly receives
        enough data to verify the failure or allowed loss, and then
        analyzes this data to provide more info about the situation.
        Unlike the normal run, this one supports both loss and
        duplication. It's not friendly to data corruption.
        """
        logging.debug("ThRecvCheck %s: run", self.getName())
        attempt = 10
        max_loss = 0
        sum_loss = 0
        verif_buf = deque(maxlen=max(self.blocklen, self.sendlen))
        while not self.exitevent.isSet():
            ret = select.select([self.port.sock], [], [], 1.0)
            if ret[0] and (not self.exitevent.isSet()):
                buf = self.port.sock.recv(self.blocklen)
                if buf:
                    # Compare the received data with the control data
                    for idx_char in xrange(len(buf)):
                        _char = self.buff.popleft()
                        if buf[idx_char] == _char:
                            self.idx += 1
                            verif_buf.append(_char)
                        else:
                            # Detect the duplicated/lost characters.
                            logging.debug(
                                "ThRecvCheck %s: failed to receive the "
                                "%dth character.", self.getName(), self.idx)
                            buf = buf[idx_char:]
                            for i in xrange(100):
                                if len(self.buff) < self.sendidx:
                                    time.sleep(0.01)
                                else:
                                    break
                            sendidx = min(self.sendidx, len(self.buff))
                            if sendidx < self.sendidx:
                                logging.debug(
                                    "ThRecvCheck %s: sendidx was "
                                    "lowered as there is not enough "
                                    "data after 1s. Using sendidx="
                                    "%s.", self.getName(), sendidx)
                            for _ in xrange(sendidx / self.blocklen):
                                if self.exitevent.isSet():
                                    break
                                buf += self.port.sock.recv(self.blocklen)
                            queue = _char
                            for _ in xrange(sendidx):
                                queue += self.buff[_]
                            offset_a = None
                            offset_b = None
                            for i in xrange(sendidx):
                                length = min(len(buf[i:]), len(queue))
                                if buf[i:] == queue[:length]:
                                    offset_a = i
                                    break
                            for i in xrange(sendidx):
                                length = min(len(queue[i:]), len(buf))
                                if queue[i:][:length] == buf[:length]:
                                    offset_b = i
                                    break

                            if (offset_b and offset_b < offset_a) or offset_a:
                                # Data duplication
                                self.sendidx -= offset_a
                                max_loss = max(max_loss, offset_a)
                                sum_loss += offset_a
                                logging.debug(
                                    "ThRecvCheck %s: DUP %s (out of "
                                    "%s)", self.getName(), offset_a, sendidx)
                                buf = buf[offset_a + 1:]
                                for _ in xrange(len(buf)):
                                    self.buff.popleft()
                                verif_buf.extend(buf)
                                self.idx += len(buf)
                            elif offset_b:  # Data loss
                                max_loss = max(max_loss, offset_b)
                                sum_loss += offset_b
                                logging.debug(
                                    "ThRecvCheck %s: LOST %s (out of"
                                    " %s)", self.getName(), offset_b, sendidx)
                                # Pop-out the lost characters from verif_queue
                                # (first one is already out)
                                self.sendidx -= offset_b
                                for i in xrange(offset_b - 1):
                                    self.buff.popleft()
                                for _ in xrange(len(buf)):
                                    self.buff.popleft()
                                self.idx += len(buf)
                                verif_buf.extend(buf)
                            else:  # Too big data loss or duplication
                                verif = ""
                                for _ in xrange(-min(sendidx, len(verif_buf)),
                                                0):
                                    verif += verif_buf[_]
                                logging.error(
                                    "ThRecvCheck %s: mismatched data"
                                    ":\nverified: ..%s\nreceived:   "
                                    "%s\nsent:       %s", self.getName(),
                                    repr(verif), repr(buf), repr(queue))
                                raise exceptions.TestFail(
                                    "Recv and sendqueue "
                                    "don't match with any offset.")
                            # buf was changed, break from this loop
                            attempt = 10
                            break
                    attempt = 10
                else:  # ! buf
                    # Broken socket
                    if attempt > 0:
                        attempt -= 1
                        if self.migrate_event is None:
                            self.exitevent.set()
                            raise exceptions.TestFail(
                                "ThRecvCheck %s: Broken pipe."
                                " If this is expected behavior set migrate"
                                "_event to support reconnection." %
                                self.getName())
                        logging.debug(
                            "ThRecvCheck %s: Broken pipe, "
                            "reconnecting.", self.getName())
                        self.reload_loss_idx()
                        # Wait until main thread sets the new self.port
                        while not (self.exitevent.isSet()
                                   or self.migrate_event.wait(1)):
                            pass
                        if self.exitevent.isSet():
                            break
                        logging.debug(
                            "ThRecvCheck %s: Broken pipe resumed, "
                            "reconnecting...", self.getName())

                        self.port.sock = False
                        self.port.open()
        if self.sendidx >= 0:
            self.minsendidx = min(self.minsendidx, self.sendidx)
        if (self.sendlen - self.minsendidx):
            logging.debug(
                "ThRecvCheck %s: Data loss occurred during socket "
                "reconnection. Maximal loss was %d per one "
                "migration.", self.getName(), (self.sendlen - self.minsendidx))
        if sum_loss > 0:
            logging.debug(
                "ThRecvCheck %s: Data offset detected, cumulative "
                "err: %d, max err: %d(%d)", self.getName(), sum_loss, max_loss,
                float(max_loss) / self.blocklen)
        logging.debug("ThRecvCheck %s: exit(%d)", self.getName(), self.idx)
        self.ret_code = 0
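The mismatch handler above re-aligns the received buffer against the
expected queue to tell duplication from loss (offset_a vs. offset_b). A
standalone sketch of that idea on plain strings (sample data made up):

    def classify_mismatch(received, expected):
        """Classify a stream mismatch as duplication or loss (sketch)."""
        # Duplication: dropping the first i received bytes re-aligns the
        # stream, so those i bytes were re-received duplicates.
        for i in range(1, len(received)):
            if expected.startswith(received[i:]):
                return 'DUP', i
        # Loss: skipping the first i expected bytes re-aligns the stream,
        # so those i bytes never arrived.
        for i in range(1, len(expected)):
            if received == expected[i:i + len(received)]:
                return 'LOST', i
        return 'MISMATCH', None

    assert classify_mismatch('aabcde', 'abcdef') == ('DUP', 1)   # 'a' doubled
    assert classify_mismatch('cdef', 'abcdef') == ('LOST', 2)    # 'ab' lost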
Example #7
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    check_finish_job = "yes" == params.get("check_finish_job", "yes")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    relative_path = params.get("relative_path")
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    snapshots_take = int(params.get("snapshots_take", '0'))
    external_disk_only_snapshot = "yes" == params.get("external_disk_only_snapshot", "no")

    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")
    if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0):
        test.cancel("Forbid using relative path or file name only is added since libvirt-3.0.0")

    if "--transient-job" in options and not libvirt_version.version_compare(4, 5, 0):
        test.cancel("--transient-job option is supported until libvirt 4.5.0 version")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" % (target,
                                                                  vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user,
                  'debug': True, 'ignore_status': True, 'timeout': timeout}

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" %
                  libvirtd_conf)
    libvirtd_utl.restart()
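    # For reference, the two settings above should render into
    # libvirtd.conf roughly as follows (a sketch; the exact quoting is
    # handled by utils_config.LibvirtdConfig):
    #   log_filters="3:json 1:libvirt 1:qemu"
    #   log_outputs="1:file:<tmp_dir>/libvirtd.log"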

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here. Note that str.strip() removes a set
        # of characters rather than a suffix, so slice it off explicitly.
        if dest_extension and dest_path.endswith(dest_extension):
            path_noext = dest_path[:-len(dest_extension)]
        else:
            path_noext = dest_path
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" % (dest_path,
                                                               expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fail with block-job-complete error or
        blockcopy hang with state change lock.
        This verifies a specific bug, so status_error is ignored here.
        """
        failure_msg = ""
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            failure_msg += "Virsh cmd error happened: %s\n" % err_msg
        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            failure_msg += "Libvirtd log error happened: %s\n" % err_pattern
        if failure_msg:
            if not libvirt_version.version_compare(1, 3, 2):
                bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
                failure_msg += "Hit on bug: %s " % bug_url_
            test.fail(failure_msg)
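    # chk_libvirtd_log() is a suite helper that is not defined in this
    # snippet. A minimal equivalent would scan the log for a level marker
    # plus the pattern, e.g. (a sketch, signature assumed):
    #   def chk_libvirtd_log(path, pattern, level):
    #       with open(path) as logfile:
    #           return any(level in line and pattern in line
    #                      for line in logfile)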

    def _make_snapshot(snapshot_numbers_take):
        """
        Make external disk snapshot

        :param snapshot_numbers_take: number of snapshots to take.
        """
        for count in range(0, snapshot_numbers_take):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "blockcopy_snap"
            snap_xml.snap_name = snapshot_name + "_%s" % count
            snap_xml.description = "blockcopy snapshot"

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            # Remove non-storage disks such as 'cdrom'. Filter into a new
            # list: removing items from a list while iterating over it
            # would skip elements.
            disks = [disk for disk in disks if disk.device == 'disk']
            new_disks = []
            src_disk_xml = disks[0]
            disk_xml = snap_xml.SnapDiskXML()
            disk_xml.xmltreefile = src_disk_xml.xmltreefile
            del disk_xml.device
            del disk_xml.address
            disk_xml.snapshot = "external"
            disk_xml.disk_name = disk_xml.target['dev']

            # Only qcow2 works as external snapshot file format, update it
            # here
            driver_attr = disk_xml.driver
            driver_attr.update({'type': 'qcow2'})
            disk_xml.driver = driver_attr

            new_attrs = disk_xml.source.attrs
            if 'file' in disk_xml.source.attrs:
                new_file = os.path.join(tmp_dir, "blockcopy_shallow_%s.snap" % count)
                snapshot_external_disks.append(new_file)
                new_attrs.update({'file': new_file})
                hosts = None
            elif ('dev' in disk_xml.source.attrs or
                  'name' in disk_xml.source.attrs or
                  'pool' in disk_xml.source.attrs):
                if (disk_xml.type_name == 'block' or
                        disk_source_protocol == 'iscsi'):
                    disk_xml.type_name = 'block'
                    if 'name' in new_attrs:
                        del new_attrs['name']
                        del new_attrs['protocol']
                    elif 'pool' in new_attrs:
                        del new_attrs['pool']
                        del new_attrs['volume']
                        del new_attrs['mode']
                    back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                           is_login=True,
                                                           image_size="1G",
                                                           emulated_image=back_n)
                    emulated_iscsi.append(back_n)
                    cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                    process.run(cmd, shell=True)
                    new_attrs.update({'dev': back_path})
                    hosts = None

            new_src_dict = {"attrs": new_attrs}
            if hosts:
                new_src_dict.update({"hosts": hosts})
            disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

            new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)

            if snapshot_result.exit_status != 0:
                raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk shows up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Replace the domain disk with the desired type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of
            # cleanup jobs after the test, such as pool, volume, nfs,
            # iscsi and so on
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow or external_disk_only_snapshot:
            _make_snapshot(snapshots_take)

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine("--nvram")
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target,
                                         dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout.strip() + cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        if "--transient-job" in options:
            pool = ThreadPool(processes=1)
            async_result = pool.apply_async(blockcopy_thread, (vm_name, target, dest_path, options))
            kill_blockcopy_process()
            utl.check_blockjob(vm_name, target)
            return

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                     options, **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not (bandwidth in ['0B', '0M']) and not utl.check_blockjob(vm_name, target, "bandwidth",
                                                                                  bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for job finish when using the --bytes option
                val += options.count('--bytes')
                if val == 0 and check_finish_job:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name, save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run 'blockjob --pivot' in a subprocess as it may
                    # hang for a while; then run 'blockjob --info' again
                    # to check the job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() + cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when
                # unexpectedly failing due to timeout, from a failure (1)
                # to a success (0), so we need to look for a different
                # marker to indicate the copy aborted. As "stdout: Now
                # in mirroring phase" could appear in stdout and fail the
                # check, also check the libvirtd log to confirm.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip()) or
                            chk_libvirtd_log(libvirtd_log_path,
                                             log_pattern, "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recovering the VM may fail unexpectedly, so use try/except to
        # proceed with the following cleanup steps
        try:
            # Abort any existing blockjob to avoid possible lock errors
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shut down a VM that has a
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name,
                                       ignore_status=True).exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before next loop
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd')
        except path.CmdNotFoundError:
            pass
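The suffix handling in check_format() above (and in Example #11 below)
guards against a classic pitfall: str.strip() removes a set of characters
from both ends, not a trailing substring. A small illustration with a
made-up file name:

    assert "data.raw".strip(".raw") == "dat"    # 'a' is in {'.', 'r', 'a', 'w'}
    assert "data.raw"[:-len(".raw")] == "data"  # suffix-safe slicing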
Example #8
class BallooningTestPause(BallooningTest):
    """
    Basic functions of memory ballooning test for guest booted
    in paused status
    """
    def __init__(self, test, params, env):
        super(BallooningTest, self).__init__(test, params, env)

        self.vm = env.get_vm(params["main_vm"])
        self.ori_mem = self.get_vm_mem(self.vm)
        if self.get_ballooned_memory() != self.ori_mem:
            self.balloon_memory(self.ori_mem)
        self.old_mmem = self.ori_mem
        self.old_gmem = None

    @error_context.context_aware
    def memory_check(self, step, changed_mem):
        """
        Check the memory change status in the monitor and return the
        memory size reported both by the guest and by the monitor.

        :param step: the check point string
        :type step: string
        :param changed_mem: memory ballooned in the current step (compared
                            with the last round, i.e. self.old_mmem, not
                            self.ori_mem)
        :type changed_mem: int
        :return: memory sizes got from the monitor and the guest
        :rtype: tuple
        """
        error_context.context("Check memory status %s" % step, logging.info)
        mmem = self.get_ballooned_memory()
        gmem = self.get_memory_status()
        if (abs(mmem - self.old_mmem) != changed_mem or
                (self.old_gmem and
                 abs(gmem - self.old_gmem) - changed_mem > 100)):
            self.error_report(step, abs(self.old_mmem - changed_mem), mmem,
                              gmem)
            raise exceptions.TestFail("Balloon test failed %s" % step)
        return (mmem, gmem)

    @error_context.context_aware
    def balloon_memory(self, new_mem):
        """
        Balloon guest memory to new_mem

        :param new_mem: New desired memory.
        :type new_mem: int
        """
        error_context.context("Change VM memory to %s" % new_mem, logging.info)
        try:
            self.vm.balloon(new_mem)
        except Exception as e:
            if self.vm.monitor.verify_status('paused'):
                # Make sure memory not changed before the guest resumed
                if self.get_ballooned_memory() != self.ori_mem:
                    raise exceptions.TestFail("Memory changed before guest "
                                              "resumed")

                logging.info("Resume the guest")
                self.vm.resume()
            elif new_mem == self.get_ballooned_memory():
                pass
            else:
                raise exceptions.TestFail("Balloon memory fail with error"
                                          " message: %s" % e)
        compare_mem = new_mem
        balloon_timeout = float(self.params.get("balloon_timeout", 240))
        status = utils_misc.wait_for(
            (lambda: compare_mem == self.get_ballooned_memory()),
            balloon_timeout)
        if status is None:
            raise exceptions.TestFail("Failed to balloon memory to expect"
                                      " value during %ss" % balloon_timeout)
Example #9
def run(test, params, env):
    """
    Balloon guest memory when the guest is started in paused status;
    memory sizes in this script are compared in M (MiB):
    1) Boot a guest with balloon enabled and in paused status,
    i.e. '-S' used but not cont
    2) Evict guest memory in paused status, cont the guest;
    check memory in monitor
    3) To check whether the guest memory balloon works well after the
    above test, continue to do:
    3.1) Enlarge guest memory in running status;
    check memory both in guest and monitor
    3.2) Evict guest memory in running status;
    check memory both in guest and monitor
    4) Run subtest if necessary
    5) Reset memory back to the original value

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _memory_check_after_sub_test():
        """
        Check memory status after the subtest; the expected changed_mem is 0
        """
        try:
            return balloon_test.memory_check("after subtest", 0)
        except exceptions.TestFail:
            return None

    if params['os_type'] == 'windows':
        balloon_test = BallooningTestPauseWin(test, params, env)
    else:
        balloon_test = BallooningTestPauseLinux(test, params, env)

    min_sz, max_sz = balloon_test.get_memory_boundary()

    for tag in params.objects('test_tags'):
        vm = env.get_vm(params["main_vm"])
        if vm.monitor.verify_status('paused'):
            error_context.context(
                "Running balloon %s test when"
                " the guest is in paused status" % tag, logging.info)
        else:
            error_context.context(
                "Running balloon %s test after"
                " the guest has turned to running status" % tag, logging.info)
        params_tag = params.object_params(tag)
        balloon_type = params_tag['balloon_type']
        if balloon_type == 'evict':
            expect_mem = int(random.uniform(min_sz, balloon_test.old_mmem))
        else:
            expect_mem = int(random.uniform(balloon_test.old_mmem, max_sz))

        balloon_test.balloon_memory(expect_mem)
        changed_memory = abs(balloon_test.old_mmem - expect_mem)
        mmem, gmem = balloon_test.memory_check("after %s memory" % tag,
                                               changed_memory)
        balloon_test.old_mmem = mmem
        balloon_test.old_gmem = gmem

    subtest = params.get("sub_test_after_balloon")
    if subtest:
        error_context.context("Running subtest after guest balloon test",
                              logging.info)
        qemu_should_quit = balloon_test.run_balloon_sub_test(
            test, params, env, subtest)
        if qemu_should_quit == 1:
            return

        sleep_before_check = int(params.get("sleep_before_check", 0))
        timeout = int(params.get("balloon_timeout", 100)) + sleep_before_check
        msg = "Wait memory balloon back after %s " % subtest
        output = utils_misc.wait_for(_memory_check_after_sub_test, timeout,
                                     sleep_before_check, 5, msg)
        if output is None:
            raise exceptions.TestFail("Check memory status failed after "
                                      "subtest after %s seconds" % timeout)

    error_context.context(
        "Reset guest memory to the original value after all the "
        "tests", logging.info)
    balloon_test.reset_memory()
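The evict/enlarge targets above are drawn uniformly between the memory
boundaries and the last ballooned size. A tiny sketch with made-up bounds
(values in MiB, purely illustrative):

    import random

    min_sz, max_sz, old_mmem = 512, 8192, 4096   # hypothetical boundaries
    evict_target = int(random.uniform(min_sz, old_mmem))    # shrink guest
    enlarge_target = int(random.uniform(old_mmem, max_sz))  # grow guest
    assert min_sz <= evict_target <= old_mmem <= enlarge_target <= max_sz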
Example #10
             logging.debug("Expect error: %s", cmd_result.stderr)
         else:
             # Commit id '4c297728' changed how virsh exits when
             # unexpectedly failing due to timeout, from a failure (1)
             # to a success (0), so we need to look for a different
             # marker to indicate the copy aborted. As "stdout: Now
             # in mirroring phase" could appear in stdout and fail the
             # check, also check the libvirtd log to confirm.
             if options.count("--timeout") and options.count("--wait"):
                 log_pattern = "Copy aborted"
                 if (re.search(log_pattern, cmd_result.stdout)
                         or chk_libvirtd_log(libvirtd_log_path, log_pattern,
                                             "debug")):
                     logging.debug("Found success a timed out block copy")
             else:
                 raise exceptions.TestFail("Expect fail, but run "
                                           "successfully: %s" % bug_url)
 finally:
     # Recovering the VM may fail unexpectedly, so use try/except to
     # proceed with the following cleanup steps
     try:
         # Abort any existing blockjob to avoid possible lock errors
         virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
         vm.destroy(gracefully=False)
         # It may take a long time to shutdown the VM which has
         # blockjob running
         utils_misc.wait_for(
             lambda: virsh.domstate(vm_name, ignore_status=True).
             exit_status, 180)
         if virsh.domain_exists(vm_name):
             if active_snap or with_shallow:
                 option = "--snapshots-metadata"
Example #11
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" %
                                  (target, vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True,
        'ignore_status': True,
        'timeout': timeout
    }

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here. Note that str.strip() removes a set
        # of characters rather than a suffix, so slice it off explicitly.
        if dest_extension and dest_path.endswith(dest_extension):
            path_noext = dest_path[:-len(dest_extension)]
        else:
            path_noext = dest_path
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" %
                                      (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fail with block-job-complete error or
        blockcopy hang with state change lock.
        """
        bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

    def _blockcopy_cmd():
        """
        Run blockcopy command
        """
        cmd_result = virsh.blockcopy(vm_name, target, dest_path, options,
                                     **extra_dict)
        _blockjob_and_libvirtd_chk(cmd_result)
        if cmd_result.exit_status:
            return False
        elif "Copy aborted" in cmd_result.stdout:
            return False
        else:
            return cmd_result

    def _make_snapshot():
        """
        Make external disk snapshot
        """
        snap_xml = snapshot_xml.SnapshotXML()
        snapshot_name = "blockcopy_snap"
        snap_xml.snap_name = snapshot_name
        snap_xml.description = "blockcopy snapshot"

        # Add all disks into xml file.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        new_disks = []
        src_disk_xml = disks[0]
        disk_xml = snap_xml.SnapDiskXML()
        disk_xml.xmltreefile = src_disk_xml.xmltreefile
        del disk_xml.device
        del disk_xml.address
        disk_xml.snapshot = "external"
        disk_xml.disk_name = disk_xml.target['dev']

        # Only qcow2 works as external snapshot file format, update it
        # here
        driver_attr = disk_xml.driver
        driver_attr.update({'type': 'qcow2'})
        disk_xml.driver = driver_attr

        new_attrs = disk_xml.source.attrs
        if 'file' in disk_xml.source.attrs:
            new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap")
            snapshot_external_disks.append(new_file)
            new_attrs.update({'file': new_file})
            hosts = None
        elif (disk_xml.source.attrs.has_key('dev')
              or disk_xml.source.attrs.has_key('name')
              or disk_xml.source.attrs.has_key('pool')):
            if (disk_xml.type_name == 'block'
                    or disk_source_protocol == 'iscsi'):
                disk_xml.type_name = 'block'
                if 'name' in new_attrs:
                    del new_attrs['name']
                    del new_attrs['protocol']
                elif 'pool' in new_attrs:
                    del new_attrs['pool']
                    del new_attrs['volume']
                    del new_attrs['mode']
                back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size="1G",
                                                       emulated_image=back_n)
                emulated_iscsi.append(back_n)
                cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                process.run(cmd, shell=True)
                new_attrs.update({'dev': back_path})
                hosts = None

        new_src_dict = {"attrs": new_attrs}
        if hosts:
            new_src_dict.update({"hosts": hosts})
        disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

        new_disks.append(disk_xml)

        snap_xml.set_disks(new_disks)
        snapshot_xml_path = snap_xml.xml
        logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

        options = "--disk-only --xmlfile %s " % snapshot_xml_path

        snapshot_result = virsh.snapshot_create(vm_name, options, debug=True)

        if snapshot_result.exit_status != 0:
            raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desired type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of cleanup
            # jobs after the test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            utl.set_vm_disk(vm, params, tmp_dir, test)
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow:
            _make_snapshot()

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail")
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")
            cmd_result = utils_misc.wait_for(_blockcopy_cmd, 10)
            if not cmd_result:
                raise exceptions.TestFail("Run blockcopy command fail")
            status = 0
        else:
            cmd_result = virsh.blockcopy(vm_name, target, dest_path, options,
                                         **extra_dict)
            _blockjob_and_libvirtd_chk(cmd_result)
            status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not utl.check_blockjob(vm_name, target, "bandwidth",
                                              bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for the job to finish when using the --bytes option
                val += options.count('--bytes')
                if val == 0:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout, excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name,
                                                   snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name,
                                     save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run 'blockjob --pivot' in a subprocess as it will hang
                    # for a while, then run 'blockjob --info' again to check
                    # the job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stderr)
        else:
Example #12
0
def run(test, params, env):
    """
    Test hot unplug virtio serial devices.

     1) Start guest with virtio serial device(s).
     2) Load module in guest os.
     3) For each of the virtio serial ports, do following steps one by one:
     3.1) Unload module in guest
     3.2) Hot-unplug the virtio serial port
     3.3) Hotplug the devices
     3.4) Reload module in the guest
     4) Repeat steps 2-3 100 times
     5) Reboot VM to make sure the guest kernel does not panic.

    :param test:   QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env:    Dictionary with test environment.
    """
    def get_virtio_port_by_name(vm, name):
        """
        Get virtio port object by name in VM.

        :param name: name of the port
        """
        for device in vm.devices:
            if isinstance(device, qdevices.QDevice):
                if device.get_param("name") == name:
                    return device
        return None

    def get_virtio_port_name_by_params(params, tag):
        """
        Get virtio port name via params according to tag.

        :param params: test params.
        :param tag: port name or tag(eg, vc1).
        """
        prefix = params.get('virtio_port_name_prefix')
        index = params.objects("virtio_ports").index(tag)
        if prefix:
            return "%s%d" % (prefix, index)
        return tag

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    for repeat in xrange(1, int(params.get("repeat_times", 1)) + 1):
        session = vm.wait_for_login(timeout=timeout)
        module = params.get("modprobe_module")
        if module:
            error.context("Load module %s" % module, logging.info)
            session.cmd("modprobe %s" % module)
        for port in params.objects("virtio_ports"):
            port_params = params.object_params(port)
            port_name = get_virtio_port_name_by_params(port_params, port)
            virtio_port = get_virtio_port_by_name(vm, port_name)
            if not virtio_port:
                raise exceptions.TestFail("Virtio Port named '%s' not found" %
                                          port_name)
            chardev_qid = virtio_port.get_param("chardev")
            port_chardev = vm.devices.get_by_qid(chardev_qid)[0]
            if module:
                error.context("Unload module %s" % module, logging.info)
                session.cmd("modprobe -r %s" % module)
            error.context(
                "Unplug virtio port '%s' in %d tune(s)" % (port, repeat),
                logging.info)
            virtio_port.unplug(vm.monitor)
            if port_params.get("unplug_chardev") == "yes":
                error.context(
                    "Unplug chardev '%s' for virtio port '%s'" %
                    (chardev_qid, port), logging.info)
                port_chardev.unplug(vm.monitor)
                time.sleep(0.5)
                port_chardev.hotplug(vm.monitor)
            virtio_port.hotplug(vm.monitor)
            if module:
                error.context("Load  module %s" % module, logging.info)
                session.cmd("modprobe %s" % module)
        session.close()
    vm.reboot()
    session = vm.wait_for_login(timeout=timeout)
    session.close()
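
The port-name derivation above is the only non-obvious step; a minimal self-contained sketch of the same mapping (parameter layout assumed, not the avocado-vt API):

def derive_port_name(prefix, ports, tag):
    """Mirror get_virtio_port_name_by_params: '<prefix><index>' or the tag."""
    index = ports.index(tag)
    if prefix:
        return "%s%d" % (prefix, index)
    return tag

# e.g. with virtio_ports = "vc1 vc2" and prefix "vs", "vc2" maps to "vs1"
assert derive_port_name("vs", ["vc1", "vc2"], "vc2") == "vs1"
assert derive_port_name(None, ["vc1", "vc2"], "vc1") == "vc1"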
Example #13
0
 def runTest(self):
     """
     Should fail.
     """
     raise exceptions.TestFail('This test is supposed to fail')
Example #14
0
def run(test, params, env):
    """
    Convert a remote vm to local libvirt (KVM).
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_hostname")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_hostname")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_hostname")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    target = params.get("target")
    v2v_opts = params.get("v2v_opts")
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')

    # Preparation steps for different hypervisors
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Failed to set up ssh-agent")
    else:
        raise exceptions.TestSkipError("Unspported hypervisor: %s" % hypervisor)

    # Create libvirt URI for the source node
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("Remote host uri for converting: %s", remote_uri)

    # Make sure the VM exists before converting
    virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip,
                   'remote_user': source_user, 'remote_pwd': source_pwd,
                   'debug': True}
    remote_virsh = virsh.VirshPersistent(**virsh_dargs)
    try:
        if not remote_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' not exist" % vm_name)
    finally:
        remote_virsh.close_session()

    # Prepare libvirt storage pool
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name")
    pool_target = params.get("pool_target")
    libvirt_pool = utlv.PoolVolumeTest(test, params)
    libvirt_pool.pre_pool(pool_name, pool_type, pool_target, '')

    # Prepare libvirt virtual network
    network = params.get("network")
    net_kwargs = {'net_name': network,
                  'address': params.get('network_addr'),
                  'dhcp_start': params.get('network_dhcp_start'),
                  'dhcp_end': params.get('network_dhcp_end')}
    libvirt_net = utlv.LibvirtNetwork('vnet', **net_kwargs)
    net_info = virsh.net_info(network).stdout.strip()
    bridge = re.search(r'Bridge:\s+(\S+)', net_info).group(1)
    params['netdst'] = bridge

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {'target': target, 'hypervisor': hypervisor,
                  'main_vm': vm_name, 'input_mode': input_mode,
                  'network': network, 'bridge': bridge,
                  'storage': pool_name, 'hostname': source_ip,
                  'input_transport': input_transport, 'vcenter_host': vpx_ip,
                  'vcenter_password': vpx_pwd,
                  'vddk_thumbprint': vddk_thumbprint,
                  'vddk_libdir': vddk_libdir,
                  'vddk_libdir_src': vddk_libdir_src,
                  }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    # Set libguestfs environment
    if hypervisor == 'xen':
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
        # Win10 is not supported by some CPU models,
        # so change the cpu mode to 'host-model'
        unsupport_list = ['win10', 'win2016', 'win2019']
        if params.get('os_version') in unsupport_list:
            logging.info('Set cpu mode to "host-model" for %s.', unsupport_list)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.mode = 'host-model'
            cpu_xml.fallback = 'allow'
            vmxml['cpu'] = cpu_xml
            vmxml.sync()
        vm.start()

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()
        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s" % (len(ret), ret))
    finally:
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        utils_v2v.cleanup_constant_files(params)
        if hypervisor == "xen":
            process.run("ssh-agent -k")
        # Clean libvirt VM
        virsh.remove_domain(vm_name)
        # Clean libvirt pool
        if libvirt_pool:
            libvirt_pool.cleanup_pool(pool_name, pool_type, pool_target, '')
        # Clean libvirt network
        if libvirt_net:
            libvirt_net.cleanup()
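
For orientation, a rough sketch of the source URIs utils_v2v.Uri.get_uri() is expected to produce for the two supported hypervisors (formats assumed for illustration, not the actual implementation):

def sketch_source_uri(hypervisor, host, vpx_dc=None, esx_ip=None):
    """Approximate virt-v2v source URIs for xen and esx conversions."""
    if hypervisor == 'xen':
        return 'xen+ssh://root@%s' % host
    if hypervisor == 'esx':
        # vCenter URI: datacenter and ESXi host appear as path components
        return 'vpx://root@%s/%s/%s/?no_verify=1' % (host, vpx_dc, esx_ip)
    raise ValueError('unsupported hypervisor: %s' % hypervisor)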
Example #15
0
 def run(self):
     logging.debug("ThSendCheck %s: run", self.getName())
     _err_msg_exception = ('ThSendCheck ' + str(self.getName()) + ': Got '
                           'exception %s, continuing')
     _err_msg_disconnect = ('ThSendCheck ' + str(self.getName()) + ': Port '
                            'disconnected, waiting for new port.')
     _err_msg_reconnect = ('ThSendCheck ' + str(self.getName()) + ': Port '
                           'reconnected, continuing.')
     too_much_data = False
     if self.reduced_set:
         rand_a = 65
         rand_b = 91
     else:
         rand_a = 0
         rand_b = 255
     while not self.exitevent.isSet():
         # FIXME: workaround the problem with qemu-kvm stall when too
         # much data is sent without receiving
         for queue in self.queues:
             while not self.exitevent.isSet() and len(queue) > 1048576:
                 too_much_data = True
                 time.sleep(0.1)
         try:
             ret = select.select([], [self.port.sock], [], 1.0)
         except Exception, inst:
             # self.port is not yet set while reconnecting
             if self.migrate_event is None:
                 raise exceptions.TestFail(
                     "ThSendCheck %s: Broken pipe. If this"
                     " is expected behavior set migrate_event "
                     "to support reconnection." % self.getName())
             if self.port.sock is None:
                 logging.debug(_err_msg_disconnect)
                 while self.port.sock is None:
                     if self.exitevent.isSet():
                         break
                     time.sleep(0.1)
                 logging.debug(_err_msg_reconnect)
             else:
                 logging.debug(_err_msg_exception, inst)
             continue
         if ret[1]:
             # Generate blocklen bytes of random data, add them to the FIFO
             # and send them over virtio_console
             buf = ""
             for _ in range(self.blocklen):
                 char = "%c" % random.randrange(rand_a, rand_b)
                 buf += char
                 for queue in self.queues:
                     queue.append(char)
             target = self.idx + self.blocklen
             while not self.exitevent.isSet() and self.idx < target:
                 try:
                     idx = self.port.sock.send(buf)
                 except socket.timeout:
                     continue
                 except Exception, inst:
                     # Broken pipe
                     if not hasattr(inst, 'errno') or inst.errno != 32:
                         continue
                     if self.migrate_event is None:
                         self.exitevent.set()
                         raise exceptions.TestFail(
                             "ThSendCheck %s: Broken "
                             "pipe. If this is expected behavior "
                             "set migrate_event to support "
                             "reconnection." % self.getName())
                     logging.debug(
                         "ThSendCheck %s: Broken pipe "
                         ", reconnecting. ", self.getName())
                     attempt = 10
                     while (attempt > 1 and not self.exitevent.isSet()):
                         # Wait until main thread sets the new self.port
                         while not (self.exitevent.isSet()
                                    or self.migrate_event.wait(1)):
                             pass
                         if self.exitevent.isSet():
                             break
                         logging.debug(
                             "ThSendCheck %s: Broken pipe resumed"
                             ", reconnecting...", self.getName())
                         self.port.sock = False
                         self.port.open()
                         try:
                             idx = self.port.sock.send(buf)
                         except Exception:
                             attempt -= 1
                             time.sleep(10)
                         else:
                             attempt = 0
                 buf = buf[idx:]
                 self.idx += idx
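
The queue-length check at the top of the loop is a simple back-pressure valve; the same idea in isolation (a minimal sketch, threading details omitted):

import time

def wait_for_drain(queues, limit=1048576, poll=0.1, should_exit=lambda: False):
    """Block while any consumer queue holds more than `limit` pending bytes."""
    for queue in queues:
        while not should_exit() and len(queue) > limit:
            time.sleep(poll)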
Example #16
0
def run(test, params, env):
    """
    Test command: virsh list.

    1) Filter parameters according to libvirtd's version
    2) Prepare domain's existence state: transient, managed-save.
    3) Prepare libvirtd's status.
    4) Execute list command.
    5) Result check.
    """
    def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd,
                                     local_ip, remote_user, local_user,
                                     local_pwd):
        """
        Create a virsh list command and execute it on remote host.
        It will list local domains on remote host.

        :param options_ref: options in virsh list command.
        :param remote_ip: remote host's ip.
        :param remote_passwd: remote host's password.
        :param local_ip: local ip, to create uri in virsh list.
        :return: return status and output of the virsh list command.
        """
        complete_uri = libvirt_vm.complete_uri(local_ip)
        command_on_remote = ("virsh -c %s list %s" %
                             (complete_uri, options_ref))
        try:
            # setup autologin for ssh from remote machine to execute commands
            # remotely
            config_opt = ["StrictHostKeyChecking=no"]
            ssh_key.setup_remote_ssh_key(remote_ip,
                                         remote_user,
                                         remote_passwd,
                                         hostname2=local_ip,
                                         user2=local_user,
                                         password2=local_pwd,
                                         config_options=config_opt)
            session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                          remote_passwd, "#")
            time.sleep(5)
            status, output = session.cmd_status_output(command_on_remote,
                                                       internal_timeout=30)
        except Exception as info:
            logging.error("Shell failed to execute command from" " remote")
            return 1, info
        time.sleep(5)
        session.close()
        return int(status), output

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    options_ref = params.get("list_options_ref", "")
    list_ref = params.get("list_type_ref", "")
    vm_ref = params.get("vm_ref", "")
    status_error = params.get("status_error", "no")
    addition_status_error = params.get("addition_status_error", "no")
    desc = params.get("list_desc", "")
    libvirtd = params.get("libvirtd", "on")
    remote_ref = params.get("remote_ref", "")
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip")
    remote_user = params.get("remote_user", "root")
    local_user = params.get("username", "root")
    local_pwd = params.get("local_pwd", None)

    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    domid = vm.get_id()

    # Some parameters are not supported on old libvirt, skip them.
    help_info = virsh.help("list").stdout.strip()
    if vm_ref and not re.search(vm_ref, help_info):
        raise exceptions.TestSkipError(
            "This version does not support vm type: %s" % vm_ref)
    if list_ref and not re.search(list_ref, help_info):
        raise exceptions.TestSkipError("This version do not support list"
                                       " type:%s" % list_ref)

    # If a transient domain is destroyed, it will disappear.
    if vm_ref == "transient" and options_ref == "inactive":
        logging.info("Set addition_status_error to yes")
        logging.info(
            "because transient domain will disappear after destroyed.")
        addition_status_error = "yes"

    if vm_ref == "transient":
        vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vm.undefine()
    elif vm_ref == "managed-save":
        virsh.managedsave(vm_name, ignore_status=True, print_info=True)

    try:
        # run test case
        if list_ref == "--uuid":
            result_expected = domuuid
            logging.info("%s's uuid is: %s", vm_name, domuuid)
        elif list_ref == "--title":
            vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if options_ref == "inactive":
                virsh.desc(vm_name, "--config --title", desc)
            else:
                virsh.desc(vm_name, "--live --title", desc)
            result_expected = desc
            logging.info("%s's title is: %s", vm_name, desc)
        else:
            result_expected = vm_name
            logging.info("domain's name is: %s", vm_name)

        if options_ref == "vm_id":
            logging.info("%s's running-id is: %s", vm_name, domid)
            options_ref = "%s %s" % (domid, list_ref)
        elif options_ref == "vm_uuid":
            logging.info("%s's uuid is: %s", vm_name, domuuid)
            options_ref = "%s %s" % (domuuid, list_ref)
        elif options_ref == "inactive":
            vm.destroy()
            options_ref = "--inactive %s" % list_ref
        elif options_ref == "vm_name":
            options_ref = "%s %s" % (vm_name, list_ref)
        elif options_ref == "all":
            options_ref = "--all %s" % list_ref
        elif options_ref == "":
            options_ref = "%s" % list_ref

        # Prepare libvirtd status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if remote_ref == "remote":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise exceptions.TestSkipError(
                    "Remote test parameters unchanged from default")
            logging.info("Execute virsh command on remote host %s.", remote_ip)
            status, output = list_local_domains_on_remote(
                options_ref, remote_ip, remote_pwd, local_ip, remote_user,
                local_user, local_pwd)
            logging.info("Status:%s", status)
            logging.info("Output:\n%s", output)
        else:
            if vm_ref:
                options_ref = "%s --%s" % (options_ref, vm_ref)
            result = virsh.dom_list(options_ref,
                                    ignore_status=True,
                                    print_info=True)
            status = result.exit_status
            output = result.stdout.strip()

    except Exception as output:
        status = True
        logging.error("Exception: %s" % output)

    finally:
        # Recover libvirtd service status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # Recover of domain
        if vm_ref == "transient" or list_ref == "--title":
            vm_backup.sync()
        elif vm_ref == "managed-save":
            # Recover saved guest.
            virsh.managedsave_remove(vm_name,
                                     ignore_status=True,
                                     print_info=True)

        # Check result
        status_error = (status_error == "no") and \
                       (addition_status_error == "no")
        if vm_ref == "managed-save":
            saved_output = re.search(vm_name + r"\s+saved", output)
            if saved_output:
                output = saved_output.group(0)
            else:
                output = ""

        if not status_error:
            if not status and re.search(result_expected, output):
                raise exceptions.TestFail("Run successful with wrong command!")
        else:
            if status:
                raise exceptions.TestFail("Run failed with right command.")
            if not re.search(result_expected, output):
                raise exceptions.TestFail("Run successful but result is not"
                                          " expected.")
Example #17
0
    def run_normal(self):
        """
        Receives data and verifies whether they match self.buff (queue).
        It allows data loss up to self.sendidx, which can be manually reloaded
        after host socket reconnection, or you can overwrite this value from
        another thread.
        """
        logging.debug("ThRecvCheck %s: run", self.getName())
        _err_msg_missing_migrate_ev = (
            "ThRecvCheck %s: Broken pipe. If "
            "this is expected behavior set migrate_event to "
            "support reconnection." % self.getName())
        _err_msg_exception = ('ThRecvCheck ' + str(self.getName()) + ': Got '
                              'exception %s, continuing')
        _err_msg_disconnect = ('ThRecvCheck ' + str(self.getName()) + ': Port '
                               'disconnected, waiting for new port.')
        _err_msg_reconnect = ('ThRecvCheck ' + str(self.getName()) + ': Port '
                              'reconnected, continuing.')
        attempt = 10
        while not self.exitevent.isSet():
            try:
                ret = select.select([self.port.sock], [], [], 1.0)
            except Exception, inst:
                # self.port is not yet set while reconnecting
                if self.port.sock is None:
                    logging.debug(_err_msg_disconnect)
                    while self.port.sock is None:
                        if self.exitevent.isSet():
                            break
                        time.sleep(0.1)
                    logging.debug(_err_msg_reconnect)
                else:
                    logging.debug(_err_msg_exception, inst)
                continue
            if ret[0] and (not self.exitevent.isSet()):
                try:
                    buf = self.port.sock.recv(self.blocklen)
                except Exception, inst:
                    # self.port is not yet set while reconnecting
                    if self.port.sock is None:
                        logging.debug(_err_msg_disconnect)
                        while self.port.sock is None:
                            if self.exitevent.isSet():
                                break
                            time.sleep(0.1)
                        logging.debug(_err_msg_reconnect)
                    else:
                        logging.debug(_err_msg_exception, inst)
                    continue
                if buf:
                    # Compare the received data with the control data
                    for char in buf:
                        _char = self.buff.popleft()
                        if char == _char:
                            self.idx += 1
                        else:
                            # TODO BUG: data from the socket on host can
                            # be lost during migration
                            while char != _char:
                                if self.sendidx > 0:
                                    self.sendidx -= 1
                                    _char = self.buff.popleft()
                                else:
                                    self.exitevent.set()
                                    logging.error(
                                        "ThRecvCheck %s: "
                                        "Failed to recv %dth "
                                        "character", self.getName(), self.idx)
                                    logging.error(
                                        "ThRecvCheck %s: "
                                        "%s != %s", self.getName(), repr(char),
                                        repr(_char))
                                    logging.error(
                                        "ThRecvCheck %s: "
                                        "Recv = %s", self.getName(), repr(buf))
                                    # sender might change the buff :-(
                                    time.sleep(1)
                                    _char = ""
                                    for buf in self.buff:
                                        _char += buf
                                        _char += ' '
                                    logging.error(
                                        "ThRecvCheck %s: "
                                        "Queue = %s", self.getName(),
                                        repr(_char))
                                    logging.info(
                                        "ThRecvCheck %s: "
                                        "MaxSendIDX = %d", self.getName(),
                                        (self.sendlen - self.sendidx))
                                    raise exceptions.TestFail(
                                        "ThRecvCheck %s: "
                                        "incorrect data" % self.getName())
                    attempt = 10
                else:  # ! buf
                    # Broken socket
                    if attempt > 0:
                        attempt -= 1
                        if self.migrate_event is None:
                            self.exitevent.set()
                            raise exceptions.TestFail(
                                _err_msg_missing_migrate_ev)
                        logging.debug(
                            "ThRecvCheck %s: Broken pipe "
                            ", reconnecting. ", self.getName())
                        self.reload_loss_idx()
                        # Wait until main thread sets the new self.port
                        while not (self.exitevent.isSet()
                                   or self.migrate_event.wait(1)):
                            pass
                        if self.exitevent.isSet():
                            break
                        logging.debug(
                            "ThRecvCheck %s: Broken pipe resumed, "
                            "reconnecting...", self.getName())

                        self.port.sock = False
                        self.port.open()
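
The mismatch branch above effectively tolerates up to self.sendidx characters lost in transit before declaring corruption; the same check in a self-contained form (a sketch, not the thread code):

from collections import deque

def compare_with_loss(received, control, loss):
    """Consume `control`, allowing up to `loss` characters lost in transit."""
    control = deque(control)
    for char in received:
        expected = control.popleft()
        while char != expected:
            if loss <= 0:
                raise ValueError("incorrect data")
            loss -= 1
            expected = control.popleft()
    return loss

# 'b' was lost on the wire; one allowed loss absorbs it
assert compare_with_loss("acd", "abcd", loss=1) == 0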
Example #18
0
    def do_migration(self, vms, srcuri, desturi, migration_type,
                     options=None, thread_timeout=60,
                     ignore_status=False, func=None, virsh_opt="",
                     extra_opts="", **args):
        """
        Migrate vms.

        :param vms: migrated vms.
        :param srcuri: local uri, used when migrate vm from remote to local
        :param desturi: remote uri, used when migrate vm from local to remote
        :param migration_type: migration mode: "orderly", "cross" or
                               "simultaneous"
        :param options: migration options
        :param thread_timeout: time out seconds for the migration thread running
        :param ignore_status: determine if an exception is raised for errors
        :param func: the function executed during migration thread is running
        :param args: dictionary used by func;
                     'func_params' is mandatory (pass None when func needs
                     no real parameters).
                     'shell' is optional, where shell=True (bool) can be used
                     for process.run

        """
        for vm in vms:
            vm.connect_uri = args.get("virsh_uri", "qemu:///system")
        if migration_type == "orderly":
            for vm in vms:
                migration_thread = threading.Thread(target=self.thread_func_migration,
                                                    args=(vm, desturi, options,
                                                          ignore_status, virsh_opt,
                                                          extra_opts))
                migration_thread.start()
                elapsed_time = 0
                stime = int(time.time())
                if func:
                    # Execute command once the migration is started
                    migrate_start_state = args.get("migrate_start_state", "paused")

                    # Wait for migration to start
                    migrate_options = ""
                    if options:
                        migrate_options = str(options)
                    if extra_opts:
                        migrate_options += " %s" % extra_opts

                    migration_started = self.wait_for_migration_start(vm, state=migrate_start_state,
                                                                      uri=desturi,
                                                                      migrate_options=migrate_options.strip())

                    if migration_started:
                        logging.info("Migration started for %s", vm.name)
                        if func == process.run:
                            try:
                                func(args['func_params'], shell=args['shell'])
                            except KeyError:
                                func(args['func_params'])
                        elif func == virsh.migrate_postcopy:
                            func(vm.name, uri=srcuri, debug=True)
                        else:
                            if 'func_params' in args:
                                func(args['func_params'])
                            else:
                                func()
                    else:
                        logging.error("Migration failed to start for %s",
                                      vm.name)
                elapsed_time = int(time.time()) - stime
                logging.debug("start_time:%d, elapsed_time:%d", stime,
                              elapsed_time)
                if elapsed_time < thread_timeout:
                    migration_thread.join(thread_timeout - elapsed_time)
                if migration_thread.isAlive():
                    logging.error("Migrate %s timeout.", migration_thread)
                    self.RET_LOCK.acquire()
                    self.RET_MIGRATION = False
                    self.RET_LOCK.release()
        elif migration_type == "cross":
            # Migrate a vm to remote first,
            # then migrate another to remote with the first vm back
            vm_remote = vms.pop()
            self.thread_func_migration(vm_remote, desturi)
            for vm in vms:
                thread1 = threading.Thread(target=self.thread_func_migration,
                                           args=(vm_remote, srcuri, options))
                thread2 = threading.Thread(target=self.thread_func_migration,
                                           args=(vm, desturi, options))
                thread1.start()
                thread2.start()
                thread1.join(thread_timeout)
                thread2.join(thread_timeout)
                vm_remote = vm
                if thread1.isAlive() or thread2.isAlive():
                    logging.error("Cross migrate timeout.")
                    self.RET_LOCK.acquire()
                    self.RET_MIGRATION = False
                    self.RET_LOCK.release()
            # Add popped vm back to list
            vms.append(vm_remote)
        elif migration_type == "simultaneous":
            migration_threads = []
            for vm in vms:
                migration_threads.append(threading.Thread(
                                         target=self.thread_func_migration,
                                         args=(vm, desturi, options)))
            # let all migrations start first
            for thread in migration_threads:
                thread.start()

            # join the threads until they finish or time out
            for thread in migration_threads:
                thread.join(thread_timeout)
                if thread.isAlive():
                    logging.error("Migrate %s timeout.", thread)
                    self.RET_LOCK.acquire()
                    self.RET_MIGRATION = False
                    self.RET_LOCK.release()
        if not self.RET_MIGRATION and not ignore_status:
            raise exceptions.TestFail("Migration failed")
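
A hypothetical invocation (migrator object and VM names assumed) that migrates one domain in orderly mode and suspends it once migration is underway:

# migrator.do_migration([vm], srcuri="qemu:///system",
#                       desturi="qemu+ssh://dest.example.com/system",
#                       migration_type="orderly",
#                       options="--live --verbose", thread_timeout=900,
#                       func=virsh.suspend, func_params=vm.name)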
Example #19
0
    def export_target(self):
        """
        Export target in localhost for emulated iscsi
        """
        selinux_mode = None

        # create image disk
        if not os.path.isfile(self.emulated_image):
            process.system(self.create_cmd)
        else:
            emulated_image_size = os.path.getsize(self.emulated_image) / 1024
            if emulated_image_size != self.emulated_expect_size:
                # No need to remove, rebuild is fine
                process.system(self.create_cmd)

        # check whether the target already exists; if not, create it
        cmd = "targetcli ls /iscsi 1"
        output = process.system_output(cmd)
        if not re.findall("%s$" % self.target, output, re.M):
            logging.debug("Need to export target in host")

            # Set selinux to permissive mode to make sure
            # iscsi target export successfully
            if utils_selinux.is_enforcing():
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")

            # In fact, we've got two options here:
            #
            # 1) Create a block backstore that usually provides the best
            #    performance. We can use a block device like /dev/sdb or
            #    a logical volume previously created,
            #    (lvcreate --name lv_iscsi --size 1G vg)
            # 2) Create a fileio backstore,
            #    which enables the local file system cache.
            #
            # This class only works for emulated iscsi devices,
            # so a fileio backstore is enough and safe.

            # Create a fileio backstore
            device_cmd = ("targetcli /backstores/fileio/ create %s %s" %
                          (self.device, self.emulated_image))
            output = process.system_output(device_cmd)
            if "Created fileio" not in output:
                raise exceptions.TestFail("Failed to create fileio %s. (%s)" %
                                          (self.device, output))

            # Create an IQN with a target named target_name
            target_cmd = "targetcli /iscsi/ create %s" % self.target
            output = process.system_output(target_cmd)
            if "Created target" not in output:
                raise exceptions.TestFail("Failed to create target %s. (%s)" %
                                          (self.target, output))

            check_portal = "targetcli /iscsi/%s/tpg1/portals ls" % self.target
            portal_info = process.system_output(check_portal)
            if "0.0.0.0:3260" not in portal_info:
                # Create portal
                # 0.0.0.0 means binding to INADDR_ANY
                # and using default IP port 3260
                portal_cmd = ("targetcli /iscsi/%s/tpg1/portals/ create %s" %
                              (self.target, "0.0.0.0"))
                output = process.system_output(portal_cmd)
                if "Created network portal" not in output:
                    raise exceptions.TestFail("Failed to create portal. (%s)" %
                                              output)
            if ("ipv6" == utils_net.IPAddress(self.portal_ip).version
                    and self.portal_ip not in portal_info):
                # Ipv6 portal address can't be created by default,
                # create ipv6 portal if needed.
                portal_cmd = ("targetcli /iscsi/%s/tpg1/portals/ create %s" %
                              (self.target, self.portal_ip))
                output = process.system_output(portal_cmd)
                if "Created network portal" not in output:
                    raise exceptions.TestFail("Failed to create portal. (%s)" %
                                              output)
            # Create lun
            lun_cmd = "targetcli /iscsi/%s/tpg1/luns/ " % self.target
            dev_cmd = "create /backstores/fileio/%s" % self.device
            output = process.system_output(lun_cmd + dev_cmd)
            luns = re.findall(r"Created LUN (\d+).", output)
            if not luns:
                raise exceptions.TestFail("Failed to create lun. (%s)" %
                                          output)
            self.luns = luns[0]

            # Set firewall if it's enabled
            output = process.system_output("firewall-cmd --state",
                                           ignore_status=True)
            if re.findall("^running", output, re.M):
                # firewall is running
                process.system("firewall-cmd --permanent --add-port=3260/tcp")
                process.system("firewall-cmd --reload")

            # Restore selinux
            if selinux_mode is not None:
                utils_selinux.set_status(selinux_mode)

            self.export_flag = True
        else:
            logging.info("Target %s has already existed!" % self.target)

        if self.chap_flag:
            # Set CHAP authentication on the exported target
            self.set_chap_auth_target()
            # Set CHAP authentication for initiator to login target
            if self.portal_visible():
                self.set_chap_auth_initiator()
        else:
            # To enable the so-called "demo mode" TPG operation,
            # disable all authentication for the corresponding endpoint,
            # which grants access to all initiators so that they can
            # access all LUNs in the TPG without further authentication.
            auth_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
            attr_cmd = ("set attribute %s %s %s %s" %
                        ("authentication=0", "demo_mode_write_protect=0",
                         "generate_node_acls=1", "cache_dynamic_acls=1"))
            output = process.system_output(auth_cmd + attr_cmd)
            logging.info("Define access rights: %s" % output)
            # Discover the target
            self.portal_visible()

        # Save configuration
        process.system("targetcli / saveconfig")

        # Restart iSCSI service
        restart_iscsid()
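
For reference, the manual targetcli sequence this method automates looks roughly like this (IQN, backstore name and image path are placeholders):

# targetcli /backstores/fileio create device0 /tmp/emulated.img
# targetcli /iscsi create iqn.2003-01.org.linux:emulated.target
# targetcli /iscsi/iqn.2003-01.org.linux:emulated.target/tpg1/portals/ create 0.0.0.0
# targetcli /iscsi/iqn.2003-01.org.linux:emulated.target/tpg1/luns/ create /backstores/fileio/device0
# targetcli / saveconfig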
Example #20
0
def hotplug_domain_vcpu(vm, count, by_virsh=True, hotplug=True):
    """
    Hot-plug/Hot-unplug vcpu for domain

    :param vm:   VM object
    :param count:    for setvcpus it is the target vcpu count,
                     but for qemu-monitor-command
                     we need to designate a specific CPU ID,
                     which is derived as (count - 1)
    :param by_virsh: True means hotplug/unplug by command setvcpus,
                     otherwise, using qemu_monitor
    :param hotplug:  True means hot-plug, False means hot-unplug
    """
    if by_virsh:
        result = virsh.setvcpus(vm.name, count, "--live", debug=True)
    else:
        cmds = []
        cmd_type = "--hmp"
        result = None
        if "ppc" in platform.machine():
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            topology = vmxml.get_cpu_topology()
            vcpu_count = vm.get_cpu_count()

            if topology:
                threads = int(topology["threads"])
            else:
                threads = 1
            # test if count is a multiple of threads
            err_str = "Expected vcpu counts to be multiples of %d" % threads
            if hotplug:
                err_str += ", invalid vcpu count for hotplug"
            else:
                err_str += ", invalid vcpu count for hotunplug"
            if (count % threads) != 0:
                raise exceptions.TestError(err_str)
            if hotplug:
                for item in range(0, int(count), threads):
                    if item < vcpu_count:
                        continue
                    cmds.append(
                        "device_add host-spapr-cpu-core,id=core%d,core-id=%d" %
                        (item, item))
            else:
                for item in range(int(count), vcpu_count, threads):
                    cmds.append("device_del core%d" % item)
        else:
            cmd_type = "--pretty"
            if hotplug:
                cpu_opt = "cpu-add"
            else:
                cpu_opt = "cpu-del"
                # Note: cpu-del is not supported currently; it will return
                # an error as follows,
                # {
                #    "id": "libvirt-23",
                #    "error": {
                #        "class": "CommandNotFound",
                #        "desc": "The command cpu-del has not been found"
                #    }
                # }
                # so, the caller should check the result.
            # hot-plug/hot-unplug the CPU with the maximal ID
            params = (cpu_opt, (count - 1))
            cmds.append('{\"execute\":\"%s\",\"arguments\":{\"id\":%d}}' %
                        params)
        # Execute cmds to hot(un)plug
        for cmd in cmds:
            result = virsh.qemu_monitor_command(vm.name,
                                                cmd,
                                                cmd_type,
                                                debug=True)
            if result.exit_status != 0:
                raise exceptions.TestFail(result.stderr_text)
            else:
                logging.debug("Command output:\n%s",
                              result.stdout_text.strip())
    return result
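
Hypothetical calls (an avocado-vt VM object named vm assumed) exercising the paths above:

# hotplug_domain_vcpu(vm, 4)                                 # virsh setvcpus --live
# hotplug_domain_vcpu(vm, 4, by_virsh=False)                 # QMP cpu-add/device_add
# hotplug_domain_vcpu(vm, 3, by_virsh=False, hotplug=False)  # caller checks result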
Example #21
0
    def _make_snapshot(snapshot_numbers_take):
        """
        Make external disk snapshot

        :param snapshot_numbers_take: snapshot numbers.
        """
        for count in range(0, snapshot_numbers_take):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "blockcopy_snap"
            snap_xml.snap_name = snapshot_name + "_%s" % count
            snap_xml.description = "blockcopy snapshot"

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            # Remove non-storage disks such as 'cdrom'. Filter into a new
            # list; removing items while iterating would skip elements.
            disks = [disk for disk in disks if disk.device == 'disk']
            new_disks = []
            src_disk_xml = disks[0]
            disk_xml = snap_xml.SnapDiskXML()
            disk_xml.xmltreefile = src_disk_xml.xmltreefile
            del disk_xml.device
            del disk_xml.address
            disk_xml.snapshot = "external"
            disk_xml.disk_name = disk_xml.target['dev']

            # Only qcow2 works as external snapshot file format, update it
            # here
            driver_attr = disk_xml.driver
            driver_attr.update({'type': 'qcow2'})
            disk_xml.driver = driver_attr

            # Initialize hosts up front so it is always bound below
            hosts = None
            new_attrs = disk_xml.source.attrs
            if 'file' in disk_xml.source.attrs:
                new_file = os.path.join(tmp_dir, "blockcopy_shallow_%s.snap" % count)
                snapshot_external_disks.append(new_file)
                new_attrs.update({'file': new_file})
                hosts = None
            elif ('dev' in disk_xml.source.attrs or
                  'name' in disk_xml.source.attrs or
                  'pool' in disk_xml.source.attrs):
                if (disk_xml.type_name == 'block' or
                        disk_source_protocol == 'iscsi'):
                    disk_xml.type_name = 'block'
                    if 'name' in new_attrs:
                        del new_attrs['name']
                        del new_attrs['protocol']
                    elif 'pool' in new_attrs:
                        del new_attrs['pool']
                        del new_attrs['volume']
                        del new_attrs['mode']
                    back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                           is_login=True,
                                                           image_size="1G",
                                                           emulated_image=back_n)
                    emulated_iscsi.append(back_n)
                    cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                    process.run(cmd, shell=True)
                    new_attrs.update({'dev': back_path})
                    hosts = None

            new_src_dict = {"attrs": new_attrs}
            if hosts:
                new_src_dict.update({"hosts": hosts})
            disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

            new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)

            if snapshot_result.exit_status != 0:
                raise exceptions.TestFail(snapshot_result.stderr)
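
The per-disk element this helper feeds to snapshot-create looks roughly like this in the file-backed case (paths illustrative, exact XML layout assumed):

# <disk name='vda' snapshot='external'>
#   <driver type='qcow2'/>
#   <source file='/tmp/blockcopy_shallow_0.snap'/>
# </disk>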
Example #22
0
    def runTest(self):
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
        # This trick will give better reporting of cloud tests being executed
        # under avocado (skips, warns and errors will display correctly)
        except exceptions.TestNotFoundError, details:
            raise exceptions.TestSkipError(details)
        except exceptions.TestWarn, details:
            raise exceptions.TestWarn(details)
        except exceptions.TestError, details:
            raise exceptions.TestError(details)
        except exceptions.TestFail, details:
            raise exceptions.TestFail(details)
        finally:
            if env_lang:
                os.environ['LANG'] = env_lang
            else:
                del os.environ['LANG']

    def _log_detailed_cmd_info(self, result):
        """
        Log detailed command information.

        :param result: :class:`avocado.utils.process.CmdResult` instance.
        """
        self.log.info("Exit status: %s", result.exit_status)
        self.log.info("Duration: %s", result.duration)
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command.

    The command can attach a new disk or detach a disk.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh attach/detach-disk operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def is_attached(vmxml_devices, disk_type, source_file, target_dev):
        """
        Check attached device and disk exist or not.

        :param vmxml_devices: VMXMLDevices instance
        :param disk_type: disk's device type: cdrom or floppy
        :param source_file : disk's source file to check
        :param target_dev : target device name
        :return: True/False if backing file and device found
        """
        disks = vmxml_devices.by_device_tag('disk')
        for disk in disks:
            if disk.device != disk_type:
                continue
            if disk.target['dev'] != target_dev:
                continue
            if disk.xmltreefile.find('source') is not None:
                if disk.source.attrs['file'] != source_file:
                    continue
            else:
                continue
            # All three conditions met
            logging.debug("Find %s in given disk XML", source_file)
            return True
        logging.debug("Not find %s in gievn disk XML", source_file)
        return False

    def check_result(disk_source, disk_type, disk_target, flags, attach=True):
        """
        Check the test result of attach/detach-device command.
        """
        active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if not attach:
            utils_misc.wait_for(
                lambda: not is_attached(active_vmxml.devices, disk_type,
                                        disk_source, disk_target), 20)
        active_attached = is_attached(active_vmxml.devices, disk_type,
                                      disk_source, disk_target)
        vm_state = pre_vm_state
        if vm_state != "transient":
            inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(
                vm_name, options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
                                            disk_source, disk_target)
        if flags.count("config") and not flags.count("live"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated"
                            " when --config options used for"
                            " attachment")
                    if vm_state != "shutoff":
                        if active_attached:
                            raise exceptions.TestFail(
                                "Active domain XML updated "
                                "when --config options used "
                                "for attachment")
                else:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated"
                            " when --config options used for"
                            " detachment")
        elif flags.count("live") and not flags.count("config"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --live options used for"
                            " attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated "
                            "when --live options used for"
                            " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --live options used for"
                            " detachment")
        elif flags.count("live") and flags.count("config"):
            if attach:
                if vm_state in ["paused", "running"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --live --config options"
                            " used for attachment")
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated"
                            " when --live --config options "
                            "used for attachment")
            else:
                if vm_state in ["paused", "running"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated "
                            "when --live --config options "
                            "used for detachment")
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated "
                            "when --live --config options"
                            " used for detachment")
        elif flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --current options used "
                            "for attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated "
                            "when --current options used "
                            "for live attachment")
                if vm_state == "shutoff" and not inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated"
                        " when --current options used for"
                        " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --current options used "
                            "for detachment")
                if vm_state == "shutoff" and inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated"
                        " when --current options used for"
                        " detachment")

    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_status_error = "yes" == params.get("at_status_error", 'no')
    dt_status_error = "yes" == params.get("dt_status_error", 'no')
    # Disk specific attributes.
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    device = params.get("at_dt_disk_device", "disk")
    device_source_name = params.get("at_dt_disk_device_source", "attach.img")
    device_target = params.get("at_dt_disk_device_target", "vdd")

    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Turn VM into certain state.
    if pre_vm_state == "running":
        logging.info("Starting %s...", vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
    elif pre_vm_state == "shutoff":
        logging.info("Shutting down %s...", vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)
    elif pre_vm_state == "paused":
        logging.info("Pausing %s...", vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
        if not vm.pause():
            raise exceptions.TestSkipError("Can't pause the domain")
    elif pre_vm_state == "transient":
        logging.info("Creating %s...", vm_name)
        vm.undefine()
        if virsh.create(backup_xml.xml, **virsh_dargs).exit_status:
            backup_xml.define()
            raise exceptions.TestSkipError("Can't create the domain")
        vm.wait_for_login().close()

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    try:
        # Create disk image.
        device_source = os.path.join(data_dir.get_tmp_dir(),
                                     device_source_name)
        libvirt.create_local_disk("file", device_source, "1")

        # Attach the disk.
        ret = virsh.attach_disk(vm_ref,
                                device_source,
                                device_target,
                                at_options,
                                debug=True)
        libvirt.check_exit_status(ret, at_status_error)

        # Check if the command took effect in the config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()

        # Sleep a while until the VM is stable.
        time.sleep(3)
        if not ret.exit_status:
            check_result(device_source, device, device_target, at_options)

        # Detach the disk.
        if pre_vm_state == "paused":
            if not vm.pause():
                raise exceptions.TestFail("Can't pause the domain")
        ret = virsh.detach_disk(vm_ref, device_target, dt_options, debug=True)
        libvirt.check_exit_status(ret, dt_status_error)

        # Check if the command took effect in the config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()

        # Sleep a while until the VM is stable.
        time.sleep(10)
        if not ret.exit_status:
            check_result(device_source, device, device_target, dt_options,
                         False)

        # Try to start vm at last.
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()

        if os.path.exists(device_source):
            os.remove(device_source)
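The long if/elif ladder in check_result() reduces to a small decision table. A simplified model of which XML copies should change after a successful attach (a sketch that deliberately ignores the transient and shutoff corner cases handled above):

def expected_updates(flags, vm_state):
    # Return (active_updated, inactive_updated) for a successful
    # attach-disk, mirroring the branches in check_result().
    live = "live" in flags
    config = "config" in flags
    if config and not live:
        return (False, True)
    if live and not config:
        return (True, False)
    if live and config:
        return (True, True)
    # --current (or no flag) targets whichever state the domain is in now
    if vm_state == "shutoff":
        return (False, True)
    return (True, False)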
Example #24
    def runTest(self):
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
        # This trick gives better reporting of cloud tests being executed
        # under avocado (skips, warns and errors will display correctly)
        except exceptions.TestNotFoundError as details:
            raise exceptions.TestSkipError(details)
        except exceptions.TestWarn as details:
            raise exceptions.TestWarn(details)
        except exceptions.TestError as details:
            raise exceptions.TestError(details)
        except exceptions.TestFail as details:
            raise exceptions.TestFail(details)
        finally:
            if env_lang:
                os.environ['LANG'] = env_lang
            else:
                del os.environ['LANG']

    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, fail this
        # test right away (TestNotFoundError is reported as a skip).
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        # Report cloud test version
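The LANG save/restore dance in runTest() is a general pattern; a minimal sketch of the same idea as a reusable context manager (the name env_var is illustrative):

import os
from contextlib import contextmanager

@contextmanager
def env_var(name, value):
    # Temporarily set an environment variable and restore (or delete)
    # it afterwards, exactly like the try/finally around _runTest().
    old = os.environ.get(name)
    os.environ[name] = value
    try:
        yield
    finally:
        if old is not None:
            os.environ[name] = old
        else:
            del os.environ[name]

# Usage:
#   with env_var('LANG', 'C'):
#       self._runTest()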
Example #25
def run(test, params, env):
    """
    Convert a remote VM to a remote oVirt node.
    """
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            raise exceptions.TestSkipError("Please set real value for %s" % v)

    vm_name = params.get("main_vm")
    target = params.get("target")
    hypervisor = params.get("hypervisor")
    input_mode = params.get("input_mode")
    storage = params.get('storage')
    network = params.get('network')
    bridge = params.get('bridge')
    source_user = params.get("username", "root")
    xen_ip = params.get("xen_ip")
    xen_pwd = params.get("xen_pwd")
    vpx_ip = params.get("vpx_ip")
    vpx_pwd = params.get("vpx_pwd")
    vpx_pwd_file = params.get("vpx_pwd_file")
    vpx_dc = params.get("vpx_dc")
    esx_ip = params.get("esx_ip")
    address_cache = env.get('address_cache')
    v2v_opts = params.get("v2v_opts")
    v2v_timeout = int(params.get('v2v_timeout', 1200))

    # Preparation steps for different hypervisors
    if hypervisor == "esx":
        source_ip = vpx_ip
        source_pwd = vpx_pwd
        # Create password file to access ESX hypervisor
        with open(vpx_pwd_file, 'w') as f:
            f.write(vpx_pwd)
    elif hypervisor == "xen":
        source_ip = xen_ip
        source_pwd = xen_pwd
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(source_ip, source_user, source_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            process.run("ssh-agent -k")
            raise exceptions.TestError("Failed to set up ssh-agent")
    elif hypervisor == "kvm":
        source_ip = None
        source_pwd = None
    else:
        raise exceptions.TestSkipError("Unspported hypervisor: %s" %
                                       hypervisor)

    # Create libvirt URI
    v2v_uri = utils_v2v.Uri(hypervisor)
    remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip)
    logging.debug("libvirt URI for converting: %s", remote_uri)

    # Make sure the VM exists before converting
    v2v_virsh = None
    close_virsh = False
    if hypervisor == 'kvm':
        v2v_virsh = virsh
    else:
        virsh_dargs = {
            'uri': remote_uri,
            'remote_ip': source_ip,
            'remote_user': source_user,
            'remote_pwd': source_pwd,
            'debug': True
        }
        v2v_virsh = virsh.VirshPersistent(**virsh_dargs)
        close_virsh = True
    try:
        if not v2v_virsh.domain_exists(vm_name):
            raise exceptions.TestError("VM '%s' not exist" % vm_name)
    finally:
        if close_virsh:
            v2v_virsh.close_session()

    # Create SASL user on the ovirt host
    user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                   params.get("sasl_pwd"))
    v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
    v2v_sasl.server_ip = params.get("remote_ip")
    v2v_sasl.server_user = params.get('remote_user')
    v2v_sasl.server_pwd = params.get('remote_pwd')
    v2v_sasl.setup(remote=True)

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {
        'target': target,
        'hypervisor': hypervisor,
        'main_vm': vm_name,
        'input_mode': input_mode,
        'network': network,
        'bridge': bridge,
        'storage': storage,
        'hostname': source_ip
    }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})
    output_format = params.get('output_format')
    if output_format:
        v2v_params.update({'output_format': output_format})

    # Set libguestfs environment variable
    os.environ['LIBGUESTFS_BACKEND'] = 'direct'
    try:
        # Execute virt-v2v command
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise exceptions.TestFail("Convert VM failed")

        # Import the VM to oVirt Data Center from export domain, and start it
        if not utils_v2v.import_vm_to_ovirt(
                params, address_cache, timeout=v2v_timeout):
            raise exceptions.TestError("Import VM failed")

        # Check all checkpoints after convert
        vmchecker = VMChecker(test, params, env)
        ret = vmchecker.run()
        if len(ret) == 0:
            logging.info("All checkpoints passed")
        else:
            raise exceptions.TestFail("%d checkpoints failed: %s" %
                                      (len(ret), ret))
    finally:
        vmcheck = utils_v2v.VMCheck(test, params, env)
        vmcheck.cleanup()
        if v2v_sasl:
            v2v_sasl.cleanup()
        if hypervisor == "esx":
            os.remove(vpx_pwd_file)
        if hypervisor == "xen":
            process.run("ssh-agent -k")
Example #26
        if params.get("medium") == "import":
            try:
                vm.login()
                break
            except (remote.LoginError, Exception):
                pass

        if migrate_background:
            vm.migrate(timeout=mig_timeout, protocol=mig_protocol)
        else:
            time.sleep(1)
    else:
        logging.warn("Timeout elapsed while waiting for install to finish ")
        copy_images()
        raise exceptions.TestFail(
            "Timeout elapsed while waiting for install to "
            "finish")

    logging.debug('cleaning up threads and mounts that may be active')
    global _url_auto_content_server_thread
    global _url_auto_content_server_thread_event
    if _url_auto_content_server_thread is not None:
        _url_auto_content_server_thread_event.set()
        _url_auto_content_server_thread.join(3)
        _url_auto_content_server_thread = None
        utils_disk.cleanup(unattended_install_config.cdrom_cd1_mount)

    global _unattended_server_thread
    global _unattended_server_thread_event
    if _unattended_server_thread is not None:
        _unattended_server_thread_event.set()
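The cleanup above assumes the server threads follow a poll-an-Event loop so that set() plus join(3) shuts them down promptly; a minimal sketch of that pattern (names are illustrative):

import threading
import time

def start_worker(poll_once):
    # The worker loops until its Event is set, so the shutdown code
    # above -- event.set(); thread.join(3) -- makes it exit promptly.
    stop = threading.Event()

    def worker():
        while not stop.is_set():
            poll_once()        # e.g. serve one HTTP request
            time.sleep(0.1)

    thread = threading.Thread(target=worker)
    thread.start()
    return thread, stop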
Example #27
def run(test, params, env):
    """
    Run video in Windows guest
    1) Boot guest with the device.
    2) Check if wmplayer is installed by default
    3) Install kmplayer if wmplayer is not installed
    4) Run video

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def check_wmplayer_installed(session):
        """
        Check if wmplayer is installed

        :param session: VM session
        :return: return wmplayer.exe path
        """
        error_context.context("Check if wmplayer is installed", logging.info)
        install_path = params.get("wmplayer_path")
        check_cmd = 'dir "%s"|findstr /I wmplayer'
        check_cmd = params.get("wmplayer_check_cmd", check_cmd) % install_path
        if session.cmd_status(check_cmd) == 0:
            return install_path

    def check_kmplayer_installed(session):
        """
        Check if kmplayer is installed

        :param session: VM session
        :return: return kmplayer.exe path
        """
        error_context.context("Check if kmplayer is installed", logging.info)
        install_path = params.get("kmplayer_path")
        check_cmd = 'dir "%s"|findstr /I kmplayer'
        check_cmd = params.get("kmplayer_check_cmd", check_cmd) % install_path
        if session.cmd_status(check_cmd) != 0:
            kmplayer_install(session)
        return install_path

    def kmplayer_install(session):
        """
        Install kmplayer

        :param session: VM session
        """
        error_context.context("Install kmplayer ...", logging.info)
        guest_name = params["guest_name"]
        alias_map = params["guest_alias"]
        guest_list = dict([x.split(":") for x in alias_map.split(",")])
        guest_name = guest_list[guest_name]

        install_cmd = params["kmplayer_install_cmd"] % guest_name
        install_cmd = utils_misc.set_winutils_letter(session, install_cmd)
        s, o = session.cmd_status_output(install_cmd, timeout=240)
        if s != 0:
            raise exceptions.TestError("Failed to install kmplayer %s" % o)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    wmplayer = check_wmplayer_installed(session)
    if wmplayer:
        video_player = wmplayer
        if params.get("wmplayer_reg_cmd"):
            logging.info("Update regedit")
            session.cmd(params.get("wmplayer_reg_cmd"))
    else:
        kmplayer = check_kmplayer_installed(session)
        video_player = kmplayer

    video_url = params["video_url"]
    play_video_cmd = params["play_video_cmd"] % (video_player, video_url)
    error_context.context("Play video", logging.info)
    try:
        session.cmd(play_video_cmd, timeout=240)
        time.sleep(float(params.get("time_for_video", 240)))
    except Exception as details:
        raise exceptions.TestFail(details)
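The guest_alias lookup in kmplayer_install() parses a comma-separated mapping into a dict; on illustrative data it behaves like this:

# e.g. a config value of guest_alias = "win7:W7,win10:W10" would yield:
alias_map = "win7:W7,win10:W10"
guest_list = dict(x.split(":") for x in alias_map.split(","))
assert guest_list["win10"] == "W10"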
Example #28
    def setup_cdrom(self):
        """
        Mount cdrom and copy vmlinuz and initrd.img.
        """
        error_context.context(
            "Copying vmlinuz and initrd.img from install cdrom %s" %
            self.cdrom_cd1)
        if not os.path.isdir(self.image_path):
            os.makedirs(self.image_path)

        if (self.params.get('unattended_delivery_method')
                in ['integrated', 'url']):
            i = iso9660.Iso9660Mount(self.cdrom_cd1)
            self.cdrom_cd1_mount = i.mnt_dir
        else:
            i = iso9660.iso9660(self.cdrom_cd1)

        if i is None:
            raise exceptions.TestFail("Could not instantiate an iso9660 class")

        i.copy(os.path.join(self.boot_path, os.path.basename(self.kernel)),
               self.kernel)
        assert (os.path.getsize(self.kernel) > 0)
        i.copy(os.path.join(self.boot_path, os.path.basename(self.initrd)),
               self.initrd)
        assert (os.path.getsize(self.initrd) > 0)

        if self.unattended_file.endswith('.preseed'):
            self.preseed_initrd()

        if self.params.get("vm_type") == "libvirt":
            if self.vm.driver_type == 'qemu':
                # virt-install needs the files "vmlinuz" and "initrd.img"
                os.chdir(self.image_path)
                base_kernel = os.path.basename(self.kernel)
                base_initrd = os.path.basename(self.initrd)
                if base_kernel != 'vmlinuz':
                    process.run("mv %s vmlinuz" % base_kernel, verbose=DEBUG)
                if base_initrd != 'initrd.img':
                    process.run("mv %s initrd.img" % base_initrd,
                                verbose=DEBUG)
                if (self.params.get('unattended_delivery_method') !=
                        'integrated'):
                    i.close()
                    utils_disk.cleanup(self.cdrom_cd1_mount)
            elif ((self.vm.driver_type == 'xen')
                  and (self.params.get('hvm_or_pv') == 'pv')):
                logging.debug("starting unattended content web server")

                self.url_auto_content_port = utils_misc.find_free_port(
                    8100, 8199, self.url_auto_content_ip)

                start_auto_content_server_thread(self.url_auto_content_port,
                                                 self.cdrom_cd1_mount)

                self.medium = 'url'
                self.url = (
                    'http://%s:%s' %
                    (self.url_auto_content_ip, self.url_auto_content_port))

                pxe_path = os.path.join(os.path.dirname(self.image_path),
                                        'xen')
                if not os.path.isdir(pxe_path):
                    os.makedirs(pxe_path)

                pxe_kernel = os.path.join(pxe_path,
                                          os.path.basename(self.kernel))
                pxe_initrd = os.path.join(pxe_path,
                                          os.path.basename(self.initrd))
                process.run("cp %s %s" % (self.kernel, pxe_kernel))
                process.run("cp %s %s" % (self.initrd, pxe_initrd))

                if 'repo=cdrom' in self.kernel_params:
                    # Red Hat
                    self.kernel_params = re.sub(
                        r'repo=[:\w\d/]*', 'repo=http://%s:%s' %
                        (self.url_auto_content_ip, self.url_auto_content_port),
                        self.kernel_params)
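The repo= rewrite at the end of setup_cdrom() is easiest to see on concrete data (the IP and port are placeholders):

import re

kernel_params = "initrd=initrd.img repo=cdrom:/dev/sr0 quiet"
new_params = re.sub(r'repo=[:\w\d/]*',
                    'repo=http://192.168.122.1:8100', kernel_params)
# new_params == "initrd=initrd.img repo=http://192.168.122.1:8100 quiet"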
Example #29
def run(test, params, env):
    """
    Qemu virtio-rng device test:
    1) boot guest with virtio-rng device
    2) check host random device opened by qemu (optional)
    3) enable driver verifier in guest
    4) check device using right driver in guest
    5) read random data from guest

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def is_dev_used_by_qemu(dev_file, vm_pid):
        """
        Check host random device opened by qemu.

        :param dev_file: host random device name.
        :param vm_pid: qemu process ID.
        :return: Match objects or None.
        """
        lsof_cmd = "lsof %s" % dev_file
        output = process.system_output(lsof_cmd, ignore_status=True)
        return re.search(r"\s+%s\s+" % vm_pid, output, re.M)

    def check_driver_status(session, check_cmd, driver_id):
        """
        :param session: VM session
        :param check_cmd: cmd to check driver status
        :param driver_id: driver id
        """
        check_cmd = check_cmd.replace("DRIVER_ID", driver_id)
        status, output = session.cmd_status_output(check_cmd)
        if "disabled" in output:
            raise exceptions.TestFail("Driver is disable")

    def get_driver_id(session, cmd, pattern):
        """
        :param session: VM session
        :param cmd: cmd to get driver id
        :param pattern: driver id pattern
        """
        output = session.cmd_output(cmd)
        driver_id = re.findall(pattern, output)
        if not driver_id:
            raise exceptions.TestFail("Didn't find driver info from guest %s" %
                                      output)
        driver_id = driver_id[0]
        driver_id = '^&'.join(driver_id.split('&'))
        return driver_id

    rng_data_rex = params.get("rng_data_rex", r".*")
    dev_file = params.get("filename_passthrough")
    timeout = float(params.get("login_timeout", 360))
    rng_dll_register_cmd = params.get("rng_dll_register_cmd")
    read_rng_timeout = float(params.get("read_rng_timeout", "360"))
    cmd_timeout = float(params.get("session_cmd_timeout", "360"))
    driver_name = params["driver_name"]
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    vm_pid = vm.get_pid()

    if dev_file:
        error_context.context("Check '%s' used by qemu" % dev_file,
                              logging.info)
        if not is_dev_used_by_qemu(dev_file, vm_pid):
            msg = "Qemu (pid=%d) not using host passthrough " % vm_pid
            msg += "device '%s'" % dev_file
            raise exceptions.TestFail(msg)
    session = vm.wait_for_login(timeout=timeout)

    if params["os_type"] == "windows":
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name, timeout)
        error_context.context("Check driver status", logging.info)
        session = vm.wait_for_login(timeout=timeout)
        driver_id_cmd = utils_misc.set_winutils_letter(session,
                                                       params["driver_id_cmd"])
        driver_id = get_driver_id(session, driver_id_cmd,
                                  params["driver_id_pattern"])
        if params.get("driver_check_cmd"):
            driver_check_cmd = utils_misc.set_winutils_letter(
                session, params.get("driver_check_cmd"))
            check_driver_status(session, driver_check_cmd, driver_id)
    else:
        error_context.context("verify virtio-rng device driver", logging.info)
        verify_cmd = params["driver_verifier_cmd"]
        try:
            output = session.cmd_output_safe(verify_cmd, timeout=cmd_timeout)
        except aexpect.ShellTimeoutError:
            err = "%s timeout, pls check if it's a product bug" % verify_cmd
            raise exceptions.TestFail(err)

        if not re.search(r"%s" % driver_name, output, re.M):
            msg = "Verify device driver failed, "
            msg += "guest report driver is %s, " % output
            msg += "expect is '%s'" % driver_name
            raise exceptions.TestFail(msg)

    error_context.context("Read virtio-rng device to get random number",
                          logging.info)
    read_rng_cmd = utils_misc.set_winutils_letter(session,
                                                  params["read_rng_cmd"])

    if rng_dll_register_cmd:
        logging.info("register 'viorngum.dll' into system")
        session.cmd(rng_dll_register_cmd, timeout=120)

    if params.get("test_duration"):
        start_time = time.time()
        while (time.time() - start_time) < float(params.get("test_duration")):
            output = session.cmd_output(read_rng_cmd, timeout=read_rng_timeout)
            if len(re.findall(rng_data_rex, output, re.M)) < 2:
                raise exceptions.TestFail("Unable to read random numbers from"
                                          "guest: %s" % output)
    else:
        output = session.cmd_output(read_rng_cmd, timeout=read_rng_timeout)
        if len(re.findall(rng_data_rex, output, re.M)) < 2:
            raise exceptions.TestFail("Unable to read random numbers from"
                                      "guest: %s" % output)
    session.close()
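A stand-alone version of the is_dev_used_by_qemu() check, using only the standard library plus the lsof binary (a sketch; the test's process.system_output wrapper adds logging and status handling on top of this):

import re
import subprocess

def dev_used_by_pid(dev_file, pid):
    # lsof lists every process holding the device open; grep its
    # output for the qemu process id, as is_dev_used_by_qemu() does.
    out = subprocess.run(["lsof", dev_file], capture_output=True,
                         text=True).stdout
    return re.search(r"\s+%s\s+" % pid, out, re.M) is not None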
Example #30
def run(test, params, env):
    """
    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(
        conf_path=mpath_conf_path, replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn, first_online_hba,
                              pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            new_vhba_scsibus = re.sub("\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                raise exceptions.TestFail(
                    "block device not found with scsi_%s" % new_vhba_scsibus)
            first_blk_dev = new_blks[0]
            utils_misc.wait_for(lambda: get_symbols_by_blk(first_blk_dev),
                                timeout=_TIMEOUT)
            lun_sl = get_symbols_by_blk(first_blk_dev)
            if not lun_sl:
                raise exceptions.TestFail("lun symbolic links not found under "
                                          "/dev/disk/by-path/ for blk dev %s" %
                                          first_blk_dev)
            lun_dev = lun_sl[0]
            path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": first_online_hba,
                "scsi_wwnn": wwnn,
                "scsi_wwpn": wwpn
            })
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT * 2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not sucessfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT * 5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(
                    set(cur_mpath_devs).difference(set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub("\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s",
                                              new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(lambda: get_symbols_by_blk(first_blk_dev),
                                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail(
                        "lun symbolic links not found in "
                        "/dev/disk/by-path/ for %s" % first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass"
                                           "virtual disk to VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            except process.CmdError as detail:
                raise exceptions.TestFail(
                    "Failed to create qcow2 on blk dev: %s" % detail)
        else:
            raise exceptions.TestFail("Don't have a vaild path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name,
                                         vol_check=True,
                                         timeout=_TIMEOUT * 3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {
                'type_name': disk_type,
                'target_dev': device_target,
                'target_bus': target_bus,
                'source_pool': pool_name,
                'source_volume': test_vol,
                'driver_type': driver_type
            }
        else:
            disk_params = {
                'type_name': disk_type,
                'device': disk_device,
                'driver_name': driver_name,
                'driver_type': driver_type,
                'source_file': path_to_blk,
                'target_dev': device_target,
                'target_bus': target_bus
            }
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        # append the new disk and write the devices list back to the XML
        devices = vmxml.devices
        devices.append(new_disk)
        vmxml.devices = devices
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshots for now, so
        # only check this with an internal snapshot.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s", vm_name,
                          snapshot_name)
            utlv.check_exit_status(
                virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s", file_existence,
                          file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/before_snapshot.txt")
            logging.debug("file eixst = %s, file content = %s", file_existence,
                          file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s", snap,
                          options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s",
                                      snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s", detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
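The qemu-img create step above can be verified the same way the suite's check_image() helpers do; a minimal sketch built on qemu-img info (the parsing assumes the human-readable output format):

import subprocess

def image_format(path):
    # qemu-img info prints a "file format: qcow2" line for the image
    # created on the block device above.
    out = subprocess.run(["qemu-img", "info", path],
                         capture_output=True, text=True).stdout
    for line in out.splitlines():
        if line.startswith("file format:"):
            return line.split(":", 1)[1].strip()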