Example #1
 def recover(self, params=None):
     """
     Recover test environment
     """
     cpu_enable = bool(self.cpu_status)
     utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
     tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
     tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
     if os.path.exists(tmp_c_file):
         os.remove(tmp_c_file)
     if os.path.exists(tmp_exe_file):
         os.remove(tmp_exe_file)
     if 'memory_pid' in params:
         pid = int(params.get('memory_pid'))
         utils_misc.safe_kill(pid, signal.SIGKILL)
         process.run("swapon -a", shell=True)
     if 'cpu_pid' in params:
         pid = int(params.get('cpu_pid'))
         utils_misc.safe_kill(pid, signal.SIGKILL)
         tmp_sh_file = params.get("tmp_sh_file")
         if os.path.exists(tmp_sh_file):
             os.remove(tmp_sh_file)
     virsh.destroy(self.vm_name)
     if len(self.snp_list) < len(self.current_snp_list):
         self.diff_snp_list = list(set(self.current_snp_list) -
                                   set(self.snp_list))
         for item in self.diff_snp_list:
             virsh.snapshot_delete(self.vm_name, item)
     remove_machine_cgroup()
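
Every example on this page centers on utils_misc.safe_kill. A minimal sketch of what such a helper is assumed to look like (os.kill wrapped in a try/except; the int() coercion is an assumption so that pids parsed from shell output as strings are accepted too):

import os

def safe_kill(pid, sig):
    """
    Try to send a signal to a process that may or may not exist.
    Return True on success, False on any failure (e.g. no such pid).
    """
    try:
        os.kill(int(pid), sig)  # int(): assumption, tolerates str pids
        return True
    except (OSError, ValueError, TypeError):
        return False
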
Example #2
 def result_confirm(self, params):
     """
     Confirm whether the VM installation succeeded
     """
     if self.twice_execute and self.kill_first:
         get_pid_cmd = "ps -ef | grep '%s' | grep qemu-kvm | grep -v grep"\
                       % self.vm_name
         result = utils.run(get_pid_cmd, ignore_status=True)
         if result.exit_status:
             raise error.TestFail("First install failed!")
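         # In 'ps -ef' output the second whitespace-separated field is the PID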
         install_pid = result.stdout.strip().split()[1]
         utils_misc.safe_kill(int(install_pid), signal.SIGKILL)
     self.td.join()
     if self.read_only:
         if virsh.domain_exists(self.vm_name):
             raise error.TestFail("Domain '%s' should not exist"
                                  % self.vm_name)
         os.chmod(self.image_path,
                  stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
     else:
         if not virsh.domain_exists(self.vm_name):
             raise error.TestFail("Domain '%s' should exist, no matter"
                                  " whether its installation succeeded"
                                  " or failed!" % self.vm_name)
         else:
             if not self.kill_first:
                 if self.vm.is_dead():
                     self.vm.start()
                 try:
                     self.vm.wait_for_login()
                 except remote.LoginTimeoutError as detail:
                     raise error.TestFail(str(detail))
             else:
                 virsh.remove_domain(self.vm_name)
Example #3
 def recover(self, params=None):
     """
     Recover test environment
     """
     cpu_enable = bool(self.cpu_status)
     utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
     virsh.destroy(self.vm_name)
     if 'cpu_pid' in params:
         pid = int(params.get('cpu_pid'))
         utils_misc.safe_kill(pid, signal.SIGKILL)
         tmp_sh_file = params.get("tmp_sh_file")
         if os.path.exists(tmp_sh_file):
             os.remove(tmp_sh_file)
     if os.path.exists(self.dump_file):
         os.remove(self.dump_file)
     if os.path.exists(self.dump_file1):
         os.remove(self.dump_file1)
     remove_machine_cgroup()
Example #4
 def recover(self, params):
     """
     Recover test environment
     """
     abnormal_type = params.get("abnormal_type")
     cpu_enable = bool(self.cpu_status)
     utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
     if virsh.domain_exists(self.vm_new_name):
         virsh.remove_domain(self.vm_new_name)
     if os.path.exists(self.new_image_file):
         os.remove(self.new_image_file)
     if self.twice_execute:
         if virsh.domain_exists(self.vm_new_name1):
             virsh.remove_domain(self.vm_new_name1)
         if os.path.exists(self.new_image_file1):
             os.remove(self.new_image_file1)
     if abnormal_type == "memory_lack":
         if 'memory_pid' in params:
             pid = params.get('memory_pid')
             utils_misc.safe_kill(pid, signal.SIGKILL)
             utils.run("swapon -a")
         tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
         tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
         if os.path.exists(tmp_c_file):
             os.remove(tmp_c_file)
         if os.path.exists(tmp_exe_file):
             os.remove(tmp_exe_file)
     elif abnormal_type in ["disk_lack", ""]:
         if self.selinux_enforcing:
             utils_selinux.set_status("enforcing")
         tmp_file = os.path.join(self.mount_dir, "tmp")
         if os.path.exists(tmp_file):
             os.remove(tmp_file)
         # Sometimes one umount action is not enough
         utils_misc.wait_for(lambda: utils_misc.umount(self.partition,
                                                       self.mount_dir,
                                                       self.fs_type), 120)
         if self.iscsi_dev:
             self.iscsi_dev.cleanup()
         os.rmdir(self.mount_dir)
     remove_machine_cgroup()
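
The retry-umount pattern above (and several later examples) leans on utils_misc.wait_for. A minimal sketch of the assumed polling semantics (return the callback's truthy result, or None on timeout; the first/step defaults are an assumption):

import time

def wait_for(func, timeout, first=0.0, step=1.0):
    """
    Poll func until it returns a truthy value or timeout (seconds) expires.
    Return func's value on success, None on timeout.
    """
    time.sleep(first)
    end_time = time.time() + timeout
    while time.time() < end_time:
        output = func()
        if output:
            return output
        time.sleep(step)
    return None
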
Example #5
 def result_confirm(self, params):
     """
     Confirm whether the VM installation succeeded
     """
     if self.twice_execute and self.kill_first:
         get_pid_cmd = "ps -ef | grep '%s' | grep qemu-kvm | grep -v grep"\
                       % self.vm_name
         result = process.run(get_pid_cmd, ignore_status=True, shell=True)
         if result.exit_status:
             self.test.fail("First install failed!")
         install_pid = result.stdout_text.strip().split()[1]
         utils_misc.safe_kill(int(install_pid), signal.SIGKILL)
     self.td.join()
     if self.read_only:
         if virsh.domain_exists(self.vm_name):
             self.test.fail("Domain '%s' should not exist" % self.vm_name)
         os.chmod(self.image_path,
                  stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
     else:
         if not virsh.domain_exists(self.vm_name):
             self.test.fail("Domain '%s' should exist, no matter whether"
                            " its installation succeeded or failed!" %
                            self.vm_name)
         else:
             if not self.kill_first:
                 if self.vm.is_dead():
                     self.vm.start()
                 try:
                     self.vm.wait_for_login()
                 except remote.LoginTimeoutError as detail:
                     self.test.fail(str(detail))
             else:
                 virsh.remove_domain(self.vm_name)
     if self.twice_execute or self.read_only:
         self.td1 = threading.Thread(target=unattended_install.run,
                                     args=(self.test, params, self.env))
         self.td1.start()
         self.td1.join()
         if not virsh.domain_exists(self.vm_name):
             self.test.fail("Domain '%s' installation failed!" %
                            self.vm_name)
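
Examples #2 and #5 are the autotest-era and avocado-era versions of the same check. A rough side-by-side of the APIs involved, inferred only from these two examples:

# autotest-era (Example #2)        avocado-era (Example #5)
# utils.run(cmd)                -> process.run(cmd, shell=True)
# result.stdout                 -> result.stdout_text
# raise error.TestFail(msg)     -> self.test.fail(msg)
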
Example #6
 def recover(self, params):
     """
     Recover test environment
     """
     abnormal_type = params.get("abnormal_type")
     cpu_enable = bool(self.cpu_status)
     utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
     if virsh.domain_exists(self.vm_new_name):
         virsh.remove_domain(self.vm_new_name)
     if os.path.exists(self.new_image_file):
         os.remove(self.new_image_file)
     if self.twice_execute:
         if virsh.domain_exists(self.vm_new_name1):
             virsh.remove_domain(self.vm_new_name1)
         if os.path.exists(self.new_image_file1):
             os.remove(self.new_image_file1)
     if abnormal_type == "memory_lack":
         if 'memory_pid' in params:
             pid = params.get('memory_pid')
             utils_misc.safe_kill(pid, signal.SIGKILL)
             process.run("swapon -a", shell=True)
         tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
         tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
         if os.path.exists(tmp_c_file):
             os.remove(tmp_c_file)
         if os.path.exists(tmp_exe_file):
             os.remove(tmp_exe_file)
     elif abnormal_type in ["disk_lack", ""]:
         if self.selinux_enforcing:
             utils_selinux.set_status("enforcing")
         tmp_file = os.path.join(self.mount_dir, "tmp")
         if os.path.exists(tmp_file):
             os.remove(tmp_file)
         # Sometimes one umount action is not enough
         utils_misc.wait_for(lambda: utils_misc.umount(self.partition,
                                                       self.mount_dir,
                                                       self.fs_type), 120)
         if self.iscsi_dev:
             self.iscsi_dev.cleanup()
         os.rmdir(self.mount_dir)
     remove_machine_cgroup()
Example #7
 def result_confirm(self, params):
     """
     Confirm if snapshot has been created.
     """
     if 'cpu_pid' in params:
         cpu_id = params.get('cpu_pid')
         self.cgroup.cgclassify_cgroup(int(cpu_id), self.cgroup_name)
     if self.kill_first:
         # Stop this thread
         try:
             first_pid = self.cgroup.get_pids(self.cgroup_index)[1]
             utils_misc.safe_kill(int(first_pid), signal.SIGKILL)
         except IndexError:
             logging.info("Snapshot creation process in cgroup"
                          " has already finished")
     else:
         if self.td1:
             self.td1.join(self.time_out)
     self.td0.join(self.time_out)
     self.current_snp_list = virsh.snapshot_list(self.vm_name)
     if len(self.snp_list) >= len(self.current_snp_list):
         raise error.TestFail("Failed to create snapshot due to low memory!")
Example #8
 def result_confirm(self, params):
     """
     Confirm if snapshot has been created.
     """
     if 'cpu_pid' in params:
         cpu_id = params.get('cpu_pid')
         self.cgroup.cgclassify_cgroup(int(cpu_id), self.cgroup_name)
     if self.kill_first:
         # Stop this thread
         try:
             first_pid = self.cgroup.get_pids(self.cgroup_index)[1]
             utils_misc.safe_kill(int(first_pid), signal.SIGKILL)
         except IndexError:
             logging.info("Snapshot creation process in cgroup"
                          " has already finished")
     else:
         if self.td1:
             self.td1.join(self.time_out)
     self.td0.join(self.time_out)
     self.current_snp_list = virsh.snapshot_list(self.vm_name)
     if len(self.snp_list) >= len(self.current_snp_list):
         self.test.fail("Failed to create snapshot due to low memory!")
Example #9
 def result_confirm(self, params):
     """
     Confirm if dump file has been created.
     """
     if 'cpu_pid' in params:
         cpu_id = params.get('cpu_pid')
         self.cgroup.cgclassify_cgroup(int(cpu_id), self.cgroup_name)
     if self.kill_first:
         # Stop this thread
         try:
             first_pid = self.cgroup.get_pids(self.cgroup_index)[1]
             utils_misc.safe_kill(int(first_pid), signal.SIGKILL)
         except IndexError:
             logging.info("Dump process in cgroup has already finished")
     else:
         if self.td1:
             self.td1.join(self.time_out)
     self.td0.join(self.time_out)
     if not os.path.exists(self.dump_file):
         self.test.fail("Dump file %s doesn't exist!" % self.dump_file)
     if self.twice_execute and not os.path.exists(self.dump_file1):
         self.test.fail("Dump file %s doesn't exist!" % self.dump_file1)
Example #10
    def result_confirm(self, params):
        """
        Confirm whether virt-clone executed successfully
        """
        if self.kill_first:
            # Stop this thread
            first_pid = self.cgroup.get_pids(self.cgroup_index)[-1]
            utils_misc.safe_kill(int(first_pid), signal.SIGKILL)
        else:
            self.td0.join(self.time_out)
        if self.td1:
            self.td1.join(self.time_out)
        abnormal_type = params.get("abnormal_type")
        if abnormal_type == "cpu_lack":
            if not virsh.domain_exists(self.vm_new_name):
                self.test.fail("Clone '%s' failed" % self.vm_new_name)
            else:
                result = virsh.start(self.vm_new_name, ignore_status=True)
                if result.exit_status:
                    self.test.fail("Cloned domain cannot be started!")
        elif abnormal_type == "disk_lack":
            if virsh.domain_exists(self.vm_new_name):
                self.test.fail("Clone '%s' succeeded but was expected"
                               " to fail!" % self.vm_new_name)
        else:
            if self.twice_execute and not self.kill_first:
                if virsh.domain_exists(self.vm_new_name):
                    self.test.fail("Clone '%s' succeeded but was"
                                   " expected to fail!" % self.vm_new_name)
                if virsh.domain_exists(self.vm_new_name1):
                    self.test.fail("Clone '%s' succeeded but was"
                                   " expected to fail!" % self.vm_new_name1)

            elif self.twice_execute and self.kill_first:
                if not virsh.domain_exists(self.vm_new_name):
                    self.test.fail("Clone '%s' failed!"
                                   % self.vm_new_name)
Example #11
    def result_confirm(self, params):
        """
        Confirm whether virt-clone executed successfully
        """
        if self.kill_first:
            # Stop this thread
            first_pid = self.cgroup.get_pids(self.cgroup_index)[-1]
            utils_misc.safe_kill(int(first_pid), signal.SIGKILL)
        else:
            self.td0.join(self.time_out)
        if self.td1:
            self.td1.join(self.time_out)
        abnormal_type = params.get("abnormal_type")
        if abnormal_type == "cpu_lack":
            if not virsh.domain_exists(self.vm_new_name):
                raise error.TestFail("Clone '%s' failed" % self.vm_new_name)
            else:
                result = virsh.start(self.vm_new_name, ignore_status=True)
                if result.exit_status:
                    raise error.TestFail("Cloned domain cannot be started!")
        elif abnormal_type == "disk_lack":
            if virsh.domain_exists(self.vm_new_name):
                raise error.TestFail("Clone '%s' succeeded but was"
                                     " expected to fail!" % self.vm_new_name)
        else:
            if self.twice_execute and not self.kill_first:
                if virsh.domain_exists(self.vm_new_name):
                    raise error.TestFail("Clone '%s' succeeded but was"
                                         " expected to fail!" % self.vm_new_name)
                if virsh.domain_exists(self.vm_new_name1):
                    raise error.TestFail("Clone '%s' succeeded but was"
                                         " expected to fail!" % self.vm_new_name1)

            elif self.twice_execute and self.kill_first:
                if not virsh.domain_exists(self.vm_new_name):
                    raise error.TestFail("Clone '%s' failed!"
                                         % self.vm_new_name)
Example #12
 def result_confirm(self, params):
     """
     Confirm if dump file has been created.
     """
     if 'cpu_pid' in params:
         cpu_id = params.get('cpu_pid')
         self.cgroup.cgclassify_cgroup(int(cpu_id), self.cgroup_name)
     if self.kill_first:
         # Stop this thread
         try:
             first_pid = self.cgroup.get_pids(self.cgroup_index)[1]
             utils_misc.safe_kill(int(first_pid), signal.SIGKILL)
         except IndexError:
             logging.info("Dump process in cgroup has already finished")
     else:
         if self.td1:
             self.td1.join(self.time_out)
     self.td0.join(self.time_out)
     if not os.path.exists(self.dump_file):
         self.test.fail("Dump file %s doesn't exist!"
                        % self.dump_file)
     if self.twice_execute and not os.path.exists(self.dump_file1):
         self.test.fail("Dump file %s doesn't exist!"
                        % self.dump_file1)
Example #13
    def do_cancel(self, sig=signal.SIGKILL):
        """
        Kill process during migration.

        :param sig: The signal to send
        :raise: exceptions.TestError when the kill fails
        """
        def _get_pid():
            cmd = "ps aux |grep 'virsh .*migrate' |grep -v grep |awk '{print $2}'"
            pid = process.run(cmd, shell=True).stdout_text
            return pid

        pid = utils_misc.wait_for(_get_pid, 30)
        if utils_misc.safe_kill(pid, sig):
            logging.info("Succeeded in canceling migration: [%s].", pid.strip())
        else:
            raise exceptions.TestError("Failed to cancel migration: [%s]" %
                                       pid.strip())
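
For context, a hedged sketch of how do_cancel might be driven: the migration runs in a worker thread while the main thread cancels it. All names below (vm_name, dest_uri, test_obj) are illustrative, not taken from the source:

import signal
import threading

from virttest import virsh

# Illustrative only: kick off a live migration in a worker thread,
# then cancel it from the main thread via do_cancel().
migration = threading.Thread(target=virsh.migrate,
                             args=(vm_name, dest_uri, "--live"))
migration.start()
test_obj.do_cancel(sig=signal.SIGTERM)  # test_obj: the helper instance
migration.join()
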
Example #14
def run(test, params, env):
    """
    Test command: virsh net-dhcp-leases

    1. Create a new network and run virsh command to check dhcp leases info.
    2. Attach an interface before or after starting the domain, then check
       the dhcp leases info.
    3. Clean the environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    net_name = params.get("net_name", "default")
    net_option = params.get("net_option", "")
    status_error = "yes" == params.get("status_error", "no")
    prepare_net = "yes" == params.get("prepare_net", "yes")
    hotplug_iface = "yes" == params.get("hotplug_interface", "no")
    filter_by_mac = "yes" == params.get("filter_by_mac", "no")
    invalid_mac = "yes" == params.get("invalid_mac", "no")
    expect_msg = params.get("leases_err_msg")
    # upstream expect msg may change on new libvirt
    new_expect_msg = params.get("new_leases_err_msg")
    range_lease = eval(params.get("range_lease", "None"))
    host_lease = eval(params.get("host_lease", "None"))
    host = eval(params.get("host", "None"))
    invalid_lease = "yes" == params.get("invalid_lease", "no")
    blank_lease = "yes" == params.get("blank_lease", "no")
    if (host_lease
            or range_lease) and not libvirt_version.version_compare(6, 2, 0):
        test.cancel(
            "Not supported: lease settings require libvirt >= 6.2.0!")
    # Generate a random string as the MAC address
    nic_mac = None
    if invalid_mac:
        nic_mac = utils_misc.generate_random_string(17)

    # Command won't fail on old libvirt
    if not libvirt_version.version_compare(1, 3, 1) and invalid_mac:
        logging.debug("Reset case to positive as BZ#1261432")
        status_error = False

    def create_network():
        """
        Create a network
        """
        net_ip_addr = params.get("net_ip_addr", "192.168.200.1")
        net_ip_netmask = params.get("net_ip_netmask", "255.255.255.0")
        net_dhcp_start = params.get("net_dhcp_start", "192.168.200.2")
        net_dhcp_end = params.get("net_dhcp_end", "192.168.200.254")
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'mode': "nat"}
        dhcp_range = network_xml.RangeXML()
        dhcp_range.attrs = {'start': net_dhcp_start, "end": net_dhcp_end}
        ipxml = network_xml.IPXML()
        if range_lease:
            dhcp_range.lease_attrs = range_lease
        ipxml.address = net_ip_addr
        ipxml.netmask = net_ip_netmask
        ipxml.dhcp_ranges = dhcp_range
        if host:
            new_host = network_xml.DhcpHostXML()
            new_host.attrs = host
            new_host.lease_attrs = host_lease
            ipxml.hosts = [new_host]
        netxml.set_ip(ipxml)
        netxml.create()

    def get_net_dhcp_leases(output):
        """
        Return the dhcp lease info in a list
        """
        leases = []
        lines = output.splitlines()
        if not lines:
            return leases
        try:
            pat = r"\S+\ ?\S+\ ?\S+\ ?\S+|\S+"
            keys = re.findall(pat, lines[0])
            for line in lines[2:]:
                values = re.findall(pat, line)
                leases.append(dict(list(zip(keys, values))))
            return leases
        except Exception:
            test.error("Failed to parse output: %s" % output)
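
    # For reference, the `virsh net-dhcp-leases` output that the regex in
    # get_net_dhcp_leases() is meant to parse looks roughly like this
    # (illustrative values; exact columns may vary across libvirt versions):
    #
    #  Expiry Time          MAC address        Protocol  IP address        Hostname  Client ID or DUID
    #  --------------------------------------------------------------------------------------------
    #  2021-01-18 02:15:35  52:54:00:12:34:56  ipv4      192.168.200.2/24  vm1       01:52:54:00:12:34:56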

    def check_lease_time(ex_time, duration):
        """
        Compare the expiry time from the virsh cmd output and the setting
        :param ex_time: text, the expiry time taken from the net-dhcp-leases output
        :param duration: dict, the configured expiry time
        """
        now_time = datetime.now()
        # convert the lease time from str to the datetime structure
        # lease is in format like: 2021-01-18 02:15:35
        get_ex_time = datetime.strptime(ex_time, '%Y-%m-%d %H:%M:%S')
        if duration['expiry'] == '0':
            # expiry '0' means the lease should already be expired
            if get_ex_time > now_time:
                test.fail("The expiry time is not correct!!")
            return None
        if 'unit' not in duration:
            duration['unit'] = 'minutes'
        if duration['unit'] == 'seconds':
            dur_sec = int(duration['expiry'])
        elif duration['unit'] == 'hours':
            dur_sec = int(duration['expiry']) * 3600
        else:
            # default unit is minutes,
            # e.g. expiry='2', unit='minutes' -> dur_sec=120
            dur_sec = int(duration['expiry']) * 60

        delta = get_ex_time - now_time
        logging.debug(
            "The get_ex_time is %s, the now_time is %s, "
            "duration is %s", get_ex_time, now_time, duration)
        # the reported expiry must fall within [dur_sec - 30, dur_sec]
        # seconds from now
        if delta > timedelta(seconds=dur_sec):
            test.fail("Expiry time %s is longer than the setting %s!!" %
                      (delta, timedelta(seconds=dur_sec)))
        elif delta < timedelta(seconds=(dur_sec - 30)):
            test.fail("Expiry time %s is shorter than the setting %s" %
                      (delta, timedelta(seconds=dur_sec)))
        else:
            logging.info("Got expected lease info.")
        return None

    def get_ip_by_mac(mac_addr, try_dhclint=False, timeout=120):
        """
        Get an interface's IP address by its MAC address. If try_dhclint is
        True, then try to allocate an IP address for the interface.
        """
        session = vm.wait_for_login(login_nic_index,
                                    timeout=timeout,
                                    serial=True)

        def f():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = utils_misc.wait_for(f, 10)
            if ip_addr is None:
                iface_name = utils_net.get_linux_ifname(session, mac_addr)
                if try_dhclint:
                    session.cmd("dhclient %s" % iface_name)
                    ip_addr = utils_misc.wait_for(f, 10)
                else:
                    # No IP for the interface, just log the interface name
                    logging.warning(
                        "Found '%s' with MAC address '%s', "
                        "but it has no IP address", iface_name, mac_addr)
        finally:
            session.close()
        return ip_addr

    def check_net_lease(net_leases, expected_find=True):
        """
        Check the dhcp lease info.
        """
        if not net_leases:
            if expected_find:
                test.fail("Lease info is empty")
            else:
                logging.debug("No dhcp lease info found, as expected")
        else:
            if not expected_find:
                test.fail("Found unexpected dhcp lease info: %s" % net_leases)
        find_mac = False
        for net_lease in net_leases:
            net_mac = net_lease['MAC address']
            net_ip = net_lease['IP address'][:-3]
            expiry_time = net_lease['Expiry Time']
            if vm_xml.VMXML.get_iface_by_mac(vm_name, net_mac):
                find_mac = True
                logging.debug("Found '%s' in domain XML", net_mac)
            else:
                logging.debug("Did not find '%s' in domain XML", net_mac)
                continue
            iface_ip = get_ip_by_mac(net_mac)
            if iface_ip and iface_ip != net_ip:
                test.fail("Address '%s' is not expected" % iface_ip)
            # Check if the lease time is correct
            if libvirt_version.version_compare(6, 2, 0):
                if host_lease and net_mac == host['mac']:
                    check_lease_time(expiry_time, host_lease)
                elif range_lease:
                    check_lease_time(expiry_time, range_lease)
        if expected_find and not find_mac:
            test.fail("No matching MAC address found")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if vm.is_alive():
        vm.destroy(gracefully=False)
    login_nic_index = 0
    new_nic_index = 0
    # Clean up dirty dnsmasq processes: first get all networks, then destroy
    # and undefine every network except the default one
    net_state = virsh.net_state_dict(only_names=True)
    logging.debug(
        "current networks: %s, destroy and undefine networks "
        "except default!", net_state)
    for net in net_state:
        if net != "default":
            virsh.net_destroy(net)
            virsh.net_undefine(net)
    cmd = "ps aux|grep dnsmasq|grep -v grep | grep -v default | awk '{print $2}'"
    pid_list = process.run(cmd, shell=True).stdout_text.strip().splitlines()
    logging.debug(pid_list)
    for pid in pid_list:
        utils_misc.safe_kill(pid, signal.SIGKILL)
    try:
        # Create new network
        if prepare_net:
            nets_old = virsh.net_state_dict()
            if net_name in list(nets_old.keys()):
                virsh.net_destroy(net_name)
                virsh.net_undefine(net_name)
            create_network()
        nets = virsh.net_state_dict()
        if net_name not in list(nets.keys()) and not status_error:
            test.error("Network '%s' not found" % net_name)
        expected_find = False
        result = virsh.net_dhcp_leases(net_name,
                                       mac=nic_mac,
                                       options=net_option,
                                       debug=True,
                                       ignore_status=True)
        utlv.check_exit_status(result, status_error)
        lease = get_net_dhcp_leases(result.stdout.strip())
        check_net_lease(lease, expected_find)
        if not status_error:
            if host:
                iface_mac = host['mac']
            else:
                iface_mac = utils_net.generate_mac_address_simple()
            if filter_by_mac:
                nic_mac = iface_mac
            op = "--type network --model virtio --source %s --mac %s" \
                 % (net_name, iface_mac)
            nic_params = {
                'mac': iface_mac,
                'nettype': 'bridge',
                'ip_version': 'ipv4'
            }
            login_timeout = 120
            if not hotplug_iface:
                op += " --config"
                virsh.attach_interface(vm_name,
                                       option=op,
                                       debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                vm.start()
                new_nic_index = vm.get_nic_index_by_mac(iface_mac)
                if new_nic_index > 0:
                    login_nic_index = new_nic_index
            else:
                vm.start()
                # wait for the VM to start before hotplugging the interface
                vm.wait_for_serial_login()
                virsh.attach_interface(vm_name,
                                       option=op,
                                       debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                # As the VM has already started, the login timeout can be shortened
                login_timeout = 10
            new_interface_ip = get_ip_by_mac(iface_mac,
                                             try_dhclint=True,
                                             timeout=login_timeout)
            if new_interface_ip:
                expected_find = True
            result = virsh.net_dhcp_leases(net_name,
                                           mac=nic_mac,
                                           debug=False,
                                           ignore_status=True)
            utlv.check_exit_status(result, status_error)
            lease = get_net_dhcp_leases(result.stdout.strip())
            check_net_lease(lease, expected_find)
        else:
            if expect_msg:
                utlv.check_result(result, expect_msg.split(';'))
    except LibvirtXMLError as e:
        if status_error and invalid_lease:
            if blank_lease and libvirt_version.version_compare(7, 1, 0):
                expect_msg = new_expect_msg
            if expect_msg not in e.details:
                test.fail("Network creation failed unexpectedly: %s" % e.details)
            else:
                logging.debug("Network creation failed as expected: %s", e.details)
    finally:
        # Delete the new attached interface
        if new_nic_index > 0:
            vm.del_nic(new_nic_index)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        if prepare_net:
            virsh.net_destroy(net_name)
Example #15
def run(test, params, env):
    """
    Run yonit bitmap benchmark in Windows guests, especially win7 32bit,
    for regression test of BZ #556455.

    Run the benchmark (infinite) loop in the background using
    run_guest_test_background, and detect the existence of the process
    in guest.

      1. If the process exits before test timeout, that means the benchmark
      exits unexpectedly, and BSOD may have happened, which can be verified
      from the screenshot saved by virt-test.
      2. If only the timeout happens, this test passes, i.e. the guest stays
      healthy while running the benchmark for the given time.

    :param test: Kvm test object
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    sec_per_day = 86400  # seconds per day
    test_timeout = int(params.get("test_timeout", sec_per_day))
    login_timeout = int(params.get("login_timeout", 360))

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    session = vm.wait_for_login(timeout=login_timeout)

    # Since the benchmark runs into an infinite loop, the background process
    # will never return, unless we get a BSOD.
    #
    # We set the test_timeout of the background guest_test much bigger than
    # that of this test to make sure that the background benchmark is still
    # running while the foreground detection is ongoing.
    error_context.context("run benchmark test in background", logging.info)
    params["test_timeout"] = test_timeout * 2 + sec_per_day
    logging.info("set Yonit bitmap test timeout to"
                 " %ss" % params["test_timeout"])
    pid = guest_test.run_guest_test_background(test, params, env)
    if pid < 0:
        session.close()
        test.error("Could not create child process to execute "
                   "guest_test background")

    def is_yonit_benchmark_launched():
        if session.cmd_status(
                'tasklist | find /I "compress_benchmark_loop"') != 0:
            logging.debug("yonit bitmap benchmark was not found")
            return False
        return True

    error_context.context("Watching that the Yonit bitmap benchmark keeps"
                          " running until timeout", logging.info)
    try:
        # Start detecting whether the benchmark has started a few minutes
        # after the background test launched, as the download
        # will take some time.
        launch_timeout = login_timeout
        if utils_misc.wait_for(is_yonit_benchmark_launched,
                               launch_timeout, 180, 5):
            logging.debug("Yonit bitmap benchmark was launched successfully")
        else:
            test.error("Failed to launch yonit bitmap benchmark")

        # If the benchmark exits before timeout, errors happened.
        if utils_misc.wait_for(lambda: not is_yonit_benchmark_launched(),
                               test_timeout, 60, 10):
            test.error("Yonit bitmap benchmark exited unexpectedly")
        else:
            if session.is_responsive():
                logging.info("Guest stays good until test timeout")
            else:
                test.fail("Guest is dead")
    finally:
        logging.info("Kill the background benchmark tracking process")
        utils_misc.safe_kill(pid, signal.SIGKILL)
        guest_test.wait_guest_test_background(pid)
        session.close()
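
Both this example and the next rely on guest_test.run_guest_test_background returning a pid that can later be killed (the pid < 0 check above suggests the real helper signals failure with a negative value). A minimal sketch of how such a fork-based background runner could work, as an assumption rather than the actual virt-test source:

import os

def run_test_background(run_func, *args):
    """Fork a child that runs run_func; the parent gets the child's pid."""
    pid = os.fork()
    if pid:
        return pid            # parent: caller tracks/kills this pid
    try:
        run_func(*args)       # child: run the test to completion
        os._exit(0)
    except Exception:
        os._exit(1)
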
Example #16
def run_yonit_bitmap(test, params, env):
    """
    Run yonit bitmap benchmark in Windows guests, especially win7 32bit,
    for regression test of BZ #556455.

    Run the benchmark (infinite) loop in the background using
    run_guest_test_background, and detect the existence of the process
    in guest.

      1. If the process exits before test timeout, that means the benchmark
      exits unexpectedly, and BSOD may have happened, which can be verified
      from the screenshot saved by virt-test.
      2. If only the timeout happens, this test passes, i.e. the guest stays
      healthy while running the benchmark for the given time.

    :param test: Kvm test object
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    sec_per_day = 86400  # seconds per day
    test_timeout = int(params.get("test_timeout", sec_per_day))
    login_timeout = int(params.get("login_timeout", 360))

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    session = vm.wait_for_login(timeout=login_timeout)

    # Since the benchmark runs into an infinite loop, the background process
    # will never return, unless we get a BSOD.
    #
    # We set the test_timeout of the background guest_test much bigger than
    # that of this test to make sure that the background benchmark is still
    # running while the foreground detection is ongoing.
    error.context("run benchmark test in background", logging.info)
    params["test_timeout"] = test_timeout * 2 + sec_per_day
    logging.info("set Yonit bitmap test timeout to"
                 " %ss" % params["test_timeout"])
    pid = guest_test.run_guest_test_background(test, params, env)
    if pid < 0:
        session.close()
        raise error.TestError("Could not create child process to execute "
                              "guest_test background")

    def is_yonit_benchmark_launched():
        if session.get_command_status(
                'tasklist | find /I "compress_benchmark_loop"') != 0:
            logging.debug("yonit bitmap benchmark was not found")
            return False
        return True

    error.context("Watching that the Yonit bitmap benchmark keeps running"
                  " until timeout", logging.info)
    try:
        # Start detecting whether the benchmark has started a few minutes
        # after the background test launched, as the download
        # will take some time.
        launch_timeout = login_timeout
        if utils_misc.wait_for(is_yonit_benchmark_launched, launch_timeout,
                               180, 5):
            logging.debug("Yonit bitmap benchmark was launched successfully")
        else:
            raise error.TestError("Failed to launch yonit bitmap benchmark")

        # If the benchmark exits before timeout, errors happened.
        if utils_misc.wait_for(lambda: not is_yonit_benchmark_launched(),
                               test_timeout, 60, 10):
            raise error.TestError("Yonit bitmap benchmark exited unexpectedly")
        else:
            if session.is_responsive():
                logging.info("Guest stays good until test timeout")
            else:
                raise error.TestFail("Guest is dead")
    finally:
        logging.info("Kill the background benchmark tracking process")
        utils_misc.safe_kill(pid, signal.SIGKILL)
        guest_test.wait_guest_test_background(pid)
        session.close()
Example #17
def run(test, params, env):
    """
    Test command: virsh net-dhcp-leases

    1. Create a new network and run virsh command to check dhcp leases info.
    2. Attach an interface before or after starting the domain, then check
       the dhcp leases info.
    3. Clean the environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    net_name = params.get("net_name", "default")
    net_option = params.get("net_option", "")
    status_error = "yes" == params.get("status_error", "no")
    prepare_net = "yes" == params.get("prepare_net", "yes")
    hotplug_iface = "yes" == params.get("hotplug_interface", "no")
    filter_by_mac = "yes" == params.get("filter_by_mac", "no")
    invalid_mac = "yes" == params.get("invalid_mac", "no")
    expect_msg = params.get("leases_err_msg")
    # Generate a random string as the MAC address
    nic_mac = None
    if invalid_mac:
        nic_mac = utils_misc.generate_random_string(17)

    # Command won't fail on old libvirt
    if not libvirt_version.version_compare(1, 3, 1) and invalid_mac:
        logging.debug("Reset case to positive as BZ#1261432")
        status_error = False

    def create_network():
        """
        Create a network
        """
        net_ip_addr = params.get("net_ip_addr", "192.168.200.1")
        net_ip_netmask = params.get("net_ip_netmask", "255.255.255.0")
        net_dhcp_start = params.get("net_dhcp_start", "192.168.200.2")
        net_dhcp_end = params.get("net_dhcp_end", "192.168.200.254")
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'mode': "nat"}
        ipxml = network_xml.IPXML()
        ipxml.address = net_ip_addr
        ipxml.netmask = net_ip_netmask
        ipxml.dhcp_ranges = {'start': net_dhcp_start, "end": net_dhcp_end}
        netxml.set_ip(ipxml)
        netxml.create()

    def get_net_dhcp_leases(output):
        """
        Return the dhcp lease info in a list
        """
        leases = []
        lines = output.splitlines()
        if not lines:
            return leases
        try:
            pat = r"\S+\ ?\S+\ ?\S+\ ?\S+|\S+"
            keys = re.findall(pat, lines[0])
            for line in lines[2:]:
                values = re.findall(pat, line)
                leases.append(dict(list(zip(keys, values))))
            return leases
        except Exception:
            test.error("Failed to parse output: %s" % output)

    def get_ip_by_mac(mac_addr, try_dhclint=False, timeout=120):
        """
        Get interface IP address by given MAC addrss. If try_dhclint is
        True, then try to allocate IP addrss for the interface.
        """
        session = vm.wait_for_login(login_nic_index,
                                    timeout=timeout,
                                    serial=True)

        def f():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = utils_misc.wait_for(f, 10)
            if ip_addr is None:
                iface_name = utils_net.get_linux_ifname(session, mac_addr)
                if try_dhclint:
                    session.cmd("dhclient %s" % iface_name)
                    ip_addr = utils_misc.wait_for(f, 10)
                else:
                    # No IP for the interface, just log the interface name
                    logging.warning(
                        "Found '%s' with MAC address '%s', "
                        "but it has no IP address", iface_name, mac_addr)
        finally:
            session.close()
        return ip_addr

    def check_net_lease(net_leases, expected_find=True):
        """
        Check the dhcp lease info.
        """
        if not net_leases:
            if expected_find:
                test.fail("Lease info is empty")
            else:
                logging.debug("No dhcp lease info found, as expected")
        else:
            if not expected_find:
                test.fail("Found unexpected dhcp lease info: %s" % net_leases)
        find_mac = False
        for net_lease in net_leases:
            net_mac = net_lease['MAC address']
            net_ip = net_lease['IP address'][:-3]
            if vm_xml.VMXML.get_iface_by_mac(vm_name, net_mac):
                find_mac = True
                logging.debug("Found '%s' in domain XML", net_mac)
            else:
                logging.debug("Did not find '%s' in domain XML", net_mac)
                continue
            iface_ip = get_ip_by_mac(net_mac)
            if iface_ip and iface_ip != net_ip:
                test.fail("Address '%s' is not expected" % iface_ip)
        if expected_find and not find_mac:
            test.fail("No matching MAC address found")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if vm.is_alive():
        vm.destroy(gracefully=False)
    login_nic_index = 0
    new_nic_index = 0
    # Clean up dirty dnsmasq processes: first get all networks, then destroy
    # and undefine every network except the default one
    net_state = virsh.net_state_dict(only_names=True)
    logging.debug(
        "current networks: %s, destroy and undefine networks "
        "except default!", net_state)
    for net in net_state:
        if net != "default":
            virsh.net_destroy(net)
            virsh.net_undefine(net)
    cmd = "ps aux|grep dnsmasq|grep -v grep | grep -v default | awk '{print $2}'"
    pid_list = process.run(cmd, shell=True).stdout_text.strip().splitlines()
    logging.debug(pid_list)
    for pid in pid_list:
        utils_misc.safe_kill(pid, signal.SIGKILL)
    # Create new network
    if prepare_net:
        create_network()
    nets = virsh.net_state_dict()
    if net_name not in list(nets.keys()) and not status_error:
        test.error("Network '%s' not found" % net_name)
    expected_find = False
    try:
        result = virsh.net_dhcp_leases(net_name,
                                       mac=nic_mac,
                                       options=net_option,
                                       debug=True,
                                       ignore_status=True)
        utlv.check_exit_status(result, status_error)
        lease = get_net_dhcp_leases(result.stdout.strip())
        check_net_lease(lease, expected_find)
        if not status_error:
            iface_mac = utils_net.generate_mac_address_simple()
            if filter_by_mac:
                nic_mac = iface_mac
            op = "--type network --model virtio --source %s --mac %s" \
                 % (net_name, iface_mac)
            nic_params = {
                'mac': iface_mac,
                'nettype': 'bridge',
                'ip_version': 'ipv4'
            }
            login_timeout = 120
            if not hotplug_iface:
                op += " --config"
                virsh.attach_interface(vm_name,
                                       option=op,
                                       debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                vm.start()
                new_nic_index = vm.get_nic_index_by_mac(iface_mac)
                if new_nic_index > 0:
                    login_nic_index = new_nic_index
            else:
                vm.start()
                # wait for the VM to start before hotplugging the interface
                vm.wait_for_serial_login()
                virsh.attach_interface(vm_name,
                                       option=op,
                                       debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                # As the VM has already started, the login timeout can be shortened
                login_timeout = 10
            new_interface_ip = get_ip_by_mac(iface_mac,
                                             try_dhclint=True,
                                             timeout=login_timeout)
            # Allocating an IP address for the new interface may fail, so only
            # check the result if a new IP address was obtained
            if new_interface_ip:
                expected_find = True
            result = virsh.net_dhcp_leases(net_name,
                                           mac=nic_mac,
                                           debug=False,
                                           ignore_status=True)
            utlv.check_exit_status(result, status_error)
            lease = get_net_dhcp_leases(result.stdout.strip())
            check_net_lease(lease, expected_find)
        else:
            if expect_msg:
                utlv.check_result(result, expect_msg.split(';'))
    finally:
        # Delete the new attached interface
        if new_nic_index > 0:
            vm.del_nic(new_nic_index)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        if prepare_net:
            virsh.net_destroy(net_name)
Example #18
def run(test, params, env):
    """
    Test push-mode incremental backup

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb in vm
    3. start a push mode full backup on vdb
    4. create some data on vdb in vm
    5. start a push mode incremental backup
    6. repeat step 4 and 5 as required
    7. check the full/incremental backup file data
    """

    def backup_job_done(vm_name, vm_disk):
        """
        Check if a backup job for a vm's specific disk is finished.

        :param vm_name: vm's name
        :param vm_disk: the disk to be checked, such as 'vdb'
        :return: 'True' means job finished
        """
        result = virsh.blockjob(vm_name, vm_disk, debug=True)
        if "no current block job" in result.stdout_text.strip().lower():
            return True

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    target_driver = params.get("target_driver", "qcow2")
    target_type = params.get("target_type", "file")
    target_blkdev_path = params.get("target_blkdev_path")
    target_blkdev_size = params.get("target_blkdev_size", original_disk_size)
    reuse_target_file = "yes" == params.get("reuse_target_file")
    prepare_target_file = "yes" == params.get("prepare_target_file")
    prepare_target_blkdev = "yes" == params.get("prepare_target_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    expect_backup_canceled = "yes" == params.get("expect_backup_canceled")
    tmp_dir = data_dir.get_data_dir()
    virsh_dargs = {'debug': True, 'ignore_status': True}

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            if os.path.exists(disk_path):
                os.remove(disk_path)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {"device_type": "disk",
                           "type_name": "file",
                           "driver_type": "qcow2",
                           "target_dev": original_disk_target,
                           "source_file": disk_path}
        elif original_disk_type == "ceph":
            ceph_mon_host = params.get("ceph_mon_host", "EXAMPLE_MON_HOST_AUTHX")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORT")
            ceph_pool_name = params.get("ceph_pool_name", "EXAMPLE_POOL")
            ceph_file_name = params.get("ceph_file_name", "EXAMPLE_FILE")
            ceph_disk_name = ceph_pool_name + "/" + ceph_file_name
            ceph_client_name = params.get("ceph_client_name", "EXAMPLE_CLIENT_NAME")
            ceph_client_key = params.get("ceph_client_key", "EXAMPLE_CLIENT_KEY")
            ceph_auth_user = params.get("ceph_auth_user", "EXAMPLE_AUTH_USER")
            ceph_auth_key = params.get("ceph_auth_key", "EXAMPLE_AUTH_KEY")
            auth_sec_usage_type = "ceph"

            enable_auth = "yes" == params.get("enable_auth", "yes")
            key_file = os.path.join(tmp_dir, "ceph.key")
            key_opt = ""
            # Prepare a blank value to decide whether to delete the ceph
            # config file at the end of the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_host)
            if enable_auth:
                # If enable auth, prepare a local file to save key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                                     "sec_name": "ceph_auth_secret"}
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                                           debug=True)
                    disk_params_auth = {"auth_user": ceph_auth_user,
                                        "secret_type": auth_sec_usage_type,
                                        "secret_uuid": auth_sec_uuid,
                                        "auth_in_source": True}
                else:
                    test.error("No ceph client name/key provided.")
                disk_path = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name,
                                                               ceph_mon_host,
                                                               key_file)
            ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name,
                              ceph_file_name, ceph_cfg, key_file)
            process.run("qemu-img create -f qcow2 %s %s" % (disk_path, original_disk_size),
                        shell=True, verbose=True)
            disk_params = {'device_type': 'disk',
                           'type_name': 'network',
                           "driver_type": "qcow2",
                           'target_dev': original_disk_target}
            disk_params_src = {'source_protocol': 'rbd',
                               'source_name': ceph_disk_name,
                               'source_host_name': ceph_mon_host,
                               'source_host_port': ceph_host_port}
            disk_params.update(disk_params_src)
            disk_params.update(disk_params_auth)
        else:
            test.error("The disk type '%s' not supported in this script." %
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name, disk_xml,
                                flagstr="--config", debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_path_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "push"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(backup_index - 1)

            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = target_type
                    target_params = {"attrs": {}}
                    if target_type == "file":
                        target_file_name = "target_file_%s" % backup_index
                        target_file_path = os.path.join(tmp_dir, target_file_name)
                        if os.path.exists(target_file_path):
                            os.remove(target_file_path)
                        if prepare_target_file:
                            libvirt.create_local_disk("file", target_file_path,
                                                      original_disk_size, target_driver)
                        target_params["attrs"]["file"] = target_file_path
                        backup_path_list.append(target_file_path)
                    elif target_type == "block":
                        if prepare_target_blkdev:
                            target_blkdev_path = libvirt.setup_or_cleanup_iscsi(
                                    is_setup=True, image_size=target_blkdev_size)
                        target_params["attrs"]["dev"] = target_blkdev_path
                        backup_path_list.append(target_blkdev_path)
                    else:
                        test.fail("We do not support backup target type: '%s'"
                                  % target_type)
                    logging.debug("target params: %s", target_params)
                    backup_disk_params["backup_target"] = target_params
                    driver_params = {"type": target_driver}
                    backup_disk_params["backup_driver"] = driver_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                        backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(backup_params,
                                                        backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)
            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get("checkpoint_desc",
                                                      "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(cp_params,
                                                                disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s",
                          backup_index, checkpoint_xml)

            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml

            # Create some data in vdb
            dd_count = "1"
            if expect_backup_canceled:
                # Generate more data to extend the backup job duration
                dd_count = "100"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()

            # Start backup
            if reuse_target_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name, backup_options,
                                               ignore_status=True, debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(backup_result.stderr.strip())
            # If required, do some error operations during backup job
            error_operation = params.get("error_operation")
            if error_operation:
                if "destroy_vm" in error_operation:
                    vm.destroy(gracefully=False)
                if "kill_qemu" in error_operation:
                    utils_misc.safe_kill(vm.get_pid(), signal.SIGKILL)
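                # Destroying the vm or killing qemu should cause libvirt to cancel the backup job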
                if utils_misc.wait_for(lambda: utils_backup.is_backup_canceled(vm_name),
                                       timeout=5):
                    raise utils_backup.BackupCanceledError()
                elif expect_backup_canceled:
                    test.fail("Backup job should be canceled but not.")

            # Wait for the backup job actually finished
            if not utils_misc.wait_for(
                    lambda: backup_job_done(vm_name, original_disk_target), 60):
                test.fail("Backup job not finished in 60s")

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path, original_data_file)
        process.run(cmd, shell=True, verbose=True)

        for backup_path in backup_path_list:
            if target_driver == "qcow2":
                # Clear backup image's backing file before comparison
                qemu_cmd = ("qemu-img rebase -u -f qcow2 -b '' -F qcow2 %s"
                            % backup_path)
                process.run(qemu_cmd, shell=True, verbose=True)
            if not utils_backup.cmp_backup_data(original_data_file, backup_path,
                                                backup_file_driver=target_driver):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_path))
            else:
                logging.debug("'%s' contains correct backup data", backup_path)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail("Backup failed to start: %s" % details)
    except utils_backup.BackupCanceledError as detail:
        if expect_backup_canceled:
            logging.debug("Backup canceled as expected.")
            if not vm.is_alive():
                logging.debug("Check if vm can be started again when backup "
                              "canceled.")
                vm.start()
                vm.wait_for_login().close()
        else:
            test.fail("Backup job canceled: %s" % detail)
    finally:
        # Remove checkpoints
        utils_backup.clean_checkpoints(vm_name,
                                       clean_metadata=not vm.is_alive())

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove local backup file
        if "target_file_path" in locals():
            if os.path.exists(target_file_path):
                os.remove(target_file_path)

        # Remove test disk's local image file
        if original_disk_type == "local":
            if "disk_path" in locals() and os.path.exists(disk_path):
                os.remove(disk_path)

        # Remove iscsi devices
        libvirt.setup_or_cleanup_iscsi(False)

        # Remove ceph related data
        if original_disk_type == "ceph":
            ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name,
                              ceph_file_name, ceph_cfg, key_file)
            if "auth_sec_uuid" in locals() and auth_sec_uuid:
                virsh.secret_undefine(auth_sec_uuid)
            if "ceph_cfg" in locals() and os.path.exists(ceph_cfg):
                os.remove(ceph_cfg)
            if os.path.exists(key_file):
                os.remove(key_file)
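
Side note: the comparison step above delegates to utils_backup.cmp_backup_data. Outside the test framework the same check can be approximated with `qemu-img compare`, which exits 0 when two images hold identical guest-visible data. A minimal sketch, assuming both images are qcow2; the paths are hypothetical stand-ins for files under tmp_dir:

import subprocess

def images_identical(original, backup, fmt="qcow2"):
    # qemu-img compare exits 0 for identical content, 1 for differences
    result = subprocess.run(["qemu-img", "compare", "-f", fmt, "-F", fmt,
                             original, backup])
    return result.returncode == 0

# Hypothetical paths mirroring the layout this test uses under tmp_dir
if images_identical("/tmp/original_data.qcow2", "/tmp/target_file_0"):
    print("backup data matches the original")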
Exemplo n.º 19
0
def run(test, params, env):
    """
    Test the pull-mode backup function

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 and 5
    7. check the full/incremental backup file data
    """

    # Basic case config
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    local_hostname = params.get("loal_hostname", "localhost")
    local_ip = params.get("local_ip", "127.0.0.1")
    local_user_name = params.get("local_user_name", "root")
    local_user_password = params.get("local_user_password", "redhat")
    tmp_dir = data_dir.get_tmp_dir()
    # Backup config
    scratch_type = params.get("scratch_type", "file")
    reuse_scratch_file = "yes" == params.get("reuse_scratch_file")
    prepare_scratch_file = "yes" == params.get("prepare_scratch_file")
    scratch_blkdev_path = params.get("scratch_blkdev_path")
    scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size)
    prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    expect_backup_canceled = "yes" == params.get("expect_backup_canceled")
    # NBD service config
    nbd_protocol = params.get("nbd_protocol", "unix")
    nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")
    nbd_hostname = local_hostname
    set_exportname = "yes" == params.get("set_exportname")
    set_exportbitmap = "yes" == params.get("set_exportbitmap")
    # TLS service config
    tls_enabled = "yes" == params.get("tls_enabled")
    tls_x509_verify = "yes" == params.get("tls_x509_verify")
    custom_pki_path = "yes" == params.get("custom_pki_path")
    tls_client_ip = tls_server_ip = local_ip
    tls_client_cn = tls_server_cn = local_hostname
    tls_client_user = tls_server_user = local_user_name
    tls_client_pwd = tls_server_pwd = local_user_password
    tls_provide_client_cert = "yes" == params.get("tls_provide_client_cert")
    tls_error = "yes" == params.get("tls_error")
    # LUKS config
    scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted")
    luks_passphrase = params.get("luks_passphrase", "password")

    # Cancel the test if libvirt doesn't support the related functions
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")
    if tls_enabled and not libvirt_version.version_compare(6, 6, 0):
        test.cancel("Current libvirt version doesn't support pull mode "
                    "backup with tls nbd.")

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
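        # Make sure incremental backup is enabled in the vm's xml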
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare tls env
        if tls_enabled:
            # Prepare pki
            tls_config = {
                "qemu_tls": "yes",
                "auto_recover": "yes",
                "client_ip": tls_client_ip,
                "server_ip": tls_server_ip,
                "client_cn": tls_client_cn,
                "server_cn": tls_server_cn,
                "client_user": tls_client_user,
                "server_user": tls_server_user,
                "client_pwd": tls_client_pwd,
                "server_pwd": tls_server_pwd,
            }
            if custom_pki_path:
                pki_path = os.path.join(tmp_dir, "inc_bkup_pki")
            else:
                pki_path = "/etc/pki/libvirt-backup/"
            if tls_x509_verify:
                tls_config["client_ip"] = tls_client_ip
            tls_config["custom_pki_path"] = pki_path
            tls_obj = TLSConnection(tls_config)
            tls_obj.conn_setup(True, tls_provide_client_cert)
            logging.debug("TLS certs in: %s" % pki_path)
            # Set qemu.conf
            qemu_config = LibvirtQemuConfig()
            if tls_x509_verify:
                qemu_config.backup_tls_x509_verify = True
            else:
                qemu_config.backup_tls_x509_verify = False
            if custom_pki_path:
                qemu_config.backup_tls_x509_cert_dir = pki_path
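            # Restart libvirtd so the qemu.conf changes take effect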
            utils_libvirtd.Libvirtd().restart()

        # Prepare libvirt secret
        if scratch_luks_encrypted:
            utils_secret.clean_up_secrets()
            luks_secret_uuid = libvirt.create_secret(params)
            virsh.secret_set_value(luks_secret_uuid,
                                   luks_passphrase,
                                   encode=True,
                                   debug=True)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
        elif original_disk_type == "iscsi":
            iscsi_host = '127.0.0.1'
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size=original_disk_size,
                portal_ip=iscsi_host)
            disk_path = ("iscsi://[%s]/%s/%s" %
                         (iscsi_host, iscsi_target, lun_num))
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'iscsi',
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': iscsi_host,
                'source_host_port': '3260'
            }
            disk_params.update(disk_params_src)
        elif original_disk_type == "gluster":
            gluster_vol_name = "gluster_vol"
            gluster_pool_name = "gluster_pool"
            gluster_img_name = "gluster.qcow2"
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            disk_path = 'gluster://%s/%s/%s' % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True,
                        verbose=True)
            disk_params = {
                'device_type': 'disk',
                'type_name': 'network',
                "driver_type": "qcow2",
                'target_dev': original_disk_target
            }
            disk_params_src = {
                'source_protocol': 'gluster',
                'source_name': gluster_vol_name + "/%s" % gluster_img_name,
                'source_host_name': gluster_host_ip,
                'source_host_port': '24007'
            }
            disk_params.update(disk_params_src)
        else:
            test.error("The disk type '%s' not supported in this script.",
                       original_disk_type)
        if hotplug_disk:
            vm.start()
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name,
                                disk_xml,
                                flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target

            backup_server_dict = {}
            if nbd_protocol == "unix":
                backup_server_dict["transport"] = "unix"
                backup_server_dict["socket"] = nbd_socket
            else:
                backup_server_dict["name"] = nbd_hostname
                backup_server_dict["port"] = nbd_tcp_port
                if tls_enabled:
                    backup_server_dict["tls"] = "yes"
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type

                    # Custom nbd export name and bitmap name if required
                    if set_exportname:
                        nbd_export_name = original_disk_target + "_custom_exp"
                        backup_disk_params["exportname"] = nbd_export_name
                    if set_exportbitmap:
                        nbd_bitmap_name = original_disk_target + "_custom_bitmap"
                        backup_disk_params["exportbitmap"] = nbd_bitmap_name

                    # Prepare nbd scratch file/dev params
                    scratch_params = {"attrs": {}}
                    scratch_path = None
                    if scratch_type == "file":
                        scratch_file_name = "scratch_file_%s" % backup_index
                        scratch_path = os.path.join(tmp_dir, scratch_file_name)
                        if prepare_scratch_file:
                            libvirt.create_local_disk("file", scratch_path,
                                                      original_disk_size,
                                                      "qcow2")
                        scratch_params["attrs"]["file"] = scratch_path
                    elif scratch_type == "block":
                        if prepare_scratch_blkdev:
                            scratch_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=scratch_blkdev_size)
                        scratch_params["attrs"]["dev"] = scratch_path
                    else:
                        test.fail(
                            "We do not support backup scratch type: '%s'" %
                            scratch_type)
                    if scratch_luks_encrypted:
                        encryption_dict = {
                            "encryption": "luks",
                            "secret": {
                                "type": "passphrase",
                                "uuid": luks_secret_uuid
                            }
                        }
                        scratch_params["encryption"] = encryption_dict
                    logging.debug("scratch params: %s", scratch_params)
                    backup_disk_params["backup_scratch"] = scratch_params

                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)

            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Create some data in vdb
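            # Write at a new offset each round so each backup captures fresh dirty blocks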
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()
            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml
            if reuse_scratch_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               ignore_status=True,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())
            # If required, do some error operations during backup job
            error_operation = params.get("error_operation")
            if error_operation:
                if "destroy_vm" in error_operation:
                    vm.destroy(gracefully=False)
                if "kill_qemu" in error_operation:
                    utils_misc.safe_kill(vm.get_pid(), signal.SIGKILL)
                if utils_misc.wait_for(
                        lambda: utils_backup.is_backup_canceled(vm_name),
                        timeout=5):
                    raise utils_backup.BackupCanceledError()
                elif expect_backup_canceled:
                    test.fail("Backup job should be canceled but not.")
            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            nbd_params = {
                "nbd_protocol": nbd_protocol,
                "nbd_export": nbd_export_name
            }
            if nbd_protocol == "unix":
                nbd_params["nbd_socket"] = nbd_socket
            elif nbd_protocol == "tcp":
                nbd_params["nbd_hostname"] = nbd_hostname
                nbd_params["nbd_tcp_port"] = nbd_tcp_port
                if tls_enabled:
                    nbd_params["tls_dir"] = pki_path
                    nbd_params["tls_server_ip"] = tls_server_ip
            if not is_incremental:
                # Do full backup
                try:
                    utils_backup.pull_full_backup_to_file(
                        nbd_params, backup_file_path)
                except Exception as details:
                    if tls_enabled and tls_error:
                        raise utils_backup.BackupTLSError(details)
                    else:
                        test.fail("Fail to get full backup data: %s" % details)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
            # Check if the scratch file/device is LUKS-encrypted
            if scratch_luks_encrypted and scratch_path:
                cmd = "qemu-img info -U %s" % scratch_path
                result = process.run(cmd, shell=True,
                                     verbose=True).stdout_text.strip()
                if (not re.search("format.*luks", result, re.IGNORECASE)
                        or not re.search("encrypted.*yes", result,
                                         re.IGNORECASE)):
                    test.fail("scratch file/dev is not encrypted by LUKS")
            virsh.domjobabort(vm_name, debug=True)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" % (disk_path, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except utils_backup.BackupBeginError as detail:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail("Backup failed to start: %s" % detail)
    except utils_backup.BackupTLSError as detail:
        if tls_error:
            logging.debug("Failed to get backup data as expected.")
        else:
            test.fail("Failed to get tls backup data: %s" % detail)
    except utils_backup.BackupCanceledError as detail:
        if expect_backup_canceled:
            logging.debug("Backup canceled as expected.")
            if not vm.is_alive():
                logging.debug("Check if vm can be started again when backup "
                              "canceled.")
                vm.start()
                vm.wait_for_login().close()
        else:
            test.fail("Backup job canceled: %s" % detail)
    finally:
        # Remove checkpoints
        clean_checkpoint_metadata = not vm.is_alive()
        if "error_operation" in locals() and error_operation is not None:
            if "kill_qemu" in error_operation:
                clean_checkpoint_metadata = True
        utils_backup.clean_checkpoints(
            vm_name, clean_metadata=clean_checkpoint_metadata)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        # Remove iscsi devices
        if original_disk_type == "iscsi" or scratch_type == "block":
            libvirt.setup_or_cleanup_iscsi(False)

        # Remove gluster devices
        if original_disk_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)

        # Recover qemu.conf
        if "qemu_config" in locals():
            qemu_config.restore()

        # Remove tls object
        if "tls_obj" in locals():
            del tls_obj

        # Remove libvirt secret
        if "luks_secret_uuid" in locals():
            virsh.secret_undefine(luks_secret_uuid, ignore_status=True)
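
Side note: pull_full_backup_to_file above reads from the NBD export that `virsh backup-begin` publishes while the job is running. A minimal sketch of the same idea using `qemu-img convert` over the unix socket; it assumes the default export name (the disk target) and the socket path used in this test, and the helper's real implementation may differ:

import subprocess

def pull_full_backup(socket_path, export_name, out_file):
    # The export presents the disk's guest-visible data as raw bytes
    nbd_uri = "nbd+unix:///%s?socket=%s" % (export_name, socket_path)
    subprocess.run(["qemu-img", "convert", "-f", "raw", nbd_uri,
                    "-O", "qcow2", out_file], check=True)

# Must run while the backup job is still active, i.e. before domjobabort
pull_full_backup("/tmp/pull_backup.socket", "vdb", "/tmp/backup_file_0.qcow2")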
Exemplo n.º 20
0
def run(test, params, env):
    """
    Test command: virsh net-dhcp-leases

    1. Create a new network and run virsh command to check dhcp leases info.
    2. Attach an interface before or after starting the domain, then check
       the dhcp leases info.
    3. Clean the environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    net_name = params.get("net_name", "default")
    net_option = params.get("net_option", "")
    status_error = "yes" == params.get("status_error", "no")
    prepare_net = "yes" == params.get("prepare_net", "yes")
    hotplug_iface = "yes" == params.get("hotplug_interface", "no")
    filter_by_mac = "yes" == params.get("filter_by_mac", "no")
    invalid_mac = "yes" == params.get("invalid_mac", "no")
    expect_msg = params.get("leases_err_msg")
    # Generate a random string as the MAC address
    nic_mac = None
    if invalid_mac:
        nic_mac = utils_misc.generate_random_string(17)

    # Command won't fail on old libvirt
    if not libvirt_version.version_compare(1, 3, 1) and invalid_mac:
        logging.debug("Reset case to positive as BZ#1261432")
        status_error = False

    def create_network():
        """
        Create a network
        """
        net_ip_addr = params.get("net_ip_addr", "192.168.200.1")
        net_ip_netmask = params.get("net_ip_netmask", "255.255.255.0")
        net_dhcp_start = params.get("net_dhcp_start", "192.168.200.2")
        net_dhcp_end = params.get("net_dhcp_end", "192.168.200.254")
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'mode': "nat"}
        ipxml = network_xml.IPXML()
        ipxml.address = net_ip_addr
        ipxml.netmask = net_ip_netmask
        ipxml.dhcp_ranges = {'start': net_dhcp_start, "end": net_dhcp_end}
        netxml.set_ip(ipxml)
        netxml.create()

    def get_net_dhcp_leases(output):
        """
        Return the dhcp lease info in a list
        """
        leases = []
        lines = output.splitlines()
        if not lines:
            return leases
        try:
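            # Header columns may contain single spaces (e.g. "Expiry Time"),
            # so treat up to four space-separated words as one field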
            pat = r"\S+\ ?\S+\ ?\S+\ ?\S+|\S+"
            keys = re.findall(pat, lines[0])
            for line in lines[2:]:
                values = re.findall(pat, line)
                leases.append(dict(list(zip(keys, values))))
            return leases
        except Exception:
            test.error("Fail to parse output: %s" % output)

    def get_ip_by_mac(mac_addr, try_dhclient=False, timeout=120):
        """
        Get interface IP address by given MAC address. If try_dhclient is
        True, then try to allocate an IP address for the interface.
        """
        session = vm.wait_for_login(login_nic_index, timeout=timeout, serial=True)

        def f():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = utils_misc.wait_for(f, 10)
            if ip_addr is None:
                iface_name = utils_net.get_linux_ifname(session, mac_addr)
                if try_dhclient:
                    session.cmd("dhclient %s" % iface_name)
                    ip_addr = utils_misc.wait_for(f, 10)
                else:
                    # No IP for the interface, just log the interface name
                    logging.warning("Found '%s' with MAC address '%s', "
                                    "but it has no IP address", iface_name,
                                    mac_addr)
        finally:
            session.close()
        return ip_addr

    def check_net_lease(net_leases, expected_find=True):
        """
        Check the dhcp lease info.
        """
        if not net_leases:
            if expected_find:
                test.fail("Lease info is empty")
            else:
                logging.debug("No dhcp lease info find as expected")
        else:
            if not expected_find:
                test.fail("Find unexpected dhcp lease info: %s"
                          % net_leases)
        find_mac = False
        for net_lease in net_leases:
            net_mac = net_lease['MAC address']
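            # Drop the 3-character prefix-length suffix (e.g. "/24") from the address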
            net_ip = net_lease['IP address'][:-3]
            if vm_xml.VMXML.get_iface_by_mac(vm_name, net_mac):
                find_mac = True
                logging.debug("Find '%s' in domain XML", net_mac)
            else:
                logging.debug("Not find '%s' in domain XML", net_mac)
                continue
            iface_ip = get_ip_by_mac(net_mac)
            if iface_ip and iface_ip != net_ip:
                test.fail("Address '%s' is not expected" % iface_ip)
        if expected_find and not find_mac:
            test.fail("No matched MAC address")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if vm.is_alive():
        vm.destroy(gracefully=False)
    login_nic_index = 0
    new_nic_index = 0
    # Clean up dirty dnsmasq processes: get all networks first, then destroy
    # and undefine every network except the default one
    net_state = virsh.net_state_dict(only_names=True)
    logging.debug("current networks: %s, destroy and undefine networks "
                  "except default!", net_state)
    for net in net_state:
        if net != "default":
            virsh.net_destroy(net)
            virsh.net_undefine(net)
    cmd = "ps aux|grep dnsmasq|grep -v grep | grep -v default | awk '{print $2}'"
    pid_list = results_stdout_52lts(process.run(cmd, shell=True)).strip().splitlines()
    logging.debug(pid_list)
    for pid in pid_list:
        utils_misc.safe_kill(int(pid), signal.SIGKILL)
    # Create new network
    if prepare_net:
        create_network()
    nets = virsh.net_state_dict()
    if net_name not in list(nets.keys()) and not status_error:
        test.error("Not find network '%s'" % net_name)
    expected_find = False
    try:
        result = virsh.net_dhcp_leases(net_name,
                                       mac=nic_mac,
                                       options=net_option,
                                       debug=True,
                                       ignore_status=True)
        utlv.check_exit_status(result, status_error)
        lease = get_net_dhcp_leases(result.stdout.strip())
        check_net_lease(lease, expected_find)
        if not status_error:
            iface_mac = utils_net.generate_mac_address_simple()
            if filter_by_mac:
                nic_mac = iface_mac
            op = "--type network --model virtio --source %s --mac %s" \
                 % (net_name, iface_mac)
            nic_params = {'mac': iface_mac, 'nettype': 'bridge',
                          'ip_version': 'ipv4'}
            login_timeout = 120
            if not hotplug_iface:
                op += " --config"
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                vm.start()
                new_nic_index = vm.get_nic_index_by_mac(iface_mac)
                if new_nic_index > 0:
                    login_nic_index = new_nic_index
            else:
                vm.start()
                # wait for the VM to start before hotplugging the interface
                vm.wait_for_serial_login()
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                # As the VM is already started, the login timeout can be shortened
                login_timeout = 10
            new_interface_ip = get_ip_by_mac(iface_mac, try_dhclient=True,
                                             timeout=login_timeout)
            # Allocating an IP address for the new interface may fail, so
            # only check the result if a new IP address was obtained
            if new_interface_ip:
                expected_find = True
            result = virsh.net_dhcp_leases(net_name, mac=nic_mac,
                                           debug=False, ignore_status=True)
            utlv.check_exit_status(result, status_error)
            lease = get_net_dhcp_leases(result.stdout.strip())
            check_net_lease(lease, expected_find)
        else:
            if expect_msg:
                utlv.check_result(result, expect_msg.split(';'))
    finally:
        # Delete the new attached interface
        if new_nic_index > 0:
            vm.del_nic(new_nic_index)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        if prepare_net:
            virsh.net_destroy(net_name)
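
Usage note: get_net_dhcp_leases above turns the `virsh net-dhcp-leases` table into a list of dicts keyed by the header columns. A small illustration with invented lease values (it assumes the nested function is in scope):

sample_output = (
    " Expiry Time           MAC address         Protocol   IP address          Hostname   Client ID or DUID\n"
    "---------------------------------------------------------------------------------------------------\n"
    " 2024-01-01 12:00:00   52:54:00:12:34:56   ipv4       192.168.200.10/24   guest1     -")
leases = get_net_dhcp_leases(sample_output)
# leases[0]['MAC address'] == '52:54:00:12:34:56'
# leases[0]['IP address']  == '192.168.200.10/24'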