def run(test, params, env):
    """
    Test command virsh cpu-models.

    Skips (TestNAError) while target_uri still carries the EXAMPLE.COM
    placeholder.  When no cpu_arch parameter is supplied, every guest arch
    reported by the host capabilities XML is collected into arch_list.

    :param test: test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    cpu_arch = params.get("cpu_arch", "")
    option = params.get("option", "")
    target_uri = params.get("target_uri", "default")
    status_error = "yes" == params.get("status_error", "no")
    logging.debug(target_uri.count('EXAMPLE.COM'))
    if target_uri.count('EXAMPLE.COM'):
        raise error.TestNAError("Please replace '%s' with valid uri" %
                                target_uri)
    connect_uri = libvirt_vm.normalize_connect_uri(target_uri)

    arch_list = []
    if not cpu_arch:
        # No arch requested: gather every guest arch the host supports.
        try:
            capa = capability_xml.CapabilityXML()
            guest_map = capa.get_guest_capabilities()
            guest_arch = []
            for v in guest_map.values():
                guest_arch += v.keys()
            for arch in set(guest_arch):
                arch_list.append(arch)
        # Fixed: "except Exception, e" is Python-2-only syntax; "as e" works
        # on both Python 2.6+ and Python 3 (matches sibling tests in file).
        except Exception as e:
            raise error.TestError("Fail to get guest arch list of the"
                                  " host supported:\n%s" % e)
    # NOTE(review): arch_list/option/status_error are unused past this point
    # in the visible source — the execution loop may live outside this view.
def run_virsh_uri(test, params, env):
    """
    Test the command virsh uri

    (1) Call virsh uri
    (2) Call virsh -c remote_uri uri
    (3) Call virsh uri with an unexpected option
    (4) Call virsh uri with libvirtd service stop

    :param test: test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    option = params.get("options")
    target_uri = params.get("target_uri")
    if target_uri:
        if target_uri.count('EXAMPLE.COM'):
            raise error.TestError('target_uri configuration set to sample value')
        logging.info("The target_uri: %s", target_uri)
        cmd = "virsh -c %s uri" % target_uri
    else:
        cmd = "virsh uri %s" % option

    # Prepare libvirtd service.  Default "on" so the recovery step below
    # cannot raise NameError when the "libvirtd" param is absent (the
    # original bound the name only inside a has_key() branch, and
    # dict.has_key() is Python-2-only anyway).
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        libvirt_vm.service_libvirtd_control("stop")

    # Run test case
    logging.info("The command: %s", cmd)
    try:
        uri_test = virsh.canonical_uri(option, uri=connect_uri,
                                       ignore_status=False, debug=True)
        status = 0  # good
    except error.CmdError:
        status = 1  # bad
        uri_test = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.service_libvirtd_control("start")

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Command: %s succeeded "
                                 "(incorrect command)" % cmd)
        else:
            logging.info("command: %s is a expected error", cmd)
    elif status_error == "no":
        # cmp() does not exist on Python 3; plain inequality is equivalent
        # for this string comparison.
        if target_uri != uri_test:
            raise error.TestFail("Virsh cmd uri %s != %s." %
                                 (uri_test, target_uri))
        if status != 0:
            raise error.TestFail("Command: %s failed "
                                 "(correct command)" % cmd)
def run(test, params, env):
    """
    Test the command virsh domcapabilities

    Builds one or more {arch, virttype, emulatorbin, machine} option sets —
    either directly from test params, or (virsh_options == "AUTO") from the
    host capabilities XML — and runs virsh domcapabilities for each.
    """
    target_uri = params.get("target_uri", "default")
    # Bail out while the cfg still carries the placeholder uri.
    if target_uri.count("EXAMPLE.COM"):
        raise error.TestNAError("Please replace '%s' with valid uri" %
                                target_uri)
    connect_uri = libvirt_vm.normalize_connect_uri(target_uri)
    virsh_options = params.get("virsh_options", "")
    virttype = params.get("virttype_value", "")
    emulatorbin = params.get("emulatorbin_value", "")
    arch = params.get("arch_value", "")
    machine = params.get("machine_value", "")
    option_dict = {'arch': arch, 'virttype': virttype,
                   'emulatorbin': emulatorbin, 'machine': machine}
    options_list = [option_dict]
    extra_option = params.get("extra_option", "")
    # Get --virttype, --emulatorbin, --arch and --machine values from
    # virsh capabilities output, then assemble option for testing
    # This will ignore the virttype, emulatorbin, arch and machine values
    if virsh_options == "AUTO":
        options_list = []
        capa_xml = capability_xml.CapabilityXML()
        guest_capa = capa_xml.get_guest_capabilities()
        for arch_prop in guest_capa.values():
            for arch in arch_prop.keys():
                machine_list = arch_prop[arch]['machine']
                virttype_list = []
                emulatorbin_list = [arch_prop[arch]['emulator']]
                for key in arch_prop[arch].keys():
                    # "domain_<type>" sub-dicts describe per-domain-type
                    # overrides; strip the prefix to recover the virttype.
                    if key.startswith("domain_"):
                        virttype_list.append(key[7:])
                        if arch_prop[arch][key].values():
                            emulatorbin_list.append(
                                arch_prop[arch][key]['emulator'])
                # Cartesian product: one test case per combination.
                for virttype in virttype_list:
                    for emulatorbin in emulatorbin_list:
                        for machine in machine_list:
                            option_dict = {'arch': arch,
                                           'virttype': virttype,
                                           'emulatorbin': emulatorbin,
                                           'machine': machine}
                            options_list.append(option_dict)
    # Run test cases
    for option in options_list:
        result = virsh.domcapabilities(virttype=option['virttype'],
                                       emulatorbin=option['emulatorbin'],
                                       arch=option['arch'],
                                       machine=option['machine'],
                                       options=extra_option,
                                       uri=connect_uri,
                                       ignore_status=True, debug=True)
        # Check status_error
        status_error = "yes" == params.get("status_error", "no")
        utlv.check_exit_status(result, status_error)
def compare_poolcapabilities_xml(source):
    """
    Compare new output of pool-capability with the standard one
    (1) Dict the new pool capability XML
    (2) Compare with the standard XML dict

    :param source: pool-capabilities XML text to check

    NOTE(review): this function reads ``params`` and ``test`` without taking
    them as parameters — it appears to have been extracted from an enclosing
    scope that provides them; confirm before reusing standalone.
    """
    cap_xml = pool_capability_xml.PoolcapabilityXML()
    cap_xml.xml = source
    connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                              'default'))
    # Check the pool capability xml
    pool_capa = cap_xml.get_pool_capabilities()
    logging.debug(pool_capa)
    # Every pool type reported by libvirt must be one of these known types.
    pool_type_list = ['dir', 'fs', 'netfs', 'logical', 'disk', 'iscsi',
                      'iscsi-direct', 'scsi', 'mpath', 'rbd', 'sheepdog',
                      'gluster', 'zfs', 'vstorage']
    for pooltype in pool_capa.keys():
        if pooltype not in pool_type_list:
            test.fail("'%s' is not expected in pool-capability" % (pooltype))
    # Expected per-pool-type format support: maps pool type -> dict of
    # pool/volume default format names to their accepted format lists.
    pool_type_info_dict = {'dir': {'pool_default_format_name': [],
                                   'raw': ['none', 'raw', 'dir', 'bochs',
                                           'cloop', 'dmg', 'iso', 'vpc',
                                           'vdi', 'fat', 'vhd', 'ploop',
                                           'cow', 'qcow', 'qcow2', 'qed',
                                           'vmdk']},
                           'fs': {'auto': ['auto', 'ext2', 'ext3', 'ext4',
                                           'ufs', 'iso9660', 'udf', 'gfs',
                                           'gfs2', 'vfat', 'hfs+', 'xfs',
                                           'ocfs2', 'vmfs'],
                                  'raw': ['none', 'raw', 'dir', 'bochs',
                                          'cloop', 'dmg', 'iso', 'vpc',
                                          'vdi', 'fat', 'vhd', 'ploop',
                                          'cow', 'qcow', 'qcow2', 'qed',
                                          'vmdk']},
                           'netfs': {'auto': ['auto', 'nfs', 'glusterfs',
                                              'cifs'],
                                     'raw': ['none', 'raw', 'dir', 'bochs',
                                             'cloop', 'dmg', 'iso', 'vpc',
                                             'vdi', 'fat', 'vhd', 'ploop',
                                             'cow', 'qcow', 'qcow2', 'qed',
                                             'vmdk']},
                           'logical': {'lvm2': ['unknown', 'lvm2'],
                                       'vol_default_format_name': []},
                           'disk': {'unknown': ['unknown', 'dos', 'dvh',
                                                'gpt', 'mac', 'bsd', 'pc98',
                                                'sun', 'lvm2'],
                                    'none': ['none', 'linux', 'fat16',
                                             'fat32', 'linux-swap',
                                             'linux-lvm', 'linux-raid',
                                             'extended']},
                           'iscsi': {'pool_default_format_name': [],
                                     'vol_default_format_name': []},
                           'iscsi-direct': {'pool_default_format_name': [],
                                            'vol_default_format_name': []},
                           'scsi': {'pool_default_format_name': [],
                                    'vol_default_format_name': []},
                           'mpath': {'pool_default_format_name': [],
                                     'vol_default_format_name': []},
                           'rbd': {'pool_default_format_name': []},
                           'sheepdog': {'pool_default_format_name': [],
                                        'vol_default_format_name': []},
                           'gluster': {'pool_default_format_name': [],
                                       'raw': ['none', 'raw', 'dir', 'bochs',
                                               'cloop', 'dmg', 'iso', 'vpc',
                                               'vdi', 'fat', 'vhd', 'ploop',
                                               'cow', 'qcow', 'qcow2', 'qed',
                                               'vmdk']},
                           'zfs': {'pool_default_format_name': [],
                                   'vol_default_format_name': []},
                           'vstorage': {'pool_default_format_name': [],
                                        'raw': ['none', 'raw', 'dir',
                                                'bochs', 'cloop', 'dmg',
                                                'iso', 'vpc', 'vdi', 'fat',
                                                'vhd', 'ploop', 'cow',
                                                'qcow', 'qcow2', 'qed',
                                                'vmdk']}}
    #Check the pool capability information
    if pool_capa != pool_type_info_dict:
        test.fail('Unexpected pool information support occurred,please check the information by manual')
def run_virsh_net_info(test, params, env):
    """
    Test command: virsh net-info <network>

    The command returns basic information about virtual network.
    """
    # Collect test parameters.
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    status_error = params.get("status_error", "no")
    net_name = params.get("testing_net_name", "default")
    net_ref = params.get("netinfo_net_ref", "name")
    extra = params.get("netinfo_options_extra", "")
    virsh_dargs = {'uri': uri, 'debug': True, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # Look up the network under test among all defined networks; skip the
    # test when it is not present on the host.
    all_nets = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    if net_name not in all_nets:
        virsh_instance.close_session()
        raise error.TestNAError("'%s' virtual network doesn't exist." %
                                net_name)
    netxml = all_nets[net_name]

    # Translate the symbolic reference into the concrete cli argument.
    if net_ref == "name":
        net_ref = netxml.name
    elif net_ref == "uuid":
        net_ref = netxml.uuid
    elif "invalid" in net_ref:
        net_ref = params.get(net_ref)

    # Execute the command and capture its outcome.
    result = virsh.net_info(net_ref, extra, **virsh_dargs)
    status = result.exit_status
    output = result.stdout.strip()
    err = result.stderr.strip()
    virsh_instance.close_session()

    # Judge the outcome against the expectation from the cfg.
    if status_error == "yes":
        if status == 0 or err == "":
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or output == "":
            raise error.TestFail("Run failed with right command")
def run(test, params, env): """ Test command: virsh net-info <network> The command returns basic information about virtual network. """ # Gather test parameters uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri", "default")) status_error = params.get("status_error", "no") net_name = params.get("testing_net_name", "default") net_ref = params.get("netinfo_net_ref", "name") extra = params.get("netinfo_options_extra", "") virsh_dargs = {'uri': uri, 'debug': True, 'ignore_status': True} virsh_instance = virsh.VirshPersistent(**virsh_dargs) # Get all network instance origin_nets = network_xml.NetworkXML.new_all_networks_dict(virsh_instance) # Prepare network for following test. try: netxml = origin_nets[net_name] except KeyError: virsh_instance.close_session() raise error.TestNAError("'%s' virtual network doesn't exist." % net_name) if net_ref == "name": net_ref = netxml.name elif net_ref == "uuid": net_ref = netxml.uuid elif net_ref.find("invalid") != -1: net_ref = params.get(net_ref) # Run test case result = virsh.net_info(net_ref, extra, **virsh_dargs) status = result.exit_status output = result.stdout.strip() err = result.stderr.strip() virsh_instance.close_session() # Check status_error if status_error == "yes": if status == 0 or err == "": raise error.TestFail("Run successfully with wrong command!") elif status_error == "no": if status != 0 or output == "": raise error.TestFail("Run failed with right command")
def run_virsh_version(test, params, env):
    """
    Test the command virsh version

    (1) Call virsh version
    (2) Call virsh version with an unexpected option
    (3) Call virsh version with libvirtd service stop

    :param test: test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))

    # Prepare libvirtd service.  Default "on" so the recovery step below
    # cannot raise NameError when the "libvirtd" param is absent (the
    # original bound the name only inside a has_key() branch, and
    # dict.has_key() is Python-2-only anyway).
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    # Run test case
    option = params.get("virsh_version_options")
    try:
        output = virsh.version(option, uri=connect_uri,
                               ignore_status=False, debug=True)
        status = 0  # good
    except error.CmdError:
        status = 1  # bad

    # Recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Command 'virsh version %s' succeeded "
                                 "(incorrect command)" % option)
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Command 'virsh version %s' failed "
                                 "(correct command)" % option)
def run(test, params, env):
    """
    Test command virsh cpu-models
    """
    cpu_arch = params.get("cpu_arch", "")
    option = params.get("option", "")
    status_error = params.get("status_error", "no") == "yes"
    remote_ref = params.get("remote_ref", "")
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))

    if remote_ref == "remote":
        # Remote flavour: set up passwordless ssh and target the remote uri.
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        if 'EXAMPLE.COM' in remote_ip:
            test.cancel("Please replace '%s' with valid remote ip"
                        % remote_ip)
        ssh_key.setup_ssh_key(remote_ip, "root", remote_pwd)
        connect_uri = libvirt_vm.complete_uri(remote_ip)

    if cpu_arch:
        arch_list = [cpu_arch]
    else:
        # No arch requested: run against every guest arch from the host
        # capabilities XML.
        try:
            guest_map = capability_xml.CapabilityXML().get_guest_capabilities()
            collected = []
            for per_os in itervalues(guest_map):
                collected.extend(per_os.keys())
            arch_list = list(set(collected))
        except Exception as details:
            test.error("Fail to get guest arch list of the host"
                       " supported:\n%s" % details)

    for arch in arch_list:
        logging.debug("Get the CPU models for arch: %s" % arch)
        result = virsh.cpu_models(arch, options=option, uri=connect_uri,
                                  ignore_status=True, debug=True)
        utlv.check_exit_status(result, expect_error=status_error)
def run(test, params, env):
    """
    Test the command virsh version

    (1) Call virsh version
    (2) Call virsh version with an unexpected option
    (3) Call virsh version with libvirtd service stop
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    libvirtd = params.get("libvirtd", "on")
    option = params.get("virsh_version_options")
    status_error = (params.get("status_error") == "yes")
    daemon_stopped = (libvirtd == "off")

    # Prepare libvirtd service
    if daemon_stopped:
        utils_libvirtd.libvirtd_stop()

    # Run test case
    result = virsh.version(option, uri=connect_uri, debug=True)

    # Bring the daemon back before judging the outcome.
    if daemon_stopped:
        utils_libvirtd.libvirtd_start()

    # Check status_error
    if status_error:
        if not result.exit_status:
            # Newer libvirt restarts the daemon on demand, so success with
            # the service stopped is acceptable from 5.6.0 on.
            if daemon_stopped and libvirt_version.version_compare(5, 6, 0):
                logging.info(
                    "From libvirt version 5.6.0 libvirtd is restarted "
                    "and command should succeed.")
            else:
                test.fail("Command 'virsh version %s' succeeded "
                          "(incorrect command)" % option)
    else:
        if result.exit_status:
            test.fail("Command 'virsh version %s' failed "
                      "(correct command)" % option)
        if option.count("daemon") and not result.stdout.count("daemon"):
            test.fail("No daemon information outputted!")
def run(test, params, env):
    """
    Test the command virsh version

    (1) Call virsh version
    (2) Call virsh version with an unexpected option
    (3) Call virsh version with libvirtd service stop

    :param test: test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                              "default"))

    # Prepare libvirtd service.  Default "on" so the recovery step below
    # cannot raise NameError when the "libvirtd" param is absent (the
    # original bound the name only inside a has_key() branch, and
    # dict.has_key() is Python-2-only anyway).
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    option = params.get("virsh_version_options")
    try:
        output = virsh.version(option, uri=connect_uri,
                               ignore_status=False, debug=True)
        status = 0  # good
    except error.CmdError:
        status = 1  # bad

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Command 'virsh version %s' succeeded "
                                 "(incorrect command)" % option)
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Command 'virsh version %s' failed "
                                 "(correct command)" % option)
def run(test, params, env):
    """
    Test command virsh cpu-models

    Runs "virsh cpu-models <arch>" either against a configured arch or, when
    cpu_arch is empty, against every guest arch the host capabilities report.
    Supports a remote connection when remote_ref == "remote".
    """
    cpu_arch = params.get("cpu_arch", "")
    option = params.get("option", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_ref = params.get("remote_ref", "")
    connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                              "default"))
    if remote_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        # Cancel while the cfg still carries the placeholder address.
        if 'EXAMPLE.COM' in remote_ip:
            test.cancel("Please replace '%s' with valid remote ip"
                        % remote_ip)
        ssh_key.setup_ssh_key(remote_ip, "root", remote_pwd)
        connect_uri = libvirt_vm.complete_uri(remote_ip)
    arch_list = []
    if not cpu_arch:
        # No arch requested: collect every guest arch from capabilities.
        try:
            capa = capability_xml.CapabilityXML()
            guest_map = capa.get_guest_capabilities()
            guest_arch = []
            for v in list(itervalues(guest_map)):
                guest_arch += list(v.keys())
            for arch in set(guest_arch):
                arch_list.append(arch)
        except Exception as e:
            test.error("Fail to get guest arch list of the host"
                       " supported:\n%s" % e)
    else:
        arch_list.append(cpu_arch)
    for arch in arch_list:
        logging.debug("Get the CPU models for arch: %s" % arch)
        result = virsh.cpu_models(arch, options=option, uri=connect_uri,
                                  ignore_status=True, debug=True)
        utlv.check_exit_status(result, expect_error=status_error)
def run(test, params, env):
    """
    Test the command virsh version

    (1) Call virsh version
    (2) Call virsh version with an unexpected option
    (3) Call virsh version with libvirtd service stop

    :param test: test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                              "default"))
    libvirtd = params.get("libvirtd", "on")
    # Default to "" — the original used no default, so a missing param made
    # option None and option.count("daemon") below raised AttributeError.
    option = params.get("virsh_version_options", "")
    status_error = (params.get("status_error") == "yes")

    # Prepare libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    result = virsh.version(option, uri=connect_uri, debug=True)

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    if status_error:
        if not result.exit_status:
            test.fail("Command 'virsh version %s' succeeded "
                      "(incorrect command)" % option)
    else:
        if result.exit_status:
            test.fail("Command 'virsh version %s' failed "
                      "(correct command)" % option)
        # "--daemon" output must actually mention the daemon version.
        if option.count("daemon") and not result.stdout.count("daemon"):
            test.fail("No daemon information outputted!")  # fixed typo
def run(test, params, env):
    """
    Test the command virsh capabilities

    (1) Call virsh capabilities
    (2) Call virsh capabilities with an unexpected option
    (3) Call virsh capabilities with libvirtd service stop
    """
    def compare_capabilities_xml(source):
        # Cross-check each section of the capabilities XML against what the
        # host itself reports via shell commands.
        cap_xml = capability_xml.CapabilityXML()
        cap_xml.xml = source

        # Check that host has a non-empty UUID tag.
        xml_uuid = cap_xml.uuid
        logging.debug("Host UUID (capabilities_xml): %s" % xml_uuid)
        if xml_uuid == "":
            raise error.TestFail("The host uuid in capabilities_xml is none!")

        # Check the host arch.
        xml_arch = cap_xml.arch
        logging.debug("Host arch (capabilities_xml): %s", xml_arch)
        exp_arch = utils.run("arch", ignore_status=True).stdout.strip()
        # NOTE(review): cmp() is Python-2-only.
        if cmp(xml_arch, exp_arch) != 0:
            raise error.TestFail("The host arch in capabilities_xml is expected"
                                 " to be %s, but get %s" % (exp_arch, xml_arch))

        # Check the host cpu count.
        xml_cpu_count = cap_xml.cpu_count
        logging.debug("Host cpus count (capabilities_xml): %s", xml_cpu_count)
        cmd = "grep processor /proc/cpuinfo | wc -l"
        exp_cpu_count = int(utils.run(cmd, ignore_status=True).stdout.strip())
        if xml_cpu_count != exp_cpu_count:
            raise error.TestFail("Host cpus count is expected to be %s, but get "
                                 "%s" % (exp_cpu_count, xml_cpu_count))

        # Check the arch of guest supported.
        guest_capa = cap_xml.get_guest_capabilities()
        logging.debug(guest_capa)
        try:
            img = utils_misc.find_command("qemu-kvm")
        except ValueError:
            raise error.TestNAError("Cannot find qemu-kvm")
        # ppc hosts report cpu names differently from x86/qemu ones.
        if re.search("ppc", utils.run("arch").stdout):
            cmd = img + " --cpu ? | grep ppc"
        else:
            cmd = img + " --cpu ? | grep qemu"
        cmd_result = utils.run(cmd, ignore_status=True)
        for guest in cap_xml.xmltreefile.findall('guest'):
            guest_wordsize = guest.find('arch').find('wordsize').text
            logging.debug("Arch of guest supported (capabilities_xml):%s",
                          guest_wordsize)
            if not re.search(guest_wordsize, cmd_result.stdout.strip()):
                raise error.TestFail("The capabilities_xml gives an extra arch "
                                     "of guest to support!")

        # Check the type of hypervisor.
        first_guest = cap_xml.xmltreefile.findall('guest')[0]
        first_domain = first_guest.find('arch').findall('domain')[0]
        guest_domain_type = first_domain.get('type')
        logging.debug("Hypervisor (capabilities_xml):%s", guest_domain_type)
        cmd_result = utils.run("virsh uri", ignore_status=True)
        if not re.search(guest_domain_type, cmd_result.stdout.strip()):
            raise error.TestFail("The capabilities_xml gives an different "
                                 "hypervisor")

        # check power management support.
        try:
            pm_cmd = os_dep.command('pm-is-supported')
            pm_cap_map = {'suspend': 'suspend_mem',
                          'hibernate': 'suspend_disk',
                          'suspend-hybrid': 'suspend_hybrid', }
            exp_pms = []
            for opt in pm_cap_map:
                cmd = '%s --%s' % (pm_cmd, opt)
                res = utils.run(cmd, ignore_status=True)
                if res.exit_status == 0:
                    exp_pms.append(pm_cap_map[opt])
            pms = cap_xml.power_management_list
            if set(exp_pms) != set(pms):
                raise error.TestFail("Expected supported PMs are %s, got %s "
                                     "instead." % (exp_pms, pms))
        except ValueError:
            # os_dep.command raises ValueError when the tool is missing.
            logging.debug('Power management checking is skipped, since command '
                          'pm-is-supported is not found.')

    connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                              "default"))

    # Prepare libvirtd service
    # NOTE(review): libvirtd is bound only inside this branch; the recovery
    # code below hits a NameError if the "libvirtd" param is absent.
    if "libvirtd" in params:
        libvirtd = params.get("libvirtd")
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

    # Run test case
    option = params.get("virsh_cap_options")
    try:
        output = virsh.capabilities(option, uri=connect_uri,
                                    ignore_status=False, debug=True)
        status = 0  # good
    except error.CmdError:
        status = 1  # bad
        output = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off":
                raise error.TestFail("Command 'virsh capabilities' succeeded "
                                     "with libvirtd service stopped, incorrect")
            else:
                raise error.TestFail("Command 'virsh capabilities %s' succeeded "
                                     "(incorrect command)" % option)
    elif status_error == "no":
        compare_capabilities_xml(output)
        if status != 0:
            raise error.TestFail("Command 'virsh capabilities %s' failed "
                                 "(correct command)" % option)
def run_virsh_uri(test, params, env):
    """
    Test the command virsh uri

    (1) Call virsh uri
    (2) Call virsh -c remote_uri uri
    (3) Call virsh uri with an unexpected option
    (4) Call virsh uri with libvirtd service stop

    :param test: test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    option = params.get("options")
    target_uri = params.get("target_uri")
    if target_uri:
        if target_uri.count('EXAMPLE.COM'):
            raise error.TestError(
                'target_uri configuration set to sample value')
        logging.info("The target_uri: %s", target_uri)
        cmd = "virsh -c %s uri" % target_uri
    else:
        cmd = "virsh uri %s" % option

    # Prepare libvirtd service.  Default "on" so the recovery step below
    # cannot raise NameError when the "libvirtd" param is absent (the
    # original bound the name only inside a has_key() branch, and
    # dict.has_key() is Python-2-only anyway).
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        libvirt_vm.service_libvirtd_control("stop")

    # Run test case
    logging.info("The command: %s", cmd)
    try:
        uri_test = virsh.canonical_uri(option, uri=connect_uri,
                                       ignore_status=False, debug=True)
        status = 0  # good
    except error.CmdError:
        status = 1  # bad
        uri_test = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.service_libvirtd_control("start")

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Command: %s succeeded "
                                 "(incorrect command)" % cmd)
        else:
            logging.info("command: %s is a expected error", cmd)
    elif status_error == "no":
        # cmp() does not exist on Python 3; plain inequality is equivalent
        # for this string comparison.
        if target_uri != uri_test:
            raise error.TestFail("Virsh cmd uri %s != %s." %
                                 (uri_test, target_uri))
        if status != 0:
            raise error.TestFail("Command: %s failed "
                                 "(correct command)" % cmd)
def run(test, params, env):
    """
    Test the command virsh capabilities

    (1) Call virsh capabilities
    (2) Call virsh capabilities with an unexpected option
    (3) Call virsh capabilities with libvirtd service stop
    """
    def compare_capabilities_xml(source):
        # Parse the capabilities XML with minidom and verify each section
        # against what the host reports via shell commands.
        dom = parseString(source)
        host = dom.getElementsByTagName('host')[0]
        # check that host has a non-empty UUID tag.
        uuid = host.getElementsByTagName('uuid')[0]
        host_uuid_output = uuid.firstChild.data
        logging.info("Host uuid (capabilities_xml):%s", host_uuid_output)
        if host_uuid_output == "":
            raise error.TestFail("The host uuid in capabilities_xml is none!")

        # check the host arch.
        arch = host.getElementsByTagName('arch')[0]
        host_arch_output = arch.firstChild.data
        logging.info("Host arch (capabilities_xml):%s", host_arch_output)
        cmd_result = utils.run("arch", ignore_status=True)
        # NOTE(review): cmp() is Python-2-only.
        if cmp(host_arch_output, cmd_result.stdout.strip()) != 0:
            raise error.TestFail("The host arch in capabilities_xml is wrong!")

        # check the host cpus num.
        cpus = dom.getElementsByTagName('cpus')
        host_cpus = 0
        # Sum the "num" attribute over every <cpus> cell element.
        for cpu in cpus:
            host_cpus += int(cpu.getAttribute('num'))
        logging.info("Host cpus num (capabilities_xml):%s", host_cpus)
        cmd = "less /proc/cpuinfo | grep processor | wc -l"
        cmd_result = utils.run(cmd, ignore_status=True)
        if cmp(host_cpus, int(cmd_result.stdout.strip())) != 0:
            raise error.TestFail("Host cpus num (capabilities_xml) is "
                                 "wrong")

        # check the arch of guest supported.
        try:
            img = utils_misc.find_command("qemu-kvm")
        except ValueError:
            raise error.TestNAError("Cannot find qemu-kvm")
        cmd = img + " --cpu ? | grep qemu"
        cmd_result = utils.run(cmd, ignore_status=True)
        guest_wordsize_array = dom.getElementsByTagName('wordsize')
        length = len(guest_wordsize_array)
        for i in range(length):
            element = guest_wordsize_array[i]
            guest_wordsize = element.firstChild.data
            logging.info("Arch of guest supported (capabilities_xml):%s",
                         guest_wordsize)
            if not re.search(guest_wordsize, cmd_result.stdout.strip()):
                raise error.TestFail("The capabilities_xml gives an extra arch "
                                     "of guest to support!")

        # check the type of hyperviosr.
        guest_domain_type = dom.getElementsByTagName('domain')[0]
        guest_domain_type_output = guest_domain_type.getAttribute('type')
        logging.info("Hypervisor (capabilities_xml):%s",
                     guest_domain_type_output)
        cmd_result = utils.run("virsh uri", ignore_status=True)
        if not re.search(guest_domain_type_output, cmd_result.stdout.strip()):
            raise error.TestFail("The capabilities_xml gives an different "
                                 "hypervisor")

    connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                              "default"))

    # Prepare libvirtd service
    # NOTE(review): params.has_key() is Python-2-only, and libvirtd is bound
    # only inside this branch — the recovery code below raises NameError when
    # the "libvirtd" param is absent.
    if params.has_key("libvirtd"):
        libvirtd = params.get("libvirtd")
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

    # Run test case
    option = params.get("virsh_cap_options")
    try:
        output = virsh.capabilities(option, uri=connect_uri,
                                    ignore_status=False, debug=True)
        status = 0  # good
    except error.CmdError:
        status = 1  # bad
        output = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off":
                raise error.TestFail("Command 'virsh capabilities' succeeded "
                                     "with libvirtd service stopped, incorrect")
            else:
                raise error.TestFail("Command 'virsh capabilities %s' succeeded "
                                     "(incorrect command)" % option)
    elif status_error == "no":
        compare_capabilities_xml(output)
        if status != 0:
            raise error.TestFail("Command 'virsh capabilities %s' failed "
                                 "(correct command)" % option)
def run(test, params, env):
    """
    Test the command virsh capabilities

    (1) Call virsh capabilities
    (2) Call virsh capabilities with an unexpected option
    (3) Call virsh capabilities with libvirtd service stop
    """
    def compare_capabilities_xml(source):
        # Cross-check each section of the capabilities XML against what the
        # host itself reports via shell commands.
        cap_xml = capability_xml.CapabilityXML()
        cap_xml.xml = source

        # Check that host has a non-empty UUID tag.
        xml_uuid = cap_xml.uuid
        logging.debug("Host UUID (capabilities_xml): %s" % xml_uuid)
        if xml_uuid == "":
            raise error.TestFail("The host uuid in capabilities_xml is none!")

        # Check the host arch.
        xml_arch = cap_xml.arch
        logging.debug("Host arch (capabilities_xml): %s", xml_arch)
        exp_arch = utils.run("arch", ignore_status=True).stdout.strip()
        # NOTE(review): cmp() is Python-2-only.
        if cmp(xml_arch, exp_arch) != 0:
            raise error.TestFail("The host arch in capabilities_xml is expected"
                                 " to be %s, but get %s" % (exp_arch, xml_arch))

        # Check the host cpu count.
        xml_cpu_count = cap_xml.cpu_count
        logging.debug("Host cpus count (capabilities_xml): %s", xml_cpu_count)
        cmd = "grep processor /proc/cpuinfo | wc -l"
        exp_cpu_count = int(utils.run(cmd, ignore_status=True).stdout.strip())
        if xml_cpu_count != exp_cpu_count:
            raise error.TestFail("Host cpus count is expected to be %s, but get "
                                 "%s" % (exp_cpu_count, xml_cpu_count))

        # Check the arch of guest supported.
        xmltreefile = cap_xml.__dict_get__('xml')
        xml_os_arch_machine_map = cap_xml.os_arch_machine_map
        logging.debug(xml_os_arch_machine_map['hvm'])
        try:
            img = utils_misc.find_command("qemu-kvm")
        except ValueError:
            raise error.TestNAError("Cannot find qemu-kvm")
        cmd = img + " --cpu ? | grep qemu"
        cmd_result = utils.run(cmd, ignore_status=True)
        for guest in xmltreefile.findall('guest'):
            guest_wordsize = guest.find('arch').find('wordsize').text
            logging.debug("Arch of guest supported (capabilities_xml):%s",
                          guest_wordsize)
            if not re.search(guest_wordsize, cmd_result.stdout.strip()):
                raise error.TestFail("The capabilities_xml gives an extra arch "
                                     "of guest to support!")

        # Check the type of hypervisor.
        first_guest = xmltreefile.findall('guest')[0]
        first_domain = first_guest.find('arch').findall('domain')[0]
        guest_domain_type = first_domain.get('type')
        logging.debug("Hypervisor (capabilities_xml):%s", guest_domain_type)
        cmd_result = utils.run("virsh uri", ignore_status=True)
        if not re.search(guest_domain_type, cmd_result.stdout.strip()):
            raise error.TestFail("The capabilities_xml gives an different "
                                 "hypervisor")

        # check power management support.
        try:
            pm_cmd = os_dep.command('pm-is-supported')
            pm_cap_map = {'suspend': 'suspend_mem',
                          'hibernate': 'suspend_disk',
                          'suspend-hybrid': 'suspend_hybrid', }
            exp_pms = []
            for opt in pm_cap_map:
                cmd = '%s --%s' % (pm_cmd, opt)
                res = utils.run(cmd, ignore_status=True)
                if res.exit_status == 0:
                    exp_pms.append(pm_cap_map[opt])
            pms = cap_xml.power_management_list
            if set(exp_pms) != set(pms):
                raise error.TestFail("Expected supported PMs are %s, got %s "
                                     "instead." % (exp_pms, pms))
        except ValueError:
            # os_dep.command raises ValueError when the tool is missing.
            logging.debug('Power management checking is skipped, since command '
                          'pm-is-supported is not found.')

    connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                              "default"))

    # Prepare libvirtd service
    # NOTE(review): libvirtd is bound only inside this branch; the recovery
    # code below hits a NameError if the "libvirtd" param is absent.
    if "libvirtd" in params:
        libvirtd = params.get("libvirtd")
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

    # Run test case
    option = params.get("virsh_cap_options")
    try:
        output = virsh.capabilities(option, uri=connect_uri,
                                    ignore_status=False, debug=True)
        status = 0  # good
    except error.CmdError:
        status = 1  # bad
        output = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off":
                raise error.TestFail("Command 'virsh capabilities' succeeded "
                                     "with libvirtd service stopped, incorrect")
            else:
                raise error.TestFail("Command 'virsh capabilities %s' succeeded "
                                     "(incorrect command)" % option)
    elif status_error == "no":
        compare_capabilities_xml(output)
        if status != 0:
            raise error.TestFail("Command 'virsh capabilities %s' failed "
                                 "(correct command)" % option)
def run_virt_edit(test, params, env):
    """
    Test of virt-edit.

    1) Get and init parameters for test.
    2) Prepare environment.
    3) Run virt-edit command and get result.
    5) Recover environment.
    6) Check result.

    :param test: test object (unused directly; kept for runner interface)
    :param params: test parameter dict-like object
    :param env: test environment holding the VM object
    :raises error.TestError: on bad test setup (e.g. multiple domain disks)
    :raises error.TestFail: when the command result contradicts expectation
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    start_vm = params.get("start_vm", "no")
    vm_ref = params.get("virt_edit_vm_ref", vm_name)
    file_ref = params.get("virt_edit_file_ref", "/etc/hosts")
    created_img = params.get("virt_edit_created_img", "/tmp/foo.img")
    foo_line = params.get("foo_line", "")
    options = params.get("virt_edit_options")
    options_suffix = params.get("virt_edit_options_suffix")
    status_error = params.get("status_error", "no")

    # virt-edit should not be used when vm is running.
    # (for normal test)
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=True)

    dom_disk_dict = vm.get_disk_devices()  # TODO
    dom_uuid = vm.get_uuid()

    # Resolve the virt-edit target from the symbolic vm_ref parameter.
    if vm_ref == "domdisk":
        if len(dom_disk_dict) != 1:
            raise error.TestError("Only one disk device should exist on "
                                  "%s:\n%s." % (vm_name, dom_disk_dict))
        disk_detail = dom_disk_dict.values()[0]
        vm_ref = disk_detail['source']
        logging.info("disk to be edit:%s", vm_ref)
    elif vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "createdimg":
        vm_ref = created_img
        utils.run("dd if=/dev/zero of=%s bs=256M count=1" % created_img)

    # Decide whether pass a exprt for virt-edit command.
    if foo_line != "":
        expr = "s/$/%s/" % foo_line
    else:
        expr = ""

    # Stop libvirtd if test need.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test
    virsh_dargs = {'ignore_status': True, 'debug': True, 'uri': uri}
    result = lgf.virt_edit_cmd(vm_ref, file_ref, options,
                               options_suffix, expr, **virsh_dargs)
    status = result.exit_status

    # Recover libvirtd.
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Best-effort cleanup of the scratch image (may not exist; rm -f is safe).
    utils.run("rm -f %s" % created_img)

    status_error = (status_error == "yes")
    if status != 0:
        if not status_error:
            raise error.TestFail("Command executed failed.")
    else:
        # Bug fix: previously a negative test (status_error == yes) passed
        # silently when the command unexpectedly succeeded; fail explicitly.
        if status_error:
            raise error.TestFail("Command succeeded but failure was "
                                 "expected (negative test).")
        if (expr != "" and
                (not login_to_check_foo_line(vm, file_ref, foo_line))):
            raise error.TestFail("Virt-edit to add %s in %s failed."
                                 "Test failed." % (foo_line, file_ref))
def run(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Attach the required interface
    3) According test type(only attach or both attach and detach):
       a.Go on to test detach(if attaching is correct)
       b.Return GOOD or raise TestFail(if attaching is wrong)
    4) Check if attached interface is correct:
       a.Try to catch it in vm's XML file
       b.Try to catch it in vm
    5) Detach the attached interface
    6) Check result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Snapshot of the inactive domain XML, restored in the finally block.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("at_detach_iface_vm_ref", "domname")
    options_suffix = params.get("at_detach_iface_options_suffix", "")
    status_error = "yes" == params.get("status_error", "no")
    start_vm = params.get("start_vm")
    # Should attach must be pass for detach test.
    correct_attach = "yes" == params.get("correct_attach", "no")
    readonly = ("yes" == params.get("readonly", "no"))

    # Interface specific attributes.
    iface_type = params.get("at_detach_iface_type", "network")
    iface_source = params.get("at_detach_iface_source", "default")
    iface_mode = params.get("at_detach_iface_mode", "vepa")
    iface_mac = params.get("at_detach_iface_mac", "created")
    iface_target = params.get("at_detach_iface_target")
    iface_model = params.get("at_detach_iface_model")
    iface_inbound = params.get("at_detach_iface_inbound")
    iface_outbound = params.get("at_detach_iface_outbound")
    iface_rom = params.get("at_detach_rom_bar")
    iface_link = params.get("at_detach_link_state")
    iface_boot = params.get("at_detach_boot_order")
    iface_driver = params.get("at_detach_iface_driver")
    iface_driver_host = params.get("at_detach_driver_host")
    iface_driver_guest = params.get("at_detach_driver_guest")
    iface_backend = params.get("at_detach_iface_backend")
    save_restore = params.get("save_restore", "no")
    restart_libvirtd = params.get("restart_libvirtd", "no")
    attach_cmd = params.get("attach_cmd", "attach-interface")
    virsh_dargs = {'ignore_status': True, 'debug': True, 'uri': uri}
    validate_xml_result = "yes" == params.get("check_xml_result", "no")
    paused_after_vm_start = "yes" == params.get("paused_after_vm_start", "no")
    machine_type = params.get("machine_type")

    # Get iface name if iface_type is direct
    if iface_type == "direct":
        iface_source = utils_net.get_net_if(state="UP")[0]

    # Get a bridge name for test if iface_type is bridge.
    # If there is no bridge other than virbr0, try to create one
    # or fail test
    if iface_type == "bridge":
        host_bridge = utils_net.Bridge()
        bridge_list = host_bridge.list_br()
        try:
            bridge_list.remove("virbr0")
        except AttributeError:
            pass  # If no virbr0, just pass is ok
        logging.debug("Useful bridges:%s", bridge_list)
        if len(bridge_list):
            iface_source = bridge_list[0]
        else:
            process.run('ip link add name br0 type bridge',
                        ignore_status=False)
            iface_source = 'br0'
            logging.debug("Added bridge br0")

    # Test both detach and attach, So collect info
    # both of them for result check.
    # When something wrong with interface, set it to 1
    fail_flag = 0
    result_info = []

    # Get a mac address if iface_mac is 'created'.
    if iface_mac == "created" or correct_attach:
        iface_mac = utils_net.generate_mac_address_simple()
    # Collect every local above so the helper can build the iface dict from
    # the at_detach_* values by name.
    names = locals()
    iface_format = get_formatted_iface_dict(names, params.get("vm_arch_name"))

    # for rtl8139 model, need to add pcie bridge
    if iface_model == "rtl8139" and machine_type == "q35":
        add_pcie_controller(vm_name)
    if start_vm == "yes" and not vm.is_alive():
        vm.start()
    try:
        # Generate xml file if using attach-device command
        if attach_cmd == "attach-device":
            # Change boot order to disk
            libvirt.change_boot_order(vm_name, "disk", "1")
            vm.destroy()
            vm.start()
            # Generate attached xml
            new_iface = Interface(type_name=iface_type)
            xml_file_tmp = libvirt.modify_vm_iface(vm_name, "get_xml",
                                                   iface_format)
            new_iface.xml = xml_file_tmp
            new_iface.del_address()
            xml_file = new_iface.xml

        # To confirm vm's state and make sure os fully started
        if start_vm == "no":
            if vm.is_alive():
                vm.destroy()
        else:
            vm.wait_for_login().close()

        if paused_after_vm_start:
            vm.pause()

        # Set attach-interface domain
        dom_uuid = vm.get_uuid()
        dom_id = vm.get_id()

        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = dom_id
        elif vm_ref == "domuuid":
            vm_ref = dom_uuid
        elif vm_ref == "hexdomid" and dom_id is not None:
            vm_ref = hex(int(dom_id))

        # Set attach-interface options and Start attach-interface test
        if correct_attach:
            options = set_options("network", "default", iface_mac, "",
                                  "attach", None, iface_model)
            if readonly:
                virsh_dargs.update({'readonly': True, 'debug': True})
            attach_result = virsh.attach_interface(vm_name, options,
                                                   **virsh_dargs)
        else:
            if attach_cmd == "attach-interface":
                options = set_options(iface_type, iface_source, iface_mac,
                                      options_suffix, "attach", iface_target,
                                      iface_model, iface_inbound,
                                      iface_outbound)
                attach_result = virsh.attach_interface(vm_ref, options,
                                                       **virsh_dargs)
            elif attach_cmd == "attach-device":
                attach_result = virsh.attach_device(vm_name, xml_file,
                                                    ignore_status=True,
                                                    debug=True)
        attach_status = attach_result.exit_status
        logging.debug(attach_result)

        # If attach interface failed.
        if attach_status:
            if not status_error:
                fail_flag = 1
                result_info.append("Attach Failed: %s" % attach_result.stderr)
            elif status_error:
                # Here we just use it to exit, do not mean test failed
                fail_flag = 1
        # If attach interface succeeded.
        else:
            if status_error and not correct_attach:
                fail_flag = 1
                result_info.append("Attach Success with wrong command.")

        if fail_flag and start_vm == "yes":
            vm.destroy()
            if len(result_info):
                test.fail(result_info)
            else:
                # Exit because it is error_test for attach-interface.
                return

        if "print-xml" in options_suffix:
            iface_obj = Interface(type_name=iface_type)
            iface_obj.xml = attach_result.stdout.strip()
            if (iface_obj.type_name == iface_type
                    and iface_obj.source['dev'] == iface_source
                    and iface_obj.target['dev'] == iface_target
                    and iface_obj.model == iface_model
                    and iface_obj.bandwidth.inbound == eval(
                        iface_format['inbound'])
                    and iface_obj.bandwidth.outbound == eval(
                        iface_format['outbound'])):
                logging.info("Print ml all element check pass")
            else:
                test.fail("Print xml do not show as expected")

        # Check dumpxml file whether the interface is added successfully.
        status, ret = check_dumpxml_iface(vm_name, iface_format)

        if "print-xml" not in options_suffix:
            # Check validate_xml_result flag to determine whether apply
            # check_interface_xml.
            if validate_xml_result:
                # If options_suffix contains config, it need dump inactive
                # xml.
                is_active = True
                if options_suffix.count("config"):
                    is_active = False
                # Check dumping VM xml value.
                if not check_interface_xml(vm_name, iface_type, iface_source,
                                           iface_mac, is_active):
                    test.fail("Failed to find matched interface values in "
                              "VM interface xml")
            if status:
                fail_flag = 1
                result_info.append(ret)
        else:
            if status == 0:
                test.fail("Attach interface effect in xml with print-xml "
                          "option")
            else:
                return

        # Login to domain to check new interface.
        if not vm.is_alive():
            vm.start()
        elif vm.state() == "paused":
            vm.resume()
        vm.wait_for_login().close()
        status, ret = login_to_check(vm, iface_mac)
        if status:
            fail_flag = 1
            result_info.append(ret)

        # Check on host for direct type
        # NOTE(review): the device name "test" below is hardcoded — verify it
        # matches the macvtap target the attach actually creates.
        if iface_type == 'direct':
            cmd_result = process.run(
                "ip -d link show test").stdout_text.strip()
            logging.info("cmd output is %s", cmd_result)
            check_patten = ("%s@%s.*\n.*%s.*\n.*macvtap.*mode.*%s"
                            % (iface_target, iface_source,
                               iface_mac, iface_mode))
            logging.info("check patten is %s", check_patten)
            if not re.search(check_patten, cmd_result):
                logging.error("Can not find %s in ip link" % check_patten)
                fail_flag = 1
                result_info.append(cmd_result)

        # Do operation and check again
        if restart_libvirtd == "yes":
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()

        if save_restore == "yes":
            check_save_restore(vm_name)

        status, ret = check_dumpxml_iface(vm_name, iface_format)
        if status:
            fail_flag = 1
            result_info.append(ret)

        # Set detach-interface options
        options = set_options(iface_type, None, iface_mac,
                              options_suffix, "detach")

        # Start detach-interface test
        if save_restore == "yes" and vm_ref == dom_id:
            vm_ref = vm_name
        detach_result = virsh.detach_interface(vm_ref, options, **virsh_dargs)
        detach_status = detach_result.exit_status
        detach_msg = detach_result.stderr.strip()

        logging.debug(detach_result)

        # NOTE(review): status_error is a bool; "status_error == 0" relies on
        # False == 0 in Python — confirm this is intentional.
        if detach_status == 0 and status_error == 0:
            # If command with --config parameter, ignore below checking.
            if options_suffix.count("config"):
                return
            # Check the xml after detach and clean up if needed.
            time.sleep(5)
            status, _ = check_dumpxml_iface(vm_name, iface_format)
            if status == 0:
                detach_status = 1
                detach_msg = "xml still exist after detach"
                cleanup_options = "--type %s --mac %s" % (iface_type,
                                                          iface_mac)
                virsh.detach_interface(vm_ref, cleanup_options, **virsh_dargs)
            else:
                logging.info("After detach, the interface xml disappeared")

        # Check results.
        if status_error:
            if detach_status == 0:
                test.fail("Detach Success with wrong command.")
        else:
            if detach_status != 0:
                test.fail("Detach Failed: %s" % detach_msg)
            else:
                if fail_flag:
                    test.fail("Attach-Detach Success but "
                              "something wrong with its "
                              "functional use:%s" % result_info)
    finally:
        # Always restore the pristine domain definition recorded above.
        if vm.is_alive():
            vm.destroy()
        backup_xml.sync()
def run(test, params, env):
    """
    Test the command virsh capabilities

    (1) Call virsh capabilities
    (2) Call virsh capabilities with an unexpected option
    (3) Call virsh capabilities with libvirtd service stop

    :param test: test object (unused directly; kept for runner interface)
    :param params: test parameter dict-like object
    :param env: test environment (unused here)
    :raises error.TestFail: when the command result contradicts expectation
    """

    def compare_capabilities_xml(source):
        # Validate selected fields of the capabilities XML against the host.
        cap_xml = capability_xml.CapabilityXML()
        cap_xml.xml = source

        # Check that host has a non-empty UUID tag.
        xml_uuid = cap_xml.uuid
        logging.debug("Host UUID (capabilities_xml): %s", xml_uuid)
        if xml_uuid == "":
            raise error.TestFail("The host uuid in capabilities_xml is none!")

        # Check the host arch.
        xml_arch = cap_xml.arch
        logging.debug("Host arch (capabilities_xml): %s", xml_arch)
        exp_arch = process.run("arch", shell=True).stdout.strip()
        if cmp(xml_arch, exp_arch) != 0:
            raise error.TestFail("The host arch in capabilities_xml is "
                                 "expected to be %s, but get %s" %
                                 (exp_arch, xml_arch))

        # Check the host cpu count.
        xml_cpu_count = cap_xml.cpu_count
        logging.debug("Host cpus count (capabilities_xml): %s", xml_cpu_count)
        cmd = "grep processor /proc/cpuinfo | wc -l"
        exp_cpu_count = int(process.run(cmd, shell=True).stdout.strip())
        if xml_cpu_count != exp_cpu_count:
            raise error.TestFail("Host cpus count is expected to be %s, "
                                 "but get %s" %
                                 (exp_cpu_count, xml_cpu_count))

        # Check the arch of guest supported.
        guest_capa = cap_xml.get_guest_capabilities()
        logging.debug(guest_capa)
        # libvirt track wordsize in hardcode struct virArchData
        wordsize = {}
        wordsize['64'] = ['alpha', 'aarch64', 'ia64', 'mips64', 'mips64el',
                          'parisc64', 'ppc64', 'ppc64le', 's390x', 'sh4eb',
                          'sparc64', 'x86_64']
        wordsize['32'] = ['armv6l', 'armv7l', 'armv7b', 'cris', 'i686',
                          'lm32', 'm68k', 'microblaze', 'microblazeel',
                          'mips', 'mipsel', 'openrisc', 'parisc', 'ppc',
                          'ppcle', 'ppcemb', 's390', 'sh4', 'sparc',
                          'unicore32', 'xtensa', 'xtensaeb']
        # Hypervisor type is derived from the scheme of the connection URI.
        uri_type = process.run("virsh uri", shell=True).stdout.split(':')[0]
        domain_type = "domain_" + uri_type
        for arch_dict in guest_capa.values():
            for arch, val_dict in arch_dict.items():
                # Check wordsize
                if arch not in wordsize[val_dict['wordsize']]:
                    raise error.TestFail("'%s' wordsize '%s' in "
                                         "capabilities_xml not expected" %
                                         (arch, val_dict['wordsize']))
                # Check the type of hypervisor
                if domain_type not in val_dict.keys():
                    raise error.TestFail("domain type '%s' is not matched"
                                         " under arch '%s' in "
                                         "capabilities_xml" %
                                         (uri_type, arch))

        # check power management support.
        try:
            pm_cmd = os_dep.command('pm-is-supported')
            pm_cap_map = {'suspend': 'suspend_mem',
                          'hibernate': 'suspend_disk',
                          'suspend-hybrid': 'suspend_hybrid'}
            exp_pms = []
            for opt in pm_cap_map:
                cmd = '%s --%s' % (pm_cmd, opt)
                res = process.run(cmd, ignore_status=True, shell=True)
                if res.exit_status == 0:
                    exp_pms.append(pm_cap_map[opt])
            pms = cap_xml.power_management_list
            if set(exp_pms) != set(pms):
                raise error.TestFail("Expected supported PMs are %s, got %s "
                                     "instead." % (exp_pms, pms))
        except ValueError:
            # os_dep.command raises ValueError when the binary is missing.
            logging.debug('Power management checking is skipped, since '
                          'command pm-is-supported is not found.')

    connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                              "default"))

    # Prepare libvirtd service.
    # Bug fix: bind ``libvirtd`` unconditionally with an "on" default — the
    # old ``if "libvirtd" in params:`` guard left it unbound when the param
    # was absent, making the later ``if libvirtd == "off"`` checks raise
    # NameError.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    option = params.get("virsh_cap_options")
    try:
        output = virsh.capabilities(option, uri=connect_uri,
                                    ignore_status=False, debug=True)
        status = 0  # good
    except process.CmdError:
        status = 1  # bad
        output = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off":
                raise error.TestFail("Command 'virsh capabilities' succeeded "
                                     "with libvirtd service stopped, "
                                     "incorrect")
            else:
                raise error.TestFail("Command 'virsh capabilities %s' "
                                     "succeeded (incorrect command)" % option)
    elif status_error == "no":
        compare_capabilities_xml(output)
        if status != 0:
            raise error.TestFail("Command 'virsh capabilities %s' failed "
                                 "(correct command)" % option)
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")
    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        raise error.TestNAError("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)

    # NOTE(review): remove_existing is the string "yes"/"no" — both are
    # truthy, so this branch always runs; verify intent.
    if remove_existing:
        for netxml in backup.values():
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    # Map the symbolic option selector onto concrete define/undefine args.
    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        # Make the XML file readable by the unprivileged user.
        cmd = "chmod 666 %s" % testnet_xml.xml
        utils.system(cmd)

    try:
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # Check network states
        if check_states and not define_status:
            net_state = virsh_instance.net_state_dict()
            if (net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "defined netowrk: %s" % str(net_state))

        # If defining network succeed, then trying to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command "
                                   "failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Check network states for normal test
        if check_states and not status_error:
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "started netowrk: %s" % str(net_state))

            # Try to set autostart
            virsh.net_autostart(net_name, **virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if not net_state[net_name]['autostart']:
                fail_flag = 1
                result_info.append("Failed to set autostart for network %s"
                                   % net_name)
            # Restart libvirtd and check state
            # Close down persistent virsh session before libvirtd restart
            if hasattr(virsh_instance, 'close_session'):
                virsh_instance.close_session()
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            # Need to redefine virsh_instance after libvirtd restart
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    not net_state[net_name]['autostart']):
                fail_flag = 1
                result_info.append("Found wrong network state after "
                                   "restarting"
                                   " libvirtd: %s" % str(net_state))
            # Undefine an active network and check state
            undefine_status = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs).exit_status
            if not undefine_status:
                net_state = virsh_instance.net_state_dict()
                if (not net_state[net_name]['active'] or
                        net_state[net_name]['autostart'] or
                        net_state[net_name]['persistent']):
                    fail_flag = 1
                    result_info.append("Found wrong network states for "
                                       "undefined netowrk: %s"
                                       % str(net_state))

        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network
        if not check_states:
            undefine_result = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs)
            if trans_ref != "define":
                logging.debug(undefine_result)
            undefine_status = undefine_result.exit_status

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in leftovers.values():
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in backup.values():
            netxml.sync(backup_state[netxml.name])
        # Close down persistent virsh session (including for all netxml
        # copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml

    # Check status_error
    # If fail_flag is set, it must be transaction test.
    # NOTE(review): TestFail is given result_info as a second positional arg
    # here ("%s" is never interpolated) — verify the intended message.
    if fail_flag:
        raise error.TestFail("Define network for transaction test "
                             "failed:%s", result_info)

    # The logic to check result:
    # status_error&only undefine:it is negative undefine test only
    # status_error&(no undefine):it is negative define test only
    # (not status_error)&(only undefine):it is positive transaction test.
    # (not status_error)&(no undefine):it is positive define test only
    # NOTE(review): undefine_status / start_status are only bound on some
    # parameter combinations — confirm every cfg variant reaching a branch
    # below has bound the variable it reads.
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                raise error.TestFail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    raise error.TestFail("Define an unexpected network, "
                                         "and start it successfully.")
                else:
                    raise error.TestFail("Define an unexpected network, "
                                         "but start it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                raise error.TestFail("Define network for transaction "
                                     "successfully, but undefine failed.")
        else:
            if define_status != 0:
                raise error.TestFail("Run failed with right command")
            else:
                if start_status != 0:
                    raise error.TestFail("Network is defined as expected, "
                                         "but start it failed.")
def run(test, params, env):
    """
    Test the command virsh freecell

    (1) Call virsh freecell
    (2) Call virsh freecell --all
    (3) Call virsh freecell with a numeric argument
    (4) Call virsh freecell xyz
    (5) Call virsh freecell with libvirtd service stop

    :param test: test object (unused directly; kept for runner interface)
    :param params: test parameter dict-like object
    :param env: test environment (unused here)
    :raises error.TestFail: when the command result contradicts expectation
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    option = params.get("virsh_freecell_options")

    # Prepare libvirtd service.
    # Bug fix: bind ``libvirtd`` unconditionally with an "on" default — the
    # old ``params.has_key("libvirtd")`` guard left it unbound when the
    # param was absent, so the recovery/result checks below raised
    # NameError.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    cmd_result = virsh.freecell(ignore_status=True, extra=option,
                                uri=connect_uri, debug=True)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check the output: 'numatune' support marks a libvirt new enough to
    # understand freecell --all.
    if virsh.has_help_command('numatune'):
        OLD_LIBVIRT = False
    else:
        OLD_LIBVIRT = True
        if option == '--all':
            raise error.TestNAError("Older libvirt virsh freecell "
                                    "doesn't support --all option")

    def output_check(freecell_output):
        # Valid freecell output always reports sizes in kB/KiB.
        if not re.search("ki?B", freecell_output, re.IGNORECASE):
            raise error.TestFail("virsh freecell output invalid: "
                                 + freecell_output)

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off":
                raise error.TestFail(
                    "Command 'virsh freecell' succeeded "
                    "with libvirtd service stopped, incorrect")
            else:
                # newer libvirt
                if not OLD_LIBVIRT:
                    raise error.TestFail(
                        "Command 'virsh freecell %s' succeeded"
                        "(incorrect command)" % option)
                else:  # older libvirt
                    raise error.TestNAError('Older libvirt virsh freecell '
                                            'incorrectly processes extranious'
                                            'command-line options')
    elif status_error == "no":
        output_check(output)
        if status != 0:
            raise error.TestFail("Command 'virsh freecell %s' failed "
                                 "(correct command)" % option)
def run(test, params, env):
    """
    Test command: virsh net-start.
    """
    # Gather test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    status_error = params.get("status_error", "no") == "yes"
    inactive_default = params.get("net_start_inactive_default",
                                  "yes") == "yes"
    net_ref = params.get("net_start_net_ref", "netname")  # default is tested
    extra = params.get("net_start_options_extra", "")  # extra cmd-line params.

    # One shared argument dict keeps the virsh calls easy to maintain.
    virsh_dargs = {'uri': uri, 'debug': True, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    polkit_setup = params.get('setup_libvirt_polkit') == 'yes'
    if not libvirt_version.version_compare(1, 1, 1) and polkit_setup:
        raise error.TestNAError("API acl test not supported in current"
                                " libvirt version.")
    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user and unprivileged_user.count('EXAMPLE'):
        unprivileged_user = '******'

    # Get all network instance
    all_nets = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)

    # Prepare default network for following test.
    try:
        default_net = all_nets['default']
    except KeyError:
        virsh_instance.close_session()
        raise error.TestNAError("Test requires default network to exist")

    # To confirm default network is active
    if not default_net.active:
        default_net.active = True

    # inactive default according test's need
    if inactive_default:
        logging.info("Stopped default network")
        default_net.active = False

    # State before run command
    state_before = virsh_instance.net_state_dict()
    logging.debug("Origin network(s) state: %s", state_before)

    # Resolve the symbolic network reference.
    if net_ref == "netname":
        net_ref = default_net.name
    elif net_ref == "netuuid":
        net_ref = default_net.uuid

    if polkit_setup:
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False,
                       'ignore_status': True}

    # Run test case
    cmd_result = virsh.net_start(net_ref, extra, **virsh_dargs)
    logging.debug(cmd_result)
    exit_code = cmd_result.exit_status

    # Get current net_stat_dict
    state_after = virsh_instance.net_state_dict()
    logging.debug("Current network(s) state: %s", state_after)
    default_active = state_after['default']['active']

    # Recover default state to active
    if not default_active:
        default_net.active = True

    virsh_instance.close_session()

    # Check status_error (guard-clause style: fail on the first mismatch).
    if status_error:
        if exit_code == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif exit_code != 0:
        raise error.TestFail("Run failed with right command")
    elif not default_active:
        raise error.TestFail("Execute cmd successfully but "
                             "default is inactive actually.")
def run_virt_edit(test, params, env):
    """
    Test of virt-edit.

    1) Get and init parameters for test.
    2) Prepare environment.
    3) Run virt-edit command and get result.
    5) Recover environment.
    6) Check result.

    :param test: test object (unused directly; kept for runner interface)
    :param params: test parameter dict-like object
    :param env: test environment holding the VM object
    :raises error.TestError: on bad test setup (e.g. multiple domain disks)
    :raises error.TestFail: when the command result contradicts expectation
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    start_vm = params.get("start_vm", "no")
    vm_ref = params.get("virt_edit_vm_ref", vm_name)
    file_ref = params.get("virt_edit_file_ref", "/etc/hosts")
    created_img = params.get("virt_edit_created_img", "/tmp/foo.img")
    foo_line = params.get("foo_line", "")
    options = params.get("virt_edit_options")
    options_suffix = params.get("virt_edit_options_suffix")
    status_error = params.get("status_error", "no")

    # virt-edit should not be used when vm is running.
    # (for normal test)
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=True)

    dom_disk_dict = vm.get_disk_devices()  # TODO
    dom_uuid = vm.get_uuid()

    # Resolve the virt-edit target from the symbolic vm_ref parameter.
    if vm_ref == "domdisk":
        if len(dom_disk_dict) != 1:
            raise error.TestError("Only one disk device should exist on "
                                  "%s:\n%s." % (vm_name, dom_disk_dict))
        disk_detail = dom_disk_dict.values()[0]
        vm_ref = disk_detail['source']
        logging.info("disk to be edit:%s", vm_ref)
    elif vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "createdimg":
        vm_ref = created_img
        utils.run("dd if=/dev/zero of=%s bs=256M count=1" % created_img)

    # Decide whether pass a exprt for virt-edit command.
    if foo_line != "":
        expr = "s/$/%s/" % foo_line
    else:
        expr = ""

    # Stop libvirtd if test need.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    # Run test
    virsh_dargs = {'ignore_status': True, 'debug': True, 'uri': uri}
    result = lgf.virt_edit_cmd(vm_ref, file_ref, options,
                               options_suffix, expr, **virsh_dargs)
    status = result.exit_status

    # Recover libvirtd.
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # Best-effort cleanup of the scratch image (may not exist; rm -f is safe).
    utils.run("rm -f %s" % created_img)

    status_error = (status_error == "yes")
    if status != 0:
        if not status_error:
            raise error.TestFail("Command executed failed.")
    else:
        # Bug fix: previously a negative test (status_error == yes) passed
        # silently when the command unexpectedly succeeded; fail explicitly.
        if status_error:
            raise error.TestFail("Command succeeded but failure was "
                                 "expected (negative test).")
        if (expr != "" and
                (not login_to_check_foo_line(vm, file_ref, foo_line))):
            raise error.TestFail("Virt-edit to add %s in %s failed."
                                 "Test failed." % (foo_line, file_ref))
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")
    expect_msg = params.get("net_define_undefine_err_msg")

    # define multi ip/dhcp sections in network
    multi_ip = "yes" == params.get("multi_ip", "no")
    netmask = params.get("netmask")
    prefix_v6 = params.get("prefix_v6")
    single_v6_range = "yes" == params.get("single_v6_range", "no")
    # Get 2nd ipv4 dhcp range
    dhcp_ranges_start = params.get("dhcp_ranges_start", None)
    dhcp_ranges_end = params.get("dhcp_ranges_end", None)

    # Get 2 groups of ipv6 ip address and dhcp section
    address_v6_1 = params.get("address_v6_1")
    dhcp_ranges_v6_start_1 = params.get("dhcp_ranges_v6_start_1", None)
    dhcp_ranges_v6_end_1 = params.get("dhcp_ranges_v6_end_1", None)

    address_v6_2 = params.get("address_v6_2")
    dhcp_ranges_v6_start_2 = params.get("dhcp_ranges_v6_start_2", None)
    dhcp_ranges_v6_end_2 = params.get("dhcp_ranges_v6_end_2", None)

    # Edit net xml forward/ip part then define/start to check invalid setting
    edit_xml = "yes" == params.get("edit_xml", "no")
    address_v4 = params.get("address_v4")
    nat_port_start = params.get("nat_port_start")
    nat_port_end = params.get("nat_port_end")
    test_port = "yes" == params.get("test_port", "no")
    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)
    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        # Mask placeholder user names coming from sample configs.
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)
    if remove_existing:
        # Forcefully wipe all pre-existing networks (undefine + destroy).
        for netxml in list(backup.values()):
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    # Select define/undefine arguments for positive and negative cases.
    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        # Re-point virsh at the polkit-restricted connection.
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        # XML file must be readable by the unprivileged user.
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    if params.get('net_define_undefine_readonly', 'no') == 'yes':
        virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True,
                       'readonly': True}
    try:
        if edit_xml:
            # Replace the network's IP element with a possibly-invalid one.
            ipxml_v4 = network_xml.IPXML()
            ipxml_v4.address = address_v4
            ipxml_v4.netmask = netmask
            ipxml_v4.dhcp_ranges = {"start": dhcp_ranges_start,
                                    "end": dhcp_ranges_end}
            testnet_xml.del_ip()
            testnet_xml.set_ip(ipxml_v4)
        if test_port:
            nat_port = {"start": nat_port_start, "end": nat_port_end}
            testnet_xml.nat_port = nat_port
        testnet_xml.debug_xml()
        if multi_ip:
            # Enabling IPv6 forwarding with RA routes without accept_ra set to 2
            # is likely to cause routes loss
            sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra'
            original_accept_ra = to_text(
                process.system_output(sysctl_cmd + ' -n'))
            if original_accept_ra != '2':
                process.system(sysctl_cmd + '=2')
            # add another ipv4 address and dhcp range
            set_ip_section(testnet_xml, address_v4, ipv6=False,
                           netmask=netmask,
                           dhcp_ranges_start=dhcp_ranges_start,
                           dhcp_ranges_end=dhcp_ranges_end)
            # add ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_1, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_1,
                           dhcp_ranges_end=dhcp_ranges_v6_end_1)
            # 2nd ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_2, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_2,
                           dhcp_ranges_end=dhcp_ranges_v6_end_2)
        testnet_xml.debug_xml()
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # Check network states
        # A freshly defined network must be persistent, inactive and
        # without autostart.
        if check_states and not define_status:
            net_state = virsh_instance.net_state_dict()
            if (net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "defined netowrk: %s" % str(net_state))

        if define_status == 1 and status_error and expect_msg:
            # Negative case: verify the error message content.
            libvirt.check_result(define_result, expect_msg.split(';'))

        # If defining network succeed, then trying to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Check network states for normal test
        if check_states and not status_error:
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "started netowrk: %s" % str(net_state))
            # Try to set autostart
            virsh.net_autostart(net_name, **virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if not net_state[net_name]['autostart']:
                fail_flag = 1
                result_info.append("Failed to set autostart for network %s"
                                   % net_name)
            # Restart libvirtd and check state
            # Close down persistent virsh session before libvirtd restart
            if hasattr(virsh_instance, 'close_session'):
                virsh_instance.close_session()
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            # Need to redefine virsh_instance after libvirtd restart
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    not net_state[net_name]['autostart']):
                fail_flag = 1
                result_info.append("Found wrong network state after restarting"
                                   " libvirtd: %s" % str(net_state))
            logging.debug("undefine network:")
            # prepare the network status
            # Bring the network into the persistent/active combination the
            # variant asks for before undefining it.
            if not net_persistent:
                virsh.net_undefine(net_name, ignore_status=False)
            if not net_active:
                virsh.net_destroy(net_name, ignore_status=False)
            undefine_status = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs).exit_status
            net_state = virsh_instance.net_state_dict()
            if net_persistent:
                if undefine_status:
                    fail_flag = 1
                    result_info.append("undefine should succeed but failed")
                if net_active:
                    # Undefining an active persistent net makes it transient.
                    if (not net_state[net_name]['active'] or
                            net_state[net_name]['autostart'] or
                            net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "undefined netowrk: %s"
                                           % str(net_state))
                else:
                    if net_name in net_state:
                        fail_flag = 1
                        result_info.append("Transient network should not exists "
                                           "after undefine : %s"
                                           % str(net_state))
            else:
                # Undefining an already-transient network must fail.
                if not undefine_status:
                    fail_flag = 1
                    result_info.append("undefine transient network should fail "
                                       "but succeed: %s" % str(net_state))
        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network
        if not check_states:
            undefine_result = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs)
            if trans_ref != "define":
                logging.debug(undefine_result)
            undefine_status = undefine_result.exit_status
    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])
        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml

    # Check status_error
    # If fail_flag is set, it must be transaction test.
    if fail_flag:
        test.fail("Define network for transaction test "
                  "failed:%s" % result_info)

    # The logic to check result:
    # status_error&only undefine:it is negative undefine test only
    # status_error&(no undefine):it is negative define test only
    # (not status_error)&(only undefine):it is positive transaction test.
    # (not status_error)&(no undefine):it is positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Define an unexpected network, "
                              "and start it successfully.")
                else:
                    test.fail("Define an unexpected network, "
                              "but start it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Define network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but start it failed.")
def run(test, params, env):
    """
    Test the command virsh domcapabilities

    Builds one or more {arch, virttype, emulatorbin, machine} option sets
    (either from explicit params or auto-discovered from virsh
    capabilities), runs ``virsh domcapabilities`` for each, and checks the
    exit status of the last invocation against ``status_error``.
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    remote_ref = params.get("remote_ref", "")
    if remote_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        # Refuse to run against the sample placeholder host.
        if 'EXAMPLE.COM' in remote_ip:
            test.cancel("Please replace '%s' with valid remote ip"
                        % remote_ip)
        ssh_key.setup_ssh_key(remote_ip, "root", remote_pwd)
        connect_uri = libvirt_vm.complete_uri(remote_ip)
    virsh_options = params.get("virsh_options", "")
    virttype = params.get("virttype_value", "")
    emulatorbin = params.get("emulatorbin_value", "")
    arch = params.get("arch_value", "")
    machine = params.get("machine_value", "")
    option_dict = {'arch': arch, 'virttype': virttype,
                   'emulatorbin': emulatorbin, 'machine': machine}
    options_list = [option_dict]
    extra_option = params.get("extra_option", "")

    # Get --virttype, --emulatorbin, --arch and --machine values from
    # virsh capabilities output, then assemble option for testing
    # This will ignore the virttype, emulatorbin, arch and machine values
    if virsh_options == "AUTO":
        options_list = []
        capa_xml = capability_xml.CapabilityXML()
        guest_capa = capa_xml.get_guest_capabilities()
        # guest_capa maps os-type -> {arch -> properties}; 'arch' below
        # deliberately rebinds the parameter since its value is unused in
        # AUTO mode.
        for arch_prop in list(itervalues(guest_capa)):
            for arch in list(arch_prop.keys()):
                machine_list = arch_prop[arch]['machine']
                virttype_list = []
                emulatorbin_list = [arch_prop[arch]['emulator']]
                for key in list(arch_prop[arch].keys()):
                    # Keys like "domain_kvm" carry per-virttype data;
                    # strip the "domain_" prefix to get the virttype name.
                    if key.startswith("domain_"):
                        virttype_list.append(key[7:])
                        if list(itervalues(arch_prop[arch][key])):
                            emulatorbin_list.append(
                                arch_prop[arch][key]['emulator'])
                # Cross-product of every discovered combination.
                for virttype in virttype_list:
                    for emulatorbin in emulatorbin_list:
                        for machine in machine_list:
                            option_dict = {'arch': arch,
                                           'virttype': virttype,
                                           'emulatorbin': emulatorbin,
                                           'machine': machine}
                            options_list.append(option_dict)

    # Run test cases
    for option in options_list:
        result = virsh.domcapabilities(virttype=option['virttype'],
                                       emulatorbin=option['emulatorbin'],
                                       arch=option['arch'],
                                       machine=option['machine'],
                                       options=extra_option,
                                       uri=connect_uri,
                                       ignore_status=True,
                                       debug=True)
    # Check status_error
    # NOTE(review): only the result of the LAST option set is checked —
    # earlier iterations' results are overwritten; confirm this is intended.
    status_error = "yes" == params.get("status_error", "no")
    utlv.check_exit_status(result, status_error)
def run(test, params, env):
    """
    Test of virt-edit.

    1) Get and init parameters for test.
    2) Prepare environment.
    3) Run virt-edit command and get result.
    5) Recover environment.
    6) Check result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    remote_host = params.get("virt_edit_remote_host", "HOST.EXAMPLE")
    remote_user = params.get("virt_edit_remote_user", "root")
    remote_passwd = params.get("virt_edit_remote_passwd", "PASSWD.EXAMPLE")
    connect_uri = params.get("virt_edit_connect_uri")
    # Remote test: build a qemu+ssh URI; skip if placeholders unconfigured.
    if connect_uri is not None:
        uri = "qemu+ssh://%s@%s/system" % (remote_user, remote_host)
        if uri.count("EXAMPLE"):
            raise error.TestNAError("Please config host and passwd first.")
        # Config ssh autologin for it
        ssh_key.setup_ssh_key(remote_host, remote_user,
                              remote_passwd, port=22)
    else:
        uri = libvirt_vm.normalize_connect_uri(
            params.get("connect_uri", "default"))
    start_vm = params.get("start_vm", "no")
    vm_ref = params.get("virt_edit_vm_ref", vm_name)
    file_ref = params.get("virt_edit_file_ref", "/etc/hosts")
    created_img = params.get("virt_edit_created_img", "/tmp/foo.img")
    foo_line = params.get("foo_line", "")
    options = params.get("virt_edit_options")
    options_suffix = params.get("virt_edit_options_suffix")
    status_error = params.get("status_error", "no")
    backup_extension = params.get("virt_edit_backup_extension")
    test_format = params.get("virt_edit_format")

    # virt-edit should not be used when vm is running.
    # (for normal test)
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=True)

    dom_disk_dict = vm.get_disk_devices()  # TODO
    dom_uuid = vm.get_uuid()
    # Disk format: raw or qcow2
    disk_format = None
    # If object is a disk file path
    is_disk = False

    if vm_ref == "domdisk":
        if len(dom_disk_dict) != 1:
            raise error.TestError("Only one disk device should exist on "
                                  "%s:\n%s." % (vm_name, dom_disk_dict))
        # Python-2 style access; dict_values is not indexable on Python 3.
        disk_detail = dom_disk_dict.values()[0]
        vm_ref = disk_detail['source']
        logging.info("disk to be edit:%s", vm_ref)
        if test_format:
            # Get format:raw or qcow2
            info = utils.run("qemu-img info %s" % vm_ref).stdout
            for line in info.splitlines():
                comps = line.split(':')
                if comps[0].count("format"):
                    disk_format = comps[-1].strip()
                    break
            if disk_format is None:
                raise error.TestError("Cannot get disk format:%s" % info)
        is_disk = True
    elif vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "createdimg":
        # Edit a brand-new raw image instead of a domain.
        vm_ref = created_img
        utils.run("dd if=/dev/zero of=%s bs=256M count=1" % created_img)
        is_disk = True

    # Decide whether pass a exprt for virt-edit command.
    # The sed-style expression appends foo_line to every line of file_ref.
    if foo_line != "":
        expr = "s/$/%s/" % foo_line
    else:
        expr = ""

    if backup_extension is not None:
        if options is None:
            options = ""
        options += " -b %s" % backup_extension

    # Stop libvirtd if test need.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test
    result = lgf.virt_edit_cmd(vm_ref, file_ref, is_disk=is_disk,
                               disk_format=disk_format, options=options,
                               extra=options_suffix, expr=expr,
                               connect_uri=uri, debug=True)
    status = result.exit_status

    # Recover libvirtd.
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Best-effort cleanup; harmless when the image was never created.
    utils.run("rm -f %s" % created_img)

    # Remove backup file in vm if it exists
    if backup_extension is not None:
        backup_file = file_ref + backup_extension
        cleanup_file_in_vm(vm, backup_file)

    # Check result against expectation.
    status_error = (status_error == "yes")
    if status != 0:
        if not status_error:
            raise error.TestFail("Command executed failed.")
    else:
        # On success with an expression, verify the edit inside the guest.
        if (expr != "" and
                (not login_to_check_foo_line(vm, file_ref, foo_line))):
            raise error.TestFail("Virt-edit to add %s in %s failed."
                                 "Test failed." % (foo_line, file_ref))
def run(test, params, env):
    """
    Test the command virsh uri

    (1) Call virsh uri
    (2) Call virsh -c remote_uri uri
    (3) Call virsh uri with an unexpected option
    (4) Call virsh uri with libvirtd service stop

    :param test: avocado test object
    :param params: dict-like test parameters; notable keys:
                   virsh_uri_options, target_uri, uri_remote_ref,
                   libvirtd ("off" stops the daemon first), status_error
    :param env: test environment (unused directly here)
    :raises exceptions.TestSkipError: when target_uri holds a sample value
    :raises exceptions.TestFail: on unexpected success/failure or a
                                 canonical-URI mismatch
    """
    connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                              "default"))
    option = params.get("virsh_uri_options")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    remote_user = params.get("remote_user", "root")
    # Forming the uri using the api
    target_uri = params.get("target_uri")
    remote_ref = params.get("uri_remote_ref", "")

    if remote_ref:
        if target_uri.count('EXAMPLE.COM'):
            raise exceptions.TestSkipError(
                'target_uri configuration set to sample value')
        logging.info("The target_uri: %s", target_uri)
        cmd = "virsh -c %s uri" % target_uri
    else:
        cmd = "virsh uri %s" % option

    # Prepare libvirtd service.
    # BUGFIX: default to "on" so the recovery step below never reads an
    # unbound local when the "libvirtd" param is absent (previously the
    # variable was only assigned inside an `if "libvirtd" in params` guard,
    # causing a NameError at the restart check).
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    logging.info("The command: %s", cmd)

    # setup autologin for ssh to remote machine to execute commands
    if remote_ref:
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)
    try:
        if remote_ref == "remote":
            connect_uri = target_uri
        uri_test = virsh.canonical_uri(option, uri=connect_uri,
                                       ignore_status=False,
                                       debug=True)
        status = 0  # good
    except process.CmdError:
        status = 1  # bad
        uri_test = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise exceptions.TestFail("Command: %s succeeded "
                                      "(incorrect command)" % cmd)
        else:
            logging.info("command: %s is a expected error", cmd)
    elif status_error == "no":
        if target_uri != uri_test:
            raise exceptions.TestFail("Virsh cmd uri %s != %s." %
                                      (uri_test, target_uri))
        if status != 0:
            raise exceptions.TestFail("Command: %s failed "
                                      "(correct command)" % cmd)
def run(test, params, env):
    """
    Test of virt-edit.

    1) Get and init parameters for test.
    2) Prepare environment.
    3) Run virt-edit command and get result.
    5) Recover environment.
    6) Check result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    remote_host = params.get("virt_edit_remote_host", "HOST.EXAMPLE")
    remote_user = params.get("virt_edit_remote_user", "root")
    remote_passwd = params.get("virt_edit_remote_passwd", "PASSWD.EXAMPLE")
    connect_uri = params.get("virt_edit_connect_uri")
    # Remote test: build a qemu+ssh URI; skip if placeholders unconfigured.
    if connect_uri is not None:
        uri = "qemu+ssh://%s@%s/system" % (remote_user, remote_host)
        if uri.count("EXAMPLE"):
            raise error.TestNAError("Please config host and passwd first.")
        # Config ssh autologin for it
        ssh_key.setup_ssh_key(remote_host, remote_user,
                              remote_passwd, port=22)
    else:
        uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                          "default"))
    start_vm = params.get("start_vm", "no")
    vm_ref = params.get("virt_edit_vm_ref", vm_name)
    file_ref = params.get("virt_edit_file_ref", "/etc/hosts")
    created_img = params.get("virt_edit_created_img", "/tmp/foo.img")
    foo_line = params.get("foo_line", "")
    options = params.get("virt_edit_options")
    options_suffix = params.get("virt_edit_options_suffix")
    status_error = params.get("status_error", "no")
    backup_extension = params.get("virt_edit_backup_extension")
    test_format = params.get("virt_edit_format")

    # virt-edit should not be used when vm is running.
    # (for normal test)
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=True)

    dom_disk_dict = vm.get_disk_devices()  # TODO
    dom_uuid = vm.get_uuid()
    # Disk format: raw or qcow2
    disk_format = None
    # If object is a disk file path
    is_disk = False

    if vm_ref == "domdisk":
        if len(dom_disk_dict) != 1:
            raise error.TestError("Only one disk device should exist on "
                                  "%s:\n%s." % (vm_name, dom_disk_dict))
        # Python-2 style access; dict_values is not indexable on Python 3.
        disk_detail = dom_disk_dict.values()[0]
        vm_ref = disk_detail['source']
        logging.info("disk to be edit:%s", vm_ref)
        if test_format:
            # Get format:raw or qcow2
            info = utils.run("qemu-img info %s" % vm_ref).stdout
            for line in info.splitlines():
                comps = line.split(':')
                if comps[0].count("format"):
                    disk_format = comps[-1].strip()
                    break
            if disk_format is None:
                raise error.TestError("Cannot get disk format:%s" % info)
            # NOTE(review): this REPLACES any virt_edit_options value set
            # above rather than appending to it — confirm that is intended.
            options = "--format=%s" % disk_format
        is_disk = True
    elif vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "createdimg":
        # Edit a brand-new raw image instead of a domain.
        vm_ref = created_img
        utils.run("dd if=/dev/zero of=%s bs=256M count=1" % created_img)
        is_disk = True

    # Decide whether pass a exprt for virt-edit command.
    # The sed-style expression appends foo_line to every line of file_ref.
    if foo_line != "":
        expr = "s/$/%s/" % foo_line
    else:
        expr = ""

    if backup_extension is not None:
        if options is None:
            options = ""
        options += " -b %s" % backup_extension

    # Stop libvirtd if test need.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test
    # NOTE(review): positional argument order assumed to match
    # lgf.virt_edit_cmd's signature — verify against the helper module.
    result = lgf.virt_edit_cmd(vm_ref, file_ref, is_disk, options,
                               options_suffix, expr, uri, debug=True)
    status = result.exit_status

    # Recover libvirtd.
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Best-effort cleanup; harmless when the image was never created.
    utils.run("rm -f %s" % created_img)

    # Remove backup file in vm if it exists
    if backup_extension is not None:
        backup_file = file_ref + backup_extension
        cleanup_file_in_vm(vm, backup_file)

    # Check result against expectation.
    status_error = (status_error == "yes")
    if status != 0:
        if not status_error:
            raise error.TestFail("Command executed failed.")
    else:
        # On success with an expression, verify the edit inside the guest.
        if (expr != "" and
                (not login_to_check_foo_line(vm, file_ref, foo_line))):
            raise error.TestFail("Virt-edit to add %s in %s failed."
                                 "Test failed." % (foo_line, file_ref))
def run(test, params, env):
    """
    Test the command virsh uri

    (1) Call virsh uri
    (2) Call virsh -c remote_uri uri
    (3) Call virsh uri with an unexpected option
    (4) Call virsh uri with libvirtd service stop

    :param test: avocado test object
    :param params: dict-like test parameters; notable keys:
                   virsh_uri_options, target_uri, uri_remote_ref,
                   libvirtd ("off" stops the daemon first), status_error
    :param env: test environment (unused directly here)
    :raises exceptions.TestSkipError: when target_uri holds a sample value
    :raises exceptions.TestFail: on unexpected success/failure or a
                                 canonical-URI mismatch
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    option = params.get("virsh_uri_options")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    remote_user = params.get("remote_user", "root")
    # Forming the uri using the api
    target_uri = params.get("target_uri")
    remote_ref = params.get("uri_remote_ref", "")

    if remote_ref:
        if target_uri.count('EXAMPLE.COM'):
            raise exceptions.TestSkipError(
                'target_uri configuration set to sample value')
        logging.info("The target_uri: %s", target_uri)
        cmd = "virsh -c %s uri" % target_uri
    else:
        cmd = "virsh uri %s" % option

    # Prepare libvirtd service.
    # BUGFIX: default to "on" so the recovery step below never reads an
    # unbound local when the "libvirtd" param is absent (previously the
    # variable was only assigned inside an `if "libvirtd" in params` guard,
    # causing a NameError at the restart check).
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    logging.info("The command: %s", cmd)

    # setup autologin for ssh to remote machine to execute commands
    config_opt = ["StrictHostKeyChecking=no"]
    if remote_ref:
        ssh_key.setup_remote_ssh_key(remote_ip, remote_user, remote_pwd,
                                     config_options=config_opt)
    try:
        if remote_ref == "remote":
            connect_uri = target_uri
        uri_test = virsh.canonical_uri(option, uri=connect_uri,
                                       ignore_status=False,
                                       debug=True)
        status = 0  # good
    except process.CmdError:
        status = 1  # bad
        uri_test = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise exceptions.TestFail("Command: %s succeeded "
                                      "(incorrect command)" % cmd)
        else:
            logging.info("command: %s is a expected error", cmd)
    elif status_error == "no":
        # BUGFIX: the Python-2-only builtin cmp() was removed in Python 3;
        # a direct inequality test is equivalent here.
        if target_uri != uri_test:
            raise exceptions.TestFail("Virsh cmd uri %s != %s."
                                      % (uri_test, target_uri))
        if status != 0:
            raise exceptions.TestFail("Command: %s failed "
                                      "(correct command)" % cmd)
def run_virsh_attach_detach_interface(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Attach the required interface
    3) According test type(only attach or both attach and detach):
       a.Go on to test detach(if attaching is correct)
       b.Return GOOD or raise TestFail(if attaching is wrong)
    4) Check if attached interface is correct:
       a.Try to catch it in vm's XML file
       b.Try to catch it in vm
    5) Detach the attached interface
    6) Check result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("at_detach_iface_vm_ref", "domname")
    options_suffix = params.get("at_detach_iface_options_suffix", "")
    status_error = "yes" == params.get("status_error", "no")
    start_vm = params.get("start_vm")
    # Should attach must be pass for detach test.
    correct_attach = "yes" == params.get("correct_attach", "no")

    # Interface specific attributes.
    iface_type = params.get("at_detach_iface_type", "network")
    iface_source = params.get("at_detach_iface_source", "default")
    iface_mac = params.get("at_detach_iface_mac", "created")
    virsh_dargs = {'ignore_status': True, 'uri': uri}

    # Get a bridge name for test if iface_type is bridge.
    # If there is no bridge other than virbr0, raise TestNAError
    if iface_type == "bridge":
        host_bridge = utils_net.Bridge()
        bridge_list = host_bridge.list_br()
        try:
            bridge_list.remove("virbr0")
        except AttributeError:
            pass  # If no virbr0, just pass is ok
        logging.debug("Useful bridges:%s", bridge_list)
        # just choosing one bridge on host.
        if len(bridge_list):
            iface_source = bridge_list[0]
        else:
            raise error.TestNAError("No useful bridge on host "
                                    "other than 'virbr0'.")

    dom_uuid = vm.get_uuid()
    dom_id = vm.get_id()

    # To confirm vm's state
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Test both detach and attach, So collect info
    # both of them for result check.
    # When something wrong with interface, set it to 1
    fail_flag = 0
    result_info = []

    # Set attach-interface domain
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = dom_id
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "hexdomid" and dom_id is not None:
        vm_ref = hex(int(dom_id))

    # Get a mac address if iface_mac is 'created'.
    if iface_mac == "created" or correct_attach:
        iface_mac = utils_net.generate_mac_address_simple()

    # Set attach-interface options and Start attach-interface test
    if correct_attach:
        # Force a known-good attach so a later detach can be exercised.
        options = set_options("network", "default", iface_mac, "", "attach")
        attach_result = virsh.attach_interface(vm_name, options,
                                               **virsh_dargs)
    else:
        options = set_options(iface_type, iface_source, iface_mac,
                              options_suffix, "attach")
        attach_result = virsh.attach_interface(vm_ref, options,
                                               **virsh_dargs)
    attach_status = attach_result.exit_status
    logging.debug(attach_result)

    # If attach interface failed.
    if attach_status:
        if not status_error:
            fail_flag = 1
            result_info.append("Attach Failed: %s" % attach_result)
        elif status_error:
            # Here we just use it to exit, do not mean test failed
            fail_flag = 1
    # If attach interface succeeded.
    else:
        if status_error and not correct_attach:
            fail_flag = 1
            result_info.append("Attach Success with wrong command.")

    if fail_flag and start_vm == "yes":
        vm.destroy()
        if len(result_info):
            raise error.TestFail(result_info)
        else:
            # Exit because it is error_test for attach-interface.
            return

    # Check dumpxml file whether the interface is added successfully.
    status, ret = check_dumpxml_iface(vm_name, iface_mac, iface_type,
                                      iface_source)
    if status:
        fail_flag = 1
        result_info.append(ret)

    # Login to domain to check new interface.
    if not vm.is_alive():
        vm.start()
    elif vm.state() == "paused":
        vm.resume()

    status, ret = login_to_check(vm, iface_mac)
    if status:
        fail_flag = 1
        result_info.append(ret)

    # Set detach-interface options
    options = set_options(iface_type, None, iface_mac,
                          options_suffix, "detach")

    # Start detach-interface test
    detach_result = virsh.detach_interface(vm_ref, options, **virsh_dargs)
    detach_status = detach_result.exit_status
    logging.debug(detach_result)

    # Clean up.
    # NOTE(review): the truthiness of check_dumpxml_iface's return value is
    # assumed here — confirm it returns None when the iface is gone.
    if check_dumpxml_iface(vm_name, iface_mac) is not None:
        cleanup_options = "--type %s --mac %s" % (iface_type, iface_mac)
        virsh.detach_interface(vm_ref, cleanup_options, **virsh_dargs)

    # Shutdown vm to be afraid of cleaning up failed
    if vm.is_alive():
        vm.destroy()

    # Check results.
    if status_error:
        if detach_status == 0:
            raise error.TestFail("Detach Success with wrong command.")
    else:
        if detach_status != 0:
            raise error.TestFail("Detach Failed.")
        else:
            if fail_flag:
                raise error.TestFail("Attach-Detach Success but "
                                     "something wrong with its "
                                     "functional use:%s" % result_info)
def run(test, params, env):
    """
    Test command: virsh net-start.
    """
    # Gather test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    status_error = "yes" == params.get("status_error", "no")
    inactive_default = "yes" == params.get("net_start_inactive_default",
                                           "yes")
    net_ref = params.get("net_start_net_ref", "netname")  # default is tested
    extra = params.get("net_start_options_extra", "")  # extra cmd-line params.

    # make easy to maintain
    virsh_dargs = {'uri': uri, 'debug': True, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        # Mask placeholder user names coming from sample configs.
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Get all network instance
    origin_nets = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)

    # Prepare default network for following test.
    try:
        default_netxml = origin_nets['default']
    except KeyError:
        # Close the persistent session before bailing out.
        virsh_instance.close_session()
        test.cancel("Test requires default network to exist")

    # To confirm default network is active
    if not default_netxml.active:
        default_netxml.active = True

    # inactive default according test's need
    if inactive_default:
        logging.info("Stopped default network")
        default_netxml.active = False

    # State before run command
    origin_state = virsh_instance.net_state_dict()
    logging.debug("Origin network(s) state: %s", origin_state)

    # Resolve the reference passed to net-start (name or uuid).
    if net_ref == "netname":
        net_ref = default_netxml.name
    elif net_ref == "netuuid":
        net_ref = default_netxml.uuid

    if params.get('setup_libvirt_polkit') == 'yes':
        # Re-point virsh at the polkit-restricted connection.
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}

    # Run test case
    result = virsh.net_start(net_ref, extra, **virsh_dargs)
    logging.debug(result)
    status = result.exit_status

    # Get current net_stat_dict
    current_state = virsh_instance.net_state_dict()
    logging.debug("Current network(s) state: %s", current_state)
    is_default_active = current_state['default']['active']

    # Recover default state to active
    if not is_default_active:
        default_netxml.active = True

    virsh_instance.close_session()

    # Check status_error
    if status_error:
        if not status:
            test.fail("Run successfully with wrong command!")
    else:
        if status:
            test.fail("Run failed with right command")
        else:
            if not is_default_active:
                test.fail("Execute cmd successfully but "
                          "default is inactive actually.")
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result

    :param test: avocado test object
    :param params: dict-like test parameters from the cfg file
    :param env: test environment object (unused here)
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)

    if remove_existing:
        for netxml in list(backup.values()):
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        # Make the XML readable by the unprivileged user
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    try:
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # Check network states: freshly defined network must be persistent
        # but neither active nor autostarted.
        if check_states and not define_status:
            net_state = virsh_instance.net_state_dict()
            if (net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "defined netowrk: %s" % str(net_state))

        # If defining network succeed, then trying to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Check network states for normal test
        if check_states and not status_error:
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "started netowrk: %s" % str(net_state))
            # Try to set autostart
            virsh.net_autostart(net_name, **virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if not net_state[net_name]['autostart']:
                fail_flag = 1
                result_info.append("Failed to set autostart for network %s"
                                   % net_name)
            # Restart libvirtd and check state
            # Close down persistent virsh session before libvirtd restart
            if hasattr(virsh_instance, 'close_session'):
                virsh_instance.close_session()
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            # Need to redefine virsh_instance after libvirtd restart
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    not net_state[net_name]['autostart']):
                fail_flag = 1
                result_info.append("Found wrong network state after restarting"
                                   " libvirtd: %s" % str(net_state))
            logging.debug("undefine network:")
            # prepare the network status
            if not net_persistent:
                virsh.net_undefine(net_name, ignore_status=False)
            if not net_active:
                virsh.net_destroy(net_name, ignore_status=False)
            undefine_status = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs).exit_status
            net_state = virsh_instance.net_state_dict()
            if net_persistent:
                if undefine_status:
                    fail_flag = 1
                    # BUGFIX: this message was a string literal broken across
                    # a line break (syntax error); reassembled into one.
                    result_info.append("undefine should succeed but failed")
                if net_active:
                    # Undefining a running persistent net leaves it transient:
                    # still active, no autostart, not persistent.
                    if (not net_state[net_name]['active'] or
                            net_state[net_name]['autostart'] or
                            net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "undefined netowrk: %s"
                                           % str(net_state))
                else:
                    if net_name in net_state:
                        fail_flag = 1
                        result_info.append(
                            "Transient network should not exists "
                            "after undefine : %s" % str(net_state))
            else:
                if not undefine_status:
                    fail_flag = 1
                    result_info.append(
                        "undefine transient network should fail "
                        "but succeed: %s" % str(net_state))

        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network
        if not check_states:
            undefine_result = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs)
            if trans_ref != "define":
                logging.debug(undefine_result)
            undefine_status = undefine_result.exit_status

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])
        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml

    # Check status_error
    # If fail_flag is set, it must be transaction test.
    if fail_flag:
        # BUGFIX: message was passed logging-style as a second positional
        # argument to test.fail(), which takes a single message string.
        test.fail("Define network for transaction test "
                  "failed:%s" % result_info)

    # The logic to check result:
    # status_error&only undefine:it is negative undefine test only
    # status_error&(no undefine):it is negative define test only
    # (not status_error)&(only undefine):it is positive transaction test.
    # (not status_error)&(no undefine):it is positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Define an unexpected network, "
                              "and start it successfully.")
                else:
                    test.fail("Define an unexpected network, "
                              "but start it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Define network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but start it failed.")
def run_virsh_attach_detach_interface(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Attach the required interface
    3) According test type(only attach or both attach and detach):
       a.Go on to test detach(if attaching is correct)
       b.Return GOOD or raise TestFail(if attaching is wrong)
    4) Check if attached interface is correct:
       a.Try to catch it in vm's XML file
       b.Try to catch it in vm
    5) Detach the attached interface
    6) Check result

    :param test: test object (unused directly; failures raised via error.*)
    :param params: dict-like test parameters from the cfg file
    :param env: test environment, provides the VM under test
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("at_detach_iface_vm_ref", "domname")
    options_suffix = params.get("at_detach_iface_options_suffix", "")
    status_error = "yes" == params.get("status_error", "no")
    start_vm = params.get("start_vm")
    # Should attach must be pass for detach test.
    correct_attach = "yes" == params.get("correct_attach", "no")

    # Interface specific attributes.
    iface_type = params.get("at_detach_iface_type", "network")
    iface_source = params.get("at_detach_iface_source", "default")
    iface_mac = params.get("at_detach_iface_mac", "created")
    virsh_dargs = {'ignore_status': True, 'uri': uri}

    # Get a bridge name for test if iface_type is bridge.
    # If there is no bridge other than virbr0, raise TestNAError
    if iface_type == "bridge":
        host_bridge = utils_net.Bridge()
        bridge_list = host_bridge.list_br()
        try:
            bridge_list.remove("virbr0")
        except ValueError:
            # BUGFIX: list.remove raises ValueError (not AttributeError)
            # when the item is absent. If no virbr0, just pass is ok.
            pass
        logging.debug("Useful bridges:%s", bridge_list)
        # just choosing one bridge on host.
        if len(bridge_list):
            iface_source = bridge_list[0]
        else:
            raise error.TestNAError("No useful bridge on host "
                                    "other than 'virbr0'.")

    dom_uuid = vm.get_uuid()
    dom_id = vm.get_id()

    # To confirm vm's state
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Test both detach and attach, So collect info
    # both of them for result check.
    # When something wrong with interface, set it to 1
    fail_flag = 0
    result_info = []

    # Set attach-interface domain
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = dom_id
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "hexdomid" and dom_id is not None:
        vm_ref = hex(int(dom_id))

    # Get a mac address if iface_mac is 'created'.
    if iface_mac == "created" or correct_attach:
        iface_mac = utils_net.generate_mac_address_simple()

    # Set attach-interface options and Start attach-interface test
    if correct_attach:
        options = set_options("network", "default", iface_mac, "", "attach")
        attach_result = virsh.attach_interface(vm_name, options,
                                               **virsh_dargs)
    else:
        options = set_options(iface_type, iface_source, iface_mac,
                              options_suffix, "attach")
        attach_result = virsh.attach_interface(vm_ref, options, **virsh_dargs)
    attach_status = attach_result.exit_status
    logging.debug(attach_result)

    # If attach interface failed.
    if attach_status:
        if not status_error:
            fail_flag = 1
            result_info.append("Attach Failed: %s" % attach_result)
        elif status_error:
            # Here we just use it to exit, do not mean test failed
            fail_flag = 1
    # If attach interface succeeded.
    else:
        if status_error and not correct_attach:
            fail_flag = 1
            result_info.append("Attach Success with wrong command.")

    if fail_flag and start_vm == "yes":
        vm.destroy()
        if len(result_info):
            raise error.TestFail(result_info)
        else:
            # Exit because it is error_test for attach-interface.
            return

    # Check dumpxml file whether the interface is added successfully.
    status, ret = check_dumpxml_iface(vm_name, iface_mac, iface_type,
                                      iface_source)
    if status:
        fail_flag = 1
        result_info.append(ret)

    # Login to domain to check new interface.
    if not vm.is_alive():
        vm.start()
    elif vm.state() == "paused":
        vm.resume()

    status, ret = login_to_check(vm, iface_mac)
    if status:
        fail_flag = 1
        result_info.append(ret)

    # Set detach-interface options
    options = set_options(iface_type, None, iface_mac, options_suffix,
                          "detach")

    # Start detach-interface test
    detach_result = virsh.detach_interface(vm_ref, options, **virsh_dargs)
    detach_status = detach_result.exit_status
    logging.debug(detach_result)

    # Clean up.
    # NOTE(review): check_dumpxml_iface returns a (status, msg) tuple above,
    # which is never None, so this condition is always true and the cleanup
    # detach always runs (harmless with ignore_status) — confirm the
    # helper's contract and tighten this check if it reports "found"/"not
    # found" via its status element.
    if check_dumpxml_iface(vm_name, iface_mac) is not None:
        cleanup_options = "--type %s --mac %s" % (iface_type, iface_mac)
        virsh.detach_interface(vm_ref, cleanup_options, **virsh_dargs)

    # Shutdown vm to be afraid of cleaning up failed
    if vm.is_alive():
        vm.destroy()

    # Check results.
    if status_error:
        if detach_status == 0:
            raise error.TestFail("Detach Success with wrong command.")
    else:
        if detach_status != 0:
            raise error.TestFail("Detach Failed.")
        else:
            if fail_flag:
                raise error.TestFail("Attach-Detach Success but "
                                     "something wrong with its "
                                     "functional use:%s" % result_info)
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result

    :param test: test object (unused directly in this variant)
    :param params: dict-like test parameters from the cfg file
    :param env: test environment object (unused here)
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        raise error.TestNAError("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)

    if remove_existing:
        for netxml in backup.values():
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    try:
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # If defining network succeed, then trying to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network
        undefine_result = virsh.net_undefine(undefine_options, undefine_extra,
                                             **virsh_dargs)
        if trans_ref != "define":
            logging.debug(undefine_result)
        undefine_status = undefine_result.exit_status

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in leftovers.values():
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in backup.values():
            # If network is transient
            if ((not backup_state[netxml.name]['persistent']) and
                    backup_state[netxml.name]['active']):
                netxml.create()
                continue
            # autostart = True requires persistent = True first!
            for state in ['persistent', 'autostart', 'active']:
                try:
                    netxml[state] = backup_state[netxml.name][state]
                except xcepts.LibvirtXMLError, detail:
                    fail_flag = 1
                    result_info.append(str(detail))
        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml
    # NOTE(review): define_status/undefine_status/fail_flag are collected but
    # never checked in this variant — the function ends after cleanup.
    # Presumably the result-verification tail was lost; confirm against the
    # sibling net-define/net-undefine implementations in this file.
def run(test, params, env):
    """
    Test command: virsh net-start.

    Covers plain start-by-name/uuid, polkit ACL, read-only connections and
    a route-conflict scenario (route_test) where two networks with
    overlapping routes must not both start.

    :param test: avocado test object (provides cancel/fail)
    :param params: dict-like test parameters from the cfg file
    :param env: test environment object (unused here)
    """
    # Gather test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    status_error = "yes" == params.get("status_error", "no")
    inactive_default = "yes" == params.get("net_start_inactive_default",
                                           "yes")
    net_ref = params.get("net_start_net_ref", "netname")  # default is tested
    extra = params.get("net_start_options_extra", "")  # extra cmd-line params.
    route_test = "yes" == params.get("route_test", "no")

    # make easy to maintain
    virsh_dargs = {'uri': uri, 'debug': True, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Get all network instance
    origin_nets = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)

    # Prepare default network for following test.
    try:
        default_netxml = origin_nets['default']
    except KeyError:
        virsh_instance.close_session()
        test.cancel("Test requires default network to exist")
    try:
        # To confirm default network is active
        if not default_netxml.active:
            default_netxml.active = True

        # inactive default according test's need
        if inactive_default:
            logging.info("Stopped default network")
            default_netxml.active = False

        # State before run command
        origin_state = virsh_instance.net_state_dict()
        logging.debug("Origin network(s) state: %s", origin_state)

        if route_test:
            # define a network "def" with route address is "192.168.122.x"
            # 1. start def first then start default
            current_state = virsh_instance.net_state_dict()
            if 'def' in current_state:
                virsh.net_destroy("def", ignore_status=True)
                virsh.net_undefine("def", ignore_status=True)
            expect_fail = "yes" == params.get("expect_start_fail", "no")
            test_xml = network_xml.NetworkXML(network_name="def")
            test_xml.forward = {'mode': 'nat'}
            test_xml.routes = [{'address': '192.168.122.0', 'prefix': '24',
                                'gateway': '192.168.100.1'}]
            ipxml = IPXML(address='192.168.100.1', netmask='255.255.255.0')
            ipxml.dhcp_ranges = {'start': '192.168.100.2',
                                 'end': '192.168.100.254'}
            test_xml.ip = ipxml
            test_xml.define()
            virsh.net_start("def")
            # start default, should fail
            result = virsh.net_start("default")
            logging.debug(result)
            libvirt.check_exit_status(result, expect_error=expect_fail)

            # 2. start default then start def
            virsh.net_destroy("def")
            virsh.net_start("default")
            current_state11 = virsh_instance.net_state_dict()
            logging.debug("before start 2nd network(s) state: %s",
                          current_state11)
            # start def, should fail
            result = virsh.net_start("def")
            logging.debug(result)
            libvirt.check_exit_status(result, expect_error=expect_fail)
            current_state12 = virsh_instance.net_state_dict()
            logging.debug("after start 2nd network(s) state: %s",
                          current_state12)
            # clear the env
            virsh.net_undefine("def")
        else:
            if net_ref == "netname":
                net_ref = default_netxml.name
            elif net_ref == "netuuid":
                net_ref = default_netxml.uuid

            if params.get('setup_libvirt_polkit') == 'yes':
                virsh_dargs = {'uri': virsh_uri,
                               'unprivileged_user': unprivileged_user,
                               'debug': False, 'ignore_status': True}
            if params.get('net_start_readonly', 'no') == 'yes':
                virsh_dargs = {'uri': uri, 'debug': True, 'readonly': True,
                               'ignore_status': True}

        # Run test case
        if 'unprivileged_user' in virsh_dargs and status_error:
            test_virsh = virsh.VirshPersistent(
                unprivileged_user=virsh_dargs['unprivileged_user'])
            virsh_dargs.pop('unprivileged_user')
            result = test_virsh.net_start(net_ref, extra, **virsh_dargs)
            test_virsh.close_session()
        elif not route_test:
            result = virsh.net_start(net_ref, extra, **virsh_dargs)
        logging.debug(result)
        status = result.exit_status

        # Get current net_stat_dict
        current_state = virsh_instance.net_state_dict()
        logging.debug("Current network(s) state: %s", current_state)
        if 'default' not in current_state:
            test.fail('Network "default" cannot be found')

        # Check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status:
                test.fail("Run failed with right command")
            # Get current net_stat_dict
            current_state = virsh_instance.net_state_dict()
            logging.debug("Current network(s) state: %s", current_state)
            is_default_active = current_state['default']['active']
            if not is_default_active:
                test.fail(
                    "Execute cmd successfully but default is inactive "
                    "actually."
                )
    finally:
        # BUGFIX: the state query must happen while the persistent virsh
        # session is still open; the original closed the session first and
        # then called net_state_dict() on the closed instance. Query, clean
        # up leftovers, then close the session last.
        current_state = virsh_instance.net_state_dict()
        if "def" in current_state:
            virsh.net_destroy("def", ignore_status=True)
            virsh.net_undefine("def", ignore_status=True)
        virsh.net_start('default', debug=True, ignore_status=True)
        virsh_instance.close_session()
def run(test, params, env):
    """
    Test command: virsh net-create.

    1) Gather test parameters
    2) Store current libvirt host network state
    3) Call virsh net create on possibly modified network XML
    4) Recover original network.
    5) Check result.

    :param test: test object (unused directly in this variant)
    :param params: dict-like test parameters from the cfg file
    :param env: test environment object (unused here)
    """
    # Gather test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    status_error = "yes" == params.get("status_error", "no")
    net_name = params.get("net_create_net_name", "")  # default is tested
    net_uuid = params.get("net_create_net_uuid", "")  # default is tested
    options_ref = params.get("net_create_options_ref", "")  # default is tested
    # extra cmd-line params.
    extra = params.get("net_create_options_extra", "")
    corrupt = "yes" == params.get("net_create_corrupt_xml", "no")
    remove_existing = "yes" == params.get("net_create_remove_existing", "yes")
    # Dictionary or None value
    # eval() is restricted (no builtins) and only ever fed a value from the
    # test's own cfg file, so it can only yield a literal such as a dict.
    bridge = eval(params.get("net_create_bridge", "None"),
                  {'__builtins__': None}, {})

    # make easy to maintain
    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': False}
    vrsh = virsh.VirshPersistent(**virsh_dargs)

    # Prepare environment and record current net_state_dict
    backup = libvirt_xml.NetworkXML.new_all_networks_dict(vrsh)
    backup_state = vrsh.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        raise error.TestNAError("Test requires default network to exist")

    if corrupt:
        # find file size
        test_xml.seek(0, 2)  # end
        # write garbage at middle of file
        test_xml.seek(test_xml.tell() / 2)
        test_xml.write('"<network><<<BAD>>><\'XML</network\>'
                       '!@#$%^&*)>(}>}{CORRUPTE|>!')
        test_xml.flush()
        # Assume next user might want to read
        test_xml.seek(0)

    if remove_existing:
        for netxml in backup.values():
            netxml.orbital_nuclear_strike()

    # Run test case
    # Be nice to user
    if status_error:
        logging.info("The following is expected to fail...")

    try:
        # Determine depth of test - if low-level calls are needed
        if (options_ref or extra or corrupt):
            logging.debug("Performing low-level net-create test")
            # vrsh will act like it's own virsh-dargs, i.e. it is dict-like
            test_passed = do_low_level_test(vrsh, test_xml, options_ref,
                                            extra)
        else:  # high-level test
            logging.debug("Performing high-level net-create test")
            # vrsh will act like it's own virsh-dargs, i.e. it is dict-like
            test_passed = do_high_level_test(vrsh, test_xml, net_name,
                                             net_uuid, bridge)
    finally:
        # Be nice to user
        if status_error:
            # In case test itself has errors, warn they are real.
            logging.info("The following is NOT expected to fail...")
        # Done with file, cleanup
        del test_xml
        # Recover environment
        leftovers = libvirt_xml.NetworkXML.new_all_networks_dict(vrsh)
        for netxml in leftovers.values():
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in backup.values():
            netxml.sync(backup_state[netxml.name])
        # Close down persistent virsh session (including for all netxml copies)
        vrsh.close_session()

    # Check Result
    # NOTE(review): do_low_level_test/do_high_level_test presumably return a
    # boolean pass/fail — confirm against their definitions in this file.
    if status_error:  # An error was expected
        if test_passed:  # Error was not produced
            raise error.TestFail("Error test did not fail!")
    else:  # no error expected
        if not test_passed:
            raise error.TestFail("Normal test returned failure")
def run_virsh_net_autostart(test, params, env):
    """
    Test command: virsh net-autostart.

    Sets (or disables) autostart on the inactive 'default' network, restarts
    libvirtd as a stand-in for a host reboot, and verifies whether the
    network came up (or stayed down) as expected.

    :param test: test object (unused directly in this variant)
    :param params: dict-like test parameters from the cfg file
    :param env: test environment object (unused here)
    """
    # Gather test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    status_error = "yes" == params.get("status_error", "no")
    net_ref = params.get("net_autostart_net_ref", "netname")
    disable = "yes" == params.get("net_autostart_disable", "no")
    extra = params.get("net_autostart_extra", "")  # extra cmd-line params.

    # Make easy to maintain
    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    try:
        default_xml = backup['default']
    except (KeyError, AttributeError):
        raise error.TestNAError("Test requires default network to exist")

    # To guarantee cleanup will be executed
    try:
        # Remove all network before test
        for netxml in backup.values():
            netxml.orbital_nuclear_strike()

        # Prepare default property for network
        # Transeint network can not be set autostart
        # So confirm persistent is true for test
        default_xml['persistent'] = True
        netname = "default"
        netuuid = default_xml.uuid

        # Set network 'default' to inactive
        # Since we do not reboot host to check(instead of restarting libvirtd)
        # If default network is active, we cann't check "--disable".
        # Because active network will not be inactive after restarting
        # libvirtd even we set autostart to False. While inactive network
        # will be active after restarting libvirtd if we set autostart to True
        default_xml['active'] = False

        currents = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
        current_state = virsh_instance.net_state_dict()
        logging.debug("Current network(s): %s", current_state)

        # Prepare options and arguments
        if net_ref == "netname":
            net_ref = netname
        elif net_ref == "netuuid":
            net_ref = netuuid

        if disable:
            net_ref += " --disable"

        # Run test case
        # Use function in virsh module directly for both normal and error test
        result = virsh.net_autostart(net_ref, extra, **virsh_dargs)
        logging.debug(result)
        status = result.exit_status

        # Close down persistent virsh session (including for all netxml copies)
        # Must happen before the libvirtd restart below.
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()

        # Check if autostart or disable is successful with libvirtd restart.
        # TODO: Since autostart is designed for host reboot,
        #       we'd better check it with host reboot.
        utils_libvirtd.libvirtd_restart()

        # Reopen default_xml
        virsh_instance = virsh.VirshPersistent(**virsh_dargs)
        currents = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
        current_state = virsh_instance.net_state_dict()
        logging.debug("Current network(s): %s", current_state)
        default_xml = currents['default']
        is_active = default_xml['active']
    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in leftovers.values():
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in backup.values():
            # If network is transient
            if ((not backup_state[netxml.name]['persistent']) and
                    backup_state[netxml.name]['active']):
                netxml.create()
                continue
            # autostart = True requires persistent = True first!
            for state in ['persistent', 'autostart', 'active']:
                try:
                    netxml[state] = backup_state[netxml.name][state]
                except xcepts.LibvirtXMLError:
                    # best-effort restore: keep going with remaining states
                    pass
        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()

    # Check Result
    if status_error:
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if disable:
            if status or is_active:
                raise error.TestFail("Disable autostart failed.")
        else:
            if status or (not is_active):
                raise error.TestFail("Set network autostart failed.")
def run_virsh_capabilities(test, params, env):
    """
    Test the command virsh capabilities

    (1) Call virsh capabilities
    (2) Call virsh capabilities with an unexpected option
    (3) Call virsh capabilities with libvirtd service stop

    :param test: test object (unused directly in this variant)
    :param params: dict-like test parameters from the cfg file
    :param env: test environment object (unused here)
    """
    def compare_capabilities_xml(source):
        """Cross-check key fields of the capabilities XML against the host."""
        dom = parseString(source)
        host = dom.getElementsByTagName('host')[0]
        # check that host has a non-empty UUID tag.
        uuid = host.getElementsByTagName('uuid')[0]
        host_uuid_output = uuid.firstChild.data
        logging.info("Host uuid (capabilities_xml):%s", host_uuid_output)
        if host_uuid_output == "":
            raise error.TestFail("The host uuid in capabilities_xml is none!")

        # check the host arch.
        arch = host.getElementsByTagName('arch')[0]
        host_arch_output = arch.firstChild.data
        logging.info("Host arch (capabilities_xml):%s", host_arch_output)
        cmd_result = utils.run("arch", ignore_status=True)
        if cmp(host_arch_output, cmd_result.stdout.strip()) != 0:
            raise error.TestFail("The host arch in capabilities_xml is wrong!")

        # check the host cpus num.
        cpus = dom.getElementsByTagName('cpus')[0]
        host_cpus_output = cpus.getAttribute('num')
        logging.info("Host cpus num (capabilities_xml):%s", host_cpus_output)
        cmd = "less /proc/cpuinfo | grep processor | wc -l"
        cmd_result = utils.run(cmd, ignore_status=True)
        if cmp(host_cpus_output, cmd_result.stdout.strip()) != 0:
            raise error.TestFail("Host cpus num (capabilities_xml) is "
                                 "wrong")

        # check the arch of guest supported.
        cmd = "/usr/libexec/qemu-kvm --cpu ? | grep qemu"
        cmd_result = utils.run(cmd, ignore_status=True)
        guest_wordsize_array = dom.getElementsByTagName('wordsize')
        length = len(guest_wordsize_array)
        for i in range(length):
            element = guest_wordsize_array[i]
            guest_wordsize = element.firstChild.data
            logging.info("Arch of guest supported (capabilities_xml):%s",
                         guest_wordsize)
            if not re.search(guest_wordsize, cmd_result.stdout.strip()):
                raise error.TestFail("The capabilities_xml gives an extra "
                                     "arch of guest to support!")

        # check the type of hyperviosr.
        guest_domain_type = dom.getElementsByTagName('domain')[0]
        guest_domain_type_output = guest_domain_type.getAttribute('type')
        logging.info("Hypervisor (capabilities_xml):%s",
                     guest_domain_type_output)
        cmd_result = utils.run("virsh uri", ignore_status=True)
        if not re.search(guest_domain_type_output,
                         cmd_result.stdout.strip()):
            raise error.TestFail("The capabilities_xml gives an different "
                                 "hypervisor")

    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default")
    )

    # Prepare libvirtd service.
    # BUGFIX: always bind 'libvirtd' (default "on"). The original only
    # assigned it inside a has_key() guard, so the unconditional reads
    # below raised NameError for configs that omit the param.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    option = params.get("virsh_cap_options")
    try:
        output = virsh.capabilities(option, uri=connect_uri,
                                    ignore_status=False, debug=True)
        status = 0  # good
    except error.CmdError:
        status = 1  # bad
        output = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off":
                raise error.TestFail("Command 'virsh capabilities' succeeded "
                                     "with libvirtd service stopped, "
                                     "incorrect")
            else:
                raise error.TestFail("Command 'virsh capabilities %s' "
                                     "succeeded (incorrect command)" % option)
    elif status_error == "no":
        compare_capabilities_xml(output)
        if status != 0:
            raise error.TestFail("Command 'virsh capabilities %s' failed "
                                 "(correct command)" % option)
def run_virsh_net_define_undefine(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        raise error.TestNAError("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)

    # NOTE(review): remove_existing is a string ("yes"/"no"), so any
    # non-empty value — including "no" — is truthy here; confirm intent.
    if remove_existing:
        for netxml in backup.values():
            # Undefine/destroy every existing network (helper named elsewhere)
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    # Select define/undefine argument pairs for the chosen scenario.
    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    try:
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # If defining network succeed, then trying to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network
        undefine_result = virsh.net_undefine(undefine_options, undefine_extra,
                                             **virsh_dargs)
        if trans_ref != "define":
            logging.debug(undefine_result)
        undefine_status = undefine_result.exit_status

    finally:
        # Recover environment: wipe leftovers, then restore backed-up nets.
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in leftovers.values():
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in backup.values():
            # If network is transient, just re-create it (no persistent def)
            if ((not backup_state[netxml.name]['persistent']) and
                    backup_state[netxml.name]['active']):
                netxml.create()
                continue
            # autostart = True requires persistent = True first!
            for state in ['persistent', 'autostart', 'active']:
                try:
                    netxml[state] = backup_state[netxml.name][state]
                except xcepts.LibvirtXMLError, detail:
                    fail_flag = 1
                    result_info.append(str(detail))
        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")
    expect_msg = params.get("net_define_undefine_err_msg")

    # define multi ip/dhcp sections in network
    multi_ip = "yes" == params.get("multi_ip", "no")
    netmask = params.get("netmask")
    prefix_v6 = params.get("prefix_v6")
    # NOTE(review): single_v6_range, del_mac and add_dev are read from params
    # but never used below — presumably consumed by helpers in other variants.
    single_v6_range = "yes" == params.get("single_v6_range", "no")
    # Get 2nd ipv4 dhcp range
    dhcp_ranges_start = params.get("dhcp_ranges_start", None)
    dhcp_ranges_end = params.get("dhcp_ranges_end", None)

    # Get 2 groups of ipv6 ip address and dhcp section
    address_v6_1 = params.get("address_v6_1")
    dhcp_ranges_v6_start_1 = params.get("dhcp_ranges_v6_start_1", None)
    dhcp_ranges_v6_end_1 = params.get("dhcp_ranges_v6_end_1", None)

    address_v6_2 = params.get("address_v6_2")
    dhcp_ranges_v6_start_2 = params.get("dhcp_ranges_v6_start_2", None)
    dhcp_ranges_v6_end_2 = params.get("dhcp_ranges_v6_end_2", None)

    # Edit net xml forward/ip part then define/start to check invalid setting
    edit_xml = "yes" == params.get("edit_xml", "no")
    address_v4 = params.get("address_v4")
    nat_port_start = params.get("nat_port_start")
    nat_port_end = params.get("nat_port_end")
    test_port = "yes" == params.get("test_port", "no")
    loop = int(params.get("loop", 1))

    # Get params about creating a bridge
    bridge = params.get('bridge', None)
    create_bridge = "yes" == params.get('create_bridge', 'no')
    ovs_bridge = "yes" == params.get('ovs_bridge', 'no')
    iface_name = utils_net.get_net_if(state="UP")[0]

    # Get params about creating a network
    create_netxml = "yes" == params.get("create_netxml", "no")
    domain = params.get('domain', None)
    forward = params.get("forward", None)
    net_dns_txt = params.get("net_dns_txt", None)
    net_bandwidth_inbound = params.get("net_bandwidth_inbound", None)
    net_bandwidth_outbound = params.get("net_bandwidth_outbound", None)
    mac = params.get("mac")

    # Edit the created network xml to get the xml to be tested
    del_mac = "yes" == params.get('del_mac', 'no')
    del_ip = "yes" == params.get('del_ip', 'no')
    add_dev = "yes" == params.get('add_dev', 'no')
    virtualport = 'yes' == params.get("virtualport", "no")
    virtualport_type = params.get("virtualport_type")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_uri = params.get("virsh_uri")
    if virsh_uri and not utils_split_daemons.is_modular_daemon():
        virsh_uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        # Placeholder user names from the sample config are masked out
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)
    logging.debug("Get network xml as testnet_xml: %s" % testnet_xml)

    # NOTE(review): remove_existing is a string ("yes"/"no"), so any
    # non-empty value — including "no" — is truthy here; confirm intent.
    if remove_existing:
        for netxml in list(backup.values()):
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    # Select define/undefine argument pairs for the chosen scenario.
    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        # Re-point virsh at the polkit test URI as the unprivileged user
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        # Make the XML readable by the unprivileged user
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    if params.get('net_define_undefine_readonly', 'no') == 'yes':
        virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True,
                       'readonly': True}
    try:
        if edit_xml:
            # Replace the default <ip> element with the configured one
            ipxml_v4 = network_xml.IPXML()
            ipxml_v4.address = address_v4
            ipxml_v4.netmask = netmask
            range_4 = network_xml.RangeXML()
            range_4.attrs = {"start": dhcp_ranges_start,
                             "end": dhcp_ranges_end}
            ipxml_v4.dhcp_ranges = range_4
            testnet_xml.del_ip()
            testnet_xml.set_ip(ipxml_v4)
        if test_port:
            nat_port = {"start": nat_port_start, "end": nat_port_end}
            testnet_xml.nat_port = nat_port
        testnet_xml.debug_xml()
        if multi_ip:
            # Enabling IPv6 forwarding with RA routes without accept_ra set to 2
            # is likely to cause routes loss
            sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra'
            # NOTE(review): stdout_text is not stripped, so the trailing
            # newline makes this comparison always true — verify.
            original_accept_ra = process.run(sysctl_cmd + ' -n').stdout_text
            if original_accept_ra != '2':
                process.system(sysctl_cmd + '=2')
            # add another ipv4 address and dhcp range
            set_ip_section(testnet_xml, address_v4, ipv6=False,
                           netmask=netmask,
                           dhcp_ranges_start=dhcp_ranges_start,
                           dhcp_ranges_end=dhcp_ranges_end)
            # add ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_1, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_1,
                           dhcp_ranges_end=dhcp_ranges_v6_end_1)
            # 2nd ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_2, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_2,
                           dhcp_ranges_end=dhcp_ranges_v6_end_2)
        if create_netxml:
            net_dict = {'del_nat_attrs': True, 'del_ip': del_ip,
                        'dns_txt': net_dns_txt, 'domain': domain,
                        'bridge': bridge, 'forward': forward,
                        'interface_dev': iface_name,
                        'virtualport': virtualport,
                        'virtualport_type': virtualport_type,
                        'mac': mac,
                        'net_bandwidth_inbound': net_bandwidth_inbound,
                        'net_bandwidth_outbound': net_bandwidth_outbound}
            logging.debug("net_dict is %s" % net_dict)
            testnet_xml = libvirt_network.modify_network_xml(net_dict,
                                                             testnet_xml)
        testnet_xml.debug_xml()
        if create_bridge:
            if ovs_bridge:
                utils_net.create_ovs_bridge(bridge, ignore_status=False)
            else:
                utils_net.create_linux_bridge_tmux(bridge, iface_name,
                                                   ignore_status=False)
        # Run test case
        while loop:
            try:
                define_result = virsh.net_define(define_options, define_extra,
                                                 **virsh_dargs)
                logging.debug(define_result)
                define_status = define_result.exit_status

                # Check network states after define: must be persistent,
                # inactive and with autostart disabled.
                if check_states and not define_status:
                    net_state = virsh_instance.net_state_dict()
                    if (net_state[net_name]['active'] or
                            net_state[net_name]['autostart'] or
                            not net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "defined network: %s"
                                           % str(net_state))
                if define_status == 1 and status_error and expect_msg:
                    logging.debug("check result is %s, expect_msg is %s" % (
                        define_result, expect_msg))
                    libvirt.check_result(define_result, expect_msg.split(';'))

                # If defining network succeed, then trying to start it.
                if define_status == 0:
                    start_result = virsh.net_start(net_name, extra="",
                                                   **virsh_dargs)
                    logging.debug(start_result)
                    start_status = start_result.exit_status

                if trans_ref == "trans":
                    if define_status:
                        fail_flag = 1
                        result_info.append(
                            "Define network with right command failed.")
                    else:
                        if start_status:
                            fail_flag = 1
                            # NOTE(review): net_state may be unbound here when
                            # check_states is off — confirm.
                            result_info.append(
                                "Found wrong network states for "
                                "defined network: %s" % str(net_state))

                # Check network states after start
                if check_states and not status_error:
                    net_state = virsh_instance.net_state_dict()
                    if (not net_state[net_name]['active'] or
                            net_state[net_name]['autostart'] or
                            not net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "started network: %s"
                                           % str(net_state))
                    # Try to set autostart
                    virsh.net_autostart(net_name, **virsh_dargs)
                    net_state = virsh_instance.net_state_dict()
                    if not net_state[net_name]['autostart']:
                        fail_flag = 1
                        result_info.append(
                            "Failed to set autostart for network %s"
                            % net_name)
                    # Restart libvirtd and check state
                    # Close down persistent virsh session before libvirtd restart
                    if hasattr(virsh_instance, 'close_session'):
                        virsh_instance.close_session()
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    # Need to redefine virsh_instance after libvirtd restart
                    virsh_instance = virsh.VirshPersistent(**virsh_dargs)
                    net_state = virsh_instance.net_state_dict()
                    if (not net_state[net_name]['active'] or
                            not net_state[net_name]['autostart']):
                        fail_flag = 1
                        result_info.append(
                            "Found wrong network state after restarting"
                            " libvirtd: %s" % str(net_state))
                    logging.debug("undefine network:")
                    # prepare the network status
                    if not net_persistent:
                        virsh.net_undefine(net_name, ignore_status=False)
                    if not net_active:
                        virsh.net_destroy(net_name, ignore_status=False)
                    undefine_status = virsh.net_undefine(
                        undefine_options, undefine_extra,
                        **virsh_dargs).exit_status
                    net_state = virsh_instance.net_state_dict()
                    if net_persistent:
                        if undefine_status:
                            fail_flag = 1
                            result_info.append(
                                "undefine should succeed but failed")
                        if net_active:
                            # Undefining an active persistent net turns it
                            # transient: still active, no autostart/persistent
                            if (not net_state[net_name]['active'] or
                                    net_state[net_name]['autostart'] or
                                    net_state[net_name]['persistent']):
                                fail_flag = 1
                                result_info.append(
                                    "Found wrong network states for "
                                    "undefined network: %s" % str(net_state))
                        else:
                            if net_name in net_state:
                                fail_flag = 1
                                result_info.append(
                                    "Transient network should not exists "
                                    "after undefine : %s" % str(net_state))
                    else:
                        if not undefine_status:
                            fail_flag = 1
                            result_info.append(
                                "undefine transient network should fail "
                                "but succeed: %s" % str(net_state))
                # Stop network for undefine test anyway
                destroy_result = virsh.net_destroy(net_name, extra="",
                                                   **virsh_dargs)
                logging.debug(destroy_result)

                # Undefine network
                if not check_states:
                    undefine_result = virsh.net_undefine(undefine_options,
                                                         undefine_extra,
                                                         **virsh_dargs)
                    if trans_ref != "define":
                        logging.debug(undefine_result)
                    undefine_status = undefine_result.exit_status
            except Exception:
                logging.debug(
                    "The define and undefine operation in loop %s failed. ",
                    loop)
            finally:
                loop = loop - 1

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])
        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml
        if create_bridge:
            if ovs_bridge:
                utils_net.delete_ovs_bridge(bridge, ignore_status=False)
            else:
                utils_net.delete_linux_bridge_tmux(bridge, iface_name,
                                                   ignore_status=False)
    # Check status_error
    # If fail_flag is set, it must be transaction test.
    if fail_flag:
        test.fail("Define network for transaction test "
                  "failed:%s" % result_info)

    # The logic to check result:
    # status_error&only undefine:it is negative undefine test only
    # status_error&(no undefine):it is negative define test only
    # (not status_error)&(only undefine):it is positive transaction test.
    # (not status_error)&(no undefine):it is positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Define an unexpected network, "
                              "and start it successfully.")
                else:
                    test.fail("Define an unexpected network, "
                              "but start it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Define network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but start it failed.")
def run(test, params, env):
    """
    Test the command virsh capabilities

    (1) Call virsh capabilities
    (2) Call virsh capabilities with an unexpected option
    (3) Call virsh capabilities with libvirtd service stop

    :param test: avocado test object, used for fail/cancel
    :param params: test parameters dictionary
    :param env: test environment object
    """
    def compare_capabilities_xml(source):
        """
        Cross-check the capabilities XML against values probed on the host.

        :param source: capabilities XML string returned by virsh
        """
        cap_xml = capability_xml.CapabilityXML()
        cap_xml.xml = source

        # Check that host has a non-empty UUID tag.
        xml_uuid = cap_xml.uuid
        logging.debug("Host UUID (capabilities_xml): %s", xml_uuid)
        if xml_uuid == "":
            test.fail("The host uuid in capabilities_xml is none!")

        # Check the host arch.  Use stdout_text (str) and plain comparison:
        # cmp() does not exist on Python 3 and CmdResult.stdout is bytes.
        xml_arch = cap_xml.arch
        logging.debug("Host arch (capabilities_xml): %s", xml_arch)
        exp_arch = process.run("arch", shell=True).stdout_text.strip()
        if xml_arch != exp_arch:
            test.fail("The host arch in capabilities_xml is "
                      "expected to be %s, but get %s"
                      % (exp_arch, xml_arch))

        # Check the host cpu count.
        xml_cpu_count = cap_xml.cpu_count
        logging.debug("Host cpus count (capabilities_xml): %s", xml_cpu_count)
        # /proc/cpuinfo labels differ on s390x
        search_str = 'processor'
        if platform.machine() == 's390x':
            search_str = 'cpu number'
        cmd = "grep '%s' /proc/cpuinfo | wc -l" % search_str
        exp_cpu_count = int(process.run(cmd, shell=True).stdout.strip())
        if xml_cpu_count != exp_cpu_count:
            test.fail("Host cpus count is expected to be %s, "
                      "but get %s" % (exp_cpu_count, xml_cpu_count))

        # Check the arch of guest supported.
        guest_capa = cap_xml.get_guest_capabilities()
        logging.debug(guest_capa)
        # libvirt track wordsize in hardcode struct virArchData
        wordsize = {}
        wordsize['64'] = ['alpha', 'aarch64', 'ia64', 'mips64', 'mips64el',
                          'parisc64', 'ppc64', 'ppc64le', 's390x', 'sh4eb',
                          'sparc64', 'x86_64']
        wordsize['32'] = ['armv6l', 'armv7l', 'armv7b', 'cris', 'i686',
                          'lm32', 'm68k', 'microblaze', 'microblazeel',
                          'mips', 'mipsel', 'openrisc', 'parisc', 'ppc',
                          'ppcle', 'ppcemb', 's390', 'sh4', 'sparc',
                          'unicore32', 'xtensa', 'xtensaeb']
        # stdout_text keeps this a str so split(':') works on Python 3
        uri_type = process.run("virsh uri",
                               shell=True).stdout_text.split(':')[0]
        domain_type = "domain_" + uri_type
        for arch_dict in list(itervalues(guest_capa)):
            for arch, val_dict in list(iteritems(arch_dict)):
                # Check wordsize
                if arch not in wordsize[val_dict['wordsize']]:
                    test.fail("'%s' wordsize '%s' in "
                              "capabilities_xml not expected"
                              % (arch, val_dict['wordsize']))
                # Check the type of hypervisor
                if domain_type not in list(val_dict.keys()):
                    test.fail("domain type '%s' is not matched"
                              " under arch '%s' in "
                              "capabilities_xml" % (uri_type, arch))

        # check power management support.
        try:
            pm_cmd = path.find_command('pm-is-supported')
            pm_cap_map = {'suspend': 'suspend_mem',
                          'hibernate': 'suspend_disk',
                          'suspend-hybrid': 'suspend_hybrid'}
            exp_pms = []
            for opt in pm_cap_map:
                cmd = '%s --%s' % (pm_cmd, opt)
                res = process.run(cmd, ignore_status=True, shell=True)
                if res.exit_status == 0:
                    exp_pms.append(pm_cap_map[opt])
            pms = cap_xml.power_management_list
            if set(exp_pms) != set(pms):
                test.fail("Expected supported PMs are %s, got %s "
                          "instead." % (exp_pms, pms))
        except path.CmdNotFoundError:
            logging.debug('Power management checking is skipped, since command'
                          ' pm-is-supported is not found.')

    connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                              "default"))

    # Prepare libvirtd service.  Bind `libvirtd` unconditionally (default
    # "on"): the recovery and result-check code below reads it even when the
    # "libvirtd" param is absent, which previously raised NameError.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    option = params.get("virsh_cap_options")
    try:
        output = virsh.capabilities(option, uri=connect_uri,
                                    ignore_status=False, debug=True)
        status = 0  # good
    except process.CmdError:
        status = 1  # bad
        output = ''

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off":
                test.fail("Command 'virsh capabilities' succeeded "
                          "with libvirtd service stopped, "
                          "incorrect")
            else:
                test.fail("Command 'virsh capabilities %s' "
                          "succeeded (incorrect command)" % option)
    elif status_error == "no":
        compare_capabilities_xml(output)
        if status != 0:
            test.fail("Command 'virsh capabilities %s' failed "
                      "(correct command)" % option)
def run_virsh_ttyconsole(test, params, env):
    """
    Test command: virsh ttyconsole.

    1) Config console in xml file.
    2) Run test for virsh ttyconsole.
    3) Result check.
    """
    os_type = params.get("os_type")
    if os_type == "windows":
        raise error.TestNAError("SKIP:Do not support Windows.")

    # Get parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("virsh_ttyconsole_vm_ref", "domname")
    vm_state = params.get("vm_state", "running")
    option_suffix = params.get("virsh_ttyconsole_option_suffix", "")
    vm_uuid = vm.get_uuid()
    vm_id = ""
    virsh_dargs = {'ignore_status': True, 'uri': uri}

    # A backup of original vm.  Use the inactive (persistent) definition —
    # consistent with the newer variant of this test — so the recovery step
    # restores the same config the console edit was applied to, rather than
    # a snapshot of a possibly-live domain.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()

    # Config vm for tty console
    xml_console_config(vm_name)
    vm.destroy()

    # Prepare vm state for test
    if vm_state != "shutoff":
        vm.start()
        vm.wait_for_login()
        vm_id = vm.get_id()
    if vm_state == "paused":
        vm.pause()

    # Prepare options
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domuuid":
        vm_ref = vm_uuid
    elif vm_ref == "domid":
        vm_ref = vm_id
    elif vm_id and vm_ref == "hex_id":
        vm_ref = hex(int(vm_id))
    if option_suffix:
        vm_ref += " %s" % option_suffix

    # Run test command
    result = virsh.ttyconsole(vm_ref, **virsh_dargs)
    status = result.exit_status
    logging.debug(result)

    # Recover state of vm.
    if vm_state == "paused":
        vm.resume()

    # Recover vm
    if vm.is_alive():
        vm.destroy()
    xml_console_recover(vmxml_backup)

    # check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successful with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command.")
def run(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Attach the required interface
    3) Perform attach and detach operation
    4) Check if attached interface is correct
    5) Detach the attached interface
    """
    def is_attached(vmxml_devices, iface_type, iface_source, iface_mac):
        """
        Check attached interface exist or not.

        :param vmxml_devices: VMXMLDevices instance
        :param iface_type: interface device type
        :param iface_source : interface source
        :param iface_mac : interface MAC address
        :return: True/False if backing file and interface found
        """
        ifaces = vmxml_devices.by_device_tag('interface')
        for iface in ifaces:
            if iface.type_name != iface_type:
                continue
            if iface.mac_address != iface_mac:
                continue
            if iface_source is not None:
                if iface.xmltreefile.find('source') is not None:
                    if iface.source['network'] != iface_source:
                        continue
                else:
                    continue
            # All three conditions met
            logging.debug("Find %s in given iface XML", iface_mac)
            return True
        logging.debug("Not find %s in given iface XML", iface_mac)
        return False

    def check_result(vm_name, iface_source, iface_type, iface_mac,
                     flags, vm_state, attach=True):
        """
        Check the test result of attach/detach-device command against the
        --config/--live/--current flag matrix and the domain state.

        :param flags: option string passed to attach/detach (checked via
                      substring match)
        :param attach: True for attach check, False for detach check
        """
        active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if not attach:
            # NOTE(review): the lambda re-checks the same active_vmxml
            # snapshot fetched above, so its value never changes during the
            # 20s wait — presumably it should re-dump the XML; verify.
            utils_misc.wait_for(lambda: not is_attached(active_vmxml.devices,
                                                        iface_type,
                                                        iface_source,
                                                        iface_mac), 20)
        active_attached = is_attached(active_vmxml.devices, iface_type,
                                      iface_source, iface_mac)
        # A transient domain has no inactive XML to inspect
        if vm_state != "transient":
            inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(
                vm_name, options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices, iface_type,
                                            iface_source, iface_mac)

        if flags.count("config"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not"
                                                  " updated when --config "
                                                  "options used for attachment")
                else:
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not"
                                                  " updated when --config "
                                                  "options used for detachment")
        if flags.count("live"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live options used for"
                                                  " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live options used for"
                                                  " detachment")
        if flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --current options used "
                                                  "for attachment")
                elif vm_state == "shutoff" and not inactive_attached:
                    raise exceptions.TestFail("Inactive domain XML not updated"
                                              " when --current options used for"
                                              " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --current options used "
                                                  "for detachment")
                elif vm_state == "shutoff" and inactive_attached:
                    raise exceptions.TestFail("Inactive domain XML not updated "
                                              "when --current options used for "
                                              "detachment")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("at_detach_iface_vm_ref", "domname")
    at_options_suffix = params.get("at_dt_iface_at_options", "")
    dt_options_suffix = params.get("at_dt_iface_dt_options", "")
    at_status_error = "yes" == params.get("at_status_error", "no")
    dt_status_error = "yes" == params.get("dt_status_error", "no")
    pre_vm_state = params.get("at_dt_iface_pre_vm_state")

    # Skip if libvirt doesn't support --live/--current.
    if (at_options_suffix.count("--live") or
            dt_options_suffix.count("--live")):
        if not libvirt_version.version_compare(1, 0, 5):
            raise exceptions.TestSkipError("update-device doesn't"
                                           " support --live")
    if (at_options_suffix.count("--current") or
            dt_options_suffix.count("--current")):
        if not libvirt_version.version_compare(1, 0, 5):
            raise exceptions.TestSkipError("virsh update-device "
                                           "doesn't support --current")

    # Interface specific attributes.
    iface_type = params.get("at_detach_iface_type", "network")
    if iface_type == "bridge":
        try:
            utils_misc.find_command("brctl")
        except ValueError:
            raise exceptions.TestSkipError("Command 'brctl' is missing."
                                           " You must install it.")
    iface_source = params.get("at_detach_iface_source", "default")
    iface_mac = params.get("at_detach_iface_mac", "created")
    virsh_dargs = {'ignore_status': True, 'uri': uri, 'debug': True}

    # Check host version: some flag combinations are known-broken on RHEL6.
    rhel6_host = False
    if not process.run("grep 'Red Hat Enterprise Linux Server "
                       "release 6' /etc/redhat-release",
                       ignore_status=True, shell=True).exit_status:
        rhel6_host = True

    # Back up xml file.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Get a bridge name for test if iface_type is bridge.
    # If there is no bridge other than virbr0, raise TestSkipError
    if iface_type == "bridge":
        host_bridge = utils_net.Bridge()
        bridge_list = host_bridge.list_br()
        try:
            bridge_list.remove("virbr0")
        except AttributeError:
            pass  # If no virbr0, just pass is ok
        logging.debug("Useful bridges:%s", bridge_list)
        # just choosing one bridge on host.
        if len(bridge_list):
            iface_source = bridge_list[0]
        else:
            raise exceptions.TestSkipError("No useful bridge on host "
                                           "other than 'virbr0'.")

    # Turn VM into certain state.
    if pre_vm_state == "running":
        if (rhel6_host and at_options_suffix == "--config" and
                dt_options_suffix == ""):
            raise exceptions.TestSkipError("For bug921407, "
                                           "won't fix on rhel6 host")
        logging.info("Starting %s..." % vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
    elif pre_vm_state == "shutoff":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)
    elif pre_vm_state == "paused":
        if (rhel6_host and at_options_suffix == "--config" and
                dt_options_suffix == ""):
            raise exceptions.TestSkipError("For bug921407, "
                                           "won't fix on rhel6 host")
        logging.info("Pausing %s..." % vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
        if not vm.pause():
            raise exceptions.TestSkipError("Cann't pause the domain")
    elif pre_vm_state == "transient":
        # Re-create the domain without a persistent definition
        logging.info("Creating %s..." % vm_name)
        vm.undefine()
        if virsh.create(backup_xml.xml, **virsh_dargs).exit_status:
            backup_xml.define()
            raise exceptions.TestSkipError("Cann't create the domain")

    dom_uuid = vm.get_uuid()
    dom_id = vm.get_id()

    # Set attach-interface domain
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = dom_id
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "hexdomid" and dom_id is not None:
        vm_ref = hex(int(dom_id))

    # Get a mac address if iface_mac is 'created'.
    if iface_mac == "created":
        iface_mac = utils_net.generate_mac_address_simple()

    try:
        # Set attach-interface options and
        # start attach-interface test
        options = set_options(iface_type, iface_source, iface_mac,
                              at_options_suffix, "attach")
        ret = virsh.attach_interface(vm_name, options, **virsh_dargs)
        libvirt.check_exit_status(ret, at_status_error)

        # Check if the command take effect in vm
        # or config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
        # Sleep a while for vm is stable
        time.sleep(3)
        if not ret.exit_status:
            check_result(vm_name, iface_source, iface_type, iface_mac,
                         at_options_suffix, pre_vm_state)

        # Set detach-interface options
        options = set_options(iface_type, None, iface_mac,
                              dt_options_suffix, "detach")

        # Sleep for a while
        time.sleep(10)

        # Start detach-interface test
        if pre_vm_state == "paused":
            if not vm.pause():
                raise exceptions.TestFail("Cann't pause the domain")
        ret = virsh.detach_interface(vm_ref, options, **virsh_dargs)
        if rhel6_host and pre_vm_state in ['paused', 'running']:
            # --config attach + --config detach is expected to fail on RHEL6
            if (at_options_suffix == "--config" and
                    dt_options_suffix == "--config"):
                dt_status_error = True
        libvirt.check_exit_status(ret, dt_status_error)

        # Check if the command take effect
        # in vm or config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
        # Sleep a while for vm is stable
        time.sleep(10)
        if not ret.exit_status:
            check_result(vm_name, iface_source, iface_type, iface_mac,
                         dt_options_suffix, pre_vm_state, False)
    finally:
        # Restore the vm
        if vm.is_alive():
            vm.destroy(gracefully=False, free_mac_addresses=False)
        backup_xml.sync()
def run_virsh_freecell(test, params, env):
    """
    Test the command virsh freecell

    (1) Call virsh freecell
    (2) Call virsh freecell --all
    (3) Call virsh freecell with a numeric argument
    (4) Call virsh freecell xyz
    (5) Call virsh freecell with libvirtd service stop
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    option = params.get("virsh_freecell_options")

    # Prepare libvirtd service.  Bind `libvirtd` unconditionally (default
    # "on"): the recovery and result-check code below reads it even when the
    # "libvirtd" param is absent, which previously raised NameError.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        libvirt_vm.service_libvirtd_control("stop")

    # Run test case
    cmd_result = virsh.freecell(ignore_status=True, extra=option,
                                uri=connect_uri, debug=True)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status

    # Recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.service_libvirtd_control("start")

    # Check the output: presence of the 'numatune' command distinguishes
    # newer libvirt (supports --all) from older releases.
    if virsh.has_help_command('numatune'):
        OLD_LIBVIRT = False
    else:
        OLD_LIBVIRT = True
        if option == '--all':
            raise error.TestNAError("Older libvirt virsh freecell "
                                    "doesn't support --all option")

    def output_check(freecell_output):
        """Fail unless the output reports memory in kB/KiB units."""
        if not re.search("ki?B", freecell_output, re.IGNORECASE):
            raise error.TestFail("virsh freecell output invalid: "
                                 + freecell_output)

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off":
                raise error.TestFail("Command 'virsh freecell' succeeded "
                                     "with libvirtd service stopped, incorrect")
            else:
                # newer libvirt
                if not OLD_LIBVIRT:
                    # Fixed message: missing space and spelling in original
                    raise error.TestFail("Command 'virsh freecell %s' succeeded "
                                         "(incorrect command)" % option)
                else:  # older libvirt
                    raise error.TestNAError('Older libvirt virsh freecell '
                                            'incorrectly processes extraneous '
                                            'command-line options')
    elif status_error == "no":
        output_check(output)
        if status != 0:
            raise error.TestFail("Command 'virsh freecell %s' failed "
                                 "(correct command)" % option)
def run(test, params, env):
    """
    Test command: virsh ttyconsole.

    1) Config console in xml file.
    2) Run test for virsh ttyconsole.
    3) Result check.

    :param test: test object (unused here; kept for the runner's signature)
    :param params: test parameters dictionary
    :param env: test environment, used to look up the VM under test
    """
    os_type = params.get("os_type")
    if os_type == "windows":
        raise error.TestNAError("SKIP:Do not support Windows.")

    # Get parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("virsh_ttyconsole_vm_ref", "domname")
    vm_state = params.get("vm_state", "running")
    option_suffix = params.get("virsh_ttyconsole_option_suffix", "")
    vm_uuid = vm.get_uuid()
    # vm_id stays "" while the domain is shut off (no runtime id exists).
    vm_id = ""
    virsh_dargs = {'ignore_status': True, 'uri': uri}

    # A backup of original vm, restored in the recovery phase below.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()

    # Config vm for tty console
    xml_console_config(vm_name)
    # NOTE(review): destroy is called again right after the XML edit even
    # though the domain was already destroyed above — presumably a no-op
    # safety net; verify against xml_console_config's behavior.
    vm.destroy()

    # Prepare vm state for test
    if vm_state != "shutoff":
        vm.start()
        vm.wait_for_login()
        vm_id = vm.get_id()
    if vm_state == "paused":
        vm.pause()

    # Prepare options: translate the symbolic vm_ref into the actual
    # name/uuid/id reference passed on the virsh command line.
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domuuid":
        vm_ref = vm_uuid
    elif vm_ref == "domid":
        vm_ref = vm_id
    elif vm_id and vm_ref == "hex_id":
        vm_ref = hex(int(vm_id))

    if option_suffix:
        vm_ref += " %s" % option_suffix

    # Run test command
    result = virsh.ttyconsole(vm_ref, **virsh_dargs)
    status = result.exit_status
    logging.debug(result)

    # Recover state of vm.
    if vm_state == "paused":
        vm.resume()

    # Recover vm: drop the console config and restore the backed-up XML.
    if vm.is_alive():
        vm.destroy()
    xml_console_recover(vmxml_backup)

    # check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successful with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command.")
def run(test, params, env):
    """
    Test command: virsh net-start.

    Starts the (normally inactive) default network — or a route-conflict
    pair of networks — and verifies success/failure against expectations,
    optionally exercising polkit ACL, readonly, and firewalld-restart
    scenarios.

    :param test: test object providing fail()/cancel()
    :param params: test parameters dictionary
    :param env: test environment object (unused directly)
    """
    # Gather test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    status_error = "yes" == params.get("status_error", "no")
    inactive_default = "yes" == params.get("net_start_inactive_default",
                                           "yes")
    net_ref = params.get("net_start_net_ref", "netname")  # default is tested
    extra = params.get("net_start_options_extra", "")  # extra cmd-line params.
    route_test = "yes" == params.get("route_test", "no")
    firewalld_operate = params.get("firewalld_operate", None)

    # make easy to maintain
    virsh_dargs = {'uri': uri, 'debug': True, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_uri = params.get("virsh_uri")
    if virsh_uri and not utils_split_daemons.is_modular_daemon():
        virsh_uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Get all network instance
    origin_nets = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)

    # Prepare default network for following test.
    try:
        default_netxml = origin_nets['default']
    except KeyError:
        virsh_instance.close_session()
        test.cancel("Test requires default network to exist")
    try:
        # To confirm default network is active
        if not default_netxml.active:
            default_netxml.active = True

        # inactive default according test's need
        if inactive_default:
            logging.info("Stopped default network")
            default_netxml.active = False

        # State before run command
        origin_state = virsh_instance.net_state_dict()
        logging.debug("Origin network(s) state: %s", origin_state)

        if route_test:
            # define a network "def" with route address is "192.168.122.x"
            # (overlapping default's subnet, so the two networks conflict)
            # 1. start def first then start default
            current_state = virsh_instance.net_state_dict()
            if 'def' in current_state:
                virsh.net_destroy("def", ignore_status=True)
                virsh.net_undefine("def", ignore_status=True)
            expect_fail = "yes" == params.get("expect_start_fail", "no")
            test_xml = network_xml.NetworkXML(network_name="def")
            test_xml.forward = {'mode': 'nat'}
            test_xml.routes = [{
                'address': '192.168.122.0',
                'prefix': '24',
                'gateway': '192.168.100.1'
            }]
            ipxml = IPXML(address='192.168.100.1', netmask='255.255.255.0')
            range_4 = network_xml.RangeXML()
            range_4.attrs = {
                'start': '192.168.100.2',
                'end': '192.168.100.254'
            }
            ipxml.dhcp_ranges = range_4
            test_xml.ip = ipxml
            test_xml.define()
            virsh.net_start("def")
            # start default, should fail
            result = virsh.net_start("default")
            logging.debug(result)
            libvirt.check_exit_status(result, expect_error=expect_fail)

            # 2. start default then start def
            virsh.net_destroy("def")
            virsh.net_start("default")
            current_state11 = virsh_instance.net_state_dict()
            logging.debug("before start 2nd network(s) state: %s",
                          current_state11)
            # start def, should fail
            result = virsh.net_start("def")
            logging.debug(result)
            libvirt.check_exit_status(result, expect_error=expect_fail)
            current_state12 = virsh_instance.net_state_dict()
            logging.debug("after start 2nd network(s) state: %s",
                          current_state12)
            # clear the env
            virsh.net_undefine("def")
        else:
            if net_ref == "netname":
                net_ref = default_netxml.name
            elif net_ref == "netuuid":
                net_ref = default_netxml.uuid

            # Swap in special virsh argument sets for the ACL-polkit and
            # readonly-connection variants of the test.
            if params.get('setup_libvirt_polkit') == 'yes':
                virsh_dargs = {
                    'uri': virsh_uri,
                    'unprivileged_user': unprivileged_user,
                    'debug': False,
                    'ignore_status': True
                }
            if params.get('net_start_readonly', 'no') == 'yes':
                virsh_dargs = {
                    'uri': uri,
                    'debug': True,
                    'readonly': True,
                    'ignore_status': True
                }

            # Run test case
            if 'unprivileged_user' in virsh_dargs and status_error:
                # unprivileged_user must go to the VirshPersistent
                # constructor, not to net_start(), hence the pop().
                test_virsh = virsh.VirshPersistent(
                    unprivileged_user=virsh_dargs['unprivileged_user'])
                virsh_dargs.pop('unprivileged_user')
                result = test_virsh.net_start(net_ref, extra, **virsh_dargs)
                test_virsh.close_session()
            elif not route_test:
                result = virsh.net_start(net_ref, extra, **virsh_dargs)
            logging.debug(result)
            status = result.exit_status

            # Get current net_stat_dict
            current_state = virsh_instance.net_state_dict()
            logging.debug("Current network(s) state: %s", current_state)
            if 'default' not in current_state:
                test.fail('Network "default" cannot be found')
            if firewalld_operate:
                # current network is active, ensure firewalld is active
                # if not, restart firewalld, then restart libvirtd
                firewalld_service = service.Factory.create_service("firewalld")
                libvirtd_obj = utils_libvirtd.Libvirtd()
                if not firewalld_service.status():
                    firewalld_service.start()
                    libvirtd_obj.restart()
                    virsh_instance = virsh.VirshPersistent(**virsh_dargs)
                if firewalld_operate == "restart":
                    # after firewalld restart, destroy and start the network
                    firewalld_service.restart()
                    time.sleep(5)
                    res1 = virsh.net_destroy(net_ref, extra, **virsh_dargs)
                    # need to add wait time. As libvirt doesn't know that
                    # firewalld has restarted until it gets the dbus message,
                    # but that message won't arrive until some time after all
                    # of libvirt's chains/rules have already been removed by
                    # the firewalld restart. refer to bug 1942805
                    time.sleep(5)
                    res2 = virsh.net_start(net_ref, extra, **virsh_dargs)
                elif firewalld_operate == "stop_start":
                    # start network which has been destroyed before
                    # firewalld restart
                    res1 = virsh.net_destroy(net_ref, extra, **virsh_dargs)
                    firewalld_service.stop()
                    firewalld_service.start()
                    time.sleep(5)
                    res2 = virsh.net_start(net_ref, extra, **virsh_dargs)
                logging.debug(
                    "firewalld_operate is %s, result for start network "
                    "after firewalld restart: %s",
                    firewalld_operate, res2)
                # NOTE(review): res1/res2 are only assigned in the two
                # branches above — other firewalld_operate values would
                # NameError here; presumably the cfg only supplies these two.
                status1 = res1.exit_status | res2.exit_status
                if status1:
                    test.fail(
                        "Start or destroy network after firewalld "
                        "restart fail!")

            # Check status_error
            if status_error:
                if not status:
                    test.fail("Run successfully with wrong command!")
            else:
                if status:
                    test.fail("Run failed with right command")

                # Get current net_stat_dict
                current_state = virsh_instance.net_state_dict()
                logging.debug("Current network(s) state: %s", current_state)
                is_default_active = current_state['default']['active']
                if not is_default_active:
                    test.fail(
                        "Execute cmd successfully but default is "
                        "inactive actually."
                    )
    finally:
        virsh_instance.close_session()
        # NOTE(review): net_state_dict() is invoked after close_session();
        # presumably VirshPersistent reopens on demand — confirm.
        current_state = virsh_instance.net_state_dict()
        if "def" in current_state:
            virsh.net_destroy("def", ignore_status=True)
            virsh.net_undefine("def", ignore_status=True)
        virsh.net_start('default', debug=True, ignore_status=True)
def run(test, params, env):
    """
    Test virsh {at|de}tach-interface command.

    1) Prepare test environment and its parameters
    2) Attach the required interface
    3) According test type(only attach or both attach and detach):
       a. Go on to test detach(if attaching is correct)
       b. Return GOOD or raise TestFail(if attaching is wrong)
    4) Check if attached interface is correct:
       a. Try to catch it in vm's XML file
       b. Try to catch it in vm
    5) Detach the attached interface
    6) Check result

    :param test: test object providing fail()/cancel()
    :param params: test parameters dictionary
    :param env: test environment, used to look up the VM under test
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    vm_ref = params.get("at_detach_iface_vm_ref", "domname")
    options_suffix = params.get("at_detach_iface_options_suffix", "")
    status_error = "yes" == params.get("status_error", "no")
    start_vm = params.get("start_vm")
    # Should attach must be pass for detach test.
    correct_attach = "yes" == params.get("correct_attach", "no")
    readonly = ("yes" == params.get("readonly", "no"))

    # Interface specific attributes.
    iface_type = params.get("at_detach_iface_type", "network")
    if iface_type == "bridge":
        try:
            utils_path.find_command("brctl")
        except utils_path.CmdNotFoundError:
            test.cancel("Command 'brctl' is missing. You must "
                        "install it.")
    iface_source = params.get("at_detach_iface_source", "default")
    iface_mode = params.get("at_detach_iface_mode", "vepa")
    iface_mac = params.get("at_detach_iface_mac", "created")
    iface_target = params.get("at_detach_iface_target")
    iface_model = params.get("at_detach_iface_model")
    iface_inbound = params.get("at_detach_iface_inbound")
    iface_outbound = params.get("at_detach_iface_outbound")
    iface_rom = params.get("at_detach_rom_bar")
    iface_link = params.get("at_detach_link_state")
    iface_boot = params.get("at_detach_boot_order")
    iface_driver = params.get("at_detach_iface_driver")
    iface_driver_host = params.get("at_detach_driver_host")
    iface_driver_guest = params.get("at_detach_driver_guest")
    iface_backend = params.get("at_detach_iface_backend")
    save_restore = params.get("save_restore", "no")
    restart_libvirtd = params.get("restart_libvirtd", "no")
    attach_cmd = params.get("attach_cmd", "attach-interface")
    virsh_dargs = {'ignore_status': True, 'debug': True, 'uri': uri}

    # Get iface name if iface_type is direct
    if iface_type == "direct":
        iface_source = utils_net.get_net_if(state="UP")[0]

    # Get a bridge name for test if iface_type is bridge.
    # If there is no bridge other than virbr0, raise TestCancel
    if iface_type == "bridge":
        host_bridge = utils_net.Bridge()
        bridge_list = host_bridge.list_br()
        try:
            bridge_list.remove("virbr0")
        except AttributeError:
            pass  # If no virbr0, just pass is ok
        logging.debug("Useful bridges:%s", bridge_list)
        # just choosing one bridge on host.
        if len(bridge_list):
            iface_source = bridge_list[0]
        else:
            test.cancel("No useful bridge on host "
                        "other than 'virbr0'.")

    # Test both detach and attach, So collect info
    # both of them for result check.
    # When something wrong with interface, set it to 1
    fail_flag = 0
    result_info = []

    # Get a mac address if iface_mac is 'created'.
    if iface_mac == "created" or correct_attach:
        iface_mac = utils_net.generate_mac_address_simple()

    # Record all iface parameters in iface_dict, keyed by the suffix of the
    # local variable names above (iface_<key>); locals() lets us look each
    # one up dynamically.
    iface_dict = {}
    update_list = [
        "driver", "driver_host", "driver_guest", "model", "rom",
        "inbound", "outbound", "link", "target", "mac", "source",
        "boot", "backend", "type", "mode"
    ]
    names = locals()
    for update_item in update_list:
        if names["iface_"+update_item]:
            iface_dict.update({update_item: names["iface_"+update_item]})
        else:
            iface_dict.update({update_item: None})
    logging.info("iface_dict is %s", iface_dict)

    # Format the params
    iface_format = format_param(iface_dict)
    logging.info("iface_format is %s", iface_format)

    try:
        # Generate xml file if using attach-device command
        if attach_cmd == "attach-device":
            # Change boot order to disk
            libvirt.change_boot_order(vm_name, "disk", "1")
            vm.destroy()
            vm.start()
            # Generate attached xml
            xml_file_tmp = libvirt.modify_vm_iface(vm_name, "get_xml",
                                                   iface_format)
            new_iface = Interface(type_name=iface_type)
            new_iface.xml = xml_file_tmp
            new_iface.del_address()
            xml_file = new_iface.xml

        # To confirm vm's state and make sure os fully started
        if start_vm == "no":
            if vm.is_alive():
                vm.destroy()
        else:
            vm.wait_for_login().close()

        # Set attach-interface domain: translate the symbolic vm_ref into
        # the actual name/id/uuid reference for the virsh command line.
        dom_uuid = vm.get_uuid()
        dom_id = vm.get_id()

        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = dom_id
        elif vm_ref == "domuuid":
            vm_ref = dom_uuid
        elif vm_ref == "hexdomid" and dom_id is not None:
            vm_ref = hex(int(dom_id))

        # Set attach-interface options and Start attach-interface test
        if correct_attach:
            options = set_options("network", "default", iface_mac, "",
                                  "attach")
            if readonly:
                virsh_dargs.update({'readonly': True, 'debug': True})
            attach_result = virsh.attach_interface(vm_name, options,
                                                   **virsh_dargs)
        else:
            if attach_cmd == "attach-interface":
                options = set_options(iface_type, iface_source, iface_mac,
                                      options_suffix, "attach", iface_target,
                                      iface_model, iface_inbound,
                                      iface_outbound)
                attach_result = virsh.attach_interface(vm_ref, options,
                                                       **virsh_dargs)
            elif attach_cmd == "attach-device":
                attach_result = virsh.attach_device(vm_name, xml_file,
                                                    ignore_status=True,
                                                    debug=True)
        attach_status = attach_result.exit_status
        logging.debug(attach_result)

        # If attach interface failed.
        if attach_status:
            if not status_error:
                fail_flag = 1
                result_info.append("Attach Failed: %s"
                                   % attach_result.stderr)
            elif status_error:
                # Here we just use it to exit, do not mean test failed
                fail_flag = 1
        # If attach interface succeeded.
        else:
            if status_error and not correct_attach:
                fail_flag = 1
                result_info.append("Attach Success with wrong command.")

        if fail_flag and start_vm == "yes":
            vm.destroy()
            if len(result_info):
                test.fail(result_info)
            else:
                # Exit because it is error_test for attach-interface.
                return

        if "print-xml" in options_suffix:
            # --print-xml returns the would-be device XML on stdout instead
            # of attaching; verify each element against the requested params.
            iface_obj = Interface(type_name=iface_type)
            iface_obj.xml = attach_result.stdout.strip()
            if (iface_obj.type_name == iface_type
                    and iface_obj.source['dev'] == iface_source
                    and iface_obj.target['dev'] == iface_target
                    and iface_obj.model == iface_model
                    and iface_obj.bandwidth.inbound
                    == eval(iface_format['inbound'])
                    and iface_obj.bandwidth.outbound
                    == eval(iface_format['outbound'])):
                logging.info("Print ml all element check pass")
            else:
                test.fail("Print xml do not show as expected")

        # Check dumpxml file whether the interface is added successfully.
        status, ret = check_dumpxml_iface(vm_name, iface_format)
        if "print-xml" not in options_suffix:
            if status:
                fail_flag = 1
                result_info.append(ret)
        else:
            # With --print-xml nothing should actually be attached.
            if status == 0:
                test.fail("Attach interface effect in xml with "
                          "print-xml option")
            else:
                return

        # Login to domain to check new interface.
        if not vm.is_alive():
            vm.start()
        elif vm.state() == "paused":
            vm.resume()

        status, ret = login_to_check(vm, iface_mac)
        if status:
            fail_flag = 1
            result_info.append(ret)

        # Check on host for direct type
        if iface_type == 'direct':
            # NOTE(review): the device name 'test' is hard-coded here —
            # looks like it should be iface_target; verify against the cfg.
            cmd_result = process.run(
                "ip -d link show test").stdout_text.strip()
            logging.info("cmd output is %s", cmd_result)
            check_patten = ("%s@%s.*\n.*%s.*\n.*macvtap.*mode.*%s"
                            % (iface_target, iface_source,
                               iface_mac, iface_mode))
            logging.info("check patten is %s", check_patten)
            if not re.search(check_patten, cmd_result):
                logging.error("Can not find %s in ip link" % check_patten)
                fail_flag = 1
                result_info.append(cmd_result)

        # Do operation and check again
        if restart_libvirtd == "yes":
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()

        if save_restore == "yes":
            check_save_restore(vm_name)

        status, ret = check_dumpxml_iface(vm_name, iface_format)
        if status:
            fail_flag = 1
            result_info.append(ret)

        # Set detach-interface options
        options = set_options(iface_type, None, iface_mac,
                              options_suffix, "detach")

        # Start detach-interface test.  After save/restore the domain gets a
        # new runtime id, so fall back to the name reference.
        if save_restore == "yes" and vm_ref == dom_id:
            vm_ref = vm_name
        detach_result = virsh.detach_interface(vm_ref, options,
                                               **virsh_dargs)
        detach_status = detach_result.exit_status
        detach_msg = detach_result.stderr.strip()
        logging.debug(detach_result)

        if detach_status == 0 and status_error == 0:
            # Check the xml after detach and clean up if needed.
            time.sleep(5)
            status, _ = check_dumpxml_iface(vm_name, iface_format)
            if status == 0:
                # Interface still present: flag the failure and force a
                # type+mac based detach so the env is left clean.
                detach_status = 1
                detach_msg = "xml still exist after detach"
                cleanup_options = "--type %s --mac %s" % (iface_type,
                                                          iface_mac)
                virsh.detach_interface(vm_ref, cleanup_options,
                                       **virsh_dargs)
            else:
                logging.info("After detach, the interface xml disappeared")

        # Check results.
        if status_error:
            if detach_status == 0:
                test.fail("Detach Success with wrong command.")
        else:
            if detach_status != 0:
                test.fail("Detach Failed: %s" % detach_msg)
            else:
                if fail_flag:
                    test.fail("Attach-Detach Success but "
                              "something wrong with its "
                              "functional use:%s" % result_info)
    finally:
        # Restore the vm
        if vm.is_alive():
            vm.destroy()
        backup_xml.sync()
def run(test, params, env):
    """
    Test the command virsh uri

    (1) Call virsh uri
    (2) Call virsh -c remote_uri uri
    (3) Call virsh uri with an unexpected option
    (4) Call virsh uri with libvirtd service stop

    :param test: test object providing fail()/cancel()
    :param params: test parameters dictionary
    :param env: test environment object (unused here)
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    option = params.get("virsh_uri_options")
    unprivileged_user = params.get('unprivileged_user')
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    remote_user = params.get("remote_user", "root")

    # Forming the uri using the api
    target_uri = params.get("target_uri")
    remote_ref = params.get("uri_remote_ref", "")
    if remote_ref:
        if target_uri.count('EXAMPLE.COM'):
            test.cancel('target_uri configuration set to sample value')
        logging.info("The target_uri: %s", target_uri)
        cmd = "virsh -c %s uri" % target_uri
    else:
        cmd = "virsh uri %s" % option

    # Prepare libvirtd service.
    # Read the knob unconditionally: the previous code only assigned
    # 'libvirtd' when the param existed, which made the later unconditional
    # references ('if libvirtd == "off"') a NameError for configurations
    # without it.  'None == "off"' is False, so behavior is otherwise
    # unchanged.
    libvirtd = params.get("libvirtd")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    logging.info("The command: %s", cmd)

    # setup autologin for ssh to remote machine to execute commands
    if remote_ref:
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)
    if unprivileged_user:
        # Create the unprivileged user on demand if it does not exist yet.
        if process.run("id %s" % unprivileged_user,
                       ignore_status=True).exit_status != 0:
            process.run("useradd %s" % unprivileged_user)
    try:
        try:
            if remote_ref == "remote" or unprivileged_user:
                connect_uri = target_uri
            uri_test = virsh.canonical_uri(option,
                                           unprivileged_user=unprivileged_user,
                                           uri=connect_uri,
                                           ignore_status=False,
                                           debug=True)
            status = 0  # good
        except process.CmdError:
            status = 1  # bad
            uri_test = ''
    finally:
        # Remove the temporary user even if virsh raised something other
        # than CmdError (the previous code leaked the account in that case).
        if unprivileged_user:
            process.run("userdel %s" % unprivileged_user)

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error", "no")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off" and libvirt_version.version_compare(5, 6, 0):
                # Since 5.6.0 libvirtd is socket-activated, so the command
                # succeeds even with the service stopped.
                logging.info(
                    "From libvirt version 5.6.0 libvirtd is restarted "
                    "and command should succeed.")
            else:
                test.fail("Command: %s succeeded "
                          "(incorrect command)" % cmd)
        else:
            logging.info("command: %s is a expected error", cmd)
    elif status_error == "no":
        if target_uri != uri_test:
            test.fail("Virsh cmd uri %s != %s." % (uri_test, target_uri))
        if status != 0:
            test.fail("Command: %s failed "
                      "(correct command)" % cmd)