def hugepage_assign(hp_num, target_ip='', node='', hp_size='', user='', password=''):
    """
    Allocates hugepages for src and dst machines

    :param hp_num: number of hugepages
    :param target_ip: ip address of destination machine
    :param node: numa node to which HP have to be allocated
    :param hp_size: hugepage size
    :param user: remote machine's username
    :param password: remote machine's password
    """
    command = ""
    if node == '':
        if target_ip == '':
            utils_memory.set_num_huge_pages(int(hp_num))
        else:
            command = "echo %s > /proc/sys/vm/nr_hugepages" % (hp_num)
    else:
        command = "echo %s > /sys/devices/system/node/node" % (hp_num)
        command += "%s/hugepages/hugepages-%skB/" % (str(node), hp_size)
        command += "nr_hugepages"
    if command != "":
        if target_ip != "":
            server_session = remote.wait_for_login('ssh', target_ip, '22',
                                                   user, password,
                                                   r"[\#\$]\s*$")
            cmd_output = server_session.cmd_status_output(command)
            server_session.close()
            if (cmd_output[0] != 0):
                raise error.TestNAError("HP not supported/configured")
        else:
            process.system_output(command, verbose=True, shell=True)
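# A minimal usage sketch for hugepage_assign() above; the address, credentials,
# NUMA node and page counts below are illustrative placeholders, not values
# taken from any test configuration.
#
# hugepage_assign("1024")                                   # local allocation
# hugepage_assign("512", target_ip="192.0.2.10", node="0",  # remote host,
#                 hp_size="2048", user="root",              # NUMA node 0,
#                 password="password")                      # 2048 kB pages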
def run(test, params, env):
    """
    Test huge page allocation

    Available huge page size is arch dependent
    """
    try:
        expected_size = params.get("expected_size", "")
        number_of_huge_pages = params.get("number_of_hugepages", "")
        previous_allocation = ""
        previous_allocation = get_num_huge_pages()
        logging.debug("Try to set '%s' huge pages."
                      " Expected size: '%s'. Actual size: '%s'." %
                      (number_of_huge_pages, expected_size,
                       get_huge_page_size()))
        virsh.allocpages(expected_size, number_of_huge_pages,
                         ignore_status=False)
        actual_allocation = get_num_huge_pages()
        if str(actual_allocation) != str(number_of_huge_pages):
            test.fail("Huge page allocation failed."
                      " Expected '%s', got '%s'." %
                      (number_of_huge_pages, actual_allocation))
    finally:
        if previous_allocation:
            logging.debug("Restore huge page allocation")
            set_num_huge_pages(previous_allocation)
def setup_hugepages(page_size=2048, shp_num=1000):
    """
    Set up hugepages on the host

    :param page_size: hugepage size in kB, e.g. 4, 2048, 1048576
    :param shp_num: number of hugepages to reserve
    """
    mount_hugepages(page_size)
    utils_memory.set_num_huge_pages(shp_num)
    utils_libvirtd.libvirtd_restart()
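# Example call (assumed values): reserve 1024 x 2048 kB hugepages, mount the
# corresponding hugetlbfs and restart libvirtd so the mount is picked up.
#
# setup_hugepages(page_size=2048, shp_num=1024)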
def setup_test_virtio_mem():
    """
    Setup vmxml for test
    """
    set_num_huge_pages = params.get("set_num_huge_pages")
    if set_num_huge_pages:
        utils_memory.set_num_huge_pages(int(set_num_huge_pages))
    libvirt_vmxml.remove_vm_devices_by_type(vm, 'memory')
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vm_attrs = eval(params.get('vm_attrs', '{}'))
    vmxml.setup_attrs(**vm_attrs)
    vmxml.sync()
    if params.get("start_vm") == "yes":
        vm.start()
        vm.wait_for_login().close()
def setup_test_virtio_mem():
    """
    Setup vmxml for test
    """
    set_num_huge_pages = params.get("set_num_huge_pages")
    if set_num_huge_pages:
        utils_memory.set_num_huge_pages(int(set_num_huge_pages))
    libvirt_vmxml.remove_vm_devices_by_type(vm, 'memory')
    vm_attrs = eval(params.get('vm_attrs', '{}'))
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml.setup_attrs(**vm_attrs)
    vmxml.sync()
    mem_device_attrs = eval(params.get('mem_device_attrs', '{}'))
    mem_device = Memory()
    mem_device.setup_attrs(**mem_device_attrs)
    virsh.attach_device(vm_name, mem_device.xml, flagstr='--config',
                        debug=True, ignore_status=False)
def run(test, params, env): """ Test hpt resizing """ vm_name = params.get('main_vm') vm = env.get_vm(vm_name) status_error = 'yes' == params.get('status_error', 'no') error_msg = eval(params.get('error_msg', '[]')) hpt_attrs = eval(params.get('hpt_attrs', '{}')) hpt_order_path = params.get('hpt_order_path', '') cpu_attrs = eval(params.get('cpu_attrs', '{}')) numa_cell = eval(params.get('numa_cell', '{}')) hugepage = 'yes' == params.get('hugepage', 'no') maxpagesize = int(params.get('maxpagesize', 0)) check_hp = 'yes' == params.get('check_hp', 'no') qemu_check = params.get('qemu_check', '') skip_p8 = 'yes' == params.get('skip_p8', 'no') def set_hpt(vmxml, sync, **attrs): """ Set resizing value to vm xml :param vmxml: xml of vm to be manipulated :param sync: whether to sync vmxml after :param attrs: attrs to set to hpt xml """ if vmxml.xmltreefile.find('/features'): features_xml = vmxml.features else: features_xml = vm_xml.VMFeaturesXML() hpt_xml = vm_xml.VMFeaturesHptXML() for attr in attrs: setattr(hpt_xml, attr, attrs[attr]) features_xml.hpt = hpt_xml vmxml.features = features_xml logging.debug(vmxml) if sync: vmxml.sync() def set_cpu(vmxml, **attrs): """ Set cpu attrs for vmxml according to given attrs :param vmxml: xml of vm to be manipulated :param attrs: attrs to set to cpu xml """ if vmxml.xmltreefile.find('cpu'): cpu = vmxml.cpu else: cpu = vm_xml.VMCPUXML() if 'numa_cell' in attrs: cpu.xmltreefile.create_by_xpath('/numa') cpu.numa_cell = attrs['numa_cell'] for key in attrs: setattr(cpu, key, attrs[key]) vmxml.cpu = cpu vmxml.sync() def set_memory(vmxml): """ Set memory attributes in vm xml """ vmxml.max_mem_rt = int(params.get('max_mem_rt', 30670848)) vmxml.max_mem_rt_slots = int(params.get('max_mem_rt_slots', 16)) vmxml.max_mem_rt_unit = params.get('max_mem_rt_unit', 'KiB') logging.debug(numa_cell) if numa_cell: # Remove cpu topology to avoid that it doesn't match vcpu count if vmxml.get_cpu_topology(): new_cpu = vmxml.cpu new_cpu.del_topology() vmxml.cpu = new_cpu vmxml.vcpu = max([int(cell['cpus'][-1]) for cell in numa_cell]) + 1 vmxml.sync() def check_hpt_order(session, resizing=''): """ Return htp order in hpt_order file by default If 'resizing' is disabled, test updating htp_order """ if not hpt_order_path: test.cancel('No hpt order path provided.') hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip() hpt_order = int(hpt_order) logging.info('Current hpt_order is %d', hpt_order) if resizing == 'disabled': cmd_result = session.cmd_status_output( 'echo %d > %s' % (hpt_order + 1, hpt_order_path)) result = process.CmdResult(stderr=cmd_result[1], exit_status=cmd_result[0]) libvirt.check_exit_status(result, True) libvirt.check_result(result, error_msg) return hpt_order def check_hp_in_vm(session, page_size): """ Check if hugepage size is correct inside vm :param session: the session of the running vm :param page_size: the expected pagesize to be checked inside vm """ expect = False if int(page_size) == 65536 else True meminfo = session.cmd_output('cat /proc/meminfo|grep Huge') logging.info('meminfo: \n%s', meminfo) pattern = 'Hugepagesize:\s+%d\s+kB' % int(page_size / 1024) logging.info('"%s" should %s be found in meminfo output', pattern, '' if expect else 'not') result = expect == bool(re.search(pattern, meminfo)) if not result: test.fail('meminfo output not meet expectation') # Check PAGE_SIZE in another way if not expect: conf_page_size = session.cmd_output('getconf PAGE_SIZE') logging.debug('Output of "getconf PAGE_SIZE": %s', conf_page_size) if 
int(conf_page_size) != int(page_size): test.fail( 'PAGE_SIZE not correct, should be %r, actually is %r' % (page_size, conf_page_size)) bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: arch = platform.machine() vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) resizing = hpt_attrs.get('resizing') # Test on ppc64le hosts if arch.lower() == 'ppc64le': cpu_arch = cpu.get_cpu_arch() logging.debug('cpu_arch is: %s', cpu_arch) if skip_p8 and cpu_arch == 'power8': test.cancel('This case is not for POWER8') if maxpagesize and not utils_misc.compare_qemu_version(3, 1, 0): test.cancel('Qemu version is too low, ' 'does not support maxpagesize setting') if maxpagesize == 16384 and cpu_arch == 'power9': test.cancel('Power9 does not support 16M pagesize.') set_hpt(vmxml, True, **hpt_attrs) if cpu_attrs or numa_cell: if numa_cell: cpu_attrs['numa_cell'] = numa_cell set_cpu(vmxml, **cpu_attrs) if hugepage: vm_mem = vmxml.max_mem host_hp_size = utils_memory.get_huge_page_size() # Make 100m extra memory just to be safe hp_count = max((vm_mem + 102400) // host_hp_size, 1200) vm_xml.VMXML.set_memoryBacking_tag(vm_name, hpgs=True) # Set up hugepage env mnt_source, hp_path, fstype = 'hugetlbfs', '/dev/hugepages', 'hugetlbfs' if not os.path.isdir(hp_path): process.run('mkdir %s' % hp_path, verbose=True) utils_memory.set_num_huge_pages(hp_count) if utils_misc.is_mounted(mnt_source, hp_path, fstype, verbose=True): utils_misc.umount(mnt_source, hp_path, fstype, verbose=True) utils_misc.mount(mnt_source, hp_path, fstype, verbose=True) # Restart libvirtd service to make sure mounted hugepage # be recognized utils_libvirtd.libvirtd_restart() if resizing == 'enabled': set_memory(vmxml) logging.debug('vmxml: \n%s', vmxml) # Start vm and check if start succeeds result = virsh.start(vm_name, debug=True) libvirt.check_exit_status(result, expect_error=status_error) # if vm is not suposed to start, terminate test if status_error: libvirt.check_result(result, error_msg) return libvirt.check_qemu_cmd_line(qemu_check) session = vm.wait_for_login() hpt_order = check_hpt_order(session, resizing) # Check hugepage inside vm if check_hp: check_hp_in_vm(session, maxpagesize * 1024) if resizing == 'enabled': mem_xml = utils_hotplug.create_mem_xml( tg_size=int(params.get('mem_size', 2048000)), tg_sizeunit=params.get('size_unit', 'KiB'), tg_node=int(params.get('mem_node', 0)), mem_model=params.get('mem_model', 'dimm')) logging.debug(mem_xml) # Attach memory device to the guest for 12 times # that will reach the maxinum memory limitation for i in range(12): virsh.attach_device(vm_name, mem_xml.xml, debug=True, ignore_status=False) xml_after_attach = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug(xml_after_attach) # Check dumpxml of the guest, # check if each device has its alias for i in range(12): pattern = "alias\s+name=[\'\"]dimm%d[\'\"]" % i logging.debug('Searching for %s', pattern) if not re.search(pattern, str( xml_after_attach.xmltreefile)): test.fail('Missing memory alias: %s' % pattern) # Test on non-ppc64le hosts else: set_hpt(vmxml, sync=False, **hpt_attrs) result = virsh.define(vmxml.xml) libvirt.check_exit_status(result, status_error) libvirt.check_result(result, error_msg) finally: bk_xml.sync() if hugepage: utils_misc.umount('hugetlbfs', '/dev/hugepages', 'hugetlbfs') utils_memory.set_num_huge_pages(0)
def run(test, params, env): """ Test virtiofs filesystem device: 1.Start guest with 1/2 virtiofs filesystem devices. 2.Start 2 guest with the same virtiofs filesystem device. 3.Coldplug/Coldunplug virtiofs filesystem device 4.Share data between guests and host. 5.Lifecycle for guest with virtiofs filesystem device. """ def generate_expected_process_option(expected_results): """ Generate expected virtiofsd process option """ if cache_mode != "auto": expected_results = "cache=%s" % cache_mode if xattr == "on": expected_results += ",xattr" elif xattr == "off": expected_results += ",no_xattr" if flock == "on": expected_results += ",flock" else: expected_results += ",no_flock" if lock_posix == "on": expected_results += ",posix_lock" else: expected_results += ",no_posix_lock" logging.debug(expected_results) return expected_results def shared_data(vm_names, fs_devs): """ Shared data between guests and host: 1.Mount dir in guest; 2.Write a file in guest; 3.Check the md5sum value are the same in guests and host; """ md5s = [] for vm in vms: session = vm.wait_for_login() for fs_dev in fs_devs: logging.debug(fs_dev) mount_dir = '/var/tmp/' + fs_dev.target['dir'] session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=False) session.cmd('mkdir -p %s' % mount_dir) logging.debug("mount virtiofs dir in guest") cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'], mount_dir) status, output = session.cmd_status_output(cmd, timeout=300) if status != 0: session.close() test.fail("mount virtiofs dir failed: %s" % output) if vm == vms[0]: filename_guest = mount_dir + '/' + vm.name cmd = "dd if=/dev/urandom of=%s bs=1M count=512 oflag=direct" % filename_guest status, output = session.cmd_status_output(cmd, timeout=300) if status != 0: session.close() test.fail("Write data failed: %s" % output) md5_value = session.cmd_status_output( "md5sum %s" % filename_guest)[1].strip().split()[0] md5s.append(md5_value) logging.debug(md5_value) md5_value = process.run( "md5sum %s" % filename_guest).stdout_text.strip().split()[0] logging.debug(md5_value) md5s.append(md5_value) session.close() if len(set(md5s)) != len(fs_devs): test.fail("The md5sum value are not the same in guests and host") def launch_externally_virtiofs(source_dir, source_socket): """ Launch externally virtiofs :param source_dir: the dir shared on host :param source_socket: the socket file listened on """ process.run('chcon -t virtd_exec_t %s' % path, ignore_status=False, shell=True) cmd = "systemd-run %s --socket-path=%s -o source=%s" % ( path, source_socket, source_dir) try: process.run(cmd, ignore_status=False, shell=True) # Make sure the socket is created utils_misc.wait_for(lambda: os.path.isdir(source_socket), timeout=3) process.run("chown qemu:qemu %s" % source_socket, ignore_status=False) process.run('chcon -t svirt_image_t %s' % source_socket, ignore_status=False, shell=True) except Exception as err: cmd = "pkill virtiofsd" process.run(cmd, shell=True) test.fail("{}".format(err)) def prepare_stress_script(script_path, script_content): """ Refer to xfstest generic/531. Create stress test script to create a lot of unlinked files. 
:param source_path: The path of script :param content: The content of stress script """ logging.debug("stress script path: %s content: %s" % (script_path, script_content)) script_lines = script_content.split(';') try: with open(script_path, 'w') as fd: fd.write('\n'.join(script_lines)) os.chmod(script_path, 0o777) except Exception as e: test.error("Prepare the guest stress script failed %s" % e) def run_stress_script(session, script_path): """ Run stress script in the guest :param session: guest session :param script_path: The path of script in the guest """ # Set ULIMIT_NOFILE to increase the number of unlinked files session.cmd("ulimit -n 500000 && /usr/bin/python3 %s" % script_path, timeout=120) def umount_fs(vm): """ Unmount the filesystem in guest :param vm: filesystem in this vm that should be unmounted """ if vm.is_alive(): session = vm.wait_for_login() for fs_dev in fs_devs: mount_dir = '/var/tmp/' + fs_dev.target['dir'] session.cmd('umount -f %s' % mount_dir, ignore_all_errors=True) session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True) session.close() def check_detached_xml(vm): """ Check whether there is xml about the filesystem device in the vm xml :param vm: the vm to be checked """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name) filesystems = vmxml.devices.by_device_tag('filesystem') if filesystems: test.fail("There should be no filesystem devices in guest " "xml after hotunplug") def check_filesystem_in_guest(vm, fs_dev): """ Check whether there is virtiofs in vm :param vm: the vm to be checked :param fs_dev: the virtiofs device to be checked """ session = vm.wait_for_login() mount_dir = '/var/tmp/' + fs_dev.target['dir'] cmd = "mkdir %s; mount -t virtiofs %s %s" % ( mount_dir, fs_dev.target['dir'], mount_dir) status, output = session.cmd_status_output(cmd, timeout=300) session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True) if not status: test.fail( "Mount virtiofs should failed after hotunplug device. 
%s" % output) session.close() start_vm = params.get("start_vm", "no") vm_names = params.get("vms", "avocado-vt-vm1").split() cache_mode = params.get("cache_mode", "none") xattr = params.get("xattr", "on") lock_posix = params.get("lock_posix", "on") flock = params.get("flock", "on") xattr = params.get("xattr", "on") path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd") queue_size = int(params.get("queue_size", "512")) driver_type = params.get("driver_type", "virtiofs") guest_num = int(params.get("guest_num", "1")) fs_num = int(params.get("fs_num", "1")) vcpus_per_cell = int(params.get("vcpus_per_cell", 2)) dir_prefix = params.get("dir_prefix", "mount_tag") error_msg_start = params.get("error_msg_start", "") error_msg_save = params.get("error_msg_save", "") status_error = params.get("status_error", "no") == "yes" socket_file_checking = params.get("socket_file_checking", "no") == "yes" suspend_resume = params.get("suspend_resume", "no") == "yes" managedsave = params.get("managedsave", "no") == "yes" coldplug = params.get("coldplug", "no") == "yes" hotplug_unplug = params.get("hotplug_unplug", "no") == "yes" detach_device_alias = params.get("detach_device_alias", "no") == "yes" extra_hugepages = params.get_numeric("extra_hugepages") edit_start = params.get("edit_start", "no") == "yes" with_hugepages = params.get("with_hugepages", "yes") == "yes" with_numa = params.get("with_numa", "yes") == "yes" with_memfd = params.get("with_memfd", "no") == "yes" source_socket = params.get("source_socket", "/var/tmp/vm001.socket") launched_mode = params.get("launched_mode", "auto") destroy_start = params.get("destroy_start", "no") == "yes" bug_url = params.get("bug_url", "") script_content = params.get("stress_script", "") stdio_handler_file = "file" == params.get("stdio_handler") fs_devs = [] vms = [] vmxml_backups = [] expected_fails_msg = [] expected_results = "" host_hp_size = utils_memory.get_huge_page_size() backup_huge_pages_num = utils_memory.get_num_huge_pages() huge_pages_num = 0 if len(vm_names) != guest_num: test.cancel("This test needs exactly %d vms." 
% guest_num) if not libvirt_version.version_compare(7, 0, 0) and not with_numa: test.cancel("Not supported without NUMA before 7.0.0") if not libvirt_version.version_compare(7, 6, 0) and destroy_start: test.cancel("Bug %s is not fixed on current build" % bug_url) try: # Define filesystem device xml for index in range(fs_num): driver = {'type': driver_type, 'queue': queue_size} source_dir = os.path.join('/var/tmp/', str(dir_prefix) + str(index)) logging.debug(source_dir) not os.path.isdir(source_dir) and os.mkdir(source_dir) target_dir = dir_prefix + str(index) source = {'socket': source_socket} target = {'dir': target_dir} if launched_mode == "auto": binary_keys = [ 'path', 'cache_mode', 'xattr', 'lock_posix', 'flock' ] binary_values = [path, cache_mode, xattr, lock_posix, flock] binary_dict = dict(zip(binary_keys, binary_values)) source = {'dir': source_dir} accessmode = "passthrough" fsdev_keys = [ 'accessmode', 'driver', 'source', 'target', 'binary' ] fsdev_values = [ accessmode, driver, source, target, binary_dict ] else: fsdev_keys = ['driver', 'source', 'target'] fsdev_values = [driver, source, target] fsdev_dict = dict(zip(fsdev_keys, fsdev_values)) logging.debug(fsdev_dict) fs_dev = libvirt_device_utils.create_fs_xml( fsdev_dict, launched_mode) logging.debug(fs_dev) fs_devs.append(fs_dev) #Start guest with virtiofs filesystem device for index in range(guest_num): logging.debug("prepare vm %s", vm_names[index]) vm = env.get_vm(vm_names[index]) vms.append(vm) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index]) vmxml_backup = vmxml.copy() vmxml_backups.append(vmxml_backup) if vmxml.max_mem < 1024000: vmxml.max_mem = 1024000 if with_hugepages: huge_pages_num += vmxml.max_mem // host_hp_size + extra_hugepages utils_memory.set_num_huge_pages(huge_pages_num) vmxml.remove_all_device_by_type('filesystem') vmxml.sync() numa_no = None if with_numa: numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1 vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name, vmxml.vcpu, numa_number=numa_no) vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name, access_mode="shared", hpgs=with_hugepages, memfd=with_memfd) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index]) logging.debug(vmxml) if launched_mode == "externally": launch_externally_virtiofs(source_dir, source_socket) if coldplug: ret = virsh.attach_device(vm_names[index], fs_devs[0].xml, flagstr='--config', debug=True) utils_test.libvirt.check_exit_status(ret, expect_error=False) else: if not hotplug_unplug: for fs in fs_devs: vmxml.add_device(fs) vmxml.sync() logging.debug(vmxml) libvirt_pcicontr.reset_pci_num(vm_names[index]) result = virsh.start(vm_names[index], debug=True) if hotplug_unplug: if stdio_handler_file: qemu_config = LibvirtQemuConfig() qemu_config.stdio_handler = "file" utils_libvirtd.Libvirtd().restart() for fs_dev in fs_devs: ret = virsh.attach_device(vm_names[index], fs_dev.xml, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error: return if status_error and not managedsave: expected_error = error_msg_start utils_test.libvirt.check_exit_status(result, expected_error) return else: utils_test.libvirt.check_exit_status(result, expect_error=False) expected_results = generate_expected_process_option( expected_results) if launched_mode == "auto": cmd = 'ps aux | grep virtiofsd | head -n 1' utils_test.libvirt.check_cmd_output(cmd, content=expected_results) if managedsave: expected_error = error_msg_save result = virsh.managedsave(vm_names[0], ignore_status=True, debug=True) 
utils_test.libvirt.check_exit_status(result, expected_error) else: shared_data(vm_names, fs_devs) if suspend_resume: virsh.suspend(vm_names[0], debug=True, ignore_status=False) time.sleep(30) virsh.resume(vm_names[0], debug=True, ignore_statue=False) elif destroy_start: session = vm.wait_for_login(timeout=120) # Prepare the guest test script script_path = os.path.join(fs_devs[0].source["dir"], "test.py") script_content %= (fs_devs[0].source["dir"], fs_devs[0].source["dir"]) prepare_stress_script(script_path, script_content) # Run guest stress script stress_script_thread = threading.Thread( target=run_stress_script, args=(session, script_path)) stress_script_thread.setDaemon(True) stress_script_thread.start() # Create a lot of unlink files time.sleep(60) virsh.destroy(vm_names[0], debug=True, ignore_status=False) ret = virsh.start(vm_names[0], debug=True) libvirt.check_exit_status(ret) elif edit_start: vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml( vm_names[0]) if vm.is_alive(): virsh.destroy(vm_names[0]) cmd = "virt-xml %s --edit --qemu-commandline '\-foo'" % vm_names[ 0] cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug(virsh.dumpxml(vm_names[0])) if cmd_result.exit_status: test.error("virt-xml edit guest failed: %s" % cmd_result) result = virsh.start(vm_names[0], ignore_status=True, debug=True) if error_msg_start: expected_fails_msg.append(error_msg_start) utils_test.libvirt.check_result( result, expected_fails=expected_fails_msg) if not libvirt_version.version_compare(6, 10, 0): # Because of bug #1897105, it was fixed in libvirt-6.10.0, # before this version, need to recover the env manually. cmd = "pkill virtiofsd" process.run(cmd, shell=True) if not vm.is_alive(): # Restoring vm and check if vm can start successfully vmxml_virtio_backup.sync() virsh.start(vm_names[0], ignore_status=False, shell=True) elif socket_file_checking: result = virsh.domid(vm_names[0]) domid = result.stdout.strip() domain_dir = "var/lib/libvirt/qemu/domain-" + domid + '-' + vm_names[ 0] if result.exit_status: test.fail("Get domid failed.") for fs_dev in fs_devs: alias = fs_dev.alias['name'] expected_pid = domain_dir + alias + '-fs.pid' expected_sock = alias + '-fs.sock' status1 = process.run('ls -l %s' % expected_pid, shell=True).exit_status status2 = process.run('ls -l %s' % expected_sock, shell=True).exit_status if not (status1 and status2): test.fail( "The socket and pid file is not as expected") elif hotplug_unplug: for vm in vms: umount_fs(vm) for fs_dev in fs_devs: if detach_device_alias: alias = fs_dev.alias['name'] cmd = 'lsof /var/log/libvirt/qemu/%s-%s-virtiofsd.log' % ( vm.name, alias) output = process.run(cmd).stdout_text.splitlines() for item in output[1:]: if stdio_handler_file: if item.split()[0] != "virtiofsd": test.fail( "When setting stdio_handler as file, the command" "to write log should be virtiofsd!" 
) else: if item.split()[0] != "virtlogd": test.fail( "When setting stdio_handler as logd, the command" "to write log should be virtlogd!") ret = virsh.detach_device_alias( vm.name, alias, ignore_status=True, debug=True, wait_for_event=True) else: ret = virsh.detach_device(vm.name, fs_dev.xml, ignore_status=True, debug=True, wait_for_event=True) libvirt.check_exit_status(ret, status_error) check_filesystem_in_guest(vm, fs_dev) check_detached_xml(vm) finally: for vm in vms: if vm.is_alive(): umount_fs(vm) vm.destroy(gracefully=False) for vmxml_backup in vmxml_backups: vmxml_backup.sync() for index in range(fs_num): process.run('rm -rf %s' % '/var/tmp/' + str(dir_prefix) + str(index), ignore_status=False) process.run('rm -rf %s' % source_socket, ignore_status=False, shell=True) if launched_mode == "externally": process.run('restorecon %s' % path, ignore_status=False, shell=True) utils_memory.set_num_huge_pages(backup_huge_pages_num) if stdio_handler_file: qemu_config.restore() utils_libvirtd.Libvirtd().restart()
def run(test, params, env): """ Test steps: 1) Get the params from params. 2) check the environment 3) Strat the VM and check whether the VM been started successfully 4) Compare the Hugepage memory size to the Guest memory setted. 5) Check the hugepage memory usage. 6) Clean up """ test_type = params.get("test_type", 'normal') tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no') shp_num = int(params.get("static_hugepage_num", 1024)) thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no') mb_enable = 'yes' == params.get("mb_enable", 'yes') delay = int(params.get("delay_time", 10)) # Skip cases early vm_names = [] if test_type == "contrast": vm_names = params.get("vms").split()[:2] if len(vm_names) < 2: raise error.TestNAError("This test requires two VMs") # confirm no VM running allvms = virsh.dom_list('--name').stdout.strip() if allvms != '': raise error.TestNAError("one or more VMs are alive") err_range = float(params.get("mem_error_range", 1.25)) else: vm_names.append(params.get("main_vm")) if test_type == "stress": target_path = params.get("target_path", "/tmp/test.out") elif test_type == "unixbench": unixbench_control_file = params.get("unixbench_controle_file", "unixbench5.control") # backup orignal setting shp_orig_num = utils_memory.get_num_huge_pages() thp_orig_status = utils_memory.get_transparent_hugepage() page_size = utils_memory.get_huge_page_size() # mount/umount hugetlbfs tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs") if tlbfs_enable is True: if tlbfs_status is not True: utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs") else: if tlbfs_status is True: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") # set static hugepage utils_memory.set_num_huge_pages(shp_num) # enable/disable transparent hugepage if thp_enable: utils_memory.set_transparent_hugepage('always') else: utils_memory.set_transparent_hugepage('never') # set/del memoryBacking tag for vm_name in vm_names: if mb_enable: vm_xml.VMXML.set_memoryBacking_tag(vm_name) else: vm_xml.VMXML.del_memoryBacking_tag(vm_name) utils_libvirtd.libvirtd_restart() non_started_free = utils_memory.get_num_huge_pages_free() vms = [] sessions = [] try: for vm_name in vm_names: # try to start vm and login try: vm = env.get_vm(vm_name) vm.start() except VMError, e: if mb_enable and not tlbfs_enable: # if hugetlbfs not be mounted, # VM start with memoryBacking tag will fail logging.debug(e) else: error_msg = "Test failed in positive case. 
error: %s\n" % e raise error.TestFail(error_msg) if vm.is_alive() is not True: break vms.append(vm) # try to login and run some program try: session = vm.wait_for_login() except (LoginError, ShellError), e: error_msg = "Test failed in positive case.\n error: %s\n" % e raise error.TestFail(error_msg) sessions.append(session) if test_type == "stress": # prepare file for increasing stress stress_path = prepare_c_file() remote.scp_to_remote(vm.get_address(), 22, 'root', params.get('password'), stress_path, "/tmp/") # Try to install gcc on guest first utils_package.package_install(["gcc"], session, 360) # increasing workload session.cmd("gcc %s -o %s" % (stress_path, target_path)) session.cmd("%s &" % target_path) if test_type == "unixbench": params["main_vm"] = vm_name params["test_control_file"] = unixbench_control_file control_path = os.path.join(test.virtdir, "control", unixbench_control_file) # unixbench test need 'patch' and 'perl' commands installed utils_package.package_install(["patch", "perl"], session, 360) command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True) session.cmd("%s &" % command, ignore_all_errors=True) # wait for autotest running on vm time.sleep(delay) def _is_unixbench_running(): cmd = "ps -ef | grep perl | grep Run" return not session.cmd_status(cmd) if not utils_misc.wait_for(_is_unixbench_running, timeout=240): raise error.TestNAError("Failed to run unixbench in guest," " please make sure some necessary" " packages are installed in guest," " such as gcc, tar, bzip2") logging.debug("Unixbench test is running in VM")
else: if shp_num == 0 and started_anon <= 0: raise error.TestFail("VM doesn't use transparent" " hugepage, while static" " hugepage is disabled") finally: # end up session for session in sessions: session.close() for vm in vms: if vm.is_alive(): vm.destroy() for vm_name in vm_names: if mb_enable: vm_xml.VMXML.del_memoryBacking_tag(vm_name) else: vm_xml.VMXML.set_memoryBacking_tag(vm_name) utils_libvirtd.libvirtd_restart() if tlbfs_enable is True: if tlbfs_status is not True: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") else: if tlbfs_status is True: utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs") utils_memory.set_num_huge_pages(shp_orig_num) utils_memory.set_transparent_hugepage(thp_orig_status)
def cleanup_test_virtio_mem():
    """
    Clean up environment
    """
    if utils_memory.get_num_huge_pages() != ORG_HP:
        utils_memory.set_num_huge_pages(ORG_HP)
def run(test, params, env):
    """
    Test steps:
    1) Get the params from params.
    2) Check the environment.
    3) Start the VM and check whether it started successfully.
    4) Compare the hugepage memory size to the configured guest memory.
    5) Check the hugepage memory usage.
    6) Clean up.
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # Skip cases early
    vm_names = []
    if test_type == "contrast":
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            test.cancel("This test requires two VMs")
        # confirm no VM running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            test.cancel("one or more VMs are alive")
        err_range = float(params.get("mem_error_range", 1.25))
    else:
        vm_names.append(params.get("main_vm"))
        if test_type == "stress":
            target_path = params.get("target_path", "/tmp/test.out")
        elif test_type == "unixbench":
            unixbench_control_file = params.get("unixbench_controle_file",
                                                "unixbench5.control")

    # back up the original settings
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)
    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)
    utils_libvirtd.libvirtd_restart()

    non_started_free = utils_memory.get_num_huge_pages_free()
    vms = []
    sessions = []
    try:
        for vm_name in vm_names:
            # try to start vm and login
            try:
                vm = env.get_vm(vm_name)
                vm.start()
            except VMError as e:
                if mb_enable and not tlbfs_enable:
                    # if hugetlbfs is not mounted, a VM started with the
                    # memoryBacking tag will fail
                    logging.debug(e)
                else:
                    error_msg = "Test failed in positive case.\n error: %s\n" % e
                    test.fail(error_msg)
            if vm.is_alive() is not True:
                break
            vms.append(vm)

            # try to login and run some program
            try:
                session = vm.wait_for_login()
            except (LoginError, ShellError) as e:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                test.fail(error_msg)
            sessions.append(session)

            if test_type == "stress":
                # prepare file for increasing stress
                stress_path = prepare_c_file()
                remote.scp_to_remote(vm.get_address(), 22, 'root',
                                     params.get('password'), stress_path,
                                     "/tmp/")
                # Try to install gcc on guest first
                utils_package.package_install(["gcc"], session, 360)
                # increasing workload
                session.cmd("gcc %s -o %s" % (stress_path, target_path))
                session.cmd("%s &" % target_path)

            if test_type == "unixbench":
                params["main_vm"] = vm_name
                params["test_control_file"] = unixbench_control_file
                control_path = os.path.join(test.virtdir, "control",
                                            unixbench_control_file)
                # unixbench needs the 'patch' and 'perl' commands installed
                utils_package.package_install(["patch", "perl"], session, 360)
                command = utils_test.run_autotest(vm, session, control_path,
                                                  None, None, params,
                                                  copy_only=True)
                session.cmd("%s &" % command, ignore_all_errors=True)
                # wait for autotest running on vm
                time.sleep(delay)

                def _is_unixbench_running():
                    cmd = "ps -ef | grep perl | grep Run"
                    return not session.cmd_status(cmd)

                if not utils_misc.wait_for(_is_unixbench_running, timeout=240):
                    test.cancel("Failed to run unixbench in guest,"
                                " please make sure some necessary"
                                " packages are installed in guest,"
                                " such as gcc, tar, bzip2")
                logging.debug("Unixbench test is running in VM")

        if test_type == "contrast":
            # wait for the vm to finish starting completely
            time.sleep(delay)

        if not (mb_enable and not tlbfs_enable):
            logging.debug("starting analyzing the hugepage usage...")
            pid = vms[-1].get_pid()
            started_free = utils_memory.get_num_huge_pages_free()
            # Get the thp usage from /proc/pid/smaps
            started_anon = utils_memory.get_num_anon_huge_pages(pid)
            static_used = non_started_free - started_free
            hugepage_used = static_used * page_size

            if test_type == "contrast":
                # get qemu-kvm memory consumption by top
                cmd = "top -b -n 1|awk '$1 == %s {print $10}'" % pid
                rate = process.run(cmd, ignore_status=False, verbose=True,
                                   shell=True).stdout_text.strip()
                qemu_kvm_used = (utils_memory.memtotal() * float(rate)) / 100
                logging.debug("rate: %s, used-by-qemu-kvm: %f, used-by-vm: %d",
                              rate, qemu_kvm_used, hugepage_used)
                if (abs(qemu_kvm_used - hugepage_used) >
                        hugepage_used * (err_range - 1)):
                    test.fail("Error for hugepage usage")
            if test_type == "stress":
                if non_started_free <= started_free:
                    logging.debug("hugepage usage:%d -> %d",
                                  non_started_free, started_free)
                    test.fail("Error for hugepage usage with stress")
            if mb_enable is not True:
                if static_used > 0:
                    test.fail("VM use static hugepage without"
                              " memoryBacking element")
                if thp_enable is not True and started_anon > 0:
                    test.fail("VM use transparent hugepage, while"
                              " it's disabled")
            else:
                if tlbfs_enable is not True:
                    if static_used > 0:
                        test.fail("VM use static hugepage without tlbfs"
                                  " mounted")
                    if thp_enable and started_anon <= 0:
                        test.fail("VM doesn't use transparent"
                                  " hugepage")
                else:
                    if shp_num > 0:
                        if static_used <= 0:
                            test.fail("VM doesn't use static"
                                      " hugepage")
                    else:
                        if static_used > 0:
                            test.fail("VM use static hugepage,"
                                      " while it's set to zero")
                    if thp_enable is not True:
                        if started_anon > 0:
                            test.fail("VM use transparent hugepage,"
                                      " while it's disabled")
                    else:
                        if shp_num == 0 and started_anon <= 0:
                            test.fail("VM doesn't use transparent"
                                      " hugepage, while static"
                                      " hugepage is disabled")
    finally:
        # end up session
        for session in sessions:
            session.close()
        for vm in vms:
            if vm.is_alive():
                vm.destroy()
        for vm_name in vm_names:
            if mb_enable:
                vm_xml.VMXML.del_memoryBacking_tag(vm_name)
            else:
                vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        utils_libvirtd.libvirtd_restart()
        if tlbfs_enable is True:
            if tlbfs_status is not True:
                utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        else:
            if tlbfs_status is True:
                utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_memory.set_num_huge_pages(shp_orig_num)
        utils_memory.set_transparent_hugepage(thp_orig_status)
def run(test, params, env): """ Test virtiofs filesystem device: 1.Start guest with 1/2 virtiofs filesystem devices. 2.Start 2 guest with the same virtiofs filesystem device. 3.Coldplug/Coldunplug virtiofs filesystem device 4.Share data between guests and host. 5.Lifecycle for guest with virtiofs filesystem device. """ def generate_expected_process_option(expected_results): """ Generate expected virtiofsd process option """ if cache_mode != "auto": expected_results = "cache=%s" % cache_mode if xattr == "on": expected_results += ",xattr" elif xattr == "off": expected_results += ",no_xattr" if flock == "on": expected_results += ",flock" else: expected_results += ",no_flock" if lock_posix == "on": expected_results += ",posix_lock" else: expected_results += ",no_posix_lock" logging.debug(expected_results) return expected_results def shared_data(vm_names, fs_devs): """ Shared data between guests and host: 1.Mount dir in guest; 2.Write a file in guest; 3.Check the md5sum value are the same in guests and host; """ md5s = [] for vm in vms: session = vm.wait_for_login() for fs_dev in fs_devs: logging.debug(fs_dev) session.cmd('rm -rf %s' % fs_dev.source['dir'], ignore_all_errors=False) session.cmd('mkdir -p %s' % fs_dev.source['dir']) logging.debug("mount virtiofs dir in guest") cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'], fs_dev.source['dir']) status, output = session.cmd_status_output(cmd, timeout=300) if status != 0: session.close() test.fail("mount virtiofs dir failed: %s" % output) if vm == vms[0]: filename = fs_dev.source['dir'] + '/' + vm.name cmd = "dd if=/dev/urandom of=%s bs=1M count=512 oflag=direct" % filename status, output = session.cmd_status_output(cmd, timeout=300) if status != 0: session.close() test.fail("Write data failed: %s" % output) md5_value = session.cmd_status_output("md5sum %s" % filename)[1].strip() md5s.append(md5_value) logging.debug(md5_value) md5_value = process.run("md5sum %s" % filename).stdout_text.strip() logging.debug(md5_value) md5s.append(md5_value) session.close() if len(set(md5s)) != len(fs_devs): test.fail("The md5sum value are not the same in guests and host") start_vm = params.get("start_vm", "no") vm_names = params.get("vms", "avocado-vt-vm1").split() cache_mode = params.get("cache_mode", "none") xattr = params.get("xattr", "on") lock_posix = params.get("lock_posix", "on") flock = params.get("flock", "on") xattr = params.get("xattr", "on") path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd") queue_size = int(params.get("queue_size", "512")) driver_type = params.get("driver_type", "virtiofs") guest_num = int(params.get("guest_num", "1")) fs_num = int(params.get("fs_num", "1")) vcpus_per_cell = int(params.get("vcpus_per_cell", 2)) source_dir_prefix = params.get("source_dir_prefix", "/dir") target_prefix = params.get("target_prefix", "mount_tag") error_msg_start = params.get("error_msg_start", "") error_msg_save = params.get("error_msg_save", "") status_error = params.get("status_error", "no") == "yes" socket_file_checking = params.get("socket_file_checking", "no") == "yes" suspend_resume = params.get("suspend_resume", "no") == "yes" managedsave = params.get("managedsave", "no") == "yes" coldplug = params.get("coldplug", "no") == "yes" fs_devs = [] vms = [] vmxml_backups = [] expected_results = "" huge_pages_num = 0 if len(vm_names) != guest_num: test.cancel("This test needs exactly %d vms." 
% guest_num) try: # Define filesystem device xml for index in range(fs_num): fsdev_keys = ['accessmode', 'driver', 'source', 'target', 'binary'] accessmode = "passthrough" driver = {'type': driver_type, 'queue': queue_size} source_dir = str(source_dir_prefix) + str(index) logging.debug(source_dir) not os.path.isdir(source_dir) and os.mkdir(source_dir) target_dir = target_prefix + str(index) source = {'dir': source_dir} target = {'dir': target_dir} fsdev_dict = [accessmode, driver, source, target] binary_keys = [ 'path', 'cache_mode', 'xattr', 'lock_posix', 'flock' ] binary_values = [path, cache_mode, xattr, lock_posix, flock] binary_dict = dict(zip(binary_keys, binary_values)) fsdev_values = [accessmode, driver, source, target, binary_dict] fsdev_dict = dict(zip(fsdev_keys, fsdev_values)) logging.debug(fsdev_dict) fs_dev = libvirt_device_utils.create_fs_xml(fsdev_dict) logging.debug(fs_dev) fs_devs.append(fs_dev) #Start guest with virtiofs filesystem device for index in range(guest_num): logging.debug("prepare vm %s", vm_names[index]) vm = env.get_vm(vm_names[index]) vms.append(vm) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index]) vmxml_backup = vmxml.copy() vmxml_backups.append(vmxml_backup) if vmxml.max_mem < 1024000: vmxml.max_mem = 1024000 hp_obj = test_setup.HugePageConfig(params) host_hp_size = hp_obj.get_hugepage_size() huge_pages_num += vmxml.max_mem // host_hp_size + 128 utils_memory.set_num_huge_pages(huge_pages_num) vmxml.remove_all_device_by_type('filesystem') vmxml.sync() numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1 vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name, vmxml.vcpu, numa_number=numa_no) vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name, access_mode="shared") vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index]) logging.debug(vmxml) if coldplug: ret = virsh.attach_device(vm_names[index], fs_devs[0].xml, flagstr='--config', debug=True) utils_test.libvirt.check_exit_status(ret, expect_error=False) else: for fs in fs_devs: vmxml.add_device(fs) vmxml.sync() logging.debug(vmxml) result = virsh.start(vm_names[index], debug=True) if status_error and not managedsave: expected_error = error_msg_start utils_test.libvirt.check_exit_status(result, expected_error) return else: utils_test.libvirt.check_exit_status(result, expect_error=False) expected_results = generate_expected_process_option( expected_results) cmd = 'ps aux | grep virtiofsd | head -n 1' utils_test.libvirt.check_cmd_output(cmd, content=expected_results) if managedsave: expected_error = error_msg_save result = virsh.managedsave(vm_names[0], ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) else: shared_data(vm_names, fs_devs) if suspend_resume: virsh.suspend(vm_names[0], debug=True, ignore_status=False) time.sleep(30) virsh.resume(vm_names[0], debug=True, ignore_statue=False) elif socket_file_checking: result = virsh.domid(vm_names[0]) domid = result.stdout.strip() domain_dir = "var/lib/libvirt/qemu/domain-" + domid + '-' + vm_names[ 0] if result.exit_status: test.fail("Get domid failed.") for fs_dev in fs_devs: alias = fs_dev.alias['name'] expected_pid = domain_dir + alias + '-fs.pid' expected_sock = alias + '-fs.sock' status1 = process.run('ls -l %s' % expected_pid, shell=True).exit_status status2 = process.run('ls -l %s' % expected_sock, shell=True).exit_status if not (status1 and status2): test.fail( "The socket and pid file is not as expected") finally: for vm in vms: if vm.is_alive(): session = vm.wait_for_login() for fs_dev in 
fs_devs: mount_dir = fs_dev.source['dir'] logging.debug(mount_dir) session.cmd('umount -f %s' % mount_dir, ignore_all_errors=True) session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True) session.close() vm.destroy(gracefully=False) for index in range(fs_num): process.run('rm -rf %s' % source_dir_prefix + str(index), ignore_status=False) for vmxml_backup in vmxml_backups: vmxml_backup.sync()
def run(test, params, env):
    """
    Test steps:
    1) Get the params from params.
    2) Check the environment.
    3) Start the VM and check whether it started successfully.
    4) Compare the hugepage memory size to the configured guest memory.
    5) Check the hugepage memory usage.
    6) Clean up.
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # back up the original settings
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    if test_type == "contrast":
        range = float(params.get("mem_error_range", 1.25))
    elif test_type == "stress":
        target_path = params.get("target_path", "/tmp/test.out")
    elif test_type == "unixbench":
        unixbench_control_file = params.get("unixbench_controle_file",
                                            "unixbench5.control")

    vm_names = []
    if test_type == "contrast":
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            raise error.TestNAError("Hugepage stress test needs two VMs.")
        # confirm no VM is running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            raise error.TestNAError("one or more VMs are alive.")
    else:
        vm_names.append(params.get("main_vm"))

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)
    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)
    utils_libvirtd.libvirtd_restart()

    non_started_free = utils_memory.get_num_huge_pages_free()
    vms = []
    sessions = []
    for vm_name in vm_names:
        # try to start vm and login
        try:
            vm = env.get_vm(vm_name)
            vm.start()
        except VMError as e:
            if mb_enable and not tlbfs_enable:
                # if hugetlbfs is not mounted, a VM started with the
                # memoryBacking tag will fail
                logging.debug(e)
                pass  # jump out of the for-loop
            else:
                error_msg = "Test failed in positive case. error: %s\n" % e
                raise error.TestFail(error_msg)
        if vm.is_alive() is not True:
            break
        vms.append(vm)

        # try to login and run some program
        try:
            session = vm.wait_for_login()
        except (LoginError, ShellError) as e:
            error_msg = "Test failed in positive case.\n error: %s\n" % e
            raise error.TestFail(error_msg)