def run(test, params, env):
    """
    KVM block resize test:
    1) Start guest with data image and check the data image size.
    2) Enlarge(or Decrease) the data image and check it in guest.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_block_size(session, block_cmd, block_pattern):
        """
        Get block size inside guest.

        Runs ``block_cmd`` in the guest and extracts the first match of
        ``block_pattern``.  A purely numeric match is returned as int
        (bytes); a match with a unit suffix (e.g. "10G") is normalized
        to bytes and returned as float.
        """
        output = session.cmd_output(block_cmd)
        block_size = re.findall(block_pattern, output)
        if block_size:
            # No letters -> plain byte count; otherwise a human-readable
            # size that must be normalized to bytes first.
            if not re.search("[a-zA-Z]", block_size[0]):
                return int(block_size[0])
            else:
                return float(utils_misc.normalize_data_size(block_size[0],
                                                            order_magnitude="B"))
        else:
            raise error.TestError("Can not find the block size for the"
                                  " deivce. The output of command"
                                  " is: %s" % output)

    def compare_block_size(session, block_cmd, block_pattern):
        """
        Compare the current block size with the expected size.

        Returns True when the size seen in the guest is within the
        accepted ratio below the expected size, otherwise None.

        NOTE(review): this helper ignores its own ``block_cmd`` and
        ``block_pattern`` arguments and instead reads the outer names
        ``block_size_cmd``, ``block_size_pattern``, ``block_size`` and
        ``accept_ratio``, which are defined later in the (not shown)
        body of this test — confirm against the full module.
        """
        # ``current_size`` is published as a module global, presumably so
        # a wait/poll loop elsewhere can report the last observed size.
        global current_size
        current_size = get_block_size(session, block_size_cmd,
                                      block_size_pattern)
        if (current_size <= block_size and
                current_size >= block_size * (1 - accept_ratio)):
            logging.info("Block Resizing Finished !!! \n"
                         "Current size %s is same as the expected %s",
                         current_size, block_size)
            return True
        return

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)

    # On Windows guests, optionally enable driver verifier for the block
    # driver under test; its teardown is deferred via funcatexit.
    if params.get("need_enable_verifier", "no") == "yes":
        error.context("Enable %s driver verifier" % params["driver_name"],
                      logging.info)
        try:
            session = utils_test.qemu.setup_win_driver_verifier(
                session, params["driver_name"], vm, timeout)
            funcatexit.register(env, params.get("type"),
                                utils_test.qemu.clear_win_driver_verifier,
                                session, vm, timeout)
        except Exception, e:
            raise error.TestFail(e)
def run(test, params, env):
    """
    Memory balloon with thp
    1. Boot up a guest with balloon support, record memory fragement
    2. Make fragement in guest with tmpfs
    3. check the memory fragement with proc system, should increase
    4. Do memory balloon the memory size ballooned should be a legal value
    5. Check the memory fragement with proc system, should decrease

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _current_buddy_info(wait=False):
        """Read node-0 buddy info from the guest, optionally settling 10s first."""
        if wait:
            time.sleep(10)
        value = utils_memory.get_buddy_info('0', session=session)['0']
        logging.info('Checked buddy info, value is %s', value)
        return value

    frag_dir = params['fragement_dir']
    vm = env.get_vm(params["main_vm"])
    session = vm.wait_for_login()

    # Baseline fragmentation before we touch anything.
    baseline = _current_buddy_info()

    logging.info('Making fragement on guest...')
    session.cmd_output_safe(params['cmd_make_fragement'], timeout=600)
    # Delete every other tmpfs file (odd-numbered ones) to punch holes
    # in memory and raise fragmentation.
    for idx in (1, 3, 5, 7, 9):
        session.cmd_output_safe('rm -f %s/*%s' % (frag_dir, idx))
    funcatexit.register(env, params['type'], clean_env, session, frag_dir)

    after_fragment = _current_buddy_info(wait=True)
    if baseline >= after_fragment:
        test.fail('Buddy info should increase.')

    # Balloon the guest down by 1 GiB from its configured size.
    total_mem = int(float(utils_misc.normalize_data_size("%sM"
                                                         % params["mem"])))
    vm.balloon(total_mem - 1024)

    after_balloon = _current_buddy_info(wait=True)
    if after_balloon >= after_fragment:
        test.fail('Buddy info should decrease.')
def run_trans_hugepage(test, params, env):
    """
    KVM kernel hugepages user side test:
    1) Smoke test
    2) Stress test

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def get_mem_status(field, role):
        """
        Return the value of ``field`` from /proc/meminfo (in kB, as str).

        :param field: meminfo field name prefix, e.g. 'AnonHugePages'.
        :param role: 'host' reads the host meminfo, anything else reads
                     the guest's via the logged-in session.
        """
        # Renamed from 'params' to avoid shadowing the test params dict.
        if role == "host":
            info = utils.system_output("cat /proc/meminfo")
        else:
            info = session.cmd("cat /proc/meminfo")
        for h in re.split("\n+", info):
            if h.startswith("%s" % field):
                output = re.split(r'\s+', h)[1]
                return output

    dd_timeout = float(params.get("dd_timeout", 900))
    # BUG FIX: params values are strings. The original kept `mem` as a
    # string; under Python 2's number-vs-string ordering the MemFree
    # guard below was then *always* true, silently replacing the
    # configured size with guest MemFree. Convert to int up front.
    mem = int(params['mem'])
    failures = []
    debugfs_flag = 1
    debugfs_path = os.path.join(test.tmpdir, 'debugfs')
    mem_path = os.path.join("/tmp", 'thp_space')
    login_timeout = float(params.get("login_timeout", "3600"))

    error.context("smoke test setup")
    if not os.path.ismount(debugfs_path):
        if not os.path.isdir(debugfs_path):
            os.makedirs(debugfs_path)
        utils.run("mount -t debugfs none %s" % debugfs_path)

    vm = utils_test.get_living_vm(env, params.get("main_vm"))
    session = utils_test.wait_for_login(vm, timeout=login_timeout)
    funcatexit.register(env, params.get("type"), cleanup,
                        debugfs_path, session)

    logging.info("Smoke test start")
    error.context("smoke test")
    nr_ah_before = int(get_mem_status('AnonHugePages', 'host'))
    if nr_ah_before <= 0:
        e_msg = 'smoke: Host is not using THP'
        logging.error(e_msg)
        failures.append(e_msg)

    # Protect system from oom killer: cap at guest free memory (MB).
    if int(get_mem_status('MemFree', 'guest')) / 1024 < mem:
        mem = int(get_mem_status('MemFree', 'guest')) / 1024

    session.cmd("mkdir -p %s" % mem_path)
    session.cmd("mount -t tmpfs -o size=%sM none %s" % (str(mem), mem_path))

    # One dd filling the tmpfs with 4 MB blocks.
    count = mem / 4
    session.cmd("dd if=/dev/zero of=%s/1 bs=4000000 count=%s" %
                (mem_path, count), timeout=dd_timeout)

    nr_ah_after = int(get_mem_status('AnonHugePages', 'host'))
    if nr_ah_after <= nr_ah_before:
        e_msg = ('smoke: Host did not use new THP during dd')
        logging.error(e_msg)
        failures.append(e_msg)

    if debugfs_flag == 1:
        # FIX: close the debugfs file handle (was leaked via bare open()).
        with open('%s/kvm/largepages' % debugfs_path, 'r') as largepages:
            if int(largepages.read()) <= 0:
                e_msg = 'smoke: KVM is not using THP'
                logging.error(e_msg)
                failures.append(e_msg)

    logging.info("Smoke test finished")

    # Use parallel dd as stress for memory
    count = count / 3
    logging.info("Stress test start")
    error.context("stress test")
    cmd = "rm -rf %s/*; for i in `seq %s`; do dd " % (mem_path, count)
    cmd += "if=/dev/zero of=%s/$i bs=4000000 count=1& done;wait" % mem_path
    output = session.cmd_output(cmd, timeout=dd_timeout)

    # Tolerate up to 5% of the dd instances failing on ENOSPC.
    if len(re.findall("No space", output)) > count * 0.05:
        e_msg = "stress: Too many dd instances failed in guest"
        logging.error(e_msg)
        failures.append(e_msg)

    # Kill any dd still running before unmounting the tmpfs.
    try:
        output = session.cmd('pidof dd')
    except Exception:
        output = None
    if output is not None:
        for i in re.split('\n+', output):
            session.cmd('kill -9 %s' % i)
    session.cmd("umount %s" % mem_path)
    logging.info("Stress test finished")

    error.context("")
    if failures:
        raise error.TestFail("THP base test reported %s failures:\n%s" %
                             (len(failures), "\n".join(failures)))
def run(test, params, env):
    """
    KVM kernel hugepages user side test:
    1) Smoke test
    2) Stress test

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def get_mem_status(field, role):
        """
        Return the value of ``field`` from /proc/meminfo (in kB, as str).

        :param field: meminfo field name prefix, e.g. 'AnonHugePages'.
        :param role: 'host' reads the host meminfo, anything else reads
                     the guest's via the logged-in session.
        """
        # Renamed from 'params' to avoid shadowing the test params dict.
        if role == "host":
            info = utils.system_output("cat /proc/meminfo")
        else:
            info = session.cmd("cat /proc/meminfo")
        for h in re.split("\n+", info):
            if h.startswith("%s" % field):
                output = re.split(r'\s+', h)[1]
                return output

    dd_timeout = float(params.get("dd_timeout", 900))
    # BUG FIX: params values are strings. The original kept `mem` as a
    # string; under Python 2's number-vs-string ordering the MemFree
    # guard below was then *always* true, silently replacing the
    # configured size with guest MemFree. Convert to int up front.
    mem = int(params['mem'])
    failures = []
    debugfs_flag = 1
    debugfs_path = os.path.join(test.tmpdir, 'debugfs')
    mem_path = os.path.join("/tmp", 'thp_space')
    login_timeout = float(params.get("login_timeout", "3600"))

    error.context("smoke test setup")
    if not os.path.ismount(debugfs_path):
        if not os.path.isdir(debugfs_path):
            os.makedirs(debugfs_path)
        utils.run("mount -t debugfs none %s" % debugfs_path)

    vm = env.get_vm(params.get("main_vm"))
    session = vm.wait_for_login(timeout=login_timeout)
    funcatexit.register(env, params.get("type"), cleanup,
                        debugfs_path, session)

    logging.info("Smoke test start")
    error.context("smoke test")
    nr_ah_before = int(get_mem_status('AnonHugePages', 'host'))
    if nr_ah_before <= 0:
        e_msg = 'smoke: Host is not using THP'
        logging.error(e_msg)
        failures.append(e_msg)

    # Protect system from oom killer: cap at guest free memory (MB).
    if int(get_mem_status('MemFree', 'guest')) / 1024 < mem:
        mem = int(get_mem_status('MemFree', 'guest')) / 1024

    session.cmd("mkdir -p %s" % mem_path)
    session.cmd("mount -t tmpfs -o size=%sM none %s" % (str(mem), mem_path))

    # One dd filling the tmpfs with 4 MB blocks.
    count = mem / 4
    session.cmd("dd if=/dev/zero of=%s/1 bs=4000000 count=%s" %
                (mem_path, count), timeout=dd_timeout)

    nr_ah_after = int(get_mem_status('AnonHugePages', 'host'))
    if nr_ah_after <= nr_ah_before:
        e_msg = ('smoke: Host did not use new THP during dd')
        logging.error(e_msg)
        failures.append(e_msg)

    if debugfs_flag == 1:
        # FIX: close the debugfs file handle (was leaked via bare open()).
        with open('%s/kvm/largepages' % debugfs_path, 'r') as largepages:
            if int(largepages.read()) <= 0:
                e_msg = 'smoke: KVM is not using THP'
                logging.error(e_msg)
                failures.append(e_msg)

    logging.info("Smoke test finished")

    # Use parallel dd as stress for memory
    count = count / 3
    logging.info("Stress test start")
    error.context("stress test")
    cmd = "rm -rf %s/*; for i in `seq %s`; do dd " % (mem_path, count)
    cmd += "if=/dev/zero of=%s/$i bs=4000000 count=1& done;wait" % mem_path
    output = session.cmd_output(cmd, timeout=dd_timeout)

    # Tolerate up to 5% of the dd instances failing on ENOSPC.
    if len(re.findall("No space", output)) > count * 0.05:
        e_msg = "stress: Too many dd instances failed in guest"
        logging.error(e_msg)
        failures.append(e_msg)

    # Kill any dd still running before unmounting the tmpfs.
    try:
        output = session.cmd('pidof dd')
    except Exception:
        output = None
    if output is not None:
        for i in re.split('\n+', output):
            session.cmd('kill -9 %s' % i)
    session.cmd("umount %s" % mem_path)
    logging.info("Stress test finished")

    error.context("")
    if failures:
        raise error.TestFail("THP base test reported %s failures:\n%s" %
                             (len(failures), "\n".join(failures)))
def run(test, params, env):
    """
    Run IOzone for windows on a windows guest:
    1) Log into a guest
    2) Execute the IOzone test contained in the winutils.iso
    3) Get results
    4) Postprocess it with the IOzone postprocessing module

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def post_result(results_path, analysisdir):
        """
        Pick results from an IOzone run, generate a series graphs

        :params results_path: iozone test result path
        :params analysisdir: output of analysis result
        """
        a = postprocess_iozone.IOzoneAnalyzer(list_files=[results_path],
                                              output_dir=analysisdir)
        a.analyze()
        p = postprocess_iozone.IOzonePlotter(results_file=results_path,
                                             output_dir=analysisdir)
        p.plot_all()

    def get_driver():
        """
        Get driver name

        Resolves the Windows storage driver to verify: explicit
        ``driver_name`` param wins, otherwise it is derived from
        ``drive_format`` (scsi -> vioscsi, virtio -> viostor).
        """
        driver_name = params.get("driver_name", "")
        drive_format = params.get("drive_format")
        if not driver_name:
            if "scsi" in drive_format:
                driver_name = "vioscsi"
            elif "virtio" in drive_format:
                driver_name = "viostor"
            else:
                driver_name = None
        return driver_name

    # NOTE(review): iozone_timeout, disk_letter, disk_index, results_path,
    # analysisdir and post_result() are unused in this chunk — the IOzone
    # execution/postprocessing part of the test appears to be truncated here.
    timeout = int(params.get("login_timeout", 360))
    iozone_timeout = int(params.get("iozone_timeout"))
    disk_letter = params["disk_letter"]
    disk_index = params.get("disk_index", "2")
    results_path = os.path.join(test.resultsdir,
                                'raw_output_%s' % test.iteration)
    analysisdir = os.path.join(test.resultsdir,
                               'analysis_%s' % test.iteration)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    driver_name = get_driver()
    if driver_name:
        # Optionally enable Windows driver verifier for the storage driver;
        # its teardown is deferred via funcatexit.
        if params.get("need_enable_verifier", "no") == "yes":
            error_context.context("Enable %s driver verifier" % driver_name,
                                  logging.info)
            try:
                session = utils_test.qemu.setup_win_driver_verifier(
                    session, driver_name, vm, timeout)
                funcatexit.register(env, params.get("type"),
                                    utils_test.qemu.clear_win_driver_verifier,
                                    session, vm, timeout)
            except Exception, e:
                raise exceptions.TestFail(e)
def run(test, params, env):
    """
    Timer device boot guest:

    1) Sync the host system time with ntp server
    2) Add some load on host (Optional)
    3) Boot the guest with specific clock source
    4) Check the clock source currently used on guest
    5) Do some file operation on guest (Optional)
    6) Check the system time on guest and host (Optional)
    7) Check the hardware time on guest and host (Optional)
    8) Sleep period of time before reboot (Optional)
    9) Reboot guest (Optional)
    10) Check the system time on guest and host (Optional)
    11) Check the hardware time on guest and host (Optional)

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def verify_guest_clock_source(session, expected):
        """Fail the test unless the guest's current clocksource matches."""
        error_context.context("Check the current clocksource in guest",
                              logging.info)
        cmd = "cat /sys/devices/system/clocksource/"
        cmd += "clocksource0/current_clocksource"
        if expected not in session.cmd(cmd):
            test.fail("Guest didn't use '%s' clocksource" % expected)

    error_context.context("Sync the host system time with ntp server",
                          logging.info)
    process.system("ntpdate clock.redhat.com")

    timerdevice_host_load_cmd = params.get("timerdevice_host_load_cmd")
    if timerdevice_host_load_cmd:
        error_context.context("Add some load on host", logging.info)
        process.system(timerdevice_host_load_cmd, shell=True)
        host_load_stop_cmd = params["timerdevice_host_load_stop_cmd"]
        # Defer stopping the host load to test teardown.
        funcatexit.register(env, params["type"], _system,
                            host_load_stop_cmd)

    error_context.context("Boot a guest with kvm-clock", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    timerdevice_clksource = params.get("timerdevice_clksource")
    if timerdevice_clksource:
        try:
            verify_guest_clock_source(session, timerdevice_clksource)
        except Exception:
            # Guest is not on the wanted clocksource: rewrite its kernel
            # command line in the image and reboot without kvmclock.
            clksrc = timerdevice_clksource
            error_context.context("Shutdown guest")
            vm.destroy()
            env.unregister_vm(vm.name)
            error_context.context("Update guest kernel cli to '%s'" % clksrc,
                                  logging.info)
            image_filename = storage.get_image_filename(
                params, data_dir.get_data_dir())
            grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
            kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
                                            r".*vmlinuz-\d+.*")
            disk_obj = utils_disk.GuestFSModiDisk(image_filename)
            kernel_cfg_original = disk_obj.read_file(grub_file)
            try:
                logging.warn("Update the first kernel entry to"
                             " '%s' only" % clksrc)
                kernel_cfg = re.findall(kernel_cfg_pattern,
                                        kernel_cfg_original)[0]
            except IndexError as detail:
                test.error("Couldn't find the kernel config, regex"
                           " pattern is '%s', detail: '%s'" %
                           (kernel_cfg_pattern, detail))
            if "clocksource=" in kernel_cfg:
                kernel_cfg_new = re.sub(r"clocksource=.*?\s",
                                        "clocksource=%s" % clksrc, kernel_cfg)
            else:
                kernel_cfg_new = "%s %s" % (kernel_cfg,
                                            "clocksource=%s" % clksrc)
            disk_obj.replace_image_file_content(grub_file, kernel_cfg,
                                                kernel_cfg_new)

            error_context.context("Boot the guest", logging.info)
            vm_name = params["main_vm"]
            cpu_model_flags = params.get("cpu_model_flags")
            params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock"
            env_process.preprocess_vm(test, params, env, vm_name)
            vm = env.get_vm(vm_name)
            vm.verify_alive()
            session = vm.wait_for_login(timeout=timeout)

            error_context.context("Check the current clocksource in guest",
                                  logging.info)
            verify_guest_clock_source(session, clksrc)

    # ntp daemons would mask real clock drift; kill them (best-effort).
    error_context.context("Kill all ntp related processes")
    session.cmd("pkill ntp; true")

    if params.get("timerdevice_file_operation") == "yes":
        error_context.context("Do some file operation on guest", logging.info)
        session.cmd("dd if=/dev/zero of=/tmp/timer-test-file bs=1M count=100")
        return

    # Command to run to get the current time
    time_command = params["time_command"]
    # Filter which should match a string to be passed to time.strptime()
    time_filter_re = params["time_filter_re"]
    # Time format for time.strptime()
    time_format = params["time_format"]
    # BUG FIX: params.get() returns a *string* when the param is set in the
    # config; comparing the float drift against a string is wrong (always
    # False on Python 2, TypeError on Python 3). Convert explicitly.
    timerdevice_drift_threshold = float(
        params.get("timerdevice_drift_threshold", 3))

    error_context.context("Check the system time on guest and host",
                          logging.info)
    (host_time, guest_time) = utils_test.get_time(session, time_command,
                                                  time_filter_re, time_format)
    drift = abs(float(host_time) - float(guest_time))
    if drift > timerdevice_drift_threshold:
        test.fail("The guest's system time is different with"
                  " host's. Host time: '%s', guest time:"
                  " '%s'" % (host_time, guest_time))

    get_hw_time_cmd = params.get("get_hw_time_cmd")
    if get_hw_time_cmd:
        error_context.context("Check the hardware time on guest and host",
                              logging.info)
        host_time = process.system_output(get_hw_time_cmd, shell=True)
        guest_time = session.cmd(get_hw_time_cmd)
        drift = abs(float(host_time) - float(guest_time))
        if drift > timerdevice_drift_threshold:
            test.fail("The guest's hardware time is different with"
                      " host's. Host time: '%s', guest time:"
                      " '%s'" % (host_time, guest_time))

    if params.get("timerdevice_reboot_test") == "yes":
        sleep_time = params.get("timerdevice_sleep_time")
        if sleep_time:
            error_context.context("Sleep '%s' secs before reboot" % sleep_time,
                                  logging.info)
            sleep_time = int(sleep_time)
            time.sleep(sleep_time)

        session = vm.reboot()
        error_context.context("Check the system time on guest and host",
                              logging.info)
        (host_time, guest_time) = utils_test.get_time(session, time_command,
                                                      time_filter_re,
                                                      time_format)
        drift = abs(float(host_time) - float(guest_time))
        if drift > timerdevice_drift_threshold:
            test.fail("The guest's system time is different with"
                      " host's. Host time: '%s', guest time:"
                      " '%s'" % (host_time, guest_time))

        get_hw_time_cmd = params.get("get_hw_time_cmd")
        if get_hw_time_cmd:
            error_context.context("Check the hardware time on guest and host",
                                  logging.info)
            host_time = process.system_output(get_hw_time_cmd, shell=True)
            guest_time = session.cmd(get_hw_time_cmd)
            drift = abs(float(host_time) - float(guest_time))
            if drift > timerdevice_drift_threshold:
                test.fail("The guest's hardware time is different with"
                          " host's. Host time: '%s', guest time:"
                          " '%s'" % (host_time, guest_time))
def run_multi_disk_random_hotplug(test, params, env):
    """
    This tests the disk hotplug/unplug functionality.
    1) prepares multiple disks to be hotplugged
    2) hotplugs them
    3) verifies that they are in qtree/guest system/...
    4) unplugs them
    5) verifies they are not in qtree/guest system/...
    6) repeats $repeat_times
    *) During the whole test stress_cmd might be executed

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    @error.context_aware
    def verify_qtree(params, info_qtree, info_block, proc_scsi, qdev):
        """
        Verifies that params, info qtree, info block and /proc/scsi/ matches
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :param info_qtree: Output of "info qtree" monitor command
        :type info_qtree: string
        :param info_block: Output of "info block" monitor command
        :type info_block: dict of dicts
        :param proc_scsi: Output of "/proc/scsi/scsi" guest file
        :type proc_scsi: string
        :param qdev: qemu_devices representation
        :type qdev: virttest.qemu_devices.DevContainer
        """
        err = 0
        qtree = qemu_qtree.QtreeContainer()
        qtree.parse_info_qtree(info_qtree)
        disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
        (tmp1, tmp2) = disks.parse_info_block(info_block)
        err += tmp1 + tmp2
        err += disks.generate_params()
        err += disks.check_disk_params(params)
        (tmp1, tmp2, _, _) = disks.check_guests_proc_scsi(proc_scsi)
        err += tmp1 + tmp2

        if err:
            logging.error("info qtree:\n%s", info_qtree)
            logging.error("info block:\n%s", info_block)
            logging.error("/proc/scsi/scsi:\n%s", proc_scsi)
            logging.error(qdev.str_bus_long())
            raise error.TestFail("%s errors occurred while verifying"
                                 " qtree vs. params" % err)

    @error.context_aware
    def insert_into_qdev(qdev, param_matrix, no_disks, params):
        """
        Inserts no_disks disks int qdev using randomized args from param_matrix
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.DevContainer
        :param param_matrix: Matrix of randomizable params
        :type param_matrix: list of lists
        :param no_disks: Desired number of disks
        :type no_disks: integer
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :return: (newly added devices, number of added disks)
        :rtype: tuple(list, integer)
        """
        new_devices = []
        _new_devs_fmt = ""
        _formats = param_matrix.pop('fmt', [params.get('drive_format')])
        formats = _formats[:]
        i = 0
        while i < no_disks:
            # Set the format
            if len(formats) < 1:
                logging.warn("Can't create desired number '%s' of disk types "
                             "'%s'. Using '%d' no disks.", no_disks, _formats,
                             i)
                break
            name = 'stg%d' % i
            args = {'name': name, 'filename': stg_image_name % i}
            fmt = random.choice(formats)
            if fmt == 'virtio_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'virtio-scsi-pci'
            elif fmt == 'lsi_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'lsi53c895a'
            else:
                args['fmt'] = fmt
            # Other params
            for key, value in param_matrix.iteritems():
                args[key] = random.choice(value)

            devs = qdev.images_define_by_variables(**args)
            try:
                for dev in devs:
                    qdev.insert(dev, force=False)
            except qemu_devices.DeviceInsertError:
                # All buses are full, (TODO add bus) or remove this format
                for dev in devs:
                    if dev in qdev:
                        qdev.remove(dev, recursive=True)
                formats.remove(fmt)
                continue

            # TODO: Modify check_disk_params to use vm.devices
            # 1) modify PCI bus to accept full pci addr (02.0, 01.3, ...)
            # 2) add all devices into qemu_devices according to qtree
            # 3) check qtree vs. qemu_devices PCI representation (+children)
            #    (use qtree vs devices, if key and value_qtree == value_devices
            #    match the device and remove it from comparison.
            #    Also use blacklist to remove unnecessary stuff (like
            #    kvmclock, smbus-eeprom, ... from qtree and drive, ... from
            #    devices)
            # => then modify this to use qtree verification
            params = convert_params(params, args)
            env_process.preprocess_image(test, params.object_params(name),
                                         name)
            new_devices.extend(devs)
            _new_devs_fmt += "%s(%s) " % (name, fmt)
            i += 1
        if _new_devs_fmt:
            logging.info("Adding disks: %s", _new_devs_fmt[:-1])
        param_matrix['fmt'] = _formats
        return new_devices, params

    @error.context_aware
    def hotplug_serial(new_devices, monitor):
        """
        Do the actual hotplug of the new_devices using monitor monitor.
        :param new_devices: List of devices which should be hotplugged
        :type new_devices: List of virttest.qemu_devices.QBaseDevice
        :param monitor: Monitor which should be used for hotplug
        :type monitor: virttest.qemu_monitor.Monitor
        """
        err = []
        for device in new_devices:
            time.sleep(float(params.get('wait_between_hotplugs', 0)))
            out = device.hotplug(monitor)
            out = device.verify_hotplug(out, monitor)
            err.append(out)
        if err == [True] * len(err):    # No failures or unverified states
            logging.debug("Hotplug status: verified %d", len(err))
            return
        failed = err.count(False)
        passed = err.count(True)
        unverif = len(err) - failed - passed
        if failed == 0:
            logging.warn("Hotplug status: verified %d, unverified %d", passed,
                         unverif)
        else:
            logging.error("Hotplug status: verified %d, unverified %d, failed "
                          "%d", passed, unverif, failed)
            raise error.TestFail("Hotplug of some devices failed.")

    @error.context_aware
    def unplug_serial(new_devices, qdev, monitor):
        """
        Do the actual unplug of new_devices using monitor monitor
        :param new_devices: List of devices which should be hotplugged
        :type new_devices: List of virttest.qemu_devices.QBaseDevice
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.DevContainer
        :param monitor: Monitor which should be used for hotplug
        :type monitor: virttest.qemu_monitor.Monitor
        """
        failed = 0
        passed = 0
        unverif = 0
        # Unplug in reverse order of the hotplug.
        for device in new_devices[::-1]:
            if device in qdev:
                time.sleep(float(params.get('wait_between_unplugs', 0)))
                out = qdev.unplug(device, monitor, True)
            else:
                continue
            if out is True:
                passed += 1
            elif out is False:
                failed += 1
            else:
                unverif += 1
        # remove the images
        _disks = []
        for disk in params['images'].split(' '):
            if disk.startswith("stg"):
                env_process.postprocess_image(test, params.object_params(disk),
                                              disk)
            else:
                _disks.append(disk)
            params['images'] = " ".join(_disks)
        if failed == 0 and unverif == 0:
            logging.debug("Unplug status: verified %d", passed)
        elif failed == 0:
            logging.warn("Unplug status: verified %d, unverified %d", passed,
                         unverif)
        else:
            logging.error("Unplug status: verified %d, unverified %d, failed "
                          "%d", passed, unverif, failed)
            raise error.TestFail("Unplug of some devices failed.")

    vm = env.get_vm(params['main_vm'])
    monitor = vm.monitor
    qdev = vm.devices
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    out = vm.monitor.human_monitor_cmd("info qtree", debug=False)
    if "unknown command" in str(out):
        # BUG FIX: the fallback lambda originally accepted only 3 arguments
        # but verify_qtree is always called with 5, so hitting this path
        # raised TypeError instead of warning. Accept any arguments.
        verify_qtree = lambda *_: logging.warn("info qtree not supported. "
                                               "Can't verify qtree vs. "
                                               "guest disks.")

    stg_image_name = params['stg_image_name']
    stg_image_num = int(params['stg_image_num'])
    stg_params = params.get('stg_params', '').split(' ')
    # Join escaped-space ('\ ') fragments back into single parameters.
    i = 0
    while i < len(stg_params) - 1:
        if not stg_params[i].strip():
            i += 1
            continue
        if stg_params[i][-1] == '\\':
            stg_params[i] = '%s %s' % (stg_params[i][:-1],
                                       stg_params.pop(i + 1))
        i += 1

    param_matrix = {}
    for i in xrange(len(stg_params)):
        if not stg_params[i].strip():
            continue
        (cmd, parm) = stg_params[i].split(':', 1)
        # ',' separated list of values
        parm = parm.split(',')
        # Join escaped-comma ('\,') fragments back into single values.
        j = 0
        while j < len(parm) - 1:
            if parm[j][-1] == '\\':
                parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1))
            j += 1
        param_matrix[cmd] = parm

    # Modprobe the module if specified in config file
    module = params.get("modprobe_module")
    if module:
        session.cmd("modprobe %s" % module)

    stress_cmd = params.get('stress_cmd')
    if stress_cmd:
        funcatexit.register(env, params.get('type'), stop_stresser, vm,
                            params.get('stress_kill_cmd'))
        stress_session = vm.wait_for_login(timeout=10)
        for _ in xrange(int(params.get('no_stress_cmds', 1))):
            stress_session.sendline(stress_cmd)

    rp_times = int(params.get("repeat_times", 1))
    context_msg = "Running sub test '%s' %s"
    error.context("Verify before hotplug")
    info_qtree = vm.monitor.info('qtree', False)
    info_block = vm.monitor.info_block(False)
    proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
    verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)

    _images = params['images']
    for iteration in xrange(rp_times):
        sub_type = params.get("sub_type_before_plug")
        if sub_type:
            error.context(context_msg % (sub_type, "before hotplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        error.context("Hotplugging devices, iteration %d" % iteration)
        qdev.set_dirty()
        new_devices, params = insert_into_qdev(qdev, param_matrix,
                                               stg_image_num, params)
        hotplug_serial(new_devices, monitor)
        time.sleep(float(params.get('wait_after_hotplug', 0)))

        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
        verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)
        qdev.set_clean()

        sub_type = params.get("sub_type_after_plug")
        if sub_type:
            error.context(context_msg % (sub_type, "after hotplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        sub_type = params.get("sub_type_before_unplug")
        if sub_type:
            error.context(context_msg % (sub_type, "before hotunplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        unplug_serial(new_devices, qdev, monitor)
        time.sleep(float(params.get('wait_after_unplug', 0)))
        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
        verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)
        # we verified the unplugs, set the state to 0
        for _ in xrange(qdev.get_state()):
            qdev.set_clean()

        sub_type = params.get("sub_type_after_unplug")
        if sub_type:
            error.context(context_msg % (sub_type, "after hotunplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

    # Check for various KVM failures
    vm.verify_alive()
# Copy the guest image from the NFS server and register a teardown hook to
# restore it after the test.
# NOTE(review): this chunk appears truncated — `need_copy`, `nfs_link` and
# `local_link` are assigned but never consumed within the visible code;
# the actual copy presumably follows in the full file.
error.context("Copy image from NFS Server")
utils_test.run_image_copy(test, params, env)
src = params.get('images_good')
base_dir = params.get("images_base_dir", data_dir.get_data_dir())
dst = storage.get_image_filename(params, base_dir)
if params.get("storage_type") == "iscsi":
    # Clobber the first MB of the iscsi-backed image so stale data from a
    # previous run cannot leak into this one.
    dd_cmd = "dd if=/dev/zero of=%s bs=1M count=1" % dst
    txt = "iscsi used, need destroy data in %s" % dst
    txt += " by command: %s" % dd_cmd
    logging.info(txt)
    utils.system(dd_cmd)
image_name = os.path.basename(dst)
mount_point = params.get("dst_dir")
if mount_point and src:
    # Re-fetch a pristine image from NFS at teardown.
    funcatexit.register(env, params.get("type"), copy_file_from_nfs, src,
                        dst, mount_point, image_name)
vm = env.get_vm(params["main_vm"])
local_dir = params.get("local_dir")
if local_dir:
    local_dir = utils_misc.get_path(test.bindir, local_dir)
else:
    local_dir = test.bindir
if params.get("copy_to_local"):
    # 'copy_to_local' names params whose values are NFS-relative paths to
    # mirror into local_dir.
    for param in params.get("copy_to_local").split():
        l_value = params.get(param)
        if l_value:
            need_copy = True
            nfs_link = utils_misc.get_path(test.bindir, l_value)
            i_name = os.path.basename(l_value)
            local_link = os.path.join(local_dir, i_name)
def run_timerdevice_boot(test, params, env):
    """
    Timer device boot guest:

    1) Sync the host system time with ntp server
    2) Add some load on host (Optional)
    3) Boot the guest with specific clock source
    4) Check the clock source currently used on guest
    5) Do some file operation on guest (Optional)
    6) Check the system time on guest and host (Optional)
    7) Check the hardware time on guest and host (Optional)
    8) Sleep period of time before reboot (Optional)
    9) Reboot guest (Optional)
    10) Check the system time on guest and host (Optional)
    11) Check the hardware time on guest and host (Optional)

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    # NOTE(review): only steps 1-4 are present in this chunk; the time/
    # hardware-clock comparison steps appear to be truncated here.
    def verify_guest_clock_source(session, expected):
        """Raise TestFail unless the guest's current clocksource matches."""
        error.context("Check the current clocksource in guest", logging.info)
        cmd = "cat /sys/devices/system/clocksource/"
        cmd += "clocksource0/current_clocksource"
        if not expected in session.cmd(cmd):
            raise error.TestFail(
                "Guest didn't use '%s' clocksource" % expected)

    error.context("Sync the host system time with ntp server", logging.info)
    utils.system("ntpdate clock.redhat.com")

    timerdevice_host_load_cmd = params.get("timerdevice_host_load_cmd")
    if timerdevice_host_load_cmd:
        error.context("Add some load on host", logging.info)
        utils.system(timerdevice_host_load_cmd)
        host_load_stop_cmd = params["timerdevice_host_load_stop_cmd"]
        # Defer stopping the host load to test teardown.
        funcatexit.register(env, params["type"], utils.system,
                            host_load_stop_cmd)

    error.context("Boot a guest with kvm-clock", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    timerdevice_clksource = params.get("timerdevice_clksource")
    if timerdevice_clksource:
        try:
            verify_guest_clock_source(session, timerdevice_clksource)
        except Exception:
            # Guest is not on the wanted clocksource: rewrite its kernel
            # command line inside the image, then reboot without kvmclock.
            clksrc = timerdevice_clksource
            error.context("Shutdown guest")
            vm.destroy()
            env.unregister_vm(vm.name)
            error.context("Update guest kernel cli to '%s'" % clksrc,
                          logging.info)
            image_filename = storage.get_image_filename(params,
                                                        data_dir.get_data_dir())
            grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
            kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
                                            r".*vmlinuz-\d+.*")
            disk_obj = utils_disk.GuestFSModiDisk(image_filename)
            kernel_cfg_original = disk_obj.read_file(grub_file)
            try:
                logging.warn("Update the first kernel entry to"
                             " '%s' only" % clksrc)
                kernel_cfg = re.findall(kernel_cfg_pattern,
                                        kernel_cfg_original)[0]
            except IndexError, detail:
                raise error.TestError("Couldn't find the kernel config, regex"
                                      " pattern is '%s', detail: '%s'" %
                                      (kernel_cfg_pattern, detail))
            if "clocksource=" in kernel_cfg:
                kernel_cfg_new = re.sub("clocksource=.*?\s",
                                        "clocksource=%s" % clksrc, kernel_cfg)
            else:
                kernel_cfg_new = "%s %s" % (kernel_cfg,
                                            "clocksource=%s" % clksrc)
            disk_obj.replace_image_file_content(grub_file, kernel_cfg,
                                                kernel_cfg_new)

            error.context("Boot the guest", logging.info)
            vm_name = params["main_vm"]
            cpu_model_flags = params.get("cpu_model_flags")
            params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock"
            env_process.preprocess_vm(test, params, env, vm_name)
            vm = env.get_vm(vm_name)
            vm.verify_alive()
            session = vm.wait_for_login(timeout=timeout)

            error.context("Check the current clocksource in guest",
                          logging.info)
            verify_guest_clock_source(session, clksrc)

    # ntp daemons would mask real clock drift; kill them (best-effort).
    error.context("Kill all ntp related processes")
    session.cmd("pkill ntp; true")
def run(test, params, env): """ KVM block resize test: 1) Start guest with data image and check the data image size. 2) Enlarge(or Decrease) the data image and check it in guest. :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def get_block_size(session, block_cmd, block_pattern): """ Get block size inside guest. """ output = session.cmd_output(block_cmd) block_size = re.findall(block_pattern, output) if block_size: if not re.search("[a-zA-Z]", block_size[0]): return int(block_size[0]) else: return float( utils_misc.normalize_data_size(block_size[0], order_magnitude="B")) else: raise error.TestError("Can not find the block size for the" " deivce. The output of command" " is: %s" % output) def compare_block_size(session, block_cmd, block_pattern): """ Compare the current block size with the expected size. """ global current_size current_size = get_block_size(session, block_size_cmd, block_size_pattern) if (current_size <= block_size and current_size >= block_size * (1 - accept_ratio)): logging.info( "Block Resizing Finished !!! \n" "Current size %s is same as the expected %s", current_size, block_size) return True return vm = env.get_vm(params["main_vm"]) vm.verify_alive() timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) if params.get("need_enable_verifier", "no") == "yes": error.context("Enable %s driver verifier" % params["driver_name"], logging.info) try: session = utils_test.qemu.setup_win_driver_verifier( session, params["driver_name"], vm, timeout) funcatexit.register(env, params.get("type"), utils_test.qemu.clear_win_driver_verifier, session, vm, timeout) except Exception, e: raise error.TestFail(e)
def run_numa_stress(test, params, env):
    """
    Qemu numa stress test:
    1) Boot up a guest and find the node it used
    2) Try to allocate memory in that node
    3) Run memory heavy stress inside guest
    4) Check the memory use status of qemu process
    5) Repeat step 2 ~ 4 several times

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    host_numa_node = utils_misc.NumaInfo()
    # The test squeezes qemu off one node by filling it, so it needs at
    # least two online NUMA nodes on the host.
    if len(host_numa_node.online_nodes) < 2:
        raise error.TestNAError("Host only has one NUMA node, "
                                "skipping test...")

    timeout = float(params.get("login_timeout", 240))
    test_count = int(params.get("test_count", 4))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)
    qemu_pid = vm.get_pid()

    # Run at least one round per online node so every node gets exercised.
    if test_count < len(host_numa_node.online_nodes):
        test_count = len(host_numa_node.online_nodes)

    # Size the tmpfs to the largest node's MemTotal so filling it can
    # exhaust whichever node qemu currently occupies.
    tmpfs_size = 0
    for node in host_numa_node.nodes:
        node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
        if tmpfs_size < node_mem:
            tmpfs_size = node_mem
    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
    # tmpfs_write_speed is in KB/s; add 50% slack to the dd deadline.
    tmpfs_write_speed = int(params.get("tmpfs_write_speed", 10240))
    dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
    mount_fs_size = "size=%dK" % tmpfs_size
    memory_file = utils_misc.get_path(tmpfs_path, "test")
    dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
                                                          tmpfs_size)

    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)

    numa_node_malloc = -1
    # Find the node where the qemu process currently maps the most memory.
    most_used_node, memory_used = utils_test.max_mem_map_node(
        host_numa_node, qemu_pid)

    for test_round in range(test_count):
        if utils_memory.freememtotal() < tmpfs_size:
            raise error.TestError("Don't have enough memory to execute this "
                                  "test after %s round" % test_round)
        error.context("Executing stress test round: %s" % test_round,
                      logging.info)
        numa_node_malloc = most_used_node
        # Pin the tmpfs fill onto qemu's busiest node via numactl -m.
        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
        error.context("Try to allocate memory in node %s" % numa_node_malloc,
                      logging.info)
        try:
            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
            funcatexit.register(env, params.get("type"), utils_misc.umount,
                                "none", tmpfs_path, "tmpfs")
            utils.system(numa_dd_cmd, timeout=dd_timeout)
        except Exception, error_msg:
            # Running out of space on the tmpfs is the expected way to
            # saturate the node, so "No space" is tolerated.
            if "No space" in str(error_msg):
                pass
            else:
                raise error.TestFail("Can not allocate memory in node %s."
                                     " Error message:%s" %
                                     (numa_node_malloc, str(error_msg)))
        error.context("Run memory heavy stress in guest", logging.info)
        autotest_control.run_autotest_control(test, params, env)
        error.context("Get the qemu process memory use status", logging.info)
        node_after, memory_after = utils_test.max_mem_map_node(
            host_numa_node, qemu_pid)
        # Under node pressure qemu's memory should have migrated away (or
        # shrunk); staying put at the same-or-higher usage is a failure.
        if node_after == most_used_node and memory_after >= memory_used:
            raise error.TestFail("Memory still stick in "
                                 "node %s" % numa_node_malloc)
        else:
            most_used_node = node_after
            memory_used = memory_after
        utils_misc.umount("none", tmpfs_path, "tmpfs")
        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
                              "none", tmpfs_path, "tmpfs")
        # Flush guest page cache so the next round starts clean.
        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
        utils_memory.drop_caches()
def run(test, params, env):
    """
    Timer device boot guest:

    1) Check host clock's sync status with chronyd
    2) Add some load on host (Optional)
    3) Boot the guest with specific clock source
    4) Check the clock source currently used on guest
    5) Do some file operation on guest (Optional)
    6) Check the system time on guest and host (Optional)
    7) Check the hardware time on guest (linux only)
    8) Sleep period of time before reboot (Optional)
    9) Reboot guest (Optional)
    10) Check the system time on guest and host (Optional)
    11) Check the hardware time on guest (Optional)
    12) Restore guest's clock source

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def get_hwtime(session):
        """
        Get guest's hardware clock as a Unix timestamp.

        :param session: VM session.
        :return: guest hardware clock, seconds since the epoch.
        """
        hwclock_time_command = params.get("hwclock_time_command",
                                          "hwclock -u")
        hwclock_time_filter_re = params.get("hwclock_time_filter_re",
                                            r"(\d+-\d+-\d+ \d+:\d+:\d+)")
        hwclock_time_format = params.get("hwclock_time_format",
                                         "%Y-%m-%d %H:%M:%S")
        output = session.cmd_output_safe(hwclock_time_command)
        # Pre-assign so the except branch can log it even when the regex
        # found no match (previously str_time could be unbound here).
        str_time = ""
        try:
            str_time = re.findall(hwclock_time_filter_re, output)[0]
            guest_time = time.mktime(time.strptime(str_time,
                                                   hwclock_time_format))
        except Exception as err:
            logging.debug(
                "(time_format, time_string): (%s, %s)",
                hwclock_time_format, str_time)
            raise err
        return guest_time

    def verify_timedrift(session, is_hardware=False):
        """
        Verify timedrift between host and guest.

        :param session: VM session.
        :param is_hardware: if need to verify guest's hardware time.
        :raise: test.fail when the drift exceeds the configured threshold.
        """
        # Command to run to get the current time
        time_command = params["time_command"]
        # Filter which should match a string to be passed to time.strptime()
        time_filter_re = params["time_filter_re"]
        # Time format for time.strptime()
        time_format = params["time_format"]
        timerdevice_drift_threshold = float(params.get(
            "timerdevice_drift_threshold", 3))

        # Fixed typo: message previously said "harware".
        time_type = "system" if not is_hardware else "hardware"
        error_context.context("Check the %s time on guest" % time_type,
                              logging.info)
        host_time, guest_time = utils_test.get_time(session, time_command,
                                                    time_filter_re,
                                                    time_format)
        if is_hardware:
            # Host system time is still the reference; only the guest side
            # switches to the hardware clock.
            guest_time = get_hwtime(session)
        drift = abs(float(host_time) - float(guest_time))
        if drift > timerdevice_drift_threshold:
            test.fail("The guest's %s time is different with"
                      " host's system time. Host time: '%s', guest time:"
                      " '%s'" % (time_type, host_time, guest_time))

    def get_current_clksrc(session):
        """Return the guest's active clocksource as a canonical name."""
        cmd = "cat /sys/devices/system/clocksource/"
        cmd += "clocksource0/current_clocksource"
        current_clksrc = session.cmd_output_safe(cmd)
        if "kvm-clock" in current_clksrc:
            return "kvm-clock"
        elif "tsc" in current_clksrc:
            return "tsc"
        elif "timebase" in current_clksrc:
            return "timebase"
        elif "acpi_pm" in current_clksrc:
            return "acpi_pm"
        return current_clksrc

    def update_clksrc(session, clksrc):
        """
        Update guest's clocksource, this func can work when not login into
        guest with ssh.

        :param session: VM session.
        :param clksrc: expected guest's clocksource.
        :raise: test.fail when writing the clocksource fails;
                test.error when ``clksrc`` is not available in the guest.
        """
        avail_cmd = "cat /sys/devices/system/clocksource/clocksource0/"
        avail_cmd += "available_clocksource"
        avail_clksrc = session.cmd_output_safe(avail_cmd)
        if clksrc in avail_clksrc:
            clksrc_cmd = "echo %s > /sys/devices/system/clocksource/" % clksrc
            clksrc_cmd += "clocksource0/current_clocksource"
            status, output = session.cmd_status_output(clksrc_cmd)
            if status:
                # BUGFIX: the format arguments must be a tuple; the old
                # code did `... % clksrc, output`, which raised TypeError
                # (two %s placeholders, one argument) instead of failing
                # with a useful message.
                test.fail("fail to update guest's clocksource to %s,"
                          "details: %s" % (clksrc, output))
        else:
            test.error("please check the clocksource you want to set, "
                       "it's not supported by current guest, current "
                       "available clocksources: %s" % avail_clksrc)

    error_context.context("sync host time with NTP server", logging.info)
    clock_sync_command = params["clock_sync_command"]
    process.system(clock_sync_command, shell=True)

    timerdevice_host_load_cmd = params.get("timerdevice_host_load_cmd")
    if timerdevice_host_load_cmd:
        error_context.context("Add some load on host", logging.info)
        process.system(timerdevice_host_load_cmd, shell=True,
                       ignore_bg_processes=True)
        # Make sure the background load is killed when the test exits.
        host_load_stop_cmd = params.get("timerdevice_host_load_stop_cmd",
                                        "pkill -f 'do X=1'")
        funcatexit.register(env, params["type"], _system,
                            host_load_stop_cmd)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    error_context.context("Sync guest timezone before test", logging.info)
    if params["os_type"] == 'linux':
        utils_time.sync_timezone_linux(vm)
    else:
        utils_time.sync_timezone_win(vm)

    timeout = int(params.get("login_timeout", 360))
    # Serial login: clocksource switching can disturb network timekeeping.
    session = vm.wait_for_serial_login(timeout=timeout)

    timerdevice_clksource = params.get("timerdevice_clksource")
    need_restore_clksrc = False
    if timerdevice_clksource:
        origin_clksrc = get_current_clksrc(session)
        logging.info("guest is booted with %s" % origin_clksrc)

        if timerdevice_clksource != origin_clksrc:
            update_clksrc(session, timerdevice_clksource)
            need_restore_clksrc = True

    error_context.context("check timedrift between guest and host.",
                          logging.info)
    verify_timedrift(session)
    if params["os_type"] == "linux":
        verify_timedrift(session, is_hardware=True)

    if params.get("timerdevice_reboot_test") == "yes":
        sleep_time = params.get("timerdevice_sleep_time")
        if sleep_time:
            error_context.context("Sleep '%s' secs before reboot" %
                                  sleep_time, logging.info)
            sleep_time = int(sleep_time)
            time.sleep(sleep_time)

        error_context.context("Check timedrift between guest and host "
                              "after reboot.", logging.info)
        vm.reboot(timeout=timeout, serial=True)
        verify_timedrift(session)
        if params["os_type"] == "linux":
            verify_timedrift(session, is_hardware=True)
    if need_restore_clksrc:
        update_clksrc(session, origin_clksrc)
    session.close()
def run(test, params, env):
    """
    Test 802.1Q vlan of NIC among guests and host.

    1) Configure vlan interface over host bridge interface.
    2) Create two VMs over vlan interface.
    3) Load 8021q module in guest.
    4) Configure ip address of guest with 192.168.*.*
    5) Test by ping between guest and host, should fail.
    6) Test by ping beween guests, should pass.
    7) Setup vlan in guests and using hard-coded ip address 192.168.*.*
    8) Test by ping between guest and host, should pass.
    9) Test by ping among guests, should pass.
    10) Test by netperf between guests and host.
    11) Test by netperf between guests.
    12) Delete vlan interface in host.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def add_vlan(interface, v_id, session=None):
        """
        Create a vlan-device on interface.

        :params interface: Interface.
        :params v_id: Vlan id.
        :params session: VM session or none.
        :return: name of the created vlan interface.
        """
        vlan_if = '%s.%s' % (interface, v_id)
        add_cmd = "ip link add link %s name %s type " % (interface, vlan_if)
        add_cmd += "vlan id %s" % v_id
        error_context.context("Create vlan interface '%s' on %s" %
                              (vlan_if, interface), logging.info)
        # session=None means the command runs on the host, not in a guest.
        if session:
            session.cmd(add_cmd)
        else:
            process.system(add_cmd)
        return vlan_if

    def set_ip_vlan(vlan_if, vlan_ip, session=None):
        """
        Set ip address of vlan interface.

        :params vlan_if: Vlan interface.
        :params vlan_ip: Vlan internal ip.
        :params session: VM session or none (host).
        """
        error_context.context("Assign IP '%s' to vlan interface '%s'" %
                              (vlan_ip, vlan_if), logging.info)
        if session:
            # Clear any existing address and bounce the link before
            # assigning the vlan IP inside the guest.
            session.cmd("ifconfig %s 0.0.0.0" % vlan_if)
            session.cmd("ifconfig %s down" % vlan_if)
            session.cmd("ifconfig %s %s up" % (vlan_if, vlan_ip))
        else:
            process.system("ifconfig %s %s up" % (vlan_if, vlan_ip))

    def set_mac_vlan(vlan_if, mac_str, session):
        """
        Give a new mac address for vlan interface in guest.

        :params: vlan_if: Vlan interface.
        :params: mac_str: New mac address for vlan.
        :params: session: VM session.
        """
        # NOTE(review): "add" here relies on iproute2 accepting it as an
        # abbreviation of "address" -- confirm against the target distro's
        # ip(8); the unambiguous form is "ip link set <if> address <mac> up".
        mac_cmd = "ip link set %s add %s up" % (vlan_if, mac_str)
        error_context.context("Give a new mac address '%s' for vlan interface "
                              "'%s'" % (mac_str, vlan_if), logging.info)
        session.cmd(mac_cmd)

    def set_arp_ignore(session):
        """
        Enable arp_ignore for all ipv4 device in guest
        """
        error_context.context("Enable arp_ignore for all ipv4 device in guest",
                              logging.info)
        ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore"
        session.cmd(ignore_cmd)

    def ping_vlan(vm, dest, vlan_if, session):
        """
        Test ping between vlans, from guest to host/guest.

        :params vm: VM object
        :params dest: Dest ip to ping.
        :params vlan_if: Vlan interface.
        :params session: VM session.
        :raise: NetPingError when the ping fails.
        """
        error_context.context("Test ping from '%s' to '%s' on guest '%s'" %
                              (vlan_if, dest, vm.name))
        status, output = utils_test.ping(dest=dest, count=10,
                                         interface=vlan_if,
                                         session=session, timeout=30)
        if status:
            raise NetPingError(vlan_if, dest, output)

    def netperf_vlan(client='main_vm', server='localhost',
                     sub_type='netperf_stress'):
        """
        Test netperf stress among guests and host.

        :params client: Netperf client.
        :params server: Netperf server.
        :params sub_type: Sub_type to run.
        """
        params["netperf_client"] = client
        params["netperf_server"] = server
        error_context.context("Run netperf stress test among guests and host, "
                              "server: %s, client: %s" % (server, client),
                              logging.info)
        utils_test.run_virt_sub_test(test, params, env, sub_type)

    vms = []
    sessions = []
    ifname = []
    vm_ip = []
    vm_vlan_ip = []
    vm_vlan_if = []
    sub_type = params["sub_type"]
    host_br = params.get("host_br", "switch")
    host_vlan_id = params.get("host_vlan_id", "10")
    host_vlan_ip = params.get("host_vlan_ip", "192.168.10.10")
    subnet = params.get("subnet", "192.168")
    mac_str = params.get("mac_str").split(',')
    os_type = params.get("os_type", "linux")

    linux_modules.load_module("8021q")

    host_vlan_if = "%s.%s" % (host_br, host_vlan_id)
    if host_vlan_if not in utils_net.get_net_if():
        host_vlan_if = add_vlan(interface=host_br, v_id=host_vlan_id)
        if host_vlan_if in utils_net.get_net_if():
            set_ip_vlan(vlan_if=host_vlan_if, vlan_ip=host_vlan_ip)
            # Register cleanup so the host vlan interface is removed even
            # if the test aborts.
            rm_host_vlan_cmd = params["rm_host_vlan_cmd"] % host_vlan_if
            funcatexit.register(env, params["type"], _system,
                                rm_host_vlan_cmd)
        else:
            test.cancel("Fail to set up vlan over bridge interface in host!")

    if params.get("start_vm", "yes") == "no":
        vm_main = env.get_vm(params["main_vm"])
        vm_main.create(params=params)
        vm2 = env.get_vm("vm2")
        vm2.create(params=params)
        vms.append(vm_main)
        vms.append(vm2)
    else:
        # BUGFIX: env.get_vm() takes the VM name string; the old code
        # passed a one-element list ([params["main_vm"]]) which is not a
        # valid VM name key.
        vms.append(env.get_vm(params["main_vm"]))
        vms.append(env.get_vm('vm2'))

    for vm_ in vms:
        vm_.verify_alive()

    for vm_index, vm in enumerate(vms):
        error_context.context("Prepare test env on %s" % vm.name)
        session = vm.wait_for_serial_login()
        if not session:
            err_msg = "Could not log into guest %s" % vm.name
            test.error(err_msg)
        if os_type == "linux":
            interface = utils_net.get_linux_ifname(session,
                                                   vm.get_mac_address())

        error_context.context("Load 8021q module in guest %s" % vm.name,
                              logging.info)
        session.cmd_output_safe("modprobe 8021q")

        error_context.context("Setup vlan environment in guest %s" % vm.name,
                              logging.info)
        inter_ip = "%s.%s.%d" % (subnet, host_vlan_id, vm_index + 1)
        set_ip_vlan(interface, inter_ip, session=session)
        set_arp_ignore(session)
        # Without a vlan tag in the guest, traffic to the host's tagged
        # interface must NOT get through.
        error_context.context("Test ping from guest '%s' to host with "
                              "interface '%s'" % (vm.name, interface),
                              logging.info)
        try:
            ping_vlan(vm, dest=host_vlan_ip, vlan_if=interface,
                      session=session)
        except NetPingError:
            logging.info("Guest ping fail to host as expected with "
                         "interface '%s'" % interface)
        else:
            test.fail("Guest ping to host should fail with interface"
                      " '%s'" % interface)
        ifname.append(interface)
        vm_ip.append(inter_ip)
        sessions.append(session)

    # Ping succeed between guests
    error_context.context("Test ping between guests with interface %s" %
                          ifname[0], logging.info)
    ping_vlan(vms[0], dest=vm_ip[1], vlan_if=ifname[0], session=sessions[0])

    # set vlan tag for guest
    for vm_index, vm in enumerate(vms):
        session = sessions[vm_index]
        error_context.context("Add vlan interface on guest '%s'" % vm.name)
        session.cmd("ifconfig %s 0.0.0.0" % ifname[vm_index])
        vlan_if = add_vlan(interface=ifname[vm_index], v_id=host_vlan_id,
                           session=session)
        vm_vlan_if.append(vlan_if)
        set_mac_vlan(vlan_if, mac_str[vm_index], session=session)
        vlan_ip = "%s.%s.%d" % (subnet, host_vlan_id, vm_index + 11)
        set_ip_vlan(vlan_if, vlan_ip, session=session)
        vm_vlan_ip.append(vlan_ip)

        # With matching vlan tags the guest<->host ping must now succeed.
        error_context.context("Test ping from interface '%s' on guest "
                              "'%s' to host." % (vm_vlan_if[vm_index],
                                                 vm.name), logging.info)
        ping_vlan(vm, dest=host_vlan_ip, vlan_if=vm_vlan_if[vm_index],
                  session=session)
        netperf_vlan(client=vm.name, server="localhost")

    error_context.context("Test ping and netperf between guests with "
                          "interface '%s'" % vm_vlan_if[vm_index],
                          logging.info)
    ping_vlan(vms[0], dest=vm_vlan_ip[1], vlan_if=vm_vlan_if[0],
              session=sessions[0])
    netperf_vlan(client=params["main_vm"], server='vm2')

    # Give the backgrounded sub test a grace period to register its exit
    # handlers before tearing the sessions down.
    exithandlers = "exithandlers__%s" % sub_type
    sub_exit_timeout = int(params.get("sub_exit_timeout", 10))
    start_time = time.time()
    end_time = start_time + float(sub_exit_timeout)
    while time.time() < end_time:
        logging.debug("%s (%f secs)", sub_type + " is running",
                      (time.time() - start_time))
        if env.data.get(exithandlers):
            break
        time.sleep(1)

    for sess in sessions:
        if sess:
            sess.close()
def run(test, params, env):
    """
    Timer device boot guest:

    1) Sync the host system time with ntp server
    2) Add some load on host (Optional)
    3) Boot the guest with specific clock source
    4) Check the clock source currently used on guest
    5) Do some file operation on guest (Optional)
    6) Check the system time on guest and host (Optional)
    7) Check the hardware time on guest and host (Optional)
    8) Sleep period of time before reboot (Optional)
    9) Reboot guest (Optional)
    10) Check the system time on guest and host (Optional)
    11) Check the hardware time on guest and host (Optional)

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def verify_guest_clock_source(session, expected):
        """Fail the test unless the guest's active clocksource is ``expected``."""
        error_context.context("Check the current clocksource in guest",
                              logging.info)
        cmd = "cat /sys/devices/system/clocksource/"
        cmd += "clocksource0/current_clocksource"
        if expected not in session.cmd(cmd):
            test.fail("Guest didn't use '%s' clocksource" % expected)

    error_context.context("Sync the host system time with ntp server",
                          logging.info)
    process.system("ntpdate clock.redhat.com")

    timerdevice_host_load_cmd = params.get("timerdevice_host_load_cmd")
    if timerdevice_host_load_cmd:
        error_context.context("Add some load on host", logging.info)
        process.system(timerdevice_host_load_cmd, shell=True)
        # Ensure the background load is stopped when the test exits.
        host_load_stop_cmd = params["timerdevice_host_load_stop_cmd"]
        funcatexit.register(env, params["type"], _system,
                            host_load_stop_cmd)

    error_context.context("Boot a guest with kvm-clock", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    timerdevice_clksource = params.get("timerdevice_clksource")
    if timerdevice_clksource:
        try:
            verify_guest_clock_source(session, timerdevice_clksource)
        except Exception:
            # Broad catch is deliberate: any mismatch/failure falls back to
            # rewriting the guest kernel cmdline with the wanted clocksource
            # and rebooting with kvm-clock disabled.
            clksrc = timerdevice_clksource
            error_context.context("Shutdown guest")
            vm.destroy()
            env.unregister_vm(vm.name)
            error_context.context("Update guest kernel cli to '%s'" %
                                  clksrc, logging.info)
            image_filename = storage.get_image_filename(
                params, data_dir.get_data_dir())
            grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
            kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
                                            r".*vmlinuz-\d+.*")

            disk_obj = utils_disk.GuestFSModiDisk(image_filename)
            kernel_cfg_original = disk_obj.read_file(grub_file)
            try:
                logging.warn("Update the first kernel entry to"
                             " '%s' only" % clksrc)
                kernel_cfg = re.findall(kernel_cfg_pattern,
                                        kernel_cfg_original)[0]
            except IndexError as detail:
                test.error("Couldn't find the kernel config, regex"
                           " pattern is '%s', detail: '%s'" %
                           (kernel_cfg_pattern, detail))

            if "clocksource=" in kernel_cfg:
                kernel_cfg_new = re.sub(r"clocksource=.*?\s",
                                        "clocksource=%s" % clksrc,
                                        kernel_cfg)
            else:
                kernel_cfg_new = "%s %s" % (kernel_cfg,
                                            "clocksource=%s" % clksrc)

            disk_obj.replace_image_file_content(grub_file, kernel_cfg,
                                                kernel_cfg_new)

            error_context.context("Boot the guest", logging.info)
            vm_name = params["main_vm"]
            cpu_model_flags = params.get("cpu_model_flags")
            # Disable kvmclock so the configured clocksource is actually used.
            params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock"
            env_process.preprocess_vm(test, params, env, vm_name)
            vm = env.get_vm(vm_name)
            vm.verify_alive()
            session = vm.wait_for_login(timeout=timeout)

            error_context.context("Check the current clocksource in guest",
                                  logging.info)
            verify_guest_clock_source(session, clksrc)

        # NTP daemons would mask drift; stop them before measuring.
        error_context.context("Kill all ntp related processes")
        session.cmd("pkill ntp; true")

    if params.get("timerdevice_file_operation") == "yes":
        error_context.context("Do some file operation on guest", logging.info)
        session.cmd("dd if=/dev/zero of=/tmp/timer-test-file bs=1M count=100")
        return

    # Command to run to get the current time
    time_command = params["time_command"]
    # Filter which should match a string to be passed to time.strptime()
    time_filter_re = params["time_filter_re"]
    # Time format for time.strptime()
    time_format = params["time_format"]
    # BUGFIX: params.get() returns a *string* when the threshold is set in
    # the config, which made `drift > threshold` a str/float comparison.
    # Convert to float, matching the sibling timer test in this repo.
    timerdevice_drift_threshold = float(params.get(
        "timerdevice_drift_threshold", 3))

    error_context.context("Check the system time on guest and host",
                          logging.info)
    (host_time, guest_time) = utils_test.get_time(session, time_command,
                                                  time_filter_re,
                                                  time_format)
    drift = abs(float(host_time) - float(guest_time))
    if drift > timerdevice_drift_threshold:
        test.fail("The guest's system time is different with"
                  " host's. Host time: '%s', guest time:"
                  " '%s'" % (host_time, guest_time))

    get_hw_time_cmd = params.get("get_hw_time_cmd")
    if get_hw_time_cmd:
        error_context.context(
            "Check the hardware time on guest and host", logging.info)
        # NOTE(review): process.system_output may return bytes on py3;
        # float(bytes) raises TypeError -- confirm the avocado version in
        # use, or decode before conversion.
        host_time = process.system_output(get_hw_time_cmd, shell=True)
        guest_time = session.cmd(get_hw_time_cmd)
        drift = abs(float(host_time) - float(guest_time))
        if drift > timerdevice_drift_threshold:
            test.fail("The guest's hardware time is different with"
                      " host's. Host time: '%s', guest time:"
                      " '%s'" % (host_time, guest_time))

    if params.get("timerdevice_reboot_test") == "yes":
        sleep_time = params.get("timerdevice_sleep_time")
        if sleep_time:
            error_context.context("Sleep '%s' secs before reboot" %
                                  sleep_time, logging.info)
            sleep_time = int(sleep_time)
            time.sleep(sleep_time)

        session = vm.reboot()
        error_context.context("Check the system time on guest and host",
                              logging.info)
        (host_time, guest_time) = utils_test.get_time(session, time_command,
                                                      time_filter_re,
                                                      time_format)
        drift = abs(float(host_time) - float(guest_time))
        if drift > timerdevice_drift_threshold:
            test.fail("The guest's system time is different with"
                      " host's. Host time: '%s', guest time:"
                      " '%s'" % (host_time, guest_time))

        get_hw_time_cmd = params.get("get_hw_time_cmd")
        if get_hw_time_cmd:
            error_context.context(
                "Check the hardware time on guest and host", logging.info)
            host_time = process.system_output(get_hw_time_cmd, shell=True)
            guest_time = session.cmd(get_hw_time_cmd)
            drift = abs(float(host_time) - float(guest_time))
            if drift > timerdevice_drift_threshold:
                test.fail("The guest's hardware time is different with"
                          " host's. Host time: '%s', guest time:"
                          " '%s'" % (host_time, guest_time))
def run(test, params, env):
    """
    Test 802.1Q vlan of NIC among guests and host with linux bridge backend.

    1) Configure vlan interface over host bridge interface.
    2) Create two VMs over vlan interface.
    3) Load 8021q module in guest.
    4) Configure ip address of guest with 192.168.*.*
    5) Test by ping between guest and host, should fail.
    6) Test by ping beween guests, should pass.
    7) Setup vlan in guests and using hard-coded ip address 192.168.*.*
    8) Test by ping between guest and host, should pass.
    9) Test by ping among guests, should pass.
    10) Test by netperf between guests and host.
    11) Test by netperf between guests.
    12) Delete vlan interface in host.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def add_vlan(interface, v_id, session=None):
        """
        Create a vlan-device on interface.

        :params interface: Interface.
        :params v_id: Vlan id.
        :params session: VM session or none (host).
        :return: name of the created vlan interface.
        """
        vlan_if = '%s.%s' % (interface, v_id)
        # Command template comes from the test config (add_vlan_cmd).
        add_cmd = params["add_vlan_cmd"] % (interface, vlan_if, v_id)
        error_context.context("Create vlan interface '%s' on %s" %
                              (vlan_if, interface), logging.info)
        if session:
            session.cmd(add_cmd)
        else:
            process.system(add_cmd)
        return vlan_if

    def set_ip_vlan(vlan_if, vlan_ip, session=None):
        """
        Set ip address of vlan interface.

        :params vlan_if: Vlan interface.
        :params vlan_ip: Vlan internal ip.
        :params session: VM session or none (host).
        """
        error_context.context("Assign IP '%s' to vlan interface '%s'" %
                              (vlan_ip, vlan_if), logging.info)
        if session:
            # Clear any prior address and bounce the link before assigning.
            session.cmd("ifconfig %s 0.0.0.0" % vlan_if)
            session.cmd("ifconfig %s down" % vlan_if)
            session.cmd("ifconfig %s %s up" % (vlan_if, vlan_ip))
        else:
            process.system("ifconfig %s %s up" % (vlan_if, vlan_ip))

    def set_mac_vlan(vlan_if, mac_str, session):
        """
        Give a new mac address for vlan interface in guest.

        :params: vlan_if: Vlan interface.
        :params: mac_str: New mac address for vlan.
        :params: session: VM session.
        """
        # NOTE(review): "add" relies on iproute2 abbreviation matching for
        # "address" -- confirm; the unambiguous spelling is
        # "ip link set <if> address <mac> up".
        mac_cmd = "ip link set %s add %s up" % (vlan_if, mac_str)
        error_context.context("Give a new mac address '%s' for vlan interface "
                              "'%s'" % (mac_str, vlan_if), logging.info)
        session.cmd(mac_cmd)

    def set_arp_ignore(session):
        """
        Enable arp_ignore for all ipv4 device in guest
        """
        error_context.context("Enable arp_ignore for all ipv4 device in guest",
                              logging.info)
        ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore"
        session.cmd(ignore_cmd)

    def ping_vlan(vm, dest, vlan_if, session):
        """
        Test ping between vlans, from guest to host/guest.

        :params vm: VM object
        :params dest: Dest ip to ping.
        :params vlan_if: Vlan interface.
        :params session: VM session.
        :raise: NetPingError when the ping fails.
        """
        error_context.context("Test ping from '%s' to '%s' on guest '%s'" %
                              (vlan_if, dest, vm.name))
        status, output = utils_test.ping(dest=dest, count=10,
                                         interface=vlan_if,
                                         session=session, timeout=30)
        if status:
            raise NetPingError(vlan_if, dest, output)

    def netperf_vlan(client='main_vm', server='localhost',
                     sub_type='netperf_stress'):
        """
        Test netperf stress among guests and host.

        :params client: Netperf client.
        :params server: Netperf server.
        :params sub_type: Sub_type to run.
        """
        params["netperf_client"] = client
        params["netperf_server"] = server
        error_context.context("Run netperf stress test among guests and host, "
                              "server: %s, client: %s" % (server, client),
                              logging.info)
        utils_test.run_virt_sub_test(test, params, env, sub_type)

    vms = []
    sessions = []
    ifname = []
    vm_ip = []
    vm_vlan_ip = []
    vm_vlan_if = []
    sub_type = params["sub_type"]
    host_br = params.get("netdst", "switch")
    host_vlan_id = params.get("host_vlan_id", "10")
    host_vlan_ip = params.get("host_vlan_ip", "192.168.10.10")
    subnet = params.get("subnet", "192.168")
    mac_str = params.get("mac_str").split(',')

    # This variant only supports the Linux bridge backend.
    br_backend = utils_net.find_bridge_manager(host_br)
    if not isinstance(br_backend, utils_net.Bridge):
        test.cancel("Host does not use Linux Bridge")

    linux_modules.load_module("8021q")

    host_vlan_if = "%s.%s" % (host_br, host_vlan_id)
    if host_vlan_if not in utils_net.get_net_if():
        host_vlan_if = add_vlan(interface=host_br, v_id=host_vlan_id)
        if host_vlan_if in utils_net.get_net_if():
            set_ip_vlan(vlan_if=host_vlan_if, vlan_ip=host_vlan_ip)
            # Register cleanup so the host vlan interface is removed even
            # if the test aborts.
            rm_host_vlan_cmd = params["rm_host_vlan_cmd"] % host_vlan_if
            funcatexit.register(env, params["type"], _system,
                                rm_host_vlan_cmd)
        else:
            test.cancel("Fail to set up vlan over bridge interface in host!")

    if params.get("start_vm", "yes") == "no":
        vm_main = env.get_vm(params["main_vm"])
        vm_main.create(params=params)
        vm2 = env.get_vm("vm2")
        vm2.create(params=params)
        vms.append(vm_main)
        vms.append(vm2)
    else:
        # BUGFIX: env.get_vm() takes the VM name string; the old code
        # passed a one-element list ([params["main_vm"]]) which is not a
        # valid VM name key.
        vms.append(env.get_vm(params["main_vm"]))
        vms.append(env.get_vm('vm2'))

    for vm_ in vms:
        vm_.verify_alive()

    for vm_index, vm in enumerate(vms):
        error_context.context("Prepare test env on %s" % vm.name)
        session = vm.wait_for_serial_login()
        if not session:
            err_msg = "Could not log into guest %s" % vm.name
            test.error(err_msg)
        interface = utils_net.get_linux_ifname(session,
                                               vm.get_mac_address())

        error_context.context("Load 8021q module in guest %s" % vm.name,
                              logging.info)
        session.cmd_output_safe("modprobe 8021q")

        error_context.context("Setup vlan environment in guest %s" % vm.name,
                              logging.info)
        inter_ip = "%s.%s.%d" % (subnet, host_vlan_id, vm_index + 1)
        set_ip_vlan(interface, inter_ip, session=session)
        set_arp_ignore(session)
        params["vlan_nic"] = "%s.%s" % (interface, host_vlan_id)
        # Without a vlan tag in the guest, traffic to the host's tagged
        # interface must NOT get through.
        error_context.context("Test ping from guest '%s' to host with "
                              "interface '%s'" % (vm.name, interface),
                              logging.info)
        try:
            ping_vlan(vm, dest=host_vlan_ip, vlan_if=interface,
                      session=session)
        except NetPingError:
            logging.info("Guest ping fail to host as expected with "
                         "interface '%s'", interface)
        else:
            test.fail("Guest ping to host should fail with interface"
                      " '%s'" % interface)
        ifname.append(interface)
        vm_ip.append(inter_ip)
        sessions.append(session)

    # Ping succeed between guests
    error_context.context("Test ping between guests with interface %s" %
                          ifname[0], logging.info)
    ping_vlan(vms[0], dest=vm_ip[1], vlan_if=ifname[0], session=sessions[0])

    # set vlan tag for guest
    for vm_index, vm in enumerate(vms):
        session = sessions[vm_index]
        error_context.context("Add vlan interface on guest '%s'" % vm.name)
        session.cmd("ifconfig %s 0.0.0.0" % ifname[vm_index])
        vlan_if = add_vlan(interface=ifname[vm_index], v_id=host_vlan_id,
                           session=session)
        vm_vlan_if.append(vlan_if)
        set_mac_vlan(vlan_if, mac_str[vm_index], session=session)
        vlan_ip = "%s.%s.%d" % (subnet, host_vlan_id, vm_index + 11)
        set_ip_vlan(vlan_if, vlan_ip, session=session)
        vm_vlan_ip.append(vlan_ip)

        # With matching vlan tags the guest<->host ping must now succeed.
        error_context.context("Test ping from interface '%s' on guest "
                              "'%s' to host." % (vm_vlan_if[vm_index],
                                                 vm.name), logging.info)
        ping_vlan(vm, dest=host_vlan_ip, vlan_if=vm_vlan_if[vm_index],
                  session=session)
        netperf_vlan(client=vm.name, server="localhost")

    error_context.context("Test ping and netperf between guests with "
                          "interface '%s'" % vm_vlan_if[vm_index],
                          logging.info)
    ping_vlan(vms[0], dest=vm_vlan_ip[1], vlan_if=vm_vlan_if[0],
              session=sessions[0])
    netperf_vlan(client=params["main_vm"], server='vm2')

    # Give the backgrounded sub test a grace period to register its exit
    # handlers before tearing the sessions down.
    exithandlers = "exithandlers__%s" % sub_type
    sub_exit_timeout = int(params.get("sub_exit_timeout", 10))
    start_time = time.time()
    end_time = start_time + float(sub_exit_timeout)
    while time.time() < end_time:
        logging.debug("%s (%f secs)", sub_type + " is running",
                      (time.time() - start_time))
        if env.data.get(exithandlers):
            break
        time.sleep(1)

    for sess in sessions:
        if sess:
            sess.close()
def run(test, params, env):
    """
    Qemu numa stress test:
    1) Boot up a guest and find the node it used
    2) Try to allocate memory in that node
    3) Run memory heavy stress inside guest
    4) Check the memory use status of qemu process
    5) Repeat step 2 ~ 4 several times

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    host_numa_node = utils_misc.NumaInfo()
    # Squeezing qemu off a node requires at least two online NUMA nodes.
    if len(host_numa_node.online_nodes) < 2:
        test.cancel("Host only has one NUMA node, skipping test...")

    timeout = float(params.get("login_timeout", 240))
    test_count = int(params.get("test_count", 4))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    qemu_pid = vm.get_pid()

    # Run at least one round per online node.
    if test_count < len(host_numa_node.online_nodes):
        test_count = len(host_numa_node.online_nodes)

    # When tmpfs_size is not configured (0), fall back to the first node's
    # MemTotal.  NOTE(review): only the first iteration can assign it, so
    # later (possibly larger) nodes are ignored -- confirm intended.
    tmpfs_size = params.get_numeric("tmpfs_size")
    for node in host_numa_node.nodes:
        node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal"))
        if tmpfs_size == 0:
            tmpfs_size = node_mem
    tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test")
    tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path)
    # Write speed (KB/s) sizes the dd deadline with 50% slack.
    tmpfs_write_speed = get_tmpfs_write_speed()
    dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5
    mount_fs_size = "size=%dK" % tmpfs_size
    memory_file = utils_misc.get_path(tmpfs_path, "test")
    dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file,
                                                          tmpfs_size)
    utils_memory.drop_caches()

    if utils_memory.freememtotal() < tmpfs_size:
        test.cancel("Host does not have enough free memory to run the test, "
                    "skipping test...")

    if not os.path.isdir(tmpfs_path):
        os.mkdir(tmpfs_path)

    # Guest stress sized to a fraction of guest memory (mem_ratio, split
    # across the two --vm workers).
    test_mem = float(params.get("mem")) * float(params.get("mem_ratio", 0.8))
    stress_args = "--cpu 4 --io 4 --vm 2 --vm-bytes %sM" % int(test_mem / 2)

    # Node where the qemu process currently maps the most memory.
    most_used_node, memory_used = max_mem_map_node(host_numa_node, qemu_pid)
    for test_round in range(test_count):
        # Start each round with a clean tmpfs file and fresh page cache.
        if os.path.exists(memory_file):
            os.remove(memory_file)
        utils_memory.drop_caches()
        if utils_memory.freememtotal() < tmpfs_size:
            test.error("Don't have enough memory to execute this "
                       "test after %s round" % test_round)
        error_context.context("Executing stress test round: %s" % test_round,
                              logging.info)
        numa_node_malloc = most_used_node
        # Pin the tmpfs fill onto qemu's busiest node via numactl -m.
        numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd)
        error_context.context(
            "Try to allocate memory in node %s" % numa_node_malloc,
            logging.info)
        try:
            utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size)
            funcatexit.register(env, params.get("type"), utils_misc.umount,
                                "none", tmpfs_path, "tmpfs")
            process.system(numa_dd_cmd, timeout=dd_timeout, shell=True)
        except Exception as error_msg:
            # Filling the tmpfs until "No space" is the intended way to
            # saturate the node; anything else is a real failure.
            if "No space" in str(error_msg):
                pass
            else:
                test.fail("Can not allocate memory in node %s."
                          " Error message:%s" % (numa_node_malloc,
                                                 str(error_msg)))
        error_context.context("Run memory heavy stress in guest", logging.info)
        stress_test = utils_test.VMStress(vm, "stress", params,
                                          stress_args=stress_args)
        stress_test.load_stress_tool()
        error_context.context("Get the qemu process memory use status",
                              logging.info)
        node_after, memory_after = max_mem_map_node(host_numa_node, qemu_pid)
        # Under pressure qemu's memory should have migrated away (or
        # shrunk); staying on the same node at the same-or-higher usage
        # means the balancing did not happen.
        if node_after == most_used_node and memory_after >= memory_used:
            test.fail("Memory still stick in node %s" % numa_node_malloc)
        else:
            most_used_node = node_after
            memory_used = memory_after
        stress_test.unload_stress()
        stress_test.clean()
        utils_misc.umount("none", tmpfs_path, "tmpfs")
        funcatexit.unregister(env, params.get("type"), utils_misc.umount,
                              "none", tmpfs_path, "tmpfs")
        session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches")
        utils_memory.drop_caches()

    session.close()
def run(test, params, env):
    """
    This tests the disk hotplug/unplug functionality.
    1) prepares multiple disks to be hotplugged
    2) hotplugs them
    3) verifies that they are in qtree/guest system/...
    4) stop I/O stress_cmd
    5) unplugs them
    6) continue I/O stress_cmd
    7) verifies they are not in qtree/guest system/...
    8) repeats $repeat_times

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def verify_qtree(params, info_qtree, info_block, qdev):
        """
        Verifies that params, info qtree, info block and /proc/scsi/ matches

        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :param info_qtree: Output of "info qtree" monitor command
        :type info_qtree: string
        :param info_block: Output of "info block" monitor command
        :type info_block: dict of dicts
        :param qdev: qcontainer representation
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        """
        # Each check returns an error count; any non-zero total fails the test.
        err = 0
        qtree = qemu_qtree.QtreeContainer()
        qtree.parse_info_qtree(info_qtree)
        disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
        (tmp1, tmp2) = disks.parse_info_block(info_block)
        err += tmp1 + tmp2
        err += disks.generate_params()
        err += disks.check_disk_params(params)
        if err:
            logging.error("info qtree:\n%s", info_qtree)
            logging.error("info block:\n%s", info_block)
            logging.error(qdev.str_bus_long())
            test.fail("%s errors occurred while verifying"
                      " qtree vs. params" % err)

    def insert_into_qdev(qdev, param_matrix, no_disks, params, new_devices):
        """
        Inserts no_disks disks int qdev using randomized args from param_matrix

        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        :param param_matrix: Matrix of randomizable params
        :type param_matrix: list of lists
        :param no_disks: Desired number of disks
        :type no_disks: integer
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :return: (newly added devices, number of added disks)
        :rtype: tuple(list, integer)
        """
        dev_idx = 0
        _new_devs_fmt = ""
        pci_bus = {'aobject': 'pci.0'}
        _formats = param_matrix.pop('fmt', [params.get('drive_format')])
        formats = _formats[:]
        # Single queue means serial mode; strict_mode None lets qdev decide,
        # while parallel insertion forces bus/addr assignment up front.
        if len(new_devices) == 1:
            strict_mode = None
        else:
            strict_mode = True
        i = 0
        while i < no_disks:
            # Set the format
            # Formats that failed insertion are dropped; bail out when none
            # are left.
            if len(formats) < 1:
                if i == 0:
                    test.error("Fail to add any disks, probably bad"
                               " configuration.")
                logging.warn("Can't create desired number '%s' of disk types "
                             "'%s'. Using '%d' no disks.", no_disks,
                             _formats, i)
                break
            name = 'stg%d' % i
            args = {'name': name, 'filename': stg_image_name % i,
                    'pci_bus': pci_bus}
            fmt = random.choice(formats)
            # Map the logical format name to device type + HBA where needed.
            if fmt == 'virtio_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'virtio-scsi-pci'
            elif fmt == 'lsi_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'lsi53c895a'
            elif fmt == 'spapr_vscsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'spapr-vscsi'
            else:
                args['fmt'] = fmt
            # Per-image format overrides the global image_format when present.
            args['imgfmt'] = params['image_format_%s' % name] if params.get(
                'image_format_%s' % name) else params['image_format']
            # Other params
            for key, value in param_matrix.items():
                args[key] = random.choice(value)

            try:
                devs = qdev.images_define_by_variables(**args)
                # parallel test adds devices in mixed order, force bus/addrs
                qdev.insert(devs, strict_mode)
            except utils.DeviceError:
                # This format can't be placed; undo partial insertion and
                # retire the format.
                for dev in devs:
                    if dev in qdev:
                        qdev.remove(dev, recursive=True)
                formats.remove(fmt)
                continue

            params = convert_params(params, args)
            env_process.preprocess_image(test, params.object_params(name),
                                         name)
            # Round-robin the new devices across the per-monitor queues.
            new_devices[dev_idx].extend(devs)
            dev_idx = (dev_idx + 1) % len(new_devices)
            _new_devs_fmt += "%s(%s) " % (name, fmt)
            i += 1
        if _new_devs_fmt:
            logging.info("Using disks: %s", _new_devs_fmt[:-1])
        # Restore 'fmt' so the matrix can be reused next iteration.
        param_matrix['fmt'] = _formats
        return new_devices, params

    def _hotplug(new_devices, monitor, prefix=""):
        """
        Do the actual hotplug of the new_devices using monitor monitor.

        :param new_devices: List of devices which should be hotplugged
        :type new_devices: List of virttest.qemu_devices.qdevice.QBaseDevice
        :param monitor: Monitor which should be used for hotplug
        :type monitor: virttest.qemu_monitor.Monitor
        """
        hotplug_outputs = []
        hotplug_sleep = float(params.get('wait_between_hotplugs', 0))
        for device in new_devices:      # Hotplug all devices
            time.sleep(hotplug_sleep)
            hotplug_outputs.append(device.hotplug(monitor))
        # One extra settle delay before verification starts.
        time.sleep(hotplug_sleep)
        failed = []
        passed = []
        unverif = []
        for device in new_devices:      # Verify the hotplug status
            out = hotplug_outputs.pop(0)
            # verify_hotplug returns True/False, or another value when the
            # result can't be determined (collected as "unverified").
            out = device.verify_hotplug(out, monitor)
            if out is True:
                passed.append(str(device))
            elif out is False:
                failed.append(str(device))
            else:
                unverif.append(str(device))
        if not failed and not unverif:
            logging.debug("%sAll hotplugs verified (%s)", prefix, len(passed))
        elif not failed:
            logging.warn("%sHotplug status:\nverified %s\nunverified %s",
                         prefix, passed, unverif)
        else:
            logging.error("%sHotplug status:\nverified %s\nunverified %s\n"
                          "failed %s", prefix, passed, unverif, failed)
            logging.error("qtree:\n%s", monitor.info("qtree", debug=False))
            test.fail("%sHotplug of some devices failed." % prefix)

    def hotplug_serial(new_devices, monitor):
        # Serial mode: a single queue, a single monitor.
        _hotplug(new_devices[0], monitor)

    def hotplug_parallel(new_devices, monitors):
        # Parallel mode: one thread per queue/monitor pair.
        threads = []
        for i in range(len(new_devices)):
            name = "Th%s: " % i
            logging.debug("%sworks with %s devices", name,
                          [_.str_short() for _ in new_devices[i]])
            thread = threading.Thread(target=_hotplug, name=name[:-2],
                                      args=(new_devices[i], monitors[i],
                                            name))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        logging.debug("All threads finished.")

    def _postprocess_images():
        # remove and check the images
        _disks = []
        for disk in params['images'].split(' '):
            if disk.startswith("stg"):
                env_process.postprocess_image(test, params.object_params(disk),
                                              disk)
            else:
                _disks.append(disk)
            params['images'] = " ".join(_disks)

    def _unplug(new_devices, qdev, monitor, prefix=""):
        """
        Do the actual unplug of new_devices using monitor monitor

        :param new_devices: List of devices which should be hotplugged
        :type new_devices: List of virttest.qemu_devices.qdevice.QBaseDevice
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        :param monitor: Monitor which should be used for hotplug
        :type monitor: virttest.qemu_monitor.Monitor
        """
        unplug_sleep = float(params.get('wait_between_unplugs', 0))
        unplug_outs = []
        unplug_devs = []
        # Unplug in reverse order so dependent devices go before their parents.
        for device in new_devices[::-1]:    # unplug all devices
            if device in qdev:  # Some devices are removed with previous one
                time.sleep(unplug_sleep)
                unplug_devs.append(device)
                try:
                    output = device.unplug(monitor)
                except MonitorError:
                    # In new versions of qemu, to unplug a disk, cmd
                    # '__com.redhat_drive_del' is not necessary; while it's
                    # necessary in old qemu verisons. Following update is to
                    # pass the error caused by using the cmd in new
                    # qemu versions.
                    if device.get_qid() not in monitor.info("block",
                                                            debug=False):
                        pass
                    else:
                        raise
                # NOTE(review): if the very first unplug raises MonitorError
                # and is swallowed above, 'output' is unbound here
                # (NameError); on later devices a stale value is appended.
                # Looks like an upstream latent bug -- confirm before fixing.
                unplug_outs.append(output)
                # Remove from qdev even when unplug failed because further in
                # this test we compare VM with qdev, which should be without
                # these devices. We can do this because we already set the VM
                # as dirty.
                if LOCK:
                    LOCK.acquire()
                qdev.remove(device,
                            False if vm.check_capability(Flags.BLOCKDEV)
                            else True)
                if LOCK:
                    LOCK.release()
        time.sleep(unplug_sleep)
        failed = []
        passed = []
        unverif = []
        for device in unplug_devs:          # Verify unplugs
            _out = unplug_outs.pop(0)
            # unplug effect can be delayed as it waits for OS respone before
            # it removes the device form qtree
            for _ in range(50):
                out = device.verify_unplug(_out, monitor)
                if out is True:
                    break
                time.sleep(0.1)
            if out is True:
                passed.append(str(device))
            elif out is False:
                failed.append(str(device))
            else:
                unverif.append(str(device))

        if not failed and not unverif:
            logging.debug("%sAll unplugs verified (%s)", prefix, len(passed))
        elif not failed:
            logging.warn("%sUnplug status:\nverified %s\nunverified %s",
                         prefix, passed, unverif)
        else:
            logging.error("%sUnplug status:\nverified %s\nunverified %s\n"
                          "failed %s", prefix, passed, unverif, failed)
            logging.error("qtree:\n%s", monitor.info("qtree", debug=False))
            test.fail("%sUnplug of some devices failed." % prefix)

    def unplug_serial(new_devices, qdev, monitor):
        # Serial mode: a single queue, a single monitor.
        _unplug(new_devices[0], qdev, monitor)

    def unplug_parallel(new_devices, qdev, monitors):
        # Parallel mode: one thread per queue/monitor pair.
        threads = []
        for i in range(len(new_devices)):
            name = "Th%s: " % i
            logging.debug("%sworks with %s devices", name,
                          [_.str_short() for _ in new_devices[i]])
            thread = threading.Thread(target=_unplug,
                                      args=(new_devices[i], qdev,
                                            monitors[i]))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        logging.debug("All threads finished.")

    def verify_qtree_unsupported(params, info_qtree, info_block, qdev):
        # Fallback used when the monitor lacks "info qtree".
        return logging.warn("info qtree not supported. Can't verify qtree vs. "
                            "guest disks.")

    vm = env.get_vm(params['main_vm'])
    qdev = vm.devices
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
    out = vm.monitor.human_monitor_cmd("info qtree", debug=False)
    if "unknown command" in str(out):
        verify_qtree = verify_qtree_unsupported

    stg_image_name = params['stg_image_name']
    # Relative image names are rooted in the data dir.
    if not stg_image_name[0] == "/":
        stg_image_name = "%s/%s" % (data_dir.get_data_dir(), stg_image_name)
    stg_image_num = int(params['stg_image_num'])
    stg_params = params.get('stg_params', '').split(' ')
    # Re-join tokens that were split on a trailing backslash.
    i = 0
    while i < len(stg_params) - 1:
        if not stg_params[i].strip():
            i += 1
            continue
        if stg_params[i][-1] == '\\':
            stg_params[i] = '%s %s' % (stg_params[i][:-1],
                                       stg_params.pop(i + 1))
        i += 1

    param_matrix = {}
    for i in range(len(stg_params)):
        if not stg_params[i].strip():
            continue
        (cmd, parm) = stg_params[i].split(':', 1)
        # ',' separated list of values
        parm = parm.split(',')
        # Re-join values that were split on a trailing backslash.
        j = 0
        while j < len(parm) - 1:
            if parm[j][-1] == '\\':
                parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1))
            j += 1
        param_matrix[cmd] = parm

    # Modprobe the module if specified in config file
    module = params.get("modprobe_module")
    if module:
        session.cmd("modprobe %s" % module)

    stress_cmd = params.get('stress_cmd')
    if stress_cmd:
        # Ensure the stresser is killed when the test exits, however it exits.
        funcatexit.register(env, params.get('type'), stop_stresser, vm,
                            params.get('stress_kill_cmd'))
        stress_session = vm.wait_for_login(timeout=10)
        for _ in range(int(params.get('no_stress_cmds', 1))):
            stress_session.sendline(stress_cmd)

    rp_times = int(params.get("repeat_times", 1))
    queues = params.get("multi_disk_type") == "parallel"
    if queues:  # parallel
        queues = range(len(vm.monitors))
        hotplug = hotplug_parallel
        unplug = unplug_parallel
        monitor = vm.monitors
        # LOCK serializes qdev mutation across the hotplug/unplug threads.
        global LOCK
        LOCK = threading.Lock()
    else:       # serial
        queues = range(1)
        hotplug = hotplug_serial
        unplug = unplug_serial
        monitor = vm.monitor
    context_msg = "Running sub test '%s' %s"
    error_context.context("Verify disk before test", logging.info)
    info_qtree = vm.monitor.info('qtree', False)
    info_block = vm.monitor.info_block(False)
    verify_qtree(params, info_qtree, info_block, qdev)
    for iteration in range(rp_times):
        error_context.context("Hotplugging/unplugging devices, iteration %d"
                              % iteration, logging.info)
        sub_type = params.get("sub_type_before_plug")
        if sub_type:
            error_context.context(context_msg % (sub_type, "before hotplug"),
                                  logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        error_context.context("Insert devices into qdev", logging.debug)
        # Mark the VM dirty while qdev and the real VM state may diverge.
        qdev.set_dirty()
        new_devices = [[] for _ in queues]
        new_devices, params = insert_into_qdev(qdev, param_matrix,
                                               stg_image_num, params,
                                               new_devices)
        error_context.context("Hotplug the devices", logging.debug)
        hotplug(new_devices, monitor)
        time.sleep(float(params.get('wait_after_hotplug', 0)))

        error_context.context("Verify disks after hotplug", logging.debug)
        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        vm.verify_alive()
        verify_qtree(params, info_qtree, info_block, qdev)
        qdev.set_clean()

        sub_type = params.get("sub_type_after_plug")
        if sub_type:
            error_context.context(context_msg % (sub_type, "after hotplug"),
                                  logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        sub_type = params.get("sub_type_before_unplug")
        if sub_type:
            error_context.context(context_msg % (sub_type, "before hotunplug"),
                                  logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        error_context.context("Unplug and remove the devices", logging.debug)
        # Pause guest I/O around the unplug to avoid in-flight errors.
        if stress_cmd:
            session.cmd(params["stress_stop_cmd"])
        unplug(new_devices, qdev, monitor)
        if stress_cmd:
            session.cmd(params["stress_cont_cmd"])
        _postprocess_images()

        error_context.context("Verify disks after unplug", logging.debug)
        time.sleep(float(params.get('wait_after_unplug', 0)))
        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        vm.verify_alive()
        verify_qtree(params, info_qtree, info_block, qdev)
        # we verified the unplugs, set the state to 0
        for _ in range(qdev.get_state()):
            qdev.set_clean()

        sub_type = params.get("sub_type_after_unplug")
        if sub_type:
            error_context.context(context_msg % (sub_type, "after hotunplug"),
                                  logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

    # Check for various KVM failures
    error_context.context("Validating VM after all disk hotplug/unplugs",
                          logging.debug)
    vm.verify_alive()
    out = session.cmd_output('dmesg')
    if "I/O error" in out:
        logging.warn(out)
        test.error("I/O error messages occured in dmesg, "
                   "check the log for details.")
def run(test, params, env):
    """
    This tests the disk hotplug/unplug functionality.
    1) prepares multiple disks to be hotplugged
    2) hotplugs them
    3) verifies that they are in qtree/guest system/...
    4) stop I/O stress_cmd
    5) unplugs them
    6) continue I/O stress_cmd
    7) verifies they are not in qtree/guest system/...
    8) repeats $repeat_times

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def verify_qtree(params, info_qtree, info_block, qdev):
        """
        Verifies that params, info qtree, info block and /proc/scsi/ matches

        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :param info_qtree: Output of "info qtree" monitor command
        :type info_qtree: string
        :param info_block: Output of "info block" monitor command
        :type info_block: dict of dicts
        :param qdev: qcontainer representation
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        """
        # Each check returns an error count; any non-zero total fails the test.
        err = 0
        qtree = qemu_qtree.QtreeContainer()
        qtree.parse_info_qtree(info_qtree)
        disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
        (tmp1, tmp2) = disks.parse_info_block(info_block)
        err += tmp1 + tmp2
        err += disks.generate_params()
        err += disks.check_disk_params(params)
        if err:
            logging.error("info qtree:\n%s", info_qtree)
            logging.error("info block:\n%s", info_block)
            logging.error(qdev.str_bus_long())
            test.fail("%s errors occurred while verifying"
                      " qtree vs. params" % err)

    def _create_params_matrix():
        # Build {param_name: [candidate values]} from the stg_params string;
        # backslash-continued tokens are re-joined before splitting on ':'.
        matrix = {}
        stg_image_name = params['stg_image_name']
        # Relative image names are rooted in the data dir.
        if not stg_image_name[0] == "/":
            stg_image_name = "%s/%s" % (data_dir.get_data_dir(),
                                        stg_image_name)
        matrix['stg_image_name'] = stg_image_name
        stg_params = params.get('stg_params', '').split(' ')
        for i in range(len(stg_params)):
            if not stg_params[i].strip():
                continue
            if stg_params[i][-1] == '\\':
                stg_params[i] = '%s %s' % (stg_params[i][:-1],
                                           stg_params.pop(i + 1))
            if not stg_params[i].strip():
                continue
            (cmd, parm) = stg_params[i].split(':', 1)
            # ',' separated list of values
            parm = parm.split(',')
            for j in range(len(parm)):
                if parm[j][-1] == '\\':
                    parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1))
            matrix[cmd] = parm
        return matrix

    def configure_images_params(params):
        # Pre-create the stg* images with randomized per-disk parameters.
        params_matrix = _create_params_matrix()
        _formats = params_matrix.pop('fmt', [params.get('drive_format')])
        formats = _formats[:]
        usb_port_occupied = 0
        usb_max_port = params.get('usb_max_port', 6)
        set_drive_bus = params.get('set_drive_bus', 'yes') == 'yes'
        no_disks = int(params['stg_image_num'])
        i = 0
        while i < no_disks:
            # Set the format
            if len(formats) < 1:
                if i == 0:
                    test.error("Fail to add any disks, probably bad"
                               " configuration.")
                logging.warn("Can't create desired number '%s' of disk types "
                             "'%s'. Using '%d' no disks.", no_disks,
                             _formats, i)
                break
            name = 'stg%d' % i
            args = {'name': name,
                    'filename': params_matrix['stg_image_name'] % i}
            fmt = random.choice(formats)
            drive_bus = None
            if set_drive_bus and fmt != 'virtio':
                drive_bus = str(i)
            # Map the logical format name to device type + HBA where needed.
            if fmt == 'virtio_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'virtio-scsi-pci'
            elif fmt == 'lsi_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'lsi53c895a'
            elif fmt == 'spapr_vscsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'spapr-vscsi'
            elif fmt == 'usb2':
                usb_port_occupied += 1
                # NOTE(review): once usb ports are exhausted this 'continue'
                # skips 'i += 1'; if 'usb2' is the only remaining format the
                # loop can spin forever -- confirm against upstream intent.
                if usb_port_occupied > int(usb_max_port):
                    continue
                args['fmt'] = fmt
            else:
                args['fmt'] = fmt
            args['drive_bus'] = drive_bus
            # Other params
            for key, value in params_matrix.items():
                args[key] = random.choice(value)
            env_process.preprocess_image(
                test, convert_params(params, args).object_params(name), name)
            i += 1

    def _postprocess_images():
        # remove and check the images
        _disks = []
        for disk in params['images'].split(' '):
            if disk.startswith("stg"):
                env_process.postprocess_image(test, params.object_params(disk),
                                              disk)
            else:
                _disks.append(disk)
            params['images'] = " ".join(_disks)

    def verify_qtree_unsupported(params, info_qtree, info_block, qdev):
        # Fallback used when the monitor lacks "info qtree".
        return logging.warn("info qtree not supported. Can't verify qtree vs. "
                            "guest disks.")

    def enable_driver_verifier(driver, timeout=300):
        # Windows-only: make sure driver verifier is active for the driver.
        return utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver, timeout)

    def _initial_win_drives():
        # Generator yielding the drive letter of each newly formatted
        # non-system Windows disk (disks[0] presumed to be the system disk).
        size = params['stg_image_size']
        disks = utils_disk.get_windows_disks_index(session, size)
        if not utils_disk.update_windows_disk_attributes(session, disks):
            test.fail("Failed to update windows disk attributes.")
        for disk in disks[1:24]:
            yield utils_disk.configure_empty_windows_disk(session, disk,
                                                          size)[0]

    def run_stress_iozone():
        # Windows I/O load: iozone on every hotplugged drive letter.
        error_context.context("Run iozone stress after hotplug",
                              logging.info)
        iozone = generate_instance(params, vm, 'iozone')
        try:
            iozone_cmd_option = params['iozone_cmd_option']
            iozone_timeout = float(params['iozone_timeout'])
            for letter in _initial_win_drives():
                iozone.run(iozone_cmd_option.format(letter), iozone_timeout)
        finally:
            iozone.clean()

    def run_stress_dd():
        # Linux I/O load: dd onto every non-system /dev/[svh]d* device.
        error_context.context("Run dd stress after hotplug", logging.info)
        output = session.cmd_output(
            params.get("get_dev_cmd", "ls /dev/[svh]d*"))
        # The device that has numbered partitions is taken as the system disk.
        system_dev = re.findall(r"/dev/[svh]d\w+(?=\d+)", output)[0]
        for dev in re.split(r"\s+", output):
            if not dev:
                continue
            if not re.findall(system_dev, dev):
                session.cmd(params['dd_cmd'].format(dev),
                            int(params['dd_timeout']))

    # Tune the shared plug helper's timeouts from test parameters.
    Monitor.CONNECT_TIMEOUT = params.get_numeric('connect_timeout', 60)
    BlockDevicesPlug.ACQUIRE_LOCK_TIMEOUT = params.get_numeric(
        'acquire_lock_timeout', 20)
    BlockDevicesPlug.VERIFY_UNPLUG_TIMEOUT = params.get_numeric(
        'verify_unplug_timeout', 60)

    configure_images_params(params)
    params['start_vm'] = 'yes'
    env_process.preprocess_vm(test, params, env, params["main_vm"])
    vm = env.get_vm(params['main_vm'])
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
    is_windows = params['os_type'] == 'windows'
    if is_windows:
        session = enable_driver_verifier(params['driver_name'])
    out = vm.monitor.human_monitor_cmd("info qtree", debug=False)
    if "unknown command" in str(out):
        verify_qtree = verify_qtree_unsupported

    # Modprobe the module if specified in config file
    module = params.get("modprobe_module")
    if module:
        session.cmd("modprobe %s" % module)

    stress_cmd = params.get('stress_cmd')
    if stress_cmd:
        # Ensure the stresser is killed when the test exits, however it exits.
        funcatexit.register(env, params.get('type'), stop_stresser, vm,
                            params.get('stress_kill_cmd'))
        stress_session = vm.wait_for_login(timeout=10)
        for _ in range(int(params.get('no_stress_cmds', 1))):
            stress_session.sendline(stress_cmd)

    rp_times = int(params.get("repeat_times", 1))
    queues = params.get("multi_disk_type") == "parallel"
    timeout = params.get_numeric('plug_timeout', 300)
    # Select the BlockDevicesPlug method names for the chosen mode.
    if queues:  # parallel
        hotplug, unplug = 'hotplug_devs_threaded', 'unplug_devs_threaded'
    else:       # serial
        hotplug, unplug = 'hotplug_devs_serial', 'unplug_devs_serial'

    context_msg = "Running sub test '%s' %s"
    plug = BlockDevicesPlug(vm)
    for iteration in range(rp_times):
        error_context.context("Hotplugging/unplugging devices, iteration %d"
                              % iteration, logging.info)
        sub_type = params.get("sub_type_before_plug")
        if sub_type:
            error_context.context(context_msg % (sub_type, "before hotplug"),
                                  logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        error_context.context("Hotplug the devices", logging.debug)
        getattr(plug, hotplug)(timeout=timeout)
        time.sleep(float(params.get('wait_after_hotplug', 0)))

        error_context.context("Verify disks after hotplug", logging.debug)
        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        vm.verify_alive()
        verify_qtree(params, info_qtree, info_block, vm.devices)

        sub_type = params.get("sub_type_after_plug")
        if sub_type:
            error_context.context(context_msg % (sub_type, "after hotplug"),
                                  logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)
        run_stress_iozone() if is_windows else run_stress_dd()

        sub_type = params.get("sub_type_before_unplug")
        if sub_type:
            error_context.context(context_msg % (sub_type, "before hotunplug"),
                                  logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        error_context.context("Unplug and remove the devices", logging.debug)
        # Pause guest I/O around the unplug to avoid in-flight errors.
        if stress_cmd:
            session.cmd(params["stress_stop_cmd"])
        getattr(plug, unplug)(timeout=timeout)
        if stress_cmd:
            session.cmd(params["stress_cont_cmd"])
        _postprocess_images()

        error_context.context("Verify disks after unplug", logging.debug)
        time.sleep(float(params.get('wait_after_unplug', 0)))
        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        vm.verify_alive()
        verify_qtree(params, info_qtree, info_block, vm.devices)

        sub_type = params.get("sub_type_after_unplug")
        if sub_type:
            error_context.context(context_msg % (sub_type, "after hotunplug"),
                                  logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)
        # Re-create the images so the next iteration can hotplug them again.
        configure_images_params(params)

    # Check for various KVM failures
    error_context.context("Validating VM after all disk hotplug/unplugs",
                          logging.debug)
    vm.verify_alive()
    out = session.cmd_output('dmesg')
    if "I/O error" in out:
        logging.warn(out)
        test.error("I/O error messages occured in dmesg, "
                   "check the log for details.")
# Shut down qemu before replacing the guest image, then copy a known-good
# image from the NFS server and prepare any auxiliary files for local use.
try:
    error.context("Quit qemu-kvm before copying guest image")
    vm.monitor.quit()
# Fixed: 'except Exception, e' is Python-2-only syntax; 'as' works on both
# Python 2.6+ and Python 3 and matches the rest of the file.
except Exception as e:
    # Best-effort shutdown: the copy proceeds even if quit fails.
    logging.warn(e)
from virttest import utils_test
error.context("Copy image from NFS Server")
utils_test.run_image_copy(test, params, env)
image = '%s.%s' % (params['image_name'], params['image_format'])
image_name = os.path.basename(image)
src = params.get('images_good')
dst = '%s/%s' % (data_dir.get_data_dir(), image)
mount_point = params.get("dst_dir")
if mount_point and src:
    # Re-copy the pristine image from NFS when the test exits.
    funcatexit.register(env, params.get("type"), copy_file_from_nfs, src,
                        dst, mount_point, image_name)
vm = env.get_vm(params["main_vm"])
local_dir = params.get("local_dir")
if local_dir:
    local_dir = utils_misc.get_path(test.bindir, local_dir)
else:
    local_dir = test.bindir
if params.get("copy_to_local"):
    # Each named param points at an NFS-side file to mirror locally.
    for param in params.get("copy_to_local").split():
        l_value = params.get(param)
        if l_value:
            need_copy = True
            nfs_link = utils_misc.get_path(test.bindir, l_value)
            i_name = os.path.basename(l_value)
            local_link = os.path.join(local_dir, i_name)
def run_multi_disk_random_hotplug(test, params, env):
    """
    This tests the disk hotplug/unplug functionality.
    1) prepares multiple disks to be hotplugged
    2) hotplugs them
    3) verifies that they are in qtree/guest system/...
    4) unplugs them
    5) verifies they are not in qtree/guest system/...
    6) repeats $repeat_times
    *) During the whole test stress_cmd might be executed

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    @error.context_aware
    def verify_qtree(params, info_qtree, info_block, proc_scsi, qdev):
        """
        Verifies that params, info qtree, info block and /proc/scsi/ matches

        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :param info_qtree: Output of "info qtree" monitor command
        :type info_qtree: string
        :param info_block: Output of "info block" monitor command
        :type info_block: dict of dicts
        :param proc_scsi: Output of "/proc/scsi/scsi" guest file
        :type proc_scsi: string
        :param qdev: qemu_devices representation
        :type qdev: virttest.qemu_devices.DevContainer
        """
        # Each check returns an error count; any non-zero total fails.
        err = 0
        qtree = qemu_qtree.QtreeContainer()
        qtree.parse_info_qtree(info_qtree)
        disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
        (tmp1, tmp2) = disks.parse_info_block(info_block)
        err += tmp1 + tmp2
        err += disks.generate_params()
        err += disks.check_disk_params(params)
        (tmp1, tmp2, _, _) = disks.check_guests_proc_scsi(proc_scsi)
        err += tmp1 + tmp2

        if err:
            logging.error("info qtree:\n%s", info_qtree)
            logging.error("info block:\n%s", info_block)
            logging.error("/proc/scsi/scsi:\n%s", proc_scsi)
            logging.error(qdev.str_bus_long())
            raise error.TestFail("%s errors occurred while verifying"
                                 " qtree vs. params" % err)

    @error.context_aware
    def insert_into_qdev(qdev, param_matrix, no_disks, params):
        """
        Inserts no_disks disks int qdev using randomized args from param_matrix

        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.DevContainer
        :param param_matrix: Matrix of randomizable params
        :type param_matrix: list of lists
        :param no_disks: Desired number of disks
        :type no_disks: integer
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :return: (newly added devices, number of added disks)
        :rtype: tuple(list, integer)
        """
        new_devices = []
        _new_devs_fmt = ""
        _formats = param_matrix.pop('fmt', [params.get('drive_format')])
        formats = _formats[:]
        i = 0
        while i < no_disks:
            # Set the format
            # Formats that failed insertion are dropped; stop when none left.
            if len(formats) < 1:
                logging.warn("Can't create desired number '%s' of disk types "
                             "'%s'. Using '%d' no disks.", no_disks,
                             _formats, i)
                break
            name = 'stg%d' % i
            args = {'name': name, 'filename': stg_image_name % i}
            fmt = random.choice(formats)
            # Map the logical format name to device type + HBA where needed.
            if fmt == 'virtio_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'virtio-scsi-pci'
            elif fmt == 'lsi_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'lsi53c895a'
            else:
                args['fmt'] = fmt
            # Other params
            for key, value in param_matrix.iteritems():
                args[key] = random.choice(value)

            devs = qdev.images_define_by_variables(**args)
            try:
                for dev in devs:
                    qdev.insert(dev, force=False)
            except qemu_devices.DeviceInsertError:
                # All buses are full, (TODO add bus) or remove this format
                for dev in devs:
                    if dev in qdev:
                        qdev.remove(dev, recursive=True)
                formats.remove(fmt)
                continue

            # TODO: Modify check_disk_params to use vm.devices
            # 1) modify PCI bus to accept full pci addr (02.0, 01.3, ...)
            # 2) add all devices into qemu_devices according to qtree
            # 3) check qtree vs. qemu_devices PCI representation (+children)
            #    (use qtree vs devices, if key and value_qtree == value_devices
            #    match the device and remove it from comparison.
            #    Also use blacklist to remove unnecessary stuff (like
            #    kvmclock, smbus-eeprom, ... from qtree and drive, ... from
            #    devices)
            # => then modify this to use qtree verification
            params = convert_params(params, args)
            env_process.preprocess_image(test, params.object_params(name),
                                         name)
            new_devices.extend(devs)
            _new_devs_fmt += "%s(%s) " % (name, fmt)
            i += 1
        if _new_devs_fmt:
            logging.info("Adding disks: %s", _new_devs_fmt[:-1])
        # Restore 'fmt' so the matrix can be reused next iteration.
        param_matrix['fmt'] = _formats
        return new_devices, params

    @error.context_aware
    def hotplug_serial(new_devices, monitor):
        """
        Do the actual hotplug of the new_devices using monitor monitor.

        :param new_devices: List of devices which should be hotplugged
        :type new_devices: List of virttest.qemu_devices.QBaseDevice
        :param monitor: Monitor which should be used for hotplug
        :type monitor: virttest.qemu_monitor.Monitor
        """
        err = []
        for device in new_devices:
            time.sleep(float(params.get('wait_between_hotplugs', 0)))
            out = device.hotplug(monitor)
            # True = verified, False = failed, anything else = unverified.
            out = device.verify_hotplug(out, monitor)
            err.append(out)

        if err == [True] * len(err):  # No failures or unverified states
            logging.debug("Hotplug status: verified %d", len(err))
            return
        failed = err.count(False)
        passed = err.count(True)
        unverif = len(err) - failed - passed
        if failed == 0:
            logging.warn("Hotplug status: verified %d, unverified %d",
                         passed, unverif)
        else:
            logging.error("Hotplug status: verified %d, unverified %d, failed "
                          "%d", passed, unverif, failed)
            raise error.TestFail("Hotplug of some devices failed.")

    @error.context_aware
    def unplug_serial(new_devices, qdev, monitor):
        """
        Do the actual unplug of new_devices using monitor monitor

        :param new_devices: List of devices which should be hotplugged
        :type new_devices: List of virttest.qemu_devices.QBaseDevice
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.DevContainer
        :param monitor: Monitor which should be used for hotplug
        :type monitor: virttest.qemu_monitor.Monitor
        """
        failed = 0
        passed = 0
        unverif = 0
        # Unplug in reverse order; devices already removed alongside a
        # previous one are skipped.
        for device in new_devices[::-1]:
            if device in qdev:
                time.sleep(float(params.get('wait_between_unplugs', 0)))
                out = qdev.unplug(device, monitor, True)
            else:
                continue
            if out is True:
                passed += 1
            elif out is False:
                failed += 1
            else:
                unverif += 1
        # remove the images
        _disks = []
        for disk in params['images'].split(' '):
            if disk.startswith("stg"):
                env_process.postprocess_image(test, params.object_params(disk),
                                              disk)
            else:
                _disks.append(disk)
            params['images'] = " ".join(_disks)
        if failed == 0 and unverif == 0:
            logging.debug("Unplug status: verified %d", passed)
        elif failed == 0:
            logging.warn("Unplug status: verified %d, unverified %d",
                         passed, unverif)
        else:
            logging.error("Unplug status: verified %d, unverified %d, failed "
                          "%d", passed, unverif, failed)
            raise error.TestFail("Unplug of some devices failed.")

    vm = env.get_vm(params['main_vm'])
    monitor = vm.monitor
    qdev = vm.devices
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
    out = vm.monitor.human_monitor_cmd("info qtree", debug=False)
    if "unknown command" in str(out):
        # Fixed: the fallback previously took only 3 arguments while
        # verify_qtree is always called with 5 -> TypeError when this path
        # was hit.  Also fixed the missing space in the message ("qtreevs.").
        def verify_qtree(_params, _info_qtree, _info_block, _proc_scsi,
                         _qdev):
            logging.warn("info qtree not supported. Can't verify qtree "
                         "vs. guest disks.")

    stg_image_name = params['stg_image_name']
    stg_image_num = int(params['stg_image_num'])
    stg_params = params.get('stg_params', '').split(' ')
    # Re-join tokens that were split on a trailing backslash.
    i = 0
    while i < len(stg_params) - 1:
        if not stg_params[i].strip():
            i += 1
            continue
        if stg_params[i][-1] == '\\':
            stg_params[i] = '%s %s' % (stg_params[i][:-1],
                                       stg_params.pop(i + 1))
        i += 1

    param_matrix = {}
    for i in xrange(len(stg_params)):
        if not stg_params[i].strip():
            continue
        (cmd, parm) = stg_params[i].split(':', 1)
        # ',' separated list of values
        parm = parm.split(',')
        # Re-join values that were split on a trailing backslash.
        j = 0
        while j < len(parm) - 1:
            if parm[j][-1] == '\\':
                parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1))
            j += 1
        param_matrix[cmd] = parm

    # Modprobe the module if specified in config file
    module = params.get("modprobe_module")
    if module:
        session.cmd("modprobe %s" % module)

    stress_cmd = params.get('stress_cmd')
    if stress_cmd:
        # Ensure the stresser is killed when the test exits, however it exits.
        funcatexit.register(env, params.get('type'), stop_stresser, vm,
                            params.get('stress_kill_cmd'))
        stress_session = vm.wait_for_login(timeout=10)
        for _ in xrange(int(params.get('no_stress_cmds', 1))):
            stress_session.sendline(stress_cmd)

    rp_times = int(params.get("repeat_times", 1))
    context_msg = "Running sub test '%s' %s"
    error.context("Verify before hotplug")
    info_qtree = vm.monitor.info('qtree', False)
    info_block = vm.monitor.info_block(False)
    proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
    verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)
    for iteration in xrange(rp_times):
        sub_type = params.get("sub_type_before_plug")
        if sub_type:
            error.context(context_msg % (sub_type, "before hotplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        error.context("Hotplugging devices, iteration %d" % iteration)
        # Mark the VM dirty while qdev and the real VM state may diverge.
        qdev.set_dirty()
        new_devices, params = insert_into_qdev(qdev, param_matrix,
                                               stg_image_num, params)

        hotplug_serial(new_devices, monitor)
        time.sleep(float(params.get('wait_after_hotplug', 0)))
        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
        verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)
        qdev.set_clean()

        sub_type = params.get("sub_type_after_plug")
        if sub_type:
            error.context(context_msg % (sub_type, "after hotplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        sub_type = params.get("sub_type_before_unplug")
        if sub_type:
            error.context(context_msg % (sub_type, "before hotunplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        unplug_serial(new_devices, qdev, monitor)
        time.sleep(float(params.get('wait_after_unplug', 0)))
        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
        verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)
        # we verified the unplugs, set the state to 0
        for _ in xrange(qdev.get_state()):
            qdev.set_clean()

        sub_type = params.get("sub_type_after_unplug")
        if sub_type:
            error.context(context_msg % (sub_type, "after hotunplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

    # Check for various KVM failures
    vm.verify_alive()
def run(test, params, env):
    """
    Timer device boot guest:

    1) Check host clock's sync status with chronyd
    2) Add some load on host (Optional)
    3) Boot the guest with specific clock source
    4) Check the clock source currently used on guest
    5) Do some file operation on guest (Optional)
    6) Check the system time on guest and host (Optional)
    7) Check the hardware time on guest (linux only)
    8) Sleep period of time before reboot (Optional)
    9) Reboot guest (Optional)
    10) Check the system time on guest and host (Optional)
    11) Check the hardware time on guest (Optional)
    12) Restore guest's clock source

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def get_hwtime(session):
        """
        Get guest's hardware clock as epoch seconds.

        :param session: VM session.
        :return: guest hardware time (float, seconds since epoch).
        :raise IndexError: if the hwclock output cannot be parsed.
        """
        hwclock_time_command = params.get("hwclock_time_command",
                                          "hwclock -u")
        hwclock_time_filter_re = params.get("hwclock_time_filter_re",
                                            r"(\d+-\d+-\d+ \d+:\d+:\d+)")
        hwclock_time_format = params.get("hwclock_time_format",
                                         "%Y-%m-%d %H:%M:%S")
        output = session.cmd_output_safe(hwclock_time_command)
        try:
            str_time = re.findall(hwclock_time_filter_re, output)[0]
            guest_time = time.mktime(
                time.strptime(str_time, hwclock_time_format))
        except IndexError:
            # Regex did not match at all: str_time is unbound here, so log
            # the raw output instead (the original logged str_time and died
            # with NameError, masking the real parsing failure).
            test.log.debug("Fail to parse hwclock output '%s' with "
                           "pattern '%s'", output, hwclock_time_filter_re)
            raise
        except Exception as err:
            test.log.debug("(time_format, time_string): (%s, %s)",
                           hwclock_time_format, str_time)
            raise err
        return guest_time

    def verify_timedrift(session, is_hardware=False):
        """
        Verify timedrift between host and guest.

        :param session: VM session.
        :param is_hardware: if need to verify guest's hardware time.
        :raise test.fail: when the drift exceeds the configured threshold.
        """
        # Command to run to get the current time
        time_command = params["time_command"]
        # Filter which should match a string to be passed to time.strptime()
        time_filter_re = params["time_filter_re"]
        # Time format for time.strptime()
        time_format = params["time_format"]
        timerdevice_drift_threshold = float(
            params.get("timerdevice_drift_threshold", 3))

        # Fixed typo in user-facing message: was "harware".
        time_type = "system" if not is_hardware else "hardware"
        error_context.context("Check the %s time on guest" % time_type,
                              test.log.info)
        host_time, guest_time = utils_test.get_time(session, time_command,
                                                    time_filter_re,
                                                    time_format)
        if is_hardware:
            guest_time = get_hwtime(session)
        drift = abs(float(host_time) - float(guest_time))
        if drift > timerdevice_drift_threshold:
            test.fail("The guest's %s time is different with"
                      " host's system time. Host time: '%s', guest time:"
                      " '%s'" % (time_type, host_time, guest_time))

    def get_current_clksrc(session):
        """
        Return the guest's current clocksource, normalized to a known name
        when it contains one of the common clocksource identifiers.

        :param session: VM session.
        """
        cmd = "cat /sys/devices/system/clocksource/"
        cmd += "clocksource0/current_clocksource"
        current_clksrc = session.cmd_output_safe(cmd)
        if "kvm-clock" in current_clksrc:
            return "kvm-clock"
        elif "tsc" in current_clksrc:
            return "tsc"
        elif "timebase" in current_clksrc:
            return "timebase"
        elif "acpi_pm" in current_clksrc:
            return "acpi_pm"
        return current_clksrc

    def update_clksrc(session, clksrc):
        """
        Update guest's clocksource, this func can work when not login
        into guest with ssh.

        :param session: VM session.
        :param clksrc: expected guest's clocksource.
        """
        avail_cmd = "cat /sys/devices/system/clocksource/clocksource0/"
        avail_cmd += "available_clocksource"
        avail_clksrc = session.cmd_output_safe(avail_cmd)
        if clksrc in avail_clksrc:
            clksrc_cmd = "echo %s > /sys/devices/system/clocksource/" % clksrc
            clksrc_cmd += "clocksource0/current_clocksource"
            status, output = session.cmd_status_output(clksrc_cmd)
            if status:
                # BUG FIX: the original formatted only one value into a
                # two-placeholder string ("... %s, details: %s" % clksrc),
                # raising TypeError and passing `output` as a stray second
                # argument to test.fail().
                test.fail("fail to update guest's clocksource to %s,"
                          "details: %s" % (clksrc, output))
        else:
            test.error("please check the clocksource you want to set, "
                       "it's not supported by current guest, current "
                       "available clocksources: %s" % avail_clksrc)

    error_context.context("sync host time with NTP server", test.log.info)
    clock_sync_command = params["clock_sync_command"]
    process.system(clock_sync_command, shell=True)

    # Optionally add CPU load on the host while the guest is running.
    timerdevice_host_load_cmd = params.get("timerdevice_host_load_cmd")
    if timerdevice_host_load_cmd:
        error_context.context("Add some load on host", test.log.info)
        host_cpu_cnt_cmd = params["host_cpu_cnt_cmd"]
        host_cpu_cnt = int(
            process.system_output(host_cpu_cnt_cmd, shell=True).strip())
        if params["os_type"] == "linux":
            timerdevice_host_load_cmd = timerdevice_host_load_cmd % host_cpu_cnt
            process.system(timerdevice_host_load_cmd, shell=True,
                           ignore_bg_processes=True)
        else:
            # Windows guests: use half the host CPUs through HostStress.
            timerdevice_host_load_cmd = timerdevice_host_load_cmd % int(
                host_cpu_cnt / 2)
            stress_bg = utils_test.HostStress(
                "stress", params, stress_args=timerdevice_host_load_cmd)
            stress_bg.load_stress_tool()
        host_load_stop_cmd = params.get("timerdevice_host_load_stop_cmd",
                                        "pkill -f 'do X=1'")
        # Make sure the host load is killed even if the test fails.
        funcatexit.register(env, params["type"], _system, host_load_stop_cmd)

    params["start_vm"] = "yes"
    env_process.preprocess_vm(test, params, env, params.get("main_vm"))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    error_context.context("Sync guest timezone before test", test.log.info)
    timeout = int(params.get("login_timeout", 360))
    if params["os_type"] == 'linux':
        utils_time.sync_timezone_linux(vm, timeout)
    else:
        utils_time.sync_timezone_win(vm, timeout)

    # Serial login: clocksource switching may disturb network-based logins.
    session = vm.wait_for_serial_login(timeout=timeout)

    timerdevice_clksource = params.get("timerdevice_clksource")
    need_restore_clksrc = False
    if timerdevice_clksource:
        origin_clksrc = get_current_clksrc(session)
        test.log.info("guest is booted with %s", origin_clksrc)
        if timerdevice_clksource != origin_clksrc:
            update_clksrc(session, timerdevice_clksource)
            need_restore_clksrc = True

    error_context.context("check timedrift between guest and host.",
                          test.log.info)
    verify_timedrift(session)
    if params["os_type"] == "linux":
        verify_timedrift(session, is_hardware=True)

    repeat_nums = params.get_numeric("repeat_nums")
    if repeat_nums:
        sleep_time = params["sleep_time"]
        for index in range(repeat_nums):
            time.sleep(int(sleep_time))
            verify_timedrift(session)
            if params["os_type"] == "linux":
                verify_timedrift(session, is_hardware=True)

    if params.get("timerdevice_reboot_test") == "yes":
        sleep_time = params.get("timerdevice_sleep_time")
        if sleep_time:
            error_context.context("Sleep '%s' secs before reboot" % sleep_time,
                                  test.log.info)
            sleep_time = int(sleep_time)
            time.sleep(sleep_time)
        error_context.context(
            "Check timedrift between guest and host "
            "after reboot.", test.log.info)
        # NOTE(review): the pre-reboot serial session is reused afterwards;
        # this works for serial consoles but relies on the console surviving
        # the reboot — confirm against vm.reboot() semantics.
        vm.reboot(timeout=timeout, serial=True)
        verify_timedrift(session)
        if params["os_type"] == "linux":
            verify_timedrift(session, is_hardware=True)

    if need_restore_clksrc:
        update_clksrc(session, origin_clksrc)
    session.close()
def run(test, params, env):
    """
    This tests the disk hotplug/unplug functionality.
    1) prepares multiple disks to be hotplugged
    2) hotplugs them
    3) verifies that they are in qtree/guest system/...
    4) unplugs them
    5) verifies they are not in qtree/guest system/...
    6) repeats $repeat_times
    *) During the whole test stress_cmd might be executed

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def verify_qtree(params, info_qtree, info_block, proc_scsi, qdev):
        """
        Verifies that params, info qtree, info block and /proc/scsi/ matches
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :param info_qtree: Output of "info qtree" monitor command
        :type info_qtree: string
        :param info_block: Output of "info block" monitor command
        :type info_block: dict of dicts
        :param proc_scsi: Output of "/proc/scsi/scsi" guest file
        :type proc_scsi: string
        :param qdev: qcontainer representation
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        """
        err = 0
        qtree = qemu_qtree.QtreeContainer()
        qtree.parse_info_qtree(info_qtree)
        disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes())
        (tmp1, tmp2) = disks.parse_info_block(info_block)
        err += tmp1 + tmp2
        err += disks.generate_params()
        err += disks.check_disk_params(params)
        (tmp1, tmp2, _, _) = disks.check_guests_proc_scsi(proc_scsi)
        err += tmp1 + tmp2
        if err:
            # Dump everything we compared so the mismatch can be debugged.
            logging.error("info qtree:\n%s", info_qtree)
            logging.error("info block:\n%s", info_block)
            logging.error("/proc/scsi/scsi:\n%s", proc_scsi)
            logging.error(qdev.str_bus_long())
            raise error.TestFail("%s errors occurred while verifying"
                                 " qtree vs. params" % err)

    def insert_into_qdev(qdev, param_matrix, no_disks, params, new_devices):
        """
        Inserts no_disks disks int qdev using randomized args from param_matrix
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        :param param_matrix: Matrix of randomizable params
        :type param_matrix: list of lists
        :param no_disks: Desired number of disks
        :type no_disks: integer
        :param params: Dictionary with the test parameters
        :type params: virttest.utils_params.Params
        :return: (newly added devices, number of added disks)
        :rtype: tuple(list, integer)
        """
        dev_idx = 0
        _new_devs_fmt = ""
        _formats = param_matrix.pop('fmt', [params.get('drive_format')])
        formats = _formats[:]
        # Serial mode (single queue) lets qdev pick bus/addr; parallel mode
        # must force them so mixed-order hotplugs don't collide.
        if len(new_devices) == 1:
            strict_mode = None
        else:
            strict_mode = True
        i = 0
        while i < no_disks:
            # Set the format
            if len(formats) < 1:
                if i == 0:
                    raise error.TestError("Fail to add any disks, probably bad"
                                          " configuration.")
                logging.warn("Can't create desired number '%s' of disk types "
                             "'%s'. Using '%d' no disks.", no_disks, _formats,
                             i)
                break
            name = 'stg%d' % i
            args = {'name': name, 'filename': stg_image_name % i}
            fmt = random.choice(formats)
            if fmt == 'virtio_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'virtio-scsi-pci'
            elif fmt == 'lsi_scsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'lsi53c895a'
            elif fmt == 'spapr_vscsi':
                args['fmt'] = 'scsi-hd'
                args['scsi_hba'] = 'spapr-vscsi'
            else:
                args['fmt'] = fmt
            # Other params
            for key, value in param_matrix.iteritems():
                args[key] = random.choice(value)
            # BUG FIX: initialize devs so the except branch below can't hit
            # NameError when images_define_by_variables() itself raises.
            devs = []
            try:
                devs = qdev.images_define_by_variables(**args)
                # parallel test adds devices in mixed order, force bus/addrs
                qdev.insert(devs, strict_mode)
            except utils.DeviceError:
                for dev in devs:
                    if dev in qdev:
                        qdev.remove(dev, recursive=True)
                # This format can't be plugged; drop it and retry another.
                formats.remove(fmt)
                continue
            params = convert_params(params, args)
            env_process.preprocess_image(test, params.object_params(name),
                                         name)
            # Round-robin the new devices over the available queues.
            new_devices[dev_idx].extend(devs)
            dev_idx = (dev_idx + 1) % len(new_devices)
            _new_devs_fmt += "%s(%s) " % (name, fmt)
            i += 1
        if _new_devs_fmt:
            logging.info("Using disks: %s", _new_devs_fmt[:-1])
        param_matrix['fmt'] = _formats
        return new_devices, params

    def _hotplug(new_devices, monitor, prefix=""):
        """
        Do the actual hotplug of the new_devices using monitor monitor.
        :param new_devices: List of devices which should be hotplugged
        :type new_devices: List of virttest.qemu_devices.qdevice.QBaseDevice
        :param monitor: Monitor which should be used for hotplug
        :type monitor: virttest.qemu_monitor.Monitor
        """
        hotplug_outputs = []
        hotplug_sleep = float(params.get('wait_between_hotplugs', 0))
        for device in new_devices:      # Hotplug all devices
            time.sleep(hotplug_sleep)
            hotplug_outputs.append(device.hotplug(monitor))
        time.sleep(hotplug_sleep)
        failed = []
        passed = []
        unverif = []
        for device in new_devices:      # Verify the hotplug status
            out = hotplug_outputs.pop(0)
            out = device.verify_hotplug(out, monitor)
            if out is True:
                passed.append(str(device))
            elif out is False:
                failed.append(str(device))
            else:
                unverif.append(str(device))
        if not failed and not unverif:
            logging.debug("%sAll hotplugs verified (%s)", prefix, len(passed))
        elif not failed:
            logging.warn("%sHotplug status:\nverified %s\nunverified %s",
                         prefix, passed, unverif)
        else:
            logging.error("%sHotplug status:\nverified %s\nunverified %s\n"
                          "failed %s", prefix, passed, unverif, failed)
            logging.error("qtree:\n%s", monitor.info("qtree", debug=False))
            raise error.TestFail("%sHotplug of some devices failed." % prefix)

    def hotplug_serial(new_devices, monitor):
        # Serial mode carries a single queue of devices.
        _hotplug(new_devices[0], monitor)

    def hotplug_parallel(new_devices, monitors):
        # One thread per queue; each thread gets its own monitor.
        threads = []
        for i in xrange(len(new_devices)):
            name = "Th%s: " % i
            logging.debug("%sworks with %s devices", name,
                          [_.str_short() for _ in new_devices[i]])
            thread = threading.Thread(target=_hotplug, name=name[:-2],
                                      args=(new_devices[i], monitors[i],
                                            name))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        logging.debug("All threads finished.")

    def _postprocess_images():
        # remove and check the images
        _disks = []
        for disk in params['images'].split(' '):
            if disk.startswith("stg"):
                env_process.postprocess_image(test, params.object_params(disk),
                                              disk)
            else:
                _disks.append(disk)
            params['images'] = " ".join(_disks)

    def _unplug(new_devices, qdev, monitor, prefix=""):
        """
        Do the actual unplug of new_devices using monitor monitor
        :param new_devices: List of devices which should be hotplugged
        :type new_devices: List of virttest.qemu_devices.qdevice.QBaseDevice
        :param qdev: qemu devices container
        :type qdev: virttest.qemu_devices.qcontainer.DevContainer
        :param monitor: Monitor which should be used for hotplug
        :type monitor: virttest.qemu_monitor.Monitor
        """
        unplug_sleep = float(params.get('wait_between_unplugs', 0))
        unplug_outs = []
        unplug_devs = []
        for device in new_devices[::-1]:    # unplug all devices
            if device in qdev:  # Some devices are removed with previous one
                time.sleep(unplug_sleep)
                unplug_devs.append(device)
                unplug_outs.append(device.unplug(monitor))
                # Remove from qdev even when unplug failed because further in
                # this test we compare VM with qdev, which should be without
                # these devices. We can do this because we already set the VM
                # as dirty.
                if LOCK:
                    LOCK.acquire()
                qdev.remove(device)
                if LOCK:
                    LOCK.release()
        time.sleep(unplug_sleep)
        failed = []
        passed = []
        unverif = []
        for device in unplug_devs:          # Verify unplugs
            _out = unplug_outs.pop(0)
            # unplug effect can be delayed as it waits for OS respone before
            # it removes the device form qtree
            for _ in xrange(50):
                out = device.verify_unplug(_out, monitor)
                if out is True:
                    break
                time.sleep(0.1)
            if out is True:
                passed.append(str(device))
            elif out is False:
                failed.append(str(device))
            else:
                unverif.append(str(device))
        if not failed and not unverif:
            logging.debug("%sAll unplugs verified (%s)", prefix, len(passed))
        elif not failed:
            logging.warn("%sUnplug status:\nverified %s\nunverified %s",
                         prefix, passed, unverif)
        else:
            logging.error("%sUnplug status:\nverified %s\nunverified %s\n"
                          "failed %s", prefix, passed, unverif, failed)
            logging.error("qtree:\n%s", monitor.info("qtree", debug=False))
            raise error.TestFail("%sUnplug of some devices failed." % prefix)

    def unplug_serial(new_devices, qdev, monitor):
        # Serial mode carries a single queue of devices.
        _unplug(new_devices[0], qdev, monitor)

    def unplug_parallel(new_devices, qdev, monitors):
        # One thread per queue; qdev access is guarded by the global LOCK.
        threads = []
        for i in xrange(len(new_devices)):
            name = "Th%s: " % i
            logging.debug("%sworks with %s devices", name,
                          [_.str_short() for _ in new_devices[i]])
            thread = threading.Thread(target=_unplug,
                                      args=(new_devices[i], qdev,
                                            monitors[i]))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        logging.debug("All threads finished.")

    def verify_qtree_unsupported(params, info_qtree, info_block, proc_scsi,
                                 qdev):
        # Fallback used when the monitor lacks "info qtree" support.
        return logging.warn("info qtree not supported. Can't verify qtree vs. "
                            "guest disks.")

    vm = env.get_vm(params['main_vm'])
    qdev = vm.devices
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
    out = vm.monitor.human_monitor_cmd("info qtree", debug=False)
    if "unknown command" in str(out):
        # Monitor can't report qtree; fall back to the warn-only verifier.
        verify_qtree = verify_qtree_unsupported
    stg_image_name = params['stg_image_name']
    if not stg_image_name[0] == "/":
        stg_image_name = "%s/%s" % (data_dir.get_data_dir(), stg_image_name)
    stg_image_num = int(params['stg_image_num'])
    stg_params = params.get('stg_params', '').split(' ')
    # Merge continuation lines (trailing '\') that were split on spaces.
    i = 0
    while i < len(stg_params) - 1:
        if not stg_params[i].strip():
            i += 1
            continue
        if stg_params[i][-1] == '\\':
            stg_params[i] = '%s %s' % (stg_params[i][:-1],
                                       stg_params.pop(i + 1))
        i += 1
    param_matrix = {}
    for i in xrange(len(stg_params)):
        if not stg_params[i].strip():
            continue
        (cmd, parm) = stg_params[i].split(':', 1)
        # ',' separated list of values
        parm = parm.split(',')
        # Merge escaped commas ('\,') back into single values.
        j = 0
        while j < len(parm) - 1:
            if parm[j][-1] == '\\':
                parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1))
            j += 1
        param_matrix[cmd] = parm

    # Modprobe the module if specified in config file
    module = params.get("modprobe_module")
    if module:
        session.cmd("modprobe %s" % module)

    stress_cmd = params.get('stress_cmd')
    if stress_cmd:
        funcatexit.register(env, params.get('type'), stop_stresser, vm,
                            params.get('stress_kill_cmd'))
        stress_session = vm.wait_for_login(timeout=10)
        for _ in xrange(int(params.get('no_stress_cmds', 1))):
            stress_session.sendline(stress_cmd)

    rp_times = int(params.get("repeat_times", 1))
    queues = params.get("multi_disk_type") == "parallel"
    if queues:  # parallel
        queues = xrange(len(vm.monitors))
        hotplug = hotplug_parallel
        unplug = unplug_parallel
        monitor = vm.monitors
        global LOCK
        LOCK = threading.Lock()
    else:   # serial
        queues = xrange(1)
        hotplug = hotplug_serial
        unplug = unplug_serial
        monitor = vm.monitor

    context_msg = "Running sub test '%s' %s"
    error.context("Verify disk before test", logging.info)
    info_qtree = vm.monitor.info('qtree', False)
    info_block = vm.monitor.info_block(False)
    proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
    verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)
    for iteration in xrange(rp_times):
        error.context("Hotplugging/unplugging devices, iteration %d"
                      % iteration, logging.info)
        sub_type = params.get("sub_type_before_plug")
        if sub_type:
            error.context(context_msg % (sub_type, "before hotplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        error.context("Insert devices into qdev", logging.debug)
        qdev.set_dirty()
        new_devices = [[] for _ in queues]
        new_devices, params = insert_into_qdev(qdev, param_matrix,
                                               stg_image_num, params,
                                               new_devices)

        error.context("Hotplug the devices", logging.debug)
        hotplug(new_devices, monitor)
        time.sleep(float(params.get('wait_after_hotplug', 0)))

        error.context("Verify disks after hotplug", logging.debug)
        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        vm.verify_alive()
        proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
        verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)
        qdev.set_clean()

        sub_type = params.get("sub_type_after_plug")
        if sub_type:
            error.context(context_msg % (sub_type, "after hotplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        sub_type = params.get("sub_type_before_unplug")
        if sub_type:
            error.context(context_msg % (sub_type, "before hotunplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

        error.context("Unplug and remove the devices", logging.debug)
        unplug(new_devices, qdev, monitor)
        _postprocess_images()

        error.context("Verify disks after unplug", logging.debug)
        time.sleep(float(params.get('wait_after_unplug', 0)))
        info_qtree = vm.monitor.info('qtree', False)
        info_block = vm.monitor.info_block(False)
        vm.verify_alive()
        proc_scsi = session.cmd_output('cat /proc/scsi/scsi')
        verify_qtree(params, info_qtree, info_block, proc_scsi, qdev)
        # we verified the unplugs, set the state to 0
        for _ in xrange(qdev.get_state()):
            qdev.set_clean()

        sub_type = params.get("sub_type_after_unplug")
        if sub_type:
            error.context(context_msg % (sub_type, "after hotunplug"),
                          logging.info)
            utils_test.run_virt_sub_test(test, params, env, sub_type)

    # Check for various KVM failures
    error.context("Validating VM after all disk hotplug/unplugs",
                  logging.debug)
    vm.verify_alive()
    out = session.cmd_output('dmesg')
    if "I/O error" in out:
        logging.warn(out)
        # BUG FIX: message previously rendered as "...checkthe log..."
        # because of a missing space between concatenated literals.
        raise error.TestWarn("I/O error messages occurred in dmesg, check"
                             " the log for details.")
def run(test, params, env): """ Qemu numa stress test: 1) Boot up a guest and find the node it used 2) Try to allocate memory in that node 3) Run memory heavy stress inside guest 4) Check the memory use status of qemu process 5) Repeat step 2 ~ 4 several times :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ host_numa_node = utils_misc.NumaInfo() if len(host_numa_node.online_nodes) < 2: raise error.TestNAError("Host only has one NUMA node, " "skipping test...") timeout = float(params.get("login_timeout", 240)) test_count = int(params.get("test_count", 4)) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) qemu_pid = vm.get_pid() if test_count < len(host_numa_node.online_nodes): test_count = len(host_numa_node.online_nodes) tmpfs_size = 0 for node in host_numa_node.nodes: node_mem = int(host_numa_node.read_from_node_meminfo(node, "MemTotal")) if tmpfs_size < node_mem: tmpfs_size = node_mem tmpfs_path = params.get("tmpfs_path", "tmpfs_numa_test") tmpfs_path = utils_misc.get_path(data_dir.get_tmp_dir(), tmpfs_path) tmpfs_write_speed = int(params.get("tmpfs_write_speed", 10240)) dd_timeout = tmpfs_size / tmpfs_write_speed * 1.5 mount_fs_size = "size=%dK" % tmpfs_size memory_file = utils_misc.get_path(tmpfs_path, "test") dd_cmd = "dd if=/dev/urandom of=%s bs=1k count=%s" % (memory_file, tmpfs_size) if not os.path.isdir(tmpfs_path): os.mkdir(tmpfs_path) numa_node_malloc = -1 most_used_node, memory_used = max_mem_map_node(host_numa_node, qemu_pid) for test_round in range(test_count): if utils_memory.freememtotal() < tmpfs_size: raise error.TestError("Don't have enough memory to execute this " "test after %s round" % test_round) error.context("Executing stress test round: %s" % test_round, logging.info) numa_node_malloc = most_used_node numa_dd_cmd = "numactl -m %s %s" % (numa_node_malloc, dd_cmd) error.context("Try to allocate memory in node %s" 
% numa_node_malloc, logging.info) try: utils_misc.mount("none", tmpfs_path, "tmpfs", perm=mount_fs_size) funcatexit.register(env, params.get("type"), utils_misc.umount, "none", tmpfs_path, "tmpfs") utils.system(numa_dd_cmd, timeout=dd_timeout) except Exception, error_msg: if "No space" in str(error_msg): pass else: raise error.TestFail("Can not allocate memory in node %s." " Error message:%s" % (numa_node_malloc, str(error_msg))) error.context("Run memory heavy stress in guest", logging.info) autotest_control.run(test, params, env) error.context("Get the qemu process memory use status", logging.info) node_after, memory_after = max_mem_map_node(host_numa_node, qemu_pid) if node_after == most_used_node and memory_after >= memory_used: raise error.TestFail("Memory still stick in " "node %s" % numa_node_malloc) else: most_used_node = node_after memory_used = memory_after utils_misc.umount("none", tmpfs_path, "tmpfs") funcatexit.unregister(env, params.get("type"), utils_misc.umount, "none", tmpfs_path, "tmpfs") session.cmd("sync; echo 3 > /proc/sys/vm/drop_caches") utils_memory.drop_caches()
def run_timerdevice_boot(test, params, env): """ Timer device boot guest: 1) Sync the host system time with ntp server 2) Add some load on host (Optional) 3) Boot the guest with specific clock source 4) Check the clock source currently used on guest 5) Do some file operation on guest (Optional) 6) Check the system time on guest and host (Optional) 7) Check the hardware time on guest and host (Optional) 8) Sleep period of time before reboot (Optional) 9) Reboot guest (Optional) 10) Check the system time on guest and host (Optional) 11) Check the hardware time on guest and host (Optional) @param test: QEMU test object. @param params: Dictionary with test parameters. @param env: Dictionary with the test environment. """ def verify_guest_clock_source(session, expected): error.context("Check the current clocksource in guest", logging.info) cmd = "cat /sys/devices/system/clocksource/" cmd += "clocksource0/current_clocksource" if not expected in session.cmd(cmd): raise error.TestFail("Guest didn't use '%s' clocksource" % expected) error.context("Sync the host system time with ntp server", logging.info) utils.system("ntpdate clock.redhat.com") timerdevice_host_load_cmd = params.get("timerdevice_host_load_cmd") if timerdevice_host_load_cmd: error.context("Add some load on host", logging.info) utils.system(timerdevice_host_load_cmd) host_load_stop_cmd = params["timerdevice_host_load_stop_cmd"] funcatexit.register(env, params["type"], utils.system, host_load_stop_cmd) error.context("Boot a guest with kvm-clock", logging.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) timerdevice_clksource = params.get("timerdevice_clksource") if timerdevice_clksource: try: verify_guest_clock_source(session, timerdevice_clksource) except Exception: clksrc = timerdevice_clksource error.context("Shutdown guest") vm.destroy() env.unregister_vm(vm.name) error.context("Update guest kernel cli to 
'%s'" % clksrc, logging.info) image_filename = storage.get_image_filename( params, data_dir.get_data_dir()) grub_file = params.get("grub_file", "/boot/grub2/grub.cfg") kernel_cfg_pattern = params.get("kernel_cfg_pos_reg", r".*vmlinuz-\d+.*") disk_obj = utils_disk.GuestFSModiDisk(image_filename) kernel_cfg_original = disk_obj.read_file(grub_file) try: logging.warn("Update the first kernel entry to" " '%s' only" % clksrc) kernel_cfg = re.findall(kernel_cfg_pattern, kernel_cfg_original)[0] except IndexError, detail: raise error.TestError("Couldn't find the kernel config, regex" " pattern is '%s', detail: '%s'" % (kernel_cfg_pattern, detail)) if "clocksource=" in kernel_cfg: kernel_cfg_new = re.sub("clocksource=.*?\s", "clocksource=%s" % clksrc, kernel_cfg) else: kernel_cfg_new = "%s %s" % (kernel_cfg, "clocksource=%s" % clksrc) disk_obj.replace_image_file_content(grub_file, kernel_cfg, kernel_cfg_new) error.context("Boot the guest", logging.info) vm_name = params["main_vm"] cpu_model_flags = params.get("cpu_model_flags") params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock" env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) error.context("Check the current clocksource in guest", logging.info) verify_guest_clock_source(session, clksrc) error.context("Kill all ntp related processes") session.cmd("pkill ntp; true")