def setup(self, under_fs0):
    """
    Pre-process for uefishell environment, launch vm from the
    UefiShell.iso, and setup a serial session for uefishell.

    :param under_fs0: "yes" if most uefi commands are executed under fs0:\\
    """
    params = self.params
    # Shift every pre-configured cdrom boot index down by one so the
    # uefishell cdrom (added below) can take bootindex 0.
    for cdrom in params.objects("cdroms"):
        boot_index = params.get("boot_index_%s" % cdrom)
        if boot_index is not None:
            params["boot_index_%s" % cdrom] = int(boot_index) + 1
    # Disable booting from disk images; the shell iso must boot first.
    for image in params.objects("images"):
        params["image_boot_%s" % image] = "no"
    params["cdroms"] = "%s %s" % ("uefishell", params["cdroms"])
    params["cdrom_uefishell"] = self.copy_uefishell()
    params["bootindex_uefishell"] = "0"
    # Optionally inject the secureboot PK/KEK variable-store path into
    # the extra qemu command line via %-substitution.
    if params.get("secureboot_pk_kek"):
        params["secureboot_pk_kek"] %= self.copy_secureboot_pk_kek(
            params["pk_kek_filename"])
        params["extra_params"] %= params["secureboot_pk_kek"]
    params["start_vm"] = "yes"
    # Prompt of the UEFI shell on the serial console ("Shell>" or
    # "FS0:\>" style).
    params["shell_prompt"] = r"(Shell|FS\d:\\.*)>"
    params["shell_linesep"] = r"\r\n"
    env_process.process(self.test, params, self.env,
                        env_process.preprocess_image,
                        env_process.preprocess_vm)
    vm = self.env.get_vm(params["main_vm"])
    self.session = vm.wait_for_serial_login()
    if under_fs0 == "yes":
        self.send_command("fs0:")
def test(self):
    """
    Prepare the multi-host migration environment and the test cdrom.

    Selects the migration helper class matching ``mig_protocol``
    (tcp/fd/exec); the source host creates the iso, boots the VM with it
    attached and waits for login, while the destination host only records
    the expected iso path.

    NOTE(review): ``params``/``test``/``env``/``login_timeout`` appear to
    be closure variables of an enclosing run() function — confirm against
    the full file.
    """
    error.context("Preparing migration env and cdroms.")
    mig_protocol = params.get("mig_protocol", "tcp")
    self.mig_type = utils_test.qemu.MultihostMigration
    if mig_protocol == "fd":
        self.mig_type = utils_test.qemu.MultihostMigrationFd
    if mig_protocol == "exec":
        self.mig_type = utils_test.qemu.MultihostMigrationExec
    self.vms = params.get("vms").split(" ")
    self.srchost = params.get("hosts")[0]
    self.dsthost = params.get("hosts")[1]
    self.is_src = params.get("hostid") == self.srchost
    self.mig = self.mig_type(test, params, env, False, )
    # Size of the test iso in MB (default 10).
    self.cdrom_size = int(params.get("cdrom_size", 10))
    if self.is_src:
        # Source host: build the iso and boot the VM with it attached.
        self.cdrom_orig = create_iso_image(params, "orig",
                                           file_size=self.cdrom_size)
        self.cdrom_dir = os.path.dirname(self.cdrom_orig)
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        vm = env.get_vm(self.vms[0])
        vm.wait_for_login(timeout=login_timeout)
    else:
        # Destination host: only compute the iso path, do not create it.
        self.cdrom_orig = create_iso_image(params, "orig", False)
        self.cdrom_dir = os.path.dirname(self.cdrom_orig)
def test(self):
    """
    Prepare the multi-host migration environment and the test cdrom.

    Same flow as the utils_test.qemu variant but using the migration
    classes exposed directly on ``utils_test``.

    NOTE(review): ``params``/``test``/``env``/``login_timeout`` appear to
    be closure variables of an enclosing run() function — confirm against
    the full file.
    """
    error.context("Preparing migration env and cdroms.")
    mig_protocol = params.get("mig_protocol", "tcp")
    self.mig_type = utils_test.MultihostMigration
    if mig_protocol == "fd":
        self.mig_type = utils_test.MultihostMigrationFd
    if mig_protocol == "exec":
        self.mig_type = utils_test.MultihostMigrationExec
    self.vms = params.get("vms").split(" ")
    self.srchost = params.get("hosts")[0]
    self.dsthost = params.get("hosts")[1]
    self.is_src = params.get("hostid") == self.srchost
    self.mig = self.mig_type(
        test,
        params,
        env,
        False,
    )
    # Size of the test iso in MB (default 10).
    self.cdrom_size = int(params.get("cdrom_size", 10))
    if self.is_src:
        # Source host: build the iso and boot the VM with it attached.
        self.cdrom_orig = create_iso_image(params, "orig",
                                           file_size=self.cdrom_size)
        self.cdrom_dir = os.path.dirname(self.cdrom_orig)
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        vm = env.get_vm(self.vms[0])
        vm.wait_for_login(timeout=login_timeout)
    else:
        # Destination host: only compute the iso path, do not create it.
        self.cdrom_orig = create_iso_image(params, "orig", False)
        self.cdrom_dir = os.path.dirname(self.cdrom_orig)
def test(self):
    """
    Prepare the multi-host migration environment and the test floppy.

    The source host creates the floppy image and boots the VM with it;
    the destination host only records the expected floppy path.

    NOTE(review): ``params``/``test``/``env``/``login_timeout`` appear to
    be closure variables of an enclosing run() function — confirm against
    the full file.
    """
    error.context("Preparing migration env and floppies.")
    mig_protocol = params.get("mig_protocol", "tcp")
    self.mig_type = utils_test.MultihostMigration
    if mig_protocol == "fd":
        self.mig_type = utils_test.MultihostMigrationFd
    if mig_protocol == "exec":
        self.mig_type = utils_test.MultihostMigrationExec
    self.vms = params.get("vms").split(" ")
    self.srchost = params["hosts"][0]
    self.dsthost = params["hosts"][1]
    self.is_src = params["hostid"] == self.srchost
    self.mig = self.mig_type(
        test,
        params,
        env,
        False,
    )
    if self.is_src:
        # Source host: create the floppy and boot the VM with it.
        self.floppy = create_floppy(params)
        self.floppy_dir = os.path.dirname(self.floppy)
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        vm = env.get_vm(self.vms[0])
        vm.wait_for_login(timeout=login_timeout)
    else:
        # Destination host: only compute the floppy path.
        self.floppy = create_floppy(params, False)
        self.floppy_dir = os.path.dirname(self.floppy)
def test(self):
    """
    Prepare the multi-host migration environment and the test floppy.

    Same flow as the utils_test variant but using the migration classes
    under ``utils_test.qemu``.

    NOTE(review): ``params``/``test``/``env``/``login_timeout`` appear to
    be closure variables of an enclosing run() function — confirm against
    the full file.
    """
    error.context("Preparing migration env and floppies.")
    mig_protocol = params.get("mig_protocol", "tcp")
    self.mig_type = utils_test.qemu.MultihostMigration
    if mig_protocol == "fd":
        self.mig_type = utils_test.qemu.MultihostMigrationFd
    if mig_protocol == "exec":
        self.mig_type = utils_test.qemu.MultihostMigrationExec
    self.vms = params.get("vms").split(" ")
    self.srchost = params["hosts"][0]
    self.dsthost = params["hosts"][1]
    self.is_src = params["hostid"] == self.srchost
    self.mig = self.mig_type(test, params, env, False, )
    if self.is_src:
        # Source host: create the floppy and boot the VM with it.
        self.floppy = create_floppy(params)
        self.floppy_dir = os.path.dirname(self.floppy)
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        vm = env.get_vm(self.vms[0])
        vm.wait_for_login(timeout=login_timeout)
    else:
        # Destination host: only compute the floppy path.
        self.floppy = create_floppy(params, False)
        self.floppy_dir = os.path.dirname(self.floppy)
def test(self):
    """
    Prepare the multi-host migration environment and the test cdrom.

    Selects the migration helper class matching ``mig_protocol``
    (tcp/fd/exec/rdma). The source host builds the iso, restarts the VM
    with it attached and waits for login; the destination host only
    records the expected iso path.

    NOTE(review): ``params``/``test``/``env``/``login_timeout`` appear to
    be closure variables of an enclosing run() function — confirm against
    the full file.
    """
    error.context("Preparing migration env and cdroms.", logging.info)
    mig_protocol = params.get("mig_protocol", "tcp")
    self.mig_type = migration.MultihostMigration
    if mig_protocol == "fd":
        self.mig_type = migration.MultihostMigrationFd
    if mig_protocol == "exec":
        self.mig_type = migration.MultihostMigrationExec
    if "rdma" in mig_protocol:
        self.mig_type = migration.MultihostMigrationRdma
    self.vms = params.get("vms").split(" ")
    self.srchost = params.get("hosts")[0]
    self.dsthost = params.get("hosts")[1]
    self.is_src = params.get("hostid") == self.srchost
    self.mig = self.mig_type(test, params, env, False)
    # Size of the test iso in MB (default 10).
    self.cdrom_size = int(params.get("cdrom_size", 10))
    # Serial number of the last configured cdrom; used later to locate
    # the testing cdrom device inside the guest.
    cdrom = params.objects("cdroms")[-1]
    self.serial_num = params.get("drive_serial_%s" % cdrom)
    if self.is_src:
        self.cdrom_orig = create_iso_image(params, "orig",
                                           file_size=self.cdrom_size)
        self.cdrom_dir = os.path.dirname(self.cdrom_orig)
        # Destroy the pre-started VM so it can be restarted with the new
        # cdrom attached.
        vm = env.get_vm(self.vms[0])
        vm.destroy()
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        vm = env.get_vm(self.vms[0])
        vm.wait_for_login(timeout=login_timeout)
    else:
        # Destination host: only compute the iso path, do not create it.
        self.cdrom_orig = create_iso_image(params, "orig", False)
        self.cdrom_dir = os.path.dirname(self.cdrom_orig)
def setup(self, under_fs0):
    """
    Pre-process for uefishell environment, launch vm from the
    UefiShell.iso, and setup a serial session for uefishell.

    :param under_fs0: "yes" if most uefi commands are executed under fs0:\\
    """
    params = self.params
    # Copy the UEFI variable store before reconfiguring the VM.
    self.var_copy()
    # Shift every pre-configured cdrom boot index down by one so the
    # uefishell cdrom (added below) can take bootindex 0.
    for cdrom in params.objects("cdroms"):
        boot_index = params.get("boot_index_%s" % cdrom)
        if boot_index is not None:
            params["boot_index_%s" % cdrom] = int(boot_index) + 1
    # Disable booting from disk images; the shell iso must boot first.
    for image in params.objects("images"):
        params["image_boot_%s" % image] = "no"
    params["cdroms"] = "%s %s" % ("uefishell", params["cdroms"])
    params["cdrom_uefishell"] = self.copy_uefishell()
    params["bootindex_uefishell"] = "0"
    params["start_vm"] = "yes"
    # Prompt of the UEFI shell on the serial console ("Shell>" or
    # "FS0:\>" style).
    params["shell_prompt"] = r"(Shell|FS\d:\\.*)>"
    params["shell_linesep"] = r"\r\n"
    env_process.process(self.test, params, self.env,
                        env_process.preprocess_image,
                        env_process.preprocess_vm)
    vm = self.env.get_vm(params["main_vm"])
    self.session = vm.wait_for_serial_login()
    if under_fs0 == "yes":
        self.send_command("fs0:")
def run(test, params, env):
    """
    Verify UEFI config setting in the GUI screen:
    1) Boot up a guest.
    2) If boot_splash_time not None, check splash-time in log output
    3) If check_info_pattern not None, check info in log output

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def info_check(info):
        """Search the seabios log output for the given regex pattern."""
        logs = vm.logsessions['seabios'].get_output()
        result = re.search(info, logs, re.S)
        return result

    def create_cdroms(cdrom_test):
        """Create 'test' cdrom containing a single 10M random file."""
        logging.info("creating test cdrom")
        process.run("dd if=/dev/urandom of=test bs=10M count=1")
        process.run("mkisofs -o %s test" % cdrom_test)
        process.run("rm -f test")

    boot_splash_time = params.get("boot_splash_time")
    check_info_pattern = params.get("check_info_pattern")
    timeout = int(params.get("check_timeout", 360))
    cdrom_test = params.get("cdrom_test")
    if cdrom_test:
        create_cdroms(cdrom_test)
    params["start_vm"] = "yes"
    env_process.process(test, params, env, env_process.preprocess_image,
                        env_process.preprocess_vm)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    try:
        if check_info_pattern:
            expect_result = check_info_pattern
        elif boot_splash_time:
            # The firmware log reports the splash time in seconds while
            # boot_splash_time is configured in milliseconds.
            splash_time_pattern = params.get("splash_time_pattern")
            expect_result = (splash_time_pattern
                             % (int(boot_splash_time) // 1000))
        if not utils_misc.wait_for(lambda: info_check(expect_result),
                                   timeout):
            test.fail("Does not get expected result from bios log: %s"
                      % expect_result)
    finally:
        # Remove the temporary iso only for the "test" cdrom setup.
        if params.get("cdroms") == "test":
            logging.info("cleaning up temp cdrom images")
            os.remove(cdrom_test)
def run(test, params, env):
    """
    Qemu cpu test: drive scenario steps s1..s9 from the configuration.

    Each step ``sN`` may invoke a method either on the QemuTest wrapper
    (fp_type_sN == "vm") or on the OsTest wrapper, named by the
    ``fp_group_sN`` parameter. When ``not_preprocess`` is "yes", step 1
    is a negative test: VM creation must fail with ``error_prompt``.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    error_context.context("Vm boot up successful", logging.info)
    vm = QemuTest(test, params, env)
    for i in range(1, 10):
        if i == 1:
            if params.get("not_preprocess") == "yes":
                # Negative scenario: VM creation itself must fail with
                # the expected error message.
                error_prompt = params.get("error_prompt")
                error_context.context(("Check if have error prompt:[%s] "
                                       % error_prompt), logging.info)
                try:
                    env_process.process(test, params, env,
                                        env_process.preprocess_image,
                                        env_process.preprocess_vm)
                except virt_vm.VMCreateError as e:
                    if error_prompt not in e.output:
                        test.fail("There is no error prompt:[%s]"
                                  % error_prompt)
                continue
        fp_group_var = "fp_group_s%s" % i
        fp_type = "fp_type_s%s" % i
        # Renamed from "os": the original shadowed the stdlib os module.
        # NOTE(review): os_test is only bound when not_preprocess == "no";
        # configurations using fp_type != "vm" must set that parameter.
        if params.get("not_preprocess") == "no":
            os_test = OsTest(test, params, env)
        if params.get(fp_group_var):
            fp_group = params.get(fp_group_var)
            if params.get(fp_type) == "vm":
                func = getattr(vm, params.get(fp_group))
            else:
                func = getattr(os_test, params.get(fp_group))
            st = "s%s" % i
            func(st)
            # After a shutdown-style step, collect dmesg and stop.
            if "down" in func.__name__:
                return os_test.dmesg()
def run(test, params, env):
    """
    Check the usb devices.
    1) Boot up guest with usb devices
    2) verify usb devices in monitor
    3) verify usb devices in guest

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    def _assert_step(ok, msg):
        """Fail the test with msg on error, otherwise just log it."""
        if not ok:
            test.fail(msg)
        logging.info(msg)

    # parse the usb topology from cfg
    parsed_devs = parse_usb_topology(params)

    logging.info("starting vm according to the usb topology")
    env_process.process(test, params, env, env_process.preprocess_image,
                        env_process.preprocess_vm)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    # qemu-side check: compare the expected devices with the qtree
    qemu_devs = collect_usb_dev(params, vm, parsed_devs, "for_qemu")
    error_context.context("verify usb devices information in qemu...",
                          logging.info)
    ok, msg = verify_usb_device_in_monitor_qtree(vm, qemu_devs)
    _assert_step(ok, msg)

    # guest-side check: log in and compare with the guest's view
    guest_devs = collect_usb_dev(params, vm, parsed_devs, "for_guest")
    session = vm.wait_for_login(
        timeout=int(params.get("login_timeout", 360)))
    error_context.context("verify usb devices information in guest...",
                          logging.info)
    ok, msg = verify_usb_device_in_guest(params, session, guest_devs)
    _assert_step(ok, msg)
    session.close()
def run(test, params, env):
    """
    Qemu cpu test: drive scenario steps s1..s9 from the configuration.

    Each step ``sN`` may invoke a method either on the QemuTest wrapper
    (fp_type_sN == "vm") or on the OsTest wrapper, named by the
    ``fp_group_sN`` parameter. When ``not_preprocess`` is "yes", step 1
    is a negative test: VM creation must fail with ``error_prompt``.
    Finally the serial output of the VM is collected.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    error_context.context("Vm boot up successful", logging.info)
    vm = QemuTest(test, params, env)
    for i in range(1, 10):
        if i == 1:
            if params.get("not_preprocess") == "yes":
                # Negative scenario: VM creation itself must fail with
                # the expected error message.
                error_prompt = params.get("error_prompt")
                error_context.context(("Check if have error prompt:[%s] "
                                       % error_prompt), logging.info)
                try:
                    env_process.process(test, params, env,
                                        env_process.preprocess_image,
                                        env_process.preprocess_vm)
                except virt_vm.VMCreateError as e:
                    if error_prompt not in e.output:
                        test.fail("There is no error prompt:[%s]"
                                  % error_prompt)
                continue
        fp_group_var = "fp_group_s%s" % i
        fp_type = "fp_type_s%s" % i
        # Renamed from "os": the original shadowed the stdlib os module.
        # NOTE(review): os_test is only bound when not_preprocess == "no";
        # configurations using fp_type != "vm" must set that parameter.
        if params.get("not_preprocess") == "no":
            os_test = OsTest(test, params, env)
        if params.get(fp_group_var):
            fp_group = params.get(fp_group_var)
            if params.get(fp_type) == "vm":
                func = getattr(vm, params.get(fp_group))
            else:
                func = getattr(os_test, params.get(fp_group))
            st = "s%s" % i
            func(st)
    vm.serial_output()
def test(self):
    """
    Prepare the multi-host migration environment and the test cdrom.

    Selects the migration helper class matching ``mig_protocol``
    (tcp/fd/exec/rdma). The source host builds the iso, restarts the VM
    with it attached and waits for login; the destination host only
    records the expected iso path.

    NOTE(review): ``params``/``test``/``env``/``login_timeout`` appear to
    be closure variables of an enclosing run() function — confirm against
    the full file.
    """
    error_context.context("Preparing migration env and cdroms.",
                          logging.info)
    mig_protocol = params.get("mig_protocol", "tcp")
    self.mig_type = migration.MultihostMigration
    if mig_protocol == "fd":
        self.mig_type = migration.MultihostMigrationFd
    if mig_protocol == "exec":
        self.mig_type = migration.MultihostMigrationExec
    if "rdma" in mig_protocol:
        self.mig_type = migration.MultihostMigrationRdma
    self.vms = params.get("vms").split(" ")
    self.srchost = params.get("hosts")[0]
    self.dsthost = params.get("hosts")[1]
    self.is_src = params.get("hostid") == self.srchost
    self.mig = self.mig_type(
        test,
        params,
        env,
        False,
    )
    # Size of the test iso in MB (default 10).
    self.cdrom_size = int(params.get("cdrom_size", 10))
    # Serial number of the last configured cdrom; used later to locate
    # the testing cdrom device inside the guest.
    cdrom = params.objects("cdroms")[-1]
    self.serial_num = params.get("drive_serial_%s" % cdrom)
    if self.is_src:
        self.cdrom_orig = create_iso_image(params, "orig",
                                           file_size=self.cdrom_size)
        self.cdrom_dir = os.path.dirname(self.cdrom_orig)
        # Destroy the pre-started VM so it can be restarted with the new
        # cdrom attached.
        vm = env.get_vm(self.vms[0])
        vm.destroy()
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        vm = env.get_vm(self.vms[0])
        vm.wait_for_login(timeout=login_timeout)
    else:
        # Destination host: only compute the iso path, do not create it.
        self.cdrom_orig = create_iso_image(params, "orig", False)
        self.cdrom_dir = os.path.dirname(self.cdrom_orig)
def _prepare_vm(self, vm_name):
    """
    Prepare, start vm and return vm.

    :param vm_name: Name of the VM to start.
    :return: Started VM.
    """
    new_params = self.params.copy()
    new_params['migration_mode'] = None
    new_params['start_vm'] = 'yes'
    # Hold the lock only around VM creation and release it even when
    # env_process.process() raises — the original leaked the lock on
    # exception, deadlocking every other waiter.
    self.vm_lock.acquire()
    try:
        env_process.process(self.test, new_params, self.env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
    finally:
        self.vm_lock.release()
    vm = self.env.get_vm(vm_name)
    vm.wait_for_login(timeout=self.login_timeout)
    return vm
def test(self):
    """
    Multi-host cdrom ejecting test.

    On the source host: boot the VM with a serialized cdrom, eject the
    original media, change to a freshly created iso, then migrate the
    guest to the destination host. Both hosts meet at the barrier and
    participate in migrate_wait.

    NOTE(review): ``params``/``test``/``env``/``login_timeout``/
    ``workaround_eject_time``/``cdrom_prepare_timeout`` appear to be
    closure variables of an enclosing run() function — confirm against
    the full file.
    """
    super(test_multihost_ejecting, self).test()
    if self.is_src:  # Starts in source
        self.cdrom_new = create_iso_image(params, "new")
        serial_num = generate_serial_num()
        # Tag the last configured cdrom with a known serial number so it
        # can be identified inside the guest.
        cdrom = params.get("cdroms", "").split()[-1]
        params["drive_serial_%s" % cdrom] = serial_num
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        vm = env.get_vm(self.vms[0])
        session = vm.wait_for_login(timeout=login_timeout)
        cdrom_dev_list = list_guest_cdroms(session)
        logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
        device = get_device(vm, self.cdrom_orig)
        cdrom = get_testing_cdrom_device(session, cdrom_dev_list,
                                         serial_num)
        error.context("Eject cdrom.")
        # Eject from inside the guest first, then from the monitor.
        session.cmd(params["eject_cdrom_cmd"] % cdrom)
        vm.eject_cdrom(device)
        time.sleep(2)
        if get_cdrom_file(vm, device) is not None:
            raise error.TestFail("Device %s was not ejected" % (cdrom))
        cdrom = self.cdrom_new
        error.context("Change cdrom.")
        vm.change_media(device, cdrom)
        if get_cdrom_file(vm, device) != cdrom:
            raise error.TestFail("It wasn't possible to change "
                                 "cdrom %s" % (cdrom))
        # Let the guest settle after the media change before migrating.
        time.sleep(workaround_eject_time)
    # Both hosts must reach the barrier and join the migration.
    self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                            'cdrom_dev', cdrom_prepare_timeout)
    self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)
def prepare_for_migration(self, mig_data, migration_mode):
    """
    Prepare destination of migration for migration.

    When migration_sync_vms is enabled, the source host creates its VMs
    and sends their pickled configuration to the destination host, which
    registers them in its env before creating the machines. Otherwise
    each host prepares its VMs independently.

    :param mig_data: Class with data necessary for migration.
    :param migration_mode: Migration mode for prepare machine.
    """
    new_params = self._prepare_params(mig_data)
    new_params['migration_mode'] = migration_mode
    new_params['start_vm'] = 'yes'
    if self.params.get("migration_sync_vms", "no") == "yes":
        if mig_data.is_src():
            # NOTE(review): the lock is not released if
            # env_process.process() raises — consider try/finally.
            self.vm_lock.acquire()
            env_process.process(self.test, new_params, self.env,
                                env_process.preprocess_image,
                                env_process.preprocess_vm)
            self.vm_lock.release()
            self._quick_check_vms(mig_data)
            # Send vms configuration to dst host.
            vms = cPickle.dumps([
                self.env.get_vm(vm_name)
                for vm_name in mig_data.vms_name
            ])
            self.env.get_vm(mig_data.vms_name[0]).monitor.info("qtree")
            SyncData(self.master_id(), self.hostid,
                     mig_data.hosts, mig_data.mig_id,
                     self.sync_server).sync(vms, timeout=240)
        elif mig_data.is_dst():
            # Load vms configuration from src host.
            vms = cPickle.loads(
                SyncData(self.master_id(), self.hostid,
                         mig_data.hosts, mig_data.mig_id,
                         self.sync_server).sync(timeout=240)[mig_data.src])
            for vm in vms:
                # Save config to env. Used for create machine.
                # When reuse_previous_config params is set don't check
                # machine.
                vm.address_cache = self.env.get("address_cache")
                self.env.register_vm(vm.name, vm)
            self.vm_lock.acquire()
            env_process.process(self.test, new_params, self.env,
                                env_process.preprocess_image,
                                env_process.preprocess_vm)
            vms[0].monitor.info("qtree")
            self.vm_lock.release()
            self._quick_check_vms(mig_data)
    else:
        # No cross-host sync: each host prepares its VMs on its own.
        self.vm_lock.acquire()
        env_process.process(self.test, new_params, self.env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        self.vm_lock.release()
        self._quick_check_vms(mig_data)
    self._check_vms(mig_data)
def run(test, params, env):
    """
    Run an gluster test.
    steps:
    1) create gluster brick if there is no one with good name
    2) create volume on brick
    3) create VM image on disk with specific format
    4) install vm on VM image
    5) boot VM
    6) start fio test on booted VM

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    base_image = params.get("image_name")
    login_timeout = int(params.get("login_timeout", 360))

    # Workaround wrong config file order.
    params['image_name_backing_file_snapshot'] = params.get("image_name")
    params['image_format_backing_file_snapshot'] = params.get("image_format")
    params['image_name_snapshot'] = params.get("image_name") + "-snap"

    error_context.context("boot guest over glusterfs", logging.info)
    guest = env.get_vm(params["main_vm"])
    guest.verify_alive()
    guest.wait_for_login(timeout=login_timeout)

    error_context.context("shutdown VM", logging.info)
    guest.destroy()

    error_context.context("create snapshot of vm disk", logging.info)
    snapshot_params = params.object_params("snapshot")
    images_dir = params.get("images_base_dir", data_dir.get_data_dir())
    snapshot_img = qemu_storage.QemuImg(snapshot_params, images_dir,
                                        base_image)
    snapshot_img.create(snapshot_params)
    env_process.process(test, snapshot_params, env,
                        env_process.preprocess_image,
                        env_process.preprocess_vm)
def prepare_for_migration(self, mig_data, migration_mode):
    """
    Prepare destination of migration for migration.

    When migration_sync_vms is enabled, the source host creates its VMs
    and sends their pickled configuration to the destination host, which
    registers them in its env before creating the machines. Otherwise
    each host prepares its VMs independently.

    :param mig_data: Class with data necessary for migration.
    :param migration_mode: Migration mode for prepare machine.
    """
    new_params = self._prepare_params(mig_data)
    new_params["migration_mode"] = migration_mode
    new_params["start_vm"] = "yes"
    if self.params.get("migration_sync_vms", "no") == "yes":
        if mig_data.is_src():
            # NOTE(review): the lock is not released if
            # env_process.process() raises — consider try/finally.
            self.vm_lock.acquire()
            env_process.process(
                self.test, new_params, self.env,
                env_process.preprocess_image, env_process.preprocess_vm
            )
            self.vm_lock.release()
            self._quick_check_vms(mig_data)

            # Send vms configuration to dst host.
            vms = cPickle.dumps(
                [self.env.get_vm(vm_name) for vm_name in mig_data.vms_name]
            )
            self.env.get_vm(mig_data.vms_name[0]).monitor.info("qtree")
            SyncData(
                self.master_id(), self.hostid, mig_data.hosts,
                mig_data.mig_id, self.sync_server
            ).sync(vms, timeout=240)
        elif mig_data.is_dst():
            # Load vms configuration from src host.
            vms = cPickle.loads(
                SyncData(
                    self.master_id(), self.hostid, mig_data.hosts,
                    mig_data.mig_id, self.sync_server
                ).sync(timeout=240)[mig_data.src]
            )
            for vm in vms:
                # Save config to env. Used for create machine.
                # When reuse_previous_config params is set don't check
                # machine.
                vm.address_cache = self.env.get("address_cache")
                self.env.register_vm(vm.name, vm)
            self.vm_lock.acquire()
            env_process.process(
                self.test, new_params, self.env,
                env_process.preprocess_image, env_process.preprocess_vm
            )
            vms[0].monitor.info("qtree")
            self.vm_lock.release()
            self._quick_check_vms(mig_data)
    else:
        # No cross-host sync: each host prepares its VMs on its own.
        self.vm_lock.acquire()
        env_process.process(
            self.test, new_params, self.env,
            env_process.preprocess_image, env_process.preprocess_vm
        )
        self.vm_lock.release()
        self._quick_check_vms(mig_data)
    self._check_vms(mig_data)
def run(test, params, env):
    """
    The usb devices negative test

    1) Boot guest with invalid usb devices
    2) Verify QEMU error info

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    # parse the usb topology from cfg
    parse_usb_topology(params)
    logging.info("starting vm according to the usb topology")

    error_info = params["error_info"]
    error_context.context(("verify [%s] is reported by QEMU..."
                           % error_info), logging.info)
    try:
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
    # Python 3 compatible form: the legacy "except X, e" syntax used by
    # the original is a SyntaxError on Python 3 and inconsistent with
    # the "as e" form used elsewhere in this file.
    except virt_vm.VMCreateError as e:
        if error_info not in e.output:
            test.fail("%s is not reported by QEMU" % error_info)
def run(test, params, env):
    """
    Verify max luns support for one channel of spapr-vscsi.
    Step:
     1. Boot a guest with 32 luns for one channel
     2. Boot a guest with 33 luns for one channel

    :param test: Qemu test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params['main_vm'])
    timeout = float(params.get("login_timeout", 240))
    stg_image_num = int(params.get('stg_image_num'))
    stg_image_name = params.get('stg_image_name', 'images/%s')
    channel = params.get('channel')
    error_info = params["error_info"]
    # Attach stg_image_num extra disks to the same spapr-vscsi channel,
    # using lun numbers 0..stg_image_num-1.
    for i in range(stg_image_num):
        name = "stg%d" % i
        params['images'] += " %s" % name
        params["image_name_%s" % name] = stg_image_name % name
        params["blk_extra_params_%s" % name] = channel
        params["drive_port_%s" % name] = i
        params["scsi_hba_%s" % name] = "spapr-vscsi"
    if params['luns'] == "lun_33":
        # The 33rd disk (lun=32) is attached through raw extra_params
        # because QEMU is expected to reject it.
        img_params = params.object_params("stg32")
        image = qemu_storage.QemuImg(img_params, data_dir.get_data_dir(),
                                     "stg32")
        params["extra_params"] = "-blockdev node-name=file_stg32,\
driver=file,auto-read-only=on,discard=unmap,aio=threads,filename=%s,\
cache.direct=on,cache.no-flush=off -blockdev node-name=drive_stg32,\
driver=qcow2,read-only=off,cache.direct=on,cache.no-flush=off,\
file=file_stg32 -device scsi-hd,lun=32,id=stg32,bus=spapr_vscsi0.0,\
drive=drive_stg32,write-cache=on,channel=0" % image.image_filename
        image.create(params)
    params['start_vm'] = 'yes'
    try:
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
    except virt_vm.VMCreateError as e:
        if error_info not in e.output:
            test.fail("%s is not reported by QEMU" % error_info)
    if params['luns'] == "lun_32":
        # Positive case: all 32 luns must show up in the guest and a
        # randomly chosen disk must survive dd read/write.
        session = vm.wait_for_login(timeout=timeout)
        o = session.cmd_output("lsblk -o SUBSYSTEMS|grep vio|wc -l")
        if int(o) != stg_image_num:
            test.fail("Wrong disks number")
        o = session.cmd_output("lsblk -o KNAME,SUBSYSTEMS|grep vio")
        disks = re.findall(r"(sd\w+)", o, re.M)
        disk = random.choice(disks)
        cmd_w = "dd if=/dev/zero of=/dev/%s bs=1M count=8" % disk
        cmd_r = "dd if=/dev/%s of=/dev/null bs=1M count=8" % disk
        error_context.context('Do dd writing test on the data disk.',
                              logging.info)
        status = session.cmd_status(cmd_w, timeout=timeout)
        if status != 0:
            test.error("dd writing test failed")
        error_context.context('Do dd reading test on the data disk.',
                              logging.info)
        status = session.cmd_status(cmd_r, timeout=timeout)
        if status != 0:
            test.error("dd reading test failed")
        session.close()
    vm.destroy(gracefully=True)
def run(test, params, env):
    """
    Test failover path on SCSI passthrough with underlying DM-multipath
    device.
    Step:
     1. Build multipath device on host.
     2. Boot a guest with passthrough path.
     3. Access guest then do io on the data disk.
     4. Check vm status.
     5. Alternately close a path every 10 seconds on host
     6. Check vm status
     7. Offline two path and check status
     8. Online one path and check status

    :param test: Qemu test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params['main_vm'])
    timeout = float(params.get("login_timeout", 240))
    get_id_cmd = params.get("get_id_cmd")
    get_mpath_cmd = params.get("get_mpath_cmd")
    get_mdev_cmd = params.get("get_mdev_cmd")
    get_tdev_cmd = params.get("get_tdev_cmd")
    set_path_cmd = params.get("set_path_cmd")
    cmd_dd = params.get("cmd_dd")
    # NOTE(review): post_cmd is read but never used in this function.
    post_cmd = params.get("post_cmd")
    repeat_time = params.get_numeric("repeat_time")
    id = process.getoutput(get_id_cmd).strip()
    get_mpath_cmd = get_mpath_cmd % (id, id)
    mpath = process.getoutput(get_mpath_cmd).strip()
    # Use the host multipath device as the backing of the data disk.
    params["image_name_stg0"] = "/dev/mapper/%s" % mpath
    params['start_vm'] = 'yes'
    time.sleep(5)
    env_process.process(test, params, env, env_process.preprocess_image,
                        env_process.preprocess_vm)
    session = vm.wait_for_login(timeout=timeout)
    out = session.cmd_output(get_tdev_cmd)
    cmd_dd = cmd_dd % out
    error_context.context('Do dd writing test on the data disk.',
                          logging.info)
    # Start dd in the background; the guest must stay running while the
    # paths are flapped below.
    session.sendline(cmd_dd)
    if not vm.monitor.verify_status("running"):
        test.fail("Guest did not run after dd")
    get_mdev_cmd = get_mdev_cmd % id
    o = process.getoutput(get_mdev_cmd)
    mdev = re.findall(r"sd.", o, re.M)
    error_context.context('Alternately close a path every 10 seconds on host')
    # Make sure every path starts in the running state.
    for dev in mdev:
        process.getoutput(set_path_cmd % ("running", dev))
    for i in range(repeat_time):
        # Take each path offline and back online in turn.
        for dev in mdev:
            process.getoutput(set_path_cmd % ("offline", dev))
            time.sleep(5)
            process.getoutput("multipath -l")
            time.sleep(10)
            process.getoutput(set_path_cmd % ("running", dev))
            time.sleep(5)
            process.getoutput("multipath -l")
            time.sleep(1)
        time.sleep(1)
    # With all paths running again the guest must keep running.
    for dev in mdev:
        process.getoutput(set_path_cmd % ("running", dev))
    if not utils_misc.wait_for(lambda: vm.monitor.verify_status("running"),
                               timeout=20):
        test.fail("Guest did not run after change path")
    # With all paths offline the guest is expected to pause.
    for dev in mdev:
        process.getoutput(set_path_cmd % ("offline", dev))
    if not utils_misc.wait_for(lambda: vm.monitor.verify_status("paused"),
                               timeout=20):
        test.fail("Guest did not pause after offline")
    # Bring one path back; resume the guest and verify it runs again.
    dev = random.choice(mdev)
    process.getoutput(set_path_cmd % ("running", dev))
    if vm.monitor.verify_status("paused"):
        vm.monitor.send_args_cmd("c")
    if not utils_misc.wait_for(lambda: vm.monitor.verify_status("running"),
                               timeout=20):
        test.fail("Guest did not run after online")
    session.close()
    vm.destroy(gracefully=True)
def run(test, params, env):
    """
    Verify SLOF info by user interface.
    Step:
     Scenario 1:
      1.1 Boot a guest with at least two blocks, with "-boot menu=on",
          Press "F12" in the guest desktop at the early stage of booting
          process.
      1.2 Check the boot menu info whether are match with guest info.
      1.3 Select one of valid device to boot up the guest.
      1.4 Check whether errors in SLOF.
      1.5 Log in guest successfully.
      1.6 Ping external host ip successfully.
     Scenario 2:
      2.1. Boot the guest with spapr-vty and press 's' immediately when
           the guest boot up.
      2.2. Check the output of console, whether is stopped enter kernel.
      2.3. Type "boot" or "reset-all".
      2.4. Check guest whether boot up successfully.
      2.5. Log in guest successfully.

    :param test: Qemu test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    # Modes of entering the SLOF user interface.
    STOP, F12 = range(2)
    enter_key = {STOP: 's', F12: 'f12'}

    def _send_custom_key(keystr):
        """
        Send custom keyword to SLOF's user interface.
        """
        logging.info('Sending \"%s\" to SLOF user interface.' % keystr)
        for key in keystr:
            # '-' has to be sent as the "minus" key name.
            key = 'minus' if key == '-' else key
            vm.send_key(key)
        vm.send_key('ret')

    def _send_key(key, custom=True, sleep=0.0):
        """
        Send keywords to SLOF's user interface.
        """
        # Numeric keys are menu selections and share the "select" params.
        obj_name = 'select' if re.search(r'^\d+$', key) else key
        k_params = params.object_params(obj_name.replace('-', '_'))
        if custom:
            _send_custom_key(key)
        else:
            vm.send_key(key)
        time.sleep(sleep)
        content, _ = slof.get_boot_content(vm, 0, k_params['start'],
                                           k_params['end'])
        if content:
            logging.info('Output of SLOF:\n%s' % ''.join(content))
            return ''.join(content)
        return None

    def _check_menu_info(menu_info):
        """
        Check the menu info by each items.
        """
        bootable_num = ''
        for i in range(1, int(params['boot_dev_num']) + 1):
            option = params['menu_option%d' % i]
            logging.info('Checking the device(%s) if is included in menu list.'
                         % '->'.join(option.split()))
            dev_type, hba_type, child_bus, addr = option.split()
            # Normalize the address (strip the leading "0x"/"0x0").
            addr = re.sub(r'^0x0?', '', addr)
            pattern = re.compile(r'(\d+)\)\s+%s(\d+)?\s+:\s+/%s(\S+)?/%s@%s'
                                 % (dev_type, hba_type, child_bus, addr),
                                 re.M)
            searched = pattern.search(menu_info)
            if not searched:
                test.fail('No such item(%s) in boot menu list.'
                          % '->'.join(option.split()))
            if i == int(params['bootable_index']):
                bootable_num = searched.group(1)
        return bootable_num

    def _enter_user_interface(mode):
        """
        Enter user interface.
        """
        o = utils_misc.wait_for(
            lambda: _send_key(enter_key[mode], False), ack_timeout,
            step=0.0)
        if not o:
            test.fail('Failed to enter user interface in %s sec.'
                      % ack_timeout)
        return o

    def _f12_user_interface_test():
        """
        Test f12 user interface.
        """
        menu_list = _enter_user_interface(F12)
        actual_num = len(re.findall(r'\d+\)', menu_list))
        dev_num = params['boot_dev_num']
        if actual_num != int(dev_num):
            test.fail('The number of boot devices is not %s in menu list.'
                      % dev_num)
        # Select the bootable entry and wait for the load to start.
        if not utils_misc.wait_for(
                lambda: _send_key(_check_menu_info(menu_list), False),
                ack_timeout, step=0.0):
            test.fail('Failed to load after selecting boot device '
                      'in %s sec.' % ack_timeout)

    def _load_user_interface_test():
        """
        Test boot/reset-all user interface.
        """
        _enter_user_interface(STOP)
        if not utils_misc.wait_for(
                lambda: _send_key(keys, True, 3), ack_timeout, step=0.0):
            test.fail('Failed to load after \'%s\' in %s sec.'
                      % (keys, ack_timeout))

    def _check_serial_log_status():
        """
        Check the status of serial log.
        """
        file_timeout = 30
        if not utils_misc.wait_for(
                lambda: os.path.isfile(vm.serial_console_log),
                file_timeout):
            test.error('No found serial log during %s sec.' % file_timeout)

    # Dispatch the scenario by the configured key sequence.
    main_tests = {'f12': _f12_user_interface_test,
                  'boot': _load_user_interface_test,
                  'reset-all': _load_user_interface_test}
    ack_timeout = params['ack_timeout']
    keys = params['send_keys']
    env_process.process(
        test, params, env, env_process.preprocess_image,
        env_process.preprocess_vm)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    _check_serial_log_status()
    main_tests[keys]()

    error_context.context("Try to log into guest '%s'." % vm.name,
                          logging.info)
    session = vm.wait_for_login(timeout=float(params['login_timeout']))
    logging.info("log into guest '%s' successfully." % vm.name)
    error_context.context("Try to ping external host.", logging.info)
    extra_host_ip = utils_net.get_host_ip_address(params)
    session.cmd('ping %s -c 5' % extra_host_ip)
    logging.info("Ping host(%s) successfully." % extra_host_ip)
    vm.destroy(gracefully=True)
def run(test, params, env):
    """
    Test throttle relevant properties feature.

    1) Boot up guest with throttle groups. There are two throttle groups
       and each have two disk.
    2) Build fio operation options and expected result according to
       throttle properties.
    3) Execute one disk or all disks testing on groups parallel.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    def copy_base_vm_image():
        """Copy the base vm image for VMs."""
        src_img = qemu_storage.QemuImg(params, data_dir.get_data_dir(),
                                       params['images'])
        src_filename = src_img.image_filename
        src_format = src_img.image_format
        dst_dir = os.path.dirname(src_filename)
        for vm_name in vms_list:
            dst_filename = os.path.join(dst_dir,
                                        '%s.%s' % (vm_name, src_format))
            # Only copy when the per-VM image does not exist yet.
            if not os.path.exists(dst_filename):
                logging.info('Copying %s to %s.', src_filename, dst_filename)
                shutil.copy(src_filename, dst_filename)

    def wait_for_login_all_vms():
        """Wait all VMs to login and return their sessions."""
        return [vm.wait_for_login() for vm in vms]

    @error_context.context_aware
    def fio_on_vm(vm_t, session_t):
        """Deploy fio and run the testers of every throttle group on a VM."""
        error_context.context("Deploy fio", logging.info)
        fio = generate_instance(params, vm_t, 'fio')
        logging.info("fio: %s", fio)
        tgm = ThrottleGroupManager(vm_t)
        logging.info("tgm: %s", tgm)

        groups = params["throttle_groups"].split()
        testers = []
        for group in groups:
            tgm.get_throttle_group_props(group)
            images = params["throttle_group_member_%s" % group].split()
            tester = ThrottleTester(test, params, vm_t, session_t, group,
                                    images)
            error_context.context(
                "Build test stuff for %s:%s" % (group, images), logging.info)
            tester.build_default_option()
            tester.build_images_fio_option()
            tester.set_fio(fio)
            testers.append(tester)

        error_context.context("Start groups testing:%s" % groups,
                              logging.info)
        groups_tester = ThrottleGroupsTester(testers)
        groups_tester.start()

    def fio_on_vms():
        """Run fio on all started vms at the same time."""
        logging.info("Start to do fio on multi-vms:")
        fio_parallel_params = []
        for vm, session in zip(vms, sessions):
            fio_parallel_params.append((fio_on_vm, (vm, session)))
        utils_misc.parallel(fio_parallel_params)
        logging.info("Done fio on multi-vms.")

    vms_list = params['vms'].split()
    copy_base_vm_image()
    # Drop the first (default) VM from the test set.  Slicing the parsed
    # list is robust even when one VM name is a substring of another,
    # unlike the previous str.split(<first name>) approach.
    params['vms'] = ' '.join(vms_list[1:])
    params['start_vm'] = 'yes'
    env_process.process(test, params, env, env_process.preprocess_image,
                        env_process.preprocess_vm)
    vms = env.get_all_vms()
    for vm_verify in vms:
        vm_verify.verify_alive()
    sessions = wait_for_login_all_vms()
    fio_on_vms()
def test(self):
    """
    Lock the cdrom tray in the guest on the source host, migrate the VM
    and check the lock survived, unlock on the destination, migrate back
    and check the tray stays unlocked.

    NOTE(review): ``test``, ``params``, ``env``, ``login_timeout``,
    ``cdrom_prepare_timeout`` and the helper functions used below are
    free variables resolved from the enclosing test-module scope, not
    attributes of ``self``.
    """
    super(test_multihost_locking, self).test()
    error.context("Lock cdrom in VM.")
    if self.is_src:  # Starts in source
        # Tag the test cdrom drive with a generated serial number so it
        # can be identified from inside the guest.
        serial_num = generate_serial_num()
        cdrom = params.get("cdroms", "").split()[-1]
        params["drive_serial_%s" % cdrom] = serial_num
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        vm = env.get_vm(self.vms[0])
        session = vm.wait_for_login(timeout=login_timeout)
        cdrom_dev_list = list_guest_cdroms(session)
        guest_cdrom_device = get_testing_cdrom_device(session,
                                                      cdrom_dev_list,
                                                      serial_num)
        logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
        device = get_device(vm, self.cdrom_orig)
        # Lock the tray from inside the guest, then verify via monitor.
        session.cmd(params["lock_cdrom_cmd"] % guest_cdrom_device)
        locked = check_cdrom_lock(vm, device)
        if locked:
            logging.debug("Cdrom device is successfully locked in VM.")
        else:
            raise error.TestFail("Cdrom device should be locked"
                                 " in VM.")
    # Both hosts meet here before the migration starts.
    self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                            'cdrom_dev', cdrom_prepare_timeout)
    self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)
    if not self.is_src:  # Starts in dest
        vm = env.get_vm(self.vms[0])
        session = vm.wait_for_login(timeout=login_timeout)
        cdrom_dev_list = list_guest_cdroms(session)
        logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
        device = get_device(vm, self.cdrom_orig)
        # The lock state must survive the migration.
        locked = check_cdrom_lock(vm, device)
        if locked:
            logging.debug("Cdrom device stayed locked after "
                          "migration in VM.")
        else:
            raise error.TestFail("Cdrom device should stayed locked"
                                 " after migration in VM.")
    error.context("Unlock cdrom from VM.")
    if not self.is_src:  # Starts in dest
        cdrom_dev_list = list_guest_cdroms(session)
        # NOTE(review): ``serial_num`` is only assigned in the
        # source-host branch above, so this call appears to raise
        # NameError on the destination host -- confirm how the serial
        # number is expected to reach this side.
        guest_cdrom_device = get_testing_cdrom_device(session,
                                                      cdrom_dev_list,
                                                      serial_num)
        session.cmd(params["unlock_cdrom_cmd"] % guest_cdrom_device)
        locked = check_cdrom_lock(vm, device)
        if not locked:
            logging.debug("Cdrom device is successfully unlocked"
                          " from VM.")
        else:
            raise error.TestFail("Cdrom device should be unlocked"
                                 " in VM.")
    # Migrate back to the source host and verify the unlocked state.
    self.mig.migrate_wait([self.vms[0]], self.dsthost, self.srchost)
    if self.is_src:  # Starts in source
        locked = check_cdrom_lock(vm, device)
        if not locked:
            logging.debug("Cdrom device stayed unlocked after "
                          "migration in VM.")
        else:
            raise error.TestFail("Cdrom device should stayed unlocked"
                                 " after migration in VM.")
    self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                            'Finish_cdrom_test', login_timeout)
def test(self):
    """
    Start a large cdrom-to-disk file copy in the guest on the source
    host, migrate the VM while the copy runs, then on the destination
    host wait for the copy to finish and verify it via md5sum.

    NOTE(review): ``test``, ``params``, ``env``, ``login_timeout``,
    ``cdrom_prepare_timeout`` and the helpers used below are free
    variables resolved from the enclosing test-module scope.
    """
    super(test_multihost_copy, self).test()
    copy_timeout = int(params.get("copy_timeout", 480))
    checksum_timeout = int(params.get("checksum_timeout", 180))
    pid = None
    sync_id = {'src': self.srchost,
               'dst': self.dsthost,
               "type": "file_trasfer"}
    filename = "orig"
    if self.is_src:  # Starts in source
        # Tag the test cdrom drive with a generated serial number so it
        # can be identified from inside the guest.
        serial_num = generate_serial_num()
        cdrom = params.get("cdroms", "").split()[-1]
        params["drive_serial_%s" % cdrom] = serial_num
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        vm = env.get_vm(self.vms[0])
        # Raise the migration speed so it converges while the copy runs.
        vm.monitor.migrate_set_speed("1G")
        session = vm.wait_for_login(timeout=login_timeout)
        cdrom_dev_list = list_guest_cdroms(session)
        logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
        cdrom = get_testing_cdrom_device(session,
                                         cdrom_dev_list,
                                         serial_num)
        mount_point = get_cdrom_mount_point(session, cdrom, params)
        mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point)
        src_file = params["src_file"] % (mount_point, filename)
        dst_file = params["dst_file"] % filename
        copy_file_cmd = params["copy_file_cmd"] % (mount_point, filename)
        remove_file_cmd = params["remove_file_cmd"] % filename
        md5sum_cmd = params["md5sum_cmd"]
        if params["os_type"] != "windows":
            error.context("Mount and copy data")
            session.cmd(mount_cmd, timeout=30)
        error.context("File copying test")
        session.cmd(copy_file_cmd)
        # Long-running background copy inside the guest; its PID is used
        # later to detect completion.
        pid = disk_copy(vm, src_file, dst_file, copy_timeout)
    # Share the copy PID with the destination host before migrating.
    sync = SyncData(self.mig.master_id(), self.mig.hostid,
                    self.mig.hosts, sync_id, self.mig.sync_server)
    pid = sync.sync(pid, timeout=cdrom_prepare_timeout)[self.srchost]
    self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)
    if not self.is_src:  # Starts in dest
        vm = env.get_vm(self.vms[0])
        session = vm.wait_for_login(timeout=login_timeout)
        error.context("Wait for copy finishing.")

        def is_copy_done():
            # True once the guest-side copy process has exited.
            if params["os_type"] == "windows":
                cmd = "tasklist /FI \"PID eq %s\"" % pid
            else:
                cmd = "ps -p %s" % pid
            return session.cmd_status(cmd) != 0

        if utils_misc.wait_for(is_copy_done, timeout=copy_timeout) is None:
            raise error.TestFail("Wait for file copy finish timeout")
        error.context("Compare file on disk and on cdrom")
        # NOTE(review): md5sum_cmd/dst_file/src_file/remove_file_cmd are
        # only assigned in the source-host branch above; verify how they
        # are expected to be in scope on the destination host.
        f1_hash = session.cmd("%s %s" % (md5sum_cmd, dst_file),
                              timeout=checksum_timeout).split()[0]
        f2_hash = session.cmd("%s %s" % (md5sum_cmd, src_file),
                              timeout=checksum_timeout).split()[0]
        if f1_hash.strip() != f2_hash.strip():
            raise error.TestFail("On disk and on cdrom files are"
                                 " different, md5 mismatch")
        session.cmd(remove_file_cmd)
    self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                            'Finish_cdrom_test', login_timeout)
def test(self):
    """
    Start a large cdrom-to-disk file copy in the guest on the source
    host, migrate the VM while the copy runs, then on the destination
    host wait for the copy to finish and verify it via md5sum.

    NOTE(review): ``test``, ``params``, ``env``, ``login_timeout``,
    ``cdrom_prepare_timeout`` and the helpers used below are free
    variables resolved from the enclosing test-module scope.
    """
    super(test_multihost_copy, self).test()
    copy_timeout = int(params.get("copy_timeout", 480))
    checksum_timeout = int(params.get("checksum_timeout", 180))
    pid = None
    sync_id = {
        'src': self.srchost,
        'dst': self.dsthost,
        "type": "file_trasfer"
    }
    filename = "orig"
    if self.is_src:  # Starts in source
        # Tag the test cdrom drive with a generated serial number so it
        # can be identified from inside the guest.
        serial_num = generate_serial_num()
        cdrom = params.get("cdroms", "").split()[-1]
        params["drive_serial_%s" % cdrom] = serial_num
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        vm = env.get_vm(self.vms[0])
        # Raise the migration speed so it converges while the copy runs.
        vm.monitor.migrate_set_speed("1G")
        session = vm.wait_for_login(timeout=login_timeout)
        cdrom_dev_list = list_guest_cdroms(session)
        logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
        cdrom = get_testing_cdrom_device(session,
                                         cdrom_dev_list,
                                         serial_num)
        mount_point = get_cdrom_mount_point(session, cdrom, params)
        mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point)
        src_file = params["src_file"] % (mount_point, filename)
        dst_file = params["dst_file"] % filename
        copy_file_cmd = params["copy_file_cmd"] % (mount_point, filename)
        remove_file_cmd = params["remove_file_cmd"] % filename
        md5sum_cmd = params["md5sum_cmd"]
        if params["os_type"] != "windows":
            error.context("Mount and copy data")
            session.cmd(mount_cmd, timeout=30)
        error.context("File copying test")
        session.cmd(copy_file_cmd)
        # Long-running background copy inside the guest; its PID is used
        # later to detect completion.
        pid = disk_copy(vm, src_file, dst_file, copy_timeout)
    # Share the copy PID with the destination host before migrating.
    sync = SyncData(self.mig.master_id(), self.mig.hostid,
                    self.mig.hosts, sync_id, self.mig.sync_server)
    pid = sync.sync(pid, timeout=cdrom_prepare_timeout)[self.srchost]
    self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)
    if not self.is_src:  # Starts in dest
        vm = env.get_vm(self.vms[0])
        session = vm.wait_for_login(timeout=login_timeout)
        error.context("Wait for copy finishing.")

        def is_copy_done():
            # True once the guest-side copy process has exited.
            if params["os_type"] == "windows":
                cmd = "tasklist /FI \"PID eq %s\"" % pid
            else:
                cmd = "ps -p %s" % pid
            return session.cmd_status(cmd) != 0

        if utils_misc.wait_for(is_copy_done, timeout=copy_timeout) is None:
            raise error.TestFail("Wait for file copy finish timeout")
        error.context("Compare file on disk and on cdrom")
        # NOTE(review): md5sum_cmd/dst_file/src_file/remove_file_cmd are
        # only assigned in the source-host branch above; verify how they
        # are expected to be in scope on the destination host.
        f1_hash = session.cmd("%s %s" % (md5sum_cmd, dst_file),
                              timeout=checksum_timeout).split()[0]
        f2_hash = session.cmd("%s %s" % (md5sum_cmd, src_file),
                              timeout=checksum_timeout).split()[0]
        if f1_hash.strip() != f2_hash.strip():
            raise error.TestFail("On disk and on cdrom files are"
                                 " different, md5 mismatch")
        session.cmd(remove_file_cmd)
    self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                            'Finish_cdrom_test', login_timeout)
def run(test, params, env):
    """
    Test multiple VMs during host is under high stress.

    Steps:
        1. Run a stress test on host.
        2. Start multiple VMs on host and check if all can start
           successfully.
        3. Shutdown all VMs.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    def _extract_stress_pkg():
        """Unpack the stress tarball into the install directory."""
        process.system("tar -xzvf %s -C %s" % (archive_path,
                                               stress_inst_dir))

    def _build_stress_pkg():
        """Configure, build and install the stress tool."""
        prefix = os.path.join(stress_inst_dir, params['stress_ver'])
        configure_cmd = "cd {0} && ./configure --prefix={0}".format(prefix)
        build_cmd = "make && make install"
        process.system(" && ".join((configure_cmd, build_cmd)), shell=True)

    def _start_stress_background():
        """Launch the stress workload in the background."""
        process.system(params['stress_cmd'], shell=True,
                       ignore_bg_processes=True)

    def _ensure_stress_alive():
        """Raise a test error unless a 'stress' process appears in 10s."""
        def _found():
            out = process.system_output('pgrep -xl stress',
                                        ignore_status=True).decode()
            return re.search(r'\d+\s+stress', out)

        if not utils_misc.wait_for(_found, 10):
            test.error("The stress process is not alive.")

    def _clone_base_image():
        """Make one copy of the base image for every extra VM."""
        base = qemu_storage.QemuImg(params, data_dir.get_data_dir(),
                                    params['images'])
        base_path = base.image_filename
        base_fmt = base.image_format
        target_dir = os.path.dirname(base_path)
        for vm_name in vms_list:
            target = os.path.join(target_dir, '%s.%s' % (vm_name, base_fmt))
            logging.info('Copying %s to %s.', base_path, target)
            shutil.copy(base_path, target)

    def _register_cloned_images():
        """Point every extra VM at its own image copy in params."""
        for vm_name in vms_list:
            params['images_%s' % vm_name] = vm_name
            params['image_name_{0}_{0}'.format(vm_name)] = ('images/%s'
                                                            % vm_name)
            params['remove_image_%s' % vm_name] = 'yes'

    def _login_all(vm_objs):
        """Wait for a login session on every VM."""
        return [vm.wait_for_login() for vm in vm_objs]

    def _shutdown_all(vm_objs, vm_sessions):
        """Shut every VM down and verify each one actually stops."""
        for vm, session in zip(vm_objs, vm_sessions):
            logging.info('Shutting down %s.', vm.name)
            session.sendline(params["shutdown_command"])
            if not vm.wait_for_shutdown():
                test.fail("Failed to shutdown %s." % vm.name)

    # All VMs except the first one get a cloned image of their own.
    vms_list = params['vms'].split()[1:]
    _clone_base_image()
    _register_cloned_images()

    stress_inst_dir = params['stress_inst_dir']
    archive_path = os.path.join(data_dir.get_deps_dir('stress'),
                                params['stress_pkg_name'])
    _extract_stress_pkg()
    _build_stress_pkg()
    _start_stress_background()
    _ensure_stress_alive()

    params['start_vm'] = 'yes'
    env_process.process(test, params, env, env_process.preprocess_image,
                        env_process.preprocess_vm)
    vms = env.get_all_vms()
    for vm in vms:
        vm.verify_alive()
    _shutdown_all(vms, _login_all(vms))
def run(test, params, env):
    """
    Verify the boot order from SLOF.

    Step:
     Scenario 1:
      1.1 Boot a guest with an empty disk, cdrom and nic, and don't
          specify disk bootindex=0, then set
          "order=cdn,once=n,menu=off,strict=off" for boot options.
      1.2 Check the boot order which should be nic->disk->cdrom->nic.
     Scenario 2:
      2.1 Boot a guest with an empty disk and nic, and don't specify this
          device bootindex=0, then set "order=cdn,once=n,menu=off,
          strict=off" for boot options.
      2.2 Check the boot order which should be nic->disk->nic.
     Scenario 3:
      3.1 Boot a guest with an empty disk, specify this device
          bootindex=0, then set "order=cdn,once=n,menu=off,strict=on"
          for boot options.
      3.2 Check the boot order which should be just disk.
     Scenario 4:
      4.1 Boot a guest with an empty disk and nic, specify this device
          bootindex=0, then set "order=cdn,once=n,menu=off,strict=off"
          for boot options.
      4.2 Check the boot order which should be disk->nic.

    :param test: Qemu test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    def _send_custom_key():
        """
        Send custom keyword to SLOF's user interface.
        """
        logging.info('Sending \"%s\" to SLOF user interface.' % send_key)
        for key in send_key:
            # '-' has no direct key name; translate it for send_key().
            key = 'minus' if key == '-' else key
            vm.send_key(key)
        vm.send_key('ret')

    def _verify_boot_order(order):
        """
        Verify the order of booted devices.

        :param order: whitespace-separated device letters ('c' disk,
                      'd' cdrom, 'n' nic) in the expected boot order.
        """
        for index, dev in enumerate(order.split()):
            args = device_map[dev]
            details = 'The device({}@{}) is not the {} bootable device.'.format(
                args[1], args[2], index)
            if not slof.verify_boot_device(
                    content, args[0], args[1], args[2], position=index):
                test.fail('Fail: ' + details)
            logging.info('Pass: ' + details)

    parent_bus = params.get('parent_bus')
    child_bus = params.get('child_bus')
    parent_bus_nic = params.get('parent_bus_nic')
    child_bus_nic = params.get('child_bus_nic')
    send_key = params.get('send_key')
    # Map device letter -> (parent bus, child bus, device address) as
    # consumed by slof.verify_boot_device().
    device_map = {'c': (parent_bus, child_bus, params.get('disk_addr')),
                  'd': (parent_bus, child_bus, params.get('cdrom_addr')),
                  'n': (parent_bus_nic, child_bus_nic,
                        params.get('nic_addr'))}
    env_process.process(test, params, env, env_process.preprocess_image,
                        env_process.preprocess_vm)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    # Wait for the SLOF prompt and capture the boot console output.
    content, next_pos = slof.wait_for_loaded(vm, test, end_str='0 >')
    _verify_boot_order(params['order_before_send_key'])
    if send_key in ('reset-all', 'boot'):
        error_context.context("Reboot guest by sending key.", logging.info)
        _send_custom_key()
        # Re-capture the console after the reboot before re-checking.
        content, _ = slof.wait_for_loaded(vm, test, next_pos, end_str='0 >')
        _verify_boot_order(params['order_after_send_key'])
def run(test, params, env):
    """
    PCI Devices test
    1) print outs the used setup
    2) boots the defined VM
    3) verifies monitor "info qtree" vs. autotest representation
    4) verifies guest "lspci" vs. info qtree (Linux only)
    :note: Only PCI device properties are checked

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    error.context("Creating early names representation")
    env_process.preprocess_vm(test, params, env, params["main_vm"])
    vm = env.get_vm(params["main_vm"])
    qdev = vm.make_create_command()    # parse params into qdev

    error.context("Getting main PCI bus info")

    error.context("Processing test params")
    test_params = params['test_setup']
    test_devices = params['test_devices']
    test_device_type = params['test_device_type']
    if not params.get('pci_controllers'):
        params['pci_controllers'] = ''
    # _lasts tracks the innermost bus per nesting level while walking the
    # "->"-separated topology description below.
    _lasts = [PCIBusInfo(qdev.get_by_properties({'aobject': 'pci.0'})[0])]
    _lasts[0].first = 7   # first 6 slots might be already occupied on pci.0
    _lasts[0].last -= 1   # last port is usually used by the VM
    use_buses = []
    names = {}
    logging.info("Test setup")
    # Each line of test_setup describes one chain of buses ending in the
    # literal token 'devices' where the test devices get plugged.
    for line in test_params.split('\\n'):
        _idx = 0
        out = ""
        for device in line.split('->'):
            device = device.strip()
            if device:
                if device == 'devices':
                    # This bus is where the test devices will be attached.
                    use_buses.append(_lasts[_idx])
                    out += "->(test_devices)"
                    break
                # Derive a unique name per bus type (pci_<type><count>).
                idx = names.get(device, 0) + 1
                name = "pci_%s%d" % (device, idx)
                names[device] = idx
                params, bus = add_bus(qdev, params, device, name,
                                      _lasts[_idx])
                # we inserted a device, increase the upper bus first idx
                _lasts[_idx].first += 1
                out += "->%s" % (name)
                _idx += 1
                if len(_lasts) > _idx:
                    _lasts = _lasts[:_idx]
                _lasts.append(bus)
            else:
                # Empty segment: keep column alignment in the printout and
                # descend one level without creating a bus.
                _idx += 1
                out += " " * (len(_lasts[_idx].name) + 2)
        logging.info(out)
    # Select the device-population strategy and the device type to add.
    add_devices = {'first': add_devices_first,
                   'all': add_devices_all}.get(test_devices,
                                               add_devices_random)
    add_device = {'uhci': add_device_usb_uhci,
                  'ehci': add_device_usb_ehci,
                  'xhci': add_device_usb_xhci,
                  'virtio_disk': add_virtio_disk,
                  }.get(test_device_type, add_device_random)

    name_idxs = {}
    for bus in use_buses:
        params, name_idxs = add_devices(params, name_idxs, bus, add_device)
    params['start_vm'] = 'yes'
    env_process.process(test, params, env, env_process.preprocess_image,
                        env_process.preprocess_vm)
    vm = env.get_vm(params["main_vm"])
    # PCI devices are initialized by firmware, which might require some time
    # to setup. Wait 10s before getting the qtree.
    time.sleep(10)
    qtree = qemu_qtree.QtreeContainer()

    error.context("Verify qtree vs. qemu devices", logging.info)
    _info_qtree = vm.monitor.info('qtree', False)
    qtree.parse_info_qtree(_info_qtree)
    info_qdev = process_qdev(vm.devices)
    info_qtree = process_qtree(qtree)
    errors = ""
    err = verify_qdev_vs_qtree(info_qdev, info_qtree)
    if err:
        # Dump all representations to ease debugging the mismatch.
        logging.error(_info_qtree)
        logging.error(qtree.get_qtree().str_qtree())
        logging.error(vm.devices.str_bus_long())
        logging.error(err)
        errors += "qdev vs. qtree, "

    error.context("Verify VM booted properly.", logging.info)
    session = vm.wait_for_login()

    error.context("Verify lspci vs. qtree", logging.info)
    # lspci comparison only runs when a guest lspci command is configured
    # (Linux guests).
    if params.get('lspci_cmd'):
        _info_lspci = session.cmd_output(params['lspci_cmd'])
        info_lspci = process_lspci(_info_lspci)
        err = verify_lspci(info_lspci, info_qtree[2])
        if err:
            logging.error(_info_lspci)
            logging.error(_info_qtree)
            logging.error(err)
            errors += "qtree vs. lspci, "

    error.context("Results")
    if errors:
        # errors[:-2] strips the trailing ", " from the accumulated list.
        raise error.TestFail("Errors occurred while comparing %s. Please check"
                             " the log for details." % errors[:-2])
def run(test, params, env):
    """
    Verify SLOF info by user interface.

    Step:
     Scenario 1:
      1.1 Boot a guest with at least two blocks, with "-boot menu=on",
          Press "F12" in the guest desktop at the early stage of booting
          process.
      1.2 Check the boot menu info whether are match with guest info.
      1.3 Select one of valid device to boot up the guest.
      1.4 Check whether errors in SLOF.
      1.5 Log in guest successfully.
      1.6 Ping external host ip successfully.
     Scenario 2:
      2.1. Boot the guest with spapr-vty and press 's' immediately when
           the guest boot up.
      2.2. Check the output of console, whether is stopped enter kernel.
      2.3. Type "boot" or "reset-all".
      2.4. Check guest whether boot up successfully.
      2.5. Log in guest successfully.

    :param test: Qemu test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    # Two interactive modes: stop-at-SLOF ('s') and boot menu ('f12').
    STOP, F12 = range(2)
    enter_key = {STOP: 's', F12: 'f12'}

    def _send_custom_key(keystr):
        """
        Send custom keyword to SLOF's user interface.
        """
        logging.info('Sending \"%s\" to SLOF user interface.' % keystr)
        for key in keystr:
            # '-' has no direct key name; translate it for send_key().
            key = 'minus' if key == '-' else key
            vm.send_key(key)
        vm.send_key('ret')

    def _send_key(key, custom=True, sleep=0.0):
        """
        Send keywords to SLOF's user interface and collect the output.

        :param key: keyword (or menu item number) to send
        :param custom: send as a whole word via _send_custom_key when
                       True, as a single key press otherwise
        :param sleep: extra delay before reading the console back
        :return: the SLOF output as a string, or None if nothing matched
        """
        # Numeric keys are menu selections; they share the 'select'
        # parameter namespace for the start/end console markers.
        obj_name = 'select' if re.search(r'^\d+$', key) else key
        k_params = params.object_params(obj_name.replace('-', '_'))
        if custom:
            _send_custom_key(key)
        else:
            vm.send_key(key)
        time.sleep(sleep)
        content, _ = slof.get_boot_content(vm, 0, k_params['start'],
                                           k_params['end'])
        if content:
            logging.info('Output of SLOF:\n%s' % ''.join(content))
            return ''.join(content)
        return None

    def _check_menu_info(menu_info):
        """
        Check the menu info by each items.

        :param menu_info: captured boot-menu text
        :return: the menu number of the configured bootable device
        """
        bootable_num = ''
        for i in range(1, int(params['boot_dev_num']) + 1):
            option = params['menu_option%d' % i]
            logging.info(
                'Checking the device(%s) if is included in menu list.'
                % '->'.join(option.split()))
            dev_type, hba_type, child_bus, addr = option.split()
            # SLOF prints addresses without the leading '0x0' prefix.
            addr = re.sub(r'^0x0?', '', addr)
            pattern = re.compile(
                r'(\d+)\)\s+%s(\d+)?\s+:\s+/%s(\S+)?/%s@%s'
                % (dev_type, hba_type, child_bus, addr), re.M)
            searched = pattern.search(menu_info)
            if not searched:
                test.fail('No such item(%s) in boot menu list.'
                          % '->'.join(option.split()))
            if i == int(params['bootable_index']):
                bootable_num = searched.group(1)
        return bootable_num

    def _enter_user_interface(mode):
        """
        Enter user interface (STOP or F12) and return its console output.
        """
        o = utils_misc.wait_for(lambda: _send_key(enter_key[mode], False),
                                ack_timeout, step=0.0)
        if not o:
            test.fail('Failed to enter user interface in %s sec.'
                      % ack_timeout)
        return o

    def _f12_user_interface_test():
        """
        Test f12 user interface.
        """
        menu_list = _enter_user_interface(F12)
        # Each menu entry is rendered as "<number>)".
        actual_num = len(re.findall(r'\d+\)', menu_list))
        dev_num = params['boot_dev_num']
        if actual_num != int(dev_num):
            test.fail('The number of boot devices is not %s in menu list.'
                      % dev_num)
        # Select the configured bootable entry and wait for the load.
        if not utils_misc.wait_for(
                lambda: _send_key(_check_menu_info(menu_list), False),
                ack_timeout, step=0.0):
            test.fail('Failed to load after selecting boot device '
                      'in %s sec.' % ack_timeout)

    def _load_user_interface_test():
        """
        Test boot/reset-all user interface.
        """
        _enter_user_interface(STOP)
        if not utils_misc.wait_for(
                lambda: _send_key(keys, True, 3), ack_timeout, step=0.0):
            test.fail('Failed to load after \'%s\' in %s sec.'
                      % (keys, ack_timeout))

    def _check_serial_log_status():
        """
        Check the status of serial log.
        """
        file_timeout = 30
        if not utils_misc.wait_for(
                lambda: os.path.isfile(vm.serial_console_log),
                file_timeout):
            test.error('No found serial log during %s sec.' % file_timeout)

    # Dispatch table: the configured send_keys value selects the scenario.
    main_tests = {'f12': _f12_user_interface_test,
                  'boot': _load_user_interface_test,
                  'reset-all': _load_user_interface_test}

    ack_timeout = params['ack_timeout']
    keys = params['send_keys']
    env_process.process(test, params, env, env_process.preprocess_image,
                        env_process.preprocess_vm)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    _check_serial_log_status()
    main_tests[keys]()

    error_context.context("Try to log into guest '%s'." % vm.name,
                          logging.info)
    session = vm.wait_for_login(timeout=float(params['login_timeout']))
    logging.info("log into guest '%s' successfully." % vm.name)

    error_context.context("Try to ping external host.", logging.info)
    extra_host_ip = utils_net.get_host_ip_address(params)
    session.cmd('ping %s -c 5' % extra_host_ip)
    logging.info("Ping host(%s) successfully." % extra_host_ip)
    vm.destroy(gracefully=True)
def run(test, params, env):
    """
    Verify the "-debugcon" parameter under the UEFI environment:

    1) Boot up a guest.
       If params["ovmf_log"] is not None, append debugcon parameter to
       qemu command lines.
    2) Remove the existing isa-log device.
    3) Destroy the guest.
    4) Start the trace command on host.
    5) Re-create the guest and verify it is alive.
    6) Destroy the guest.
    7) Check pio_read counts and pio_write counts.
       7.1) If disable debugcon:
            pio_read_counts > 0
            pio_write_counts = 0
       7.2) If enable debugcon:
            pio_read_counts > 0
            pio_write_counts > 0

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_trace_process():
        """Return True while the trace process is running on the host."""
        return process.system(params["grep_trace_cmd"],
                              ignore_status=True, shell=True) == 0

    def remove_isa_debugcon(vm):
        """Remove the existing isa-log device and re-register the VM."""
        for device in vm.devices:
            if device.type == "isa-log":
                vm.devices.remove(device)
                break
        env.register_vm(vm.name, vm)

    def trace_kvm_pio():
        """Trace event kvm_pio (blocks until the recorder is stopped)."""
        process.system(trace_record_cmd)

    # install trace-cmd in host
    utils_package.package_install("trace-cmd")
    if params.get("ovmf_log"):
        error_context.context("Append debugcon parameter to "
                              "qemu command lines.", test.log.info)
        ovmf_log = utils_misc.get_path(test.debugdir, params["ovmf_log"])
        params["extra_params"] %= ovmf_log
    params["start_vm"] = "yes"
    env_process.process(test, params, env, env_process.preprocess_image,
                        env_process.preprocess_vm)
    trace_output_file = utils_misc.get_path(test.debugdir,
                                            params["trace_output"])
    trace_record_cmd = params["trace_record_cmd"] % trace_output_file
    check_pio_read = params["check_pio_read"] % trace_output_file
    check_pio_write = params["check_pio_write"] % trace_output_file
    stop_trace_record = params["stop_trace_record"]
    timeout = int(params.get("timeout", 120))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    error_context.context("Remove the existing isa-log device.",
                          test.log.info)
    remove_isa_debugcon(vm)
    vm.destroy()

    error_context.context("Run trace record command on host.", test.log.info)
    bg = utils_test.BackgroundTest(trace_kvm_pio, ())
    bg.start()
    # Fix: is_alive is a method -- the original passed the bound method
    # object itself, which is always truthy, so the wait never actually
    # verified that the background tracer had started.
    if not utils_misc.wait_for(lambda: bg.is_alive(), timeout):
        test.fail("Failed to start command: '%s'" % trace_record_cmd)
    try:
        # Boot/destroy the guest while kvm_pio events are being recorded.
        vm.create()
        vm.verify_alive()
        vm.destroy()
        process.system(stop_trace_record, ignore_status=True, shell=True)
        if not utils_misc.wait_for(lambda: not check_trace_process(),
                                   timeout, 30, 3):
            test.fail("Failed to stop command: '%s' after %s seconds."
                      % (stop_trace_record, timeout))
        pio_read_counts = int(process.run(
            check_pio_read, shell=True).stdout.decode().strip())
        err_str = "pio_read counts should be greater than 0. "
        err_str += "But the actual counts are %s." % pio_read_counts
        test.assertGreater(pio_read_counts, 0, err_str)
        pio_write_counts = int(process.run(
            check_pio_write, shell=True).stdout.decode().strip())
        if params.get("ovmf_log"):
            err_str = "pio_write counts should be greater than 0. "
            err_str += "But the actual counts are %s." % pio_write_counts
            test.assertGreater(pio_write_counts, 0, err_str)
        else:
            err_str = "pio_write counts should be equal to 0. "
            err_str += "But the actual counts are %s." % pio_write_counts
            test.assertEqual(pio_write_counts, 0, err_str)
    finally:
        # Best effort: never leave the recorder running on the host.
        if check_trace_process():
            process.system(stop_trace_record, ignore_status=True, shell=True)
def run(test, params, env):
    """
    Verification that image lock has no effect on the read operation from
    different image chain.

    Steps:
        1. create the first snapshot chain: image1 -> sn01 -> sn02
        2. boot first vm from sn02
        3. create the second snapshot chain: image1 -> sn11 -> sn12 ->sn13
        4. boot second vm frm sn13 and create a temporary file
        5. commit sn13
        6. boot second vm from sn12 and verify the temporary file is
           presented.

    :param test: Qemu test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    # Both snapshot chains share image1 as their backing base.
    params.update(
        {"image_name_image1": params["image_name"],
         "image_format_image1": params["image_format"]})
    error_context.context("boot first vm from first image chain",
                          logging.info)
    env_process.process(test, params, env, env_process.preprocess_image,
                        env_process.preprocess_vm)
    vm1 = env.get_vm(params["main_vm"])
    vm1.verify_alive()

    # Switch params over to the second chain and the second VM.
    params["images"] = params["image_chain"] = params["image_chain_second"]
    params["main_vm"] = params["vms"].split()[-1]
    sn_tags = params["image_chain"].split()[1:]
    images = [QemuImgTest(test, params, env, image) for image in sn_tags]
    error_context.context("create the second snapshot chain", logging.info)
    for image in images:
        logging.debug("create snapshot %s based on %s",
                      image.image_filename, image.base_image_filename)
        image.create_snapshot()
        logging.debug("boot from snapshot %s", image.image_filename)
        try:
            # ensure vm only boot with this snapshot
            image.start_vm({"boot_drive_%s" % image.tag: "yes"})
        except virt_vm.VMCreateError:
            # add images in second chain to images so they could be deleted
            # in postprocess
            # NOTE(review): this appends the QemuImgTest object's string
            # form; the image tag (image.tag) looks intended -- confirm.
            params["images"] += " %s" % image
            test.fail("fail to start vm from snapshot %s" %
                      image.image_filename)
        else:
            # Keep only the last snapshot's VM running for the next step.
            if image is not images[-1]:
                image.destroy_vm()
    # 'image' is now the top snapshot (sn13) booted in the loop above.
    tmpfile = params.get("guest_tmp_filename")
    error_context.context("create a temporary file: %s in %s"
                          % (tmpfile, image.image_filename), logging.info)
    hash_val = image.save_file(tmpfile)
    logging.debug("The hash of temporary file:\n%s", hash_val)
    image.destroy_vm()

    error_context.context("commit image %s" % image.image_filename,
                          logging.info)
    fail_on()(image.commit)()

    error_context.context("check temporary file after commit", logging.info)
    # After committing sn13 into sn12, the file must be visible from sn12.
    image = images[-2]
    logging.debug("boot vm from %s", image.image_filename)
    image.start_vm({"boot_drive_%s" % image.tag: "yes"})
    if not image.check_file(tmpfile, hash_val):
        test.fail("File %s's hash is different after commit" % tmpfile)
def run(test, params, env):
    """
    Verify the boot order from SLOF.

    Step:
     Scenario 1:
      1.1 Boot a guest with an empty disk, cdrom and nic, and don't
          specify disk bootindex=0, then set
          "order=cdn,once=n,menu=off,strict=off" for boot options.
      1.2 Check the boot order which should be nic->disk->cdrom->nic.
     Scenario 2:
      2.1 Boot a guest with an empty disk and nic, and don't specify this
          device bootindex=0, then set "order=cdn,once=n,menu=off,
          strict=off" for boot options.
      2.2 Check the boot order which should be nic->disk->nic.
     Scenario 3:
      3.1 Boot a guest with an empty disk, specify this device
          bootindex=0, then set "order=cdn,once=n,menu=off,strict=on"
          for boot options.
      3.2 Check the boot order which should be just disk.
     Scenario 4:
      4.1 Boot a guest with an empty disk and nic, specify this device
          bootindex=0, then set "order=cdn,once=n,menu=off,strict=off"
          for boot options.
      4.2 Check the boot order which should be disk->nic.

    :param test: Qemu test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """

    def _send_custom_key():
        """
        Send custom keyword to SLOF's user interface.
        """
        logging.info('Sending \"%s\" to SLOF user interface.' % send_key)
        for key in send_key:
            # '-' has no direct key name; translate it for send_key().
            key = 'minus' if key == '-' else key
            vm.send_key(key)
        vm.send_key('ret')

    def _verify_boot_order(order):
        """
        Verify the order of booted devices.

        :param order: whitespace-separated device letters ('c' disk,
                      'd' cdrom, 'n' nic) in the expected boot order.
        """
        for index, dev in enumerate(order.split()):
            args = device_map[dev]
            details = 'The device({}@{}) is not the {} bootable device.'.format(
                args[1], args[2], index)
            if not slof.verify_boot_device(
                    content, args[0], args[1], args[2], position=index):
                test.fail('Fail: ' + details)
            logging.info('Pass: ' + details)

    parent_bus = params.get('parent_bus')
    child_bus = params.get('child_bus')
    parent_bus_nic = params.get('parent_bus_nic')
    child_bus_nic = params.get('child_bus_nic')
    send_key = params.get('send_key')
    # Map device letter -> (parent bus, child bus, device address) as
    # consumed by slof.verify_boot_device().
    device_map = {'c': (parent_bus, child_bus, params.get('disk_addr')),
                  'd': (parent_bus, child_bus, params.get('cdrom_addr')),
                  'n': (parent_bus_nic, child_bus_nic,
                        params.get('nic_addr'))}
    env_process.process(
        test, params, env, env_process.preprocess_image,
        env_process.preprocess_vm)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    # Wait for the SLOF prompt and capture the boot console output.
    content, next_pos = slof.wait_for_loaded(vm, test, end_str='0 >')
    _verify_boot_order(params['order_before_send_key'])
    if send_key in ('reset-all', 'boot'):
        error_context.context("Reboot guest by sending key.", logging.info)
        _send_custom_key()
        # Re-capture the console after the reboot before re-checking.
        content, _ = slof.wait_for_loaded(vm, test, next_pos, end_str='0 >')
        _verify_boot_order(params['order_after_send_key'])
def run(test, params, env):
    """
    PCI Devices test
    1) prints out the setup to be used
    2) boots the defined VM
    3) verifies monitor "info qtree" vs. internal representation
    4) verifies guest "lspci" vs. info qtree (Linux only)

    :note: Only PCI device properties are checked

    :param test: VirtTest instance
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    error_context.context("Creating early names representation")
    env_process.preprocess_vm(test, params, env, params["main_vm"])
    vm = env.get_vm(params["main_vm"])
    qdev = vm.make_create_command()    # parse params into qdev
    # make_create_command may return a tuple; the qdev container is first.
    if isinstance(qdev, tuple):
        qdev = qdev[0]
    error_context.context("Getting main PCI bus info")
    error_context.context("Processing test params")
    test_params = params['test_setup']
    test_devices = params['test_devices']
    test_device_type = params['test_device_type']
    if not params.get('pci_controllers'):
        params['pci_controllers'] = ''
    # _lasts tracks the innermost bus per nesting depth while walking the
    # "a->b->devices" topology lines of test_setup.
    _lasts = [PCIBusInfo(qdev.get_by_properties({'aobject': 'pci.0'})[0])]
    _lasts[0].first = 7   # first 6 slots might be already occupied on pci.0
    _lasts[0].last -= 1   # last port is usually used by the VM
    use_buses = []
    names = {}
    logging.info("Test setup")
    # NOTE: split on the literal two characters "\n" — test_setup encodes
    # its line breaks as escaped sequences in the config.
    for line in test_params.split('\\n'):
        _idx = 0
        out = ""
        for device in line.split('->'):
            device = device.strip()
            if device:
                if device == 'devices':
                    # Marker: attach the generated test devices to the
                    # current innermost bus.
                    use_buses.append(_lasts[_idx])
                    out += "->(test_devices)"
                    break
                # Unique name per controller type: test_pci_<type><idx>.
                idx = names.get(device, 0) + 1
                name = "test_pci_%s%d" % (device, idx)
                names[device] = idx
                params, bus = add_bus(qdev, params, device, name, _lasts[_idx])
                # we inserted a device, increase the upper bus first idx
                _lasts[_idx].first += 1
                out += "->%s" % (name)
                _idx += 1
                # Drop stale deeper levels from a previous line, then record
                # the newly created bus as the current innermost one.
                if len(_lasts) > _idx:
                    _lasts = _lasts[:_idx]
                _lasts.append(bus)
            else:
                # Empty segment: keep depth alignment for the printed setup.
                _idx += 1
                out += " " * (len(_lasts[_idx].name) + 2)
        logging.info(out)

    # Strategy selection: how many devices to add per bus, and which device
    # type to add (defaults are the *_random variants).
    add_devices = {'first': add_devices_first,
                   'all': add_devices_all
                   }.get(test_devices, add_devices_random)
    add_device = {'uhci': add_device_usb_uhci,
                  'ehci': add_device_usb_ehci,
                  'xhci': add_device_usb_xhci,
                  'virtio_disk': add_virtio_disk,
                  }.get(test_device_type, add_device_random)

    name_idxs = {}
    for bus in use_buses:
        params, name_idxs = add_devices(params, name_idxs, bus, add_device)
    params['start_vm'] = 'yes'
    env_process.process(test, params, env, env_process.preprocess_image,
                        env_process.preprocess_vm)
    vm = env.get_vm(params["main_vm"])

    # PCI devices are initialized by firmware, which might require some time
    # to setup. Wait 5s before getting the qtree.
    time.sleep(5)
    qtree = qemu_qtree.QtreeContainer()

    error_context.context("Verify qtree vs. qemu devices", logging.info)
    _info_qtree = vm.monitor.info('qtree', False)
    qtree.parse_info_qtree(_info_qtree)
    info_qdev = process_qdev(vm.devices)
    info_qtree = process_qtree(qtree)
    # Collect comparison failures and fail once at the end so all diagnostics
    # are logged.
    errors = ""
    err = verify_qdev_vs_qtree(info_qdev, info_qtree)
    if err:
        logging.error(_info_qtree)
        logging.error(qtree.get_qtree().str_qtree())
        logging.error(vm.devices.str_bus_long())
        logging.error(err)
        errors += "qdev vs. qtree, "

    error_context.context("Verify VM booted properly.", logging.info)
    session = vm.wait_for_login()

    error_context.context("Verify lspci vs. qtree", logging.info)
    # lspci comparison only runs when a guest lspci command is configured
    # (Linux guests only).
    if params.get('lspci_cmd'):
        _info_lspci = session.cmd_output(params['lspci_cmd'])
        info_lspci = process_lspci(_info_lspci)
        err = verify_lspci(info_lspci, info_qtree[2])
        if err:
            logging.error(_info_lspci)
            logging.error(_info_qtree)
            logging.error(err)
            errors += "qtree vs. lspci, "

    error_context.context("Results")
    if errors:
        # errors[:-2] strips the trailing ", " separator.
        test.fail("Errors occurred while comparing %s. Please check"
                  " the log for details." % errors[:-2])
def run(test, params, env):
    """
    Test simple io on FC device pass-through to guest as lun device.

    Step:
     1. Find FC device on host.
     2. Boot a guest with FC disk as scsi-block device for guest.
     3. Access guest then do io on the data disk.
     4. Check vm status.
     5. repeat step 2-4 but as scsi-generic

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def _clean_disk_windows(index):
        # Build a diskpart script (select/clean/rescan/detail) in a
        # randomly named temp file, run it, then delete it.
        tmp_file = "disk_" + ''.join(
            random.sample(string.ascii_letters + string.digits, 4))
        online_cmd = "echo select disk %s > " + tmp_file
        online_cmd += " && echo clean >> " + tmp_file
        online_cmd += " && echo rescan >> " + tmp_file
        online_cmd += " && echo detail disk >> " + tmp_file
        online_cmd += " && diskpart /s " + tmp_file
        online_cmd += " && del /f " + tmp_file
        return session.cmd(online_cmd % index, timeout=timeout)

    def _get_window_disk_index_by_wwn(uid):
        # Query the guest disk number whose UniqueId matches the WWN;
        # fails the test when powershell errors or no disk matches.
        cmd = "powershell -command \"get-disk| Where-Object"
        cmd += " {$_.UniqueId -eq '%s'}|select number|FL\"" % uid
        status, output = session.cmd_status_output(cmd)
        if status != 0:
            test.fail("execute command fail: %s" % output)
        # Drop blank lines from the "number : N" formatted-list output.
        output = "".join([s for s in output.splitlines(True) if s.strip()])
        logging.debug(output)
        info = output.split(":")
        if len(info) > 1:
            return info[1].strip()
        test.fail("Not find expected disk ")

    def _get_fc_devices():
        # Enumerate host block devices on the "fc" transport and enrich
        # each entry with its scsi-generic node from lsscsi.
        devs = []
        cmd = "lsblk -Spo 'NAME,TRAN' |awk '{if($2==\"fc\") print $1}'"
        status, output = process.getstatusoutput(cmd)
        devs_str = output.strip().replace("\n", " ")
        if devs_str:
            cmd = "lsblk -Jpo 'NAME,HCTL,SERIAL,TRAN,FSTYPE,WWN' %s" % devs_str
            status, output = process.getstatusoutput(cmd)
            devs = copy.deepcopy(json.loads(output)["blockdevices"])
            for dev in devs:
                cmd = "lsscsi -gb %s|awk '{print $3}'" % dev["hctl"]
                status, output = process.getstatusoutput(cmd)
                dev["sg_dev"] = output
        logging.debug(devs)
        return devs

    fc_devs = _get_fc_devices()
    if not len(fc_devs):
        test.cancel("No FC device")
    # Only the first discovered FC device is used for the test.
    fc_dev = fc_devs[0]

    vm = env.get_vm(params['main_vm'])
    timeout = float(params.get("timeout", 240))
    drive_type = params.get("drive_type")
    os_type = params["os_type"]
    driver_name = params.get("driver_name")
    guest_cmd = params["guest_cmd"]
    clean_cmd = params["clean_cmd"]

    if drive_type == "scsi_block":
        params["image_name_stg0"] = fc_dev["name"]
        # For multipath members pass through the underlying path device.
        if fc_dev["fstype"] == "mpath_member":
            params["image_name_stg0"] = fc_dev["children"][0]["name"]
    else:
        # scsi-generic case: use the /dev/sgN node.
        params["image_name_stg0"] = fc_dev["sg_dev"]

    clean_cmd = clean_cmd % params["image_name_stg0"]
    error_context.context("run clean cmd %s" % clean_cmd, logging.info)
    process.getstatusoutput(clean_cmd)
    params['start_vm'] = 'yes'
    env_process.process(test, params, env,
                        env_process.preprocess_image,
                        env_process.preprocess_vm)
    session = vm.wait_for_login(timeout=timeout)

    # The guest reports the WWN without the host's "0x" prefix.
    disk_wwn = fc_dev["wwn"]
    disk_wwn = disk_wwn.replace("0x", "")
    if os_type == 'windows' and driver_name:
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name, timeout)
    if os_type == 'windows':
        part_size = params["part_size"]
        guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd)
        did = _get_window_disk_index_by_wwn(disk_wwn)
        utils_disk.update_windows_disk_attributes(session, did)
        logging.info("Clean partition disk:%s", did)
        _clean_disk_windows(did)
        # Partitioning occasionally fails right after a clean; retry once
        # after a short settle delay.
        try:
            driver = configure_empty_disk(session, did, part_size, os_type)[0]
        except Exception as err:
            logging.warning("configure_empty_disk again due to:%s", err)
            time.sleep(10)
            _clean_disk_windows(did)
            driver = configure_empty_disk(session, did, part_size, os_type)[0]
            logging.debug("configure_empty_disk over")
        output_path = driver + ":\\test.dat"
    else:
        output_path = get_linux_drive_path(session, disk_wwn)

    if not output_path:
        test.fail("Can not get output file path in guest.")

    logging.debug("Get output file path %s", output_path)
    guest_cmd = guest_cmd.format(output_path)
    error_context.context('Start io test...', logging.info)
    session.cmd(guest_cmd, timeout=360)
    # The VM must still be running after the io workload completes.
    if not vm.monitor.verify_status("running"):
        test.fail("Guest not run after dd")
def run(test, params, env):
    """
    Qemu virtio-rng device test:
    1) boot guest with virtio-rng device
    2) read random data in guest
    3) check the read data rate

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def _is_rngd_running():
        """
        Check whether rngd is running
        """
        output = session.cmd_output(check_rngd_service)
        return 'running' in output

    timeout = params.get_numeric("login_timeout", 360)
    read_rng_timeout = float(params.get("read_rng_timeout", 3600))
    read_rng_cmd = params["read_rng_cmd"]
    max_bytes = params.get("max-bytes_virtio-rng-pci")
    period = params.get("period_virtio-rng-pci")
    if not max_bytes and not period:
        test.error("Please specify the expected max-bytes and/or period.")
    if not max_bytes or not period:
        # Only one of the rate-limit properties is set: qemu is expected to
        # refuse to start, except for the special max_bytes == '0' case,
        # which is tested with a running guest below.
        if max_bytes != '0':
            error_info = params["expected_error_info"]
            try:
                env_process.process(test, params, env,
                                    env_process.preprocess_image,
                                    env_process.preprocess_vm)
            except virt_vm.VMCreateError as e:
                if error_info not in e.output:
                    test.fail("Expected error info '%s' is not reported, "
                              "output: %s" % (error_info, e.output))
            return

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)
    error_context.context("Read virtio-rng device to get random number",
                          logging.info)

    # Make sure rngd is running in the guest; start it when it is not.
    check_rngd_service = params.get("check_rngd_service")
    if check_rngd_service:
        if not utils_misc.wait_for(_is_rngd_running, 30, first=5):
            start_rngd_service = params["start_rngd_service"]
            status, output = session.cmd_status_output(start_rngd_service)
            if status:
                test.error(output)

    if max_bytes == '0':
        # Rate limited to zero bytes: the read must block until the shell
        # command times out; completing is a failure.
        try:
            s, o = session.cmd_status_output(read_rng_cmd,
                                             timeout=read_rng_timeout)
        except ShellTimeoutError:
            pass
        else:
            test.fail("Unexpected dd result, status: %s, output: %s"
                      % (s, o))
    else:
        s, o = session.cmd_status_output(read_rng_cmd,
                                         timeout=read_rng_timeout)
        if s:
            test.error(o)
        logging.info(o)
        data_rate = re.search(r'\s(\d+\.\d+) kB/s', o, re.M)
        # Fix: guard against unparsable output — the original dereferenced
        # data_rate.group(1) without checking, raising AttributeError
        # instead of a meaningful error when dd output changes format.
        if data_rate is None:
            test.error("Could not parse data rate from output: %s" % o)
        expected_data_rate = float(params["expected_data_rate"])
        # Allow 10% headroom above the expected rate before flagging.
        if float(data_rate.group(1)) > expected_data_rate * 1.1:
            test.error("Read data rate is not as expected. "
                       "data rate: %s kB/s, max-bytes: %s, period: %s"
                       % (data_rate.group(1), max_bytes, period))
    session.close()
def run(test, params, env):
    """
    When VM encounter fault disk result in it loss response.
    The kill vm should non-infinite.

    Steps:
     1) Emulate fault disk with dmsetup and iscsi.
     2) Boot vm with the pass-through disk.
     3) Login guest and do io on the disk.
     4) Kill the qemu process and wait it truly be killed.
     5) Check the kill time it should less than expected timeout.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def _prepare_fault_disk():
        # Load scsi_debug, wrap the resulting disk with a dm error target,
        # and publish the mapper device through params for later use.
        cmd = params['cmd_get_scsi_debug']
        process.run(cmd, shell=True)
        cmd = "cat " + params['dev_scsi_debug']
        params['scsi_debug_disk'] = process.getoutput(cmd, shell=True)
        if not params['scsi_debug_disk']:
            test.fail("Can not find scsi_debug disk %s" % cmd)
        cmd_dmsetup = params['cmd_dmsetup'].format(
            params['dev_mapper'], params['scsi_debug_disk'])
        process.run(cmd_dmsetup, shell=True)
        cmd = "dmsetup info " + params['dev_mapper']
        process.run(cmd, shell=True)
        params['mapper_disk'] = "/dev/mapper/" + params['dev_mapper']
        params['emulated_image'] = params['mapper_disk']

    def _cleanup():
        # Destroy the VM if still alive, then tear down the dm/scsi_debug
        # setup when it was created.
        if vm and vm.is_alive():
            vm.destroy()
        if params['mapper_disk']:
            cmd_cleanup = params['cmd_cleanup']
            process.run(cmd_cleanup, 600, shell=True)

    def _online_disk_windows(index):
        # Run a diskpart script from a random temp file to online and
        # clear the selected guest disk.
        disk = "disk_" + ''.join(
            random.sample(string.ascii_letters + string.digits, 4))
        online_cmd = "echo select disk %s > " + disk
        online_cmd += " && echo online disk noerr >> " + disk
        online_cmd += " && echo clean >> " + disk
        online_cmd += " && echo attributes disk clear readonly >> " + disk
        online_cmd += " && echo detail disk >> " + disk
        online_cmd += " && diskpart /s " + disk
        online_cmd += " && del /f " + disk
        return session.cmd(online_cmd % index, timeout=timeout)

    def _get_window_disk_index_by_uid(wwn):
        # Look up the guest disk number by its UniqueId (WWN); on failure
        # dump the full get-disk listing for diagnostics before failing.
        cmd = "powershell -command \"get-disk|?"
        cmd += " {$_.UniqueId -eq '%s'}|select number|FL\"" % wwn
        status, output = session.cmd_status_output(cmd)
        if status != 0:
            test.fail("execute command fail: %s" % output)
        logging.debug(output)
        output = "".join([s for s in output.splitlines(True) if s.strip()])
        info = output.split(":")
        if len(info) > 1:
            return info[1].strip()
        cmd = "powershell -command \"get-disk| FL\""
        output = session.cmd_output(cmd)
        logging.debug(output)
        test.fail("Not find expected disk:" + wwn)

    def _get_disk_wwn(devname):
        # WWN without the "0x" prefix, matching what the guest reports.
        cmd = "lsblk -ndo WWN " + devname
        output = process.system_output(cmd, shell=True).decode()
        wwn = output.replace("0x", "")
        return wwn

    vm = None
    iscsi = None
    params['scsi_debug_disk'] = None
    params['mapper_disk'] = None
    timeout = params.get_numeric("timeout", 360)
    kill_max_timeout = params.get_numeric("kill_max_timeout", 240)
    kill_min_timeout = params.get_numeric("kill_min_timeout", 60)
    os_type = params["os_type"]
    guest_cmd = params["guest_cmd"]
    host_kill_command = params["host_kill_command"]
    try:
        logging.info("Prepare fault disk.")
        _prepare_fault_disk()
        logging.info("Create iscsi disk disk.")
        base_dir = data_dir.get_data_dir()
        iscsi = Iscsi.create_iSCSI(params, base_dir)
        iscsi.login()
        dev_name = utils_misc.wait_for(lambda: iscsi.get_device_name(), 60)
        if not dev_name:
            test.error('Can not get the iSCSI device.')
        logging.info('Create host disk %s', dev_name)
        disk_wwn = _get_disk_wwn(dev_name)
        params["image_name_stg0"] = dev_name
        logging.info('Booting vm...')
        params['start_vm'] = 'yes'
        vm = env.get_vm(params['main_vm'])
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        session = vm.wait_for_login(timeout=600)
        if os_type == 'windows':
            guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd)
            disk_drive = _get_window_disk_index_by_uid(disk_wwn)
            _online_disk_windows(disk_drive)
        else:
            disk_drive = get_linux_drive_path(session, disk_wwn)
        guest_cmd = guest_cmd % disk_drive
        logging.debug("guest_cmd:%s", guest_cmd)
        logging.info("Execute io in guest...")
        # sendline (not cmd): the io is expected to hang on the fault disk.
        session.sendline(guest_cmd)
        time.sleep(10)
        logging.info("Ready to kill vm...")
        process.system_output(host_kill_command, shell=True).decode()
        # get_timeout_command measures how long the qemu process took to die.
        real_timeout = int(process.system_output(
            params["get_timeout_command"], shell=True).decode())
        if kill_min_timeout < real_timeout < kill_max_timeout:
            logging.info("Succeed kill timeout: %d", real_timeout)
        else:
            test.fail("Kill timeout %d not in range (%d , %d)" %
                      (real_timeout, kill_min_timeout, kill_max_timeout))
        # The qemu process was already killed; drop the reference so
        # _cleanup() does not try to destroy it again.
        vm = None
    finally:
        logging.info("cleanup")
        if iscsi:
            iscsi.cleanup()
        _cleanup()
def test(self):
    """
    Multihost cdrom-lock migration test: lock the cdrom tray on the source
    host, migrate, verify the lock survived, unlock on the destination,
    migrate back, and verify the unlock survived.

    NOTE(review): `test`, `params`, `env`, `login_timeout` and the helper
    functions appear to come from an enclosing scope (this method is
    presumably defined inside a `run()` closure) — confirm against the
    full file.
    """
    super(test_multihost_locking, self).test()
    error.context("Lock cdrom in VM.")
    # Starts in source: boot the VM with a serialized cdrom drive and
    # lock its tray from inside the guest.
    if self.is_src:
        serial_num = generate_serial_num()
        cdrom = params.get("cdroms", "").split()[-1]
        params["drive_serial_%s" % cdrom] = serial_num
        params["start_vm"] = "yes"
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        vm = env.get_vm(self.vms[0])
        session = vm.wait_for_login(timeout=login_timeout)
        cdrom_dev_list = list_guest_cdroms(session)
        guest_cdrom_device = get_testing_cdrom_device(
            session, cdrom_dev_list, serial_num)
        logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
        device = get_device(vm, self.cdrom_orig)

        session.cmd(params["lock_cdrom_cmd"] % guest_cdrom_device)
        locked = check_cdrom_lock(vm, device)
        if locked:
            logging.debug("Cdrom device is successfully locked in VM.")
        else:
            raise error.TestFail("Cdrom device should be locked"
                                 " in VM.")

    # Both hosts synchronize before migrating source -> destination.
    self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                            'cdrom_dev', cdrom_prepare_timeout)
    self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

    # Starts in dest: verify the lock state survived migration.
    if not self.is_src:
        vm = env.get_vm(self.vms[0])
        session = vm.wait_for_login(timeout=login_timeout)
        cdrom_dev_list = list_guest_cdroms(session)
        logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
        device = get_device(vm, self.cdrom_orig)
        locked = check_cdrom_lock(vm, device)
        if locked:
            logging.debug("Cdrom device stayed locked after "
                          "migration in VM.")
        else:
            raise error.TestFail("Cdrom device should stayed locked"
                                 " after migration in VM.")

    error.context("Unlock cdrom from VM.")
    # Starts in dest: unlock the tray from inside the guest.
    if not self.is_src:
        cdrom_dev_list = list_guest_cdroms(session)
        # NOTE(review): `serial_num` is only assigned in the `self.is_src`
        # branch above; on the destination host this name looks undefined
        # here — verify against the enclosing scope / upstream source.
        guest_cdrom_device = get_testing_cdrom_device(
            session, cdrom_dev_list, serial_num)
        session.cmd(params["unlock_cdrom_cmd"] % guest_cdrom_device)
        locked = check_cdrom_lock(vm, device)
        if not locked:
            logging.debug("Cdrom device is successfully unlocked"
                          " from VM.")
        else:
            raise error.TestFail("Cdrom device should be unlocked"
                                 " in VM.")

    # Migrate back destination -> source.
    self.mig.migrate_wait([self.vms[0]], self.dsthost, self.srchost)

    # Starts in source: verify the unlock state survived migration back.
    if self.is_src:
        locked = check_cdrom_lock(vm, device)
        if not locked:
            logging.debug("Cdrom device stayed unlocked after "
                          "migration in VM.")
        else:
            raise error.TestFail("Cdrom device should stayed unlocked"
                                 " after migration in VM.")

    self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                            'Finish_cdrom_test', login_timeout)
def run(test, params, env):
    """
    Test IO on specific max_sector_kb of disk.

    Steps:
     1) Create lvs based on iscsi disk with specific max_sector_kb.
     2) Boot vm with the lvs disks.
     3) Login guest and do io on the disks.
     4) Wait minutes then Check the VM still running.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def _setup_lvs(dev):
        # Collect every shell command up front, then run them in order:
        # max_sector tuning, VG creation, and per-LV create + image build.
        commands = [
            params['cmd_set_max_sector'].format(dev.replace("/dev/", "")),
            params['cmd_setup_vg'].format(dev),
        ]
        for lv in lvs:
            commands.append(params['cmd_setup_lv'].format(lv))
            commands.append(params['cmd_build_img'].format(lv))
        for command in commands:
            process.run(command, shell=True)

    def _cleanup_lvs(dev):
        # Tear down in reverse: stop the VM first, then remove LVs and VG.
        if vm and vm.is_alive():
            vm.destroy()
        if not dev:
            return
        process.run(params['cmd_clean_lv'], shell=True)
        process.run(params['cmd_clean_vg'].format(dev), shell=True)

    def _execute_io_in_guest():
        # Resolve every LV's guest drive path first, then fire each io
        # command on its own login session without waiting for completion.
        io_cmds = [guest_cmd.format(get_linux_drive_path(session, serial))
                   for serial in lvs]
        for command in io_cmds:
            log.info("Run io in guest: %s", command)
            vm.wait_for_login(timeout=timeout).sendline(command)

    vm = None
    iscsi = None
    dev_name = None
    log = logging.getLogger('avocado.test')
    lvs = params['lvs_name'].split(",")
    timeout = params.get_numeric("timeout", 180)
    guest_cmd = params["guest_cmd"]
    try:
        params['image_size'] = params['emulated_image_size']
        log.info("Create iscsi disk.")
        iscsi = Iscsi.create_iSCSI(params, data_dir.get_data_dir())
        iscsi.login()
        dev_name = utils_misc.wait_for(lambda: iscsi.get_device_name(), 60)
        if not dev_name:
            test.error('Can not get the iSCSI device.')
        log.info("Prepare lvs disks on %s", dev_name)
        _setup_lvs(dev_name)
        log.info('Booting vm...')
        params['start_vm'] = 'yes'
        vm = env.get_vm(params['main_vm'])
        env_process.process(test, params, env,
                            env_process.preprocess_image,
                            env_process.preprocess_vm)
        session = vm.wait_for_login(timeout=timeout)
        log.info('Execute IO in guest ...')
        _execute_io_in_guest()
        log.info('Check guest status.')
        # Poll for up to 10 minutes; if the VM ever leaves "running",
        # report whether it died or merely changed state.
        stopped = utils_misc.wait_for(
            lambda: not vm.monitor.verify_status("running"),
            600, first=10, step=20)
        if stopped:
            if vm.is_dead():
                test.fail("Vm in dead status.")
            test.fail("VM not in running: %s" % vm.monitor.get_status())
    finally:
        log.info("cleanup")
        _cleanup_lvs(dev_name)
        if iscsi:
            iscsi.cleanup()