def install_stress_app(session):
    """
    Install the stress application inside the guest.

    Skips the install when ``app_check_cmd`` already succeeds in the guest.
    Otherwise downloads the package on the host (md5-verified), copies it
    into the guest and runs ``install_cmd`` there.

    :param session: live guest session used to run commands.
    :raise: test error when the install command exits non-zero.
    """
    if session.cmd_status(params.get("app_check_cmd", "true")) == 0:
        logging.info("Stress app already installed in guest.")
        return
    link = params.get("download_link")
    md5sum = params.get("pkg_md5sum")
    tmp_dir = params.get("tmp_dir", "/var/tmp")
    install_cmd = params.get("install_cmd")
    # Lazy %-args instead of eager string formatting in logging calls
    logging.info("Fetch package: '%s'", link)
    pkg_name = os.path.basename(link)
    pkg_path = os.path.join(test.tmpdir, pkg_name)
    download.get_file(link, pkg_path, hash_expected=md5sum)
    vm.copy_files_to(pkg_path, tmp_dir)
    logging.info("Install app: '%s' in guest.", install_cmd)
    s, o = session.cmd_status_output(install_cmd, timeout=300)
    if s != 0:
        test.error("Fail to install stress app(%s)" % o)
    # Fixed typo: "successed" -> "succeeded"
    logging.info("Install app succeeded")
def download(self):
    """
    Fetch every patch file into the source directory.
    """
    for remote_patch in self.patches:
        local_name = os.path.basename(remote_patch)
        destination = os.path.join(self.source_dir, local_name)
        download.get_file(remote_patch, destination)
def _get(self, url, dst):
    """
    Download a given file to a destination path.

    This is a wrapper to download.get_file(), that will keep trying to
    download the file from the URL for the time defined in the
    RETRY_TIMEOUT class attribute, in step intervals defined in the
    RETRY_STEP class attribute.

    :param url: Universal Resource Location of the source file
    :param dst: Destination path
    :raise: class `KojiDownloadError` when all retries are exhausted
    """
    success = False
    last_error = ""
    end_time = time.time() + self.RETRY_TIMEOUT
    while time.time() < end_time:
        try:
            download.get_file(url, dst)
            success = True
            break
        # Py3-compatible syntax (was the Py2-only "except Exception, e")
        except Exception as e:
            last_error = str(e)
            logging.error("Download failed: %s", last_error)
            logging.error("Retrying after %s seconds...", self.RETRY_STEP)
            # Remove any partial download so the next attempt starts clean
            if os.path.isfile(dst):
                os.unlink(dst)
            time.sleep(self.RETRY_STEP)
    if not success:
        # The docstring promises KojiDownloadError on failure; the original
        # returned silently, leaving callers with a missing file.
        raise KojiDownloadError("Download of %s failed after %s s: %s"
                                % (url, self.RETRY_TIMEOUT, last_error))
def install_stress_app(session):
    """
    Install the stress application inside the guest.

    Skips the install when ``app_check_cmd`` already succeeds in the guest.
    Otherwise downloads the package on the host (md5-verified), copies it
    into the guest and runs ``install_cmd`` there.

    :param session: live guest session used to run commands.
    :raise: test error when the install command exits non-zero.
    """
    if session.cmd_status(params.get("app_check_cmd", "true")) == 0:
        logging.info("Stress app already installed in guest.")
        return
    link = params.get("download_link")
    md5sum = params.get("pkg_md5sum")
    tmp_dir = params.get("tmp_dir", "/var/tmp")
    install_cmd = params.get("install_cmd")
    # Lazy %-args instead of eager string formatting in logging calls
    logging.info("Fetch package: '%s'", link)
    pkg_name = os.path.basename(link)
    pkg_path = os.path.join(test.tmpdir, pkg_name)
    download.get_file(link, pkg_path, hash_expected=md5sum)
    vm.copy_files_to(pkg_path, tmp_dir)
    logging.info("Install app: '%s' in guest.", install_cmd)
    s, o = session.cmd_status_output(install_cmd, timeout=300)
    if s != 0:
        test.error("Fail to install stress app(%s)" % o)
    # Fixed typo: "successed" -> "succeeded"
    logging.info("Install app succeeded")
def _get(self, url, dst):
    """
    Download a given file to a destination path.

    This is a wrapper to download.get_file(), that will keep trying to
    download the file from the URL for the time defined in the
    RETRY_TIMEOUT class attribute, in step intervals defined in the
    RETRY_STEP class attribute.

    :param url: Universal Resource Location of the source file
    :param dst: Destination path
    :raise: class `KojiDownloadError` when all retries are exhausted
    """
    success = False
    last_error = ""
    end_time = time.time() + self.RETRY_TIMEOUT
    while time.time() < end_time:
        try:
            download.get_file(url, dst)
            success = True
            break
        # Py3-compatible syntax (was the Py2-only "except Exception, e")
        except Exception as e:
            last_error = str(e)
            logging.error("Download failed: %s", last_error)
            logging.error("Retrying after %s seconds...", self.RETRY_STEP)
            # Remove any partial download so the next attempt starts clean
            if os.path.isfile(dst):
                os.unlink(dst)
            time.sleep(self.RETRY_STEP)
    if not success:
        # The docstring promises KojiDownloadError on failure; the original
        # returned silently, leaving callers with a missing file.
        raise KojiDownloadError("Download of %s failed after %s s: %s"
                                % (url, self.RETRY_TIMEOUT, last_error))
def download(self):
    """
    Copy each configured patch from its remote location into the
    source directory, keeping the original file name.
    """
    for patch_url in self.patches:
        target = os.path.join(self.source_dir, os.path.basename(patch_url))
        download.get_file(patch_url, target)
def run(test, params, env):
    """
    Negative test for virtio/virtio-non-transitional model of disk

    Boots a guest whose disk uses an invalid/mismatched virtio model and
    verifies the expected failure mode: either the domain is undefinable,
    or it defines but fails to boot (kernel panic on the serial console).

    :param test: test object (fail/error reporting).
    :param params: dictionary with the test parameters.
    :param env: dictionary with the test environment.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy so the domain XML can be restored in finally:
    backup_xml = vmxml.copy()
    status_error = params['status_error']
    guest_src_url = params["guest_src_url"]
    image_name = params['image_path']
    target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
    # Download the guest image only once; reuse it on later runs
    if not os.path.exists(target_path):
        download.get_file(guest_src_url, target_path)
    params["blk_source_name"] = target_path
    try:
        # rhel6 guests need the transitional model on the NIC as well
        if (params["os_variant"] == 'rhel6' or
                'rhel6' in params.get("shortname")):
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        try:
            libvirt.set_vm_disk(vm, params)
        except xcepts.LibvirtXMLError:
            # Expected when the invalid model makes the domain undefinable
            if status_error == 'undefinable':
                return
            else:
                raise
        else:
            if status_error == 'undefinable':
                test.fail("Vm is expected to fail on defining with"
                          " invalid model, while it succeeds")
            # Defining succeeded: booting must fail (no serial login)
            try:
                if not vm.is_alive():
                    vm.start()
                vm.wait_for_serial_login()
            except remote.LoginTimeoutError:
                pass
            else:
                test.fail("Vm is expected to fail on booting from disk"
                          " with wrong model, while login successfully.")
            # Confirm the boot failure is an actual kernel panic
            data = vm.serial_console.get_output()
            if data is None or len(data.splitlines()) < 5:
                logging.warn(
                    "Unable to read serial console or no sufficient data in"
                    " serial console output to detect the kernel panic.")
            else:
                match = re.search('Kernel panic', data, re.S | re.M | re.I)
                if not match:
                    test.fail("Can not find 'Kernel panic' keyword in"
                              " serial console output.")
    finally:
        backup_xml.sync()
def run(test, params, env):
    """
    Negative test for virtio/virtio-non-transitional model of disk

    Boots a guest whose disk uses an invalid/mismatched virtio model and
    verifies the expected failure mode: either the domain is undefinable,
    or it defines but cannot be logged into over the serial console.

    :param test: test object (fail/cancel reporting).
    :param params: dictionary with the test parameters.
    :param env: dictionary with the test environment.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy so the domain XML can be restored in finally:
    backup_xml = vmxml.copy()
    status_error = params['status_error']
    guest_src_url = params["guest_src_url"]
    image_name = params['image_path']
    target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
    params["blk_source_name"] = target_path
    # virtio-transitional requires libvirt >= 5.0.0
    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")
    # Download the guest image only once; reuse it on later runs
    if not os.path.exists(target_path):
        download.get_file(guest_src_url, target_path)
    try:
        # rhel6 guests need the transitional model on the NIC as well
        if (params["os_variant"] == 'rhel6' or
                'rhel6' in params.get("shortname")):
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        try:
            libvirt.set_vm_disk(vm, params)
        except xcepts.LibvirtXMLError:
            # Expected when the invalid model makes the domain undefinable
            if status_error == 'undefinable':
                return
            else:
                raise
        else:
            if status_error == 'undefinable':
                test.fail("Vm is expected to fail on defining with"
                          " invalid model, while it succeeds")
            # Defining succeeded: booting must fail (no serial login)
            try:
                if not vm.is_alive():
                    vm.start()
                vm.wait_for_serial_login()
            except (remote.LoginTimeoutError, aexpect.ExpectError):
                pass
            else:
                test.fail("Vm is expected to fail on booting from disk"
                          " with wrong model, while login successfully.")
    finally:
        backup_xml.sync()
def execute(self):
    """
    Fetch the remote tarball and extract it.

    Main entry point for this helper class: the remote file named by
    ``self.source`` is downloaded next to the destination directory,
    ``self.source`` is repointed at the local copy, and the extraction
    logic inherited from the parent class is then invoked.
    """
    tar_name = os.path.basename(self.source)
    parent_dir = os.path.dirname(self.destination_dir)
    local_copy = os.path.join(parent_dir, tar_name)
    download.get_file(self.source, local_copy)
    self.source = local_copy
    self.extract()
def test_firmware_upgrade(self):
    """
    Updates firmware of the device.
    """
    fw_file = self.firmware_url.split('/')[-1]
    fw_version = fw_file.split('.')[0]
    fw_file_path = download.get_file(self.firmware_url,
                                     os.path.join(self.teststmpdir, fw_file))
    # Record the firmware state before touching the device
    self.log.debug("Current FW: %s", self.get_firmware_version())
    fw_log = self.get_firmware_log()
    # Push the new firmware image to the device
    dl_cmd = "nvme fw-download %s --fw=%s" % (self.device, fw_file_path)
    if process.system(dl_cmd, shell=True, ignore_status=True):
        self.fail("Failed to download firmware to the device")
    # Activate the new image on every slot listed in the firmware log
    frs_lines = [ln for ln in fw_log.splitlines() if "frs" in ln]
    for frs_line in frs_lines:
        slot_num = frs_line.split()[0].split("s")[-1]
        activate_cmd = "nvme fw-activate %s -a 1 -s %s" % (self.device,
                                                           slot_num)
        if process.system(activate_cmd, shell=True, ignore_status=True):
            self.fail("Failed to activate firmware for %s" % slot_num)
    if self.reset_controller_sysfs():
        self.fail("Controller reset after FW update failed")
    # Confirm the new firmware version is now reported
    self.get_firmware_log()
    if fw_version != self.get_firmware_version():
        self.fail("New Firmware not reflecting after updating")
def test_firmware_upgrade(self):
    """
    Updates firmware of the device.
    """
    image_name = self.firmware_url.split('/')[-1]
    expected_version = image_name.split('.')[0]
    image_path = download.get_file(
        self.firmware_url, os.path.join(self.teststmpdir, image_name))
    # Firmware state before the upgrade
    self.log.debug("Current FW: %s", self.get_firmware_version())
    pre_log = self.get_firmware_log()
    # Transfer the new image to the device
    if process.system("nvme fw-download %s --fw=%s" % (self.device,
                                                       image_path),
                      shell=True, ignore_status=True):
        self.fail("Failed to download firmware to the device")
    # Activate the image on each slot mentioned in the firmware log
    for log_line in pre_log.splitlines():
        if "frs" not in log_line:
            continue
        slot = log_line.split()[0].split("s")[-1]
        if process.system("nvme fw-activate %s -a 1 -s %s" % (self.device,
                                                              slot),
                          shell=True, ignore_status=True):
            self.fail("Failed to activate firmware for %s" % slot)
    if self.reset_controller_sysfs():
        self.fail("Controller reset after FW update failed")
    # Verify the device now reports the new firmware
    self.get_firmware_log()
    if expected_version != self.get_firmware_version():
        self.fail("New Firmware not reflecting after updating")
def make_guest_kernel(self):
    """
    Build the guest kernel with a parallel "make".
    """
    os.chdir(self.source)
    logging.info("Building guest kernel")
    logging.debug("Kernel config is %s" % self.config)
    # Place the kernel .config into the source tree
    download.get_file(self.config, ".config")
    # FIXME currently no support for builddir
    # Accept defaults for any config options new to this tree
    process.system('yes "" | make oldconfig > /dev/null', shell=True)
    job_count = multiprocessing.cpu_count()
    logging.info("Running parallel make on src dir")
    process.system("make -j %s %s" % (job_count, self.build_target))
def execute(self):
    """
    Fetch the remote tarball and extract it.

    Main entry point for this helper class: the remote file named by
    ``self.source`` is downloaded next to the destination path,
    ``self.source`` is repointed at the local copy, and the extraction
    logic inherited from the parent class is then invoked.
    """
    archive_name = os.path.basename(self.source)
    local_copy = os.path.join(os.path.dirname(self.destination),
                              archive_name)
    download.get_file(self.source, local_copy)
    self.source = local_copy
    self.extract()
def make_guest_kernel(self):
    """
    Build the guest kernel with a parallel "make".
    """
    os.chdir(self.source)
    LOG.info("Building guest kernel")
    LOG.debug("Kernel config is %s" % self.config)
    # Place the kernel .config into the source tree
    download.get_file(self.config, '.config')
    # FIXME currently no support for builddir
    # Accept defaults for any config options new to this tree
    process.system('yes "" | make oldconfig > /dev/null', shell=True)
    job_count = multiprocessing.cpu_count()
    LOG.info("Running parallel make on src dir")
    process.system("make -j %s %s" % (job_count, self.build_target))
def test_firmware_upgrade(self):
    """
    Updates firmware of the device.

    Tries every commit action (0-3) on every writable firmware slot,
    tracking which combinations succeed, resets the controller when no
    slot committed with action 3 (activate without reset), and finally
    verifies the device reports the new firmware version.
    """
    fw_file = self.firmware_url.split('/')[-1]
    fw_version = fw_file.split('.')[0]
    fw_file_path = download.get_file(self.firmware_url,
                                     os.path.join(self.teststmpdir, fw_file))
    # Getting the current FW details
    self.log.debug("Current FW: %s", self.get_firmware_version())
    self.get_firmware_log()
    # Activating new FW
    passed_commits = []   # per-slot list of actions that committed cleanly
    failed = False
    d_cmd = "%s fw-download %s --fw=%s" % (self.binary, self.device,
                                           fw_file_path)
    for slot in range(1, self.get_firmware_slots() + 1):
        if not self.firmware_slot_write_supported(slot):
            continue
        passed_actions = []
        for action in range(0, 4):
            # Downloading new FW to the device for each slot
            # (a failed download skips the commit, but is not fatal)
            if process.system(d_cmd, shell=True, ignore_status=True):
                continue
            cmd = "%s fw-commit %s -s %d -a %d" % (self.binary, self.device,
                                                   slot, action)
            if process.system(cmd, shell=True, ignore_status=True):
                failed = True
            else:
                passed_actions.append(action)
        passed_commits.append(passed_actions)
    # Reset device if not already taken care
    # (action 3 activates without requiring a controller reset)
    reset_needed = False
    for commit in passed_commits:
        if 3 not in commit:
            reset_needed = True
    if reset_needed:
        if self.reset_controller_sysfs():
            self.fail("Controller reset after FW update failed")
    if failed:
        self.log.debug(passed_commits)
        self.fail("Passed only for the above slot actions")
    # Getting the current FW details after updating
    self.get_firmware_log()
    if fw_version != self.get_firmware_version():
        self.log.warn("New Firmware not reflecting after updating")
def pull_file(self, netperf_source=None):
    """
    Make the netperf source available on the local host.

    URLs are downloaded (md5-verified) into the download directory;
    local paths are used as-is.  The resolved path is stored on
    ``self.netperf_source`` and returned.
    """
    if not aurl.is_url(netperf_source):
        self.netperf_source = netperf_source
        return self.netperf_source
    LOG.debug("Download URL file to local path")
    local_target = os.path.join(data_dir.get_download_dir(),
                                os.path.basename(netperf_source))
    self.netperf_source = download.get_file(src=netperf_source,
                                            dst=local_target,
                                            hash_expected=self.md5sum)
    return self.netperf_source
def pull_file(self, netperf_source=None):
    """
    Make the netperf source available on the local host.

    URLs are downloaded (md5-verified) into the download directory;
    local paths are used as-is.  The resolved path is stored on
    ``self.netperf_source`` and returned.
    """
    if aurl.is_url(netperf_source):
        logging.debug("Download URL file to local path")
        local_target = os.path.join(data_dir.get_download_dir(),
                                    os.path.basename(netperf_source))
        fetched = download.get_file(src=netperf_source,
                                    dst=local_target,
                                    hash_expected=self.md5sum)
        self.netperf_source = fetched
    else:
        self.netperf_source = netperf_source
    return self.netperf_source
def setUp(self):
    """
    Install genwqe packages, and downloads test tarball.
    """
    self.card = self.params.get('device', default='0')
    default_url = "http://corpus.canterbury.ac.nz/resources/cantrbry.tar.gz"
    self.url = self.params.get('test_tar_url', default=default_url)
    # The genwqe accelerator card must be visible in sysfs
    sysfs_dir = "/sys/class/genwqe/genwqe%s_card/" % self.card
    if not os.path.isdir(sysfs_dir):
        self.cancel("Device %s does not exist" % self.card)
    pkg_mgr = SoftwareManager()
    for pkg in ['genwqe-tools', 'genwqe-zlib']:
        if pkg_mgr.check_installed(pkg):
            continue
        if not pkg_mgr.install(pkg):
            self.cancel('%s is needed for the test to be run' % pkg)
    self.test_tar = download.get_file(self.url, "cantrbry.tar.gz")
    self.files_used = [self.test_tar]
    self.dirs_used = []
def test_firmware_upgrade(self):
    """
    Updates firmware of the device.

    Writes the new image to every writable slot (action 0), activates it
    from the last slot (action 3), resets the controller and verifies the
    device reports the new firmware version.
    """
    fw_file = self.firmware_url.split('/')[-1]
    fw_version = fw_file.split('.')[0]
    fw_file_path = download.get_file(self.firmware_url,
                                     os.path.join(self.teststmpdir, fw_file))
    # Getting the current FW details
    self.log.debug("Current FW: %s", self.get_firmware_version())
    self.get_firmware_log()
    # Activating new FW
    for i in range(1, self.get_firmware_slots() + 1):
        if not self.firmware_slot_write_supported(i):
            continue
        # Downloading new FW to the device for each slot
        d_cmd = "nvme fw-download %s --fw=%s" % (self.device, fw_file_path)
        if process.system(d_cmd, shell=True, ignore_status=True):
            self.fail("Failed to download firmware to the device")
        # Action 0: store the image in this slot without activating
        cmd = "nvme fw-activate %s -a 0 -s %d" % (self.device, i)
        if process.system(cmd, shell=True, ignore_status=True):
            self.fail("Failed to write firmware on slot %d" % i)
        if i == self.get_firmware_slots():
            # Downloading new FW to the device for each action
            if process.system(d_cmd, shell=True, ignore_status=True):
                self.fail("Failed to download firmware to the device")
            # Action 3: activate the stored image immediately
            cmd = "nvme fw-activate %s -a 3 -s %d" % (self.device, i)
            if process.system(cmd, shell=True, ignore_status=True):
                self.fail("Failed to activate firmware on slot %d" % i)
    if self.reset_controller_sysfs():
        self.fail("Controller reset after FW update failed")
    # Getting the current FW details after updating
    self.get_firmware_log()
    if fw_version != self.get_firmware_version():
        self.fail("New Firmware not reflecting after updating")
def test_firmware_upgrade(self):
    """
    Updates firmware of the device.

    Writes the new image to every writable slot (action 0), activates
    it from the last slot (action 3), resets the controller and checks
    that the device reports the new firmware version.
    """
    image_name = self.firmware_url.split('/')[-1]
    expected_version = image_name.split('.')[0]
    image_path = download.get_file(
        self.firmware_url, os.path.join(self.teststmpdir, image_name))
    # Firmware state before the upgrade
    self.log.debug("Current FW: %s", self.get_firmware_version())
    self.get_firmware_log()
    # Write (and, on the last slot, activate) the new image
    total_slots = self.get_firmware_slots()
    for slot in range(1, total_slots + 1):
        if not self.firmware_slot_write_supported(slot):
            continue
        transfer_cmd = "nvme fw-download %s --fw=%s" % (self.device,
                                                        image_path)
        if process.system(transfer_cmd, shell=True, ignore_status=True):
            self.fail("Failed to download firmware to the device")
        store_cmd = "nvme fw-activate %s -a 0 -s %d" % (self.device, slot)
        if process.system(store_cmd, shell=True, ignore_status=True):
            self.fail("Failed to write firmware on slot %d" % slot)
        if slot == self.get_firmware_slots():
            if process.system(transfer_cmd, shell=True, ignore_status=True):
                self.fail("Failed to download firmware to the device")
            commit_cmd = "nvme fw-activate %s -a 3 -s %d" % (self.device,
                                                             slot)
            if process.system(commit_cmd, shell=True, ignore_status=True):
                self.fail("Failed to activate firmware on slot %d" % slot)
    if self.reset_controller_sysfs():
        self.fail("Controller reset after FW update failed")
    # Verify the device now reports the new firmware
    self.get_firmware_log()
    if expected_version != self.get_firmware_version():
        self.fail("New Firmware not reflecting after updating")
def run(test, params, env):
    """
    Multicast test using iperf.

    1) Boot up VM(s)
    2) Prepare the test environment in server/client/host,install iperf
    3) Execute iperf tests, analyze the results

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def server_start(cmd, catch_data):
        """
        Start the iperf server in host, and check whether the guest have
        connected this server through multicast address of the server
        """
        try:
            process.run(cmd)
        except process.CmdError as e:
            if not re.findall(catch_data, e.result.stdout):
                test.fail("Client not connected '%s'" % str(e))
            # BUGFIX: the format string was missing its %s placeholder,
            # which made the %-operator raise TypeError on the success path.
            logging.info("Client multicast test pass %s"
                         % re.findall(catch_data, str(e)))

    os_type = params.get("os_type")
    win_iperf_url = params.get("win_iperf_url")
    linux_iperf_url = params.get("linux_iperf_url")
    iperf_version = params.get("iperf_version", "2.0.5")
    transfer_timeout = int(params.get("transfer_timeout", 360))
    login_timeout = int(params.get("login_timeout", 360))
    dir_name = test.tmpdir
    tmp_dir = params.get("tmp_dir", "/tmp/")
    host_path = os.path.join(dir_name, "iperf")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)
    clean_cmd = ""
    client_ip = vm.get_address(0)
    try:
        error_context.context("Test Env setup")
        iperf_downloaded = 0
        iperf_url = linux_iperf_url
        app_check_cmd = params.get("linux_app_check_cmd", "false")
        app_check_exit_status = int(params.get("linux_app_check_exit_status",
                                               "0"))
        exit_status = process.system(app_check_cmd, ignore_status=True,
                                     shell=True)
        # Install iperf in host if not available
        default_install_cmd = "tar zxvf %s; cd iperf-%s;"
        default_install_cmd += " ./configure; make; make install"
        install_cmd = params.get("linux_install_cmd", default_install_cmd)
        if not exit_status == app_check_exit_status:
            error_context.context("install iperf in host", logging.info)
            download.get_file(iperf_url, host_path)
            iperf_downloaded = 1
            process.system(install_cmd % (host_path, iperf_version),
                           shell=True)
        # The guest may not be running Linux, see if we should update the
        # app_check variables
        if not os_type == "linux":
            app_check_cmd = params.get("win_app_check_cmd", "false")
            app_check_exit_status = int(
                params.get("win_app_check_exit_status", "0"))
        # Install iperf in guest if not available
        if not session.cmd_status(app_check_cmd) == app_check_exit_status:
            error_context.context("install iperf in guest", logging.info)
            if not iperf_downloaded:
                download.get_file(iperf_url, host_path)
            if os_type == "linux":
                guest_path = (tmp_dir + "iperf.tgz")
                clean_cmd = "rm -rf %s iperf-%s" % (guest_path,
                                                    iperf_version)
            else:
                guest_path = (tmp_dir + "iperf.exe")
                iperf_url = win_iperf_url
                download.get_file(iperf_url, host_path)
                clean_cmd = "del %s" % guest_path
            vm.copy_files_to(host_path, guest_path,
                             timeout=transfer_timeout)
            if os_type == "linux":
                session.cmd(install_cmd % (guest_path, iperf_version))
        muliticast_addr = params.get("muliticast_addr", "225.0.0.3")
        multicast_port = params.get("multicast_port", "5001")
        step_msg = "Start iperf server, bind host to multicast address %s "
        error_context.context(step_msg % muliticast_addr, logging.info)
        server_start_cmd = ("iperf -s -u -B %s -p %s " %
                            (muliticast_addr, multicast_port))
        default_flag = "%s port %s connected with %s"
        connected_flag = params.get("connected_flag", default_flag)
        catch_data = connected_flag % (muliticast_addr, multicast_port,
                                       client_ip)
        # Run the server in a background thread; it blocks until killed
        t = utils_misc.InterruptedThread(server_start,
                                         (server_start_cmd, catch_data))
        t.start()
        if not _process_is_alive("iperf"):
            test.error("Start iperf server failed cmd: %s"
                       % server_start_cmd)
        logging.info("Server start successfully")
        step_msg = "In client try to connect server and transfer file "
        step_msg += " through multicast address %s"
        error_context.context(step_msg % muliticast_addr, logging.info)
        if os_type == "linux":
            client_cmd = "iperf"
        else:
            client_cmd = guest_path
        start_cmd = params.get("start_client_cmd", "%s -c %s -u -p %s")
        start_client_cmd = start_cmd % (client_cmd, muliticast_addr,
                                        multicast_port)
        session.cmd(start_client_cmd)
        logging.info("Client start successfully")
        error_context.context("Test finish, check the result", logging.info)
        # SIGINT makes the iperf server print its summary and exit,
        # which unblocks the server_start thread
        process.system("pkill -2 iperf")
        t.join()
    finally:
        if _process_is_alive("iperf"):
            process.system("killall -9 iperf")
        process.system("rm -rf %s" % host_path)
        if session:
            if clean_cmd:
                session.cmd(clean_cmd)
            session.close()
def _copy_file_to_test_dir(file_path):
    # URLs are passed through untouched; anything else is copied into the
    # sub-test directory and only its base name is returned.
    if aurl.is_url(file_path):
        return file_path
    local_abs = os.path.join(test.bindir, file_path)
    target = os.path.join(sub_test_path, os.path.basename(local_abs))
    copied_path = download.get_file(file_path, target)
    return os.path.basename(copied_path)
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of
    memory balloon

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment

    Test steps:
    1. Prepare guest domain and balloon xml use one of
       virtio/virtio-non-transitional/virtio-transitional model
    2. Start domain and check the device exist in guest
    3. Save/Restore and check if guest works well
    4. Set balloon memory and see if it works in guest
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy so the domain XML can be restored in finally:
    backup_xml = vmxml.copy()
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']
    os_variant = params.get("os_variant", "")
    params["disk_model"] = virtio_model
    # virtio-transitional requires libvirt >= 5.0.0
    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")
    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(),
                                          image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
    try:
        # Update disk and interface to correct model
        if (os_variant == 'rhel6' or
                'rhel6' in params.get("shortname")):
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        # The local variable "vmxml" will not be updated since set_vm_disk
        # sync with another dumped xml inside the function
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Update memory balloon device to correct model
        membal_dict = {'membal_model': virtio_model,
                       'membal_stats_period': '10'}
        libvirt.update_memballoon_xml(vmxml, membal_dict)
        if not vm.is_alive():
            vm.start()
        is_windows_guest = (params['os_type'] == 'Windows')
        session = vm.wait_for_login()
        # Check memory statistic
        if libvirt_version.version_compare(6, 6, 0):
            if (os_variant != 'rhel6' or
                    'rhel6' not in params.get("shortname")):
                rs = virsh.dommemstat(vm_name, ignore_status=True,
                                      debug=True).stdout_text
                if "available" not in rs:
                    test.fail("Can't get memory stats in %s model"
                              % virtio_model)
        # Finish test for Windows guest
        if is_windows_guest:
            return
        # Check if memory balloon device exists on guest
        status = session.cmd_status_output('lspci |grep balloon')[0]
        if status != 0:
            test.fail("Didn't detect memory balloon device on guest.")
        # Save and restore guest
        sn_path = os.path.join(data_dir.get_tmp_dir(), os_variant)
        session.close()
        virsh.save(vm_name, sn_path)
        virsh.restore(sn_path)
        session = vm.wait_for_login()
        # Get original memory for later balloon function check
        ori_outside_mem = vm.get_max_mem()
        ori_guest_mem = vm.get_current_memory_size()
        # balloon half of the memory
        ballooned_mem = ori_outside_mem // 2
        # Set memory to test balloon function
        virsh.setmem(vm_name, ballooned_mem)
        # Check if memory is ballooned successfully
        logging.info("Check memory status")
        # Memory the guest itself cannot see (firmware/overhead) is the
        # default tolerance when comparing ballooned sizes
        unusable_mem = ori_outside_mem - ori_guest_mem
        gcompare_threshold = int(
            params.get("guest_compare_threshold", unusable_mem))
        after_mem = vm.get_current_memory_size()
        act_threshold = ballooned_mem - after_mem
        if (after_mem > ballooned_mem) or (abs(act_threshold)
                                           > gcompare_threshold):
            test.fail("Balloon test failed")
    finally:
        vm.destroy()
        backup_xml.sync()
def run(test, params, env):
    """
    Update virtio driver:
    1) Boot up guest with default devices and virtio_win iso
    2) Install virtio driver
    3) Check driver info

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def reboot(vm, session=None):
        # Reboot the guest, falling back through the available NICs if a
        # login over the last one fails.
        nic_idx = len(vm.virtnet) - 1
        while nic_idx >= 0:
            try:
                return vm.reboot(session, nic_index=nic_idx)
            except Exception:
                nic_idx -= 1
                if nic_idx < 0:
                    raise
                logging.warn("Unable to login guest, "
                             "try to login via nic %d" % nic_idx)

    def check_cdrom(timeout):
        # Poll diskpart inside the (Windows) guest until all expected
        # CDFS volumes are visible, returning the matching volume lines.
        cdrom_chk_cmd = "echo list volume > cmd && echo exit >>"
        cdrom_chk_cmd += " cmd && diskpart /s cmd"
        vols = []
        start_time = time.time()
        while time.time() - start_time < timeout:
            vols_str = session.cmd(cdrom_chk_cmd)
            if len(re.findall("CDFS", vols_str)) >= cdrom_num:
                vols = re.findall(".*CDFS.*?\n", vols_str)
                break
        return vols

    # Step 1: force non-virtio defaults so the virtio drivers are the
    # thing being installed, not already in use
    if params.get("case_type") == "driver_install":
        error_context.context("Update the device type to default.",
                              logging.info)
        default_drive_format = params.get("default_drive_format", "ide")
        default_nic_model = params.get("default_nic_model", "rtl8139")
        default_display = params.get("default_display", "vnc")
        default_parameters = {
            "default_drive_format": default_drive_format,
            "default_nic_model": default_nic_model,
            "default_display": default_display,
            "default_cd_format": default_drive_format
        }
        # Strip the "default_" prefix when writing back into params
        for key in default_parameters:
            params[key[8:]] = default_parameters[key]
    # Optionally build a prewhql virtio-win iso from a downloaded package
    if params.get("prewhql_install") == "yes":
        error_context.context("Prepare the prewhql virtio_win driver iso")
        url_virtio_win = params.get("url_virtio_win")
        if os.path.isdir("/tmp/virtio_win"):
            process.system("rm -rf /tmp/virtio_win")
        process.system("mkdir /tmp/virtio_win")
        pkg_name = os.path.basename(url_virtio_win)
        pkg_path = os.path.join("/tmp/virtio_win", pkg_name)
        download.get_file(url_virtio_win, pkg_path)
        if re.findall("zip$", url_virtio_win):
            process.system("cd /tmp/virtio_win; unzip *; rm -f *.zip",
                           shell=True)
        virtio_iso = utils_misc.get_path(
            data_dir.get_data_dir(),
            params.get("cdrom_virtio", "/tmp/prewhql.iso"))
        process.system("mkisofs -J -o %s /tmp/virtio_win" % virtio_iso)
    drivers_install = re.split(";", params.get("drivers_install"))
    timeout = float(params.get("login_timeout", 240))
    install_cmds = {}
    check_str = {}
    check_cmds = {}
    op_cmds = {}
    setup_ps = False
    # Step 2: resolve per-driver install/check commands before booting
    error_context.context("Fill up driver install command line",
                          logging.info)
    for driver in drivers_install:
        params_driver = params.object_params(driver)
        mount_point = params_driver.get("mount_point")
        storage_path = params_driver.get("cdrom_virtio")
        re_hw_id = params_driver.get("re_hw_id", "(PCI.{14,50})\r\n")
        driver_install_cmd = params_driver.get("driver_install_cmd")
        if "hwidcmd" in driver_install_cmd:
            # Replace the "hwidcmd" placeholder with the real hardware id
            # extracted from the driver files on the iso
            pattern_drive = params.get("pattern_drive",
                                       r"\s+\w:(.[^\s]+)\s+hwidcmd")
            driver_path = re.findall(pattern_drive, driver_install_cmd)[0]
            driver_path = "/".join(driver_path.split("\\\\")[1:])
            storage_path = utils_misc.get_path(data_dir.get_data_dir(),
                                               storage_path)
            hw_id = utils_test.get_driver_hardware_id(
                driver_path, mount_point=mount_point,
                storage_path=storage_path, re_hw_id=re_hw_id)
            install_cmds[driver] = re.sub("hwidcmd", hw_id,
                                          driver_install_cmd)
        else:
            install_cmds[driver] = driver_install_cmd
        check_str[driver] = params_driver.get("check_str")
        check_cmds[driver] = params_driver.get("check_cmd")
        if params_driver.get('op_cmd'):
            op_cmds[driver] = params_driver["op_cmd"].split("::")
        if "pecheck.py" in check_cmds[driver]:
            setup_ps = True
        if params.get("check_info") == "yes":
            # Mount the virtio iso on the host and read the expected
            # driver details straight out of the .inf file
            mount_point = params.get("virtio_mount_point",
                                     "/tmp/virtio_win")
            iso_path = utils_misc.get_path(data_dir.get_data_dir(),
                                           params.get("cdrom_virtio"))
            process.system("mount -o loop %s %s" % (iso_path, mount_point))
            pattern_driver = params_driver.get("pattern_driver")
            driver_path = re.findall(pattern_driver, driver_install_cmd)[0]
            driver_path = "/".join(driver_path.split("\\\\")[1:])
            storage_path = utils_misc.get_path(mount_point, driver_path)
            storage_path = os.path.dirname(storage_path)
            files = " ".join(os.listdir(storage_path))
            file_name = re.findall(r"\s+(.*?\.inf)", files)
            if file_name:
                file_name = utils_misc.get_path(storage_path, file_name[0])
            else:
                test.error("Can not find .inf file.")
            inf = open(file_name)
            inf_context = inf.read()
            inf.close()
            process.system("umount %s" % mount_point)
            # check_str/check_cmds become dicts keyed by field name when
            # detailed info checking is requested
            patterns_check_str = params_driver.get("check_str")
            check_str[driver] = {}
            for i in patterns_check_str.split(";"):
                check_n, check_p = i.split("::")
                check_str[driver][check_n] = re.findall(check_p,
                                                        inf_context)[0]
            check_cmds[driver] = {}
            for i in params_driver.get("check_cmd").split(";"):
                cmd_n, cmd_c = i.split("::")
                cmd_c = re.sub("DRIVER_PATH",
                               params_driver.get("sys_file_path", ""),
                               cmd_c)
                cmd_c = re.sub(
                    "DRIVER_PATTERN_%s" % cmd_n,
                    params_driver.get("info_pattern_%s" % cmd_n, ""),
                    cmd_c)
                check_cmds[driver][cmd_n] = cmd_c
    # Step 3: boot the guest and locate the utils/virtio cdrom volumes
    error_context.context("Boot up guest with setup parameters",
                          logging.info)
    params["start_vm"] = "yes"
    vm_name = params['main_vm']
    env_process.preprocess_vm(test, params, env, vm_name)
    vm = env.get_vm(vm_name)
    session = vm.wait_for_login(timeout=timeout)
    cdroms = params.get("cdroms")
    cdrom_num = len(re.split(r"\s+", cdroms.strip()))
    init_timeout = int(params.get("init_timeout", "60"))
    driver_install_timeout = int(params.get('driver_install_timeout', 720))
    error_context.context("Check the cdrom is available")
    volumes = check_cdrom(init_timeout)
    vol_info = []
    for volume in volumes:
        vol_info += re.findall(r"Volume\s+\d+\s+(\w).*?(\d+)\s+\w+", volume)
    if len(volumes) > 1:
        # The larger volume is assumed to be the utils iso — TODO confirm
        if int(vol_info[0][1]) > int(vol_info[1][1]):
            vol_utils = vol_info[0][0]
            vol_virtio = vol_info[1][0]
        else:
            vol_utils = vol_info[1][0]
            vol_virtio = vol_info[0][0]
    else:
        vol_utils = vol_info[0][0]
    # Step 4: run the install command for each driver, rebooting after each
    error_context.context("Install drivers", logging.info)
    for driver in drivers_install:
        error_context.context("Install drivers %s" % driver, logging.info)
        if params.get("kill_rundll", "no") == "yes":
            # Kill rundll32 processes that would block the installer
            kill_cmd = 'tasklist | find /I "rundll32"'
            status, tasks = session.cmd_status_output(kill_cmd)
            if status == 0:
                for i in re.findall(r"rundll32.*?(\d+)", tasks):
                    session.cmd('taskkill /PID %s' % i)
        if install_cmds:
            cmd = re.sub("WIN_UTILS", vol_utils, install_cmds[driver])
            cmd = re.sub("WIN_VIRTIO", vol_virtio, cmd)
            session.cmd(cmd, timeout=driver_install_timeout)
            session = reboot(vm, session)
    # Step 5: verify each driver, collecting failures into error_log
    if params.get("check_info") == "yes":
        fail_log = "Details check failed in guest."
        fail_log += " Please check the error_log. "
    else:
        fail_log = "Failed to install:"
    error_log = open("%s/error_log" % test.resultsdir, "w")
    fail_flag = False
    error_context.context("Check driver available in guest", logging.info)
    if setup_ps:
        setup_cmd = params.get("python_scripts")
        session.cmd(setup_cmd)
    for driver in drivers_install:
        error_log.write("For driver %s:\n" % driver)
        if isinstance(check_str[driver], dict):
            # Detailed mode: compare each field from the .inf against the
            # value reported inside the guest
            for i in check_str[driver]:
                output = session.cmd(check_cmds[driver][i])
                if not re.findall(check_str[driver][i], output, re.I):
                    fail_flag = True
                    fail_log += " %s" % driver
                    fail_log += "(%s) is not right; " % i
                    error_log.write("inf:\t%s\n" % check_str[driver][i])
                    error_log.write("sys: \t%s\n" % output)
        else:
            output = session.cmd(check_cmds[driver])
            if not re.findall(check_str[driver], output, re.I):
                fail_flag = True
                fail_log += " %s" % driver
                error_log.write("Check command output: %s\n" % output)
    if fail_flag:
        test.fail(fail_log)
    # Step 6: optional extra per-driver sanity commands
    if op_cmds:
        error_context.context("Do more operates in guest to check the driver",
                              logging.info)
        for driver in drivers_install:
            if driver not in op_cmds:
                continue
            for cmd in op_cmds[driver]:
                session.cmd(cmd)
def run(test, params, env):
    """
    Test virsh migrate command with virtio-transitional devices.

    Prepares NFS shared storage, optionally rewrites the guest's disk,
    interface, memballoon or rng device model, migrates the VM to the
    remote host, verifies the migrated guest XML, optionally migrates it
    back, and always restores the original configuration.

    :param test: test object (provides cancel/error/fail)
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage; fall back to the default guest asset
    # when no explicit shared image is configured.
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    guest_src_url = params.get("guest_src_url")
    guest_src_path = params.get("guest_src_path",
                                "/var/lib/libvirt/images/guest.img")
    check_disk = "yes" == params.get("check_disk")
    disk_model = params.get("disk_model")
    disk_target = params.get("disk_target", "vda")
    controller_model = params.get("controller_model")

    check_interface = "yes" == params.get("check_interface")
    iface_type = params.get("iface_type", "network")
    iface_model = params.get("iface_model", "virtio")
    iface_params = {'type': iface_type,
                    'model': iface_model,
                    'del_addr': True,
                    'source': '{"network": "default"}'}

    check_memballoon = "yes" == params.get("check_memballoon")
    membal_model = params.get("membal_model")

    check_rng = "yes" == params.get("check_rng")
    rng_model = params.get("rng_model")

    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    status_error = "yes" == params.get("status_error", "no")
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    remote_dargs = {'server_ip': server_ip, 'server_user': server_user,
                    'server_pwd': server_pwd,
                    'file_path': "/etc/libvirt/libvirt.conf"}

    xml_check_after_mig = params.get("guest_xml_check_after_mig")
    err_msg = params.get("err_msg")

    vm_session = None
    remote_virsh_session = None
    vm = None
    mig_result = None
    remove_dict = {}
    remote_libvirt_file = None
    src_libvirt_file = None
    # BUGFIX: blk_source is consulted in the finally-block cleanup below;
    # initialize it here so an early failure (before the download) cannot
    # raise NameError during cleanup.
    blk_source = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # download guest source and update interface model to keep guest up
        if guest_src_url:
            blk_source = download.get_file(guest_src_url, guest_src_path)
            if not blk_source:
                test.error("Fail to download image.")
            params["blk_source_name"] = blk_source
            if (not check_interface) and iface_model:
                iface_dict = {'model': iface_model}
                libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict)
            if not check_disk:
                params["disk_model"] = "virtio-transitional"

        if check_interface:
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)

        if check_memballoon:
            membal_dict = {'membal_model': membal_model}
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.update_memballoon_xml(dom_xml, membal_dict)

        if check_rng:
            rng_dict = {'rng_model': rng_model}
            rng_xml = libvirt.create_rng_xml(rng_dict)
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.add_vm_device(dom_xml, rng_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra, **extra_args)
        mig_result = migration_test.ret

        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, dest_uri)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = (
                remote_virsh_session.dumpxml(vm_name, debug=True,
                                             ignore_status=True)
                .stdout_text.strip())
            # Pick the device-model string the current check mode expects
            # to find in the migrated guest XML.
            if check_disk:
                check_str = disk_model if disk_model else controller_model
            if check_interface:
                check_str = iface_model
            if check_memballoon:
                check_str = membal_model
            if check_rng:
                check_str = rng_model

            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                test.fail("Fail to search '%s' in target guest XML:\n%s"
                          % (xml_check_after_mig, target_guest_dumpxml))
            remote_virsh_session.close_session()

        # Execute migration from remote
        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)

            remove_dict = {"do_search": ('{"%s": "ssh:/"}' % src_uri)}
            remote_libvirt_file = libvirt_config\
                .remove_key_for_modular_daemon(remove_dict, remote_dargs)

            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recover VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        if src_libvirt_file:
            src_libvirt_file.restore()
        if remote_libvirt_file:
            del remote_libvirt_file

        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params, cleanup=True)

        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
        # blk_source is None unless the download above succeeded, so this
        # is now safe even when setup failed early.
        if guest_src_url and blk_source:
            libvirt.delete_local_disk("file", path=blk_source)
def run(test, params, env):
    """
    Update virtio driver:
    1) Boot up guest with default devices and virtio_win iso
    2) Install virtio driver
    3) Check driver info

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def reboot(vm, session=None):
        # Reboot the guest, falling back through the available nics in case
        # the last-added nic (the usual login path) is not reachable yet.
        nic_idx = len(vm.virtnet) - 1
        while nic_idx >= 0:
            try:
                return vm.reboot(session, nic_index=nic_idx)
            except Exception:
                nic_idx -= 1
                if nic_idx < 0:
                    raise
                logging.warning("Unable to login guest, "
                                "try to login via nic %d" % nic_idx)

    def check_cdrom(timeout):
        # Poll diskpart inside the Windows guest until at least cdrom_num
        # CDFS volumes show up (or the timeout expires); return the
        # matching "list volume" lines.
        cdrom_chk_cmd = "echo list volume > cmd && echo exit >>"
        cdrom_chk_cmd += " cmd && diskpart /s cmd"
        vols = []
        start_time = time.time()
        while time.time() - start_time < timeout:
            vols_str = session.cmd(cdrom_chk_cmd)
            if len(re.findall("CDFS", vols_str)) >= cdrom_num:
                vols = re.findall(".*CDFS.*?\n", vols_str)
                break
        return vols

    if params.get("case_type") == "driver_install":
        error_context.context("Update the device type to default.",
                              logging.info)
        default_drive_format = params.get("default_drive_format", "ide")
        default_nic_model = params.get("default_nic_model", "rtl8139")
        default_display = params.get("default_display", "vnc")
        default_parameters = {"default_drive_format": default_drive_format,
                              "default_nic_model": default_nic_model,
                              "default_display": default_display,
                              "default_cd_format": default_drive_format}
        # Strip the "default_" prefix so e.g. "drive_format" is overridden.
        for key in default_parameters:
            params[key[8:]] = default_parameters[key]

    if params.get("prewhql_install") == "yes":
        error_context.context("Prepare the prewhql virtio_win driver iso")
        url_virtio_win = params.get("url_virtio_win")
        if os.path.isdir("/tmp/virtio_win"):
            process.system("rm -rf /tmp/virtio_win")
        process.system("mkdir /tmp/virtio_win")
        pkg_name = os.path.basename(url_virtio_win)
        pkg_path = os.path.join("/tmp/virtio_win", pkg_name)
        download.get_file(url_virtio_win, pkg_path)
        if re.findall("zip$", url_virtio_win):
            process.system("cd /tmp/virtio_win; unzip *; rm -f *.zip",
                           shell=True)
        virtio_iso = utils_misc.get_path(data_dir.get_data_dir(),
                                         params.get("cdrom_virtio",
                                                    "/tmp/prewhql.iso"))
        process.system("mkisofs -J -o %s /tmp/virtio_win" % virtio_iso)

    drivers_install = re.split(";", params.get("drivers_install"))

    timeout = float(params.get("login_timeout", 240))
    install_cmds = {}
    check_str = {}
    check_cmds = {}
    op_cmds = {}
    setup_ps = False

    error_context.context("Fill up driver install command line", logging.info)
    for driver in drivers_install:
        params_driver = params.object_params(driver)
        mount_point = params_driver.get("mount_point")
        storage_path = params_driver.get("cdrom_virtio")
        re_hw_id = params_driver.get("re_hw_id", "(PCI.{14,50})\r\n")
        driver_install_cmd = params_driver.get("driver_install_cmd")
        if "hwidcmd" in driver_install_cmd:
            # Resolve the real hardware id from the driver .inf on the iso
            # and substitute it into the install command.
            pattern_drive = params.get("pattern_drive",
                                       r"\s+\w:(.[^\s]+)\s+hwidcmd")
            driver_path = re.findall(pattern_drive, driver_install_cmd)[0]
            driver_path = "/".join(driver_path.split("\\\\")[1:])
            storage_path = utils_misc.get_path(data_dir.get_data_dir(),
                                               storage_path)
            hw_id = utils_test.get_driver_hardware_id(
                driver_path, mount_point=mount_point,
                storage_path=storage_path, re_hw_id=re_hw_id)
            install_cmds[driver] = re.sub("hwidcmd", hw_id,
                                          driver_install_cmd)
        else:
            install_cmds[driver] = driver_install_cmd
        check_str[driver] = params_driver.get("check_str")
        check_cmds[driver] = params_driver.get("check_cmd")
        if params_driver.get('op_cmd'):
            op_cmds[driver] = params_driver["op_cmd"].split("::")

        if "pecheck.py" in check_cmds[driver]:
            setup_ps = True

        if params.get("check_info") == "yes":
            # Detailed check: read expected values out of the driver's .inf
            # file on the (loop-mounted) virtio iso, and build per-field
            # check commands.
            mount_point = params.get("virtio_mount_point",
                                     "/tmp/virtio_win")
            iso_path = utils_misc.get_path(data_dir.get_data_dir(),
                                           params.get("cdrom_virtio"))
            process.system("mount -o loop %s %s" % (iso_path, mount_point))
            pattern_driver = params_driver.get("pattern_driver")
            driver_path = re.findall(pattern_driver, driver_install_cmd)[0]
            driver_path = "/".join(driver_path.split("\\\\")[1:])
            storage_path = utils_misc.get_path(mount_point, driver_path)
            storage_path = os.path.dirname(storage_path)
            files = " ".join(os.listdir(storage_path))
            file_name = re.findall(r"\s+(.*?\.inf)", files)
            if file_name:
                file_name = utils_misc.get_path(storage_path, file_name[0])
            else:
                test.error("Can not find .inf file.")
            # Use a context manager so the file is closed even on error.
            with open(file_name) as inf:
                inf_context = inf.read()
            process.system("umount %s" % mount_point)
            patterns_check_str = params_driver.get("check_str")
            check_str[driver] = {}
            for i in patterns_check_str.split(";"):
                check_n, check_p = i.split("::")
                check_str[driver][check_n] = re.findall(check_p,
                                                        inf_context)[0]
            check_cmds[driver] = {}
            for i in params_driver.get("check_cmd").split(";"):
                cmd_n, cmd_c = i.split("::")
                cmd_c = re.sub("DRIVER_PATH",
                               params_driver.get("sys_file_path", ""),
                               cmd_c)
                cmd_c = re.sub("DRIVER_PATTERN_%s" % cmd_n,
                               params_driver.get("info_pattern_%s" % cmd_n,
                                                 ""),
                               cmd_c)
                check_cmds[driver][cmd_n] = cmd_c

    error_context.context("Boot up guest with setup parameters",
                          logging.info)
    params["start_vm"] = "yes"
    vm_name = params['main_vm']
    env_process.preprocess_vm(test, params, env, vm_name)
    vm = env.get_vm(vm_name)
    session = vm.wait_for_login(timeout=timeout)

    cdroms = params.get("cdroms")
    cdrom_num = len(re.split(r"\s+", cdroms.strip()))
    init_timeout = int(params.get("init_timeout", "60"))
    driver_install_timeout = int(params.get('driver_install_timeout', 720))

    error_context.context("Check the cdrom is available")
    volumes = check_cdrom(init_timeout)
    vol_info = []
    for volume in volumes:
        vol_info += re.findall(r"Volume\s+\d+\s+(\w).*?(\d+)\s+\w+", volume)
    # The larger CDFS volume is assumed to hold the utils iso, the smaller
    # one the virtio driver iso.
    # NOTE(review): with a single volume only vol_utils is bound; an
    # install command containing WIN_VIRTIO would then raise NameError.
    if len(volumes) > 1:
        if int(vol_info[0][1]) > int(vol_info[1][1]):
            vol_utils = vol_info[0][0]
            vol_virtio = vol_info[1][0]
        else:
            vol_utils = vol_info[1][0]
            vol_virtio = vol_info[0][0]
    else:
        vol_utils = vol_info[0][0]

    error_context.context("Install drivers", logging.info)
    for driver in drivers_install:
        error_context.context("Install drivers %s" % driver, logging.info)
        if params.get("kill_rundll", "no") == "yes":
            # Kill any rundll32 processes that would block the installer.
            kill_cmd = 'tasklist | find /I "rundll32"'
            status, tasks = session.cmd_status_output(kill_cmd)
            if status == 0:
                for i in re.findall(r"rundll32.*?(\d+)", tasks):
                    session.cmd('taskkill /PID %s' % i)
        if install_cmds:
            cmd = re.sub("WIN_UTILS", vol_utils, install_cmds[driver])
            cmd = re.sub("WIN_VIRTIO", vol_virtio, cmd)
            session.cmd(cmd, timeout=driver_install_timeout)
            session = reboot(vm, session)

    if params.get("check_info") == "yes":
        fail_log = "Details check failed in guest."
        fail_log += " Please check the error_log. "
    else:
        fail_log = "Failed to install:"

    fail_flag = False
    error_context.context("Check driver available in guest", logging.info)

    if setup_ps:
        setup_cmd = params.get("python_scripts")
        session.cmd(setup_cmd)

    # BUGFIX: the error_log file handle was never closed; keep it open only
    # for the duration of the check loop and close it before failing.
    error_log = open("%s/error_log" % test.resultsdir, "w")
    try:
        for driver in drivers_install:
            error_log.write("For driver %s:\n" % driver)
            if isinstance(check_str[driver], dict):
                # Detailed mode: compare each .inf-derived value against
                # the output of its dedicated check command.
                for i in check_str[driver]:
                    output = session.cmd(check_cmds[driver][i])
                    if not re.findall(check_str[driver][i], output, re.I):
                        fail_flag = True
                        fail_log += " %s" % driver
                        fail_log += "(%s) is not right; " % i
                        error_log.write("inf:\t%s\n" % check_str[driver][i])
                        error_log.write("sys: \t%s\n" % output)
            else:
                output = session.cmd(check_cmds[driver])
                if not re.findall(check_str[driver], output, re.I):
                    fail_flag = True
                    fail_log += " %s" % driver
                    error_log.write("Check command output: %s\n" % output)
    finally:
        error_log.close()

    if fail_flag:
        test.fail(fail_log)

    if op_cmds:
        # Run optional extra in-guest commands to exercise the driver.
        error_context.context("Do more operates in guest to check the driver",
                              logging.info)
        for driver in drivers_install:
            if driver not in op_cmds:
                continue
            for cmd in op_cmds[driver]:
                session.cmd(cmd)
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of vsock.

    Optionally downloads a guest image, optionally adds a
    pcie-to-pci-bridge controller, then either boots the guest with a
    vsock device or hotplugs/unplugs it, and verifies the device is
    visible via lspci inside the guest.

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    # Back up the inactive XML so the original config can be restored.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']
    boot_with_vsock = (params.get('boot_with_vsock', 'yes') == 'yes')
    hotplug = (params.get('hotplug', 'no') == 'yes')
    addr_pattern = params['addr_pattern']
    device_pattern = params['device_pattern']

    # virtio-transitional requires libvirt >= 5.0.0.
    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    def check_vsock_inside_guest():
        """
        Check the vsock device shows up in the guest's lspci output.

        Uses the enclosing scope's ``session``, ``addr_pattern`` and
        ``device_pattern``; fails the test when no match is found.
        """
        lspci_cmd = 'lspci'
        lspci_output = session.cmd_output(lspci_cmd)
        device_str = re.findall(r'%s\s%s' % (addr_pattern, device_pattern),
                                lspci_output)
        if not device_str:
            test.fail('lspci failed, no device "%s"' % device_pattern)

    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        # Skip the download if a previous run already fetched the image.
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path

    # Add pcie-to-pci-bridge when it is required
    if add_pcie_to_pci_bridge:
        pci_controllers = vmxml.get_controllers('pci')
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-to-pci-bridge':
                break
        else:
            # No existing bridge found (for-else): create and attach one.
            contr_dict = {'controller_type': 'pci',
                          'controller_model': 'pcie-to-pci-bridge'}
            cntl_add = libvirt.create_controller_xml(contr_dict)
            libvirt.add_controller(vm_name, cntl_add)

    # Generate xml for device vsock
    vsock_xml = libvirt.create_vsock_xml(virtio_model)
    if boot_with_vsock:
        # Add vsock xml to vm only when needed; in the hotplug case the
        # device is attached live below instead.
        libvirt.add_vm_device(vmxml, vsock_xml)
    try:
        if (params["os_variant"] == 'rhel6' or
                'rhel6' in params.get("shortname")):
            # Update interface to virtio-transitional mode for
            # rhel6 guest to make it works for login
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        if hotplug:
            # Attach the vsock device to the running guest from its XML file.
            file_arg = vsock_xml.xml
            with open(file_arg) as vsock_file:
                logging.debug("Attach vsock by XML: %s", vsock_file.read())
            s_attach = virsh.attach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_attach)
        if add_pcie_to_pci_bridge:
            # Check device should be plug to right bus
            virtio_transitional_base.check_plug_to(vm_name, 'vsock')
        session = vm.wait_for_login()
        check_vsock_inside_guest()
        if hotplug:
            # Detach again to verify clean unplug of the hotplugged device.
            with open(file_arg) as vsock_file:
                logging.debug("Detach vsock by XML: %s", vsock_file.read())
            s_detach = virsh.detach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_detach)
    finally:
        # Always restore the pre-test domain definition.
        vm.destroy()
        backup_xml.sync()
def run(test, params, env):
    """
    KVM performance test:
    The idea is similar to 'client/tests/kvm/tests/autotest.py', but we
    can implement some special requests for performance testing.

    Fetches a test tarball, unpacks/patches/compiles it inside the guest,
    runs it under a host/guest monitor via cmd_runner.py, then collects
    and summarizes the result files into the test output directory.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    test_timeout = int(params.get("test_timeout", 240))
    monitor_cmd = params["monitor_cmd"]
    login_timeout = int(params.get("login_timeout", 360))
    test_cmd = params["test_cmd"]
    guest_path = params.get("result_path", "/tmp/guest_result")
    test_src = params["test_src"]
    test_patch = params.get("test_patch")

    # Prepare test environment in guest
    session = vm.wait_for_login(timeout=login_timeout)

    prefix = test.outputdir.split(".performance.")[0]
    summary_results = params.get("summary_results")
    guest_ver = session.cmd_output("uname -r").strip()

    # Summary-only mode: aggregate previous results and return early.
    if summary_results:
        result_dir = params.get("result_dir", os.path.dirname(test.outputdir))
        result_sum(result_dir, params, guest_ver, test.resultsdir, test)
        session.close()
        return

    # Push the command-runner helper and the (checksum-verified) test
    # tarball into the guest, then unpack it under /tmp/src_tmp.
    guest_launcher = os.path.join(test.virtdir, "scripts/cmd_runner.py")
    vm.copy_files_to(guest_launcher, "/tmp")
    md5value = params.get("md5value")

    tar_name = os.path.basename(test_src)
    tarball = os.path.join(test.tmpdir, tar_name)
    download.get_file(test_src, tarball, hash_expected=md5value)
    vm.copy_files_to(tarball, "/tmp")
    session.cmd("rm -rf /tmp/src*")
    session.cmd("mkdir -p /tmp/src_tmp")
    session.cmd("tar -xf /tmp/%s -C %s" % (tar_name, "/tmp/src_tmp"))

    # Find the newest file in src tmp directory
    cmd = "ls -rt /tmp/src_tmp"
    s, o = session.cmd_status_output(cmd)
    if len(o) > 0:
        # Last (newest) entry of the time-sorted listing is the extracted
        # source directory.
        new_file = re.findall("(.*)\n", o)[-1]
    else:
        test.error("Can not decompress test file in guest")
    session.cmd("mv /tmp/src_tmp/%s /tmp/src" % new_file)

    if test_patch:
        test_patch_path = os.path.join(data_dir.get_deps_dir(), 'performance',
                                       test_patch)
        vm.copy_files_to(test_patch_path, "/tmp/src")
        session.cmd("cd /tmp/src && patch -p1 < /tmp/src/%s" % test_patch)

    compile_cmd = params.get("compile_cmd")
    if compile_cmd:
        session.cmd("cd /tmp/src && %s" % compile_cmd)

    prepare_cmd = params.get("prepare_cmd")
    if prepare_cmd:
        s, o = session.cmd_status_output(prepare_cmd, test_timeout)
        if s != 0:
            test.error("Fail to prepare test env in guest")

    # Build the cmd_runner.py invocation: it runs the monitor command and
    # the test command in parallel, redirecting each one's output.
    cmd = "cd /tmp/src && python /tmp/cmd_runner.py \"%s &> " % monitor_cmd
    cmd += "/tmp/guest_result_monitor\" \"/tmp/src/%s" % test_cmd
    cmd += " &> %s \" \"/tmp/guest_result\"" % guest_path
    cmd += " %s" % int(test_timeout)
    test_cmd = cmd
    # Run guest test with monitor
    tag = cmd_runner_monitor(test, vm, monitor_cmd, test_cmd,
                             guest_path, timeout=test_timeout)

    # Result collecting
    result_list = ["/tmp/guest_result_%s" % tag,
                   "/tmp/host_monitor_result_%s" % tag,
                   "/tmp/guest_monitor_result_%s" % tag]
    guest_results_dir = os.path.join(test.outputdir, "guest_results")
    if not os.path.exists(guest_results_dir):
        os.mkdir(guest_results_dir)
    ignore_pattern = params.get("ignore_pattern")
    head_pattern = params.get("head_pattern")
    row_pattern = params.get("row_pattern")
    for i in result_list:
        if re.findall("monitor_result", i):
            # Monitor result files get an extra "<file>.sum" summary built
            # from the parsed table (one line per row, tab-separated).
            result = utils_test.summary_up_result(i, ignore_pattern,
                                                  head_pattern, row_pattern)
            fd = open("%s.sum" % i, "w")
            sum_info = {}
            head_line = ""
            for keys in result:
                head_line += "\t%s" % keys
                for col in result[keys]:
                    col_sum = "line %s" % col
                    if col_sum in sum_info:
                        sum_info[col_sum] += "\t%s" % result[keys][col]
                    else:
                        sum_info[col_sum] = "%s\t%s" % (col,
                                                        result[keys][col])
            fd.write("%s\n" % head_line)
            for keys in sum_info:
                fd.write("%s\n" % sum_info[keys])
            fd.close()
            shutil.copy("%s.sum" % i, guest_results_dir)
        shutil.copy(i, guest_results_dir)

    # Clean up scratch files; the last two globs are relative to the host
    # test process's CWD — presumably leftovers of cmd_runner_monitor;
    # TODO confirm.
    session.cmd("rm -rf /tmp/src")
    session.cmd("rm -rf guest_test*")
    session.cmd("rm -rf pid_file*")
    session.close()
def run(test, params, env):
    """
    flag_check test:
    steps:
    1. boot guest with -cpu model,+extra_flags (extra_flags is optional)
       a. no defined model_name in cfg file
          guest_model = host_model
       b. model_name defined in cfg file
          guest_model = params.get("cpu_model")
    2. get guest flags
    3. get expected model flags from dump file
       a. -cpu host: qemu_model = host_model
       b. guest_model > host_model --> expected_model = host_model
          e.g guest_model = Haswell, host_model = Sandybridge
              expected_model = Sandybridge
       c. guest_model < host_model --> expected_model = guest_model
    4. get extra flags
       a. add_flags = +flag
          1). flag is exposed to guest if it's supported in host
          2). flag is not supported to guest if it's unknown in host
          3). ignore "check", "enforce" which are params not flag
       b. del_flags = -flag
          flag is removed if it's supported in guest
       c. params check: check lack flag in host include unknown flag
    5. compare expected flag with flags in guest
       a. out_flags: not supported with some conf, this kind of flag
          will be displayed in dump file, but not in guest.
          e.g tsc-dedline is not supported with -M rhel6.3.0
       b. option_flags: some flag is generated by kernel which is not
          defined in dump file. it's acceptable when displayed in guest.
          e.g rep_good
       expected_flags = expected_model_flags + add_flags - del_flags
                        - out_flags
       miss_flag = expected_flags - guest_flags
       unexpect_flag = guest_flags - expected_flags - option_flags

    :param test: Kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def qemu_model_info(models_list, cpumodel):
        """
        Get cpumodel info from models_list.

        :param models_list: all models info
        :param cpumodel: model name
        :return: model info of cpumodel, or None when not found
        """
        for model in models_list:
            if cpumodel in model:
                return model
        return None

    def qemu_support_flag(model_info, reg):
        """
        Get register's supported flags from model_info.

        :param model_info: model_info got from dump file
        :param reg: reg name, e.g feature_edx
        :return: the flag string inside parentheses, or None
        """
        reg_re = re.compile(r".*%s.*\((.*)\)\n" % reg)
        flag = reg_re.search(model_info)
        try:
            if flag:
                return flag.groups()[0]
        except Exception as e:
            logging.error("Failed to get support flag %s" % e)

    def get_all_support_flags():
        """
        Get all supported flags with qemu query cmd.

        NOTE(review): process.system_output may return bytes on newer
        avocado; the regex search assumes text — confirm runtime version.
        """
        qemu_binary = utils_misc.get_qemu_binary(params)
        cmd = qemu_binary + params.get("query_cmd", " -cpu ?")
        output = process.system_output(cmd)
        flags_re = re.compile(params.get("pattern", "flags:(.*)"))
        flag_list = flags_re.search(output)
        flags = []
        if flag_list:
            for flag in flag_list.groups():
                flags += flag
        return set(map(utils_misc.Flag, flags))

    def get_extra_flag(extra_flags, symbol, lack_check=False):
        """
        Get added/removed flags from the cpu_model_flags string.

        :param extra_flags: exposed/removed flags. e.g "+sse4.1,+sse4.2"
        :param symbol: "+","-"
        :param lack_check: when True keep every listed flag; when False
                           keep only flags the host supports
        :return: set of Flag objects
        """
        flags = []
        re_flags = [_[1:] for _ in extra_flags.split(",")
                    if _ and symbol == _[0]]
        for flag in re_flags:
            if lack_check:
                flags.append(flag)
            elif flag in host_flags:
                flags.append(flag)
        return set(map(utils_misc.Flag, flags))

    def get_guest_cpuflags(vm_session):
        """
        Get guest system cpuflags.

        :param vm_session: session to checked vm.
        :return: set of Flag objects parsed from /proc/cpuinfo, or None
                 on parse failure (error is logged)
        """
        flags_re = re.compile(r'^flags\s*:(.*)$', re.MULTILINE)
        out = vm_session.cmd_output("cat /proc/cpuinfo")
        try:
            flags = flags_re.search(out).groups()[0].split()
            return set(map(utils_misc.Flag, flags))
        except Exception as e:
            logging.error("Failed to get guest cpu flags %s" % e)

    # Flag comparison honors the kvm alias table (e.g. sse4.1 == sse4_1).
    utils_misc.Flag.aliases = utils_misc.kvm_map_flags_aliases

    # Get all models' info from dump file
    dump_file = params.get("dump_file")
    default_dump_path = os.path.join(data_dir.get_deps_dir(), "cpuid")
    dump_path = params.get("dump_path", default_dump_path)
    dump_file_path = os.path.join(dump_path, dump_file)
    cpuinfo_file = os.path.join(default_dump_path, dump_file)
    # NOTE(review): the download destination is the bare file name (CWD),
    # but cpuinfo_file under default_dump_path is what is read later —
    # looks like the destination should be cpuinfo_file; confirm.
    download.get_file(dump_file_path, dump_file)

    host_flags = utils_misc.get_cpu_flags()

    vm = env.get_vm(params["main_vm"])
    guest_cpumodel = vm.cpuinfo.model
    extra_flags = params.get("cpu_model_flags", " ")

    error_context.context("Boot guest with -cpu %s,%s" %
                          (guest_cpumodel, extra_flags), logging.info)

    # Negative path: booting with an unknown flag plus "check" should make
    # qemu emit a warning; only the qemu output is validated here.
    if params.get("start_vm") == "no" and "unknown,check" in extra_flags:
        params["start_vm"] = "yes"
        try:
            vm.create(params=params)
            vm.verify_alive()
            output = vm.process.get_output()
            vm.destroy()
        except virt_vm.VMCreateError as detail:
            output = str(detail)
        if params["qemu_output"] not in output:
            test.fail("no qemu output: %s" % params["qemu_output"])
    else:
        vm.verify_alive()
        timeout = float(params.get("login_timeout", 240))
        session = vm.wait_for_login(timeout=timeout)

        # Get qemu model: fall back to the first host model when the
        # guest's model is not among the host-supported models.
        host_cpumodel = utils_misc.get_host_cpu_models()
        if guest_cpumodel not in host_cpumodel:
            qemu_model = host_cpumodel[0]
        else:
            qemu_model = guest_cpumodel
        error_context.context("Get model %s support flags" % qemu_model,
                              logging.info)

        # Get flags for every reg from model's info; dump file entries are
        # separated by the literal "x86" marker.
        models_info = process.system_output(
            "cat %s" % cpuinfo_file).split("x86")
        model_info = qemu_model_info(models_info, qemu_model)
        reg_list = params.get("reg_list", "feature_edx ").split()
        model_support_flags = " "
        if model_info:
            for reg in reg_list:
                reg_flags = qemu_support_flag(model_info, reg)
                if reg_flags:
                    model_support_flags += " %s" % reg_flags
        model_support_flags = set(map(utils_misc.Flag,
                                      model_support_flags.split()))

        error_context.context("Get guest flags", logging.info)
        guest_flags = get_guest_cpuflags(session)

        error_context.context("Get expected flag list", logging.info)

        # out_flags is defined in dump file, but not in guest
        out_flags = params.get("out_flags", " ").split()
        out_flags = set(map(utils_misc.Flag, out_flags))
        # no_check_flags is defined in all_support_flags, but not in guest
        # and host
        no_check_flags = params.get("no_check_flags", " ").split()
        no_check_flags = set(map(utils_misc.Flag, no_check_flags))
        # option_flags are generated by kernel or kvm, which are not
        # defined in dump file, but can be displayed in guest
        option_flags = params.get("option_flags", " ").split()
        # A uniprocessor guest gains the kernel-generated 'up' flag.
        if params['smp'] == '1' and 'up' not in option_flags:
            option_flags.append('up')
        option_flags = set(map(utils_misc.Flag, option_flags))
        # add_flags are exposed by +flag
        add_flags = get_extra_flag(extra_flags, "+")
        # del_flags are disabled by -flag
        del_flags = get_extra_flag(extra_flags, "-", lack_check=True)
        expected_flags = ((model_support_flags | add_flags) -
                          del_flags - out_flags)
        # get all flags for host lack flag checking
        check_flags = get_extra_flag(extra_flags, "+", lack_check=True)
        check_flags = check_flags - no_check_flags
        host_flags = set(map(utils_misc.Flag, host_flags))
        lack_flags = set(expected_flags | check_flags) - host_flags

        if "check" in extra_flags and "unknown" not in extra_flags:
            # With "check", qemu should have warned about every flag the
            # host lacks; fail if any lacked flag got no warning.
            error_context.context("Check lack flag in host", logging.info)
            process_output = vm.process.get_output()
            miss_warn = []
            if lack_flags:
                for flag in lack_flags:
                    if flag not in process_output:
                        miss_warn.extend(flag.split())
            if miss_warn:
                test.fail("no warning for lack flag %s" % miss_warn)

        error_context.context("Compare guest flags with expected flags",
                              logging.info)
        all_support_flags = get_all_support_flags()
        missing_flags = expected_flags - guest_flags
        unexpect_flags = (guest_flags - expected_flags -
                          all_support_flags - option_flags)
        if missing_flags or unexpect_flags:
            test.fail("missing flags:\n %s\n"
                      "more flags than expected:\n %s\n"
                      "expected flags:\n %s\n"
                      "guest flags:\n %s\n" %
                      (missing_flags, unexpect_flags, expected_flags,
                       guest_flags))
def run(test, params, env):
    """
    Convert specific esx guest

    Runs virt-v2v against an ESX/vCenter source, optionally preparing
    checkpoint-specific preconditions (proxies, loop devices, SASL users,
    CA files, ...), verifies the converted guest per the 'checkpoint'
    params, and cleans everything up in the finally block.

    :param test: avocado test object (provides cancel/error/fail)
    :param params: dict of test parameters
    :param env: test environment object
    """
    # First virt-v2v version that dropped rhev-apt.exe (rhel9).
    V2V_UNSUPPORT_RHEV_APT_VER = "[virt-v2v-1.43.3-4.el9,)"
    # Guard against placeholder values left in the test configuration.
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    enable_legacy_cp = params.get("enable_legacy_crypto_policies",
                                  'no') == 'yes'
    version_requried = params.get("version_requried")
    unprivileged_user = params_get(params, 'unprivileged_user')
    vpx_hostname = params.get('vpx_hostname')
    vpx_passwd = params.get("vpx_password")
    esxi_host = esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    v2v_cmd_timeout = int(params.get('v2v_cmd_timeout', 18000))
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') in ['on', 'force_on'
                                                            ] else ''
    if params.get("v2v_opts"):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '').split(',')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    error_list = []
    remote_host = vpx_hostname
    # For VDDK
    input_transport = params.get("input_transport")
    vddk_libdir = params.get('vddk_libdir')
    # nfs mount source
    vddk_libdir_src = params.get('vddk_libdir_src')
    vddk_thumbprint = params.get('vddk_thumbprint')
    src_uri_type = params.get('src_uri_type')
    esxi_password = params.get('esxi_password')
    json_disk_pattern = params.get('json_disk_pattern')
    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    os_pool = os_storage = params.get('output_storage', 'default')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    os_version = params.get('os_version')
    os_type = params.get('os_type')
    virtio_win_path = params.get('virtio_win_path')
    # qemu-guest-agent path in virtio-win or rhv-guest-tools-iso
    qa_path = params.get('qa_path')
    # download url of qemu-guest-agent
    qa_url = params.get('qa_url')
    v2v_sasl = None
    # default values for v2v_cmd
    auto_clean = True
    cmd_only = False
    cmd_has_ip = 'yes' == params.get('cmd_has_ip', 'yes')
    interaction_run = 'yes' == params.get('interaction_run', 'no')

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_vmtools(vmcheck, check):
        """
        Check whether vmware tools packages have been removed, or
        vmware-tools service has stopped

        :param vmcheck: VMCheck object for vm checking
        :param check: Checkpoint of different cases
        :return: None
        """
        if "service" not in check:
            logging.info('Check if packages been removed')
            pkgs = vmcheck.session.cmd('rpm -qa').strip()
            removed_pkgs = params.get('removed_pkgs').strip().split(',')
            if not removed_pkgs:
                test.error('Missing param "removed_pkgs"')
            for pkg in removed_pkgs:
                if pkg in pkgs:
                    log_fail('Package "%s" not removed' % pkg)
        else:
            logging.info('Check if service stopped')
            vmtools_service = params.get('service_name')
            status = utils_misc.get_guest_service_status(
                vmcheck.session, vmtools_service)
            logging.info('Service %s status: %s', vmtools_service, status)
            if status != 'inactive':
                log_fail('Service "%s" is not stopped' % vmtools_service)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation

        :param vmcheck: VMCheck object for vm checking
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
        # Search tolerates arbitrary whitespace between the expected words.
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.

        :param vmcheck: VMCheck object for vm checking
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
        # Every '/dev/...' entry should have been remapped to a virtio disk.
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remaped to "/dev/vd*"')

    def check_resume_swap(vmcheck):
        """
        Check the content of grub files meet expectation.

        :param vmcheck: VMCheck object for vm checking
        """
        # NOTE(review): chkfiles is only assigned for rhel6/rhel7; other
        # os_version values would raise NameError below — presumably this
        # checkpoint only runs for those versions.
        if os_version == 'rhel7':
            chkfiles = [
                '/etc/default/grub', '/boot/grub2/grub.cfg', '/etc/grub2.cfg'
            ]
        if os_version == 'rhel6':
            chkfiles = ['/boot/grub/grub.conf', '/etc/grub.conf']
        for file_i in chkfiles:
            status, content = vmcheck.run_cmd('cat %s' % file_i)
            if status != 0:
                log_fail('%s does not exist' % file_i)
            resume_dev_count = content.count('resume=/dev/')
            if resume_dev_count == 0 or resume_dev_count != content.count(
                    'resume=/dev/vd'):
                reason = 'Maybe the VM\'s swap pariton is lvm'
                log_fail('Content of %s is not correct or %s' %
                         (file_i, reason))
        content = vmcheck.session.cmd('cat /proc/cmdline')
        logging.debug('Content of /proc/cmdline:\n%s', content)
        if 'resume=/dev/vd' not in content:
            log_fail('Content of /proc/cmdline is not correct')

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist

        :param vmcheck: VMCheck object for vm checking
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe':
            r'"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"'
        }
        # rhev-apt.ext is removed on rhel9
        if utils_v2v.multiple_versions_compare(V2V_UNSUPPORT_RHEV_APT_VER):
            file_path.pop('rhev-apt.exe')
        for key in file_path:
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if status == 0:
                logging.info('%s exists' % key)
            else:
                log_fail('%s does not exist after convert to rhv' % key)

    def check_file_architecture(vmcheck):
        """
        Check the 3rd party module info

        :param vmcheck: VMCheck object for vm checking
        """
        content = vmcheck.session.cmd('uname -r').strip()
        # The module must NOT be owned by any rpm (rpm -qf should fail).
        status = vmcheck.session.cmd_status(
            'rpm -qf /lib/modules/%s/fileaccess/fileaccess_mod.ko ' % content)
        if status == 0:
            log_fail('3rd party module info is not correct')
        else:
            logging.info(
                'file /lib/modules/%s/fileaccess/fileaccess_mod.ko is not owned by any package'
                % content)

    def check_windows_signature(vmcheck, full_name):
        """
        Check signature of a file in windows VM

        :param vmcheck: VMCheck object for vm checking
        :param full_name: a file's full path name
        """
        logging.info(
            'powershell or signtool needs to be installed in guest first')

        # Each entry: (command, regex extracting the expiry date, strptime
        # format; empty format means use strptime's default parsing).
        cmds = [('powershell "Get-AuthenticodeSignature %s | format-list"' %
                 full_name,
                 r'SignerCertificate.*?Not After](.*?)\[Thumbprint',
                 '%m/%d/%Y %I:%M:%S %p'),
                ('signtool verify /v %s' % full_name,
                 r'Issued to: Red Hat.*?Expires:(.*?)SHA1 hash', '')]
        for cmd, ptn, fmt in cmds:
            _, output = vmcheck.run_cmd(cmd)
            if re.search(ptn, output, re.S):
                expire_time = re.search(ptn, output, re.S).group(1).strip()
                if fmt:
                    expire_time = time.strptime(expire_time, fmt)
                else:
                    expire_time = time.strptime(expire_time)
                if time.time() > time.mktime(expire_time):
                    test.fail("Signature of '%s' has expired" % full_name)
                return
        # Get here means the guest doesn't have powershell or signtool
        test.error("Powershell or Signtool must be installed in guest")

    def check_windows_vmware_tools(vmcheck):
        """
        Check vmware tools is uninstalled in VM

        :param vmcheck: VMCheck object for vm checking
        """

        def _get_vmware_info(cmd):
            # True when the command output no longer mentions vmtools.
            _, res = vmcheck.run_cmd(cmd)
            if res and not re.search('vmtools', res, re.I):
                return True
            return False

        cmds = ['tasklist', 'sc query vmtools']
        for cmd in cmds:
            res = utils_misc.wait_for(lambda: _get_vmware_info(cmd),
                                      600,
                                      step=30)
            if not res:
                test.fail("Failed to verification vmtools uninstallation")

    def check_windows_service(vmcheck, service_name):
        """
        Check service in VM

        :param vmcheck: VMCheck object for vm checking
        :param service_name: a service's name
        """
        try:
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info(service_name), re.I),
                                      600,
                                      step=30)
        except (ShellProcessTerminatedError, ShellStatusError):
            # Windows guest may reboot after installing qemu-ga service
            logging.debug('Windows guest is rebooting')
            if vmcheck.session:
                vmcheck.session.close()
                vmcheck.session = None
            # VM boots up is extremely slow when all testing in running on
            # rhv server simultaneously, so set timeout to 1200.
            vmcheck.create_session(timeout=1200)
            res = utils_misc.wait_for(lambda: re.search(
                'running', vmcheck.get_service_info(service_name), re.I),
                                      600,
                                      step=30)
        if not res:
            test.fail('Not found running %s service' % service_name)

    def check_linux_ogac(vmcheck):
        """
        Check qemu-guest-agent service in VM

        :param vmcheck: VMCheck object for vm checking
        """

        def get_pkgs(pkg_path):
            """
            Get all qemu-guest-agent pkgs
            """
            pkgs = []
            for _, _, files in os.walk(pkg_path):
                for file_name in files:
                    pkgs.append(file_name)
            return pkgs

        def get_pkg_version_vm():
            """
            Get qemu-guest-agent version in VM
            """
            vendor = vmcheck.get_vm_os_vendor()
            if vendor in ['Ubuntu', 'Debian']:
                cmd = 'dpkg -l qemu-guest-agent'
            else:
                cmd = 'rpm -q qemu-guest-agent'
            _, output = vmcheck.run_cmd(cmd)
            # First pattern matches dpkg output, second matches rpm output.
            pkg_ver_ptn = [
                r'qemu-guest-agent +[0-9]+:(.*?dfsg.*?) +',
                r'qemu-guest-agent-(.*?)\.x86_64'
            ]
            for ptn in pkg_ver_ptn:
                if re.search(ptn, output):
                    return re.search(ptn, output).group(1)
            return ''

        # VIRTIO_WIN may point at an ISO file (mount it) or a directory.
        if os.path.isfile(os.getenv('VIRTIO_WIN')):
            mount_point = utils_v2v.v2v_mount(os.getenv('VIRTIO_WIN'),
                                              'rhv_tools_setup_iso',
                                              fstype='iso9660')
            export_path = params['tmp_mount_point'] = mount_point
        else:
            export_path = os.getenv('VIRTIO_WIN')

        qemu_guest_agent_dir = os.path.join(export_path, qa_path)
        all_pkgs = get_pkgs(qemu_guest_agent_dir)
        logging.debug('The installing qemu-guest-agent is: %s' % all_pkgs)
        vm_pkg_ver = get_pkg_version_vm()
        logging.debug('qemu-guest-agent version in vm: %s' % vm_pkg_ver)

        # Check the service status of qemu-guest-agent in VM
        status_ptn = r'Active: active \(running\)|qemu-ga \(pid +[0-9]+\) is running'
        cmd = 'service qemu-ga status;systemctl status qemu-guest-agent;systemctl status qemu-ga*'
        _, output = vmcheck.run_cmd(cmd)
        if not re.search(status_ptn, output):
            log_fail('qemu-guest-agent service exception')

    def check_ubuntools(vmcheck):
        """
        Check open-vm-tools, ubuntu-server in VM

        :param vmcheck: VMCheck object for vm checking
        """
        logging.info('Check if open-vm-tools service stopped')
        status = utils_misc.get_guest_service_status(vmcheck.session,
                                                     'open-vm-tools')
        logging.info('Service open-vm-tools status: %s', status)
        if status != 'inactive':
            log_fail('Service open-vm-tools is not stopped')
        else:
            logging.info('Check if the ubuntu-server exist')
            content = vmcheck.session.cmd('dpkg -s ubuntu-server')
            if 'install ok installed' in content:
                logging.info('ubuntu-server has not been removed.')
            else:
                log_fail('ubuntu-server has been removed')

    def global_pem_setup(f_pem):
        """
        Setup global rhv server ca

        :param f_pem: ca file path
        """
        ca_anchors_dir = '/etc/pki/ca-trust/source/anchors'
        shutil.copy(f_pem, ca_anchors_dir)
        process.run('update-ca-trust extract', shell=True)
        # Remove the copied anchor right after extraction.
        os.unlink(os.path.join(ca_anchors_dir, os.path.basename(f_pem)))

    def global_pem_cleanup():
        """
        Cleanup global rhv server ca
        """
        process.run('update-ca-trust extract', shell=True)

    def find_net(bridge_name):
        """
        Find which network use specified bridge

        :param bridge_name: bridge name you want to find
        :return: matching network name, or '' when none is found
        """
        net_list = virsh.net_state_dict(only_names=True)
        net_name = ''
        if len(net_list):
            for net in net_list:
                net_info = virsh.net_info(net).stdout.strip()
                search = re.search(r'Bridge:\s+(\S+)', net_info)
                if search:
                    if bridge_name == search.group(1):
                        net_name = net
        else:
            logging.info('Conversion server has no network')
        return net_name

    def destroy_net(net_name):
        """
        destroy network in conversion server
        """
        if virsh.net_state_dict()[net_name]['active']:
            logging.info("Remove network %s in conversion server", net_name)
            virsh.net_destroy(net_name)
            if virsh.net_state_dict()[net_name]['autostart']:
                virsh.net_autostart(net_name, "--disable")
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def start_net(net_name):
        """
        start network in conversion server
        """
        logging.info("Recover network %s in conversion server", net_name)
        virsh.net_autostart(net_name)
        if not virsh.net_state_dict()[net_name]['active']:
            virsh.net_start(net_name)
        output = virsh.net_list("--all").stdout.strip()
        logging.info(output)

    def check_static_ip_conf(vmcheck):
        """
        Check static IP configuration in VM

        :param vmcheck: VMCheck object for vm checking
        """

        def _static_ip_check():
            cmd = 'ipconfig /all'
            _, output = vmcheck.run_cmd(cmd, debug=False)
            v2v_cmd = params_get(params, 'v2v_command')
            # --mac 00:50:56:ac:7a:4d:ip:192.168.1.2,192.168.1.1,22,192.168.1.100,10.73.2.108,10.66.127.10'
            mac_ip_pattern = '--mac (([0-9a-zA-Z]{2}:){6})ip:([0-9,.]+)'
            ip_config_list = re.search(mac_ip_pattern, v2v_cmd).group(3)
            # Normalize MAC to Windows ipconfig style: upper case, dashes.
            mac_addr = re.search(mac_ip_pattern,
                                 v2v_cmd).group(1)[0:-1].upper().replace(
                                     ':', '-')
            eth_adapter_ptn = r'Ethernet adapter Ethernet.*?NetBIOS over Tcpip'

            try:
                # Pick the adapter section that carries the expected MAC.
                ipconfig = [
                    v for v in re.findall(eth_adapter_ptn, output, re.S)
                    if mac_addr in v
                ][0]
            except IndexError:
                return False

            # Positional fields: ip, gateway, prefix-length, then DNS servers.
            for i, value in enumerate(ip_config_list.split(',')):
                if not value:
                    continue
                # IP address
                if i == 0:
                    ip_addr = r'IPv4 Address.*?: %s' % value
                    if not re.search(ip_addr, ipconfig, re.S):
                        logging.debug('Found IP addr failed')
                        return False
                # Default gateway
                if i == 1:
                    ip_gw = r'Default Gateway.*?: .*?%s' % value
                    if not re.search(ip_gw, ipconfig, re.S):
                        logging.debug('Found Gateway failed')
                        return False
                # Subnet mask
                if i == 2:
                    # convert subnet mask to cidr
                    bin_mask = '1' * int(value) + '0' * (32 - int(value))
                    cidr = '.'.join([
                        str(int(bin_mask[i * 8:i * 8 + 8], 2))
                        for i in range(4)
                    ])
                    sub_mask = r'Subnet Mask.*?: %s' % cidr
                    if not re.search(sub_mask, ipconfig, re.S):
                        logging.debug('Found subnet mask failed')
                        return False
                # DNS server list
                if i >= 3:
                    dns_server = r'DNS Servers.*?:.*?%s' % value
                    if not re.search(dns_server, ipconfig, re.S):
                        logging.debug('Found DNS Server failed')
                        return False
            return True

        try:
            vmcheck.create_session()
            res = utils_misc.wait_for(_static_ip_check, 1800, step=300)
        except (ShellTimeoutError, ShellProcessTerminatedError):
            # Session loss can mean the new static IP just took effect;
            # reconnect and retry with a shorter window.
            logging.debug(
                'Lost connection to windows guest, the static IP may take effect'
            )
            if vmcheck.session:
                vmcheck.session.close()
                vmcheck.session = None
            vmcheck.create_session()
            res = utils_misc.wait_for(_static_ip_check, 300, step=30)
        vmcheck.run_cmd('ipconfig /all')  # debug msg
        if not res:
            test.fail('Checking static IP configuration failed')

    def check_rhsrvany_checksums(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """

        def _get_expected_checksums(tool_exec, file):
            # NOTE(review): hashes the closure variable rhsrvany_path, not
            # the 'file' argument ('file' is only used in the log message).
            val = process.run('%s %s' % (tool_exec, rhsrvany_path),
                              shell=True).stdout_text.split()[0]

            if not val:
                test.error('Get checksum failed')
            logging.info('%s: Expect %s: %s', file, tool_exec, val)
            return val

        def _get_real_checksums(algorithm, file):
            # certutil defaults to SHA1; request MD5 explicitly when needed.
            certutil_cmd = r'certutil -hashfile "%s"' % file
            if algorithm == 'md5':
                certutil_cmd += ' MD5'

            res = vmcheck.session.cmd_output(certutil_cmd, safe=True)
            logging.debug('%s output:\n%s', certutil_cmd, res)

            # The hash value is on the second line of certutil's output.
            val = res.strip().splitlines()[1].strip()
            logging.info('%s: Real %s: %s', file, algorithm, val)
            return val

        logging.info('Check md5 and sha1 of rhsrvany.exe')

        algorithms = {'md5': 'md5sum', 'sha1': 'sha1sum'}

        rhsrvany_path = r'/usr/share/virt-tools/rhsrvany.exe'
        rhsrvany_path_windows = r"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"

        for key, val in algorithms.items():
            expect_val = _get_expected_checksums(val, rhsrvany_path)
            real_val = _get_real_checksums(key, rhsrvany_path_windows)
            if expect_val == real_val:
                logging.info('%s are correct', key)
            else:
                test.fail('%s of rhsrvany.exe is not correct' % key)

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """

        def vm_check(status_error):
            """
            Checking the VM
            """
            if status_error:
                return

            if output_mode == 'json' and not check_json_output(params):
                test.fail('check json output failed')
            if output_mode == 'local' and not check_local_output(params):
                test.fail('check local output failed')
            if output_mode in ['null', 'json', 'local']:
                return

            # vmchecker must be put before skip_vm_check in order to clean up
            # the VM.
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if skip_vm_check == 'yes':
                logging.info('Skip checking vm after conversion: %s' %
                             skip_reason)
                return

            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name, debug=True)

            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            if 'ogac' in checkpoint:
                # windows guests will reboot at any time after qemu-ga is
                # installed. The process cannot be controlled. In order to
                # don't break vmchecker.run() process, It's better to put
                # check_windows_ogac before vmchecker.run(). Because in
                # check_windows_ogac, it waits until rebooting completes.
                vmchecker.checker.create_session()
                if os_type == 'windows':
                    services = ['qemu-ga']
                    if not utils_v2v.multiple_versions_compare(
                            V2V_UNSUPPORT_RHEV_APT_VER):
                        services.append('rhev-apt')
                    if 'rhv-guest-tools' in os.getenv('VIRTIO_WIN'):
                        services.append('spice-ga')
                    for ser in services:
                        check_windows_service(vmchecker.checker, ser)
                else:
                    check_linux_ogac(vmchecker.checker)
            if 'mac_ip' in checkpoint:
                check_static_ip_conf(vmchecker.checker)
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if 'ogac' in checkpoint and 'signature' in checkpoint:
                if not utils_v2v.multiple_versions_compare(
                        V2V_UNSUPPORT_RHEV_APT_VER):
                    check_windows_signature(vmchecker.checker,
                                            r'c:\rhev-apt.exe')
            if 'cdrom' in checkpoint and "device='cdrom'" not in vmchecker.vmxml:
                test.fail('CDROM no longer exists')
            if 'vmtools' in checkpoint:
                check_vmtools(vmchecker.checker, checkpoint)
            if 'modprobe' in checkpoint:
                check_modprobe(vmchecker.checker)
            if 'device_map' in checkpoint:
                check_device_map(vmchecker.checker)
            if 'resume_swap' in checkpoint:
                check_resume_swap(vmchecker.checker)
            if 'rhev_file' in checkpoint:
                check_rhev_file_exist(vmchecker.checker)
            if 'file_architecture' in checkpoint:
                check_file_architecture(vmchecker.checker)
            if 'ubuntu_tools' in checkpoint:
                check_ubuntools(vmchecker.checker)
            if 'vmware_tools' in checkpoint:
                check_windows_vmware_tools(vmchecker.checker)
            if 'without_default_net' in checkpoint:
                if virsh.net_state_dict()[net_name]['active']:
                    log_fail("Bridge virbr0 already started during conversion")
            if 'rhsrvany_checksum' in checkpoint:
                check_rhsrvany_checksums(vmchecker.checker)
            if 'block_dev' in checkpoint and not os.path.exists(blk_dev_link):
                test.fail("checkpoint '%s' failed" % checkpoint)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
            # Virtio drivers will not be installed without virtio-win setup
            if 'virtio_win_unset' in checkpoint:
                missing_list = params.get('missing').split(',')
                expect_errors = ['Not find driver: ' + x for x in missing_list]
                logging.debug('Expect errors: %s' % expect_errors)
                logging.debug('Actual errors: %s' % error_list)
                if set(error_list) == set(expect_errors):
                    error_list[:] = []
                else:
                    logging.error('Virtio drivers not meet expectation')

        utils_v2v.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text

        # VM or local output checking
        vm_check(status_error)

        # Check log size decrease option
        if 'log decrease' in checkpoint:
            nbdkit_option = r'nbdkit\.backend\.datapath=0'
            if not re.search(nbdkit_option, output):
                test.fail("checkpoint '%s' failed" % checkpoint)
        if 'fstrim_warning' in checkpoint:
            # Actually, fstrim has no relationship with v2v, it may be related
            # to kernel, this warning really doesn't matter and has no harm to
            # the conversion.
            V2V_FSTRIM_SUCESS_VER = "[virt-v2v-1.45.1-1.el9,)"
            if utils_v2v.multiple_versions_compare(V2V_FSTRIM_SUCESS_VER):
                params.update({'expect_msg': None})

        # Log checking
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)

        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        if version_requried and not utils_v2v.multiple_versions_compare(
                version_requried):
            test.cancel("Testing requires version: %s" % version_requried)

        # See man virt-v2v-input-xen(1)
        if enable_legacy_cp:
            process.run('update-crypto-policies --set LEGACY',
                        verbose=True,
                        ignore_status=True,
                        shell=True)

        v2v_params = {
            'hostname': remote_host,
            'hypervisor': 'esx',
            'main_vm': vm_name,
            'vpx_dc': vpx_dc,
            'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': v2v_opts,
            'input_mode': 'libvirt',
            'os_storage': os_storage,
            'os_pool': os_pool,
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'password': vpx_passwd if src_uri_type != 'esx' else esxi_password,
            'input_transport': input_transport,
            'vcenter_host': vpx_hostname,
            'vcenter_password': vpx_passwd,
            'vddk_thumbprint': vddk_thumbprint,
            'vddk_libdir': vddk_libdir,
            'vddk_libdir_src': vddk_libdir_src,
            'src_uri_type': src_uri_type,
            'esxi_password': esxi_password,
            'esxi_host': esxi_host,
            'output_method': output_method,
            'os_storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts,
            'oo_json_disk_pattern': json_disk_pattern,
            'cmd_has_ip': cmd_has_ip,
            'params': params
        }

        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)

        # Create password file for access to ESX hypervisor
        vpx_passwd_file = params.get("vpx_passwd_file")
        with open(vpx_passwd_file, 'w') as pwd_f:
            if src_uri_type == 'esx':
                pwd_f.write(esxi_password)
            else:
                pwd_f.write(vpx_passwd)
        v2v_params['v2v_opts'] += " -ip %s" % vpx_passwd_file

        if params.get('output_format'):
            v2v_params.update({'of_format': params['output_format']})
        # Rename guest with special name while converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')

        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            # create different sasl_user name for different job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sals user name is %s' % params.get("sasl_user"))

            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            logging.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd,
                                       ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        if 'root' in checkpoint and 'ask' in checkpoint:
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '2')
        if 'root' in checkpoint and 'ask' not in checkpoint:
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option

        if 'with_proxy' in checkpoint:
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s', http_proxy,
                         https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if 'ogac' in checkpoint:
            os.environ['VIRTIO_WIN'] = virtio_win_path
            if not os.path.exists(os.getenv('VIRTIO_WIN')):
                test.fail('%s does not exist' % os.getenv('VIRTIO_WIN'))

            if os.path.isdir(os.getenv('VIRTIO_WIN')) and os_type == 'linux':
                export_path = os.getenv('VIRTIO_WIN')
                qemu_guest_agent_dir = os.path.join(export_path, qa_path)
                if not os.path.exists(qemu_guest_agent_dir) and os.access(
                        export_path, os.W_OK) and qa_url:
                    logging.debug(
                        'Not found qemu-guest-agent in virtio-win or rhv-guest-tools-iso,'
                        ' Try to prepare it manually. This is not a permanent step, once'
                        ' the official build includes it, this step should be removed.'
                    )
                    os.makedirs(qemu_guest_agent_dir)
                    rpm_name = os.path.basename(qa_url)
                    download.get_file(
                        qa_url, os.path.join(qemu_guest_agent_dir, rpm_name))

        if 'virtio_iso_blk' in checkpoint:
            if not os.path.exists(virtio_win_path):
                test.fail('%s does not exist' % virtio_win_path)

            # Find a free loop device
            free_loop_dev = process.run("losetup --find",
                                        shell=True).stdout_text.strip()
            # Setup a loop device
            cmd = 'losetup %s %s' % (free_loop_dev, virtio_win_path)
            process.run(cmd, shell=True)
            os.environ['VIRTIO_WIN'] = free_loop_dev

        if 'block_dev' in checkpoint:
            os_directory = params_get(params, 'os_directory')
            block_count = params_get(params, 'block_count')
            os_directory = tempfile.TemporaryDirectory(prefix='v2v_test_',
                                                       dir=os_directory)
            diskimage = '%s/diskimage' % os_directory.name
            # Update 'os_directory' for '-os' option
            params['os_directory'] = os_directory.name

            # Create a 1G image
            cmd = 'dd if=/dev/zero of=%s bs=10M count=%s' % (diskimage,
                                                             block_count)
            process.run(cmd, shell=True)
            # Build filesystem
            cmd = 'mkfs.ext4 %s' % diskimage
            process.run(cmd, shell=True)
            # Find a free loop device
            free_loop_dev = process.run("losetup --find",
                                        shell=True).stdout_text.strip()
            # Setup the image as a block device
            cmd = 'losetup %s %s' % (free_loop_dev, diskimage)
            process.run(cmd, shell=True)
            # Create a soft link to the loop device
            blk_dev_link = '%s/mydisk1' % os_directory.name
            cmd = 'ln -s %s %s' % (free_loop_dev, blk_dev_link)
            process.run(cmd, shell=True)

        if 'invalid_pem' in checkpoint:
            # simply change the 2nd line to lowercase to
            # get an invalid pem
            with open(local_ca_file_path, 'r+') as fd:
                # After the loop, pos/res refer to the file's second line.
                for i in range(2):
                    pos = fd.tell()
                    res = fd.readline()
                fd.seek(pos)
                fd.write(res.lower())
                fd.flush()

        if 'without_default_net' in checkpoint:
            net_name = find_net('virbr0')
            if net_name:
                destroy_net(net_name)

        if 'bandwidth' in checkpoint:
            dynamic_speeds = params_get(params, 'dynamic_speeds')
            bandwidth_file = params_get(params, 'bandwidth_file')
            with open(bandwidth_file, 'w') as fd:
                fd.write(dynamic_speeds)

        if checkpoint[0].startswith('virtio_win'):
            cp = checkpoint[0]
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('not removed')
            if cp.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if cp.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if cp.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if cp.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path

        if 'luks_dev_keys' in checkpoint:
            luks_password = params_get(params, 'luks_password', '')
            luks_keys = params_get(params, 'luks_keys', '')
            # Build one '--key ...' option per ';'-separated key spec.
            keys_options = ' '.join(
                list(
                    map(lambda i: '--key %s' % i if i else '',
                        luks_keys.split(';'))))
            if 'invalid_pwd_file' not in checkpoint:
                is_file_key = r'--key \S+:file:(\S+)'
                for file_key in re.findall(is_file_key, keys_options):
                    with open(file_key, 'w') as fd:
                        fd.write(luks_password)
            v2v_params['v2v_opts'] += ' ' + keys_options

        if 'empty_cdrom' in checkpoint:
            # Only dump the source XML over a persistent remote virsh
            # session; no conversion is run for this checkpoint.
            virsh_dargs = {
                'uri': remote_uri,
                'remote_ip': remote_host,
                'remote_user': '******',
                'remote_pwd': vpx_passwd,
                'auto_close': True,
                'debug': True
            }
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
            remote_virsh.close_session()
        else:
            if 'exist_uuid' in checkpoint:
                auto_clean = False
            if checkpoint[0] in [
                    'mismatched_uuid', 'no_uuid', 'invalid_source',
                    'system_rhv_pem'
            ]:
                cmd_only = True
                auto_clean = False
            v2v_result = utils_v2v.v2v_cmd(v2v_params, auto_clean, cmd_only,
                                           interaction_run)
        if 'new_name' in v2v_params:
            vm_name = params['main_vm'] = v2v_params['new_name']

        # The following checkpoints rewrite the generated v2v command and
        # re-run it below.
        if 'system_rhv_pem' in checkpoint:
            if 'set' in checkpoint:
                global_pem_setup(local_ca_file_path)
            rhv_cafile = r'-oo rhv-cafile=\S+\s*'
            new_cmd = utils_v2v.cmd_remove_option(v2v_result, rhv_cafile)
            logging.debug('New v2v command:\n%s', new_cmd)
        if 'mismatched_uuid' in checkpoint:
            # append more uuid
            new_cmd = v2v_result + ' -oo rhv-disk-uuid=%s' % str(uuid.uuid4())
        if 'no_uuid' in checkpoint:
            rhv_disk_uuid = r'-oo rhv-disk-uuid=\S+\s*'
            new_cmd = utils_v2v.cmd_remove_option(v2v_result, rhv_disk_uuid)
            logging.debug('New v2v command:\n%s', new_cmd)
        if 'exist_uuid' in checkpoint:
            # Use to cleanup the VM because it will not be run in check_result
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            # Update name to avoid conflict
            new_vm_name = v2v_params['new_name'] + '_exist_uuid'
            new_cmd = v2v_result.command.replace('-on %s' % vm_name,
                                                 '-on %s' % new_vm_name)
            new_cmd += ' --no-copy'
            logging.debug('re-run v2v command:\n%s', new_cmd)
        if 'invalid_source' in checkpoint:
            if params.get('invalid_vpx_hostname'):
                new_cmd = v2v_result.replace(
                    vpx_hostname, params.get('invalid_vpx_hostname'))
            if params.get('invalid_esx_hostname'):
                new_cmd = v2v_result.replace(
                    esxi_host, params.get('invalid_esx_hostname'))

        if checkpoint[0] in [
                'mismatched_uuid', 'no_uuid', 'invalid_source', 'exist_uuid',
                'system_rhv_pem'
        ]:
            v2v_result = utils_v2v.cmd_run(new_cmd,
                                           params.get('v2v_dirty_resources'))

        check_result(v2v_result, status_error)

    finally:
        # Best-effort cleanup of everything the checkpoints may have set up.
        if enable_legacy_cp:
            process.run('update-crypto-policies --set DEFAULT',
                        verbose=True,
                        ignore_status=True,
                        shell=True)
        if checkpoint[0].startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
        if 'virtio_win_iso_mount' in checkpoint:
            process.run('umount /opt', ignore_status=True)
        if 'ogac' in checkpoint and params.get('tmp_mount_point'):
            if os.path.exists(params.get('tmp_mount_point')):
                utils_misc.umount(os.getenv('VIRTIO_WIN'),
                                  params['tmp_mount_point'], 'iso9660')
            os.environ.pop('VIRTIO_WIN')
        if 'block_dev' in checkpoint and hasattr(os_directory, 'name'):
            process.run('losetup -d %s' % free_loop_dev, shell=True)
            os_directory.cleanup()
        if 'virtio_iso_blk' in checkpoint:
            process.run('losetup -d %s' % free_loop_dev, shell=True)
            os.environ.pop('VIRTIO_WIN')
        if 'system_rhv_pem' in checkpoint and 'set' in checkpoint:
            global_pem_cleanup()
        if 'without_default_net' in checkpoint:
            if net_name:
                start_net(net_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            logging.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if 'with_proxy' in checkpoint:
            logging.info('Unset http_proxy&https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
        if unprivileged_user:
            process.system("userdel -fr %s" % unprivileged_user)
        if params.get('os_directory') and os.path.isdir(
                params['os_directory']):
            shutil.rmtree(params['os_directory'], ignore_errors=True)
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
def run(test, params, env):
    """
    KVM whql env setup test:
    1) Log into a guest
    2) Update Windows kernel to the newest version
    3) Un-check Automatically restart in system failure
    4) Disable UAC
    5) Get the symbol files
    6) Set VM to physical memory + 100M
    7) Update the nic configuration
    8) Install debug view and make it auto run

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    # Host-side directory where guest setup error logs are collected.
    log_path = "%s/../debug" % test.resultsdir
    # Prepare the tools iso: fetch every package listed in "src_list"
    # into a scratch directory that is later mastered into an ISO.
    error_context.context("Prepare the tools iso", logging.info)
    src_list = params.get("src_list")
    src_path = params.get("src_path", "%s/whql_src" % test.tmpdir)
    if not os.path.exists(src_path):
        os.makedirs(src_path)
    if src_list is not None:
        for i in re.split(",", src_list):
            pkg_name = os.path.basename(i)
            pkg_path = os.path.join(src_path, pkg_name)
            utils_download.get_file(i, pkg_path)

    # Make iso for src and expose it to the guest as the "whql" cdrom.
    cdrom_whql = params.get("cdrom_whql")
    cdrom_whql = utils_misc.get_path(data_dir.get_data_dir(), cdrom_whql)
    cdrom_whql_dir = os.path.split(cdrom_whql)[0]
    if not os.path.exists(cdrom_whql_dir):
        os.makedirs(cdrom_whql_dir)
    cmd = "mkisofs -J -o %s %s" % (cdrom_whql, src_path)
    process.system(cmd, shell=True)
    params["cdroms"] += " whql"

    # Bring the VM up with the updated cdrom list and log into it.
    vm = "vm1"
    vm_params = params.object_params(vm)
    env_process.preprocess_vm(test, vm_params, env, vm)
    vm = env.get_vm(vm)

    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)

    error_log = utils_misc.get_path(log_path, "whql_setup_error_log")
    run_guest_log = params.get("run_guest_log",
                               "%s/whql_qemu_comman" % test.tmpdir)

    # Record qmmu command line in a log file (append after any existing
    # content when the file is already present).
    # NOTE(review): the handle is opened without a context manager; a
    # ``with open(...)`` block would be safer against leaks on error.
    error_context.context("Record qemu command line", logging.info)
    if os.path.isfile(run_guest_log):
        fd = open(run_guest_log, "r+")
        fd.read()
    else:
        fd = open(run_guest_log, "w")
    fd.write("%s\n" % vm.qemu_command)
    fd.close()

    # Get set up commands from the test parameters; empty strings are
    # skipped later when the command list is executed.
    update_cmd = params.get("update_cmd", "")
    timezone_cmd = params.get("timezone_cmd", "")
    auto_restart = params.get("auto_restart", "")
    qxl_install = params.get("qxl_install", "")
    debuggers_install = params.get("debuggers_install", "")
    disable_uas = params.get("disable_uas", "")
    symbol_files = params.get("symbol_files", "")
    vm_size = int(params.get("mem")) + 100
    nic_cmd = params.get("nic_config_cmd", "")
    dbgview_cmd = params.get("dbgview_cmd", "")
    format_cmd = params.get("format_cmd", "")
    disable_firewall = params.get("disable_firewall", "")
    disable_update = params.get("disable_update", "")
    setup_timeout = int(params.get("setup_timeout", "7200"))
    disk_init_cmd = params.get("disk_init_cmd", "")
    disk_driver_install = params.get("disk_driver_install", "")

    # NOTE(review): the two wmic pagefile commands below are built and
    # then immediately overwritten with empty strings, so the pagefile
    # resize (step 6 of the docstring) is effectively disabled.
    vm_ma_cmd = "wmic computersystem set AutomaticManagedPagefile=False"
    vm_cmd = "wmic pagefileset where name=\"C:\\\\pagefile.sys\" set "
    vm_cmd += "InitialSize=%s,MaximumSize=%s" % (vm_size, vm_size)
    vm_ma_cmd = ""
    vm_cmd = ""
    if symbol_files:
        symbol_cmd = "del C:\\\\symbols &&"
        symbol_cmd += "git clone %s C:\\\\symbol_files C:\\\\symbols" % \
            symbol_files
    else:
        symbol_cmd = ""
    wmic_prepare_cmd = "echo exit > cmd && cmd /s wmic"

    error_context.context("Configure guest system", logging.info)
    cmd_list = [
        wmic_prepare_cmd, auto_restart, disable_uas, symbol_cmd, vm_ma_cmd,
        vm_cmd, dbgview_cmd, qxl_install, disable_firewall, timezone_cmd
    ]
    if nic_cmd:
        # One configuration command per nic; index is 1-based on the
        # guest side.
        for index, nic in enumerate(re.split(r"\s+", params.get("nics"))):
            setup_params = params.get("nic_setup_params_%s" % nic, "")
            if params.get("vm_arch_name", "") == "x86_64":
                nic_cmd = re.sub("set", "set_64", nic_cmd)
            cmd_list.append("%s %s %s" % (nic_cmd, str(index + 1),
                                          setup_params))
    if disk_init_cmd:
        disk_num = len(re.split(r"\s+", params.get("images")))
        if disk_driver_install:
            cmd_list.append(disk_driver_install + str(disk_num - 1))
        # Secondary disks are assigned drive letters starting at I:.
        labels = "IJKLMNOPQRSTUVWXYZ"
        for index, images in enumerate(re.split(r"\s+",
                                                params.get("images"))):
            if index > 0:
                cmd_list.append(disk_init_cmd % (str(index),
                                                 labels[index - 1]))
                format_cmd_image = format_cmd % (
                    labels[index - 1], params.get("win_format_%s" % images))
                if params.get("win_extra_%s" % images):
                    format_cmd_image += " %s" % params.get(
                        "win_extra_%s" % images)
                cmd_list.append(format_cmd_image)
    cmd_list += [update_cmd, disable_update]

    failed_flag = 0

    # Check symbol files in guest: download runs in a background thread
    # while a second session polls for completion, then kills the
    # downloader process by PID.
    if symbol_files:
        error_context.context("Update symbol files", logging.info)
        install_check_tool = False
        check_tool_chk = params.get("check_tool_chk",
                                    "C:\\debuggers\\symchk.exe")
        output = session.cmd_output(check_tool_chk)
        if "cannot find" in output:
            install_check_tool = True
        if install_check_tool:
            output = session.cmd_output(debuggers_install)
        symbol_file_check = params.get("symbol_file_check")
        symbol_file_download = params.get("symbol_file_download")

        symbol_check_pattern = params.get("symbol_check_pattern")
        symbol_pid_pattern = params.get("symbol_pid_pattern")
        download = utils_test.BackgroundTest(
            session.cmd, (symbol_file_download, setup_timeout))

        sessioncheck = vm.wait_for_login(timeout=timeout)
        download.start()
        while download.is_alive():
            o = sessioncheck.cmd_output(symbol_file_check, setup_timeout)
            if symbol_check_pattern in o:
                # Check is done kill download process
                cmd = "tasklist /FO list"
                s, o = sessioncheck.cmd_status_output(cmd)
                pid = re.findall(symbol_pid_pattern, o, re.S)
                if pid:
                    cmd = "taskkill /PID %s /F" % pid[0]
                    try:
                        sessioncheck.cmd(cmd)
                    except Exception:
                        # Best-effort kill: the process may already be gone.
                        pass
                break
            time.sleep(5)
        sessioncheck.close()
        download.join()

    # Run every non-empty setup command; failures are counted and logged
    # rather than aborting, so all steps get a chance to run.
    for cmd in cmd_list:
        if len(cmd) > 0:
            s = 0
            try:
                s, o = session.cmd_status_output(cmd, timeout=setup_timeout)
            except Exception as err:
                failed_flag += 1
                utils_misc.log_line(error_log,
                                    "Unexpected exception: %s" % err)
            if s != 0:
                failed_flag += 1
                utils_misc.log_line(error_log, o)

    if failed_flag != 0:
        # NOTE(review): "fialed" is a typo for "failed" in this message.
        test.fail("Have %s setup fialed. Please check the log." % failed_flag)
def run(test, params, env):
    """
    KVM whql env setup test:
    1) Log into a guest
    2) Update Windows kernel to the newest version
    3) Un-check Automatically restart in system failure
    4) Disable UAC
    5) Get the symbol files
    6) Set VM to physical memory + 100M
    7) Update the nic configuration
    8) Install debug view and make it auto run

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    # NOTE(review): this function duplicates another copy of the same
    # WHQL setup test earlier in this file; consider deduplicating.
    # Host-side directory where guest setup error logs are collected.
    log_path = "%s/../debug" % test.resultsdir
    # Prepare the tools iso: fetch every package listed in "src_list"
    # into a scratch directory that is later mastered into an ISO.
    error_context.context("Prepare the tools iso", logging.info)
    src_list = params.get("src_list")
    src_path = params.get("src_path", "%s/whql_src" % test.tmpdir)
    if not os.path.exists(src_path):
        os.makedirs(src_path)
    if src_list is not None:
        for i in re.split(",", src_list):
            pkg_name = os.path.basename(i)
            pkg_path = os.path.join(src_path, pkg_name)
            utils_download.get_file(i, pkg_path)

    # Make iso for src and expose it to the guest as the "whql" cdrom.
    cdrom_whql = params.get("cdrom_whql")
    cdrom_whql = utils_misc.get_path(data_dir.get_data_dir(), cdrom_whql)
    cdrom_whql_dir = os.path.split(cdrom_whql)[0]
    if not os.path.exists(cdrom_whql_dir):
        os.makedirs(cdrom_whql_dir)
    cmd = "mkisofs -J -o %s %s" % (cdrom_whql, src_path)
    process.system(cmd, shell=True)
    params["cdroms"] += " whql"

    # Bring the VM up with the updated cdrom list and log into it.
    vm = "vm1"
    vm_params = params.object_params(vm)
    env_process.preprocess_vm(test, vm_params, env, vm)
    vm = env.get_vm(vm)

    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)

    error_log = utils_misc.get_path(log_path, "whql_setup_error_log")
    run_guest_log = params.get(
        "run_guest_log", "%s/whql_qemu_comman" % test.tmpdir)

    # Record qmmu command line in a log file (append after any existing
    # content when the file is already present).
    # NOTE(review): the handle is opened without a context manager; a
    # ``with open(...)`` block would be safer against leaks on error.
    error_context.context("Record qemu command line", logging.info)
    if os.path.isfile(run_guest_log):
        fd = open(run_guest_log, "r+")
        fd.read()
    else:
        fd = open(run_guest_log, "w")
    fd.write("%s\n" % vm.qemu_command)
    fd.close()

    # Get set up commands from the test parameters; empty strings are
    # skipped later when the command list is executed.
    update_cmd = params.get("update_cmd", "")
    timezone_cmd = params.get("timezone_cmd", "")
    auto_restart = params.get("auto_restart", "")
    qxl_install = params.get("qxl_install", "")
    debuggers_install = params.get("debuggers_install", "")
    disable_uas = params.get("disable_uas", "")
    symbol_files = params.get("symbol_files", "")
    vm_size = int(params.get("mem")) + 100
    nic_cmd = params.get("nic_config_cmd", "")
    dbgview_cmd = params.get("dbgview_cmd", "")
    format_cmd = params.get("format_cmd", "")
    disable_firewall = params.get("disable_firewall", "")
    disable_update = params.get("disable_update", "")
    setup_timeout = int(params.get("setup_timeout", "7200"))
    disk_init_cmd = params.get("disk_init_cmd", "")
    disk_driver_install = params.get("disk_driver_install", "")

    # NOTE(review): the two wmic pagefile commands below are built and
    # then immediately overwritten with empty strings, so the pagefile
    # resize (step 6 of the docstring) is effectively disabled.
    vm_ma_cmd = "wmic computersystem set AutomaticManagedPagefile=False"
    vm_cmd = "wmic pagefileset where name=\"C:\\\\pagefile.sys\" set "
    vm_cmd += "InitialSize=%s,MaximumSize=%s" % (vm_size, vm_size)
    vm_ma_cmd = ""
    vm_cmd = ""
    if symbol_files:
        symbol_cmd = "del C:\\\\symbols &&"
        symbol_cmd += "git clone %s C:\\\\symbol_files C:\\\\symbols" % \
            symbol_files
    else:
        symbol_cmd = ""
    wmic_prepare_cmd = "echo exit > cmd && cmd /s wmic"

    error_context.context("Configure guest system", logging.info)
    cmd_list = [wmic_prepare_cmd, auto_restart, disable_uas, symbol_cmd,
                vm_ma_cmd, vm_cmd, dbgview_cmd, qxl_install,
                disable_firewall, timezone_cmd]
    if nic_cmd:
        # One configuration command per nic; index is 1-based on the
        # guest side.
        for index, nic in enumerate(re.split(r"\s+", params.get("nics"))):
            setup_params = params.get("nic_setup_params_%s" % nic, "")
            if params.get("vm_arch_name", "") == "x86_64":
                nic_cmd = re.sub("set", "set_64", nic_cmd)
            cmd_list.append("%s %s %s" % (nic_cmd, str(index + 1),
                                          setup_params))
    if disk_init_cmd:
        disk_num = len(re.split(r"\s+", params.get("images")))
        if disk_driver_install:
            cmd_list.append(disk_driver_install + str(disk_num - 1))
        # Secondary disks are assigned drive letters starting at I:.
        labels = "IJKLMNOPQRSTUVWXYZ"
        for index, images in enumerate(re.split(r"\s+",
                                                params.get("images"))):
            if index > 0:
                cmd_list.append(disk_init_cmd % (str(index),
                                                 labels[index - 1]))
                format_cmd_image = format_cmd % (labels[index - 1],
                                                 params.get("win_format_%s"
                                                            % images))
                if params.get("win_extra_%s" % images):
                    format_cmd_image += " %s" % params.get(
                        "win_extra_%s" % images)
                cmd_list.append(format_cmd_image)
    cmd_list += [update_cmd, disable_update]

    failed_flag = 0

    # Check symbol files in guest: download runs in a background thread
    # while a second session polls for completion, then kills the
    # downloader process by PID.
    if symbol_files:
        error_context.context("Update symbol files", logging.info)
        install_check_tool = False
        check_tool_chk = params.get("check_tool_chk",
                                    "C:\\debuggers\\symchk.exe")
        output = session.cmd_output(check_tool_chk)
        if "cannot find" in output:
            install_check_tool = True
        if install_check_tool:
            output = session.cmd_output(debuggers_install)
        symbol_file_check = params.get("symbol_file_check")
        symbol_file_download = params.get("symbol_file_download")

        symbol_check_pattern = params.get("symbol_check_pattern")
        symbol_pid_pattern = params.get("symbol_pid_pattern")
        download = utils_test.BackgroundTest(session.cmd,
                                             (symbol_file_download,
                                              setup_timeout))

        sessioncheck = vm.wait_for_login(timeout=timeout)
        download.start()
        while download.is_alive():
            o = sessioncheck.cmd_output(symbol_file_check, setup_timeout)
            if symbol_check_pattern in o:
                # Check is done kill download process
                cmd = "tasklist /FO list"
                s, o = sessioncheck.cmd_status_output(cmd)
                pid = re.findall(symbol_pid_pattern, o, re.S)
                if pid:
                    cmd = "taskkill /PID %s /F" % pid[0]
                    try:
                        sessioncheck.cmd(cmd)
                    except Exception:
                        # Best-effort kill: the process may already be gone.
                        pass
                break
            time.sleep(5)
        sessioncheck.close()
        download.join()

    # Run every non-empty setup command; failures are counted and logged
    # rather than aborting, so all steps get a chance to run.
    for cmd in cmd_list:
        if len(cmd) > 0:
            s = 0
            try:
                s, o = session.cmd_status_output(cmd, timeout=setup_timeout)
            except Exception as err:
                failed_flag += 1
                utils_misc.log_line(
                    error_log, "Unexpected exception: %s" % err)
            if s != 0:
                failed_flag += 1
                utils_misc.log_line(error_log, o)

    if failed_flag != 0:
        # NOTE(review): "fialed" is a typo for "failed" in this message.
        test.fail("Have %s setup fialed. Please check the log." % failed_flag)
for param in params.get("copy_to_local").split(): l_value = params.get(param) if l_value: need_copy = True nfs_link = utils_misc.get_path(test.bindir, l_value) i_name = os.path.basename(l_value) local_link = os.path.join(local_dir, i_name) if os.path.isfile(local_link): file_hash = crypto.hash_file(local_link, algorithm="md5") expected_hash = crypto.hash_file(nfs_link, algorithm="md5") if file_hash == expected_hash: need_copy = False if need_copy: msg = "Copy %s to %s in local host." % (i_name, local_link) error_context.context(msg, logging.info) download.get_file(nfs_link, local_link) params[param] = local_link unattended_install_config = UnattendedInstallConfig(test, params, vm) unattended_install_config.setup() # params passed explicitly, because they may have been updated by # unattended install config code, such as when params['url'] == auto vm.create(params=params) post_finish_str = params.get("post_finish_str", "Post set up finished") install_timeout = int(params.get("install_timeout", 4800)) migrate_background = params.get("migrate_background") == "yes" if migrate_background:
def _copy_file_to_test_dir(file_path):
    """
    Stage a sub-test resource inside the sub-test directory.

    URLs are returned untouched; a local path (relative to the test
    bindir) is fetched into ``sub_test_path`` via ``download.get_file``.

    :param file_path: URL or path of the resource.
    :return: The URL unchanged, or the basename of the staged copy.
    """
    if aurl.is_url(file_path):
        return file_path
    source = os.path.join(test.bindir, file_path)
    destination = os.path.join(sub_test_path, os.path.basename(source))
    staged = download.get_file(file_path, destination)
    return os.path.basename(staged)
def run(test, params, env):
    """
    Test virtio/virtio-transitional model of serial device

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def get_free_pci_slot():
        """
        Get a free slot from pcie-to-pci-bridge

        :return: The free slot
        """
        # Collect slots already taken by devices sitting on the bridge
        # (``pci_devices`` and ``pci_bridge_index`` come from the
        # enclosing scope).
        used_slot = []
        for dev in pci_devices:
            address = dev.find('address')
            if (address is not None
                    and address.get('bus') == pci_bridge_index):
                used_slot.append(address.get('slot'))
        # Slots are compared as "0x01".."0x1d" hex strings.
        for slot_index in range(1, 30):
            slot = "%0#4x" % slot_index
            if slot not in used_slot:
                return slot
        return None

    def test_data_transfer(dev_type):
        """
        Test data transfer between guest and host via console/serial device

        :param dev_type: The device type to be tested, console or channel
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        console_xml = vmxml.xmltreefile.find('devices').find(dev_type)
        host_path = console_xml.find('source').get('path')
        # Guest-side device node differs per device type.
        guest_path = '/dev/hvc0' if dev_type == 'console' else '/dev/vport0p1'
        test_message = 'virtiochannel'
        # Read the host end of the pty while the guest writes to it.
        cat_cmd = "cat %s" % host_path
        logfile = "test_data_transfer-%s.log" % dev_type
        host_process = aexpect.ShellSession(cat_cmd, auto_close=False,
                                            output_func=utils_misc.log_line,
                                            output_params=(logfile, ))
        guest_session = vm.wait_for_login()
        guest_session.cmd_output('echo %s > %s' % (test_message, guest_path))
        guest_session.close()
        try:
            host_process.read_until_last_line_matches(test_message,
                                                      timeout=10)
        except aexpect.exceptions.ExpectError as e:
            test.fail('Did not catch the expected output from host side,'
                      ' the detail of the failure: %s' % str(e))
        finally:
            host_process.close()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy so the domain XML can be restored in finally.
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']

    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path

    # Add pcie-to-pci-bridge when it is required; reuse an existing one
    # when present (for-else: the else runs only if no bridge was found).
    if add_pcie_to_pci_bridge:
        pci_controllers = vmxml.get_controllers('pci')
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-to-pci-bridge':
                pci_bridge = controller
                break
        else:
            contr_dict = {
                'controller_type': 'pci',
                'controller_model': 'pcie-to-pci-bridge'
            }
            pci_bridge = libvirt.create_controller_xml(contr_dict,
                                                       "add_controller",
                                                       vm_name)
        pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))
    try:
        # Update interface to virtio-transitional mode for
        # rhel6 guest to make it works for login
        iface_params = {'model': 'virtio-transitional'}
        libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        # vmxml will not be updated since set_vm_disk
        # sync with another dumped xml inside the function
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Remove all current serial devices
        vmxml.remove_all_device_by_type('serial')
        vmxml.remove_all_device_by_type('channel')
        vmxml.remove_all_device_by_type('console')
        vmxml.del_controller('virtio-serial')
        vmxml.sync()
        # Add virtio-serial with right model
        contr_dict = {
            'controller_type': 'virtio-serial',
            'controller_model': virtio_model
        }
        if add_pcie_to_pci_bridge:
            pci_devices = vmxml.xmltreefile.find('devices').getchildren()
            slot = get_free_pci_slot()
            addr = '{"bus": %s, "slot": %s}' % (pci_bridge_index, slot)
            contr_dict.update({'controller_addr': addr})
        libvirt.create_controller_xml(contr_dict, "add_controller", vm_name)
        # vmxml will not be updated since set_vm_disk
        # sync with another dumped xml inside the function
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Add channel and console device attached to virtio-serial bus
        target_dict = {'type': 'virtio', 'name': 'ptychannel'}
        address_dict = {'type': 'virtio-serial', 'controller': '0',
                        'bus': '0'}
        channel_xml = Channel('pty')
        channel_xml.target = target_dict
        channel_xml.address = address_dict
        console_xml = Console()
        console_xml.target_port = '0'
        console_xml.target_type = 'virtio'
        vmxml.add_device(channel_xml)
        vmxml.add_device(console_xml)
        vmxml.sync()
        if vm.is_alive():
            vm.destroy()
        vm.start(autoconsole=False)
        # Test data transfer via console and channel devices
        test_data_transfer('console')
        test_data_transfer('channel')
    finally:
        vm.destroy()
        backup_xml.sync()
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of disk

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login
        """
        vm.destroy()
        vm.start()
        vm.wait_for_login()

    def attach(xml, device_name, plug_method="hot"):
        """
        Attach device with xml, for both hot and cold plug

        :param xml: Device xml to be attached
        :param device_name: Device name to be attached
        :param plug_method: hot or cold for plug method
        """
        device_before_plug = find_device(vm, params)
        with open(xml) as disk_file:
            logging.debug("Attach disk by XML: %s", disk_file.read())
        file_arg = xml
        if plug_method == "cold":
            # Cold plug only touches the persistent config; a reboot is
            # required below for the device to appear.
            file_arg += ' --config'
        s_attach = virsh.attach_device(domainarg=vm_name, filearg=file_arg,
                                       debug=True)
        libvirt.check_exit_status(s_attach)
        if plug_method == "cold":
            reboot()
        detect_time = params.get("detect_disk_time", 20)
        plug_disks = utils_misc.wait_for(
            lambda: get_new_device(device_before_plug,
                                   find_device(vm, params)), detect_time)
        if not plug_disks:
            test.fail("Failed to hotplug device %s to guest" % device_name)

    def detach(xml, device_name, unplug_method="hot"):
        """
        Detach device with xml, for both hot and cold unplug

        :param xml: Device xml to be attached
        :param device_name: Device name to be attached
        :param plug_method: hot or cold for unplug method
        """
        with open(xml) as disk_file:
            logging.debug("Detach device by XML: %s", disk_file.read())
        file_arg = xml
        if unplug_method == "cold":
            file_arg = xml + ' --config'
        s_detach = virsh.detach_device(domainarg=vm_name, filearg=file_arg,
                                       debug=True)
        if unplug_method == "cold":
            reboot()
        libvirt.check_exit_status(s_detach)

    def attach_disk():  # pylint: disable=W0611
        """
        Sub test for attach disk, including hot and cold plug/unplug
        """
        plug_method = params.get("plug_method", "hot")
        device_source_format = params.get("at_disk_source_format", "raw")
        device_target = params.get("at_disk_target", "vdb")
        device_disk_bus = params.get("at_disk_bus", "virtio")
        device_source_name = params.get("at_disk_source", "attach.img")
        detect_time = params.get("detect_disk_time", 10)
        device_source_path = os.path.join(tmp_dir, device_source_name)
        # 1G backing file for the disk to be plugged.
        device_source = libvirt.create_local_disk(
            "file", path=device_source_path, size="1",
            disk_format=device_source_format)

        def _generate_disk_xml():
            """Generate xml for device hotplug/unplug usage"""
            diskxml = devices.disk.Disk("file")
            diskxml.device = "disk"
            source_params = {"attrs": {'file': device_source}}
            diskxml.source = diskxml.new_disk_source(**source_params)
            diskxml.target = {'dev': device_target, 'bus': device_disk_bus}
            if params.get("disk_model"):
                diskxml.model = params.get("disk_model")
            if pci_bridge_index and device_disk_bus == 'virtio':
                # Pin the disk onto the pcie-to-pci-bridge explicitly.
                addr = diskxml.new_disk_address('pci')
                addr.set_attrs({'bus': pci_bridge_index, 'slot': slot})
                diskxml.address = addr
            return diskxml.xml

        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        slot = get_free_slot(pci_bridge_index, v_xml)
        disk_xml = _generate_disk_xml()
        attach(disk_xml, device_target, plug_method)
        if plug_method == "cold":
            # Regenerate: the previous xml file was consumed by attach.
            disk_xml = _generate_disk_xml()
        detach(disk_xml, device_target, plug_method)
        if not utils_misc.wait_for(
                lambda: not libvirt.device_exists(vm, device_target),
                detect_time):
            test.fail("Detach disk failed.")

    def attach_controller():  # pylint: disable=W0611
        """
        Sub test for attach controller
        """
        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        contr_index = len(v_xml.get_controllers('scsi'))
        contr_type = params.get("controller_type", 'scsi')
        contr_model = params.get("controller_model", "virtio-scsi")
        contr_dict = {
            'controller_type': contr_type,
            'controller_model': contr_model,
            'controller_index': contr_index
        }
        if pci_bridge_index:
            slot = get_free_slot(pci_bridge_index, v_xml)
            addr = '{"bus": %s, "slot": %s}' % (pci_bridge_index, slot)
            contr_dict.update({'controller_addr': addr})
        cntl_add = libvirt.create_controller_xml(contr_dict=contr_dict)
        attach(cntl_add.xml, params['controller_model'])
        cntl_add = libvirt.create_controller_xml(contr_dict=contr_dict)
        detach(cntl_add.xml, params['controller_model'])

    def snapshot():  # pylint: disable=W0611
        """
        Sub test for snapshot
        """
        for i in range(1, 4):
            ret = virsh.snapshot_create_as(vm_name, "sn%s --disk-only" % i)
            libvirt.check_exit_status(ret)
        libvirtd_obj = utils_libvirtd.Libvirtd()
        libvirtd_obj.restart()
        save_path = os.path.join(tmp_dir, "test.save")
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)
        session = vm.wait_for_login()
        session.close()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy so the domain XML can be restored in finally.
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    pci_bridge_index = None
    tmp_dir = data_dir.get_tmp_dir()
    guest_src_url = params.get("guest_src_url")
    set_crypto_policy = params.get("set_crypto_policy")

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # Download and replace image when guest_src_url provided.
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(),
                                          image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
    if set_crypto_policy:
        utils_conn.update_crypto_policy(set_crypto_policy)

    try:
        # Reuse an existing pcie-to-pci-bridge or create one (for-else:
        # the else runs only when no bridge was found).
        if add_pcie_to_pci_bridge:
            pci_controllers = vmxml.get_controllers('pci')
            for controller in pci_controllers:
                if controller.get('model') == 'pcie-to-pci-bridge':
                    pci_bridge = controller
                    break
            else:
                contr_dict = {
                    'controller_type': 'pci',
                    'controller_model': 'pcie-to-pci-bridge'
                }
                pci_bridge = libvirt.create_controller_xml(contr_dict)
                libvirt.add_controller(vm_name, pci_bridge)
                pci_bridge = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\
                    .get_controllers('pci', 'pcie-to-pci-bridge')[0]
            pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))
        if (params["os_variant"] == 'rhel6'
                or 'rhel6' in params.get("shortname")):
            # rhel6 guests need virtio-transitional nics to stay reachable.
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        if pci_bridge_index:
            v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if params.get("disk_target_bus") == "scsi":
                # Move scsi controllers onto the bridge.
                scsi_controllers = v_xml.get_controllers('scsi')
                for index, controller in enumerate(scsi_controllers):
                    controller.find('address').set('bus', pci_bridge_index)
                    controller.find('address').set(
                        'slot', get_free_slot(pci_bridge_index, v_xml))
            else:
                # Move virtio disks onto the bridge.
                disks = v_xml.get_devices(device_type="disk")
                for index, disk in enumerate(disks):
                    args = {
                        'bus': pci_bridge_index,
                        'slot': get_free_slot(pci_bridge_index, v_xml)
                    }
                    libvirt.set_disk_attr(v_xml, disk.target['dev'],
                                          'address', args)
            v_xml.xmltreefile.write()
            v_xml.sync()
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        test_step = params.get("sub_test_step")
        if test_step:
            # NOTE(review): eval() dispatches to one of the local sub-test
            # functions from a test parameter; safe only because params
            # come from trusted test configuration.
            eval(test_step)()
    finally:
        vm.destroy()
        libvirt.clean_up_snapshots(vm_name)
        backup_xml.sync()
        if guest_src_url and target_path:
            libvirt.delete_local_disk("file", path=target_path)
        if set_crypto_policy:
            # Called without argument to restore the default policy.
            utils_conn.update_crypto_policy()
def run(test, params, env):
    """
    KVM guest stop test:
    1) Log into a guest
    2) Check is HeavyLoad.exe installed , download and install it
       if not installed.
    3) Start Heavyload to make guest in heavyload
    4) Check vm is alive
    5) Stop heavyload process and clean temp file.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def loop_session_cmd(session, cmd):
        """Run ``cmd`` in the guest, retrying up to 3 times on shell
        status/timeout errors; return True/False on a result, None if
        every attempt errored out."""
        def session_cmd(session, cmd):
            try:
                return session.cmd_status(cmd) == 0
            except (aexpect.ShellStatusError, aexpect.ShellTimeoutError):
                pass

        count = 0
        while count < 3:
            ret = session_cmd(session, cmd)
            if ret is not None:
                return ret
            count += 1
        return None

    def add_option(cmd, key, val):
        """
        Append options into command, or replace an already-present
        "/KEY value" pair with the new value.

        :param cmd: Command line being built.
        :param key: Option name, without the leading slash.
        :param val: Option value; when falsy an existing option is left
                    untouched (only appended if missing).
        :return: The updated command line.
        """
        # Fixes vs. the previous version:
        # * the presence check used the literal pattern ".*/%s.*" without
        #   interpolating ``key``, so the replace branch never ran;
        # * re.I was passed as re.sub's positional ``count`` argument
        #   instead of ``flags``;
        # * the replacement pattern repeated the zero-width \b assertion
        #   ("\b+"), which is an invalid regex on Python 3.
        if re.search(r"/%s\b" % key, cmd, re.I):
            if val:
                rex = r"/%s\s+\S+\s*" % key
                rep = "/%s %s " % (key, val)
                cmd = re.sub(rex, rep, cmd, flags=re.I)
        else:
            cmd += " /%s %s " % (key, val)
        return cmd

    tmp_dir = data_dir.get_tmp_dir()
    install_path = params["install_path"].rstrip("\\")
    heavyload_bin = r'"%s\heavyload.exe"' % install_path
    start_cmd = "%s /CPU /MEMORY /FILE " % heavyload_bin
    stop_cmd = "taskkill /T /F /IM heavyload.exe"
    stop_cmd = params.get("stop_cmd", stop_cmd)
    start_cmd = params.get("start_cmd", start_cmd)
    check_running_cmd = "tasklist|findstr /I heavyload"
    check_running_cmd = params.get("check_running_cmd", check_running_cmd)
    test_installed_cmd = 'dir "%s"|findstr /I heavyload' % install_path
    test_installed_cmd = params.get("check_installed_cmd",
                                    test_installed_cmd)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)

    # Install HeavyLoad only when it is not already present in the guest.
    installed = session.cmd_status(test_installed_cmd) == 0
    if not installed:
        download_url = params.get("download_url")
        if download_url:
            # Fetch the installer on the host (md5-verified), then push
            # it into the guest.
            dst = r"c:\\"
            pkg_md5sum = params["pkg_md5sum"]
            error_context.context("Download HeavyLoadSetup.exe",
                                  logging.info)
            pkg_name = os.path.basename(download_url)
            pkg_path = os.path.join(tmp_dir, pkg_name)
            download.get_file(download_url, pkg_path,
                              hash_expected=pkg_md5sum)
            vm.copy_files_to(pkg_path, dst)
        else:
            # Fall back to the installer shipped on the winutils volume.
            dst = r"%s:\\" % utils_misc.get_winutils_vol(session)

        error_context.context("Install HeavyLoad in guest", logging.info)
        install_cmd = params["install_cmd"]
        install_cmd = re.sub(r"DRIVE:\\+", dst, install_cmd)
        session.cmd(install_cmd)
        config_cmd = params.get("config_cmd")
        if config_cmd:
            session.cmd(config_cmd)

    error_context.context("Start heavyload in guest", logging.info)
    # Generate the heavyload command automatically when requested,
    # stressing CPU, memory and disk according to what the guest has.
    if params.get("autostress") == "yes":
        free_mem = utils_misc.get_free_mem(session, "windows")
        free_disk = utils_misc.get_free_disk(session, "C:")
        start_cmd = r'"%s\heavyload.exe"' % params["install_path"]
        start_cmd = add_option(start_cmd, 'CPU', vm.cpuinfo.smp)
        start_cmd = add_option(start_cmd, 'MEMORY', free_mem)
        start_cmd = add_option(start_cmd, 'FILE', free_disk)
    else:
        start_cmd = params["start_cmd"]
    # Reformat command to ensure heavyload is started as expected.
    test_timeout = int(params.get("timeout", "60"))
    steping = 60
    if test_timeout < 60:
        logging.warn("Heavyload use mins as unit of timeout, given timeout "
                     "is too small (%ss), force set to 60s", test_timeout)
        test_timeout = 60
        steping = 30
    # HeavyLoad takes DURATION in whole minutes; use integer division so
    # Python 3 does not produce a float like "1.0" here.
    start_cmd = add_option(start_cmd, 'DURATION', test_timeout // 60)
    start_cmd = add_option(start_cmd, 'START', '')
    start_cmd = add_option(start_cmd, 'AUTOEXIT', '')
    logging.info("heavyload cmd: %s" % start_cmd)
    session.sendline(start_cmd)
    if not loop_session_cmd(session, check_running_cmd):
        test.error("heavyload process is not started")
    sleep_before_migration = int(params.get("sleep_before_migration", "0"))
    time.sleep(sleep_before_migration)

    error_context.context("Verify vm is alive", logging.info)
    utils_misc.wait_for(vm.verify_alive,
                        timeout=test_timeout * 1.2, step=steping)
    # /AUTOEXIT should have terminated heavyload by now; status 0 means
    # the process is still listed, i.e. it did not exit on time.
    if not session.cmd_status(check_running_cmd):
        test.fail("heavyload doesn't exit normally")
    if session:
        session.close()
def run(test, params, env):
    """
    Multicast test using iperf.

    1) Boot up VM(s)
    2) Prepare the test environment in server/client/host,install iperf
    3) Execute iperf tests, analyze the results

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def server_start(cmd, catch_data):
        """
        Start the iperf server in host, and check whether the guest have
        connected this server through multicast address of the server
        """
        # The server is stopped with SIGINT later, so process.run is
        # expected to raise CmdError; the output is then inspected for
        # the client-connected banner.
        try:
            process.run(cmd)
        except process.CmdError as e:
            if not re.findall(catch_data, e.result.stdout):
                test.fail("Client not connected '%s'" % str(e))
            # NOTE(review): "%" against a string with no conversion
            # specifier raises TypeError at runtime; this was probably
            # meant to be a ","-separated logging argument.
            logging.info("Client multicast test pass "
                         % re.findall(catch_data, str(e)))

    os_type = params.get("os_type")
    win_iperf_url = params.get("win_iperf_url")
    linux_iperf_url = params.get("linux_iperf_url")
    iperf_version = params.get("iperf_version", "2.0.12")
    transfer_timeout = int(params.get("transfer_timeout", 360))
    login_timeout = int(params.get("login_timeout", 360))

    dir_name = test.tmpdir
    tmp_dir = params.get("tmp_dir", "/tmp/")
    host_path = os.path.join(dir_name, "iperf")

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)
    clean_cmd = ""
    client_ip = vm.get_address(0)

    try:
        error_context.context("Test Env setup")
        iperf_downloaded = 0
        iperf_url = linux_iperf_url

        app_check_cmd = params.get("linux_app_check_cmd", "false")
        app_check_exit_status = int(
            params.get("linux_app_check_exit_status", "0"))
        exit_status = process.system(app_check_cmd,
                                     ignore_status=True, shell=True)

        # Install gcc-c++ to compile iperf
        gcc_cpp_chk_cmd = params.get("gcc_cpp_chk_cmd")
        gcc_cpp = process.system_output(gcc_cpp_chk_cmd, shell=True)
        if not gcc_cpp:
            if not utils_package.package_install("gcc-c++", session):
                test.cancel("Please install gcc-c++ to proceed")

        # Install iperf in host if not available
        default_install_cmd = "tar zxvf %s; cd iperf-%s;"
        default_install_cmd += " ./configure; make; make install"
        install_cmd = params.get("linux_install_cmd", default_install_cmd)
        if not exit_status == app_check_exit_status:
            error_context.context("install iperf in host", logging.info)
            download.get_file(iperf_url, host_path)
            iperf_downloaded = 1
            process.system(install_cmd % (host_path, iperf_version),
                           shell=True)

        # The guest may not be running Linux, see if we should update the
        # app_check variables
        if not os_type == "linux":
            app_check_cmd = params.get("win_app_check_cmd", "false")
            app_check_exit_status = int(
                params.get("win_app_check_exit_status", "0"))

        # Install iperf in guest if not available
        if not session.cmd_status(app_check_cmd) == app_check_exit_status:
            error_context.context("install iperf in guest", logging.info)
            if not iperf_downloaded:
                download.get_file(iperf_url, host_path)
            if os_type == "linux":
                guest_path = (tmp_dir + "iperf.tgz")
                clean_cmd = "rm -rf %s iperf-%s" % (guest_path,
                                                    iperf_version)
            else:
                guest_path = (tmp_dir + "iperf.exe")
                iperf_url = win_iperf_url
                # Windows guests get a pre-built binary instead of source.
                download.get_file(iperf_url, host_path)
                clean_cmd = "del %s" % guest_path
            vm.copy_files_to(host_path, guest_path,
                             timeout=transfer_timeout)

            if os_type == "linux":
                session.cmd(install_cmd % (guest_path, iperf_version))

        muliticast_addr = params.get("muliticast_addr", "225.0.0.3")
        multicast_port = params.get("multicast_port", "5001")

        step_msg = "Start iperf server, bind host to multicast address %s "
        error_context.context(step_msg % muliticast_addr, logging.info)
        server_start_cmd = ("iperf -s -u -B %s -p %s " %
                            (muliticast_addr, multicast_port))

        default_flag = "%s port %s connected with %s"
        connected_flag = params.get("connected_flag", default_flag)
        catch_data = connected_flag % (muliticast_addr, multicast_port,
                                       client_ip)

        # The blocking iperf server runs in a background thread; it exits
        # when the process is interrupted below.
        t = utils_misc.InterruptedThread(server_start,
                                         (server_start_cmd, catch_data))
        t.start()
        if not _process_is_alive("iperf"):
            test.error("Start iperf server failed cmd: %s" %
                       server_start_cmd)
        logging.info("Server start successfully")

        step_msg = "In client try to connect server and transfer file "
        step_msg += " through multicast address %s"
        error_context.context(step_msg % muliticast_addr, logging.info)
        if os_type == "linux":
            client_cmd = "iperf"
        else:
            client_cmd = guest_path
        start_cmd = params.get("start_client_cmd", "%s -c %s -u -p %s")
        start_client_cmd = start_cmd % (client_cmd, muliticast_addr,
                                        multicast_port)
        session.cmd(start_client_cmd)
        logging.info("Client start successfully")

        error_context.context("Test finish, check the result", logging.info)
        # SIGINT the server so the background thread can collect output.
        process.system("pkill -2 iperf")
        t.join(timeout=60)

    finally:
        if _process_is_alive("iperf"):
            process.system("killall -9 iperf")
        process.system("rm -rf %s" % host_path)
        if session:
            if clean_cmd:
                session.cmd(clean_cmd)
            session.close()
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of rng.

    Boots (or hot-plugs into) a guest an rng device using the model given by
    the 'virtio_model' param, optionally pinning its PCI address to a
    pcie-to-pci-bridge slot or a pcie-root-port, then verifies the device is
    visible inside the guest and survives save/restore (cold-plug case) or
    can be detached again (hot-plug case).

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def get_free_pci_slot():
        """
        Get a free slot from pcie-to-pci-bridge.

        Scans the addresses of all devices currently on the bridge
        (``pci_devices`` / ``pci_bridge_index`` come from the enclosing
        scope) and returns the first slot index in 1..29 not in use.

        :return: The free slot as a "%0#4x"-formatted string, or None
                 if every slot 1..29 is taken
        """
        used_slot = []
        for dev in pci_devices:
            address = dev.find('address')
            if (address is not None and
                    address.get('bus') == pci_bridge_index):
                used_slot.append(address.get('slot'))
        for slot_index in range(1, 30):
            slot = "%0#4x" % slot_index
            if slot not in used_slot:
                return slot
        return None

    def get_free_root_port():
        """
        Get a free root port for the rng device.

        Prefers an existing pcie-root-port controller whose bus is not yet
        referenced by any device address; if none is free, adds a new
        pcie-root-port controller to the domain and returns its bus.

        :return: The bus index of a free root port ("%0#4x" string),
                 or None if no index in 1..29 is available
        """
        root_ports = set()
        other_ports = set()
        used_slot = set()
        # Record the bus indexes for all pci controllers
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-root-port':
                root_ports.add(controller.get('index'))
            else:
                other_ports.add(controller.get('index'))
        # Record the addresses being allocated for all pci devices
        pci_devices = vmxml.xmltreefile.find('devices').getchildren()
        for dev in pci_devices:
            address = dev.find('address')
            if address is not None:
                used_slot.add(address.get('bus'))
        # Find a bus address that is still unused
        for bus_index in root_ports:
            bus = "%0#4x" % int(bus_index)
            if bus not in used_slot:
                return bus
        # Add a new pcie-root-port if no free one exists
        for index in range(1, 30):
            if index not in (root_ports | other_ports):
                contr_dict = {'controller_type': 'pci',
                              'controller_index': index,
                              'controller_model': 'pcie-root-port'}
                cntl_add = libvirt.create_controller_xml(contr_dict)
                libvirt.add_controller(vm_name, cntl_add)
                return "%0#4x" % int(index)
        return None

    def check_plug_to(bus_type='pcie-to-pci-bridge'):
        """
        Check if the rng device is plugged onto the expected controller.

        :param bus_type: The controller model the device is expected to
                         be plugged to
        :return: True if plugged onto 'bus_type', otherwise False
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng = vmxml.xmltreefile.find('devices').find('rng')
        # eval() turns the "0x.." hex string from the XML into an int so it
        # can be compared against the controller index below.
        bus = int(eval(rng.find('address').get('bus')))
        controllers = vmxml.get_controllers('pci')
        for controller in controllers:
            if controller.get('index') == bus:
                if controller.get('model') == bus_type:
                    return True
                break
        return False

    def check_rng_inside_guest():
        """
        Check the rng device inside the guest.

        Runs params['check_cmd'] in the guest session; fails the test when
        the device node is missing although it is expected to exist.
        """
        check_cmd = params['check_cmd']
        lspci_output = session.cmd_output(check_cmd)
        # Kill any hexdump left reading the rng device by check_cmd
        session.cmd_output('pkill -9 hexdump')
        if 'No such file or directory' in lspci_output and device_exists:
            test.fail('Can not detect device by %s.' % check_cmd)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']
    boot_with_rng = (params.get('boot_with_rng', 'yes') == 'yes')
    hotplug = (params.get('hotplug', 'no') == 'yes')
    device_exists = (params.get('device_exists', 'yes') == 'yes')
    plug_to = params.get('plug_to', '')

    # virtio-(non-)transitional models require libvirt >= 5.0.0
    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")
    # Download and update image if required
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
    # Add 'pcie-to-pci-bridge' if there is no one
    pci_controllers = vmxml.get_controllers('pci')
    for controller in pci_controllers:
        if controller.get('model') == 'pcie-to-pci-bridge':
            pci_bridge = controller
            break
    else:
        contr_dict = {'controller_type': 'pci',
                      'controller_model': 'pcie-to-pci-bridge'}
        pci_bridge = libvirt.create_controller_xml(contr_dict)
        libvirt.add_controller(vm_name, pci_bridge)
    pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))
    try:
        # Update nic and vm disks
        if (params["os_variant"] == 'rhel6' or
                'rhel6' in params.get("shortname")):
            # rhel6 guests need the transitional model to keep networking up
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        # vmxml will not be updated since set_vm_disk
        # sync with another dumped xml inside the function
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Remove existing rng devices if there are any
        rng_devs = vmxml.get_devices('rng')
        for rng in rng_devs:
            vmxml.del_device(rng)
        vmxml.xmltreefile.write()
        vmxml.sync()
        # Generate new rng xml per configuration
        rng_xml = libvirt.create_rng_xml({"rng_model": virtio_model})
        if params.get('specify_addr', 'no') == 'yes':
            # pci_devices is read by get_free_pci_slot() via closure
            pci_devices = vmxml.xmltreefile.find('devices').getchildren()
            addr = rng_xml.new_rng_address()
            if plug_to == 'pcie-root-port':
                bus = get_free_root_port()
                addr.set_attrs({'bus': bus})
            else:
                slot = get_free_pci_slot()
                addr.set_attrs({'bus': pci_bridge_index, 'slot': slot})
            rng_xml.address = addr
        if boot_with_rng:
            # Add to vm if required
            libvirt.add_vm_device(vmxml, rng_xml)
        if not vm.is_alive():
            vm.start()
        if hotplug:
            # Hotplug rng if required
            file_arg = rng_xml.xml
            with open(file_arg) as rng_file:
                logging.debug("Attach rng by XML: %s", rng_file.read())
            s_attach = virsh.attach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_attach)
            # NOTE(review): the return value of check_plug_to() is not
            # asserted here, so a wrong bus placement would go unnoticed.
            check_plug_to(plug_to)
        session = vm.wait_for_login()
        check_rng_inside_guest()
        if hotplug:
            # Unplug rng if hotplugged previously
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            rng = vmxml.get_devices('rng')[0]
            file_arg = rng.xml
            with open(file_arg) as rng_file:
                logging.debug("Detach rng by XML: %s", rng_file.read())
            s_detach = virsh.detach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_detach)
        if not hotplug:
            # Cold-plug case: verify the device survives save/restore
            session.close()
            save_path = os.path.join(
                data_dir.get_tmp_dir(), '%s.save' % params['os_variant'])
            ret = virsh.save(vm_name, save_path)
            libvirt.check_exit_status(ret)
            ret = virsh.restore(save_path)
            libvirt.check_exit_status(ret)
            session = vm.wait_for_login()
            check_rng_inside_guest()
            process.run('rm -f %s' % save_path, ignore_status=True)
    finally:
        # Always destroy the VM and restore the original domain XML
        vm.destroy()
        backup_xml.sync()
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of
    interface.

    Updates the guest's interface model per 'virtio_model', verifies network
    connectivity with ping, optionally runs a sub-test (attach_nic or
    save_restore, chosen by 'sub_test_step') and pings again afterwards.

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login.

        :return: The new login session
        """
        vm.destroy()
        vm.start()
        return vm.wait_for_login()

    def check_plug_to_pci_bridge(vm_name, mac):
        """
        Check if the nic is plugged onto pcie-to-pci-bridge.

        :param vm_name: Vm name
        :param mac: The mac address of plugged interface
        :return: True if plugged onto pcie-to-pci-bridge, otherwise False
        """
        v_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        interface = v_xml.get_iface_all()[mac]
        # eval() converts the "0x.." hex string from the XML into an int
        bus = int(eval(interface.find('address').get('bus')))
        controllers = vmxml.get_controllers('pci')
        for controller in controllers:
            if controller.get('index') == bus:
                if controller.get('model') == 'pcie-to-pci-bridge':
                    return True
                break
        return False

    def detect_new_nic(mac):
        """
        Detect the new interface by domiflist.

        :param mac: The mac address of plugged interface
        :return: Plugged interface name
        """
        def check_mac_exist():
            all_infos = libvirt.get_interface_details(vm_name)
            for nic_info in all_infos:
                if nic_info.get('mac') == mac:
                    return nic_info.get('interface')
            return False

        plugged_nic = utils_misc.wait_for(lambda: check_mac_exist(), 5)
        if not plugged_nic:
            test.fail("Failed to plug device %s" % mac)
        return plugged_nic

    def renew_ip_address(session, mac, guest_os_type):
        """
        Renew ip for plugged nic.

        :param session: Vm session
        :param mac: The mac address of plugged interface
        :param guest_os_type: Guest os type, Linux or Windows
        """
        if guest_os_type == 'Windows':
            utils_net.restart_windows_guest_network_by_key(
                session, "macaddress", mac)
        # NOTE(review): there is no return after the Windows branch, so the
        # Linux-specific calls below also run for Windows guests — presumably
        # a 'return' is intended there; confirm against callers.
        ifname = utils_net.get_linux_ifname(session, mac)
        utils_net.create_network_script(ifname, mac, 'dhcp', '255.255.255.0')
        utils_net.restart_guest_network(session, mac)
        # Flush stale ARP entries so the renewed address is resolvable
        arp_clean = "arp -n|awk '/^[1-9]/{print \"arp -d \" $1}'|sh"
        session.cmd_output_safe(arp_clean)

    def get_hotplug_nic_ip(vm, nic, session, guest_os_type):
        """
        Get ip of the plugged interface.

        :param vm: Vm object
        :param nic: Nic object
        :param session: Vm session
        :param guest_os_type: Guest os type, Linux or Windows
        :return: Nic ip address, or None if it cannot be obtained
        """
        def __get_address():
            """
            Get ip address and return it; configure a new ip if the device
            exists but has no ip yet.

            :return: Ip address if obtained, otherwise None
            """
            try:
                index = [
                    _idx for _idx, _nic in enumerate(vm.virtnet)
                    if _nic == nic
                ][0]
                return vm.wait_for_get_address(index, timeout=90)
            except IndexError:
                test.error("Nic '%s' not exists in VM '%s'" %
                           (nic["nic_name"], vm.name))
            except (virt_vm.VMIPAddressMissingError,
                    virt_vm.VMAddressVerificationError):
                # No/bad address yet: try to (re)configure via DHCP and let
                # the outer wait_for retry.
                renew_ip_address(session, nic["mac"], guest_os_type)
            return

        # Wait until an ip address is configured for the nic device
        nic_ip = utils_misc.wait_for(__get_address, timeout=360)
        if nic_ip:
            return nic_ip
        # Log diagnostics before giving up
        cached_ip = vm.address_cache.get(nic["mac"])
        arps = process.system_output("arp -aen").decode()
        logging.debug("Can't get IP address:")
        logging.debug("\tCached IP: %s", cached_ip)
        logging.debug("\tARP table: %s", arps)
        return None

    def check_nic_removed(mac, session):
        """
        Check whether the nic with the given mac address has been removed
        from the guest.

        :param mac: The mac address of the unplugged interface
        :param session: Vm session
        :return: True if the expected "not found" error is raised,
                 a falsy value otherwise
        """
        except_mesg = ''
        try:
            if guest_os_type == 'Windows':
                except_mesg = "Get nic netconnectionid failed"
                utils_net.restart_windows_guest_network_by_key(
                    session, "macaddress", mac)
            else:
                except_mesg = ("Failed to determine interface"
                               " name with mac %s" % mac)
                utils_net.get_linux_ifname(session, mac)
        except exceptions.TestError as e:
            # The lookup failing with the expected message means the nic
            # is gone.
            if except_mesg in str(e):
                return True
            else:
                return False

    def attach_nic():  # pylint: disable=W0611
        """
        Attach interface, by xml or cmd, for both hot and cold plug.

        Plugs a new nic (via attach-interface or attach-device per
        'plug_method'), verifies it gets an ip inside the guest, then
        detaches it and verifies it is removed.
        """
        def create_iface_xml(mac):
            """
            Create interface xml file.

            :param mac: The mac address of nic device
            """
            iface = Interface(type_name='network')
            iface.source = iface_source
            iface.model = iface_model
            iface.mac_address = mac
            logging.debug("Create new interface xml: %s", iface)
            return iface

        plug_method = params.get('plug_method', 'interface')
        cold_plug = params.get('cold_plug', 'no')
        mac = utils_net.generate_mac_address_simple()
        iface_source = {'network': 'default'}
        iface_model = params["virtio_model"]
        options = ("network %s --model %s --mac %s" %
                   (iface_source['network'], iface_model, mac))
        nic_params = {
            'mac': mac,
            'nettype': params['nettype'],
            'ip_version': 'ipv4'
        }
        if cold_plug == "yes":
            options += ' --config'
        if plug_method == 'interface':
            # Hotplug nic via attach-interface
            ret = virsh.attach_interface(vm_name, options, ignore_status=True)
        else:
            # Hotplug nic via attach-device
            nic_xml = create_iface_xml(mac)
            nic_xml.xmltreefile.write()
            xml_file = nic_xml.xml
            with open(xml_file) as nic_file:
                logging.debug("Attach device by XML: %s", nic_file.read())
            ret = virsh.attach_device(domainarg=vm_name,
                                      filearg=xml_file,
                                      debug=True)
        libvirt.check_exit_status(ret)
        if cold_plug == "yes":
            reboot()  # Reboot guest if it is a cold plug test
        detect_new_nic(mac)
        if plug_method == 'interface' and cold_plug == 'no':
            # NOTE(review): the boolean result is not asserted here
            check_plug_to_pci_bridge(vm_name, mac)
        session = vm.wait_for_login(serial=True)
        # Add nic to VM object for further checks
        nic_name = vm.add_nic(**nic_params)["nic_name"]
        nic = vm.virtnet[nic_name]
        # Config ip inside guest for the newly added nic
        if not utils_misc.wait_for(
                lambda: get_hotplug_nic_ip(vm, nic, session, guest_os_type),
                timeout=30):
            test.fail("Does not find plugged nic %s in guest" % mac)
        options = ("network %s" % mac)
        if cold_plug == "yes":
            options += ' --config'
        # Detach nic device
        if plug_method == 'interface':
            ret = virsh.detach_interface(vm_name, options, ignore_status=True)
        else:
            with open(xml_file) as nic_file:
                logging.debug("Detach device by XML: %s", nic_file.read())
            ret = virsh.detach_device(domainarg=vm_name,
                                      filearg=xml_file,
                                      debug=True)
        libvirt.check_exit_status(ret)
        if cold_plug == "yes":
            session = reboot()  # Reboot guest if it is a cold plug test
        # Check if nic is removed from guest
        if not utils_misc.wait_for(lambda: check_nic_removed(mac, session),
                                   timeout=30):
            test.fail("The nic %s still exist in guest after being unplugged" %
                      nic_name)

    def save_restore():  # pylint: disable=W0611
        """
        Sub test for save and restore.
        """
        save_path = os.path.join(data_dir.get_tmp_dir(),
                                 '%s.save' % params['os_variant'])
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)

    def ping_test(restart_network=False):
        """
        Basic ping test for interface.

        :param restart_network: True or False. Whether to restart the
                                guest network before pinging
        :raise: test.fail if ping test fails
        """
        session = vm.wait_for_login()
        if restart_network:
            utils_net.restart_guest_network(session)
        dest = params.get('ping_dest', 'www.baidu.com')
        status, output = utils_test.ping(dest, 10, session=session,
                                         timeout=20)
        session.close()
        if status != 0:
            test.fail("Ping failed, status: %s,"
                      " output: %s" % (status, output))

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    guest_src_url = params.get("guest_src_url")
    params['disk_model'] = params['virtio_model']
    guest_os_type = params['os_type']
    target_path = None

    # virtio-(non-)transitional models require libvirt >= 5.0.0
    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")
    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
        libvirt.set_vm_disk(vm, params)
    # Add pcie-to-pci-bridge when there is no one
    pci_controllers = vmxml.get_controllers('pci')
    for controller in pci_controllers:
        if controller.get('model') == 'pcie-to-pci-bridge':
            break
    else:
        contr_dict = {
            'controller_type': 'pci',
            'controller_model': 'pcie-to-pci-bridge'
        }
        cntl_add = libvirt.create_controller_xml(contr_dict)
        libvirt.add_controller(vm_name, cntl_add)
    try:
        # Update interface model as defined
        iface_params = {'model': params['virtio_model']}
        libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        if not vm.is_alive():
            vm.start()
        # Test if nic works well via ping
        ping_test()
        test_step = params.get("sub_test_step")
        if test_step:
            # Dispatch to attach_nic() or save_restore() by param name;
            # eval() is safe here only because test_step comes from the
            # test configuration, not untrusted input.
            eval(test_step)()
            # Test if nic still works well after the sub-step test
            ping_test(True)
    finally:
        # Always destroy the VM, restore the original XML and remove any
        # downloaded image
        vm.destroy()
        backup_xml.sync()
        if guest_src_url and target_path:
            libvirt.delete_local_disk("file", path=target_path)
def run(test, params, env):
    """
    Test virsh migrate command.

    Prepares a guest on NFS shared storage with virtio-transitional style
    devices (disk/interface/memballoon/rng per params), migrates it to the
    remote host, checks the result (including expected-failure cases and
    post-migration XML), and optionally migrates it back.

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens.

        :param session: The session object to the host
        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_migration_res(result):
        """
        Check if the migration result is as expected.

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        if not result:
            test.error("No migration result is returned.")
        logging.info("Migration out: %s", result.stdout_text.strip())
        logging.info("Migration error: %s", result.stderr_text.strip())
        if status_error:
            # Migration should fail
            if err_msg:
                # Special error messages are expected
                if not re.search(err_msg, result.stderr_text.strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" %
                              (err_msg, result.stderr_text.strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(result.stderr_text.strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    # NOTE(review): virsh_args, disk_target and vm_session are assigned but
    # never used in this function.
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    guest_src_url = params.get("guest_src_url")
    guest_src_path = params.get("guest_src_path",
                                "/var/lib/libvirt/images/guest.img")
    check_disk = "yes" == params.get("check_disk")
    disk_model = params.get("disk_model")
    disk_target = params.get("disk_target", "vda")
    controller_model = params.get("controller_model")
    check_interface = "yes" == params.get("check_interface")
    iface_type = params.get("iface_type", "network")
    iface_model = params.get("iface_model", "virtio")
    iface_params = {
        'type': iface_type,
        'model': iface_model,
        'del_addr': True,
        'source': '{"network": "default"}'
    }
    check_memballoon = "yes" == params.get("check_memballoon")
    membal_model = params.get("membal_model")
    check_rng = "yes" == params.get("check_rng")
    rng_model = params.get("rng_model")
    migr_vm_back = "yes" == params.get("migrate_vm_back", "no")
    status_error = "yes" == params.get("status_error", "no")
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }
    xml_check_after_mig = params.get("guest_xml_check_after_mig")
    err_msg = params.get("err_msg")
    vm_session = None
    remote_virsh_session = None
    vm = None
    mig_result = None

    # virtio-(non-)transitional models require libvirt >= 5.0.0
    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # Make sure all of parameters are assigned a valid value
    # NOTE(review): check_parameters() was already called above; this second
    # call looks redundant.
    check_parameters(test, params)
    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    migration_test = migration.MigrationTest()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # download guest source and update interface model to keep guest up
        if guest_src_url:
            blk_source = download.get_file(guest_src_url, guest_src_path)
            if not blk_source:
                test.error("Fail to download image.")
            params["blk_source_name"] = blk_source
            if (not check_interface) and iface_model:
                iface_dict = {'model': iface_model}
                libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict)
            if not check_disk:
                params["disk_model"] = "virtio-transitional"

        if check_interface:
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)

        if check_memballoon:
            membal_dict = {'membal_model': membal_model}
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.update_memballoon_xml(dom_xml, membal_dict)

        if check_rng:
            rng_dict = {'rng_model': rng_model}
            rng_xml = libvirt.create_rng_xml(rng_dict)
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.add_vm_device(dom_xml, rng_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        check_vm_network_accessed()

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra)
        mig_result = migration_test.ret

        check_migration_res(mig_result)

        if int(mig_result.exit_status) == 0:
            # Migration succeeded: verify the guest responds on the target
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = (remote_virsh_session.dumpxml(
                vm_name, debug=True,
                ignore_status=True).stdout_text.strip())
            # Pick the model string to look for; later checks override
            # earlier ones when several check_* flags are set.
            if check_disk:
                check_str = disk_model if disk_model else controller_model
            if check_interface:
                check_str = iface_model
            if check_memballoon:
                check_str = membal_model
            if check_rng:
                check_str = rng_model

            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                test.fail("Fail to search '%s' in target guest XML:\n%s" %
                          (xml_check_after_mig, target_guest_dumpxml))
            remote_virsh_session.close_session()

        # Execute migration from remote
        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                # No passwordless ssh yet: set it up, then re-check
                ssh_connection.conn_setup()
                ssh_connection.conn_check()
            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s" %
                          (cmd, cmd_result))
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        vm.connect_uri = ''
        migration_test.cleanup_dest_vm(vm, src_uri, dest_uri)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params, cleanup=True)
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
        # NOTE(review): blk_source is only bound inside the guest_src_url
        # branch above; the guest_src_url guard keeps this from raising.
        if guest_src_url and blk_source:
            libvirt.delete_local_disk("file", path=blk_source)
def run(test, params, env): """ Convert specific esx guest """ for v in list(params.values()): if "V2V_EXAMPLE" in v: test.cancel("Please set real value for %s" % v) if utils_v2v.V2V_EXEC is None: raise ValueError('Missing command: virt-v2v') version_requried = params.get("version_requried") vpx_hostname = params.get('vpx_hostname') vpx_passwd = params.get("vpx_password") esxi_host = esx_ip = params.get('esx_hostname') vpx_dc = params.get('vpx_dc') vm_name = params.get('main_vm') output_mode = params.get('output_mode') pool_name = params.get('pool_name', 'v2v_test') pool_type = params.get('pool_type', 'dir') pool_target = params.get('pool_target_path', 'v2v_pool') pvt = libvirt.PoolVolumeTest(test, params) v2v_timeout = int(params.get('v2v_timeout', 1200)) v2v_cmd_timeout = int(params.get('v2v_cmd_timeout', 18000)) v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else '' if params.get("v2v_opts"): # Add a blank by force v2v_opts += ' ' + params.get("v2v_opts") status_error = 'yes' == params.get('status_error', 'no') address_cache = env.get('address_cache') checkpoint = params.get('checkpoint', '') skip_vm_check = params.get('skip_vm_check', 'no') skip_reason = params.get('skip_reason') error_list = [] remote_host = vpx_hostname # For VDDK input_transport = params.get("input_transport") vddk_libdir = params.get('vddk_libdir') # nfs mount source vddk_libdir_src = params.get('vddk_libdir_src') vddk_thumbprint = params.get('vddk_thumbprint') src_uri_type = params.get('src_uri_type') esxi_password = params.get('esxi_password') json_disk_pattern = params.get('json_disk_pattern') # For construct rhv-upload option in v2v cmd output_method = params.get("output_method") rhv_upload_opts = params.get("rhv_upload_opts") storage_name = params.get('storage_name') os_pool = os_storage = params.get('output_storage', 'default') # for get ca.crt file from ovirt engine rhv_passwd = params.get("rhv_upload_passwd") rhv_passwd_file = params.get("rhv_upload_passwd_file") 
ovirt_engine_passwd = params.get("ovirt_engine_password") ovirt_hostname = params.get("ovirt_engine_url").split( '/')[2] if params.get("ovirt_engine_url") else None ovirt_ca_file_path = params.get("ovirt_ca_file_path") local_ca_file_path = params.get("local_ca_file_path") os_version = params.get('os_version') os_type = params.get('os_type') virtio_win_path = params.get('virtio_win_path') # qemu-guest-agent path in virtio-win or rhv-guest-tools-iso qa_path = params.get('qa_path') # download url of qemu-guest-agent qa_url = params.get('qa_url') v2v_sasl = None # default values for v2v_cmd auto_clean = True cmd_only = False def log_fail(msg): """ Log error and update error list """ logging.error(msg) error_list.append(msg) def check_device_exist(check, virsh_session_id): """ Check if device exist after convertion """ xml = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout if check == 'cdrom': if "device='cdrom'" not in xml: log_fail('CDROM no longer exists') def check_vmtools(vmcheck, check): """ Check whether vmware tools packages have been removed, or vmware-tools service has stopped :param vmcheck: VMCheck object for vm checking :param check: Checkpoint of different cases :return: None """ if check == 'vmtools': logging.info('Check if packages been removed') pkgs = vmcheck.session.cmd('rpm -qa').strip() removed_pkgs = params.get('removed_pkgs').strip().split(',') if not removed_pkgs: test.error('Missing param "removed_pkgs"') for pkg in removed_pkgs: if pkg in pkgs: log_fail('Package "%s" not removed' % pkg) elif check == 'vmtools_service': logging.info('Check if service stopped') vmtools_service = params.get('service_name') status = utils_misc.get_guest_service_status( vmcheck.session, vmtools_service) logging.info('Service %s status: %s', vmtools_service, status) if status != 'inactive': log_fail('Service "%s" is not stopped' % vmtools_service) def check_modprobe(vmcheck): """ Check whether content of /etc/modprobe.conf meets expectation """ content = 
vmcheck.session.cmd('cat /etc/modprobe.conf').strip() logging.debug(content) cfg_content = params.get('cfg_content') if not cfg_content: test.error('Missing content for search') logging.info('Search "%s" in /etc/modprobe.conf', cfg_content) pattern = r'\s+'.join(cfg_content.split()) if not re.search(pattern, content): log_fail('Not found "%s"' % cfg_content) def check_device_map(vmcheck): """ Check if the content of device.map meets expectation. """ logging.info(vmcheck.session.cmd('fdisk -l').strip()) device_map = params.get('device_map_path') content = vmcheck.session.cmd('cat %s' % device_map) logging.debug('Content of device.map:\n%s', content) logging.info('Found device: %d', content.count('/dev/')) logging.info('Found virtio device: %d', content.count('/dev/vd')) if content.count('/dev/') != content.count('/dev/vd'): log_fail('Content of device.map not correct') else: logging.info('device.map has been remaped to "/dev/vd*"') def check_resume_swap(vmcheck): """ Check the content of grub files meet expectation. 
""" if os_version == 'rhel7': chkfiles = [ '/etc/default/grub', '/boot/grub2/grub.cfg', '/etc/grub2.cfg' ] if os_version == 'rhel6': chkfiles = ['/boot/grub/grub.conf', '/etc/grub.conf'] for file_i in chkfiles: status, content = vmcheck.run_cmd('cat %s' % file_i) if status != 0: log_fail('%s does not exist' % file_i) resume_dev_count = content.count('resume=/dev/') if resume_dev_count == 0 or resume_dev_count != content.count( 'resume=/dev/vd'): reason = 'Maybe the VM\'s swap pariton is lvm' log_fail('Content of %s is not correct or %s' % (file_i, reason)) content = vmcheck.session.cmd('cat /proc/cmdline') logging.debug('Content of /proc/cmdline:\n%s', content) if 'resume=/dev/vd' not in content: log_fail('Content of /proc/cmdline is not correct') def check_rhev_file_exist(vmcheck): """ Check if rhev files exist """ file_path = { 'rhev-apt.exe': r'C:\rhev-apt.exe', 'rhsrvany.exe': r'"C:\Program Files\Guestfs\Firstboot\rhsrvany.exe"' } for key in file_path: status = vmcheck.session.cmd_status('dir %s' % file_path[key]) if status == 0: logging.info('%s exists' % key) else: log_fail('%s does not exist after convert to rhv' % key) def check_file_architecture(vmcheck): """ Check the 3rd party module info :param vmcheck: VMCheck object for vm checking """ content = vmcheck.session.cmd('uname -r').strip() status = vmcheck.session.cmd_status( 'rpm -qf /lib/modules/%s/fileaccess/fileaccess_mod.ko ' % content) if status == 0: log_fail('3rd party module info is not correct') else: logging.info( 'file /lib/modules/%s/fileaccess/fileaccess_mod.ko is not owned by any package' % content) def check_windows_ogac(vmcheck): """ Check qemu-guest-agent service in VM :param vmcheck: VMCheck object for vm checking """ try: res = utils_misc.wait_for(lambda: re.search( 'running', vmcheck.get_service_info('qemu-ga'), re.I), 300, step=30) except ShellProcessTerminatedError: # Windows guest may reboot after installing qemu-ga service logging.debug('Windows guest is rebooting') if 
vmcheck.session: vmcheck.session.close() vmcheck.session = None vmcheck.create_session() res = utils_misc.wait_for(lambda: re.search( 'running', vmcheck.get_service_info('qemu-ga'), re.I), 300, step=30) if not res: test.fail('Not found running qemu-ga service') def check_linux_ogac(vmcheck): """ Check qemu-guest-agent service in VM :param vmcheck: VMCheck object for vm checking """ def get_pkgs(pkg_path): """ Get all qemu-guest-agent pkgs """ pkgs = [] for _, _, files in os.walk(pkg_path): for file_name in files: pkgs.append(file_name) return pkgs def get_pkg_version_vm(): """ Get qemu-guest-agent version in VM """ vender = vmcheck.get_vm_os_vendor() if vender in ['Ubuntu', 'Debian']: cmd = 'dpkg -l qemu-guest-agent' else: cmd = 'rpm -q qemu-guest-agent' _, output = vmcheck.run_cmd(cmd) pkg_ver_ptn = [ r'qemu-guest-agent +[0-9]+:(.*?dfsg.*?) +', r'qemu-guest-agent-(.*?)\.x86_64' ] for ptn in pkg_ver_ptn: if re.search(ptn, output): return re.search(ptn, output).group(1) return '' if os.path.isfile(os.getenv('VIRTIO_WIN')): mount_point = utils_v2v.v2v_mount(os.getenv('VIRTIO_WIN'), 'rhv_tools_setup_iso', fstype='iso9660') export_path = params['tmp_mount_point'] = mount_point else: export_path = os.getenv('VIRTIO_WIN') qemu_guest_agent_dir = os.path.join(export_path, qa_path) all_pkgs = get_pkgs(qemu_guest_agent_dir) logging.debug('The installing qemu-guest-agent is: %s' % all_pkgs) vm_pkg_ver = get_pkg_version_vm() logging.debug('qemu-guest-agent verion in vm: %s' % vm_pkg_ver) # If qemu-guest-agent version in VM is higher than the pkg in qemu-guest-agent-iso, # v2v will not update the qemu-guest-agent version and report a warning. # # e.g. 
# virt-v2v: warning: failed to install QEMU Guest Agent: command: package # qemu-guest-agent-10:2.12.0-3.el7.x86_64 (which is newer than # qemu-guest-agent-10:2.12.0-2.el7.x86_64) is already installed if not any([vm_pkg_ver in pkg for pkg in all_pkgs]): logging.debug( 'Wrong qemu-guest-agent version, maybe it is higher than package version in ISO' ) logging.info( 'Unexpected qemu-guest-agent version, set v2v log checking') expect_msg_ptn = r'virt-v2v: warning: failed to install QEMU Guest Agent.*?is newer than.*? is already installed' params.update({'msg_content': expect_msg_ptn, 'expect_msg': 'yes'}) # Check the service status of qemu-guest-agent in VM status_ptn = r'Active: active \(running\)|qemu-ga \(pid +[0-9]+\) is running' cmd = 'service qemu-ga status;systemctl status qemu-guest-agent' _, output = vmcheck.run_cmd(cmd) if not re.search(status_ptn, output): log_fail('qemu-guest-agent service exception') def check_ubuntools(vmcheck): """ Check open-vm-tools, ubuntu-server in VM :param vmcheck: VMCheck object for vm checking """ logging.info('Check if open-vm-tools service stopped') status = utils_misc.get_guest_service_status(vmcheck.session, 'open-vm-tools') logging.info('Service open-vm-tools status: %s', status) if status != 'inactive': log_fail('Service open-vm-tools is not stopped') else: logging.info('Check if the ubuntu-server exist') content = vmcheck.session.cmd('dpkg -s ubuntu-server') if 'install ok installed' in content: logging.info('ubuntu-server has not been removed.') else: log_fail('ubuntu-server has been removed') def global_pem_setup(f_pem): """ Setup global rhv server ca :param f_pem: ca file path """ ca_anchors_dir = '/etc/pki/ca-trust/source/anchors' shutil.copy(f_pem, ca_anchors_dir) process.run('update-ca-trust extract', shell=True) os.unlink(os.path.join(ca_anchors_dir, os.path.basename(f_pem))) def global_pem_cleanup(): """ Cleanup global rhv server ca """ process.run('update-ca-trust extract', shell=True) def cmd_remove_option(cmd, 
opt_pattern): """ Remove an option from cmd :param cmd: the cmd :param opt_pattern: a pattern stands for the option """ for item in re.findall(opt_pattern, cmd): cmd = cmd.replace(item, '').strip() return cmd def find_net(bridge_name): """ Find which network use specified bridge :param bridge_name: bridge name you want to find """ net_list = virsh.net_state_dict(only_names=True) net_name = '' if len(net_list): for net in net_list: net_info = virsh.net_info(net).stdout.strip() search = re.search(r'Bridge:\s+(\S+)', net_info) if search: if bridge_name == search.group(1): net_name = net else: logging.info('Conversion server has no network') return net_name def destroy_net(net_name): """ destroy network in conversion server """ if virsh.net_state_dict()[net_name]['active']: logging.info("Remove network %s in conversion server", net_name) virsh.net_destroy(net_name) if virsh.net_state_dict()[net_name]['autostart']: virsh.net_autostart(net_name, "--disable") output = virsh.net_list("--all").stdout.strip() logging.info(output) def start_net(net_name): """ start network in conversion server """ logging.info("Recover network %s in conversion server", net_name) virsh.net_autostart(net_name) if not virsh.net_state_dict()[net_name]['active']: virsh.net_start(net_name) output = virsh.net_list("--all").stdout.strip() logging.info(output) def check_result(result, status_error): """ Check virt-v2v command result """ def vm_check(status_error): """ Checking the VM """ if status_error: return if output_mode == 'json' and not check_json_output(params): test.fail('check json output failed') if output_mode == 'local' and not check_local_output(params): test.fail('check local output failed') if output_mode in ['null', 'json', 'local']: return # vmchecker must be put before skip_vm_check in order to clean up # the VM. 
vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker if skip_vm_check == 'yes': logging.info('Skip checking vm after conversion: %s' % skip_reason) return if output_mode == 'rhev': if not utils_v2v.import_vm_to_ovirt( params, address_cache, timeout=v2v_timeout): test.fail('Import VM failed') elif output_mode == 'libvirt': virsh.start(vm_name, debug=True) # Check guest following the checkpoint document after convertion logging.info('Checking common checkpoints for v2v') if checkpoint == 'ogac': # windows guests will reboot at any time after qemu-ga is # installed. The process cannot be controled. In order to # don't break vmchecker.run() process, It's better to put # check_windows_ogac before vmchecker.run(). Because in # check_windows_ogac, it waits until rebooting completes. vmchecker.checker.create_session() if os_type == 'windows': check_windows_ogac(vmchecker.checker) else: check_linux_ogac(vmchecker.checker) ret = vmchecker.run() if len(ret) == 0: logging.info("All common checkpoints passed") # Check specific checkpoints if checkpoint == 'cdrom': virsh_session = utils_sasl.VirshSessionSASL(params) virsh_session_id = virsh_session.get_id() check_device_exist('cdrom', virsh_session_id) virsh_session.close() if checkpoint.startswith('vmtools'): check_vmtools(vmchecker.checker, checkpoint) if checkpoint == 'modprobe': check_modprobe(vmchecker.checker) if checkpoint == 'device_map': check_device_map(vmchecker.checker) if checkpoint == 'resume_swap': check_resume_swap(vmchecker.checker) if checkpoint == 'rhev_file': check_rhev_file_exist(vmchecker.checker) if checkpoint == 'file_architecture': check_file_architecture(vmchecker.checker) if checkpoint == 'ubuntu_tools': check_ubuntools(vmchecker.checker) if checkpoint == 'without_default_net': if virsh.net_state_dict()[net_name]['active']: log_fail("Bridge virbr0 already started during conversion") # Merge 2 error lists error_list.extend(vmchecker.errors) libvirt.check_exit_status(result, 
status_error) output = result.stdout_text + result.stderr_text # VM or local output checking vm_check(status_error) # Log checking log_check = utils_v2v.check_log(params, output) if log_check: log_fail(log_check) if len(error_list): test.fail('%d checkpoints failed: %s' % (len(error_list), error_list)) try: if version_requried and not utils_v2v.compare_version( version_requried): test.cancel("Testing requries version: %s" % version_requried) v2v_params = { 'hostname': remote_host, 'hypervisor': 'esx', 'main_vm': vm_name, 'vpx_dc': vpx_dc, 'esx_ip': esx_ip, 'new_name': vm_name + utils_misc.generate_random_string(4), 'v2v_opts': v2v_opts, 'input_mode': 'libvirt', 'os_storage': os_storage, 'os_pool': os_pool, 'network': params.get('network'), 'bridge': params.get('bridge'), 'target': params.get('target'), 'password': vpx_passwd if src_uri_type != 'esx' else esxi_password, 'input_transport': input_transport, 'vcenter_host': vpx_hostname, 'vcenter_password': vpx_passwd, 'vddk_thumbprint': vddk_thumbprint, 'vddk_libdir': vddk_libdir, 'vddk_libdir_src': vddk_libdir_src, 'src_uri_type': src_uri_type, 'esxi_password': esxi_password, 'esxi_host': esxi_host, 'output_method': output_method, 'os_storage_name': storage_name, 'rhv_upload_opts': rhv_upload_opts, 'oo_json_disk_pattern': json_disk_pattern, 'params': params } os.environ['LIBGUESTFS_BACKEND'] = 'direct' v2v_uri = utils_v2v.Uri('esx') remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip) # Create password file for access to ESX hypervisor vpx_passwd_file = params.get("vpx_passwd_file") with open(vpx_passwd_file, 'w') as pwd_f: if src_uri_type == 'esx': pwd_f.write(esxi_password) else: pwd_f.write(vpx_passwd) v2v_params['v2v_opts'] += " -ip %s" % vpx_passwd_file if params.get('output_format'): v2v_params.update({'of_format': params['output_format']}) # Rename guest with special name while converting to rhev if '#' in vm_name and output_mode == 'rhev': v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_') 
# Create SASL user on the ovirt host if output_mode == 'rhev': # create different sasl_user name for different job params.update({ 'sasl_user': params.get("sasl_user") + utils_misc.generate_random_string(3) }) logging.info('sals user name is %s' % params.get("sasl_user")) user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"), params.get("sasl_pwd")) v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd) v2v_sasl.server_ip = params.get("remote_ip") v2v_sasl.server_user = params.get('remote_user') v2v_sasl.server_pwd = params.get('remote_pwd') v2v_sasl.setup(remote=True) logging.debug('A SASL session %s was created', v2v_sasl) if output_method == 'rhv_upload': # Create password file for '-o rhv_upload' to connect to ovirt with open(rhv_passwd_file, 'w') as f: f.write(rhv_passwd) # Copy ca file from ovirt to local remote.scp_from_remote(ovirt_hostname, 22, 'root', ovirt_engine_passwd, ovirt_ca_file_path, local_ca_file_path) # Create libvirt dir pool if output_mode == 'libvirt': pvt.pre_pool(pool_name, pool_type, pool_target, '') if checkpoint == 'root_ask': v2v_params['v2v_opts'] += ' --root ask' v2v_params['custom_inputs'] = params.get('choice', '2') if checkpoint.startswith('root_') and checkpoint != 'root_ask': root_option = params.get('root_option') v2v_params['v2v_opts'] += ' --root %s' % root_option if checkpoint == 'with_proxy': http_proxy = params.get('esx_http_proxy') https_proxy = params.get('esx_https_proxy') logging.info('Set http_proxy=%s, https_proxy=%s', http_proxy, https_proxy) os.environ['http_proxy'] = http_proxy os.environ['https_proxy'] = https_proxy if checkpoint == 'ogac': os.environ['VIRTIO_WIN'] = virtio_win_path if not os.path.exists(os.getenv('VIRTIO_WIN')): test.fail('%s does not exist' % os.getenv('VIRTIO_WIN')) if os.path.isdir(os.getenv('VIRTIO_WIN')) and os_type == 'linux': export_path = os.getenv('VIRTIO_WIN') qemu_guest_agent_dir = os.path.join(export_path, qa_path) if not os.path.exists(qemu_guest_agent_dir) and os.access( export_path, 
os.W_OK) and qa_url: logging.debug( 'Not found qemu-guest-agent in virtio-win or rhv-guest-tools-iso,' ' Try to prepare it manually. This is not a permanant step, once' ' the official build includes it, this step should be removed.' ) os.makedirs(qemu_guest_agent_dir) rpm_name = os.path.basename(qa_url) download.get_file( qa_url, os.path.join(qemu_guest_agent_dir, rpm_name)) if checkpoint == 'virtio_iso_blk': if not os.path.exists(virtio_win_path): test.fail('%s does not exist' % virtio_win_path) # Find a free loop device free_loop_dev = process.run("losetup --find", shell=True).stdout_text.strip() # Setup a loop device cmd = 'losetup %s %s' % (free_loop_dev, virtio_win_path) process.run(cmd, shell=True) os.environ['VIRTIO_WIN'] = free_loop_dev if checkpoint == 'invalid_pem': # simply change the 2nd line to lowercase to get an invalid pem with open(local_ca_file_path, 'r+') as fd: for i in range(2): pos = fd.tell() res = fd.readline() fd.seek(pos) fd.write(res.lower()) fd.flush() if checkpoint == 'without_default_net': net_name = find_net('virbr0') if net_name: destroy_net(net_name) if checkpoint == 'empty_cdrom': virsh_dargs = { 'uri': remote_uri, 'remote_ip': remote_host, 'remote_user': '******', 'remote_pwd': vpx_passwd, 'auto_close': True, 'debug': True } remote_virsh = virsh.VirshPersistent(**virsh_dargs) v2v_result = remote_virsh.dumpxml(vm_name) remote_virsh.close_session() else: if checkpoint == 'exist_uuid': auto_clean = False if checkpoint in [ 'mismatched_uuid', 'no_uuid', 'system_rhv_pem_set', 'system_rhv_pem_unset' ]: cmd_only = True auto_clean = False v2v_result = utils_v2v.v2v_cmd(v2v_params, auto_clean, cmd_only) if 'new_name' in v2v_params: vm_name = params['main_vm'] = v2v_params['new_name'] if checkpoint.startswith('system_rhv_pem'): if checkpoint == 'system_rhv_pem_set': global_pem_setup(local_ca_file_path) rhv_cafile = r'-oo rhv-cafile=\S+\s*' new_cmd = cmd_remove_option(v2v_result, rhv_cafile) logging.debug('New v2v command:\n%s', new_cmd) if 
checkpoint == 'mismatched_uuid': # append more uuid new_cmd = v2v_result + ' -oo rhv-disk-uuid=%s' % str(uuid.uuid4()) if checkpoint == 'no_uuid': rhv_disk_uuid = r'-oo rhv-disk-uuid=\S+\s*' new_cmd = cmd_remove_option(v2v_result, rhv_disk_uuid) logging.debug('New v2v command:\n%s', new_cmd) if checkpoint == 'exist_uuid': new_vm_name = v2v_params['new_name'] + '_exist_uuid' new_cmd = v2v_result.command.replace('-on %s' % vm_name, '-on %s' % new_vm_name) logging.debug('re-run v2v command:\n%s', new_cmd) if checkpoint in [ 'mismatched_uuid', 'no_uuid', 'exist_uuid', 'system_rhv_pem_set', 'system_rhv_pem_unset' ]: v2v_result = utils_v2v.cmd_run(new_cmd, params.get('v2v_dirty_resources')) check_result(v2v_result, status_error) finally: if checkpoint == 'ogac' and params.get('tmp_mount_point'): if os.path.exists(params.get('tmp_mount_point')): utils_misc.umount(os.getenv('VIRTIO_WIN'), params['tmp_mount_point'], 'iso9660') os.environ.pop('VIRTIO_WIN') if checkpoint == 'virtio_iso_blk': process.run('losetup -d %s' % free_loop_dev, shell=True) os.environ.pop('VIRTIO_WIN') if checkpoint == 'system_rhv_pem_set': global_pem_cleanup() if checkpoint == 'without_default_net': if net_name: start_net(net_name) if params.get('vmchecker'): params['vmchecker'].cleanup() if output_mode == 'rhev' and v2v_sasl: v2v_sasl.cleanup() logging.debug('SASL session %s is closing', v2v_sasl) v2v_sasl.close_session() if output_mode == 'libvirt': pvt.cleanup_pool(pool_name, pool_type, pool_target, '') if checkpoint == 'with_proxy': logging.info('Unset http_proxy&https_proxy') os.environ.pop('http_proxy') os.environ.pop('https_proxy') # Cleanup constant files utils_v2v.cleanup_constant_files(params)
for param in params.get("copy_to_local").split(): l_value = params.get(param) if l_value: need_copy = True nfs_link = utils_misc.get_path(test.bindir, l_value) i_name = os.path.basename(l_value) local_link = os.path.join(local_dir, i_name) if os.path.isfile(local_link): file_hash = crypto.hash_file(local_link, algorithm="md5") expected_hash = crypto.hash_file(nfs_link, algorithm="md5") if file_hash == expected_hash: need_copy = False if need_copy: msg = "Copy %s to %s in local host." % (i_name, local_link) error_context.context(msg, logging.info) download.get_file(nfs_link, local_link) params[param] = local_link unattended_install_config = UnattendedInstallConfig(test, params, vm) unattended_install_config.setup() # params passed explicitly, because they may have been updated by # unattended install config code, such as when params['url'] == auto vm.create(params=params) post_finish_str = params.get("post_finish_str", "Post set up finished") install_timeout = int(params.get("install_timeout", 4800)) migrate_background = params.get("migrate_background") == "yes" if migrate_background: mig_timeout = float(params.get("mig_timeout", "3600"))