def run(test, params, env):
    """
    Test for virt-top, it is a top like tool for virtual machine.

    :param test: test object.
    :param params: Dictionary with the test parameters.
    :param env: Test environment.
    """
    # Install virt-top package if missing.
    software_mgr = software_manager.SoftwareManager()
    if not software_mgr.check_installed('virt-top'):
        logging.info('Installing virt-top package:')
        software_mgr.install('virt-top')

    # Get the full path of virt-top command.
    try:
        VIRT_TOP = path.find_command("virt-top")
    except path.CmdNotFoundError as info:
        raise exceptions.TestSkipError("No virt-top command found - %s" % info)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    output = params.get("output_file", "output")
    output_path = os.path.join(data_dir.get_tmp_dir(), output)
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("options", "")

    id_result = virsh.domid(vm_name)
    if id_result.exit_status:
        raise exceptions.TestError("Get domid failed.")
    domid = id_result.stdout.strip()

    if "--stream" in options:
        # Redirect the streamed output to a file so it can be analysed below.
        cmd = "%s %s 1>%s" % (VIRT_TOP, options, output_path)
    else:
        cmd = "%s %s" % (VIRT_TOP, options)
    # Add a timeout command to end it automatically.
    cmd = "timeout 10 %s" % cmd
    cmd_result = process.run(cmd, ignore_status=True, shell=True)

    if not status_error:
        # Read and analyse the output of virt-top: the line for the VM must
        # start with its domain id.
        success = False
        with open(output_path) as output_file:
            for line in output_file:
                if vm_name in line:
                    sub_string = line.split()
                    if domid == sub_string[0].strip():
                        success = True
                        break
        if not success:
            # NOTE: adjacent string literals previously lacked a separating
            # space ("butdomid"); message fixed.
            raise exceptions.TestFail("Command virt-top exit successfully, "
                                      "but domid is expected")
    else:
        # virt-top exits with status 2 on invalid options.
        if cmd_result.exit_status != 2:
            raise exceptions.TestFail("Command virt-top exit successfully "
                                      "with invalid option:%s"
                                      % cmd_result.stdout_text)
def find_playbook_program():
    """
    Return to the path of ansible-playbook.
    """
    try:
        path.find_command('ansible-playbook')
    except path.CmdNotFoundError:
        # ansible-playbook is missing; installing ansible provides it.
        software_manager.SoftwareManager().install('ansible')
    return path.find_command('ansible-playbook')
def check_if_package_is_installed(pkg):
    """
    Abort the test if the given package is not installed on the host.

    :param pkg: Name of the package to check.
    :raises TestError: if the package is not installed.
    """
    sm = software_manager.SoftwareManager()
    ret = sm.check_installed(pkg)
    if not ret:
        # The exception must be *raised*; the original code merely
        # instantiated it, so the test carried on with a missing package.
        raise TestError('Package {} is not installed on host, cannot continue.'.
                        format(pkg))
    else:
        logging.debug('The required package {} is available on host.'.
                      format(pkg))
def __init__(self):
    """
    Ensure libcgroup-tools is present and bind the cgconfig service manager.

    :raises exceptions.TestError: if libcgroup-tools cannot be installed.
    """
    # libcgroup lack libcgroup-tools dependency will introduces
    # following error,
    #     Failed to issue method call:
    #     Unit cgconfig.service failed to load:
    #     No such file or directory
    #
    # Please refer to
    # https://bugzilla.redhat.com/show_bug.cgi?format=multiple&id=882887
    manager = software_manager.SoftwareManager()
    if not manager.install('libcgroup-tools'):
        # The exception must be raised, not just constructed, to abort setup.
        raise exceptions.TestError("Failed to install libcgroup-tools on host")
    self._service_manager = service.Factory.create_service("cgconfig")
def run(test, params, env):
    """
    Test packages to check file permissions.

    1) Check libvirtd status;
    2) Reinstall packages;
    3) Verify packages;
    4) Clean up.

    :param test: test object.
    :param params: Dictionary with the test parameters.
    :param env: Test environment.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    libvirtd_state = params.get("libvirtd", "on")
    pkg_list = eval(params.get("package_list", "[]"))
    logging.info("package list: %s" % pkg_list)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if not len(pkg_list):
        test.cancel("Please specify libvirt package.")

    # Initialize before the try block so the finally clause can always
    # reference it, even if setup fails before the package loop runs.
    pkg_exist = []
    try:
        if libvirtd_state == "on":
            utils_libvirtd.libvirtd_start()
        elif libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        sm = software_manager.SoftwareManager()
        for item in pkg_list:
            # if package exist, remove it
            if sm.check_installed(item):
                pkg_exist.append(item)
                sm.remove(item)
            # install package
            sm.install(item)
            # verify package; ignore_status so a non-zero exit reaches the
            # exit_status check below instead of raising CmdError first.
            ret = process.run("rpm -V %s" % item, shell=True,
                              ignore_status=True)
            if ret.exit_status:
                test.fail("Check %s failed." % item)
            if ret.stdout_text.strip():
                test.fail("Verify %s failed: %s" % (item,
                                                    ret.stdout_text.strip()))
    finally:
        vmxml_backup.sync()
        for item in pkg_list:
            # Keep packages that were already installed before the test;
            # `continue` (not `break`) so remaining items are still removed.
            if item in pkg_exist:
                continue
            process.run("rpm -e %s" % item, shell=True,
                        ignore_status=True)
def run(test, params, env):
    """
    Test for virt-top, it is a top like tool for virtual machine.

    :param test: test object.
    :param params: Dictionary with the test parameters.
    :param env: Test environment.
    """
    # Install virt-top package if missing.
    software_mgr = software_manager.SoftwareManager()
    if not software_mgr.check_installed('virt-top'):
        logging.info('Installing virt-top package:')
        software_mgr.install('virt-top')
    # Get the full path of virt-top command.
    try:
        VIRT_TOP = path.find_command("virt-top")
    # Python-3 exception syntax: `except E, name` is a SyntaxError on py3.
    except path.CmdNotFoundError as info:
        raise exceptions.TestSkipError("No virt-top command found - %s" % info)
def run(test, params, env):
    """
    Installs virtualization software using the selected installers

    :param test: test object.
    :param params: Dictionary with test parameters.
    :param env: Test environment.
    """
    srcdir = params.get("srcdir", test.srcdir)
    params["srcdir"] = srcdir

    # Flag if a installer minor failure occurred
    minor_failure = False
    minor_failure_reasons = []

    sm = software_manager.SoftwareManager()
    for name in params.get("installers", "").split():
        installer_obj = installer.make_installer(name, params, test)
        if installer_obj.name == "ovirt_engine_sdk":
            # oVirt SDK is special-cased: download only, then build an RPM
            # out of the sources and install the resulting packages.
            installer_obj.install(cleanup=False, build=False, install=False)
            if installer_obj.minor_failure is True:
                minor_failure = True
                reason = "%s_%s: %s" % (installer_obj.name,
                                        installer_obj.mode,
                                        installer_obj.minor_failure_reason)
                minor_failure_reasons.append(reason)
            ovirt_src = os.path.join(srcdir, installer_obj.name)
            # Save and restore the working directory around the rpm build,
            # since build.make runs in the current directory.
            topdir = os.getcwd()
            os.chdir(ovirt_src)
            build.make("rpm")
            os.chdir(topdir)
            pkgs = glob.glob(os.path.join(ovirt_src,
                                          "rpmtop/RPMS/noarch/*"))
            for pkg in pkgs:
                sm.install(pkg)
        else:
            # Regular installers: install without cleanup/build steps.
            installer_obj.install(cleanup=False, build=False)
            time.sleep(5)
            if installer_obj.minor_failure is True:
                minor_failure = True
                reason = "%s_%s: %s" % (installer_obj.name,
                                        installer_obj.mode,
                                        installer_obj.minor_failure_reason)
                minor_failure_reasons.append(reason)
        # Record the installer in the environment for later cleanup/use.
        env.register_installer(installer_obj)

    # Minor failures were worked around; report them as a warning only.
    if minor_failure:
        test.warn("Minor (worked around) failures during build "
                  "test: %s" % ", ".join(minor_failure_reasons))
def test_provides(self):
    """The /bin/login binary must be provided by the 'login' package (Apt)."""
    manager = software_manager.SoftwareManager()
    self.assertEqual('login', manager.provides('/bin/login'))
    self.assertTrue(isinstance(manager.backend, software_manager.AptBackend))
def is_qemu_kvm_ma():
    """
    Check if qemu-kvm-ma is installed in host
    """
    return software_manager.SoftwareManager().check_installed("qemu-kvm-ma")
def run(test, params, env):
    """
    Nested test:
    1) Boot VM
    2) Install ansible and related packages
    3) Generate inventory file with L1 guest IP
    4) Generate parameter file with parameters for tests on L2 guest
    5) Execute ansible command

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    setup_bridge_sh = "/root/setup_bridge.sh"

    def get_live_vms(env):
        """Return all running VMs, failing the test when none is alive."""
        live_vms = []
        for vm in env.get_all_vms():
            if vm.is_alive():
                live_vms.append(vm)
        if not live_vms:
            test.fail("No live VM.")
        return live_vms

    def generate_invent_file(env):
        """Write an ansible inventory file (one guest IP per line)."""
        vms = get_live_vms(env)
        tmp_dir = virttest_data_dir.get_tmp_dir()
        file_name = "inventory_file"
        ip_lst = list(map(lambda v: v.wait_for_get_address(0, 240), vms))
        invent_path = os.path.join(tmp_dir, file_name)
        # writelines() does not append newlines; with several guests the
        # IPs would be mashed onto one inventory line. Join explicitly.
        with open(invent_path, "w") as invent_file:
            invent_file.write("\n".join(ip_lst) + "\n")
        return invent_path

    def copy_network_script(env):
        """Copy the bridge setup script into every live guest."""
        login_timeout = params.get_numeric("login_timeout", 360)
        deps_dir = virttest_data_dir.get_deps_dir()
        file_name = os.path.basename(setup_bridge_sh)
        br_file = os.path.join(deps_dir, file_name)
        for vm in get_live_vms(env):
            vm.wait_for_login(timeout=login_timeout)
            vm.copy_files_to(br_file, setup_bridge_sh)

    def generate_parameter_file(params):
        """Dump the parameters needed by the L2 test run to a JSON file."""
        tmp_dir = virttest_data_dir.get_tmp_dir()
        file_name = "parameter_file"
        guest_password = params.get("password")
        bootstrap_options = params.get("nested_bs_options")
        accept_cancel = params.get_boolean("accept_cancel")
        kar_cmd = "python3 ./ConfigTest.py "
        test_type = params.get("test_type")
        variant_name = params.get("nested_test")
        case_name = params.get("case_name", "")
        if variant_name == "check_cpu_model_l2":
            # Expand the case for every CPU model the host supports.
            host_cpu_models = virttest_cpu.get_host_cpu_models()
            case_name = ','.join(["%s.%s" % (case_name, i)
                                  for i in host_cpu_models])
        kar_cmd += " --%s=%s " % (test_type, case_name)
        l2_guest_name = params.get("l2_guest_name")
        if l2_guest_name:
            kar_cmd += " --guestname=%s" % l2_guest_name
        clone = params.get("install_node")
        if clone == "yes":
            kar_cmd += " --clone=yes"
        else:
            kar_cmd += " --clone=no"
        l2_kar_options = params.get("l2_kar_options")
        if l2_kar_options:
            kar_cmd += " %s" % l2_kar_options
        logging.info("Kar cmd: %s", kar_cmd)
        results_dir = test.logdir
        logging.info("Result_dir: %s", results_dir)
        kar_repo = params.get("kar_repo")
        cert_url = params.get("cert_url")
        data = {"guest_password": guest_password,
                "bootstrap_options": bootstrap_options,
                "accept_cancel": accept_cancel,
                "command_line": kar_cmd,
                "setup_br_sh": setup_bridge_sh,
                "host_log_files_dir": results_dir,
                "kar_repo": kar_repo,
                "cert_url": cert_url}
        param_path = os.path.join(tmp_dir, file_name)
        with open(param_path, "w") as json_file:
            json.dump(data, json_file)
        return param_path

    if (params.get('check_vendor', 'no') == 'yes' and
            cpu.get_vendor() != 'intel'):
        test.cancel("We only test this case with Intel platform now")

    sm = software_manager.SoftwareManager()
    if not sm.check_installed("ansible"):
        sm.install("ansible")

    invent_file = generate_invent_file(env)
    copy_network_script(env)
    deps_dir = virttest_data_dir.get_deps_dir()
    playbook_file = os.path.join(deps_dir, "playbook.yml")
    params_file = generate_parameter_file(params)

    # Disable host key checking so freshly-cloned guests can be reached.
    ansible_cmd = "export ANSIBLE_SSH_ARGS=\"-C -o ControlMaster=auto " \
                  "-o ControlPersist=60s " \
                  "-o StrictHostKeyChecking=no " \
                  "-o UserKnownHostsFile=/dev/null\"; " \
                  "ansible-playbook %s " \
                  "--extra-vars \"@%s\" " \
                  "-i %s " \
                  % (playbook_file, params_file, invent_file)
    logging.debug("ansible cmd: %s", ansible_cmd)

    timeout = float(params.get("test_timeout", 3600))
    status, output = process.getstatusoutput(ansible_cmd, timeout)
    if status != 0:
        test.fail("ansible_cmd failed, status: %s, output: %s"
                  % (status, output))
def run(test, params, env):
    """
    Ansible playbook basic test:
    1) Check ansible package exists
    2) Launch the guest
    3) Clone an ansible playbook repo
    4) Generate the ansible-playbook command
    5) Execute the playbook and verify the return status

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    pkg_mgr = software_manager.SoftwareManager()
    if not (pkg_mgr.check_installed("ansible") or pkg_mgr.install("ansible")):
        test.cancel("ansible package install failed")

    guest_user = params["username"]
    guest_passwd = params["password"]
    ansible_callback_plugin = params.get("ansible_callback_plugin")
    ansible_addl_opts = params.get("ansible_addl_opts", "")
    ansible_ssh_extra_args = params["ansible_ssh_extra_args"]
    ansible_extra_vars = params.get("ansible_extra_vars", "{}")
    playbook_repo = params["playbook_repo"]
    playbook_timeout = params.get_numeric("playbook_timeout")
    playbook_dir = params.get("playbook_dir",
                              os.path.join(test.workdir, "ansible_playbook"))
    toplevel_playbook = os.path.join(playbook_dir,
                                     params["toplevel_playbook"])
    # Use this directory to copy some logs back from the guest
    test_harness_log_dir = test.logdir

    # Every guest must be up and reachable before we can target it.
    guest_ips = []
    for vm in env.get_all_vms():
        vm.verify_alive()
        vm.wait_for_login()
        guest_ips.append(vm.get_address())

    logging.info("Cloning %s", playbook_repo)
    process.run("git clone %s %s" % (playbook_repo, playbook_dir),
                verbose=False)

    error_context.base_context("Generate playbook related options.",
                               logging.info)
    playbook_vars = {"ansible_ssh_extra_args": ansible_ssh_extra_args,
                     "ansible_ssh_pass": guest_passwd,
                     "test_harness_log_dir": test_harness_log_dir}
    playbook_vars.update(json.loads(ansible_extra_vars))

    # Trailing comma after the host list makes ansible treat it as an
    # inline inventory rather than a file path.
    cmd_parts = ["ansible-playbook",
                 "-u %s" % guest_user,
                 "-i %s," % ",".join(guest_ips),
                 "-e '%s'" % json.dumps(playbook_vars),
                 ansible_addl_opts,
                 toplevel_playbook]
    ansible_cmd = " ".join(cmd_parts)

    error_context.context("Execute the ansible playbook.", logging.info)
    proc_env = None
    if ansible_callback_plugin:
        proc_env = {"ANSIBLE_STDOUT_CALLBACK": ansible_callback_plugin}
    logging.info("Command of ansible playbook: '%s'", ansible_cmd)
    play_s, play_o = process.getstatusoutput(ansible_cmd,
                                             timeout=playbook_timeout,
                                             shell=False,
                                             env=proc_env)

    ansible_log = "ansible_playbook.log"
    with open(os.path.join(test_harness_log_dir, ansible_log), "w") as log_fd:
        log_fd.write(play_o)
        log_fd.flush()

    if play_s != 0:
        test.fail("Ansible playbook execution failed, please check the {} "
                  "for details.".format(ansible_log))
    logging.info("Ansible playbook execution passed.")
def test_extract_permission(self):
    """Extracting from an unsupported package must raise NotImplementedError."""
    sm = software_manager.SoftwareManager()
    with self.assertRaises(NotImplementedError) as raised:
        sm.extract_from_package('/dev/null', self.tmpdir.name)
    self.assertIn('No package manager supported was found for package ',
                  str(raised.exception))
def run(test, params, env):
    """
    This case check error messages in libvirtd logging.

    Implemented test cases:
    with_iptables:  Start libvirtd when using iptables service as firewall.
    with_firewalld: Start libvirtd when using firewalld service as firewall.
    no_firewall:    Start libvirtd With both firewall services shut off.
    """
    def _error_handler(line, errors):
        """
        A callback function called when new error lines appears in libvirtd
        log, then this line is appended to list 'errors'

        :param errors: A list to contain all error lines.
        :param line: Newly found error line in libvirtd log.
        """
        errors.append(line)

    def _check_errors():
        """
        Check for unexpected error messages in libvirtd log.
        """
        logging.info('Checking errors in libvirtd log')
        # These errors are known/benign and are always tolerated.
        accepted_error_patterns = [
            'Cannot access storage file',
            'Failed to autostart storage pool',
            'cannot open directory',
        ]
        if (not iptables_service and not firewalld_service and
                'virt_t' not in libvirt_context):
            logging.info("virt_t is not in libvirtd process context. "
                         "Failures for setting iptables rules will be ignored")
            # libvirtd process started without virt_t will failed to set
            # iptables rules which is expected here
            accepted_error_patterns.append(
                '/sbin/iptables .* unexpected exit status 1')
        logging.debug("Accepted errors are: %s", accepted_error_patterns)
        if errors:
            logging.debug("Found errors in libvirt log:")
            for line in errors:
                logging.debug(line)
            # Keep only lines matching none of the accepted patterns.
            unexpected_errors = []
            for line in errors:
                if any([re.search(p, line)
                        for p in accepted_error_patterns]):
                    logging.debug('Error "%s" is acceptable', line)
                else:
                    unexpected_errors.append(line)
            if unexpected_errors:
                raise exceptions.TestFail(
                    "Found unexpected errors in libvirt log:\n%s" %
                    '\n'.join(unexpected_errors))

    iptables_service = params.get('iptables_service', 'off') == 'on'
    firewalld_service = params.get('firewalld_service', 'off') == 'on'

    # In RHEL7 iptables service is provided by a separated package
    # In RHEL6 iptables-services and firewalld is not supported
    # So try to install all required packages but ignore failures
    logging.info('Preparing firewall related packages')
    software_mgr = software_manager.SoftwareManager()
    for pkg in ['iptables', 'iptables-services', 'firewalld']:
        if not software_mgr.check_installed(pkg):
            software_mgr.install(pkg)

    # Backup services status
    service_mgr = service.ServiceManager()
    logging.info('Backing up firewall services status')
    backup_iptables_status = service_mgr.status('iptables')
    backup_firewalld_status = service_mgr.status('firewalld')

    # iptables-service got deprecated in newer distros
    if iptables_service and backup_iptables_status is None:
        raise exceptions.TestSkipError('iptables service not found')
    # firewalld service could not exists on many distros
    if firewalld_service and backup_firewalld_status is None:
        raise exceptions.TestSkipError('firewalld service not found')

    try:
        if iptables_service and firewalld_service:
            raise exceptions.TestError(
                'iptables service and firewalld service can not be started at '
                'the same time')

        # We should stop services first then start the other after.
        # Directly start one service will force the other service stop,
        # which will not be easy to handle.
        # Backup status should be compared with None to make sure that
        # service exists before action.
        logging.info('Changing firewall services status')
        if not iptables_service and backup_iptables_status is not None:
            # Save current rules so they can be restored in the finally
            # block after the test temporarily stops iptables.
            process.run('iptables-save > /tmp/iptables.save', shell=True)
            service_mgr.stop('iptables')
        if not firewalld_service and backup_firewalld_status is not None:
            service_mgr.stop('firewalld')

        if iptables_service and backup_iptables_status is not None:
            service_mgr.start('iptables')
        if firewalld_service and backup_firewalld_status is not None:
            service_mgr.start('firewalld')

        errors = []
        # Run libvirt session and collect errors in log.
        libvirtd_session = utils_libvirtd.LibvirtdSession(
            service_name="virtnetworkd",
            logging_handler=_error_handler,
            logging_params=(errors, ),
            logging_pattern=r'[-\d]+ [.:+\d]+ [:\d]+ error :',
        )
        try:
            logging.info('Starting libvirtd session')
            libvirtd_session.start()
            # Give the daemon a moment to emit its startup log lines.
            time.sleep(3)

            libvirt_pid = libvirtd_session.tail.get_pid()
            sestatus = utils_selinux.get_status()
            if sestatus == "disabled":
                raise exceptions.TestSkipError("SELinux is in Disabled mode."
                                               "It must be in enforcing mode "
                                               "for test execution")
            libvirt_context = utils_selinux.get_context_of_process(libvirt_pid)
            logging.debug("The libvirtd process context is: %s",
                          libvirt_context)
        finally:
            libvirtd_session.exit()
        _check_errors()
    finally:
        logging.info('Recovering services status')
        # Restart socket service after starting process at foreground
        utils_libvirtd.Libvirtd("virtnetworkd.socket").restart()
        # If service do not exists, then backup status and current status
        # will all be none and nothing will be done
        if service_mgr.status('iptables') != backup_iptables_status:
            if backup_iptables_status:
                service_mgr.start('iptables')
                process.run('iptables-restore < /tmp/iptables.save',
                            shell=True)
            else:
                service_mgr.stop('iptables')
        if service_mgr.status('firewalld') != backup_firewalld_status:
            if backup_firewalld_status:
                service_mgr.start('firewalld')
            else:
                service_mgr.stop('firewalld')

        logging.info('Removing backup iptables')
        if os.path.exists("/tmp/iptables.save"):
            os.remove("/tmp/iptables.save")
def run(test, params, env):
    """
    Setup and run syzkaller (https://github.com/google/syzkaller)

    1. Install/Setup syzkaller in host
    2. Setup Guest for passwordless ssh from host
    3. Prepare and compile Guest kernel
    4. Prepare syzkaller config with qemu params and guest params
    5. Start sykaller with above config and run for specified time(test_timeout)
    6. Test fails out incase of any host issues

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    start_time = time.time()

    # Step 1: Install/Setup syzkaller in host
    sm = software_manager.SoftwareManager()
    if not sm.check_installed("go") and not sm.install("go"):
        test.cancel("golang package install failed")
    home = os.environ["HOME"]
    if not ("goroot/bin" in os.environ["PATH"] and
            "go/bin" in os.environ["PATH"]):
        # NOTE(review): `source .bashrc` in a child shell cannot change this
        # process's environment; it only helps later interactive shells.
        process.run('echo "PATH=%s/goroot/bin:%s/go/bin:$PATH" >> %s/.bashrc'
                    % (home, home, home), shell=True)
        process.run("source %s/.bashrc" % home, shell=True)
    process.run("go get -u -d github.com/google/syzkaller/...", shell=True)
    process.run("cd %s/go/src/github.com/google/syzkaller;make" % home,
                shell=True)
    syzkaller_path = "%s/go/src/github.com/google/syzkaller" % home

    # Step 2: Setup Guest for passwordless ssh from host
    vm = env.get_vm(params["main_vm"])
    session = vm.wait_for_login()
    ssh_key.setup_ssh_key(vm.get_address(), params.get("username"),
                          params.get("password"))
    session.close()
    vm.destroy()

    # Step 3: Prepare Guest kernel
    guest_kernel_repo = params.get("syz_kernel_repo")
    guest_kernel_branch = params.get("syz_kernel_branch")
    guest_kernel_config = params.get("syz_kernel_config")
    guest_kernel_build_path = utils_misc.get_path(test.debugdir, "linux")
    process.run(
        "git clone --depth 1 %s -b %s %s"
        % (guest_kernel_repo, guest_kernel_branch, guest_kernel_build_path),
        shell=True)
    process.run("cd %s;git log -1;make %s" % (guest_kernel_build_path,
                                              guest_kernel_config),
                shell=True)
    # KCOV/GCC plugins are required by syzkaller's coverage collection.
    process.run(
        'cd %s; echo "CONFIG_KCOV=y\nCONFIG_GCC_PLUGINS=y" >> .config; make olddefconfig'
        % guest_kernel_build_path, shell=True)
    process.run("cd %s;make -j 40" % guest_kernel_build_path, shell=True)

    # Step 4: Prepare syzkaller config with qemu params and guest params
    syz_config_path = utils_misc.get_path(test.debugdir, "syzkaller_config")
    os.makedirs("%s/syzkaller" % test.debugdir)
    workdir = "%s/syzkaller" % test.debugdir
    sshkey = "%s/.ssh/id_rsa" % os.environ["HOME"]
    kernel_path = "%s/vmlinux" % guest_kernel_build_path
    vm_config = {
        "count": int(params.get("syz_count")),
        "cpu": int(params.get("smp")),
        "mem": int(params.get("mem")),
        "kernel": kernel_path,
        "cmdline": params.get("kernel_args"),
        "qemu_args": params.get("syz_qemu_args")
    }
    syz_config = {
        'target': params.get("syz_target"),
        'workdir': workdir,
        "http": params.get("syz_http"),
        "image": storage.get_image_filename(params, data_dir.get_data_dir()),
        "syzkaller": syzkaller_path,
        "procs": int(params.get("syz_procs")),
        "type": "qemu",
        "sshkey": sshkey,
        "vm": vm_config
    }
    try:
        with open(syz_config_path, "w") as fp:
            json.dump(syz_config, fp)
    except IOError as err:
        # test.error() takes a single message string; passing `err` as a
        # second positional argument raised TypeError instead of reporting.
        test.error("Unable to update syzkaller config: %s" % err)
    end_time = time.time()

    # Step 5: Start sykaller config with specified time
    # Let's calculate the syzkaller timeout from
    # test timeout excluding current elapsed time + buffer
    testtimeout = int(
        params.get("test_timeout")) - (int(end_time - start_time) + 10)
    cmd = "%s/bin/syz-manager -config %s %s" % (
        syzkaller_path, syz_config_path, params.get("syz_cmd_params"))
    process.run(cmd, timeout=testtimeout, allow_output_check="combined",
                ignore_status=True, shell=True)

    # Let's delete linux kernel folder from test-results as it would
    # consume lot of space and test log have all the information about
    # it incase to retrieve it back.
    if os.path.isdir(guest_kernel_build_path):
        shutil.rmtree(guest_kernel_build_path)
def _get_installed_packages(self):
    """Query, cache and return the list of packages installed on the host."""
    packages = software_manager.SoftwareManager().list_all()
    self._installed_pkgs = packages
    return packages
def run(test, params, env): """ Test command: virsh managedsave. This command can save and destroy a running domain, so it can be restarted from the same state at a later time. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name shutdown_timeout = int(params.get('shutdown_timeout', 60)) # define function def vm_recover_check(option, libvirtd, check_shutdown=False): """ Check if the vm can be recovered correctly. :param guest_name : Checked vm's name. :param option : managedsave command option. """ # This time vm not be shut down if vm.is_alive(): test.fail("Guest should be inactive") # Check vm managed save state. ret = virsh.dom_list("--managed-save --inactive", debug=True) vm_state1 = re.findall(r".*%s.*" % vm_name, ret.stdout.strip())[0].split()[2] ret = virsh.dom_list("--managed-save --all", debug=True) vm_state2 = re.findall(r".*%s.*" % vm_name, ret.stdout.strip())[0].split()[2] if vm_state1 != "saved" or vm_state2 != "saved": test.fail("Guest state should be saved") virsh.start(vm_name, debug=True) # This time vm should be in the list if vm.is_dead(): test.fail("Guest should be active") # Restart libvirtd and check vm status again. libvirtd.restart() if vm.is_dead(): test.fail("Guest should be active after" " restarting libvirtd") # Check managed save file: if os.path.exists(managed_save_file): test.fail("Managed save image exist " "after starting the domain") if option: if option.count("running"): if vm.is_dead() or vm.is_paused(): test.fail("Guest state should be" " running after started" " because of '--running' option") elif option.count("paused"): if not vm.is_paused(): test.fail("Guest state should be" " paused after started" " because of '--paused' option") else: if params.get("paused_after_start_vm") == "yes": if not vm.is_paused(): test.fail("Guest state should be" " paused after started" " because of initia guest state") if check_shutdown: # Resume the domain. 
if vm.is_paused(): vm.resume() vm.wait_for_login() # Shutdown and start the domain, # it should be in runing state and can be login. vm.shutdown() if not vm.wait_for_shutdown(shutdown_timeout): test.fail('VM failed to shutdown') vm.start() vm.wait_for_login() def vm_undefine_check(vm_name): """ Check if vm can be undefined with manage-save option """ #backup xml file xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if not os.path.exists(managed_save_file): test.fail("Can't find managed save image") #undefine domain with no options. if not virsh.undefine(vm_name, options=None, ignore_status=True).exit_status: test.fail("Guest shouldn't be undefined" "while domain managed save image exists") #undefine domain with managed-save option. if virsh.undefine(vm_name, options="--managed-save", ignore_status=True).exit_status: test.fail("Guest can't be undefine with " "managed-save option") if os.path.exists(managed_save_file): test.fail("Managed save image exists" " after undefining vm") #restore and start the vm. xml_backup.define() vm.start() def check_flags_parallel(virsh_cmd, bash_cmd, flags): """ Run the commands parallel and check the output. """ cmd = ("%s & %s" % (virsh_cmd, bash_cmd)) ret = process.run(cmd, ignore_status=True, shell=True, ignore_bg_processes=True) output = ret.stdout_text.strip() logging.debug("check flags output: %s" % output) lines = re.findall(r"flags:.(\d+)", output, re.M) logging.debug("Find all fdinfo flags: %s" % lines) lines = [int(i, 8) & flags for i in lines] if flags not in lines: test.fail("Checking flags %s failed" % flags) return ret def check_multi_guests(guests, start_delay, libvirt_guests): """ Check start_delay option for multiple guests. 
""" # Destroy vm first if vm.is_alive(): vm.destroy(gracefully=False) # Clone given number of guests timeout = params.get("clone_timeout", 360) for i in range(int(guests)): dst_vm = "%s_%s" % (vm_name, i) utils_libguestfs.virt_clone_cmd(vm_name, dst_vm, True, timeout=timeout) virsh.start(dst_vm, debug=True) # Wait 10 seconds for vm to start time.sleep(10) is_systemd = process.run("cat /proc/1/comm", shell=True).stdout_text.count("systemd") if is_systemd: libvirt_guests.restart() pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest.+done' else: ret = process.run("service libvirt-guests restart | \ awk '{ print strftime(\"%b %y %H:%M:%S\"), \ $0; fflush(); }'", shell=True) pattern = r'(.+ \d\d:\d\d:\d\d)+ Resuming guest.+done' # libvirt-guests status command read messages from systemd # journal, in cases of messages are not ready in time, # add a time wait here. def wait_func(): return libvirt_guests.raw_status().stdout.count("Resuming guest") utils_misc.wait_for(wait_func, 5) if is_systemd: ret = libvirt_guests.raw_status() logging.info("status output: %s", ret.stdout_text) resume_time = re.findall(pattern, ret.stdout_text, re.M) if not resume_time: test.fail("Can't see messages of resuming guest") # Convert time string to int resume_seconds = [ time.mktime(time.strptime(tm, "%b %y %H:%M:%S")) for tm in resume_time ] logging.info("Resume time in seconds: %s", resume_seconds) # Check if start_delay take effect for i in range(len(resume_seconds) - 1): if resume_seconds[i + 1] - resume_seconds[i] < int(start_delay): test.fail("Checking start_delay failed") def wait_for_state(vm_state): """ Wait for vm state is ready. """ utils_misc.wait_for(lambda: vm.state() == vm_state, 10) def check_guest_flags(bash_cmd, flags): """ Check bypass_cache option for single guest. """ # Drop caches. 
drop_caches() # form proper parallel command based on if systemd is used or not is_systemd = process.run("cat /proc/1/comm", shell=True).stdout_text.count("systemd") if is_systemd: virsh_cmd_stop = "systemctl stop libvirt-guests" virsh_cmd_start = "systemctl start libvirt-guests" else: virsh_cmd_stop = "service libvirt-guests stop" virsh_cmd_start = "service libvirt-guests start" ret = check_flags_parallel( virsh_cmd_stop, bash_cmd % (managed_save_file, managed_save_file, "1"), flags) if is_systemd: ret = libvirt_guests.raw_status() logging.info("status output: %s", ret.stdout_text) if all([ "Suspending %s" % vm_name not in ret.stdout_text, "stopped, with saved guests" not in ret.stdout_text ]): test.fail("Can't see messages of suspending vm") # status command should return 3. if not is_systemd: ret = libvirt_guests.raw_status() if ret.exit_status != 3: test.fail("The exit code %s for libvirt-guests" " status is not correct" % ret) # Wait for VM in shut off state wait_for_state("shut off") check_flags_parallel( virsh_cmd_start, bash_cmd % (managed_save_file, managed_save_file, "0"), flags) # Wait for VM in running state wait_for_state("running") def vm_msave_remove_check(vm_name): """ Check managed save remove command. """ if not os.path.exists(managed_save_file): test.fail("Can't find managed save image") virsh.managedsave_remove(vm_name, debug=True) if os.path.exists(managed_save_file): test.fail("Managed save image still exists") virsh.start(vm_name, debug=True) # The domain state should be running if vm.state() != "running": test.fail("Guest state should be" " running after started") def vm_managedsave_loop(vm_name, loop_range, libvirtd): """ Run a loop of managedsave command and check its result. """ if vm.is_dead(): virsh.start(vm_name, debug=True) for i in range(int(loop_range)): logging.debug("Test loop: %s" % i) virsh.managedsave(vm_name, debug=True) virsh.start(vm_name, debug=True) # Check libvirtd status. 
# NOTE(review): the next five lines are the tail of a helper defined above this
# chunk — they verify libvirtd survived the command and the guest is back up.
if not libvirtd.is_running():
    test.fail("libvirtd is stopped after cmd")
# Check vm status.
if vm.state() != "running":
    test.fail("Guest isn't in running state")


def build_vm_xml(vm_name, **dargs):
    """
    Build the new domain xml, define it and start the guest.

    Reads ``vm``, ``params`` and ``test`` from the enclosing scope.

    :param vm_name: name of the domain whose xml is rebuilt
    :param dargs: recognized flags:
                  ``cpu_mode=True`` — replace the <cpu> element with one
                  built from cpu_* test params and set the vcpu count;
                  ``sec_driver=True`` — add a dynamic selinux <seclabel>.
    """
    try:
        # stop vm before doing any change to xml
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if dargs.get("cpu_mode"):
            # Drop any existing <cpu> element before installing the new one.
            if "cpu" in vmxml:
                del vmxml.cpu
            cpuxml = vm_xml.VMCPUXML()
            cpuxml.mode = params.get("cpu_mode", "host-model")
            cpuxml.match = params.get("cpu_match", "exact")
            cpuxml.fallback = params.get("cpu_fallback", "forbid")
            # Only topology keys actually present in params are set.
            cpu_topology = {}
            cpu_topology_sockets = params.get("cpu_topology_sockets")
            if cpu_topology_sockets:
                cpu_topology["sockets"] = cpu_topology_sockets
            cpu_topology_cores = params.get("cpu_topology_cores")
            if cpu_topology_cores:
                cpu_topology["cores"] = cpu_topology_cores
            cpu_topology_threads = params.get("cpu_topology_threads")
            if cpu_topology_threads:
                cpu_topology["threads"] = cpu_topology_threads
            if cpu_topology:
                cpuxml.topology = cpu_topology
            vmxml.cpu = cpuxml
            # NOTE(review): "vcpu_nums" has no default — a missing param
            # raises here and is reported via test.cancel below.
            vmxml.vcpu = int(params.get("vcpu_nums"))
        if dargs.get("sec_driver"):
            seclabel_dict = {"type": "dynamic", "model": "selinux",
                             "relabel": "yes"}
            vmxml.set_seclabel([seclabel_dict])
        vmxml.sync()
        vm.start()
    except Exception as e:
        # Any failure while rebuilding the xml aborts (cancels) the test.
        logging.error(str(e))
        test.cancel("Build domain xml failed")


# Parse the test configuration ("yes"/"no" params become booleans).
status_error = ("yes" == params.get("status_error", "no"))
vm_ref = params.get("managedsave_vm_ref", "name")
libvirtd_state = params.get("libvirtd", "on")
extra_param = params.get("managedsave_extra_param", "")
progress = ("yes" == params.get("managedsave_progress", "no"))
cpu_mode = "yes" == params.get("managedsave_cpumode", "no")
test_undefine = "yes" == params.get("managedsave_undefine", "no")
test_bypass_cache = "yes" == params.get("test_bypass_cache", "no")
autostart_bypass_cache = params.get("autostart_bypass_cache", "")
multi_guests = params.get("multi_guests", "")
test_libvirt_guests = params.get("test_libvirt_guests", "")
check_flags = "yes" == params.get("check_flags", "no")
security_driver = params.get("security_driver", "")
remove_after_cmd = "yes" == params.get("remove_after_cmd", "no")
option = params.get("managedsave_option", "")
check_shutdown = "yes" == params.get("shutdown_after_cmd", "no")
pre_vm_state = params.get("pre_vm_state", "")
move_saved_file = "yes" == params.get("move_saved_file", "no")
test_loop_cmd = "yes" == params.get("test_loop_cmd", "no")
if option:
    if not virsh.has_command_help_match('managedsave', option):
        # Older libvirt does not have this option
        test.cancel("Older libvirt does not"
                    " handle arguments consistently")

# Backup xml file.
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
# Get the libvirtd service
libvirtd = utils_libvirtd.Libvirtd()
# Get config files.
qemu_config = utils_config.LibvirtQemuConfig()
libvirt_guests_config = utils_config.LibvirtGuestsConfig()
# Get libvirt-guests service
libvirt_guests = Factory.create_service("libvirt-guests")

try:
    # Destroy vm first for setting configuration file
    if vm.state() == "running":
        vm.destroy(gracefully=False)
    # Prepare test environment.
    if libvirtd_state == "off":
        libvirtd.stop()
    if autostart_bypass_cache:
        # Enable domain autostart, then make qemu bypass the page cache
        # when restoring autostarted domains.
        ret = virsh.autostart(vm_name, "", ignore_status=True, debug=True)
        libvirt.check_exit_status(ret)
        qemu_config.auto_start_bypass_cache = autostart_bypass_cache
        libvirtd.restart()
    if security_driver:
        qemu_config.security_driver = [security_driver]
    if test_libvirt_guests:
        if multi_guests:
            start_delay = params.get("start_delay", "20")
            libvirt_guests_config.START_DELAY = start_delay
        if check_flags:
            libvirt_guests_config.BYPASS_CACHE = "1"
        # The config file format should be "x=y" instead of "x = y"
        process.run(
            "sed -i -e 's/ = /=/g' "
            "/etc/sysconfig/libvirt-guests",
            shell=True)
        libvirt_guests.restart()

    # Change domain xml.
    if cpu_mode:
        build_vm_xml(vm_name, cpu_mode=True)
    if security_driver:
        build_vm_xml(vm_name, sec_driver=True)

    # Turn VM into certain state.
    if pre_vm_state == "transient":
        # Undefine the guest and re-create it so it has no persistent config.
        logging.info("Creating %s..." % vm_name)
        vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Wait for VM to be in shut off state
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        vm.undefine()
        if virsh.create(vmxml_for_test.xml, ignore_status=True,
                        debug=True).exit_status:
            # Restore the persistent definition before cancelling.
            vmxml_backup.define()
            test.cancel("Cann't create the domain")

    # Wait for vm in stable state
    if params.get("start_vm") == "yes":
        if vm.state() == "shut off":
            vm.start()
        vm.wait_for_login()

    # run test case: resolve the domain reference used on the command line.
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.count("invalid"):
        # "invalid*" refs are looked up in params (negative test input).
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # Ignore exception with "ignore_status=True"
    if progress:
        option += " --verbose"
    option += extra_param

    # For bypass_cache test. Run a shell command to check fd flags while
    # executing managedsave command
    software_mgr = software_manager.SoftwareManager()
    if not software_mgr.check_installed('lsof'):
        logging.info('Installing lsof package:')
        software_mgr.install('lsof')
    # Polls (up to ~20s) for the save file, then dumps the fdinfo flags of
    # the libvirt iohelper fd found via lsof; %s slots are filled below.
    bash_cmd = (
        "let i=1; while((i++<400)); do if [ -e %s ]; then (cat /proc"
        "/$(lsof -w %s|awk '/libvirt_i/{print $2}')/fdinfo/%s |"
        "grep 'flags:.*') && break; else sleep 0.05; fi; done;")
    # Flags to check bypass cache take effect
    flags = os.O_DIRECT
    if test_bypass_cache:
        # Drop caches.
        drop_caches()
        # Save, checking O_DIRECT is used on the save file, ...
        virsh_cmd = "virsh managedsave %s %s" % (option, vm_name)
        check_flags_parallel(
            virsh_cmd,
            bash_cmd % (managed_save_file, managed_save_file, "1"),
            flags)
        # Wait for VM in shut off state
        wait_for_state("shut off")
        # ... then restore, checking the same on the restore path.
        virsh_cmd = "virsh start %s %s" % (option, vm_name)
        check_flags_parallel(
            virsh_cmd,
            bash_cmd % (managed_save_file, managed_save_file, "0"),
            flags)
        # Wait for VM in running state
        wait_for_state("running")
    elif test_libvirt_guests:
        logging.debug("libvirt-guests status: %s", libvirt_guests.status())
        if multi_guests:
            check_multi_guests(multi_guests, start_delay, libvirt_guests)
        if check_flags:
            check_guest_flags(bash_cmd, flags)
    else:
        # Ensure VM is running
        utils_misc.wait_for(lambda: vm.state() == "running", 10)
        ret = virsh.managedsave(vm_ref, options=option,
                                ignore_status=True, debug=True)
        status = ret.exit_status
        # The progress information output in error message
        error_msg = ret.stderr.strip()
        if move_saved_file:
            # Corrupt the save image to exercise recovery handling.
            cmd = "echo > %s" % managed_save_file
            process.run(cmd, shell=True)
        # recover libvirtd service start
        if libvirtd_state == "off":
            libvirtd.start()
        if status_error:
            if not status:
                if libvirtd_state == "off" and libvirt_version.version_compare(
                        5, 6, 0):
                    # virsh auto-spawns libvirtd on newer libvirt, so the
                    # "daemon stopped" negative case legitimately succeeds.
                    logging.info(
                        "From libvirt version 5.6.0 libvirtd is restarted "
                        "and command should succeed")
                else:
                    test.fail("Run successfully with wrong command!")
        else:
            if status:
                test.fail("Run failed with right command")
            if progress:
                if not error_msg.count("Managedsave:"):
                    test.fail("Got invalid progress output")
            if remove_after_cmd:
                vm_msave_remove_check(vm_name)
            elif test_undefine:
                vm_undefine_check(vm_name)
            elif autostart_bypass_cache:
                # rhbz#1755303
                if libvirt_version.version_compare(5, 6, 0):
                    os.remove("/run/libvirt/qemu/autostarted")
                # Restart the daemon and verify the autostarted domain is
                # restored with O_DIRECT on the save file.
                libvirtd.stop()
                virsh_cmd = ("(service libvirtd start)")
                check_flags_parallel(
                    virsh_cmd,
                    bash_cmd % (managed_save_file, managed_save_file, "0"),
                    flags)
            elif test_loop_cmd:
                loop_range = params.get("loop_range", "20")
                vm_managedsave_loop(vm_name, loop_range, libvirtd)
            else:
                vm_recover_check(option, libvirtd, check_shutdown)
finally:
    # Restore test environment.
    # Restart libvirtd.service
    qemu_config.restore()
    libvirt_guests_config.restore()
    libvirtd.restart()
    if autostart_bypass_cache:
        virsh.autostart(vm_name, "--disable",
                        ignore_status=True, debug=True)
    vm.destroy(gracefully=False)
    virsh.managedsave_remove(vm_name, debug=True)
    vmxml_backup.sync()
    if multi_guests:
        # Clean up the extra guests cloned for the multi-guest scenario.
        for i in range(int(multi_guests)):
            virsh.remove_domain("%s_%s" % (vm_name, i),
                                "--remove-all-storage", debug=True)
def test_extract_from_deb(self):
    """Extracting a .deb package should return the destination directory."""
    mgr = software_manager.SoftwareManager()
    dest = mgr.extract_from_package(self.deb_path, self.tmpdir.name)
    self.assertEqual(self.tmpdir.name, dest)