def test_locking(self):
    """
    1) Create an env file.
    2) Create a thread that creates a dict as one of env's elements, and
       keeps updating it, using the env save_lock attribute.
    3) Try to save the environment.
    """
    termination_event = threading.Event()
    env = utils_env.Env(filename=self.envfilename)

    def update_env(env):
        @utils_env.lock_safe
        def _update_env(env, key, value):
            env["changing_dict"][key] = value

        if "changing_dict" not in env:
            env["changing_dict"] = {}
        while True:
            key = "%s" % utils_misc.generate_random_string(length=10)
            value = "%s" % utils_misc.generate_random_string(length=10)
            _update_env(env, key, value)
            if termination_event.is_set():
                break

    changing_thread = threading.Thread(target=update_env, args=(env,))
    changing_thread.start()
    time.sleep(0.3)
    try:
        env.save()
    finally:
        termination_event.set()
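# ``utils_env.lock_safe`` is exercised above but not defined in this
# section. A minimal sketch of what such a decorator plausibly does,
# assuming the first positional argument is the Env object and that Env
# exposes a ``save_lock`` threading.Lock attribute (both inferred from the
# docstring; this is not the real avocado-vt implementation):
import functools


def lock_safe(function):
    """Hypothetical sketch: serialize calls through env.save_lock."""
    @functools.wraps(function)
    def wrapper(env, *args, **kwargs):
        # Hold the same lock env.save() presumably takes, so concurrent
        # updates and saves cannot interleave.
        with env.save_lock:
            return function(env, *args, **kwargs)
    return wrapper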
def test_register_syncserver(self):
    """
    1) Create an env file.
    2) Create a SyncListenServer object and register it in the env.
    3) Get that SyncListenServer with get_syncserver.
    4) Verify that both objects are the same.
    """
    env = utils_env.Env(filename=self.envfilename)
    sync1 = FakeSyncListenServer(port=333)
    env.register_syncserver(333, sync1)
    sync2 = env.get_syncserver(333)
    assert sync1 == sync2
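# FakeVm and FakeSyncListenServer are test doubles defined elsewhere in
# this test module. A minimal sketch of what they plausibly provide,
# covering only the attributes these tests touch (name, params, port,
# instance, is_alive); in the real suite FakeVm likely subclasses
# virt_vm.BaseVM so that env.get_all_vms() can filter on type:
import uuid


class FakeVm(object):
    """Hypothetical stand-in for a virt_vm.BaseVM used by the env tests."""

    def __init__(self, name, params):
        self.name = name
        self.params = params
        # A unique id that must survive an env save/load round trip
        self.instance = str(uuid.uuid4())

    def is_alive(self):
        return True


class FakeSyncListenServer(object):
    """Hypothetical stand-in for a SyncListenServer."""

    def __init__(self, address='', port=123, tmpdir=None):
        self.port = port
        self.instance = str(uuid.uuid4())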
def worker(self, index, run_test_func):
    """
    The worker function.

    Waits for commands from the scheduler and processes them.

    :param index: The index of this worker (in the range 0..num_workers-1).
    :param run_test_func: A function to be called to run a test
                          (e.g. job.run_test).
    """
    r = self.s2w_r[index]
    w = self.w2s_w[index]
    self_dict = self.worker_dicts[index]

    # Inform the scheduler that this worker is ready
    w.write("ready\n")

    while True:
        cmd = r.readline().split()
        if not cmd:
            continue

        # The scheduler wants this worker to run a test
        if cmd[0] == "run":
            test_index = int(cmd[1])
            test = self.tests[test_index].copy()
            test.update(self_dict)
            test_iterations = int(test.get("iterations", 1))
            status = run_test_func("kvm", params=test,
                                   tag=test.get("shortname"),
                                   iterations=test_iterations)
            w.write("done %s %s\n" % (test_index, status))
            w.write("ready\n")

        # The scheduler wants this worker to free its used resources
        elif cmd[0] == "cleanup":
            env_filename = os.path.join(self.bindir, self_dict["env"])
            env = utils_env.Env(env_filename)
            for obj in list(env.values()):
                if isinstance(obj, virt_vm.BaseVM):
                    obj.destroy()
                elif isinstance(obj, aexpect.Spawn):
                    obj.close()
            env.save()
            w.write("cleanup_done\n")
            w.write("ready\n")

        # There's no more work for this worker
        elif cmd[0] == "terminate":
            break
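# The worker above speaks one end of a line-oriented pipe protocol: it
# announces "ready", executes "run <index>", answers "done <index> <status>",
# and exits on "terminate". A minimal sketch of the scheduler side under
# assumed attribute names (s2w_w / w2s_r as the opposite pipe ends); the
# real virt-test scheduler differs:
def dispatch(self, index, test_index):
    """Hypothetical: hand one test to worker `index` and await its result."""
    w = self.s2w_w[index]   # scheduler-to-worker pipe, write end (assumed)
    r = self.w2s_r[index]   # worker-to-scheduler pipe, read end (assumed)
    w.write("run %d\n" % test_index)
    w.flush()
    while True:
        reply = r.readline().split()
        # The worker reports "done <index> <status>" before going "ready"
        if reply and reply[0] == "done" and int(reply[1]) == test_index:
            return reply[2]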
def test_save(self):
    """
    1) Verify that calling env.save() with no filename where env doesn't
       specify a filename will throw an EnvSaveError.
    2) Register a VM in the environment, save env to a file, recover env
       from that file, get the vm and verify that the instance attribute
       of the 2 objects is the same.
    3) Register a SyncListenServer and don't save env. Restore env from
       file and try to get the syncserver, verify it doesn't work.
    4) Now save env to a file, restore env from the file and verify that
       the syncserver can be found there, and that the sync server
       instance attribute is equal to the initial sync server instance.
    """
    env = utils_env.Env()
    self.assertRaises(utils_env.EnvSaveError, env.save, {})
    params = utils_params.Params({"main_vm": 'rhel7-migration'})
    vm1 = FakeVm(params['main_vm'], params)
    vm1.is_alive()
    env.register_vm(params['main_vm'], vm1)
    env.save(filename=self.envfilename)
    env2 = utils_env.Env(filename=self.envfilename)
    vm2 = env2.get_vm(params['main_vm'])
    vm2.is_alive()
    assert vm1.instance == vm2.instance
    sync1 = FakeSyncListenServer(port=222)
    env.register_syncserver(222, sync1)
    env3 = utils_env.Env(filename=self.envfilename)
    syncnone = env3.get_syncserver(222)
    assert syncnone is None
    env.save(filename=self.envfilename)
    env4 = utils_env.Env(filename=self.envfilename)
    sync2 = env4.get_syncserver(222)
    assert sync2.instance == sync1.instance
def test_register_vm(self):
    """
    1) Create an env object.
    2) Create a VM and register it in the env.
    3) Get the vm back from the env.
    4) Verify that the 2 objects are the same.
    """
    env = utils_env.Env(filename=self.envfilename)
    params = utils_params.Params({"main_vm": 'rhel7-migration'})
    vm1 = FakeVm(params['main_vm'], params)
    vm1.is_alive()
    env.register_vm(params['main_vm'], vm1)
    vm2 = env.get_vm(params['main_vm'])
    vm2.is_alive()
    assert vm1 == vm2
def test_get_all_vms(self):
    """
    1) Create an env object.
    2) Create 2 vms and register them in the env.
    3) Create a SyncListenServer and register it in the env.
    4) Verify that the 2 vms are in the output of get_all_vms.
    5) Verify that the sync server is not in the output of get_all_vms.
    """
    env = utils_env.Env(filename=self.envfilename)
    params = utils_params.Params({"main_vm": 'rhel7-migration'})
    vm1 = FakeVm(params['main_vm'], params)
    vm1.is_alive()
    vm2 = FakeVm('vm2', params)
    vm2.is_alive()
    env.register_vm(params['main_vm'], vm1)
    env.register_vm('vm2', vm2)
    sync1 = FakeSyncListenServer(port=333)
    env.register_syncserver(333, sync1)
    assert vm1 in env.get_all_vms()
    assert vm2 in env.get_all_vms()
    assert sync1 not in env.get_all_vms()
def test_unregister_vm(self):
    """
    1) Create an env object.
    2) Register 2 vms in the env.
    3) Verify both vms are in the env.
    4) Remove one of those vms.
    5) Verify that the removed vm is no longer in the env.
    """
    env = utils_env.Env(filename=self.envfilename)
    params = utils_params.Params({"main_vm": 'rhel7-migration'})
    vm1 = FakeVm(params['main_vm'], params)
    vm1.is_alive()
    vm2 = FakeVm('vm2', params)
    vm2.is_alive()
    env.register_vm(params['main_vm'], vm1)
    env.register_vm('vm2', vm2)
    assert vm1 in env.get_all_vms()
    assert vm2 in env.get_all_vms()
    env.unregister_vm('vm2')
    assert vm1 in env.get_all_vms()
    assert vm2 not in env.get_all_vms()
def test_unregister_syncserver(self):
    """
    Unregister a sync server.

    1) Create an env file.
    2) Create and register 2 SyncListenServers in the env.
    3) Get one of the SyncListenServers in the env.
    4) Unregister one of the SyncListenServers.
    5) Verify that the unregistered SyncListenServer can't be retrieved
       anymore with ``get_syncserver()``.
    """
    env = utils_env.Env(filename=self.envfilename)
    sync1 = FakeSyncListenServer(port=333)
    env.register_syncserver(333, sync1)
    sync2 = FakeSyncListenServer(port=444)
    env.register_syncserver(444, sync2)
    sync3 = env.get_syncserver(333)
    assert sync1 == sync3
    env.unregister_syncserver(444)
    sync4 = env.get_syncserver(444)
    assert sync4 is None
def run_once(self, params):
    # Convert params to a Params object
    params = utils_params.Params(params)

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise exceptions.TestSkipError("Test dependency failed")

    # Report virt test version
    logging.info(version.get_pretty_version_info())

    # Report the parameters we've received and write them as keyvals
    logging.debug("Test parameters:")
    keys = sorted(params.keys())
    for key in keys:
        logging.debug("    %s = %s", key, params[key])
        self.write_test_keyval({key: params[key]})

    # Set the log file dir for the logging mechanism used by kvm_subprocess
    # (this must be done before unpickling env)
    utils_misc.set_log_file_dir(self.debugdir)

    # Open the environment file
    custom_env_path = params.get("custom_env_path", "")
    if custom_env_path:
        env_path = custom_env_path
    else:
        env_path = params.get("vm_type")
    env_filename = os.path.join(self.bindir, "backends", env_path,
                                params.get("env", "env"))
    env = utils_env.Env(env_filename, self.env_version)
    other_subtests_dirs = params.get("other_tests_dirs", "")

    test_passed = False
    t_type = None

    try:
        try:
            try:
                subtest_dirs = []
                bin_dir = self.bindir
                for d in other_subtests_dirs.split():
                    # Replace split char.
                    d = os.path.join(*d.split("/"))
                    subtestdir = os.path.join(bin_dir, d, "tests")
                    if not os.path.isdir(subtestdir):
                        raise exceptions.TestError("Directory %s does not "
                                                   "exist." % subtestdir)
                    subtest_dirs += data_dir.SubdirList(subtestdir,
                                                        bootstrap.test_filter)

                # Verify if we have the correspondent source file for it
                for generic_subdir in asset.get_test_provider_subdirs(
                        'generic'):
                    subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                        bootstrap.test_filter)

                for multi_host_migration_subdir in asset.get_test_provider_subdirs(
                        'multi_host_migration'):
                    subtest_dirs += data_dir.SubdirList(
                        multi_host_migration_subdir, bootstrap.test_filter)

                for specific_subdir in asset.get_test_provider_subdirs(
                        params.get("vm_type")):
                    subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                        bootstrap.test_filter)

                subtest_dir = None

                # Get the test routine corresponding to the specified
                # test type
                logging.debug("Searching for test modules that match "
                              "'type = %s' and 'provider = %s' "
                              "on this cartesian dict",
                              params.get("type"),
                              params.get("provider", None))

                t_types = params.get("type").split()

                provider = params.get("provider", None)
                if provider is not None:
                    subtest_dirs = [d for d in subtest_dirs if provider in d]

                # Make sure we can load provider_lib in tests
                for s in subtest_dirs:
                    if os.path.dirname(s) not in sys.path:
                        sys.path.insert(0, os.path.dirname(s))

                test_modules = {}
                for t_type in t_types:
                    for d in subtest_dirs:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        msg = ("Could not find test file %s.py on tests "
                               "dirs %s" % (t_type, subtest_dirs))
                        raise exceptions.TestError(msg)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_modules[t_type] = imp.load_module(t_type, f, p, d)
                    f.close()

                # Preprocess
                try:
                    params = env_process.preprocess(self, params, env)
                finally:
                    env.save()

                # Run the test function
                for t_type in t_types:
                    test_module = test_modules[t_type]
                    run_func = utils_misc.get_test_entrypoint_func(
                        t_type, test_module)
                    try:
                        run_func(self, params, env)
                        self.verify_background_errors()
                    finally:
                        env.save()
                test_passed = True
                error_message = funcatexit.run_exitfuncs(env, t_type)
                if error_message:
                    raise exceptions.TestWarn("funcatexit failed with: %s" %
                                              error_message)

            except Exception as e:
                if t_type is not None:
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        logging.error(error_message)
                logging.error("Test failed: %s: %s",
                              e.__class__.__name__, e)
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    env.save()
                raise

        finally:
            # Postprocess
            try:
                try:
                    env_process.postprocess(self, params, env)
                except Exception as e:
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                env.save()

    except Exception as e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "qemu":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("'%s' has a %s monitor unix socket at: %s",
                                 vm.name, m.protocol, m.filename)
                logging.info("The command line used to start '%s' was:\n%s",
                             vm.name, vm.make_create_command())
        raise exceptions.JobError("Abort requested (%s)" % e)
def cleanup_env(env_filename, env_version):
    """
    Picklable function to initialize and destroy the virttest env
    """
    env = utils_env.Env(env_filename, env_version)
    env.destroy()
def _runTest(self):
    params = self.params

    # Report virt test version
    logging.info(version.get_pretty_version_info())

    self._log_parameters()

    # Warn of this special condition in related location in output & logs
    if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
        logging.warning("")
        logging.warning("Testing with nettype='user' while running "
                        "as root may produce unexpected results!!!")
        logging.warning("")

    subtest_dirs = self._get_subtest_dirs()

    # Get the test routine corresponding to the specified test type
    logging.debug("Searching for test modules that match "
                  "'type = %s' and 'provider = %s' "
                  "on this cartesian dict",
                  params.get("type"), params.get("provider", None))

    t_types = params.get("type").split()

    utils.insert_dirs_to_path(subtest_dirs)

    test_modules = utils.find_test_modules(t_types, subtest_dirs)

    # Open the environment file
    env_filename = os.path.join(data_dir.get_tmp_dir(),
                                params.get("env", "env"))
    env = utils_env.Env(env_filename, self.env_version)
    if params.get_boolean("job_env_cleanup", "yes"):
        self.runner_queue.put({"func_at_exit": cleanup_env,
                               "args": (env_filename, self.env_version),
                               "once": True})

    test_passed = False
    t_type = None

    try:
        try:
            try:
                # Pre-process
                try:
                    params = env_process.preprocess(self, params, env)
                finally:
                    self._safe_env_save(env)

                # Run the test function
                for t_type in t_types:
                    test_module = test_modules[t_type]
                    run_func = utils_misc.get_test_entrypoint_func(
                        t_type, test_module)
                    try:
                        run_func(self, params, env)
                        self.verify_background_errors()
                    finally:
                        self._safe_env_save(env)
                test_passed = True
                error_message = funcatexit.run_exitfuncs(env, t_type)
                if error_message:
                    raise exceptions.TestWarn("funcatexit failed with: %s"
                                              % error_message)
            except:  # nopep8 Old-style exceptions are not inherited from Exception()
                stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                if t_type is not None:
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        logging.error(error_message)
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    self._safe_env_save(env)
                raise
        finally:
            # Post-process
            try:
                try:
                    params['test_passed'] = str(test_passed)
                    env_process.postprocess(self, params, env)
                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", sys.exc_info()[1])
            finally:
                if self._safe_env_save(env) or params.get("env_cleanup",
                                                          "no") == "yes":
                    env.destroy()   # Force-clean as it can't be stored

    except Exception as e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "qemu":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("It has a %s monitor unix socket at: %s",
                                 m.protocol, m.filename)
                logging.info("The command line used to start it was:\n%s",
                             vm.make_create_command())
        raise exceptions.JobError("Abort requested (%s)" % e)

    return test_passed
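# ``_safe_env_save`` appears at every save point above but is not shown in
# this section. Its apparent contract, inferred from the call sites (a
# truthy return means the env could not be stored and gets destroyed), in
# a minimal hypothetical sketch; the real avocado-vt helper may differ:
def _safe_env_save(self, env):
    """Hypothetical: try to persist env; report, don't raise, on failure."""
    try:
        env.save()
    except Exception as details:
        logging.warning("Unable to save environment: %s", details)
        return True  # tell the caller the env file is unusable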
def _runTest(self):
    params = self.params

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise error.TestNAError("Test dependency failed")

    # Report virt test version
    logging.info(version.get_pretty_version_info())

    # Report the parameters we've received and write them as keyvals
    logging.debug("Test parameters:")
    keys = sorted(params.keys())
    for key in keys:
        logging.debug("    %s = %s", key, params[key])

    # Warn of this special condition in related location in output & logs
    if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
        logging.warning("")
        logging.warning("Testing with nettype='user' while running "
                        "as root may produce unexpected results!!!")
        logging.warning("")

    # Find the test
    subtest_dirs = []
    test_filter = bootstrap.test_filter

    other_subtests_dirs = params.get("other_tests_dirs", "")
    for d in other_subtests_dirs.split():
        d = os.path.join(*d.split("/"))
        subtestdir = os.path.join(self.bindir, d, "tests")
        if not os.path.isdir(subtestdir):
            raise error.TestError("Directory %s does not "
                                  "exist" % subtestdir)
        subtest_dirs += data_dir.SubdirList(subtestdir, test_filter)

    provider = params.get("provider", None)

    if provider is None:
        # Verify if we have the correspondent source file for it
        generic_subdirs = asset.get_test_provider_subdirs('generic')
        for generic_subdir in generic_subdirs:
            subtest_dirs += data_dir.SubdirList(generic_subdir, test_filter)

        specific_subdirs = asset.get_test_provider_subdirs(
            params.get("vm_type"))
        for specific_subdir in specific_subdirs:
            subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                bootstrap.test_filter)
    else:
        provider_info = asset.get_test_provider_info(provider)
        for key in provider_info['backends']:
            subtest_dirs += data_dir.SubdirList(
                provider_info['backends'][key]['path'],
                bootstrap.test_filter)

    subtest_dir = None

    # Get the test routine corresponding to the specified test type
    logging.debug("Searching for test modules that match "
                  "'type = %s' and 'provider = %s' "
                  "on this cartesian dict",
                  params.get("type"), params.get("provider", None))

    t_types = params.get("type").split()

    # Make sure we can load provider_lib in tests
    for s in subtest_dirs:
        if os.path.dirname(s) not in sys.path:
            sys.path.insert(0, os.path.dirname(s))

    test_modules = {}
    for t_type in t_types:
        for d in subtest_dirs:
            module_path = os.path.join(d, "%s.py" % t_type)
            if os.path.isfile(module_path):
                logging.debug("Found subtest module %s", module_path)
                subtest_dir = d
                break
        if subtest_dir is None:
            msg = ("Could not find test file %s.py on test "
                   "dirs %s" % (t_type, subtest_dirs))
            raise error.TestError(msg)
        # Load the test module
        f, p, d = imp.find_module(t_type, [subtest_dir])
        test_modules[t_type] = imp.load_module(t_type, f, p, d)
        f.close()

    # TODO: the environment file is deprecated code, and should be removed
    # in future versions. Right now, it's being created on an Avocado temp
    # dir that is only persisted during the runtime of one job, which is
    # different from the original idea of the environment file (which was
    # to persist information across virt-test/avocado-vt job runs)
    env_filename = os.path.join(data_dir.get_tmp_dir(),
                                params.get("env", "env"))
    env = utils_env.Env(env_filename, self.env_version)
    self.runner_queue.put({"func_at_exit": cleanup_env,
                           "args": (env_filename, self.env_version),
                           "once": True})

    test_passed = False
    t_type = None

    try:
        try:
            try:
                # Preprocess
                try:
                    params = env_process.preprocess(self, params, env)
                finally:
                    self.__safe_env_save(env)

                # Run the test function
                for t_type in t_types:
                    test_module = test_modules[t_type]
                    run_func = utils_misc.get_test_entrypoint_func(
                        t_type, test_module)
                    try:
                        run_func(self, params, env)
                        self.verify_background_errors()
                    finally:
                        self.__safe_env_save(env)
                test_passed = True
                error_message = funcatexit.run_exitfuncs(env, t_type)
                if error_message:
                    raise error.TestWarn("funcatexit failed with: %s"
                                         % error_message)
            except Exception:
                if t_type is not None:
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        logging.error(error_message)
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    self.__safe_env_save(env)
                raise
        finally:
            # Postprocess
            try:
                try:
                    params['test_passed'] = str(test_passed)
                    env_process.postprocess(self, params, env)
                except Exception as e:
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                if self.__safe_env_save(env):
                    env.destroy()   # Force-clean as it can't be stored

    except Exception as e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "qemu":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("It has a %s monitor unix socket at: %s",
                                 m.protocol, m.filename)
                logging.info("The command line used to start it was:\n%s",
                             vm.make_create_command())
        raise error.JobError("Abort requested (%s)" % e)
def run_once(self, params):
    # Convert params to a Params object
    params = utils_params.Params(params)

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise error.TestNAError("Test dependency failed")

    # Report the parameters we've received and write them as keyvals
    logging.debug("Test parameters:")
    keys = sorted(params.keys())
    for key in keys:
        logging.debug("    %s = %s", key, params[key])
        self.write_test_keyval({key: params[key]})

    # Set the log file dir for the logging mechanism used by kvm_subprocess
    # (this must be done before unpickling env)
    utils_misc.set_log_file_dir(self.debugdir)

    # Open the environment file
    env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                params.get("env", "env"))
    env = utils_env.Env(env_filename, self.env_version)

    test_passed = False

    try:
        try:
            try:
                subtest_dirs = []
                tests_dir = self.job.testdir

                other_subtests_dirs = params.get("other_tests_dirs", "")
                for d in other_subtests_dirs.split():
                    # Replace split char.
                    d = os.path.join(*d.split("/"))
                    subtestdir = os.path.join(tests_dir, d, "tests")
                    if not os.path.isdir(subtestdir):
                        raise error.TestError("Directory %s does not "
                                              "exist." % subtestdir)
                    subtest_dirs.append(subtestdir)

                # Verify if we have the correspondent source file for it
                virt_dir = os.path.dirname(self.virtdir)
                subtest_dirs.append(os.path.join(virt_dir, "tests"))
                subtest_dirs.append(os.path.join(self.bindir,
                                                 params.get("vm_type"),
                                                 "tests"))
                subtest_dir = None

                # Get the test routine corresponding to the specified
                # test type
                t_types = params.get("type").split()
                test_modules = []
                for t_type in t_types:
                    for d in subtest_dirs:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        msg = ("Could not find test file %s.py on tests "
                               "dirs %s" % (t_type, subtest_dirs))
                        raise error.TestError(msg)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_modules.append(
                        (t_type, imp.load_module(t_type, f, p, d)))
                    f.close()

                # Preprocess
                try:
                    env_process.preprocess(self, params, env)
                finally:
                    env.save()

                # Run the test function
                for t_type, test_module in test_modules:
                    msg = "Running function: %s.run_%s()" % (t_type, t_type)
                    logging.info(msg)
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                        self.verify_background_errors()
                    finally:
                        env.save()
                test_passed = True

            except Exception as e:
                logging.error("Test failed: %s: %s",
                              e.__class__.__name__, e)
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    env.save()
                raise

        finally:
            # Postprocess
            try:
                try:
                    env_process.postprocess(self, params, env)
                except Exception as e:
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                env.save()

    except Exception as e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "qemu":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("'%s' has a %s monitor unix socket at: %s",
                                 vm.name, m.protocol, m.filename)
                logging.info("The command line used to start '%s' was:\n%s",
                             vm.name, vm.make_qemu_command())
        raise error.JobError("Abort requested (%s)" % e)