def test_register_syncserver(self):
    """
    1) Create an env object.
    2) Create a SyncListenServer object and register it in the env.
    3) Get that SyncListenServer with get_syncserver().
    4) Verify that both objects are the same.
    """
    env = utils_misc.Env()
    sync1 = FakeSyncListenServer(port=333)
    env.register_syncserver(333, sync1)
    sync2 = env.get_syncserver(333)
    assert sync1 == sync2
def test_save(self):
    """
    1) Verify that calling env.save() with no filename on an env created
       without a filename raises EnvSaveError.
    2) Register a VM in the env, save the env to a file, restore the env
       from that file, get the vm and verify that the instance attribute
       of the 2 objects is the same.
    3) Register a SyncListenServer without saving the env. Restore the env
       from the file and verify the syncserver can't be retrieved.
    4) Now save the env to the file, restore the env from it and verify
       that the syncserver can be found there, and that its instance
       attribute is equal to the initial sync server instance.
    """
    fname = "/dev/shm/EnvUnittest"
    env = utils_misc.Env()

    self.assertRaises(utils_misc.EnvSaveError, env.save, {})

    params = utils_misc.Params({"main_vm": 'rhel7-migration'})
    vm1 = FakeVm(params['main_vm'], params)
    vm1.is_alive()
    env.register_vm(params['main_vm'], vm1)
    env.save(filename=fname)
    env2 = utils_misc.Env(filename=fname)
    vm2 = env2.get_vm(params['main_vm'])
    vm2.is_alive()
    assert vm1.instance == vm2.instance

    sync1 = FakeSyncListenServer(port=222)
    env.register_syncserver(222, sync1)
    env3 = utils_misc.Env(filename=fname)
    syncnone = env3.get_syncserver(222)
    assert syncnone is None

    env.save(filename=fname)
    env4 = utils_misc.Env(filename=fname)
    sync2 = env4.get_syncserver(222)
    assert sync2.instance == sync1.instance

    if os.path.isfile(fname):
        os.unlink(fname)
def worker(self, index, run_test_func):
    """
    The worker function.

    Waits for commands from the scheduler and processes them.

    @param index: The index of this worker (in the range 0..num_workers-1).
    @param run_test_func: A function to be called to run a test
            (e.g. job.run_test).
    """
    r = self.s2w_r[index]
    w = self.w2s_w[index]
    self_dict = self.worker_dicts[index]

    # Inform the scheduler that this worker is ready
    w.write("ready\n")

    while True:
        cmd = r.readline().split()
        if not cmd:
            continue

        # The scheduler wants this worker to run a test
        if cmd[0] == "run":
            test_index = int(cmd[1])
            test = self.tests[test_index].copy()
            test.update(self_dict)
            test_iterations = int(test.get("iterations", 1))
            status = run_test_func("kvm", params=test,
                                   tag=test.get("shortname"),
                                   iterations=test_iterations)
            w.write("done %s %s\n" % (test_index, status))
            w.write("ready\n")

        # The scheduler wants this worker to free its used resources
        elif cmd[0] == "cleanup":
            env_filename = os.path.join(self.bindir, self_dict["env"])
            env = utils_misc.Env(env_filename)
            for obj in env.values():
                if isinstance(obj, virt_vm.BaseVM):
                    obj.destroy()
                elif isinstance(obj, aexpect.Spawn):
                    obj.close()
            env.save()
            w.write("cleanup_done\n")
            w.write("ready\n")

        # There's no more work for this worker
        elif cmd[0] == "terminate":
            break
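# The worker above implements one end of a line-based pipe protocol: it
# announces "ready", then reacts to "run <index>", "cleanup" and
# "terminate" commands, replying with "done <index> <status>" and
# "cleanup_done".  The method below sketches what the scheduler side of
# that exchange could look like.  It is an illustration only: dispatch_loop
# is a hypothetical name, and self.s2w_w / self.w2s_r are the assumed
# write/read counterparts of the pipes used by the worker.
import logging


def dispatch_loop(self, test_queue):
    """
    Hypothetical scheduler loop feeding a single worker (index 0) with
    test indexes until the queue is empty.
    """
    w = self.s2w_w[0]       # scheduler -> worker pipe (assumed)
    r = self.w2s_r[0]       # worker -> scheduler pipe (assumed)
    while True:
        msg = r.readline().split()
        if not msg:
            continue
        if msg[0] == "ready":
            if test_queue:
                # Hand the next test index to the worker
                w.write("run %d\n" % test_queue.pop(0))
            else:
                # No work left: ask the worker to free its resources
                w.write("cleanup\n")
        elif msg[0] == "done":
            logging.info("Test %s finished with status %s", msg[1], msg[2])
        elif msg[0] == "cleanup_done":
            w.write("terminate\n")
            break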
def test_register_vm(self):
    """
    1) Create an env object.
    2) Create a VM and register it in the env.
    3) Get the vm back from the env.
    4) Verify that the 2 objects are the same.
    """
    env = utils_misc.Env()
    params = utils_misc.Params({"main_vm": 'rhel7-migration'})
    vm1 = FakeVm(params['main_vm'], params)
    vm1.is_alive()
    env.register_vm(params['main_vm'], vm1)
    vm2 = env.get_vm(params['main_vm'])
    vm2.is_alive()
    assert vm1 == vm2
def test_unregister_syncserver(self):
    """
    1) Create an env object.
    2) Create and register 2 SyncListenServers in the env.
    3) Get one of the SyncListenServers from the env.
    4) Unregister one of the SyncListenServers.
    5) Verify that the unregistered SyncListenServer can no longer be
       retrieved with get_syncserver().
    """
    env = utils_misc.Env()
    sync1 = FakeSyncListenServer(port=333)
    env.register_syncserver(333, sync1)
    sync2 = FakeSyncListenServer(port=444)
    env.register_syncserver(444, sync2)
    sync3 = env.get_syncserver(333)
    assert sync1 == sync3
    env.unregister_syncserver(444)
    sync4 = env.get_syncserver(444)
    assert sync4 is None
def test_get_all_vms(self):
    """
    1) Create an env object.
    2) Create 2 vms and register them in the env.
    3) Create a SyncListenServer and register it in the env.
    4) Verify that the 2 vms are in the output of get_all_vms.
    5) Verify that the sync server is not in the output of get_all_vms.
    """
    env = utils_misc.Env()
    params = utils_misc.Params({"main_vm": 'rhel7-migration'})
    vm1 = FakeVm(params['main_vm'], params)
    vm1.is_alive()
    vm2 = FakeVm('vm2', params)
    vm2.is_alive()
    env.register_vm(params['main_vm'], vm1)
    env.register_vm('vm2', vm2)
    sync1 = FakeSyncListenServer(port=333)
    env.register_syncserver(333, sync1)
    assert vm1 in env.get_all_vms()
    assert vm2 in env.get_all_vms()
    assert sync1 not in env.get_all_vms()
def test_unregister_vm(self):
    """
    1) Create an env object.
    2) Register 2 vms in the env.
    3) Verify both vms are in the env.
    4) Remove one of those vms.
    5) Verify that the removed vm is no longer in the env.
    """
    env = utils_misc.Env()
    params = utils_misc.Params({"main_vm": 'rhel7-migration'})
    vm1 = FakeVm(params['main_vm'], params)
    vm1.is_alive()
    vm2 = FakeVm('vm2', params)
    vm2.is_alive()
    env.register_vm(params['main_vm'], vm1)
    env.register_vm('vm2', vm2)
    assert vm1 in env.get_all_vms()
    assert vm2 in env.get_all_vms()
    env.unregister_vm('vm2')
    assert vm1 in env.get_all_vms()
    assert vm2 not in env.get_all_vms()
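# The tests above use FakeVm and FakeSyncListenServer stand-ins whose
# definitions are not shown in this section.  The classes below are only a
# sketch of what such test doubles could look like, assuming the helpers
# expose a name, an is_alive()/close() method and an "instance" attribute
# (the value compared by the save/restore round-trip checks); the real
# classes in the unittest file may differ.
import logging
import uuid


class FakeVm(object):
    def __init__(self, vm_name, params):
        self.name = vm_name
        self.params = params
        # Unique token that should survive env.save() / restore
        self.instance = uuid.uuid4().hex

    def is_alive(self):
        logging.info("Fake VM %s (instance %s)", self.name, self.instance)
        return True


class FakeSyncListenServer(object):
    def __init__(self, address='', port=123, tmpdir=None):
        self.address = address
        self.port = port
        self.tmpdir = tmpdir
        self.instance = uuid.uuid4().hex

    def close(self):
        logging.info("Closing sync server (instance %s)", self.instance)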
def run_once(self):
    params = self.params

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise error.TestNAError("Test dependency failed")

    # Report the parameters we've received and write them as keyvals
    logging.debug("Test parameters:")
    keys = params.keys()
    keys.sort()
    for key in keys:
        logging.debug("    %s = %s", key, params[key])

    # Open the environment file
    env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                params.get("env", "env"))
    env = utils_misc.Env(env_filename, self.env_version)

    test_passed = False

    try:
        try:
            try:
                subtest_dirs = []
                tests_dir = self.testdir

                other_subtests_dirs = params.get("other_tests_dirs", "")
                for d in other_subtests_dirs.split():
                    subtestdir = os.path.join(tests_dir, d, "tests")
                    if not os.path.isdir(subtestdir):
                        raise error.TestError("Directory %s does not "
                                              "exist" % subtestdir)
                    subtest_dirs.append(subtestdir)

                # Verify if we have the corresponding source file for it
                subtest_dirs.append(self.testdir)
                specific_testdir = os.path.join(self.bindir,
                                                params.get("vm_type"),
                                                "tests")
                subtest_dirs.append(specific_testdir)
                subtest_dir = None

                # Get the test routine corresponding to the specified
                # test type
                t_types = params.get("type").split()
                test_modules = {}
                for t_type in t_types:
                    for d in subtest_dirs:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        msg = ("Could not find test file %s.py on test "
                               "dirs %s" % (t_type, subtest_dirs))
                        raise error.TestError(msg)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_modules[t_type] = imp.load_module(t_type, f, p, d)
                    f.close()

                # Preprocess
                try:
                    env_process.preprocess(self, params, env)
                finally:
                    env.save()

                # Run the test function
                for t_type, test_module in test_modules.items():
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                    finally:
                        env.save()
                test_passed = True

            except Exception, e:
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    env.save()
                raise

        finally:
            # Postprocess
            try:
                try:
                    env_process.postprocess(self, params, env)
                except Exception, e:
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                env.save()

    except Exception, e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "kvm":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("It has a %s monitor unix socket at: %s",
                                 m.protocol, m.filename)
                logging.info("The command line used to start it was:\n%s",
                             vm.make_qemu_command())
            raise error.JobError("Abort requested (%s)" % e)
def run_once(self, params):
    # Convert params to a Params object
    params = utils_misc.Params(params)

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise error.TestNAError("Test dependency failed")

    # Report the parameters we've received and write them as keyvals
    logging.debug("Test parameters:")
    keys = params.keys()
    keys.sort()
    for key in keys:
        logging.debug("    %s = %s", key, params[key])
        self.write_test_keyval({key: params[key]})

    # Set the log file dir for the logging mechanism used by
    # kvm_subprocess (this must be done before unpickling env)
    utils_misc.set_log_file_dir(self.debugdir)

    # Open the environment file
    env_filename = os.path.join(self.bindir, params.get("env", "env"))
    env = utils_misc.Env(env_filename, self.env_version)

    test_passed = False

    try:
        try:
            try:
                subtest_dirs = []
                tests_dir = self.job.testdir

                other_subtests_dirs = params.get("other_tests_dirs", "")
                for d in other_subtests_dirs.split():
                    subtestdir = os.path.join(tests_dir, d, "tests")
                    if not os.path.isdir(subtestdir):
                        raise error.TestError("Directory %s does not "
                                              "exist" % subtestdir)
                    subtest_dirs.append(subtestdir)

                # Verify if we have the corresponding source file for it
                virt_dir = os.path.dirname(utils_misc.__file__)
                subtest_dirs.append(os.path.join(virt_dir, "tests"))
                subtest_dirs.append(os.path.join(self.bindir, "tests"))
                subtest_dir = None

                # Get the test routine corresponding to the specified
                # test type
                t_types = params.get("type").split()
                test_modules = []
                for t_type in t_types:
                    for d in subtest_dirs:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        msg = ("Could not find test file %s.py on test "
                               "dirs %s" % (t_type, subtest_dirs))
                        raise error.TestError(msg)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_modules.append(
                        (t_type, imp.load_module(t_type, f, p, d)))
                    f.close()

                # Preprocess
                try:
                    env_process.preprocess(self, params, env)
                finally:
                    env.save()

                # Run the test function
                for t_type, test_module in test_modules:
                    msg = "Running function: %s.run_%s()" % (t_type, t_type)
                    logging.info(msg)
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                    finally:
                        env.save()
                test_passed = True

            except Exception, e:
                logging.error("Test failed: %s: %s",
                              e.__class__.__name__, e)
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    env.save()
                raise

        finally:
            # Postprocess
            try:
                try:
                    env_process.postprocess(self, params, env)
                except Exception, e:
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                env.save()

    except Exception, e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "kvm":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("'%s' has a %s monitor unix socket at: %s",
                                 vm.name, m.protocol, m.filename)
                logging.info("The command line used to start '%s' was:\n%s",
                             vm.name, vm.make_qemu_command())
            raise error.JobError("Abort requested (%s)" % e)
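# Both run_once() variants above locate a "<test type>.py" module in the
# subtest dirs and call its run_<test type>(test, params, env) entry point.
# The function below sketches the shape such a module could have, e.g. in a
# file like tests/boot.py.  "boot" is a hypothetical test type used only
# for illustration; vm.verify_alive() and vm.wait_for_login() are the usual
# BaseVM helpers.
def run_boot(test, params, env):
    """
    Minimal subtest sketch: fetch the main VM from the env and make sure a
    guest session can be opened.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    session.close()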