def run_once(self):
    params = self.params

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise error.TestNAError("Test dependency failed")

    # Report the parameters we've received and write them as keyvals
    logging.debug("Test parameters:")
    keys = params.keys()
    keys.sort()
    for key in keys:
        logging.debug("    %s = %s", key, params[key])

    # Open the environment file
    env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                params.get("env", "env"))
    env = utils_misc.Env(env_filename, self.env_version)

    test_passed = False

    try:
        try:
            try:
                subtest_dirs = []
                tests_dir = self.testdir

                other_subtests_dirs = params.get("other_tests_dirs", "")
                for d in other_subtests_dirs.split():
                    subtestdir = os.path.join(tests_dir, d, "tests")
                    if not os.path.isdir(subtestdir):
                        raise error.TestError("Directory %s does not "
                                              "exist" % (subtestdir))
                    subtest_dirs.append(subtestdir)

                # Verify if we have the correspondent source file for it
                subtest_dirs.append(self.testdir)
                specific_testdir = os.path.join(self.bindir,
                                                params.get("vm_type"),
                                                "tests")
                subtest_dirs.append(specific_testdir)
                subtest_dir = None

                # Get the test routine corresponding to the specified
                # test type
                t_types = params.get("type").split()
                test_modules = {}
                for t_type in t_types:
                    for d in subtest_dirs:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        msg = ("Could not find test file %s.py on test "
                               "dirs %s" % (t_type, subtest_dirs))
                        raise error.TestError(msg)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_modules[t_type] = imp.load_module(t_type, f, p, d)
                    f.close()

                # Preprocess
                try:
                    env_process.preprocess(self, params, env)
                finally:
                    env.save()

                # Run the test function
                for t_type, test_module in test_modules.items():
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                    finally:
                        env.save()
                test_passed = True

            except Exception, e:
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    env.save()
                raise

        finally:
            # Postprocess
            try:
                try:
                    env_process.postprocess(self, params, env)
                except Exception, e:
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                env.save()

    except Exception, e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "kvm":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("It has a %s monitor unix socket at: %s",
                                 m.protocol, m.filename)
                logging.info("The command line used to start it was:\n%s",
                             vm.make_qemu_command())
            raise error.JobError("Abort requested (%s)" % e)
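
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the harness): the discovery loop above
# boils down to "find <type>.py in a list of candidate dirs, then import it
# with the Python 2 imp API". A minimal standalone version, with
# hypothetical names (load_test_module, candidate_dirs), might look like:
import imp
import os


def load_test_module(t_type, candidate_dirs):
    """Return the module object for <t_type>.py found in candidate_dirs."""
    for d in candidate_dirs:
        module_path = os.path.join(d, "%s.py" % t_type)
        if os.path.isfile(module_path):
            # imp.find_module returns (open file, pathname, description);
            # the caller must close the file once load_module is done.
            f, p, desc = imp.find_module(t_type, [d])
            try:
                return imp.load_module(t_type, f, p, desc)
            finally:
                f.close()
    raise LookupError("no %s.py found in %s" % (t_type, candidate_dirs))
# --------------------------------------------------------------------------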
def run_once(self):
    params = self.params

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise error.TestNAError("Test dependency failed")

    # Report virt test version
    logging.info(version.get_pretty_version_info())
    # Report the parameters we've received and write them as keyvals
    logging.info("Starting test %s", self.tag)
    logging.debug("Test parameters:")
    keys = params.keys()
    keys.sort()
    for key in keys:
        logging.debug("    %s = %s", key, params[key])

    # Warn of this special condition in related location in output & logs
    if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
        logging.warning("")
        logging.warning("Testing with nettype='user' while running "
                        "as root may produce unexpected results!!!")
        logging.warning("")

    # Open the environment file
    env_filename = os.path.join(
        data_dir.get_backend_dir(params.get("vm_type")),
        params.get("env", "env"))
    env = utils_env.Env(env_filename, self.env_version)

    test_passed = False
    t_types = None
    t_type = None

    try:
        try:
            try:
                subtest_dirs = []
                other_subtests_dirs = params.get("other_tests_dirs", "")
                for d in other_subtests_dirs.split():
                    d = os.path.join(*d.split("/"))
                    subtestdir = os.path.join(self.bindir, d, "tests")
                    if not os.path.isdir(subtestdir):
                        raise error.TestError("Directory %s does not "
                                              "exist" % (subtestdir))
                    subtest_dirs += data_dir.SubdirList(subtestdir,
                                                        bootstrap.test_filter)

                provider = params.get("provider", None)

                if provider is None:
                    # Verify if we have the correspondent source file for it
                    for generic_subdir in asset.get_test_provider_subdirs(
                            'generic'):
                        subtest_dirs += data_dir.SubdirList(
                            generic_subdir, bootstrap.test_filter)

                    for specific_subdir in asset.get_test_provider_subdirs(
                            params.get("vm_type")):
                        subtest_dirs += data_dir.SubdirList(
                            specific_subdir, bootstrap.test_filter)
                else:
                    provider_info = asset.get_test_provider_info(provider)
                    for key in provider_info['backends']:
                        subtest_dirs += data_dir.SubdirList(
                            provider_info['backends'][key]['path'],
                            bootstrap.test_filter)

                subtest_dir = None

                # Get the test routine corresponding to the specified
                # test type
                logging.debug("Searching for test modules that match "
                              "'type = %s' and 'provider = %s' "
                              "on this cartesian dict",
                              params.get("type"),
                              params.get("provider", None))

                t_types = params.get("type").split()
                # Make sure we can load provider_lib in tests
                for s in subtest_dirs:
                    if os.path.dirname(s) not in sys.path:
                        sys.path.insert(0, os.path.dirname(s))

                test_modules = {}
                for t_type in t_types:
                    for d in subtest_dirs:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            logging.debug("Found subtest module %s",
                                          module_path)
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        msg = ("Could not find test file %s.py on test "
                               "dirs %s" % (t_type, subtest_dirs))
                        raise error.TestError(msg)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_modules[t_type] = imp.load_module(t_type, f, p, d)
                    f.close()

                # Preprocess
                try:
                    params = env_process.preprocess(self, params, env)
                finally:
                    env.save()

                # Run the test function
                for t_type in t_types:
                    test_module = test_modules[t_type]
                    run_func = utils_misc.get_test_entrypoint_func(
                        t_type, test_module)
                    try:
                        run_func(self, params, env)
                        self.verify_background_errors()
                    finally:
                        env.save()
                test_passed = True
                error_message = funcatexit.run_exitfuncs(env, t_type)
                if error_message:
                    raise error.TestWarn("funcatexit failed with: %s" %
                                         error_message)

            except Exception, e:
                if t_type is not None:
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        logging.error(error_message)
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    env.save()
                raise

        finally:
            # Postprocess
            try:
                try:
                    env_process.postprocess(self, params, env)
                except Exception, e:
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                env.save()

    except Exception, e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "qemu":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("It has a %s monitor unix socket at: %s",
                                 m.protocol, m.filename)
                logging.info("The command line used to start it was:\n%s",
                             vm.make_qemu_command())
            raise error.JobError("Abort requested (%s)" % e)
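
# --------------------------------------------------------------------------
# Illustrative sketch: funcatexit.run_exitfuncs(env, t_type) above drains
# cleanup callbacks that tests registered while running. Assuming
# atexit-like semantics (the real registry is keyed per env and test type),
# a minimal registry of roughly this shape (hypothetical names) would be:
class ExitFuncRegistry(object):

    def __init__(self):
        # Maps a test type to a list of (func, args, kwargs) tuples.
        self._funcs = {}

    def register(self, t_type, func, *args, **kwargs):
        self._funcs.setdefault(t_type, []).append((func, args, kwargs))

    def run_exitfuncs(self, t_type):
        """Run callbacks LIFO; return accumulated error text (or "")."""
        error_message = ""
        for func, args, kwargs in reversed(self._funcs.pop(t_type, [])):
            try:
                func(*args, **kwargs)
            except Exception, details:
                error_message += "Error in %s: %s\n" % (func.__name__,
                                                        details)
        return error_message
# --------------------------------------------------------------------------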
def run_once(self):
    params = self.params

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise error.TestNAError("Test dependency failed")

    # Report virt test version
    logging.info(version.get_pretty_version_info())
    # Report the parameters we've received and write them as keyvals
    logging.info("Starting test %s", self.tag)
    logging.debug("Test parameters:")
    keys = params.keys()
    keys.sort()
    for key in keys:
        logging.debug("    %s = %s", key, params[key])

    # Open the environment file
    env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                params.get("env", "env"))
    env = utils_env.Env(env_filename, self.env_version)

    test_passed = False
    t_types = None

    try:
        try:
            try:
                subtest_dirs = []

                other_subtests_dirs = params.get("other_tests_dirs", "")
                for d in other_subtests_dirs.split():
                    d = os.path.join(*d.split("/"))
                    subtestdir = os.path.join(self.bindir, d, "tests")
                    if not os.path.isdir(subtestdir):
                        raise error.TestError("Directory %s does not "
                                              "exist" % (subtestdir))
                    subtest_dirs += data_dir.SubdirList(
                        subtestdir, bootstrap.test_filter)

                # Verify if we have the correspondent source file for it
                subtest_dirs += data_dir.SubdirList(
                    self.testdir, bootstrap.test_filter)
                specific_testdir = os.path.join(self.bindir,
                                                params.get("vm_type"),
                                                "tests")
                subtest_dirs += data_dir.SubdirList(
                    specific_testdir, bootstrap.test_filter)
                subtest_dir = None

                # Get the test routine corresponding to the specified
                # test type
                logging.debug("Searching for test modules that match "
                              "param 'type = %s' on this cartesian dict",
                              params.get("type"))
                t_types = params.get("type").split()
                test_modules = {}
                for t_type in t_types:
                    for d in subtest_dirs:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            logging.debug("Found subtest module %s",
                                          module_path)
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        msg = ("Could not find test file %s.py on test "
                               "dirs %s" % (t_type, subtest_dirs))
                        raise error.TestError(msg)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_modules[t_type] = imp.load_module(t_type, f, p, d)
                    f.close()

                # Preprocess
                try:
                    params = env_process.preprocess(self, params, env)
                finally:
                    env.save()

                # Run the test function
                for t_type, test_module in test_modules.items():
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                        self.verify_background_errors()
                    finally:
                        env.save()
                test_passed = True
                error_message = funcatexit.run_exitfuncs(env, t_type)
                if error_message:
                    raise error.TestWarn("funcatexit failed with: %s" %
                                         error_message)

            except Exception, e:
                if t_type is not None:
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        logging.error(error_message)
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    env.save()
                raise

        finally:
            # Postprocess
            try:
                try:
                    env_process.postprocess(self, params, env)
                except Exception, e:
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                env.save()

    except Exception, e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "qemu":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("It has a %s monitor unix socket at: %s",
                                 m.protocol, m.filename)
                logging.info("The command line used to start it was:\n%s",
                             vm.make_qemu_command())
            raise error.JobError("Abort requested (%s)" % e)
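
# --------------------------------------------------------------------------
# Illustrative sketch: stripped of details, every variant above follows the
# same nested try/finally skeleton. The point of the nesting is that the
# postprocess hook and the env checkpoint always run, and a postprocessing
# failure never masks the test's own failure. With the hooks abstracted as
# callables (hypothetical names), the skeleton is:
import logging


def run_with_cleanup(preprocess, run_test, postprocess, on_error, save_env):
    test_passed = False
    try:
        try:
            try:
                preprocess()           # may raise
                run_test()             # may raise
                test_passed = True
            except Exception:
                on_error()             # error-path hook runs first...
                raise                  # ...then the failure propagates
        finally:
            try:
                postprocess()          # always runs
            except Exception, e:
                if test_passed:
                    raise              # only surface this if the test passed
                logging.error("Exception raised during "
                              "postprocessing: %s", e)
    finally:
        save_env()                     # state is checkpointed no matter what
# --------------------------------------------------------------------------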
def run_once(self, params):
    # Convert params to a Params object
    params = utils_misc.Params(params)

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise error.TestNAError("Test dependency failed")

    # Report the parameters we've received and write them as keyvals
    logging.debug("Test parameters:")
    keys = params.keys()
    keys.sort()
    for key in keys:
        logging.debug("    %s = %s", key, params[key])
        self.write_test_keyval({key: params[key]})

    # Set the log file dir for the logging mechanism used by kvm_subprocess
    # (this must be done before unpickling env)
    utils_misc.set_log_file_dir(self.debugdir)

    # Open the environment file
    env_filename = os.path.join(self.bindir, params.get("env", "env"))
    env = utils_misc.Env(env_filename, self.env_version)

    test_passed = False

    try:
        try:
            try:
                subtest_dirs = []
                tests_dir = self.job.testdir

                other_subtests_dirs = params.get("other_tests_dirs", "")
                for d in other_subtests_dirs.split():
                    subtestdir = os.path.join(tests_dir, d, "tests")
                    if not os.path.isdir(subtestdir):
                        raise error.TestError("Directory %s does not "
                                              "exist." % (subtestdir))
                    subtest_dirs.append(subtestdir)

                # Verify if we have the correspondent source file for it
                virt_dir = os.path.dirname(utils_misc.__file__)
                subtest_dirs.append(os.path.join(virt_dir, "tests"))
                subtest_dirs.append(os.path.join(self.bindir, "tests"))
                subtest_dir = None

                # Get the test routine corresponding to the specified
                # test type
                t_types = params.get("type").split()
                test_modules = []
                for t_type in t_types:
                    for d in subtest_dirs:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        msg = ("Could not find test file %s.py on tests "
                               "dirs %s" % (t_type, subtest_dirs))
                        raise error.TestError(msg)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_modules.append(
                        (t_type, imp.load_module(t_type, f, p, d)))
                    f.close()

                # Preprocess
                try:
                    env_process.preprocess(self, params, env)
                finally:
                    env.save()

                # Run the test function
                for t_type, test_module in test_modules:
                    msg = "Running function: %s.run_%s()" % (t_type, t_type)
                    logging.info(msg)
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                    finally:
                        env.save()
                test_passed = True

            except Exception, e:
                logging.error("Test failed: %s: %s",
                              e.__class__.__name__, e)
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    env.save()
                raise

        finally:
            # Postprocess
            try:
                try:
                    env_process.postprocess(self, params, env)
                except Exception, e:
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                env.save()

    except Exception, e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "kvm":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("'%s' has a %s monitor unix socket at: %s",
                                 vm.name, m.protocol, m.filename)
                logging.info("The command line used to start '%s' was:\n%s",
                             vm.name, vm.make_qemu_command())
            raise error.JobError("Abort requested (%s)" % e)
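
# --------------------------------------------------------------------------
# Illustrative sketch: utils_misc.Params above wraps the plain params dict
# coming from the cartesian config. The exact API is the library's; this is
# only an assumed minimal shape covering the behavior used in this file
# (dict lookups plus space-separated list values):
class Params(dict):
    """Assumed dict wrapper for cartesian-config parameters."""

    def objects(self, key):
        # "vms = vm1 vm2" style values become lists: ["vm1", "vm2"]
        return self.get(key, "").split()

    def object_params(self, obj_name):
        # Collapse "key_<obj_name> = value" entries into plain "key = value"
        # for the given object, keeping generic keys as fallbacks.
        suffix = "_" + obj_name
        new_dict = self.copy()
        for key in self:
            if key.endswith(suffix):
                new_dict[key[:-len(suffix)]] = self[key]
        return Params(new_dict)
# --------------------------------------------------------------------------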
def run_once(self):
    params = self.params

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise error.TestNAError("Test dependency failed")

    # Report virt test version
    logging.info(version.get_pretty_version_info())
    # Report the parameters we've received and write them as keyvals
    logging.info("Starting test %s", self.tag)
    logging.debug("Test parameters:")
    keys = params.keys()
    keys.sort()
    for key in keys:
        logging.debug("    %s = %s", key, params[key])

    # Open the environment file
    env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                params.get("env", "env"))
    env = utils_env.Env(env_filename, self.env_version)

    test_passed = False
    t_types = None

    try:
        try:
            try:
                subtest_dirs = []

                other_subtests_dirs = params.get("other_tests_dirs", "")
                for d in other_subtests_dirs.split():
                    d = os.path.join(*d.split("/"))
                    subtestdir = os.path.join(self.bindir, d, "tests")
                    if not os.path.isdir(subtestdir):
                        raise error.TestError("Directory %s does not "
                                              "exist" % (subtestdir))
                    subtest_dirs += data_dir.SubdirList(subtestdir,
                                                        bootstrap.test_filter)

                # Verify if we have the correspondent source file for it
                subtest_dirs += data_dir.SubdirList(self.testdir,
                                                    bootstrap.test_filter)
                specific_testdir = os.path.join(self.bindir,
                                                params.get("vm_type"),
                                                "tests")
                subtest_dirs += data_dir.SubdirList(specific_testdir,
                                                    bootstrap.test_filter)
                subtest_dir = None

                # Get the test routine corresponding to the specified
                # test type
                logging.debug("Searching for test modules that match "
                              "param 'type = %s' on this cartesian dict",
                              params.get("type"))
                t_types = params.get("type").split()
                test_modules = {}
                for t_type in t_types:
                    for d in subtest_dirs:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            logging.debug("Found subtest module %s",
                                          module_path)
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        msg = ("Could not find test file %s.py on test "
                               "dirs %s" % (t_type, subtest_dirs))
                        raise error.TestError(msg)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_modules[t_type] = imp.load_module(t_type, f, p, d)
                    f.close()

                # Preprocess
                try:
                    params = env_process.preprocess(self, params, env)
                finally:
                    env.save()

                # Run the test function
                for t_type, test_module in test_modules.items():
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                        self.verify_background_errors()
                    finally:
                        env.save()
                test_passed = True
                error_message = funcatexit.run_exitfuncs(env, t_type)
                if error_message:
                    raise error.TestWarn("funcatexit failed with: %s" %
                                         error_message)

            except Exception, e:
                if t_type is not None:
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        logging.error(error_message)
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    env.save()
                raise

        finally:
            # Postprocess
            try:
                try:
                    # Local modification: the stock harness calls
                    #     env_process.postprocess(self, params, env)
                    # unconditionally; FT_kvm guests are torn down by hand
                    # instead.
                    if params.get("vm_type") != "FT_kvm":
                        env_process.postprocess(self, params, env)
                    else:
                        aexpect.kill_tail_threads()
                        vms = env.get_all_vms()
                        for vm in vms:
                            vm.destroy()
                except Exception, e:
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                env.save()

    except Exception, e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "qemu":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("It has a %s monitor unix socket at: %s",
                                 m.protocol, m.filename)
                logging.info("The command line used to start it was:\n%s",
                             vm.make_qemu_command())
            raise error.JobError("Abort requested (%s)" % e)
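
# --------------------------------------------------------------------------
# Illustrative sketch: the FT_kvm branch in the variant above replaces
# env_process.postprocess() with a manual teardown. Factored out as a
# standalone helper (hypothetical name; reuses the harness's aexpect module
# and Env object), the manual path reduces to:
def manual_teardown(env):
    # aexpect keeps background threads tailing guest/serial logs; stop them
    # first so vm.destroy() does not race with the log readers.
    aexpect.kill_tail_threads()
    for vm in env.get_all_vms():
        vm.destroy()
# --------------------------------------------------------------------------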
def run_once(self, params):
    # Convert params to a Params object
    params = utils_misc.Params(params)

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise error.TestNAError("Test dependency failed")

    # Report the parameters we've received and write them as keyvals
    logging.debug("Test parameters:")
    keys = params.keys()
    keys.sort()
    for key in keys:
        logging.debug("    %s = %s", key, params[key])
        self.write_test_keyval({key: params[key]})

    # Set the log file dir for the logging mechanism used by kvm_subprocess
    # (this must be done before unpickling env)
    utils_misc.set_log_file_dir(self.debugdir)

    # Open the environment file
    env_filename = os.path.join(self.bindir, params.get("env", "env"))
    env = utils_misc.Env(env_filename, self.env_version)

    test_passed = False

    try:
        try:
            try:
                subtest_dirs = []
                tests_dir = self.job.testdir

                other_subtests_dirs = params.get("other_tests_dirs", "")
                for d in other_subtests_dirs.split():
                    subtestdir = os.path.join(tests_dir, d, "tests")
                    if not os.path.isdir(subtestdir):
                        raise error.TestError("Directory %s does not "
                                              "exist." % (subtestdir))
                    subtest_dirs.append(subtestdir)

                # Verify if we have the correspondent source file for it
                virt_dir = os.path.dirname(utils_misc.__file__)
                subtest_dirs.append(os.path.join(virt_dir, "tests"))
                subtest_dirs.append(os.path.join(self.bindir, "tests"))
                subtest_dir = None

                # Get the test routine corresponding to the specified
                # test type
                t_types = params.get("type").split()
                test_modules = []
                for t_type in t_types:
                    for d in subtest_dirs:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        msg = ("Could not find test file %s.py on tests "
                               "dirs %s" % (t_type, subtest_dirs))
                        raise error.TestError(msg)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_modules.append((t_type,
                                         imp.load_module(t_type, f, p, d)))
                    f.close()

                # Preprocess
                try:
                    env_process.preprocess(self, params, env)
                finally:
                    env.save()

                # Run the test function
                for t_type, test_module in test_modules:
                    msg = "Running function: %s.run_%s()" % (t_type, t_type)
                    logging.info(msg)
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                    finally:
                        env.save()
                test_passed = True

            except Exception, e:
                logging.error("Test failed: %s: %s",
                              e.__class__.__name__, e)
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    env.save()
                raise

        finally:
            # Postprocess
            try:
                try:
                    env_process.postprocess(self, params, env)
                except Exception, e:
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                env.save()

    except Exception, e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "kvm":
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("'%s' has a %s monitor unix socket at: %s",
                                 vm.name, m.protocol, m.filename)
                logging.info("The command line used to start '%s' was:\n%s",
                             vm.name, vm.make_qemu_command())
            raise error.JobError("Abort requested (%s)" % e)
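
# --------------------------------------------------------------------------
# Illustrative sketch: all of the variants above resolve the test entry
# point by naming convention. The older ones call
# getattr(module, "run_%s" % t_type); the newest goes through
# utils_misc.get_test_entrypoint_func, which presumably also accepts a
# plain run() function. A minimal assumed resolver (hypothetical name):
def get_entrypoint(t_type, test_module):
    # Prefer a modern plain run(test, params, env) entry point, then fall
    # back to the legacy run_<type>(test, params, env) convention.
    if hasattr(test_module, "run"):
        return test_module.run
    return getattr(test_module, "run_%s" % t_type)
# --------------------------------------------------------------------------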