def get_modules(testfile, tests_to_run, config_overrides):
  """
  Load every module and object required to run the test suite.

  :param testfile: path to the suite's input file (parsed by _parse_input)
  :param tests_to_run: optional collection of test names; when not None only
      tests whose name appears in it are kept
  :param config_overrides: config values overriding those loaded from disk
  :return: tuple of (deployment module, perf/dynamic-config module,
      ordered tests, master config, configs)
  """
  test_dic = _parse_input(testfile)
  master_config, configs = load_configs_from_directory(
      test_dic["configs_directory"], config_overrides)
  _setup_paths(master_config.mapping.get("additional_paths", []))

  deployment_module = utils.load_module(test_dic["deployment_code"])
  # new-style inputs use "dynamic_configuration_code" in place of the
  # legacy "perf_code" key
  if "dynamic_configuration_code" in test_dic:
    perf_key = "dynamic_configuration_code"
  else:
    perf_key = "perf_code"
  perf_module = utils.load_module(test_dic[perf_key])

  test_modules = [utils.load_module(code) for code in test_dic["test_code"]]
  discovered = _determine_tests(test_modules)
  if tests_to_run is None:
    tests = list(discovered)
  else:
    tests = [test for test in discovered if test.name in tests_to_run]

  # Group tests by phase.  Tests in the default phase run serially first;
  # each remaining phase is appended as a list (a group), ordered by phase.
  tests_with_phases = {}
  for test in tests:
    tests_with_phases.setdefault(test.phase, []).append(test)
  serial_tests = tests_with_phases.pop(constants.DEFAULT_TEST_PHASE, [])
  phased_groups = [group for _, group in
                   sorted(tests_with_phases.items(),
                          key=lambda pair: pair[0])]
  tests = serial_tests + phased_groups
  return deployment_module, perf_module, tests, master_config, configs
def test_determine_tests_read_correct_number_of_tests_in_file(self):
  """
  Test that we can read the correct number of tests from each sample file.
  """
  test_directory = os.path.join(self.FILE_LOCATION, "samples/determine_tests")
  test_files = [name for name in os.listdir(test_directory)
                if os.path.splitext(name)[1] == ".py"]
  for testfile in test_files:
    module = utils.load_module(os.path.join(test_directory, testfile))
    tests = [test.name
             for test in test_runner_helper._determine_tests([module])]
    if any(tag in testfile for tag in ("empty", "invalid", "no_test")):
      self.assertEqual(len(tests), 0)
    elif any(tag in testfile for tag in ("single", "no_validate", "no_match")):
      self.assertEqual(len(tests), 1)
      self.assertTrue("test0" in tests)
    elif "multi" in testfile:
      self.assertEqual(len(tests), 2)
      for index in range(2):
        self.assertTrue("test{0}".format(index) in tests)
    else:
      # sample file matched none of the known naming patterns
      print(testfile + " was not used as test input")
def call_main(args):
  """
  Run the test suite described by already-parsed command-line arguments.

  :param args: parsed argparse namespace (testfile, machine_list,
      config_overrides, output_dir, log levels, user/password options, ...)
  :return: tuple of (success count, fail count) from the test runner
  :raises ValueError: when argument processing or runner setup fails
  """
  # Get output directory.
  try:
    if args.output_dir is not None:
      runtime.set_output_dir(args.output_dir)
  except ValueError as e:
    print(str(e))
    raise

  # Set up logging.
  setup_logging(runtime.get_output_dir(), args.log_level, args.console_level)
  logger = logging.getLogger("zopkio")
  logger.info("Starting zopkio")

  try:
    utils.check_file_with_exception(args.testfile)
    utils.check_testfile_dir_structure(args.testfile)
    machines = utils.make_machine_mapping(args.machine_list)
    config_overrides = utils.parse_config_list(args.config_overrides)
  except ValueError as e:
    logger.error(str(e))
    print("Error in processing command line arguments:\n {0}".format(
        traceback.format_exc()))
    raise

  runtime.set_machines(machines)
  if args.user is not None:
    user = args.user
  else:
    user = getpass.getuser()
  if args.nopassword:
    password = ""
  else:
    password = getpass.getpass()
  runtime.set_user(user, password)

  try:
    testmodule = utils.load_module(args.testfile)
    ztestsuites = [getattr(testmodule, attr)
                   for attr in dir(testmodule)
                   if isinstance(getattr(testmodule, attr), ZTestSuite)]
    if len(ztestsuites) > 0:  #TODO(jehrlich) intelligently handle multiple test suites
      test_runner = TestRunner(ztestsuite=ztestsuites[0],
                               testlist=args.test_list,
                               config_overrides=config_overrides)
    else:
      test_runner = TestRunner(args.testfile, args.test_list, config_overrides)
  except BaseException as e:
    print("Error setting up testrunner:\n%s" % traceback.format_exc())
    # BUG FIX: e.message is deprecated, unset on many exception types, and
    # removed in Python 3 -- str(e) is always available.
    raise ValueError(str(e))

  test_runner.run()
  logger.info("Exiting zopkio")
  return test_runner.success_count(), test_runner.fail_count()
def test_determine_tests_read_correct_total_number_of_tests_in_files(self):
  """
  Tests that _determine_tests works for a list of modules at once.
  """
  test_directory = os.path.join(self.FILE_LOCATION, "samples/determine_tests")
  module_paths = [os.path.join(test_directory, name)
                  for name in os.listdir(test_directory)
                  if os.path.splitext(name)[1] == ".py"]
  modules = [utils.load_module(path) for path in module_paths]
  discovered = test_runner_helper._determine_tests(modules)
  # count via the iterator; the sample directory holds 5 tests in total
  self.assertEqual(sum(1 for _ in discovered), 5)
def call_main(args):
  """
  Set up the runtime from parsed arguments and execute the test suite.

  :param args: parsed argparse namespace (testfile, machine_list,
      config_overrides, output_dir, log levels, user/password options, ...)
  :return: tuple of (success count, fail count) from the test runner
  :raises ValueError: when argument processing or runner setup fails
  """
  # Get output directory.
  try:
    if args.output_dir is not None:
      runtime.set_output_dir(args.output_dir)
  except ValueError as e:
    print(str(e))
    raise

  # Set up logging.
  setup_logging(runtime.get_output_dir(), args.log_level, args.console_level)
  logger = logging.getLogger("zopkio")
  logger.info("Starting zopkio")

  try:
    utils.check_file_with_exception(args.testfile)
    utils.check_testfile_dir_structure(args.testfile)
    machines = utils.make_machine_mapping(args.machine_list)
    config_overrides = utils.parse_config_list(args.config_overrides)
  except ValueError as e:
    logger.error(str(e))
    print("Error in processing command line arguments:\n {0}".format(
        traceback.format_exc()))
    raise

  runtime.set_machines(machines)
  if args.user is not None:
    user = args.user
  else:
    user = getpass.getuser()
  if args.nopassword:
    password = ""
  else:
    password = getpass.getpass()
  runtime.set_user(user, password)

  try:
    testmodule = utils.load_module(args.testfile)
    ztestsuites = [getattr(testmodule, attr)
                   for attr in dir(testmodule)
                   if isinstance(getattr(testmodule, attr), ZTestSuite)]
    if len(ztestsuites) > 0:  #TODO(jehrlich) intelligently handle multiple test suites
      test_runner = TestRunner(ztestsuite=ztestsuites[0],
                               testlist=args.test_list,
                               config_overrides=config_overrides)
    else:
      test_runner = TestRunner(args.testfile, args.test_list, config_overrides)
  except BaseException as e:
    print("Error setting up testrunner:\n%s" % traceback.format_exc())
    # BUG FIX: was ValueError(e.message); BaseException.message is deprecated
    # and absent on many exception types (removed in Python 3).
    raise ValueError(str(e))

  test_runner.run()
  logger.info("Exiting zopkio")
  return test_runner.success_count(), test_runner.fail_count()
def test_determine_tests_generates_correct_test_object(self):
  """
  Tests that _determine_tests builds a correctly-populated test object.
  """
  sample_path = os.path.join(self.FILE_LOCATION, "samples/determine_tests",
                             "meta_test_single.py")
  module = utils.load_module(sample_path)
  iterator = test_runner_helper._determine_tests([module])
  test = next(iterator)
  # the single test in the sample must pair test0 with validate0
  self.assertEqual(test.name, "test0")
  self.assertTrue(test.validation_function is not None)
  self.assertEqual(test.validation_function.__name__, "validate0")
def test_directory_setup_makes_correct_directories(self):
  """
  Tests that directory_setup creates the expected report/result/log dirs.
  """
  perf_module = utils.load_module(
      os.path.join(self.FILE_LOCATION, "samples/sample_perf.py"))
  dir_info = test_runner_helper.directory_setup(
      os.path.join(self.FILE_LOCATION, "samples/sample_input.py"),
      perf_module,
      configobj.Config("Master", {}))
  self.assertTrue(os.path.isdir(runtime.get_reports_dir()))
  # report name is derived from the input file's base name
  self.assertTrue("sample_input" in dir_info["report_name"])
  for key in ("results_dir", "logs_dir"):
    self.assertTrue(os.path.isdir(dir_info[key]))
  # clean up everything the call created on disk
  shutil.rmtree(dir_info["results_dir"])
  shutil.rmtree(dir_info["logs_dir"])
def _parse_input(testfile):
  """
  Extract and validate the test-suite dictionary from the input file.

  :param testfile: path to a .py file exposing a ``test`` dict attribute,
      or a .json file containing the dictionary
  :return: the validated test dictionary
  :raises ValueError: on an unsupported extension, an unexpected key set,
      or a referenced file/directory that does not exist
  """
  ext = os.path.splitext(testfile)[-1].lower()
  if ext == ".py":
    test_dic = utils.load_module(testfile).test
  elif ext == ".json":
    # BUG FIX: the original read via a bare open() and leaked the handle;
    # use a context manager and parse the stream directly.
    with open(testfile) as json_file:
      test_dic = json.load(json_file)
  else:
    logger.critical(testfile + " is not supported; currently only supports python and json files")
    raise ValueError("currently only supports python and json files")

  # checking test_dic to see if it has valid keys and values
  if len(test_dic) != 4:
    logger.critical("input requires four fields: deployment_code, test_code, perf_code, configs_directory")
    raise ValueError("input requires four fields: deployment_code, test_code, perf_code, configs_directory")
  # the "perf_code" key was renamed "dynamic_configuration_code"; both
  # layouts are accepted
  old_valid_key_list = ["deployment_code", "test_code", "perf_code", "configs_directory"]
  new_valid_key_list = ["deployment_code", "test_code", "dynamic_configuration_code", "configs_directory"]
  keys = set(test_dic.keys())
  if keys != set(old_valid_key_list) and keys != set(new_valid_key_list):
    logger.critical("input requires four fields: deployment_code, test_code, dynamic_configuration_code, configs_directory")
    raise ValueError("input requires four fields: deployment_code, test_code, dynamic_configuration_code, configs_directory")

  # every referenced file/directory must exist
  utils.check_file_with_exception(test_dic["deployment_code"])
  for filename in test_dic["test_code"]:
    utils.check_file_with_exception(filename)
  if "dynamic_configuration_code" in test_dic:
    utils.check_file_with_exception(test_dic["dynamic_configuration_code"])
  else:
    utils.check_file_with_exception(test_dic["perf_code"])
  utils.check_dir_with_exception(test_dic["configs_directory"])
  return test_dic
def test_determine_tests_read_correct_number_of_tests_in_file(self):
  """
  Test that we can read the correct number of tests from each test file.
  """
  test_directory = os.path.join(self.FILE_LOCATION, "samples/determine_tests")
  for testfile in os.listdir(test_directory):
    if os.path.splitext(testfile)[1] != ".py":
      continue
    test_module = utils.load_module(os.path.join(test_directory, testfile))
    tests = [test.name
             for test in test_runner_helper._determine_tests([test_module])]
    # map the file-name pattern to the number of tests it should contain
    if "empty" in testfile or "invalid" in testfile or "no_test" in testfile:
      expected = 0
    elif ("single" in testfile or "no_validate" in testfile
          or "no_match" in testfile):
      expected = 1
    elif "multi" in testfile:
      expected = 2
    else:
      print(testfile + " was not used as test input")
      continue
    self.assertEqual(len(tests), expected)
    for index in range(expected):
      self.assertTrue("test{0}".format(index) in tests)
def main():
  """
  Parse command line arguments and then run the test suite.
  """
  parser = argparse.ArgumentParser(description='A distributed test framework')
  parser.add_argument('testfile',
                      help='The file that is used to determine the test suite run')
  parser.add_argument('--test-only', nargs='*', dest='test_list',
                      help='run only the named tests to help debug broken tests')
  parser.add_argument('--machine-list', nargs='*', dest='machine_list',
                      help='''mapping of logical host names to physical names allowing the same test suite to run on different hardware, each argument is a pair of logical name and physical name separated by a =''')
  parser.add_argument('--config-overrides', nargs='*', dest='config_overrides',
                      help='''config overrides at execution time, each argument is a config with its value separated by a =. This has the highest priority of all configs''')
  parser.add_argument('-d', '--output-dir', dest='output_dir',
                      help='''Directory to write output files and logs. Defaults to the current directory.''')
  parser.add_argument("--log-level", dest="log_level", default="INFO",
                      help="Log level (default INFO)")
  parser.add_argument("--console-log-level", dest="console_level", default="ERROR",
                      help="Console Log level (default ERROR)")
  parser.add_argument("--nopassword", action='store_true', dest="nopassword",
                      help="Disable password prompt")
  parser.add_argument("--user", dest="user",
                      help="user to run the test as (defaults to current user)")
  args = parser.parse_args()

  # Get output directory.
  try:
    if args.output_dir is not None:
      runtime.set_output_dir(args.output_dir)
  except ValueError as e:
    print(str(e))
    sys.exit(1)

  # Set up logging.
  setup_logging(runtime.get_output_dir(), args.log_level, args.console_level)
  logger = logging.getLogger("zopkio")
  logger.info("Starting zopkio")

  try:
    utils.check_file_with_exception(args.testfile)
    utils.check_testfile_dir_structure(args.testfile)
    machines = utils.make_machine_mapping(args.machine_list)
    config_overrides = utils.parse_config_list(args.config_overrides)
  except ValueError as e:
    logger.error(str(e))
    print("Error in processing command line arguments:\n {0}".format(traceback.format_exc()))
    sys.exit(1)

  runtime.set_machines(machines)
  user = args.user if args.user is not None else getpass.getuser()
  password = "" if args.nopassword else getpass.getpass()
  runtime.set_user(user, password)

  try:
    testmodule = utils.load_module(args.testfile)
    ztestsuites = [getattr(testmodule, attr)
                   for attr in dir(testmodule)
                   if isinstance(getattr(testmodule, attr), ZTestSuite)]
    if len(ztestsuites) > 0:  #TODO(jehrlich) intelligently handle multiple test suites
      test_runner = TestRunner(ztestsuite=ztestsuites[0],
                               testlist=args.test_list,
                               config_overrides=config_overrides)
    else:
      test_runner = TestRunner(args.testfile, args.test_list, config_overrides)
  except BaseException:
    print("Error setting up testrunner:\n%s" % traceback.format_exc())
    sys.exit(1)

  test_runner.run()
  logger.info("Exiting zopkio")
def main():
  """
  Parse command line arguments and then run the test suite.
  """
  parser = argparse.ArgumentParser(description='A distributed test framework')
  parser.add_argument(
      'testfile',
      help='The file that is used to determine the test suite run')
  parser.add_argument(
      '--test-only', nargs='*', dest='test_list',
      help='run only the named tests to help debug broken tests')
  parser.add_argument(
      '--machine-list', nargs='*', dest='machine_list',
      help='''mapping of logical host names to physical names allowing the same test suite to run on different hardware, each argument is a pair of logical name and physical name separated by a =''')
  parser.add_argument(
      '--config-overrides', nargs='*', dest='config_overrides',
      help='''config overrides at execution time, each argument is a config with its value separated by a =. This has the highest priority of all configs''')
  parser.add_argument(
      '-d', '--output-dir', dest='output_dir',
      help='''Directory to write output files and logs. Defaults to the current directory.''')
  parser.add_argument("--log-level", dest="log_level",
                      help="Log level (default INFO)", default="INFO")
  parser.add_argument("--console-log-level", dest="console_level",
                      help="Console Log level (default ERROR)", default="ERROR")
  parser.add_argument("--nopassword", action='store_true', dest="nopassword",
                      help="Disable password prompt")
  parser.add_argument("--user", dest="user",
                      help="user to run the test as (defaults to current user)")
  args = parser.parse_args()

  # Get output directory.
  try:
    if args.output_dir is not None:
      runtime.set_output_dir(args.output_dir)
  except ValueError as e:
    print(str(e))
    sys.exit(1)

  # Set up logging.
  setup_logging(runtime.get_output_dir(), args.log_level, args.console_level)
  logger = logging.getLogger("zopkio")
  logger.info("Starting zopkio")

  try:
    utils.check_file_with_exception(args.testfile)
    utils.check_testfile_dir_structure(args.testfile)
    machines = utils.make_machine_mapping(args.machine_list)
    config_overrides = utils.parse_config_list(args.config_overrides)
  except ValueError as e:
    logger.error(str(e))
    print("Error in processing command line arguments:\n {0}".format(
        traceback.format_exc()))
    sys.exit(1)

  runtime.set_machines(machines)
  if args.user is not None:
    user = args.user
  else:
    user = getpass.getuser()
  if args.nopassword:
    password = ""
  else:
    password = getpass.getpass()
  runtime.set_user(user, password)

  try:
    testmodule = utils.load_module(args.testfile)
    ztestsuites = [getattr(testmodule, attr)
                   for attr in dir(testmodule)
                   if isinstance(getattr(testmodule, attr), ZTestSuite)]
    if len(ztestsuites) > 0:  #TODO(jehrlich) intelligently handle multiple test suites
      test_runner = TestRunner(ztestsuite=ztestsuites[0],
                               testlist=args.test_list,
                               config_overrides=config_overrides)
    else:
      test_runner = TestRunner(args.testfile, args.test_list, config_overrides)
  except BaseException:
    print("Error setting up testrunner:\n%s" % traceback.format_exc())
    sys.exit(1)

  test_runner.run()
  logger.info("Exiting zopkio")