def check_empty_args(self):
    """Check that parsing an empty args list results in printing a usage
    message, followed by sys.exit(0).
    """
    exited = False
    try:
        with Capturing() as captured:
            parse_args([])
    except SystemExit as e:
        exited = True
        assert e.code == 0
        assert captured.output.find("usage") >= 0
    # FIX: previously the test silently passed if parse_args([]) did not
    # exit at all — require that SystemExit was actually raised.
    assert exited, "parse_args([]) should raise SystemExit"
def check_version(self):
    """If --version is present, ducktape should print version and exit."""
    exited = False
    try:
        with Capturing() as captured:
            parse_args(["--version"])
    except SystemExit as e:
        exited = True
        assert e.code == 0
        # FIX: use a raw string — "[\d]" in a plain string is an invalid
        # escape sequence (SyntaxWarning on modern Pythons). "\d+" matches
        # the same as "[\d]+".
        assert re.search(r"\d+\.\d+\.\d+", captured.output) is not None
    # FIX: fail explicitly if --version did not cause an exit; previously
    # the test silently passed in that case.
    assert exited, 'parse_args(["--version"]) should raise SystemExit'
def check_multiple_test_paths(self):
    """Check that the parser gathers every positional "test path" argument
    into a list under the "test_path" key.
    """
    # Single path mixed in with option flags.
    single = ["path1"]
    parsed = parse_args(["--debug"] + single + ["--collect-only"])
    assert parsed["test_path"] == single

    # Ten paths, surrounded by options on both sides.
    many = ["path%d" % idx for idx in range(10)]
    cmdline = ["--cluster-file", "my-cluster-file"]
    cmdline.extend(many)
    cmdline.extend(["--debug", "--exit-first"])
    assert parse_args(cmdline)["test_path"] == many
def check_multiple_test_paths(self):
    """Check that the parser captures all positional "test path" arguments
    as a list, regardless of surrounding option flags.
    """
    # Each case: (options before paths, the paths, options after paths).
    cases = [
        (["--debug"], ["path1"], ["--collect-only"]),
        (["--cluster-file", "my-cluster-file"],
         ["path%d" % n for n in range(10)],
         ["--debug", "--exit-first"]),
    ]
    for before, paths, after in cases:
        parsed = parse_args(before + paths + after)
        assert parsed["test_path"] == paths
def check_empty_test_path(self):
    """Check that default test_path is an array consisting of cwd."""
    parsed = parse_args(["--collect-only"])
    # Normalize to absolute paths before comparing against cwd.
    resolved = [os.path.abspath(path) for path in parsed["test_path"]]
    expected = [os.path.abspath('.')]
    assert resolved == expected
def check_config_file_option(self):
    """Check that the --config-file option loads option values from the
    named file. Writes a temp config, parses with --config-file, and
    verifies the values made it into the parsed dict.
    """
    # FIX: drop dir="/tmp" — mkdtemp() already uses the platform temp
    # location, so the test no longer assumes a POSIX /tmp directory.
    tmpdir = tempfile.mkdtemp()
    user_cfg_filename = os.path.join(tmpdir, "ducktape-user.cfg")
    user_cfg = [
        "--results-root RESULTSROOT-user",
        "--parameters PARAMETERS-user"
    ]
    try:
        with open(user_cfg_filename, "w") as user_f:
            user_f.write("\n".join(user_cfg))

        args_dict = parse_args(["--config-file", user_cfg_filename])
        assert args_dict["results_root"] == "RESULTSROOT-user"
        assert args_dict["parameters"] == "PARAMETERS-user"
    finally:
        # Always clean up the temp dir, even if an assertion fails.
        shutil.rmtree(tmpdir)
def check_config_overrides(self, monkeypatch):
    """Check that parsed arguments pick up values from config files, and that
    overrides match precedence: command line > user config > project config.
    """
    # FIX: drop dir="/tmp" — mkdtemp() already uses the platform temp
    # location, so the test no longer assumes a POSIX /tmp directory.
    tmpdir = tempfile.mkdtemp()

    # Create tmp files for project-level and user-level config
    project_cfg_filename = os.path.join(tmpdir, "ducktape-project.cfg")
    user_cfg_filename = os.path.join(tmpdir, "ducktape-user.cfg")
    project_cfg = [
        "--cluster-file CLUSTERFILE-project",
        "--results-root RESULTSROOT-project",
        "--parameters PARAMETERS-project"
    ]
    # user_cfg options should override project_cfg
    user_cfg = [
        "--results-root RESULTSROOT-user",
        "--parameters PARAMETERS-user"
    ]
    try:
        # Point the defaults at our temp config files for this test only.
        monkeypatch.setattr(
            "ducktape.command_line.defaults.ConsoleDefaults.PROJECT_CONFIG_FILE",
            project_cfg_filename)
        monkeypatch.setattr(
            "ducktape.command_line.defaults.ConsoleDefaults.USER_CONFIG_FILE",
            user_cfg_filename)

        with open(project_cfg_filename, "w") as project_f:
            project_f.write("\n".join(project_cfg))
        with open(user_cfg_filename, "w") as user_f:
            user_f.write("\n".join(user_cfg))

        # command-line options should override user_cfg and project_cfg
        args_dict = parse_args(["--parameters", "PARAMETERS-commandline"])
        assert args_dict["cluster_file"] == "CLUSTERFILE-project"
        assert args_dict["results_root"] == "RESULTSROOT-user"
        assert args_dict["parameters"] == "PARAMETERS-commandline"
    finally:
        # Always clean up the temp dir, even if an assertion fails.
        shutil.rmtree(tmpdir)
def check_config_overrides(self, monkeypatch):
    """Check that parsed arguments pick up values from config files, and that
    overrides match precedence: command line > user config > project config.
    """
    # FIX: drop dir="/tmp" — mkdtemp() already uses the platform temp
    # location, so the test no longer assumes a POSIX /tmp directory.
    tmpdir = tempfile.mkdtemp()

    # Create tmp files for project-level and user-level config
    project_cfg_filename = os.path.join(tmpdir, "ducktape-project.cfg")
    user_cfg_filename = os.path.join(tmpdir, "ducktape-user.cfg")
    project_cfg = [
        "--cluster-file CLUSTERFILE-project",
        "--results-root RESULTSROOT-project",
        "--parameters PARAMETERS-project"
    ]
    # user_cfg options should override project_cfg
    user_cfg = [
        "--results-root RESULTSROOT-user",
        "--parameters PARAMETERS-user"
    ]
    try:
        # Point the defaults at our temp config files for this test only.
        monkeypatch.setattr("ducktape.command_line.defaults.ConsoleDefaults.PROJECT_CONFIG_FILE",
                            project_cfg_filename)
        monkeypatch.setattr("ducktape.command_line.defaults.ConsoleDefaults.USER_CONFIG_FILE",
                            user_cfg_filename)

        with open(project_cfg_filename, "w") as project_f:
            project_f.write("\n".join(project_cfg))
        with open(user_cfg_filename, "w") as user_f:
            user_f.write("\n".join(user_cfg))

        # command-line options should override user_cfg and project_cfg
        args_dict = parse_args(["--parameters", "PARAMETERS-commandline"])
        assert args_dict["cluster_file"] == "CLUSTERFILE-project"
        assert args_dict["results_root"] == "RESULTSROOT-user"
        assert args_dict["parameters"] == "PARAMETERS-commandline"
    finally:
        # Always clean up the temp dir, even if an assertion fails.
        shutil.rmtree(tmpdir)
def main(): """Ducktape entry point. This contains top level logic for ducktape command-line program which does the following: Discover tests Initialize cluster for distributed services Run tests Report a summary of all results """ args_dict = parse_args(sys.argv[1:]) parameters = None if args_dict["parameters"]: try: parameters = json.loads(args_dict["parameters"]) except ValueError as e: print "parameters are not valid json: " + str(e.message) sys.exit(1) args_dict["globals"] = get_user_defined_globals(args_dict.get("globals")) # Make .ducktape directory where metadata such as the last used session_id is stored if not os.path.isdir(ConsoleDefaults.METADATA_DIR): os.makedirs(ConsoleDefaults.METADATA_DIR) # Generate a shared 'global' identifier for this test run and create the directory # in which all test results will be stored session_id = generate_session_id(ConsoleDefaults.SESSION_ID_FILE) results_dir = generate_results_dir(args_dict["results_root"], session_id) setup_results_directory(results_dir) session_context = SessionContext(session_id=session_id, results_dir=results_dir, **args_dict) for k, v in args_dict.iteritems(): session_context.logger.debug("Configuration: %s=%s", k, v) # Discover and load tests to be run extend_import_paths(args_dict["test_path"]) loader = TestLoader(session_context, parameters) try: tests = loader.discover(args_dict["test_path"]) except LoaderException as e: print "Failed while trying to discover tests: {}".format(e) sys.exit(1) if args_dict["collect_only"]: print "Collected %d tests:" % len(tests) for test in tests: print " " + str(test) sys.exit(0) # Initializing the cluster is slow, so do so only if # tests are sure to be run try: (cluster_mod_name, cluster_class_name) = args_dict["cluster"].rsplit('.', 1) cluster_mod = importlib.import_module(cluster_mod_name) cluster_class = getattr(cluster_mod, cluster_class_name) session_context.cluster = cluster_class(cluster_file=args_dict["cluster_file"]) except: print "Failed to load 
cluster: ", str(sys.exc_info()[0]) print traceback.format_exc(limit=16) sys.exit(1) # Run the tests runner = SerialTestRunner(session_context, tests) test_results = runner.run_all_tests() # Report results reporter = SimpleStdoutSummaryReporter(test_results) reporter.report() reporter = SimpleFileSummaryReporter(test_results) reporter.report() # Generate HTML reporter reporter = HTMLSummaryReporter(test_results) reporter.report() update_latest_symlink(args_dict["results_root"], results_dir) if not test_results.get_aggregate_success(): sys.exit(1)
def main():
    """Ducktape entry point. This contains top level logic for ducktape command-line program
    which does the following:
        Discover tests
        Initialize cluster for distributed services
        Run tests
        Report a summary of all results

    Exits non-zero on invalid --parameters JSON, test-discovery failure,
    cluster-load failure, or when any test fails; exits 0 after printing the
    collected tests when --collect-only is given.
    """
    args_dict = parse_args(sys.argv[1:])

    # Decode the optional --parameters JSON payload; bad JSON is a hard error.
    injected_args = None
    if args_dict["parameters"]:
        try:
            injected_args = json.loads(args_dict["parameters"])
        except ValueError as e:
            print("parameters are not valid json: " + str(e))
            sys.exit(1)

    args_dict["globals"] = get_user_defined_globals(args_dict.get("globals"))

    # Make .ducktape directory where metadata such as the last used session_id is stored
    if not os.path.isdir(ConsoleDefaults.METADATA_DIR):
        os.makedirs(ConsoleDefaults.METADATA_DIR)

    # Generate a shared 'global' identifier for this test run and create the directory
    # in which all test results will be stored
    session_id = generate_session_id(ConsoleDefaults.SESSION_ID_FILE)
    results_dir = generate_results_dir(args_dict["results_root"], session_id)
    setup_results_directory(results_dir)

    session_context = SessionContext(session_id=session_id, results_dir=results_dir, **args_dict)
    session_logger = SessionLoggerMaker(session_context).logger
    # Log the effective configuration for post-mortem debugging.
    for k, v in iteritems(args_dict):
        session_logger.debug("Configuration: %s=%s", k, v)

    # Discover and load tests to be run
    extend_import_paths(args_dict["test_path"])
    loader = TestLoader(session_context, session_logger, repeat=args_dict["repeat"],
                        injected_args=injected_args, subset=args_dict["subset"],
                        subsets=args_dict["subsets"])
    try:
        tests = loader.load(args_dict["test_path"])
    except LoaderException as e:
        print("Failed while trying to discover tests: {}".format(e))
        sys.exit(1)

    # --collect-only: list what would run, then stop.
    if args_dict["collect_only"]:
        print("Collected %d tests:" % len(tests))
        for test in tests:
            print(" " + str(test))
        sys.exit(0)

    # --sample N: run a random subset of N tests. If N is larger than the
    # number of collected tests (or otherwise invalid), fall back to running
    # all tests rather than aborting.
    if args_dict["sample"]:
        print("Running a sample of %d tests" % args_dict["sample"])
        try:
            tests = random.sample(tests, args_dict["sample"])
        except ValueError as e:
            if args_dict["sample"] > len(tests):
                print("sample size %d greater than number of tests %d; running all tests" %
                      (args_dict["sample"], len(tests)))
            else:
                print("invalid sample size (%s), running all tests" % e)

    # Initializing the cluster is slow, so do so only if
    # tests are sure to be run
    try:
        # args_dict["cluster"] is a dotted path "module.ClassName"; import the
        # module and resolve the cluster class dynamically.
        (cluster_mod_name, cluster_class_name) = args_dict["cluster"].rsplit('.', 1)
        cluster_mod = importlib.import_module(cluster_mod_name)
        cluster_class = getattr(cluster_mod, cluster_class_name)
        cluster = cluster_class(cluster_file=args_dict["cluster_file"])
        for ctx in tests:
            # Note that we're attaching a reference to cluster
            # only after test context objects have been instantiated
            ctx.cluster = cluster
    except Exception:
        print("Failed to load cluster: ", str(sys.exc_info()[0]))
        print(traceback.format_exc(limit=16))
        sys.exit(1)

    # Run the tests
    runner = TestRunner(cluster, session_context, session_logger, tests)
    test_results = runner.run_all_tests()

    # Report results in every supported format.
    reporters = [
        SimpleStdoutSummaryReporter(test_results),
        SimpleFileSummaryReporter(test_results),
        HTMLSummaryReporter(test_results),
        JSONReporter(test_results)
    ]
    for r in reporters:
        r.report()

    update_latest_symlink(args_dict["results_root"], results_dir)
    close_logger(session_logger)

    if not test_results.get_aggregate_success():
        # Non-zero exit if at least one test failed
        sys.exit(1)
def check_multiple_exclude(self):
    """--exclude should accumulate every value passed after it."""
    wanted = ["excluded1", "excluded2"]
    cmdline = ["--collect-only", "--exclude"]
    cmdline += wanted
    cmdline.append("--debug")
    assert parse_args(cmdline)["exclude"] == wanted
def main(): """Ducktape entry point. This contains top level logic for ducktape command-line program which does the following: Discover tests Initialize cluster for distributed services Run tests Report a summary of all results """ args_dict = parse_args(sys.argv[1:]) parameters = None if args_dict["parameters"]: try: parameters = json.loads(args_dict["parameters"]) except ValueError as e: print "parameters are not valid json: " + str(e.message) sys.exit(1) args_dict["globals"] = get_user_defined_globals(args_dict.get("globals")) # Make .ducktape directory where metadata such as the last used session_id is stored if not os.path.isdir(ConsoleDefaults.METADATA_DIR): os.makedirs(ConsoleDefaults.METADATA_DIR) # Generate a shared 'global' identifier for this test run and create the directory # in which all test results will be stored session_id = generate_session_id(ConsoleDefaults.SESSION_ID_FILE) results_dir = generate_results_dir(args_dict["results_root"], session_id) setup_results_directory(results_dir) session_context = SessionContext(session_id=session_id, results_dir=results_dir, **args_dict) for k, v in args_dict.iteritems(): session_context.logger.debug("Configuration: %s=%s", k, v) # Discover and load tests to be run extend_import_paths(args_dict["test_path"]) loader = TestLoader(session_context, parameters) try: tests = loader.discover(args_dict["test_path"]) except LoaderException as e: print "Failed while trying to discover tests: {}".format(e) sys.exit(1) if args_dict["collect_only"]: print "Collected %d tests:" % len(tests) for test in tests: print " " + str(test) sys.exit(0) # Initializing the cluster is slow, so do so only if # tests are sure to be run try: (cluster_mod_name, cluster_class_name) = args_dict["cluster"].rsplit('.', 1) cluster_mod = importlib.import_module(cluster_mod_name) cluster_class = getattr(cluster_mod, cluster_class_name) session_context.cluster = cluster_class( cluster_file=args_dict["cluster_file"]) except: print "Failed to load 
cluster: ", str(sys.exc_info()[0]) print traceback.format_exc(limit=16) sys.exit(1) # Run the tests runner = SerialTestRunner(session_context, tests) test_results = runner.run_all_tests() # Report results reporter = SimpleStdoutSummaryReporter(test_results) reporter.report() reporter = SimpleFileSummaryReporter(test_results) reporter.report() # Generate HTML reporter reporter = HTMLSummaryReporter(test_results) reporter.report() update_latest_symlink(args_dict["results_root"], results_dir) if not test_results.get_aggregate_success(): sys.exit(1)