Example 1
    def check_close_logger(self):
        """Check that calling close_logger properly cleans up resources."""
        initial_open_files = open_files()

        n_handles = 100
        log_maker = DummyFileLoggerMaker(self.temp_dir, n_handles)
        # accessing logger attribute lazily triggers configuration of logger
        the_logger = log_maker.logger

        assert len(open_files()) == len(initial_open_files) + n_handles
        close_logger(the_logger)
        assert len(open_files()) == len(initial_open_files)
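
For context, the check above leans on two helpers that are not shown: open_files(), which lists the file handles the current process holds, and DummyFileLoggerMaker, whose logger property lazily configures a logger backed by several file handlers. A minimal sketch of what they might look like (an assumption for illustration, using psutil for handle inspection, not necessarily how the project implements them):

import logging
import os

import psutil


def open_files():
    # Assumed helper: file handles currently held by this process.
    return psutil.Process(os.getpid()).open_files()


class DummyFileLoggerMaker:
    """Hypothetical maker: configures a logger with n_handles file handlers on first access."""

    def __init__(self, log_dir, n_handles):
        self.log_dir = log_dir
        self.n_handles = n_handles
        self._logger = None

    @property
    def logger(self):
        # Lazily attach one FileHandler per requested handle; each opens its file immediately.
        if self._logger is None:
            self._logger = logging.getLogger("dummy-file-logger")
            for i in range(self.n_handles):
                path = os.path.join(self.log_dir, "log-%d" % i)
                self._logger.addHandler(logging.FileHandler(path))
        return self._logger

With handlers built this way, each FileHandler holds an open descriptor until it is closed, which is what the assertions before and after close_logger are counting.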
Example 2
    def close(self):
        """Release resources, etc."""
        if hasattr(self, "services"):
            for service in self.services:
                service.close()
            # Remove reference to services. This is important to prevent potential memory leaks if users write services
            # which themselves have references to large memory-intensive objects
            del self.services

        # Remove local scratch directory
        if self._local_scratch_dir and os.path.exists(self._local_scratch_dir):
            shutil.rmtree(self._local_scratch_dir)

        # Release file handles held by logger
        if self._logger:
            close_logger(self._logger)
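
The close_logger call at the end is what actually releases the logger's file handles. A plausible sketch of it (only a sketch of the idea, not necessarily the shipped implementation) closes every handler and detaches it from the logger:

def close_logger(logger):
    """Close and detach all handlers so the logger no longer holds open file handles."""
    for handler in list(logger.handlers):
        handler.close()
        logger.removeHandler(handler)

Iterating over a copy of logger.handlers matters here, because removeHandler mutates the list being looped over.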
Example 3
def main():
    """Ducktape entry point. This contains top level logic for ducktape command-line program which does the following:

        Discover tests
        Initialize cluster for distributed services
        Run tests
        Report a summary of all results
    """
    args_dict = parse_args(sys.argv[1:])

    injected_args = None
    if args_dict["parameters"]:
        try:
            injected_args = json.loads(args_dict["parameters"])
        except ValueError as e:
            print("parameters are not valid json: " + str(e))
            sys.exit(1)

    args_dict["globals"] = get_user_defined_globals(args_dict.get("globals"))

    # Make .ducktape directory where metadata such as the last used session_id is stored
    if not os.path.isdir(ConsoleDefaults.METADATA_DIR):
        os.makedirs(ConsoleDefaults.METADATA_DIR)

    # Generate a shared 'global' identifier for this test run and create the directory
    # in which all test results will be stored
    session_id = generate_session_id(ConsoleDefaults.SESSION_ID_FILE)
    results_dir = generate_results_dir(args_dict["results_root"], session_id)
    setup_results_directory(results_dir)

    session_context = SessionContext(session_id=session_id,
                                     results_dir=results_dir,
                                     **args_dict)
    session_logger = SessionLoggerMaker(session_context).logger
    for k, v in iteritems(args_dict):
        session_logger.debug("Configuration: %s=%s", k, v)

    # Discover and load tests to be run
    extend_import_paths(args_dict["test_path"])
    loader = TestLoader(session_context,
                        session_logger,
                        repeat=args_dict["repeat"],
                        injected_args=injected_args,
                        subset=args_dict["subset"],
                        subsets=args_dict["subsets"])
    try:
        tests = loader.load(args_dict["test_path"])
    except LoaderException as e:
        print("Failed while trying to discover tests: {}".format(e))
        sys.exit(1)

    if args_dict["collect_only"]:
        print("Collected %d tests:" % len(tests))
        for test in tests:
            print("    " + str(test))
        sys.exit(0)

    if args_dict["sample"]:
        print("Running a sample of %d tests" % args_dict["sample"])
        try:
            tests = random.sample(tests, args_dict["sample"])
        except ValueError as e:
            if args_dict["sample"] > len(tests):
                print(
                    "sample size %d greater than number of tests %d; running all tests"
                    % (args_dict["sample"], len(tests)))
            else:
                print("invalid sample size (%s), running all tests" % e)

    # Initializing the cluster is slow, so do so only if
    # tests are sure to be run
    try:
        (cluster_mod_name,
         cluster_class_name) = args_dict["cluster"].rsplit('.', 1)
        cluster_mod = importlib.import_module(cluster_mod_name)
        cluster_class = getattr(cluster_mod, cluster_class_name)
        cluster = cluster_class(cluster_file=args_dict["cluster_file"])
        for ctx in tests:
            # Note that we're attaching a reference to cluster
            # only after test context objects have been instantiated
            ctx.cluster = cluster
    except Exception:
        print("Failed to load cluster: ", str(sys.exc_info()[0]))
        print(traceback.format_exc(limit=16))
        sys.exit(1)

    # Run the tests
    runner = TestRunner(cluster, session_context, session_logger, tests)
    test_results = runner.run_all_tests()

    # Report results
    reporters = [
        SimpleStdoutSummaryReporter(test_results),
        SimpleFileSummaryReporter(test_results),
        HTMLSummaryReporter(test_results),
        JSONReporter(test_results)
    ]

    for r in reporters:
        r.report()

    update_latest_symlink(args_dict["results_root"], results_dir)
    close_logger(session_logger)
    if not test_results.get_aggregate_success():
        # Non-zero exit if at least one test failed
        sys.exit(1)
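
One detail worth isolating from main() is how the cluster implementation is chosen: args_dict["cluster"] is a dotted path that is split into a module name and a class name, imported, and instantiated at runtime. The same idiom in isolation, with a standard-library class standing in so the snippet runs anywhere:

import importlib

# Resolve a class from a dotted path, mirroring the cluster-loading step above.
# "logging.StreamHandler" is only a stand-in; ducktape would receive a cluster class path here.
dotted_path = "logging.StreamHandler"
mod_name, class_name = dotted_path.rsplit('.', 1)
mod = importlib.import_module(mod_name)
cls = getattr(mod, class_name)
instance = cls()
print(instance)  # e.g. <StreamHandler <stderr> (NOTSET)>

Because the lookup can fail in several ways (bad path, import error, missing attribute, constructor error), main() wraps the whole block in a broad except, prints a truncated traceback, and exits rather than trying to distinguish the cases.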
Example 4
def main():
    """Ducktape entry point. This contains top level logic for ducktape command-line program which does the following:

        Discover tests
        Initialize cluster for distributed services
        Run tests
        Report a summary of all results
    """
    args_dict = parse_args(sys.argv[1:])

    injected_args = None
    if args_dict["parameters"]:
        try:
            injected_args = json.loads(args_dict["parameters"])
        except ValueError as e:
            print "parameters are not valid json: " + str(e.message)
            sys.exit(1)

    args_dict["globals"] = get_user_defined_globals(args_dict.get("globals"))

    # Make .ducktape directory where metadata such as the last used session_id is stored
    if not os.path.isdir(ConsoleDefaults.METADATA_DIR):
        os.makedirs(ConsoleDefaults.METADATA_DIR)

    # Generate a shared 'global' identifier for this test run and create the directory
    # in which all test results will be stored
    session_id = generate_session_id(ConsoleDefaults.SESSION_ID_FILE)
    results_dir = generate_results_dir(args_dict["results_root"], session_id)
    setup_results_directory(results_dir)

    session_context = SessionContext(session_id=session_id, results_dir=results_dir, **args_dict)
    session_logger = SessionLoggerMaker(session_context).logger
    for k, v in args_dict.items():
        session_logger.debug("Configuration: %s=%s", k, v)

    # Discover and load tests to be run
    extend_import_paths(args_dict["test_path"])
    loader = TestLoader(session_context, session_logger, repeat=args_dict["repeat"], injected_args=injected_args,
                        subset=args_dict["subset"], subsets=args_dict["subsets"])
    try:
        tests = loader.load(args_dict["test_path"])
    except LoaderException as e:
        print "Failed while trying to discover tests: {}".format(e)
        sys.exit(1)

    if args_dict["collect_only"]:
        print "Collected %d tests:" % len(tests)
        for test in tests:
            print "    " + str(test)
        sys.exit(0)

    # Initializing the cluster is slow, so do so only if
    # tests are sure to be run
    try:
        (cluster_mod_name, cluster_class_name) = args_dict["cluster"].rsplit('.', 1)
        cluster_mod = importlib.import_module(cluster_mod_name)
        cluster_class = getattr(cluster_mod, cluster_class_name)
        cluster = cluster_class(cluster_file=args_dict["cluster_file"])
        for ctx in tests:
            # Note that we're attaching a reference to cluster
            # only after test context objects have been instantiated
            ctx.cluster = cluster
    except Exception:
        print "Failed to load cluster: ", str(sys.exc_info()[0])
        print traceback.format_exc(limit=16)
        sys.exit(1)

    # Run the tests
    runner = TestRunner(cluster, session_context, session_logger, tests)
    test_results = runner.run_all_tests()

    # Report results
    reporters = [
        SimpleStdoutSummaryReporter(test_results),
        SimpleFileSummaryReporter(test_results),
        HTMLSummaryReporter(test_results),
        JSONReporter(test_results)
    ]

    for r in reporters:
        r.report()

    update_latest_symlink(args_dict["results_root"], results_dir)
    close_logger(session_logger)
    if not test_results.get_aggregate_success():
        # Non-zero exit if at least one test failed
        sys.exit(1)