Example 1
def parse_args(args):
    """Parse in command-line and config file options.

    Command line arguments have the highest priority, then user configs specified in ~/.ducktape/config, and finally
    project configs specified in <ducktape_dir>/config.
    """

    parser = create_ducktape_parser()

    if len(args) == 0:
        # Show help if there are no arguments
        parser.print_help()
        sys.exit(0)

    # Collect arguments from project config file, user config file, and command line
    # later arguments supersede earlier arguments
    args_list = []

    project_config_file = ConsoleDefaults.PROJECT_CONFIG_FILE
    if os.path.exists(project_config_file):
        args_list.extend(config_file_to_args_list(project_config_file))

    user_config_file = get_user_config_file(args)
    if os.path.exists(user_config_file):
        args_list.extend(config_file_to_args_list(user_config_file))

    args_list.extend(args)
    parsed_args_dict = vars(parser.parse_args(args_list))

    if parsed_args_dict["version"]:
        print(ducktape_version())
        sys.exit(0)

    return parsed_args_dict
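
The precedence described in the docstring falls out of argparse's behavior: when the same option appears more than once in the final list, the last occurrence wins, so command-line values override the user config, which overrides the project config. config_file_to_args_list is not shown above; the sketch below is one plausible implementation, and the one-option-per-line file format is an assumption for illustration, not the actual ducktape config syntax.

def config_file_to_args_list_sketch(path):
    """Hypothetical: flatten a config file into an argv-style list for argparse."""
    args_list = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            # Assumed format: blank lines and '#' comments are skipped,
            # every other line is e.g. "--cluster-size 3"
            if line and not line.startswith("#"):
                args_list.extend(line.split())
    return args_list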
Example 2
    def to_json(self):
        if self.run_time_seconds == 0 or len(self.cluster) == 0:
            # If things go horribly wrong, the test run may be effectively instantaneous
            # Let's handle this case gracefully, and avoid divide-by-zero
            cluster_utilization = 0
            parallelism = 0
        else:
            cluster_utilization = (1.0 / len(self.cluster)) * (1.0 / self.run_time_seconds) * \
                sum([r.nodes_used * r.run_time_seconds for r in self])
            parallelism = sum([r.run_time_seconds for r in self._results]) / self.run_time_seconds

        return {
            "ducktape_version": ducktape_version(),
            "session_context": self.session_context,
            "run_time_seconds": self.run_time_seconds,
            "start_time": self.start_time,
            "stop_time": self.stop_time,
            "run_time_statistics": self._stats([r.run_time_seconds for r in self]),
            "cluster_nodes_used": self._stats([r.nodes_used for r in self]),
            "cluster_nodes_allocated": self._stats([r.nodes_allocated for r in self]),
            "cluster_utilization": cluster_utilization,
            "cluster_num_nodes": len(self.cluster),
            "num_passed": self.num_passed,
            "num_failed": self.num_failed,
            "num_ignored": self.num_ignored,
            "parallelism": parallelism,
            "results": [r for r in self._results]
        }
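
To make the two formulas concrete: cluster_utilization is the node-seconds actually consumed by tests divided by the node-seconds available over the whole run, and parallelism is the summed per-test run time divided by wall-clock run time. A small hypothetical computation (all numbers invented for illustration):

# Hypothetical: a 10-node cluster, a 100-second session, three test results
cluster_num_nodes = 10
run_time_seconds = 100.0
results = [(4, 60.0), (2, 30.0), (6, 50.0)]  # (nodes_used, run_time_seconds) per test

cluster_utilization = sum(n * t for n, t in results) / (cluster_num_nodes * run_time_seconds)
parallelism = sum(t for _, t in results) / run_time_seconds

print(cluster_utilization)  # 0.6 -> 60% of available node-seconds were in use
print(parallelism)          # 1.4 -> on average 1.4 tests were running at once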
Example 3
    def format_report(self):
        template = pkg_resources.resource_string(__name__, '../templates/report/report.html')

        num_tests = len(self.results)
        num_passes = 0
        result_string = ""
        for result in self.results:
            if result.test_status == PASS:
                num_passes += 1
            result_string += json.dumps(self.format_result(result))
            result_string += ","

        args = {
            'ducktape_version': ducktape_version(),
            'num_tests': num_tests,
            'num_passes': self.results.num_passed,
            'num_failures': self.results.num_failed,
            'num_ignored': self.results.num_ignored,
            'run_time': format_time(self.results.run_time_seconds),
            'session': self.results.session_context.session_id,
            'tests': result_string,
            'test_status_names': ",".join(["\'%s\'" % str(status) for status in [PASS, FAIL, IGNORE]])
        }

        html = template % args
        report_html = os.path.join(self.results.session_context.results_dir, "report.html")
        with open(report_html, "w") as fp:
            fp.write(html)

        report_css = os.path.join(self.results.session_context.results_dir, "report.css")
        report_css_origin = pkg_resources.resource_filename(__name__, '../templates/report/report.css')
        shutil.copy2(report_css_origin, report_css)
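
The html = template % args line uses Python's old-style formatting with named placeholders, so the dict keys assembled above must match %(name)s markers in report.html. A minimal standalone illustration (the HTML fragment is invented, not the real template):

template = "<h1>Session %(session)s</h1><p>%(num_passes)d of %(num_tests)d tests passed</p>"
args = {"session": "2016-01-01--001", "num_passes": 12, "num_tests": 15}
print(template % args)
# <h1>Session 2016-01-01--001</h1><p>12 of 15 tests passed</p>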
Example 4
    def header_string(self):
        """Header lines of the report"""
        header_lines = [
            "=" * self.width, "SESSION REPORT (ALL TESTS)",
            "ducktape version: %s" % ducktape_version(),
            "session_id:       %s" % self.results.session_context.session_id,
            "run time:         %s" %
            format_time(self.results.run_time_seconds),
            "tests run:        %d" % len(self.results),
            "passed:           %d" % self.results.num_passed,
            "failed:           %d" % self.results.num_failed,
            "ignored:          %d" % self.results.num_ignored, "=" * self.width
        ]

        return "\n".join(header_lines)
Example 5
    def header_string(self):
        """Header lines of the report"""
        header_lines = [
            "=" * self.width,
            "SESSION REPORT (ALL TESTS)",
            "ducktape version: %s" % ducktape_version(),
            "session_id:       %s" % self.results.session_context.session_id,
            "run time:         %s" % format_time(self.results.run_time_seconds),
            "tests run:        %d" % len(self.results),
            "passed:           %d" % self.results.num_passed,
            "failed:           %d" % self.results.num_failed,
            "ignored:          %d" % self.results.num_ignored,
            "=" * self.width
        ]

        return "\n".join(header_lines)
Example 6
    def format_report(self):
        template = pkg_resources.resource_string(__name__, '../templates/report/report.html')

        num_tests = len(self.results)
        num_passes = 0
        failed_result_string = ""
        passed_result_string = ""
        ignored_result_string = ""

        for result in self.results:
            json_string = json.dumps(self.format_result(result))
            if result.test_status == PASS:
                num_passes += 1
                passed_result_string += json_string
                passed_result_string += ","
            elif result.test_status == FAIL:
                failed_result_string += json_string
                failed_result_string += ","
            else:
                ignored_result_string += json_string
                ignored_result_string += ","

        args = {
            'ducktape_version': ducktape_version(),
            'num_tests': num_tests,
            'num_passes': self.results.num_passed,
            'num_failures': self.results.num_failed,
            'num_ignored': self.results.num_ignored,
            'run_time': format_time(self.results.run_time_seconds),
            'session': self.results.session_context.session_id,
            'passed_tests': passed_result_string,
            'failed_tests': failed_result_string,
            'ignored_tests': ignored_result_string,
            'test_status_names': ",".join(["\'%s\'" % str(status) for status in [PASS, FAIL, IGNORE]])
        }

        html = template % args
        report_html = os.path.join(self.results.session_context.results_dir, "report.html")
        with open(report_html, "w") as fp:
            fp.write(html)

        report_css = os.path.join(self.results.session_context.results_dir, "report.css")
        report_css_origin = pkg_resources.resource_filename(__name__, '../templates/report/report.css')
        shutil.copy2(report_css_origin, report_css)
Example 7
    def to_json(self):
        if self.run_time_seconds == 0 or len(self.cluster) == 0:
            # If things go horribly wrong, the test run may be effectively instantaneous
            # Let's handle this case gracefully, and avoid divide-by-zero
            cluster_utilization = 0
            parallelism = 0
        else:
            cluster_utilization = (1.0 / len(self.cluster)) * (1.0 / self.run_time_seconds) * \
                sum([r.nodes_used * r.run_time_seconds for r in self])
            parallelism = sum([r.run_time_seconds for r in self._results]) / self.run_time_seconds

        result = {
            "ducktape_version": ducktape_version(),
            "session_context": self.session_context,
            "run_time_seconds": self.run_time_seconds,
            "start_time": self.start_time,
            "stop_time": self.stop_time,
            "run_time_statistics": self._stats([r.run_time_seconds for r in self]),
            "cluster_nodes_used": self._stats([r.nodes_used for r in self]),
            "cluster_nodes_allocated": self._stats([r.nodes_allocated for r in self]),
            "cluster_utilization": cluster_utilization,
            "cluster_num_nodes": len(self.cluster),
            "num_passed": self.num_passed,
            "num_failed": self.num_failed,
            "num_ignored": self.num_ignored,
            "parallelism": parallelism,
            "results": [r for r in self._results]
        }
        if self.num_flaky:
            result['num_flaky'] = self.num_flaky
        return result
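
Note that 'num_flaky' is added only when it is non-zero, so older reports without flaky-test support keep the same JSON shape. Consumers of this dict should therefore read the key defensively; a small hypothetical example:

report = {"num_passed": 10, "num_failed": 1}  # hypothetical report without the optional key
num_flaky = report.get("num_flaky", 0)        # defaults to 0 when 'num_flaky' is absent
print(num_flaky)  # 0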
Example 8
    def format_report(self):
        template = pkg_resources.resource_string(
            __name__, '../templates/report/report.html').decode('utf-8')

        num_tests = len(self.results)
        num_passes = 0
        failed_result_string = []
        passed_result_string = []
        ignored_result_string = []
        flaky_result_string = []

        for result in self.results:
            json_string = json.dumps(self.format_result(result))
            if result.test_status == PASS:
                num_passes += 1
                passed_result_string.append(json_string)
                passed_result_string.append(",")
            elif result.test_status == FAIL:
                failed_result_string.append(json_string)
                failed_result_string.append(",")
            elif result.test_status == IGNORE:
                ignored_result_string.append(json_string)
                ignored_result_string.append(",")
            elif result.test_status == FLAKY:
                flaky_result_string.append(json_string)
                flaky_result_string.append(",")
            else:
                raise Exception("Unknown test status in report: {}".format(
                    result.test_status.to_json()))

        args = {
            'ducktape_version': ducktape_version(),
            'num_tests': num_tests,
            'num_passes': self.results.num_passed,
            'num_flaky': self.results.num_flaky,
            'num_failures': self.results.num_failed,
            'num_ignored': self.results.num_ignored,
            'run_time': format_time(self.results.run_time_seconds),
            'session': self.results.session_context.session_id,
            'passed_tests': "".join(passed_result_string),
            'flaky_tests': "".join(flaky_result_string),
            'failed_tests': "".join(failed_result_string),
            'ignored_tests': "".join(ignored_result_string),
            'test_status_names': ",".join(["'%s'" % str(status) for status in [PASS, FAIL, IGNORE, FLAKY]])
        }

        html = template % args
        report_html = os.path.join(self.results.session_context.results_dir,
                                   "report.html")
        with open(report_html, "w") as fp:
            fp.write(html)

        report_css = os.path.join(self.results.session_context.results_dir,
                                  "report.css")
        report_css_origin = pkg_resources.resource_filename(
            __name__, '../templates/report/report.css')
        shutil.copy2(report_css_origin, report_css)
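
Compared with the earlier version, the per-status result strings are now accumulated in lists and joined once: repeated str += copies the growing string on every iteration, while a single "".join is linear in the total output size. A minimal sketch of the same pattern, with a hypothetical helper name:

import json

def join_results_sketch(json_strings):
    # Collect the pieces in a list and join once at the end,
    # instead of concatenating strings inside the loop.
    pieces = []
    for s in json_strings:
        pieces.append(s)
        pieces.append(",")
    return "".join(pieces)

print(join_results_sketch([json.dumps({"name": "t1"}), json.dumps({"name": "t2"})]))
# {"name": "t1"},{"name": "t2"},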
Example 9
def main():
    """Ducktape entry point. This contains top level logic for ducktape command-line program which does the following:

        Discover tests
        Initialize cluster for distributed services
        Run tests
        Report a summary of all results
    """
    args = parse_args()
    if args.version:
        print(ducktape_version())
        sys.exit(0)

    # Make .ducktape directory where metadata such as the last used session_id is stored
    if not os.path.isdir(ConsoleConfig.METADATA_DIR):
        os.makedirs(ConsoleConfig.METADATA_DIR)

    # Generate a shared 'global' identifier for this test run and create the directory
    # in which all test results will be stored
    session_id = generate_session_id(ConsoleConfig.SESSION_ID_FILE)
    results_dir = generate_results_dir(args.results_root, session_id)

    setup_results_directory(args.results_root, results_dir)
    session_context = SessionContext(session_id, results_dir, cluster=None, args=args)
    for k, v in vars(args).items():
        session_context.logger.debug("Configuration: %s=%s", k, v)

    # Discover and load tests to be run
    extend_import_paths(args.test_path)
    loader = TestLoader(session_context)
    try:
        tests = loader.discover(args.test_path)
    except LoaderException as e:
        print "Failed while trying to discover tests: {}".format(e)
        sys.exit(1)

    if args.collect_only:
        print "Collected %d tests:" % len(tests)
        for test in tests:
            print "    " + str(test)
        sys.exit(0)

    # Initializing the cluster is slow, so do so only if
    # tests are sure to be run
    try:
        (cluster_mod_name, cluster_class_name) = args.cluster.rsplit('.', 1)
        cluster_mod = importlib.import_module(cluster_mod_name)
        cluster_class = getattr(cluster_mod, cluster_class_name)
        session_context.cluster = cluster_class()
    except Exception:
        print("Failed to load cluster: ", str(sys.exc_info()[0]))
        print(traceback.format_exc(limit=16))
        sys.exit(1)

    # Run the tests
    runner = SerialTestRunner(session_context, tests)
    test_results = runner.run_all_tests()

    # Report results
    # TODO command-line hook for type of reporter
    reporter = SimpleStdoutSummaryReporter(test_results)
    reporter.report()
    reporter = SimpleFileSummaryReporter(test_results)
    reporter.report()

    # Generate HTML reporter
    reporter = HTMLSummaryReporter(test_results)
    reporter.report()

    if not test_results.get_aggregate_success():
        sys.exit(1)
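
The cluster implementation is loaded dynamically from a dotted path such as 'package.module.ClassName': the class name is split off with rsplit, the module is imported, and the class is looked up with getattr. A standalone sketch of the same pattern, using a standard-library class so it runs anywhere; the dotted path is only an illustration, not a real ducktape cluster:

import importlib

def load_class(dotted_path):
    # "some.module.ClassName" -> import some.module, return its ClassName attribute
    module_name, class_name = dotted_path.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

OrderedDict = load_class("collections.OrderedDict")
print(OrderedDict([("a", 1)]))  # an OrderedDict with the single entry 'a' -> 1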
Example 10
def main():
    """Ducktape entry point. This contains top level logic for ducktape command-line program which does the following:

        Discover tests
        Initialize cluster for distributed services
        Run tests
        Report a summary of all results
    """
    args = parse_args()
    if args.version:
        print(ducktape_version())
        sys.exit(0)

    # Make .ducktape directory where metadata such as the last used session_id is stored
    if not os.path.isdir(ConsoleConfig.METADATA_DIR):
        os.makedirs(ConsoleConfig.METADATA_DIR)

    # Generate a shared 'global' identifier for this test run and create the directory
    # in which all test results will be stored
    session_id = generate_session_id(ConsoleConfig.SESSION_ID_FILE)
    results_dir = generate_results_dir(args.results_root, session_id)

    setup_results_directory(args.results_root, results_dir)
    session_context = SessionContext(session_id,
                                     results_dir,
                                     cluster=None,
                                     args=args)
    for k, v in vars(args).items():
        session_context.logger.debug("Configuration: %s=%s", k, v)

    # Discover and load tests to be run
    extend_import_paths(args.test_path)
    loader = TestLoader(session_context)
    try:
        tests = loader.discover(args.test_path)
    except LoaderException as e:
        print "Failed while trying to discover tests: {}".format(e)
        sys.exit(1)

    if args.collect_only:
        print "Collected %d tests:" % len(tests)
        for test in tests:
            print "    " + str(test)
        sys.exit(0)

    # Initializing the cluster is slow, so do so only if
    # tests are sure to be run
    try:
        (cluster_mod_name, cluster_class_name) = args.cluster.rsplit('.', 1)
        cluster_mod = importlib.import_module(cluster_mod_name)
        cluster_class = getattr(cluster_mod, cluster_class_name)
        session_context.cluster = cluster_class()
    except Exception:
        print("Failed to load cluster: ", str(sys.exc_info()[0]))
        print(traceback.format_exc(limit=16))
        sys.exit(1)

    # Run the tests
    runner = SerialTestRunner(session_context, tests)
    test_results = runner.run_all_tests()

    # Report results
    # TODO command-line hook for type of reporter
    reporter = SimpleStdoutSummaryReporter(test_results)
    reporter.report()
    reporter = SimpleFileSummaryReporter(test_results)
    reporter.report()

    # Generate HTML reporter
    reporter = HTMLSummaryReporter(test_results)
    reporter.report()

    if not test_results.get_aggregate_success():
        sys.exit(1)