Example #1
def main():
    start_time = time.time()

    major_version, minor_version = sys.version_info[0:2]

    if major_version < 3 or (major_version == 3 and minor_version < 6):
        println('stem requires python version 3.6 or greater\n')
        sys.exit(1)

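    # log a stack trace if we receive SIGABRT or SIGUSR1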
    signal.signal(signal.SIGABRT, log_traceback)
    signal.signal(signal.SIGUSR1, log_traceback)

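    # load our testing configuration, plus any overrides from STEM_TEST_CONFIG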
    test_config = stem.util.conf.get_config('test')
    test_config.load(os.path.join(test.STEM_BASE, 'test', 'settings.cfg'))

    if 'STEM_TEST_CONFIG' in os.environ:
        test_config.load(os.environ['STEM_TEST_CONFIG'])

    try:
        args = test.arguments.parse(sys.argv[1:])
        test.task.TOR_VERSION.args = (args.tor_path, )
        test.output.SUPPRESS_STDOUT = args.quiet
    except ValueError as exc:
        println(str(exc))
        sys.exit(1)

    if args.print_help:
        println(test.arguments.get_help())
        sys.exit()
    elif not args.run_unit and not args.run_integ:
        println('Nothing to run (for usage provide --help)\n')
        sys.exit()

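    # startup tasks: version checks, cleanup, and kicking off static analysis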
    test.task.run(
        'INITIALISING',
        test.task.STEM_VERSION,
        test.task.TOR_VERSION if args.run_integ else None,
        test.task.PYTHON_VERSION,
        test.task.PLATFORM_VERSION,
        test.task.CRYPTO_VERSION,
        test.task.PYFLAKES_VERSION,
        test.task.PYCODESTYLE_VERSION,
        test.task.CLEAN_PYC,
        test.task.UNUSED_TESTS,
        test.task.IMPORT_TESTS,
        test.task.REMOVE_TOR_DATA_DIR if args.run_integ else None,
        test.task.PYFLAKES_TASK if not args.specific_test else None,
        test.task.PYCODESTYLE_TASK if not args.specific_test else None,
    )

    # Test logging. If '--log-file' is provided we log to that location,
    # otherwise we buffer messages and log to stdout after each test completes.

    logging_buffer = queue.Queue()

    if args.logging_runlevel:
        if args.logging_path:
            handler = logging.FileHandler(args.logging_path, mode='w')
            handler.setLevel(stem.util.log.logging_level(
                args.logging_runlevel))
            handler.setFormatter(stem.util.log.FORMATTER)
        else:
            handler = logging.handlers.QueueHandler(logging_buffer)
            handler.setLevel(stem.util.log.logging_level(
                args.logging_runlevel))

        stem.util.log.get_logger().addHandler(handler)

    # filters for how testing output is displayed

    error_tracker = test.output.ErrorTracker()

    output_filters = (
        error_tracker.get_filter(),
        test.output.runtimes,
        test.output.strip_module,
        test.output.align_results,
        test.output.colorize,
    )

    # Number of tests that we have skipped. Test results only gained a
    # 'skipped' attribute in python 2.7, so getattr() is used as a fallback.

    skipped_tests = 0

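    # kick off our asynchronous integration tests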
    if args.run_integ:
        default_test_dir = stem.util.system.expand_path(
            CONFIG['integ.test_directory'], test.STEM_BASE)
        async_args = test.AsyncTestArgs(default_test_dir, args.tor_path)

        for module_str in stem.util.test_tools.ASYNC_TESTS:
            module = importlib.import_module(module_str.rsplit('.', 1)[0])
            test_classes = [
                v for k, v in module.__dict__.items() if k.startswith('Test')
            ]

            if len(test_classes) != 1:
                print('BUG: Detected multiple tests for %s: %s' %
                      (module_str, ', '.join([str(c) for c in test_classes])))
                sys.exit(1)

            test_classes[0].run_tests(async_args)

    if args.run_unit:
        test.output.print_divider('UNIT TESTS', True)
        error_tracker.set_category('UNIT TEST')

        for test_class in get_unit_tests(args.specific_test,
                                         args.exclude_test):
            run_result = _run_test(args, test_class, args.exclude_test,
                                   output_filters)
            test.output.print_logging(logging_buffer)
            skipped_tests += len(getattr(run_result, 'skipped', []))

        println()

    if args.run_integ:
        test.output.print_divider('INTEGRATION TESTS', True)
        integ_runner = test.runner.get_runner()

        for target in args.run_targets:
            error_tracker.set_category(target)

            try:
                integ_runner.start(target, args.attribute_targets,
                                   args.tor_path)

                println('Running tests...\n', STATUS)

                for test_class in get_integ_tests(args.specific_test,
                                                  args.exclude_test):
                    run_result = _run_test(args, test_class, args.exclude_test,
                                           output_filters)
                    test.output.print_logging(logging_buffer)
                    skipped_tests += len(getattr(run_result, 'skipped', []))

                    if not integ_runner.assert_tor_is_running():
                        # our tor process died

                        error_tracker.register_error()
                        break
            except KeyboardInterrupt:
                println('  aborted starting tor: keyboard interrupt\n', ERROR)
                break
            except ValueError as exc:
                # can arise if there's bad settings.cfg data
                println(str(exc), ERROR)
                break
            except OSError:
                error_tracker.register_error()
            finally:
                println()
                integ_runner.stop()
                println()

                # We should have joined on all threads. If not, that indicates a
                # leak which is likely a bug and could disrupt further targets.

                active_threads = threading.enumerate()

                if len(active_threads) > 1:
                    println('Threads lingering after test run:', ERROR)

                    for lingering_thread in active_threads:
                        println('  %s' % lingering_thread, ERROR)

                    break

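    # gather any issues the pyflakes and pycodestyle tasks found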
    static_check_issues = {}

    for task in (test.task.PYFLAKES_TASK, test.task.PYCODESTYLE_TASK):
        if not task.is_available and task.unavailable_msg:
            println(task.unavailable_msg, ERROR)
        else:
            task.join()  # no-op if these haven't been run

            if task.result:
                for path, issues in task.result.items():
                    for issue in issues:
                        static_check_issues.setdefault(path, []).append(issue)

    _print_static_issues(static_check_issues)

    if error_tracker.has_errors_occured():
        println('TESTING FAILED (%i seconds)' % (time.time() - start_time),
                ERROR, STDERR)

        for line in error_tracker:
            println('  %s' % line, ERROR, STDERR)

        error_modules = error_tracker.get_modules()

        if len(error_modules) < 10 and not args.specific_test:
            println('\nYou can re-run just these tests with:\n', ERROR, STDERR)

            for module in error_modules:
                cropped_name = test.arguments.crop_module_name(module)
                println('  %s --test %s' % (' '.join(sys.argv), cropped_name),
                        ERROR, STDERR)
    else:
        if skipped_tests > 0:
            println('%i TESTS WERE SKIPPED' % skipped_tests, STATUS)

        println('TESTING PASSED (%i seconds)\n' % (time.time() - start_time),
                SUCCESS)

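    # report any new capabilities our tests encountered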
    new_capabilities = test.get_new_capabilities()

    if new_capabilities:
        println(NEW_CAPABILITIES_FOUND, ERROR)

        for capability_type, msg in sorted(new_capabilities,
                                           key=lambda x: x[1]):
            println('  [%s] %s' % (capability_type, msg), ERROR)

    sys.exit(1 if error_tracker.has_errors_occured() else 0)
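Example #1's log handling relies on the stdlib QueueHandler: records are queued while a test runs and only printed once it finishes (via test.output.print_logging, not shown here). Below is a minimal, self-contained sketch of that buffering pattern; the drain_log_buffer() helper and the 'demo' logger are illustrative stand-ins, not stem's API.

import logging
import logging.handlers
import queue


def drain_log_buffer(buffer):
    # print and discard every record queued so far
    while True:
        try:
            record = buffer.get_nowait()
        except queue.Empty:
            break

        print(record.getMessage())


logging_buffer = queue.Queue()

handler = logging.handlers.QueueHandler(logging_buffer)
handler.setLevel(logging.DEBUG)

logger = logging.getLogger('demo')
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)

logger.debug('buffered until the test completes')
drain_log_buffer(logging_buffer)  # now the buffered message is printed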
Example #2
def main():
    start_time = time.time()

    try:
        stem.prereq.check_requirements()
    except ImportError as exc:
        println('%s\n' % exc)
        sys.exit(1)

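    # load our testing configuration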
    test_config = stem.util.conf.get_config('test')
    test_config.load(os.path.join(test.STEM_BASE, 'test', 'settings.cfg'))

    try:
        args = test.arguments.parse(sys.argv[1:])
        test.task.TOR_VERSION.args = (args.tor_path, )
        test.output.SUPPRESS_STDOUT = args.quiet
    except ValueError as exc:
        println(str(exc))
        sys.exit(1)

    if args.print_help:
        println(test.arguments.get_help())
        sys.exit()
    elif not args.run_unit and not args.run_integ:
        println('Nothing to run (for usage provide --help)\n')
        sys.exit()

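    # mock is required for our tests; suggest how to install it if missing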
    if not stem.prereq.is_mock_available():
        try:
            import mock
            println(MOCK_OUT_OF_DATE_MSG % mock.__version__)
        except ImportError:
            println(MOCK_UNAVAILABLE_MSG)

        if stem.util.system.is_available('pip'):
            println("You can get it by running 'sudo pip install mock'.")
        elif stem.util.system.is_available('apt-get'):
            println(
                "You can get it by running 'sudo apt-get install python-mock'."
            )

        sys.exit(1)

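    # startup tasks: version checks, cleanup, and kicking off static analysis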
    test.task.run(
        'INITIALISING',
        test.task.STEM_VERSION,
        test.task.TOR_VERSION if args.run_integ else None,
        test.task.PYTHON_VERSION,
        test.task.CRYPTO_VERSION,
        test.task.PYNACL_VERSION,
        test.task.MOCK_VERSION,
        test.task.PYFLAKES_VERSION,
        test.task.PYCODESTYLE_VERSION,
        test.task.CLEAN_PYC,
        test.task.UNUSED_TESTS,
        test.task.IMPORT_TESTS,
        test.task.PYFLAKES_TASK if not args.specific_test else None,
        test.task.PYCODESTYLE_TASK if not args.specific_test else None,
    )

    # buffer that we log messages into so they can be printed after a test has finished

    logging_buffer = stem.util.log.LogBuffer(args.logging_runlevel)
    stem.util.log.get_logger().addHandler(logging_buffer)

    # filters for how testing output is displayed

    error_tracker = test.output.ErrorTracker()

    output_filters = (
        error_tracker.get_filter(),
        test.output.runtimes,
        test.output.strip_module,
        test.output.align_results,
        test.output.colorize,
    )

    # Number of tests that we have skipped. Test results only gained a
    # 'skipped' attribute in python 2.7, so getattr() is used as a fallback.

    skipped_tests = 0

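    # kick off our asynchronous integration tests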
    if args.run_integ:
        default_test_dir = stem.util.system.expand_path(
            CONFIG['integ.test_directory'], test.STEM_BASE)
        async_args = test.AsyncTestArgs(default_test_dir, args.tor_path)

        for module_str in stem.util.test_tools.ASYNC_TESTS:
            if not args.specific_test or module_str.startswith(
                    args.specific_test):
                module = importlib.import_module(module_str.rsplit('.', 1)[0])
                test_classes = [
                    v for k, v in module.__dict__.items()
                    if k.startswith('Test')
                ]

                if len(test_classes) != 1:
                    print('BUG: Detected multiple tests for %s: %s' %
                          (module_str, ', '.join([str(c) for c in test_classes])))
                    sys.exit(1)

                test_classes[0].run_tests(async_args)

    if args.run_unit:
        test.output.print_divider('UNIT TESTS', True)
        error_tracker.set_category('UNIT TEST')

        for test_class in get_unit_tests(args.specific_test):
            run_result = _run_test(args, test_class, output_filters)
            test.output.print_logging(logging_buffer)
            skipped_tests += len(getattr(run_result, 'skipped', []))

        println()

    if args.run_integ:
        test.output.print_divider('INTEGRATION TESTS', True)
        integ_runner = test.runner.get_runner()

        for target in args.run_targets:
            error_tracker.set_category(target)

            try:
                integ_runner.start(target, args.attribute_targets,
                                   args.tor_path)

                println('Running tests...\n', STATUS)

                for test_class in get_integ_tests(args.specific_test):
                    run_result = _run_test(args, test_class, output_filters)
                    test.output.print_logging(logging_buffer)
                    skipped_tests += len(getattr(run_result, 'skipped', []))
            except KeyboardInterrupt:
                println('  aborted starting tor: keyboard interrupt\n', ERROR)
                break
            except ValueError as exc:
                # can arise if there's bad settings.cfg data
                println(str(exc), ERROR)
                break
            except OSError:
                error_tracker.register_error()
            finally:
                println()
                integ_runner.stop()
                println()

                # We should have joined on all threads. If not, that indicates a
                # leak which is likely a bug and could disrupt further targets.

                active_threads = threading.enumerate()

                if len(active_threads) > 1:
                    println('Threads lingering after test run:', ERROR)

                    for lingering_thread in active_threads:
                        println('  %s' % lingering_thread, ERROR)

                    break

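    # gather any issues the pyflakes and pycodestyle tasks found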
    static_check_issues = {}

    for task in (test.task.PYFLAKES_TASK, test.task.PYCODESTYLE_TASK):
        if not task.is_available and task.unavailable_msg:
            println(task.unavailable_msg, ERROR)
        else:
            task.join()  # no-op if these haven't been run

            if task.result:
                for path, issues in task.result.items():
                    for issue in issues:
                        static_check_issues.setdefault(path, []).append(issue)

    _print_static_issues(static_check_issues)

    if error_tracker.has_errors_occured():
        println('TESTING FAILED (%i seconds)' % (time.time() - start_time),
                ERROR, STDERR)

        for line in error_tracker:
            println('  %s' % line, ERROR, STDERR)

        error_modules = error_tracker.get_modules()

        if len(error_modules) < 10 and not args.specific_test:
            println('\nYou can re-run just these tests with:\n', ERROR, STDERR)

            for module in error_modules:
                println('  %s --test %s' % (' '.join(sys.argv), module), ERROR,
                        STDERR)
    else:
        if skipped_tests > 0:
            println('%i TESTS WERE SKIPPED' % skipped_tests, STATUS)

        println('TESTING PASSED (%i seconds)\n' % (time.time() - start_time),
                SUCCESS)

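    # report any new capabilities our tests encountered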
    new_capabilities = test.get_new_capabilities()

    if new_capabilities:
        println(NEW_CAPABILITIES_FOUND, ERROR)

        for capability_type, msg in new_capabilities:
            println('  [%s] %s' % (capability_type, msg), ERROR)

    sys.exit(1 if error_tracker.has_errors_occured() else 0)
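Both examples tally skipped tests by reading the result object's 'skipped' attribute with a getattr() fallback. The following is a small, self-contained sketch of that counting against a throwaway unittest case; ExampleTest and its methods are made up for illustration and are not part of stem.

import unittest


class ExampleTest(unittest.TestCase):
    @unittest.skip('demonstrating the skipped attribute')
    def test_skipped(self):
        pass

    def test_passes(self):
        self.assertTrue(True)


suite = unittest.TestLoader().loadTestsFromTestCase(ExampleTest)
run_result = unittest.TextTestRunner(verbosity=0).run(suite)

# unittest results expose skipped tests as (test, reason) tuples
skipped_tests = len(getattr(run_result, 'skipped', []))
print('%i TESTS WERE SKIPPED' % skipped_tests)  # prints '1 TESTS WERE SKIPPED'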