Example #1
0
def main():
    # We can't use sys.path[0] to determine the script directory
    # because it doesn't work under a debugger
    test_directory = os.path.dirname(os.path.realpath(__file__))
    parser = OptionParser(usage="""\
Run lldb test suite using a separate process for each test file.

       Each test will run with a time limit of 10 minutes by default.

       Override the default time limit of 10 minutes by setting
       the environment variable LLDB_TEST_TIMEOUT.

       E.g., export LLDB_TEST_TIMEOUT=10m

       Override the time limit for individual tests by setting
       the environment variable LLDB_[TEST NAME]_TIMEOUT.

       E.g., export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=2m

       Set to "0" to run without time limit.

       E.g., export LLDB_TEST_TIMEOUT=0
       or    export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=0
""")
    parser.add_option(
        '-o',
        '--options',
        type='string',
        action='store',
        dest='dotest_options',
        help="""The options passed to 'dotest.py' if specified.""")

    parser.add_option(
        '-s',
        '--output-on-success',
        action='store_true',
        dest='output_on_success',
        default=False,
        help="""Print full output of 'dotest.py' even when it succeeds.""")

    parser.add_option(
        '-t',
        '--threads',
        type='int',
        dest='num_threads',
        help="""The number of threads to use when running tests separately.""")

    opts, args = parser.parse_args()
    dotest_option_string = opts.dotest_options

    is_posix = (os.name == "posix")
    dotest_argv = (shlex.split(dotest_option_string, posix=is_posix)
                   if dotest_option_string else [])

    parser = dotest_args.create_parser()
    global dotest_options
    global output_on_success
    output_on_success = opts.output_on_success
    dotest_options = dotest_args.parse_args(parser, dotest_argv)

    if not dotest_options.s:
        # no session log directory, we need to add this to prevent
        # every dotest invocation from creating its own directory
        import datetime
        # The windows platforms don't like ':' in the pathname.
        timestamp_started = datetime.datetime.now().strftime("%F-%H_%M_%S")
        dotest_argv.append('-s')
        dotest_argv.append(timestamp_started)
        dotest_options.s = timestamp_started

    session_dir = os.path.join(os.getcwd(), dotest_options.s)

    # The root directory was specified on the command line
    if len(args) == 0:
        test_subdir = test_directory
    else:
        test_subdir = os.path.join(test_directory, args[0])

    # clean core files in test tree from previous runs (Linux)
    cores = find('core.*', test_subdir)
    for core in cores:
        os.unlink(core)

    if opts.num_threads:
        num_threads = opts.num_threads
    else:
        num_threads_str = os.environ.get("LLDB_TEST_THREADS")
        if num_threads_str:
            num_threads = int(num_threads_str)
        else:
            num_threads = multiprocessing.cpu_count()
    if num_threads < 1:
        num_threads = 1

    system_info = " ".join(platform.uname())
    (timed_out, passed, failed, unexpected_successes, pass_count,
     fail_count) = walk_and_invoke(test_directory, test_subdir, dotest_argv,
                                   num_threads)

    timed_out = set(timed_out)
    num_test_files = len(passed) + len(failed)
    num_test_cases = pass_count + fail_count

    # move core files into session dir
    cores = find('core.*', test_subdir)
    for core in cores:
        dst = core.replace(test_directory, "")[1:]
        dst = dst.replace(os.path.sep, "-")
        os.rename(core, os.path.join(session_dir, dst))

    # remove expected timeouts from failures
    expected_timeout = getExpectedTimeouts(dotest_options.lldb_platform_name)
    for xtime in expected_timeout:
        if xtime in timed_out:
            timed_out.remove(xtime)
            failed.remove(xtime)
            result = "ExpectedTimeout"
        elif xtime in passed:
            result = "UnexpectedCompletion"
        else:
            result = None  # failed

        if result:
            test_name = os.path.splitext(xtime)[0]
            touch(os.path.join(session_dir, "{}-{}".format(result, test_name)))

    print
    sys.stdout.write("Ran %d test suites" % num_test_files)
    if num_test_files > 0:
        sys.stdout.write(" (%d failed) (%f%%)" %
                         (len(failed), 100.0 * len(failed) / num_test_files))
    print
    sys.stdout.write("Ran %d test cases" % num_test_cases)
    if num_test_cases > 0:
        sys.stdout.write(" (%d failed) (%f%%)" %
                         (fail_count, 100.0 * fail_count / num_test_cases))
    print
    exit_code = 0

    if len(failed) > 0:
        failed.sort()
        print "Failing Tests (%d)" % len(failed)
        for f in failed:
            print "%s: LLDB (suite) :: %s (%s)" % ("TIMEOUT" if f in timed_out
                                                   else "FAIL", f, system_info)
        exit_code = 1

    if len(unexpected_successes) > 0:
        unexpected_successes.sort()
        print "\nUnexpected Successes (%d)" % len(unexpected_successes)
        for u in unexpected_successes:
            print "UNEXPECTED SUCCESS: LLDB (suite) :: %s (%s)" % (u,
                                                                   system_info)

    sys.exit(exit_code)
Example #2
0
def main(print_details_on_success, num_threads, test_subdir):
    """Run dotest.py in inferior mode in parallel.

    @param print_details_on_success the parsed value of the output-on-success
    command line argument.  When True, details of a successful dotest inferior
    are printed even when everything succeeds.  The normal behavior is to
    not print any details when all the inferior tests pass.

    @param num_threads the parsed value of the num-threads command line
    argument.

    @param test_subdir optionally specifies a subdir to limit testing
    within.  May be None if the entire test tree is to be used.  This subdir
    is assumed to be relative to the lldb/test root of the test hierarchy.
    """

    dotest_argv = sys.argv[1:]

    global output_on_success
    output_on_success = print_details_on_success

    # We can't use sys.path[0] to determine the script directory
    # because it doesn't work under a debugger
    test_directory = os.path.dirname(os.path.realpath(__file__))
    parser = OptionParser(usage="""\
Run lldb test suite using a separate process for each test file.

       Each test will run with a time limit of 10 minutes by default.

       Override the default time limit of 10 minutes by setting
       the environment variable LLDB_TEST_TIMEOUT.

       E.g., export LLDB_TEST_TIMEOUT=10m

       Override the time limit for individual tests by setting
       the environment variable LLDB_[TEST NAME]_TIMEOUT.

       E.g., export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=2m

       Set to "0" to run without time limit.

       E.g., export LLDB_TEST_TIMEOUT=0
       or    export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=0
""")
    parser = dotest_args.create_parser()
    global dotest_options
    dotest_options = dotest_args.parse_args(parser, dotest_argv)

    if not dotest_options.s:
        # no session log directory, we need to add this to prevent
        # every dotest invocation from creating its own directory
        import datetime
        # The windows platforms don't like ':' in the pathname.
        timestamp_started = datetime.datetime.now().strftime("%F-%H_%M_%S")
        dotest_argv.append('-s')
        dotest_argv.append(timestamp_started)
        dotest_options.s = timestamp_started

    session_dir = os.path.join(os.getcwd(), dotest_options.s)

    # The root directory was specified on the command line
    if test_subdir and len(test_subdir) > 0:
        test_subdir = os.path.join(test_directory, test_subdir)
    else:
        test_subdir = test_directory

    # clean core files in test tree from previous runs (Linux)
    cores = find('core.*', test_subdir)
    for core in cores:
        os.unlink(core)

    if not num_threads:
        num_threads_str = os.environ.get("LLDB_TEST_THREADS")
        if num_threads_str:
            num_threads = int(num_threads_str)
        else:
            num_threads = multiprocessing.cpu_count()
    if num_threads < 1:
        num_threads = 1

    system_info = " ".join(platform.uname())
    (timed_out, passed, failed, unexpected_successes, pass_count, fail_count) = walk_and_invoke(
        test_directory, test_subdir, dotest_argv, num_threads)

    timed_out = set(timed_out)
    num_test_files = len(passed) + len(failed)
    num_test_cases = pass_count + fail_count

    # move core files into session dir
    cores = find('core.*', test_subdir)
    for core in cores:
        dst = core.replace(test_directory, "")[1:]
        dst = dst.replace(os.path.sep, "-")
        os.rename(core, os.path.join(session_dir, dst))

    # remove expected timeouts from failures
    expected_timeout = getExpectedTimeouts(dotest_options.lldb_platform_name)
    for xtime in expected_timeout:
        if xtime in timed_out:
            timed_out.remove(xtime)
            failed.remove(xtime)
            result = "ExpectedTimeout"
        elif xtime in passed:
            result = "UnexpectedCompletion"
        else:
            result = None  # failed

        if result:
            test_name = os.path.splitext(xtime)[0]
            touch(os.path.join(session_dir, "{}-{}".format(result, test_name)))

    print
    sys.stdout.write("Ran %d test suites" % num_test_files)
    if num_test_files > 0:
        sys.stdout.write(" (%d failed) (%f%%)" % (
            len(failed), 100.0 * len(failed) / num_test_files))
    print
    sys.stdout.write("Ran %d test cases" % num_test_cases)
    if num_test_cases > 0:
        sys.stdout.write(" (%d failed) (%f%%)" % (
            fail_count, 100.0 * fail_count / num_test_cases))
    print
    exit_code = 0

    if len(failed) > 0:
        failed.sort()
        print "Failing Tests (%d)" % len(failed)
        for f in failed:
            print "%s: LLDB (suite) :: %s (%s)" % (
                "TIMEOUT" if f in timed_out else "FAIL", f, system_info
            )
        exit_code = 1

    if len(unexpected_successes) > 0:
        unexpected_successes.sort()
        print "\nUnexpected Successes (%d)" % len(unexpected_successes)
        for u in unexpected_successes:
            print "UNEXPECTED SUCCESS: LLDB (suite) :: %s (%s)" % (u, system_info)

    sys.exit(exit_code)
Example #3
0
def main(print_details_on_success, num_threads, test_subdir,
         test_runner_name, results_formatter):
    """Run dotest.py in inferior mode in parallel.

    @param print_details_on_success the parsed value of the output-on-success
    command line argument.  When True, details of a successful dotest inferior
    are printed even when everything succeeds.  The normal behavior is to
    not print any details when all the inferior tests pass.

    @param num_threads the parsed value of the num-threads command line
    argument.

    @param test_subdir optionally specifies a subdir to limit testing
    within.  May be None if the entire test tree is to be used.  This subdir
    is assumed to be relative to the lldb/test root of the test hierarchy.

    @param test_runner_name if specified, contains the test runner
    name which selects the strategy used to run the isolated and
    optionally concurrent test runner. Specify None to allow the
    system to choose the most appropriate test runner given desired
    thread count and OS type.

    @param results_formatter if specified, provides the TestResultsFormatter
    instance that will format and output test result data from the
    side-channel test results.  When specified, inferior dotest calls
    will send test results side-channel data over a socket to the parallel
    test runner, which will forward them on to results_formatter.

    This function does not return; it terminates the process via sys.exit().
    """

    # Do not shut down on sighup.
    if hasattr(signal, 'SIGHUP'):
        signal.signal(signal.SIGHUP, signal.SIG_IGN)

    # Everything after the program name is parsed as dotest options below.
    dotest_argv = sys.argv[1:]

    # Publish runner-level settings as module-level globals.
    global output_on_success, RESULTS_FORMATTER
    output_on_success = print_details_on_success
    RESULTS_FORMATTER = results_formatter

    # We can't use sys.path[0] to determine the script directory
    # because it doesn't work under a debugger
    parser = dotest_args.create_parser()
    global dotest_options
    dotest_options = dotest_args.parse_args(parser, dotest_argv)

    # NOTE(review): adjust_inferior_options() is expected to mutate
    # dotest_argv in place for the inferior runs and to ensure
    # dotest_options.s (the session dir, used just below) is set —
    # confirm against its definition.
    adjust_inferior_options(dotest_argv)

    session_dir = os.path.join(os.getcwd(), dotest_options.s)

    # The root directory was specified on the command line
    test_directory = os.path.dirname(os.path.realpath(__file__))
    if test_subdir and len(test_subdir) > 0:
        test_subdir = os.path.join(test_directory, test_subdir)
    else:
        test_subdir = test_directory

    # clean core files in test tree from previous runs (Linux)
    cores = find('core.*', test_subdir)
    for core in cores:
        os.unlink(core)

    system_info = " ".join(platform.uname())

    # Figure out which testrunner strategy we'll use.
    runner_strategies_by_name = get_test_runner_strategies(num_threads)

    # If the user didn't specify a test runner strategy, determine
    # the default now based on number of threads and OS type.
    if not test_runner_name:
        test_runner_name = default_test_runner_name(num_threads)

    if test_runner_name not in runner_strategies_by_name:
        raise Exception(
            "specified testrunner name '{}' unknown. Valid choices: {}".format(
                test_runner_name,
                runner_strategies_by_name.keys()))
    test_runner_func = runner_strategies_by_name[test_runner_name]

    summary_results = walk_and_invoke(
        test_directory, test_subdir, dotest_argv,
        num_threads, test_runner_func)

    (timed_out, passed, failed, unexpected_successes, pass_count,
     fail_count) = summary_results

    # The results formatter - if present - is done now.  Tell it to
    # terminate.
    if results_formatter is not None:
        results_formatter.send_terminate_as_needed()

    timed_out = set(timed_out)
    num_test_files = len(passed) + len(failed)
    num_test_cases = pass_count + fail_count

    # move core files into session dir
    cores = find('core.*', test_subdir)
    for core in cores:
        # Flatten the core file's path relative to the test tree into a
        # single session-dir file name.
        dst = core.replace(test_directory, "")[1:]
        dst = dst.replace(os.path.sep, "-")
        os.rename(core, os.path.join(session_dir, dst))

    # remove expected timeouts from failures
    expected_timeout = getExpectedTimeouts(dotest_options.lldb_platform_name)
    for xtime in expected_timeout:
        if xtime in timed_out:
            timed_out.remove(xtime)
            failed.remove(xtime)
            result = "ExpectedTimeout"
        elif xtime in passed:
            result = "UnexpectedCompletion"
        else:
            result = None  # failed

        if result:
            # Leave a marker file in the session dir recording the outcome.
            test_name = os.path.splitext(xtime)[0]
            touch(os.path.join(session_dir, "{}-{}".format(result, test_name)))

    # Summary output: suite counts first, then individual test-case counts.
    print
    sys.stdout.write("Ran %d test suites" % num_test_files)
    if num_test_files > 0:
        sys.stdout.write(" (%d failed) (%f%%)" % (
            len(failed), 100.0 * len(failed) / num_test_files))
    print
    sys.stdout.write("Ran %d test cases" % num_test_cases)
    if num_test_cases > 0:
        sys.stdout.write(" (%d failed) (%f%%)" % (
            fail_count, 100.0 * fail_count / num_test_cases))
    print
    exit_code = 0

    if len(failed) > 0:
        failed.sort()
        print "Failing Tests (%d)" % len(failed)
        for f in failed:
            print "%s: LLDB (suite) :: %s (%s)" % (
                "TIMEOUT" if f in timed_out else "FAIL", f, system_info
            )
        exit_code = 1

    if len(unexpected_successes) > 0:
        unexpected_successes.sort()
        print "\nUnexpected Successes (%d)" % len(unexpected_successes)
        for u in unexpected_successes:
            print "UNEXPECTED SUCCESS: LLDB (suite) :: %s (%s)" % (u, system_info)

    sys.exit(exit_code)
Example #4
0
def main():
    """Run the lldb test suite using a separate process per test file.

    Parses -o/--options (a string forwarded to dotest.py) and
    -t/--threads, cleans stale core files out of the test tree, drives
    walk_and_invoke() across it, prints a pass/fail summary, and
    terminates the process via sys.exit() with status 1 if any suite
    failed, 0 otherwise.
    """
    # We can't use sys.path[0] to determine the script directory
    # because it doesn't work under a debugger
    test_directory = os.path.dirname(os.path.realpath(__file__))
    parser = OptionParser(usage="""\
Run lldb test suite using a separate process for each test file.

       Each test will run with a time limit of 10 minutes by default.

       Override the default time limit of 10 minutes by setting
       the environment variable LLDB_TEST_TIMEOUT.

       E.g., export LLDB_TEST_TIMEOUT=10m

       Override the time limit for individual tests by setting
       the environment variable LLDB_[TEST NAME]_TIMEOUT.

       E.g., export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=2m

       Set to "0" to run without time limit.

       E.g., export LLDB_TEST_TIMEOUT=0
       or    export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=0
""")
    parser.add_option('-o', '--options',
                      type='string', action='store',
                      dest='dotest_options',
                      help="""The options passed to 'dotest.py' if specified.""")

    parser.add_option('-t', '--threads',
                      type='int',
                      dest='num_threads',
                      help="""The number of threads to use when running tests separately.""")

    opts, args = parser.parse_args()
    dotest_option_string = opts.dotest_options

    # Honor the host platform's quoting rules when splitting the -o string.
    is_posix = (os.name == "posix")
    dotest_argv = shlex.split(dotest_option_string, posix=is_posix) if dotest_option_string else []

    parser = dotest_args.create_parser()
    dotest_options = dotest_args.parse_args(parser, dotest_argv)

    if not dotest_options.s:
        # no session log directory, we need to add this to prevent
        # every dotest invocation from creating its own directory
        import datetime
        # The windows platforms don't like ':' in the pathname.
        timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
        dotest_argv.append('-s')
        dotest_argv.append(timestamp_started)
        dotest_options.s = timestamp_started

    session_dir = os.path.join(os.getcwd(), dotest_options.s)

    # The root directory was specified on the command line
    if len(args) == 0:
        test_subdir = test_directory
    else:
        test_subdir = os.path.join(test_directory, args[0])

    # clean core files in test tree from previous runs (Linux)
    cores = find('core.*', test_subdir)
    for core in cores:
        os.unlink(core)

    # Thread-count precedence: -t flag, then LLDB_TEST_THREADS, then the
    # machine's CPU count; never fewer than one.
    if opts.num_threads:
        num_threads = opts.num_threads
    else:
        num_threads_str = os.environ.get("LLDB_TEST_THREADS")
        if num_threads_str:
            num_threads = int(num_threads_str)
        else:
            num_threads = multiprocessing.cpu_count()
    if num_threads < 1:
        num_threads = 1

    # Published for worker code to read; value depends on the target platform.
    global default_timeout
    default_timeout = getDefaultTimeout(dotest_options.lldb_platform_name)

    system_info = " ".join(platform.uname())
    (timed_out, failed, passed, all_fails, all_passes) = walk_and_invoke(test_directory, test_subdir, dotest_argv, num_threads)

    timed_out = set(timed_out)
    num_test_files = len(failed) + len(passed)
    num_tests = all_fails + all_passes

    # move core files into session dir
    cores = find('core.*', test_subdir)
    for core in cores:
        # Flatten the core file's path relative to the test tree into a
        # single session-dir file name.
        dst = core.replace(test_directory, "")[1:]
        dst = dst.replace(os.path.sep, "-")
        os.rename(core, os.path.join(session_dir, dst))

    # remove expected timeouts from failures
    expected_timeout = getExpectedTimeouts(dotest_options.lldb_platform_name)
    for xtime in expected_timeout:
        if xtime in timed_out:
            timed_out.remove(xtime)
            failed.remove(xtime)
            result = "ExpectedTimeout"
        elif xtime in passed:
            result = "UnexpectedCompletion"
        else:
            result = None  # failed

        if result:
            # Leave a marker file in the session dir recording the outcome.
            test_name = os.path.splitext(xtime)[0]
            touch(os.path.join(session_dir, "{}-{}".format(result, test_name)))

    print
    print "Ran %d test suites (%d failed) (%f%%)" % (num_test_files, len(failed),
            (100.0 * len(failed) / num_test_files) if num_test_files > 0 else float('NaN'))
    print "Ran %d test cases (%d failed) (%f%%)" % (num_tests, all_fails,
            (100.0 * all_fails / num_tests) if num_tests > 0 else float('NaN'))
    if len(failed) > 0:
        failed.sort()
        print "Failing Tests (%d)" % len(failed)
        for f in failed:
            print "%s: LLDB (suite) :: %s (%s)" % (
                "TIMEOUT" if f in timed_out else "FAIL", f, system_info
            )
        sys.exit(1)
    sys.exit(0)