Example no. 1
def load_test_suite(inputs):
    # Standard-library modules used below; LitConfig, getTests and LitTestCase
    # come from the enclosing lit package rather than the standard library.
    import platform
    import sys
    import unittest

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname='lit',
                                    path=[],
                                    quiet=False,
                                    useValgrind=False,
                                    valgrindLeakCheck=False,
                                    valgrindArgs=[],
                                    useTclAsSh=False,
                                    noExecute=False,
                                    ignoreStdErr=False,
                                    debug=False,
                                    isWindows=(platform.system() == 'Windows'),
                                    params={})

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in inputs:
        prev = len(tests)
        tests.extend(
            getTests(input, litConfig, testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >> sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    # Return a unittest test suite which just runs the tests in order.
    # (get_test_fn below is an unused FunctionTestCase-based wrapper that was
    # left in place; the suite actually returned is built from LitTestCase.)
    def get_test_fn(test):
        return unittest.FunctionTestCase(
            lambda: test.config.test_format.execute(test, litConfig),
            description=test.getFullName())

    from LitTestCase import LitTestCase
    return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])
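
# Usage sketch for load_test_suite above, assuming it is called from the same
# lit module that defines LitConfig and getTests; 'test/Foo' is a purely
# hypothetical test directory. The returned suite plugs into a standard
# unittest runner.
import unittest

suite = load_test_suite(['test/Foo'])
unittest.TextTestRunner(verbosity=2).run(suite)
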
Example no. 2
def main():
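    # Context note: as in lit's driver module, this function assumes that os,
    # platform, random, time and the lit helpers used below (LitConfig, Util,
    # ProgressBar, Test, TestingProgressDisplay, TestProvider, getTests,
    # runTests, plus the gConfigName/gSiteConfigName globals) are already
    # imported or defined at module level; they are not shown in this excerpt.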
    # Bump the GIL check interval; it's more important to get any one thread to
    # a blocking operation (hopefully exec) than to try to unblock other threads.
    #
    # FIXME: This is a hack.
    import sys
    sys.setcheckinterval(1000)

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j",
                      "--threads",
                      dest="numThreads",
                      metavar="N",
                      help="Number of testing threads",
                      type=int,
                      action="store",
                      default=None)
    parser.add_option("",
                      "--config-prefix",
                      dest="configPrefix",
                      metavar="NAME",
                      help="Prefix for 'lit' config files",
                      action="store",
                      default=None)
    parser.add_option("",
                      "--param",
                      dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str,
                      action="append",
                      default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q",
                     "--quiet",
                     dest="quiet",
                     help="Suppress no error output",
                     action="store_true",
                     default=False)
    group.add_option("-s",
                     "--succinct",
                     dest="succinct",
                     help="Reduce amount of output",
                     action="store_true",
                     default=False)
    group.add_option("-v",
                     "--verbose",
                     dest="showOutput",
                     help="Show all test output",
                     action="store_true",
                     default=False)
    group.add_option("",
                     "--no-progress-bar",
                     dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false",
                     default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("",
                     "--path",
                     dest="path",
                     help="Additional paths to add to testing environment",
                     action="append",
                     type=str,
                     default=[])
    group.add_option("",
                     "--vg",
                     dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true",
                     default=False)
    group.add_option("",
                     "--vg-arg",
                     dest="valgrindArgs",
                     metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str,
                     action="append",
                     default=[])
    group.add_option("",
                     "--time-tests",
                     dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true",
                     default=False)
    group.add_option("",
                     "--no-execute",
                     dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true",
                     default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("",
                     "--max-tests",
                     dest="maxTests",
                     metavar="N",
                     help="Maximum number of tests to run",
                     action="store",
                     type=int,
                     default=None)
    group.add_option("",
                     "--max-time",
                     dest="maxTime",
                     metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store",
                     type=float,
                     default=None)
    group.add_option("",
                     "--shuffle",
                     dest="shuffle",
                     help="Run tests in random order",
                     action="store_true",
                     default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("",
                     "--debug",
                     dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true",
                     default=False)
    group.add_option("",
                     "--show-suites",
                     dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true",
                     default=False)
    group.add_option("",
                     "--no-tcl-as-sh",
                     dest="useTclAsSh",
                     help="Don't run Tcl scripts using 'sh'",
                     action="store_false",
                     default=True)
    group.add_option("",
                     "--repeat",
                     dest="repeatTests",
                     metavar="N",
                     help="Repeat tests N times (for timing)",
                     action="store",
                     default=None,
                     type=int)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.configPrefix is not None:
        global gConfigName, gSiteConfigName
        gConfigName = '%s.cfg' % opts.configPrefix
        gSiteConfigName = '%s.site.cfg' % opts.configPrefix
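        # For example (hypothetical value), '--config-prefix=mysuite' makes
        # discovery look for 'mysuite.cfg' and 'mysuite.site.cfg' instead of
        # the default 'lit.cfg' / 'lit.site.cfg' names.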

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with numThreads>1
        # http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
        # threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = Util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = {}
    for entry in opts.userParameters:
        if '=' not in entry:
            name, val = entry, ''
        else:
            name, val = entry.split('=', 1)
        userParams[name] = val
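    # Illustration (hypothetical flags): '--param build_mode=Release --param
    # verbose' yields userParams == {'build_mode': 'Release', 'verbose': ''}.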

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname=os.path.basename(sys.argv[0]),
                                    path=opts.path,
                                    quiet=opts.quiet,
                                    useValgrind=opts.useValgrind,
                                    valgrindArgs=opts.valgrindArgs,
                                    useTclAsSh=opts.useTclAsSh,
                                    noExecute=opts.noExecute,
                                    debug=opts.debug,
                                    isWindows=(platform.system() == 'Windows'),
                                    params=userParams)

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in inputs:
        prev = len(tests)
        tests.extend(
            getTests(input, litConfig, testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >> sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    if opts.showSuites:
        suitesAndTests = dict([(ts, []) for ts, _ in testSuiteCache.values()
                               if ts])
        for t in tests:
            suitesAndTests[t.suite].append(t)

        print '-- Test Suites --'
        suitesAndTests = suitesAndTests.items()
        suitesAndTests.sort(key=lambda (ts, _): ts.name)
        for ts, ts_tests in suitesAndTests:
            print '  %s - %d tests' % (ts.name, len(ts_tests))
            print '    Source Root: %s' % ts.source_root
            print '    Exec Root  : %s' % ts.exec_root

    # Select and order the tests.
    numTotalTests = len(tests)
    if opts.shuffle:
        random.shuffle(tests)
    else:
        tests.sort(key=lambda t: t.getFullName())
    if opts.maxTests is not None:
        tests = tests[:opts.maxTests]

    extra = ''
    if len(tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(tests), extra,
                                                        opts.numThreads)

    if opts.repeatTests:
        tests = [
            t.copyWithIndex(i) for t in tests for i in range(opts.repeatTests)
        ]
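    # Illustration: '--repeat 2' expands every discovered test into two indexed
    # copies via copyWithIndex, so the suite runs each test twice for timing.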

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = ProgressBar.TerminalController()
                progressBar = ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print header
                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print header

    # Don't create more threads than tests.
    opts.numThreads = min(len(tests), opts.numThreads)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(tests), progressBar)
    provider = TestProvider(tests, opts.maxTime)
    runTests(opts.numThreads, litConfig, provider, display)
    display.finish()

    if not opts.quiet:
        print 'Testing Time: %.2fs' % (time.time() - startTime)

    # Update results for any tests which weren't run.
    for t in tests:
        if t.result is None:
            t.setResult(Test.UNRESOLVED, '', 0.0)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for t in tests:
        if t.result not in byCode:
            byCode[t.result] = []
        byCode[t.result].append(t)
        if t.result.isFailure:
            hasFailures = True

    # FIXME: Show unresolved and (optionally) unsupported tests.
    for title, code in (('Unexpected Passing Tests', Test.XPASS),
                        ('Failing Tests', Test.FAIL)):
        elts = byCode.get(code)
        if not elts:
            continue
        print '*' * 20
        print '%s (%d):' % (title, len(elts))
        for t in elts:
            print '    %s' % t.getFullName()
        print

    if opts.timeTests:
        # Collate, in case we repeated tests.
        times = {}
        for t in tests:
            key = t.getFullName()
            times[key] = times.get(key, 0.) + t.elapsed

        byTime = list(times.items())
        byTime.sort(key=lambda (name, elapsed): elapsed)
        if byTime:
            Util.printHistogram(byTime, title='Tests')

    for name, code in (
        ('Expected Passes    ', Test.PASS),
        ('Expected Failures  ', Test.XFAIL),
        ('Unsupported Tests  ', Test.UNSUPPORTED),
        ('Unresolved Tests   ', Test.UNRESOLVED),
        ('Unexpected Passes  ', Test.XPASS),
        ('Unexpected Failures', Test.FAIL),
    ):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code, []))
        if N:
            print '  %s: %d' % (name, N)

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        print >> sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        print >> sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings

    if hasFailures:
        sys.exit(1)
    sys.exit(0)
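
# Entry-point sketch: as in lit's own driver script, main() above is intended
# to run when the module is invoked directly.
if __name__ == '__main__':
    main()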