Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    results = core.load_results(args.results_path)
    env = core.Environment(concurrent=results.options['concurrent'],
                           exclude_filter=results.options['exclude_filter'],
                           include_filter=results.options['filter'],
                           execute=results.options['execute'],
                           valgrind=results.options['valgrind'],
                           dmesg=results.options['dmesg'],
                           verbose=results.options['verbose'])

    # Change working directory to the piglit directory
    os.chdir(path.dirname(path.realpath(sys.argv[0])))

    # Attempt to restore a saved platform; if there is no saved platform,
    # just go on
    try:
        os.environ['PIGLIT_PLATFORM'] = results.options['platform']
    except KeyError:
        pass

    results_path = path.join(args.results_path, "main")
    json_writer = core.JSONWriter(open(results_path, 'w+'))
    json_writer.open_dict()
    json_writer.write_dict_key("options")
    json_writer.open_dict()
    for key, value in results.options.iteritems():
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        env.exclude_tests.add(key)
    
    profile = core.merge_test_profiles(results.options['profile'])
    if env.dmesg:
        profile.dmesg = env.dmesg

    # This is resumed, don't bother with time since it won't be accurate anyway
    profile.run(env, json_writer)

    json_writer.close_dict()
    json_writer.close_dict()
    json_writer.file.close()

    print("\n"
          "Thank you for running Piglit!\n"
          "Results have ben wrriten to {0}".format(results_path))
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    results = core.load_results(args.results_path)
    env = core.Environment(concurrent=results.options['concurrent'],
                           exclude_filter=results.options['exclude_filter'],
                           include_filter=results.options['filter'],
                           execute=results.options['execute'],
                           valgrind=results.options['valgrind'],
                           dmesg=results.options['dmesg'],
                           verbose=results.options['verbose'])

    # Change working directory to the piglit directory
    os.chdir(path.dirname(path.realpath(sys.argv[0])))

    # Attempt to restore a saved platform; if there is no saved platform,
    # just go on
    try:
        os.environ['PIGLIT_PLATFORM'] = results.options['platform']
    except KeyError:
        pass

    results_path = path.join(args.results_path, "main")
    json_writer = core.JSONWriter(open(results_path, 'w+'))
    json_writer.open_dict()
    json_writer.write_dict_key("options")
    json_writer.open_dict()
    for key, value in results.options.iteritems():
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        env.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if env.dmesg:
        profile.dmesg = env.dmesg

    # This is resumed, don't bother with time since it won't be accurate anyway
    profile.run(env, json_writer)

    json_writer.close_dict()
    json_writer.close_dict()
    json_writer.file.close()

    print("Thank you for running Piglit!\n"
          "Results have ben wrriten to {0}".format(results_path))
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results",
                        metavar="<First Results File>",
                        nargs="*",
                        help="Space seperated list of results files")
    args = parser.parse_args()

    combined = core.load_results(args.results.pop(0))

    for resultsDir in args.results:
        results = core.load_results(resultsDir)

        for testname, result in results.tests.items():
            combined.tests[testname] = result

    combined.write(sys.stdout)
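
The merge loop above simply overwrites duplicate test names, so results files listed later take precedence. A minimal standalone sketch of that rule, with plain dictionaries standing in for core.load_results() objects:

# Later results overwrite earlier ones for the same test name.
run_a = {"spec/test-1": "pass", "spec/test-2": "fail"}
run_b = {"spec/test-2": "pass"}        # a re-run of the failing test

combined = dict(run_a)
for testname, result in run_b.items():
    combined[testname] = result

assert combined == {"spec/test-1": "pass", "spec/test-2": "pass"}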
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results",
                        metavar="<First Results File>",
                        nargs="*",
                        help="Space seperated list of results files")
    args = parser.parse_args()

    combined = core.load_results(args.results.pop(0))

    for resultsDir in args.results:
        results = core.load_results(resultsDir)

        for testname, result in results.tests.items():
            combined.tests[testname] = result

    combined.write(sys.stdout)
Example #5
def test_load_results_folder():
    """ Test that load_results takes a folder with a file named main in it """
    with utils.tempdir() as tdir:
        with open(os.path.join(tdir, 'main'), 'w') as tfile:
            tfile.write(json.dumps(utils.JSON_DATA))

        results = core.load_results(tdir)
        assert results
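
The test only checks that load_results() accepts a folder containing a file named "main". utils.JSON_DATA is not shown in these examples; judging from the attributes the other examples read from a loaded result (.name, .options, .tests), it presumably looks something like this hypothetical minimum:

# Hypothetical stand-in for utils.JSON_DATA (an assumption, not piglit's
# actual fixture): just enough structure for load_results() to build a
# results object with name, options and tests attributes.
JSON_DATA = {
    "name": "fake-run",
    "options": {"profile": "fake.tests", "filter": [], "exclude_filter": []},
    "tests": {"a/group/test1": {"result": "pass"}},
}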
Example #6
    def write(self, arg):
        testrun = core.load_results(arg)

        self.report.start()
        self.report.startSuite('piglit')
        try:
            for (path, result) in testrun.tests.items():
                self.write_test(testrun, path, result)
        finally:
            self.enter_path([])
            self.report.stopSuite()
            self.report.stop()
Example #7
    def write(self, arg):
        testrun = core.load_results(arg)

        self.report.start()
        self.report.startSuite('piglit')
        try:
            for (path, result) in testrun.tests.items():
                self.write_test(testrun, path, result)
        finally:
            self.enter_path([])
            self.report.stopSuite()
            self.report.stop()
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    results = core.load_results(args.results_path)
    env = core.Environment(concurrent=results.options['concurrent'],
                           exclude_filter=results.options['exclude_filter'],
                           include_filter=results.options['filter'],
                           execute=results.options['execute'],
                           valgrind=results.options['valgrind'],
                           dmesg=results.options['dmesg'])

    # Change working directory to the piglit directory
    os.chdir(path.dirname(path.realpath(sys.argv[0])))

    results_path = path.join(args.results_path, "main")
    json_writer = core.JSONWriter(open(results_path, 'w+'))
    json_writer.open_dict()
    json_writer.write_dict_key("options")
    json_writer.open_dict()
    for key, value in results.options.iteritems():
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        env.exclude_tests.add(key)
    json_writer.close_dict()

    profile = core.loadTestProfile(results.options['profile'])
    # This is resumed, don't bother with time since it won't be accurate anyway
    profile.run(env, json_writer)

    json_writer.close_dict()
    json_writer.close_dict()
    json_writer.file.close()

    print("\n"
          "Thank you for running Piglit!\n"
          "Results have ben wrriten to {0}".format(results_path))
Example #9
    def __init__(self, resultfiles):
        """
        Create an initial object with all of the result information rolled up
        in an easy to process form.

        The constructor of the summary class has an attribute for each HTML
        summary page, which are fed into the index.mako file to produce HTML
        files. resultfiles is a list of paths to JSON results generated by
        piglit-run.
        """

        # Create a Result object for each piglit result and append it to the
        # results list
        self.results = [core.load_results(i) for i in resultfiles]

        self.status = {}
        self.fractions = {}
        self.totals = {}
        self.tests = {'all': set(), 'changes': set(), 'problems': set(),
                      'skipped': set(), 'regressions': set(), 'fixes': set()}

        def fgh(test, result):
            """ Helper for updating the fractions and status lists """
            fraction[test] = tuple(
                [sum(i) for i in zip(fraction[test], result.fraction)])
            if result != so.Skip() and status[test] < result:
                status[test] = result

        for results in self.results:
            # Create a set of all of the test names across all of the runs
            self.tests['all'] = set(self.tests['all'] | set(results.tests))

            # Create two dictionaries that have a default factory: they return
            # a default value instead of a key error.
            # This default factory must be callable
            self.fractions[results.name] = collections.defaultdict(lambda: (0, 0))
            self.status[results.name] = collections.defaultdict(so.NotRun)

            # short names
            fraction = self.fractions[results.name]
            status = self.status[results.name]

            for key, value in results.tests.iteritems():
                # if the first character of key is a / then our while loop
                # will become an infinite loop. Beyond that, / should never be
                # the leading character; if it is, there is a bug in one of
                # the test profiles.
                assert key[0] != '/'

                #FIXME: Add subtest support

                # Walk the test name as if it was a path, at each level update
                # the tests passed over the total number of tests (fractions),
                # and update the status of the current level if the status of
                # the previous level was worse, but is not skip
                while key != '':
                    fgh(key, value['result'])
                    key = path.dirname(key)

                # when we hit the root update the 'all' group and stop
                fgh('all', value['result'])

        # Create the lists of statuses like problems, regressions, fixes,
        # changes and skips
        for test in self.tests['all']:
            status = []
            for each in self.results:
                try:
                    status.append(each.tests[test]['result'])
                except KeyError:
                    status.append(so.NotRun())

            # Problems include: warn, dmesg-warn, fail, dmesg-fail, and crash.
            # Skip does not go on this page; it has its own 'skipped' page
            if max(status) > so.Pass():
                self.tests['problems'].add(test)

            # Find all tests with a status of skip
            if so.Skip() in status:
                self.tests['skipped'].add(test)

            # find fixes, regressions, and changes
            for i in xrange(len(status) - 1):
                first = status[i]
                last = status[i + 1]
                if max(first, so.Pass()) < last:
                    self.tests['regressions'].add(test)
                if first > max(last, so.Pass()):
                    self.tests['fixes'].add(test)
                # Changes cannot be added in the fixes and regressions passes
                # because NotRun is a change, but not a regression or fix
                if first != last:
                    self.tests['changes'].add(test)
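
The "walk the test name as if it was a path" loop inside fgh() drives the whole per-group rollup, so a small standalone sketch may help; plain (passed, total) tuples stand in for piglit's status objects and fractions here.

# Standalone sketch of the rollup fgh() performs above: each test name is
# treated as a path and its fraction is accumulated into every enclosing
# group, then into the synthetic 'all' group.
import collections
from os import path

fraction = collections.defaultdict(lambda: (0, 0))

def rollup(name, frac):
    passed, total = fraction[name]
    fraction[name] = (passed + frac[0], total + frac[1])

tests = {"spec/ext_foo/test1": (1, 1),     # pass
         "spec/ext_foo/test2": (0, 1),     # fail
         "spec/ext_bar/test1": (1, 1)}     # pass

for key, frac in tests.items():
    while key != '':
        rollup(key, frac)
        key = path.dirname(key)
    rollup('all', frac)

assert fraction['spec/ext_foo'] == (1, 2)
assert fraction['spec'] == (2, 3)
assert fraction['all'] == (2, 3)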
Example #10
def main():
    parser = argparse.ArgumentParser(sys.argv)
    # Either require that a name for the test is passed or that
    # resume is requested
    excGroup1 = parser.add_mutually_exclusive_group()
    excGroup1.add_argument("-n", "--name",
                           metavar="<test name>",
                           default=None,
                           help="Name of this test run")
    excGroup1.add_argument("-r", "--resume",
                           action="store_true",
                           help="Resume an interupted test run")
    # Setting the --dry-run flag is equivalent to env.execute=false
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    parser.add_argument("-1", "--no-concurrency",
                        action="store_false",
                        dest="concurrency",
                        help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test")
    parser.add_argument("testProfile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("resultsPath",
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform is not None:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Always convert the results path from a relative to an absolute path.
    resultsDir = path.realpath(args.resultsPath)

    # If resume is requested attempt to load the results file
    # in the specified path
    if args.resume is True:
        # Load settings from the old results JSON
        old_results = core.load_results(resultsDir)
        profileFilename = old_results.options['profile']

        # Changing the args to the old args allows us to set them
        # all in one place further down
        args.exclude_tests = old_results.options['exclude_filter']
        args.include_tests = old_results.options['filter']

    # Otherwise parse additional settings from the command line
    else:
        profileFilename = args.testProfile

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(resultsDir, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(resultsDir)

    # Begin json.
    result_filepath = os.path.join(resultsDir, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', profileFilename)
    json_writer.write_dict_item('filter', args.include_tests)
    json_writer.write_dict_item('exclude_filter', args.exclude_tests)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(profileFilename)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file.  Also, exclude them
    # from being run again.
    if args.resume is True:
        for (key, value) in old_results.tests.items():
            if os.path.sep != '/':
                key = key.replace(os.path.sep, '/', -1)
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
Example #11
    def __init__(self, resultfiles):
        """
        Create an initial object with all of the result information rolled up
        in an easy to process form.

        The constructor of the summary class has an attribute for each HTML
        summary page, which are fed into the index.mako file to produce HTML
        files. resultfiles is a list of paths to JSON results generated by
        piglit-run.
        """

        # Create a Result object for each piglit result and append it to the
        # results list
        self.results = [core.load_results(i) for i in resultfiles]

        self.status = {}
        self.fractions = {}
        self.totals = {}
        self.tests = {
            'all': set(),
            'changes': set(),
            'problems': set(),
            'skipped': set(),
            'regressions': set(),
            'fixes': set()
        }

        def fgh(test, result):
            """ Helper for updating the fractions and status lists """
            fraction[test] = tuple(
                [sum(i) for i in zip(fraction[test], result.fraction)])
            if result != so.Skip() and status[test] < result:
                status[test] = result

        for results in self.results:
            # Create a set of all of the test names across all of the runs
            self.tests['all'] = set(self.tests['all'] | set(results.tests))

            # Create two dictionaries that have a default factory: they return
            # a default value instead of a key error.
            # This default factory must be callable
            self.fractions[results.name] = collections.defaultdict(lambda:
                                                                   (0, 0))
            self.status[results.name] = collections.defaultdict(so.NotRun)

            # short names
            fraction = self.fractions[results.name]
            status = self.status[results.name]

            for key, value in results.tests.iteritems():
                # if the first character of key is a / then our while loop
                # will become an infinite loop. Beyond that, / should never be
                # the leading character; if it is, there is a bug in one of
                # the test profiles.
                assert key[0] != '/'

                #FIXME: Add subtest support

                # Walk the test name as if it was a path, at each level update
                # the tests passed over the total number of tests (fractions),
                # and update the status of the current level if the status of
                # the previous level was worse, but is not skip
                while key != '':
                    fgh(key, value['result'])
                    key = path.dirname(key)

                # when we hit the root update the 'all' group and stop
                fgh('all', value['result'])

        # Create the lists of statuses like problems, regressions, fixes,
        # changes and skips
        for test in self.tests['all']:
            status = []
            for each in self.results:
                try:
                    status.append(each.tests[test]['result'])
                except KeyError:
                    status.append(so.NotRun())

            # Problems include: warn, dmesg-warn, fail, dmesg-fail, and crash.
            # Skip does not go on this page; it has its own 'skipped' page
            if max(status) > so.Pass():
                self.tests['problems'].add(test)

            # Find all tests with a status of skip
            if so.Skip() in status:
                self.tests['skipped'].add(test)

            # find fixes, regressions, and changes
            for i in xrange(len(status) - 1):
                first = status[i]
                last = status[i + 1]
                if max(first, so.Pass()) < last:
                    self.tests['regressions'].add(test)
                if first > max(last, so.Pass()):
                    self.tests['fixes'].add(test)
                # Changes cannot be added in the fixes and regressions passes
                # because NotRun is a change, but not a regression or fix
                if first != last:
                    self.tests['changes'].add(test)
Example #12
    def __init__(self, resultfiles):
        """
        Create an initial object with all of the result information rolled up
        in an easy to process form.

        The constructor of the summary class has an attribute for each HTML
        summary page, which are fed into the index.mako file to produce HTML
        files. resultfiles is a list of paths to JSON results generated by
        piglit-run.
        """

        # Create a Result object for each piglit result and append it to the
        # results list
        self.results = [core.load_results(i) for i in resultfiles]

        self.status = {}
        self.fractions = {}
        self.totals = {}
        self.tests = {'all': set(), 'changes': set(), 'problems': set(),
                      'skipped': set(), 'regressions': set(), 'fixes': set(),
                      'enabled': set(), 'disabled': set()}

        def fgh(test, result):
            """ Helper for updating the fractions and status lists """
            fraction[test] = tuple(
                [sum(i) for i in zip(fraction[test], result.fraction)])

            # If the new status is worse update it, or if the new status is
            # SKIP (which is equivalent to notrun) and the current is NOTRUN
            # update it
            if (status[test] < result or 
                    (result == so.SKIP and status[test] == so.NOTRUN)):
                status[test] = result

        for results in self.results:
            # Create a set of all of the test names across all of the runs
            self.tests['all'] = set(self.tests['all'] | set(results.tests))

            # Create two dictionaries that have a default factory: they return
            # a default value instead of a key error.
            # This default factory must be callable
            self.fractions[results.name] = collections.defaultdict(lambda: (0, 0))
            self.status[results.name] = collections.defaultdict(lambda: so.NOTRUN)

            # short names
            fraction = self.fractions[results.name]
            status = self.status[results.name]

            # store the results to be appended to results.tests. Adding them
            # in the loop will cause a RuntimeError
            temp_results = {}

            for key, value in results.tests.iteritems():
                # if the first character of key is a / then our while loop
                # will become an infinite loop. Beyond that, / should never be
                # the leading character; if it is, there is a bug in one of
                # the test profiles.
                assert key[0] != '/'

                # Treat a test with subtests as if it is a group, assign the
                # subtests' statuses and fractions down to the test, and then
                # proceed like normal.
                if 'subtest' in value:
                    for (subt, subv) in value['subtest'].iteritems():
                        subt = path.join(key, subt)
                        subv = so.status_lookup(subv)

                        # Add the subtest to the fractions and status lists
                        fraction[subt] = subv.fraction
                        status[subt] = subv
                        temp_results.update({subt: {'result': subv}})

                        self.tests['all'].add(subt)
                        while subt != '':
                            fgh(subt, subv)
                            subt = path.dirname(subt)
                        fgh('all', subv)

                    # remove the test from the 'all' list; this will cause it
                    # to be treated as a group
                    self.tests['all'].discard(key)
                else:
                    # Walk the test name as if it was a path, at each level
                    # update the tests passed over the total number of tests
                    # (fractions), and update the status of the current level
                    # if the status of the previous level was worse, but is not
                    # skip
                    while key != '':
                        fgh(key, value['result'])
                        key = path.dirname(key)

                    # when we hit the root update the 'all' group and stop
                    fgh('all', value['result'])

            # Update the results.tests dictionary with the subtests so that
            # they are entered into the appropriate pages other than all.
            # Updating it in the loop will raise a RuntimeError
            for key, value in temp_results.iteritems():
                results.tests[key] = value

        # Create the lists of statuses like problems, regressions, fixes,
        # changes and skips
        for test in self.tests['all']:
            status = []
            for each in self.results:
                try:
                    status.append(each.tests[test]['result'])
                except KeyError:
                    status.append(so.NOTRUN)

            # Problems include: warn, dmesg-warn, fail, dmesg-fail, and crash.
            # Skip does not go on this page; it has its own 'skipped' page
            if max(status) > so.PASS:
                self.tests['problems'].add(test)

            # Find all tests with a status of skip
            if so.SKIP in status:
                self.tests['skipped'].add(test)

            # find fixes, regressions, and changes
            for i in xrange(len(status) - 1):
                first = status[i]
                last = status[i + 1]
                if first in [so.SKIP, so.NOTRUN] and last not in [so.SKIP, so.NOTRUN]:
                    self.tests['enabled'].add(test)
                    self.tests['changes'].add(test)
                elif last in [so.SKIP, so.NOTRUN] and first not in [so.SKIP, so.NOTRUN]:
                    self.tests['disabled'].add(test)
                elif first < last:
                    self.tests['regressions'].add(test)
                    self.tests['changes'].add(test)
                elif first > last:
                    self.tests['fixes'].add(test)
                    self.tests['changes'].add(test)
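
The main addition in this version is subtest handling: a test that carries a 'subtest' dictionary is flattened into one entry per subtest before the rollup, and the parent entry is then treated as a group. A hedged sketch of just that flattening step, with plain status strings instead of piglit status objects:

# Sketch of the subtest flattening done above (names and statuses are
# illustrative; real piglit converts them to status objects via
# so.status_lookup()).
from os import path

key = "spec/ext_foo/test-with-subtests"
value = {"result": "fail",
         "subtest": {"sub1": "pass", "sub2": "fail"}}

flattened = {}
for subt, subv in value["subtest"].items():
    # each subtest becomes its own test entry nested under the parent name
    flattened[path.join(key, subt)] = {"result": subv}

# flattened == {"spec/ext_foo/test-with-subtests/sub1": {"result": "pass"},
#               "spec/ext_foo/test-with-subtests/sub2": {"result": "fail"}}
# the parent key is then discarded from the 'all' set, so from this point on
# it behaves like a group rather than a single test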
Example #13
def test_load_results_file():
    """ Test that load_results takes a file """
    with utils.resultfile() as tfile:
        results = core.load_results(tfile.name)
        assert results
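
Neither utils.tempdir() nor utils.resultfile() is shown in these examples. Based purely on how they are called in Examples #5 and #13, they behave like context managers along the lines of this hypothetical sketch (the real piglit test helpers may differ):

# Hypothetical implementations of the test helpers, inferred from usage only.
import contextlib
import json
import os
import shutil
import tempfile

JSON_DATA = {"name": "fake-run", "options": {}, "tests": {}}  # minimal stand-in


@contextlib.contextmanager
def tempdir():
    """Yield a throw-away directory and remove it afterwards."""
    tdir = tempfile.mkdtemp()
    try:
        yield tdir
    finally:
        shutil.rmtree(tdir)


@contextlib.contextmanager
def resultfile():
    """Yield a temporary file pre-filled with JSON_DATA, then delete it."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.json',
                                     delete=False) as tfile:
        tfile.write(json.dumps(JSON_DATA))
    try:
        yield tfile
    finally:
        os.unlink(tfile.name)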