Example #1

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests (can be used more than "
                             "once)")
    parser.add_argument("--tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests (can be used more than "
                             "once) Deprecated")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests (can be used more than "
                             "once)")
    parser.add_argument("testProfile",
                        metavar="<Path to testfile>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Deprecated
    # --include-tests is the standard going forward, but for backwards
    # compatibility merge args.tests into args.include_tests and drop
    # duplicates
    if args.tests != []:
        print "Warnings: Option --tests is deprecated, use --include-tests"
        args.include_tests = list(set(args.include_tests + args.tests))

    # Set the environment, pass in the included and excluded tests
    env = core.Environment(exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests)

    # Change to piglit's directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile = core.loadTestProfile(args.testProfile)

    def getCommand(test):
        command = ''
        if isinstance(test, GleanTest):
            for var, val in test.env.items():
                command += var + "='" + val + "' "

        # Make the test command relative to the piglit_dir
        testCommand = test.command[:]
        testCommand[0] = os.path.relpath(testCommand[0], piglit_dir)

        command += ' '.join(testCommand)
        return command

    profile.prepare_test_list(env)
    for name, test in profile.test_list.items():
        assert isinstance(test, ExecTest)
        print name, ':::', getCommand(test)
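
All of these excerpts are Python 2 runner scripts from piglit and assume the module's usual imports (argparse, sys, os, os.path as path, re, json, time, getopt, plus the framework's core, GleanTest and ExecTest). They also lean on a shared core.Environment object. The sketch below is reconstructed purely from the call sites in these examples; it is not piglit's actual class, just the interface the surrounding code relies on:

import re

class Environment(object):
    # Minimal sketch of the interface the examples use. Piglit's real
    # core.Environment does more (e.g. collectData() for system info,
    # and some variants are iterable over their options).
    def __init__(self, concurrent=True, execute=True, include_filter=None,
                 exclude_filter=None, valgrind=False, dmesg=False):
        self.concurrent = concurrent
        self.execute = execute
        self.valgrind = valgrind
        self.dmesg = dmesg
        # Include/exclude patterns are kept as compiled regexes.
        self.filter = [re.compile(f) for f in include_filter or []]
        self.exclude_filter = [re.compile(f) for f in exclude_filter or []]
        # Names of already-run tests to skip when resuming.
        self.exclude_tests = set()
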
Example #2

def main():
    env = core.Environment()

    try:
        option_list = [
            "help",
            "tests=",
            "exclude-tests=",
        ]
        options, args = getopt(sys.argv[1:], "ht:x:", option_list)
    except GetoptError:
        usage()

    OptionName = ''
    OptionResume = False
    test_filter = []
    exclude_filter = []

    for name, value in options:
        if name in ('-h', '--help'):
            usage()
        elif name in ('-t', '--tests'):
            test_filter.append(value)
            env.filter.append(re.compile(value))
        elif name in ('-x', '--exclude-tests'):
            exclude_filter.append(value)
            env.exclude_filter.append(re.compile(value))

    if len(args) != 1:
        usage()

    profileFilename = args[0]

    # Change to the piglit's path
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile = core.loadTestProfile(profileFilename, "$GLEAN_RESULT_DIR")

    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file, and exclude them from
    # being run again. (Vestigial in this trimmed-down variant: nothing
    # ever sets OptionResume, and old_results/json_writer are undefined.)
    if OptionResume:
        for (key, value) in old_results.tests.items():
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    def getCommand(test):
        command = ''
        if isinstance(test, GleanTest):
            for var, val in test.env.items():
                command += var + "='" + val + "' "
        command += ' '.join(test.command)
        return command

    profile.prepare_test_list(env)
    for name, test in profile.test_list.items():
        assert isinstance(test, ExecTest)
        print name, ':::', getCommand(test)
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()
    
    results = core.load_results(args.results_path)
    env = core.Environment(concurrent=results.options['concurrent'],
                           exclude_filter=results.options['exclude_filter'],
                           include_filter=results.options['filter'],
                           execute=results.options['execute'],
                           valgrind=results.options['valgrind'],
                           dmesg=results.options['dmesg'])
    
    # Change working directory to the piglit directory
    os.chdir(path.dirname(path.realpath(sys.argv[0])))
    
    results_path = path.join(args.results_path, "main")
    json_writer = core.JSONWriter(open(results_path, 'w+'))
    json_writer.open_dict()
    json_writer.write_dict_key("options")
    json_writer.open_dict()
    for key, value in results.options.iteritems():
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()
    
    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)
    
    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        env.exclude_tests.add(key)
    json_writer.close_dict()
    
    profile = core.loadTestProfile(results.options['profile'])
    # This is a resumed run; don't bother with timing since it won't be
    # accurate anyway
    profile.run(env, json_writer)
    
    json_writer.close_dict()
    json_writer.close_dict()
    json_writer.file.close()
    
    print("\n"
          "Thank you for running Piglit!\n"
          "Results have ben wrriten to {0}".format(results_path))
Example #6

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-t",
        "--include-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Run only matching tests (can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests (can be used more than "
                        "once)")
    parser.add_argument("testProfile",
                        metavar="<Path to testfile>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the environment, pass in the included and excluded tests
    env = core.Environment(exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests)

    # Change to piglit's directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile = core.loadTestProfile(args.testProfile)

    def getCommand(test):
        command = ''
        if isinstance(test, GleanTest):
            for var, val in test.env.items():
                command += var + "='" + val + "' "

        # Make the test command relative to the piglit_dir
        testCommand = test.command[:]
        testCommand[0] = os.path.relpath(testCommand[0], piglit_dir)

        command += ' '.join(testCommand)
        return command

    profile.prepare_test_list(env)
    for name, test in profile.test_list.items():
        assert isinstance(test, ExecTest)
        print name, ':::', getCommand(test)
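
A caveat on getCommand as written: it wraps env values in bare single quotes, which breaks as soon as a value itself contains a quote or a command argument contains spaces. A safer variant using pipes.quote (shlex.quote on Python 3) is sketched below; get_command_quoted and relative_to are illustrative names, and getattr stands in for the GleanTest isinstance check, since only glean tests carry an env dict in these examples:

import os
import pipes  # Python 2; use shlex.quote on Python 3


def get_command_quoted(test, relative_to=None):
    # Shell-quote env assignments and arguments so the printed command
    # can be copy-pasted even when values contain spaces or quotes.
    parts = ['%s=%s' % (var, pipes.quote(val))
             for var, val in getattr(test, 'env', {}).items()]
    cmd = list(test.command)
    if relative_to is not None:
        # Same relative-path trick as the examples above.
        cmd[0] = os.path.relpath(cmd[0], relative_to)
    return ' '.join(parts + [pipes.quote(arg) for arg in cmd])
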
Example #7
def main():
    parser = argparse.ArgumentParser()

    # Either require that a name for the test is passed or that
    # resume is requested
    excGroup1 = parser.add_mutually_exclusive_group()
    excGroup1.add_argument("-n",
                           "--name",
                           metavar="<test name>",
                           default=None,
                           help="Name of this test run")
    excGroup1.add_argument("-r",
                           "--resume",
                           action="store_true",
                           help="Resume an interupted test run")

    # Setting the --dry-run flag is equivalent to env.execute=false
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument(
        "-t",
        "--include-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Run only matching tests (can be used more than once)")
    parser.add_argument("--tests",
      default = [],
      action  = "append",
      metavar = "<regex>",
      help    = "Run only matching tests (can be used more than once) " \
                "DEPRECATED: use --include-tests instead")
    parser.add_argument(
        "-x",
        "--exclude-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Exclude matching tests (can be used more than once)")

    # The new option going forward is --no-concurrency, but to maintain
    # backwards compatibility the -c, --concurrent option is kept as
    # well. The mutually exclusive group allows only one of the two
    # options to be supplied, and errors out otherwise
    excGroup2 = parser.add_mutually_exclusive_group()
    excGroup2.add_argument("--no-concurrency",
                           action="store_false",
                           dest="concurrency",
                           help="Disable concurrent test runs")
    excGroup2.add_argument("-c",
                           "--concurrent",
                           action="store",
                           metavar="<boolean>",
                           choices=["1", "0", "on", "off"],
                           help="Deprecated: Turn concrrent runs on or off")

    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("testProfile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("resultsPath",
                        metavar="<Results Path>",
                        help="Path to results folder")

    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform is not None:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Deprecated:
    # If the deprecated -c, --concurrent flag is passed, override
    # args.concurrency (which would otherwise be set by the
    # --no-concurrency flag) and print a warning.
    if args.concurrent is not None:
        if (args.concurrent == '1' or args.concurrent == 'on'):
            args.concurrency = True
            print "Warning: Option -c, --concurrent is deprecated, " \
              "concurrent test runs are on by default"
        elif (args.concurrent == '0' or args.concurrent == 'off'):
            args.concurrency = False
            print "Warning: Option -c, --concurrent is deprecated, " \
              "use --no-concurrency for non-concurrent test runs"
        # No need for an else, since argparse restricts the allowed values

    # If the deprecated --tests option was passed, print a warning
    if args.tests != []:
        # This merges any patterns passed via --tests into the ones
        # passed via -t/--include-tests and throws out duplicates
        args.include_tests = list(set(args.include_tests + args.tests))
        print "Warning: Option --tests is deprecated, use " \
          "--include-tests instead"

    # If resume is requested attempt to load the results file
    # in the specified path
    if args.resume is True:
        resultsDir = path.realpath(args.resultsPath)

        # Load settings from the old results JSON
        old_results = core.loadTestResults(resultsDir)
        profileFilename = old_results.options['profile']

        # Overwriting the args with the old ones lets us set everything
        # in one place further down
        args.exclude_tests = old_results.options['exclude_filter']
        args.include_tests = old_results.options['filter']

    # Otherwise parse additional settings from the command line
    else:
        profileFilename = args.testProfile
        resultsDir = args.resultsPath

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(resultsDir, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(resultsDir)

    # Begin json.
    result_filepath = os.path.join(resultsDir, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', profileFilename)
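    # Raw writes: the two list values go through json.dumps() directly,
    # presumably because this JSONWriter has no helper for list items.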
    json_writer.write_dict_key('filter')
    result_file.write(json.dumps(args.include_tests))
    json_writer.write_dict_key('exclude_filter')
    result_file.write(json.dumps(args.exclude_tests))
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(profileFilename)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file.  Also, exclude them
    # from being run again.
    if args.resume is True:
        for (key, value) in old_results.tests.items():
            if os.path.sep != '/':
                key = key.replace(os.path.sep, '/', -1)
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
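
Every runner above calls core.checkDir(resultsDir, False) before opening the results file. Judging from these call sites, the helper needs to create the directory and optionally refuse to clobber an existing one; a rough sketch (check_dir is an illustrative name, not piglit's code):

import errno
import os


def check_dir(dirname, fail_if_exists):
    # Create the results directory; optionally refuse to reuse one.
    try:
        os.makedirs(dirname)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
        if fail_if_exists:
            raise RuntimeError(dirname + ' already exists')
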
Example #8
def main():
    parser = argparse.ArgumentParser()
    # Either require that a name for the test is passed or that
    # resume is requested
    excGroup1 = parser.add_mutually_exclusive_group()
    excGroup1.add_argument("-n", "--name",
                           metavar="<test name>",
                           default=None,
                           help="Name of this test run")
    excGroup1.add_argument("-r", "--resume",
                           action="store_true",
                           help="Resume an interupted test run")
    # Setting the --dry-run flag is equivalent to env.execute=false
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    parser.add_argument("-1", "--no-concurrency",
                        action="store_false",
                        dest="concurrency",
                        help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test")
    parser.add_argument("testProfile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("resultsPath",
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform is not None:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Always convert the results path from a relative to an absolute path
    resultsDir = path.realpath(args.resultsPath)

    # If resume is requested attempt to load the results file
    # in the specified path
    if args.resume is True:
        # Load settings from the old results JSON
        old_results = core.load_results(resultsDir)
        profileFilename = old_results.options['profile']

        # Overwriting the args with the old ones lets us set everything
        # in one place further down
        args.exclude_tests = old_results.options['exclude_filter']
        args.include_tests = old_results.options['filter']

    # Otherwise parse additional settings from the command line
    else:
        profileFilename = args.testProfile

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(resultsDir, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(resultsDir)

    # Begin json.
    result_filepath = os.path.join(resultsDir, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', profileFilename)
    json_writer.write_dict_item('filter', args.include_tests)
    json_writer.write_dict_item('exclude_filter', args.exclude_tests)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(profileFilename)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file.  Also, exclude them
    # from being run again.
    if args.resume is True:
        for (key, value) in old_results.tests.items():
            if os.path.sep != '/':
                key = key.replace(os.path.sep, '/', -1)
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
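
The resume path only needs old_results to expose .options and .tests. A sketch of what core.load_results plausibly hands back, assuming the 'main' file is intact (the real function can also repair a file truncated by an interrupted run, which is the point of resuming; all names below are illustrative):

import json
import os


class TestrunResultSketch(object):
    # Stand-in for core.TestrunResult as the examples use it.
    def __init__(self, data):
        self.name = data.get('name')
        self.options = data.get('options', {})
        self.tests = data.get('tests', {})
        self.time_elapsed = data.get('time_elapsed')


def load_results_sketch(results_dir):
    with open(os.path.join(results_dir, 'main')) as f:
        return TestrunResultSketch(json.load(f))
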
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c', '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1", "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test")
    parser.add_argument("test_profile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', args.test_profile)
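    # Environment is assumed to be iterable here, yielding its option
    # (name, value) pairs straight into the 'options' dict.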
    for key, value in env:
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(args.test_profile)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
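
For reference, a runner like the one above would typically be invoked as python piglit-run.py <profile> <results dir>, e.g. python piglit-run.py tests/quick.tests results/quick (both paths illustrative), while the print-commands variants near the top take only the profile path.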
Example #10
def main():
    parser = argparse.ArgumentParser()

    # Either require that a name for the test is passed or that
    # resume is requested
    excGroup1 = parser.add_mutually_exclusive_group()
    excGroup1.add_argument("-n", "--name",
                    metavar = "<test name>",
                    default = None,
                    help    = "Name of this test run")
    excGroup1.add_argument("-r", "--resume",
                    action  = "store_true",
                    help    = "Resume an interupted test run")

    # Setting the --dry-run flag is equivalent to env.execute=false
    parser.add_argument("-d", "--dry-run",
                    action  = "store_false",
                    dest    = "execute",
                    help    = "Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                    default = [],
                    action  = "append",
                    metavar = "<regex>",
                    help    = "Run only matching tests (can be used more than once)")
    parser.add_argument("--tests",
                    default = [],
                    action  = "append",
                    metavar = "<regex>",
                    help    = "Run only matching tests (can be used more than once) " \
                              "DEPRECATED: use --include-tests instead")
    parser.add_argument("-x", "--exclude-tests",
                    default = [],
                    action  = "append",
                    metavar = "<regex>",
                    help    = "Exclude matching tests (can be used more than once)")

    # The new option going forward is --no-concurrency, but to maintain
    # backwards compatibility the -c, --concurrent option is kept as
    # well. The mutually exclusive group allows only one of the two
    # options to be supplied, and errors out otherwise
    excGroup2 = parser.add_mutually_exclusive_group()
    excGroup2.add_argument("--no-concurrency",
                    action  = "store_false",
                    dest    = "concurrency",
                    help    = "Disable concurrent test runs")
    excGroup2.add_argument("-c", "--concurrent",
                    action  = "store",
                    metavar = "<boolean>",
                    choices = ["1", "0", "on", "off"],
                    help    = "Deprecated: Turn concrrent runs on or off")

    parser.add_argument("-p", "--platform",
                    choices = ["glx", "x11_egl", "wayland", "gbm"],
                    help    = "Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                    action  =  "store_true",
                    help    = "Run tests in valgrind's memcheck")
    parser.add_argument("testProfile",
                    metavar = "<Path to test profile>",
                    help    = "Path to testfile to run")
    parser.add_argument("resultsPath",
                    metavar = "<Results Path>",
                    help    = "Path to results folder")

    args = parser.parse_args()


    # Set the platform to pass to waffle
    if args.platform is not None:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Deprecated:
    # If the deprecated -c, --concurrent flag is passed, override
    # args.concurrency (which would otherwise be set by the
    # --no-concurrency flag) and print a warning.
    if args.concurrent is not None:
        if (args.concurrent == '1' or args.concurrent == 'on'):
            args.concurrency = True
            print "Warning: Option -c, --concurrent is deprecated, " \
                            "concurrent test runs are on by default"
        elif (args.concurrent == '0' or args.concurrent == 'off'):
            args.concurrency = False
            print "Warning: Option -c, --concurrent is deprecated, " \
                            "use --no-concurrency for non-concurrent test runs"
        # No need for an else, since argparse restricts the allowed values

    # If the deprecated --tests option was passed, print a warning
    if args.tests != []:
        # This merges any patterns passed via --tests into the ones
        # passed via -t/--include-tests and throws out duplicates
        args.include_tests = list(set(args.include_tests + args.tests))
        print "Warning: Option --tests is deprecated, use " \
                        "--include-tests instead"

    # Always convert the results path from a relative to an absolute path
    resultsDir = path.realpath(args.resultsPath)

    # If resume is requested attempt to load the results file
    # in the specified path
    if args.resume is True:
        # Load settings from the old results JSON
        old_results = core.loadTestResults(resultsDir)
        profileFilename = old_results.options['profile']

        # Overwriting the args with the old ones lets us set everything
        # in one place further down
        args.exclude_tests = old_results.options['exclude_filter']
        args.include_tests = old_results.options['filter']

    # Otherwise parse additional settings from the command line
    else:
        profileFilename = args.testProfile

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                    exclude_filter=args.exclude_tests,
                    include_filter=args.include_tests,
                    execute=args.execute,
                    valgrind=args.valgrind)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(resultsDir, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(resultsDir)

    # Begin json.
    result_filepath = os.path.join(resultsDir, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', profileFilename)
    json_writer.write_dict_key('filter')
    result_file.write(json.dumps(args.include_tests))
    json_writer.write_dict_key('exclude_filter')
    result_file.write(json.dumps(args.exclude_tests))
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(profileFilename)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file.  Also, exclude them
    # from being run again.
    if args.resume is True:
        for (key, value) in old_results.tests.items():
            if os.path.sep != '/':
                key = key.replace(os.path.sep, '/', -1)
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
Example #11
def main():
	env = core.Environment()

	try:
		option_list = [
			 "help",
			 "dry-run",
			 "resume",
			 "valgrind",
			 "tests=",
			 "name=",
			 "exclude-tests=",
			 "concurrent=",
			 ]
		options, args = getopt(sys.argv[1:], "hdrt:n:x:c:", option_list)
	except GetoptError:
		usage()

	OptionName = ''
	OptionResume = False
	test_filter = []
	exclude_filter = []

	for name, value in options:
		if name in ('-h', '--help'):
			usage()
		elif name in ('-d', '--dry-run'):
			env.execute = False
		elif name in ('-r', '--resume'):
			OptionResume = True
		elif name in ('--valgrind',):
			env.valgrind = True
		elif name in ('-t', '--tests'):
			test_filter.append(value)
			env.filter.append(re.compile(value))
		elif name in ('-x', '--exclude-tests'):
			exclude_filter.append(value)
			env.exclude_filter.append(re.compile(value))
		elif name in ('-n', '--name'):
			OptionName = value
		elif name in ('-c', '--concurrent'):
			if value in ('1', 'on'):
				env.concurrent = True
			elif value in ('0', 'off'):
				env.concurrent = False
			else:
				usage()

	if OptionResume:
		if test_filter or OptionName:
			print "-r is not compatible with -t or -n."
			usage()
		if len(args) != 1:
			usage()
		resultsDir = args[0]

		# Load settings from the old results JSON
		old_results = core.loadTestResults(resultsDir)
		profileFilename = old_results.options['profile']
		for value in old_results.options['filter']:
			test_filter.append(value)
			env.filter.append(re.compile(value))
		for value in old_results.options['exclude_filter']:
			exclude_filter.append(value)
			env.exclude_filter.append(re.compile(value))
	else:
		if len(args) != 2:
			usage()

		profileFilename = args[0]
		resultsDir = path.realpath(args[1])

	# Change to piglit's directory
	piglit_dir = path.dirname(path.realpath(sys.argv[0]))
	os.chdir(piglit_dir)

	core.checkDir(resultsDir, False)

	results = core.TestrunResult()

	# Set results.name
	if OptionName == '':
		results.name = path.basename(resultsDir)
	else:
		results.name = OptionName

	# Begin json.
	result_filepath = os.path.join(resultsDir, 'main')
	result_file = open(result_filepath, 'w')
	json_writer = core.JSONWriter(result_file)
	json_writer.open_dict()

	# Write out command line options for use in resuming.
	json_writer.write_dict_key('options')
	json_writer.open_dict()
	json_writer.write_dict_item('profile', profileFilename)
	json_writer.write_dict_key('filter')
	result_file.write(json.dumps(test_filter))
	json_writer.write_dict_key('exclude_filter')
	result_file.write(json.dumps(exclude_filter))
	json_writer.close_dict()

	json_writer.write_dict_item('name', results.name)
	for (key, value) in env.collectData().items():
		json_writer.write_dict_item(key, value)

	profile = core.loadTestProfile(profileFilename, resultsDir)

	json_writer.write_dict_key('tests')
	json_writer.open_dict()
	# If resuming an interrupted test run, re-write all of the existing
	# results since we clobbered the results file.  Also, exclude them
	# from being run again.
	if OptionResume:
		for (key, value) in old_results.tests.items():
			json_writer.write_dict_item(key, value)
			env.exclude_tests.add(key)

	time_start = time.time()
	profile.run(env, json_writer)
	time_end = time.time()

	json_writer.close_dict()

	results.time_elapsed = time_end - time_start
	json_writer.write_dict_item('time_elapsed', results.time_elapsed)

	# End json.
	json_writer.close_dict()
	json_writer.file.close()

	print
	print 'Thank you for running Piglit!'
	print 'Results have been written to ' + result_filepath
Example #12
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "-t",
        "--include-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Run only matching tests (can be used more than once)")
    parser.add_argument("--tests",
      default = [],
      action  = "append",
      metavar = "<regex>",
      help    = "Run only matching tests (can be used more than once)" \
          "Deprecated")
    parser.add_argument(
        "-x",
        "--exclude-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Exclude matching tests (can be used more than once)")
    parser.add_argument("testProfile",
                        metavar="<Path to testfile>",
                        help="Path to results folder")

    args = parser.parse_args()

    # Deprecated
    # --include-tests is the standard going forward, but for backwards
    # compatibility merge args.tests into args.include_tests and drop
    # duplicates
    if args.tests != []:
        print "Warnings: Option --tests is deprecated, use --include-tests"
        args.include_tests = list(set(args.include_tests + args.tests))

    # Set the environment, pass in the included and excluded tests
    env = core.Environment(
        exclude_filter=args.exclude_tests,
        include_filter=args.include_tests,
    )

    # Change to piglit's directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile = core.loadTestProfile(args.testProfile)

    def getCommand(test):
        command = ''
        if isinstance(test, GleanTest):
            for var, val in test.env.items():
                command += var + "='" + val + "' "
        command += ' '.join(test.command)
        return command

    profile.prepare_test_list(env)
    for name, test in profile.test_list.items():
        assert isinstance(test, ExecTest)
        print name, ':::', getCommand(test)
Example #13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-n",
                        "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t",
                        "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                        "(can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                        "(can be used more than once)")
    parser.add_argument("-1",
                        "--no-concurrency",
                        action="store_false",
                        dest="concurrency",
                        help="Disable concurrent test runs")
    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                        "after each test")
    parser.add_argument("test_profile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', args.test_profile)
    for key, value in env:
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(args.test_profile)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath