# Imports assumed by the entry points below. Each function originally lived
# in its own script; the exact module paths may differ between revisions.
import argparse
import json
import os
import os.path as path
import re
import sys
import time
from getopt import getopt, GetoptError

import framework.core as core
import framework.profile
from framework.exectest import ExecTest
from framework.gleantest import GleanTest


def main():
    env = core.Environment()

    try:
        options, args = getopt(sys.argv[1:], "h", ["help"])
    except GetoptError:
        usage()

    for name, value in options:
        if name in ('-h', '--help'):
            usage()

    if len(args) < 2:
        usage()

    # Load the first results set, then fold every remaining results set
    # into it, later results overwriting earlier ones per test name
    combined = core.loadTestResults(args[0])
    del args[0]
    for resultsDir in args:
        results = core.loadTestResults(resultsDir)
        for testname, result in results.tests.items():
            combined.tests[testname] = result

    combined.write(sys.stdout)
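# The getopt-based entry points above and below call a usage() helper that is
# not part of this excerpt. A minimal sketch of what it presumably does (the
# original script's exact message may differ):
def usage():
    print "Usage: %s [-h|--help] <results> [<results> ...]" % sys.argv[0]
    sys.exit(1)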
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    results = core.load_results(args.results_path)
    env = core.Environment(concurrent=results.options['concurrent'],
                           exclude_filter=results.options['exclude_filter'],
                           include_filter=results.options['filter'],
                           execute=results.options['execute'],
                           valgrind=results.options['valgrind'],
                           dmesg=results.options['dmesg'],
                           verbose=results.options['verbose'])

    # Change working directory to the piglit directory
    os.chdir(path.dirname(path.realpath(sys.argv[0])))

    # Attempt to restore a saved platform; if there is no saved platform,
    # just carry on
    try:
        os.environ['PIGLIT_PLATFORM'] = results.options['platform']
    except KeyError:
        pass

    results_path = path.join(args.results_path, "main")
    json_writer = core.JSONWriter(open(results_path, 'w+'))
    json_writer.open_dict()

    json_writer.write_dict_key("options")
    json_writer.open_dict()
    for key, value in results.options.iteritems():
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # Re-write the results recorded before the interruption and exclude
    # those tests from being run again
    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        env.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if env.dmesg:
        profile.dmesg = env.dmesg

    # This run is resumed, so don't bother with time since it won't be
    # accurate anyway
    profile.run(env, json_writer)

    json_writer.close_dict()
    json_writer.close_dict()
    json_writer.file.close()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(results_path))
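# Hypothetical invocation, assuming the function above is the entry point of
# piglit-resume.py and "results/quick" is an interrupted run's folder:
#
#   ./piglit-resume.py results/quick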
def main():
    env = core.Environment()

    try:
        option_list = ["help", "tests=", "exclude-tests="]
        options, args = getopt(sys.argv[1:], "ht:x:", option_list)
    except GetoptError:
        usage()

    OptionResume = False
    test_filter = []
    exclude_filter = []

    for name, value in options:
        if name in ('-h', '--help'):
            usage()
        elif name in ('-t', '--tests'):
            test_filter.append(value)
            env.filter.append(re.compile(value))
        elif name in ('-x', '--exclude-tests'):
            exclude_filter.append(value)
            env.exclude_filter.append(re.compile(value))

    if len(args) != 1:
        usage()

    profileFilename = args[0]

    # Change to piglit's path
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile = core.loadTestProfile(profileFilename, "$GLEAN_RESULT_DIR")

    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file. Also, exclude them
    # from being run again. Note: OptionResume is never set to True in
    # this script, so this branch (and the old_results and json_writer
    # names it references) is vestigial.
    if OptionResume:
        for (key, value) in old_results.tests.items():
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    def getCommand(test):
        command = ''
        if isinstance(test, GleanTest):
            for var, val in test.env.items():
                command += var + "='" + val + "' "
        command += ' '.join(test.command)
        return command

    profile.prepare_test_list(env)
    for name, test in profile.test_list.items():
        assert isinstance(test, ExecTest)
        print name, ':::', getCommand(test)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    results = core.load_results(args.results_path)
    env = core.Environment(concurrent=results.options['concurrent'],
                           exclude_filter=results.options['exclude_filter'],
                           include_filter=results.options['filter'],
                           execute=results.options['execute'],
                           valgrind=results.options['valgrind'],
                           dmesg=results.options['dmesg'])

    # Change working directory to the piglit directory
    os.chdir(path.dirname(path.realpath(sys.argv[0])))

    results_path = path.join(args.results_path, "main")
    json_writer = core.JSONWriter(open(results_path, 'w+'))
    json_writer.open_dict()

    json_writer.write_dict_key("options")
    json_writer.open_dict()
    for key, value in results.options.iteritems():
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        env.exclude_tests.add(key)

    profile = core.loadTestProfile(results.options['profile'])
    # This run is resumed, so don't bother with time since it won't be
    # accurate anyway. Note: the 'tests' dict stays open here so that
    # profile.run() can append to it; it is closed below.
    profile.run(env, json_writer)

    json_writer.close_dict()
    json_writer.close_dict()
    json_writer.file.close()

    print("\n"
          "Thank you for running Piglit!\n"
          "Results have been written to {0}".format(results_path))
def main():
    parser = argparse.ArgumentParser(sys.argv)
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests (can be used more than "
                             "once)")
    parser.add_argument("testProfile",
                        metavar="<Path to testfile>",
                        help="Path to testfile to run")
    args = parser.parse_args()

    # Set the environment, passing in the included and excluded tests
    env = core.Environment(exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests)

    # Change to piglit's path
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile = core.loadTestProfile(args.testProfile)

    def getCommand(test):
        command = ''
        if isinstance(test, GleanTest):
            for var, val in test.env.items():
                command += var + "='" + val + "' "

        # Make the test command relative to the piglit_dir
        testCommand = test.command[:]
        testCommand[0] = os.path.relpath(testCommand[0], piglit_dir)

        command += ' '.join(testCommand)
        return command

    profile.prepare_test_list(env)
    for name, test in profile.test_list.items():
        assert isinstance(test, ExecTest)
        print name, ':::', getCommand(test)
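# Hypothetical invocation, assuming the function above is the entry point of
# piglit-print-commands.py; the profile path is illustrative:
#
#   ./piglit-print-commands.py -t texturing tests/quick.tests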
def main():
    parser = argparse.ArgumentParser(sys.argv)

    # Either require that a name for the test run is passed or that
    # resume is requested
    excGroup1 = parser.add_mutually_exclusive_group()
    excGroup1.add_argument("-n", "--name",
                           metavar="<test name>",
                           default=None,
                           help="Name of this test run")
    excGroup1.add_argument("-r", "--resume",
                           action="store_true",
                           help="Resume an interrupted test run")

    # Setting the --dry-run flag is equivalent to env.execute=False
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("--tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests (can be used more than "
                             "once). DEPRECATED: use --include-tests instead")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests (can be used more than "
                             "once)")

    # The new option going forward should be --no-concurrency, but to
    # maintain backwards compatibility the -c, --concurrent option should
    # also be kept. This code allows only one of the two options to be
    # supplied, or it throws an error
    excGroup2 = parser.add_mutually_exclusive_group()
    excGroup2.add_argument("--no-concurrency",
                           action="store_false",
                           dest="concurrency",
                           help="Disable concurrent test runs")
    excGroup2.add_argument("-c", "--concurrent",
                           action="store",
                           metavar="<boolean>",
                           choices=["1", "0", "on", "off"],
                           help="Deprecated: turn concurrent runs on or off")

    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of window system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("testProfile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("resultsPath",
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform is not None:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Deprecated:
    # If the deprecated -c, --concurrent flag is passed, override
    # args.concurrency (which would otherwise be set by the --no-concurrency
    # flag) and print a warning
    if args.concurrent is not None:
        if args.concurrent in ('1', 'on'):
            args.concurrency = True
            print "Warning: Option -c, --concurrent is deprecated, " \
                  "concurrent test runs are on by default"
        elif args.concurrent in ('0', 'off'):
            args.concurrency = False
            print "Warning: Option -c, --concurrent is deprecated, " \
                  "use --no-concurrency for non-concurrent test runs"
        # No need for an else, since argparse restricts the allowed values

    # If the deprecated --tests option was passed, print a warning
    if args.tests != []:
        # Merge any values passed to --tests into the ones passed to -t or
        # --include-tests and throw out duplicates
        args.include_tests = list(set(args.include_tests + args.tests))
        print "Warning: Option --tests is deprecated, use " \
              "--include-tests instead"

    # If resume is requested, attempt to load the results file
    # in the specified path
    if args.resume is True:
        resultsDir = path.realpath(args.resultsPath)

        # Load settings from the old results JSON
        old_results = core.loadTestResults(resultsDir)
        profileFilename = old_results.options['profile']

        # Overwriting args with the old values lets us set everything
        # in one place further down
        args.exclude_tests = old_results.options['exclude_filter']
        args.include_tests = old_results.options['filter']
    # Otherwise take the settings from the command line
    else:
        profileFilename = args.testProfile
        resultsDir = args.resultsPath

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(resultsDir, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(resultsDir)

    # Begin json.
    result_filepath = os.path.join(resultsDir, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', profileFilename)
    json_writer.write_dict_key('filter')
    result_file.write(json.dumps(args.include_tests))
    json_writer.write_dict_key('exclude_filter')
    result_file.write(json.dumps(args.exclude_tests))
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(profileFilename)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file. Also, exclude them
    # from being run again.
    if args.resume is True:
        for (key, value) in old_results.tests.items():
            if os.path.sep != '/':
                key = key.replace(os.path.sep, '/', -1)
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
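# For reference, the 'options' block written above serializes to JSON of
# roughly this shape (values illustrative; the keys emitted by
# env.collectData() are not shown in this excerpt):
#
#   "options": {
#       "profile": "tests/quick.tests",
#       "filter": ["glsl"],
#       "exclude_filter": []
#   }
#
# The resume path earlier in the function reads these same keys back out of
# old_results.options.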
def main():
    parser = argparse.ArgumentParser(sys.argv)
    parser.add_argument("-n", "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")

    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c', '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1", "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")

    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of window system passed to waffle")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v", "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                             "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # If dmesg is requested the tests must run serially, because dmesg
    # capture isn't reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    if args.config_file:
        core.PIGLIT_CONFIG.readfp(args.config_file)
        args.config_file.close()
    else:
        core.PIGLIT_CONFIG.read(
            os.path.join(os.path.dirname(__file__), 'piglit.conf'))

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg,
                           verbose=args.verbose)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(args.results_path, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', args.test_profile)
    for key, value in env:
        json_writer.write_dict_item(key, value)
    if args.platform:
        json_writer.write_dict_item('platform', args.platform)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.merge_test_profiles(args.test_profile)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    time_start = time.time()

    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg

    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
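# Hypothetical invocation of the entry point above, assuming it lives in
# piglit-run.py; the profile and results paths are illustrative:
#
#   ./piglit-run.py --dmesg -x glean tests/quick.py tests/gpu.py results/all
#
# Note that --dmesg forces concurrency to "none", as implemented above, and
# that multiple profiles are merged via core.merge_test_profiles().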
def main():
    parser = argparse.ArgumentParser(sys.argv)
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("--tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests (can be used more than "
                             "once). DEPRECATED: use --include-tests instead")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests (can be used more than "
                             "once)")
    parser.add_argument("testProfile",
                        metavar="<Path to testfile>",
                        help="Path to testfile to run")
    args = parser.parse_args()

    # Deprecated:
    # --include-tests is the standard going forward, but for backwards
    # compatibility merge args.tests into args.include_tests and drop
    # duplicates
    if args.tests != []:
        print "Warning: Option --tests is deprecated, use --include-tests"
        args.include_tests = list(set(args.include_tests + args.tests))

    # Set the environment, passing in the included and excluded tests
    env = core.Environment(exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests)

    # Change to piglit's path
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile = core.loadTestProfile(args.testProfile)

    def getCommand(test):
        command = ''
        if isinstance(test, GleanTest):
            for var, val in test.env.items():
                command += var + "='" + val + "' "
        command += ' '.join(test.command)
        return command

    profile.prepare_test_list(env)
    for name, test in profile.test_list.items():
        assert isinstance(test, ExecTest)
        print name, ':::', getCommand(test)
def main():
    parser = argparse.ArgumentParser(sys.argv)
    parser.add_argument("-n", "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    parser.add_argument("-1", "--no-concurrency",
                        action="store_false",
                        dest="concurrency",
                        help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of window system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test")
    parser.add_argument("test_profile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(args.results_path, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', args.test_profile)
    for key, value in env:
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(args.test_profile)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
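# Each of the entry points above is presumably invoked from a standard module
# guard like the following at the bottom of its script (not part of this
# excerpt):
if __name__ == "__main__":
    main()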