import argparse
import os
import sys
import time
import os.path as path

# Assumption: `core` is piglit's framework core module; the exact import path
# may differ depending on where these scripts live in the tree.
import framework.core as core


def main():
    """Resume an interrupted test run from an existing results directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    results = core.load_results(args.results_path)
    env = core.Environment(concurrent=results.options['concurrent'],
                           exclude_filter=results.options['exclude_filter'],
                           include_filter=results.options['filter'],
                           execute=results.options['execute'],
                           valgrind=results.options['valgrind'],
                           dmesg=results.options['dmesg'],
                           verbose=results.options['verbose'])

    # Change the working directory to the piglit directory
    os.chdir(path.dirname(path.realpath(sys.argv[0])))

    # Attempt to restore a saved platform; if there is no saved platform,
    # just carry on
    try:
        os.environ['PIGLIT_PLATFORM'] = results.options['platform']
    except KeyError:
        pass

    results_path = path.join(args.results_path, "main")
    json_writer = core.JSONWriter(open(results_path, 'w+'))
    json_writer.open_dict()

    json_writer.write_dict_key("options")
    json_writer.open_dict()
    for key, value in results.options.iteritems():
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for key, value in env.collectData().items():
        json_writer.write_dict_item(key, value)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # Replay the tests that already completed and exclude them from the
    # resumed run
    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        env.exclude_tests.add(key)

    profile = core.merge_test_profiles(results.options['profile'])
    if env.dmesg:
        profile.dmesg = env.dmesg

    # This is a resumed run; don't bother timing it since the result won't
    # be accurate anyway
    profile.run(env, json_writer)

    json_writer.close_dict()
    json_writer.close_dict()
    json_writer.file.close()

    print("\n"
          "Thank you for running Piglit!\n"
          "Results have been written to {0}".format(results_path))
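# Both entry points stream results through core.JSONWriter instead of building
# the whole document in memory. The sketch below shows what a writer with the
# same open_dict/close_dict/write_dict_key/write_dict_item interface might
# look like, for readers without the piglit framework at hand. It is an
# illustration only, not piglit's actual implementation (which presumably also
# knows how to encode piglit-specific result objects).
import json


class SketchJSONWriter(object):
    """Incrementally writes one JSON object to an open file."""

    def __init__(self, f):
        self.file = f
        # One flag per nesting level: does the next entry need a comma?
        self._needs_comma = [False]

    def _separator(self):
        if self._needs_comma[-1]:
            self.file.write(', ')
        self._needs_comma[-1] = True

    def open_dict(self):
        self.file.write('{')
        self._needs_comma.append(False)

    def close_dict(self):
        self.file.write('}')
        self._needs_comma.pop()

    def write_dict_key(self, key):
        # Emits '"key": '; the value follows via write_dict_item or open_dict
        self._separator()
        self.file.write('{0}: '.format(json.dumps(key)))

    def write_dict_item(self, key, value):
        self.write_dict_key(key)
        self.file.write(json.dumps(value))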
def main():
    """Run the requested test profiles and write results as JSON."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c', '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1", "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of window system passed to waffle")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v", "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                             "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # If dmesg is requested the tests must run serially, because dmesg
    # output isn't reliable with a threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    if args.config_file:
        core.PIGLIT_CONFIG.readfp(args.config_file)
        args.config_file.close()
    else:
        core.PIGLIT_CONFIG.read(
            os.path.join(os.path.dirname(__file__), 'piglit.conf'))

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg,
                           verbose=args.verbose)

    # Change the working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(args.results_path, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', args.test_profile)
    for key, value in env:
        json_writer.write_dict_item(key, value)
    if args.platform:
        json_writer.write_dict_item('platform', args.platform)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for key, value in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.merge_test_profiles(args.test_profile)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    time_start = time.time()

    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg

    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print("\n"
          "Thank you for running Piglit!\n"
          "Results have been written to {0}".format(result_filepath))
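# A minimal sketch of a command-line entry point. The flattened source above
# does not show how either main() is actually invoked; each presumably ships
# in its own script, so here the call resolves to the most recently defined
# function (the test-run entry point).
if __name__ == '__main__':
    main()

# Example invocation (hypothetical profile and results paths):
#   python piglit-run.py tests/quick.py results/quick-run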