Example 1
def main():
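	# NOTE: assumes piglit's framework core module is available as "core"
	# and that getopt/sys are imported at module level (imports omitted here).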
	env = core.Environment()

	try:
		options, args = getopt(sys.argv[1:], "h", [ "help" ])
	except GetoptError:
		usage()

	OptionName = ''

	for name, value in options:
		if name in ('-h', '--help'):
			usage()

	if len(args) < 2:
		usage()

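	# Use the first results file as the base, then merge each remaining
	# results set into it; later results overwrite earlier ones per test name.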
	combined = core.loadTestResults(args[0])
	del args[0]

	for resultsDir in args:
		results = core.loadTestResults(resultsDir)

		for testname, result in results.tests.items():
			combined.tests[testname] = result

	combined.write(sys.stdout)
Example 2
def main():
    env = core.Environment()

    try:
        options, args = getopt(sys.argv[1:], "h", ["help"])
    except GetoptError:
        usage()

    OptionName = ''

    for name, value in options:
        if name in ('-h', '--help'):
            usage()

    if len(args) < 2:
        usage()

    combined = core.loadTestResults(args[0])
    del args[0]

    for resultsDir in args:
        results = core.loadTestResults(resultsDir)

        for testname, result in results.tests.items():
            combined.tests[testname] = result

    combined.write(sys.stdout)
Example 3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results",
                        metavar="<First Results File>",
                        nargs="*",
                        help="Space seperated list of results files")
    args = parser.parse_args()

    combined = core.loadTestResults(args.results.pop(0))

    for resultsDir in args.results:
        results = core.loadTestResults(resultsDir)

        for testname, result in results.tests.items():
            combined.tests[testname] = result

    combined.write(sys.stdout)
Example 4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results",
                                            metavar = "<First Results File>",
                                            nargs   = "*",
                                help    = "Space seperated list of results files")
    args = parser.parse_args()

    combined = core.loadTestResults(args.results.pop(0))

    for resultsDir in args.results:
        results = core.loadTestResults(resultsDir)

        for testname, result in results.tests.items():
            combined.tests[testname] = result

    combined.write(sys.stdout)
Example 5
    def write(self, arg):
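        # Load a single results file, wrap its tests in a 'piglit' suite on
        # the report object, and emit one entry per test.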
        results = [core.loadTestResults(arg)]
        summary = Summary(results)

        self.report.start()
        self.report.startSuite('piglit')
        try:
            for test in summary.allTests():
                self.write_test(summary, test)
        finally:
            self.enter_path([])
            self.report.stopSuite()
            self.report.stop()
Example 6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-o",
                        "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("-l",
                        "--list",
                        action="store",
                        help="Use test results from a list file")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="+",
                        help="Results files to include in HTML")
    args = parser.parse_args()

    core.checkDir(args.summaryDir, not args.overwrite)

    results = []
    for result_dir in args.resultsFiles:
        results.append(core.loadTestResults(result_dir))

    summary = framework.summary.Summary(results)
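    # For each test run: build an alphanumeric-only directory name, write the
    # run's index page, then one HTML page per test result.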
    for j in range(len(summary.testruns)):
        tr = summary.testruns[j]
        tr.codename = filter(lambda s: s.isalnum(), tr.name)
        dirname = args.summaryDir + '/' + tr.codename
        core.checkDir(dirname, False)
        writeTestrunHtml(tr, dirname + '/index.html')
        for test in summary.allTests():
            filename = dirname + '/' + testPathToHtmlFilename(test.path)
            writeResultHtml(test, test.results[j], filename)

    writefile(os.path.join(args.summaryDir, 'result.css'),
              readfile(os.path.join(templatedir, 'result.css')))
    writefile(os.path.join(args.summaryDir, 'index.css'),
              readfile(os.path.join(templatedir, 'index.css')))
    writeSummaryHtml(summary, args.summaryDir, 'all')
    writeSummaryHtml(summary, args.summaryDir, 'problems')
    writeSummaryHtml(summary, args.summaryDir, 'changes')
    writeSummaryHtml(summary, args.summaryDir, 'regressions')
    writeSummaryHtml(summary, args.summaryDir, 'fixes')
    writeSummaryHtml(summary, args.summaryDir, 'skipped')
Example 7
def main():
    parser = argparse.ArgumentParser(sys.argv)

    # Either require that a name for the test is passed or that
    # resume is requested
    excGroup1 = parser.add_mutually_exclusive_group()
    excGroup1.add_argument("-n",
                           "--name",
                           metavar="<test name>",
                           default=None,
                           help="Name of this test run")
    excGroup1.add_argument("-r",
                           "--resume",
                           action="store_true",
                           help="Resume an interupted test run")

    # Setting the --dry-run flag is equivalent to env.execute=false
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument(
        "-t",
        "--include-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Run only matching tests (can be used more than once)")
    parser.add_argument("--tests",
      default = [],
      action  = "append",
      metavar = "<regex>",
      help    = "Run only matching tests (can be used more than once) " \
                "DEPRECATED: use --include-tests instead")
    parser.add_argument(
        "-x",
        "--exclude-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Exclude matching tests (can be used more than once)")

    # The new option going forward should be --no-concurrency, but to
    # maintain backwards compatibility the -c, --concurrent option should
    # also be maintained. This code allows only one of the two options to be
    # supplied, or it throws an error
    excGroup2 = parser.add_mutually_exclusive_group()
    excGroup2.add_argument("--no-concurrency",
                           action="store_false",
                           dest="concurrency",
                           help="Disable concurrent test runs")
    excGroup2.add_argument("-c",
                           "--concurrent",
                           action="store",
                           metavar="<boolean>",
                           choices=["1", "0", "on", "off"],
                           help="Deprecated: Turn concrrent runs on or off")

    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("testProfile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("resultsPath",
                        metavar="<Results Path>",
                        help="Path to results folder")

    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform is not None:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Deprecated:
    # If the deprecated -c, --concurrent flag is passed, override
    # args.concurrency (which would otherwise be set by the --no-concurrency
    # flag) and print a warning.
    if args.concurrent is not None:
        if (args.concurrent == '1' or args.concurrent == 'on'):
            args.concurrency = True
            print "Warning: Option -c, --concurrent is deprecated, " \
              "concurrent test runs are on by default"
        elif (args.concurrent == '0' or args.concurrent == 'off'):
            args.concurrency = False
            print "Warning: Option -c, --concurrent is deprecated, " \
              "use --no-concurrency for non-concurrent test runs"
        # No need for an else, since argparse restricts the allowed arguments

    # If the deprecated tests option was passed print a warning
    if args.tests != []:
        # This merges any options passed into the --tests option into the
        # ones passed into -t or --include-tests and throws out duplicates
        args.include_tests = list(set(args.include_tests + args.tests))
        print "Warning: Option --tests is deprecated, use " \
          "--include-tests instead"

    # If resume is requested attempt to load the results file
    # in the specified path
    if args.resume is True:
        resultsDir = path.realpath(args.resultsPath)

        # Load settings from the old results JSON
        old_results = core.loadTestResults(resultsDir)
        profileFilename = old_results.options['profile']

        # Changing the args to the old args allows us to set them
        # all in one place further down
        args.exclude_tests = old_results.options['exclude_filter']
        args.include_tests = old_results.options['filter']

    # Otherwise parse additional settings from the command line
    else:
        profileFilename = args.testProfile
        resultsDir = args.resultsPath

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(resultsDir, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(resultsDir)

    # Begin json.
    result_filepath = os.path.join(resultsDir, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', profileFilename)
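    # The filter lists are dumped straight into the underlying file as JSON
    # arrays rather than going through the JSONWriter item helpers.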
    json_writer.write_dict_key('filter')
    result_file.write(json.dumps(args.include_tests))
    json_writer.write_dict_key('exclude_filter')
    result_file.write(json.dumps(args.exclude_tests))
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(profileFilename)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file.  Also, exclude them
    # from being run again.
    if args.resume is True:
        for (key, value) in old_results.tests.items():
            if os.path.sep != '/':
                key = key.replace(os.path.sep, '/', -1)
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
Example 8
def loadresult(descr):
    result = core.loadTestResults(descr[0])
    if len(descr) > 1:
        result.__dict__.update(descr[1])
    return result
Example 9
def loadresult(descr):
	result = core.loadTestResults(descr[0])
	if len(descr) > 1:
		result.__dict__.update(descr[1])
	return result
Example 10
def main():
    parser = argparse.ArgumentParser(sys.argv)

    # Either require that a name for the test is passed or that
    # resume is requested
    excGroup1 = parser.add_mutually_exclusive_group()
    excGroup1.add_argument("-n", "--name",
                    metavar = "<test name>",
                    default = None,
                    help    = "Name of this test run")
    excGroup1.add_argument("-r", "--resume",
                    action  = "store_true",
                    help    = "Resume an interupted test run")

    # Setting the --dry-run flag is equivalent to env.execute=false
    parser.add_argument("-d", "--dry-run",
                    action  = "store_false",
                    dest    = "execute",
                    help    = "Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                    default = [],
                    action  = "append",
                    metavar = "<regex>",
                    help    = "Run only matching tests (can be used more than once)")
    parser.add_argument("--tests",
                    default = [],
                    action  = "append",
                    metavar = "<regex>",
                    help    = "Run only matching tests (can be used more than once) " \
                              "DEPRECATED: use --include-tests instead")
    parser.add_argument("-x", "--exclude-tests",
                    default = [],
                    action  = "append",
                    metavar = "<regex>",
                    help    = "Exclude matching tests (can be used more than once)")

    # The new option going forward should be --no-concurrency, but to
    # maintain backwards compatibility the -c, --concurrent option should
    # also be maintained. This code allows only one of the two options to be
    # supplied, or it throws an error
    excGroup2 = parser.add_mutually_exclusive_group()
    excGroup2.add_argument("--no-concurrency",
                    action  = "store_false",
                    dest    = "concurrency",
                    help    = "Disable concurrent test runs")
    excGroup2.add_argument("-c", "--concurrent",
                    action  = "store",
                    metavar = "<boolean>",
                    choices = ["1", "0", "on", "off"],
                    help    = "Deprecated: Turn concrrent runs on or off")

    parser.add_argument("-p", "--platform",
                    choices = ["glx", "x11_egl", "wayland", "gbm"],
                    help    = "Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                    action  =  "store_true",
                    help    = "Run tests in valgrind's memcheck")
    parser.add_argument("testProfile",
                    metavar = "<Path to test profile>",
                    help    = "Path to testfile to run")
    parser.add_argument("resultsPath",
                    metavar = "<Results Path>",
                    help    = "Path to results folder")

    args = parser.parse_args()


    # Set the platform to pass to waffle
    if args.platform is not None:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Deprecated:
    # If the deprecated -c, --concurrent flag is passed, override
    # args.concurrency (which would otherwise be set by the --no-concurrency
    # flag) and print a warning.
    if args.concurrent is not None:
        if (args.concurrent == '1' or args.concurrent == 'on'):
            args.concurrency = True
            print "Warning: Option -c, --concurrent is deprecated, " \
                            "concurrent test runs are on by default"
        elif (args.concurrent == '0' or args.concurrent == 'off'):
            args.concurrency = False
            print "Warning: Option -c, --concurrent is deprecated, " \
                            "use --no-concurrency for non-concurrent test runs"
        # No need for an else, since argparse restricts the allowed arguments

    # If the deprecated tests option was passed print a warning
    if args.tests != []:
        # This merges any options passed into the --tests option into the
        # ones passed into -t or --include-tests and throws out duplicates
        args.include_tests = list(set(args.include_tests + args.tests))
        print "Warning: Option --tests is deprecated, use " \
                        "--include-tests instead"

    # Always Convert Results Path from Relative path to Actual Path.
    resultsDir = path.realpath(args.resultsPath)

    # If resume is requested attempt to load the results file
    # in the specified path
    if args.resume is True:
        # Load settings from the old results JSON
        old_results = core.loadTestResults(resultsDir)
        profileFilename = old_results.options['profile']

        # Changing the args to the old args allows us to set them
        # all in one place further down
        args.exclude_tests = old_results.options['exclude_filter']
        args.include_tests = old_results.options['filter']

    # Otherwise parse additional settings from the command line
    else:
        profileFilename = args.testProfile

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                    exclude_filter=args.exclude_tests,
                    include_filter=args.include_tests,
                    execute=args.execute,
                    valgrind=args.valgrind)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(resultsDir, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(resultsDir)

    # Begin json.
    result_filepath = os.path.join(resultsDir, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', profileFilename)
    json_writer.write_dict_key('filter')
    result_file.write(json.dumps(args.include_tests))
    json_writer.write_dict_key('exclude_filter')
    result_file.write(json.dumps(args.exclude_tests))
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(profileFilename)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file.  Also, exclude them
    # from being run again.
    if args.resume is True:
        for (key, value) in old_results.tests.items():
            if os.path.sep != '/':
                key = key.replace(os.path.sep, '/', -1)
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
Example 11
def main():
    parser = argparse.ArgumentParser(sys.argv)
    # Either require that a name for the test is passed or that
    # resume is requested
    excGroup1 = parser.add_mutually_exclusive_group()
    excGroup1.add_argument("-n", "--name",
                           metavar="<test name>",
                           default=None,
                           help="Name of this test run")
    excGroup1.add_argument("-r", "--resume",
                           action="store_true",
                           help="Resume an interupted test run")
    # Setting the --dry-run flag is equivalent to env.execute=false
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default = [],
                        action  = "append",
                        metavar = "<regex>",
                        help    = "Run only matching tests (can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default = [],
                        action  = "append",
                        metavar = "<regex>",
                        help    = "Exclude matching tests (can be used more than once)")
    parser.add_argument("--no-concurrency",
                        action  = "store_false",
                        dest    = "concurrency",
                        help    = "Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("testProfile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("resultsPath",
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform is not None:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Always Convert Results Path from Relative path to Actual Path.
    resultsDir = path.realpath(args.resultsPath)

    # If resume is requested attempt to load the results file
    # in the specified path
    if args.resume is True:
        # Load settings from the old results JSON
        old_results = core.loadTestResults(resultsDir)
        profileFilename = old_results.options['profile']

        # Changing the args to the old args allows us to set them
        # all in one place further down
        args.exclude_tests = old_results.options['exclude_filter']
        args.include_tests = old_results.options['filter']

    # Otherwise parse additional settings from the command line
    else:
        profileFilename = args.testProfile

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(resultsDir, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(resultsDir)

    # Begin json.
    result_filepath = os.path.join(resultsDir, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', profileFilename)
    json_writer.write_dict_item('filter', args.include_tests)
    json_writer.write_dict_item('exclude_filter', args.exclude_tests)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(profileFilename)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file.  Also, exclude them
    # from being run again.
    if args.resume is True:
        for (key, value) in old_results.tests.items():
            if os.path.sep != '/':
                key = key.replace(os.path.sep, '/', -1)
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
Example 12
def main():
	env = core.Environment()

	try:
		option_list = [
			 "help",
			 "dry-run",
			 "resume",
			 "valgrind",
			 "tests=",
			 "name=",
			 "exclude-tests=",
			 "concurrent=",
			 ]
		options, args = getopt(sys.argv[1:], "hdrt:n:x:c:", option_list)
	except GetoptError:
		usage()

	OptionName = ''
	OptionResume = False
	test_filter = []
	exclude_filter = []

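	# Translate the parsed command line flags into Environment settings
	# and local run options (name, resume, filters).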
	for name, value in options:
		if name in ('-h', '--help'):
			usage()
		elif name in ('-d', '--dry-run'):
			env.execute = False
		elif name in ('-r', '--resume'):
			OptionResume = True
		elif name in ('--valgrind',):
			env.valgrind = True
		elif name in ('-t', '--tests'):
			test_filter.append(value)
			env.filter.append(re.compile(value))
		elif name in ('-x', '--exclude-tests'):
			exclude_filter.append(value)
			env.exclude_filter.append(re.compile(value))
		elif name in ('-n', '--name'):
			OptionName = value
		elif name in ('-c', '--concurrent'):
			if value in ('1', 'on'):
				env.concurrent = True
			elif value in ('0', 'off'):
				env.concurrent = False
			else:
				usage()

	if OptionResume:
		if test_filter or OptionName:
			print "-r is not compatible with -t or -n."
			usage()
		if len(args) != 1:
			usage()
		resultsDir = args[0]

		# Load settings from the old results JSON
		old_results = core.loadTestResults(resultsDir)
		profileFilename = old_results.options['profile']
		for value in old_results.options['filter']:
			test_filter.append(value)
			env.filter.append(re.compile(value))
		for value in old_results.options['exclude_filter']:
			exclude_filter.append(value)
			env.exclude_filter.append(re.compile(value))
	else:
		if len(args) != 2:
			usage()

		profileFilename = args[0]
		resultsDir = path.realpath(args[1])

	# Change to the piglit's path
	piglit_dir = path.dirname(path.realpath(sys.argv[0]))
	os.chdir(piglit_dir)

	core.checkDir(resultsDir, False)

	results = core.TestrunResult()

	# Set results.name
	if OptionName == '':
		results.name = path.basename(resultsDir)
	else:
		results.name = OptionName

	# Begin json.
	result_filepath = os.path.join(resultsDir, 'main')
	result_file = open(result_filepath, 'w')
	json_writer = core.JSONWriter(result_file)
	json_writer.open_dict()

	# Write out command line options for use in resuming.
	json_writer.write_dict_key('options')
	json_writer.open_dict()
	json_writer.write_dict_item('profile', profileFilename)
	json_writer.write_dict_key('filter')
	result_file.write(json.dumps(test_filter))
	json_writer.write_dict_key('exclude_filter')
	result_file.write(json.dumps(exclude_filter))
	json_writer.close_dict()

	json_writer.write_dict_item('name', results.name)
	for (key, value) in env.collectData().items():
		json_writer.write_dict_item(key, value)

	profile = core.loadTestProfile(profileFilename, resultsDir)

	json_writer.write_dict_key('tests')
	json_writer.open_dict()
	# If resuming an interrupted test run, re-write all of the existing
	# results since we clobbered the results file.  Also, exclude them
	# from being run again.
	if OptionResume:
		for (key, value) in old_results.tests.items():
			json_writer.write_dict_item(key, value)
			env.exclude_tests.add(key)

	time_start = time.time()
	profile.run(env, json_writer)
	time_end = time.time()

	json_writer.close_dict()

	results.time_elapsed = time_end - time_start
	json_writer.write_dict_item('time_elapsed', results.time_elapsed)

	# End json.
	json_writer.close_dict()
	json_writer.file.close()

	print
	print 'Thank you for running Piglit!'
	print 'Results have been written to ' + result_filepath