Code example #1
File: summary.py Project: renchenglei/piglit-1
def feature(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-o",
                        "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("featureFile",
                        metavar="<Feature json file>",
                        help="Json file containing the features description")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="*",
                        help="Results files to include in HTML")
    args = parser.parse_args(input_)

    # If args.featureFile and args.resultsFiles are empty, then raise an error
    if not args.featureFile and not args.resultsFiles:
        raise parser.error("Missing required option -l or <resultsFiles>")

    # If resultsFiles is empty or the feature JSON file doesn't exist, raise an error
    if not args.resultsFiles or not path.exists(args.featureFile):
        raise parser.error("Missing json file")

    # if overwrite is requested delete the output directory
    if path.exists(args.summaryDir) and args.overwrite:
        shutil.rmtree(args.summaryDir)

    # If the requested directory doesn't exist, create it or throw an error
    core.checkDir(args.summaryDir, not args.overwrite)

    summary.feat(args.resultsFiles, args.summaryDir, args.featureFile)
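The feature() entry point above takes its command-line arguments as a list (input_) rather than reading sys.argv itself, which makes it easy to drive from a dispatcher or a test. A minimal usage sketch, assuming a hypothetical import path and placeholder file names:

# Sketch only: the module path and all file/directory names below are assumptions.
from framework.programs.summary import feature

if __name__ == '__main__':
    # Overwrite any previous output, then build the feature summary from one results file.
    feature(['--overwrite', 'features.json', 'feature-summary/', 'results/run1.json'])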
Code example #2
File: summary.py Project: chadversary/piglit
def feature(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("featureFile",
                        metavar="<Feature json file>",
                        help="Json file containing the features description")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="*",
                        help="Results files to include in HTML")
    args = parser.parse_args(input_)

    # If args.featureFile and args.resultsFiles are empty, then raise an error
    if not args.featureFile and not args.resultsFiles:
        raise parser.error("Missing required option -l or <resultsFiles>")

    # If resultsFiles is empty or the feature JSON file doesn't exist, raise an error
    if not args.resultsFiles or not path.exists(args.featureFile):
        raise parser.error("Missing json file")

    # if overwrite is requested delete the output directory
    if path.exists(args.summaryDir) and args.overwrite:
        shutil.rmtree(args.summaryDir)

    # If the requested directory doesn't exist, create it or throw an error
    core.checkDir(args.summaryDir, not args.overwrite)

    summary.feat(args.resultsFiles, args.summaryDir, args.featureFile)
Code example #3
File: summary.py Project: janesma/piglit
def html(input_):
    # Build a set of the status names and add "all". This is used as the
    # choices list for -e/--exclude-details
    statuses = set(str(s) for s in status.ALL)
    statuses.add("all")

    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--overwrite", action="store_true", help="Overwrite existing directories")
    parser.add_argument(
        "-l",
        "--list",
        action="store",
        help="Load a newline separated list of results. These "
        "results will be prepended to any Results "
        "specified on the command line",
    )
    parser.add_argument(
        "-e",
        "--exclude-details",
        default=[],
        action="append",
        choices=statuses,
        help="Optionally exclude the generation of HTML pages "
        "for individual test pages with the status(es) "
        "given as arguments. This speeds up HTML "
        "generation, but reduces the info in the HTML "
        "pages. May be used multiple times",
    )
    parser.add_argument("summaryDir", metavar="<Summary Directory>", help="Directory to put HTML files in")
    parser.add_argument("resultsFiles", metavar="<Results Files>", nargs="*", help="Results files to include in HTML")
    args = parser.parse_args(input_)

    # If args.list and args.resultsFiles are empty, then raise an error
    if not args.list and not args.resultsFiles:
        raise parser.error("Missing required option -l or <resultsFiles>")

    # Convert the exclude_details list to status objects; without this,
    # using the -e option would raise an exception
    if args.exclude_details:
        # If exclude_details contains 'all', exclude every status
        if "all" in args.exclude_details:
            args.exclude_details = status.ALL
        else:
            args.exclude_details = frozenset(status.status_lookup(i) for i in args.exclude_details)

    # if overwrite is requested delete the output directory
    if path.exists(args.summaryDir) and args.overwrite:
        shutil.rmtree(args.summaryDir)

    # If the requested directory doesn't exist, create it or throw an error
    core.checkDir(args.summaryDir, not args.overwrite)

    # Merge args.list and args.resultsFiles
    if args.list:
        args.resultsFiles.extend(core.parse_listfile(args.list))

    # Create the HTML output
    output = summary.Summary(args.resultsFiles)
    output.generate_html(args.summaryDir, args.exclude_details)
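html() follows the same pattern, so the -e/--exclude-details handling can be exercised by passing an argument list directly. A small sketch that calls the function defined above; the result files and output directory are placeholders:

# Sketch only: paths are placeholders; html() is the function shown above.
html(['--overwrite',
      '-e', 'skip', '-e', 'pass',               # skip per-test pages for these statuses
      'summary-html/',                          # <Summary Directory>
      'results/old.json', 'results/new.json'])  # <Results Files>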
Code example #4
File: piglit-summary-html.py Project: RAOF/piglit
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("-l", "--list",
                        action="store",
                        help="Load a newline separated list of results. These "
                             "results will be prepended to any Results "
                             "specified on the command line")
    parser.add_argument("-e", "--exclude-details",
                        default=[],
                        action="append",
                        choices=['skip', 'pass', 'warn', 'crash', 'fail',
                                 'all'],
                        help="Optionally exclude the generation of HTML pages "
                             "for individual test pages with the status(es) "
                             "given as arguments. This speeds up HTML "
                             "generation, but reduces the info in the HTML "
                             "pages. May be used multiple times")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="*",
                        help="Results files to include in HTML")
    args = parser.parse_args()

    # If args.list and args.resultsFiles are empty, then raise an error
    if not args.list and not args.resultsFiles:
        raise parser.error("Missing required option -l or <resultsFiles>")

    # Convert the exclude_details list to status objects; without this,
    # using the -e option would raise an exception
    if args.exclude_details:
        # If exclude_details contains 'all', exclude every status
        if 'all' in args.exclude_details:
            args.exclude_details = [status.Skip(), status.Pass(), status.Warn(),
                                    status.Crash(), status.Fail()]
        else:
            args.exclude_details = [status.status_lookup(i) for i in
                                    args.exclude_details]


    # if overwrite is requested delete the output directory
    if path.exists(args.summaryDir) and args.overwrite:
        shutil.rmtree(args.summaryDir)

    # If the requested directory doesn't exist, create it or throw an error
    checkDir(args.summaryDir, not args.overwrite)

    # Merge args.list and args.resultsFiles
    if args.list:
        args.resultsFiles.extend(parse_listfile(args.list))

    # Create the HTML output
    output = summary.Summary(args.resultsFiles)
    output.generate_html(args.summaryDir, args.exclude_details)
Code example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-o",
                        "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("-l",
                        "--list",
                        action="store",
                        help="Load a newline separated list of results. These "
                        "results will be prepended to any Results "
                        "specified on the command line")
    parser.add_argument(
        "-e",
        "--exclude-details",
        default=[],
        action="append",
        choices=['skip', 'pass', 'warn', 'crash',
                 'fail', 'all'],
        help="Optionally exclude the generation of HTML pages "
        "for individual test pages with the status(es) "
        "given as arguments. This speeds up HTML "
        "generation, but reduces the info in the HTML "
        "pages. May be used multiple times")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="*",
                        help="Results files to include in HTML")
    args = parser.parse_args()

    # If args.list and args.resultsFiles are empty, then raise an error
    if not args.list and not args.resultsFiles:
        raise parser.error("Missing required option -l or <resultsFiles>")

    # If exclude_details contains 'all', expand it to every status name
    if 'all' in args.exclude_details:
        args.exclude_details = ['skip', 'pass', 'warn', 'crash', 'fail']

    # if overwrite is requested delete the output directory
    if path.exists(args.summaryDir) and args.overwrite:
        shutil.rmtree(args.summaryDir)

    # If the requested directory doesn't exist, create it or throw an error
    checkDir(args.summaryDir, not args.overwrite)

    # Merge args.list and args.resultsFiles
    if args.list:
        args.resultsFiles.extend(parse_listfile(args.list))

    # Create the HTML output
    output = summary.Summary(args.resultsFiles)
    output.generateHTML(args.summaryDir, args.exclude_details)
Code example #6
def main():
	try:
		options, args = getopt(sys.argv[1:], "hofl:", [ "help", "overwrite", "list=" ])
	except GetoptError:
		usage()

	OptionOverwrite = False
	OptionList = []
	for name, value in options:
		if name == "-h" or name == "--help":
			usage()
		elif name == "-o" or name == "--overwrite":
			OptionOverwrite = True
		elif name == "-l" or name == "--list":
			OptionList += parse_listfile(value)

	OptionList += [[name] for name in args[1:]]

	if len(args) < 1 or len(OptionList) == 0:
		usage()

	summaryDir = args[0]
	core.checkDir(summaryDir, not OptionOverwrite)

	results = []
	for result_dir in OptionList:
		results.append(loadresult(result_dir))

	summary = framework.summary.Summary(results)
	for j in range(len(summary.testruns)):
		tr = summary.testruns[j]
		tr.codename = filter(lambda s: s.isalnum(), tr.name)
		dirname = summaryDir + '/' + tr.codename
		core.checkDir(dirname, False)
		writeTestrunHtml(tr, dirname + '/index.html')
		for test in summary.allTests():
			filename = dirname + '/' + testPathToHtmlFilename(test.path)
			writeResultHtml(test, test.results[j], filename)

	writefile(os.path.join(summaryDir, 'result.css'), readfile(os.path.join(templatedir, 'result.css')))
	writefile(os.path.join(summaryDir, 'index.css'), readfile(os.path.join(templatedir, 'index.css')))
	writeSummaryHtml(summary, summaryDir, 'all')
	writeSummaryHtml(summary, summaryDir, 'problems')
	writeSummaryHtml(summary, summaryDir, 'changes')
	writeSummaryHtml(summary, summaryDir, 'regressions')
	writeSummaryHtml(summary, summaryDir, 'fixes')
Code example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-o",
                        "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("-l",
                        "--list",
                        action="store",
                        help="Use test results from a list file")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="+",
                        help="Results files to include in HTML")
    args = parser.parse_args()

    core.checkDir(args.summaryDir, not args.overwrite)

    results = []
    for result_dir in args.resultsFiles:
        results.append(core.loadTestResults(result_dir))

    summary = framework.summary.Summary(results)
    for j in range(len(summary.testruns)):
        tr = summary.testruns[j]
        tr.codename = filter(lambda s: s.isalnum(), tr.name)
        dirname = args.summaryDir + '/' + tr.codename
        core.checkDir(dirname, False)
        writeTestrunHtml(tr, dirname + '/index.html')
        for test in summary.allTests():
            filename = dirname + '/' + testPathToHtmlFilename(test.path)
            writeResultHtml(test, test.results[j], filename)

    writefile(os.path.join(args.summaryDir, 'result.css'),
              readfile(os.path.join(templatedir, 'result.css')))
    writefile(os.path.join(args.summaryDir, 'index.css'),
              readfile(os.path.join(templatedir, 'index.css')))
    writeSummaryHtml(summary, args.summaryDir, 'all')
    writeSummaryHtml(summary, args.summaryDir, 'problems')
    writeSummaryHtml(summary, args.summaryDir, 'changes')
    writeSummaryHtml(summary, args.summaryDir, 'regressions')
    writeSummaryHtml(summary, args.summaryDir, 'fixes')
    writeSummaryHtml(summary, args.summaryDir, 'skipped')
Code example #8
File: run.py Project: janesma/piglit
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS     = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX      = 0x0002
        SEM_NOOPENFILEERRORBOX     = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must run serially, because dmesg
    # isn't reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        sync=args.sync)

    # Set the platform to pass to waffle
    opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Create a dictionary to pass to initialize json, it needs the contents of
    # the env dictionary and profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    options['name'] = results.name
    options['env'] = core.collect_system_info()
    # FIXME: this should be the actual count, but profile needs to be
    # refactored to make that possible because of the flattening pass that is
    # part of profile.run
    options['test_count'] = 0
    options['test_suffix'] = args.junit_suffix
    options['log_level'] = args.log_level

    # Begin json.
    backend = framework.results.get_backend(args.backend)(
        args.results_path,
        options,
        file_fsync=opts.sync)

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, args.log_level, backend)
    time_end = time.time()

    results.time_elapsed = time_end - time_start
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
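Per the docstring, run() is written so it can be shared between the standalone piglit-run.py script and the piglit run subcommand. A thin wrapper along the following lines is one way that sharing could look; the wrapper and its import path are an illustrative sketch, not code taken from either file:

#!/usr/bin/env python
# Hypothetical wrapper: forward the command-line arguments to the shared run().
# The import path is an assumption for illustration.
import sys

from framework.programs.run import run

if __name__ == '__main__':
    run(sys.argv[1:])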
Code example #9
def run(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-n",
                        "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t",
                        "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                        "(can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                        "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c',
                             '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1",
                             "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of window system passed to waffle")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                        "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                        "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must run serially, because dmesg
    # isn't reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    core.get_config(args.config_file)

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        verbose=args.verbose)

    # Set the platform to pass to waffle
    if args.platform:
        opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'results.json')
    result_file = open(result_filepath, 'w')
    json_writer = framework.results.JSONWriter(result_file)

    # Create a dictionary to pass to initialize json, it needs the contents of
    # the env dictionary and profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    json_writer.initialize_json(options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_json()

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + result_filepath)
Code example #10
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must run serially, because dmesg
    # isn't reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # build up the include filter based on test_list
    if args.test_list:
        with open(args.test_list) as test_list:
            for line in test_list.readlines():
                args.include_tests.append(line.rstrip())

    # Pass arguments into Options
    options.OPTIONS.concurrent = args.concurrency
    options.OPTIONS.exclude_filter = args.exclude_tests
    options.OPTIONS.include_filter = args.include_tests
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.dmesg = args.dmesg
    options.OPTIONS.sync = args.sync

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()
    backends.set_meta(args.backend, results)

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix)
    backend.initialize(_create_metadata(args, results.name))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    results.time_elapsed.start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(args.log_level, backend)

    results.time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Code example #11
File: piglit-run.py Project: kphillisjr/piglit
def main():
    parser = argparse.ArgumentParser(sys.argv)
    # Either require that a name for the test is passed or that
    # resume is requested
    excGroup1 = parser.add_mutually_exclusive_group()
    excGroup1.add_argument("-n", "--name",
                           metavar="<test name>",
                           default=None,
                           help="Name of this test run")
    excGroup1.add_argument("-r", "--resume",
                           action="store_true",
                           help="Resume an interrupted test run")
    # Setting the --dry-run flag is equivalent to env.execute=false
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    parser.add_argument("-1", "--no-concurrency",
                        action="store_false",
                        dest="concurrency",
                        help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of window system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test")
    parser.add_argument("testProfile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("resultsPath",
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform is not None:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Always Convert Results Path from Relative path to Actual Path.
    resultsDir = path.realpath(args.resultsPath)

    # If resume is requested attempt to load the results file
    # in the specified path
    if args.resume is True:
        # Load settings from the old results JSON
        old_results = core.load_results(resultsDir)
        profileFilename = old_results.options['profile']

        # Changing the args to the old args allows us to set them
        # all in one place further down
        args.exclude_tests = old_results.options['exclude_filter']
        args.include_tests = old_results.options['filter']

    # Otherwise parse additional settings from the command line
    else:
        profileFilename = args.testProfile

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(resultsDir, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(resultsDir)

    # Begin json.
    result_filepath = os.path.join(resultsDir, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', profileFilename)
    json_writer.write_dict_item('filter', args.include_tests)
    json_writer.write_dict_item('exclude_filter', args.exclude_tests)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(profileFilename)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file.  Also, exclude them
    # from being run again.
    if args.resume is True:
        for (key, value) in old_results.tests.items():
            if os.path.sep != '/':
                key = key.replace(os.path.sep, '/', -1)
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
Code example #12
File: piglit-run.py Project: passdedd/piglit-test
def main():
    parser = argparse.ArgumentParser(sys.argv)
    parser.add_argument("-n",
                        "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t",
                        "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                        "(can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                        "(can be used more than once)")
    parser.add_argument("-1",
                        "--no-concurrency",
                        action="store_false",
                        dest="concurrency",
                        help="Disable concurrent test runs")
    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of window system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                        "after each test")
    parser.add_argument("test_profile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', args.test_profile)
    for key, value in env:
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(args.test_profile)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
Code example #13
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must run serially, because dmesg
    # isn't reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        sync=args.sync)

    # Set the platform to pass to waffle
    opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    backend = backends.get_backend(args.backend)(
        args.results_path,
        file_fsync=opts.sync,
        junit_suffix=args.junit_suffix)
    backend.initialize(_create_metadata(args, results.name, opts))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, args.log_level, backend)
    time_end = time.time()

    results.time_elapsed = time_end - time_start
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Code example #14
def main():
    parser = argparse.ArgumentParser(sys.argv)

    # Either require that a name for the test is passed or that
    # resume is requested
    excGroup1 = parser.add_mutually_exclusive_group()
    excGroup1.add_argument("-n",
                           "--name",
                           metavar="<test name>",
                           default=None,
                           help="Name of this test run")
    excGroup1.add_argument("-r",
                           "--resume",
                           action="store_true",
                           help="Resume an interrupted test run")

    # Setting the --dry-run flag is equivalent to env.execute=false
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument(
        "-t",
        "--include-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Run only matching tests (can be used more than once)")
    parser.add_argument(
        "--tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Run only matching tests (can be used more than once) "
             "DEPRECATED: use --include-tests instead")
    parser.add_argument(
        "-x",
        "--exclude-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Exclude matching tests (can be used more than once)")

    # The new option going forward should be --no-concurrency, but to
    # maintain backwards compatibility the -c, --concurrent option should
    # also be maintained. This code allows only one of the two options to be
    # supplied, or it throws an error
    excGroup2 = parser.add_mutually_exclusive_group()
    excGroup2.add_argument("--no-concurrency",
                           action="store_false",
                           dest="concurrency",
                           help="Disable concurrent test runs")
    excGroup2.add_argument("-c",
                           "--concurrent",
                           action="store",
                           metavar="<boolean>",
                           choices=["1", "0", "on", "off"],
                           help="Deprecated: Turn concurrent runs on or off")

    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of window system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("testProfile",
                        metavar="<Path to test profile>",
                        help="Path to testfile to run")
    parser.add_argument("resultsPath",
                        metavar="<Results Path>",
                        help="Path to results folder")

    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform is not None:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Deprecated:
    # If the deprecated -c, --concurrent flag is passed, override
    # args.concurrency (which would otherwise be set by the --no-concurrency)
    # flag and print a warning.
    if args.concurrent is not None:
        if (args.concurrent == '1' or args.concurrent == 'on'):
            args.concurrency = True
            print "Warning: Option -c, --concurrent is deprecated, " \
              "concurrent test runs are on by default"
        elif (args.concurrent == '0' or args.concurrent == 'off'):
            args.concurrency = False
            print "Warning: Option -c, --concurrent is deprecated, " \
              "use --no-concurrency for non-concurrent test runs"
        # No need for else, since argparse restricts the arguments allowed

    # If the deprecated tests option was passed print a warning
    if args.tests != []:
        # This merges any options passed into the --tests option into the
        # ones passed into -t or --include-tests and throws out duplicates
        args.include_tests = list(set(args.include_tests + args.tests))
        print "Warning: Option --tests is deprecated, use " \
          "--include-tests instead"

    # If resume is requested attempt to load the results file
    # in the specified path
    if args.resume is True:
        resultsDir = path.realpath(args.resultsPath)

        # Load settings from the old results JSON
        old_results = core.loadTestResults(resultsDir)
        profileFilename = old_results.options['profile']

        # Changing the args to the old args allows us to set them
        # all in one place further down
        args.exclude_tests = old_results.options['exclude_filter']
        args.include_tests = old_results.options['filter']

    # Otherwise parse additional settings from the command line
    else:
        profileFilename = args.testProfile
        resultsDir = args.resultsPath

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(resultsDir, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(resultsDir)

    # Begin json.
    result_filepath = os.path.join(resultsDir, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', profileFilename)
    json_writer.write_dict_key('filter')
    result_file.write(json.dumps(args.include_tests))
    json_writer.write_dict_key('exclude_filter')
    result_file.write(json.dumps(args.exclude_tests))
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(profileFilename)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file.  Also, exclude them
    # from being run again.
    if args.resume is True:
        for (key, value) in old_results.tests.items():
            if os.path.sep != '/':
                key = key.replace(os.path.sep, '/', -1)
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
Code example #15
File: piglit-run.py Project: dervishxgit/piglit
def main():
    parser = argparse.ArgumentParser(sys.argv)

    # Either require that a name for the test is passed or that
    # resume is requested
    excGroup1 = parser.add_mutually_exclusive_group()
    excGroup1.add_argument("-n", "--name",
                    metavar = "<test name>",
                    default = None,
                    help    = "Name of this test run")
    excGroup1.add_argument("-r", "--resume",
                    action  = "store_true",
                    help    = "Resume an interrupted test run")

    # Setting the --dry-run flag is equivalent to env.execute=false
    parser.add_argument("-d", "--dry-run",
                    action  = "store_false",
                    dest    = "execute",
                    help    = "Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                    default = [],
                    action  = "append",
                    metavar = "<regex>",
                    help    = "Run only matching tests (can be used more than once)")
    parser.add_argument("--tests",
                    default = [],
                    action  = "append",
                    metavar = "<regex>",
                    help    = "Run only matching tests (can be used more than once) " \
                              "DEPRECATED: use --include-tests instead")
    parser.add_argument("-x", "--exclude-tests",
                    default = [],
                    action  = "append",
                    metavar = "<regex>",
                    help    = "Exclude matching tests (can be used more than once)")

    # The new option going forward should be --no-concurrency, but to
    # maintain backwards compatibility the -c, --concurrent option should
    # also be maintained. This code allows only one of the two options to be
    # supplied, or it throws an error
    excGroup2 = parser.add_mutually_exclusive_group()
    excGroup2.add_argument("--no-concurrency",
                    action  = "store_false",
                    dest    = "concurrency",
                    help    = "Disable concurrent test runs")
    excGroup2.add_argument("-c", "--concurrent",
                    action  = "store",
                    metavar = "<boolean>",
                    choices = ["1", "0", "on", "off"],
                    help    = "Deprecated: Turn concurrent runs on or off")

    parser.add_argument("-p", "--platform",
                    choices = ["glx", "x11_egl", "wayland", "gbm"],
                    help    = "Name of window system passed to waffle")
    parser.add_argument("--valgrind",
                    action  =  "store_true",
                    help    = "Run tests in valgrind's memcheck")
    parser.add_argument("testProfile",
                    metavar = "<Path to test profile>",
                    help    = "Path to testfile to run")
    parser.add_argument("resultsPath",
                    metavar = "<Results Path>",
                    help    = "Path to results folder")

    args = parser.parse_args()


    # Set the platform to pass to waffle
    if args.platform is not None:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # Deprecated:
    # If the deprecated -c, --concurrent flag is passed, override
    # args.concurrency (which would otherwise be set by the --no-concurrency)
    # flag and print a warning.
    if args.concurrent is not None:
        if (args.concurrent == '1' or args.concurrent == 'on'):
            args.concurrency = True
            print "Warning: Option -c, --concurrent is deprecated, " \
                            "concurrent test runs are on by default"
        elif (args.concurrent == '0' or args.concurrent == 'off'):
            args.concurrency = False
            print "Warning: Option -c, --concurrent is deprecated, " \
                            "use --no-concurrency for non-concurrent test runs"
        # No need for else, since argparse restricts the arguments allowed

    # If the deprecated tests option was passed print a warning
    if args.tests != []:
        # This merges any options passed into the --tests option into the
        # ones passed into -t or --include-tests and throws out duplicates
        args.include_tests = list(set(args.include_tests + args.tests))
        print "Warning: Option --tests is deprecated, use " \
                        "--include-tests instead"

    # Always Convert Results Path from Relative path to Actual Path.
    resultsDir = path.realpath(args.resultsPath)

    # If resume is requested attempt to load the results file
    # in the specified path
    if args.resume is True:
        # Load settings from the old results JSON
        old_results = core.loadTestResults(resultsDir)
        profileFilename = old_results.options['profile']

        # Changing the args to the old args allows us to set them
        # all in one place further down
        args.exclude_tests = old_results.options['exclude_filter']
        args.include_tests = old_results.options['filter']

    # Otherwise parse additional settings from the command line
    else:
        profileFilename = args.testProfile

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                    exclude_filter=args.exclude_tests,
                    include_filter=args.include_tests,
                    execute=args.execute,
                    valgrind=args.valgrind)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    core.checkDir(resultsDir, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(resultsDir)

    # Begin json.
    result_filepath = os.path.join(resultsDir, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', profileFilename)
    json_writer.write_dict_key('filter')
    result_file.write(json.dumps(args.include_tests))
    json_writer.write_dict_key('exclude_filter')
    result_file.write(json.dumps(args.exclude_tests))
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(profileFilename)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    # If resuming an interrupted test run, re-write all of the existing
    # results since we clobbered the results file.  Also, exclude them
    # from being run again.
    if args.resume is True:
        for (key, value) in old_results.tests.items():
            if os.path.sep != '/':
                key = key.replace(os.path.sep, '/', -1)
            json_writer.write_dict_item(key, value)
            env.exclude_tests.add(key)

    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
Code example #16
File: piglit-run.py Project: blaztinn/piglit
def main():
	env = core.Environment()

	try:
		option_list = [
			 "help",
			 "dry-run",
			 "resume",
			 "valgrind",
			 "tests=",
			 "name=",
			 "exclude-tests=",
			 "concurrent=",
			 ]
		options, args = getopt(sys.argv[1:], "hdrt:n:x:c:", option_list)
	except GetoptError:
		usage()

	OptionName = ''
	OptionResume = False
	test_filter = []
	exclude_filter = []

	for name, value in options:
		if name in ('-h', '--help'):
			usage()
		elif name in ('-d', '--dry-run'):
			env.execute = False
		elif name in ('-r', '--resume'):
			OptionResume = True
		elif name in ('--valgrind',):
			env.valgrind = True
		elif name in ('-t', '--tests'):
			test_filter.append(value)
			env.filter.append(re.compile(value))
		elif name in ('-x', '--exclude-tests'):
			exclude_filter.append(value)
			env.exclude_filter.append(re.compile(value))
		elif name in ('-n', '--name'):
			OptionName = value
		elif name in ('-c', '--concurrent'):
			if value in ('1', 'on'):
				env.concurrent = True
			elif value in ('0', 'off'):
				env.concurrent = False
			else:
				usage()

	if OptionResume:
		if test_filter or OptionName:
			print "-r is not compatible with -t or -n."
			usage()
		if len(args) != 1:
			usage()
		resultsDir = args[0]

		# Load settings from the old results JSON
		old_results = core.loadTestResults(resultsDir)
		profileFilename = old_results.options['profile']
		for value in old_results.options['filter']:
			test_filter.append(value)
			env.filter.append(re.compile(value))
		for value in old_results.options['exclude_filter']:
			exclude_filter.append(value)
			env.exclude_filter.append(re.compile(value))
	else:
		if len(args) != 2:
			usage()

		profileFilename = args[0]
		resultsDir = path.realpath(args[1])

	# Change to the piglit's path
	piglit_dir = path.dirname(path.realpath(sys.argv[0]))
	os.chdir(piglit_dir)

	core.checkDir(resultsDir, False)

	results = core.TestrunResult()

	# Set results.name
	if OptionName == '':
		results.name = path.basename(resultsDir)
	else:
		results.name = OptionName

	# Begin json.
	result_filepath = os.path.join(resultsDir, 'main')
	result_file = open(result_filepath, 'w')
	json_writer = core.JSONWriter(result_file)
	json_writer.open_dict()

	# Write out command line options for use in resuming.
	json_writer.write_dict_key('options')
	json_writer.open_dict()
	json_writer.write_dict_item('profile', profileFilename)
	json_writer.write_dict_key('filter')
	result_file.write(json.dumps(test_filter))
	json_writer.write_dict_key('exclude_filter')
	result_file.write(json.dumps(exclude_filter))
	json_writer.close_dict()

	json_writer.write_dict_item('name', results.name)
	for (key, value) in env.collectData().items():
		json_writer.write_dict_item(key, value)

	profile = core.loadTestProfile(profileFilename, resultsDir)

	json_writer.write_dict_key('tests')
	json_writer.open_dict()
	# If resuming an interrupted test run, re-write all of the existing
	# results since we clobbered the results file.  Also, exclude them
	# from being run again.
	if OptionResume:
		for (key, value) in old_results.tests.items():
			json_writer.write_dict_item(key, value)
			env.exclude_tests.add(key)

	time_start = time.time()
	profile.run(env, json_writer)
	time_end = time.time()

	json_writer.close_dict()

	results.time_elapsed = time_end - time_start
	json_writer.write_dict_item('time_elapsed', results.time_elapsed)

	# End json.
	json_writer.close_dict()
	json_writer.file.close()

	print
	print 'Thank you for running Piglit!'
	print 'Results have been written to ' + result_filepath
Code Example #17
File: piglit-run.py Project: RAOF/piglit
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name", metavar="<test name>", default=None, help="Name of this test run")
    parser.add_argument("-d", "--dry-run", action="store_false", dest="execute", help="Do not execute the tests")
    parser.add_argument(
        "-t",
        "--include-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Run only matching tests " "(can be used more than once)",
    )
    parser.add_argument(
        "-x",
        "--exclude-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Exclude matching tests " "(can be used more than once)",
    )
    parser.add_argument(
        "-1", "--no-concurrency", action="store_false", dest="concurrency", help="Disable concurrent test runs"
    )
    parser.add_argument(
        "-p", "--platform", choices=["glx", "x11_egl", "wayland", "gbm"], help="Name of windows system passed to waffle"
    )
    parser.add_argument("--valgrind", action="store_true", help="Run tests in valgrind's memcheck")
    parser.add_argument(
        "--dmesg", action="store_true", help="Capture a difference in dmesg before and " "after each test"
    )
    parser.add_argument("test_profile", metavar="<Path to test profile>", help="Path to testfile to run")
    parser.add_argument("results_path", type=path.realpath, metavar="<Results Path>", help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform:
        os.environ["PIGLIT_PLATFORM"] = args.platform

    # Pass arguments into Environment
    env = core.Environment(
        concurrent=args.concurrency,
        exclude_filter=args.exclude_tests,
        include_filter=args.include_tests,
        execute=args.execute,
        valgrind=args.valgrind,
        dmesg=args.dmesg,
    )

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, "main")
    result_file = open(result_filepath, "w")
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key("options")
    json_writer.open_dict()
    json_writer.write_dict_item("profile", args.test_profile)
    for key, value in env:
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item("name", results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.loadTestProfile(args.test_profile)

    json_writer.write_dict_key("tests")
    json_writer.open_dict()
    time_start = time.time()
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item("time_elapsed", results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print "Thank you for running Piglit!"
    print "Results have been written to " + result_filepath
Code Example #18
File: run.py Project: evelikov/piglit
def run(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c', '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1", "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v", "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                             "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables messages boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS     = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX      = 0x0002
        SEM_NOOPENFILEERRORBOX     = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with a threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    core.get_config(args.config_file)

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        verbose=args.verbose)

    # Set the platform to pass to waffle
    if args.platform:
        opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'results.json')
    result_file = open(result_filepath, 'w')
    json_writer = framework.results.JSONWriter(result_file)

    # Create a dictionary to initialize the json writer; it needs the contents
    # of the opts object plus the profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
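    # Hand the assembled options, run name and collected system info to the
    # writer in a single call.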
    json_writer.initialize_json(options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_json()

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + result_filepath)
Code Example #19
File: piglit-run.py Project: rafalmiel/piglit
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c', '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1", "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v", "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                             "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with a threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    if args.config_file:
        core.PIGLIT_CONFIG.readfp(args.config_file)
        args.config_file.close()
    else:
        core.PIGLIT_CONFIG.read(os.path.join(os.path.dirname(__file__),
                                'piglit.conf'))

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg,
                           verbose=args.verbose)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', args.test_profile)
    for key, value in env:
        json_writer.write_dict_item(key, value)
    if args.platform:
        json_writer.write_dict_item('platform', args.platform)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)

    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.merge_test_profiles(args.test_profile)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
Code Example #20
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-n",
                        "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t",
                        "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                        "(can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                        "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c',
                             '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1",
                             "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                        "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                        "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with a threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    if args.config_file:
        core.PIGLIT_CONFIG.readfp(args.config_file)
        args.config_file.close()
    else:
        core.PIGLIT_CONFIG.read(
            os.path.join(os.path.dirname(__file__), 'piglit.conf'))

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg,
                           verbose=args.verbose)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', args.test_profile)
    for key, value in env:
        json_writer.write_dict_item(key, value)
    if args.platform:
        json_writer.write_dict_item('platform', args.platform)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)

    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = core.merge_test_profiles(args.test_profile)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print
    print 'Thank you for running Piglit!'
    print 'Results have been written to ' + result_filepath
Code Example #21
File: summary.py Project: evelikov/piglit
def html(input_):
    # Make a copy of the status text list and add all. This is used as the
    # argument list for -e/--exclude
    statuses = set(str(s) for s in status.ALL)
    statuses.add('all')

    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("-l", "--list",
                        action="store",
                        help="Load a newline seperated list of results. These "
                             "results will be prepended to any Results "
                             "specified on the command line")
    parser.add_argument("-e", "--exclude-details",
                        default=[],
                        action="append",
                        choices=statuses,
                        help="Optionally exclude the generation of HTML pages "
                             "for individual test pages with the status(es) "
                             "given as arguments. This speeds up HTML "
                             "generation, but reduces the info in the HTML "
                             "pages. May be used multiple times")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="*",
                        help="Results files to include in HTML")
    args = parser.parse_args(input_)

    # If args.list and args.resultsFiles are empty, then raise an error
    if not args.list and not args.resultsFiles:
        raise parser.error("Missing required option -l or <resultsFiles>")

    # Convert the exclude_details list to status objects; without this,
    # using the -e option would raise an exception
    if args.exclude_details:
        # If 'all' was requested, exclude details for every status
        if 'all' in args.exclude_details:
            args.exclude_details = status.ALL
        else:
            args.exclude_details = frozenset(
                status.status_lookup(i) for i in args.exclude_details)


    # if overwrite is requested delete the output directory
    if path.exists(args.summaryDir) and args.overwrite:
        shutil.rmtree(args.summaryDir)

    # If the requested directory doesn't exist, create it or throw an error
    core.checkDir(args.summaryDir, not args.overwrite)

    # Merge args.list and args.resultsFiles
    if args.list:
        args.resultsFiles.extend(core.parse_listfile(args.list))

    # Create the HTML output
    output = summary.Summary(args.resultsFiles)
    output.generate_html(args.summaryDir, args.exclude_details)
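
Because html(input_) receives its argument list explicitly rather than reading
sys.argv, it can be driven directly from another script. A minimal,
hypothetical invocation is sketched below; the summary directory, the result
file paths and the 'pass' status name are placeholders and assumptions, not
values taken from the examples.

# Hypothetical driver for the html() entry point shown above; the paths are
# placeholders and would need to point at real Piglit result files.
if __name__ == '__main__':
    html(['--overwrite',
          '--exclude-details', 'pass',   # skip per-test pages for passing tests
          'summary-html',                # <Summary Directory>
          'results/baseline/main',       # first <Results Files> entry
          'results/patched/main'])       # second <Results Files> entry
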
Code Example #22
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py and
    piglit run; a minimal wrapper sketch follows this example.

    """
    args = _run_parser(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables messages boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with a threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        sync=args.sync)

    # Set the platform to pass to waffle
    opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Create a metadata dictionary for the results backend; it needs the
    # contents of the opts object plus the profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    options['name'] = results.name
    options['env'] = core.collect_system_info()
    # FIXME: this should be the actual count, but profile needs to be
    # refactored to make that possible because of the flattening pass that is
    # part of profile.run
    options['test_count'] = 0
    options['test_suffix'] = args.junit_suffix
    options['log_level'] = args.log_level

    # Create the results backend and write out the initial metadata.
    backend = framework.results.get_backend(args.backend)(args.results_path,
                                                          options,
                                                          file_fsync=opts.sync)

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, args.log_level, backend)
    time_end = time.time()

    results.time_elapsed = time_end - time_start
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
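
The docstring in Code Example #22 notes that run(input_) is a function so it
can be shared between the legacy piglit-run.py script and the piglit run
front-end. A thin wrapper along those lines might look like the sketch below;
the framework.programs.run import path is an assumption based on the module
names used in the examples, not something the examples themselves show.

#!/usr/bin/env python
# Hypothetical wrapper script: forward the command-line arguments to the
# shared run() entry point. The import path below is assumed.
import sys

from framework.programs import run as run_program


if __name__ == '__main__':
    run_program.run(sys.argv[1:])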