Example #1
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("-n",
                        "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.concurrent = results.options['concurrent']
    options.OPTIONS.exclude_filter = results.options['exclude_filter']
    options.OPTIONS.include_filter = results.options['include_filter']
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.dmesg = results.options['dmesg']
    options.OPTIONS.sync = results.options['sync']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path, file_start_count=len(results.tests) + 1)
    # Deliberately skip initialize(); everything it does is already done.

    # Don't re-run tests that have already completed; tests with 'incomplete'
    # status obviously have not completed.
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            options.OPTIONS.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if options.OPTIONS.dmesg:
        profile.dmesg = options.OPTIONS.dmesg

    # This is a resumed run; don't bother with time since it won't be accurate
    profile.run(results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #2
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        sync=results.options['sync'])

    core.get_config(args.config_file)

    opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_fsync=opts.sync,
        file_start_count=len(results.tests) + 1)
    # Deliberately skip initialize(); everything it does is already done.

    # Don't re-run tests that have already completed; tests with 'incomplete'
    # status obviously have not completed.
    for name, result in results.tests.iteritems():
        if args.no_retry or result['result'] != 'incomplete':
            opts.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is a resumed run; don't bother with time since it won't be accurate
    profile.run(opts, results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #3
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = framework.results.TestrunResult.resume(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        sync=results.options['sync'])

    core.get_config(args.config_file)

    opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_fsync=opts.sync,
        file_start_count=len(results.tests) + 1)
    # Deliberately skip initialize(); everything it does is already done.

    for name in results.tests.iterkeys():
        opts.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is a resumed run; don't bother with time since it won't be accurate
    profile.run(opts, results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #4
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    args = parser.parse_args(input_)

    results = framework.results.load_results(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        verbose=results.options['verbose'])

    core.get_config(args.config_file)

    if results.options.get('platform'):
        opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results_path = path.join(args.results_path, 'results.json')
    json_writer = framework.results.JSONWriter(open(results_path, 'w+'))
    json_writer.initialize_json(results.options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        opts.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is a resumed run; don't bother with time since it won't be accurate
    profile.run(opts, json_writer)

    json_writer.close_dict()
    json_writer.close_json()

    print("Thank you for running Piglit!\n"
          "Results have ben wrriten to {0}".format(results_path))
Example #5
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    args = parser.parse_args(input_)

    results = framework.results.load_results(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        verbose=results.options['verbose'])

    core.get_config(args.config_file)

    if results.options.get('platform'):
        opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results_path = path.join(args.results_path, 'results.json')
    json_writer = framework.results.JSONWriter(open(results_path, 'w+'))
    json_writer.initialize_json(results.options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        opts.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is a resumed run; don't bother with time since it won't be accurate
    profile.run(opts, json_writer)

    json_writer.close_dict()
    json_writer.close_json()

    print("Thank you for running Piglit!\n"
          "Results have ben wrriten to {0}".format(results_path))
Example #6
def _create_metadata(args, name):
    """Create and return a metadata dict for Backend.initialize()."""
    opts = dict(options.OPTIONS)
    opts['profile'] = args.test_profile
    opts['log_level'] = args.log_level
    if args.platform:
        opts['platform'] = args.platform

    metadata = {'options': opts}
    metadata['name'] = name
    metadata.update(core.collect_system_info())

    return metadata
Example #7
def _create_metadata(args, name):
    """Create and return a metadata dict for Backend.initialize()."""
    opts = dict(options.OPTIONS)
    opts['profile'] = args.test_profile
    opts['log_level'] = args.log_level
    if args.platform:
        opts['platform'] = args.platform

    metadata = {'options': opts}
    metadata['name'] = name
    metadata.update(core.collect_system_info())

    return metadata
Example #8
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    args = parser.parse_args(input_)

    results = framework.results.load_results(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        sync=results.options['sync'])

    core.get_config(args.config_file)

    opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = framework.results.get_backend('json')(
        args.results_path,
        results.options,
        file_fsync=opts.sync)

    for key, value in results.tests.iteritems():
        backend.write_test(key, value)
        opts.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is a resumed run; don't bother with time since it won't be accurate
    profile.run(opts, results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #9
def _create_metadata(args, name, opts):
    """Create and return a metadata dict for Backend.initialize()."""
    options = {}
    options['profile'] = args.test_profile
    options['log_level'] = args.log_level
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform

    metadata = {'options': options}
    metadata['name'] = name
    metadata.update(core.collect_system_info())

    return metadata
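The "for key, value in opts:" loop above (also used in the run() examples further down) relies on core.Options being iterable as (key, value) pairs. The class below is only an illustrative stand-in written for this page, not piglit's actual implementation:

class Options(object):
    """Illustrative stand-in for core.Options (an assumption, not the real
    piglit class): it keeps the keyword arguments it was constructed with and
    yields them as (key, value) pairs, which is all the metadata loops need.
    """

    def __init__(self, **kwargs):
        self._settings = dict(kwargs)
        self.env = {}                # e.g. PIGLIT_PLATFORM ends up here
        self.exclude_tests = set()   # filled in by resume()

    def __iter__(self):
        return iter(self._settings.items())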
Example #10
def _create_metadata(args, name, opts):
    """Create and return a metadata dict for Backend.initialize()."""
    options = {}
    options['profile'] = args.test_profile
    options['log_level'] = args.log_level
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform

    metadata = {'options': options}
    metadata['name'] = name
    metadata.update(core.collect_system_info())

    return metadata
Example #11
def _create_metadata(args, name):
    """Create and return a metadata dict for Backend.initialize()."""
    opts = dict(options.OPTIONS)
    opts['profile'] = args.test_profile
    opts['log_level'] = args.log_level
    opts['concurrent'] = args.concurrency
    opts['include_filter'] = args.include_tests
    opts['exclude_filter'] = args.exclude_tests
    opts['dmesg'] = args.dmesg
    opts['monitoring'] = args.monitored
    if args.platform:
        opts['platform'] = args.platform

    metadata = {'options': opts}
    metadata['name'] = name
    metadata.update(core.collect_system_info())

    return metadata
Example #12
def _create_metadata(args, name, forced_test_list):
    """Create and return a metadata dict for Backend.initialize()."""
    opts = dict(options.OPTIONS)
    opts['profile'] = args.test_profile
    opts['log_level'] = args.log_level
    opts['concurrent'] = args.concurrency
    opts['include_filter'] = args.include_tests
    opts['exclude_filter'] = args.exclude_tests
    opts['dmesg'] = args.dmesg
    opts['monitoring'] = args.monitored
    if args.platform:
        opts['platform'] = args.platform
    opts['forced_test_list'] = forced_test_list

    metadata = {'options': opts}
    metadata['name'] = name
    metadata.update(core.collect_system_info())

    return metadata
Example #13
def _create_metadata(args, name, forced_test_list):
    """Create and return a metadata dict for Backend.initialize()."""
    opts = dict(options.OPTIONS)
    opts['profile'] = args.test_profile
    opts['log_level'] = args.log_level
    opts['concurrent'] = args.concurrency
    opts['include_filter'] = args.include_tests
    opts['exclude_filter'] = args.exclude_tests
    opts['dmesg'] = args.dmesg
    opts['monitoring'] = args.monitored
    if args.platform:
        opts['platform'] = args.platform
    opts['forced_test_list'] = forced_test_list
    opts['ignore_missing'] = args.ignore_missing
    opts['timeout'] = args.timeout

    metadata = {'options': opts}
    metadata['name'] = name
    metadata['info'] = {}
    metadata['info']['system'] = core.collect_system_info()

    return metadata
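Per the docstring, the returned dict is meant for Backend.initialize(), which the resume() examples above deliberately skip because a resumed run was already initialized. A hedged sketch of the assumed call site for a fresh run (constructor arguments trimmed for illustration, not copied from any of these examples):

# Hypothetical call site, inferred from the docstring and the resume()
# comments above; the backend constructor normally takes extra keyword
# arguments that are omitted here.
backend = backends.get_backend(args.backend)(args.results_path)
backend.initialize(_create_metadata(args, results.name, forced_test_list))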
Example #14
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        sync=args.sync)

    # Set the platform to pass to waffle
    opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Create a dictionary to pass when initializing the JSON output; it needs
    # the contents of the env dictionary plus profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    options['name'] = results.name
    options['env'] = core.collect_system_info()
    # FIXME: this should be the actual count, but profile needs to be
    # refactored to make that possible because of the flattening pass that is
    # part of profile.run
    options['test_count'] = 0
    options['test_suffix'] = args.junit_suffix
    options['log_level'] = args.log_level

    # Begin json.
    backend = framework.results.get_backend(args.backend)(args.results_path,
                                                          options,
                                                          file_fsync=opts.sync)

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, args.log_level, backend)
    time_end = time.time()

    results.time_elapsed = time_end - time_start
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #15
def resume(input_):
    unparsed = parsers.parse_config(input_)[1]

    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("-n",
                        "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    parser.add_argument(
        '-j',
        '--jobs',
        dest='jobs',
        action='store',
        type=int,
        default=core.PIGLIT_CONFIG.safe_get('core', 'jobs', None),
        help='Set the maximum number of jobs to run concurrently. '
        'By default, the reported number of CPUs is used.')
    args = parser.parse_args(unparsed)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']
    options.OPTIONS.jobs = args.jobs
    options.OPTIONS.no_retry = args.no_retry

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']
    base.Test.timeout = results.options['timeout']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path, file_start_count=len(results.tests) + 1)
    # Deliberately skip initialize(); everything it does is already done.

    # Don't re-run tests that have already completed; tests with 'incomplete'
    # status obviously have not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [
        profile.load_test_profile(p) for p in results.options['profile']
    ]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if results.options['ignore_missing']:
            p.options['ignore_missing'] = results.options['ignore_missing']

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is a resumed run; don't bother with time since it won't be accurate
    try:
        profile.run(profiles, results.options['log_level'], backend,
                    results.options['concurrent'], args.jobs)
    except exceptions.PiglitUserError as e:
        if str(e) != 'no matching tests':
            raise

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #16
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS     = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX      = 0x0002
        SEM_NOOPENFILEERRORBOX     = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        sync=args.sync)

    # Set the platform to pass to waffle
    opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Create a dictionary to pass when initializing the JSON output; it needs
    # the contents of the env dictionary plus profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    options['name'] = results.name
    options['env'] = core.collect_system_info()
    # FIXME: this should be the actual count, but profile needs to be
    # refactored to make that possible because of the flattening pass that is
    # part of profile.run
    options['test_count'] = 0
    options['test_suffix'] = args.junit_suffix
    options['log_level'] = args.log_level

    # Begin json.
    backend = framework.results.get_backend(args.backend)(
        args.results_path,
        options,
        file_fsync=opts.sync)

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, args.log_level, backend)
    time_end = time.time()

    results.time_elapsed = time_end - time_start
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #17
def run(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-n",
                        "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t",
                        "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                        "(can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                        "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c',
                             '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1",
                             "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                        "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                        "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    core.get_config(args.config_file)

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        verbose=args.verbose)

    # Set the platform to pass to waffle
    if args.platform:
        opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'results.json')
    result_file = open(result_filepath, 'w')
    json_writer = framework.results.JSONWriter(result_file)

    # Create a dictionary to pass when initializing the JSON output; it needs
    # the contents of the env dictionary plus profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    json_writer.initialize_json(options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_json()

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + result_filepath)
Example #18
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_start_count=len(results.tests) + 1)
    # Deliberately skip initialize(); everything it does is already done.

    # Don't re-run tests that have already completed; tests with 'incomplete'
    # status obviously have not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [profile.load_test_profile(p)
                for p in results.options['profile']]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is a resumed run; don't bother with time since it won't be accurate
    profile.run(
        profiles,
        results.options['log_level'],
        backend,
        results.options['concurrent'])

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #19
def run(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c', '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1", "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v", "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                             "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS     = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX      = 0x0002
        SEM_NOOPENFILEERRORBOX     = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    core.get_config(args.config_file)

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        verbose=args.verbose)

    # Set the platform to pass to waffle
    if args.platform:
        opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'results.json')
    result_file = open(result_filepath, 'w')
    json_writer = framework.results.JSONWriter(result_file)

    # Create a dictionary to pass when initializing the JSON output; it needs
    # the contents of the env dictionary plus profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    json_writer.initialize_json(options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_json()

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + result_filepath)