Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    results = core.load_results(args.results_path)
    env = core.Environment(concurrent=results.options['concurrent'],
                           exclude_filter=results.options['exclude_filter'],
                           include_filter=results.options['filter'],
                           execute=results.options['execute'],
                           valgrind=results.options['valgrind'],
                           dmesg=results.options['dmesg'],
                           verbose=results.options['verbose'])

    # Change working directory to the piglit directory
    os.chdir(path.dirname(path.realpath(sys.argv[0])))

    # Attempt to restore a saved platform; if there is no saved platform, just
    # go on
    try:
        os.environ['PIGLIT_PLATFORM'] = results.options['platform']
    except KeyError:
        pass

    results_path = path.join(args.results_path, "main")
    json_writer = core.JSONWriter(open(results_path, 'w+'))
    json_writer.open_dict()
    json_writer.write_dict_key("options")
    json_writer.open_dict()
    for key, value in results.options.iteritems():
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        env.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if env.dmesg:
        profile.dmesg = env.dmesg

    # This is resumed; don't bother with time since it won't be accurate anyway
    profile.run(env, json_writer)

    json_writer.close_dict()
    json_writer.close_dict()
    json_writer.file.close()

    print("Thank you for running Piglit!\n"
          "Results have ben wrriten to {0}".format(results_path))
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    results = core.load_results(args.results_path)
    env = core.Environment(concurrent=results.options['concurrent'],
                           exclude_filter=results.options['exclude_filter'],
                           include_filter=results.options['filter'],
                           execute=results.options['execute'],
                           valgrind=results.options['valgrind'],
                           dmesg=results.options['dmesg'],
                           verbose=results.options['verbose'])

    # Change working directory to the piglit directory
    os.chdir(path.dirname(path.realpath(sys.argv[0])))

    # Attempt to restore a saved platform; if there is no saved platform, just
    # go on
    try:
        os.environ['PIGLIT_PLATFORM'] = results.options['platform']
    except KeyError:
        pass

    results_path = path.join(args.results_path, "main")
    json_writer = core.JSONWriter(open(results_path, 'w+'))
    json_writer.open_dict()
    json_writer.write_dict_key("options")
    json_writer.open_dict()
    for key, value in results.options.iteritems():
        json_writer.write_dict_item(key, value)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)
    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        env.exclude_tests.add(key)
    
    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if env.dmesg:
        profile.dmesg = env.dmesg

    # This is resumed; don't bother with time since it won't be accurate anyway
    profile.run(env, json_writer)

    json_writer.close_dict()
    json_writer.close_dict()
    json_writer.file.close()

    print("Thank you for running Piglit!\n"
          "Results have ben wrriten to {0}".format(results_path))
Example #3
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        sync=results.options['sync'])

    core.get_config(args.config_file)

    opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_fsync=opts.sync,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again; everything initialize does is done.

    # Don't re-run tests that have already completed; tests with incomplete
    # status have obviously not completed.
    for name, result in results.tests.iteritems():
        if args.no_retry or result['result'] != 'incomplete':
            opts.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is resumed; don't bother with time since it won't be accurate anyway
    profile.run(opts, results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #4
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("-n",
                        "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.concurrent = results.options['concurrent']
    options.OPTIONS.exclude_filter = results.options['exclude_filter']
    options.OPTIONS.include_filter = results.options['include_filter']
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.dmesg = results.options['dmesg']
    options.OPTIONS.sync = results.options['sync']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path, file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again; everything initialize does is done.

    # Don't re-run tests that have already completed; tests with incomplete
    # status have obviously not completed.
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            options.OPTIONS.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if options.OPTIONS.dmesg:
        profile.dmesg = options.OPTIONS.dmesg

    # This is resumed; don't bother with time since it won't be accurate anyway
    profile.run(results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #5
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = framework.results.TestrunResult.resume(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        sync=results.options['sync'])

    core.get_config(args.config_file)

    opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_fsync=opts.sync,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again; everything initialize does is done.

    for name in results.tests.iterkeys():
        opts.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is resumed; don't bother with time since it won't be accurate anyway
    profile.run(opts, results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #6
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    args = parser.parse_args(input_)

    results = framework.results.load_results(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        verbose=results.options['verbose'])

    core.get_config(args.config_file)

    if results.options.get('platform'):
        opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results_path = path.join(args.results_path, 'results.json')
    json_writer = framework.results.JSONWriter(open(results_path, 'w+'))
    json_writer.initialize_json(results.options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        opts.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is resumed; don't bother with time since it won't be accurate anyway
    profile.run(opts, json_writer)

    json_writer.close_dict()
    json_writer.close_json()

    print("Thank you for running Piglit!\n"
          "Results have ben wrriten to {0}".format(results_path))
Example #7
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    args = parser.parse_args(input_)

    results = framework.results.load_results(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        verbose=results.options['verbose'])

    core.get_config(args.config_file)

    if results.options.get('platform'):
        opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results_path = path.join(args.results_path, 'results.json')
    json_writer = framework.results.JSONWriter(open(results_path, 'w+'))
    json_writer.initialize_json(results.options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        opts.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is resumed; don't bother with time since it won't be accurate anyway
    profile.run(opts, json_writer)

    json_writer.close_dict()
    json_writer.close_json()

    print("Thank you for running Piglit!\n"
          "Results have ben wrriten to {0}".format(results_path))
Example #8
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    args = parser.parse_args(input_)

    results = framework.results.load_results(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        sync=results.options['sync'])

    core.get_config(args.config_file)

    opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = framework.results.get_backend('json')(
        args.results_path,
        results.options,
        file_fsync=opts.sync)

    for key, value in results.tests.iteritems():
        backend.write_test(key, value)
        opts.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is resumed; don't bother with time since it won't be accurate anyway
    profile.run(opts, results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #9
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS     = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX      = 0x0002
        SEM_NOOPENFILEERRORBOX     = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        sync=args.sync)

    # Set the platform to pass to waffle
    opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Create a dictionary to pass to initialize json, it needs the contents of
    # the env dictionary and profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    options['name'] = results.name
    options['env'] = core.collect_system_info()
    # FIXME: this should be the actual count, but profile needs to be
    # refactored to make that possible because of the flattening pass that is
    # part of profile.run
    options['test_count'] = 0
    options['test_suffix'] = args.junit_suffix
    options['log_level'] = args.log_level

    # Begin json.
    backend = framework.results.get_backend(args.backend)(
        args.results_path,
        options,
        file_fsync=opts.sync)

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, args.log_level, backend)
    time_end = time.time()

    results.time_elapsed = time_end - time_start
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
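
Several of the other examples call a _disable_windows_exception_messages() helper instead of inlining the SetErrorMode block shown above. A plausible definition of that helper, reconstructed from this example's inline version, might look like the following sketch; the exact body in piglit may differ.

import sys

def _disable_windows_exception_messages():
    """Disable Windows error message boxes for this and all child processes.

    Sketch reconstructed from the inline SetErrorMode block above.
    """
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but not the
        # boxes created by the CRT for assertion failures or abort(); those
        # must be disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        mode = ctypes.windll.kernel32.SetErrorMode(0)
        mode |= (SEM_FAILCRITICALERRORS
                 | SEM_NOALIGNMENTFAULTEXCEPT
                 | SEM_NOGPFAULTERRORBOX
                 | SEM_NOOPENFILEERRORBOX)
        ctypes.windll.kernel32.SetErrorMode(mode)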
Example #10
def run(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-n",
                        "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t",
                        "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                        "(can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                        "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c',
                             '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1",
                             "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                        "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                        "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    core.get_config(args.config_file)

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        verbose=args.verbose)

    # Set the platform to pass to waffle
    if args.platform:
        opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'results.json')
    result_file = open(result_filepath, 'w')
    json_writer = framework.results.JSONWriter(result_file)

    # Create a dictionary to pass to initialize json, it needs the contents of
    # the env dictionary and profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    json_writer.initialize_json(options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_json()

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + result_filepath)
Example #11
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg:
        args.concurrency = "none"

    # build up the include filter based on test_list
    if args.test_list:
        with open(args.test_list) as test_list:
            for line in test_list.readlines():
                args.include_tests.append(line.rstrip())

    # Pass arguments into Options
    options.OPTIONS.concurrent = args.concurrency
    options.OPTIONS.exclude_filter = args.exclude_tests
    options.OPTIONS.include_filter = args.include_tests
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.dmesg = args.dmesg
    options.OPTIONS.sync = args.sync

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()
    backends.set_meta(args.backend, results)

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix)
    backend.initialize(_create_metadata(args, results.name))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    results.time_elapsed.start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(args.log_level, backend)

    results.time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #12
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        sync=args.sync)

    # Set the platform to pass to waffle
    opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    backend = backends.get_backend(args.backend)(
        args.results_path,
        file_fsync=opts.sync,
        junit_suffix=args.junit_suffix)
    backend.initialize(_create_metadata(args, results.name, opts))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, args.log_level, backend)
    time_end = time.time()

    results.time_elapsed = time_end - time_start
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
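
Examples #11 and #12 delegate their run metadata to a _create_metadata() helper whose body is not shown here. A hypothetical sketch of such a helper, modelled on the options dictionary that Example #9 assembles by hand (and assuming the same core module imported by the surrounding examples), could look like this:

def _create_metadata(args, name, opts):
    """Hypothetical sketch of the metadata helper used above.

    Modelled on the dictionary built inline in Example #9; the real piglit
    helper may collect more (or different) fields.
    """
    metadata = {'profile': args.test_profile}
    for key, value in opts:  # core.Options is iterable in these versions
        metadata[key] = value
    if args.platform:
        metadata['platform'] = args.platform
    metadata['name'] = name
    metadata['env'] = core.collect_system_info()
    metadata['log_level'] = args.log_level
    return metadata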
Example #13
def run(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c', '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1", "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v", "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                             "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS     = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX      = 0x0002
        SEM_NOOPENFILEERRORBOX     = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    core.get_config(args.config_file)

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        verbose=args.verbose)

    # Set the platform to pass to waffle
    if args.platform:
        opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'results.json')
    result_file = open(result_filepath, 'w')
    json_writer = framework.results.JSONWriter(result_file)

    # Create a dictionary to pass to initialize json, it needs the contents of
    # the env dictionary and profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    json_writer.initialize_json(options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_json()

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + result_filepath)
Example #14
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    base.Test.timeout = args.timeout
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation
    options.OPTIONS.jobs = args.jobs

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    # If a test list is provided then set the forced_test_list value.
    forced_test_list = None
    if args.test_list:
        if len(args.test_profile) != 1:
            raise exceptions.PiglitFatalError(
                'Unable to force a test list with more than one profile')

        with open(args.test_list) as test_list:
            # Strip newlines and comments, ignore empty lines
            stripped = (t.split('#')[0].strip() for t in test_list)
            forced_test_list = [t for t in stripped if t]

    time_elapsed = TimeAttribute(start=time.time())

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(
        _create_metadata(args, args.name or path.basename(args.results_path),
                         forced_test_list))

    profiles = [profile.load_test_profile(p) for p in args.test_profile]
    for p in profiles:
        p.results_dir = args.results_path

    # Set the forced_test_list, if applicable
    if forced_test_list:
        profiles[0].forced_test_list = forced_test_list

    # Set the dmesg type
    if args.dmesg:
        for p in profiles:
            p.options['dmesg'] = dmesg.get_dmesg(args.dmesg)

    if args.monitored:
        for p in profiles:
            p.options['monitor'] = monitoring.Monitoring(args.monitored)

    if args.ignore_missing:
        for p in profiles:
            p.options['ignore_missing'] = args.ignore_missing

    for p in profiles:
        if args.exclude_tests:
            p.filters.append(
                profile.RegexFilter(args.exclude_tests, inverse=True))
        if args.include_tests:
            p.filters.append(profile.RegexFilter(args.include_tests))

    profile.run(profiles, args.log_level, backend, args.concurrency, args.jobs)

    time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
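
The comment-stripping generator used for the --test-list file above can be exercised on its own; here is a quick illustrative run (the test names and file contents are invented):

lines = [
    "spec/glsl-1.10/execution/foo   # flaky on this driver",
    "# a full-line comment",
    "",
    "spec/arb_texture_float/bar",
]
# Same idiom as above: drop trailing comments and whitespace, skip empty lines.
stripped = (t.split('#')[0].strip() for t in lines)
forced_test_list = [t for t in stripped if t]
print(forced_test_list)
# ['spec/glsl-1.10/execution/foo', 'spec/arb_texture_float/bar']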
Example #15
def main():
    parser = argparse.ArgumentParser(sys.argv)
    parser.add_argument("-n", "--name", metavar="<test name>", default=None, help="Name of this test run")
    parser.add_argument("-d", "--dry-run", action="store_false", dest="execute", help="Do not execute the tests")
    parser.add_argument(
        "-t",
        "--include-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Run only matching tests " "(can be used more than once)",
    )
    parser.add_argument(
        "-x",
        "--exclude-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Exclude matching tests " "(can be used more than once)",
    )
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument(
        "-c",
        "--all-concurrent",
        action="store_const",
        default="some",
        const="all",
        dest="concurrency",
        help="Run all tests concurrently",
    )
    conc_parser.add_argument(
        "-1",
        "--no-concurrency",
        action="store_const",
        default="some",
        const="none",
        dest="concurrency",
        help="Disable concurrent test runs",
    )
    parser.add_argument(
        "-p", "--platform", choices=["glx", "x11_egl", "wayland", "gbm"], help="Name of windows system passed to waffle"
    )
    parser.add_argument(
        "-f",
        "--config",
        dest="config_file",
        type=argparse.FileType("r"),
        help="Optionally specify a piglit config file to use. " "Default is piglit.conf",
    )
    parser.add_argument("--valgrind", action="store_true", help="Run tests in valgrind's memcheck")
    parser.add_argument(
        "--dmesg",
        action="store_true",
        help="Capture a difference in dmesg before and " "after each test. Implies -1/--no-concurrency",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Produce a line of output for each test before " "and after it runs",
    )
    parser.add_argument(
        "test_profile", metavar="<Path to one or more test profile(s)>", nargs="+", help="Path to testfile to run"
    )
    parser.add_argument("results_path", type=path.realpath, metavar="<Results Path>", help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform:
        os.environ["PIGLIT_PLATFORM"] = args.platform

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    if args.config_file:
        core.PIGLIT_CONFIG.readfp(args.config_file)
        args.config_file.close()
    else:
        core.PIGLIT_CONFIG.read(os.path.join(os.path.dirname(__file__), "piglit.conf"))

    # Pass arguments into Environment
    env = core.Environment(
        concurrent=args.concurrency,
        exclude_filter=args.exclude_tests,
        include_filter=args.include_tests,
        execute=args.execute,
        valgrind=args.valgrind,
        dmesg=args.dmesg,
        verbose=args.verbose,
    )

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, "main")
    result_file = open(result_filepath, "w")
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key("options")
    json_writer.open_dict()
    json_writer.write_dict_item("profile", args.test_profile)
    for key, value in env:
        json_writer.write_dict_item(key, value)
    if args.platform:
        json_writer.write_dict_item("platform", args.platform)
    json_writer.close_dict()

    json_writer.write_dict_item("name", results.name)

    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    json_writer.write_dict_key("tests")
    json_writer.open_dict()
    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item("time_elapsed", results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print("Thank you for running Piglit!\n" "Results have been written to " + result_filepath)
Example #16
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.concurrent = args.concurrency
    options.OPTIONS.exclude_filter = args.exclude_tests
    options.OPTIONS.include_filter = args.include_tests
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.dmesg = args.dmesg
    options.OPTIONS.sync = args.sync

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    if os.path.exists(args.results_path):
        if args.overwrite:
            if os.path.isdir(args.results_path):
                shutil.rmtree(args.results_path)
            else:
                os.unlink(args.results_path)
        else:
            raise exceptions.PiglitFatalError(
                'Cannot overwrite existing folder without the -o/--overwrite '
                'option being set.')
    os.makedirs(args.results_path)

    results = framework.results.TestrunResult()
    backends.set_meta(args.backend, results)

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix)
    backend.initialize(_create_metadata(args, results.name))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path
    # If a test list is provided then set the forced_test_list value.
    if args.test_list:
        with open(args.test_list) as test_list:
            # Strip newlines
            profile.forced_test_list = list([t.strip() for t in test_list])

    results.time_elapsed.start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(args.log_level, backend)

    results.time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #17
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.concurrent = args.concurrency
    options.OPTIONS.exclude_filter = args.exclude_tests
    options.OPTIONS.include_filter = args.include_tests
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.dmesg = args.dmesg
    options.OPTIONS.monitored = args.monitored
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    results = framework.results.TestrunResult()
    backends.set_meta(args.backend, results)

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(_create_metadata(args, results.name))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path
    # If a test list is provided then set the forced_test_list value.
    if args.test_list:
        with open(args.test_list) as test_list:
            # Strip newlines
            profile.forced_test_list = list([t.strip() for t in test_list])

    results.time_elapsed.start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg

    if args.monitored:
        profile.monitoring = args.monitored

    profile.run(args.log_level, backend)

    results.time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': results.time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
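
Examples #14 and #17 pass a _results_handler callback to core.check_dir() when --overwrite is set, but the handler itself is not shown. A plausible sketch, modelled on the inline overwrite logic in Example #16, might be:

import os
import shutil

def _results_handler(path):
    """Remove an existing results path so the run can recreate it.

    Hypothetical sketch based on the inline rmtree/unlink logic in Example #16.
    """
    if os.path.isdir(path):
        shutil.rmtree(path)
    else:
        os.unlink(path)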
Example #18
def main():
    parser = argparse.ArgumentParser(sys.argv)
    parser.add_argument("-n",
                        "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t",
                        "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                        "(can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                        "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c',
                             '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1",
                             "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                        "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                        "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args()

    # Set the platform to pass to waffle
    if args.platform:
        os.environ['PIGLIT_PLATFORM'] = args.platform

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    if args.config_file:
        core.PIGLIT_CONFIG.readfp(args.config_file)
        args.config_file.close()
    else:
        core.PIGLIT_CONFIG.read(
            os.path.join(os.path.dirname(__file__), 'piglit.conf'))

    # Pass arguments into Environment
    env = core.Environment(concurrent=args.concurrency,
                           exclude_filter=args.exclude_tests,
                           include_filter=args.include_tests,
                           execute=args.execute,
                           valgrind=args.valgrind,
                           dmesg=args.dmesg,
                           verbose=args.verbose)

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = core.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'main')
    result_file = open(result_filepath, 'w')
    json_writer = core.JSONWriter(result_file)
    json_writer.open_dict()

    # Write out command line options for use in resuming.
    json_writer.write_dict_key('options')
    json_writer.open_dict()
    json_writer.write_dict_item('profile', args.test_profile)
    for key, value in env:
        json_writer.write_dict_item(key, value)
    if args.platform:
        json_writer.write_dict_item('platform', args.platform)
    json_writer.close_dict()

    json_writer.write_dict_item('name', results.name)

    for (key, value) in env.collectData().items():
        json_writer.write_dict_item(key, value)

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    json_writer.write_dict_key('tests')
    json_writer.open_dict()
    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(env, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_dict()
    json_writer.file.close()

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + result_filepath)
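
The main() above streams its output through core.JSONWriter (open_dict, write_dict_key, write_dict_item, close_dict) instead of building one large dictionary in memory, so partial results survive a crash. Below is a minimal, illustrative sketch of such a streaming writer; it only models the four methods used in these examples and is not Piglit's actual implementation.

import io
import json


class StreamingJSONWriter(object):
    """Minimal stand-in for the JSONWriter interface used above.

    Writes JSON incrementally to a file-like object so results can be
    flushed as tests finish rather than being held in memory.
    """

    def __init__(self, fileobj):
        self.file = fileobj
        self._need_comma = []   # one flag per currently open dict

    def open_dict(self):
        self.file.write('{')
        self._need_comma.append(False)

    def close_dict(self):
        self._need_comma.pop()
        self.file.write('}')

    def _separator(self):
        # Emit a comma before every entry except the first in each dict.
        if self._need_comma[-1]:
            self.file.write(', ')
        self._need_comma[-1] = True

    def write_dict_key(self, key):
        self._separator()
        self.file.write(json.dumps(key) + ': ')

    def write_dict_item(self, key, value):
        self._separator()
        self.file.write(json.dumps(key) + ': ' + json.dumps(value))


# Usage mirroring the pattern above (hypothetical test names):
buf = io.StringIO()
w = StreamingJSONWriter(buf)
w.open_dict()
w.write_dict_item('name', 'quick.py')
w.write_dict_key('tests')
w.open_dict()
w.write_dict_item('spec/glsl-1.10/foo', {'result': 'pass'})
w.close_dict()
w.close_dict()
print(buf.getvalue())
# {"name": "quick.py", "tests": {"spec/glsl-1.10/foo": {"result": "pass"}}}
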
Example #19
0
def resume(input_):
    unparsed = parsers.parse_config(input_)[1]

    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("-n",
                        "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    parser.add_argument(
        '-j',
        '--jobs',
        dest='jobs',
        action='store',
        type=int,
        default=core.PIGLIT_CONFIG.safe_get('core', 'jobs', None),
        help='Set the maximum number of jobs to run concurrently. '
        'By default, the reported number of CPUs is used.')
    args = parser.parse_args(unparsed)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']
    options.OPTIONS.jobs = args.jobs
    options.OPTIONS.no_retry = args.no_retry

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']
    base.Test.timeout = results.options['timeout']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path, file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again; everything initialize() does has already been done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [
        profile.load_test_profile(p) for p in results.options['profile']
    ]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if results.options['ignore_missing']:
            p.options['ignore_missing'] = results.options['ignore_missing']

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is resumed, don't bother with time since it won't be accurate anyway
    try:
        profile.run(profiles, results.options['log_level'], backend,
                    results.options['concurrent'], args.jobs)
    except exceptions.PiglitUserError as e:
        if str(e) != 'no matching tests':
            raise

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #20
0
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py and
    piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have a serial run; this is because dmesg
    # isn't reliable with threaded runs
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    # If a test list is provided then set the forced_test_list value.
    forced_test_list = None
    if args.test_list:
        if len(args.test_profile) != 1:
            raise exceptions.PiglitFatalError(
                'Unable to force a test list with more than one profile')

        with open(args.test_list) as test_list:
            # Strip newlines and comments, ignore empty lines
            stripped = (t.split('#')[0].strip() for t in test_list)
            forced_test_list = [t for t in stripped if t]

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(_create_metadata(
        args, args.name or path.basename(args.results_path), forced_test_list))

    profiles = [profile.load_test_profile(p) for p in args.test_profile]
    for p in profiles:
        p.results_dir = args.results_path

    # Set the forced_test_list, if applicable
    if forced_test_list:
        profiles[0].forced_test_list = forced_test_list

    # Set the dmesg type
    if args.dmesg:
        for p in profiles:
            p.options['dmesg'] = dmesg.get_dmesg(args.dmesg)

    if args.monitored:
        for p in profiles:
            p.options['monitor'] = monitoring.Monitoring(args.monitored)

    for p in profiles:
        if args.exclude_tests:
            p.filters.append(profile.RegexFilter(args.exclude_tests,
                                                 inverse=True))
        if args.include_tests:
            p.filters.append(profile.RegexFilter(args.include_tests))

    time_elapsed = TimeAttribute(start=time.time())

    profile.run(profiles, args.log_level, backend, args.concurrency)

    time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
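
This version of run() records wall-clock timing through a TimeAttribute object: a start keyword in the constructor, an end attribute assigned after the run, and to_json() for the backend's metadata. A minimal sketch satisfying just that interface is shown below; it is illustrative only and not necessarily Piglit's actual class.

import time


class TimeAttribute(object):
    """Minimal timing record matching the usage above."""

    def __init__(self, start=None, end=None):
        self.start = start
        self.end = end

    def to_json(self):
        return {'start': self.start, 'end': self.end}


# Usage mirroring run():
time_elapsed = TimeAttribute(start=time.time())
time.sleep(0.01)                      # stand-in for profile.run(...)
time_elapsed.end = time.time()
print(time_elapsed.to_json())
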
Example #21
0
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again; everything initialize() does has already been done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [profile.load_test_profile(p)
                for p in results.options['profile']]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is resumed, don't bother with time since it won't be accurate anyway
    profile.run(
        profiles,
        results.options['log_level'],
        backend,
        results.options['concurrent'])

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #22
0
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py and
    piglit run

    """
    args = _run_parser(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must have a serial run; this is because dmesg
    # isn't reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        sync=args.sync)

    # Set the platform to pass to waffle
    opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Create a dictionary to pass to initialize json, it needs the contents of
    # the env dictionary and profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    options['name'] = results.name
    options['env'] = core.collect_system_info()
    # FIXME: this should be the actual count, but profile needs to be
    # refactored to make that possible because of the flattening pass that is
    # part of profile.run
    options['test_count'] = 0
    options['test_suffix'] = args.junit_suffix
    options['log_level'] = args.log_level

    # Begin json.
    backend = framework.results.get_backend(args.backend)(args.results_path,
                                                          options,
                                                          file_fsync=opts.sync)

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, args.log_level, backend)
    time_end = time.time()

    results.time_elapsed = time_end - time_start
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
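
The inline SetErrorMode block in this older version is exactly the kind of setup the newer examples hide behind the _disable_windows_exception_messages() call. A sketch of how it could be factored into a reusable helper is below; the guard and constants are taken directly from the code above.

import sys


def _disable_windows_exception_messages():
    """Suppress Windows error-report dialogs for this process and its children.

    This only covers uncaught exceptions; CRT assertion and abort() dialogs
    must still be disabled by the child processes themselves.
    """
    if sys.platform == 'win32':
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= (SEM_FAILCRITICALERRORS
                  | SEM_NOALIGNMENTFAULTEXCEPT
                  | SEM_NOGPFAULTERRORBOX
                  | SEM_NOOPENFILEERRORBOX)
        ctypes.windll.kernel32.SetErrorMode(uMode)
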