Example #1
    def test_matches_include_caps(self):
        """TestProfile.prepare_test_list: matches capitalized tests."""
        env = core.Options(exclude_filter=['test9'])

        profile_ = profile.TestProfile()
        profile_.test_list = self.data
        profile_._prepare_test_list(env)

        nt.assert_not_in(grouptools.join('group4', 'Test9'), profile_.test_list)
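
The data fixture that this and the following filter tests operate on is built elsewhere in the suite. A minimal sketch of its likely shape, inferred only from the keys asserted in these examples (the exact entries and values are hypothetical):

# Hypothetical fixture: grouped test names mapped to test values; the real
# suite may build the keys with grouptools.join(). Only 'group3/test5' and
# 'group4/Test9' actually appear in the assertions; the rest is illustrative.
data = {
    'group1/test1': 'thing',
    'group3/test5': 'other',
    'group4/Test9': 'is_caps',
}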
Example #2
def test_matches_filter_mar_2(data):
    """ Tests 'not env.filter or matches_any_regex() mar is False"""
    env = core.Options(include_filter=['test5'])

    profile_ = profile.TestProfile()
    profile_.test_list = data
    profile_._prepare_test_list(env)

    baseline = {'group3/test5': 'other'}

    nt.assert_dict_equal(profile_.test_list, baseline)
Example #3
def test_matches_exclude_mar(data):
    """ Tests 'not matches_any_regexp() """
    env = core.Options(exclude_filter=['test5'])

    profile_ = profile.TestProfile()
    profile_.test_list = data
    profile_._prepare_test_list(env)

    baseline = copy.deepcopy(data)
    del baseline['group3/test5']

    nt.assert_dict_equal(profile_.test_list, baseline)
Example #4
    def test_matches_exclude_mar(self):
        """TestProfile.prepare_test_list: 'not matches_any_regexp()"""
        env = core.Options(exclude_filter=['test5'])

        profile_ = profile.TestProfile()
        profile_.test_list = self.data
        profile_._prepare_test_list(env)

        baseline = copy.deepcopy(self.data)
        del baseline[grouptools.join('group3', 'test5')]

        nt.assert_dict_equal(profile_.test_list, baseline)
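
Note that Example #1 removes grouptools.join('group4', 'Test9') even though the filter given was the lowercase 'test9', so the include/exclude filters evidently behave as case-insensitive regular expressions. A minimal illustration with the standard re module (this is an assumption about the underlying mechanism, not code from piglit):

import re

# 'test9' matches 'group4/Test9' only when case is ignored.
assert re.search(r'test9', 'group4/Test9', re.IGNORECASE)
assert not re.search(r'test9', 'group4/Test9')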
Example #5
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = framework.results.TestrunResult.resume(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        sync=results.options['sync'])

    core.get_config(args.config_file)

    opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_fsync=opts.sync,
        file_start_count=len(results.tests) + 1)
    # Specifically do not call initialize() again; everything it does is done.

    for name in results.tests.iterkeys():
        opts.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is a resumed run; timing wouldn't be accurate anyway
    profile.run(opts, results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #6
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    args = parser.parse_args(input_)

    results = framework.results.load_results(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        verbose=results.options['verbose'])

    core.get_config(args.config_file)

    if results.options.get('platform'):
        opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results_path = path.join(args.results_path, 'results.json')
    json_writer = framework.results.JSONWriter(open(results_path, 'w+'))
    json_writer.initialize_json(results.options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        opts.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is a resumed run; timing wouldn't be accurate anyway
    profile.run(opts, json_writer)

    json_writer.close_dict()
    json_writer.close_json()

    print("Thank you for running Piglit!\n"
          "Results have ben wrriten to {0}".format(results_path))
Example #7
def test_matches_filter_mar_1(data):
    """ Tests 'not env.filter or matches_any_regex() env.filter is False

    Nothing should be filtered.

    """
    env = core.Options()

    profile_ = profile.TestProfile()
    profile_.test_list = data
    profile_._prepare_test_list(env)

    nt.assert_dict_equal(profile_.test_list, data)
Example #8
def test_matches_env_exclude(data):
    """ Tests 'not path in env.exclude_tests  """
    env = core.Options()
    env.exclude_tests.add('group3/test5')

    profile_ = profile.TestProfile()
    profile_.test_list = data
    profile_._prepare_test_list(env)

    baseline = copy.deepcopy(data)
    del baseline['group3/test5']

    nt.assert_dict_equal(profile_.test_list, baseline)
Example #9
    def test_matches_env_exclude(self):
        """TestProfile.prepare_test_list: 'not path in env.exclude_tests"""
        env = core.Options()
        env.exclude_tests.add(grouptools.join('group3', 'test5'))

        profile_ = profile.TestProfile()
        profile_.test_list = self.data
        profile_._prepare_test_list(env)

        baseline = copy.deepcopy(self.data)
        del baseline[grouptools.join('group3', 'test5')]

        nt.assert_dict_equal(profile_.test_list, baseline)
Example #10
    def test_matches_filter_mar_2(self):
        """TestProfile.prepare_test_list: 'not env.filter or matches_any_regex()
        mar is False

        """
        env = core.Options(include_filter=['test5'])

        profile_ = profile.TestProfile()
        profile_.test_list = self.data
        profile_._prepare_test_list(env)

        baseline = {grouptools.join('group3', 'test5'): 'other'}

        nt.assert_dict_equal(profile_.test_list, baseline)
Example #11
    def setup_class(cls):
        super(TestJsonOutput, cls).setup_class()

        args = Namespace()
        # pylint: disable=attribute-defined-outside-init
        args.test_profile = ['fake.py']
        args.platform = 'gbm'
        args.log_level = 'verbose'

        backend = JSONBackend(cls.tdir, file_fsync=True)
        backend.initialize(_create_metadata(args, 'test', core.Options()))
        backend.write_test('result', {'result': 'pass'})
        backend.finalize({'time_elapsed': 1.22})
        with open(os.path.join(cls.tdir, 'results.json'), 'r') as f:
            cls.json = json.load(f)
Example #12
def main():
    core.get_config()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-t",
        "--include-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Run only matching tests (can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests (can be used more than "
                        "once)")
    parser.add_argument("testProfile",
                        metavar="<Path to testfile>",
                        help="Path to results folder")
    args = parser.parse_args()

    opts = core.Options(exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests)

    # Change to piglit's directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile = framework.profile.load_test_profile(args.testProfile)

    def getCommand(test):
        command = ''
        if isinstance(test, GleanTest):
            for var, val in test.env.items():
                command += var + "='" + val + "' "

        # Make the test command relative to the piglit_dir
        testCommand = test.command[:]
        testCommand[0] = os.path.relpath(testCommand[0], piglit_dir)

        command += ' '.join(testCommand)
        return command

    profile._prepare_test_list(opts)
    for name, test in profile.test_list.items():
        assert isinstance(test, Test)
        print(name, ':::', getCommand(test))
Example #13
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must run serially, because dmesg
    # isn't reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        sync=args.sync)

    # Set the platform to pass to waffle
    opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    backend = backends.get_backend(args.backend)(
        args.results_path,
        file_fsync=opts.sync,
        junit_suffix=args.junit_suffix)
    backend.initialize(_create_metadata(args, results.name, opts))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, args.log_level, backend)
    time_end = time.time()

    results.time_elapsed = time_end - time_start
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #14
def run(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-n",
                        "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t",
                        "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                        "(can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                        "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c',
                             '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1",
                             "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                        "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                        "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must run serially, because dmesg
    # isn't reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    core.get_config(args.config_file)

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        verbose=args.verbose)

    # Set the platform to pass to waffle
    if args.platform:
        opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'results.json')
    result_file = open(result_filepath, 'w')
    json_writer = framework.results.JSONWriter(result_file)

    # Create a dictionary to pass to initialize_json; it needs the contents of
    # the env dictionary plus profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    json_writer.initialize_json(options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_json()

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + result_filepath)
Example #15
try:
    import simplejson as json
except ImportError:
    import json
import nose.tools as nt
import framework.core as core
import framework.results as results
import framework.backends as backends
import framework.tests.utils as utils


BACKEND_INITIAL_META = {
    'name': 'name',
    'test_count': 0,
    'env': {},
    'options': {k: v for k, v in core.Options()},
}

JUNIT_SCHEMA = 'framework/tests/schema/junit-7.xsd'


def test_initialize_jsonbackend():
    """ Test that JSONBackend initializes

    This needs to be handled separately from the others because it requires
    arguments

    """
    with utils.tempdir() as tdir:
        func = results.JSONBackend(tdir)
        assert isinstance(func, results.JSONBackend)
Example #16
    def __init__(self, results, json_file):

        with open(json_file) as data:
            feature_data = json.load(data)

        self.feat_fractions = {}
        self.feat_status = {}
        self.features = set()
        self.results = results

        profiles = {}

        # we expect all the result sets to be for the same profile
        profile_orig = profile.load_test_profile(
            results[0].options['profile'][0])

        for feature in feature_data:
            self.features.add(feature)

            incl_str = feature_data[feature]["include_tests"]
            excl_str = feature_data[feature]["exclude_tests"]

            include_filter = ([incl_str]
                              if incl_str and not incl_str.isspace() else [])
            exclude_filter = ([excl_str]
                              if excl_str and not excl_str.isspace() else [])

            opts = core.Options(include_filter=include_filter,
                                exclude_filter=exclude_filter)

            profiles[feature] = copy.deepcopy(profile_orig)

            # An empty test list will raise a PiglitFatalError exception,
            # but for reporting we need to handle this situation
            try:
                profiles[feature]._prepare_test_list(opts)
            except exceptions.PiglitFatalError:
                pass

        for results in self.results:
            self.feat_fractions[results.name] = {}
            self.feat_status[results.name] = {}

            for feature in feature_data:
                result_set = set(results.tests)
                profile_set = set(profiles[feature].test_list)

                common_set = profile_set & result_set
                passed_list = [
                    x for x in common_set
                    if results.tests[x].result == status.PASS
                ]

                total = len(common_set)
                passed = len(passed_list)

                self.feat_fractions[results.name][feature] = (passed, total)
                if total == 0:
                    self.feat_status[results.name][feature] = status.NOTRUN
                else:
                    if (100 * passed // total >=
                            feature_data[feature]["target_rate"]):
                        self.feat_status[results.name][feature] = status.PASS
                    else:
                        self.feat_status[results.name][feature] = status.FAIL
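
The JSON file consumed by this reader appears to define one object per feature. A sketch of the expected shape (the three field names are taken directly from the lookups above; everything else is illustrative):

# Hypothetical feature definition; only include_tests, exclude_tests and
# target_rate are actually read by the code above.
feature_data_example = {
    "some_feature": {
        "include_tests": "some_test_regex",
        "exclude_tests": "",
        "target_rate": 90,  # minimum pass percentage for status.PASS
    },
}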
Example #17
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must run serially, because dmesg
    # isn't reliable with threaded runs
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        sync=args.sync)

    # Set the platform to pass to waffle
    opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Create a dictionary to pass to the backend; it needs the contents of
    # the env dictionary plus profile and platform information
    options = {'profile': args.test_profile}
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    options['name'] = results.name
    options['env'] = core.collect_system_info()
    # FIXME: this should be the actual count, but profile needs to be
    # refactored to make that possible because of the flattening pass that is
    # part of profile.run
    options['test_count'] = 0
    options['test_suffix'] = args.junit_suffix
    options['log_level'] = args.log_level

    # Begin json.
    backend = framework.results.get_backend(args.backend)(args.results_path,
                                                          options,
                                                          file_fsync=opts.sync)

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, args.log_level, backend)
    time_end = time.time()

    results.time_elapsed = time_end - time_start
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
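
Taken together, the examples above exercise the following core.Options keyword arguments and attributes. This is a composite sketch, not a definitive signature: which keywords exist varies between the Piglit versions shown (e.g. sync vs. verbose), and the values below are illustrative only.

# Keyword arguments and attributes observed across the examples above.
opts = core.Options(concurrent='some',
                    include_filter=['some_regex'],
                    exclude_filter=['another_regex'],
                    execute=True,
                    valgrind=False,
                    dmesg=False,
                    sync=False)          # older examples pass verbose= instead
opts.env['PIGLIT_PLATFORM'] = 'gbm'      # platform is carried in opts.env
opts.exclude_tests.add('group3/test5')   # a set of already-run test names
# Options also iterates as (key, value) pairs, as used in the examples:
options_dict = {k: v for k, v in opts}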