Example #1
    def test_no_such_module(self, tmpdir):
        """profile.load_test_profile: Trying to load a non-existent module
        exits.
        """
        # Ensure that the module will not exist by moving to an empty
        # temporary directory
        tmpdir.chdir()
        with pytest.raises(exceptions.PiglitFatalError):
            profile.load_test_profile('this_module_will_never_ever_exist')
Example #2
@nt.raises(exceptions.PiglitFatalError)  # "exits" here means raising PiglitFatalError
def test_load_test_profile_no_profile():
    """ Loading a module with no profile name exits

    Because load_test_profile uses test.{} to load a module we need a module in
    tests that doesn't have a profile attribute. The only module that currently
    meets that requirement is __init__.py

    """
    profile.load_test_profile('__init__')
Example #3
@nt.raises(exceptions.PiglitFatalError)  # "exits" here means raising PiglitFatalError
def test_load_test_profile_no_profile():
    """profile.load_test_profile: Loading a module with no profile name exits

    Because load_test_profile uses test.{} to load a module we need a module in
    tests that doesn't have a profile attribute. The only module that currently
    meets that requirement is __init__.py

    """
    profile.load_test_profile('__init__')
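The docstrings above hint at the mechanism: the profile name is formatted into a tests.{} import, and the imported module must expose a profile attribute. A minimal sketch of that behaviour, assuming an importlib-based loader and that framework.exceptions provides PiglitFatalError; illustrative only, not piglit's actual implementation:

import importlib

from framework import exceptions  # assumed home of PiglitFatalError


def load_test_profile_sketch(module_name):
    # Import tests.<module_name>, turning both failure modes exercised by
    # the tests above into a PiglitFatalError.
    try:
        mod = importlib.import_module('tests.{}'.format(module_name))
    except ImportError:
        raise exceptions.PiglitFatalError(
            'no such profile module: {}'.format(module_name))
    try:
        return mod.profile
    except AttributeError:
        raise exceptions.PiglitFatalError(
            'module {} has no profile attribute'.format(module_name))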
Example #4
    def test_no_profile_attribute(self, tmpdir):
        """profile.load_test_profile: Loading a module with no profile name
        exits.

        A file path in an empty temporary directory stands in for a module,
        so load_test_profile cannot find a profile attribute.
        """
        p = tmpdir.join('foo.profile')

        with pytest.raises(exceptions.PiglitFatalError):
            profile.load_test_profile(six.text_type(p))
Example #5
def main():
    args = parser()
    OPTIONS.process_isolation = not args.no_process_isolation
    if args.glsl_arb_compat:
        os.environ['PIGLIT_FORCE_GLSLPARSER_DESKTOP'] = 'true'
    profile = load_test_profile(args.input, python=True)
    serializer(args.name, profile, args.output)
Example #6
    def __init__(self, results, json_file):

        with open(json_file) as data:
            feature_data = json.load(data)

        self.feat_fractions = {}
        self.feat_status = {}
        self.features = set()
        self.results = results

        profiles = {}

        # we expect all the result sets to be for the same profile
        profile_orig = profile.load_test_profile(results[0].options['profile'][0])

        for feature in feature_data:
            self.features.add(feature)

            incl_str = feature_data[feature]["include_tests"]
            excl_str = feature_data[feature]["exclude_tests"]

            include_filter = [incl_str] if incl_str and not incl_str.isspace() else []
            exclude_filter = [excl_str] if excl_str and not excl_str.isspace() else []

            opts = core.Options(include_filter=include_filter,
                                exclude_filter=exclude_filter)

            profiles[feature] = copy.deepcopy(profile_orig)

            # An empty test list raises PiglitFatalError, but for reporting
            # we need to handle that case
            try:
                profiles[feature]._prepare_test_list(opts)
            except exceptions.PiglitFatalError:
                pass

        for results in self.results:
            self.feat_fractions[results.name] = {}
            self.feat_status[results.name] = {}

            for feature in feature_data:
                result_set = set(results.tests)
                profile_set = set(profiles[feature].test_list)

                common_set = profile_set & result_set
                passed_list = [x for x in common_set if results.tests[x].result == status.PASS]

                total = len(common_set)
                passed = len(passed_list)

                self.feat_fractions[results.name][feature] = (passed, total)
                if total == 0:
                    self.feat_status[results.name][feature] = status.NOTRUN
                else:
                    if 100 * passed // total >= feature_data[feature]["target_rate"]:
                        self.feat_status[results.name][feature] = status.PASS
                    else:
                        self.feat_status[results.name][feature] = status.FAIL
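The status decision above relies on floor division, so a pass rate just below the target rounds down to a FAIL. A hypothetical distillation of the rule (assuming the same framework.status constants used above):

from framework import status  # assumed import


def feature_status(passed, total, target_rate):
    # Mirrors the branch above: no common tests means NOTRUN; otherwise
    # compare the floored percentage against the target rate.
    if total == 0:
        return status.NOTRUN
    if 100 * passed // total >= target_rate:
        return status.PASS
    return status.FAIL

# 89/90 is 98.8%, which floors to 98: meets a 98% target, misses 99%.
assert feature_status(89, 90, 98) == status.PASS
assert feature_status(89, 90, 99) == status.FAIL
assert feature_status(0, 0, 50) == status.NOTRUN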
Example #7
    def __init__(self, results, json_file):

        with open(json_file) as data:
            feature_data = json.load(data)

        self.feat_fractions = {}
        self.feat_status = {}
        self.features = set()
        self.results = results

        profiles = {}

        # we expect all the result sets to be for the same profile
        profile_orig = profile.load_test_profile(
            results[0].options['profile'][0])

        for feature in feature_data:
            self.features.add(feature)

            profiles[feature] = profile_orig.copy()

            incl_str = feature_data[feature]["include_tests"]
            excl_str = feature_data[feature]["exclude_tests"]

            profiles[feature].filters.append(
                profile.RegexFilter(
                    [incl_str] if incl_str and not incl_str.isspace() else []))
            profiles[feature].filters.append(
                profile.RegexFilter(
                    [excl_str] if excl_str and not excl_str.isspace() else [],
                    inverse=True))

        for results in self.results:
            self.feat_fractions[results.name] = {}
            self.feat_status[results.name] = {}

            for feature in feature_data:
                result_set = set(results.tests)
                profile_set = set(a for a, _ in profiles[feature].itertests())

                common_set = profile_set & result_set
                passed_list = [
                    x for x in common_set
                    if results.tests[x].result == status.PASS
                ]

                total = len(common_set)
                passed = len(passed_list)

                self.feat_fractions[results.name][feature] = (passed, total)
                if total == 0:
                    self.feat_status[results.name][feature] = status.NOTRUN
                else:
                    if 100 * passed // total >= feature_data[feature][
                            "target_rate"]:
                        self.feat_status[results.name][feature] = status.PASS
                    else:
                        self.feat_status[results.name][feature] = status.FAIL
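This version swaps the deep-copied profiles and Options of the previous example for stacked RegexFilter instances. A guess at the filter contract these examples rely on (not piglit's source): a filter is a callable taking (name, test) and returning True to keep the test, and inverse=True turns a regex filter into an exclusion.

import re


class RegexFilterSketch(object):
    """Hypothetical stand-in for profile.RegexFilter."""

    def __init__(self, filters, inverse=False):
        self.patterns = [re.compile(f) for f in filters]
        self.inverse = inverse

    def __call__(self, name, _):
        # An empty pattern list keeps everything, which is why the example
        # above can append the filters unconditionally.
        if not self.patterns:
            return True
        matched = any(p.search(name) for p in self.patterns)
        return not matched if self.inverse else matched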
Example #8
def main(input_):
    """The main function."""
    parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
    parser.add_argument("-t",
                        "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                        "(can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests (can be used more than "
                        "once)")
    parser.add_argument("--format",
                        dest="format_string",
                        default="{name} ::: {command}",
                        action="store",
                        help="A template string that defines the output "
                        "format. It has two replacement tokens that can "
                        "be provided, along with any arbitrary text, "
                        "which will be printed verbatim. The two tokens "
                        "are '{name}', which will be replaced with the "
                        "name of the test; and '{command}', which will "
                        "be replaced with the command to run the test.")
    parser.add_argument("testProfile",
                        metavar="<Path to testfile>",
                        help="Path to results folder")
    args = parser.parse_args(input_)

    profile_ = profile.load_test_profile(args.testProfile)

    if args.exclude_tests:
        profile_.filters.append(profile.RegexFilter(args.exclude_tests))
    if args.include_tests:
        profile_.filters.append(profile.RegexFilter(args.include_tests))

    # Change to piglit's directory
    piglit_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    for name, test in profile_.itertests():
        assert isinstance(test, Test)
        print(
            args.format_string.format(name=name,
                                      command=get_command(test, piglit_dir)))
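A hypothetical direct invocation (the profile name and regex are made up):

# Prints one "<name> ::: <command>" line for every test in the quick
# profile whose name matches 'glsl-1.10'.
main(['quick', '--include-tests', 'glsl-1.10'])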
Example #9
def main():
    """The main function."""
    if six.PY2:
        input_ = [i.decode('utf-8') for i in sys.argv[1:]]
    elif six.PY3:
        input_ = sys.argv[1:]

    parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests (can be used more than "
                             "once)")
    parser.add_argument("testProfile",
                        metavar="<Path to testfile>",
                        help="Path to results folder")
    args = parser.parse_args(input_)

    options.OPTIONS.exclude_filter = args.exclude_tests
    options.OPTIONS.include_filter = args.include_tests

    # Change to piglit's directory
    piglit_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile_ = profile.load_test_profile(args.testProfile)

    profile_._prepare_test_list()
    for name, test in six.iteritems(profile_.test_list):
        assert isinstance(test, Test)
        print(name, ':::', get_command(test, piglit_dir))
Example #10
@nt.raises(exceptions.PiglitFatalError)  # "exits" means raising PiglitFatalError
def test_load_test_profile_no_module():
    """profile.load_test_profile: Trying to load a non-existent module exits"""
    profile.load_test_profile('this_module_will_never_ever_exist')
Example #11
def test_load_test_profile_returns():
    """profile.load_test_profile: returns a TestProfile instance"""
    profile_ = profile.load_test_profile('sanity')
    nt.ok_(isinstance(profile_, profile.TestProfile))
Example #12
def resume(input_):
    unparsed = parsers.parse_config(input_)[1]

    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("-n",
                        "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    parser.add_argument(
        '-j',
        '--jobs',
        dest='jobs',
        action='store',
        type=int,
        default=core.PIGLIT_CONFIG.safe_get('core', 'jobs', None),
        help='Set the maximum number of jobs to run concurrently. '
        'By default, the reported number of CPUs is used.')
    args = parser.parse_args(unparsed)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']
    options.OPTIONS.jobs = args.jobs
    options.OPTIONS.no_retry = args.no_retry

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']
    base.Test.timeout = results.options['timeout']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path, file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again; everything initialize does is
    # already done.

    # Don't re-run tests that have already completed; tests with an
    # incomplete status obviously have not.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [
        profile.load_test_profile(p) for p in results.options['profile']
    ]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if results.options['ignore_missing']:
            p.options['ignore_missing'] = results.options['ignore_missing']

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is a resume; don't bother with timing since it won't be accurate anyway
    try:
        profile.run(profiles, results.options['log_level'], backend,
                    results.options['concurrent'], args.jobs)
    except exceptions.PiglitUserError as e:
        if str(e) != 'no matching tests':
            raise

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #13
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again; everything initialize does is
    # already done.

    # Don't re-run tests that have already completed; tests with an
    # incomplete status obviously have not.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [profile.load_test_profile(p)
                for p in results.options['profile']]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

    # This is a resume; don't bother with timing since it won't be accurate anyway
    profile.run(
        profiles,
        results.options['log_level'],
        backend,
        results.options['concurrent'])

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #14
def run(input_):
    """ Function for piglit run command

    This is a function because it allows it to be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    base.Test.timeout = args.timeout
    _disable_windows_exception_messages()

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with threaded runs
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation
    options.OPTIONS.jobs = args.jobs

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    # If a test list is provided then set the forced_test_list value.
    forced_test_list = None
    if args.test_list:
        if len(args.test_profile) != 1:
            raise exceptions.PiglitFatalError(
                'Unable to force a test list with more than one profile')

        with open(args.test_list) as test_list:
            # Strip newlines and comments, ignore empty lines
            stripped = (t.split('#')[0].strip() for t in test_list)
            forced_test_list = [t for t in stripped if t]

    time_elapsed = TimeAttribute(start=time.time())

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(
        _create_metadata(args, args.name or path.basename(args.results_path),
                         forced_test_list))

    profiles = [profile.load_test_profile(p) for p in args.test_profile]
    for p in profiles:
        p.results_dir = args.results_path

    # Set the forced_test_list, if applicable
    if forced_test_list:
        profiles[0].forced_test_list = forced_test_list

    # Set the dmesg type
    if args.dmesg:
        for p in profiles:
            p.options['dmesg'] = dmesg.get_dmesg(args.dmesg)

    if args.monitored:
        for p in profiles:
            p.options['monitor'] = monitoring.Monitoring(args.monitored)

    if args.ignore_missing:
        for p in profiles:
            p.options['ignore_missing'] = args.ignore_missing

    for p in profiles:
        if args.exclude_tests:
            p.filters.append(
                profile.RegexFilter(args.exclude_tests, inverse=True))
        if args.include_tests:
            p.filters.append(profile.RegexFilter(args.include_tests))

    profile.run(profiles, args.log_level, backend, args.concurrency, args.jobs)

    time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
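The --test-list parsing above drops comments and blank lines; a quick illustration with made-up file contents:

lines = ['spec/foo/a\n', '# a comment\n', '   \n', 'spec/foo/b # trailing\n']
stripped = (t.split('#')[0].strip() for t in lines)
assert [t for t in stripped if t] == ['spec/foo/a', 'spec/foo/b']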
Example #15
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from framework import core
from framework.profile import load_test_profile

__all__ = ['profile']


def xts_render_filter(path, test):
    # Keep any tests that aren't from xts.
    if 'xts5' not in path:
        return True

    # All of Xlib9 is for rendering.
    return 'xlib9' in path


profile = load_test_profile("xts")
profile.filter_tests(xts_render_filter)
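The filter's intent, checked against hypothetical test names:

assert xts_render_filter('glx/glx-swap-event', None)           # not xts: keep
assert xts_render_filter('xts5/xlib9/drawline', None)          # Xlib9 rendering: keep
assert not xts_render_filter('xts5/xlib3/createwindow', None)  # other xts5: drop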
Example #16
def test_load_test_profile_returns():
    """ load_test_profile returns a TestProfile instance """
    profile_ = profile.load_test_profile('sanity')
    assert isinstance(profile_, profile.TestProfile)
Example #17
    def test_return_type(self):
        """profile.load_test_profile: returns a TestProfile instance."""
        assert isinstance(profile.load_test_profile('sanity'),
                          profile.TestProfile)
Example #18
# -*- coding: utf-8 -*-

# quick.tests minus tests that are known to fail

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

from framework.profile import load_test_profile

__all__ = ['profile']

profile = load_test_profile('quick')

with open("/usr/lib64/piglit/tests/opensuse_qa-skip-tests.txt") as f:
    to_skip = frozenset(map(lambda line: line[:-1], f))

    profile.filters.append(lambda p, _: p not in to_skip)
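A quick check of the skip filter with a made-up test name:

to_skip = frozenset(['spec/foo/known-bad'])
keep = lambda p, _: p not in to_skip
assert not keep('spec/foo/known-bad', None)
assert keep('spec/foo/still-fine', None)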
Example #19
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again; everything initialize does is
    # already done.

    # Don't re-run tests that have already completed; tests with an
    # incomplete status obviously have not.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [profile.load_test_profile(p)
                for p in results.options['profile']]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is a resume; don't bother with timing since it won't be accurate anyway
    profile.run(
        profiles,
        results.options['log_level'],
        backend,
        results.options['concurrent'])

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #20
def run(input_):
    """ Function for piglit run command

    This is a function because it allows it to be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with threaded runs
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    # If a test list is provided then set the forced_test_list value.
    forced_test_list = None
    if args.test_list:
        if len(args.test_profile) != 1:
            raise exceptions.PiglitFatalError(
                'Unable to force a test list with more than one profile')

        with open(args.test_list) as test_list:
            # Strip newlines and comments, ignore empty lines
            stripped = (t.split('#')[0].strip() for t in test_list)
            forced_test_list = [t for t in stripped if t]

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(_create_metadata(
        args, args.name or path.basename(args.results_path), forced_test_list))

    profiles = [profile.load_test_profile(p) for p in args.test_profile]
    for p in profiles:
        p.results_dir = args.results_path

    # Set the forced_test_list, if applicable
    if forced_test_list:
        profiles[0].forced_test_list = forced_test_list

    # Set the dmesg type
    if args.dmesg:
        for p in profiles:
            p.options['dmesg'] = dmesg.get_dmesg(args.dmesg)

    if args.monitored:
        for p in profiles:
            p.options['monitor'] = monitoring.Monitoring(args.monitored)

    for p in profiles:
        if args.exclude_tests:
            p.filters.append(profile.RegexFilter(args.exclude_tests,
                                                 inverse=True))
        if args.include_tests:
            p.filters.append(profile.RegexFilter(args.include_tests))

    time_elapsed = TimeAttribute(start=time.time())

    profile.run(profiles, args.log_level, backend, args.concurrency)

    time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #21
    def __init__(self, results, json_file):

        with open(json_file) as data:
            feature_data = json.load(data)

        self.feat_fractions = {}
        self.feat_status = {}
        self.features = set()
        self.results = results

        profiles = {}

        # we expect all the result sets to be for the same profile
        profile_orig = profile.load_test_profile(
            results[0].options['profile'][0])

        for feature in feature_data:
            self.features.add(feature)

            incl_str = feature_data[feature]["include_tests"]
            excl_str = feature_data[feature]["exclude_tests"]

            include_filter = [incl_str] if incl_str and not incl_str.isspace() else []
            exclude_filter = [excl_str] if excl_str and not excl_str.isspace() else []

            options.OPTIONS.exclude_filter = exclude_filter
            options.OPTIONS.include_filter = include_filter

            profiles[feature] = profile.TestProfile()
            profiles[feature].update(profile_orig)

            # An empty test list raises PiglitFatalError, but for reporting
            # we need to handle that case
            try:
                profiles[feature]._prepare_test_list()
            except exceptions.PiglitFatalError:
                pass

        for results in self.results:
            self.feat_fractions[results.name] = {}
            self.feat_status[results.name] = {}

            for feature in feature_data:
                result_set = set(results.tests)
                profile_set = set(profiles[feature].test_list)

                common_set = profile_set & result_set
                passed_list = [
                    x for x in common_set
                    if results.tests[x].result == status.PASS
                ]

                total = len(common_set)
                passed = len(passed_list)

                self.feat_fractions[results.name][feature] = (passed, total)
                if total == 0:
                    self.feat_status[results.name][feature] = status.NOTRUN
                else:
                    if 100 * passed // total >= feature_data[feature][
                            "target_rate"]:
                        self.feat_status[results.name][feature] = status.PASS
                    else:
                        self.feat_status[results.name][feature] = status.FAIL