Example #1
def test_get_dmesg():
    """dmesg.get_dmesg: Returns correct class for platform"""
    ret = dmesg.get_dmesg()
    if sys.platform.startswith('win32'):
        nt.ok_(isinstance(ret, dmesg.DummyDmesg), msg='got {}'.format(type(ret)))
    elif sys.platform.startswith('linux'):
        nt.ok_(isinstance(ret, dmesg.LinuxDmesg), msg='got {}'.format(type(ret)))
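Taken together, these checks imply a simple platform dispatch inside framework.dmesg. A minimal sketch of that selection logic, with stub classes standing in for the real readers (the actual Piglit classes carry the dmesg-reading behaviour, so treat this as an illustration, not the implementation):

import sys

class DummyDmesg(object):
    """No-op stand-in used where dmesg is unavailable (stub)."""

class LinuxDmesg(object):
    """Reader backed by the Linux dmesg(1) utility (stub)."""

def get_dmesg(not_dummy=True):
    # Hand back the platform reader only on Linux and only when asked for
    # a real one; everything else falls through to the dummy.
    if not_dummy and sys.platform.startswith('linux'):
        return LinuxDmesg()
    return DummyDmesg()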
Example #2
def test_get_dmesg_dummy():
    """ Test that get_dmesg function returns a Dummy when asked """
    dummy = dmesg.get_dmesg(not_dummy=False)
    nt.assert_is(type(dummy),
                 dmesg.DummyDmesg,
                 msg=("Error: get_dmesg should have returned DummyDmesg, "
                      "but it actually returned {}".format(type(dummy))))
Example #3
def test_get_dmesg_dummy():
    """ Test that get_dmesg function returns a Dummy when asked """
    dummy = get_dmesg(not_dummy=False)
    nt.assert_is(
        type(dummy),
        DummyDmesg,
        msg="Error: get_dmesg should have returned DummyDmesg, " "but it actually returned {}".format(type(dummy)),
    )
Example #4
    def __init__(self):
        self.test_list = TestDict()
        self.forced_test_list = []
        self.filters = []
        self.options = {
            'dmesg': get_dmesg(False),
            'monitor': Monitoring(False),
        }
Example #5
def test_get_dmesg_dummy():
    """dmesg.get_dmesg: Returns a DummyDmesg when not_dummy is False"""
    dummy = dmesg.get_dmesg(not_dummy=False)
    nt.assert_is(
        type(dummy),
        dmesg.DummyDmesg,
        msg=("Error: get_dmesg should have returned DummyDmesg, " "but it actually returned {}".format(type(dummy))),
    )
Example #6
    def __init__(self):
        self.test_list = TestDict()
        self.forced_test_list = []
        self.filters = []
        self.options = {
            'dmesg': get_dmesg(False),
            'monitor': Monitoring(False),
        }
Example #7
    def __init__(self, filename):
        self.filename = filename
        self.forced_test_list = []
        self.filters = []
        self.options = {
            'dmesg': get_dmesg(False),
            'monitor': Monitoring(False),
            'ignore_missing': False,
        }
Example #8
    def dmesg(self, not_dummy):
        """ Set dmesg

        Arguments:
        not_dummy -- if truthy, dmesg will try to get a PosixDmesg; if falsy
                     it will get a DummyDmesg

        """
        self._dmesg = get_dmesg(not_dummy)
Example #9
def _get_dmesg():
    """ checks to ensure dmesg is not DummyDmesg, raises skip if it is

    If we are on a non-posix system we will get a dummy dmesg, go ahead and
    skip in that case
    """
    test = dmesg.get_dmesg()
    if isinstance(test, dmesg.DummyDmesg):
        raise SkipTest("A DummyDmesg was returned, testing dmesg impossible.")
    return test
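A sketch of how a nose-style test might lean on this helper; the test name and body here are illustrative, not taken from Piglit:

def test_dmesg_is_usable():
    """dmesg: _get_dmesg only hands back a usable reader (illustrative)"""
    # _get_dmesg raises SkipTest on platforms that only offer a DummyDmesg,
    # so everything past this call runs only where dmesg actually works.
    test = _get_dmesg()
    assert not isinstance(test, dmesg.DummyDmesg)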
Example #10
def _get_dmesg():
    """ checks to ensure dmesg is not DummyDmesg, raises skip if it is

    If we are on a non-posix system we will get a dummy dmesg, go ahead and
    skip in that case
    """
    test = dmesg.get_dmesg()
    if isinstance(test, dmesg.DummyDmesg):
        raise SkipTest("A DummyDmesg was returned, testing dmesg impossible.")
    return test
Example #11
    def test_get_dmesg(self, platform, dummy, expected, mocker):
        """Test that get_dmesg returns the expected dmesg type on variuos
        platforms with various configurations.
        """
        mocker.patch('framework.dmesg.sys.platform', platform)

        with mock.patch('framework.dmesg.subprocess.check_output',
                        mock.Mock(return_value=b'[1.0]foo')):
            actual = dmesg.get_dmesg(not_dummy=dummy)

        # We don't want a subclass, we want the *exact* class. This is a
        # unittest after all
        assert type(actual) == expected  # pylint: disable=unidiomatic-typecheck
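The platform/dummy/expected arguments point at a pytest parametrization feeding this test. A sketch of what that decorator could look like; the exact parameter table is an assumption based on the platforms seen in the other examples:

import pytest

@pytest.mark.parametrize('platform, dummy, expected', [
    # (patched sys.platform, not_dummy argument, expected exact class)
    ('linux', True, dmesg.LinuxDmesg),
    ('linux', False, dmesg.DummyDmesg),
    ('win32', True, dmesg.DummyDmesg),
    ('win32', False, dmesg.DummyDmesg),
])
def test_get_dmesg(platform, dummy, expected, mocker):
    ...  # body as in the example above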
Example #12
    def test_get_dmesg(self, platform, dummy, expected, mocker):
        """Test that get_dmesg returns the expected dmesg type on variuos
        platforms with various configurations.
        """
        mocker.patch('framework.dmesg.sys.platform', platform)

        with mock.patch('framework.dmesg.subprocess.check_output',
                        mock.Mock(return_value=b'[1.0]foo')):
            actual = dmesg.get_dmesg(not_dummy=dummy)

        # We don't want a subclass, we want the *exact* class. This is a
        # unittest after all
        assert type(actual) == expected  # pylint: disable=unidiomatic-typecheck
Example #13
    def __init__(self, filename):
        self.forced_test_list = []
        self.filters = []
        self.options = {
            'dmesg': get_dmesg(False),
            'monitor': Monitoring(False),
            'ignore_missing': False,
        }

        tree = et.parse(filename)
        root = tree.getroot()
        self._profiles = [
            load_test_profile(p.text) for p in root.findall('.//Profile')
        ]

        for p in self._profiles:
            p.options = self.options
Example #14
def resume(input_):
    unparsed = parsers.parse_config(input_)[1]

    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("-n",
                        "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    parser.add_argument(
        '-j',
        '--jobs',
        dest='jobs',
        action='store',
        type=int,
        default=core.PIGLIT_CONFIG.safe_get('core', 'jobs', None),
        help='Set the maximum number of jobs to run concurrently. '
        'By default, the reported number of CPUs is used.')
    args = parser.parse_args(unparsed)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']
    options.OPTIONS.jobs = args.jobs
    options.OPTIONS.no_retry = args.no_retry

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']
    base.Test.timeout = results.options['timeout']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path, file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [
        profile.load_test_profile(p) for p in results.options['profile']
    ]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if results.options['ignore_missing']:
            p.options['ignore_missing'] = results.options['ignore_missing']

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is resumed, don't bother with time since it won't be accurate anyway
    try:
        profile.run(profiles, results.options['log_level'], backend,
                    results.options['concurrent'], args.jobs)
    except exceptions.PiglitUserError as e:
        if str(e) != 'no matching tests':
            raise

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #15
            universal_newlines=True)
    except subprocess.CalledProcessError as e:
        # a return code of 79 indicates there are no subtests
        if e.returncode == 79:
            profile.test_list[grouptools.join('igt', test)] = IGTTest(test)
        elif e.returncode != 0:
            print("Error: Could not list subtests for " + test)
        else:
            raise

        # If we reach here there are no subtests.
        return

    for subtest in (s for s in out.splitlines() if s):
        profile.test_list[grouptools.join('igt', test, subtest)] = \
            IGTTest(test, ['--run-subtest', subtest])


def populate_profile():
    tests = []
    for test_list in TEST_LISTS:
        tests.extend(list_tests(test_list))

    for test in tests:
        add_subtest_cases(test)


populate_profile()
profile.options['dmesg'] = dmesg.get_dmesg(True)
profile.options['dmesg'].regex = re.compile(r"(\[drm:|drm_|intel_|i915_)")
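The compiled regex narrows the dmesg diff to DRM/Intel graphics messages. A quick sanity check of the pattern against made-up kernel log lines:

import re

pattern = re.compile(r"(\[drm:|drm_|intel_|i915_)")

# Hypothetical log lines, only here to illustrate what the filter keeps.
assert pattern.search('[drm:intel_dp_link_train] *ERROR* too many retries')
assert pattern.search('i915_gem_request timed out')
assert not pattern.search('usb 1-1: new high-speed USB device')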
Example #16
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py and
    piglit run.

    """
    args = _run_parser(input_)
    base.Test.timeout = args.timeout
    _disable_windows_exception_messages()

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with threaded runs.
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation
    options.OPTIONS.jobs = args.jobs

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    # If a test list is provided then set the forced_test_list value.
    forced_test_list = None
    if args.test_list:
        if len(args.test_profile) != 1:
            raise exceptions.PiglitFatalError(
                'Unable to force a test list with more than one profile')

        with open(args.test_list) as test_list:
            # Strip newlines and comments, ignore empty lines
            stripped = (t.split('#')[0].strip() for t in test_list)
            forced_test_list = [t for t in stripped if t]

    time_elapsed = TimeAttribute(start=time.time())

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(
        _create_metadata(args, args.name or path.basename(args.results_path),
                         forced_test_list))

    profiles = [profile.load_test_profile(p) for p in args.test_profile]
    for p in profiles:
        p.results_dir = args.results_path

    # Set the forced_test_list, if applicable
    if forced_test_list:
        profiles[0].forced_test_list = forced_test_list

    # Set the dmesg type
    if args.dmesg:
        for p in profiles:
            p.options['dmesg'] = dmesg.get_dmesg(args.dmesg)

    if args.monitored:
        for p in profiles:
            p.options['monitor'] = monitoring.Monitoring(args.monitored)

    if args.ignore_missing:
        for p in profiles:
            p.options['ignore_missing'] = args.ignore_missing

    for p in profiles:
        if args.exclude_tests:
            p.filters.append(
                profile.RegexFilter(args.exclude_tests, inverse=True))
        if args.include_tests:
            p.filters.append(profile.RegexFilter(args.include_tests))

    profile.run(profiles, args.log_level, backend, args.concurrency, args.jobs)

    time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #17
    def test(platform, class_):
        with mock.patch('framework.dmesg.sys.platform', platform):
            ret = dmesg.get_dmesg()
        nt.assert_is_instance(ret, class_)
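The bare helper above reads like the inner function of a nose test generator. A sketch of the enclosing generator that would drive it; the platform/class pairs are assumptions drawn from the other examples:

def test_get_dmesg_returns_platform_class():
    def test(platform, class_):
        with mock.patch('framework.dmesg.sys.platform', platform):
            ret = dmesg.get_dmesg()
        nt.assert_is_instance(ret, class_)

    for platform, class_ in [('linux', dmesg.LinuxDmesg),
                             ('win32', dmesg.DummyDmesg)]:
        yield test, platform, class_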
Example #18
def test_get_dmesg_dummy():
    """dmesg.get_dmesg: when not_dummy=False a dummy is provided"""
    # Linux was selected since it would normally return LinuxDmesg
    with mock.patch('framework.dmesg.sys.platform', 'linux'):
        ret = dmesg.get_dmesg(False)
    nt.assert_is_instance(ret, dmesg.DummyDmesg)
Example #19
            universal_newlines=True)
    except subprocess.CalledProcessError as e:
        # a return code of 79 indicates there are no subtests
        if e.returncode == 79:
            profile.test_list[grouptools.join('igt', test)] = IGTTest(test)
        elif e.returncode != 0:
            print("Error: Could not list subtests for " + test)
        else:
            raise

        # If we reach here there are no subtests.
        return

    for subtest in (s for s in out.splitlines() if s):
        profile.test_list[grouptools.join('igt', test, subtest)] = \
            IGTTest(test, ['--run-subtest', subtest])


def populate_profile():
    tests = []
    for test_list in TEST_LISTS:
        tests.extend(list_tests(test_list))

    for test in tests:
        add_subtest_cases(test)


populate_profile()
profile.options['dmesg'] = dmesg.get_dmesg(True)
profile.options['dmesg'].regex = re.compile(r"(\[drm:|drm_|intel_|i915_)")
Example #20
def test_get_dmesg_dummy():
    """dmesg.get_dmesg: when not_dummy=False a dummy is provided"""
    # Linux was selected since it would normally return LinuxDmesg
    with mock.patch('framework.dmesg.sys.platform', 'linux'):
        ret = dmesg.get_dmesg(False)
    nt.assert_is_instance(ret, dmesg.DummyDmesg)
Example #21
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [profile.load_test_profile(p)
                for p in results.options['profile']]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is resumed, don't bother with time since it won't be accurate anyway
    profile.run(
        profiles,
        results.options['log_level'],
        backend,
        results.options['concurrent'])

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #22
def run(input_):
    """ Function for piglit run command

    This is a function so that it can be shared between piglit-run.py and
    piglit run.

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must run serially, because dmesg isn't
    # reliable with threaded runs.
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    # If a test list is provided then set the forced_test_list value.
    forced_test_list = None
    if args.test_list:
        if len(args.test_profile) != 1:
            raise exceptions.PiglitFatalError(
                'Unable to force a test list with more than one profile')

        with open(args.test_list) as test_list:
            # Strip newlines and comments, ignore empty lines
            stripped = (t.split('#')[0].strip() for t in test_list)
            forced_test_list = [t for t in stripped if t]

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(_create_metadata(
        args, args.name or path.basename(args.results_path), forced_test_list))

    profiles = [profile.load_test_profile(p) for p in args.test_profile]
    for p in profiles:
        p.results_dir = args.results_path

    # Set the forced_test_list, if applicable
    if forced_test_list:
        profiles[0].forced_test_list = forced_test_list

    # Set the dmesg type
    if args.dmesg:
        for p in profiles:
            p.options['dmesg'] = dmesg.get_dmesg(args.dmesg)

    if args.monitored:
        for p in profiles:
            p.options['monitor'] = monitoring.Monitoring(args.monitored)

    for p in profiles:
        if args.exclude_tests:
            p.filters.append(profile.RegexFilter(args.exclude_tests,
                                                 inverse=True))
        if args.include_tests:
            p.filters.append(profile.RegexFilter(args.include_tests))

    time_elapsed = TimeAttribute(start=time.time())

    profile.run(profiles, args.log_level, backend, args.concurrency)

    time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #23
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [profile.load_test_profile(p)
                for p in results.options['profile']]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

    # This is resumed, don't bother with time since it won't be accurate anyway
    profile.run(
        profiles,
        results.options['log_level'],
        backend,
        results.options['concurrent'])

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))