Example #1
0
def test_set_meta():
    """backends.set_meta(): Works as expected."""
    # Inject a fake backend whose meta hook appends a marker, so the
    # dispatch through set_meta() is observable on a plain list.
    backends.BACKENDS['test_backend'] = backends.register.Registry(
        extensions=None,
        backend=None,
        load=None,
        meta=lambda x: x.append('bar'),
    )

    try:
        test = ['foo']
        backends.set_meta('test_backend', test)
        nt.assert_list_equal(test, ['foo', 'bar'])
    finally:
        # Remove the injected backend: BACKENDS is module-level shared
        # state, and leaving the stub behind would leak into other tests.
        del backends.BACKENDS['test_backend']
Example #2
0
def test_set_meta():
    """backends.set_meta(): Works as expected."""
    # Register a stub backend whose meta callback appends a sentinel,
    # which makes the dispatch through set_meta() observable.
    registry = backends.register.Registry(
        extensions=None,
        backend=None,
        load=None,
        meta=lambda lst: lst.append('bar'),
    )
    backends.BACKENDS['test_backend'] = registry

    data = ['foo']
    backends.set_meta('test_backend', data)

    nt.assert_list_equal(data, ['foo', 'bar'])
Example #3
0
    def test_basic(self, mocker):
        """Basic argument/output test."""
        # Build the stub registry first, then patch it in; patch.dict
        # restores BACKENDS automatically when the test finishes.
        registry = backends.register.Registry(
            extensions=['.test_backend'],
            backend=None,
            load=None,
            meta=lambda seq: seq.append('bar'),
        )
        mocker.patch.dict(backends.BACKENDS, {'test_backend': registry})

        result = []
        backends.set_meta('test_backend', result)

        assert result == ['bar']
Example #4
0
 def test_notimplemented(self, mock_backend):  # pylint: disable=redefined-outer-name,unused-argument
     """backends.set_meta(): An error is raised if a set_meta isn't
     properly implemented.
     """
     # The mock_backend fixture (not shown here) provides the
     # 'test_backend' entry whose meta hook is missing/unimplemented.
     with pytest.raises(backends.BackendNotImplementedError):
         backends.set_meta('test_backend', {})
Example #5
0
 def test_no_backened(self):
     """backends.set_meta: raises an error if there is no meta function."""
     # NOTE(review): the name looks like a typo for "no_backend", but
     # renaming would change the test's identity for the runner.
     # 'foo' is not registered in BACKENDS, so lookup must fail.
     with pytest.raises(backends.BackendError):
         backends.set_meta('foo', {})
Example #6
0
def test_set_meta_notimplemented():
    """backends.set_meta(): An error is raised if a set_meta isn't properly
    implemented.
    """
    # NOTE(review): no in-body assertion that an exception is raised;
    # presumably an expected-exception decorator sits above this function
    # outside the visible snippet — confirm against the original module.
    backends.set_meta('test_backend', {})
Example #7
0
def test_set_meta_no_backened():
    """backends.set_meta: raises an error if there is no meta function."""
    # NOTE(review): no in-body exception assertion; presumably relies on
    # an expected-exception decorator not shown here — confirm.
    backends.set_meta('foo', {})
def run(input_):
    """Entry point for the piglit run command.

    Parses input_ (the command-line argument list), copies the parsed
    values into the global options, runs the merged test profile, and
    writes results through the backend selected on the command line.

    This is a function because it allows it to be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg:
        args.concurrency = "none"

    # build up the include filter based on test_list
    if args.test_list:
        with open(args.test_list) as test_list:
            for line in test_list.readlines():
                args.include_tests.append(line.rstrip())

    # Pass arguments into Options
    options.OPTIONS.concurrent = args.concurrency
    options.OPTIONS.exclude_filter = args.exclude_tests
    options.OPTIONS.include_filter = args.include_tests
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.dmesg = args.dmesg
    options.OPTIONS.sync = args.sync

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()
    # Let the chosen backend attach its metadata to the result object
    # before any tests run.
    backends.set_meta(args.backend, results)

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        # Fall back to the results directory's basename when no explicit
        # name was given.
        results.name = path.basename(args.results_path)

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix)
    backend.initialize(_create_metadata(args, results.name))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    results.time_elapsed.start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(args.log_level, backend)

    results.time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #9
0
def run(input_):
    """Entry point for the piglit run command.

    Parses input_ (the command-line argument list), copies the parsed
    values into the global options, runs the merged test profile, and
    writes results through the backend selected on the command line.

    This is a function because it allows it to be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg:
        args.concurrency = "none"

    # build up the include filter based on test_list
    if args.test_list:
        with open(args.test_list) as test_list:
            for line in test_list.readlines():
                args.include_tests.append(line.rstrip())

    # Pass arguments into Options
    options.OPTIONS.concurrent = args.concurrency
    options.OPTIONS.exclude_filter = args.exclude_tests
    options.OPTIONS.include_filter = args.include_tests
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.dmesg = args.dmesg
    options.OPTIONS.sync = args.sync

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()
    # Let the chosen backend attach its metadata to the result object
    # before any tests run.
    backends.set_meta(args.backend, results)

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        # Fall back to the results directory's basename when no explicit
        # name was given.
        results.name = path.basename(args.results_path)

    backend = backends.get_backend(args.backend)(
        args.results_path, junit_suffix=args.junit_suffix)
    backend.initialize(_create_metadata(args, results.name))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    results.time_elapsed.start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(args.log_level, backend)

    results.time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #10
0
def run(input_):
    """Entry point for the piglit run command.

    Parses input_ (the command-line argument list), copies the parsed
    values into the global options, prepares the results directory,
    runs the merged test profile, and writes results through the backend
    selected on the command line.

    This is a function because it allows it to be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.concurrent = args.concurrency
    options.OPTIONS.exclude_filter = args.exclude_tests
    options.OPTIONS.include_filter = args.include_tests
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.dmesg = args.dmesg
    options.OPTIONS.sync = args.sync

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    if os.path.exists(args.results_path):
        if args.overwrite:
            # The path may be either a directory or a plain file; remove
            # whichever it is.
            if os.path.isdir(args.results_path):
                shutil.rmtree(args.results_path)
            else:
                os.unlink(args.results_path)
        else:
            raise exceptions.PiglitFatalError(
                'Cannot overwrite existing folder without the -o/--overwrite '
                'option being set.')
    os.makedirs(args.results_path)

    results = framework.results.TestrunResult()
    # Let the chosen backend attach its metadata to the result object
    # before any tests run.
    backends.set_meta(args.backend, results)

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        # Fall back to the results directory's basename when no explicit
        # name was given.
        results.name = path.basename(args.results_path)

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix)
    backend.initialize(_create_metadata(args, results.name))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path
    # If a test list is provided then set the forced_test_list value.
    if args.test_list:
        with open(args.test_list) as test_list:
            # Strip newlines
            profile.forced_test_list = list([t.strip() for t in test_list])

    results.time_elapsed.start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(args.log_level, backend)

    results.time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': results.time_elapsed})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #11
0
def run(input_):
    """Entry point for the piglit run command.

    Parses input_ (the command-line argument list), copies the parsed
    values into the global options, prepares the results directory,
    runs the merged test profile (optionally with dmesg capture and
    monitoring), and writes results through the selected backend.

    This is a function because it allows it to be shared between piglit-run.py
    and piglit run

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.concurrent = args.concurrency
    options.OPTIONS.exclude_filter = args.exclude_tests
    options.OPTIONS.include_filter = args.include_tests
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.dmesg = args.dmesg
    options.OPTIONS.monitored = args.monitored
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        # Translate the generic failure into a fatal, user-facing error.
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    results = framework.results.TestrunResult()
    # Let the chosen backend attach its metadata to the result object
    # before any tests run.
    backends.set_meta(args.backend, results)

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        # Fall back to the results directory's basename when no explicit
        # name was given.
        results.name = path.basename(args.results_path)

    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(_create_metadata(args, results.name))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path
    # If a test list is provided then set the forced_test_list value.
    if args.test_list:
        with open(args.test_list) as test_list:
            # Strip newlines
            profile.forced_test_list = list([t.strip() for t in test_list])

    results.time_elapsed.start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg

    if args.monitored:
        profile.monitoring = args.monitored

    profile.run(args.log_level, backend)

    results.time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': results.time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Example #12
0
def test_set_meta_notimplemented():
    """backends.set_meta(): An error is raised if a set_meta isn't properly
    implemented.
    """
    # NOTE(review): no in-body assertion that an exception is raised;
    # presumably an expected-exception decorator sits above this function
    # outside the visible snippet — confirm against the original module.
    backends.set_meta('test_backend', {})
Example #13
0
def test_set_meta_no_backened():
    """backends.set_meta: raises an error if there is no meta function."""
    # NOTE(review): no in-body exception assertion; presumably relies on
    # an expected-exception decorator not shown here — confirm.
    backends.set_meta('foo', {})