Beispiel #1
0
def resume(input_):
    """Resume an interrupted piglit run.

    Parses *input_* (argv-style list), reloads the saved results from the
    results directory, restores the original run's options into the global
    OPTIONS object, and re-runs only the tests that have no final result.

    :param input_: list of command line arguments (without the program name)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("-n",
                        "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    # Restore the options recorded by the original run so the resumed run
    # behaves identically to the one being continued.
    results = backends.load(args.results_path)
    options.OPTIONS.concurrent = results.options['concurrent']
    options.OPTIONS.exclude_filter = results.options['exclude_filter']
    options.OPTIONS.include_filter = results.options['include_filter']
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.dmesg = results.options['dmesg']
    options.OPTIONS.sync = results.options['sync']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    # Refresh environment info and name before writing new results.
    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    # file_start_count continues result-file numbering after the existing
    # per-test files instead of overwriting them.
    backend = backends.get_backend('json')(
        args.results_path, file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            options.OPTIONS.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if options.OPTIONS.dmesg:
        profile.dmesg = options.OPTIONS.dmesg

    # This is resumed, don't bother with time since it wont be accurate anyway
    profile.run(results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Beispiel #2
0
def resume(input_):
    """Resume an interrupted piglit run.

    Parses *input_* (argv-style list), reloads the saved results, rebuilds a
    core.Options object from the stored run options, and re-runs only tests
    without a final result.

    :param input_: list of command line arguments (without the program name)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    # NOTE(review): the include filter is read from the 'filter' key (not
    # 'include_filter') -- presumably the key used by this results-format
    # version; confirm against the matching TestrunResult writer.
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        sync=results.options['sync'])

    core.get_config(args.config_file)

    opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    # Refresh environment info and name before writing new results.
    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    # file_start_count continues result-file numbering after the existing
    # per-test files instead of overwriting them.
    backend = backends.get_backend('json')(
        args.results_path,
        file_fsync=opts.sync,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    for name, result in results.tests.iteritems():
        if args.no_retry or result['result'] != 'incomplete':
            opts.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is resumed, don't bother with time since it wont be accurate anyway
    profile.run(opts, results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Beispiel #3
0
    def test_config_in_home_dir(self, tmpdir, mocker):
        """core.get_config() finds $HOME/.config/piglit.conf"""
        # Replace the real environment with one whose HOME is the tmpdir.
        fake_environ = mocker.patch('framework.core.os.environ', new={})
        fake_environ['HOME'] = six.text_type(tmpdir)
        tmpdir.join('piglit.conf').write(self._CONF_FILE)

        core.get_config()

        assert core.PIGLIT_CONFIG.has_section('nose-test')
Beispiel #4
0
    def test_xdg_config_home(self):
        """ get_config() finds $XDG_CONFIG_HOME/piglit.conf """
        # Point XDG_CONFIG_HOME at a throwaway directory holding the conf.
        with utils.tempdir() as config_home:
            os.environ["XDG_CONFIG_HOME"] = config_home
            conf_path = os.path.join(config_home, "piglit.conf")
            with open(conf_path, "w") as handle:
                handle.write(TestGetConfig.CONF_FILE)
            core.get_config()

        nt.ok_(core.PIGLIT_CONFIG.has_section("nose-test"), msg="$XDG_CONFIG_HOME not found")
Beispiel #5
0
    def test_config_in_home_dir(self, tmpdir, mocker):
        """core.get_config() finds $HOME/.config/piglit.conf"""
        # Swap in an empty environment, then set only HOME.
        environ = mocker.patch('framework.core.os.environ', new={})
        environ['HOME'] = six.text_type(tmpdir)
        config_file = tmpdir.join('piglit.conf')
        config_file.write(self._CONF_FILE)

        core.get_config()

        assert core.PIGLIT_CONFIG.has_section('nose-test')
Beispiel #6
0
def test_piglit_root():
    """core.get_config() finds "piglit root"/piglit.conf"""
    # Drop a piglit.conf in the piglit root (the cwd), then chdir away so
    # only the piglit-root search path can find it.
    with open('piglit.conf', 'w') as f:
        f.write(_CONF_FILE)
    try:
        with utils.nose.chdir('..'):
            core.get_config()
    finally:
        # Always remove the conf file, even when get_config() raises;
        # a leftover ./piglit.conf would poison the other get_config tests.
        os.unlink('piglit.conf')

    nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
           msg='$PIGLIT_ROOT not found')
Beispiel #7
0
def test_xdg_config_home():
    """core.get_config() finds $XDG_CONFIG_HOME/piglit.conf"""
    # Point XDG_CONFIG_HOME at a throwaway directory holding the conf.
    with utils.tempdir() as config_home:
        os.environ['XDG_CONFIG_HOME'] = config_home
        conf_path = os.path.join(config_home, 'piglit.conf')
        with open(conf_path, 'w') as handle:
            handle.write(_CONF_FILE)
        core.get_config()

    nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
           msg='$XDG_CONFIG_HOME not found')
Beispiel #8
0
    def test_xdg_config_home(self):
        """ get_config() finds $XDG_CONFIG_HOME/piglit.conf """
        # Write the conf into a tempdir and advertise it via the XDG var.
        with utils.tempdir() as xdg_dir:
            os.environ['XDG_CONFIG_HOME'] = xdg_dir
            conf_file = os.path.join(xdg_dir, 'piglit.conf')
            with open(conf_file, 'w') as handle:
                handle.write(TestGetConfig.CONF_FILE)
            core.get_config()

        nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
               msg='$XDG_CONFIG_HOME not found')
Beispiel #9
0
    def test_config_home_fallback(self):
        """ get_config() finds $HOME/.config/piglit.conf """
        # Build a fake $HOME containing .config/piglit.conf.
        with utils.tempdir() as home:
            os.environ["HOME"] = home
            config_dir = os.path.join(home, ".config")
            os.mkdir(config_dir)
            with open(os.path.join(config_dir, "piglit.conf"), "w") as handle:
                handle.write(TestGetConfig.CONF_FILE)
            core.get_config()

        nt.ok_(core.PIGLIT_CONFIG.has_section("nose-test"), msg="$HOME/.config/piglit.conf not found")
Beispiel #10
0
def test_local():
    """core.get_config() finds ./piglit.conf"""
    # Run from inside a tempdir that contains the conf; the chdir context
    # manager restores the cwd afterwards.
    with utils.nose.tempdir() as workdir:
        with utils.nose.chdir(workdir):
            conf_path = os.path.join(workdir, 'piglit.conf')
            with open(conf_path, 'w') as handle:
                handle.write(_CONF_FILE)
            core.get_config()

    nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
           msg='./piglit.conf not found')
Beispiel #11
0
    def test_piglit_root(self):
        """ get_config() finds "piglit root"/piglit.conf """
        with open("piglit.conf", "w") as f:
            f.write(TestGetConfig.CONF_FILE)
        # Deferred cleanups: remove the conf file and chdir back. Note that
        # os.getcwd() is evaluated *now*, so the teardown returns to the
        # directory we are in before the chdir below.
        self.defer(os.unlink, "piglit.conf")
        self.defer(os.chdir, os.getcwd())
        os.chdir("..")

        core.get_config()

        nt.ok_(core.PIGLIT_CONFIG.has_section("nose-test"), msg="$PIGLIT_ROOT not found")
Beispiel #12
0
def test_config_home_fallback():
    """core.get_config() finds $HOME/.config/piglit.conf"""
    # Build a fake $HOME containing .config/piglit.conf.
    with utils.tempdir() as fake_home:
        os.environ['HOME'] = fake_home
        config_dir = os.path.join(fake_home, '.config')
        os.mkdir(config_dir)
        with open(os.path.join(config_dir, 'piglit.conf'), 'w') as handle:
            handle.write(_CONF_FILE)
        core.get_config()

    nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
           msg='$HOME/.config/piglit.conf not found')
Beispiel #13
0
    def test_config_home_fallback(self):
        """ get_config() finds $HOME/.config/piglit.conf """
        # Fake $HOME with a .config directory holding the conf file.
        with utils.tempdir() as home_dir:
            os.environ['HOME'] = home_dir
            dot_config = os.path.join(home_dir, '.config')
            os.mkdir(dot_config)
            with open(os.path.join(dot_config, 'piglit.conf'), 'w') as handle:
                handle.write(TestGetConfig.CONF_FILE)
            core.get_config()

        nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
               msg='$HOME/.config/piglit.conf not found')
Beispiel #14
0
def test_local():
    """core.get_config() finds ./piglit.conf"""
    return_dir = os.getcwd()
    with utils.tempdir() as tdir:
        try:
            os.chdir(tdir)

            with open(os.path.join(tdir, 'piglit.conf'), 'w') as f:
                f.write(_CONF_FILE)

            core.get_config()
        finally:
            # Restore the cwd before tempdir tears tdir down; otherwise the
            # process is left inside a deleted directory, breaking any later
            # test that touches relative paths.
            os.chdir(return_dir)

    nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
           msg='./piglit.conf not found')
Beispiel #15
0
    def test_config_in_piglit_root(self, mocker, tmpdir):
        """core.get_config() finds "piglit root"/piglit.conf"""
        # Patch core.__file__ so the piglit root resolves to tmpdir; that
        # attribute is how piglit locates its root directory.
        fake_core = tmpdir.join('framework', 'core.py')
        mocker.patch('framework.core.__file__', six.text_type(fake_core))
        mocker.patch('framework.core.os.environ', new={})
        tmpdir.join('piglit.conf').write(self._CONF_FILE)

        core.get_config()

        assert core.PIGLIT_CONFIG.has_section('nose-test')
Beispiel #16
0
    def test_config_in_piglit_root(self, mocker, tmpdir):
        """core.get_config() finds "piglit root"/piglit.conf"""
        # piglit derives its root from core.__file__, so point that inside
        # tmpdir and clear the environment to disable the other search paths.
        core_path = tmpdir.join('framework', 'core.py')
        mocker.patch('framework.core.__file__', six.text_type(core_path))
        mocker.patch('framework.core.os.environ', new={})
        tmpdir.join('piglit.conf').write(self._CONF_FILE)

        core.get_config()

        assert core.PIGLIT_CONFIG.has_section('nose-test')
Beispiel #17
0
def resume(input_):
    """Resume an interrupted piglit run.

    Parses *input_* (argv-style list), reloads the saved results, replays
    the already-completed tests into a fresh JSON writer, and re-runs the
    remaining tests.

    :param input_: list of command line arguments (without the program name)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    args = parser.parse_args(input_)

    results = framework.results.load_results(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        verbose=results.options['verbose'])

    core.get_config(args.config_file)

    if results.options.get('platform'):
        opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    # Start a fresh results.json and replay all previously recorded results
    # into it, excluding those tests from the new run.
    results_path = path.join(args.results_path, 'results.json')
    json_writer = framework.results.JSONWriter(open(results_path, 'w+'))
    json_writer.initialize_json(results.options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        opts.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is resumed, don't bother with time since it wont be accurate anyway
    profile.run(opts, json_writer)

    json_writer.close_dict()
    json_writer.close_json()

    # Fixed typo in the user-facing message ("have ben wrriten").
    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(results_path))
Beispiel #18
0
def resume(input_):
    """Resume an interrupted piglit run.

    Parses *input_* (argv-style list), reloads the saved run state, and
    re-runs every test not present in the stored results.

    :param input_: list of command line arguments (without the program name)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = framework.results.TestrunResult.resume(args.results_path)
    # NOTE(review): the include filter is read from the 'filter' key (not
    # 'include_filter') -- presumably the key used by this results-format
    # version; confirm against the matching TestrunResult writer.
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        sync=results.options['sync'])

    core.get_config(args.config_file)

    opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    # Refresh environment info and name before writing new results.
    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    # file_start_count continues result-file numbering after the existing
    # per-test files instead of overwriting them.
    backend = backends.get_backend('json')(
        args.results_path,
        file_fsync=opts.sync,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Every test with a recorded result is skipped on the resumed run.
    for name in results.tests.iterkeys():
        opts.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is resumed, don't bother with time since it wont be accurate anyway
    profile.run(opts, results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Beispiel #19
0
    def test_piglit_root(self):
        """ get_config() finds "piglit root"/piglit.conf """
        with open('piglit.conf', 'w') as f:
            f.write(TestGetConfig.CONF_FILE)
        # Deferred cleanups: remove the conf file and chdir back. Note that
        # os.getcwd() is evaluated *now*, so the teardown returns to the
        # directory we are in before the chdir below.
        self.defer(os.unlink, 'piglit.conf')
        self.defer(os.chdir, os.getcwd())
        os.chdir('..')

        core.get_config()

        nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
               msg='$PIGLIT_ROOT not found')
Beispiel #20
0
    def test_local(self):
        """ get_config() finds ./piglit.conf """
        with utils.tempdir() as tdir:
            # os.getcwd() is evaluated *now*, before the chdir below, so the
            # deferred teardown restores the original working directory.
            self.defer(os.chdir, os.getcwd())
            os.chdir(tdir)

            with open(os.path.join(tdir, "piglit.conf"), "w") as f:
                f.write(TestGetConfig.CONF_FILE)

            core.get_config()

        nt.ok_(core.PIGLIT_CONFIG.has_section("nose-test"), msg="./piglit.conf not found")
Beispiel #21
0
def resume(input_):
    """Resume an interrupted piglit run.

    Parses *input_* (argv-style list), reloads the saved results, replays
    the already-completed tests into a fresh JSON writer, and re-runs the
    remaining tests.

    :param input_: list of command line arguments (without the program name)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    args = parser.parse_args(input_)

    results = framework.results.load_results(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        verbose=results.options['verbose'])

    core.get_config(args.config_file)

    if results.options.get('platform'):
        opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    # Start a fresh results.json and replay all previously recorded results
    # into it, excluding those tests from the new run.
    results_path = path.join(args.results_path, 'results.json')
    json_writer = framework.results.JSONWriter(open(results_path, 'w+'))
    json_writer.initialize_json(results.options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    for key, value in results.tests.iteritems():
        json_writer.write_dict_item(key, value)
        opts.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is resumed, don't bother with time since it wont be accurate anyway
    profile.run(opts, json_writer)

    json_writer.close_dict()
    json_writer.close_json()

    # Fixed typo in the user-facing message ("have ben wrriten").
    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(results_path))
Beispiel #22
0
    def test_config_in_current(self, tmpdir, mocker):
        """core.get_config() finds ./piglit.conf"""
        # Clear the environment so only the cwd search path can succeed.
        mocker.patch('framework.core.os.environ', new={})
        tmpdir.join('piglit.conf').write(self._CONF_FILE)

        original_dir = tmpdir.chdir()
        try:
            core.get_config()
        finally:
            os.chdir(six.text_type(original_dir))

        assert core.PIGLIT_CONFIG.has_section('nose-test')
Beispiel #23
0
    def test_config_in_current(self, tmpdir, mocker):
        """core.get_config() finds ./piglit.conf"""
        # With the environment emptied, get_config() can only find the conf
        # file sitting in the current working directory.
        mocker.patch('framework.core.os.environ', new={})
        conf_file = tmpdir.join('piglit.conf')
        conf_file.write(self._CONF_FILE)

        previous_dir = tmpdir.chdir()
        try:
            core.get_config()
        finally:
            os.chdir(six.text_type(previous_dir))

        assert core.PIGLIT_CONFIG.has_section('nose-test')
Beispiel #24
0
def resume(input_):
    """Resume an interrupted piglit run.

    Parses *input_* (argv-style list), reloads the saved results, replays
    the already-recorded tests into a fresh JSON backend, and re-runs the
    remaining tests.

    :param input_: list of command line arguments (without the program name)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    args = parser.parse_args(input_)

    results = framework.results.load_results(args.results_path)
    # NOTE(review): the include filter is read from the 'filter' key (not
    # 'include_filter') -- presumably the key used by this results-format
    # version; confirm against the matching results writer.
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        sync=results.options['sync'])

    core.get_config(args.config_file)

    opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    # Refresh environment info and name before writing new results.
    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = framework.results.get_backend('json')(
        args.results_path,
        results.options,
        file_fsync=opts.sync)

    # Replay every recorded result into the new backend and exclude those
    # tests from the resumed run.
    for key, value in results.tests.iteritems():
        backend.write_test(key, value)
        opts.exclude_tests.add(key)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is resumed, don't bother with time since it wont be accurate anyway
    profile.run(opts, results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Beispiel #25
0
def test_piglit_root():
    """core.get_config() finds "piglit root"/piglit.conf"""
    with open('piglit.conf', 'w') as f:
        f.write(_CONF_FILE)
    # Bug fix: the original called bare getcwd(); only the os module is in
    # scope here (cf. every other use below), so qualify it.
    return_dir = os.getcwd()
    try:
        os.chdir('..')
        core.get_config()
    finally:
        # Always return to the original directory and remove the conf file,
        # even when get_config() raises.
        os.chdir(return_dir)
        os.unlink('piglit.conf')

    nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
           msg='$PIGLIT_ROOT not found')
Beispiel #26
0
def test_piglit_root():
    """core.get_config() finds "piglit root"/piglit.conf"""
    # Place the conf in the piglit root (the cwd), step out of it, and let
    # the root search path locate the file.
    with open('piglit.conf', 'w') as handle:
        handle.write(_CONF_FILE)
    start_dir = os.getcwd()
    try:
        os.chdir('..')
        core.get_config()
    finally:
        os.chdir(start_dir)
        os.unlink('piglit.conf')

    nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
           msg='$PIGLIT_ROOT not found')
Beispiel #27
0
def main():
    """Print the shell command line for every test in a profile.

    Loads the given test profile, applies the include/exclude filters, and
    prints one "name ::: command" line per test, with commands made relative
    to the piglit directory.
    """
    core.get_config()
    parser = argparse.ArgumentParser(sys.argv)
    parser.add_argument(
        "-t",
        "--include-tests",
        default=[],
        action="append",
        metavar="<regex>",
        help="Run only matching tests (can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests (can be used more than "
                        "once)")
    parser.add_argument("testProfile",
                        metavar="<Path to testfile>",
                        help="Path to results folder")
    args = parser.parse_args()

    opts = core.Options(exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests)

    # Change to the piglit's path
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile = framework.profile.load_test_profile(args.testProfile)

    def getCommand(test):
        # Build a printable command line: GleanTest env vars first (as
        # VAR='value' prefixes), then the command itself.
        command = ''
        if isinstance(test, GleanTest):
            for var, val in test.env.items():
                command += var + "='" + val + "' "

        # Make the test command relative to the piglit_dir
        testCommand = test.command[:]
        testCommand[0] = os.path.relpath(testCommand[0], piglit_dir)

        command += ' '.join(testCommand)
        return command

    profile._prepare_test_list(opts)
    for name, test in profile.test_list.items():
        assert (isinstance(test, Test))
        print(name, ':::', getCommand(test))
Beispiel #28
0
    def test(self):
        """ get_config() finds $XDG_CONFIG_HOME/piglit.conf """
        # NOTE(review): this lambda only *compares* PIGLIT_CONFIG with the
        # class using ==; as a teardown it has no effect. Presumably a reset
        # (assignment) was intended -- confirm against the original intent.
        self.defer(lambda: core.PIGLIT_CONFIG == ConfigParser.SafeConfigParser)
        self.add_teardown('XDG_CONFIG_HOME')
        # A piglit.conf in the cwd may be picked up instead of the XDG one;
        # stash it away and restore it on teardown.
        if os.path.exists('piglit.conf'):
            shutil.move('piglit.conf', 'piglit.conf.restore')
            self.defer(shutil.move, 'piglit.conf.restore', 'piglit.conf')

        with utils.tempdir() as tdir:
            os.environ['XDG_CONFIG_HOME'] = tdir
            with open(os.path.join(tdir, 'piglit.conf'), 'w') as f:
                f.write(CONF_FILE)
            core.get_config()

        nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
               msg='$XDG_CONFIG_HOME not found')
Beispiel #29
0
def main():
    """Print the shell command line for every test in a profile.

    Loads the given test profile, applies the include/exclude filters, and
    prints one "name ::: command" line per test, with commands made relative
    to the piglit directory.
    """
    core.get_config()
    parser = argparse.ArgumentParser(sys.argv)
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests (can be used more than "
                             "once)")
    parser.add_argument("testProfile",
                        metavar="<Path to testfile>",
                        help="Path to results folder")
    args = parser.parse_args()

    opts = core.Options(exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests)

    # Change to the piglit's path
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    profile = framework.profile.load_test_profile(args.testProfile)

    def getCommand(test):
        # Build a printable command line: GleanTest env vars first (as
        # VAR='value' prefixes), then the command itself.
        command = ''
        if isinstance(test, GleanTest):
            for var, val in test.env.items():
                command += var + "='" + val + "' "

        # Make the test command relative to the piglit_dir
        testCommand = test.command[:]
        testCommand[0] = os.path.relpath(testCommand[0], piglit_dir)

        command += ' '.join(testCommand)
        return command

    profile._prepare_test_list(opts)
    for name, test in profile.test_list.items():
        assert(isinstance(test, Test))
        print(name, ':::', getCommand(test))
Beispiel #30
0
def parse_config(input_):
    """Convenience method for the CONFIG parser.

    This returns a two element tuple, the first element is a namespace with the
    known arguments in it (in this case the config_file), and the second is the
    remaining unparsed arguments. These remaining arguments should be passed to
    a new ArgumentParser instance.

    This will also call core.get_config on the config file. The parsed options
    are passed to ensure API compatibility

    :param input_: list of command line arguments to parse
    :return: (parsed namespace, list of unparsed arguments)
    """
    parsed, unparsed = CONFIG.parse_known_args(input_)

    # Read the config file
    # We want to read this before we finish parsing since some default options
    # are set in the config file.
    core.get_config(parsed.config_file)

    return parsed, unparsed
Beispiel #31
0
def parse_config(input_):
    """Convenience method for the CONFIG parser.

    This returns a two element tuple, the first element is a namespace with the
    known arguments in it (in this case the config_file), and the second is the
    remaining unparsed arguments. These remaining arguments should be passed to
    a new ArgumentParser instance.

    This will also call core.get_config on the config file. The parsed options
    are passed to ensure API compatibility

    :param input_: list of command line arguments to parse
    :return: (parsed namespace, list of unparsed arguments)
    """
    parsed, unparsed = CONFIG.parse_known_args(input_)

    # Read the config file
    # We want to read this before we finish parsing since some default options
    # are set in the config file.
    core.get_config(parsed.config_file)

    return parsed, unparsed
Beispiel #32
0
    def test(self):
        """ get_config() finds "piglit root"/piglit.conf """
        # NOTE(review): this lambda only *compares* PIGLIT_CONFIG with the
        # class using ==; as a teardown it has no effect. Presumably a reset
        # (assignment) was intended -- confirm against the original intent.
        self.defer(lambda: core.PIGLIT_CONFIG == ConfigParser.SafeConfigParser)
        self.add_teardown('HOME')
        self.add_teardown('XDG_CONFIG_HOME')

        # A pre-existing piglit.conf would interfere; stash it away and
        # restore it on teardown.
        if os.path.exists('piglit.conf'):
            shutil.move('piglit.conf', 'piglit.conf.restore')
            self.defer(shutil.move, 'piglit.conf.restore', 'piglit.conf')

        with open('piglit.conf', 'w') as f:
            f.write(CONF_FILE)
        # os.getcwd() is evaluated *now*, so the deferred chdir returns here.
        self.defer(os.unlink, 'piglit.conf')
        self.defer(os.chdir, os.getcwd())
        os.chdir('..')

        core.get_config()

        nt.ok_(core.PIGLIT_CONFIG.has_section('nose-test'),
               msg='$PIGLIT_ROOT not found')
Beispiel #33
0
def _run_parser(input_):
    """ Parser for piglit run command.

    Parses in two phases: first only -f/--config is extracted so the config
    file can be loaded (it supplies defaults for the remaining options), then
    the full parser consumes the rest of *input_*.

    :param input_: list of command line arguments (without the program name)
    :return: the fully parsed argparse namespace
    """
    # Parse the config file before any other options, this allows the config
    # file to be used to set default values for the parser.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="override piglit.conf search path")
    known, unparsed = parser.parse_known_args(input_)

    # Read the config file
    # We want to read this before we finish parsing since some default options
    # are set in the config file.
    core.get_config(known.config_file)

    # Second phase: the real parser, operating on the leftover arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument("-n",
                        "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t",
                        "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                        "(can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                        "(can be used more than once)")
    parser.add_argument('-b',
                        '--backend',
                        default=_default_backend(),
                        choices=backends.BACKENDS,
                        help='select a results backend to use')
    # -c and -1 are mutually exclusive ways to override the default "some"
    # concurrency mode.
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c',
                             '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1",
                             "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p",
                        "--platform",
                        choices=core.PLATFORMS,
                        default=_default_platform(),
                        help="Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                        "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-s",
                        "--sync",
                        action="store_true",
                        help="Sync results to disk after every test")
    parser.add_argument(
        "--junit_suffix",
        type=str,
        default="",
        help="suffix string to append to each test name in junit")
    # -f/--config is a duplicate that cannot be hit, but is needed for help
    # generation
    parser.add_argument("-f",
                        "--config",
                        metavar="config_file",
                        dest="__unused",
                        help="override piglit.conf search path")
    # -v is a deprecated alias for -l verbose; both store into log_level.
    log_parser = parser.add_mutually_exclusive_group()
    log_parser.add_argument('-v',
                            '--verbose',
                            action='store_const',
                            const='verbose',
                            default='quiet',
                            dest='log_level',
                            help='DEPRECATED! Print more information during '
                            'test runs. Use -l/--log-level instead')
    log_parser.add_argument("-l",
                            "--log-level",
                            dest="log_level",
                            action="store",
                            choices=['quiet', 'verbose', 'dummy'],
                            default='quiet',
                            help="Set the logger verbosity level")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    return parser.parse_args(unparsed)
Beispiel #34
0
# Prefer the third-party simplejson when installed; fall back to the stdlib
# json module, which exposes the same API.
try:
    import simplejson as json
except ImportError:
    import json
from nose.plugins.skip import SkipTest

from framework import test, backends, results, core

# Names re-exported for test modules doing a star-import of this module.
__all__ = [
    'tempfile',
    'resultfile',
    'tempdir',
    'JSON_DATA'
]

# Load piglit.conf once at import time so helpers see a configured core.
core.get_config()


class _Tree(dict):
    """dict subclass that auto-creates nested ``_Tree`` nodes.

    Looking up a missing key inserts and returns a fresh empty ``_Tree``,
    so deeply nested entries can be read or assigned without building the
    intermediate dicts by hand (defaultdict-style auto-vivification).
    """
    def __getitem__(self, key):
        # LBYL variant of the lookup: vivify a missing key first, then
        # delegate to the plain dict lookup.
        if key not in self:
            super(_Tree, self).__setitem__(key, _Tree())
        return super(_Tree, self).__getitem__(key)


JSON_DATA = {
    "options": {
        "profile": "tests/fake.py",
Beispiel #35
0
def run(input_):
    """Parse command-line arguments and execute the requested test profiles.

    Builds the CLI parser, reads the piglit config, translates the parsed
    arguments into a core.Options instance, then runs the merged test
    profile(s), streaming JSON results to <results_path>/results.json.

    Arguments:
    input_ -- list of argument strings, e.g. sys.argv[1:]
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c', '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1", "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v", "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                             "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS     = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX      = 0x0002
        SEM_NOOPENFILEERRORBOX     = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must have a serial run, because dmesg
    # isn't reliable with a threaded run.
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    core.get_config(args.config_file)

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        verbose=args.verbose)

    # Set the platform to pass to waffle
    if args.platform:
        opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'results.json')
    # NOTE(review): result_file is never closed here; presumably JSONWriter /
    # close_json() takes ownership of the handle -- confirm.
    result_file = open(result_filepath, 'w')
    json_writer = framework.results.JSONWriter(result_file)

    # Create a dictionary to pass to initialize json, it needs the contents of
    # the env dictionary and profile and platform information
    options = {'profile': args.test_profile}
    # core.Options iterates as (key, value) pairs; copy them all in.
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    json_writer.initialize_json(options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_json()

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + result_filepath)
Beispiel #36
0
def _run_parser(input_):
    """Build and run the argument parser for the piglit run command.

    The -f/--config option is extracted first with parse_known_args and the
    config is loaded immediately, because several argument defaults below
    (backend, platform) are taken from the config file.

    Arguments:
    input_ -- list of argument strings, e.g. sys.argv[1:]

    Returns the parsed argparse.Namespace.
    """
    # Parse the config file before any other options, this allows the config
    # file to be used to set default values for the parser.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="override piglit.conf search path")
    known, unparsed = parser.parse_known_args(input_)

    # Read the config file
    # We want to read this before we finish parsing since some default options
    # are set in the config file.
    core.get_config(known.config_file)

    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d", "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t", "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                             "(can be used more than once)")
    parser.add_argument("-x", "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                             "(can be used more than once)")
    parser.add_argument('-b', '--backend',
                        default=_default_backend(),
                        choices=framework.results.BACKENDS,
                        help='select a results backend to use')
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c', '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1", "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p", "--platform",
                        choices=_PLATFORMS,
                        default=_default_platform(),
                        help="Name of windows system passed to waffle")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                             "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-s", "--sync",
                        action="store_true",
                        help="Sync results to disk after every test")
    parser.add_argument("--junit_suffix",
                        type=str,
                        default="",
                        help="suffix string to append to each test name in junit")
    # -f/--config is a duplicate that cannot be hit, but is needed for help
    # generation
    parser.add_argument("-f", "--config",
                        metavar="config_file",
                        dest="__unused",
                        help="override piglit.conf search path")
    log_parser = parser.add_mutually_exclusive_group()
    log_parser.add_argument('-v', '--verbose',
                            action='store_const',
                            const='verbose',
                            default='quiet',
                            dest='log_level',
                            help='DEPRECATED! Print more information during '
                                 'test runs. Use -l/--log-level instead')
    log_parser.add_argument("-l", "--log-level",
                            dest="log_level",
                            action="store",
                            choices=['quiet', 'verbose', 'dummy'],
                            default='quiet',
                            help="Set the logger verbosity level")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    return parser.parse_args(unparsed)
Beispiel #37
0
def resume(input_):
    """Resume an interrupted piglit run from its results directory.

    Loads the saved results, restores the option values recorded in them,
    rebuilds the test profiles with filters that skip already-finished
    tests, and runs whatever remains, appending to the existing JSON
    results.

    Arguments:
    input_ -- list of argument strings, e.g. sys.argv[1:]
    """
    unparsed = parsers.parse_config(input_)[1]

    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("-n",
                        "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    parser.add_argument(
        '-j',
        '--jobs',
        dest='jobs',
        action='store',
        type=int,
        default=core.PIGLIT_CONFIG.safe_get('core', 'jobs', None),
        help='Set the maximum number of jobs to run concurrently. '
        'By default, the reported number of CPUs is used.')
    args = parser.parse_args(unparsed)
    _disable_windows_exception_messages()

    # Restore the option values that were recorded when the run started so
    # the resumed run behaves identically.
    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']
    options.OPTIONS.jobs = args.jobs
    options.OPTIONS.no_retry = args.no_retry

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']
    base.Test.timeout = results.options['timeout']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path, file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    # Rebuild each profile with the filters and options of the original run.
    profiles = [
        profile.load_test_profile(p) for p in results.options['profile']
    ]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if results.options['ignore_missing']:
            p.options['ignore_missing'] = results.options['ignore_missing']

        # exclude_tests is fully built above, so the late-binding closure
        # over it is safe.
        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is resumed, don't bother with time since it won't be accurate anyway
    try:
        profile.run(profiles, results.options['log_level'], backend,
                    results.options['concurrent'], args.jobs)
    except exceptions.PiglitUserError as e:
        # "no matching tests" just means every test already finished; that
        # is a successful resume, not an error.
        if str(e) != 'no matching tests':
            raise

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Beispiel #38
0
import six
# Compatibility shim: some six releases lack six.moves.getcwd, so fall back
# to the interpreter-appropriate os function.
try:
    from six.moves import getcwd
except ImportError:
    # pylint: disable=no-member
    if six.PY2:
        getcwd = os.getcwdu
    elif six.PY3:
        getcwd = os.getcwd
    # pylint: enable=no-member

from framework import test, backends, core, results, compat

# Names re-exported for test modules doing a star-import of this module.
__all__ = ['tempfile', 'resultfile', 'tempdir', 'JSON_DATA']

# Load piglit.conf once at import time so helpers see a configured core.
core.get_config()

# File open mode constants (their consumers are not visible in this chunk).
_WRITE_MODE = 'w'
_READ_MODE = 'r'


class _Tree(dict):
    """Auto-vivifying dict: missing keys become empty ``_Tree`` nodes.

    Accessing an absent key stores and returns a new ``_Tree``, letting
    callers build nested structures without creating each level by hand.
    """
    def __getitem__(self, key):
        plain_lookup = super(_Tree, self).__getitem__
        try:
            return plain_lookup(key)
        except KeyError:
            # Key absent: insert a fresh subtree and hand it back.
            child = _Tree()
            self[key] = child
            return child

Beispiel #39
0
def setup_module():
    """Give this test module a fresh PiglitConfig, then load piglit.conf."""
    fresh_config = core.PiglitConfig(allow_no_value=True)
    core.PIGLIT_CONFIG = fresh_config
    core.get_config()
Beispiel #40
0
def resume(input_):
    """Resume an interrupted piglit run from its results directory.

    Loads the saved results, restores the recorded option values, rebuilds
    the test profiles with filters that skip already-finished tests, and
    runs whatever remains, appending to the existing JSON results.

    Arguments:
    input_ -- list of argument strings, e.g. sys.argv[1:]
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    # Restore the option values that were recorded when the run started so
    # the resumed run behaves identically.
    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    # Fixed: this previously assigned to a misspelled "proces_isolation"
    # attribute, silently dropping the recorded process isolation setting.
    options.OPTIONS.process_isolation = results.options['process_isolation']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    # Rebuild each profile with the filters and options of the original run.
    profiles = [profile.load_test_profile(p)
                for p in results.options['profile']]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        # exclude_tests is fully built above, so the late-binding closure
        # over it is safe.
        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is resumed, don't bother with time since it won't be accurate anyway
    profile.run(
        profiles,
        results.options['log_level'],
        backend,
        results.options['concurrent'])

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Beispiel #41
0
def run(input_):
    """Parse command-line arguments and execute the requested test profiles.

    Builds the CLI parser, reads the piglit config, translates the parsed
    arguments into a core.Options instance, then runs the merged test
    profile(s), streaming JSON results to <results_path>/results.json.

    Arguments:
    input_ -- list of argument strings, e.g. sys.argv[1:]
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-n",
                        "--name",
                        metavar="<test name>",
                        default=None,
                        help="Name of this test run")
    parser.add_argument("-d",
                        "--dry-run",
                        action="store_false",
                        dest="execute",
                        help="Do not execute the tests")
    parser.add_argument("-t",
                        "--include-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Run only matching tests "
                        "(can be used more than once)")
    parser.add_argument("-x",
                        "--exclude-tests",
                        default=[],
                        action="append",
                        metavar="<regex>",
                        help="Exclude matching tests "
                        "(can be used more than once)")
    conc_parser = parser.add_mutually_exclusive_group()
    conc_parser.add_argument('-c',
                             '--all-concurrent',
                             action="store_const",
                             default="some",
                             const="all",
                             dest="concurrency",
                             help="Run all tests concurrently")
    conc_parser.add_argument("-1",
                             "--no-concurrency",
                             action="store_const",
                             default="some",
                             const="none",
                             dest="concurrency",
                             help="Disable concurrent test runs")
    parser.add_argument("-p",
                        "--platform",
                        choices=["glx", "x11_egl", "wayland", "gbm"],
                        help="Name of windows system passed to waffle")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("--valgrind",
                        action="store_true",
                        help="Run tests in valgrind's memcheck")
    parser.add_argument("--dmesg",
                        action="store_true",
                        help="Capture a difference in dmesg before and "
                        "after each test. Implies -1/--no-concurrency")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Produce a line of output for each test before "
                        "and after it runs")
    parser.add_argument("test_profile",
                        metavar="<Path to one or more test profile(s)>",
                        nargs='+',
                        help="Path to testfile to run")
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    args = parser.parse_args(input_)

    # Disable Windows error message boxes for this and all child processes.
    if sys.platform == 'win32':
        # This disables message boxes for uncaught exceptions, but it will not
        # disable the message boxes for assertion failures or abort().  Those
        # are created not by the system but by the CRT itself, and must be
        # disabled by the child processes themselves.
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    # If dmesg is requested we must have a serial run, because dmesg
    # isn't reliable with a threaded run.
    if args.dmesg:
        args.concurrency = "none"

    # Read the config file
    core.get_config(args.config_file)

    # Pass arguments into Options
    opts = core.Options(concurrent=args.concurrency,
                        exclude_filter=args.exclude_tests,
                        include_filter=args.include_tests,
                        execute=args.execute,
                        valgrind=args.valgrind,
                        dmesg=args.dmesg,
                        verbose=args.verbose)

    # Set the platform to pass to waffle
    if args.platform:
        opts.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)
    core.checkDir(args.results_path, False)

    results = framework.results.TestrunResult()

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Begin json.
    result_filepath = path.join(args.results_path, 'results.json')
    # NOTE(review): result_file is never closed here; presumably JSONWriter /
    # close_json() takes ownership of the handle -- confirm.
    result_file = open(result_filepath, 'w')
    json_writer = framework.results.JSONWriter(result_file)

    # Create a dictionary to pass to initialize json, it needs the contents of
    # the env dictionary and profile and platform information
    options = {'profile': args.test_profile}
    # core.Options iterates as (key, value) pairs; copy them all in.
    for key, value in opts:
        options[key] = value
    if args.platform:
        options['platform'] = args.platform
    json_writer.initialize_json(options, results.name,
                                core.collect_system_info())

    json_writer.write_dict_key('tests')
    json_writer.open_dict()

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path

    time_start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg
    profile.run(opts, json_writer)
    time_end = time.time()

    json_writer.close_dict()

    results.time_elapsed = time_end - time_start
    json_writer.write_dict_item('time_elapsed', results.time_elapsed)

    # End json.
    json_writer.close_json()

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + result_filepath)
Beispiel #42
0
def setup_module():
    """Start this test module with a clean, freshly-loaded piglit config."""
    clean = core.PiglitConfig(allow_no_value=True)
    core.PIGLIT_CONFIG = clean
    core.get_config()