Example #1
    def test_interupted(self, tmpdir, mock_backend):  # pylint: disable=unused-argument,redefined-outer-name
        """backends.load: works for resuming (no extension known)."""
        tmpdir.mkdir('tests')
        with tmpdir.join('tests', '0.test_backend').open('w') as f:
            f.write('foo')

        backends.load(six.text_type(tmpdir))
Example #2
def test_load_notimplemented():
    """backends.load(): An error is raised if a loader isn't properly implmented.
    """
    file_path = 'foo.test_backend'
    with open(file_path, 'w') as f:
        f.write('foo')

    backends.load(file_path)
Example #3
def test_load_unknown():
    """backends.load(): An error is raised if no modules supportes `extension`
    """
    file_path = 'foo.test_extension'

    with open(file_path, 'w') as f:
        f.write('foo')
    backends.load(file_path)
Example #4
def test_load_unknown():
    """backends.load(): An error is raised if no modules supportes `extension`
    """
    file_path = 'foo.test_extension'

    with open(file_path, 'w') as f:
        f.write('foo')
    backends.load(file_path)
Example #5
def test_load_notimplemented():
    """backends.load(): An error is raised if a loader isn't properly implmented.
    """
    file_path = 'foo.test_backend'
    with open(file_path, 'w') as f:
        f.write('foo')

    backends.load(file_path)
Example #6
def test_load_trailing_dot():
    """framework.backends.load: handles the result name ending in '.'

    Basically, if this reaches a BackendNotImplementedError, then the trailing
    '.' was handled correctly; otherwise, if the '.' is treated as the
    extension, we reach the BackendError, which is incorrect.

    """
    backends.load('foo.test_backend..gz')
Example #7
def test_load_trailing_dot():
    """framework.backends.load: handles the result name ending in '.'

    Basically, if this reaches a BackendNotImplementedError, then the trailing
    '.' was handled correctly; otherwise, if the '.' is treated as the
    extension, we reach the BackendError, which is incorrect.

    """
    backends.load('foo.test_backend..gz')
Example #8
def test_load_old():
    """backends.load(): Ignores files ending in '.old'

    If this raises a BackendError it means it didn't find a backend to use,
    thus it skipped the file ending in '.old'.

    """
    os.mkdir('test')
    file_path = os.path.join('test', 'results.test_backend.old')
    with open(file_path, 'w') as f:
        f.write('foo')

    backends.load('test')
Example #9
def test_load_old():
    """backends.load(): Ignores files ending in '.old'

    If this raises a BackendError it means it didn't find a backend to use,
    thus it skipped the file ending in '.old'.

    """
    os.mkdir('test')
    file_path = os.path.join('test', 'results.test_backend.old')
    with open(file_path, 'w') as f:
        f.write('foo')

    backends.load('test')
Example #10
    def test_old(self, tmpdir, mock_backend):  # pylint: disable=unused-argument,redefined-outer-name
        """backends.load: Ignores files ending in '.old'.

        If this raises a BackendError it means it didn't find a backend to use,
        thus it skipped the file ending in '.old'.
        """
        tmpdir.mkdir('test')
        p = tmpdir.join('test')
        with p.join('results.test_backend.old').open('w') as f:
            f.write('foo')

        with pytest.raises(backends.BackendError):
            backends.load(six.text_type(p))
Example #11
def test_load_resume():
    """backends.load: works for resuming (no extension known)."""
    backends.BACKENDS['test_backend'] = backends.register.Registry(
        extensions=['.test_backend'],
        backend=None,
        load=lambda x, y: x,
        meta=None,
    )
    os.mkdir('tests')
    name = os.path.join('tests', '0.test_backend')
    with open(name, 'w') as f:
        f.write('foo')

    backends.load('.')
Example #12
def test_load_resume():
    """backends.load: works for resuming (no extension known)."""
    backends.BACKENDS['test_backend'] = backends.register.Registry(
        extensions=['.test_backend'],
        backend=None,
        load=lambda x, y: x,
        meta=None,
    )
    os.mkdir('tests')
    name = os.path.join('tests', '0.test_backend')
    with open(name, 'w') as f:
        f.write('foo')

    backends.load('.')
Example #13
def aggregate(input_):
    """Combine files in a tests/ directory into a single results file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('results_folder',
                        type=path.realpath,
                        metavar="<results path>",
                        help="Path to a results folder")
    parser.add_argument('-o',
                        '--output',
                        default="results.json",
                        help="name of output file. Default: results.json")
    args = parser.parse_args(input_)

    assert os.path.isdir(args.results_folder)

    outfile = os.path.join(args.results_folder, args.output)
    try:
        results = backends.load(args.results_folder)
    except backends.errors.ResultsLoadError as e:
        print('Error: {}'.format(e.message), file=sys.stderr)
        sys.exit(1)

    try:
        # FIXME: This works, it fixes the problem, but it only works because
        # only the json backend has the ability to aggregate results at the
        # moment.
        backends.json._write(results, outfile)
    except IOError as e:
        if e.errno == errno.EPERM:
            print("Error: Unable to write aggregated file, permission denied.",
                  file=sys.stderr)
            sys.exit(1)
        raise

    print("Aggregated file written to: {}".format(outfile))
Example #14
def csv(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-o",
                        "--output",
                        metavar="<Output File>",
                        action="store",
                        dest="output",
                        default="stdout",
                        help="Output filename")
    parser.add_argument("testResults",
                        metavar="<Input Files>",
                        help="JSON results file to be converted")
    args = parser.parse_args(input_)

    try:
        testrun = backends.load(args.testResults)
    except backends.errors.ResultsLoadError as e:
        print('Error: {}'.format(e.message), file=sys.stderr)
        sys.exit(1)

    def write_results(output):
        for name, result in testrun.tests.iteritems():
            output.write("{},{},{},{}\n".format(name, result['time'],
                                                result['returncode'],
                                                result['result']))

    if args.output != "stdout":
        with open(args.output, 'w') as output:
            write_results(output)
    else:
        write_results(sys.stdout)
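
A minimal usage sketch for the csv() entry point above, assuming a JSON results file named results.json exists (both file names are placeholders):

# Hypothetical call; writes one 'name,time,returncode,result' row per test.
csv(['results.json', '--output', 'results.csv'])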
Example #15
def csv(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output",
                        metavar="<Output File>",
                        action="store",
                        dest="output",
                        default="stdout",
                        help="Output filename")
    parser.add_argument("testResults",
                        metavar="<Input Files>",
                        help="JSON results file to be converted")
    args = parser.parse_args(input_)

    try:
        testrun = backends.load(args.testResults)
    except backends.errors.ResultsLoadError as e:
        print('Error: {}'.format(e.message), file=sys.stderr)
        sys.exit(1)

    def write_results(output):
        for name, result in testrun.tests.iteritems():
            output.write("{},{},{},{}\n".format(name, result['time'],
                                                result['returncode'],
                                                result['result']))

    if args.output != "stdout":
        with open(args.output, 'w') as output:
            write_results(output)
    else:
        write_results(sys.stdout)
Example #16
def console(resultsFiles, mode):
    """ Write summary information to the console for the given list of
    results files in the given mode."""
    assert mode in [
        'summary', 'diff', 'incomplete', 'fixes', 'problems', 'regressions',
        'all'
    ], mode
    results = Results([backends.load(r) for r in resultsFiles])

    # Print the name of the test and the status from each test run
    if mode == 'all':
        _print_result(results, results.names.all)
        _print_summary(results)
    elif mode == 'diff':
        _print_result(results, results.names.all_changes)
        _print_summary(results)
    elif mode == 'incomplete':
        _print_result(results, results.names.all_incomplete)
    elif mode == 'fixes':
        _print_result(results, results.names.all_fixes)
    elif mode == 'problems':
        _print_result(results, results.names.all_problems)
    elif mode == 'regressions':
        _print_result(results, results.names.all_regressions)
    elif mode == 'summary':
        _print_summary(results)
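
A hedged usage sketch for console(): the mode must be one of the strings checked by the assert above, and the result file names are placeholders.

# Hypothetical call comparing two runs and printing only the changed tests.
console(['before.json', 'after.json'], 'diff')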
Example #17
def test_load():
    """backends.load(): works as expected.

    This is an interesting function to test, because it is just a wrapper that
    returns a TestrunResult object. So most of the testing should be happening
    in the tests for each backend.

    However, we can test this by injecting a fake backend, and ensuring that we
    get back what we expect. What we do is inject list(), which means that we
    should get back [file_path].

    """
    backends.BACKENDS['test_backend'] = backends.register.Registry(
        extensions=['.test_extension'],
        backend=None,
        load=lambda x, y: [x],  # y is for a compression value
        meta=None,
    )

    file_path = 'foo.test_extension'
    with open(file_path, 'w') as f:
        f.write('foo')

    test = backends.load(file_path)
    nt.assert_list_equal([file_path], test)
Example #18
    def test_basic(self, mocker, tmpdir):  # pylint: disable=unused-argument
        """backends.load: works as expected.

        This is an interesting function to test, because it is just a wrapper
        that returns a TestrunResult object. So most of the testing should be
        happening in the tests for each backend.

        However, we can test this by injecting a fake backend, and ensuring
        that we get back what we expect. What we do is inject list(), which
        means that we should get back [file_path], instead of just file_path,
        like the legitimate backends return.
        """
        mocker.patch.dict(
            backends.BACKENDS,
            {'test_backend': backends.register.Registry(
                extensions=['.test_backend'],
                backend=None,
                load=lambda x, _: [x],
                meta=None,
            )})

        p = tmpdir.join('foo.test_backend')
        p.write('foo')
        test = backends.load(six.text_type(p))
        assert [six.text_type(p)] == test
Example #19
def csv(input_):
    unparsed = parsers.parse_config(input_)[1]

    # Adding the parent is necessary to get the help options
    parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
    parser.add_argument("-o", "--output",
                        metavar="<Output File>",
                        action="store",
                        dest="output",
                        default="stdout",
                        help="Output filename")
    parser.add_argument("testResults",
                        metavar="<Input Files>",
                        help="JSON results file to be converted")
    args = parser.parse_args(unparsed)

    testrun = backends.load(args.testResults)

    def write_results(output):
        for name, result in testrun.tests.iteritems():
            output.write("{},{},{},{}\n".format(name, result.time,
                                                result.returncode,
                                                result.result))

    if args.output != "stdout":
        with open(args.output, 'w') as output:
            write_results(output)
    else:
        write_results(sys.stdout)
Example #20
def csv(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output",
                        metavar="<Output File>",
                        action="store",
                        dest="output",
                        default="stdout",
                        help="Output filename")
    parser.add_argument("testResults",
                        metavar="<Input Files>",
                        help="JSON results file to be converted")
    args = parser.parse_args(input_)

    testrun = backends.load(args.testResults)

    def write_results(output):
        for name, result in testrun.tests.iteritems():
            output.write("{},{},{},{}\n".format(name, result.get('time', ""),
                                                result.get('returncode', ""),
                                                result['result']))

    if args.output != "stdout":
        with open(args.output, 'w') as output:
            write_results(output)
    else:
        write_results(sys.stdout)
Example #21
def test_load():
    """backends.load(): works as expected.

    This is an interesting function to test, because it is just a wrapper that
    returns a TestrunResult object. So most of the testing should be happening
    in the tests for each backend.

    However, we can test this by injecting a fake backend, and ensuring that we
    get back what we expect. What we do is inject list(), which means that we
    should get back [file_path].

    """
    backends.BACKENDS['test_backend'] = backends.register.Registry(
        extensions=['.test_extension'],
        backend=None,
        load=lambda x, y: [x],  # y is for a compression value
        meta=None,
    )

    file_path = 'foo.test_extension'
    with open(file_path, 'w') as f:
        f.write('foo')

    test = backends.load(file_path)
    nt.assert_list_equal([file_path], test)
Example #22
def aggregate(input_):
    """Combine files in a tests/ directory into a single results file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('results_folder',
                        type=path.realpath,
                        metavar="<results path>",
                        help="Path to a results folder")
    parser.add_argument('-o', '--output',
                        default="results.json",
                        help="name of output file. Default: results.json")
    args = parser.parse_args(input_)

    assert os.path.isdir(args.results_folder)

    outfile = os.path.join(args.results_folder, args.output)
    results = backends.load(args.results_folder)

    try:
        # FIXME: This works, it fixes the problem, but it only works because
        # only the json backend has the ability to aggregate results at the
        # moment.
        backends.json._write(results, outfile)
    except IOError as e:
        if e.errno == errno.EPERM:
            raise exceptions.PiglitFatalError(
                "Unable to write aggregated file, permission denied.")
        raise

    print("Aggregated file written to: {}".format(outfile))
Example #23
def csv(input_):
    unparsed = parsers.parse_config(input_)[1]

    # Adding the parent is necessary to get the help options
    parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
    parser.add_argument("-o",
                        "--output",
                        metavar="<Output File>",
                        action="store",
                        dest="output",
                        default="stdout",
                        help="Output filename")
    parser.add_argument("testResults",
                        metavar="<Input Files>",
                        help="JSON results file to be converted")
    args = parser.parse_args(unparsed)

    testrun = backends.load(args.testResults)

    def write_results(output):
        for name, result in six.iteritems(testrun.tests):
            output.write("{},{},{},{}\n".format(name, result.time.total,
                                                result.returncode,
                                                result.result))

    if args.output != "stdout":
        with open(args.output, 'w') as output:
            write_results(output)
    else:
        write_results(sys.stdout)
Example #24
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("-n",
                        "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.concurrent = results.options['concurrent']
    options.OPTIONS.exclude_filter = results.options['exclude_filter']
    options.OPTIONS.include_filter = results.options['include_filter']
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.dmesg = results.options['dmesg']
    options.OPTIONS.sync = results.options['sync']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path, file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            options.OPTIONS.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if options.OPTIONS.dmesg:
        profile.dmesg = options.OPTIONS.dmesg

    # This is resumed, don't bother with time since it won't be accurate anyway
    profile.run(results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #25
File: run.py Project: bpeel/piglit
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    opts = core.Options(concurrent=results.options['concurrent'],
                        exclude_filter=results.options['exclude_filter'],
                        include_filter=results.options['filter'],
                        execute=results.options['execute'],
                        valgrind=results.options['valgrind'],
                        dmesg=results.options['dmesg'],
                        sync=results.options['sync'])

    core.get_config(args.config_file)

    opts.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_fsync=opts.sync,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    for name, result in results.tests.iteritems():
        if args.no_retry or result['result'] != 'incomplete':
            opts.exclude_tests.add(name)

    profile = framework.profile.merge_test_profiles(results.options['profile'])
    profile.results_dir = args.results_path
    if opts.dmesg:
        profile.dmesg = opts.dmesg

    # This is resumed, don't bother with time since it won't be accurate anyway
    profile.run(opts, results.options['log_level'], backend)

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #26
    def test_trailing_dot(self, mocker):
        """framework.backends.load: handles the result name ending in '.'.

        Basically, if this reaches a BackendNotImplementedError, then the
        trailing '.' was handled correctly; otherwise, if the '.' is treated
        as the extension, we reach the BackendError, which is incorrect.
        """
        mocker.patch.dict(
            backends.BACKENDS,
            {'test_backend': backends.register.Registry(
                extensions=['.test_backend'],
                backend=None,
                load=None,
                meta=None,
            )})
        with pytest.raises(backends.BackendNotImplementedError):
            backends.load('foo.test_backend..gz')
Example #27
    def test_notimplemented(self, tmpdir, mocker):
        """backends.load(): An error is raised if a loader isn't properly
        implemented.
        """
        mocker.patch.dict(
            backends.BACKENDS,
            {'test_backend': backends.register.Registry(
                extensions=['.test_backend'],
                backend=None,
                load=None,
                meta=None,
            )})
        p = tmpdir.join('foo.test_backend')
        p.write('foo')

        with pytest.raises(backends.BackendNotImplementedError):
            backends.load(six.text_type(p))
Example #28
def feat(results, destination, feat_desc):
    """Produce HTML feature readiness summary."""

    feat_res = FeatResults([backends.load(i) for i in results], feat_desc)

    _copy_static_files(destination)
    _make_testrun_info(feat_res, destination)
    _make_feature_info(feat_res, destination)
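
A sketch of how feat() might be called, assuming feat_desc is the path to a feature-description file (its exact format is not shown here); all three arguments are placeholders.

# Hypothetical call; builds an HTML feature-readiness summary for two runs.
feat(['run1.json', 'run2.json'], 'feat-summary', 'features.json')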
Example #29
def test_load_json():
    """backends.load(): Loads .json files."""
    with utils.tempdir() as tdir:
        filename = os.path.join(tdir, 'results.json')
        with open(filename, 'w') as f:
            json.dump(utils.JSON_DATA, f)

        result = backends.load(filename)

    nt.assert_is_instance(result, results.TestrunResult)
    nt.assert_in('sometest', result.tests)
Example #30
def test_load_json():
    """backends.load(): Loads .json files."""
    with utils.nose.tempdir() as tdir:
        filename = os.path.join(tdir, 'results.json')
        with open(filename, 'w') as f:
            json.dump(utils.piglit.JSON_DATA, f, default=backends.json.piglit_encoder)

        result = backends.load(filename)

    nt.assert_is_instance(result, results.TestrunResult)
    nt.assert_in('sometest', result.tests)
Example #31
def test_load_json():
    """backends.load(): Loads .json files."""
    with utils.tempdir() as tdir:
        filename = os.path.join(tdir, "results.json")
        with open(filename, "w") as f:
            json.dump(utils.JSON_DATA, f)

        result = backends.load(filename)

    nt.assert_is_instance(result, results.TestrunResult)
    nt.assert_in("sometest", result.tests)
Example #32
def formatted(input_, default_format_string=DEFAULT_FMT_STR):
    # Make a copy of the status text list and add all. This is used as the
    # argument list for -e/--exclude
    statuses = set(str(s) for s in status.ALL)

    unparsed = parsers.parse_config(input_)[1]

    # Adding the parent is necessary to get the help options
    parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
    parser.add_argument("--format",
                        dest="format_string",
                        metavar="<format string>",
                        default=default_format_string,
                        action="store",
                        help="A template string that defines the format. "
                        "Replacement tokens are {name}, {time}, "
                        "{returncode} and {result}")
    parser.add_argument("-e",
                        "--exclude-details",
                        default=[],
                        action="append",
                        choices=statuses,
                        help="Optionally exclude the listing of tests with "
                        "the status(es) given as arguments. "
                        "May be used multiple times")
    parser.add_argument("-o",
                        "--output",
                        metavar="<Output File>",
                        action="store",
                        dest="output",
                        default="stdout",
                        help="Output filename")
    parser.add_argument("test_results",
                        metavar="<Input Files>",
                        help="JSON results file to be converted")
    args = parser.parse_args(unparsed)

    testrun = backends.load(args.test_results)

    def write_results(output):
        for name, result in testrun.tests.items():
            if result.result in args.exclude_details:
                continue
            output.write((args.format_string + "\n").format(
                name=name,
                time=result.time.total,
                returncode=result.returncode,
                result=result.result))

    if args.output != "stdout":
        with open(args.output, 'w') as output:
            write_results(output)
    else:
        write_results(sys.stdout)
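
A usage sketch for formatted(), reusing the replacement tokens named in the --format help text; the results file name is a placeholder.

# Hypothetical call that prints only the name and result of each test.
formatted(['results.json', '--format', '{name}: {result}'])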
Example #33
def formatted(input_, default_format_string=DEFAULT_FMT_STR):
    # Make a copy of the status text list and add all. This is used as the
    # argument list for -e/--exclude
    statuses = set(str(s) for s in status.ALL)

    unparsed = parsers.parse_config(input_)[1]

    # Adding the parent is necessary to get the help options
    parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
    parser.add_argument("--format",
                        dest="format_string",
                        metavar="<format string>",
                        default=default_format_string,
                        action="store",
                        help="A template string that defines the format. "
                             "Replacement tokens are {name}, {time}, "
                             "{returncode} and {result}")
    parser.add_argument("-e", "--exclude-details",
                        default=[],
                        action="append",
                        choices=statuses,
                        help="Optionally exclude the listing of tests with "
                             "the status(es) given as arguments. "
                             "May be used multiple times")
    parser.add_argument("-o", "--output",
                        metavar="<Output File>",
                        action="store",
                        dest="output",
                        default="stdout",
                        help="Output filename")
    parser.add_argument("test_results",
                        metavar="<Input Files>",
                        help="JSON results file to be converted")
    args = parser.parse_args(unparsed)

    testrun = backends.load(args.test_results)

    def write_results(output):
        for name, result in six.iteritems(testrun.tests):
            if result.result in args.exclude_details:
                continue
            output.write((args.format_string + "\n").format(
                name=name,
                time=result.time.total,
                returncode=result.returncode,
                result=result.result))

    if args.output != "stdout":
        with open(args.output, 'w') as output:
            write_results(output)
    else:
        write_results(sys.stdout)
Example #34
def html(results, destination, exclude):
    """
    Produce HTML summaries.

    Basically all this does is take the information provided by the
    constructor and pass it to mako templates to generate HTML files.
    The beauty of this approach is that mako is leveraged to do the
    heavy lifting, this method just passes it a bunch of dicts and lists
    of dicts, which mako turns into pretty HTML.
    """
    results = Results([backends.load(i) for i in results])

    _copy_static_files(destination)
    _make_testrun_info(results, destination, exclude)
    _make_comparison_pages(results, destination, exclude)
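
A hedged usage sketch for html(); the result files and destination directory are placeholders, and exclude is assumed here to be an empty collection of statuses to omit.

# Hypothetical call generating HTML comparison pages for two runs.
html(['before.json', 'after.json'], 'summary/html', set())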
Example #35
def console(results, mode):
    """ Write summary information to the console """
    assert mode in ['summary', 'diff', 'incomplete', 'all'], mode
    results = Results([backends.load(r) for r in results])

    # Print the name of the test and the status from each test run
    if mode == 'all':
        _print_result(results, results.names.all)
        _print_summary(results)
    elif mode == 'diff':
        _print_result(results, results.names.all_changes)
        _print_summary(results)
    elif mode == 'incomplete':
        _print_result(results, results.names.all_incomplete)
    elif mode == 'summary':
        _print_summary(results)
Example #36
def console(results, mode):
    """ Write summary information to the console """
    assert mode in ['summary', 'diff', 'incomplete', 'all'], mode
    results = Results([backends.load(r) for r in results])

    # Print the name of the test and the status from each test run
    if mode == 'all':
        _print_result(results, results.names.all)
        _print_summary(results)
    elif mode == 'diff':
        _print_result(results, results.names.all_changes)
        _print_summary(results)
    elif mode == 'incomplete':
        _print_result(results, results.names.all_incomplete)
    elif mode == 'summary':
        _print_summary(results)
Example #37
def aggregate(input_):
    """Combine files in a tests/ directory into a single results file."""
    unparsed = parsers.parse_config(input_)[1]

    # Adding the parent is necessary to get the help options
    parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
    parser.add_argument('results_folder',
                        type=path.realpath,
                        metavar="<results path>",
                        help="Path to a results directory "
                        "(which contains a tests directory)")
    parser.add_argument('-o',
                        '--output',
                        default="results.json",
                        help="name of output file. Default: results.json")
    args = parser.parse_args(unparsed)

    assert os.path.isdir(args.results_folder)

    # args.results_folder must be a path with a 'tests' directory in it, not
    # the tests directory itself.
    outfile = os.path.join(args.results_folder, args.output)
    try:
        results = backends.load(args.results_folder)
    except backends.BackendError:
        raise exceptions.PiglitFatalError(
            'Cannot find a tests directory to aggregate in {}.\n'
            'Are you sure that you pointed to '
            'a results directory (not results/tests)?'.format(
                args.results_folder))

    try:
        # FIXME: This works, it fixes the problem, but it only works because
        # only the json backend has the ability to aggregate results at the
        # moment.
        backends.json._write(results, outfile)
    except IOError as e:
        if e.errno == errno.EPERM:
            raise exceptions.PiglitFatalError(
                "Unable to write aggregated file, permission denied.")
        raise

    print("Aggregated file written to: {}.{}".format(
        outfile, backends.compression.get_mode()))
Example #38
def aggregate(input_):
    """Combine files in a tests/ directory into a single results file."""
    unparsed = parsers.parse_config(input_)[1]

    # Adding the parent is necessary to get the help options
    parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
    parser.add_argument('results_folder',
                        type=path.realpath,
                        metavar="<results path>",
                        help="Path to a results directory "
                             "(which contains a tests directory)")
    parser.add_argument('-o', '--output',
                        default="results.json",
                        help="name of output file. Default: results.json")
    args = parser.parse_args(unparsed)

    assert os.path.isdir(args.results_folder)

    # args.results_folder must be a path with a 'tests' directory in it, not
    # the tests directory itself.
    outfile = os.path.join(args.results_folder, args.output)
    try:
        results = backends.load(args.results_folder)
    except backends.BackendError:
        raise exceptions.PiglitFatalError(
            'Cannot find a tests directory to aggregate in {}.\n'
            'Are you sure that you pointed to '
            'a results directory (not results/tests)?'.format(args.results_folder))

    try:
        # FIXME: This works, it fixes the problem, but it only works because
        # only the json backend has the ability to aggregate results at the
        # moment.
        backends.json._write(results, outfile)
    except IOError as e:
        if e.errno == errno.EPERM:
            raise exceptions.PiglitFatalError(
                "Unable to write aggregated file, permission denied.")
        raise

    print("Aggregated file written to: {}.{}".format(
        outfile, backends.compression.get_mode()))
Example #39
    def __init__(self, resultfiles):
        """
        Create an initial object with all of the result information rolled up
        in an easy to process form.

        The constructor of the summary class has an attribute for each HTML
        summary page, which are fed into the index.mako file to produce HTML
        files. resultfiles is a list of paths to JSON results generated by
        piglit-run.
        """

        # Create a Result object for each piglit result and append it to the
        # results list
        self.results = [backends.load(i) for i in resultfiles]

        self.status = {}
        self.fractions = {}
        self.totals = {}
        self.tests = {'all': set(), 'changes': set(), 'problems': set(),
                      'skipped': set(), 'regressions': set(), 'fixes': set(),
                      'enabled': set(), 'disabled': set(), 'incomplete': set()}

        def fgh(test, result):
            """ Helper for updating the fractions and status lists """
            fraction[test] = tuple(
                [sum(i) for i in zip(fraction[test], result.fraction)])

            # If the new status is worse update it, or if the new status is
            # SKIP (which is equivalent to notrun) and the current is NOTRUN
            # update it
            if (status[test] < result or
                    (result == so.SKIP and status[test] == so.NOTRUN)):
                status[test] = result

        for results in self.results:
            # Create a set of all of the test names across all of the runs
            self.tests['all'] = set(self.tests['all'] | set(results.tests))

            # Create two dictionaries that have a default factory: they return
            # a default value instead of a key error.
            # This default key must be callable
            self.fractions[results.name] = \
                collections.defaultdict(lambda: (0, 0))
            self.status[results.name] = \
                collections.defaultdict(lambda: so.NOTRUN)

            # short names
            fraction = self.fractions[results.name]
            status = self.status[results.name]

            # store the results to be appended to results. Adding them in the
            # loop will cause a RuntimeError
            temp_results = {}

            for key, value in results.tests.iteritems():
                # if the first character of key is a / then our while loop will
                # become an infinite loop. Beyond that / should never be the
                # leading character; if it is, then there is a bug in one of the
                # test profiles.
                assert key[0] != '/'

                # Treat a test with subtests as if it is a group, assign the
                # subtests' statuses and fractions down to the test, and then
                # proceed like normal.
                if 'subtest' in value:
                    for (subt, subv) in value['subtest'].iteritems():
                        subt = grouptools.join(key, subt)
                        subv = so.status_lookup(subv)

                        # Add the subtest to the fractions and status lists
                        fraction[subt] = subv.fraction
                        status[subt] = subv
                        temp_results.update({subt: {'result': subv}})

                        self.tests['all'].add(subt)
                        while subt != '':
                            fgh(subt, subv)
                            subt = grouptools.groupname(subt)
                        fgh('all', subv)

                    # remove the test from the 'all' list, this will cause it
                    # to be treated as a group
                    self.tests['all'].discard(key)
                else:
                    # Walk the test name as if it was a path, at each level
                    # update the tests passed over the total number of tests
                    # (fractions), and update the status of the current level
                    # if the status of the previous level was worse, but is not
                    # skip
                    while key != '':
                        fgh(key, value['result'])
                        key = grouptools.groupname(key)

                    # when we hit the root update the 'all' group and stop
                    fgh('all', value['result'])

            # Update the results.tests dictionary with the subtests so that
            # they are entered into the appropriate pages other than all.
            # Updating it in the loop will raise a RuntimeError
            for key, value in temp_results.iteritems():
                results.tests[key] = value

        # Create the lists of statuses like problems, regressions, fixes,
        # changes and skips
        for test in self.tests['all']:
            status = []
            for each in self.results:
                try:
                    status.append(each.tests[test]['result'])
                except KeyError:
                    status.append(so.NOTRUN)

            # Problems include: warn, dmesg-warn, fail, dmesg-fail, and crash.
            # Skip does not go on this page, it has the 'skipped' page
            if max(status) > so.PASS:
                self.tests['problems'].add(test)

            # Find all tests with a status of skip
            if so.SKIP in status:
                self.tests['skipped'].add(test)

            if so.INCOMPLETE in status:
                self.tests['incomplete'].add(test)

            # find fixes, regressions, and changes
            for i in xrange(len(status) - 1):
                first = status[i]
                last = status[i + 1]
                if first in [so.SKIP, so.NOTRUN] and \
                        last not in [so.SKIP, so.NOTRUN]:
                    self.tests['enabled'].add(test)
                    self.tests['changes'].add(test)
                elif last in [so.SKIP, so.NOTRUN] and \
                        first not in [so.SKIP, so.NOTRUN]:
                    self.tests['disabled'].add(test)
                    self.tests['changes'].add(test)
                elif first < last:
                    self.tests['regressions'].add(test)
                    self.tests['changes'].add(test)
                elif first > last:
                    self.tests['fixes'].add(test)
                    self.tests['changes'].add(test)
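
To illustrate the fraction roll-up performed by the fgh() helper above, here is a small sketch assuming a result whose fraction is (1, 2), i.e. one pass out of two subtests; the test name is a placeholder.

# The defaultdict starts every group at (0, 0); each result's fraction is
# summed element-wise into it as the name is walked up the group tree.
fraction = {'some/test': (0, 0)}
result_fraction = (1, 2)
fraction['some/test'] = tuple(
    sum(i) for i in zip(fraction['some/test'], result_fraction))
# fraction['some/test'] is now (1, 2); a second (1, 2) run would give (2, 4).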
Example #40
    def test_unknown(self, tmpdir):
        p = tmpdir.join('foo.test_extension')
        p.write('foo')

        with pytest.raises(backends.BackendError):
            backends.load(six.text_type(p))
Example #41
def resume(input_):
    unparsed = parsers.parse_config(input_)[1]

    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f",
                        "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                        "Default is piglit.conf")
    parser.add_argument("-n",
                        "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    parser.add_argument(
        '-j',
        '--jobs',
        dest='jobs',
        action='store',
        type=int,
        default=core.PIGLIT_CONFIG.safe_get('core', 'jobs', None),
        help='Set the maximum number of jobs to run concurrently. '
        'By default, the reported number of CPUs is used.')
    args = parser.parse_args(unparsed)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']
    options.OPTIONS.jobs = args.jobs
    options.OPTIONS.no_retry = args.no_retry

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']
    base.Test.timeout = results.options['timeout']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path, file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [
        profile.load_test_profile(p) for p in results.options['profile']
    ]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if results.options['ignore_missing']:
            p.options['ignore_missing'] = results.options['ignore_missing']

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is resumed, don't bother with time since it won't be accurate anyway
    try:
        profile.run(profiles, results.options['log_level'], backend,
                    results.options['concurrent'], args.jobs)
    except exceptions.PiglitUserError as e:
        if str(e) != 'no matching tests':
            raise

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
Example #42
def resume(input_):
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [profile.load_test_profile(p)
                for p in results.options['profile']]
    for p in profiles:
        p.results_dir = args.results_path

        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])

        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])

        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))

        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is resumed, don't bother with time since it won't be accurate anyway
    profile.run(
        profiles,
        results.options['log_level'],
        backend,
        results.options['concurrent'])

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))