Esempio n. 1
0
 def test_exists_fail(self, mocker, tmpdir):
     """core.check_dir: raise PiglitException when the directory exists and
     failifexists is True.

     os.stat is patched to raise OSError; check_dir is expected to convert
     that into exceptions.PiglitException when failifexists is set.
     """
     tmpdir.chdir()
     mocker.patch('framework.core.os.stat', mocker.Mock(side_effect=OSError))
     with pytest.raises(exceptions.PiglitException):
         core.check_dir('foo', True)
Esempio n. 2
0
 def test_exists_fail(self, mocker, tmpdir):
     """core.check_dir: an existing directory plus failifexists=True must
     produce exceptions.PiglitException.

     NOTE(review): duplicate of an earlier snippet in this file.
     """
     tmpdir.chdir()
     mocker.patch('framework.core.os.stat', mocker.Mock(side_effect=OSError))
     with pytest.raises(exceptions.PiglitException):
         core.check_dir('foo', True)
Esempio n. 3
0
def test_check_dir_stat_ENOTDIR():
    """core.check_dir: continue and create the dir when os.stat reports
    ENOTDIR and failifexists is False."""
    stat_error = OSError('foo', errno.ENOTDIR)
    with mock.patch('framework.core.os.stat',
                    mock.Mock(side_effect=stat_error)), \
            mock.patch('framework.core.os.makedirs') as mocked_makedirs:
        core.check_dir('foo', False)
        nt.eq_(mocked_makedirs.called, 1)
Esempio n. 4
0
def test_check_dir_stat_FileNotFoundError():
    """core.check_dir: a FileNotFoundError from os.stat with
    failifexists False leads to the directory being created."""
    with mock.patch('framework.core.os.stat',
                    mock.Mock(side_effect=FileNotFoundError)), \
            mock.patch('framework.core.os.makedirs') as mocked_makedirs:
        core.check_dir('foo', False)
        nt.eq_(mocked_makedirs.called, 1)
Esempio n. 5
0
    def test_makedirs_fail(self, mocker, tmpdir):
        """core.check_dir: any non-EEXIST error from os.makedirs must
        propagate to the caller."""
        tmpdir.chdir()
        # patch() with keyword args builds the MagicMock itself.
        mocker.patch('framework.core.os.makedirs', side_effect=OSError)

        with pytest.raises(OSError):
            core.check_dir('foo', False)
Esempio n. 6
0
    def test_makedirs_fail(self, mocker, tmpdir):
        """core.check_dir: a generic OSError raised by os.makedirs is
        re-raised rather than swallowed."""
        tmpdir.chdir()
        broken_makedirs = mocker.Mock(side_effect=OSError)
        mocker.patch('framework.core.os.makedirs', broken_makedirs)

        with pytest.raises(OSError):
            core.check_dir('foo', False)
Esempio n. 7
0
def test_check_dir_makedirs_fail():
    """core.check_dir: If makedirs fails with any other raise.

    NOTE(review): nothing in this snippet asserts that the OSError
    actually propagates -- as written, a raised OSError would error the
    test rather than pass it.  An ``@nt.raises(OSError)`` decorator was
    probably lost when this snippet was extracted; confirm against the
    original test module before relying on it.
    """
    with mock.patch('framework.core.os.stat', mock.Mock()):
        with mock.patch('framework.core.os.path.exists',
                        mock.Mock(return_value=False)):
            with mock.patch('framework.core.os.makedirs',
                            mock.Mock(side_effect=OSError)):
                core.check_dir('foo', False)
Esempio n. 8
0
    def test_exists_and_not_fail(self, mocker, tmpdir):
        """core.check_dir: an EEXIST failure from os.makedirs is not an
        error when failifexists is False."""
        tmpdir.chdir()
        mocker.patch('framework.core.os.stat', mocker.Mock())
        eexist = OSError(errno.EEXIST, 'foo')
        makedirs_mock = mocker.patch('framework.core.os.makedirs',
                                     mocker.Mock(side_effect=eexist))

        core.check_dir('foo', False)

        # stat succeeded, so makedirs should never have been attempted.
        assert not makedirs_mock.called
Esempio n. 9
0
    def test_stat_FileNotFoundError(self, mocker, tmpdir):
        """core.check_dir: create the directory when os.stat raises
        FileNotFoundError and failifexists is False."""
        tmpdir.chdir()
        mocker.patch('framework.core.os.stat',
                     side_effect=FileNotFoundError)
        made = mocker.patch('framework.core.os.makedirs')

        core.check_dir('foo', False)

        assert made.called
Esempio n. 10
0
    def test_handler(self, mocker, tmpdir):
        """core.check_dir: the handler callback runs when failifexists is
        not set."""
        class Boom(Exception):
            pass

        tmpdir.chdir()

        mocker.patch('framework.core.os.stat',
                     side_effect=OSError('foo', errno.ENOTDIR))
        with pytest.raises(Boom):
            core.check_dir('foo', handler=mocker.Mock(side_effect=Boom))
Esempio n. 11
0
    def test_exists_and_not_fail(self, mocker, tmpdir):
        """core.check_dir: pass silently when makedirs would raise EEXIST."""
        tmpdir.chdir()
        mocker.patch('framework.core.os.stat', mocker.Mock())
        mocked = mocker.patch(
            'framework.core.os.makedirs',
            mocker.Mock(side_effect=OSError(errno.EEXIST, 'foo')))

        core.check_dir('foo', False)

        assert mocked.called == 0
Esempio n. 12
0
    def test_not_exists_and_not_fail(self, mocker, tmpdir):
        """core.check_dir: ENOENT from os.stat with failifexists False
        means the directory gets created."""
        tmpdir.chdir()
        enoent = OSError('foo', errno.ENOENT)
        mocker.patch('framework.core.os.stat',
                     mocker.Mock(side_effect=enoent))
        mocked_makedirs = mocker.patch('framework.core.os.makedirs')

        core.check_dir('foo', False)

        assert mocked_makedirs.called
Esempio n. 13
0
    def test_handler(self, mocker, tmpdir):
        """core.check_dir: a custom handler is invoked instead of the
        default behaviour when failifexists is not set."""
        class Marker(Exception):
            pass

        tmpdir.chdir()

        stat_mock = mocker.Mock(side_effect=OSError('foo', errno.ENOTDIR))
        mocker.patch('framework.core.os.stat', stat_mock)
        handler = mocker.Mock(side_effect=Marker)
        with pytest.raises(Marker):
            core.check_dir('foo', handler=handler)
Esempio n. 14
0
    def test_stat_FileNotFoundError(self, mocker, tmpdir):
        """core.check_dir: FileNotFoundError from os.stat leads to the
        directory being created when failifexists is False."""
        tmpdir.chdir()
        mocker.patch('framework.core.os.stat',
                     side_effect=FileNotFoundError)
        mkdir_mock = mocker.patch('framework.core.os.makedirs')
        core.check_dir('foo', False)
        assert mkdir_mock.called == 1
Esempio n. 15
0
    def test_not_exists_and_not_fail(self, mocker, tmpdir):
        """core.check_dir: a missing directory (ENOENT) plus failifexists
        False results in a makedirs call."""
        tmpdir.chdir()
        mocker.patch('framework.core.os.stat',
                     side_effect=OSError('foo', errno.ENOENT))
        created = mocker.patch('framework.core.os.makedirs')

        core.check_dir('foo', False)

        assert created.called == 1
Esempio n. 16
0
 def _inner(self, *args, **kwargs):
     """Run the wrapped dump function with logging and error capture.

     Logs the trace being dumped, ensures the output directory exists,
     then calls ``func``.  Returns True on success; on any exception the
     failure is logged (including the exception text) and False is
     returned instead of the exception propagating.

     NOTE(review): ``func`` is a closure variable from the enclosing
     decorator, which is outside this excerpt.
     """
     try:
         DumpBackend.log('Info',
                         'Dumping trace {}'.format(self._trace_path),
                         end='...\n')
         core.check_dir(self._output_dir)
         func(self, *args, **kwargs)
         DumpBackend.log_result('OK')
         return True
     except Exception as e:
         # Deliberate best-effort: report ERROR and keep going.
         DumpBackend.log_result('ERROR')
         DumpBackend.log('Debug', '=== Failure log start ===')
         print(e)
         DumpBackend.log('Debug', '=== Failure log end ===')
         return False
Esempio n. 17
0
def _make_testrun_info(results, destination, exclude=None):
    """Create the pages for each results file.

    Writes an index.html per results file plus one HTML page per
    individual test, skipping tests whose status is in *exclude*.

    Arguments:
    results -- object whose .results attribute is a list of result objects
    destination -- output directory for the generated HTML
    exclude -- optional container of statuses to skip individual pages for

    Raises PiglitFatalError when two results share the same name.
    """
    exclude = exclude or {}
    result_css = os.path.join(destination, "result.css")
    index = os.path.join(destination, "index.html")

    for each in results.results:
        name = escape_pathname(each.name)
        try:
            # failifexists=True: duplicate names would silently overwrite
            # each other's pages, so treat a pre-existing dir as fatal.
            core.check_dir(os.path.join(destination, name), True)
        except exceptions.PiglitException:
            raise exceptions.PiglitFatalError(
                'Two or more of your results have the same "name" '
                'attribute. Try changing one or more of the "name" '
                'values in your json files.\n'
                'Duplicate value: {}'.format(name))

        with open(os.path.join(destination, name, "index.html"), 'wb') as out:
            out.write(
                _TEMPLATES.get_template('testrun_info.mako').render(
                    name=each.name,
                    totals=each.totals['root'],
                    time=each.time_elapsed.delta,
                    options=each.options,
                    info=each.info))

        # Then build the individual test results
        for key, value in each.tests.items():
            html_path = os.path.join(destination, name,
                                     escape_filename(key + ".html"))
            temp_path = os.path.dirname(html_path)

            if value.result not in exclude:
                core.check_dir(temp_path)

                try:
                    with open(html_path, 'wb') as out:
                        out.write(
                            _TEMPLATES.get_template('test_result.mako').render(
                                testname=key,
                                value=value,
                                css=os.path.relpath(result_css, temp_path),
                                index=os.path.relpath(index, temp_path)))
                except OSError as e:
                    if e.errno == errno.ENAMETOOLONG:
                        # Bug fix: was `html_name`, an undefined name that
                        # raised NameError instead of printing the warning.
                        print('WARN: filename "{}" too long'.format(html_path))
                    else:
                        raise
Esempio n. 18
0
def _make_testrun_info(results, destination, exclude=None):
    """Create the pages for each results file.

    Writes an index.html per results file plus one HTML page per
    individual test, skipping tests whose status is in *exclude*.

    Raises PiglitFatalError when two results share the same name.
    """
    exclude = exclude or {}
    result_css = os.path.join(destination, "result.css")
    index = os.path.join(destination, "index.html")

    for each in results.results:
        name = escape_pathname(each.name)
        try:
            # failifexists=True: duplicate names would silently overwrite
            # each other's pages, so treat a pre-existing dir as fatal.
            core.check_dir(os.path.join(destination, name), True)
        except exceptions.PiglitException:
            raise exceptions.PiglitFatalError(
                'Two or more of your results have the same "name" '
                'attribute. Try changing one or more of the "name" '
                'values in your json files.\n'
                'Duplicate value: {}'.format(name))

        with open(os.path.join(destination, name, "index.html"), 'wb') as out:
            out.write(_TEMPLATES.get_template('testrun_info.mako').render(
                name=each.name,
                totals=each.totals['root'],
                time=each.time_elapsed.delta,
                options=each.options,
                info=each.info))

        # Then build the individual test results
        for key, value in six.iteritems(each.tests):
            html_path = os.path.join(destination, name,
                                     escape_filename(key + ".html"))
            temp_path = os.path.dirname(html_path)

            if value.result not in exclude:
                core.check_dir(temp_path)

                try:
                    with open(html_path, 'wb') as out:
                        out.write(_TEMPLATES.get_template(
                            'test_result.mako').render(
                                testname=key,
                                value=value,
                                css=os.path.relpath(result_css, temp_path),
                                index=os.path.relpath(index, temp_path)))
                except OSError as e:
                    if e.errno == errno.ENAMETOOLONG:
                        # Bug fix: was `html_name`, an undefined name that
                        # raised NameError instead of printing the warning.
                        print('WARN: filename "{}" too long'.format(html_path))
                    else:
                        raise
Esempio n. 19
0
def _check_trace(trace_path, expected_checksum):
    """Replay a single trace and compare its image checksum to the
    expected one.

    Returns a ``(Result, json_result)`` tuple, where ``json_result``
    describes the reference and rendered images for the report output.
    """
    # Download / verify presence of the trace file itself.
    ensure_file(trace_path)

    json_result = {}

    # Results land under <results_path>/trace/<device_name>/<trace dir>.
    trace_dir = path.dirname(trace_path)
    dir_in_results = path.join('trace', OPTIONS.device_name, trace_dir)
    results_path = path.join(OPTIONS.results_path, dir_in_results)
    core.check_dir(results_path)

    checksum, image_file = _replay(path.join(OPTIONS.db_path, trace_path),
                                   results_path)

    print('[check_image]\n'
          '    actual: {}\n'
          '  expected: {}'.format(checksum or 'error', expected_checksum))

    json_result['images'] = [{
        'image_desc': trace_path,
        'image_ref': expected_checksum + '.png',
        'image_render': expected_checksum + '.png'
    }]

    # No checksum at all means the replay itself failed: report FAILURE
    # with no rendered image.
    if checksum is None:
        json_result['images'][0]['image_render'] = None
        return Result.FAILURE, json_result

    if checksum == expected_checksum:
        if not OPTIONS.keep_image:
            os.remove(image_file)
        print('[check_image] Images match for:\n  {}\n'.format(trace_path))
        result = Result.MATCH
    else:
        print('[check_image] Images differ for:\n  {}'.format(trace_path))
        print('[check_image] For more information see '
              'https://gitlab.freedesktop.org/'
              'mesa/piglit/blob/master/replayer/README.md\n')
        result = Result.DIFFER

    # Keep the rendered image (renamed to embed its checksum) when it
    # differs or the user asked to keep images.
    if result is not Result.MATCH or OPTIONS.keep_image:
        root, ext = path.splitext(image_file)
        image_file_dest = '{}-{}{}'.format(root, checksum, ext)
        shutil.move(image_file, image_file_dest)
        json_result['images'][0]['image_render'] = image_file_dest

    return result, json_result
Esempio n. 20
0
def feature(input_):
    """Entry point for the piglit feature summary command.

    Parses *input_* (argv-style list), validates the feature JSON file
    and result files, prepares the output directory, and generates the
    HTML feature summary.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-o",
                        "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("featureFile",
                        metavar="<Feature json file>",
                        help="Json file containing the features description")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="*",
                        help="Results files to include in HTML")
    args = parser.parse_args(input_)

    # A feature file and at least one results file are required.
    # parser.error() exits the process itself; wrapping it in `raise`
    # (as the original did) is dead code, so call it directly.
    if not args.featureFile and not args.resultsFiles:
        parser.error("Missing required option -l or <resultsFiles>")

    # The feature json file itself must exist.
    if not args.resultsFiles or not path.exists(args.featureFile):
        parser.error("Missing json file")

    # if overwrite is requested delete the output directory
    if path.exists(args.summaryDir) and args.overwrite:
        shutil.rmtree(args.summaryDir)

    # If the requested directory doesn't exist, create it or throw an error
    try:
        core.check_dir(args.summaryDir, not args.overwrite)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            '{} already exists.\n'
            'use -o/--overwrite if you want to overwrite it.'.format(
                args.summaryDir))

    summary.feat(args.resultsFiles, args.summaryDir, args.featureFile)
Esempio n. 21
0
def feature(input_):
    """Generate the HTML feature summary for the piglit feature command.

    *input_* is an argv-style list of command line arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("featureFile",
                        metavar="<Feature json file>",
                        help="Json file containing the features description")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="*",
                        help="Results files to include in HTML")
    args = parser.parse_args(input_)

    # A feature file and at least one results file are required.
    # parser.error() raises SystemExit itself, so the `raise` the
    # original wrapped around it was dead code -- call it directly.
    if not args.featureFile and not args.resultsFiles:
        parser.error("Missing required option -l or <resultsFiles>")

    # The feature json file itself must exist.
    if not args.resultsFiles or not path.exists(args.featureFile):
        parser.error("Missing json file")

    # if overwrite is requested delete the output directory
    if path.exists(args.summaryDir) and args.overwrite:
        shutil.rmtree(args.summaryDir)

    # If the requested directory doesn't exist, create it or throw an error
    try:
        core.check_dir(args.summaryDir, not args.overwrite)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            '{} already exists.\n'
            'use -o/--overwrite if you want to overwrite it.'.format(
                args.summaryDir))

    summary.feat(args.resultsFiles, args.summaryDir, args.featureFile)
Esempio n. 22
0
def ensure_file(file_path):
    """Ensure *file_path* exists under OPTIONS.db_path, downloading it
    when a download URL is configured.

    Raises PiglitFatalError when no URL is configured and the file is
    missing locally.
    """
    destination_file_path = path.join(OPTIONS.db_path, file_path)
    # No download URL configured: the file must already be present.
    if OPTIONS.download['url'] is None:
        if not path.exists(destination_file_path):
            raise exceptions.PiglitFatalError(
                '{} missing'.format(destination_file_path))
        return

    core.check_dir(path.dirname(destination_file_path))

    # Skip the download when the file is already cached, unless forced.
    if not OPTIONS.download['force'] and path.exists(destination_file_path):
        return

    print('[check_image] Downloading file {}'.format(
        file_path), end=' ', flush=True)
    download_time = time()
    # Stream the download in chunks so large files need not fit in memory.
    # NOTE(review): chunk_size=8194 looks like a typo for 8192, though it
    # is harmless -- confirm intent before changing.
    with open(destination_file_path, 'wb') as file:
        with requests.get(OPTIONS.download['url'].geturl() + file_path,
                          allow_redirects=True, stream=True) as r:
            r.raise_for_status()
            for chunk in r.iter_content(chunk_size=8194):
                if chunk:
                    file.write(chunk)
    print('took %ds.' % (time() - download_time), flush=True)
Esempio n. 23
0
def test_check_dir_handler():
    """core.check_dir: Handler is called if not failifexists.

    NOTE(review): the SentinalException the handler raises is never
    asserted in this excerpt; the original test most likely carried an
    ``@nt.raises`` decorator that was dropped during extraction --
    verify against the source test module.
    """
    with mock.patch('framework.core.os.stat',
                    mock.Mock(side_effect=OSError('foo', errno.ENOTDIR))):
        core.check_dir('foo',
                       handler=mock.Mock(side_effect=utils.nose.SentinalException))
Esempio n. 24
0
def test_check_dir_makedirs_pass():
    """core.check_dir: os.makedirs raising EEXIST is silently accepted."""
    eexist = OSError(errno.EEXIST, 'foo')
    with mock.patch('framework.core.os.stat', mock.Mock()), \
            mock.patch('framework.core.os.makedirs',
                       mock.Mock(side_effect=eexist)):
        core.check_dir('foo', False)
Esempio n. 25
0
def html(input_):
    """Combine files in a tests/ directory into a single results file.

    Parses *input_*, prepares the summary directory, and renders the
    combined HTML summary.  (Fix: this docstring previously appeared
    after executable statements, where it was a no-op string expression.)
    """
    # Make a copy of the status text list and add all. This is used as the
    # argument list for -e/--exclude
    statuses = set(str(s) for s in status.ALL)
    statuses.add('all')

    unparsed = parsers.parse_config(input_)[1]

    # Adding the parent is necessary to get the help options
    parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
    parser.add_argument("-o", "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("-l", "--list",
                        action="store",
                        help="Load a newline separated list of results. These "
                             "results will be prepended to any Results "
                             "specified on the command line")
    parser.add_argument("-e", "--exclude-details",
                        default=[],
                        action="append",
                        choices=statuses,
                        help="Optionally exclude the generation of HTML pages "
                             "for individual test pages with the status(es) "
                             "given as arguments. This speeds up HTML "
                             "generation, but reduces the info in the HTML "
                             "pages. May be used multiple times")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="*",
                        help="Results files to include in HTML")
    args = parser.parse_args(unparsed)

    # If args.list and args.resultsFiles are empty, then raise an error.
    # parser.error() exits itself; the `raise` around it was dead code.
    if not args.list and not args.resultsFiles:
        parser.error("Missing required option -l or <resultsFiles>")

    # Convert the exclude_details list to status objects, without this using
    # the -e option will except
    if args.exclude_details:
        # If exclude-results has all, then change it to be all
        if 'all' in args.exclude_details:
            args.exclude_details = status.ALL
        else:
            args.exclude_details = frozenset(
                status.status_lookup(i) for i in args.exclude_details)

    # if overwrite is requested delete the output directory
    if path.exists(args.summaryDir) and args.overwrite:
        shutil.rmtree(args.summaryDir)

    # If the requested directory doesn't exist, create it or throw an error
    try:
        core.check_dir(args.summaryDir, not args.overwrite)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            '{} already exists.\n'
            'use -o/--overwrite if you want to overwrite it.'.format(
                args.summaryDir))

    # Merge args.list and args.resultsFiles
    if args.list:
        args.resultsFiles.extend(core.parse_listfile(args.list))

    # Create the HTML output
    summary.html(args.resultsFiles, args.summaryDir, args.exclude_details)
Esempio n. 26
0
def test_check_dir_exists_fail():
    """core.check_dir: if the directory exists and failifexsits is True fail.

    NOTE(review): the expected PiglitException is never asserted in this
    excerpt; an ``@nt.raises`` decorator was likely dropped during
    extraction -- confirm against the original test module.
    """
    with mock.patch('framework.core.os.stat', mock.Mock(side_effect=OSError)):
        core.check_dir('foo', True)
Esempio n. 27
0
def run(input_):
    """ Function for piglit run command

    This is a function because it allows it to be shared between piglit-run.py
    and piglit run

    Arguments:
    input_ -- argv-style list of command line arguments to parse

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.concurrent = args.concurrency
    options.OPTIONS.exclude_filter = args.exclude_tests
    options.OPTIONS.include_filter = args.include_tests
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.dmesg = args.dmesg
    options.OPTIONS.monitored = args.monitored
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    # Create the results object and record the backend metadata on it.
    results = framework.results.TestrunResult()
    backends.set_meta(args.backend, results)

    # Set results.name
    if args.name is not None:
        results.name = args.name
    else:
        results.name = path.basename(args.results_path)

    # Instantiate the chosen results backend and write initial metadata.
    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(_create_metadata(args, results.name))

    profile = framework.profile.merge_test_profiles(args.test_profile)
    profile.results_dir = args.results_path
    # If a test list is provided then set the forced_test_list value.
    if args.test_list:
        with open(args.test_list) as test_list:
            # Strip newlines
            profile.forced_test_list = list([t.strip() for t in test_list])

    results.time_elapsed.start = time.time()
    # Set the dmesg type
    if args.dmesg:
        profile.dmesg = args.dmesg

    if args.monitored:
        profile.monitoring = args.monitored

    profile.run(args.log_level, backend)

    results.time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': results.time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Esempio n. 28
0
def run(input_):
    """ Function for piglit run command

    This is a function because it allows it to be shared between piglit-run.py
    and piglit run

    Arguments:
    input_ -- argv-style list of command line arguments to parse

    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    # If a test list is provided then set the forced_test_list value.
    forced_test_list = None
    if args.test_list:
        # A forced test list only makes sense for a single profile.
        if len(args.test_profile) != 1:
            raise exceptions.PiglitFatalError(
                'Unable to force a test list with more than one profile')

        with open(args.test_list) as test_list:
            # Strip newlines and comments, ignore empty lines
            stripped = (t.split('#')[0].strip() for t in test_list)
            forced_test_list = [t for t in stripped if t]

    # Instantiate the chosen results backend and write initial metadata.
    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(_create_metadata(
        args, args.name or path.basename(args.results_path), forced_test_list))

    # Load every requested test profile and point it at the results dir.
    profiles = [profile.load_test_profile(p) for p in args.test_profile]
    for p in profiles:
        p.results_dir = args.results_path

    # Set the forced_test_list, if applicable
    if forced_test_list:
        profiles[0].forced_test_list = forced_test_list

    # Set the dmesg type
    if args.dmesg:
        for p in profiles:
            p.options['dmesg'] = dmesg.get_dmesg(args.dmesg)

    if args.monitored:
        for p in profiles:
            p.options['monitor'] = monitoring.Monitoring(args.monitored)

    # Apply include/exclude regex filters to every profile.
    for p in profiles:
        if args.exclude_tests:
            p.filters.append(profile.RegexFilter(args.exclude_tests,
                                                 inverse=True))
        if args.include_tests:
            p.filters.append(profile.RegexFilter(args.include_tests))

    time_elapsed = TimeAttribute(start=time.time())

    profile.run(profiles, args.log_level, backend, args.concurrency)

    time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Esempio n. 29
0
def run(input_):
    """ Function for piglit run command

    This is a function because it allows it to be shared between piglit-run.py
    and piglit run

    Arguments:
    input_ -- argv-style list of command line arguments to parse

    """
    args = _run_parser(input_)
    # Propagate the requested per-test timeout to all Test instances.
    base.Test.timeout = args.timeout
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation
    options.OPTIONS.jobs = args.jobs

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    # If a test list is provided then set the forced_test_list value.
    forced_test_list = None
    if args.test_list:
        # A forced test list only makes sense for a single profile.
        if len(args.test_profile) != 1:
            raise exceptions.PiglitFatalError(
                'Unable to force a test list with more than one profile')

        with open(args.test_list) as test_list:
            # Strip newlines and comments, ignore empty lines
            stripped = (t.split('#')[0].strip() for t in test_list)
            forced_test_list = [t for t in stripped if t]

    time_elapsed = TimeAttribute(start=time.time())

    # Instantiate the chosen results backend and write initial metadata.
    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(
        _create_metadata(args, args.name or path.basename(args.results_path),
                         forced_test_list))

    # Load every requested test profile and point it at the results dir.
    profiles = [profile.load_test_profile(p) for p in args.test_profile]
    for p in profiles:
        p.results_dir = args.results_path

    # Set the forced_test_list, if applicable
    if forced_test_list:
        profiles[0].forced_test_list = forced_test_list

    # Set the dmesg type
    if args.dmesg:
        for p in profiles:
            p.options['dmesg'] = dmesg.get_dmesg(args.dmesg)

    if args.monitored:
        for p in profiles:
            p.options['monitor'] = monitoring.Monitoring(args.monitored)

    if args.ignore_missing:
        for p in profiles:
            p.options['ignore_missing'] = args.ignore_missing

    # Apply include/exclude regex filters to every profile.
    for p in profiles:
        if args.exclude_tests:
            p.filters.append(
                profile.RegexFilter(args.exclude_tests, inverse=True))
        if args.include_tests:
            p.filters.append(profile.RegexFilter(args.include_tests))

    profile.run(profiles, args.log_level, backend, args.concurrency, args.jobs)

    time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
Esempio n. 30
0
def html(input_):
    """Combine files in a tests/ directory into a single results file.

    Parses *input_*, prepares the summary directory, and renders the
    combined HTML summary.  (Fix: this docstring previously appeared
    after executable statements, where it was a no-op string expression.)
    """
    # Make a copy of the status text list and add all. This is used as the
    # argument list for -e/--exclude
    statuses = set(str(s) for s in status.ALL)
    statuses.add('all')

    unparsed = parsers.parse_config(input_)[1]

    # Adding the parent is necessary to get the help options
    parser = argparse.ArgumentParser(parents=[parsers.CONFIG])
    parser.add_argument("-o", "--overwrite",
                        action="store_true",
                        help="Overwrite existing directories")
    parser.add_argument("-l", "--list",
                        action="store",
                        help="Load a newline separated list of results. These "
                             "results will be prepended to any Results "
                             "specified on the command line")
    parser.add_argument("-e", "--exclude-details",
                        default=[],
                        action="append",
                        choices=statuses,
                        help="Optionally exclude the generation of HTML pages "
                             "for individual test pages with the status(es) "
                             "given as arguments. This speeds up HTML "
                             "generation, but reduces the info in the HTML "
                             "pages. May be used multiple times")
    parser.add_argument("summaryDir",
                        metavar="<Summary Directory>",
                        help="Directory to put HTML files in")
    parser.add_argument("resultsFiles",
                        metavar="<Results Files>",
                        nargs="*",
                        help="Results files to include in HTML")
    args = parser.parse_args(unparsed)

    # If args.list and args.resultsFiles are empty, then raise an error.
    # parser.error() exits itself; the `raise` around it was dead code.
    if not args.list and not args.resultsFiles:
        parser.error("Missing required option -l or <resultsFiles>")

    # Convert the exclude_details list to status objects, without this using
    # the -e option will except
    if args.exclude_details:
        # If exclude-results has all, then change it to be all
        if 'all' in args.exclude_details:
            args.exclude_details = status.ALL
        else:
            args.exclude_details = frozenset(
                status.status_lookup(i) for i in args.exclude_details)

    # if overwrite is requested delete the output directory
    if path.exists(args.summaryDir) and args.overwrite:
        shutil.rmtree(args.summaryDir)

    # If the requested directory doesn't exist, create it or throw an error
    try:
        core.check_dir(args.summaryDir, not args.overwrite)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            '{} already exists.\n'
            'use -o/--overwrite if you want to overwrite it.'.format(
                args.summaryDir))

    # Merge args.list and args.resultsFiles
    if args.list:
        args.resultsFiles.extend(core.parse_listfile(args.list))

    # Create the HTML output
    summary.html(args.resultsFiles, args.summaryDir, args.exclude_details)