Code example #1
    def collect_result(self, name, process, job_info):
        """Custom collect_result function."""

        def resource(ext):
            """Returns the path to the testcase resource with the given ext."""
            return os.path.join(
                self.main.options.output_dir, '%s.%s' % (name, ext))

        # Fetch the testcase results
        raw_result = split_file(resource('result'), ignore_errors=True)

        if raw_result:
            status, message = raw_result[0].split(':', 1)
        else:
            status, message = 'CRASH', 'testsuite internal error (no results)'

        if os.path.isfile(resource('time')):
            with open(resource('time'), 'r') as timing:
                duration = float(timing.read())
        else:
            duration = 0

        result = {
            'name': name,
            'status': status,
            'message': message,
            'duration': duration
        }

        # Increment the status count
        self.summary[status] += 1
        self.duration += duration

        # Store the testcase result in the results file
        echo_to_file(self.main.options.results_file,
                     '%(name)s:%(status)s: %(message)s\n' % result,
                     append=True)

        # Display the testcase result
        self.log(
            Testsuite.format_testcase_result(result),
            sys.stderr if status in Testsuite.ERRORS else sys.stdout)

        # Display the testcase diff if requested
        out_file = resource('out')
        diff_file = resource('diff')

        if status in Testsuite.ERRORS and self.main.options.with_diff:
            if os.path.isfile(diff_file):
                with open(diff_file, 'r') as diff:
                    self.log(
                        Testsuite.format_testcase_diff(diff.read().strip()),
                        stream=sys.stderr)
            elif os.path.isfile(out_file):
                with open(out_file, 'r') as out:
                    self.log(
                        Testsuite.format_testcase_diff(''.join([
                            '+{}\n'.format(line)
                            for line in out.read().strip().splitlines()
                        ])), stream=sys.stderr)
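For reference, a minimal self-contained sketch of the .result format this collector appears to expect (a single 'STATUS:message' line), parsed here with the standard library instead of split_file; the testcase name and output directory below are invented:

import os
import tempfile

# Hypothetical testcase name and output directory, for illustration only.
output_dir = tempfile.mkdtemp()
name = 'hello_world'

# The test driver appears to write one "STATUS:message" line per testcase.
with open(os.path.join(output_dir, '%s.result' % name), 'w') as f:
    f.write('DIFF:unexpected output\n')

# Same parsing as in collect_result above: take the first line, split on the
# first ':' only, and fall back to CRASH when the file is empty or missing.
with open(os.path.join(output_dir, '%s.result' % name)) as f:
    raw_result = f.read().splitlines()

if raw_result:
    status, message = raw_result[0].split(':', 1)
else:
    status, message = 'CRASH', 'testsuite internal error (no results)'
print(status, message)   # DIFF unexpected output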
Code example #2
File: reports.py Project: t-14/gnatpython
    def __init__(self, dir):
        """Report constructor.

        :param dir: the directory that contains the testsuite results, if None
             initialise the object but do not populate result_db
        :type dir: str | None
        """
        self.dir = dir
        self.result_db = {}

        if self.dir is not None:
            assert os.path.isdir(self.dir), "invalid result directory"

            try:
                result_list = split_file(dir + '/results')
                # Discard any invalid lines (not containing :)
                result_list = (k.split(':', 2) for k in result_list
                               if ':' in k)
            except FileUtilsError:
                # No result file typically means that no test has been run.
                result_list = []

            for item in result_list:
                msg = ''
                if len(item) > 2:
                    msg = item[2]

                self.result_db[item[0]] = \
                    TestResult(self.dir, item[0], item[1], msg)
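To make the parsing concrete, here is a small self-contained sketch of the 'name:status[:message]' lines the constructor reads from the results file; the sample content is invented and TestResult is replaced by a plain tuple:

# Invented sample content for a '<dir>/results' file.
sample = "test_0001:OK\ntest_0002:DIFF:output\nnot a result line\n"

result_db = {}
for line in sample.splitlines():
    if ':' not in line:            # invalid lines are discarded, as above
        continue
    item = line.split(':', 2)      # at most: name, status, message
    msg = item[2] if len(item) > 2 else ''
    result_db[item[0]] = (item[1], msg)

print(result_db)
# {'test_0001': ('OK', ''), 'test_0002': ('DIFF', 'output')}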
Code example #3
def collect_result_including_uok(name, process, _job_info):
    generated_collect_result(name, process, _job_info)
    test_name = os.path.basename(name)
    test_result = split_file(
        m.options.output_dir + '/' + test_name + '.result',
        ignore_errors=True)
    if test_result:
        test_status = test_result[0].split(':')[0]
        if test_status == 'UOK':
            test_metrics['uok'] += 1
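The same idea can be written as a generic wrapper around any collector. The sketch below is hypothetical (all names invented) and reads the .result file directly instead of going through split_file:

import os

def make_status_counter(base_collect_result, result_dir, status, counters):
    """Wrap an existing collect_result callback and count one extra status."""
    def collect(name, process, job_info):
        base_collect_result(name, process, job_info)
        result_file = os.path.join(result_dir,
                                   os.path.basename(name) + '.result')
        try:
            with open(result_file) as f:
                first_line = f.readline()
        except IOError:
            return
        if first_line.split(':')[0] == status:
            counters[status] = counters.get(status, 0) + 1
    return collect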
Code example #4
def get_test_diff(result_dir, name, note, result_str, filename, diffs_format):
    """Update diffs and xfail_diffs files.

    :param result_dir: directory containing the test results
    :type result_dir: str
    :param name: test name
    :type name: str
    :param note: annotation
    :type note: str
    :param result_str: content of the test .result file
    :type result_str: str
    :param filename: file to update
    :type filename: str
    :param diffs_format: if 'diff', show the diff content; otherwise show the
        expected / actual output
    :type diffs_format: str | None
    """
    result = ["================ Bug %s %s" % (name, note)]
    if diffs_format == 'diff':
        result += split_file(result_dir + '/' + name + '.diff',
                             ignore_errors=True)[0:2000]
    else:
        if re.match("DIFF:unexpected", result_str):
            result.append("---------------- unexpected output")
            result += split_file(result_dir + '/' + name + '.out',
                                 ignore_errors=True)[0:100]

        elif re.match("CRASH:", result_str):
            result.append("---------------- unexpected output")
            result += split_file(result_dir + '/' + name + '.out',
                                 ignore_errors=True)[0:30]

        elif re.match("DIFF:output|XFAIL:|FAILED:|PROBLEM:", result_str):
            result.append("---------------- expected output")
            result += split_file(result_dir + '/' + name + '.expected',
                                 ignore_errors=True)[0:2000]
            result.append("---------------- actual output")
            result += split_file(result_dir + '/' + name + '.out',
                                 ignore_errors=True)

    echo_to_file(filename, result, append=True)
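A hypothetical call, assuming the driver already produced out/results/my_test.{result,out,expected,diff}; every value below is invented:

import os

result_dir = 'out/results'
test_name = 'my_test'
result_str = 'DIFF:output'        # first line of my_test.result
note = ''                         # no annotation

get_test_diff(result_dir, test_name, note, result_str,
              os.path.join(result_dir, 'diffs'),
              diffs_format=None)  # None => show expected vs. actual output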
Code example #5
File: testdriver.py Project: t-14/gnatpython
    def adjust_to_context(self):
        """Adjust test environment to context.

        At this stage we parse the test.opt file and adjust the opt_results
        attribute value. Right after this step, the driver checks whether the
        test should be run at all (i.e. whether it is DEAD).
        """
        opt_file_path = os.path.join(self.test, self.opt_file)

        if self.restricted_discs is not None:
            opt_file_content = ['ALL DEAD disabled by default']
            if os.path.isfile(opt_file_path):
                opt_file_content += split_file(opt_file_path)

            opt = OptFileParse(self.discs, opt_file_content)
            self.opt_results = opt.get_values(self.opt_results)
            if not self.opt_results['DEAD']:
                activating_tags = opt.get_note(sep='')
                for d in self.restricted_discs:
                    if d not in activating_tags:
                        self.opt_results['DEAD'] = \
                            '%s not in activating tags' % d
        else:
            opt = OptFileParse(self.discs, opt_file_path)
            self.opt_results = opt.get_values(self.opt_results)

        self.opt_results['NOTE'] = opt.get_note()

        if not os.path.isfile(self.test + '/' + self.opt_results['CMD']):
            self.result = {
                'result': 'INVALID_TEST',
                'msg':
                'cannot find script file %s' % (self.opt_results['CMD']),
                'is_failure': True
            }
            return

        if self.opt_results['OUT'][-8:] != 'test.out' and \
                not os.path.isfile(self.test + '/' + self.opt_results['OUT']):
            tmp = os.path.basename(self.opt_results['OUT'])
            self.result = {
                'result': 'INVALID_TEST',
                'msg': 'cannot find output file %s' % (tmp),
                'is_failure': True
            }
            return
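For illustration, the core of the restricted_discs branch above with OptFileParse replaced by sample data; this assumes get_note(sep='') returns the activating tags joined into a single string, which is what the substring test in the loop suggests:

restricted_discs = ['linux', 'x86_64']
opt_results = {'DEAD': False}
activating_tags = 'linuxnative'   # assumed shape of opt.get_note(sep='')

for d in restricted_discs:
    if d not in activating_tags:
        opt_results['DEAD'] = '%s not in activating tags' % d

print(opt_results)   # {'DEAD': 'x86_64 not in activating tags'}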
Code example #6
    def run_testcase(test, job_info):
        """Run the given test.

        See mainloop documentation
        """
        skip_if_ok = hasattr(options, 'skip_if_ok') and options.skip_if_ok
        skip_if_run = hasattr(
            options, 'skip_if_already_run') and options.skip_if_already_run
        skip_if_dead = hasattr(options,
                               'skip_if_dead') and options.skip_if_dead

        result_dir = options.output_dir

        if skip_if_ok or skip_if_run or skip_if_dead:
            try:
                if use_basename:
                    test_name = os.path.basename(test)
                else:
                    test_name = os.path.relpath(test, os.getcwd())

                old_result_file = os.path.join(result_dir,
                                               test_name + '.result')
                if os.path.exists(old_result_file):
                    if skip_if_run:
                        return SKIP_EXECUTION
                    old_result = split_file(old_result_file)[0].split(':')[0]
                    if skip_if_ok and old_result in ('OK', 'UOK', 'PASSED'):
                        return SKIP_EXECUTION
                    if skip_if_dead and old_result == 'DEAD':
                        return SKIP_EXECUTION
            except FileUtilsError:
                logging.debug("Cannot get old result for %s" % test)
                pass

        # VxWorks tests need WORKER_ID to be set in order to give vxsim an id
        # that will not collide with other instances.
        os.environ['WORKER_ID'] = str(job_info[0])

        cmd = [
            sys.executable, driver, '-d', ",".join(discs or []), '-o',
            result_dir, '-t', options.tmp, test
        ]
        if options.verbose:
            cmd.append('-v')
        if hasattr(options, 'host'):
            if options.host:
                cmd.append('--host=' + options.host)
            if options.build:
                cmd.append('--build=' + options.build)
            if options.target:
                cmd.append('--target=' + options.target)
        if not options.enable_cleanup:
            cmd.append('--disable-cleanup')
        if hasattr(options, 'restricted_discs') and options.restricted_discs:
            cmd.extend(('-r', options.restricted_discs))
        if options.failed_only:
            cmd.append('--failed-only')
        if options.timeout:
            cmd.append('--timeout=' + options.timeout)
        if options.use_basename:
            cmd.append('--use-basename')
        return Run(cmd, bg=True, output=None)
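With hypothetical options (the driver path, discriminants, directories and test name below are all invented), the command list assembled above would come out roughly as:

import sys

cmd = [
    sys.executable, './run-test', '-d', 'linux,x86_64',
    '-o', 'out/results', '-t', 'tmp', 'tests/my_test',
    '--disable-cleanup', '--timeout=780',
]
print(' '.join(cmd))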
Code example #7
def generate_collect_result(result_dir=None,
                            results_file=None,
                            output_diff=False,
                            use_basename=True,
                            metrics=None,
                            options=None):
    """Generate a collect result function.

    The generated collect_result function is known to work with gnatpython's
    default test driver: gnatpython.testdriver.TestRunner.

    If you use the default options, the call to generate_collect_result
    should be:

    .. code-block:: python

        metrics = {'total': NUMBER_OF_TESTS}
        generate_collect_result(metrics=metrics, options=options)

    :param result_dir: [deprecated] directory containing test results,
        if None use options.output_dir
    :type result_dir: str | None
    :param results_file: [deprecated] file containing the list of test status,
        if None use options.results_file
    :type results_file: str | None
    :param output_diff: if True, output the .diff in case of failure (useful
        when debugging)
    :type output_diff: bool
    :param use_basename: if True use the test basename to get the test name
        else use the relative path
    :type use_basename: bool
    :param metrics: to collect metrics, just pass an empty dictionary or
        a dictionary containing a key named 'total' with an integer
        value equal to the number of tests to run
    :type metrics: dict | None
    :param options: test driver and Main options

    When collecting metrics, a file named 'status' will be created in
    result_dir and will contain some metrics.

    If options.max_consecutive_failures is set to N, the run will be aborted
    when more than N tests fail consecutively (ignoring tests expected to
    fail and skipped tests).
    """
    # Set result_dir and results_file if needed
    if options is not None and result_dir is None:
        result_dir = options.output_dir
    if results_file is None:
        results_file = options.results_file

    # Save the startup time
    start_time_str = strftime('%Y-%m-%d %H:%M:%S')

    max_consecutive_failures = int(
        options.max_consecutive_failures) if hasattr(
            options, 'max_consecutive_failures') else 0
    if max_consecutive_failures:
        if metrics is None:
            metrics = {}
        metrics['max_consecutive_failures'] = 0

    if metrics is not None:
        for m in ('run', 'failed', 'crashed', 'new_failed', 'new_crashed'):
            metrics[m] = 0
        for m in ('old_diffs', 'old_crashes'):
            if m not in metrics:
                metrics[m] = []
        if 'total' not in metrics:
            metrics['total'] = 0

        # Compute old metrics if needed
        if hasattr(options, 'old_output_dir') \
                and options.old_output_dir is not None:
            old_results = [
                k.split(':') for k in split_file(os.path.join(
                    options.old_output_dir, 'results'),
                                                 ignore_errors=True)
            ]
            if 'old_diffs' not in metrics:
                metrics['old_diffs'] = [
                    k[0] for k in old_results if k[1] in DIFF_STATUS
                ]
            if 'old_crashes' not in metrics:
                metrics['old_crashes'] = [
                    k[0] for k in old_results if k[1] in CRASH_STATUS
                ]

    def collect_result(name, process, _job_info):
        """Default collect result function.

        Read the .result and .note files in {result_dir}/{test_name},
        then append the result to {results_file}.

        If output_diff is True, also print the content of the .diff file.

        `name` should be the path to the test directory.
        """
        # Unused parameter
        del _job_info
        if metrics is not None:
            # Increment number of run tests
            metrics['run'] += 1

        if use_basename:
            test_name = os.path.basename(name)
        else:
            test_name = os.path.relpath(name, os.getcwd())

        test_result = split_file(result_dir + '/' + test_name + '.result',
                                 ignore_errors=True)
        if not test_result:
            if process == SKIP_EXECUTION:
                test_result = 'CRASH:test skipped'
            else:
                test_result = 'CRASH:cannot read result file'
        else:
            test_result = test_result[0]
            if not test_result:
                test_result = 'CRASH: invalid result file'

        test_note = split_file(result_dir + '/' + test_name + '.note',
                               ignore_errors=True)

        if not test_note:
            test_note = ""
        else:
            test_note = test_note[0]

        # Append result to results file
        echo_to_file(results_file,
                     "%s:%s %s\n" % (test_name, test_result, test_note),
                     append=True)

        testsuite_logging.append_to_logfile(test_name, result_dir)

        test_status = test_result.split(':')[0]
        if test_status not in (DIFF_STATUS + CRASH_STATUS):
            # The command line log is not useful in these cases so it is
            # removed.
            cmdlog = result_dir + '/' + test_name + '.log'
            if os.path.isfile(cmdlog):
                rm(cmdlog)

        if metrics is not None:
            diffs_format = options.diffs_format if hasattr(
                options, 'diffs_format') else None

            # Set last test name
            metrics['last'] = test_name

            # Update metrics and diffs or xfail_diffs file
            diffs_file = os.path.join(result_dir, 'diffs')
            xfail_diffs_file = os.path.join(result_dir, 'xfail_diffs')

            if test_status in DIFF_STATUS:
                metrics['failed'] += 1
                if test_name not in metrics['old_diffs']:
                    metrics['new_failed'] += 1
                get_test_diff(result_dir, test_name, test_note, test_result,
                              diffs_file, diffs_format)
            elif test_status in CRASH_STATUS:
                metrics['crashed'] += 1
                if test_name not in metrics['old_crashes']:
                    metrics['new_crashed'] += 1
                get_test_diff(result_dir, test_name, test_note, test_result,
                              diffs_file, diffs_format)
            elif test_status in XFAIL_STATUS:
                get_test_diff(result_dir, test_name, test_note, test_result,
                              xfail_diffs_file, diffs_format)

            if max_consecutive_failures and process != SKIP_EXECUTION:
                # Count number of consecutive failures
                if test_status in FAIL_STATUS:
                    # ignore XFAIL
                    if test_status not in XFAIL_STATUS:
                        metrics['max_consecutive_failures'] += 1
                elif test_status in SKIP_STATUS:
                    # ignore DEAD or SKIP tests
                    pass
                else:
                    metrics['max_consecutive_failures'] = 0

            # Update global status
            s = []
            if "JOB_ID" in os.environ:
                s.append("%s running tests since %s\n" %
                         (os.environ['JOB_ID'], start_time_str))

            s.append("%(run)s out of %(total)s processed (now at %(last)s)" %
                     metrics)
            s.append("%(new_failed)s new potential regression(s)"
                     " among %(failed)s" % metrics)
            s.append("%(new_crashed)s new crash(es) among %(crashed)s" %
                     metrics)
            echo_to_file(os.path.join(result_dir, 'status'),
                         '\n'.join(s) + '\n')

        if process != SKIP_EXECUTION:
            # else the test has been skipped. No need to print its status.
            if test_status in (DIFF_STATUS + CRASH_STATUS):
                logging_func = logging.error
            else:
                logging_func = logging.info

            logging_func("%-30s %s %s" % (test_name, test_result, test_note))

            if output_diff:
                diff_filename = result_dir + '/' + test_name + '.diff'
                if os.path.exists(diff_filename):
                    with open(diff_filename) as diff_file:
                        logging_func(diff_file.read().strip())

        # Exit the mainloop if too many errors (more than
        # max_consecutive_failures)
        if metrics and max_consecutive_failures \
                and process != SKIP_EXECUTION and metrics[
                    'max_consecutive_failures'] >= max_consecutive_failures:
            raise TooManyErrors

    return collect_result
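A hedged usage sketch: the MainLoop import and its positional arguments (test list, run callback, collect callback, parallelism) are assumptions about gnatpython.mainloop based on the callback shapes shown in these examples, and options and run_testcase are taken from the surrounding driver; the test paths are invented.

from gnatpython.mainloop import MainLoop   # assumed entry point

test_list = ['tests/t001', 'tests/t002']   # invented test paths
metrics = {'total': len(test_list)}
collect_result = generate_collect_result(metrics=metrics, options=options)
MainLoop(test_list, run_testcase, collect_result, 4)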
Code example #8
File: testdriver.py Project: t-14/gnatpython
    def analyze(self, ignore_white_chars=True):
        """Compute test status.

        :param ignore_white_chars: in the default driver, differences in white
            chars are ignored. This parameter allows the user to change that
            behavior: to do so, override the analyze method in your own driver
            and call this method with ignore_white_chars set to False.
        :type ignore_white_chars: bool

        This method should set the final value of the 'result' attribute.
        """
        # Retrieve the outputs and see if we match some of the CRASH or DEAD
        # patterns
        output = split_file(self.output, ignore_errors=True)
        if output:
            tmp = "\n".join(output)
            for pattern in self.get_status_filter():
                if re.search(pattern[0], tmp):
                    self.result.update(pattern[1])
                    break

        # If the test status has not been updated, compare the output with
        # the baseline
        if self.result['result'] == 'UNKNOWN':
            # Retrieve expected output
            expected = split_file(self.opt_results['OUT'], ignore_errors=True)

            # Process output and expected output with registered filters
            expected = self.apply_output_filter(expected)
            output = self.apply_output_filter(output)

            # Save the filtered output (some developers might need it to
            # create baselines more easily).
            echo_to_file(self.output_filtered, output)

            d = diff(expected, output, ignore_white_chars=ignore_white_chars)
            if d:
                logging.debug(d)
                self.result['result'] = 'DIFF'
                if len(expected) == 0:
                    self.result['msg'] = 'unexpected output'
                else:
                    self.result['msg'] = 'output'
                with open(self.diff_output, 'w') as diff_file:
                    diff_file.write(d)
            else:
                self.result = {'result': 'OK', 'msg': '', 'is_failure': False}

        self.result['is_failure'] = IS_STATUS_FAILURE[self.result['result']]

        # self.opt_results['XFAIL'] contains the XFAIL comment or False
        # The status should be set to XFAIL even if the comment is empty
        if not isinstance(self.opt_results['XFAIL'], bool) or \
                self.opt_results['XFAIL']:
            if self.result['result'] in ['DIFF', 'CRASH']:
                self.result.update({
                    'result': 'XFAIL',
                    'msg': self.opt_results['XFAIL']
                })
            elif self.result['result'] == 'OK':
                self.result.update({
                    'result': 'UOK',
                    'msg': self.opt_results['XFAIL']
                })
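The XFAIL handling at the end is the subtle part; here it is restated as a self-contained function (the name promote_xfail is invented) operating on plain result dictionaries:

def promote_xfail(result, xfail_comment):
    """xfail_comment is the test.opt XFAIL value: False or a (maybe empty) str."""
    if not isinstance(xfail_comment, bool) or xfail_comment:
        if result['result'] in ('DIFF', 'CRASH'):
            result.update({'result': 'XFAIL', 'msg': xfail_comment})
        elif result['result'] == 'OK':
            result.update({'result': 'UOK', 'msg': xfail_comment})
    return result

print(promote_xfail({'result': 'DIFF', 'msg': 'output'}, 'known failure'))
# {'result': 'XFAIL', 'msg': 'known failure'}
print(promote_xfail({'result': 'OK', 'msg': ''}, ''))
# {'result': 'UOK', 'msg': ''}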
Code example #9
File: testdriver.py Project: t-14/gnatpython
    def compute_cmd_line_cmd(self, filesize_limit):
        """Compute self.cmd_line and preprocess the test script.

        This function is called by compute_cmd_line
        """
        cmd = self.opt_results['CMD']
        if Env().host.os.name != 'windows':
            script = split_file(cmd)

            # The test is run on a Unix system but has a 'cmd' syntax.
            # Convert it to Bourne shell syntax.
            cmdfilter = Filter()
            cmdfilter.append([r'-o(.*).exe', r'-o \1'])
            cmdfilter.append([r'%([^ ]*)%', r'"$\1"'])
            cmdfilter.append([r'(\032|\015)', r''])
            cmdfilter.append(
                [r'set *([^ =]+) *= *([^ ]*)', r'\1="\2"; export \1'])
            script = cmdfilter.process(script)

            cmd = self.work_dir + '/__test.sh'
            echo_to_file(cmd, 'PATH=.:$PATH; export PATH\n')

            # Compute effective file size limit on Unix system.
            if filesize_limit > 0:
                # The file size limit can be specified either by a default or
                # by means of the FILESIZE_LIMIT command in the test's
                # test.opt. When both are specified, use the upper limit
                # (note that 0 means unlimited).
                opt_limit = self.opt_results['FILESIZE_LIMIT']
                if opt_limit is not None:
                    try:
                        opt_limit = int(opt_limit)
                    except (TypeError, ValueError):
                        # Malformed FILESIZE_LIMIT value: keep the default
                        opt_limit = filesize_limit
                else:
                    opt_limit = filesize_limit

                if opt_limit != 0:
                    if filesize_limit < opt_limit:
                        filesize_limit = opt_limit

                    # Limit filesize. The argument to ulimit is a number of
                    # 512-byte blocks, so multiply the value given by the user
                    # by two. Filesize limit is not supported on Windows.
                    echo_to_file(cmd, 'ulimit -f %s\n' % (filesize_limit * 2),
                                 True)

            # Source support.sh in TEST_SUPPORT_DIR if set
            if 'TEST_SUPPORT_DIR' in os.environ and os.path.isfile(
                    os.environ['TEST_SUPPORT_DIR'] + '/support.sh'):
                echo_to_file(cmd, '. $TEST_SUPPORT_DIR/support.sh\n', True)

            echo_to_file(cmd, script, True)

            self.cmd_line += ['bash', cmd]
        else:
            # On Windows systems, use cmd.exe to run the script.
            if cmd[-4:] != '.cmd':
                # We are about to use cmd.exe to run a test. In this case,
                # ensure that the file extension is .cmd, otherwise a dialog
                # box will pop up asking which program should be used to run
                # the script.
                cp(cmd, self.work_dir + '/test__.cmd')
                cmd = self.work_dir + '/test__.cmd'

            self.cmd_line += ['cmd.exe', '/q', '/c', cmd]
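To see what the cmd-to-Bourne-shell conversion does, the same regex pairs can be applied with plain re.sub to an invented .cmd line (Filter itself is assumed to apply its pairs in order):

import re

filters = [
    (r'-o(.*).exe', r'-o \1'),
    (r'%([^ ]*)%', r'"$\1"'),
    (r'(\032|\015)', r''),
    (r'set *([^ =]+) *= *([^ ]*)', r'\1="\2"; export \1'),
]

line = 'gcc -omain.exe %ADA_INCLUDE_PATH%/main.adb\r'
for pattern, repl in filters:
    line = re.sub(pattern, repl, line)
print(line)   # gcc -o main "$ADA_INCLUDE_PATH"/main.adb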