Example #1
    def collect_result(self, name, process, job_info):
        """Custom collect_result function."""

        def resource(ext):
            """Returns the path to the testcase resource with the given ext."""
            return os.path.join(
                self.main.options.output_dir, '%s.%s' % (name, ext))

        # Fetch the testcase results
        raw_result = split_file(resource('result'), ignore_errors=True)

        if raw_result:
            status, message = raw_result[0].split(':', 1)
        else:
            status, message = 'CRASH', 'testsuite internal error (no results)'

        if os.path.isfile(resource('time')):
            with open(resource('time'), 'r') as timing:
                duration = float(timing.read())
        else:
            duration = 0

        result = {
            'name': name,
            'status': status,
            'message': message,
            'duration': duration
        }

        # Increment the status count
        self.summary[status] += 1
        self.duration += duration

        # Store the testcase result in the results file
        echo_to_file(self.main.options.results_file,
                     '%(name)s:%(status)s: %(message)s\n' % result,
                     append=True)

        # Display the testcase result
        self.log(
            Testsuite.format_testcase_result(result),
            sys.stderr if status in Testsuite.ERRORS else sys.stdout)

        # Display the testcase diff if requested
        out_file = resource('out')
        diff_file = resource('diff')

        if status in Testsuite.ERRORS and self.main.options.with_diff:
            if os.path.isfile(diff_file):
                with open(diff_file, 'r') as diff:
                    self.log(
                        Testsuite.format_testcase_diff(diff.read().strip()),
                        stream=sys.stderr)
            elif os.path.isfile(out_file):
                with open(out_file, 'r') as out:
                    self.log(
                        Testsuite.format_testcase_diff(''.join([
                            '+{}\n'.format(line)
                            for line in out.read().strip().splitlines()
                        ])), stream=sys.stderr)
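
The collector above expects each '<name>.result' resource to hold a single 'STATUS:message' line and falls back to a CRASH status when it is missing or empty. A minimal standalone sketch of that parsing step; the helper name and path are hypothetical, not part of the example:

def read_result(path):
    """Parse a '<name>.result' file whose first line is 'STATUS:message'."""
    try:
        with open(path, 'r') as f:
            first_line = f.readline().strip()
    except (IOError, OSError):
        first_line = ''
    if not first_line:
        # Same fallback as the collector above when no result could be read.
        return 'CRASH', 'testsuite internal error (no results)'
    status, message = first_line.split(':', 1)
    return status, message

# Hypothetical usage:
# status, message = read_result('out/my_test.result')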
Example #2
    def dump_testsuite_result(self):
        """Dump testsuite result files.

        Dump the content of all <test>.yaml files and create report,
        result and comment files.
        """
        testsuite_results = os.path.join(self.output_dir, 'results')
        testsuite_report = os.path.join(self.output_dir, 'report')
        testsuite_comment = os.path.join(self.output_dir, 'comment')

        with open(testsuite_comment, 'w') as f:
            self.write_comment_file(f)

        touch(testsuite_results)

        # Mapping: test status -> hits. Computed to display the testsuite run
        # summary.
        summary = collections.defaultdict(lambda: 0)

        for test_result in find(self.output_dir, '*.yaml'):

            if os.path.basename(test_result) != 'global_env.yaml':
                with open(test_result, "rb") as fd:
                    tr_yaml = yaml.load(fd)

                if tr_yaml:
                    # result in results file
                    echo_to_file(testsuite_results,
                                 '%s:%s: %s\n' %
                                 (tr_yaml.test_env['test_name'],
                                  tr_yaml.status, tr_yaml.msg),
                                 append=True)

                    tr_yaml.dump_result(self.output_dir)
                    summary[tr_yaml.status] += 1

        try:
            report = ReportDiff(self.output_dir,
                                self.old_output_dir,
                                use_diff=True)
        except:
            report = ReportDiff(self.output_dir, self.old_output_dir)

        report.txt_image(testsuite_report)

        summary_msg = ['Summary:']
        for status in sorted(summary):
            hits = summary[status]
            summary_msg.append('  {}: {} test{}'.format(
                status, hits, 's' if hits > 1 else ''))
        logging.info('\n'.join(summary_msg))
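
The summary block at the end builds a status-to-count mapping with a defaultdict and pluralizes "test" by hand. The same formatting can be exercised on its own; the statuses below are made up for illustration:

import collections

summary = collections.defaultdict(int)
for status in ('OK', 'OK', 'OK', 'DIFF', 'CRASH'):  # made-up statuses
    summary[status] += 1

lines = ['Summary:']
for status in sorted(summary):
    hits = summary[status]
    lines.append('  {}: {} test{}'.format(status, hits, 's' if hits > 1 else ''))

print('\n'.join(lines))
# Summary:
#   CRASH: 1 test
#   DIFF: 1 test
#   OK: 3 tests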
Example #3
    def dump_result(self, output_dir):
        """Dump the result as separated files.

        :param path_prefix: the path_prefix to be used as prefix for
           the several files
        :type path_prefix: str
        """

        path_prefix = os.path.join(output_dir, self.test_env['test_name'])

        echo_to_file(path_prefix + '.result',
                     '%s: %s' % (self.status, self.msg))

        if 'PASSED' not in self.status:
            echo_to_file(path_prefix + '.out', '%s' % self.actual_output)
            echo_to_file(path_prefix + '.diff', '%s' % self.diff)
            echo_to_file(path_prefix + '.expected',
                         '%s' % self.expected_output)
            echo_to_file(path_prefix + '.filtered',
                         '%s' % self.filtered_output)
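
All files written by dump_result share the <output_dir>/<test_name> prefix. A small sketch of the resulting layout for a hypothetical non-PASSED test named 'hello'; the names and paths are illustrative:

import os

output_dir = 'out'        # hypothetical output directory
test_name = 'hello'       # hypothetical failing test
path_prefix = os.path.join(output_dir, test_name)

# Files produced by dump_result when the status is not PASSED:
produced = [path_prefix + ext
            for ext in ('.result', '.out', '.diff', '.expected', '.filtered')]
print(produced)
# On a POSIX system:
# ['out/hello.result', 'out/hello.out', 'out/hello.diff',
#  'out/hello.expected', 'out/hello.filtered']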
Example #4
def get_test_diff(result_dir, name, note, result_str, filename, diffs_format):
    """Update diffs and xfail_diffs files.

    :param result_str: content of the test .result file
    :type result_dir: str
    :param name: test name
    :type name: str
    :param note: annotation
    :type note: str
    :param filename: file to update
    :type filename: str
    :param diffs_format: if 'diff' show diff content else show the expected /
        actual output
    :type diffs_format: str | None
    """
    result = ["================ Bug %s %s" % (name, note)]
    if diffs_format == 'diff':
        result += split_file(result_dir + '/' + name + '.diff',
                             ignore_errors=True)[0:2000]
    else:
        if re.match("DIFF:unexpected", result_str):
            result.append("---------------- unexpected output")
            result += split_file(result_dir + '/' + name + '.out',
                                 ignore_errors=True)[0:100]

        elif re.match("CRASH:", result_str):
            result.append("---------------- unexpected output")
            result += split_file(result_dir + '/' + name + '.out',
                                 ignore_errors=True)[0:30]

        elif re.match("DIFF:output|XFAIL:|FAILED:|PROBLEM:", result_str):
            result.append("---------------- expected output")
            result += split_file(result_dir + '/' + name + '.expected',
                                 ignore_errors=True)[0:2000]
            result.append("---------------- actual output")
            result += split_file(result_dir + '/' + name + '.out',
                                 ignore_errors=True)

    echo_to_file(filename, result, append=True)
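
Which section gets emitted depends on the prefix of the .result content, checked with re.match. A standalone sketch of that dispatch; the result strings passed in are made up for illustration:

import re

def diff_section(result_str):
    # Mirror the prefix checks used by get_test_diff (non-'diff' format).
    if re.match("DIFF:unexpected", result_str):
        return "unexpected output (first 100 lines of .out)"
    elif re.match("CRASH:", result_str):
        return "unexpected output (first 30 lines of .out)"
    elif re.match("DIFF:output|XFAIL:|FAILED:|PROBLEM:", result_str):
        return "expected output (.expected) then actual output (.out)"
    return "no extra output"

# Illustrative result strings:
for s in ("DIFF:unexpected output", "CRASH:command not found",
          "XFAIL:known issue", "OK:"):
    print('{} -> {}'.format(s, diff_section(s)))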
Example #5
    def write_results(self):
        """Write results on disk.

        Write at least .result and maybe .out and .expected files in the
        result directory.
        """
        echo_to_file(self.result_prefix + '.result',
                     self.result['result'] + ':' + self.result['msg'] + '\n')

        # The command line logs are always saved in the result directory
        # because all of them are needed to generate the aggregation file
        # (testsuite_support.log) in the collect_result function.

        if os.path.isfile(self.cmdlog):
            cp(self.cmdlog, self.result_prefix + '.log')

        if self.result['is_failure']:
            if os.path.isfile(self.opt_results['OUT']):
                cp(self.opt_results['OUT'], self.result_prefix + '.expected')

            if os.path.isfile(self.output):
                cp(self.output, self.result_prefix + '.out')

            if os.path.isfile(self.output_filtered):
                cp(self.output_filtered, self.result_prefix + '.out.filtered')

            if os.path.isfile(self.diff_output):
                cp(self.diff_output, self.result_prefix + '.diff')

            if self.keep_test_dir_on_failure:
                with open(self.result_prefix + '.info', 'a') as f:
                    f.write('binary_path:%s\n' % self.failed_bin_path)

        if self.opt_results['TIMING']:
            echo_to_file(self.result_prefix + '.time',
                         str(self.opt_results['TIMING']) + '\n')
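
The .result file written here is a single 'result:msg' line, the counterpart of what the collectors in Examples #1 and #6 parse back. A minimal sketch of that write; the helper name and path are hypothetical:

def write_result(path, result, msg):
    # Write the single 'RESULT:msg' line that collect_result later parses.
    with open(path, 'w') as f:
        f.write('%s:%s\n' % (result, msg))

# Hypothetical usage:
# write_result('out/my_test.result', 'DIFF', 'output')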
Example #6
    def collect_result(name, process, _job_info):
        """Default collect result function.

        Read .result and .note file in {result_dir}/{test_name} dir
        Then append result to {result_file}

        If output_diff is True, print the content of .diff files

        Name should be the path to the test directory
        """
        # Unused parameter
        del _job_info
        if metrics is not None:
            # Increment number of run tests
            metrics['run'] += 1

        if use_basename:
            test_name = os.path.basename(name)
        else:
            test_name = os.path.relpath(name, os.getcwd())

        test_result = split_file(result_dir + '/' + test_name + '.result',
                                 ignore_errors=True)
        if not test_result:
            if process == SKIP_EXECUTION:
                test_result = 'CRASH:test skipped'
            else:
                test_result = 'CRASH:cannot read result file'
        else:
            test_result = test_result[0]
            if not test_result:
                test_result = 'CRASH: invalid result file'

        test_note = split_file(result_dir + '/' + test_name + '.note',
                               ignore_errors=True)

        if not test_note:
            test_note = ""
        else:
            test_note = test_note[0]

        # Append result to results file
        echo_to_file(results_file,
                     "%s:%s %s\n" % (test_name, test_result, test_note),
                     append=True)

        testsuite_logging.append_to_logfile(test_name, result_dir)

        test_status = test_result.split(':')[0]
        if test_status not in (DIFF_STATUS + CRASH_STATUS):
            # The command line log is not useful in these cases so it is
            # removed.
            cmdlog = result_dir + '/' + test_name + '.log'
            if os.path.isfile(cmdlog):
                rm(cmdlog)

        if metrics is not None:
            diffs_format = getattr(options, 'diffs_format', None)

            # Set last test name
            metrics['last'] = test_name

            # Update metrics and diffs or xfail_diffs file
            diffs_file = os.path.join(result_dir, 'diffs')
            xfail_diffs_file = os.path.join(result_dir, 'xfail_diffs')

            if test_status in DIFF_STATUS:
                metrics['failed'] += 1
                if test_name not in metrics['old_diffs']:
                    metrics['new_failed'] += 1
                get_test_diff(result_dir, test_name, test_note, test_result,
                              diffs_file, diffs_format)
            elif test_status in CRASH_STATUS:
                metrics['crashed'] += 1
                if test_name not in metrics['old_crashes']:
                    metrics['new_crashed'] += 1
                get_test_diff(result_dir, test_name, test_note, test_result,
                              diffs_file, diffs_format)
            elif test_status in XFAIL_STATUS:
                get_test_diff(result_dir, test_name, test_note, test_result,
                              xfail_diffs_file, diffs_format)

            if max_consecutive_failures and process != SKIP_EXECUTION:
                # Count number of consecutive failures
                if test_status in FAIL_STATUS:
                    # ignore XFAIL
                    if test_status not in XFAIL_STATUS:
                        metrics['max_consecutive_failures'] += 1
                elif test_status in SKIP_STATUS:
                    # ignore DEAD or SKIP tests
                    pass
                else:
                    metrics['max_consecutive_failures'] = 0

            # Update global status
            s = []
            if "JOB_ID" in os.environ:
                s.append("%s running tests since %s\n" %
                         (os.environ['JOB_ID'], start_time_str))

            s.append("%(run)s out of %(total)s processed (now at %(last)s)" %
                     metrics)
            s.append("%(new_failed)s new potential regression(s)"
                     " among %(failed)s" % metrics)
            s.append("%(new_crashed)s new crash(es) among %(crashed)s" %
                     metrics)
            echo_to_file(os.path.join(result_dir, 'status'),
                         '\n'.join(s) + '\n')

        if process != SKIP_EXECUTION:
            # else the test has been skipped. No need to print its status.
            if test_status in (DIFF_STATUS + CRASH_STATUS):
                logging_func = logging.error
            else:
                logging_func = logging.info

            logging_func("%-30s %s %s" % (test_name, test_result, test_note))

            if output_diff:
                diff_filename = result_dir + '/' + test_name + '.diff'
                if os.path.exists(diff_filename):
                    with open(diff_filename) as diff_file:
                        logging_func(diff_file.read().strip())

        # Exit the mainloop if there are too many errors (at least
        # max_consecutive_failures consecutive failures)
        if metrics and max_consecutive_failures \
                and process != SKIP_EXECUTION and metrics[
                    'max_consecutive_failures'] >= max_consecutive_failures:
            raise TooManyErrors
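
The consecutive-failure counter is the part that eventually triggers TooManyErrors: real failures increment it, XFAIL and skipped statuses leave it alone, and anything else resets it. A standalone sketch of that counting; the status tuples are illustrative stand-ins for the testsuite's FAIL_STATUS, XFAIL_STATUS and SKIP_STATUS constants:

def update_consecutive_failures(count, status,
                                fail_status=('DIFF', 'CRASH', 'XFAIL'),
                                xfail_status=('XFAIL',),
                                skip_status=('DEAD', 'SKIP')):
    # Mirror the branch structure used in collect_result above.
    if status in fail_status:
        if status not in xfail_status:
            return count + 1    # one more consecutive failure
        return count            # XFAIL is ignored
    elif status in skip_status:
        return count            # DEAD / SKIP tests do not break the streak
    else:
        return 0                # a passing test resets the counter

count = 0
for status in ('DIFF', 'CRASH', 'DEAD', 'OK', 'DIFF'):  # made-up sequence
    count = update_consecutive_failures(count, status)
print(count)  # 1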
Example #7
    def analyze(self, ignore_white_chars=True):
        """Compute test status.

        :param ignore_white_chars: in the default driver difference in white
            chars are ignored. This parameter allow the user to change that
            behavior. In that case the user should override the analyze method
            in its own driver and call this method with ignore_white_chars set
            to False.
        :type ignore_white_chars: bool

        This method should set the final value of 'result' attribute
        """
        # Retrieve the outputs and see if we match some of the CRASH or DEAD
        # patterns
        output = split_file(self.output, ignore_errors=True)
        if output:
            tmp = "\n".join(output)
            for pattern in self.get_status_filter():
                if re.search(pattern[0], tmp):
                    self.result.update(pattern[1])
                    break

        # If the test status has not been updated, compare the output with
        # the baseline
        if self.result['result'] == 'UNKNOWN':
            # Retrieve expected output
            expected = split_file(self.opt_results['OUT'], ignore_errors=True)

            # Process output and expected output with registered filters
            expected = self.apply_output_filter(expected)
            output = self.apply_output_filter(output)

            # Save the filtered output (developers might need it to create
            # baselines more easily).
            echo_to_file(self.output_filtered, output)

            d = diff(expected, output, ignore_white_chars=ignore_white_chars)
            if d:
                logging.debug(d)
                self.result['result'] = 'DIFF'
                if len(expected) == 0:
                    self.result['msg'] = 'unexpected output'
                else:
                    self.result['msg'] = 'output'
                with open(self.diff_output, 'w') as diff_file:
                    diff_file.write(d)
            else:
                self.result = {'result': 'OK', 'msg': '', 'is_failure': False}

        self.result['is_failure'] = IS_STATUS_FAILURE[self.result['result']]

        # self.opt_results['XFAIL'] contains the XFAIL comment or False
        # The status should be set to XFAIL even if the comment is empty
        if not isinstance(self.opt_results['XFAIL'], bool) or \
                self.opt_results['XFAIL']:
            if self.result['result'] in ['DIFF', 'CRASH']:
                self.result.update({
                    'result': 'XFAIL',
                    'msg': self.opt_results['XFAIL']
                })
            elif self.result['result'] == 'OK':
                self.result.update({
                    'result': 'UOK',
                    'msg': self.opt_results['XFAIL']
                })
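
get_status_filter is expected to return (regex, result-update) pairs that are searched against the joined output, with the first match winning. A small sketch of that loop; the patterns and output below are hypothetical:

import re

# Hypothetical status filters, in the spirit of get_status_filter's output.
status_filters = [
    (r'Segmentation fault', {'result': 'CRASH', 'msg': 'segfault'}),
    (r'Cannot find source', {'result': 'DEAD', 'msg': 'missing source'}),
]

result = {'result': 'UNKNOWN', 'msg': ''}
output_lines = ['gcc: error', 'Segmentation fault']   # made-up output
tmp = '\n'.join(output_lines)

for pattern, update in status_filters:
    if re.search(pattern, tmp):
        result.update(update)
        break

print(result)  # {'result': 'CRASH', 'msg': 'segfault'}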
Example #8
    def compute_cmd_line_cmd(self, filesize_limit):
        """Compute self.cmd_line and preprocess the test script.

        This function is called by compute_cmd_line
        """
        cmd = self.opt_results['CMD']
        if Env().host.os.name != 'windows':
            script = split_file(cmd)

            # The test is run on a Unix system but has a 'cmd' syntax.
            # Convert it to Bourne shell syntax.
            cmdfilter = Filter()
            cmdfilter.append([r'-o(.*).exe', r'-o \1'])
            cmdfilter.append([r'%([^ ]*)%', r'"$\1"'])
            cmdfilter.append([r'(\032|\015)', r''])
            cmdfilter.append(
                [r'set *([^ =]+) *= *([^ ]*)', r'\1="\2"; export \1'])
            script = cmdfilter.process(script)

            cmd = self.work_dir + '/__test.sh'
            echo_to_file(cmd, 'PATH=.:$PATH; export PATH\n')

            # Compute the effective file size limit on Unix systems.
            if filesize_limit > 0:
                # The file size limit can be specified either by a default or
                # by means of the FILESIZE_LIMIT command in the test's
                # test.opt. When both are specified, use the upper limit (note
                # that 0 means unlimited).
                opt_limit = self.opt_results['FILESIZE_LIMIT']
                if opt_limit is not None:
                    try:
                        opt_limit = int(opt_limit)
                    except TypeError:
                        opt_limit = filesize_limit
                else:
                    opt_limit = filesize_limit

                if opt_limit != 0:
                    if filesize_limit < opt_limit:
                        filesize_limit = opt_limit

                    # Limit the file size. The argument to ulimit is a number
                    # of 512-byte blocks, so multiply the user-provided value
                    # by two. File size limits are not supported on Windows.
                    echo_to_file(cmd, 'ulimit -f %s\n' % (filesize_limit * 2),
                                 True)

            # Source support.sh in TEST_SUPPORT_DIR if set
            if 'TEST_SUPPORT_DIR' in os.environ and os.path.isfile(
                    os.environ['TEST_SUPPORT_DIR'] + '/support.sh'):
                echo_to_file(cmd, '. $TEST_SUPPORT_DIR/support.sh\n', True)

            echo_to_file(cmd, script, True)

            self.cmd_line += ['bash', cmd]
        else:
            # On Windows systems, use cmd.exe to run the script.
            if cmd[-4:] != '.cmd':
                # We are about to use cmd.exe to run a test. In this case,
                # ensure that the file extension is .cmd; otherwise a dialog
                # box will pop up asking which program should be used to run
                # the script.
                cp(cmd, self.work_dir + '/test__.cmd')
                cmd = self.work_dir + '/test__.cmd'

            self.cmd_line += ['cmd.exe', '/q', '/c', cmd]
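
The Filter chain above rewrites Windows cmd idioms into Bourne shell: the .exe suffix after -o is dropped, %VAR% becomes "$VAR", CR and EOF characters are stripped, and set X=Y becomes an assignment plus export. Filter itself is provided by the surrounding testsuite code; the same rewrites can be sketched with plain re.sub:

import re

# The same substitutions as the Filter above, applied with plain re.sub.
RULES = [
    (r'-o(.*).exe', r'-o \1'),
    (r'%([^ ]*)%', r'"$\1"'),
    (r'(\032|\015)', r''),
    (r'set *([^ =]+) *= *([^ ]*)', r'\1="\2"; export \1'),
]

def cmd_to_sh(lines):
    converted = []
    for line in lines:
        for pattern, repl in RULES:
            line = re.sub(pattern, repl, line)
        converted.append(line)
    return converted

# Illustrative input (not taken from a real test):
print(cmd_to_sh(['set OBJ_DIR=obj', 'gcc main.c -omain.exe', 'echo %OBJ_DIR%']))
# ['OBJ_DIR="obj"; export OBJ_DIR', 'gcc main.c -o main', 'echo "$OBJ_DIR"']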
Example #9
    def execute(self):
        """Complete test execution.

        Calls all the steps that are needed to run the test.
        """
        if self.skip:
            logging.debug("SKIP %s - failed only mode" % self.test)
            return

        # Adjust test context
        self.adjust_to_context()

        for key in ('CMD', 'OUT'):
            # Read command file and expected output from working directory
            self.opt_results[key] = self.work_dir + '/src/' + \
                self.opt_results[key]

        # Keep track of the discriminants that activate the test
        if self.opt_results['NOTE']:
            echo_to_file(self.result_prefix + '.note',
                         '(' + self.opt_results['NOTE'] + ')\n')

        # If a test is either DEAD or SKIP then do not execute it. The only
        # difference is that a SKIP test will appear in the report whereas a
        # DEAD test won't.

        for opt_cmd in ('DEAD', 'SKIP'):
            if self.opt_results[opt_cmd] is not None:
                echo_to_file(self.result_prefix + '.result',
                             opt_cmd + ':%s\n' % self.opt_results[opt_cmd])
                return

        if self.result['result'] != 'UNKNOWN':
            self.write_results()
            return

        # Run the test
        self.prepare_working_space()
        self.compute_cmd_line()

        if self.opt_results['TIMING'] is not None:
            start_time = time.time()

        try:
            self.run()
        except KeyboardInterrupt:
            self.result['result'] = 'CRASH'
            self.result['msg'] = 'User interrupt'
            self.write_results()
            raise

        if self.opt_results['TIMING'] is not None:
            self.opt_results['TIMING'] = time.time() - start_time

        # Analyze the results and write them into result_dir
        self.set_output_filter()
        self.analyze()
        self.write_results()

        # Clean the working space
        if self.enable_cleanup:
            self.clean()
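
When TIMING is requested, execute() brackets run() with time.time() and write_results() (Example #5) stores the elapsed seconds in the .time file that the collector of Example #1 reads back. A minimal sketch of that round trip, with a hypothetical path and a sleep standing in for the test run:

import time

start_time = time.time()
time.sleep(0.05)                          # stand-in for self.run()
duration = time.time() - start_time

with open('my_test.time', 'w') as f:      # hypothetical result prefix
    f.write(str(duration) + '\n')

with open('my_test.time', 'r') as f:      # what Example #1's collector does
    print(float(f.read()))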