Example #1
    def analyze(self):
        rewrite = (self.global_env['options'].rewrite
                   and not self.expect_failure)
        failures = []

        # Check for the test output itself
        diff = fileutils.diff(self.expected_file,
                              self.output_file,
                              ignore_white_chars=False)
        if diff:
            if rewrite:
                new_baseline = self.read_file(self.output_file)
                with open(self.original_expected_file, 'w') as f:
                    f.write(new_baseline)
            self.result.actual_output += diff
            failures.append('output is not as expected{}'.format(
                ' (baseline updated)' if rewrite else ''))

        # Check memory issues if asked to
        if self.valgrind_errors:
            self.result.actual_output += (
                'Valgrind reported the following errors:\n{}'.format(
                    self.valgrind.format_report(self.valgrind_errors)))
            failures.append('memory issues detected')

        if failures:
            self.set_failure(' | '.join(failures))
        else:
            self.set_passed()
Example #2
    def analyze(self):
        rewrite = (self.global_env['options'].rewrite
                   and not self.expect_failure)
        failures = []

        # Check for the test output itself
        diff = fileutils.diff(self.expected_file, self.output_file,
                              ignore_white_chars=False)
        if diff:
            if rewrite:
                new_baseline = self.read_file(self.output_file)
                with open(self.original_expected_file, 'w') as f:
                    f.write(new_baseline)
            self.result.actual_output += diff
            failures.append('output is not as expected{}'.format(
                ' (baseline updated)' if rewrite else ''
            ))

        # Check memory issues if asked to
        if self.valgrind_errors:
            self.result.actual_output += (
                'Valgrind reported the following errors:\n{}'.format(
                    self.valgrind.format_report(self.valgrind_errors)
                )
            )
            failures.append('memory issues detected')

        if failures:
            self.set_failure(' | '.join(failures))
        else:
            self.set_passed()
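
Several of these drivers gate baseline rewriting on self.global_env['options'].rewrite. The exact plumbing depends on the testsuite framework these excerpts come from, but a minimal, hypothetical sketch of how such a flag could be exposed with the standard library's argparse looks like this (the option name and the global_env layout are assumptions, not the framework's actual API):

    # Hypothetical wiring for the rewrite flag read by the drivers above.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--rewrite', action='store_true',
        help='update baselines in place instead of reporting diffs')
    options = parser.parse_args(['--rewrite'])

    # Drivers would then see the flag through a shared environment:
    global_env = {'options': options}
    assert global_env['options'].rewrite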
Example #3
    def analyze(self):
        diff = fileutils.diff(self.test_working_dir(self.expected_file),
                              self.test_working_dir(self.out_file))

        rewrite = (self.global_env['options'].rewrite
                   and not self.expect_failure)

        # Determine the status of this test, ignoring expected failure (for
        # now).
        if diff:
            if rewrite:
                new_baseline = self.read_file(
                    self.test_working_dir(self.out_file)
                )
                self.write_file(
                    self.test_orig_dir(self.expected_file), new_baseline
                )

            self.result.actual_output += diff
            failed = True
            message = 'diff in output'
        else:
            failed = False
            message = ''

        self.set_result_status(failed, message)
Example #4
    def diff(self, expected_out):
        """Return self.out followed by a diff self.cmd_out and expected_out.

        PARAMETERS
            expected_out: A string containing the expected output.
        """
        diff_str = diff(expected_out.splitlines(), self.cmd_out.splitlines())
        return '%s\n\nDiff:\n\n%s' % (self.image, diff_str)
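
Examples #4, #5, #8 and #13 call a bare diff() helper on two lists of lines without showing its definition (Example #13 additionally passes an ignore_white_chars flag, which the sketch below omits). As a stand-in, a helper with the same shape can be built on the standard library's difflib; this is an assumption about the real helper's behavior (empty string when the inputs match, a diff string otherwise), not its actual implementation:

    # Assumed stand-in for the undefined diff() helper used in these
    # examples: takes two lists of lines and returns '' when they match,
    # otherwise a unified-diff string.
    import difflib

    def diff(expected_lines, actual_lines):
        return '\n'.join(difflib.unified_diff(
            expected_lines, actual_lines,
            fromfile='expected', tofile='actual', lineterm=''))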
Example #5
    def assertOutputEqual(self, expected, actual):
        """same as assertEqual but for strings and with a diff if not equal.

        This a convenience function that allows us to quickly diagnose
        what's wrong when the output does not match...
        """
        self.assertEqual(
            expected, actual,
            "Diff:\n\n%s" % diff(expected.splitlines(), actual.splitlines()))
Example #6
    def analyze(self):
        # Check for the test output itself
        diff = fileutils.diff(self.expected_file,
                              self.output_file,
                              ignore_white_chars=False)
        if diff:
            self.set_failure('output is not as expected')
            self.result.actual_output += diff
        else:
            self.set_passed()
Example #7
    def analyze(self):
        diff = fileutils.diff(self.test_working_dir(self.expected_file),
                              self.test_working_dir(self.out_file))

        # Determine the status of this test, ignoring expected failure (for
        # now).
        if diff:
            self.result.actual_output += diff
            failed = True
            message = 'diff in output'
        else:
            failed = False
            message = ''

        self.set_result_status(failed, message)
Example #8
    def analyze_diff(self,
                     expected=None,
                     actual=None,
                     strip_cr=True,
                     replace_backslashes=True):
        """Set status based on diff analysis.

        If there is no difference test status is set to PASSED, otherwise
        it is set to FAILED. diff string is stored in self.result.diff.

        :param expected: if None then self.result.expected_output is taken
          otherwise parameter expected is used
        :type expected: str | None
        :param actual: if None then self.result.actual_output is taken
          otherwise parameter actual is used
        :type actual: str | None
        :param strip_cr: if True, strip cr from both expected and actual
        :type strip_cr: bool
        :param replace_backslashes: if True, replace backslashes by slashes
            in expected and actual
        :type replace_backslashes: bool
        """
        if expected is None:
            expected = self.result.expected_output

        if actual is None:
            actual = self.result.actual_output

        if strip_cr:
            actual = actual.replace('\r', '')
            expected = expected.replace('\r', '')

        for d in self.subst:
            logging.debug('%s -> %s' % (d[0], d[1]))
            expected = re.sub(d[0], d[1], expected)
            actual = re.sub(d[0], d[1], actual)

        if replace_backslashes:
            actual = actual.replace('\\', '/')
            expected = expected.replace('\\', '/')

        self.result.diff = diff(expected.splitlines(), actual.splitlines())

        if self.result.diff:
            self.result.set_status('FAILED', 'output diff')
        else:
            self.result.set_status('PASSED')
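
A hedged usage sketch: the re.sub loop implies self.subst is a list of (pattern, replacement) pairs applied to both outputs before comparison. The driver instance and the patterns below are illustrative assumptions:

    # Hypothetical call site: normalize unstable paths before diffing, so
    # that a run-specific temporary directory does not fail the test.
    driver.subst = [(r'/tmp/[A-Za-z0-9_]+', '<tmpdir>')]
    driver.analyze_diff(
        expected='log written to <tmpdir>/out.txt\n',
        actual='log written to /tmp/run_42/out.txt\n')
    # Both sides normalize to the same string, the diff is empty, and the
    # status is set to PASSED.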
Example #9
    def analyze(self):
        # RA22-015: For the transition to the concrete syntax, we want to
        # check-in and test unparsing results.
        super(PythonDriver, self).analyze()

        rewrite = (self.global_env['options'].rewrite
                   and not self.expect_failure)
        failures = []

        expected_lkt = self.working_dir('expected_concrete_syntax.lkt')
        actual_lkt = self.working_dir('concrete_syntax.lkt')

        if not os.path.exists(actual_lkt):
            return

        # We just open the file in append mode, to create it if it doesn't
        # exist.
        with open(expected_lkt, 'a+'):
            pass

        # Check for the test output itself
        diff = fileutils.diff(expected_lkt,
                              actual_lkt,
                              ignore_white_chars=False)
        if diff:
            if rewrite:
                new_baseline = self.read_file(actual_lkt)
                with open(
                        os.path.join(self.test_dir,
                                     'expected_concrete_syntax.lkt'),
                        'w') as f:
                    f.write(new_baseline)
            self.result.actual_output += diff
            failures.append('output is not as expected{}'.format(
                ' (baseline updated)' if rewrite else ''))

        if failures:
            self.set_failure(' | '.join(failures))
        else:
            self.set_passed()
Example #10
    def analyze(self):
        rewrite = (self.global_env['options'].rewrite
                   and not self.expect_failure)
        failures = []

        # Check for the test output itself
        diff = fileutils.diff(self.expected_file,
                              self.output_file,
                              ignore_white_chars=False)
        if diff:
            if rewrite:
                new_baseline = self.read_file(self.output_file)
                with open(self.original_expected_file, 'w') as f:
                    f.write(new_baseline)
            self.result.actual_output += diff
            failures.append('output is not as expected{}'.format(
                ' (baseline updated)' if rewrite else ''))

        if failures:
            self.set_failure(' | '.join(failures))
        else:
            self.set_passed()
Example #11
    def analyze(self):
        rewrite = (self.global_env['options'].rewrite
                   and not self.expect_failure)
        failures = []

        # Check for the test output itself
        diff = fileutils.diff(self.expected_file, self.output_file,
                              ignore_white_chars=False)
        if diff:
            if rewrite:
                new_baseline = self.read_file(self.output_file)
                with open(self.original_expected_file, 'w') as f:
                    f.write(new_baseline)
            self.result.actual_output += diff
            failures.append('output is not as expected{}'.format(
                ' (baseline updated)' if rewrite else ''
            ))

        if failures:
            self.set_failure(' | '.join(failures))
        else:
            self.set_passed()
Example #12
    def diff(first_file, second_file):
        return fileutils.diff(first_file,
                              second_file,
                              ignore_white_chars=False)
Example #13
    def analyze(self, ignore_white_chars=True):
        """Compute test status.

        :param ignore_white_chars: in the default driver, differences in
            whitespace characters are ignored. This parameter allows the user
            to change that behavior: override the analyze method in your own
            driver and call this method with ignore_white_chars set to False.
        :type ignore_white_chars: bool

        This method should set the final value of the 'result' attribute.
        """
        # Retrieve the outputs and see if we match some of the CRASH or DEAD
        # patterns
        output = split_file(self.output, ignore_errors=True)
        if output:
            tmp = "\n".join(output)
            for pattern in self.get_status_filter():
                if re.search(pattern[0], tmp):
                    self.result.update(pattern[1])
                    break

        # If the test status has not been updated compare output with the
        # baseline
        if self.result['result'] == 'UNKNOWN':
            # Retrieve expected output
            expected = split_file(self.opt_results['OUT'], ignore_errors=True)

            # Process output and expected output with registered filters
            expected = self.apply_output_filter(expected)
            output = self.apply_output_filter(output)

            # Save the filtered output (some developers may need it to create
            # baselines more easily).
            echo_to_file(self.output_filtered, output)

            d = diff(expected, output, ignore_white_chars=ignore_white_chars)
            if d:
                logging.debug(d)
                self.result['result'] = 'DIFF'
                if len(expected) == 0:
                    self.result['msg'] = 'unexpected output'
                else:
                    self.result['msg'] = 'output'
                with open(self.diff_output, 'w') as diff_file:
                    diff_file.write(d)
            else:
                self.result = {'result': 'OK', 'msg': '', 'is_failure': False}

        self.result['is_failure'] = IS_STATUS_FAILURE[self.result['result']]

        # self.opt_results['XFAIL'] contains the XFAIL comment or False
        # The status should be set to XFAIL even if the comment is empty
        if not isinstance(self.opt_results['XFAIL'], bool) or \
                self.opt_results['XFAIL']:
            if self.result['result'] in ['DIFF', 'CRASH']:
                self.result.update({
                    'result': 'XFAIL',
                    'msg': self.opt_results['XFAIL']
                })
            elif self.result['result'] == 'OK':
                self.result.update({
                    'result': 'UOK',
                    'msg': self.opt_results['XFAIL']
                })
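
Example #13 indexes an IS_STATUS_FAILURE table that the excerpt never defines. A plausible reconstruction, covering only the statuses the method itself mentions (the original project's table may differ):

    # Assumed mapping from test status to "counts as a failure".
    IS_STATUS_FAILURE = {
        'OK': False,
        'UOK': False,
        'XFAIL': False,
        'DEAD': False,
        'UNKNOWN': True,
        'DIFF': True,
        'CRASH': True,
    }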