Esempio n. 1
0
    def fail_if_diff_internal(
        self,
        baseline_file,
        actual,
        failure_message="unexpected output",
        output_refiners=None,
    ):
        """Compute the diff between expected and actual outputs.

        Return an empty list if there is no diff, and return a list that
        contains an error message based on ``failure_message`` otherwise.

        :param baseline_file: Absolute filename for the text file that
            contains the expected content. It is also the rewrite target
            when baseline rewriting is enabled.
        :param actual: Actual content to compare.
        :param failure_message: Failure message to return if there is a
            difference.
        :param output_refiners: List of refiners to apply both to the baseline
            and the actual output. Refer to the doc in
            ``e3.testsuite.driver.diff``. If None, use
            ``self.report_output_refiners()``.
        """
        if output_refiners is None:
            output_refiners = self.report_output_refiners()

        # Run output refiners on the actual output, and on the baseline
        refiners = RefiningChain(output_refiners)
        refined_actual = refiners.refine(actual)
        with open(baseline_file, "r") as f:
            refined_baseline = refiners.refine(f.read())

        # Get the two texts to compare as list of lines, with trailing
        # characters preserved (splitlines(keepends=True)).
        expected_lines = refined_baseline.splitlines(True)
        actual_lines = refined_actual.splitlines(True)

        # Compute the diff. If it is empty, return no failure. Otherwise,
        # include the diff in the test log and return the given failure
        # message.
        d = diff(expected_lines, actual_lines)
        if not d:
            return []

        message = failure_message

        # If requested, rewrite the test baseline with the new output. Note
        # that baseline_file cannot be None here: it was already opened above
        # to read the expected content, so the former None-check was dead.
        if self.options.rewrite:
            with open(baseline_file, "w") as f:
                f.write(refined_actual)
            message = "{} (baseline updated)".format(message)

        # Send the appropriate logging, then honor the documented contract:
        # return a one-element list with the failure message. The original
        # code implicitly returned None here, contradicting both its own
        # docstring and the sibling ``compute_diff`` implementation.
        self.failed("Diff failure: {}\n".format(message) + "\n{}".format(d) +
                    "\n")
        return [message]
Esempio n. 2
0
    def diff(self, expected_out):
        """Return ``self.image`` followed by a diff against expected_out.

        The diff compares ``self.cmd_out`` (actual) with ``expected_out``
        (expected), without ignoring whitespace differences.

        PARAMETERS
            expected_out: A string containing the expected output.
        """
        expected_lines = expected_out.splitlines()
        actual_lines = self.cmd_out.splitlines()
        diff_str = diff(expected_lines, actual_lines, ignore_white_chars=False)
        return "%s\n\nDiff:\n\n%s" % (self.image, diff_str)
Esempio n. 3
0
    def check_run(self, previous_values):
        """Check status fragment.

        Run './test.sh' (under valgrind) in a fresh copy of the test
        directory, compare its output against the 'test.out' baseline, and
        set the test status to PASS/FAIL accordingly.
        """
        # Copy the test material into the working directory so the script
        # runs in isolation from the source tree.
        sync_tree(self.test_env['test_dir'], self.test_env['working_dir'])

        process = check_call_valgrind(self, [self.env.gsh, './test.sh'],
                                      timeout=self.process_timeout)

        with open(os.path.join(self.test_env['test_dir'], 'test.out'),
                  'r') as fd:
            expected_output = fd.read()
        # Normalize the actual output: strip occurrences of the working
        # directory in both native and forward-slash form so that baselines
        # are machine- and platform-independent.
        actual_output = process.out.replace(self.test_env['working_dir'], '')
        actual_output = actual_output.replace('\\', '/')
        actual_output = actual_output.replace(
            unixpath(self.test_env['working_dir']), '')
        # NOTE(review): arguments are (actual, expected) here while sibling
        # drivers pass (expected, actual) — confirm the intended diff
        # direction against the diff helper's signature.
        d = diff(actual_output, expected_output)
        if not d:
            self.result.set_status(TestStatus.PASS)
        else:
            # Attach the diff text to the FAIL status for the report.
            self.result.set_status(TestStatus.FAIL, d)
        self.push_result()
Esempio n. 4
0
    def analyze(self, prev):
        """Compare the test output against the 'test.out' baseline.

        Set the test status to PASS when there is no diff; otherwise log the
        diff (rewriting the baseline first when rewrite mode is enabled) and
        set the status to FAIL. Finally, push the result.
        """
        test_diff = diff.diff(self.test_dir('test.out'),
                              self.result.out.log.splitlines())
        if test_diff == '':
            self.result.set_status(TestStatus.PASS)
        else:
            rewrite = self.env.options.rewrite

            # Rewrite mode: replace the "test.out" baseline with the new
            # output before reporting the failure.
            if rewrite:
                with open(self.test_dir('test.out'), 'w') as f:
                    f.write(self.result.out.log)

            # Log the error diff
            logging.error("Diff in test")
            logging.error(test_diff)
            if rewrite:
                logging.info(
                    "Rewritten test '{}'".format(self.test_env['test_name'])
                )
            self.result.set_status(TestStatus.FAIL)

        self.push_result()
Esempio n. 5
0
    def compute_diff(
        self,
        baseline_file: Optional[str],
        baseline: AnyStr,
        actual: AnyStr,
        failure_message: str = "unexpected output",
        ignore_white_chars: Optional[bool] = None,
        truncate_logs_threshold: Optional[int] = None,
    ) -> List[str]:
        """Compute the diff between expected and actual outputs.

        Return an empty list if there is no diff, and return a list that
        contains an error message based on ``failure_message`` otherwise.

        :param baseline_file: Absolute filename for the text file that contains
            the expected content (for baseline rewriting, if enabled), or None.
        :param baseline: Expected content to compare against.
        :param actual: Actual content to compare.
        :param failure_message: Failure message to return if there is a
            difference.
        :param ignore_white_chars: Whether to ignore whitespaces during the
            diff computation. If left to None, use
            ``self.diff_ignore_white_chars``.
        :param truncate_logs_threshold: Threshold to truncate the diff message
            in ``self.result.log``. See ``e3.testsuite.result.truncated``'s
            ``line_count`` argument. If left to None, use the testsuite's
            ``--truncate-logs`` option.
        """
        if ignore_white_chars is None:
            ignore_white_chars = self.diff_ignore_white_chars

        if truncate_logs_threshold is None:
            truncate_logs_threshold = self.testsuite_options.truncate_logs

        # Run output refiners on the actual output, not on the baseline
        # (the baseline is refined only when self.refine_baseline is set).
        # The chain is parametrized on str/bytes to match the actual output.
        refiners = (RefiningChain[str](self.output_refiners) if isinstance(
            actual, str) else RefiningChain[bytes](self.output_refiners))
        refined_actual = refiners.refine(actual)
        refined_baseline = (refiners.refine(baseline)
                            if self.refine_baseline else baseline)

        # When running in binary mode, make sure the diff runs on text strings
        # (binary_repr turns bytes into a printable representation).
        if self.default_encoding == "binary":
            assert isinstance(refined_actual, bytes)
            assert isinstance(refined_baseline, bytes)
            decoded_actual = binary_repr(refined_actual)
            decoded_baseline = binary_repr(refined_baseline)
        else:
            assert isinstance(refined_actual, str)
            assert isinstance(refined_baseline, str)
            decoded_actual = refined_actual
            decoded_baseline = refined_baseline

        # Get the two texts to compare as list of lines, with trailing
        # characters preserved (splitlines(keepends=True)).
        expected_lines = decoded_baseline.splitlines(True)
        actual_lines = decoded_actual.splitlines(True)

        # Compute the diff. If it is empty, return no failure. Otherwise,
        # include the diff in the test log and return the given failure
        # message.
        d = diff(expected_lines,
                 actual_lines,
                 ignore_white_chars=ignore_white_chars)
        if not d:
            return []

        # Count this failing diff: the counter decides below whether the
        # "expected"/"out" logs can still be attached (they support only
        # one diff per testcase).
        self.failing_diff_count += 1
        message = failure_message

        # Colorize the diff for the log: removals in red, additions in
        # green, hunk headers in cyan.
        diff_lines = []
        for line in d.splitlines():
            # Add colors diff lines
            if line.startswith("-"):
                color = self.Fore.RED
            elif line.startswith("+"):
                color = self.Fore.GREEN
            elif line.startswith("@"):
                color = self.Fore.CYAN
            else:
                color = ""
            diff_lines.append(color + line + self.Style.RESET_ALL)

        # If requested and the failure is not expected, rewrite the test
        # baseline with the new one. Preserve the actual output's str/bytes
        # nature when writing the file.
        if (baseline_file is not None and not self.test_control.xfail
                and getattr(self.env, "rewrite_baselines", False)):
            if isinstance(refined_actual, str):
                with open(baseline_file, "w",
                          encoding=self.default_encoding) as f:
                    f.write(refined_actual)
            else:
                assert isinstance(refined_actual, bytes)
                with open(baseline_file, "wb") as f:
                    f.write(refined_actual)
            message = "{} (baseline updated)".format(message)

        # Send the appropriate logging. Make sure ".log" has all the
        # information. If there are multiple diff failures for this testcase,
        # do not emit the "expected/out" logs, as they support only one diff.
        diff_log = (self.Style.RESET_ALL + self.Style.BRIGHT +
                    "Diff failure: {}\n".format(message) +
                    "\n".join(diff_lines) + "\n")
        self.result.log += "\n" + truncated(diff_log, truncate_logs_threshold)
        if self.failing_diff_count == 1:
            # First diff failure: attach the full expected/actual/diff logs.
            self.result.expected = Log(decoded_baseline)
            self.result.out = Log(decoded_actual)
            self.result.diff = Log(diff_log)
        else:
            # Subsequent failures: drop expected/out (ambiguous with several
            # diffs) and append this diff to the accumulated diff log.
            self.result.expected = None
            self.result.out = None
            assert isinstance(self.result.diff, Log) and isinstance(
                self.result.diff.log, str)
            self.result.diff += "\n" + diff_log

        return [message]
Esempio n. 6
0
from testsuite_support.builder_and_runner import BuilderAndRunner, GPRNAME, GPRLS, \
                                                 GPRINSTALL, GPRCLEAN
from e3.diff import diff

# Shared driver used by every scenario below to spawn the gpr tools.
bnr = BuilderAndRunner()


def run(args):
    """Run *args* through the shared builder-and-runner and print its output."""
    result = bnr.check_output(args)
    print(result.out)


# gpr2name -P switch support
try:
    os.chdir('P-switch-support')
    run([GPRNAME, '-Pprj.gpr', '*.ada'])
    # Compare the generated project against the expected one; the diff text
    # (empty when identical) goes straight to the test output.
    print(diff('prj.expected', 'prj.gpr'))
    # Best-effort: return codes are deliberately ignored, the test's own
    # output is what gets checked.
    subprocess.run("gprbuild -p -q -P prj.gpr main.2.ada -o main", shell=True)
    subprocess.run("./main")
except Exception as E:
    # Unexpected exception.  Just print the information we have.
    print('*** Error: %s' % str(E))
finally:
    # Always restore the starting directory so later scenarios run from
    # the right place even when this one fails.
    os.chdir('..')

# non ada sources support
try:
    os.chdir('non-ada-sources')
    run([GPRNAME, '-Pprj.gpr', '*.ada', '-f', '*.c', '-f:c', '*.clang'])
    subprocess.run("gprbuild -p -q -P prj.gpr main.ada", shell=True)
    subprocess.run("./main")
except Exception as E: