Example 1
    def run_wrapper(self, prev: Dict[str, Any], slot: int) -> None:
        # Make the slot (unique identifier for active jobs at a specific time)
        # available to the overridable methods.
        self.slot = slot

        # Make colors available for output if enabled testsuite-wide
        if self.env.enable_colors:  # interactive-only
            self.Fore = Fore
            self.Style = Style
        else:
            self.Fore = DummyColors()
            self.Style = DummyColors()

        # Create a test control for this test...
        try:
            self.test_control = self.test_control_creator.create(self)
        except ValueError as exc:
            return self.push_error(
                "Error while interpreting control: {}".format(exc))

        # If test control tells us to skip the test, stop right here. Note
        # that if we have both skip and xfail, we must not execute the test
        # but still consider it an expected failure (not just "skipped").
        if self.test_control.skip:
            if self.test_control.xfail:
                return self.push_failure(self.test_control.message)
            else:
                return self.push_skip(self.test_control.message)

        # If requested, prepare the test working directory to initially be a
        # copy of the test directory.
        if self.copy_test_directory:
            sync_tree(
                self.test_env["test_dir"],
                self.test_env["working_dir"],
                delete=True,
            )

        # If the requested encoding is "binary", this actually means we will
        # handle binary data (i.e. no specific encoding). Create a binary log
        # accordingly.
        self.output = (Log(b"")
                       if self.default_encoding == "binary" else Log(""))

        # Execute the subclass' "run" method and handle convenience test
        # aborting exception.
        try:
            self.set_up()
            self.run()
            self.analyze()
        except TestSkip as exc:
            return self.push_skip(str(exc))
        except TestAbortWithError as exc:
            return self.push_error(str(exc))
        except TestAbortWithFailure as exc:
            return self.push_failure(str(exc))
        finally:
            self.tear_down()
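
For reference, here is a minimal driver built on this lifecycle; this is a sketch assuming the ClassicTestDriver base class from e3.testsuite (whose run_wrapper is shown above) and its shell helper:

from e3.testsuite.driver.classic import (
    ClassicTestDriver,
    TestAbortWithFailure,
)


class EchoDriver(ClassicTestDriver):
    # Sketch: run_wrapper drives set_up/run/analyze/tear_down, so only
    # "run" needs to be provided here.
    def run(self):
        # shell() aborts the test on a non-zero status code by default
        p = self.shell(["echo", "hello"])
        if "hello" not in p.out:
            raise TestAbortWithFailure("unexpected output")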
Example 2
def check_call(driver: TestDriver,
               cmd: List[str],
               test_name: Optional[str] = None,
               result: Optional[TestResult] = None,
               **kwargs: Any) -> Run:
    if "cwd" not in kwargs and "working_dir" in driver.test_env:
        kwargs["cwd"] = driver.test_env["working_dir"]
    if result is None:
        result = driver.result
    if test_name is None:
        test_name = driver.test_name
    process = Run(cmd, **kwargs)
    result.processes.append({
        "output": Log(process.out),
        "status": process.status,
        "cmd": cmd,
        "run_args": kwargs,
    })

    # Append the status code and process output to the log to ease post-mortem
    # investigation.
    result.log += "Status code: {}\n".format(process.status)
    result.log += "Output:\n"
    result.log += process.out

    if process.status != 0:
        result.set_status(TestStatus.FAIL, "command call fails")
        driver.push_result(result)
        raise TestAbort
    return process
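
A sketch of a typical call site, from a driver's run method (command lines are illustrative); on failure, TestAbort propagates after the FAIL result has been pushed:

# Sketch: build, then run a test program. check_call raises TestAbort on
# a non-zero exit status, so no explicit status handling is needed here.
check_call(driver, ["gprbuild", "-q", "-P", "test.gpr"])
process = check_call(driver, ["./main"])
driver.result.log += process.out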
Example 3
def check_call(driver, cmd, test_name=None, result=None, **kwargs):
    if 'cwd' not in kwargs and 'working_dir' in driver.test_env:
        kwargs['cwd'] = driver.test_env['working_dir']
    process = Run(cmd, **kwargs)
    if result is None:
        result = driver.result
    if test_name is None:
        test_name = driver.test_name
    result.processes.append({
        'output': Log(process.out),
        'status': process.status,
        'cmd': cmd,
        'run_args': kwargs
    })
    result.out += process.out

    # Append the status code and process output to the log to ease post-mortem
    # investigation.
    result.log += 'Status code: {}\n'.format(process.status)
    result.log += 'Output:\n'
    result.log += process.out

    if process.status != 0:
        result.set_status(TestStatus.FAIL, 'command call fails')
        driver.push_result(result)
        raise TestAbort
    return process
Example 4
    def run_and_log(self, cmd, **kwargs):
        """
        Wrapper around e3.os.process.Run to log processes.

        Logging the processes that are run in each testcase is very useful for
        debugging.
        """

        # If code coverage is requested, leave a chance to gnatcov to decorate
        # the execution of the subprogram in order to make it contribute to
        # code coverage.
        if self.env.gnatcov:
            kwargs = self.env.gnatcov.decorate_run(self, kwargs)

        process = Run(cmd, **kwargs)

        self.result.processes.append({
            'cmd': cmd,
            'run_args': kwargs,
            'status': process.status,
            'output': Log(process.out)
        })
        self.result.out += process.out

        return process
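
Call sites can then use it as a drop-in replacement for constructing Run directly; a sketch (command line and status handling are illustrative):

# Sketch: the returned Run object exposes .status and .out as usual,
# while the invocation is also recorded in self.result.processes.
process = self.run_and_log(["gprbuild", "-q", "-P", "prj.gpr"])
if process.status != 0:
    self.result.set_status(TestStatus.FAIL, "build failed")
    self.push_result()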
Example 5
def create_result(name, status, msg="", log="", diff=None, time=None):
    """Create a TestResult instance."""
    result = Result(name, status=status, msg=msg)
    result.log += log
    if diff is not None:
        result.diff = Log(diff)
    result.time = time
    return result
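
For instance, with TestStatus from e3.testsuite.result (names and values are illustrative):

# Sketch: a passing result with a timing, and a failing one carrying a
# diff between expected and actual outputs.
ok = create_result("unit-a", TestStatus.PASS, time=0.25)
ko = create_result(
    "unit-b", TestStatus.FAIL, msg="output mismatch",
    diff="-expected\n+actual\n",
)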
Example 6
def bin_check_call(driver,
                   cmd,
                   test_name=None,
                   result=None,
                   timeout=None,
                   env=None,
                   cwd=None):
    if cwd is None and "working_dir" in driver.test_env:
        cwd = driver.test_env["working_dir"]
    if result is None:
        result = driver.result
    if test_name is None:
        test_name = driver.test_name
    if timeout is not None:
        cmd = [get_rlimit(), str(timeout)] + cmd

    # Use subprocess directly instead of e3.os.process.Run, since the latter
    # does not handle binary outputs.
    subp = subprocess.Popen(cmd,
                            cwd=cwd,
                            env=env,
                            stdin=subprocess.DEVNULL,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    stdout, _ = subp.communicate()
    process = ProcessResult(subp.returncode, stdout)
    result.processes.append({
        "output": Log(stdout),
        "status": process.status,
        "cmd": cmd,
        "timeout": timeout,
        "env": env,
        "cwd": cwd,
    })

    # Append the status code and process output to the log to ease post-mortem
    # investigation.
    result.log += "Status code: {}\n".format(process.status)
    result.log += "Output:\n"
    try:
        stdout = stdout.decode('utf-8')
    except UnicodeDecodeError:
        stdout = str(stdout)
    result.log += stdout

    if process.status != 0:
        if isinstance(driver, ClassicTestDriver):
            raise TestAbortWithFailure('command call fails')
        else:
            result.set_status(TestStatus.FAIL, "command call fails")
            driver.push_result(result)
            raise TestAbort
    return process
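
Because the output is kept as raw bytes, this helper suits programs whose output has no reliable encoding; a sketch of a call site (the tool name is illustrative):

# Sketch: run a tool under a 10-second rlimit-based timeout. On a
# non-zero status, the helper pushes a FAIL result and aborts the test.
process = bin_check_call(driver, ["./dump_binary"], timeout=10)
raw_output = process.out  # assumed to be the undecoded bytes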
Example 7
def create_result(
    name,
    status,
    msg="",
    log="",
    diff=None,
    time=None,
    failure_reasons=None,
):
    """Create a TestResult instance."""
    result = Result(name, status=status, msg=msg)
    result.log += log
    if diff is not None:
        result.diff = Log(diff)
    result.time = time
    if failure_reasons:
        result.failure_reasons.update(failure_reasons)
    return result
Example 8
def check_call(driver, cmd, test_name=None, result=None, **kwargs):
    if 'cwd' not in kwargs and 'working_dir' in driver.test_env:
        kwargs['cwd'] = driver.test_env['working_dir']
    process = Run(cmd, **kwargs)
    if result is None:
        result = driver.result
    if test_name is None:
        test_name = driver.test_name
    result.processes.append({
        'output': Log(process.out),
        'status': process.status,
        'cmd': cmd,
        'run_args': kwargs
    })
    result.out += process.out

    if process.status != 0:
        result.set_status(TestStatus.FAIL, 'command call fails')
        driver.push_result(result)
        raise TestAbort
    return process
Example 9
    def compute_diff(
        self,
        baseline_file: Optional[str],
        baseline: AnyStr,
        actual: AnyStr,
        failure_message: str = "unexpected output",
        ignore_white_chars: Optional[bool] = None,
        truncate_logs_threshold: Optional[int] = None,
    ) -> List[str]:
        """Compute the diff between expected and actual outputs.

        Return an empty list if there is no diff, and return a list that
        contains an error message based on ``failure_message`` otherwise.

        :param baseline_file: Absolute filename for the text file that contains
            the expected content (for baseline rewriting, if enabled), or None.
        :param baseline: Expected content to compare.
        :param actual: Actual content to compare.
        :param failure_message: Failure message to return if there is a
            difference.
        :param ignore_white_chars: Whether to ignore whitespace during the
            diff computation. If left to None, use
            ``self.diff_ignore_white_chars``.
        :param truncate_logs_threshold: Threshold to truncate the diff message
            in ``self.result.log``. See ``e3.testsuite.result.truncated``'s
            ``line_count`` argument. If left to None, use the testsuite's
            ``--truncate-logs`` option.
        """
        if ignore_white_chars is None:
            ignore_white_chars = self.diff_ignore_white_chars

        if truncate_logs_threshold is None:
            truncate_logs_threshold = self.testsuite_options.truncate_logs

        # Run output refiners on the actual output, not on the baseline
        refiners = (RefiningChain[str](self.output_refiners) if isinstance(
            actual, str) else RefiningChain[bytes](self.output_refiners))
        refined_actual = refiners.refine(actual)
        refined_baseline = (refiners.refine(baseline)
                            if self.refine_baseline else baseline)

        # When running in binary mode, make sure the diff runs on text strings
        if self.default_encoding == "binary":
            assert isinstance(refined_actual, bytes)
            assert isinstance(refined_baseline, bytes)
            decoded_actual = binary_repr(refined_actual)
            decoded_baseline = binary_repr(refined_baseline)
        else:
            assert isinstance(refined_actual, str)
            assert isinstance(refined_baseline, str)
            decoded_actual = refined_actual
            decoded_baseline = refined_baseline

        # Get the two texts to compare as list of lines, with trailing
        # characters preserved (splitlines(keepends=True)).
        expected_lines = decoded_baseline.splitlines(True)
        actual_lines = decoded_actual.splitlines(True)

        # Compute the diff. If it is empty, return no failure. Otherwise,
        # include the diff in the test log and return the given failure
        # message.
        d = diff(expected_lines,
                 actual_lines,
                 ignore_white_chars=ignore_white_chars)
        if not d:
            return []

        self.failing_diff_count += 1
        message = failure_message

        diff_lines = []
        for line in d.splitlines():
            # Add colors to the diff lines
            if line.startswith("-"):
                color = self.Fore.RED
            elif line.startswith("+"):
                color = self.Fore.GREEN
            elif line.startswith("@"):
                color = self.Fore.CYAN
            else:
                color = ""
            diff_lines.append(color + line + self.Style.RESET_ALL)

        # If requested and the failure is not expected, rewrite the test
        # baseline with the new one.
        if (baseline_file is not None and not self.test_control.xfail
                and getattr(self.env, "rewrite_baselines", False)):
            if isinstance(refined_actual, str):
                with open(baseline_file, "w",
                          encoding=self.default_encoding) as f:
                    f.write(refined_actual)
            else:
                assert isinstance(refined_actual, bytes)
                with open(baseline_file, "wb") as f:
                    f.write(refined_actual)
            message = "{} (baseline updated)".format(message)

        # Send the appropriate logging. Make sure ".log" has all the
        # information. If there are multiple diff failures for this testcase,
        # do not emit the "expected/out" logs, as they support only one diff.
        diff_log = (self.Style.RESET_ALL + self.Style.BRIGHT +
                    "Diff failure: {}\n".format(message) +
                    "\n".join(diff_lines) + "\n")
        self.result.log += "\n" + truncated(diff_log, truncate_logs_threshold)
        if self.failing_diff_count == 1:
            self.result.expected = Log(decoded_baseline)
            self.result.out = Log(decoded_actual)
            self.result.diff = Log(diff_log)
        else:
            self.result.expected = None
            self.result.out = None
            assert isinstance(self.result.diff, Log) and isinstance(
                self.result.diff.log, str)
            self.result.diff += "\n" + diff_log

        return [message]
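
A sketch of how an analyze method might use this, assuming a test_dir helper that joins paths under the test directory (a typical but hypothetical setup):

def analyze(self):
    # Sketch: compare the captured output against the baseline file;
    # compute_diff returns failure messages (an empty list means a match).
    baseline_file = self.test_dir("test.out")
    with open(baseline_file, encoding="utf-8") as f:
        baseline = f.read()
    messages = self.compute_diff(baseline_file, baseline, str(self.output))
    if messages:
        raise TestAbortWithFailure(messages[0])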
Example 10
    def shell(self,
              args: List[str],
              cwd: Optional[str] = None,
              env: Optional[Dict[str, str]] = None,
              catch_error: bool = True,
              analyze_output: bool = True,
              timeout: Optional[int] = None,
              encoding: Optional[str] = None,
              truncate_logs_threshold: Optional[int] = None) -> ProcessResult:
        """Run a subprocess.

        :param args: Arguments for the subprocess to run.
        :param cwd: Current working directory for the subprocess. By default
            (i.e. if None), use the test working directory.
        :param env: Environment to pass to the subprocess.
        :param catch_error: If True, consider that an error status code leads
            to a test failure. In that case, abort the testcase.
        :param analyze_output: If True, add the subprocess output to the
            ``self.output`` log.
        :param timeout: Timeout (in seconds) for the subprocess. Use
            ``self.default_process_timeout`` if left to None.
        :param encoding: Encoding to use when decoding the subprocess' output
            stream. If None, use the default encoding for this test
            (``self.default_encoding``, from the ``encoding`` entry in
            test.yaml).  If "binary", leave the output undecoded as a bytes
            string.
        :param truncate_logs_threshold: Threshold to truncate the subprocess
            output in ``self.result.log``. See
            ``e3.testsuite.result.truncated``'s ``line_count`` argument. If
            left to None, use the testsuite's ``--truncate-logs`` option.
        """
        # By default, run the subprocess in the test working directory
        if cwd is None:
            cwd = self.test_env["working_dir"]

        if timeout is None:
            timeout = self.default_process_timeout

        if truncate_logs_threshold is None:
            truncate_logs_threshold = self.testsuite_options.truncate_logs

        # Run the subprocess and log it
        def format_header(label: str, value: Any) -> str:
            return "{}{}{}: {}{}\n".format(
                self.Style.RESET_ALL + self.Style.BRIGHT,
                label,
                self.Style.RESET_ALL,
                self.Style.DIM,
                value,
            )

        self.result.log += format_header(
            "Running",
            "{} (cwd={}{}{})".format(" ".join(quote_arg(a) for a in args),
                                     self.Style.RESET_ALL, cwd,
                                     self.Style.DIM))

        process_info = {"cmd": args, "cwd": cwd}
        self.result.processes.append(process_info)

        # Python 2's subprocess module does not handle timeouts, so re-implement
        # e3.os.process's rlimit-based implementation of timeouts.
        if timeout is not None:
            args = [get_rlimit(), str(timeout)] + args

        # We cannot use e3.os.process.Run as this API forces the use of text
        # streams, whereas testsuites sometimes need to deal with binary data
        # (or unknown encodings, which is equivalent).
        subp = subprocess.Popen(args,
                                cwd=cwd,
                                env=env,
                                stdin=subprocess.DEVNULL,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        stdout: Union[str, bytes]
        stdout, _ = subp.communicate()
        assert isinstance(stdout, bytes)
        encoding = encoding or self.default_encoding
        if encoding != "binary":
            try:
                stdout = stdout.decode(encoding)
            except UnicodeDecodeError as exc:
                raise TestAbortWithError(
                    "cannot decode process output ({}: {})".format(
                        type(exc).__name__, exc))

        p = ProcessResult(subp.returncode, stdout)

        self.result.log += format_header("Status code", p.status)
        process_info["status"] = p.status
        process_info["output"] = Log(stdout)

        self.result.log += format_header(
            "Output", "\n" +
            truncated(str(process_info["output"]), truncate_logs_threshold))

        # If requested, use its output for analysis
        if analyze_output:
            self.output += stdout

        if catch_error and p.status != 0:
            raise TestAbortWithFailure("non-zero status code")

        return p
Example 11
def to_log(key):
    content = self.test_env.get(key)
    return None if content is None else Log(content)
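
A sketch of intended use, wrapping optional test_env entries (the keys are illustrative):

# Sketch: missing entries stay None instead of becoming empty logs.
self.result.expected = to_log("expected_output")
self.result.out = to_log("actual_output")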
Example 12
    def shell(
        self,
        args: List[str],
        cwd: Optional[str] = None,
        env: Optional[Dict[str, str]] = None,
        catch_error: bool = True,
        analyze_output: bool = True,
        timeout: Optional[int] = None,
        encoding: Optional[str] = None,
        truncate_logs_threshold: Optional[int] = None,
        ignore_environ: bool = True,
    ) -> ProcessResult:
        """Run a subprocess.

        :param args: Arguments for the subprocess to run.
        :param cwd: Current working directory for the subprocess. By default
            (i.e. if None), use the test working directory.
        :param env: Environment to pass to the subprocess.
        :param catch_error: If True, consider that an error status code leads
            to a test failure. In that case, abort the testcase.
        :param analyze_output: If True, add the subprocess output to the
            ``self.output`` log.
        :param timeout: Timeout (in seconds) for the subprocess. Use
            ``self.default_process_timeout`` if left to None.
        :param encoding: Encoding to use when decoding the subprocess' output
            stream. If None, use the default encoding for this test
            (``self.default_encoding``, from the ``encoding`` entry in
            test.yaml).  If "binary", leave the output undecoded as a bytes
            string.
        :param truncate_logs_threshold: Threshold to truncate the subprocess
            output in ``self.result.log``. See
            ``e3.testsuite.result.truncated``'s ``line_count`` argument. If
            left to None, use the testsuite's ``--truncate-logs`` option.
        :param ignore_environ: Applies only when ``env`` is not None. When
            True (the default), pass exactly the environment variables in
            ``env``. When False, pass a copy of ``os.environ`` augmented
            with the variables in ``env``.
        """
        # By default, run the subprocess in the test working directory
        if cwd is None:
            cwd = self.test_env["working_dir"]

        if timeout is None:
            timeout = self.default_process_timeout

        if truncate_logs_threshold is None:
            truncate_logs_threshold = self.testsuite_options.truncate_logs

        # Run the subprocess and log it
        def format_header(label: str, value: Any) -> str:
            return "{}{}{}: {}{}\n".format(
                self.Style.RESET_ALL + self.Style.BRIGHT,
                label,
                self.Style.RESET_ALL,
                self.Style.DIM,
                value,
            )

        self.result.log += format_header(
            "Running",
            "{} (cwd={}{}{})".format(
                " ".join(quote_arg(a) for a in args),
                self.Style.RESET_ALL,
                cwd,
                self.Style.DIM,
            ),
        )

        process_info = {"cmd": args, "cwd": cwd}
        self.result.processes.append(process_info)

        subp = Run(
            cmds=args,
            cwd=cwd,
            output=PIPE,
            error=STDOUT,
            input=DEVNULL,
            timeout=timeout,
            env=env,
            ignore_environ=ignore_environ,
        )

        # Testsuites sometimes need to deal with binary data (or unknown
        # encodings, which is equivalent), so always use subp.raw_out.
        stdout: Union[str, bytes]
        stdout = subp.raw_out
        assert isinstance(stdout, bytes)
        encoding = encoding or self.default_encoding
        if encoding != "binary":
            try:
                stdout = stdout.decode(encoding)
            except UnicodeDecodeError as exc:
                raise TestAbortWithError(
                    "cannot decode process output ({}: {})".format(
                        type(exc).__name__, exc))

        # We run subprocesses in foreground mode, so by the time Run's
        # constructor has returned, the subprocess is supposed to have
        # completed, and thus we are supposed to have an exit status code.
        assert subp.status is not None
        p = ProcessResult(subp.status, stdout)

        self.result.log += format_header("Status code", p.status)
        process_info["status"] = p.status
        process_info["output"] = Log(stdout)

        self.result.log += format_header(
            "Output",
            "\n" +
            truncated(str(process_info["output"]), truncate_logs_threshold),
        )

        # If requested, use its output for analysis
        if analyze_output:
            self.output += stdout

        if catch_error and p.status != 0:
            raise TestAbortWithFailure("non-zero status code")

        return p
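
A sketch of the two environment-passing styles this signature allows (command lines are illustrative):

# Sketch: by default the subprocess sees exactly the given variables...
p = self.shell(["printenv", "MY_VAR"], env={"MY_VAR": "1"})
# ...while ignore_environ=False merges them on top of os.environ.
p = self.shell(["printenv"], env={"MY_VAR": "1"}, ignore_environ=False)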
Example 13
File: basic.py Project: AdaCore/gps
    def run(self, previous_values, slot):
        # Check whether the test should be skipped
        skip = self.should_skip()
        if skip is not None:
            self.result.set_status(skip)
            self.push_result()
            return False

        # If there's a test.cmd, execute it with the shell;
        # otherwise execute test.py.
        wd = self.test_env["working_dir"]
        base = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "..", ".."))

        # In the development environment, run the development GPS,
        # otherwise use the GS found on the PATH
        devel_gs = os.path.join(base, "gnatstudio", "obj", "gnatstudio")
        if sys.platform == "win32":
            devel_gs += ".exe"
            devel_gs = unixpath(devel_gs)
        test_cmd = os.path.join(wd, "test.cmd")

        if os.path.exists(devel_gs):
            # We are testing the development executable: we need to
            # pass the valgrind command ourselves.
            if os.path.exists(test_cmd):
                # run via a test.cmd
                GS = " ".join(self.env.valgrind_cmd + [devel_gs])
                cmd_line = ["bash", test_cmd]
            else:
                # run the executable directly
                GS = devel_gs
                cmd_line = self.env.valgrind_cmd + [
                    devel_gs, "--load=python:test.py"
                ]
        else:
            # We are testing the real 'gnatstudio' script.
            # In this case we rely on GPS_WRAPPER to carry the
            # valgrind command.
            GS = "gnatstudio"
            if os.path.exists(test_cmd):
                # run via a test.cmd
                cmd_line = ["bash", test_cmd]
            else:
                # run the script directly
                cmd_line = [GS, "--load=python:test.py"]

        env = {
            "GNATSTUDIO_HOME": self.test_env["working_dir"],
            "GNATINSPECT": shutil.which("gnatinspect") + " --exit",
            "GNATSTUDIO": GS,
            "GPS": GS,
            "GPS_WRAPPER": " ".join(self.env.valgrind_cmd),
            "GNATSTUDIO_PYTHON_COV": self.test_env["pycov"],
        }

        env.update(Xvfbs.get_env(slot))

        process = Run(
            cmd_line,
            cwd=wd,
            timeout=(None if "GPS_PREVENT_EXIT" in os.environ else
                     (120 * self.env.wait_factor)),
            env=env,
            ignore_environ=False,
        )
        output = process.out

        if output:
            # If there's an output, capture it
            self.result.log += output

        is_error = False
        if process.status:
            # Nonzero status?
            if process.status == 100:
                # This one is an xfail
                self.result.set_status(TestStatus.XFAIL)
            elif process.status == 99:
                # This is intentionally deactivated in this configuration
                self.result.set_status(TestStatus.SKIP)
            else:
                # Unknown status!
                self.result.set_status(TestStatus.ERROR)
                is_error = True
        else:
            # Status is 0...
            if output:
                # ... and there is an output: compare it to test.out
                # if it exists
                test_out = os.path.join(wd, "test.out")

                if os.path.exists(test_out):
                    with open(test_out, "r") as f:
                        expected = f.read()

                    res = "\n".join(
                        difflib.unified_diff(expected.splitlines(),
                                             output.splitlines()))
                    if res == "":
                        self.result.set_status(TestStatus.PASS)
                    else:
                        self.result.out = Log(output)
                        self.result.expected = Log(expected)
                        self.result.diff = Log(res)
                        self.result.set_status(TestStatus.FAIL)
                        is_error = True

                else:
                    # ... if there's no test.out, that's a FAIL
                    self.result.set_status(TestStatus.FAIL)
                    is_error = True
            else:
                # ... and no output: that's a PASS
                self.result.set_status(TestStatus.PASS)

        if is_error:
            self.result.log += self._capture_for_developers()

        self.push_result()