Example #1
# Imports assumed for these snippets (not shown in the original): DAG and
# DAGError come from e3-core's e3.collection.dag module.
import pytest

from e3.collection.dag import DAG, DAGError


def test_cycle_detection():
    d = DAG()
    d.add_vertex('a')
    d.add_vertex('b')
    d.update_vertex('a', predecessors=['b'])
    with pytest.raises(DAGError):
        d.update_vertex('b', data='newb', predecessors=['a'])

    # Ensure that DAG is still valid and that previous
    # update_vertex has no effect
    result = []
    for vertex_id, data in d:
        result.append(vertex_id)
        assert data is None
    assert result == ['b', 'a']

    # Force the creation of a cycle
    d.update_vertex('b', data='newb', predecessors=['a'], enable_checks=False)

    # Verify that the cycle is detected
    with pytest.raises(DAGError):
        d.check()

    # Verify that some functions do not hang when a cycle is present
    assert len(d.get_closure('b')) == 2
    assert str(d)
    assert d.as_dot()

    with pytest.raises(DAGError):
        d.reverse_graph()
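The test above relies on DAG.check() to report the cycle that enable_checks=False allowed in. As an illustration only (this is not the library's implementation), the same condition can be detected over a plain predecessor mapping with an iterative depth-first search and three-color marking; the helper below is a minimal, self-contained sketch.

from typing import Dict, Hashable, Iterable


def has_cycle(predecessors: Dict[Hashable, Iterable[Hashable]]) -> bool:
    """Return True if the predecessor mapping contains a cycle (sketch only)."""
    WHITE, GREY, BLACK = 0, 1, 2          # unvisited / on the DFS stack / done
    color = {vertex: WHITE for vertex in predecessors}
    for root in predecessors:
        if color[root] != WHITE:
            continue
        color[root] = GREY
        stack = [(root, iter(predecessors.get(root) or ()))]
        while stack:
            vertex, children = stack[-1]
            for child in children:
                state = color.get(child, WHITE)
                if state == GREY:
                    return True           # back edge: child is on the DFS stack
                if state == WHITE:
                    color[child] = GREY
                    stack.append((child, iter(predecessors.get(child) or ())))
                    break
            else:
                color[vertex] = BLACK     # all predecessors explored
                stack.pop()
    return False


# The cycle forced in the test above would be reported:
assert has_cycle({"a": ["b"], "b": ["a"]})
assert not has_cycle({"a": ["b"], "b": []})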
Example #2
def test_inexisting():
    d = DAG()
    d.add_vertex('a')
    assert 'a' in d
    d.update_vertex('a', data='NOT B', predecessors=['b'], enable_checks=False)
    assert 'b' not in d
    assert d['a'] == 'NOT B'
    with pytest.raises(DAGError):
        d.check()
Example #3
def test_inexisting():
    d = DAG()
    d.add_vertex("a")
    assert "a" in d
    d.update_vertex("a", data="NOT B", predecessors=["b"], enable_checks=False)
    assert "b" not in d
    assert d["a"] == "NOT B"
    with pytest.raises(DAGError):
        d.check()
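Examples #2 and #3 above show that update_vertex(..., enable_checks=False) records a predecessor ('b') that was never added as a vertex, deferring the error to check(). When failing at update time is preferable, a small wrapper can reject unknown predecessors explicitly using the membership test ('b' not in d) seen above; the helper name and signature below are hypothetical, not part of the library.

from e3.collection.dag import DAG  # assumed import path, as above


def update_vertex_strict(d: DAG, vertex_id, data=None, predecessors=None):
    """Hypothetical helper: refuse predecessors that are not vertices yet."""
    missing = [p for p in (predecessors or []) if p not in d]
    if missing:
        raise ValueError(f"unknown predecessors for {vertex_id!r}: {missing}")
    d.update_vertex(vertex_id, data=data, predecessors=predecessors)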
Example #4
def test_cycle():
    d = DAG()
    d.add_vertex('a')
    d.add_vertex('b')
    d.update_vertex('a', predecessors=['b'])
    d.add_vertex('c', predecessors=['b'])
    d.update_vertex('b', predecessors=['c'], enable_checks=False)
    with pytest.raises(DAGError):
        d.check()

    with pytest.raises(DAGError):
        d.get_context('b')
Example #5
def test_cycle():
    d = DAG()
    d.add_vertex("a")
    d.add_vertex("b")
    d.update_vertex("a", predecessors=["b"])
    d.add_vertex("c", predecessors=["b"])
    d.update_vertex("b", predecessors=["c"], enable_checks=False)
    with pytest.raises(DAGError):
        d.check()

    with pytest.raises(DAGError):
        d.get_context("b")
Example #6
def test_simple_dag():
    d = DAG()
    d.add_vertex('a')
    d.add_vertex('b')
    d.add_vertex('c')
    result = []
    for vertex_id, data in d:
        result.append(vertex_id)
    result.sort()
    assert result == ['a', 'b', 'c']
    assert d.check() is None
Example #7
def test_simple_dag():
    d = DAG()
    d.add_vertex("a")
    d.add_vertex("b")
    d.add_vertex("c")
    result = []
    for vertex_id, _ in d:
        result.append(vertex_id)
    result.sort()
    assert result == ["a", "b", "c"]
    assert d.check() is None
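Examples #6 and #7 iterate over three independent vertices, so the sorted order is the only meaningful check; Example #1 shows that when predecessors exist, iteration yields them first ('b' before 'a'). The sketch below, under the same assumptions as the snippets above and with hypothetical vertex names, combines both observations: declare the vertices, wire the edges with enable_checks=False, then validate once with check() before consuming the dependency-first order.

from e3.collection.dag import DAG  # assumed import path, as above

d = DAG()
for vertex in ("compile", "link", "test"):   # hypothetical vertex names
    d.add_vertex(vertex)

# Wire the edges in bulk; validation is deferred to the single check() below.
d.update_vertex("link", predecessors=["compile"], enable_checks=False)
d.update_vertex("test", predecessors=["link"], enable_checks=False)

d.check()  # raises DAGError on a cycle or dangling predecessor (see above)
for vertex_id, _ in d:
    print(vertex_id)  # predecessors first, as in Example #1: compile, link, test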
Example #8
    def testsuite_main(self, args: Optional[List[str]] = None) -> int:
        """Main for the main testsuite script.

        :param args: Command line arguments. If None, use `sys.argv`.
        :return: The testsuite status code (0 for success, a positive value
            for failure).
        """
        self.main = Main(platform_args=True)

        # Add common options
        parser = self.main.argument_parser

        temp_group = parser.add_argument_group(
            title="temporaries handling arguments")
        temp_group.add_argument("-t",
                                "--temp-dir",
                                metavar="DIR",
                                default=Env().tmp_dir)
        temp_group.add_argument(
            "--no-random-temp-subdir",
            dest="random_temp_subdir",
            action="store_false",
            help="Disable the creation of a random subdirectory in the"
            " temporary directory. Use this when you know that you have"
            " exclusive access to the temporary directory (needed in order to"
            " avoid name clashes there) to get a deterministic path for"
            " testsuite temporaries.")
        temp_group.add_argument(
            "-d",
            "--dev-temp",
            metavar="DIR",
            nargs="?",
            default=None,
            const="tmp",
            help="Convenience shortcut for dev setups: forces `-t DIR"
            " --no-random-temp-subdir --cleanup-mode=none` and cleans up `DIR`"
            ' first. If no directory is provided, use the local "tmp"'
            " directory.")

        cleanup_mode_map = enum_to_cmdline_args_map(CleanupMode)
        temp_group.add_argument(
            "--cleanup-mode",
            choices=list(cleanup_mode_map),
            help="Control the cleanup of working spaces.\n" +
            "\n".join(f"{name}: {CleanupMode.descriptions()[value]}"
                      for name, value in cleanup_mode_map.items()))
        temp_group.add_argument(
            "--disable-cleanup",
            action="store_true",
            help="Disable cleanup of working spaces. This option is deprecated"
            " and will disappear in a future version of e3-testsuite. Please"
            " use --cleanup-mode instead.")

        output_group = parser.add_argument_group(
            title="results output arguments")
        output_group.add_argument(
            "-o",
            "--output-dir",
            metavar="DIR",
            default="./out",
            help="Select the output directory, where test results are to be"
            " stored (default: './out'). If --old-output-dir=DIR2 is passed,"
            " the new results are stored in DIR while DIR2 contains results"
            " from a previous run. Otherwise, the new results are stored in"
            " DIR/new/ while the old ones are stored in DIR/old. In both"
            " cases, the testsuite cleans the directory for new results"
            " first.",
        )
        output_group.add_argument(
            "--old-output-dir",
            metavar="DIR",
            help="Select the old output directory, for baseline comparison."
            " See --output-dir.",
        )
        output_group.add_argument(
            "--rotate-output-dirs",
            default=False,
            action="store_true",
            help="Rotate testsuite results: move the new results directory to"
            " the old results one before running testcases (this removes the"
            " old results directory first). If not passed, we just remove the"
            " new results directory before running testcases (i.e. just ignore"
            " the old results directory).",
        )
        output_group.add_argument(
            "--show-error-output",
            "-E",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.",
        )
        output_group.add_argument(
            "--show-time-info",
            action="store_true",
            help="Display time information for test results, if available",
        )
        output_group.add_argument(
            "--xunit-output",
            dest="xunit_output",
            metavar="FILE",
            help="Output testsuite report to the given file in the standard"
            " XUnit XML format. This is useful to display results in"
            " continuous build systems such as Jenkins.",
        )
        output_group.add_argument(
            "--gaia-output",
            action="store_true",
            help="Output a GAIA-compatible testsuite report next to the YAML"
            " report.",
        )
        output_group.add_argument(
            "--status-update-interval",
            default=1.0,
            type=float,
            help="Minimum number of seconds between status file updates. The"
            " more often we update this file, the more often one will read"
            " garbage.")

        auto_gen_default = ("enabled"
                            if self.auto_generate_text_report else "disabled")
        output_group.add_argument(
            "--generate-text-report",
            action="store_true",
            dest="generate_text_report",
            default=self.auto_generate_text_report,
            help=(
                f"When the testsuite completes, generate a 'report' text file"
                f" in the output directory ({auto_gen_default} by default)."),
        )
        output_group.add_argument(
            "--no-generate-text-report",
            action="store_false",
            dest="generate_text_report",
            help="Disable the generation of a 'report' text file (see"
            "--generate-text-report).",
        )

        output_group.add_argument(
            "--truncate-logs",
            "-T",
            metavar="N",
            type=int,
            default=200,
            help="When outputs (for instance subprocess outputs) exceed 2*N"
            " lines, only include the first and last N lines in logs. This is"
            " necessary when storage for testsuite results have size limits,"
            " and the useful information is generally either at the beginning"
            " or the end of such outputs. If 0, never truncate logs.",
        )
        output_group.add_argument(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.",
        )

        exec_group = parser.add_argument_group(
            title="execution control arguments")
        exec_group.add_argument(
            "--max-consecutive-failures",
            "-M",
            metavar="N",
            type=int,
            default=self.default_max_consecutive_failures,
            help="Number of test failures (FAIL or ERROR) that trigger the"
            " abortion of the testuite. If zero, this behavior is disabled. In"
            " some cases, aborting the testsuite when there are just too many"
            " failures saves time and costs: the software to test/environment"
            " is too broken, there is no point to continue running the"
            " testsuite.",
        )
        exec_group.add_argument(
            "-j",
            "--jobs",
            dest="jobs",
            type=int,
            metavar="N",
            default=Env().build.cpu.cores,
            help="Specify the number of jobs to run simultaneously",
        )
        exec_group.add_argument(
            "--failure-exit-code",
            metavar="N",
            type=int,
            default=self.default_failure_exit_code,
            help="Exit code the testsuite must use when at least one test"
            " result shows a failure/error. By default, this is"
            f" {self.default_failure_exit_code}. This option is useful when"
            " running a testsuite in a continuous integration setup, as this"
            " can make the testing process stop when there is a regression.",
        )
        exec_group.add_argument(
            "--force-multiprocessing",
            action="store_true",
            help="Force the use of subprocesses to execute tests, for"
            " debugging purposes. This is normally automatically enabled when"
            " both the level of requested parallelism is high enough (to make"
            " it profitable regarding the contention of Python's GIL) and no"
            " test fragment has dependencies on other fragments. This flag"
            " forces the use of multiprocessing even if any of these two"
            " conditions is false.")
        parser.add_argument("sublist",
                            metavar="tests",
                            nargs="*",
                            default=[],
                            help="test")
        # Add user defined options
        self.add_options(parser)

        # Parse options
        self.main.parse_args(args)
        assert self.main.args is not None

        # If there is a chance for the logging to end up in a non-tty stream,
        # disable colors. If not, be user-friendly and automatically show error
        # outputs.
        if (self.main.args.log_file or not isatty(sys.stdout)
                or not isatty(sys.stderr)):
            enable_colors = False
        else:  # interactive-only
            enable_colors = True
            self.main.args.show_error_output = True
        self.colors = ColorConfig(enable_colors)
        self.Fore = self.colors.Fore
        self.Style = self.colors.Style

        self.env = Env()
        self.env.enable_colors = enable_colors
        self.env.root_dir = self.root_dir
        self.env.test_dir = self.test_dir

        # Setup output directories and create an index for the results we are
        # going to produce.
        self.output_dir: str
        self.old_output_dir: Optional[str]
        self.setup_result_dirs()
        self.report_index = ReportIndex(self.output_dir)

        # Set the cleanup mode from command-line arguments
        if self.main.args.cleanup_mode is not None:
            self.env.cleanup_mode = (
                cleanup_mode_map[self.main.args.cleanup_mode])
        elif self.main.args.disable_cleanup:
            logger.warning(
                "--disable-cleanup is deprecated and will disappear in a"
                " future version of e3-testsuite. Please use --cleanup-mode"
                " instead.")
            self.env.cleanup_mode = CleanupMode.NONE
        else:
            self.env.cleanup_mode = CleanupMode.default()

        # Settings for temporary directory creation
        temp_dir: str = self.main.args.temp_dir
        random_temp_subdir: bool = self.main.args.random_temp_subdir

        # The "--dev-temp" option forces several settings
        if self.main.args.dev_temp:
            self.env.cleanup_mode = CleanupMode.NONE
            temp_dir = self.main.args.dev_temp
            random_temp_subdir = False

        # Now actually set up the temporary directory: make sure we start from a
        # clean directory if we use a deterministic directory.
        #
        # Note that we do make sure that working_dir is an absolute path, as we
        # are likely to be changing directories when running each test. A
        # relative path would no longer work under those circumstances.
        temp_dir = os.path.abspath(temp_dir)
        if not random_temp_subdir:
            self.working_dir = temp_dir
            rm(self.working_dir, recursive=True)
            mkdir(self.working_dir)

        elif not os.path.isdir(temp_dir):
            # If the temp dir is supposed to be randomized, we need to create a
            # subdirectory, so check that the parent directory exists first.
            logger.critical("temp dir '%s' does not exist", temp_dir)
            return 1

        else:
            self.working_dir = tempfile.mkdtemp("", "tmp", temp_dir)

        # Create the exchange directory (to exchange data between the testsuite
        # main and the subprocesses running test fragments). Compute the name
        # of the file to pass environment data to subprocesses.
        self.exchange_dir = os.path.join(self.working_dir, "exchange")
        self.env_filename = os.path.join(self.exchange_dir, "_env.bin")
        mkdir(self.exchange_dir)

        # Make them both available to test fragments
        self.env.exchange_dir = self.exchange_dir
        self.env.env_filename = self.env_filename

        self.gaia_result_files: Dict[str, GAIAResultFiles] = {}
        """Mapping from test names to files for results in the GAIA report."""

        # Store in global env: target information and common paths
        self.env.output_dir = self.output_dir
        self.env.working_dir = self.working_dir
        self.env.options = self.main.args

        # Create an object to report testsuite execution status to users
        from e3.testsuite.running_status import RunningStatus
        self.running_status = RunningStatus(
            os.path.join(self.output_dir, "status"),
            self.main.args.status_update_interval,
        )

        # User specific startup
        self.set_up()

        # Retrieve the list of tests
        self.test_list = self.get_test_list(self.main.args.sublist)

        # Create a DAG to constrain the test execution order
        dag = DAG()
        for parsed_test in self.test_list:
            self.add_test(dag, parsed_test)
        self.adjust_dag_dependencies(dag)
        dag.check()
        self.running_status.set_dag(dag)

        # Determine whether to use multiple processes for fragment execution
        # parallelism.
        self.use_multiprocessing = self.compute_use_multiprocessing()
        self.env.use_multiprocessing = self.use_multiprocessing

        # Record modules lookup path, including for the file corresponding to
        # the __main__ module.  Subprocesses will need it to have access to the
        # same modules.
        main_module = sys.modules["__main__"]
        self.env.modules_search_path = [
            os.path.dirname(os.path.abspath(main_module.__file__))
        ] + sys.path

        # Now that the env is supposed to be complete, dump it for the test
        # fragments to pick it up.
        self.env.store(self.env_filename)

        # For debugging purposes, dump the final DAG to a DOT file
        with open(os.path.join(self.output_dir, "tests.dot"), "w") as fd:
            fd.write(dag.as_dot())

        if self.use_multiprocessing:
            self.run_multiprocess_mainloop(dag)
        else:
            self.run_standard_mainloop(dag)

        self.report_index.write()
        self.dump_testsuite_result()
        if self.main.args.xunit_output:
            dump_xunit_report(self, self.main.args.xunit_output)
        if self.main.args.gaia_output:
            dump_gaia_report(self, self.output_dir, self.gaia_result_files)

        # Clean everything
        self.tear_down()

        # If requested, generate a text report
        if self.main.args.generate_text_report:
            # Use the previous testsuite results for comparison, if available
            old_index = (ReportIndex.read(self.old_output_dir)
                         if self.old_output_dir else None)

            # Include all information, except logs for successful tests, which
            # are just too verbose.
            with open(os.path.join(self.output_dir, "report"),
                      "w",
                      encoding="utf-8") as f:
                generate_report(
                    output_file=f,
                    new_index=self.report_index,
                    old_index=old_index,
                    colors=ColorConfig(colors_enabled=False),
                    show_all_logs=False,
                    show_xfail_logs=True,
                    show_error_output=True,
                    show_time_info=True,
                )

        # Return the appropriate status code: 1 when there is a framework
        # issue, the failure status code from the --failure-exit-code=N option
        # when there is at least one testcase failure, or 0.
        statuses = {
            s
            for s, count in self.report_index.status_counters.items() if count
        }
        if TestStatus.FAIL in statuses or TestStatus.ERROR in statuses:
            return self.main.args.failure_exit_code
        else:
            return 0
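Example #8 above (and the older variant in Example #9 below) is the framework entry point: it registers the common command-line options, calls self.add_options(parser) so a concrete testsuite can contribute its own flags, and returns the status code. Below is a minimal sketch of such a subclass; the class name, the option, and the import path are assumptions, while the add_options() hook and the testsuite_main() call come from the code above.

import sys

from e3.testsuite import Testsuite  # assumed import path for the base class


class MyTestsuite(Testsuite):
    """Hypothetical concrete testsuite reusing the entry point above."""

    def add_options(self, parser):
        # Called by testsuite_main() once the common options are registered.
        parser.add_argument(
            "--my-project-flag",          # hypothetical project-specific option
            action="store_true",
            help="Example of a project-specific flag",
        )


if __name__ == "__main__":
    # Constructor arguments, if the base class requires any, are omitted here.
    # testsuite_main() returns 0 on success, the --failure-exit-code value when
    # a test fails, and 1 on framework errors (see the end of Example #8).
    sys.exit(MyTestsuite().testsuite_main())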
Example #9
    def testsuite_main(self, args: Optional[List[str]] = None) -> int:
        """Main for the main testsuite script.

        :param args: Command line arguments. If None, use `sys.argv`.
        :return: The testsuite status code (0 for success, a positive value
            for failure).
        """
        self.main = Main(platform_args=True)

        # Add common options
        parser = self.main.argument_parser
        parser.add_argument(
            "-o",
            "--output-dir",
            metavar="DIR",
            default="./out",
            help="select output dir",
        )
        parser.add_argument("-t",
                            "--temp-dir",
                            metavar="DIR",
                            default=Env().tmp_dir)
        parser.add_argument(
            "-d",
            "--dev-temp",
            nargs="?",
            default=None,
            const="tmp",
            help="Unlike --temp-dir, use this very directory to store"
            " testsuite temporaries (i.e. no random subdirectory). Also"
            " automatically disable temp dir cleanup, to be developer"
            " friendly. If no directory is provided, use the local"
            " \"tmp\" directory")
        parser.add_argument(
            "--max-consecutive-failures",
            "-M",
            metavar="N",
            type=int,
            default=self.default_max_consecutive_failures,
            help="Number of test failures (FAIL or ERROR) that trigger the"
            " abortion of the testuite. If zero, this behavior is disabled. In"
            " some cases, aborting the testsuite when there are just too many"
            " failures saves time and costs: the software to test/environment"
            " is too broken, there is no point to continue running the"
            " testsuite.")
        parser.add_argument(
            "--keep-old-output-dir",
            default=False,
            action="store_true",
            help="This is default with this testsuite framework. The option"
            " is kept only to keep backward compatibility of invocation with"
            " former framework (gnatpython.testdriver)",
        )
        parser.add_argument(
            "--disable-cleanup",
            dest="enable_cleanup",
            action="store_false",
            default=True,
            help="disable cleanup of working space",
        )
        parser.add_argument(
            "-j",
            "--jobs",
            dest="jobs",
            type=int,
            metavar="N",
            default=Env().build.cpu.cores,
            help="Specify the number of jobs to run simultaneously",
        )
        parser.add_argument(
            "--show-error-output",
            "-E",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.",
        )
        parser.add_argument(
            "--show-time-info",
            action="store_true",
            help="Display time information for test results, if available")
        parser.add_argument(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.",
        )
        parser.add_argument(
            "--xunit-output",
            dest="xunit_output",
            metavar="FILE",
            help="Output testsuite report to the given file in the standard"
            " XUnit XML format. This is useful to display results in"
            " continuous build systems such as Jenkins.",
        )
        parser.add_argument(
            "--gaia-output",
            action="store_true",
            help="Output a GAIA-compatible testsuite report next to the YAML"
            " report.")
        parser.add_argument(
            "--truncate-logs",
            "-T",
            metavar="N",
            type=int,
            default=200,
            help="When outputs (for instance subprocess outputs) exceed 2*N"
            " lines, only include the first and last N lines in logs. This is"
            " necessary when storage for testsuite results have size limits,"
            " and the useful information is generally either at the beginning"
            " or the end of such outputs. If 0, never truncate logs.")
        parser.add_argument(
            "--failure-exit-code",
            metavar="N",
            type=int,
            default=self.default_failure_exit_code,
            help="Exit code the testsuite must use when at least one test"
            " result shows a failure/error. By default, this is"
            f" {self.default_failure_exit_code}. This option is useful when"
            " running a testsuite in a continuous integration setup, as this"
            " can make the testing process stop when there is a regression.")
        parser.add_argument("sublist",
                            metavar="tests",
                            nargs="*",
                            default=[],
                            help="test")
        # Add user defined options
        self.add_options(parser)

        # Parse options
        self.main.parse_args(args)
        assert self.main.args is not None

        # If there is a chance for the logging to end up in a non-tty stream,
        # disable colors. If not, be user-friendly and automatically show error
        # outputs.
        if (self.main.args.log_file or not isatty(sys.stdout)
                or not isatty(sys.stderr)):
            enable_colors = False
        else:  # interactive-only
            enable_colors = True
            self.main.args.show_error_output = True
        self.colors = ColorConfig(enable_colors)
        self.Fore = self.colors.Fore
        self.Style = self.colors.Style

        self.env = BaseEnv.from_env()
        self.env.enable_colors = enable_colors
        self.env.root_dir = self.root_dir
        self.env.test_dir = self.test_dir

        # At this stage, compute commonly used paths. Keep the working dir as
        # short as possible, to avoid the risk of having a path that's too long
        # (a problem often seen on Windows, or when using WRS tools that have
        # their own max path limitations).
        #
        # Note that we do make sure that working_dir is an absolute path, as we
        # are likely to be changing directories when running each test. A
        # relative path would no longer work under those circumstances.
        d = os.path.abspath(self.main.args.output_dir)
        self.output_dir = os.path.join(d, "new")
        self.old_output_dir = os.path.join(d, "old")

        if self.main.args.dev_temp:
            # Use a temporary directory for developers: make sure it is an
            # empty directory and disable cleanup to ease post-mortem
            # investigation.
            self.working_dir = os.path.abspath(self.main.args.dev_temp)
            rm(self.working_dir, recursive=True)
            mkdir(self.working_dir)
            self.main.args.enable_cleanup = False

        else:
            # If the temp dir is supposed to be randomized, we need to create a
            # subdirectory, so check that the parent directory exists first.
            if not os.path.isdir(self.main.args.temp_dir):
                logger.critical("temp dir '%s' does not exist",
                                self.main.args.temp_dir)
                return 1

            self.working_dir = tempfile.mkdtemp(
                "", "tmp", os.path.abspath(self.main.args.temp_dir))

        # Create the new output directory that will hold the results and create
        # an index for it.
        self.setup_result_dir()
        self.report_index = ReportIndex(self.output_dir)

        # Store in global env: target information and common paths
        self.env.output_dir = self.output_dir
        self.env.working_dir = self.working_dir
        self.env.options = self.main.args

        # User specific startup
        self.set_up()

        # Retrieve the list of tests
        self.has_error = False
        self.test_list = self.get_test_list(self.main.args.sublist)

        # Launch the mainloop
        self.total_test = len(self.test_list)
        self.run_test = 0

        self.scheduler = Scheduler(
            job_provider=self.job_factory,
            tokens=self.main.args.jobs,

            # collect_result expects specifically TestFragment instances (a Job
            # subclass), while Scheduler only guarantees Job instances.
            # Test drivers are supposed to register only TestFragment
            # instances, so the following cast should be fine.
            collect=cast(Any, self.collect_result),
        )
        actions = DAG()
        for parsed_test in self.test_list:
            if not self.add_test(actions, parsed_test):
                self.has_error = True
        actions.check()

        with open(os.path.join(self.output_dir, "tests.dot"), "w") as fd:
            fd.write(actions.as_dot())

        # Run the tests. Note that when the testsuite aborts because of too
        # many consecutive test failures, we still want to produce a report and
        # exit through regular ways, so we catch KeyboardInterrupt exceptions,
        # which e3's scheduler uses to abort the execution loop, but only in
        # such cases. In other words, let the exception propagate if it is the
        # user who interrupted the testsuite.
        try:
            self.scheduler.run(actions)
        except KeyboardInterrupt:
            if not self.aborted_too_many_failures:  # interactive-only
                raise

        self.report_index.write()
        self.dump_testsuite_result()
        if self.main.args.xunit_output:
            dump_xunit_report(self, self.main.args.xunit_output)
        if self.main.args.gaia_output:
            dump_gaia_report(self, self.output_dir)

        # Clean everything
        self.tear_down()

        # Return the appropriate status code: 1 when there is a framework
        # issue, the failure status code from the --failure-exit-code=N option
        # when there is at least one testcase failure, or 0.
        statuses = {
            s
            for s, count in self.report_index.status_counters.items() if count
        }
        if self.has_error:
            return 1
        elif TestStatus.FAIL in statuses or TestStatus.ERROR in statuses:
            return self.main.args.failure_exit_code
        else:
            return 0