Example #1
    def test_requeue(self):
        """Requeue test.

        Same as previous example except that all tests are requeued
        once.
        """
        results = {}

        def collect(job):
            if job.uid not in results:
                results[job.uid] = True
                return True
            else:
                return False

        # Use two independent jobs
        dag = DAG()
        dag.add_vertex('1')
        dag.add_vertex('2')
        s = Scheduler(Scheduler.simple_provider(NopJob),
                      tokens=2, collect=collect)
        s.run(dag)
        assert s.max_active_jobs == 2
        assert results['1']
        assert results['2']
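
A note on the mechanism: the boolean returned by collect is what drives requeuing. The following minimal sketch (an illustration, not part of the test suite; it defines its own NopJob helper, since the tests above define theirs locally) retries a job a fixed number of times:

    from e3.collection.dag import DAG
    from e3.job import Job
    from e3.job.scheduler import Scheduler

    class NopJob(Job):
        """Trivial job whose run() does nothing."""

        def run(self):
            pass

    attempts = {}

    def collect(job):
        # Returning True from collect asks the scheduler to requeue the
        # job; here each job runs three times in total
        attempts[job.uid] = attempts.get(job.uid, 0) + 1
        return attempts[job.uid] < 3

    dag = DAG()
    dag.add_vertex('1')
    s = Scheduler(Scheduler.simple_provider(NopJob), tokens=1, collect=collect)
    s.run(dag)
    assert attempts['1'] == 3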
Example #2
    def test_keyboard_interrupt(self):
        """Ensure that jobs can be interrupted."""
        results = {}
        pytest.importorskip('psutil')

        def get_job(uid, data, predecessors, notify_end):
            return NopJob(uid, data, notify_end)

        def collect(job):
            results[job.uid] = job

        dag = DAG()
        dag.add_vertex('1')
        dag.add_vertex('2')
        s = Scheduler(get_job, tokens=2, collect=collect, job_timeout=2)

        # fake log_state that will raise a KeyboardInterrupt
        def fake_log_state():
            raise KeyboardInterrupt
        s.log_state = fake_log_state

        with pytest.raises(KeyboardInterrupt):
            s.run(dag)

        for job in results.values():
            assert job.interrupted
Example #3
    def test_minimal_run2(self):
        """Test with two jobs, where job '2' depends on job '1'."""
        dag = DAG()
        dag.add_vertex('1')
        dag.add_vertex('2', predecessors=['1'])
        s = Scheduler(Scheduler.simple_provider(NopJob), tokens=2)
        s.run(dag)
        assert s.max_active_jobs == 1
Example #4
    def test_minimal_run(self):
        """Test with only two independent jobs."""
        dag = DAG()
        dag.add_vertex('1')
        dag.add_vertex('2')
        s = Scheduler(Scheduler.simple_provider(NopJob), tokens=2)
        s.run(dag)
        assert s.max_active_jobs == 2
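
As a complement to the two tests above, a small sketch (with the same kind of NopJob helper, defined inline here so the snippet is self-contained) showing that tokens caps concurrency even for independent jobs, so a single token serializes them:

    from e3.collection.dag import DAG
    from e3.job import Job
    from e3.job.scheduler import Scheduler

    class NopJob(Job):
        def run(self):
            pass

    dag = DAG()
    dag.add_vertex('1')
    dag.add_vertex('2')
    s = Scheduler(Scheduler.simple_provider(NopJob), tokens=1)
    s.run(dag)
    assert s.max_active_jobs == 1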
Example #5
    def test_ordering(self):
        """Test that jobs are ordered correctly."""
        results = []

        def collect(job):
            results.append(job.uid)

        dag = DAG()
        dag.add_vertex("3")
        dag.add_vertex("0")
        dag.add_vertex("1")
        s = Scheduler(Scheduler.simple_provider(NopJob), tokens=1, collect=collect)
        s.run(dag)
        assert tuple(results) == ("0", "1", "3")
Example #6
    def test_collect_feedback_scheme(self):
        """Collect feedback construction.

        Scheme in which a job is skipped if one of its predecessors
        failed. To achieve this, get_job and collect need access to
        shared data. Note that the scheduler ensures that these
        functions are called sequentially.
        """

        class SchedulerContext(object):
            def __init__(self):
                # results maps each job uid to a [success, job] pair,
                # where success is a bool (True on success) and job is
                # the Job instance itself
                self.results = {}

            def get_job(self, uid, data, predecessors, notify_end):
                result = NopJob(uid, data, notify_end)

                # If any of the predecessors failed, skip the job
                for k in predecessors:
                    if not self.results[k][0]:
                        result.should_skip = True
                return result

            def collect(self, job):
                if job.should_skip:
                    # Skipped jobs are considered failed
                    self.results[job.uid] = [False, job]
                else:
                    # Job '2' always fails
                    if job.uid == "2":
                        self.results[job.uid] = [False, job]
                    else:
                        self.results[job.uid] = [True, job]

        dag = DAG()
        dag.add_vertex("1")
        dag.add_vertex("2")
        dag.add_vertex("3", predecessors=["1", "2"])
        dag.add_vertex("4", predecessors=["3"])
        c = SchedulerContext()
        s = Scheduler(c.get_job, tokens=2, collect=c.collect)
        s.run(dag)

        assert (
            not c.results["2"][1].should_skip and not c.results["2"][0]
        ), 'job "2" is run and should be marked as failed'
        assert c.results["3"][1].should_skip, 'job "3" should be skipped'
        assert c.results["4"][1].should_skip, 'job "4" should be skipped'
Example #7
    def __init__(self, actions):
        """Object initializer.

        :param actions: DAG of actions to perform.
        :type actions: DAG
        """
        self.actions = actions
        self.new_fingerprints = {}
        self.job_status = {}
        self.set_scheduling_params()
        self.scheduler = Scheduler(job_provider=self.get_job,
                                   collect=self.collect,
                                   queues=self.queues,
                                   tokens=self.tokens,
                                   job_timeout=self.job_timeout)

        self.scheduler.run(self.actions)
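
set_scheduling_params is not shown here. A purely hypothetical version could simply set the attributes that the Scheduler constructor above consumes (the names queues, tokens and job_timeout come from that call; the values below are illustrative defaults, not the real ones):

    def set_scheduling_params(self):
        # Hypothetical defaults for illustration only; a real
        # implementation would derive these from user options
        self.tokens = 1          # global number of parallel jobs
        self.queues = {}         # assumed to map queue names to token counts
        self.job_timeout = 3600  # per-job timeout, in seconds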
Example #8
    def test_timeout(self):
        """Ensure that jobs are interrupted correctly on timeout."""
        results = {}
        pytest.importorskip('psutil')

        def get_job(uid, data, predecessors, notify_end):
            return SleepJob(uid, data, notify_end)

        def collect(job):
            results[job.uid] = job

        dag = DAG()
        dag.add_vertex('1')
        dag.add_vertex('2')
        s = Scheduler(get_job, tokens=2, collect=collect, job_timeout=2)
        s.run(dag)

        for job in results.values():
            assert job.interrupted
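
SleepJob is defined in the test module and not shown here. A plausible reconstruction (an assumption, not the actual helper) is a ProcessJob whose command outlives the two-second job_timeout; this would also explain the psutil requirement, since interrupting such a job involves killing its process:

    import sys

    from e3.job import ProcessJob

    class SleepJob(ProcessJob):
        """Hypothetical stand-in: spawn a process that sleeps past the
        scheduler's job_timeout, so the scheduler interrupts it."""

        @property
        def cmdline(self):
            return [sys.executable, '-c', 'import time; time.sleep(10)']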
Example #9
    def __init__(self, actions: DAG):
        """Object initializer.

        :param actions: DAG of actions to perform.
        """
        self.actions = actions
        self.prev_fingerprints: Dict[str, Optional[Fingerprint]] = {}
        self.new_fingerprints: Dict[str, Optional[Fingerprint]] = {}
        self.job_status: Dict[str, ReturnValue] = {}
        self.set_scheduling_params()
        self.failure_source: Dict[str, Set[str]] = {}

        self.scheduler = Scheduler(
            job_provider=self.get_job,
            collect=self.collect,  # type: ignore
            queues=self.queues,
            tokens=self.tokens,
            job_timeout=self.job_timeout,
        )

        self.scheduler.run(self.actions)
Example #10
    def test_skip(self):
        """Simple example in which all the tests are skipped."""
        results = {}

        def get_job(uid, data, predecessors, notify_end):
            result = NopJob(uid, data, notify_end)
            result.should_skip = True
            return result

        def collect(job):
            results[job.uid] = job.timing_info

        # Test with two independent jobs
        dag = DAG()
        dag.add_vertex('1')
        dag.add_vertex('2')
        s = Scheduler(get_job, tokens=2, collect=collect)
        s.run(dag)

        # Check start_time/stop_time to make sure the tests have not run
        for timing in results.values():
            assert timing.start_time is None
            assert timing.stop_time is None
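
For contrast, a short sketch (same style of NopJob helper as in the earlier sketches): a job that is not skipped gets actual timestamps in its timing_info.

    from e3.collection.dag import DAG
    from e3.job import Job
    from e3.job.scheduler import Scheduler

    class NopJob(Job):
        def run(self):
            pass

    results = {}

    def collect(job):
        results[job.uid] = job.timing_info

    dag = DAG()
    dag.add_vertex('1')
    s = Scheduler(Scheduler.simple_provider(NopJob), tokens=1, collect=collect)
    s.run(dag)
    assert results['1'].start_time is not None
    assert results['1'].stop_time is not None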
Example #11
File: run.py Project: piratos/e3-core
    def run(self, action_list):
        # Pass collect by keyword: the Scheduler constructor's second
        # positional parameter is tokens, not collect
        sch = Scheduler(self.get_job, collect=self.collect)
        sch.run(action_list)
Example #12
    def run_standard_mainloop(self, dag: DAG) -> None:
        """Run the main loop to execute test fragments in threads."""
        assert self.main.args is not None

        from e3.job import Job
        from e3.testsuite.fragment import FragmentData, ThreadTestFragment

        def job_factory(
            uid: str,
            data: Any,
            predecessors: FrozenSet[str],
            notify_end: Callable[[str], None],
        ) -> ThreadTestFragment:
            """Turn a DAG item into a ThreadTestFragment instance."""
            assert isinstance(data, FragmentData)

            # When passing return values from predecessors, remove the current
            # test name from the keys to ease referencing for users (the short
            # fragment name can then be used without knowing the full node
            # id).
            key_prefix = data.driver.test_name + "."
            key_prefix_len = len(key_prefix)

            def filter_key(k: str) -> str:
                if k.startswith(key_prefix):
                    return k[key_prefix_len:]
                else:
                    return k

            return ThreadTestFragment(
                uid,
                data.driver,
                data.callback,
                {filter_key(k): self.return_values[k]
                 for k in predecessors},
                notify_end,
                self.running_status,
            )

        def collect_result(job: Job) -> bool:
            """Collect test results from the given fragment."""
            assert isinstance(job, ThreadTestFragment)
            self.return_values[job.uid] = job.return_value
            self.collect_result(job)

            # In the e3.job.scheduler API, collect returning "True" means
            # "requeue the job". We never want to do that.
            return False

        # Create a scheduler to run all fragments for the testsuite main loop
        scheduler = Scheduler(
            job_provider=job_factory,
            tokens=self.main.args.jobs,
            collect=collect_result,
        )

        # Run the tests. When the testsuite aborts because of too many
        # consecutive test failures, we still want to produce a report and
        # exit through regular ways, so catch the KeyboardInterrupt that e3's
        # scheduler uses to abort the execution loop, but only in that case.
        # In other words, let the exception propagate if it's the user that
        # interrupted the testsuite.
        try:
            scheduler.run(dag)
        except KeyboardInterrupt:
            if not self.aborted_too_many_failures:  # interactive-only
                raise
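
To make the key rewriting concrete, here is a tiny self-contained check of the filter_key logic from the example above (the test name mytest is made up for illustration):

    key_prefix = "mytest."
    key_prefix_len = len(key_prefix)

    def filter_key(k: str) -> str:
        if k.startswith(key_prefix):
            return k[key_prefix_len:]
        else:
            return k

    # A predecessor key within the test loses its prefix...
    assert filter_key("mytest.frag1") == "frag1"
    # ... while keys from other tests are left untouched
    assert filter_key("other.frag1") == "other.frag1"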
Example #13
    def testsuite_main(self, args=None):
        """Main for the main testsuite script.

        :param args: command line arguments. If None, use sys.argv.
        :type args: list[str] | None
        :return: the testsuite status code (0 for success, 1 for failure)
        :rtype: int
        """
        self.main = Main(platform_args=self.CROSS_SUPPORT)

        # Add common options
        parser = self.main.argument_parser
        parser.add_argument("-o",
                            "--output-dir",
                            metavar="DIR",
                            default="./out",
                            help="select output dir")
        parser.add_argument("-t",
                            "--temp-dir",
                            metavar="DIR",
                            default=Env().tmp_dir)
        parser.add_argument(
            "--max-consecutive-failures",
            default=0,
            help="If there are more than N consecutive failures, the testsuite"
            " is aborted. If set to 0 (default) then the testsuite will never"
            " be stopped")
        parser.add_argument(
            "--keep-old-output-dir",
            default=False,
            action="store_true",
            help="This is default with this testsuite framework. The option"
            " is kept only to keep backward compatibility of invocation with"
            " former framework (gnatpython.testdriver)")
        parser.add_argument("--disable-cleanup",
                            dest="enable_cleanup",
                            action="store_false",
                            default=True,
                            help="disable cleanup of working space")
        parser.add_argument(
            "-j",
            "--jobs",
            dest="jobs",
            type=int,
            metavar="N",
            default=Env().build.cpu.cores,
            help="Specify the number of jobs to run simultaneously")
        parser.add_argument(
            "--show-error-output",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.")
        parser.add_argument(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.")
        parser.add_argument('sublist',
                            metavar='tests',
                            nargs='*',
                            default=[],
                            help='test')
        # Add user defined options
        self.add_options()

        # parse options
        self.main.parse_args(args)

        self.env = BaseEnv.from_env()
        self.env.root_dir = self.root_dir
        self.env.test_dir = self.test_dir

        # At this stage, compute commonly used paths.
        # Keep the working dir as short as possible, to avoid the risk
        # of having a path that's too long (a problem often seen on
        # Windows, or when using WRS tools that have their own max path
        # limitations).
        # Note that we do make sure that working_dir is an absolute
        # path, as we are likely to be changing directories when
        # running each test. A relative path would no longer work
        # under those circumstances.
        d = os.path.abspath(self.main.args.output_dir)
        self.output_dir = os.path.join(d, 'new')
        self.old_output_dir = os.path.join(d, 'old')

        if not os.path.isdir(self.main.args.temp_dir):
            logging.critical("temp dir '%s' does not exist",
                             self.main.args.temp_dir)
            return 1

        self.working_dir = tempfile.mkdtemp(
            '', 'tmp', os.path.abspath(self.main.args.temp_dir))

        # Create the new output directory that will hold the results
        self.setup_result_dir()

        # Store in global env: target information and common paths
        self.env.output_dir = self.output_dir
        self.env.working_dir = self.working_dir
        self.env.options = self.main.args

        # User specific startup
        self.tear_up()

        # Retrieve the list of tests
        self.test_list = self.get_test_list(self.main.args.sublist)

        # Launch the mainloop
        self.total_test = len(self.test_list)
        self.run_test = 0

        self.scheduler = Scheduler(job_provider=self.job_factory,
                                   collect=self.collect_result,
                                   tokens=self.main.args.jobs)
        actions = DAG()
        for test in self.test_list:
            self.parse_test(actions, test)

        with open(os.path.join(self.output_dir, 'tests.dot'), 'w') as fd:
            fd.write(actions.as_dot())
        self.scheduler.run(actions)

        self.dump_testsuite_result()

        # Clean everything
        self.tear_down()
        return 0
Example #14
    def testsuite_main(self, args: Optional[List[str]] = None) -> int:
        """Main for the main testsuite script.

        :param args: Command line arguments. If None, use `sys.argv`.
        :return: The testsuite status code (0 for success, a positive value
            for failure).
        """
        self.main = Main(platform_args=True)

        # Add common options
        parser = self.main.argument_parser
        parser.add_argument(
            "-o",
            "--output-dir",
            metavar="DIR",
            default="./out",
            help="select output dir",
        )
        parser.add_argument("-t",
                            "--temp-dir",
                            metavar="DIR",
                            default=Env().tmp_dir)
        parser.add_argument(
            "-d",
            "--dev-temp",
            nargs="?",
            default=None,
            const="tmp",
            help="Unlike --temp-dir, use this very directory to store"
            " testsuite temporaries (i.e. no random subdirectory). Also"
            " automatically disable temp dir cleanup, to be developer"
            " friendly. If no directory is provided, use the local"
            " \"tmp\" directory")
        parser.add_argument(
            "--max-consecutive-failures",
            "-M",
            metavar="N",
            type=int,
            default=self.default_max_consecutive_failures,
            help="Number of test failures (FAIL or ERROR) that trigger the"
            " abortion of the testuite. If zero, this behavior is disabled. In"
            " some cases, aborting the testsuite when there are just too many"
            " failures saves time and costs: the software to test/environment"
            " is too broken, there is no point to continue running the"
            " testsuite.")
        parser.add_argument(
            "--keep-old-output-dir",
            default=False,
            action="store_true",
            help="This is default with this testsuite framework. The option"
            " is kept only to keep backward compatibility of invocation with"
            " former framework (gnatpython.testdriver)",
        )
        parser.add_argument(
            "--disable-cleanup",
            dest="enable_cleanup",
            action="store_false",
            default=True,
            help="disable cleanup of working space",
        )
        parser.add_argument(
            "-j",
            "--jobs",
            dest="jobs",
            type=int,
            metavar="N",
            default=Env().build.cpu.cores,
            help="Specify the number of jobs to run simultaneously",
        )
        parser.add_argument(
            "--show-error-output",
            "-E",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.",
        )
        parser.add_argument(
            "--show-time-info",
            action="store_true",
            help="Display time information for test results, if available")
        parser.add_argument(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.",
        )
        parser.add_argument(
            "--xunit-output",
            dest="xunit_output",
            metavar="FILE",
            help="Output testsuite report to the given file in the standard"
            " XUnit XML format. This is useful to display results in"
            " continuous build systems such as Jenkins.",
        )
        parser.add_argument(
            "--gaia-output",
            action="store_true",
            help="Output a GAIA-compatible testsuite report next to the YAML"
            " report.")
        parser.add_argument(
            "--truncate-logs",
            "-T",
            metavar="N",
            type=int,
            default=200,
            help="When outputs (for instance subprocess outputs) exceed 2*N"
            " lines, only include the first and last N lines in logs. This is"
            " necessary when storage for testsuite results have size limits,"
            " and the useful information is generally either at the beginning"
            " or the end of such outputs. If 0, never truncate logs.")
        parser.add_argument(
            "--failure-exit-code",
            metavar="N",
            type=int,
            default=self.default_failure_exit_code,
            help="Exit code the testsuite must use when at least one test"
            " result shows a failure/error. By default, this is"
            f" {self.default_failure_exit_code}. This option is useful when"
            " running a testsuite in a continuous integration setup, as this"
            " can make the testing process stop when there is a regression.")
        parser.add_argument("sublist",
                            metavar="tests",
                            nargs="*",
                            default=[],
                            help="test")
        # Add user defined options
        self.add_options(parser)

        # Parse options
        self.main.parse_args(args)
        assert self.main.args is not None

        # If there is a chance for the logging to end up in a non-tty stream,
        # disable colors. If not, be user-friendly and automatically show error
        # outputs.
        if (self.main.args.log_file or not isatty(sys.stdout)
                or not isatty(sys.stderr)):
            enable_colors = False
        else:  # interactive-only
            enable_colors = True
            self.main.args.show_error_output = True
        self.colors = ColorConfig(enable_colors)
        self.Fore = self.colors.Fore
        self.Style = self.colors.Style

        self.env = BaseEnv.from_env()
        self.env.enable_colors = enable_colors
        self.env.root_dir = self.root_dir
        self.env.test_dir = self.test_dir

        # At this stage, compute commonly used paths. Keep the working dir as
        # short as possible, to avoid the risk of having a path that's too
        # long (a problem often seen on Windows, or when using WRS tools that
        # have their own max path limitations).
        #
        # Note that we do make sure that working_dir is an absolute path, as we
        # are likely to be changing directories when running each test. A
        # relative path would no longer work under those circumstances.
        d = os.path.abspath(self.main.args.output_dir)
        self.output_dir = os.path.join(d, "new")
        self.old_output_dir = os.path.join(d, "old")

        if self.main.args.dev_temp:
            # Use a temporary directory for developers: make sure it is an
            # empty directory and disable cleanup to ease post-mortem
            # investigation.
            self.working_dir = os.path.abspath(self.main.args.dev_temp)
            rm(self.working_dir, recursive=True)
            mkdir(self.working_dir)
            self.main.args.enable_cleanup = False

        else:
            # If the temp dir is supposed to be randomized, we need to create a
            # subdirectory, so check that the parent directory exists first.
            if not os.path.isdir(self.main.args.temp_dir):
                logger.critical("temp dir '%s' does not exist",
                                self.main.args.temp_dir)
                return 1

            self.working_dir = tempfile.mkdtemp(
                "", "tmp", os.path.abspath(self.main.args.temp_dir))

        # Create the new output directory that will hold the results and create
        # an index for it.
        self.setup_result_dir()
        self.report_index = ReportIndex(self.output_dir)

        # Store in global env: target information and common paths
        self.env.output_dir = self.output_dir
        self.env.working_dir = self.working_dir
        self.env.options = self.main.args

        # User specific startup
        self.set_up()

        # Retrieve the list of tests
        self.has_error = False
        self.test_list = self.get_test_list(self.main.args.sublist)

        # Launch the mainloop
        self.total_test = len(self.test_list)
        self.run_test = 0

        self.scheduler = Scheduler(
            job_provider=self.job_factory,
            tokens=self.main.args.jobs,

            # collect_result expects specifically TestFragment instances (a Job
            # subclass), while Scheduler only guarantees Job instances.
            # Test drivers are supposed to register only TestFragment
            # instances, so the following cast should be fine.
            collect=cast(Any, self.collect_result),
        )
        actions = DAG()
        for parsed_test in self.test_list:
            if not self.add_test(actions, parsed_test):
                self.has_error = True
        actions.check()

        with open(os.path.join(self.output_dir, "tests.dot"), "w") as fd:
            fd.write(actions.as_dot())

        # Run the tests. When the testsuite aborts because of too many
        # consecutive test failures, we still want to produce a report and
        # exit through regular ways, so catch the KeyboardInterrupt that e3's
        # scheduler uses to abort the execution loop, but only in that case.
        # In other words, let the exception propagate if it's the user that
        # interrupted the testsuite.
        try:
            self.scheduler.run(actions)
        except KeyboardInterrupt:
            if not self.aborted_too_many_failures:  # interactive-only
                raise

        self.report_index.write()
        self.dump_testsuite_result()
        if self.main.args.xunit_output:
            dump_xunit_report(self, self.main.args.xunit_output)
        if self.main.args.gaia_output:
            dump_gaia_report(self, self.output_dir)

        # Clean everything
        self.tear_down()

        # Return the appropriate status code: 1 when there is a framework
        # issue, the failure status code from the --failure-exit-code=N option
        # when there is at least one testcase failure, or 0.
        statuses = {
            s
            for s, count in self.report_index.status_counters.items() if count
        }
        if self.has_error:
            return 1
        elif TestStatus.FAIL in statuses or TestStatus.ERROR in statuses:
            return self.main.args.failure_exit_code
        else:
            return 0