Example #1
    def run(self, pav_cfg, args):
        """Resolve the test configurations into individual tests and assign to
        schedulers. Have those schedulers kick off jobs to run the individual
        tests themselves.
        :param pav_cfg: The pavilion configuration.
        :param args: The parsed command line argument object.
        """
        # 1. Resolve the test configs
        #   - Get sched vars from scheduler.
        #   - Compile variables.
        #

        mb_tracker = MultiBuildTracker()

        local_builds_only = getattr(args, 'local_builds_only', False)

        test_list = self._get_tests(
            pav_cfg, args, mb_tracker, build_only=self.BUILD_ONLY,
            local_builds_only=local_builds_only)
        if test_list is None:
            return errno.EINVAL

        all_tests = test_list
        self.last_tests = all_tests

        if not all_tests:
            fprint("You must specify at least one test.", file=self.errfile)
            return errno.EINVAL

        series = TestSeries(pav_cfg, all_tests)
        self.last_series = series

        res = self.check_result_format(all_tests)
        if res != 0:
            self._complete_tests(all_tests)
            return res

        all_tests = [test for test in all_tests if not test.skipped]

        res = cmd_utils.build_local(
            tests=all_tests,
            max_threads=pav_cfg.build_threads,
            mb_tracker=mb_tracker,
            build_verbosity=args.build_verbosity,
            outfile=self.outfile,
            errfile=self.errfile)
        if res != 0:
            self._complete_tests(all_tests)
            return res

        self._complete_tests([test for test in all_tests if
                              test.build_only and test.build_local])

        wait = getattr(args, 'wait', None)
        report_status = getattr(args, 'status', False)

        if self.BUILD_ONLY and local_builds_only:
            non_local_build_tests = [test for test in all_tests
                                     if not test.build_local]
            if non_local_build_tests:
                fprint(
                    "Skipping tests that are set to build on nodes: {}"
                    .format([test.name for test in non_local_build_tests]),
                    file=self.outfile, color=output.YELLOW)
            return 0

        return series.run_tests(
            pav_cfg=pav_cfg,
            wait=wait,
            report_status=report_status,
            outfile=self.outfile,
            errfile=self.errfile
        )
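
The run() method above is normally reached through Pavilion's command registry rather than called directly. The sketch below shows roughly how a caller could drive it; it assumes a pav_cfg object has already been loaded elsewhere, and the argparse.Namespace only covers the attributes this snippet reads via getattr(), so the real command parser supplies considerably more (test names, modes, host, and so on).

import argparse

from pavilion import commands  # import path assumed from the get_command() usage in Examples 2 and 3

# pav_cfg is assumed to be a loaded Pavilion configuration; it is not built here.
args = argparse.Namespace(
    local_builds_only=False,   # read via getattr(args, 'local_builds_only', False)
    build_verbosity=0,
    wait=None,                 # read via getattr(args, 'wait', None)
    status=False,              # read via getattr(args, 'status', False)
)

run_cmd = commands.get_command('run')   # same lookup used in Examples 2 and 3
result = run_cmd.run(pav_cfg, args)     # 0 on success, errno.EINVAL on bad input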
Example #2
    def run_set(self,
                log=False,
                local_builds_only=False,
                build_only=False,
                build_verbosity=0,
                wait=None,
                run_cmd=None,
                rebuild=False):
        """Runs tests in set. """

        mb_tracker = MultiBuildTracker(log=log)

        if not run_cmd:
            run_cmd = commands.get_command('run')

        try:
            new_conditions = {'only_if': self.only_if, 'not_if': self.not_if}

            # resolve configs
            test_configs = cmd_utils.get_test_configs(
                pav_cfg=self.pav_cfg,
                host=self.host,
                tests=self._test_names,
                modes=self.modes,
                overrides=self.series_obj.overrides,
                outfile=self.outfile,
                conditions=new_conditions,
            )

            # Remove non-local builds when only doing local builds.
            if build_only and local_builds_only:
                locally_built_tests = []
                for ptest in test_configs:
                    if ptest.config['build']['on_nodes'].lower() != 'true':
                        locally_built_tests.append(ptest)

                test_configs = locally_built_tests

            # configs->tests
            test_list = cmd_utils.configs_to_tests(
                pav_cfg=self.pav_cfg,
                proto_tests=test_configs,
                mb_tracker=mb_tracker,
                build_only=build_only,
                rebuild=rebuild,
                outfile=self.outfile,
            )

        except (commands.CommandError, test_config.TestConfigError) as err:
            self.done = True
            output.fprint("Error resolving configs. \n{}".format(err.args[0]),
                          file=self.errfile,
                          color=output.RED)
            return errno.EINVAL

        if not test_list:
            self.done = True
            self.all_pass = True
            fprint("You must specify at least one test.", file=self.errfile)
            return errno.EINVAL

        all_tests = test_list
        run_cmd.last_tests = all_tests

        # assign tests to series and vice versa
        self.series_obj.add_tests(all_tests)
        for test_obj in all_tests:
            self.tests[test_obj.name] = test_obj

        run_cmd.last_series = self.series_obj

        # make sure result parsers are ok
        res = cmd_utils.check_result_format(all_tests, self.errfile)
        if res != 0:
            self.done = True
            cmd_utils.complete_tests(all_tests)
            return res

        all_tests = [test for test in all_tests if not test.skipped]

        # attempt to build
        res = cmd_utils.build_local(tests=all_tests,
                                    max_threads=self.pav_cfg.build_threads,
                                    mb_tracker=mb_tracker,
                                    build_verbosity=build_verbosity,
                                    outfile=self.outfile,
                                    errfile=self.errfile)
        if res != 0:
            self.done = True
            cmd_utils.complete_tests(all_tests)
            return res

        cmd_utils.complete_tests([
            test for test in all_tests if test.build_only and test.build_local
        ])

        if build_only and local_builds_only:
            non_local_build_tests = [
                test for test in all_tests if not test.build_local
            ]
            if non_local_build_tests:
                fprint("Skipping tests that are not set to build on nodes. {}".
                       format([test.name for test in non_local_build_tests]),
                       file=self.outfile,
                       color=output.YELLOW)
                return 0

        # deal with simultaneous here
        if self.series_obj.config['simultaneous'] is None:
            res = self.series_obj.run_tests(tests=all_tests, wait=wait)

            return res

        else:
            simult = int(self.series_obj.config['simultaneous'])

            for test in all_tests:
                self.test_wait(simult)
                self.series_obj.run_tests(tests=[test])
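
The simultaneous branch above relies on self.test_wait(simult) to block until fewer than simult tests are still active before launching the next one. The helper below is a minimal sketch of that throttling idea under stated assumptions: count_active() and the complete attribute are hypothetical stand-ins for whatever status check the series object actually performs, and this is not Pavilion's implementation of test_wait().

import time

def count_active(tests):
    """Stand-in predicate: how many tests are still scheduled or running."""
    return sum(1 for test in tests if not test.complete)

def test_wait(simult, tests, poll_interval=1.0):
    """Block until fewer than simult tests are active, then return so the
    caller can kick off the next test (mirrors the loop ending run_set)."""
    while count_active(tests) >= simult:
        time.sleep(poll_interval)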
Example #3
    def run_set(self):
        """Runs tests in set. """

        mb_tracker = MultiBuildTracker(log=False)

        run_cmd = commands.get_command('run')

        # Mirrors run.RunCommand._get_tests().
        try:
            new_conditions = {
                'only_if': self.only_if,
                'not_if': self.not_if
            }

            # resolve configs
            test_configs = cmd_utils.get_test_configs(
                pav_cfg=self.pav_cfg,
                host=self.host,
                tests=self._test_names,
                modes=self.modes,
                outfile=self.outfile,
                conditions=new_conditions,
            )

            # configs->tests
            test_list = cmd_utils.configs_to_tests(
                pav_cfg=self.pav_cfg,
                proto_tests=test_configs,
                mb_tracker=mb_tracker,
                outfile=self.outfile,
            )

        except (commands.CommandError, test_config.TestConfigError) as err:
            self.done = True
            output.fprint("Error resolving configs. \n{}".format(err.args[0]),
                          file=self.errfile, color=output.RED)
            return None

        if not test_list:
            self.done = True
            self.all_pass = True
            return None

        all_tests = test_list
        run_cmd.last_tests = all_tests

        # assign tests to series and vice versa
        self.series_obj.add_tests(all_tests)
        for test_obj in all_tests:
            self.tests[test_obj.name] = test_obj

        run_cmd.last_series = self.series_obj

        # make sure result parsers are ok
        res = cmd_utils.check_result_format(all_tests, self.errfile)
        if res != 0:
            self.done = True
            cmd_utils.complete_tests(all_tests)
            return None

        # attempt to build
        res = cmd_utils.build_local(
            tests=all_tests,
            max_threads=self.pav_cfg.build_threads,
            mb_tracker=mb_tracker,
            outfile=self.outfile,
            errfile=self.errfile
        )
        if res != 0:
            self.done = True
            cmd_utils.complete_tests(all_tests)
            return None

        # deal with simultaneous here
        if self.series_obj.config['simultaneous'] is None:
            self.series_obj.run_tests(tests=all_tests)
        else:
            simult = int(self.series_obj.config['simultaneous'])

            for test in all_tests:
                self.test_wait(simult)
                self.series_obj.run_tests(tests=[test])
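
Both run_set() variants feed an only_if/not_if conditions dict into cmd_utils.get_test_configs(). The function below is an illustrative sketch of how such conditions can gate a test; it is not Pavilion's resolver, and the exact matching semantics (regex values, deferred variables, and so on) are assumptions here. Each condition simply maps a variable name to a list of values.

def should_run(variables, only_if=None, not_if=None):
    """Illustrative only: decide whether a test runs given resolved variables."""
    only_if = only_if or {}
    not_if = not_if or {}

    # only_if: every listed variable must take one of its allowed values.
    for var, allowed in only_if.items():
        if variables.get(var) not in allowed:
            return False

    # not_if: a single match on any listed variable disqualifies the test.
    for var, banned in not_if.items():
        if variables.get(var) in banned:
            return False

    return True

print(should_run({'sys_name': 'toss'}, only_if={'sys_name': ['toss', 'cray']}))  # True
print(should_run({'sys_name': 'toss'}, not_if={'sys_name': ['toss']}))           # False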
Example #4
def build_local(tests: List[TestRun],
                mb_tracker: MultiBuildTracker,
                max_threads: int = 4,
                build_verbosity: int = 0,
                outfile: TextIO = StringIO(),
                errfile: TextIO = StringIO()):
    """Build all tests that request for their build to occur on the
    kickoff host.

    :param tests: The list of tests to potentially build.
    :param max_threads: Maximum number of build threads to start.
    :param build_verbosity: How much info to print during building.
        0 - Quiet, 1 - verbose, 2+ - very verbose
    :param mb_tracker: The tracker for all builds.
    :param outfile: Where to print user messages.
    :param errfile: Where to print user error messages.
    """

    test_threads = []  # type: List[Union[threading.Thread, None]]
    remote_builds = []

    cancel_event = threading.Event()

    # Generate new build names for each test that is rebuilding.
    # We do this here, even for non-local tests, because otherwise the
    # non-local tests can't tell what was built fresh, either on a
    # front-end or by other tests rebuilding on nodes.
    for test in tests:
        if test.rebuild and test.builder.exists():
            test.builder.deprecate()
            test.builder.rename_build()
            test.build_name = test.builder.name
            test.save_attributes()

    # We don't want to start threads that are just going to wait on a lock,
    # so we'll rearrange the builds so that the unique build names go first.
    # We'll use this as a stack, so tests that should build first go at
    # the end of the list.
    build_order = []
    # If we've seen a build name, the build can go later.
    seen_build_names = set()

    for test in tests:
        if not test.build_local:
            remote_builds.append(test)
        elif test.builder.name not in seen_build_names:
            build_order.append(test)
            seen_build_names.add(test.builder.name)
        else:
            build_order.insert(0, test)

    # Keep track of what the last message printed per build was.
    # This is for double build verbosity.
    message_counts = {test.id: 0 for test in tests}

    # Used to track which threads are for which tests.
    test_by_threads = {}

    if build_verbosity > 0:
        output.fprint(BUILD_STATUS_PREAMBLE.format(when='When',
                                                   test_id='TestID',
                                                   state_len=STATES.max_length,
                                                   state='State'),
                      'Message',
                      file=outfile,
                      width=None)

    builds_running = 0
    # Run and track <max_threads> build threads, giving output according
    # to the verbosity level. As threads finish, new ones are started until
    # either all builds complete or a build fails, in which case all tests
    # are aborted.
    while build_order or test_threads:
        # Start a new thread if we haven't hit our limit.
        if build_order and builds_running < max_threads:
            test = build_order.pop()

            test_thread = threading.Thread(target=test.build,
                                           args=(cancel_event, ))
            test_threads.append(test_thread)
            test_by_threads[test_thread] = test
            test_thread.start()
            # Track active builds so the max_threads limit is enforced; the
            # count is decremented when the thread is joined below.
            builds_running += 1

        # Check if all our threads are alive, and join those that aren't.
        for i in range(len(test_threads)):
            thread = test_threads[i]
            if not thread.is_alive():
                thread.join()
                builds_running -= 1
                test_threads[i] = None
                test = test_by_threads[thread]
                del test_by_threads[thread]

                # Only output test status after joining a thread.
                if build_verbosity == 1:
                    notes = mb_tracker.get_notes(test.builder)
                    when, state, msg = notes[-1]
                    when = output.get_relative_timestamp(when)
                    preamble = (BUILD_STATUS_PREAMBLE.format(
                        when=when,
                        test_id=test.id,
                        state_len=STATES.max_length,
                        state=state))
                    output.fprint(preamble,
                                  msg,
                                  wrap_indent=len(preamble),
                                  file=outfile,
                                  width=None)

        test_threads = [thr for thr in test_threads if thr is not None]

        if cancel_event.is_set():
            for thread in test_threads:
                thread.join()

            for test in tests:
                if (test.status.current().state
                        not in (STATES.BUILD_FAILED, STATES.BUILD_ERROR)):
                    test.status.set(
                        STATES.ABORTED,
                        "Run aborted due to failures in other builds.")

            output.fprint("Build error while building tests. Cancelling runs.",
                          color=output.RED,
                          file=outfile,
                          clear=True)
            output.fprint(
                "Failed builds are placed in <working_dir>/test_runs/"
                "<test_id>/build for the corresponding test run.",
                color=output.CYAN,
                file=outfile)

            for failed_build in mb_tracker.failures():
                output.fprint(
                    "Build error for test {f.test.name} (#{f.test.id}).".
                    format(f=failed_build),
                    file=errfile)
                output.fprint(
                    "See test status file (pav cat {id} status) and/or "
                    "the test build log (pav log build {id})".format(
                        id=failed_build.test.id),
                    file=errfile)

            return errno.EINVAL

        state_counts = mb_tracker.state_counts()
        if build_verbosity == 0:
            # Print a self-clearing one-liner of the counts of the
            # build statuses.
            parts = []
            for state in sorted(state_counts.keys()):
                parts.append("{}: {}".format(state, state_counts[state]))
            line = ' | '.join(parts)
            output.fprint(line, end='\r', file=outfile, width=None, clear=True)
        elif build_verbosity > 1:
            for test in tests:
                seen = message_counts[test.id]
                msgs = mb_tracker.messages[test.builder][seen:]
                for when, state, msg in msgs:
                    when = output.get_relative_timestamp(when)
                    state = '' if state is None else state
                    preamble = BUILD_STATUS_PREAMBLE.format(
                        when=when,
                        test_id=test.id,
                        state_len=STATES.max_length,
                        state=state)

                    output.fprint(preamble,
                                  msg,
                                  wrap_indent=len(preamble),
                                  file=outfile,
                                  width=None)
                message_counts[test.id] += len(msgs)

        time.sleep(BUILD_SLEEP_TIME)

    if build_verbosity == 0:
        # Print a newline after our last status update.
        output.fprint(width=None, file=outfile)

    return 0
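
The comment block before the ordering loop in build_local() describes the key trick: the build list is treated as a stack, first-seen (unique) build names are appended so they pop first, and repeats are pushed to the front so they run later, once the shared build already exists and they only need to wait on its lock. The snippet below reproduces that pattern on plain strings as a self-contained illustration; it is not part of build_local().

def order_builds(build_names):
    """Return a stack in which every first-seen name pops before any repeat."""
    build_order = []
    seen_build_names = set()
    for name in build_names:
        if name not in seen_build_names:
            build_order.append(name)        # unique name: build it early
            seen_build_names.add(name)
        else:
            build_order.insert(0, name)     # repeat: let it wait on the finished build
    return build_order

stack = order_builds(['a', 'b', 'a', 'c', 'b'])
print([stack.pop() for _ in range(len(stack))])
# ['c', 'b', 'a', 'a', 'b'] -- every unique name pops before any repeat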