Example #1
def full_vm_params_and_strs(param_dict, vm_strs, use_vms_default):
    """
    Add default vm parameters and strings for any missing on the command line.

    :param param_dict: runtime parameters used for extra customization
    :type param_dict: {str, str} or None
    :param vm_strs: command line vm-specific names and variant restrictions
    :type vm_strs: {str, str}
    :param use_vms_default: whether to use default variant restriction for a
                            particular vm
    :type use_vms_default: {str, bool}
    :returns: complete vm parameters and strings
    :rtype: (:py:class:`Params`, {str, str})
    :raises: :py:class:`ValueError` if no command line or default variant
             restriction could be found for some vm
    """
    vms_config = param.Reparsable()
    vms_config.parse_next_batch(base_file="guest-base.cfg",
                                ovrwrt_file=param.vms_ovrwrt_file(),
                                ovrwrt_dict=param_dict)
    vms_params = vms_config.get_params()
    for vm_name in param.all_vms():
        if use_vms_default[vm_name]:
            default = vms_params.get("default_only_%s" % vm_name)
            if not default:
                raise ValueError("No default variant restriction found for %s!" % vm_name)
            vm_strs[vm_name] += "only %s\n" % default
    log.debug("Parsed vm strings '%s'", vm_strs)
    return vms_params, vm_strs
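
A minimal sketch of how the three arguments might be shaped; the vm names and variant restrictions below are hypothetical, and the commented-out call assumes an avocado-i2n environment with its Cartesian config files in place.

# Hypothetical argument shapes (vm names and variants are made up):
param_dict = {"vms": "vm1 vm2"}                   # extra runtime overrides, or None
vm_strs = {"vm1": "only CentOS\n", "vm2": ""}     # per-vm variant restrictions
use_vms_default = {"vm1": False, "vm2": True}     # vm2 falls back to its default

# vms_params, vm_strs = full_vm_params_and_strs(param_dict, vm_strs, use_vms_default)
# Afterwards vm_strs["vm2"] ends with an "only <default>\n" line if a
# default_only_vm2 value exists, otherwise a ValueError is raised.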
Example #2
def add_runner_failure(test_state, new_status, message):
    """
    Append runner failure to the overall test status.

    :param test_state: Original test state (dict)
    :param new_status: New test status (PASS/FAIL/ERROR/INTERRUPTED/...)
    :param message: The error message
    """
    # Try to propagate the message everywhere
    message = (f"Runner error occurred: {message}\n"
               f"Original status: {test_state.get('status')}\n{test_state}")
    TEST_LOG.error(message)
    test_log = test_state.get("logfile")
    if test_state.get("text_output"):
        test_state["text_output"] = f"{test_state['text_output']}\n{message}\n"
    else:
        test_state["text_output"] = message + "\n"
    if test_log:
        with open(test_log, "a", encoding='utf-8') as log_file:
            log_file.write('\n' + message + '\n')
    # Update the results
    if test_state.get("fail_reason"):
        test_state["fail_reason"] = f"{test_state['fail_reason']}\n{message}"
    else:
        test_state["fail_reason"] = message
    if test_state.get("fail_class"):
        test_state["fail_class"] = f"{test_state['fail_class']}\nRUNNER"
    else:
        test_state["fail_class"] = "RUNNER"
    test_state["status"] = new_status
    return test_state
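
A small self-contained illustration of how the state dict is mutated; the dict below is hypothetical and the call is left commented out because the function also logs through TEST_LOG and optionally appends to the test's log file.

# Hypothetical state of a test as delivered by the runner:
test_state = {
    "status": "PASS",
    "logfile": None,          # no on-disk log in this sketch
    "text_output": "test output",
    "fail_reason": None,
    "fail_class": None,
}

# state = add_runner_failure(test_state, "ERROR", "status server went away")
# Afterwards: state["status"] == "ERROR", state["fail_class"] == "RUNNER",
# and the runner error message is appended to text_output and fail_reason.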
Example #3
    def run_tests(self):
        """
        The actual test execution phase
        """
        self._log_job_debug_info()
        jobdata.record(self, sys.argv)

        if self.size == 0:
            msg = ('Unable to resolve any reference or '
                   '"resolver.references" is empty.')
            LOG_UI.error(msg)

        if not self.test_suites:
            self.exitcode |= exit_codes.AVOCADO_JOB_FAIL
            return self.exitcode

        summary = set()
        for suite in self.test_suites:
            summary |= suite.run(self)

        # If it's all good so far, set job status to 'PASS'
        if self.status == 'RUNNING':
            self.status = 'PASS'
        LOG_JOB.info('Test results available in %s', self.logdir)

        if 'INTERRUPTED' in summary:
            self.exitcode |= exit_codes.AVOCADO_JOB_INTERRUPTED
        if 'FAIL' in summary or 'ERROR' in summary:
            self.exitcode |= exit_codes.AVOCADO_TESTS_FAIL

        return self.exitcode
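
The exit status is built by OR-ing bit flags, so several conditions can be reported at once. A standalone sketch of that aggregation; the flag values are assumptions for illustration only (the real ones live in avocado.core.exit_codes):

# Assumed flag values, for illustration only
AVOCADO_ALL_OK = 0x0000
AVOCADO_TESTS_FAIL = 0x0001
AVOCADO_JOB_FAIL = 0x0002
AVOCADO_JOB_INTERRUPTED = 0x0008

exitcode = AVOCADO_ALL_OK
exitcode |= AVOCADO_TESTS_FAIL          # at least one test failed
exitcode |= AVOCADO_JOB_INTERRUPTED     # and the job was interrupted
print(bin(exitcode))                    # 0b1001: both conditions are preserved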
Example #4
    def run_suite(self, job, test_suite):
        summary = set()

        test_suite.tests, _ = nrunner.check_runnables_runner_requirements(
            test_suite.tests)
        job.result.tests_total = test_suite.variants.get_number_of_tests(
            test_suite.tests)

        listen = test_suite.config.get('nrunner.status_server_listen')
        self._start_status_server(listen)

        # pylint: disable=W0201
        self.runtime_tasks = self._get_all_runtime_tasks(test_suite)
        if test_suite.config.get('nrunner.shuffle'):
            random.shuffle(self.runtime_tasks)
        test_ids = [
            rt.task.identifier for rt in self.runtime_tasks
            if rt.task.category == 'test'
        ]
        tsm = TaskStateMachine(self.runtime_tasks, self.status_repo)
        spawner_name = test_suite.config.get('nrunner.spawner')
        spawner = SpawnerDispatcher(test_suite.config)[spawner_name].obj
        max_running = min(test_suite.config.get('nrunner.max_parallel_tasks'),
                          len(self.runtime_tasks))
        timeout = test_suite.config.get('task.timeout.running')
        workers = [
            Worker(state_machine=tsm,
                   spawner=spawner,
                   max_running=max_running,
                   task_timeout=timeout).run() for _ in range(max_running)
        ]
        asyncio.ensure_future(self._update_status(job))
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(
                asyncio.wait_for(asyncio.gather(*workers), job.timeout
                                 or None))
        except (KeyboardInterrupt, asyncio.TimeoutError, TestFailFast) as ex:
            LOG_JOB.info(str(ex))
            job.interrupted_reason = str(ex)
            summary.add("INTERRUPTED")

        # Wait until all messages may have been processed by the
        # status_updater. This should be replaced by a mechanism that only
        # waits if there are missing status messages to be processed, and
        # only for a given amount of time. Tests whose status was never
        # received will always show as SKIP because of result reconciliation.
        loop.run_until_complete(asyncio.sleep(0.05))

        job.result.end_tests()
        self.status_server.close()

        # Update the overall summary with found test statuses, which will
        # determine the Avocado command line exit status
        summary.update([
            status.upper()
            for status in self.status_repo.get_result_set_for_tasks(test_ids)
        ])
        return summary
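
The worker pool above boils down to one asyncio pattern: gather all worker coroutines and bound them with a single overall timeout. A stripped-down, standalone sketch of that pattern, with dummy coroutines standing in for avocado's Worker class:

import asyncio

async def dummy_worker(n):
    # stand-in for Worker(...).run(): do some work, then finish
    await asyncio.sleep(0.01 * n)
    return n

async def run_pool(max_running, timeout=None):
    workers = [dummy_worker(i) for i in range(max_running)]
    # a single timeout bounds the whole pool, mirroring "job.timeout or None" above
    return await asyncio.wait_for(asyncio.gather(*workers), timeout)

print(asyncio.run(run_pool(max_running=3, timeout=5)))   # [0, 1, 2]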
Example #5
 def _log_avocado_version():
     version_log = version.VERSION
     git_version = get_avocado_git_version()
     if git_version is not None:
         version_log += git_version
     LOG_JOB.info('Avocado version: %s', version_log)
     LOG_JOB.info('')
Example #6
    def run(self, args):
        """
        Take care of command line overwriting, parameter preparation,
        setup and cleanup chains, and paths/utilities for all host controls.
        """
        log.info("Manual setup chain started.")
        # set English environment (command output might be localized, need to be safe)
        os.environ['LANG'] = 'en_US.UTF-8'

        cmd_parser.params_from_cmd(args)

        run_params = param.prepare_params(
            base_file="guest-base.cfg",
            ovrwrt_dict={"vms": " ".join(args.selected_vms)},
            ovrwrt_file=param.vms_ovrwrt_file,
            ovrwrt_str=args.param_str)
        # prepare a setup step or a chain of such
        run_params["count"] = 0
        setup_chain = run_params["setup"].split()
        for setup_step in setup_chain:
            setup_func = getattr(intertest, setup_step)
            setup_func(args, run_params, "0m%s" % run_params["count"])
            run_params["count"] += 1

        log.info("Manual setup chain finished.")
Example #7
def add_runner_failure(test_state, new_status, message):
    """
    Append runner failure to the overall test status.

    :param test_state: Original test state (dict)
    :param new_status: New test status (PASS/FAIL/ERROR/INTERRUPTED/...)
    :param message: The error message
    """
    # Try to propagate the message everywhere
    message = ("Runner error occurred: %s\nOriginal status: %s\n%s" %
               (message, test_state.get("status"), test_state))
    TEST_LOG.error(message)
    test_log = test_state.get("logfile")
    if test_state.get("text_output"):
        test_state["text_output"] = "%s\n%s\n" % (test_state["text_output"],
                                                  message)
    else:
        test_state["text_output"] = message + "\n"
    if test_log:
        with open(test_log, "a") as log_file:
            log_file.write('\n' + message + '\n')
    # Update the results
    if test_state.get("fail_reason"):
        test_state["fail_reason"] = "%s\n%s" % (test_state["fail_reason"],
                                                message)
    else:
        test_state["fail_reason"] = message
    if test_state.get("fail_class"):
        test_state["fail_class"] = "%s\nRUNNER" % test_state["fail_class"]
    else:
        test_state["fail_class"] = "RUNNER"
    test_state["status"] = new_status
    return test_state
Example #8
def full_tests_params_and_str(param_dict, tests_str, use_tests_default):
    """
    Add default tests parameters and string for any missing on the command line.

    :param param_dict: runtime parameters used for extra customization
    :type param_dict: {str, str} or None
    :param str tests_str: command line variant restrictions
    :param bool use_tests_default: whether to use default primary restriction
    :returns: complete tests parameters and string
    :rtype: (:py:class:`Params`, str)
    :raises: :py:class:`ValueError` if the default primary restriction is not
             valid (i.e. not among the available ones)
    """
    tests_config = param.Reparsable()
    tests_config.parse_next_batch(base_file="groups-base.cfg",
                                  ovrwrt_file=param.tests_ovrwrt_file(),
                                  ovrwrt_dict=param_dict)
    tests_params = tests_config.get_params()
    if use_tests_default:
        default = tests_params.get("default_only", "all")
        available_restrictions = param.all_restrictions()
        if default not in available_restrictions:
            raise ValueError("Invalid primary restriction 'only=%s'! It has to be one "
                             "of %s" % (default, ", ".join(available_restrictions)))
        tests_str += "only %s\n" % default
    log.debug("Parsed tests string '%s'", tests_str)
    return tests_params, tests_str
Example #9
    def run_suite(self, job, test_suite):
        """
        Run one or more tests and report with test result.

        :param job: an instance of :class:`avocado.core.job.Job`.
        :param test_suite: a list of tests to run.
        :return: a set with types of test failures.
        """
        summary = set()
        replay_map = job.config.get('replay_map')
        execution_order = job.config.get('run.execution_order')
        queue = multiprocessing.SimpleQueue()
        if job.timeout > 0:
            deadline = time.time() + job.timeout
        else:
            deadline = None

        test_result_total = test_suite.variants.get_number_of_tests(
            test_suite.tests)
        no_digits = len(str(test_result_total))
        job.result.tests_total = test_result_total
        index = 1
        try:
            for test_factory in test_suite.tests:
                test_factory[1]["base_logdir"] = job.logdir
                test_factory[1]["config"] = job.config
            for test_factory, variant in self._iter_suite(
                    test_suite, execution_order):
                test_parameters = test_factory[1]
                name = test_parameters.get("name")
                if test_suite.name:
                    prefix = "{}-{}".format(test_suite.name, index)
                else:
                    prefix = index
                test_parameters["name"] = TestID(prefix, name, variant,
                                                 no_digits)
                if deadline is not None and time.time() > deadline:
                    summary.add('INTERRUPTED')
                    if 'methodName' in test_parameters:
                        del test_parameters['methodName']
                    test_factory = (TimeOutSkipTest, test_parameters)
                    if not self.run_test(job, test_factory, queue, summary):
                        break
                else:
                    if (replay_map is not None
                            and replay_map[index - 1] is not None):
                        test_factory = (replay_map[index - 1], test_parameters)
                    if not self.run_test(job, test_factory, queue, summary,
                                         deadline):
                        break
                index += 1
        except KeyboardInterrupt:
            TEST_LOG.error('Job interrupted by ctrl+c.')
            summary.add('INTERRUPTED')

        job.result.end_tests()
        job.funcatexit.run()
        signal.signal(signal.SIGTSTP, signal.SIG_IGN)
        return summary
Example #10
 def get(self, basedir):
     if not hasattr(self, 'tmp_dir'):
         if basedir is not None:
             self.basedir = basedir
         self.tmp_dir = tempfile.mkdtemp(prefix='avocado_',  # pylint: disable=W0201
                                         dir=self.basedir)
     elif basedir is not None and basedir != self.basedir:
         LOG_JOB.error("The tmp_dir was already created. The new basedir "
                       "you're trying to provide will have no effect.")
     return self.tmp_dir
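
The getter creates the temporary directory lazily and keeps returning it, so a different basedir passed later has no effect. A compact standalone sketch of that lazy-attribute pattern:

import tempfile

class TmpDirHolder:
    def get(self, basedir=None):
        # created once on the first call; later calls return the same directory
        if not hasattr(self, "tmp_dir"):
            self.tmp_dir = tempfile.mkdtemp(prefix="avocado_", dir=basedir)
        return self.tmp_dir

holder = TmpDirHolder()
first = holder.get()
second = holder.get(basedir=".")   # new basedir is ignored: directory already exists
assert first == second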
Example #11
 def _stop_logging(self):
     """
     Stop the logging activity of the test by cleaning the logger handlers.
     """
     self.log.removeHandler(self._file_handler)
     if isinstance(sys.stderr, output.LoggingFile):
         sys.stderr.rm_logger(LOG_JOB.getChild("stderr"))
     if isinstance(sys.stdout, output.LoggingFile):
         sys.stdout.rm_logger(LOG_JOB.getChild("stdout"))
     for name, handler in self._logging_handlers.items():
         logging.getLogger(name).removeHandler(handler)
Example #12
    def _get_msg_from_queue(self):
        """
        Helper method to handle safely getting messages from the queue.

        :return: Message, None if exception happened.
        :rtype: dict
        """
        try:
            return self.queue.get()
        # Let's catch all exceptions, since errors here mean a
        # crash in avocado.
        except Exception as details:  # pylint: disable=W0703
            self._failed = True
            TEST_LOG.error("RUNNER: Failed to read queue: %s", details)
            return None
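
The queue being read here is a multiprocessing.SimpleQueue shared with the test process. A standalone sketch of the producer/consumer handshake it guards, with a stub child standing in for the test process:

import multiprocessing

def child(queue):
    # stand-in for the test process pushing its state dict back to the runner
    queue.put({"status": "PASS", "early_status": True})

if __name__ == "__main__":
    queue = multiprocessing.SimpleQueue()
    proc = multiprocessing.Process(target=child, args=(queue,))
    proc.start()
    message = queue.get()     # blocks until the child sends something
    proc.join()
    print(message["status"])  # PASS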
Example #13
 def sigtstp_handler(signum, frame):     # pylint: disable=W0613
     """ SIGSTOP all test processes on SIGTSTP """
     if not proc:    # Ignore ctrl+z when proc not yet started
         return
     with sigtstp:
         msg = "ctrl+z pressed, %%s test (%s)" % proc.pid
         app_log_msg = '\n%s' % msg
         if self.sigstopped:
             APP_LOG.info(app_log_msg, "resumming")
             TEST_LOG.info(msg, "resumming")
             process.kill_process_tree(proc.pid, signal.SIGCONT, False)
             self.sigstopped = False
         else:
             APP_LOG.info(app_log_msg, "stopping")
             TEST_LOG.info(msg, "stopping")
             process.kill_process_tree(proc.pid, signal.SIGSTOP, False)
             self.sigstopped = True
Example #14
    def _check_reference(self,
                         produced_file_path,
                         reference_file_name,
                         diff_file_name,
                         child_log_name,
                         name='Content'):
        '''
        Compares the file produced by the test with the reference file

        :param produced_file_path: the location of the file that was produced
                                   by this test execution
        :type produced_file_path: str
        :param reference_file_name: the name of the file that will be compared
                                    with the content produced by this test
        :type reference_file_name: str
        :param diff_file_name: in case of differences between the produced
                               and reference file, a file with this name will
                               be saved to the test results directory, with
                               the differences in unified diff format
        :type diff_file_name: str
        :param child_log_name: the name of a logger, child of :data:`LOG_JOB`,
                               to be used when logging the content differences
        :type child_log_name: str
        :param name: optional parameter for a descriptive name of the type of
                     content being checked here
        :type name: str
        :returns: True if the check was performed (there was a reference file) and
                  was successful, and False otherwise (there was no such reference
                  file and thus no check was performed).
        :raises: :class:`exceptions.TestFail` when the check is performed and fails
        '''
        reference_path = self.get_data(reference_file_name)
        if reference_path is not None:
            expected = genio.read_file(reference_path)
            actual = genio.read_file(produced_file_path)
            diff_path = os.path.join(self.logdir, diff_file_name)

            fmt = '%(message)s'
            formatter = logging.Formatter(fmt=fmt)
            log_diff = LOG_JOB.getChild(child_log_name)
            self._register_log_file_handler(log_diff, formatter, diff_path)

            diff = unified_diff(expected.splitlines(),
                                actual.splitlines(),
                                fromfile=reference_path,
                                tofile=produced_file_path)
            diff_content = []
            for diff_line in diff:
                diff_content.append(diff_line.rstrip('\n'))

            if diff_content:
                self.log.debug('%s Diff:', name)
                for line in diff_content:
                    log_diff.debug(line)
                self.fail('Actual test %s differs from expected one' % name)
            else:
                return True
        return False
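
The comparison itself is plain difflib. A standalone sketch of producing the same kind of unified diff between an expected and an actual text (file names are hypothetical):

from difflib import unified_diff

expected = "line one\nline two\n"
actual = "line one\nline 2\n"

diff = unified_diff(expected.splitlines(), actual.splitlines(),
                    fromfile="reference.out", tofile="produced.out")
diff_content = [line.rstrip("\n") for line in diff]
if diff_content:
    print("\n".join(diff_content))   # ---/+++ header, then "-line two" / "+line 2"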
Example #15
    def _start_logging(self):
        """
        Simple helper for adding a file logger to the main logger.
        """
        self._file_handler = logging.FileHandler(filename=self.logfile)
        self._file_handler.setLevel(logging.DEBUG)

        fmt = '%(asctime)s %(levelname)-5.5s| %(message)s'
        formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')

        self._file_handler.setFormatter(formatter)
        self.log.addHandler(self._file_handler)
        self.log.propagate = False

        # Adding the test log FileHandler to the Avocado's main logger so
        # that everything logged while the test is running, for every logger,
        # also makes its way into the test log file
        logger = logging.getLogger('avocado')
        logger.addHandler(self._file_handler)

        stream_fmt = '%(message)s'
        stream_formatter = logging.Formatter(fmt=stream_fmt)

        log_test_stdout = LOG_JOB.getChild("stdout")
        log_test_stderr = LOG_JOB.getChild("stderr")
        log_test_output = LOG_JOB.getChild("output")

        self._register_log_file_handler(log_test_stdout,
                                        stream_formatter,
                                        self._stdout_file,
                                        raw=True)
        self._register_log_file_handler(log_test_stderr,
                                        stream_formatter,
                                        self._stderr_file,
                                        raw=True)
        self._register_log_file_handler(log_test_output,
                                        stream_formatter,
                                        self._output_file,
                                        raw=True)

        if isinstance(sys.stdout, output.LoggingFile):
            sys.stdout.add_logger(log_test_stdout)
        if isinstance(sys.stderr, output.LoggingFile):
            sys.stderr.add_logger(log_test_stderr)
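
A minimal standalone version of the file-logging setup above: one FileHandler with a timestamped format attached to a logger that does not propagate upwards (the log file name is hypothetical):

import logging

log = logging.getLogger("example.test")
log.setLevel(logging.DEBUG)

file_handler = logging.FileHandler(filename="example-test.log")   # hypothetical path
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(
    fmt='%(asctime)s %(levelname)-5.5s| %(message)s', datefmt='%H:%M:%S'))

log.addHandler(file_handler)
log.propagate = False      # keep test records from leaking into parent loggers
log.debug("everything logged through this logger now lands in the file")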
Example #16
    def run(self, config):
        """
        Take care of command line overwriting, parameter preparation,
        setup and cleanup chains, and paths/utilities for all host controls.
        """
        log.info("Manual setup chain started.")
        # set English environment (command output might be localized, need to be safe)
        os.environ['LANG'] = 'en_US.UTF-8'

        cmd_parser.params_from_cmd(config)
        intertest.load_addons_tools()
        run_params = config["vms_params"]

        # prepare a setup step or a chain of such
        setup_chain = run_params.objects("setup")
        for i, setup_step in enumerate(setup_chain):
            run_params["count"] = i
            setup_func = getattr(intertest, setup_step)
            setup_func(config, "0m%s" % i)

        log.info("Manual setup chain finished.")
Example #17
def permubuntu(config, tag=""):
    """
    Perform all extra setup needed for the ubuntu permanent vms.

    :param config: command line arguments and run configuration
    :type config: {str, str}
    :param str tag: extra name identifier for the test to be run
    """
    l, r = config["graph"].l, config["graph"].r
    selected_vms = sorted(config["vm_strs"].keys())
    LOG_UI.info("Starting permanent vm setup for %s (%s)",
                ", ".join(selected_vms), os.path.basename(r.job.logdir))

    for test_object in l.parse_objects(config["param_dict"],
                                       config["vm_strs"]):
        if test_object.key != "vms":
            continue
        vm = test_object
        # parse individual net only for the current vm
        net = l.parse_object_from_objects([vm],
                                          param_dict=config["param_dict"])
        logging.info("Performing extra setup for the permanent %s", vm.suffix)

        # consider this a special kind of state-converting test that concerns
        # permanent objects (i.e. instead of a transition from customize to on
        # root, it is a transition from a supposedly "permanentized" vm to the root)
        logging.info("Booting %s for the first permanent on state", vm.suffix)
        setup_dict = config["param_dict"].copy()
        setup_dict.update({"set_state_vms": "ready"})
        setup_str = param.re_str("all..internal..manage.start")
        test_node = l.parse_node_from_object(net,
                                             setup_dict,
                                             setup_str,
                                             prefix=tag)
        to_run = r.run_test_node(test_node)
        asyncio.get_event_loop().run_until_complete(
            asyncio.wait_for(to_run, r.job.timeout or None))

    LOG_UI.info("Finished permanent vm setup")
Example #18
    def run(self, config):
        """
        Take care of command line overwriting, parameter preparation,
        setup and cleanup chains, and paths/utilities for all host controls.
        """
        log.info("Manual setup chain started.")
        # set English environment (command output might be localized, need to be safe)
        os.environ['LANG'] = 'en_US.UTF-8'

        config["run.test_runner"] = "traverser"
        config["params"] = config["i2n.manu.params"]
        try:
            cmd_parser.params_from_cmd(config)
        except param.EmptyCartesianProduct as error:
            LOG_UI.error(error)
            return 1
        intertest.load_addons_tools()
        run_params = config["vms_params"]

        # prepare a setup step or a chain of such
        setup_chain = run_params.objects("setup")
        retcode = 0
        for i, setup_step in enumerate(setup_chain):
            run_params["count"] = i
            setup_func = getattr(intertest, setup_step)
            try:
                # TODO: drop the consideration of None in the future if the
                # functions from the intertest module do not return this value.
                if setup_func(config, "0m%s" % i) not in [None, 0]:
                    # return 1 if at least one of the steps fails
                    retcode = 1
            except Exception as error:
                LOG_UI.error(error)
                retcode = 1

        log.info("Manual setup chain finished.")
        return retcode
Example #19
    def _setup_job_category(self):
        """
        This has to be called after self.logdir has been defined

        It attempts to create a directory one level up from the job results,
        with the given category name.  Then, a symbolic link is created to
        this job results directory.

        This should allow a user to look at a single directory for all
        jobs of a given category.
        """
        category = self.config.get("run.job_category")
        if category is None:
            return

        if category != astring.string_to_safe_path(category):
            msg = (
                f"Unable to set category in job results: name is not "
                f"filesystem safe: {category}"
            )
            LOG_UI.warning(msg)
            LOG_JOB.warning(msg)
            return

        # we could also get "base_logdir" from config, but I believe this is
        # the best choice because it reduces the dependency surface (depends
        # only on self.logdir)
        category_path = os.path.join(os.path.dirname(self.logdir), category)
        try:
            os.mkdir(category_path)
        except FileExistsError:
            pass

        try:
            os.symlink(
                os.path.relpath(self.logdir, category_path),
                os.path.join(category_path, os.path.basename(self.logdir)),
            )
        except NotImplementedError:
            msg = f"Unable to link this job to category {category}"
            LOG_UI.warning(msg)
            LOG_JOB.warning(msg)
        except OSError:
            msg = f"Permission denied to link this job to category {category}"
            LOG_UI.warning(msg)
            LOG_JOB.warning(msg)
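
The category link is a relative symlink created next to the job results directory. A standalone sketch of the same mkdir-and-symlink dance using throwaway directories (the job and category names are made up):

import os
import tempfile

base = tempfile.mkdtemp()
logdir = os.path.join(base, "job-2024-01-01T00.00-deadbeef")    # hypothetical job dir
os.mkdir(logdir)

category_path = os.path.join(os.path.dirname(logdir), "smoke")  # hypothetical category
os.makedirs(category_path, exist_ok=True)       # tolerate a pre-existing category dir
os.symlink(os.path.relpath(logdir, category_path),
           os.path.join(category_path, os.path.basename(logdir)))
print(os.readlink(os.path.join(category_path, os.path.basename(logdir))))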
Example #20
 def _log_cmdline():
     cmdline = " ".join(sys.argv)
     LOG_JOB.info("Command line: %s", cmdline)
     LOG_JOB.info('')
Example #21
    def finish(self, proc, started, step, deadline, result_dispatcher):
        """
        Wait for the test process to finish and report its status, or an error
        status if the status could not be obtained by the deadline.

        :param proc: The test's process
        :param started: Time when the test started
        :param step: Step between checks for the status
        :param deadline: Test execution deadline
        :param result_dispatcher: Result dispatcher (for test_progress
               notifications)
        """
        # Wait for either process termination or test status
        wait.wait_for(lambda: not proc.is_alive() or self.status, 1, 0, step)
        config = settings.as_dict()
        if self.status:  # status exists, wait for process to finish
            timeout_process_alive = config.get('runner.timeout.process_alive')
            deadline = min(deadline, time.monotonic() + timeout_process_alive)
            while time.monotonic() < deadline:
                result_dispatcher.map_method('test_progress', False)
                if wait.wait_for(lambda: not proc.is_alive(), 1, 0, step):
                    return self._add_status_failures(self.status)
            err = "Test reported status but did not finish"
        else:  # proc finished, wait for late status delivery
            timeout_process_died = config.get('runner.timeout.process_died')
            deadline = min(deadline, time.monotonic() + timeout_process_died)
            while time.monotonic() < deadline:
                result_dispatcher.map_method('test_progress', False)
                if wait.wait_for(lambda: self.status, 1, 0, step):
                    # Status delivered after the test process finished, pass
                    return self._add_status_failures(self.status)
            err = "Test died without reporting the status."
        # At this point there were failures, fill the new test status
        TEST_LOG.debug("Original status: %s", str(self.status))
        test_state = self.early_status
        test_state['time_start'] = started
        test_state['time_elapsed'] = time.monotonic() - started
        test_state['fail_reason'] = err
        test_state['status'] = exceptions.TestAbortError.status
        test_state['fail_class'] = (
            exceptions.TestAbortError.__class__.__name__)
        test_state['traceback'] = 'Traceback not available'
        try:
            with open(test_state['logfile'], 'r') as log_file_obj:
                test_state['text_output'] = log_file_obj.read()
        except IOError:
            test_state["text_output"] = "Not available, file not created yet"
        TEST_LOG.error('ERROR %s -> TestAbortError: %s.', err,
                       test_state['name'])
        if proc.is_alive():
            TEST_LOG.warning("Killing hanged test process %s", proc.pid)
            os.kill(proc.pid, signal.SIGTERM)
            if not wait.wait_for(lambda: not proc.is_alive(), 1, 0, 0.01):
                os.kill(proc.pid, signal.SIGKILL)
                end_time = time.monotonic() + 60
                while time.monotonic() < end_time:
                    if not proc.is_alive():
                        break
                    time.sleep(0.1)
                else:
                    raise exceptions.TestError("Unable to destroy test's "
                                               "process (%s)" % proc.pid)
        return self._add_status_failures(test_state)
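
The hung-process cleanup escalates from SIGTERM to SIGKILL before giving up. A standalone sketch of that escalation against a throwaway child that ignores SIGTERM (POSIX signals assumed):

import multiprocessing
import os
import signal
import time

def hang():
    # stand-in for a test process that ignores polite termination requests
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    time.sleep(3600)

if __name__ == "__main__":
    proc = multiprocessing.Process(target=hang)
    proc.start()
    time.sleep(0.2)                         # give the child time to install its handler
    os.kill(proc.pid, signal.SIGTERM)       # polite request first
    proc.join(timeout=1)
    if proc.is_alive():
        os.kill(proc.pid, signal.SIGKILL)   # escalate when SIGTERM is ignored
        proc.join(timeout=5)
    print("still alive:", proc.is_alive())  # False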
Example #22
 def soft_abort(msg):
     """ Only log the problem """
     LOG_JOB.warning("Unable to update the latest link: %s", msg)
Example #23
    def run_suite(self, job, result, test_suite, variants, timeout=0,
                  replay_map=None, execution_order=None):
        """
        Run one or more tests and report with test result.

        :param job: an instance of :class:`avocado.core.job.Job`.
        :param result: an instance of :class:`avocado.core.result.Result`
        :param test_suite: a list of tests to run.
        :param variants: A varianter iterator to produce test params.
        :param timeout: maximum amount of time (in seconds) to execute.
        :param replay_map: optional list to override test class based on test
                           index.
        :param execution_order: Mode in which we should iterate through tests
                                and variants.  If not provided, will default to
                                :attr:`DEFAULT_EXECUTION_ORDER`.
        :return: a set with types of test failures.
        """
        summary = set()
        if job.sysinfo is not None:
            job.sysinfo.start_job_hook()
        queue = multiprocessing.SimpleQueue()
        if timeout > 0:
            deadline = time.time() + timeout
        else:
            deadline = None

        test_result_total = variants.get_number_of_tests(test_suite)
        no_digits = len(str(test_result_total))
        result.tests_total = test_result_total
        index = -1
        try:
            for test_factory in test_suite:
                test_factory[1]["base_logdir"] = job.logdir
                test_factory[1]["job"] = job
            if execution_order is None:
                execution_order = self.DEFAULT_EXECUTION_ORDER
            for test_factory, variant in self._iter_suite(job,
                                                          test_suite,
                                                          variants,
                                                          execution_order):
                index += 1
                test_parameters = test_factory[1]
                name = test_parameters.get("name")
                test_parameters["name"] = test.TestID(index + 1, name,
                                                      variant,
                                                      no_digits)
                if deadline is not None and time.time() > deadline:
                    summary.add('INTERRUPTED')
                    if 'methodName' in test_parameters:
                        del test_parameters['methodName']
                    test_factory = (test.TimeOutSkipTest, test_parameters)
                    if not self.run_test(job, result, test_factory, queue, summary):
                        break
                else:
                    if (replay_map is not None and
                            replay_map[index] is not None):
                        test_parameters["methodName"] = "test"
                        test_factory = (replay_map[index], test_parameters)

                    if not self.run_test(job, result, test_factory, queue, summary,
                                         deadline):
                        break
        except KeyboardInterrupt:
            TEST_LOG.error('Job interrupted by ctrl+c.')
            summary.add('INTERRUPTED')

        if job.sysinfo is not None:
            job.sysinfo.end_job_hook()
        result.end_tests()
        job.funcatexit.run()
        signal.signal(signal.SIGTSTP, signal.SIG_IGN)
        return summary
Example #24
 def _log_tmp_dir(self):
     LOG_JOB.info('Temporary dir: %s', self.tmpdir)
     LOG_JOB.info('')
Example #25
 def _log_variants(variants):
     lines = variants.to_str(summary=1, variants=1, use_utf8=False)
     for line in lines.splitlines():
         LOG_JOB.info(line)
Example #26
 def _log_job_id(self):
     LOG_JOB.info('Job ID: %s', self.unique_id)
     if self.replay_sourcejob is not None:
         LOG_JOB.info('Replay of Job ID: %s', self.replay_sourcejob)
     LOG_JOB.info('')
Example #27
 def end_test(self, result, state):
     if self.runner == 'nrunner':
         LOG_JOB.info('%s: %s', self._get_name(state),
                      state.get("status", "ERROR"))
         LOG_JOB.info('More information in %s', state.get('task_path', ''))
Example #28
    def _run_test(job, result, test_factory, queue):
        """
        Run a test instance.

        This code is the first thing that runs inside a new process, known here
        as the test process. It communicates to the test runner by using
        :param:`queue`. It's important that this early state is given to the
        test runner in a reliable way.

        :param test_factory: Test factory (test class and parameters).
        :type test_factory: tuple of :class:`avocado.core.test.Test` and dict.
        :param queue: Multiprocess queue.
        :type queue: :class:`multiprocessing.Queue` instance.
        """
        sys.stdout = output.LoggingFile(["[stdout] "], loggers=[TEST_LOG])
        sys.stderr = output.LoggingFile(["[stderr] "], loggers=[TEST_LOG])

        def sigterm_handler(signum, frame):     # pylint: disable=W0613
            """ Produce traceback on SIGTERM """
            raise RuntimeError("Test interrupted by SIGTERM")

        signal.signal(signal.SIGTERM, sigterm_handler)

        # At this point, the original `sys.stdin` has already been
        # closed and replaced with `os.devnull` by
        # `multiprocessing.Process()` (not directly from Avocado
        # code).  Still, tests trying to use file descriptor 0 would
        # be able to read from the tty, and would hang. Let's replace
        # STDIN fd (0), with the same fd previously set by
        # `multiprocessing.Process()`
        os.dup2(sys.stdin.fileno(), 0)

        instance = loader.load_test(test_factory)
        if instance.runner_queue is None:
            instance.set_runner_queue(queue)
        early_state = instance.get_state()
        early_state['early_status'] = True
        try:
            queue.put(early_state)
        except queueFullException:
            instance.error(stacktrace.str_unpickable_object(early_state))

        result.start_test(early_state)
        job.result_events_dispatcher.map_method('start_test',
                                                result,
                                                early_state)
        if job.config.get('run.log_test_data_directories'):
            data_sources = getattr(instance, "DATA_SOURCES", [])
            if data_sources:
                locations = []
                for source in data_sources:
                    locations.append(instance.get_data("", source=source,
                                                       must_exist=False))
                TEST_LOG.info('Test data directories: ')
                for source, location in zip(data_sources, locations):
                    if location is not None:
                        TEST_LOG.info('  %s: %s', source, location)
                TEST_LOG.info('')
        try:
            instance.run_avocado()
        finally:
            try:
                state = instance.get_state()
                queue.put(state)
            except queueFullException:
                instance.error(stacktrace.str_unpickable_object(state))
Example #29
 def _log_avocado_datadir(self):
     LOG_JOB.info('Avocado Data Directories:')
     LOG_JOB.info('')
     LOG_JOB.info('base     %s', self.config.get('datadir.paths.base_dir'))
     LOG_JOB.info('tests    %s', data_dir.get_test_dir())
     LOG_JOB.info('data     %s', self.config.get('datadir.paths.data_dir'))
     LOG_JOB.info('logs     %s', self.logdir)
     LOG_JOB.info('')
Example #30
 def _log_avocado_config(self):
     LOG_JOB.info('Avocado config:')
     LOG_JOB.info('')
     for line in pprint.pformat(self.config).splitlines():
         LOG_JOB.info(line)
     LOG_JOB.info('')