Example #1
def add_runner_failure(test_state, new_status, message):
    """
    Append runner failure to the overall test status.

    :param test_state: Original test state (dict)
    :param new_status: New test status (PASS/FAIL/ERROR/INTERRUPTED/...)
    :param message: The error message
    """
    # Try to propagate the message everywhere
    message = (f"Runner error occurred: {message}\n"
               f"Original status: {test_state.get('status')}\n{test_state}")
    TEST_LOG.error(message)
    test_log = test_state.get("logfile")
    if test_state.get("text_output"):
        test_state["text_output"] = f"{test_state['text_output']}\n{message}\n"
    else:
        test_state["text_output"] = message + "\n"
    if test_log:
        with open(test_log, "a", encoding='utf-8') as log_file:
            log_file.write('\n' + message + '\n')
    # Update the results
    if test_state.get("fail_reason"):
        test_state["fail_reason"] = f"{test_state['fail_reason']}\n{message}"
    else:
        test_state["fail_reason"] = message
    if test_state.get("fail_class"):
        test_state["fail_class"] = f"{test_state['fail_class']}\nRUNNER"
    else:
        test_state["fail_class"] = "RUNNER"
    test_state["status"] = new_status
    return test_state
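A minimal usage sketch of the function above, assuming a plain logging logger stands in for the module-level TEST_LOG and using a stripped-down, hypothetical test_state dict (the real one carries many more keys):

import logging

TEST_LOG = logging.getLogger("avocado.test")

# Hypothetical minimal test state, for illustration only
test_state = {"status": "PASS", "logfile": None, "text_output": "original output"}
test_state = add_runner_failure(test_state, "ERROR", "worker crashed")

print(test_state["status"])       # ERROR
print(test_state["fail_class"])   # RUNNER
print(test_state["fail_reason"])  # Runner error occurred: worker crashed ...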
Example #2
def add_runner_failure(test_state, new_status, message):
    """
    Append runner failure to the overall test status.

    :param test_state: Original test state (dict)
    :param new_status: New test status (PASS/FAIL/ERROR/INTERRUPTED/...)
    :param message: The error message
    """
    # Try to propagate the message everywhere
    message = ("Runner error occurred: %s\nOriginal status: %s\n%s" %
               (message, test_state.get("status"), test_state))
    TEST_LOG.error(message)
    test_log = test_state.get("logfile")
    if test_state.get("text_output"):
        test_state["text_output"] = "%s\n%s\n" % (test_state["text_output"],
                                                  message)
    else:
        test_state["text_output"] = message + "\n"
    if test_log:
        with open(test_log, "a") as log_file:
            log_file.write('\n' + message + '\n')
    # Update the results
    if test_state.get("fail_reason"):
        test_state["fail_reason"] = "%s\n%s" % (test_state["fail_reason"],
                                                message)
    else:
        test_state["fail_reason"] = message
    if test_state.get("fail_class"):
        test_state["fail_class"] = "%s\nRUNNER" % test_state["fail_class"]
    else:
        test_state["fail_class"] = "RUNNER"
    test_state["status"] = new_status
    return test_state
Example #3
    def run_suite(self, job, test_suite):
        """
        Run one or more tests and report with test result.

        :param job: an instance of :class:`avocado.core.job.Job`.
        :param test_suite: a list of tests to run.
        :return: a set with types of test failures.
        """
        summary = set()
        replay_map = job.config.get('replay_map')
        execution_order = job.config.get('run.execution_order')
        queue = multiprocessing.SimpleQueue()
        if job.timeout > 0:
            deadline = time.time() + job.timeout
        else:
            deadline = None

        test_result_total = test_suite.variants.get_number_of_tests(
            test_suite.tests)
        no_digits = len(str(test_result_total))
        job.result.tests_total = test_result_total
        index = 1
        try:
            for test_factory in test_suite.tests:
                test_factory[1]["base_logdir"] = job.logdir
                test_factory[1]["config"] = job.config
            for test_factory, variant in self._iter_suite(
                    test_suite, execution_order):
                test_parameters = test_factory[1]
                name = test_parameters.get("name")
                if test_suite.name:
                    prefix = "{}-{}".format(test_suite.name, index)
                else:
                    prefix = index
                test_parameters["name"] = TestID(prefix, name, variant,
                                                 no_digits)
                if deadline is not None and time.time() > deadline:
                    summary.add('INTERRUPTED')
                    if 'methodName' in test_parameters:
                        del test_parameters['methodName']
                    test_factory = (TimeOutSkipTest, test_parameters)
                    if not self.run_test(job, test_factory, queue, summary):
                        break
                else:
                    if (replay_map is not None
                            and replay_map[index - 1] is not None):
                        test_factory = (replay_map[index - 1], test_parameters)
                    if not self.run_test(job, test_factory, queue, summary,
                                         deadline):
                        break
                index += 1
        except KeyboardInterrupt:
            TEST_LOG.error('Job interrupted by ctrl+c.')
            summary.add('INTERRUPTED')

        job.result.end_tests()
        job.funcatexit.run()
        signal.signal(signal.SIGTSTP, signal.SIG_IGN)
        return summary
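The job-level timeout in run_suite boils down to a deadline computed once and checked before each test. A self-contained sketch of just that pattern, with a hypothetical helper and callables standing in for the real job and test factories:

import time

def run_with_deadline(tests, timeout=0):
    """Run callables in order; mark the run INTERRUPTED once the deadline passes."""
    deadline = time.time() + timeout if timeout > 0 else None
    summary = set()
    for index, test in enumerate(tests, start=1):
        if deadline is not None and time.time() > deadline:
            summary.add('INTERRUPTED')
            print("test %d skipped: job timeout reached" % index)
            continue
        test()
    return summary

# Three 0.2s "tests" against a 0.3s job timeout: the third one is skipped.
print(run_with_deadline([lambda: time.sleep(0.2)] * 3, timeout=0.3))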
Example #4
    def get(self, basedir):
        if not hasattr(self, 'tmp_dir'):
            if basedir is not None:
                self.basedir = basedir
            self.tmp_dir = tempfile.mkdtemp(prefix='avocado_',  # pylint: disable=W0201
                                            dir=self.basedir)
        elif basedir is not None and basedir != self.basedir:
            LOG_JOB.error("The tmp_dir was already created. The new basedir "
                          "you're trying to provide will have no effect.")
        return self.tmp_dir
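The method above lazily creates one temporary directory and ignores later attempts to change its base directory. A self-contained sketch of the same pattern (hypothetical class name, plain print instead of LOG_JOB):

import tempfile

class TmpDirTracker:
    """Create the temporary directory once and reuse it afterwards."""

    def __init__(self):
        self.basedir = None

    def get(self, basedir=None):
        if not hasattr(self, 'tmp_dir'):
            if basedir is not None:
                self.basedir = basedir
            self.tmp_dir = tempfile.mkdtemp(prefix='avocado_', dir=self.basedir)
        elif basedir is not None and basedir != self.basedir:
            print("The tmp_dir was already created; the new basedir is ignored.")
        return self.tmp_dir

tracker = TmpDirTracker()
first = tracker.get('/tmp')       # creates e.g. /tmp/avocado_XXXXXX
second = tracker.get('/var/tmp')  # warns and returns the same directory
assert first == second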
Example #5
    def _get_msg_from_queue(self):
        """
        Helper method to handle safely getting messages from the queue.

        :return: Message, None if exception happened.
        :rtype: dict
        """
        try:
            return self.queue.get()
        # Let's catch all exceptions, since errors here mean a
        # crash in avocado.
        except Exception as details:  # pylint: disable=W0703
            self._failed = True
            TEST_LOG.error("RUNNER: Failed to read queue: %s", details)
            return None
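_get_msg_from_queue wraps queue.get() so that a crash on the runner side is logged and reported as None instead of propagating. A minimal sketch of the same defensive pattern with a plain multiprocessing.SimpleQueue (names are illustrative only, not the avocado API):

import logging
import multiprocessing

LOG = logging.getLogger(__name__)

def safe_get(queue):
    """Return the next message, or None if reading the queue crashed."""
    try:
        return queue.get()
    except Exception as details:  # deliberately broad: a failure here means a crash
        LOG.error("Failed to read queue: %s", details)
        return None

queue = multiprocessing.SimpleQueue()
queue.put({"status": "PASS"})
print(safe_get(queue))  # {'status': 'PASS'}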
Example #6
    def finish(self, proc, started, step, deadline, result_dispatcher):
        """
        Wait for the test process to finish and report its status, or an error
        status if the status cannot be obtained before the deadline.

        :param proc: The test's process
        :param started: Time when the test started
        :param step: Step between checks for the status
        :param deadline: Test execution deadline
        :param result_dispatcher: Result dispatcher (for test_progress
               notifications)
        """
        # Wait for either process termination or test status
        wait.wait_for(lambda: not proc.is_alive() or self.status, 1, 0, step)
        config = settings.as_dict()
        if self.status:  # status exists, wait for process to finish
            timeout_process_alive = config.get('runner.timeout.process_alive')
            deadline = min(deadline, time.monotonic() + timeout_process_alive)
            while time.monotonic() < deadline:
                result_dispatcher.map_method('test_progress', False)
                if wait.wait_for(lambda: not proc.is_alive(), 1, 0, step):
                    return self._add_status_failures(self.status)
            err = "Test reported status but did not finish"
        else:  # proc finished, wait for late status delivery
            timeout_process_died = config.get('runner.timeout.process_died')
            deadline = min(deadline, time.monotonic() + timeout_process_died)
            while time.monotonic() < deadline:
                result_dispatcher.map_method('test_progress', False)
                if wait.wait_for(lambda: self.status, 1, 0, step):
                    # Status delivered after the test process finished, pass
                    return self._add_status_failures(self.status)
            err = "Test died without reporting the status."
        # At this point there were failures, fill the new test status
        TEST_LOG.debug("Original status: %s", str(self.status))
        test_state = self.early_status
        test_state['time_start'] = started
        test_state['time_elapsed'] = time.monotonic() - started
        test_state['fail_reason'] = err
        test_state['status'] = exceptions.TestAbortError.status
        test_state['fail_class'] = exceptions.TestAbortError.__name__
        test_state['traceback'] = 'Traceback not available'
        try:
            with open(test_state['logfile'], 'r') as log_file_obj:
                test_state['text_output'] = log_file_obj.read()
        except IOError:
            test_state["text_output"] = "Not available, file not created yet"
        TEST_LOG.error('ERROR %s -> TestAbortError: %s.', err,
                       test_state['name'])
        if proc.is_alive():
            TEST_LOG.warning("Killing hanged test process %s", proc.pid)
            os.kill(proc.pid, signal.SIGTERM)
            if not wait.wait_for(lambda: not proc.is_alive(), 1, 0, 0.01):
                os.kill(proc.pid, signal.SIGKILL)
                end_time = time.monotonic() + 60
                while time.monotonic() < end_time:
                    if not proc.is_alive():
                        break
                    time.sleep(0.1)
                else:
                    raise exceptions.TestError("Unable to destroy test's "
                                               "process (%s)" % proc.pid)
        return self._add_status_failures(test_state)
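The tail of finish() escalates from SIGTERM to SIGKILL when the test process refuses to die. A self-contained, POSIX-only sketch of that escalation, using a throwaway multiprocessing.Process in place of the real test process:

import multiprocessing
import os
import signal
import time

def _hang():
    while True:
        time.sleep(1)

if __name__ == "__main__":
    proc = multiprocessing.Process(target=_hang)
    proc.start()
    os.kill(proc.pid, signal.SIGTERM)  # polite request first
    time.sleep(0.5)
    if proc.is_alive():                # still alive: force it
        os.kill(proc.pid, signal.SIGKILL)
        end_time = time.monotonic() + 60
        while time.monotonic() < end_time:
            if not proc.is_alive():
                break
            time.sleep(0.1)
        else:
            raise RuntimeError("Unable to destroy process %s" % proc.pid)
    proc.join()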
Example #7
    def run_suite(self, job, result, test_suite, variants, timeout=0,
                  replay_map=None, execution_order=None):
        """
        Run one or more tests and report with test result.

        :param job: an instance of :class:`avocado.core.job.Job`.
        :param result: an instance of :class:`avocado.core.result.Result`
        :param test_suite: a list of tests to run.
        :param variants: A varianter iterator to produce test params.
        :param timeout: maximum amount of time (in seconds) to execute.
        :param replay_map: optional list to override test class based on test
                           index.
        :param execution_order: Mode in which we should iterate through tests
                                and variants.  If not provided, will default to
                                :attr:`DEFAULT_EXECUTION_ORDER`.
        :return: a set with types of test failures.
        """
        summary = set()
        if job.sysinfo is not None:
            job.sysinfo.start_job_hook()
        queue = multiprocessing.SimpleQueue()
        if timeout > 0:
            deadline = time.time() + timeout
        else:
            deadline = None

        test_result_total = variants.get_number_of_tests(test_suite)
        no_digits = len(str(test_result_total))
        result.tests_total = test_result_total
        index = -1
        try:
            for test_factory in test_suite:
                test_factory[1]["base_logdir"] = job.logdir
                test_factory[1]["job"] = job
            if execution_order is None:
                execution_order = self.DEFAULT_EXECUTION_ORDER
            for test_factory, variant in self._iter_suite(job,
                                                          test_suite,
                                                          variants,
                                                          execution_order):
                index += 1
                test_parameters = test_factory[1]
                name = test_parameters.get("name")
                test_parameters["name"] = test.TestID(index + 1, name,
                                                      variant,
                                                      no_digits)
                if deadline is not None and time.time() > deadline:
                    summary.add('INTERRUPTED')
                    if 'methodName' in test_parameters:
                        del test_parameters['methodName']
                    test_factory = (test.TimeOutSkipTest, test_parameters)
                    if not self.run_test(job, result, test_factory, queue, summary):
                        break
                else:
                    if (replay_map is not None and
                            replay_map[index] is not None):
                        test_parameters["methodName"] = "test"
                        test_factory = (replay_map[index], test_parameters)

                    if not self.run_test(job, result, test_factory, queue, summary,
                                         deadline):
                        break
        except KeyboardInterrupt:
            TEST_LOG.error('Job interrupted by ctrl+c.')
            summary.add('INTERRUPTED')

        if job.sysinfo is not None:
            job.sysinfo.end_job_hook()
        result.end_tests()
        job.funcatexit.run()
        signal.signal(signal.SIGTSTP, signal.SIG_IGN)
        return summary
Example #8
def params_from_cmd(config):
    """
    Take care of command line overwriting, parameter preparation,
    setup and cleanup chains, and paths/utilities for all host controls.

    :param config: command line arguments
    :type config: {str, str}
    :raises: :py:class:`ValueError` if a vm selected on the command line is not
             among the vms available from the configuration (and thus not
             supported), or if internal tests are restricted from the command line
    """
    sys.path.insert(1, os.path.join(param.suite_path, "utils"))

    # validate typed vm names and possible vm specific restrictions
    available_vms = param.all_vms()
    available_restrictions = param.all_restrictions()

    # defaults usage vs command line overriding
    use_tests_default = True
    with_nontrivial_restrictions = False
    use_vms_default = {vm_name: True for vm_name in available_vms}
    with_selected_vms = list(available_vms)

    # the run string includes only pure parameters
    param_dict = {}
    # the tests string includes the test restrictions while the vm strings include the ones for the vm variants
    tests_str, vm_strs = "", {vm: "" for vm in available_vms}

    # main tokenizing loop
    for cmd_param in config["params"]:
        re_param = re.match(r"(\w+)=(.*)", cmd_param)
        if re_param is None:
            log.error("Skipping malformed parameter on the command line '%s' - "
                      "must be of the form <key>=<val>", cmd_param)
            continue
        (key, value) = re_param.group(1, 2)
        if key == "only" or key == "no":
            # detect if this is the primary restriction to escape defaults
            if value in available_restrictions:
                use_tests_default = False
            # else this is an auxiliary restriction
            else:
                with_nontrivial_restrictions = True
            # main test restriction part
            tests_str += "%s %s\n" % (key, value)
        elif key.startswith("only_") or key.startswith("no_"):
            for vm_name in available_vms:
                if re.match("(only|no)_%s" % vm_name, key):
                    # escape defaults for this vm and use the command line
                    use_vms_default[vm_name] = False
                    # main vm restriction part
                    vm_strs[vm_name] += "%s %s\n" % (key.replace("_%s" % vm_name, ""), value)
        # NOTE: comma in a parameter sense implies the same as space in config file
        elif key == "vms":
            # NOTE: no restrictions of the required vms are allowed during tests since
            # these are specified by each test (allowed only for manual setup steps)
            with_selected_vms[:] = value.split(",")
            for vm_name in with_selected_vms:
                if vm_name not in available_vms:
                    raise ValueError("The vm '%s' is not among the supported vms: "
                                     "%s" % (vm_name, ", ".join(available_vms)))
        else:
            # NOTE: comma on the command line is space in a config file
            value = value.replace(",", " ")
            param_dict[key] = value
    config["param_dict"] = param_dict
    log.debug("Parsed param dict '%s'", param_dict)

    # get minimal configurations and parse defaults if no command line arguments
    config["vms_params"], config["vm_strs"] = full_vm_params_and_strs(param_dict, vm_strs,
                                                                      use_vms_default)
    config["vms_params"]["vms"] = " ".join(with_selected_vms)
    config["available_vms"] = vm_strs.copy()
    for vm_name in available_vms:
        # the keys of vm strings must be equivalent to the selected vms
        if vm_name not in with_selected_vms:
            del config["vm_strs"][vm_name]
    config["tests_params"], config["tests_str"] = full_tests_params_and_str(param_dict, tests_str,
                                                                            use_tests_default)
    config["available_restrictions"] = available_restrictions

    # control against invoking only runnable tests and empty Cartesian products
    control_config = param.Reparsable()
    control_config.parse_next_batch(base_file="sets.cfg",
                                    ovrwrt_file=param.tests_ovrwrt_file(),
                                    ovrwrt_str=config["tests_str"],
                                    ovrwrt_dict=config["param_dict"])
    control_parser = control_config.get_parser()
    if with_nontrivial_restrictions:
        log.info("%s tests with nontrivial restriction %s",
                 len(list(control_parser.get_dicts())), config["tests_str"])

    # prefix for all tests of the current run making it possible to perform multiple runs in one command
    config["prefix"] = ""

    # log into files for each major level the way it was done for autotest
    config["run.store_logging_stream"] = [":10", ":20", ":30", ":40"]

    # attach environment processing hooks
    env_process_hooks()
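The tokenizing loop above splits each command line parameter on the first '=' and routes "only"/"no" restrictions separately from plain parameters. A stripped-down sketch of just that routing, with a hypothetical input list (the real function also handles vm selection, defaults and config parsing):

import re

params = ["only=quicktest", "only_vm1=tutorial1", "mem=2048", "malformed"]

param_dict, tests_str, vm_strs = {}, "", {"vm1": "", "vm2": ""}
for cmd_param in params:
    re_param = re.match(r"(\w+)=(.*)", cmd_param)
    if re_param is None:
        print("Skipping malformed parameter '%s' - must be <key>=<val>" % cmd_param)
        continue
    key, value = re_param.group(1, 2)
    if key in ("only", "no"):
        # primary or auxiliary test restriction
        tests_str += "%s %s\n" % (key, value)
    elif key.startswith(("only_", "no_")):
        # vm-specific restriction, e.g. only_vm1 -> "only ..." for vm1
        vm_name = key.split("_", 1)[1]
        vm_strs[vm_name] += "%s %s\n" % (key.replace("_%s" % vm_name, ""), value)
    else:
        # plain parameter; comma on the command line means space in a config file
        param_dict[key] = value.replace(",", " ")

print(param_dict)  # {'mem': '2048'}
print(tests_str)   # only quicktest
print(vm_strs)     # {'vm1': 'only tutorial1\n', 'vm2': ''}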