Example #1
    def run_suite(self, job, test_suite):
        summary = set()

        test_suite.tests, _ = nrunner.check_runnables_runner_requirements(
            test_suite.tests)
        job.result.tests_total = test_suite.variants.get_number_of_tests(
            test_suite.tests)

        listen = test_suite.config.get('nrunner.status_server_listen')
        self._start_status_server(listen)

        # pylint: disable=W0201
        self.runtime_tasks = self._get_all_runtime_tasks(test_suite)
        if test_suite.config.get('nrunner.shuffle'):
            random.shuffle(self.runtime_tasks)
        test_ids = [
            rt.task.identifier for rt in self.runtime_tasks
            if rt.task.category == 'test'
        ]
        tsm = TaskStateMachine(self.runtime_tasks, self.status_repo)
        spawner_name = test_suite.config.get('nrunner.spawner')
        spawner = SpawnerDispatcher(test_suite.config)[spawner_name].obj
        max_running = min(test_suite.config.get('nrunner.max_parallel_tasks'),
                          len(self.runtime_tasks))
        timeout = test_suite.config.get('task.timeout.running')
        workers = [
            Worker(state_machine=tsm,
                   spawner=spawner,
                   max_running=max_running,
                   task_timeout=timeout).run() for _ in range(max_running)
        ]
        asyncio.ensure_future(self._update_status(job))
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(
                asyncio.wait_for(asyncio.gather(*workers), job.timeout
                                 or None))
        except (KeyboardInterrupt, asyncio.TimeoutError, TestFailFast) as ex:
            LOG_JOB.info(str(ex))
            job.interrupted_reason = str(ex)
            summary.add("INTERRUPTED")

        # Wait until all messages have had a chance to be processed by
        # the status_updater. This should be replaced by a mechanism
        # that only waits if there are status messages still to be
        # processed, and only for a given amount of time.
        # Tests whose status was never received will always show as
        # SKIP because of result reconciliation.
        loop.run_until_complete(asyncio.sleep(0.05))

        job.result.end_tests()
        self.status_server.close()

        # Update the overall summary with found test statuses, which will
        # determine the Avocado command line exit status
        summary.update([
            status.upper()
            for status in self.status_repo.get_result_set_for_tasks(test_ids)
        ])
        return summary
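The worker pool above follows a common asyncio shape: start max_running coroutines that drain a shared set of tasks, gather them, and bound the whole run with the job timeout. A minimal, self-contained sketch of that shape (it does not use Avocado's Worker or TaskStateMachine; all names below are illustrative):

import asyncio
import random

async def worker(queue, results):
    # Each worker drains tasks from a shared queue until it is empty,
    # standing in for Avocado's Worker consuming the task state machine.
    while True:
        try:
            task_id = queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        await asyncio.sleep(random.uniform(0.01, 0.05))  # pretend to run the test
        results[task_id] = "pass"

async def run_all(task_ids, max_running, timeout=None):
    queue = asyncio.Queue()
    for task_id in task_ids:
        queue.put_nowait(task_id)
    results = {}
    workers = [worker(queue, results) for _ in range(max_running)]
    # The overall job timeout is applied to the gathered workers, mirroring
    # asyncio.wait_for(asyncio.gather(*workers), job.timeout or None) above.
    await asyncio.wait_for(asyncio.gather(*workers), timeout)
    return results

if __name__ == "__main__":
    print(asyncio.run(run_all(["test-%d" % i for i in range(10)], max_running=3)))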
Example #2
    def run_tests(self):
        """
        The actual test execution phase
        """
        self._log_job_debug_info()
        jobdata.record(self, sys.argv)

        if self.size == 0:
            msg = ('Unable to resolve any reference or "resolver.references" '
                   'is empty.')
            LOG_UI.error(msg)

        if not self.test_suites:
            self.exitcode |= exit_codes.AVOCADO_JOB_FAIL
            return self.exitcode

        summary = set()
        for suite in self.test_suites:
            summary |= suite.run(self)

        # If it's all good so far, set job status to 'PASS'
        if self.status == 'RUNNING':
            self.status = 'PASS'
        LOG_JOB.info('Test results available in %s', self.logdir)

        if 'INTERRUPTED' in summary:
            self.exitcode |= exit_codes.AVOCADO_JOB_INTERRUPTED
        if 'FAIL' in summary or 'ERROR' in summary:
            self.exitcode |= exit_codes.AVOCADO_TESTS_FAIL

        return self.exitcode
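The return value of run_tests() is a bit mask, so several failure modes can be reported in a single exit status. A small sketch of the accumulation pattern; the flag values below are placeholders, not necessarily the ones defined in avocado.core.exit_codes:

# Illustrative sketch: each condition ORs its own bit into exitcode, so
# several failure modes remain visible at once.
AVOCADO_ALL_OK = 0x0000
AVOCADO_TESTS_FAIL = 0x0001
AVOCADO_JOB_INTERRUPTED = 0x0008

exitcode = AVOCADO_ALL_OK
summary = {"FAIL", "INTERRUPTED"}       # statuses collected from the suites
if "INTERRUPTED" in summary:
    exitcode |= AVOCADO_JOB_INTERRUPTED
if "FAIL" in summary or "ERROR" in summary:
    exitcode |= AVOCADO_TESTS_FAIL
print(hex(exitcode))                    # 0x9: both conditions are reported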
Example #3
    def run(self, args):
        """
        Take care of command line overwriting, parameter preparation,
        setup and cleanup chains, and paths/utilities for all host controls.
        """
        log.info("Manual setup chain started.")
        # set English environment (command output might be localized, need to be safe)
        os.environ['LANG'] = 'en_US.UTF-8'

        cmd_parser.params_from_cmd(args)

        run_params = param.prepare_params(
            base_file="guest-base.cfg",
            ovrwrt_dict={"vms": " ".join(args.selected_vms)},
            ovrwrt_file=param.vms_ovrwrt_file,
            ovrwrt_str=args.param_str)
        # prepare a setup step or a chain of such
        run_params["count"] = 0
        setup_chain = run_params["setup"].split()
        for setup_step in setup_chain:
            setup_func = getattr(intertest, setup_step)
            setup_func(args, run_params, "0m%s" % run_params["count"])
            run_params["count"] += 1

        log.info("Manual setup chain finished.")
Example #4
 def _log_avocado_version():
     version_log = version.VERSION
     git_version = get_avocado_git_version()
     if git_version is not None:
         version_log += git_version
     LOG_JOB.info('Avocado version: %s', version_log)
     LOG_JOB.info('')
Example #5
 def sigtstp_handler(signum, frame):     # pylint: disable=W0613
     """ SIGSTOP all test processes on SIGTSTP """
     if not proc:    # Ignore ctrl+z when proc not yet started
         return
     with sigtstp:
         msg = "ctrl+z pressed, %%s test (%s)" % proc.pid
         app_log_msg = '\n%s' % msg
         if self.sigstopped:
             APP_LOG.info(app_log_msg, "resuming")
             TEST_LOG.info(msg, "resuming")
             process.kill_process_tree(proc.pid, signal.SIGCONT, False)
             self.sigstopped = False
         else:
             APP_LOG.info(app_log_msg, "stopping")
             TEST_LOG.info(msg, "stopping")
             process.kill_process_tree(proc.pid, signal.SIGSTOP, False)
             self.sigstopped = True
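The message built in sigtstp_handler() relies on deferred logging interpolation: the first % substitution fills in the pid and collapses "%%s" to a literal "%s", which the logging call then fills with "stopping" or "resuming". A tiny standalone illustration:

import logging

logging.basicConfig(level=logging.INFO, format="%(message)s")
log = logging.getLogger("demo")

pid = 4242
msg = "ctrl+z pressed, %%s test (%s)" % pid   # -> "ctrl+z pressed, %s test (4242)"
log.info(msg, "stopping")                     # -> "ctrl+z pressed, stopping test (4242)"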
Example #6
    def run(self, config):
        """
        Take care of command line overwriting, parameter preparation,
        setup and cleanup chains, and paths/utilities for all host controls.
        """
        log.info("Manual setup chain started.")
        # set English environment (command output might be localized, need to be safe)
        os.environ['LANG'] = 'en_US.UTF-8'

        cmd_parser.params_from_cmd(config)
        intertest.load_addons_tools()
        run_params = config["vms_params"]

        # prepare a setup step or a chain of such
        setup_chain = run_params.objects("setup")
        for i, setup_step in enumerate(setup_chain):
            run_params["count"] = i
            setup_func = getattr(intertest, setup_step)
            setup_func(config, "0m%s" % i)

        log.info("Manual setup chain finished.")
Example #7
def permubuntu(config, tag=""):
    """
    Perform all extra setup needed for the ubuntu permanent vms.

    :param config: command line arguments and run configuration
    :type config: {str, str}
    :param str tag: extra name identifier for the test to be run
    """
    l, r = config["graph"].l, config["graph"].r
    selected_vms = sorted(config["vm_strs"].keys())
    LOG_UI.info("Starting permanent vm setup for %s (%s)",
                ", ".join(selected_vms), os.path.basename(r.job.logdir))

    for test_object in l.parse_objects(config["param_dict"],
                                       config["vm_strs"]):
        if test_object.key != "vms":
            continue
        vm = test_object
        # parse individual net only for the current vm
        net = l.parse_object_from_objects([vm],
                                          param_dict=config["param_dict"])
        logging.info("Performing extra setup for the permanent %s", vm.suffix)

        # consider this as a special kind of state converting test which concerns
        # permanent objects (i.e. instead of transition from customize to on
        # root, it is a transition from supposedly "permanentized" vm to the root)
        logging.info("Booting %s for the first permanent on state", vm.suffix)
        setup_dict = config["param_dict"].copy()
        setup_dict.update({"set_state_vms": "ready"})
        setup_str = param.re_str("all..internal..manage.start")
        test_node = l.parse_node_from_object(net,
                                             setup_dict,
                                             setup_str,
                                             prefix=tag)
        to_run = r.run_test_node(test_node)
        asyncio.get_event_loop().run_until_complete(
            asyncio.wait_for(to_run, r.job.timeout or None))

    LOG_UI.info("Finished permanent vm setup")
Example #8
    def run(self, config):
        """
        Take care of command line overwriting, parameter preparation,
        setup and cleanup chains, and paths/utilities for all host controls.
        """
        log.info("Manual setup chain started.")
        # set English environment (command output might be localized, need to be safe)
        os.environ['LANG'] = 'en_US.UTF-8'

        config["run.test_runner"] = "traverser"
        config["params"] = config["i2n.manu.params"]
        try:
            cmd_parser.params_from_cmd(config)
        except param.EmptyCartesianProduct as error:
            LOG_UI.error(error)
            return 1
        intertest.load_addons_tools()
        run_params = config["vms_params"]

        # prepare a setup step or a chain of such
        setup_chain = run_params.objects("setup")
        retcode = 0
        for i, setup_step in enumerate(setup_chain):
            run_params["count"] = i
            setup_func = getattr(intertest, setup_step)
            try:
                # TODO: drop the consideration of None in the future if the
                # functions from the intertest module do not return this value.
                if setup_func(config, "0m%s" % i) not in [None, 0]:
                    # return 1 if at least one of the steps fails
                    retcode = 1
            except Exception as error:
                LOG_UI.error(error)
                retcode = 1

        log.info("Manual setup chain finished.")
        return retcode
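Unlike the earlier variants, this setup chain keeps going after a failing step and only remembers that something failed. A compact sketch of that error-tolerant accumulation, with hypothetical step functions:

# Error-tolerant chain: run every step, remember whether any of them failed.
def ok_step(config, tag):
    return 0

def broken_step(config, tag):
    raise RuntimeError("step failed")

steps = [ok_step, broken_step, ok_step]
retcode = 0
for i, setup_func in enumerate(steps):
    try:
        # Treat None and 0 as success, anything else (or an exception) as failure.
        if setup_func({}, "0m%s" % i) not in [None, 0]:
            retcode = 1
    except Exception as error:
        print("step %d failed: %s" % (i, error))
        retcode = 1
print("retcode:", retcode)   # 1: at least one step failed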
Example #9
 def _log_avocado_config(self):
     LOG_JOB.info('Avocado config:')
     LOG_JOB.info('')
     for line in pprint.pformat(self.config).splitlines():
         LOG_JOB.info(line)
     LOG_JOB.info('')
Example #10
 def _log_avocado_datadir(self):
     LOG_JOB.info('Avocado Data Directories:')
     LOG_JOB.info('')
     LOG_JOB.info('base     %s', self.config.get('datadir.paths.base_dir'))
     LOG_JOB.info('tests    %s', data_dir.get_test_dir())
     LOG_JOB.info('data     %s', self.config.get('datadir.paths.data_dir'))
     LOG_JOB.info('logs     %s', self.logdir)
     LOG_JOB.info('')
Example #11
def params_from_cmd(config):
    """
    Take care of command line overwriting, parameter preparation,
    setup and cleanup chains, and paths/utilities for all host controls.

    :param config: command line arguments
    :type config: {str, str}
    :raises: :py:class:`ValueError` if a vm selected on the command line is not
             available from the configuration (and thus not supported) or if
             internal tests are restricted from the command line

    .. todo:: Any dynamically created config keys here are usually entire data
        structures like dictionaries and lists and only used internally during
        the run which makes them unfit for displaying to the user and putting
        in a namespace scope like the officially registered plugin settings.
        Let's wait to see if the multi-suite support in avocado would establish
        some standards for doing this first. Until then, the user won't directly
        interact with these keys anyway.
    """
    suite_path = settings.as_dict().get('i2n.common.suite_path', ".")
    sys.path.insert(1, os.path.join(suite_path, "utils"))

    # validate typed vm names and possible vm specific restrictions
    available_vms = param.all_objects("vms")
    available_restrictions = param.all_restrictions()

    # defaults usage vs command line overriding
    use_tests_default = True
    with_nontrivial_restrictions = False
    use_vms_default = {vm_name: True for vm_name in available_vms}
    with_selected_vms = list(available_vms)

    # the run string includes only pure parameters
    param_dict = {}
    # the tests string includes the test restrictions while the vm strings include the ones for the vm variants
    tests_str, vm_strs = "", collections.OrderedDict([(vm, "")
                                                      for vm in available_vms])

    # main tokenizing loop
    for cmd_param in config["params"]:
        re_param = re.match(r"(\w+)=(.*)", cmd_param)
        if re_param is None:
            ui.error(
                "Found malformed parameter on the command line '%s' - "
                "must be of the form <key>=<val>", cmd_param)
            sys.exit(1)
        (key, value) = re_param.group(1, 2)
        if key == "only" or key == "no":
            # detect if this is the primary restriction to escape defaults
            for variant in re.split(r",|\.|\.\.", value):
                if variant in available_restrictions:
                    use_tests_default = False
                # else this is an auxiliary restriction
                else:
                    with_nontrivial_restrictions = True
            # main test restriction part
            tests_str += "%s %s\n" % (key, value)
        elif key.startswith("only_") or key.startswith("no_"):
            for vm_name in available_vms:
                if re.match("(only|no)_%s" % vm_name, key):
                    # escape defaults for this vm and use the command line
                    use_vms_default[vm_name] = False
                    # main vm restriction part
                    vm_strs[vm_name] += "%s %s\n" % (key.replace(
                        "_%s" % vm_name, ""), value)
        # NOTE: comma in a parameter sense implies the same as space in config file
        elif key == "vms":
            # NOTE: no restrictions of the required vms are allowed during tests since
            # these are specified by each test (allowed only for manual setup steps)
            with_selected_vms[:] = value.split(",")
            for vm_name in with_selected_vms:
                if vm_name not in available_vms:
                    raise ValueError(
                        "The vm '%s' is not among the supported vms: "
                        "%s" % (vm_name, ", ".join(available_vms)))
        else:
            # NOTE: comma on the command line is space in a config file
            value = value.replace(",", " ")
            param_dict[key] = value
    config["param_dict"] = param_dict
    log.debug("Parsed param dict '%s'", param_dict)

    # get minimal configurations and parse defaults if no command line arguments
    config["vms_params"], config["vm_strs"] = full_vm_params_and_strs(
        param_dict, vm_strs, use_vms_default)
    config["vms_params"]["vms"] = " ".join(with_selected_vms)
    config["available_vms"] = vm_strs.copy()
    for vm_name in available_vms:
        # the keys of vm strings must be equivalent to the selected vms
        if vm_name not in with_selected_vms:
            del config["vm_strs"][vm_name]
    config["tests_params"], config["tests_str"] = full_tests_params_and_str(
        param_dict, tests_str, use_tests_default)
    config["available_restrictions"] = available_restrictions

    # control against invoking only runnable tests and empty Cartesian products
    control_config = param.Reparsable()
    control_config.parse_next_batch(base_file="sets.cfg",
                                    ovrwrt_file=param.tests_ovrwrt_file(),
                                    ovrwrt_str=config["tests_str"],
                                    ovrwrt_dict=config["param_dict"])
    control_parser = control_config.get_parser()
    if with_nontrivial_restrictions:
        log.info("%s tests with nontrivial restriction %s",
                 len(list(control_parser.get_dicts())), config["tests_str"])

    # prefix for all tests of the current run making it possible to perform multiple runs in one command
    config["prefix"] = ""

    # log into files for each major level the way it was done for autotest
    config["run.store_logging_stream"] = [":10", ":20", ":30", ":40"]

    # set default off and on state backends
    from .states import lvm, qcow2, lxc, btrfs, ramfile, pool, vmnet
    ss.BACKENDS = {
        "lvm": lvm.LVMBackend,
        "qcow2": qcow2.QCOW2Backend,
        "lxc": lxc.LXCBackend,
        "btrfs": btrfs.BtrfsBackend,
        "pool": pool.QCOW2PoolBackend,
        "qcow2vt": qcow2.QCOW2VTBackend,
        "ramfile": ramfile.RamfileBackend,
        "vmnet": vmnet.VMNetBackend
    }

    # attach environment processing hooks
    env_process_hooks()
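The tokenizing loop above accepts only key=value tokens and treats "only"/"no" keys as test restrictions rather than plain parameters. A simplified, self-contained sketch of that parsing (it skips the per-vm and "vms" special cases handled above):

import re

def tokenize(params):
    param_dict, tests_str = {}, ""
    for cmd_param in params:
        re_param = re.match(r"(\w+)=(.*)", cmd_param)
        if re_param is None:
            raise ValueError("malformed parameter %r - must be <key>=<val>" % cmd_param)
        key, value = re_param.group(1, 2)
        if key in ("only", "no"):
            # test restriction, kept as Cartesian config text
            tests_str += "%s %s\n" % (key, value)
        else:
            # plain parameter; comma on the command line means space in a config file
            param_dict[key] = value.replace(",", " ")
    return param_dict, tests_str

print(tokenize(["only=tutorial1", "vms=vm1,vm2", "smp=2"]))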
Example #12
    def _run_test(job, result, test_factory, queue):
        """
        Run a test instance.

        This code is the first thing that runs inside a new process, known here
        as the test process. It communicates to the test runner by using
        :param:`queue`. It's important that this early state is given to the
        test runner in a reliable way.

        :param test_factory: Test factory (test class and parameters).
        :type test_factory: tuple of :class:`avocado.core.test.Test` and dict.
        :param queue: Multiprocess queue.
        :type queue: :class:`multiprocessing.Queue` instance.
        """
        sys.stdout = output.LoggingFile(["[stdout] "], loggers=[TEST_LOG])
        sys.stderr = output.LoggingFile(["[stderr] "], loggers=[TEST_LOG])

        def sigterm_handler(signum, frame):     # pylint: disable=W0613
            """ Produce traceback on SIGTERM """
            raise RuntimeError("Test interrupted by SIGTERM")

        signal.signal(signal.SIGTERM, sigterm_handler)

        # At this point, the original `sys.stdin` has already been
        # closed and replaced with `os.devnull` by
        # `multiprocessing.Process()` (not directly from Avocado
        # code).  Still, tests trying to use file descriptor 0 would
        # be able to read from the tty, and would hang. Let's replace
        # STDIN fd (0), with the same fd previously set by
        # `multiprocessing.Process()`
        os.dup2(sys.stdin.fileno(), 0)

        instance = loader.load_test(test_factory)
        if instance.runner_queue is None:
            instance.set_runner_queue(queue)
        early_state = instance.get_state()
        early_state['early_status'] = True
        try:
            queue.put(early_state)
        except queueFullException:
            instance.error(stacktrace.str_unpickable_object(early_state))

        result.start_test(early_state)
        job.result_events_dispatcher.map_method('start_test',
                                                result,
                                                early_state)
        if job.config.get('run.log_test_data_directories'):
            data_sources = getattr(instance, "DATA_SOURCES", [])
            if data_sources:
                locations = []
                for source in data_sources:
                    locations.append(instance.get_data("", source=source,
                                                       must_exist=False))
                TEST_LOG.info('Test data directories: ')
                for source, location in zip(data_sources, locations):
                    if location is not None:
                        TEST_LOG.info('  %s: %s', source, location)
                TEST_LOG.info('')
        try:
            instance.run_avocado()
        finally:
            try:
                state = instance.get_state()
                queue.put(state)
            except queueFullException:
                instance.error(stacktrace.str_unpickable_object(state))
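The early_state handshake is the essential part of _run_test(): the freshly started test process immediately pushes a small state dict onto the multiprocessing queue so the runner learns about it even if the test later hangs, then pushes the final state when it finishes. A reduced sketch of that protocol (names here are illustrative, not Avocado's):

import multiprocessing

def fake_test(queue):
    # Early handshake: tell the runner we exist before doing any work.
    queue.put({"early_status": True, "name": "demo-test"})
    result = {"name": "demo-test", "status": "PASS"}
    # Final state once the "test" is done.
    queue.put(result)

if __name__ == "__main__":
    queue = multiprocessing.Queue()
    proc = multiprocessing.Process(target=fake_test, args=(queue,))
    proc.start()
    print("early:", queue.get(timeout=5))
    print("final:", queue.get(timeout=5))
    proc.join()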
Example #13
 def _log_tmp_dir(self):
     LOG_JOB.info("Temporary dir: %s", self.tmpdir)
     LOG_JOB.info("")
Example #14
 def end_test(self, result, state):
     if self.runner == 'nrunner':
         LOG_JOB.info('%s: %s', self._get_name(state),
                      state.get("status", "ERROR"))
         LOG_JOB.info('More information in %s', state.get('task_path', ''))
Example #15
 def _log_variants(variants):
     lines = variants.to_str(summary=1, variants=1, use_utf8=False)
     for line in lines.splitlines():
         LOG_JOB.info(line)
Example #16
 def _log_job_id(self):
     LOG_JOB.info('Job ID: %s', self.unique_id)
     if self.replay_sourcejob is not None:
         LOG_JOB.info('Replay of Job ID: %s', self.replay_sourcejob)
     LOG_JOB.info('')
Example #17
 def run(self, args):
     LOG_JOB.info(self.description)
Example #18
 def end_test(self, result, state):
     LOG_JOB.info("%s: %s", self._get_name(state),
                  state.get("status", "ERROR"))
     LOG_JOB.info("More information in %s", state.get("task_path", ""))
Example #19
 def start_test(self, result, state):
     LOG_JOB.info("%s: STARTED", self._get_name(state))
Example #20
 def _log_cmdline():
     cmdline = " ".join(sys.argv)
     LOG_JOB.info("Command line: %s", cmdline)
     LOG_JOB.info('')
Example #21
def params_from_cmd(config):
    """
    Take care of command line overwriting, parameter preparation,
    setup and cleanup chains, and paths/utilities for all host controls.

    :param config: command line arguments
    :type config: {str, str}
    :raises: :py:class:`ValueError` if a vm selected on the command line is not
             available from the configuration (and thus not supported) or if
             internal tests are restricted from the command line
    """
    sys.path.insert(1, os.path.join(param.suite_path, "utils"))

    # validate typed vm names and possible vm specific restrictions
    available_vms = param.all_vms()
    available_restrictions = param.all_restrictions()

    # defaults usage vs command line overriding
    use_tests_default = True
    with_nontrivial_restrictions = False
    use_vms_default = {vm_name: True for vm_name in available_vms}
    with_selected_vms = list(available_vms)

    # the run string includes only pure parameters
    param_dict = {}
    # the tests string includes the test restrictions while the vm strings include the ones for the vm variants
    tests_str, vm_strs = "", {vm: "" for vm in available_vms}

    # main tokenizing loop
    for cmd_param in config["params"]:
        re_param = re.match(r"(\w+)=(.*)", cmd_param)
        if re_param is None:
            log.error("Skipping malformed parameter on the command line '%s' - "
                      "must be of the form <key>=<val>", cmd_param)
            continue
        (key, value) = re_param.group(1, 2)
        if key == "only" or key == "no":
            # detect if this is the primary restriction to escape defaults
            if value in available_restrictions:
                use_tests_default = False
            # else this is an auxiliary restriction
            else:
                with_nontrivial_restrictions = True
            # main test restriction part
            tests_str += "%s %s\n" % (key, value)
        elif key.startswith("only_") or key.startswith("no_"):
            for vm_name in available_vms:
                if re.match("(only|no)_%s" % vm_name, key):
                    # escape defaults for this vm and use the command line
                    use_vms_default[vm_name] = False
                    # main vm restriction part
                    vm_strs[vm_name] += "%s %s\n" % (key.replace("_%s" % vm_name, ""), value)
        # NOTE: comma in a parameter sense implies the same as space in config file
        elif key == "vms":
            # NOTE: no restrictions of the required vms are allowed during tests since
            # these are specified by each test (allowed only for manual setup steps)
            with_selected_vms[:] = value.split(",")
            for vm_name in with_selected_vms:
                if vm_name not in available_vms:
                    raise ValueError("The vm '%s' is not among the supported vms: "
                                     "%s" % (vm_name, ", ".join(available_vms)))
        else:
            # NOTE: comma on the command line is space in a config file
            value = value.replace(",", " ")
            param_dict[key] = value
    config["param_dict"] = param_dict
    log.debug("Parsed param dict '%s'", param_dict)

    # get minimal configurations and parse defaults if no command line arguments
    config["vms_params"], config["vm_strs"] = full_vm_params_and_strs(param_dict, vm_strs,
                                                                      use_vms_default)
    config["vms_params"]["vms"] = " ".join(with_selected_vms)
    config["available_vms"] = vm_strs.copy()
    for vm_name in available_vms:
        # the keys of vm strings must be equivalent to the selected vms
        if vm_name not in with_selected_vms:
            del config["vm_strs"][vm_name]
    config["tests_params"], config["tests_str"] = full_tests_params_and_str(param_dict, tests_str,
                                                                            use_tests_default)
    config["available_restrictions"] = available_restrictions

    # control against invoking only runnable tests and empty Cartesian products
    control_config = param.Reparsable()
    control_config.parse_next_batch(base_file="sets.cfg",
                                    ovrwrt_file=param.tests_ovrwrt_file(),
                                    ovrwrt_str=config["tests_str"],
                                    ovrwrt_dict=config["param_dict"])
    control_parser = control_config.get_parser()
    if with_nontrivial_restrictions:
        log.info("%s tests with nontrivial restriction %s",
                 len(list(control_parser.get_dicts())), config["tests_str"])

    # prefix for all tests of the current run making it possible to perform multiple runs in one command
    config["prefix"] = ""

    # log into files for each major level the way it was done for autotest
    config["run.store_logging_stream"] = [":10", ":20", ":30", ":40"]

    # attach environment processing hooks
    env_process_hooks()
Example #22
 def _log_tmp_dir(self):
     LOG_JOB.info('Temporary dir: %s', self.tmpdir)
     LOG_JOB.info('')
Example #23
    def run_suite(self, job, test_suite):
        summary = set()

        if not test_suite.enabled:
            job.interrupted_reason = f"Suite {test_suite.name} is disabled."
            return summary

        test_suite.tests, missing_requirements = check_runnables_runner_requirements(
            test_suite.tests)

        self._update_avocado_configuration_used_on_runnables(
            test_suite.tests, test_suite.config)

        self._abort_if_missing_runners(missing_requirements)

        job.result.tests_total = test_suite.variants.get_number_of_tests(
            test_suite.tests)

        self._create_status_server(test_suite, job)

        graph = RuntimeTaskGraph(
            test_suite.get_test_variants(),
            test_suite.name,
            self.status_server.uri,
            job.unique_id,
        )
        # pylint: disable=W0201
        self.runtime_tasks = graph.get_tasks_in_topological_order()

        # Start the status server
        asyncio.ensure_future(self.status_server.serve_forever())

        if test_suite.config.get("nrunner.shuffle"):
            random.shuffle(self.runtime_tasks)
        test_ids = [
            rt.task.identifier for rt in self.runtime_tasks
            if rt.task.category == "test"
        ]
        tsm = TaskStateMachine(self.runtime_tasks, self.status_repo)
        spawner_name = test_suite.config.get("nrunner.spawner")
        spawner = SpawnerDispatcher(test_suite.config, job)[spawner_name].obj
        max_running = min(test_suite.config.get("nrunner.max_parallel_tasks"),
                          len(self.runtime_tasks))
        timeout = test_suite.config.get("task.timeout.running")
        failfast = test_suite.config.get("run.failfast")
        workers = [
            Worker(
                state_machine=tsm,
                spawner=spawner,
                max_running=max_running,
                task_timeout=timeout,
                failfast=failfast,
            ).run() for _ in range(max_running)
        ]
        asyncio.ensure_future(self._update_status(job))
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(
                asyncio.wait_for(asyncio.gather(*workers), job.timeout
                                 or None))
        except (KeyboardInterrupt, asyncio.TimeoutError, TestFailFast) as ex:
            LOG_JOB.info(str(ex))
            job.interrupted_reason = str(ex)
            summary.add("INTERRUPTED")

        # Wait until all messages have had a chance to be processed by
        # the status_updater. This should be replaced by a mechanism
        # that only waits if there are status messages still to be
        # processed, and only for a given amount of time.
        # Tests whose status was never received will always show as
        # SKIP because of result reconciliation.
        loop.run_until_complete(asyncio.sleep(0.05))

        job.result.end_tests()
        self.status_server.close()
        if self.status_server_dir is not None:
            self.status_server_dir.cleanup()

        # Update the overall summary with found test statuses, which will
        # determine the Avocado command line exit status
        summary.update([
            status.upper()
            for status in self.status_repo.get_result_set_for_tasks(test_ids)
        ])
        return summary
Example #24
 def start_test(self, result, state):
     if self.runner == 'nrunner':
         LOG_JOB.info('%s: STARTED', self._get_name(state))
Example #25
 def _log_avocado_datadir(self):
     LOG_JOB.info("Avocado Data Directories:")
     LOG_JOB.info("")
     LOG_JOB.info("base     %s", self.config.get("datadir.paths.base_dir"))
     LOG_JOB.info("tests    %s", data_dir.get_test_dir())
     LOG_JOB.info("data     %s", self.config.get("datadir.paths.data_dir"))
     LOG_JOB.info("logs     %s", self.logdir)
     LOG_JOB.info("")