Example #1
 def __init__(self, queue, runnable):
     self.__vt_params = utils_params.Params(runnable.kwargs)
     self.queue = queue
     self.tmpdir = tempfile.mkdtemp()
     self.logdir = os.path.join(self.tmpdir, 'results')
     path.init_dir(self.logdir)
     self.logfile = os.path.join(self.logdir, 'debug.log')
     self.log = output.LOG_JOB
     self.log_level = runnable.config.get('job.output.loglevel',
                                          logging.DEBUG)
     self.env_version = utils_env.get_env_version()
     self.iteration = 0
     self.background_errors = error_event.error_events_bus
     # clear existing error events
     self.background_errors.clear()
     self.debugdir = self.logdir
     self.bindir = data_dir.get_root_dir()
     self.virtdir = os.path.join(self.bindir, 'shared')
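These examples all funnel failures from helper threads through a `background_errors` container: a background task stores its `sys.exc_info()` triple instead of raising, and the test re-raises it later from the main thread. A minimal, self-contained sketch of that pattern, using a plain `queue.Queue` as a stand-in for the avocado-vt specific `error_event.error_events_bus`:

# Sketch (Python 3) of the background-error pattern; queue.Queue stands in
# for the avocado-vt error_event.error_events_bus used above.
import queue
import sys
import threading

background_errors = queue.Queue()

def worker():
    # A background task records its failure instead of raising, because an
    # exception in a thread would otherwise be lost to the main test flow.
    try:
        raise RuntimeError("disk image went away")
    except Exception:
        background_errors.put(sys.exc_info())

def verify_background_errors():
    # Re-raise the first recorded error, preserving the original traceback.
    try:
        exc_type, exc_value, exc_tb = background_errors.get(block=False)
    except queue.Empty:
        return
    raise exc_value.with_traceback(exc_tb)

t = threading.Thread(target=worker)
t.start()
t.join()
verify_background_errors()   # raises RuntimeError from the worker thread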
Example #2
class virt(test.test):

    """
    Shared test class infrastructure for tests such as the KVM test.

    It comprises a subtest load system, use of parameters, and an env
    file, all code that can be reused among those virt tests.
    """
    version = 1
    env_version = utils_env.get_env_version()

    def initialize(self, params):
        # Change the value of the preserve_srcdir attribute according to
        # the value present on the configuration file (defaults to yes)
        if params.get("preserve_srcdir", "yes") == "yes":
            self.preserve_srcdir = True
        virtdir = os.path.dirname(sys.modules[__name__].__file__)
        self.virtdir = os.path.join(virtdir, "shared")
        # Place where virt software will be built/linked
        self.builddir = os.path.join(
            virtdir, 'backends', params.get("vm_type"))
        self.background_errors = Queue.Queue()

    def verify_background_errors(self):
        """
        Verify if there are any errors that happened on background threads.

        :raise Exception: Any exception stored on the background_errors queue.
        """
        try:
            exc = self.background_errors.get(block=False)
        except Queue.Empty:
            pass
        else:
            six.reraise(exc[1], None, exc[2])

    def run_once(self, params):
        # Convert params to a Params object
        params = utils_params.Params(params)

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestSkipError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        utils_misc.set_log_file_dir(self.debugdir)

        # Open the environment file
        custom_env_path = params.get("custom_env_path", "")
        if custom_env_path:
            env_path = custom_env_path
        else:
            env_path = params.get("vm_type")
        env_filename = os.path.join(self.bindir, "backends", env_path,
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        other_subtests_dirs = params.get("other_tests_dirs", "")

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []
                    bin_dir = self.bindir

                    for d in other_subtests_dirs.split():
                        # Replace split char.
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(bin_dir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise exceptions.TestError("Directory %s not"
                                                       " exist." % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(subtestdir,
                                                            bootstrap.test_filter)

                    # Verify that we have the corresponding source file for it
                    for generic_subdir in asset.get_test_provider_subdirs('generic'):
                        subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                            bootstrap.test_filter)
                    for multi_host_migration_subdir in asset.get_test_provider_subdirs(
                            'multi_host_migration'):
                        subtest_dirs += data_dir.SubdirList(multi_host_migration_subdir,
                                                            bootstrap.test_filter)

                    for specific_subdir in asset.get_test_provider_subdirs(params.get("vm_type")):
                        subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                            bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug("Searching for test modules that match "
                                  "'type = %s' and 'provider = %s' "
                                  "on this cartesian dict",
                                  params.get("type"), params.get("provider", None))

                    t_types = params.get("type").split()
                    provider = params.get("provider", None)
                    if provider is not None:
                        subtest_dirs = [
                            d for d in subtest_dirs if provider in d]
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on tests"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise exceptions.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s"
                                                  % error_message)

                except Exception as e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    logging.error("Test failed: %s: %s",
                                  e.__class__.__name__, e)
                    try:
                        env_process.postprocess_on_error(
                            self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)
Example #3
class VirtTest(test.Test, utils.TestUtils):

    """
    Minimal test class used to run a virt test.
    """

    env_version = utils_env.get_env_version()

    def __init__(self, **kwargs):
        """
        :note: methodName, name, base_logdir, job/config and runner_queue
               params are inherited from test.Test.
               Since Avocado 86, test.Test uses a config dict instead of a
               job instance. For compatibility with Avocado 82.0 LTS we
               can't remove the job instance yet: the job instance is used
               for Avocado < 86 and the config for Avocado >= 86.
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        self.__vt_params = None
        self.__avocado_params = None
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')
        # self.__vt_params must be initialized after super
        vt_params = utils_params.Params(kwargs.pop("vt_params", None))
        # for timeout use Avocado-vt timeout as default but allow
        # overriding from Avocado params (varianter)
        self.timeout = vt_params.get("test_timeout", self.timeout)

        self.iteration = 0
        self.resultsdir = None
        self.background_errors = error_event.error_events_bus
        # clear existing error events
        self.background_errors.clear()

        if "methodName" not in kwargs:
            kwargs["methodName"] = 'runTest'
        super(VirtTest, self).__init__(**kwargs)

        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type", ""))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        try:
            self.__avocado_params = super(VirtTest, self).params
        except AttributeError:
            # 36LTS sets `self.params` instead of having it as a property,
            # which stores the avocado params in `self.__params`
            self.__avocado_params = self.__params
        self.__vt_params = vt_params
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        utils_misc.set_log_file_dir(self.logdir)
        self.__status = None
        self.__exc_info = None

    @property
    def params(self):
        """
        Avocado-vt test params

        During `avocado.Test.__init__` this reports the original params but
        once the Avocado-vt params are set it reports those instead. This
        is necessary to complete the `avocado.Test.__init__` phase
        """
        if self.__vt_params is not None:
            return self.__vt_params
        else:
            # `self.__params` is set after `avocado.test.__init__`, but in
            # newer Avocado `self.params` is used during `__init__`.
            # Report the parent's value in that case.
            return super(VirtTest, self).params

    @property
    def avocado_params(self):
        """
        Original Avocado (multiplexer/varianter) params
        """
        return self.__avocado_params

    @property
    def datadir(self):
        """
        Returns the path to the directory that contains test data files

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file and do not provide
        the concept of a datadir.
        """
        return None

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file.
        """
        return None

    def get_state(self):
        """
        Avocado releases prior to 60.0 used to override the self.__params
        attribute, which requires special handling while reporting the state.

        TODO: Remove when 52LTS is deprecated.
        """
        state = super(VirtTest, self).get_state()
        if state["params"] == self.__vt_params:
            state["params"] = self.avocado_params
        return state

    def setUp(self):
        """
        Avocado-vt uses custom setUp/test/tearDown handling and unlike
        Avocado it allows skipping tests from any phase. To convince
        Avocado to allow skips let's say our tests run during setUp
        phase and report the status in test.
        """
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
            self.__status = "PASS"
        # This trick will give better reporting of virt tests being executed
        # into avocado (skips, warns and errors will display correctly)
        except exceptions.TestSkipError:
            self.__exc_info = sys.exc_info()
            raise   # This one has to be raised in setUp
        except:  # nopep8 Old-style exceptions are not inherited from Exception()
            self.__exc_info = sys.exc_info()
            self.__status = self.__exc_info[1]
        finally:
            # Clean up libvirtd debug logs if the test did not fail or error
            if self.params.get("libvirtd_log_cleanup", "no") == "yes":
                if (self.params.get("vm_type") == 'libvirt' and
                        self.params.get("enable_libvirtd_debug_log", "yes") == "yes"):
                    libvirtd_log = self.params["libvirtd_debug_file"]
                    if ("TestFail" not in str(self.__exc_info) and
                            "TestError" not in str(self.__exc_info)):
                        if libvirtd_log and os.path.isfile(libvirtd_log):
                            logging.info("cleaning libvirtd logs...")
                            os.remove(libvirtd_log)
                    else:
                        # tar the libvirtd log and archive
                        logging.info("archiving libvirtd debug logs")
                        from virttest import utils_package
                        if utils_package.package_install("tar"):
                            if os.path.isfile(libvirtd_log):
                                archive = os.path.join(os.path.dirname(
                                    libvirtd_log), "libvirtd.tar.gz")
                                cmd = ("tar -zcf %s -P %s"
                                       % (pipes.quote(archive),
                                          pipes.quote(libvirtd_log)))
                                if process.system(cmd) == 0:
                                    os.remove(libvirtd_log)
                            else:
                                logging.error("Unable to find log file: %s",
                                              libvirtd_log)
                        else:
                            logging.error("Unable to find tar to compress libvirtd "
                                          "logs")

            if env_lang:
                os.environ['LANG'] = env_lang
            else:
                del os.environ['LANG']

    def runTest(self):
        """
        This only reports the results

        The actual testing happens inside the setUp stage; this method
        only reports the outcome recorded there.
        """
        if self.__status != "PASS":
            raise self.__status  # pylint: disable=E0702

    def _runTest(self):
        params = self.params

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        self._log_parameters()

        # Warn about this special condition at the relevant location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        subtest_dirs = self._get_subtest_dirs()

        # Get the test routine corresponding to the specified
        # test type
        logging.debug("Searching for test modules that match "
                      "'type = %s' and 'provider = %s' "
                      "on this cartesian dict",
                      params.get("type"),
                      params.get("provider", None))

        t_types = params.get("type").split()

        utils.insert_dirs_to_path(subtest_dirs)

        test_modules = utils.find_test_modules(t_types, subtest_dirs)

        # Open the environment file
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        if params.get_boolean("job_env_cleanup", "yes"):
            self.runner_queue.put({"func_at_exit": cleanup_env,
                                   "args": (env_filename, self.env_version),
                                   "once": True})

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Pre-process
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self._safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self._safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s" %
                                                  error_message)

                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self._safe_env_save(env)
                    raise

            finally:
                # Post-process
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except:  # nopep8 Old-style exceptions are not inherited from Exception()

                        stacktrace.log_exc_info(sys.exc_info(),
                                                'avocado.test')
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s",
                                      sys.exc_info()[1])
                finally:
                    if self._safe_env_save(env) or params.get("env_cleanup", "no") == "yes":
                        env.destroy()   # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)

        return test_passed
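This example runs the real test body inside setUp() and only reports the stored outcome from runTest(), so Avocado accepts skips raised at any phase. The same trick reduced to a standalone unittest sketch; the class and method names are illustrative, not avocado-vt API:

# Standalone illustration of the "run in setUp, report in runTest" trick.
import unittest

class DeferredReportTest(unittest.TestCase):
    def setUp(self):
        self._status = None
        try:
            self._run_actual_test()
            self._status = "PASS"
        except unittest.SkipTest:
            raise                     # skips must escape during setUp
        except Exception as details:
            self._status = details    # remember the failure for runTest

    def _run_actual_test(self):
        # The real test body would live here.
        pass

    def runTest(self):
        # Only report what setUp already observed.
        if self._status != "PASS":
            raise self._status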
Example #4
class VirtTest(test.Test):

    """
    Minimal test class used to run a virt test.
    """

    env_version = utils_env.get_env_version()

    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, job=None, runner_queue=None,
                 vt_params=None):
        """
        :note: methodName, name, base_logdir, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')

        self.iteration = 0
        self.resultsdir = None
        self.file_handler = None
        self.background_errors = Queue.Queue()
        super(VirtTest, self).__init__(methodName=methodName, name=name,
                                       params=params,
                                       base_logdir=base_logdir, job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type"))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        self.__params = utils_params.Params(vt_params)
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        self.timeout = vt_params.get("test_timeout", self.timeout)
        utils_misc.set_log_file_dir(self.logdir)

    @property
    def params(self):
        """
        Avocado-vt test params

        During `avocado.Test.__init__` this reports the original params but
        once the Avocado-vt params are set it reports those instead. This
        is necessary to complete the `avocado.Test.__init__` phase
        """
        try:
            return self.__params
        except AttributeError:
            return self.avocado_params

    @params.setter
    def params(self, value):
        """
        For compatibility with 36LTS we need to support a setter on params
        """
        self.__params = value

    @property
    def avocado_params(self):
        """
        Original Avocado (multiplexer/varianter) params
        """
        return super(VirtTest, self).params

    @property
    def datadir(self):
        """
        Returns the path to the directory that contains test data files

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file and do not provide
        the concept of a datadir.
        """
        return None

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file.
        """
        return None

    def get_state(self):
        """
        Avocado-vt replaces Test.params with avocado-vt params. This function
        reports the original params on `get_state` call.
        """
        state = super(VirtTest, self).get_state()
        state["params"] = self.__dict__.get("avocado_params")
        return state

    def _start_logging(self):
        super(VirtTest, self)._start_logging()
        root_logger = logging.getLogger()
        root_logger.addHandler(self.file_handler)

    def _stop_logging(self):
        super(VirtTest, self)._stop_logging()
        root_logger = logging.getLogger()
        root_logger.removeHandler(self.file_handler)

    def write_test_keyval(self, d):
        self.whiteboard = str(d)

    def verify_background_errors(self):
        """
        Verify if there are any errors that happened on background threads.

        :raise Exception: Any exception stored on the background_errors queue.
        """
        try:
            exc = self.background_errors.get(block=False)
        except Queue.Empty:
            pass
        else:
            raise exc[1], None, exc[2]

    def __safe_env_save(self, env):
        """
        Treat "env.save()" exception as warnings

        :param env: The virttest env object
        :return: True on failure
        """
        try:
            env.save()
        except Exception as details:
            if hasattr(stacktrace, "str_unpickable_object"):
                self.log.warn("Unable to save environment: %s",
                              stacktrace.str_unpickable_object(env.data))
            else:    # TODO: Remove when 36.0 LTS is not supported
                self.log.warn("Unable to save environment: %s (%s)", details,
                              env.data)
            return True
        return False

    def setUp(self):
        """
        Avocado-vt uses custom setUp/test/tearDown handling and unlike
        Avocado it allows skipping tests from any phase. To convince
        Avocado to allow skips let's say our tests run during setUp
        phase and don't do anything during runTest/tearDown.
        """
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
        # This trick will give better reporting of virt tests being executed
        # into avocado (skips, warns and errors will display correctly)
        except error.TestNAError, details:
            raise exceptions.TestSkipError(details)
        except error.TestWarn, details:
            raise exceptions.TestWarn(details)
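__safe_env_save() above downgrades env persistence failures to warnings so an unpicklable env object cannot mask the real test result, and returns True so callers can force-clean state that cannot be stored. A sketch of the same idea, using plain pickle on an arbitrary object as a stand-in for virttest's env persistence:

# Sketch: persist state if possible, warn and signal failure otherwise.
import logging
import pickle

def safe_save(obj, path):
    """Return True on failure so callers can force-clean unstorable state."""
    try:
        with open(path, "wb") as pickle_file:
            pickle.dump(obj, pickle_file)
    except Exception as details:
        logging.warning("Unable to save environment: %s", details)
        return True
    return False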
Example #5
class VirtTest(test.Test):

    """
    Minimal test class used to run a virt test.
    """

    env_version = utils_env.get_env_version()

    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, job=None, runner_queue=None,
                 vt_params=None):
        """
        :note: methodName, name, base_logdir, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        self.__params_vt = None
        self.__avocado_params = None
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')

        self.iteration = 0
        self.resultsdir = None
        self.file_handler = None
        self.background_errors = Queue.Queue()
        super(VirtTest, self).__init__(methodName=methodName, name=name,
                                       params=params,
                                       base_logdir=base_logdir, job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type", ""))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        try:
            self.__avocado_params = super(VirtTest, self).params
        except AttributeError:
            # 36LTS sets `self.params` instead of having it as a property,
            # which stores the avocado params in `self.__params`
            self.__avocado_params = self.__params
        self.__params_vt = utils_params.Params(vt_params)
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        self.timeout = vt_params.get("test_timeout", self.timeout)
        utils_misc.set_log_file_dir(self.logdir)
        self.__status = None

    @property
    def params(self):
        """
        Avocado-vt test params

        During `avocado.Test.__init__` this reports the original params but
        once the Avocado-vt params are set it reports those instead. This
        is necessary to complete the `avocado.Test.__init__` phase
        """
        if self.__params_vt is not None:
            return self.__params_vt
        else:
            # `self.__params` is set after `avocado.test.__init__`, but in
            # newer Avocado `self.params` is used during `__init__`.
            # Report the parent's value in that case.
            return super(VirtTest, self).params

    @params.setter
    def params(self, value):
        """
        For compatibility with 36LTS we need to support a setter on params
        """
        self.__params_vt = value

    @property
    def avocado_params(self):
        """
        Original Avocado (multiplexer/varianter) params
        """
        return self.__avocado_params

    @property
    def datadir(self):
        """
        Returns the path to the directory that contains test data files

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file and do not provide
        the concept of a datadir.
        """
        return None

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file.
        """
        return None

    def get_state(self):
        """
        Avocado releases prior to 60.0 used to override the self.__params
        attribute, which requires special handling while reporting the state.

        TODO: Remove when 52LTS is deprecated.
        """
        state = super(VirtTest, self).get_state()
        if state["params"] == self.__params_vt:
            state["params"] = self.avocado_params
        return state

    def _start_logging(self):
        super(VirtTest, self)._start_logging()
        root_logger = logging.getLogger()
        root_logger.addHandler(self.file_handler)

    def _stop_logging(self):
        super(VirtTest, self)._stop_logging()
        root_logger = logging.getLogger()
        root_logger.removeHandler(self.file_handler)

    def write_test_keyval(self, d):
        self.whiteboard = str(d)

    def verify_background_errors(self):
        """
        Verify if there are any errors that happened on background threads.

        :raise Exception: Any exception stored on the background_errors queue.
        """
        try:
            exc = self.background_errors.get(block=False)
        except Queue.Empty:
            pass
        else:
            six.reraise(exc[1], None, exc[2])

    def __safe_env_save(self, env):
        """
        Treat "env.save()" exception as warnings

        :param env: The virttest env object
        :return: True on failure
        """
        try:
            env.save()
        except Exception as details:
            if hasattr(stacktrace, "str_unpickable_object"):
                self.log.warn("Unable to save environment: %s",
                              stacktrace.str_unpickable_object(env.data))
            else:    # TODO: Remove when 36.0 LTS is not supported
                self.log.warn("Unable to save environment: %s (%s)", details,
                              env.data)
            return True
        return False

    def setUp(self):
        """
        Avocado-vt uses custom setUp/test/tearDown handling and unlike
        Avocado it allows skipping tests from any phase. To convince
        Avocado to allow skips let's say our tests run during setUp
        phase and report the status in test.
        """
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
            self.__status = "PASS"
        # This trick will give better reporting of virt tests being executed
        # into avocado (skips, warns and errors will display correctly)
        except exceptions.TestSkipError:
            raise   # This one has to be raised in setUp
        except:  # nopep8 Old-style exceptions are not inherited from Exception()
            details = sys.exc_info()[1]
            self.__status = details
            if not hasattr(self, "cancel"):     # Old Avocado, skip here
                if isinstance(self.__status, error.TestNAError):
                    raise exceptions.TestSkipError(self.__status)
        finally:
            if env_lang:
                os.environ['LANG'] = env_lang
            else:
                del os.environ['LANG']

    def runTest(self):
        """
        This only reports the results

        The actual testing happens inside the setUp stage; this method
        only reports the outcome recorded there.
        """
        if self.__status != "PASS":
            if isinstance(self.__status, error.TestNAError):
                self.cancel(str(self.__status))
            elif isinstance(self.__status, error.TestWarn):
                self.log.warn(str(self.__status))
            elif isinstance(self.__status, error.TestFail):
                self.fail(str(self.__status))
            else:
                raise self.__status  # pylint: disable=E0702

    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestSkipError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = list(params.keys())
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Warn about this special condition at the relevant location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Find the test
        subtest_dirs = []
        test_filter = bootstrap.test_filter

        other_subtests_dirs = params.get("other_tests_dirs", "")
        for d in other_subtests_dirs.split():
            d = os.path.join(*d.split("/"))
            subtestdir = os.path.join(self.bindir, d, "tests")
            if not os.path.isdir(subtestdir):
                raise exceptions.TestError("Directory %s does not "
                                           "exist" % subtestdir)
            subtest_dirs += data_dir.SubdirList(subtestdir,
                                                test_filter)

        provider = params.get("provider", None)

        if provider is None:
            # Verify that we have the corresponding source file for it
            generic_subdirs = asset.get_test_provider_subdirs(
                'generic')
            for generic_subdir in generic_subdirs:
                subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                    test_filter)
            specific_subdirs = asset.get_test_provider_subdirs(
                params.get("vm_type"))
            for specific_subdir in specific_subdirs:
                subtest_dirs += data_dir.SubdirList(
                    specific_subdir, bootstrap.test_filter)
        else:
            provider_info = asset.get_test_provider_info(provider)
            for key in provider_info['backends']:
                subtest_dirs += data_dir.SubdirList(
                    provider_info['backends'][key]['path'],
                    bootstrap.test_filter)

        subtest_dir = None

        # Get the test routine corresponding to the specified
        # test type
        logging.debug("Searching for test modules that match "
                      "'type = %s' and 'provider = %s' "
                      "on this cartesian dict",
                      params.get("type"),
                      params.get("provider", None))

        t_types = params.get("type").split()
        # Make sure we can load provider_lib in tests
        for s in subtest_dirs:
            if os.path.dirname(s) not in sys.path:
                sys.path.insert(0, os.path.dirname(s))

        test_modules = {}
        for t_type in t_types:
            for d in subtest_dirs:
                module_path = os.path.join(d, "%s.py" % t_type)
                if os.path.isfile(module_path):
                    logging.debug("Found subtest module %s",
                                  module_path)
                    subtest_dir = d
                    break
            if subtest_dir is None:
                msg = ("Could not find test file %s.py on test"
                       "dirs %s" % (t_type, subtest_dirs))
                raise exceptions.TestError(msg)
            # Load the test module
            f, p, d = imp.find_module(t_type, [subtest_dir])
            test_modules[t_type] = imp.load_module(t_type, f, p, d)
            f.close()

        # TODO: the environment file is deprecated code, and should be removed
        # in future versions. Right now, it's being created on an Avocado temp
        # dir that is only persisted during the runtime of one job, which is
        # different from the original idea of the environment file (which was
        # to persist information across virt-test/avocado-vt job runs)
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({"func_at_exit": cleanup_env,
                               "args": (env_filename, self.env_version),
                               "once": True})

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self.__safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self.__safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s" %
                                                  error_message)

                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self.__safe_env_save(env)
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except:  # nopep8 Old-style exceptions are not inherited from Exception()

                        stacktrace.log_exc_info(sys.exc_info(),
                                                'avocado.test')
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s",
                                      sys.exc_info()[1])
                finally:
                    if self.__safe_env_save(env):
                        env.destroy()   # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)

        return test_passed
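The deeply nested try/finally blocks in _runTest() above encode one fixed lifecycle: preprocess, run, postprocess, saving the env after every phase, with postprocess errors fatal only when the test itself passed. The control flow reduced to a skeleton; the four callables are placeholders for env_process.preprocess, the test function(s), env_process.postprocess and env.save:

# Skeleton of the preprocess/run/postprocess flow used by _runTest().
import logging

def run_with_lifecycle(preprocess, run, postprocess, save):
    test_passed = False
    try:
        try:
            preprocess()
        finally:
            save()                  # the env is saved even if setup blew up
        run()
        test_passed = True
    finally:
        try:
            postprocess()
        except Exception as details:
            if test_passed:
                raise               # a passing test must not hide cleanup bugs
            logging.error("Exception raised during postprocessing: %s",
                          details)
        finally:
            save()
    return test_passed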
Example #6
class virt(test.test):
    """
    Shared test class infrastructure for tests such as the KVM test.

    It comprises a subtest load system, use of parameters, and an env
    file, all code that can be reused among those virt tests.
    """
    version = 1
    env_version = utils_env.get_env_version()

    def initialize(self, params):
        # Change the value of the preserve_srcdir attribute according to
        # the value present on the configuration file (defaults to yes)
        if params.get("preserve_srcdir", "yes") == "yes":
            self.preserve_srcdir = True
        virtdir = os.path.dirname(sys.modules[__name__].__file__)
        self.virtdir = os.path.join(virtdir, "shared")
        # Place where virt software will be built/linked
        self.builddir = os.path.join(virtdir, params.get("vm_type"))
        self.background_errors = Queue.Queue()

    def verify_background_errors(self):
        """
        Verify if there are any errors that happened on background threads.

        @raise Exception: Any exception stored on the background_errors queue.
        """
        try:
            exc = self.background_errors.get(block=False)
        except Queue.Empty:
            pass
        else:
            raise exc[1], None, exc[2]

    def run_once(self, params):
        # Convert params to a Params object
        params = utils_params.Params(params)

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        utils_misc.set_log_file_dir(self.debugdir)

        # Open the environment file
        env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False

        try:
            try:
                try:
                    subtest_dirs = []
                    tests_dir = self.job.testdir

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        # Replace split char.
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(tests_dir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s not"
                                                  " exist." % (subtestdir))
                        subtest_dirs.append(subtestdir)
                    # Verify that we have the corresponding source file for it
                    virt_dir = os.path.dirname(self.virtdir)
                    subtest_dirs.append(os.path.join(virt_dir, "tests"))
                    subtest_dirs.append(
                        os.path.join(self.bindir, params.get("vm_type"),
                                     "tests"))
                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    t_types = params.get("type").split()
                    test_modules = []
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on tests"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules.append(
                            (t_type, imp.load_module(t_type, f, p, d)))
                        f.close()
                    # Preprocess
                    try:
                        env_process.preprocess(self, params, env)
                    finally:
                        env.save()
                    # Run the test function
                    for t_type, test_module in test_modules:
                        msg = "Running function: %s.run_%s()" % (t_type,
                                                                 t_type)
                        logging.info(msg)
                        run_func = getattr(test_module, "run_%s" % t_type)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True

                except Exception, e:
                    logging.error("Test failed: %s: %s", e.__class__.__name__,
                                  e)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
Example #7
class VirtTest(test.Test):
    """
    Minimal test class used to run a virt test.
    """

    env_version = utils_env.get_env_version()

    def __init__(self,
                 methodName='runTest',
                 name=None,
                 params=None,
                 base_logdir=None,
                 tag=None,
                 job=None,
                 runner_queue=None,
                 vt_params=None):
        """
        :note: methodName, name, base_logdir, tag, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')

        self.iteration = 0
        self.outputdir = None
        self.resultsdir = None
        self.logfile = None
        self.file_handler = None
        self.background_errors = Queue.Queue()
        self.whiteboard = None
        super(VirtTest, self).__init__(methodName=methodName,
                                       name=name,
                                       params=params,
                                       base_logdir=base_logdir,
                                       tag=tag,
                                       job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type"))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        self.avocado_params = self.params
        self.params = utils_params.Params(vt_params)
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        self.timeout = vt_params.get("test_timeout", self.timeout)
        utils_misc.set_log_file_dir(self.logdir)

    @property
    def datadir(self):
        """
        Returns the path to the directory that contains test data files

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file and do not provide
        the concept of a datadir.
        """
        return None

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file.
        """
        return None

    def _start_logging(self):
        super(VirtTest, self)._start_logging()
        root_logger = logging.getLogger()
        root_logger.addHandler(self.file_handler)

    def _stop_logging(self):
        super(VirtTest, self)._stop_logging()
        root_logger = logging.getLogger()
        root_logger.removeHandler(self.file_handler)

    def write_test_keyval(self, d):
        self.whiteboard = str(d)

    def verify_background_errors(self):
        """
        Verify if there are any errors that happened on background threads.

        :raise Exception: Any exception stored on the background_errors queue.
        """
        try:
            exc = self.background_errors.get(block=False)
        except Queue.Empty:
            pass
        else:
            raise exc[1], None, exc[2]

    def runTest(self):
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
        # This trick will give better reporting of virt tests being executed
        # into avocado (skips, warns and errors will display correctly)
        except error.TestNAError, details:
            raise exceptions.TestSkipError(details)
        except error.TestWarn, details:
            raise exceptions.TestWarn(details)
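Several of these examples pin LANG to 'C' before running the test body so tool output parses predictably, then restore (or unset) the caller's locale in a finally block. That save/restore dance factored into a standalone helper; the function name is hypothetical:

# Hypothetical helper wrapping a callable in a temporary LANG=C environment.
import os

def with_c_locale(func, *args, **kwargs):
    env_lang = os.environ.get('LANG')
    os.environ['LANG'] = 'C'
    try:
        return func(*args, **kwargs)
    finally:
        if env_lang:
            os.environ['LANG'] = env_lang    # restore the original locale
        else:
            del os.environ['LANG']           # LANG was unset before; unset it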
Example #8
class VirtTest(test.Test):
    """
    Minimal test class used to run a virt test.
    """

    env_version = utils_env.get_env_version()

    def __init__(self,
                 methodName='runTest',
                 name=None,
                 params=None,
                 base_logdir=None,
                 job=None,
                 runner_queue=None,
                 vt_params=None):
        """
        :note: methodName, name, base_logdir, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        self.__params_vt = None
        self.__avocado_params = None
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')
        # self.__params_vt must be initialized after super
        params_vt = utils_params.Params(vt_params)
        # for timeout use Avocado-vt timeout as default but allow
        # overriding from Avocado params (varianter)
        self.timeout = params_vt.get("test_timeout", self.timeout)

        self.iteration = 0
        self.resultsdir = None
        self.background_errors = error_event.error_events_bus
        # clear existing error events
        self.background_errors.clear()
        super(VirtTest, self).__init__(methodName=methodName,
                                       name=name,
                                       params=params,
                                       base_logdir=base_logdir,
                                       job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type", ""))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        try:
            self.__avocado_params = super(VirtTest, self).params
        except AttributeError:
            # 36LTS sets `self.params` instead of having it as a property,
            # which stores the avocado params in `self.__params`
            self.__avocado_params = self.__params
        self.__params_vt = params_vt
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        utils_misc.set_log_file_dir(self.logdir)
        self.__status = None
        self.__exc_info = None

    @property
    def params(self):
        """
        Avocado-vt test params

        During `avocado.Test.__init__` this reports the original params;
        once the Avocado-vt params are set, it reports those instead. This
        is necessary to complete the `avocado.Test.__init__` phase.
        """
        if self.__params_vt is not None:
            return self.__params_vt
        else:
            # `self.__params` is set after `avocado.Test.__init__`, but
            # newer Avocado uses `self.params` during `__init__`.
            # Report the parent's value in that case.
            return super(VirtTest, self).params

    @params.setter
    def params(self, value):
        """
        For compatibility with 36LTS we need to support a setter on params
        """
        self.__params_vt = value
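
    # A hedged usage sketch of the switch-over (values illustrative):
    # before `self.__params_vt` is assigned, `self.params` resolves to
    # the Avocado varianter params; afterwards it resolves to the
    # Cartesian-config params, while the originals stay reachable:
    #
    #   test.params.get("vm_type")   # Cartesian (Avocado-vt) params
    #   test.avocado_params          # original varianter params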

    @property
    def avocado_params(self):
        """
        Original Avocado (multiplexer/varianter) params
        """
        return self.__avocado_params

    @property
    def datadir(self):
        """
        Returns the path to the directory that contains test data files

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file and do not provide
        the concept of a datadir.
        """
        return None

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file.
        """
        return None

    def get_state(self):
        """
        Avocado releases prior to 60.0 override the `self.__params`
        attribute, which requires special handling when reporting the state.

        TODO: Remove when 52LTS is deprecated.
        """
        state = super(VirtTest, self).get_state()
        if state["params"] == self.__params_vt:
            state["params"] = self.avocado_params
        return state

    def write_test_keyval(self, d):
        self.whiteboard = str(d)

    def verify_background_errors(self):
        """
        Verify if there are any errors that happened on background threads.
        Log all errors from background_errors into background-error.log
        and mark the test as an error if any were found.
        """
        err_file_path = os.path.join(self.logdir, BG_ERR_FILE)
        bg_errors = self.background_errors.get_all()
        error_messages = ["BACKGROUND ERROR LIST:"]
        for index, exc_info in enumerate(bg_errors):
            error_messages.append(
                "- ERROR #%d -\n%s" %
                (index, "".join(traceback.format_exception(*exc_info))))
        genio.write_file(err_file_path, '\n'.join(error_messages))
        if bg_errors:
            msg = ["Background error"]
            msg.append("s are" if len(bg_errors) > 1 else " is")
            msg.append((" detected, please refer to file: "
                        "'%s' for more details.") % BG_ERR_FILE)
            self.error(''.join(msg))
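
    # Hedged sketch of the producer side: background threads are expected
    # to push `sys.exc_info()` tuples onto the shared bus. The exact
    # registration call below is an assumption (only get_all() and
    # clear() are visible in this snippet):
    #
    #   try:
    #       do_background_work()  # hypothetical worker
    #   except Exception:
    #       error_event.error_events_bus.put(sys.exc_info())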

    def __safe_env_save(self, env):
        """
        Treat "env.save()" exception as warnings

        :param env: The virttest env object
        :return: True on failure
        """
        try:
            env.save()
        except Exception as details:
            try:
                pickle.dumps(env.data)
            except Exception:
                self.log.warn("Unable to save environment: %s",
                              stacktrace.str_unpickable_object(env.data))
            else:
                self.log.warn("Unable to save environment: %s (%s)", details,
                              env.data)
            return True
        return False
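
    # Design note: the pickle.dumps() probe above distinguishes an env
    # whose data is inherently unpicklable (log the offending objects)
    # from a transient save failure (log the exception and the data).
    # Either way the caller gets True and can force env.destroy().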

    def setUp(self):
        """
        Avocado-vt uses custom setUp/test/tearDown handling and, unlike
        Avocado, allows skipping tests from any phase. To convince Avocado
        to allow skips, the actual test runs during the setUp phase and
        only the status is reported in the test phase.
        """
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
            self.__status = "PASS"
        # This trick gives better reporting of virt tests executed under
        # avocado (skips, warns and errors display correctly)
        except exceptions.TestSkipError:
            self.__exc_info = sys.exc_info()
            raise  # This one has to be raised in setUp
        except:  # nopep8 Old-style exceptions are not inherited from Exception()
            self.__exc_info = sys.exc_info()
            self.__status = self.__exc_info[1]
        finally:
            # Clean up libvirtd debug logs if the test did not fail or error
            if self.params.get("libvirtd_log_cleanup", "no") == "yes":
                if (self.params.get("vm_type") == 'libvirt'
                        and self.params.get("enable_libvirtd_debug_log",
                                            "yes") == "yes"):
                    libvirtd_log = self.params["libvirtd_debug_file"]
                    if ("TestFail" not in str(self.__exc_info)
                            and "TestError" not in str(self.__exc_info)):
                        if libvirtd_log and os.path.isfile(libvirtd_log):
                            logging.info("cleaning libvirtd logs...")
                            os.remove(libvirtd_log)
                    else:
                        # tar the libvirtd log and archive
                        logging.info("archiving libvirtd debug logs")
                        from virttest import utils_package
                        if utils_package.package_install("tar"):
                            if os.path.isfile(libvirtd_log):
                                archive = os.path.join(
                                    os.path.dirname(libvirtd_log),
                                    "libvirtd.tar.gz")
                                cmd = ("tar -zcf %s -P %s" %
                                       (pipes.quote(archive),
                                        pipes.quote(libvirtd_log)))
                                if process.system(cmd) == 0:
                                    os.remove(libvirtd_log)
                            else:
                                logging.error("Unable to find log file: %s",
                                              libvirtd_log)
                        else:
                            logging.error(
                                "Unable to find tar to compress libvirtd "
                                "logs")

            if env_lang:
                os.environ['LANG'] = env_lang
            else:
                del os.environ['LANG']

    def runTest(self):
        """
        Only report the results.

        The actual testing happens during the setUp stage; this method
        just re-raises whatever non-PASS status was recorded there.
        """
        if self.__status != "PASS":
            raise self.__status  # pylint: disable=E0702
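
        # "PASS" is a sentinel string; any other recorded status is an
        # exception instance, so re-raising it reproduces the original
        # fail/warn outcome. Skips never reach this point because setUp()
        # re-raises TestSkipError directly.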

    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestSkipError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = list(params.keys())
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Warn about this special condition in the relevant locations in
        # output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Find the test
        subtest_dirs = []
        test_filter = bootstrap.test_filter

        other_subtests_dirs = params.get("other_tests_dirs", "")
        for d in other_subtests_dirs.split():
            # If d starts with a "/", an absolute path is assumed;
            # otherwise the path is taken as relative to bindir
            subtestdir = os.path.join(self.bindir, d, "tests")
            if not os.path.isdir(subtestdir):
                raise exceptions.TestError("Directory %s does not "
                                           "exist" % subtestdir)
            subtest_dirs += data_dir.SubdirList(subtestdir, test_filter)

        provider = params.get("provider", None)

        if provider is None:
            # Verify that we have the corresponding source files for it
            generic_subdirs = asset.get_test_provider_subdirs('generic')
            for generic_subdir in generic_subdirs:
                subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                    test_filter)
            specific_subdirs = asset.get_test_provider_subdirs(
                params.get("vm_type"))
            for specific_subdir in specific_subdirs:
                subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                    bootstrap.test_filter)
        else:
            provider_info = asset.get_test_provider_info(provider)
            for key in provider_info['backends']:
                subtest_dirs += data_dir.SubdirList(
                    provider_info['backends'][key]['path'],
                    bootstrap.test_filter)

        subtest_dir = None

        # Get the test routine corresponding to the specified
        # test type
        logging.debug(
            "Searching for test modules that match "
            "'type = %s' and 'provider = %s' "
            "on this cartesian dict", params.get("type"),
            params.get("provider", None))

        t_types = params.get("type").split()
        # Make sure we can load provider_lib in tests
        for s in subtest_dirs:
            if os.path.dirname(s) not in sys.path:
                sys.path.insert(0, os.path.dirname(s))

        test_modules = {}
        for t_type in t_types:
            subtest_dir = None
            for d in subtest_dirs:
                module_path = os.path.join(d, "%s.py" % t_type)
                if os.path.isfile(module_path):
                    logging.debug("Found subtest module %s", module_path)
                    subtest_dir = d
                    break
            if subtest_dir is None:
                msg = ("Could not find test file %s.py on test "
                       "dirs %s" % (t_type, subtest_dirs))
                raise exceptions.TestError(msg)
            # Load the test module
            f, p, d = imp.find_module(t_type, [subtest_dir])
            test_modules[t_type] = imp.load_module(t_type, f, p, d)
            f.close()
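            # Note: `imp` is deprecated since Python 3.4. A roughly
            # equivalent importlib-based load (a sketch, not what this
            # snippet actually does) would be:
            #
            #   spec = importlib.util.spec_from_file_location(
            #       t_type, module_path)
            #   module = importlib.util.module_from_spec(spec)
            #   spec.loader.exec_module(module)
            #   test_modules[t_type] = module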

        # Open the environment file
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        if params.get_boolean("job_env_cleanup", "yes"):
            self.runner_queue.put({
                "func_at_exit": cleanup_env,
                "args": (env_filename, self.env_version),
                "once": True
            })
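            # The message above asks the runner to invoke
            # cleanup_env(env_filename, self.env_version) exactly once,
            # when the job finishes, so stale env files do not leak
            # between jobs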

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self.__safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self.__safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn(
                            "funcatexit failed with: %s" % error_message)

                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self.__safe_env_save(env)
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except:  # nopep8 Old-style exceptions are not inherited from Exception()
                        stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s",
                            sys.exc_info()[1])
                finally:
                    if self.__safe_env_save(env) or params.get(
                            "env_cleanup", "no") == "yes":
                        env.destroy()  # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)

        return test_passed
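
A hypothetical driver showing the intended call order for this class. The
constructor arguments normally come from the Avocado runner; wiring them
by hand as below is illustrative only and may not work unchanged on every
Avocado release (the vt_params keys are examples, not a complete set):

vt_params = {"vm_type": "qemu", "type": "boot"}
test = VirtTest(base_logdir="/tmp/results", job=None,
                runner_queue=None, vt_params=vt_params)
test.setUp()    # runs the actual virt test and records the status
test.runTest()  # re-raises anything that was not a PASS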