Example #1
    def _execute_cmd(self):
        """
        Run the executable, and log its detailed execution.
        """
        try:
            test_params = dict([(str(key), str(val))
                                for _, key, val in self.params.iteritems()])  # pylint: disable=W1620

            input_encoding = self._config.get('core.input_encoding')
            result = process.run(self._command,
                                 verbose=True,
                                 env=test_params,
                                 encoding=input_encoding)

            self._log_detailed_cmd_info(result)
        except process.CmdError as details:
            self._log_detailed_cmd_info(details.result)
            test_failure = self._cmd_error_to_test_failure(details)
            raise exceptions.TestFail(test_failure)

        warn_regex = self._config.get('simpletests.status.warn_regex')
        warn_location = self._config.get('simpletests.status.warn_location')
        skip_regex = self._config.get('simpletests.status.skip_regex')
        skip_location = self._config.get('simpletests.status.skip_location')

        # Keeping compatibility with 'avocado_warn' libexec
        for regex in [warn_regex, r'^\d\d:\d\d:\d\d WARN \|']:
            warn_msg = ("Test passed but there were warnings on %s during "
                        "execution. Check the log for details.")
            if regex is not None:
                re_warn = re.compile(regex, re.MULTILINE)
                if warn_location in ['all', 'stdout']:
                    if re_warn.search(result.stdout_text):
                        raise exceptions.TestWarn(warn_msg % 'stdout')

                if warn_location in ['all', 'stderr']:
                    if re_warn.search(result.stderr_text):
                        raise exceptions.TestWarn(warn_msg % 'stderr')

        if skip_regex is not None:
            re_skip = re.compile(skip_regex, re.MULTILINE)
            skip_msg = ("Test passed but %s indicates test was skipped. "
                        "Check the log for details.")

            if skip_location in ['all', 'stdout']:
                if re_skip.search(result.stdout_text):
                    raise exceptions.TestSkipError(skip_msg % 'stdout')

            if skip_location in ['all', 'stderr']:
                if re_skip.search(result.stderr_text):
                    raise exceptions.TestSkipError(skip_msg % 'stderr')
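
The check above only runs after the command has already passed: configurable regexes are matched against stdout and stderr, and a hit is escalated to TestWarn or TestSkipError. Below is a minimal, self-contained sketch of just that classification step using only the standard library; FakeResult and classify_output are placeholders for this illustration, not part of the avocado-vt API.

import re
from collections import namedtuple

# Stand-in for the `result` object returned by process.run() in the example
# (an assumption for the sketch, not avocado's API).
FakeResult = namedtuple("FakeResult", ["stdout_text", "stderr_text"])


def classify_output(result, warn_regex=None, warn_location='all',
                    skip_regex=None, skip_location='all'):
    """Return 'warn', 'skip' or 'pass' by scanning the captured output."""
    def matches(regex, location, stream):
        if regex is None or location not in ('all', stream):
            return False
        text = result.stdout_text if stream == 'stdout' else result.stderr_text
        return re.search(regex, text, re.MULTILINE) is not None

    for stream in ('stdout', 'stderr'):
        if matches(warn_regex, warn_location, stream):
            return 'warn'
    for stream in ('stdout', 'stderr'):
        if matches(skip_regex, skip_location, stream):
            return 'skip'
    return 'pass'


result = FakeResult("12:00:01 WARN | deprecated option used\n", "")
print(classify_output(result, warn_regex=r'^\d\d:\d\d:\d\d WARN \|'))  # warn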
Example #2
 def _catch_test_status(self, method):
     """Wrapper around test methods for catching and logging failures."""
     try:
         method()
         if self.__log_warn_used:
             raise exceptions.TestWarn("Test passed but there were warnings "
                                       "during execution. Check the log for "
                                       "details.")
     except exceptions.TestBaseException as detail:
         self.__status = detail.status
         self.__fail_class = detail.__class__.__name__
         self.__fail_reason = astring.to_text(detail)
         self.__traceback = stacktrace.prepare_exc_info(sys.exc_info())
     except AssertionError as detail:
         self.__status = 'FAIL'
         self.__fail_class = detail.__class__.__name__
         self.__fail_reason = astring.to_text(detail)
         self.__traceback = stacktrace.prepare_exc_info(sys.exc_info())
     except Exception as detail:  # pylint: disable=W0703
         self.__status = 'ERROR'
         tb_info = stacktrace.tb_info(sys.exc_info())
         self.__traceback = stacktrace.prepare_exc_info(sys.exc_info())
         try:
             self.__fail_class = astring.to_text(detail.__class__.__name__)
             self.__fail_reason = astring.to_text(detail)
         except TypeError:
             self.__fail_class = "Exception"
             self.__fail_reason = ("Unable to get exception, check the "
                                   "traceback for details.")
         for e_line in tb_info:
             self.log.error(e_line)
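
The wrapper above converts whatever the test method raises into a status plus failure metadata instead of letting the exception escape. A rough, framework-free sketch of the same mapping follows; the TestWarn class and the returned tuple layout are stand-ins for illustration, not avocado's actual objects.

import traceback


class TestWarn(Exception):
    """Placeholder for exceptions.TestWarn; carries its own status."""
    status = 'WARN'


def catch_test_status(method):
    """Run `method` and return (status, fail_class, fail_reason, tb_text)."""
    try:
        method()
    except TestWarn as detail:
        return (detail.status, type(detail).__name__, str(detail),
                traceback.format_exc())
    except AssertionError as detail:
        return ('FAIL', type(detail).__name__, str(detail),
                traceback.format_exc())
    except Exception as detail:  # pylint: disable=broad-except
        return ('ERROR', type(detail).__name__, str(detail),
                traceback.format_exc())
    return ('PASS', None, None, None)


def warns():
    raise TestWarn("test passed but there were warnings during execution")


print(catch_test_status(warns)[0])  # WARN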
Example #3
 def run(self, result=None):
     super(SimpleTest, self).run(result)
     for line in open(self.logfile):
         if self.re_avocado_log.match(line):
             raise exceptions.TestWarn("Test passed but there were warnings"
                                       " on stdout during execution. Check "
                                       "the log for details.")
Example #4
    def unset_root(params, object=None):
        """
        Unset a root state to prevent object existence.

        All arguments match the base class and in addition:

        :raises: :py:class:`exceptions.TestWarn` if permanent vm was detected

        Remove the ramdisk, volume group, thin pool and logical volume
        of each object (all off).
        """
        vm_name = params["vms"]
        mount_loc = LVMBackend._get_images_mount_loc(params)
        logging.info("Removing original logical volume for %s", vm_name)
        try:
            if mount_loc:
                if lv_utils.vg_check(params["vg_name"]):
                    # mount to avoid not-mounted errors
                    try:
                        lv_utils.lv_mount(params["vg_name"],
                                          params["lv_pointer_name"], mount_loc)
                    except lv_utils.LVException:
                        pass
                    lv_utils.lv_umount(params["vg_name"],
                                       params["lv_pointer_name"])
                if os.path.exists(mount_loc):
                    try:
                        os.rmdir(mount_loc)
                    except OSError as ex:
                        logging.warning(
                            "No permanent vm can be removed automatically. If "
                            "this is not a permanent test object, see the debug."
                        )
                        raise exceptions.TestWarn(
                            "Permanent vm %s was detected but cannot be "
                            "removed automatically" % vm_name)
            lv_utils.vg_ramdisk_cleanup(
                params["ramdisk_sparse_filename"],
                os.path.join(params["ramdisk_basedir"], params["vg_name"]),
                params["vg_name"], None, params["use_tmpfs"] == "yes")
        except exceptions.TestError as ex:
            logging.error(ex)
Example #5
    def _runTest(self):
        params = self.params

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        self._log_parameters()

        # Warn of this special condition in related location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        subtest_dirs = self._get_subtest_dirs()

        # Get the test routine corresponding to the specified
        # test type
        logging.debug("Searching for test modules that match "
                      "'type = %s' and 'provider = %s' "
                      "on this cartesian dict",
                      params.get("type"),
                      params.get("provider", None))

        t_types = params.get("type").split()

        utils.insert_dirs_to_path(subtest_dirs)

        test_modules = utils.find_test_modules(t_types, subtest_dirs)

        # Open the environment file
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        if params.get_boolean("job_env_cleanup", "yes"):
            self.runner_queue.put({"func_at_exit": cleanup_env,
                                   "args": (env_filename, self.env_version),
                                   "once": True})

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Pre-process
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self._safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self._safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s" %
                                                  error_message)

                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self._safe_env_save(env)
                    raise

            finally:
                # Post-process
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except:  # nopep8 Old-style exceptions are not inherited from Exception()

                        stacktrace.log_exc_info(sys.exc_info(),
                                                'avocado.test')
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s",
                                      sys.exc_info()[1])
                finally:
                    if self._safe_env_save(env) or params.get("env_cleanup", "no") == "yes":
                        env.destroy()   # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)

        return test_passed
Example #6
class VirtTest(test.Test):

    """
    Minimal test class used to run a virt test.
    """

    env_version = utils_env.get_env_version()

    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, job=None, runner_queue=None,
                 vt_params=None):
        """
        :note: methodName, name, base_logdir, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')

        self.iteration = 0
        self.resultsdir = None
        self.file_handler = None
        self.background_errors = Queue.Queue()
        super(VirtTest, self).__init__(methodName=methodName, name=name,
                                       params=params,
                                       base_logdir=base_logdir, job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type"))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        self.__params = utils_params.Params(vt_params)
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        self.timeout = vt_params.get("test_timeout", self.timeout)
        utils_misc.set_log_file_dir(self.logdir)

    @property
    def params(self):
        """
        Avocado-vt test params

        During `avocado.Test.__init__` this reports the original params but
        once the Avocado-vt params are set it reports those instead. This
        is necessary to complete the `avocado.Test.__init__` phase
        """
        try:
            return self.__params
        except AttributeError:
            return self.avocado_params

    @params.setter
    def params(self, value):
        """
        For compatibility with 36.0 LTS we need to support a setter on params
        """
        self.__params = value

    @property
    def avocado_params(self):
        """
        Original Avocado (multiplexer/varianter) params
        """
        return super(VirtTest, self).params

    @property
    def datadir(self):
        """
        Returns the path to the directory that contains test data files

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file and do not provide
        the concept of a datadir.
        """
        return None

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file.
        """
        return None

    def get_state(self):
        """
        Avocado-vt replaces Test.params with avocado-vt params. This function
        reports the original params on `get_state` call.
        """
        state = super(VirtTest, self).get_state()
        state["params"] = self.__dict__.get("avocado_params")
        return state

    def _start_logging(self):
        super(VirtTest, self)._start_logging()
        root_logger = logging.getLogger()
        root_logger.addHandler(self.file_handler)

    def _stop_logging(self):
        super(VirtTest, self)._stop_logging()
        root_logger = logging.getLogger()
        root_logger.removeHandler(self.file_handler)

    def write_test_keyval(self, d):
        self.whiteboard = str(d)

    def verify_background_errors(self):
        """
        Verify if there are any errors that happened on background threads.

        :raise Exception: Any exception stored on the background_errors queue.
        """
        try:
            exc = self.background_errors.get(block=False)
        except Queue.Empty:
            pass
        else:
            raise exc[1], None, exc[2]

    def __safe_env_save(self, env):
        """
        Treat "env.save()" exception as warnings

        :param env: The virttest env object
        :return: True on failure
        """
        try:
            env.save()
        except Exception as details:
            if hasattr(stacktrace, "str_unpickable_object"):
                self.log.warn("Unable to save environment: %s",
                              stacktrace.str_unpickable_object(env.data))
            else:    # TODO: Remove when 36.0 LTS is not supported
                self.log.warn("Unable to save environment: %s (%s)", details,
                              env.data)
            return True
        return False

    def setUp(self):
        """
        Avocado-vt uses custom setUp/test/tearDown handling and unlike
        Avocado it allows skipping tests from any phase. To convince
        Avocado to allow skips let's say our tests run during setUp
        phase and don't do anything during runTest/tearDown.
        """
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
        # This trick will give better reporting of virt tests being executed
        # into avocado (skips, warns and errors will display correctly)
        except error.TestNAError, details:
            raise exceptions.TestSkipError(details)
        except error.TestWarn, details:
            raise exceptions.TestWarn(details)
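
Example #6 above depends on a small trick: `params` is a property that serves the framework's original params until the cartesian params are installed, and serves those afterwards. The stripped-down sketch below illustrates only that fallback property; both class names and attributes are invented for the illustration and are not avocado's.

class BaseTest:
    """Stand-in for avocado's test.Test, which exposes `params` itself."""

    def __init__(self):
        self._framework_params = {'origin': 'avocado varianter'}

    @property
    def params(self):
        return self._framework_params


class VTLikeTest(BaseTest):
    """Serves the framework params until the cartesian params are set."""

    @property
    def params(self):
        try:
            return self.__params          # cartesian params, once installed
        except AttributeError:
            return super().params         # framework params before that

    @params.setter
    def params(self, value):
        self.__params = value


test = VTLikeTest()
print(test.params)                        # {'origin': 'avocado varianter'}
test.params = {'origin': 'cartesian config'}
print(test.params)                        # {'origin': 'cartesian config'}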
Example #7
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestSkipError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = list(params.keys())
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Warn of this special condition in related location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Find the test
        subtest_dirs = []
        test_filter = bootstrap.test_filter

        other_subtests_dirs = params.get("other_tests_dirs", "")
        for d in other_subtests_dirs.split():
            d = os.path.join(*d.split("/"))
            subtestdir = os.path.join(self.bindir, d, "tests")
            if not os.path.isdir(subtestdir):
                raise exceptions.TestError("Directory %s does not "
                                           "exist" % subtestdir)
            subtest_dirs += data_dir.SubdirList(subtestdir,
                                                test_filter)

        provider = params.get("provider", None)

        if provider is None:
            # Verify that we have the corresponding source file for it
            generic_subdirs = asset.get_test_provider_subdirs(
                'generic')
            for generic_subdir in generic_subdirs:
                subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                    test_filter)
            specific_subdirs = asset.get_test_provider_subdirs(
                params.get("vm_type"))
            for specific_subdir in specific_subdirs:
                subtest_dirs += data_dir.SubdirList(
                    specific_subdir, bootstrap.test_filter)
        else:
            provider_info = asset.get_test_provider_info(provider)
            for key in provider_info['backends']:
                subtest_dirs += data_dir.SubdirList(
                    provider_info['backends'][key]['path'],
                    bootstrap.test_filter)

        subtest_dir = None

        # Get the test routine corresponding to the specified
        # test type
        logging.debug("Searching for test modules that match "
                      "'type = %s' and 'provider = %s' "
                      "on this cartesian dict",
                      params.get("type"),
                      params.get("provider", None))

        t_types = params.get("type").split()
        # Make sure we can load provider_lib in tests
        for s in subtest_dirs:
            if os.path.dirname(s) not in sys.path:
                sys.path.insert(0, os.path.dirname(s))

        test_modules = {}
        for t_type in t_types:
            for d in subtest_dirs:
                module_path = os.path.join(d, "%s.py" % t_type)
                if os.path.isfile(module_path):
                    logging.debug("Found subtest module %s",
                                  module_path)
                    subtest_dir = d
                    break
            if subtest_dir is None:
                msg = ("Could not find test file %s.py on test"
                       "dirs %s" % (t_type, subtest_dirs))
                raise exceptions.TestError(msg)
            # Load the test module
            f, p, d = imp.find_module(t_type, [subtest_dir])
            test_modules[t_type] = imp.load_module(t_type, f, p, d)
            f.close()

        # TODO: the environment file is deprecated code, and should be removed
        # in future versions. Right now, it's being created on an Avocado temp
        # dir that is only persisted during the runtime of one job, which is
        # different from the original idea of the environment file (which was
        # to persist information across virt-test/avocado-vt job runs)
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({"func_at_exit": cleanup_env,
                               "args": (env_filename, self.env_version),
                               "once": True})

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self.__safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self.__safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s" %
                                                  error_message)

                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self.__safe_env_save(env)
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except:  # nopep8 Old-style exceptions are not inherited from Exception()

                        stacktrace.log_exc_info(sys.exc_info(),
                                                'avocado.test')
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s",
                                      sys.exc_info()[1])
                finally:
                    if self.__safe_env_save(env):
                        env.destroy()   # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)

        return test_passed
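
Examples #5 and #7 above (and #12 below) locate each test module by searching the collected subtest directories for '<type>.py' and loading it with imp. The sketch below is a rough standalone equivalent of that lookup using importlib, since imp is deprecated and removed in recent Python releases; it is an illustration, not the code these plugins actually ship.

import importlib.util
import os


def find_and_load(t_type, subtest_dirs):
    """Locate <t_type>.py under subtest_dirs and import it as a module."""
    for directory in subtest_dirs:
        module_path = os.path.join(directory, "%s.py" % t_type)
        if not os.path.isfile(module_path):
            continue
        spec = importlib.util.spec_from_file_location(t_type, module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module
    raise LookupError("Could not find test file %s.py in dirs %s"
                      % (t_type, subtest_dirs))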
Example #8
    def check_image(self, params, root_dir):
        """
        Check an image using the appropriate tools for each virt backend.

        :param params: Dictionary containing the test parameters.
        :param root_dir: Base directory for relative filenames.

        :note: params should contain:
               image_name -- the name of the image file, without extension
               image_format -- the format of the image (qcow2, raw etc)

        :raise VMImageCheckError: In case qemu-img check fails on the image.
        """
        image_filename = self.image_filename
        logging.debug("Checking image file %s", image_filename)
        qemu_img_cmd = self.image_cmd
        image_is_checkable = self.image_format in ['qcow2', 'qed']

        if (storage.file_exists(params, image_filename)
                or self.is_remote_image()) and image_is_checkable:
            check_img = self.support_cmd("check") and self.support_cmd("info")
            if not check_img:
                logging.debug("Skipping image check "
                              "(lack of support in qemu-img)")
            else:
                try:
                    process.run("%s info %s" % (qemu_img_cmd, image_filename),
                                shell=True,
                                verbose=False)
                except process.CmdError:
                    logging.error("Error getting info from image %s",
                                  image_filename)

                cmd_result = process.run("%s check %s" %
                                         (qemu_img_cmd, image_filename),
                                         ignore_status=True,
                                         shell=True,
                                         verbose=False)
                # Error check, large chances of a non-fatal problem.
                # There are chances that bad data was skipped though
                if cmd_result.exit_status == 1:
                    for e_line in cmd_result.stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    for e_line in cmd_result.stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    chk = params.get("backup_image_on_check_error", "no")
                    if chk == "yes":
                        self.backup_image(params, root_dir, "backup", False)
                    raise exceptions.TestWarn(
                        "qemu-img check exceptions. Some bad "
                        "data in the image may have gone"
                        " unnoticed (%s)" % image_filename)
                # Exit status 2 is data corruption for sure,
                # so fail the test
                elif cmd_result.exit_status == 2:
                    for e_line in cmd_result.stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    for e_line in cmd_result.stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    chk = params.get("backup_image_on_check_error", "no")
                    if chk == "yes":
                        self.backup_image(params, root_dir, "backup", False)
                    raise virt_vm.VMImageCheckError(image_filename)
                # Leaked clusters, they are known to be harmless to data
                # integrity
                elif cmd_result.exit_status == 3:
                    raise exceptions.TestWarn("Leaked clusters were noticed"
                                              " during image check. No data "
                                              "integrity problem was found "
                                              "though. (%s)" % image_filename)

                # Just handle normal operation
                if params.get("backup_image", "no") == "yes":
                    self.backup_image(params, root_dir, "backup", True, True)
        else:
            if not storage.file_exists(params, image_filename):
                logging.debug("Image file %s not found, skipping check",
                              image_filename)
            elif not image_is_checkable:
                logging.debug(
                    "Image format %s is not checkable, skipping check",
                    self.image_format)
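
Example #8 above and Example #14 below both hinge on how the `qemu-img check` exit status is interpreted: 1 is treated as a warning, 2 as confirmed corruption, and 3 as harmless leaked clusters. The compact sketch below isolates that decision table; the exception classes are placeholders for exceptions.TestWarn and virt_vm.VMImageCheckError, not the real ones.

class ImageCheckError(Exception):
    """Placeholder for virt_vm.VMImageCheckError."""


class ImageCheckWarning(Exception):
    """Placeholder for exceptions.TestWarn."""


def interpret_check_status(exit_status, image_filename):
    """Map a qemu-img check exit status to an outcome, as in the example."""
    if exit_status == 0:
        return "ok"
    if exit_status == 1:      # internal errors; bad data may have gone unnoticed
        raise ImageCheckWarning("check incomplete on %s" % image_filename)
    if exit_status == 2:      # confirmed data corruption
        raise ImageCheckError(image_filename)
    if exit_status == 3:      # leaked clusters; data integrity is still fine
        raise ImageCheckWarning("leaked clusters in %s" % image_filename)
    raise ImageCheckError("unexpected status %d on %s"
                          % (exit_status, image_filename))


print(interpret_check_status(0, "disk.qcow2"))  # ok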
Example #9
        whiteboard_file = os.path.join(self.logdir, 'whiteboard')
        genio.write_file(whiteboard_file, self.whiteboard)

        # pylint: disable=E0702
        if test_exception is not None:
            raise test_exception
        elif cleanup_exception is not None:
            raise exceptions.TestSetupFail(cleanup_exception)
        elif stdout_check_exception is not None:
            raise stdout_check_exception
        elif stderr_check_exception is not None:
            raise stderr_check_exception
        elif self._Test__log_warn_used:
            raise exceptions.TestWarn("Test passed but there were warnings "
                                      "during execution. Check the log for "
                                      "details.")

        self.status = 'PASS'
        self.sysinfo_logger.end_test_hook()

    def get_state(self):
        """
        Avocado-HealthCheckTest replaces Test.params with avocado-ct params.
        This function reports the original params on `get_state` call.
        """
        state = super(HealthCheckTest, self).get_state()
        state["params"] = self.__dict__.get("avocado_params")
        return state

    def _start_logging(self):
Example #10
class NFVTest(test.Test):
    """
    Main test class used to run a NFV test.
    """

    env_version = utils_env.get_env_version()

    def __init__(self,
                 methodName='runTest',
                 name=None,
                 params=None,
                 base_logdir=None,
                 job=None,
                 runner_queue=None,
                 ct_params=None):
        """
        :note: methodName, name, base_logdir, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param ct_params: avocado-cloudtest/cartesian_config params stored as
                          `self.params`.
        """
        self.bindir = data_dir.get_root_dir()

        self.iteration = 0
        self.outputdir = None
        self.resultsdir = None
        self.logfile = None
        self.file_handler = None
        self.hc_file_handler = None

        self.whiteboard = None
        super(NFVTest, self).__init__(methodName=methodName,
                                      name=name,
                                      params=params,
                                      base_logdir=base_logdir,
                                      job=job,
                                      runner_queue=runner_queue)
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize cloudtest
        # (cartesian_config) params
        self.avocado_params = self.params
        self.params = utils_params.Params(ct_params)
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        self.timeout = ct_params.get("test_timeout", self.timeout)

    @property
    def datadir(self):
        """
        Returns the path to the directory that contains test data files

        For NFVTest tests, this always returns None. The reason is that
        individual CloudTest tests do not map 1:1 to a file and do not provide
        the concept of a datadir.
        """
        return None

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test

        For NFVTest tests, this always returns None. The reason is that
        individual CloudTest tests do not map 1:1 to a file.
        """
        return None

    def get_state(self):
        """
        Avocado-cloudtest replaces Test.params with avocado-ct params.
        This function reports the original params on `get_state` call.
        """
        state = super(NFVTest, self).get_state()
        state["params"] = self.__dict__.get("avocado_params")
        return state

    def _start_logging(self):
        super(NFVTest, self)._start_logging()
        root_logger = logging.getLogger()
        root_logger.addHandler(self.file_handler)

    def _stop_logging(self):
        super(NFVTest, self)._stop_logging()
        root_logger = logging.getLogger()
        root_logger.removeHandler(self.file_handler)

    def _start_logging_hc(self):
        self._filename = os.path.join(self.logdir, 'health_check.log')
        self.hc_file_handler = logging.FileHandler(filename=self._filename)
        self.hc_file_handler.setLevel(logging.DEBUG)
        fmt = '%(asctime)s %(levelname)-5.5s| %(message)s'
        formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')
        self.hc_file_handler.setFormatter(formatter)
        self.log.addHandler(self.hc_file_handler)

    def _stop_logging_hc(self):
        self.log.removeHandler(self.hc_file_handler)

    def write_test_keyval(self, d):
        self.whiteboard = str(d)

    def __safe_env_save(self, env):
        """
        Treat "env.save()" exception as warnings

        :param env: The cloudtest env object
        :return: True on failure
        """
        try:
            env.save()
        except Exception as details:
            if hasattr(stacktrace, "str_unpickable_object"):
                self.log.warn("Unable to save environment: %s",
                              stacktrace.str_unpickable_object(env.data))
            else:  # TODO: Remove when 36.0 LTS is not supported
                self.log.warn("Unable to save environment: %s (%s)", details,
                              env.data)
            return True
        return False

    def runTest(self):
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
        except exceptions.TestNotFoundError, details:
            raise exceptions.TestSkipError(details)
        except exceptions.TestWarn, details:
            raise exceptions.TestWarn(details)
Example #11
class VirtTest(test.Test):
    """
    Minimal test class used to run a virt test.
    """

    env_version = utils_env.get_env_version()

    def __init__(self,
                 methodName='runTest',
                 name=None,
                 params=None,
                 base_logdir=None,
                 tag=None,
                 job=None,
                 runner_queue=None,
                 vt_params=None):
        """
        :note: methodName, name, base_logdir, tag, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')

        self.iteration = 0
        self.outputdir = None
        self.resultsdir = None
        self.logfile = None
        self.file_handler = None
        self.background_errors = Queue.Queue()
        self.whiteboard = None
        super(VirtTest, self).__init__(methodName=methodName,
                                       name=name,
                                       params=params,
                                       base_logdir=base_logdir,
                                       tag=tag,
                                       job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type"))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        self.avocado_params = self.params
        self.params = utils_params.Params(vt_params)
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        self.timeout = vt_params.get("test_timeout", self.timeout)
        utils_misc.set_log_file_dir(self.logdir)

    @property
    def datadir(self):
        """
        Returns the path to the directory that contains test data files

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file and do not provide
        the concept of a datadir.
        """
        return None

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test

        For VT tests, this always returns None. The reason is that
        individual VT tests do not map 1:1 to a file.
        """
        return None

    def _start_logging(self):
        super(VirtTest, self)._start_logging()
        root_logger = logging.getLogger()
        root_logger.addHandler(self.file_handler)

    def _stop_logging(self):
        super(VirtTest, self)._stop_logging()
        root_logger = logging.getLogger()
        root_logger.removeHandler(self.file_handler)

    def write_test_keyval(self, d):
        self.whiteboard = str(d)

    def verify_background_errors(self):
        """
        Verify if there are any errors that happened on background threads.

        :raise Exception: Any exception stored on the background_errors queue.
        """
        try:
            exc = self.background_errors.get(block=False)
        except Queue.Empty:
            pass
        else:
            raise exc[1], None, exc[2]

    def runTest(self):
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
        # This trick will give better reporting of virt tests being executed
        # into avocado (skips, warns and errors will display correctly)
        except error.TestNAError, details:
            raise exceptions.TestSkipError(details)
        except error.TestWarn, details:
            raise exceptions.TestWarn(details)
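
Examples #6, #10 and #11 above all force LANG='C' before dispatching to _runTest so that tool output stays parseable. The excerpts do not show LANG being restored afterwards, so the restore in the sketch below is an assumption about the intent rather than code from these projects; it only illustrates the override pattern with the standard library.

import os
from contextlib import contextmanager


@contextmanager
def forced_lang(value='C'):
    """Temporarily force LANG, restoring (or unsetting) the previous value."""
    previous = os.environ.get('LANG')
    os.environ['LANG'] = value
    try:
        yield
    finally:
        if previous is None:
            os.environ.pop('LANG', None)
        else:
            os.environ['LANG'] = previous


with forced_lang('C'):
    print(os.environ['LANG'])  # C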
Example #12
    def run_once(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.info("Starting test %s", self.tag)
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Warn of this special condition in related location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Open the environment file
        env_filename = os.path.join(
            data_dir.get_backend_dir(params.get("vm_type")),
            params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_types = None
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(self.bindir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise exceptions.TestError("Directory %s does not "
                                                       "exist" % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(subtestdir,
                                                            bootstrap.test_filter)

                    provider = params.get("provider", None)

                    if provider is None:
                        # Verify that we have the corresponding source file for it
                        for generic_subdir in asset.get_test_provider_subdirs('generic'):
                            subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                                bootstrap.test_filter)

                        for specific_subdir in asset.get_test_provider_subdirs(params.get("vm_type")):
                            subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                                bootstrap.test_filter)
                    else:
                        provider_info = asset.get_test_provider_info(provider)
                        for key in provider_info['backends']:
                            subtest_dirs += data_dir.SubdirList(
                                provider_info['backends'][key]['path'],
                                bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug("Searching for test modules that match "
                                  "'type = %s' and 'provider = %s' "
                                  "on this cartesian dict",
                                  params.get("type"), params.get("provider", None))

                    t_types = params.get("type").split()
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                logging.debug("Found subtest module %s",
                                              module_path)
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise exceptions.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s"
                                                  % error_message)

                except Exception, e:
                    if (t_type is not None):
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_qemu_command())
                raise exceptions.JobError("Abort requested (%s)" % e)
Example #13
    def _run_avocado(self):
        """
        Auxiliary method to run_avocado.
        """
        testMethod = getattr(self, self._testMethodName)
        if self._config.get("run.test_runner") != 'nrunner':
            self._start_logging()
        if self.__sysinfo_enabled:
            self.__sysinfo_logger.start()
        test_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        skip_test_condition = getattr(testMethod, '__skip_test_condition__',
                                      False)
        skip_test_condition_negate = getattr(testMethod,
                                             '__skip_test_condition_negate__',
                                             False)
        if skip_test_condition:
            if callable(skip_test_condition):
                if skip_test_condition_negate:
                    skip_test = not bool(skip_test_condition(self))
                else:
                    skip_test = bool(skip_test_condition(self))
            else:
                if skip_test_condition_negate:
                    skip_test = not bool(skip_test_condition)
                else:
                    skip_test = bool(skip_test_condition)
        else:
            skip_test = bool(skip_test_condition)
        try:
            if skip_test is False:
                self.__phase = 'SETUP'
                self.setUp()
        except exceptions.TestSkipError as details:
            skip_test = True
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            raise exceptions.TestSkipError(details)
        except exceptions.TestCancel as details:
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            raise
        except:  # Old-style exceptions are not inherited from Exception()
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            details = sys.exc_info()[1]
            raise exceptions.TestSetupFail(details)
        else:
            try:
                self.__phase = 'TEST'
                if inspect.iscoroutinefunction(testMethod):
                    loop = asyncio.get_event_loop()
                    loop.run_until_complete(testMethod())
                else:
                    testMethod()
            except exceptions.TestCancel as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                raise
            except:  # Old-style exceptions are not inherited from Exception() pylint: disable=W0702
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                details = sys.exc_info()[1]
                if not isinstance(details,
                                  Exception):  # Avoid passing nasty exc
                    details = exceptions.TestError("%r: %s" %
                                                   (details, details))
                test_exception = details
                self.log.debug("Local variables:")
                local_vars = inspect.trace()[1][0].f_locals
                for key, value in local_vars.items():
                    self.log.debug(' -> %s %s: %s', key, type(value), value)
        finally:
            try:
                if skip_test is False:
                    self.__phase = 'TEARDOWN'
                    self.tearDown()
            except exceptions.TestSkipError as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                skip_illegal_msg = ('Using skip decorators in tearDown() '
                                    'is not allowed in '
                                    'avocado, you must fix your '
                                    'test. Original skip exception: %s' %
                                    details)
                raise exceptions.TestError(skip_illegal_msg)
            except exceptions.TestCancel as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                raise
            except:  # avoid old-style exception failures pylint: disable=W0702
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                details = sys.exc_info()[1]
                cleanup_exception = exceptions.TestSetupFail(details)

        whiteboard_file = os.path.join(self.logdir, 'whiteboard')
        genio.write_file(whiteboard_file, self.whiteboard)

        # pylint: disable=E0702
        if test_exception is not None:
            raise test_exception
        elif cleanup_exception is not None:
            raise cleanup_exception
        elif stdout_check_exception is not None:
            raise stdout_check_exception
        elif stderr_check_exception is not None:
            raise stderr_check_exception
        elif self.__log_warn_used:
            raise exceptions.TestWarn("Test passed but there were warnings "
                                      "during execution. Check the log for "
                                      "details.")

        self.__status = 'PASS'
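
Example #13 above evaluates a skip decorator whose condition can be either a plain value or a callable taking the test instance, optionally negated. That decision is easy to extract on its own; the function below is an illustrative rewrite of just that branch logic, not avocado's implementation.

def should_skip(condition, negate=False, test=None):
    """Mirror the skip-condition handling in the example above."""
    if not condition:
        return bool(condition)                 # no decorator attribute: run the test
    value = condition(test) if callable(condition) else condition
    return not bool(value) if negate else bool(value)


print(should_skip(False))                      # False
print(should_skip(lambda t: t is None))        # True  (callable condition)
print(should_skip(True, negate=True))          # False (negated plain condition)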
Example #14
    def check_image(self, params, root_dir, force_share=False):
        """
        Check an image using the appropriate tools for each virt backend.

        :param params: Dictionary containing the test parameters.
        :param root_dir: Base directory for relative filenames.

        :note: params should contain:
               image_name -- the name of the image file, without extension
               image_format -- the format of the image (qcow2, raw etc)

        :raise VMImageCheckError: In case qemu-img check fails on the image.
        """
        image_filename = self.image_filename
        logging.debug("Checking image file %s", image_filename)
        image_is_checkable = self.image_format in ['qcow2', 'qed']
        force_share &= self.cap_force_share

        if (storage.file_exists(params, image_filename)
                or self.is_remote_image()) and image_is_checkable:
            check_img = self.support_cmd("check") and self.support_cmd("info")
            if not check_img:
                logging.debug("Skipping image check "
                              "(lack of support in qemu-img)")
            else:
                try:
                    # FIXME: do we really need it?
                    self.info(force_share)
                except process.CmdError:
                    logging.error("Error getting info from image %s",
                                  image_filename)
                cmd_dict = {
                    "image_filename": image_filename,
                    "force_share": force_share
                }
                if self.encryption_config.key_secret:
                    cmd_dict["image_filename"] = "'%s'" % \
                        get_image_json(self.tag, params, root_dir)
                secret_objects = self._secret_objects
                if secret_objects:
                    cmd_dict["secret_object"] = " ".join(secret_objects)
                check_cmd = self.image_cmd + " " + \
                    self._cmd_formatter.format(self.check_cmd, **cmd_dict)
                cmd_result = process.run(check_cmd,
                                         ignore_status=True,
                                         shell=True,
                                         verbose=False)
                # Error check, large chances of a non-fatal problem.
                # There are chances that bad data was skipped though
                if cmd_result.exit_status == 1:
                    stdout = results_stdout_52lts(cmd_result)
                    for e_line in stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    stderr = results_stderr_52lts(cmd_result)
                    for e_line in stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    chk = params.get("backup_image_on_check_error", "no")
                    if chk == "yes":
                        self.backup_image(params, root_dir, "backup", False)
                    raise exceptions.TestWarn(
                        "qemu-img check not completed because of internal "
                        "errors. Some bad data in the image may have gone "
                        "unnoticed (%s)" % image_filename)
                # Exit status 2 is data corruption for sure,
                # so fail the test
                elif cmd_result.exit_status == 2:
                    stdout = results_stdout_52lts(cmd_result)
                    for e_line in stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    stderr = results_stderr_52lts(cmd_result)
                    for e_line in stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    chk = params.get("backup_image_on_check_error", "no")
                    if chk == "yes":
                        self.backup_image(params, root_dir, "backup", False)
                    raise virt_vm.VMImageCheckError(image_filename)
                # Leaked clusters, they are known to be harmless to data
                # integrity
                elif cmd_result.exit_status == 3:
                    raise exceptions.TestWarn("Leaked clusters were noticed"
                                              " during image check. No data "
                                              "integrity problem was found "
                                              "though. (%s)" % image_filename)
        else:
            if not storage.file_exists(params, image_filename):
                logging.debug("Image file %s not found, skipping check",
                              image_filename)
            elif not image_is_checkable:
                logging.debug(
                    "Image format %s is not checkable, skipping check",
                    self.image_format)
Example #15
class SecurityTest(test.Test):
    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, job=None, runner_queue=None, ct_params=None):
        self.bindir = data_dir.get_root_dir()

        self.iteration = 0
        self.resultsdir = None
        self.file_handler = None
        self.background_errors = Queue.Queue()
        self.whiteboard = None
        self.casename = name
        super(SecurityTest, self).__init__(methodName=methodName, name=name,
                                           params=params,
                                           base_logdir=base_logdir,
                                           runner_queue=runner_queue, job=job)
        self.avocado_params = self.params
        self.params = utils_params.Params(ct_params)
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        self.timeout = ct_params.get("test_timeout", self.timeout)

    @property
    def datadir(self):
        """
        Returns the path to the directory that contains test data files

        For SecurityTest tests, this always returns None. The reason is that
        individual CloudTest tests do not map 1:1 to a file and do not provide
        the concept of a datadir.
        """
        return None

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test

        For SecurityTest tests, this always returns None. The reason is that
        individual CloudTest tests do not map 1:1 to a file.
        """
        return None

    def get_state(self):
        """
        Avocado-cloudtest replaces Test.params with avocado-ct params.
        This method reports the original avocado params when `get_state`
        is called.
        """
        state = super(SecurityTest, self).get_state()
        state["params"] = self.__dict__.get("avocado_params")
        return state

    def _start_logging(self):
        super(SecurityTest, self)._start_logging()
        root_logger = logging.getLogger()
        root_logger.addHandler(self.file_handler)

    def _stop_logging(self):
        super(SecurityTest, self)._stop_logging()
        root_logger = logging.getLogger()
        root_logger.removeHandler(self.file_handler)

    def write_test_keyval(self, d):
        self.whiteboard = str(d)

    def verify_background_errors(self):
        """
        Verify if there are any errors that happened on background threads.

        :raise Exception: Any exception stored on the background_errors queue.
        """
        try:
            exc = self.background_errors.get(block=False)
        except Queue.Empty:
            pass
        else:
            raise exc[1].with_traceback(exc[2])

    def __safe_env_save(self, env):
        """
        Treat "env.save()" exception as warnings

        :param env: The cloudtest env object
        :return: True on failure
        """
        try:
            env.save()
        except Exception as details:
            if hasattr(stacktrace, "str_unpickable_object"):
                self.log.warn("Unable to save environment: %s",
                              stacktrace.str_unpickable_object(env.data))
            else:  # TODO: Remove when 36.0 LTS is not supported
                self.log.warn("Unable to save environment: %s (%s)", details,
                              env.data)
            return True
        return False

    def runTest(self):
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        params = self.params
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")
        try:
            if params.get("security_type") == "bandit":
                self._banditTest()
            else:
                self._syntribosTest()
        # Re-mapping these exceptions gives better reporting of cloud tests
        # executed under avocado (skips, warns and errors display correctly)
        except exceptions.TestNotFoundError as details:
            raise exceptions.TestSkipError(details)
        except exceptions.TestWarn as details:
            raise exceptions.TestWarn(details)
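
The closing `except` clauses are the essential part of `runTest`: cloudtest exceptions are re-raised as their avocado counterparts so skips and warnings are reported with the right status instead of surfacing as generic errors. A hedged sketch of that re-mapping pattern on its own; `run_with_status_remap` is a hypothetical helper and `exceptions` is assumed to be the avocado module used throughout these examples:

def run_with_status_remap(test_func):
    """Run a test callable and re-map framework exceptions (sketch)."""
    try:
        test_func()
    except exceptions.TestNotFoundError as details:
        # A missing test or failed dependency is a skip, not an error.
        raise exceptions.TestSkipError(details)
    except exceptions.TestWarn as details:
        # Re-raise so the harness records WARN instead of folding it away.
        raise exceptions.TestWarn(details)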
Example #16
0
    def run_once(self, params):
        # Convert params to a Params object
        params = utils_params.Params(params)

        # If a dependency test prior to this test has failed, skip this
        # test right away with TestSkipError.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestSkipError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = sorted(params.keys())
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        utils_misc.set_log_file_dir(self.debugdir)

        # Open the environment file
        custom_env_path = params.get("custom_env_path", "")
        if custom_env_path:
            env_path = custom_env_path
        else:
            env_path = params.get("vm_type")
        env_filename = os.path.join(self.bindir, "backends", env_path,
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        other_subtests_dirs = params.get("other_tests_dirs", "")

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []
                    bin_dir = self.bindir

                    for d in other_subtests_dirs.split():
                        # Replace split char.
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(bin_dir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise exceptions.TestError("Directory %s not"
                                                       " exist." % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(subtestdir,
                                                            bootstrap.test_filter)

                    # Verify that we have the corresponding source file for it
                    for generic_subdir in asset.get_test_provider_subdirs('generic'):
                        subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                            bootstrap.test_filter)
                    for multi_host_migration_subdir in asset.get_test_provider_subdirs(
                            'multi_host_migration'):
                        subtest_dirs += data_dir.SubdirList(multi_host_migration_subdir,
                                                            bootstrap.test_filter)

                    for specific_subdir in asset.get_test_provider_subdirs(params.get("vm_type")):
                        subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                            bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug("Searching for test modules that match "
                                  "'type = %s' and 'provider = %s' "
                                  "on this cartesian dict",
                                  params.get("type"), params.get("provider", None))

                    t_types = params.get("type").split()
                    provider = params.get("provider", None)
                    if provider is not None:
                        subtest_dirs = [
                            d for d in subtest_dirs if provider in d]
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on tests"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise exceptions.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s"
                                                  % error_message)

                except Exception as e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    logging.error("Test failed: %s: %s",
                                  e.__class__.__name__, e)
                    try:
                        env_process.postprocess_on_error(
                            self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)
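
Much of `run_once` is the nested try/finally discipline: `env.save()` runs after preprocessing, after the test body and after postprocessing, whether or not that step failed, and `funcatexit` errors surface as a `TestWarn` rather than being lost. A condensed sketch of that control flow; `guarded_run` and the `preprocess`/`run`/`postprocess`/`run_exit_funcs` callables are hypothetical stand-ins for the `env_process` and `funcatexit` helpers, and the original's swallowing of postprocess errors after a failed test is omitted for brevity:

def guarded_run(env, preprocess, run, postprocess, run_exit_funcs):
    """Sketch of the save-after-every-phase discipline used above."""
    try:
        try:
            preprocess()
        finally:
            env.save()                  # persist env even if preprocess failed
        try:
            run()
        finally:
            env.save()                  # persist env after the test body
        error_message = run_exit_funcs()
        if error_message:
            raise exceptions.TestWarn("funcatexit failed with: %s"
                                      % error_message)
    finally:
        try:
            postprocess()
        finally:
            env.save()                  # persist env after postprocessing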
Example #17
0
    def action(self):
        """
        This should throw a TestWarn.
        """
        raise exceptions.TestWarn('This should throw a TestWarn')