Example 1
    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, job=None, runner_queue=None,
                 vt_params=None):
        """
        :note: methodName, name, base_logdir, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')

        self.iteration = 0
        self.resultsdir = None
        self.file_handler = None
        self.background_errors = Queue.Queue()
        super(VirtTest, self).__init__(methodName=methodName, name=name,
                                       params=params,
                                       base_logdir=base_logdir, job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type"))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        self.__params = utils_params.Params(vt_params)
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        self.timeout = vt_params.get("test_timeout", self.timeout)
        utils_misc.set_log_file_dir(self.logdir)
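
Throughout these constructors, `utils_params.Params` acts as a dictionary-like wrapper whose `get(key, default)` lookups let the code fall back gracefully, e.g. to the inherited timeout when `test_timeout` is absent. A minimal stand-in sketch of that access pattern (not the real utils_params implementation; the keys are illustrative):

    class Params(dict):
        """Stand-in for utils_params.Params: dict.get() already takes a default."""

    vt_params = Params({"vm_type": "qemu"})
    timeout = vt_params.get("test_timeout", 3600)   # falls back to the default
    print("%s %s" % (vt_params.get("vm_type"), timeout))  # -> qemu 3600
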
Example 2
    def __init__(self,
                 methodName='runTest',
                 name=None,
                 params=None,
                 base_logdir=None,
                 tag=None,
                 job=None,
                 runner_queue=None,
                 vt_params=None):
        """
        :note: methodName, name, base_logdir, tag, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        del name
        options = job.args
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')

        self.iteration = 0
        name = None
        if options.vt_config:
            name = vt_params.get("shortname")
        elif options.vt_type == 'spice':
            short_name_map_file = vt_params.get("_short_name_map_file")
            if "tests-variants.cfg" in short_name_map_file:
                name = short_name_map_file["tests-variants.cfg"]
        if name is None:
            name = vt_params.get("_short_name_map_file")["subtests.cfg"]
        self.outputdir = None
        self.resultsdir = None
        self.logfile = None
        self.file_handler = None
        self.background_errors = Queue.Queue()
        self.whiteboard = None
        super(VirtTest, self).__init__(methodName=methodName,
                                       name=name,
                                       params=params,
                                       base_logdir=base_logdir,
                                       tag=tag,
                                       job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type"))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        self.avocado_params = self.params
        self.params = utils_params.Params(vt_params)
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        utils_misc.set_log_file_dir(self.logdir)
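
The name-resolution branch above picks the test name from different sources depending on how avocado-vt was invoked. A standalone sketch of the same decision logic, with hypothetical `vt_params` contents:

    # Standalone sketch of the name-resolution logic above; the sample
    # vt_params contents are hypothetical.
    def resolve_name(vt_config, vt_type, vt_params):
        name = None
        if vt_config:                        # explicit --vt-config run
            name = vt_params.get("shortname")
        elif vt_type == 'spice':
            mapping = vt_params.get("_short_name_map_file")
            if "tests-variants.cfg" in mapping:
                name = mapping["tests-variants.cfg"]
        if name is None:                     # default: name from subtests.cfg
            name = vt_params.get("_short_name_map_file")["subtests.cfg"]
        return name

    sample = {"_short_name_map_file": {"subtests.cfg": "migrate.default"}}
    print(resolve_name(False, 'qemu', sample))   # -> migrate.default
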
Example 3
    def __init__(self,
                 methodName='runTest',
                 name=None,
                 params=None,
                 base_logdir=None,
                 job=None,
                 runner_queue=None,
                 vt_params=None):
        """
        :note: methodName, name, base_logdir, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        self.__params_vt = None
        self.__avocado_params = None
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')
        # self.__params_vt must be initialized after super
        params_vt = utils_params.Params(vt_params)
        # for timeout use Avocado-vt timeout as default but allow
        # overriding from Avocado params (varianter)
        self.timeout = params_vt.get("test_timeout", self.timeout)

        self.iteration = 0
        self.resultsdir = None
        self.file_handler = None
        self.background_errors = error_event.error_events_bus
        # clear existing error events
        self.background_errors.clear()
        super(VirtTest, self).__init__(methodName=methodName,
                                       name=name,
                                       params=params,
                                       base_logdir=base_logdir,
                                       job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type", ""))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        try:
            self.__avocado_params = super(VirtTest, self).params
        except AttributeError:
            # 36LTS sets `self.params` instead of having it as a property
            # which stores the avocado params in `self.__params`
            self.__avocado_params = self.__params
        self.__params_vt = params_vt
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        utils_misc.set_log_file_dir(self.logdir)
        self.__status = None
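
The try/except around `super(VirtTest, self).params` handles two generations of the avocado base class: newer releases expose `params` as a class-level property, while 36LTS assigned it as a plain instance attribute, which `super()` lookup cannot see. A reduced sketch of that pattern, with simplified hypothetical class names:

    # Reduced sketch of the property-compatibility dance above. Old mimics
    # 36LTS (instance attribute); New mimics later avocado (property).
    class Old(object):
        def __init__(self):
            self.params = {"era": "36LTS"}   # plain instance attribute

    class New(object):
        @property
        def params(self):                    # class-level read-only property
            return {"era": "modern"}

    def make_child(base):
        class Child(base):
            def __init__(self):
                super(Child, self).__init__()
                try:
                    # super() resolves class attributes only, so this works
                    # for the property but raises for the instance attribute
                    self.avocado_params = super(Child, self).params
                except AttributeError:
                    self.avocado_params = self.params
        return Child()

    print(make_child(New).avocado_params)    # -> {'era': 'modern'}
    print(make_child(Old).avocado_params)    # -> {'era': '36LTS'}
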
Example 4
    def __init__(self, **kwargs):
        """
        :note: methodName, name, base_logdir, job/config and runner_queue
               params are inherited from test.Test.
               Since avocado 86, test.Test takes a config dict instead of a
               job instance. For compatibility with avocado 82.0 LTS the job
               instance cannot be dropped yet: avocado < 86 uses the job
               instance, while avocado >= 86 uses the config.
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        vt_params = kwargs.pop("vt_params", None)
        self.__params_vt = None
        self.__avocado_params = None
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')
        # self.__params_vt must be initialized after super
        params_vt = utils_params.Params(vt_params)
        # for timeout use Avocado-vt timeout as default but allow
        # overriding from Avocado params (varianter)
        self.timeout = params_vt.get("test_timeout", self.timeout)

        self.iteration = 0
        self.resultsdir = None
        self.background_errors = error_event.error_events_bus
        # clear existing error events
        self.background_errors.clear()

        if "methodName" not in kwargs:
            kwargs["methodName"] = 'runTest'
        super(VirtTest, self).__init__(**kwargs)

        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type", ""))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        try:
            self.__avocado_params = super(VirtTest, self).params
        except AttributeError:
            # 36LTS sets `self.params` instead of having it as a property
            # which stores the avocado params in `self.__params`
            self.__avocado_params = self.__params
        self.__params_vt = params_vt
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        utils_misc.set_log_file_dir(self.logdir)
        self.__status = None
        self.__exc_info = None
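
Accepting `**kwargs` and forwarding it untouched is what lets this constructor serve both sides of the `job` to `config` rename described in the docstring: the child never has to name the argument. A toy sketch of that pass-through, with stand-in base classes for the two avocado generations:

    # Toy sketch of the **kwargs pass-through; BaseJob/BaseConfig stand in
    # for test.Test before and after avocado 86.
    class BaseJob(object):                    # older API expects job=
        def __init__(self, methodName='runTest', job=None):
            self.job = job

    class BaseConfig(object):                 # newer API expects config=
        def __init__(self, methodName='runTest', config=None):
            self.config = config

    def build_test(base):
        class Child(base):
            def __init__(self, **kwargs):
                kwargs.setdefault("methodName", 'runTest')
                super(Child, self).__init__(**kwargs)   # forward as received
        return Child

    build_test(BaseJob)(job="job-object")         # old keyword still works
    build_test(BaseConfig)(config={"k": "v"})     # new keyword works too
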
Example 5
    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, tag=None, job=None, runner_queue=None):
        del name
        options = job.args
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')

        self.iteration = 0
        name = None
        if options.vt_config:
            name = params.get("shortname")
        elif options.vt_type == 'spice':
            short_name_map_file = params.get("_short_name_map_file")
            if "tests-variants.cfg" in short_name_map_file:
                name = short_name_map_file["tests-variants.cfg"]
        if name is None:
            name = params.get("_short_name_map_file")["subtests.cfg"]
        self.outputdir = None
        self.resultsdir = None
        self.logfile = None
        self.file_handler = None
        self.background_errors = Queue.Queue()
        self.whiteboard = None
        super(VirtTest, self).__init__(methodName=methodName, name=name,
                                       params=params, base_logdir=base_logdir,
                                       tag=tag, job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     params.get("vm_type"))
        self.tmpdir = os.path.dirname(self.workdir)

        self.params = utils_params.Params(params)
        # Here we take the data the multiplexer injected into the params
        # and turn it into an AvocadoParams object, which allows users to
        # access data from it. Example:
        # sleep_length = test.avocado_params.get('sleep_length', default=1)
        p = params.get('avocado_params', None)
        if p is not None:
            params, mux_path = p[0], p[1]
        else:
            params, mux_path = [], []
        self.avocado_params = multiplexer.AvocadoParams(params, self.name,
                                                        self.tag,
                                                        mux_path,
                                                        self.default_params)

        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        utils_misc.set_log_file_dir(self.logdir)
Example 6
    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, job=None, runner_queue=None,
                 vt_params=None):
        """
        :note: methodName, name, base_logdir, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        self.__params_vt = None
        self.__avocado_params = None
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')
        # self.__params_vt must be initialized after super
        params_vt = utils_params.Params(vt_params)
        # for timeout use Avocado-vt timeout as default but allow
        # overriding from Avocado params (varianter)
        self.timeout = params_vt.get("test_timeout", self.timeout)

        self.iteration = 0
        self.resultsdir = None
        self.file_handler = None
        self.background_errors = error_event.error_events_bus
        # clear existing error events
        self.background_errors.clear()
        super(VirtTest, self).__init__(methodName=methodName, name=name,
                                       params=params,
                                       base_logdir=base_logdir, job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type", ""))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        try:
            self.__avocado_params = super(VirtTest, self).params
        except AttributeError:
            # 36LTS sets `self.params` instead of having it as a property
            # which stores the avocado params in `self.__params`
            self.__avocado_params = self.__params
        self.__params_vt = params_vt
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        utils_misc.set_log_file_dir(self.logdir)
        self.__status = None
Example 7
    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, tag=None, job=None, runner_queue=None,
                 vt_params=None):
        """
        :note: methodName, name, base_logdir, tag, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param vt_params: avocado-vt/cartesian_config params stored as
                          `self.params`.
        """
        del name
        options = job.args
        self.bindir = data_dir.get_root_dir()
        self.virtdir = os.path.join(self.bindir, 'shared')

        self.iteration = 0
        name = None
        if options.vt_config:
            name = vt_params.get("shortname")
        elif options.vt_type == 'spice':
            short_name_map_file = vt_params.get("_short_name_map_file")
            if "tests-variants.cfg" in short_name_map_file:
                name = short_name_map_file["tests-variants.cfg"]
        if name is None:
            name = vt_params.get("_short_name_map_file")["subtests.cfg"]
        self.outputdir = None
        self.resultsdir = None
        self.logfile = None
        self.file_handler = None
        self.background_errors = Queue.Queue()
        self.whiteboard = None
        super(VirtTest, self).__init__(methodName=methodName, name=name,
                                       params=params,
                                       base_logdir=base_logdir,
                                       tag=tag, job=job,
                                       runner_queue=runner_queue)
        self.builddir = os.path.join(self.workdir, 'backends',
                                     vt_params.get("vm_type"))
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize virttest
        # (cartesian_config) params
        self.avocado_params = self.params
        self.params = utils_params.Params(vt_params)
        self.debugdir = self.logdir
        self.resultsdir = self.logdir
        utils_misc.set_log_file_dir(self.logdir)
Example 8
    def run_once(self, params):
        # Convert params to a Params object
        params = utils_params.Params(params)

        # If a dependency test prior to this test has failed, skip this
        # test right away with TestSkipError.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestSkipError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        utils_misc.set_log_file_dir(self.debugdir)

        # Open the environment file
        custom_env_path = params.get("custom_env_path", "")
        if custom_env_path:
            env_path = custom_env_path
        else:
            env_path = params.get("vm_type")
        env_filename = os.path.join(self.bindir, "backends", env_path,
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        other_subtests_dirs = params.get("other_tests_dirs", "")

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []
                    bin_dir = self.bindir

                    for d in other_subtests_dirs.split():
                        # Replace split char.
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(bin_dir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise exceptions.TestError("Directory %s not"
                                                       " exist." % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(subtestdir,
                                                            bootstrap.test_filter)

                    # Verify if we have the corresponding source file for it
                    for generic_subdir in asset.get_test_provider_subdirs('generic'):
                        subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                            bootstrap.test_filter)
                    for multi_host_migration_subdir in asset.get_test_provider_subdirs(
                            'multi_host_migration'):
                        subtest_dirs += data_dir.SubdirList(multi_host_migration_subdir,
                                                            bootstrap.test_filter)

                    for specific_subdir in asset.get_test_provider_subdirs(params.get("vm_type")):
                        subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                            bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug("Searching for test modules that match "
                                  "'type = %s' and 'provider = %s' "
                                  "on this cartesian dict",
                                  params.get("type"), params.get("provider", None))

                    t_types = params.get("type").split()
                    provider = params.get("provider", None)
                    if provider is not None:
                        subtest_dirs = [
                            d for d in subtest_dirs if provider in d]
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on tests"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise exceptions.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s"
                                                  % error_message)

                except Exception as e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    logging.error("Test failed: %s: %s",
                                  e.__class__.__name__, e)
                    try:
                        env_process.postprocess_on_error(
                            self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)
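
`funcatexit.run_exitfuncs(env, t_type)` executes cleanup functions that were registered during the test for that test type and returns any accumulated error text instead of raising. A reduced sketch of such a registry pattern (the real funcatexit module in avocado-vt keeps its registry on the env object and differs in details):

    # Reduced sketch of an exit-function registry keyed by test type.
    _registry = {}

    def register(t_type, func, *args):
        _registry.setdefault(t_type, []).append((func, args))

    def run_exitfuncs(t_type):
        errors = []
        for func, args in _registry.pop(t_type, []):
            try:
                func(*args)
            except Exception as exc:     # collect errors, keep running the rest
                errors.append("%s: %s" % (func.__name__, exc))
        return "\n".join(errors)         # empty string means everything passed

    register("boot", lambda: None)
    print(run_exitfuncs("boot") or "no funcatexit errors")
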
Example 9
    def run_once(self, params):
        # Convert params to a Params object
        params = utils_params.Params(params)

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        utils_misc.set_log_file_dir(self.debugdir)

        # Open the environment file
        custom_env_path = params.get("custom_env_path", "")
        if custom_env_path:
            env_path = custom_env_path
        else:
            env_path = params.get("vm_type")
        env_filename = os.path.join(self.bindir, "backends", env_path,
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        other_subtests_dirs = params.get("other_tests_dirs", "")

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []
                    bin_dir = self.bindir

                    for d in other_subtests_dirs.split():
                        # Replace split char.
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(bin_dir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s not"
                                                  " exist." % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(subtestdir,
                                                            bootstrap.test_filter)

                    # Verify if we have the corresponding source file for it
                    for generic_subdir in asset.get_test_provider_subdirs('generic'):
                        subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                            bootstrap.test_filter)

                    for specific_subdir in asset.get_test_provider_subdirs(params.get("vm_type")):
                        subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                            bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug("Searching for test modules that match "
                                  "'type = %s' and 'provider = %s' "
                                  "on this cartesian dict",
                                  params.get("type"), params.get("provider", None))

                    t_types = params.get("type").split()
                    provider = params.get("provider", None)
                    if provider is not None:
                        subtest_dirs = [
                            d for d in subtest_dirs if provider in d]
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on tests"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s"
                                             % error_message)

                except Exception as e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    logging.error("Test failed: %s: %s",
                                  e.__class__.__name__, e)
                    try:
                        env_process.postprocess_on_error(
                            self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_create_command())
                raise error.JobError("Abort requested (%s)" % e)
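
The `imp.find_module`/`imp.load_module` pair used in these versions is Python 2 era API; `imp` was deprecated since Python 3.4 and removed in 3.12. A sketch of the same search-the-dirs-for-`<t_type>.py` lookup written against `importlib`:

    # Sketch of the same module lookup with importlib instead of imp.
    import os
    import importlib.util

    def load_test_module(t_type, subtest_dirs):
        for d in subtest_dirs:
            module_path = os.path.join(d, "%s.py" % t_type)
            if os.path.isfile(module_path):
                spec = importlib.util.spec_from_file_location(t_type,
                                                              module_path)
                module = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(module)
                return module
        raise RuntimeError("Could not find test file %s.py in tests dirs %s"
                           % (t_type, subtest_dirs))
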
Example 10
    def run_once(self, params):
        # Convert params to a Params object
        params = utils_params.Params(params)

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        utils_misc.set_log_file_dir(self.debugdir)

        # Open the environment file
        env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False

        try:
            try:
                try:
                    subtest_dirs = []
                    tests_dir = self.job.testdir

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        # Replace split char.
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(tests_dir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s not"
                                                  " exist." % (subtestdir))
                        subtest_dirs.append(subtestdir)
                    # Verify if we have the corresponding source file for it
                    virt_dir = os.path.dirname(self.virtdir)
                    subtest_dirs.append(os.path.join(virt_dir, "tests"))
                    subtest_dirs.append(os.path.join(self.bindir,
                                                     params.get("vm_type"),
                                                     "tests"))
                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    t_types = params.get("type").split()
                    test_modules = []
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on tests"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules.append((t_type,
                                             imp.load_module(t_type, f, p, d)))
                        f.close()
                    # Preprocess
                    try:
                        env_process.preprocess(self, params, env)
                    finally:
                        env.save()
                    # Run the test function
                    for t_type, test_module in test_modules:
                        msg = "Running function: %s.run_%s()" % (t_type, t_type)
                        logging.info(msg)
                        run_func = getattr(test_module, "run_%s" % t_type)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True

                except Exception as e:
                    logging.error("Test failed: %s: %s",
                                  e.__class__.__name__, e)
                    try:
                        env_process.postprocess_on_error(
                            self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "kvm":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("'%s' has a %s monitor unix socket at: %s",
                                     vm.name, m.protocol, m.filename)
                    logging.info("The command line used to start '%s' was:\n%s",
                                 vm.name, vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
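
In this older version the entry point is found purely by naming convention: the module for test type `foo` must define a `run_foo` function. A self-contained sketch of that `getattr` dispatch, with an inline module built purely for illustration:

    # Self-contained sketch of the "run_<t_type>" dispatch used above.
    import types

    mod = types.ModuleType("boot")

    def run_boot(test, params, env):
        print("running boot, main_vm = %s" % params.get("main_vm"))

    mod.run_boot = run_boot

    t_type = "boot"
    run_func = getattr(mod, "run_%s" % t_type)  # AttributeError if missing
    run_func(None, {"main_vm": "vm1"}, None)
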
Example 11
    def run_once(self, params):
        # Convert params to a Params object
        params = utils_params.Params(params)

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        utils_misc.set_log_file_dir(self.debugdir)

        # Open the environment file
        env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False

        try:
            try:
                try:
                    subtest_dirs = []
                    tests_dir = self.job.testdir

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        # Replace split char.
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(tests_dir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s not"
                                                  " exist." % (subtestdir))
                        subtest_dirs.append(subtestdir)
                    # Verify if we have the corresponding source file for it
                    virt_dir = os.path.dirname(self.virtdir)
                    subtest_dirs.append(os.path.join(virt_dir, "tests"))
                    subtest_dirs.append(
                        os.path.join(self.bindir, params.get("vm_type"),
                                     "tests"))
                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    t_types = params.get("type").split()
                    test_modules = []
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on tests"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules.append(
                            (t_type, imp.load_module(t_type, f, p, d)))
                        f.close()
                    # Preprocess
                    try:
                        env_process.preprocess(self, params, env)
                    finally:
                        env.save()
                    # Run the test function
                    for t_type, test_module in test_modules:
                        msg = "Running function: %s.run_%s()" % (t_type,
                                                                 t_type)
                        logging.info(msg)
                        run_func = getattr(test_module, "run_%s" % t_type)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True

                except Exception as e:
                    logging.error("Test failed: %s: %s", e.__class__.__name__,
                                  e)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
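
Across all these versions, `env` (`utils_env.Env`) is a persistent store that survives between tests, which is why preprocess, the test body, and postprocess each call `env.save()` inside a `finally` block. A minimal pickle-backed sketch of that save-on-every-exit discipline (the real utils_env.Env adds a version check, locking, and VM registries):

    # Minimal pickle-backed sketch of the env.save() discipline.
    import os
    import pickle
    import tempfile

    class Env(dict):
        def __init__(self, filename):
            super(Env, self).__init__()
            self._filename = filename

        def save(self):
            with open(self._filename, "wb") as f:
                pickle.dump(dict(self), f)   # persist a plain-dict snapshot

    fd, path = tempfile.mkstemp()
    os.close(fd)
    env = Env(path)
    try:
        env["vm_state"] = "booted"           # work that may raise
    finally:
        env.save()                           # state persists even on failure
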