def map_method_with_return(self, method_name, *args, **kwargs):
        """
        The same as `map_method` but additionally reports the list of returned
        values and optionally deepcopies the passed arguments

        :param method_name: Name of the method to be called on each ext
        :param args: Arguments to be passed to all called functions
        :param kwargs: Key-word arguments to be passed to all called functions
                        if `deepcopy=True` is present in kwargs, the
                        args and kwargs are deepcopied before being passed
                        to each called function.
        """
        deepcopy = kwargs.pop("deepcopy", False)
        ret = []
        for ext in self.extensions:
            try:
                if hasattr(ext.obj, method_name):
                    method = getattr(ext.obj, method_name)
                    if deepcopy:
                        copied_args = [copy.deepcopy(arg) for arg in args]
                        copied_kwargs = copy.deepcopy(kwargs)
                        ret.append(method(*copied_args, **copied_kwargs))
                    else:
                        ret.append(method(*args, **kwargs))
            except SystemExit:
                raise
            except KeyboardInterrupt:
                raise
            except:  # catch any exception pylint: disable=W0702
                stacktrace.log_exc_info(sys.exc_info(),
                                        logger='avocado.app.debug')
                LOG_UI.error('Error running method "%s" of plugin "%s": %s',
                             method_name, ext.name,
                             sys.exc_info()[1])
        return ret
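A minimal usage sketch for the dispatcher helper above. Everything below (the fake plugin, extension wrapper and dispatcher) is a made-up stand-in, not part of Avocado; it only illustrates the deepcopy=True behaviour.

import copy  # used by map_method_with_return when deepcopy=True

class _FakePlugin:
    def render(self, data):              # hypothetical extension hook
        data["seen"] = True              # would mutate the caller's dict...
        return sorted(data)

class _FakeExt:
    def __init__(self, name, obj):
        self.name, self.obj = name, obj

class _FakeDispatcher:
    extensions = [_FakeExt("demo", _FakePlugin())]

payload = {"k": 1}
# ...unless deepcopy=True is passed, in which case every plugin gets its
# own deep copy and `payload` stays untouched:
results = map_method_with_return(_FakeDispatcher(), "render", payload,
                                 deepcopy=True)
assert "seen" not in payload and results == [["k", "seen"]]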
Example #2
    def setUp(self):
        """
        Avocado-vt uses custom setUp/test/tearDown handling and, unlike
        Avocado, it allows skipping tests from any phase. To convince
        Avocado to allow skips, let's say our tests run during the setUp
        phase and report the status in test.
        """
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
            self.__status = "PASS"
        # This trick will give better reporting of virt tests being executed
        # into avocado (skips, warns and errors will display correctly)
        except exceptions.TestSkipError:
            raise  # This one has to be raised in setUp
        except:  # nopep8 Old-style exceptions are not inherited from Exception()
            details = sys.exc_info()[1]
            stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
            self.__status = details
            if not hasattr(self, "cancel"):  # Old Avocado, skip here
                if isinstance(self.__status, error.TestNAError):
                    raise exceptions.TestSkipError(self.__status)
        finally:
            if env_lang:
                os.environ['LANG'] = env_lang
            else:
                del os.environ['LANG']
Example #3
    def run_test(self, references, timeout):
        """
        Run tests.

        :param references: a list of test references.
        :param timeout: maximum amount of time (in seconds) allowed for the
                        remote execution.
        :return: a dictionary with test results.
        """
        def arg_to_dest(arg):
            """
            Turns long argparse arguments into default dest
            """
            return arg[2:].replace('-', '_')

        extra_params = []
        # bool or nargs
        for arg in ["--mux-yaml", "--dry-run",
                    "--filter-by-tags-include-empty"]:
            value = getattr(self.job.args, arg_to_dest(arg), None)
            if value is True:
                extra_params.append(arg)
            elif value:
                extra_params.append("%s %s" % (arg, " ".join(value)))
        # append
        for arg in ["--filter-by-tags"]:
            value = getattr(self.job.args, arg_to_dest(arg), None)
            if value:
                join = ' %s ' % arg
                extra_params.append("%s %s" % (arg, join.join(value)))

        references_str = " ".join(references)

        avocado_cmd = ('avocado run --force-job-id %s --json - '
                       '--archive %s %s' % (self.job.unique_id,
                                            references_str, " ".join(extra_params)))
        try:
            result = self.remote.run(avocado_cmd, ignore_status=True,
                                     timeout=timeout)
            if result.exit_status & exit_codes.AVOCADO_JOB_FAIL:
                raise exceptions.JobError("Remote execution failed with: %s" % result.stderr)

        except CommandTimeout:
            raise exceptions.JobError("Remote execution took longer than "
                                      "specified timeout (%s). Interrupting."
                                      % (timeout))

        try:
            json_result = self._parse_json_response(result.stdout)
        except:
            stacktrace.log_exc_info(sys.exc_info(),
                                    logger='avocado.app.debug')
            raise exceptions.JobError(result.stdout)

        for t_dict in json_result['tests']:
            logdir = os.path.join(self.job.logdir, 'test-results')
            relative_path = astring.string_to_safe_path(str(t_dict['id']))
            logdir = os.path.join(logdir, relative_path)
            t_dict['logdir'] = logdir
            t_dict['logfile'] = os.path.join(logdir, 'debug.log')

        return json_result
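A standalone sketch of how the option serialization above behaves; the Namespace values are hypothetical and only mimic self.job.args.

from argparse import Namespace

def arg_to_dest(arg):
    """Same helper as above: '--mux-yaml' -> 'mux_yaml'."""
    return arg[2:].replace('-', '_')

args = Namespace(mux_yaml=["a.yaml", "b.yaml"],      # nargs-style option
                 dry_run=True,                       # boolean flag
                 filter_by_tags_include_empty=None,  # not set
                 filter_by_tags=["arch:x86_64", "fast"])

extra_params = []
for arg in ["--mux-yaml", "--dry-run", "--filter-by-tags-include-empty"]:
    value = getattr(args, arg_to_dest(arg), None)
    if value is True:
        extra_params.append(arg)            # bare flag
    elif value:
        extra_params.append("%s %s" % (arg, " ".join(value)))
for arg in ["--filter-by-tags"]:
    value = getattr(args, arg_to_dest(arg), None)
    if value:
        join = ' %s ' % arg
        extra_params.append("%s %s" % (arg, join.join(value)))  # repeat per value

print(" ".join(extra_params))
# -> "--mux-yaml a.yaml b.yaml --dry-run
#     --filter-by-tags arch:x86_64 --filter-by-tags fast"  (one single line)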
Example #4
    def setUp(self):
        """
        Avocado-vt uses custom setUp/test/tearDown handling and, unlike
        Avocado, it allows skipping tests from any phase. To convince
        Avocado to allow skips, let's say our tests run during the setUp
        phase and report the status in test.
        """
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        try:
            self._runTest()
            self.__status = "PASS"
        # This trick will give better reporting of virt tests being executed
        # into avocado (skips, warns and errors will display correctly)
        except exceptions.TestSkipError:
            raise  # This one has to be raised in setUp
        except:  # Old-style exceptions are not inherited from Exception()
            details = sys.exc_info()[1]
            stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
            self.__status = details
            if not hasattr(self, "cancel"):  # Old Avocado, skip here
                if isinstance(self.__status, error.TestNAError):
                    raise exceptions.TestSkipError(self.__status)
        finally:
            if env_lang:
                os.environ['LANG'] = env_lang
            else:
                del os.environ['LANG']
Example #5
    def handle_exception(plugin, details):
        # FIXME: Introduce avocado.exceptions logger and use here
        stacktrace.log_message(
            "Test discovery plugin %s failed: "
            "%s" % (plugin, details), LOG_UI.getChild("exceptions"))
        # FIXME: Introduce avocado.traceback logger and use here
        stacktrace.log_exc_info(sys.exc_info(), LOG_UI.getChild("debug"))
Example #6
    def run_test(self, references, timeout):
        """
        Run tests.

        :param references: a list of test references.
        :param timeout: maximum amount of time (in seconds) allowed for the
                        remote execution.
        :return: a dictionary with test results.
        """
        def arg_to_dest(arg):
            """
            Turns long argparse arguments into default dest
            """
            return arg[2:].replace('-', '_')

        extra_params = []
        # bool or nargs
        for arg in ["--mux-yaml", "--dry-run",
                    "--filter-by-tags-include-empty"]:
            value = getattr(self.job.args, arg_to_dest(arg), None)
            if value is True:
                extra_params.append(arg)
            elif value:
                extra_params.append("%s %s" % (arg, " ".join(value)))
        # append
        for arg in ["--filter-by-tags"]:
            value = getattr(self.job.args, arg_to_dest(arg), None)
            if value:
                join = ' %s ' % arg
                extra_params.append("%s %s" % (arg, join.join(value)))

        references_str = " ".join(references)

        avocado_cmd = ('avocado run --force-job-id %s --json - '
                       '--archive %s %s' % (self.job.unique_id,
                                            references_str, " ".join(extra_params)))
        try:
            result = self.remote.run(avocado_cmd, ignore_status=True,
                                     timeout=timeout)
            if result.exit_status & exit_codes.AVOCADO_JOB_FAIL:
                raise exceptions.JobError("Remote execution failed with: %s" % result.stderr)

        except CommandTimeout:
            raise exceptions.JobError("Remote execution took longer than "
                                      "specified timeout (%s). Interrupting."
                                      % (timeout))

        try:
            json_result = self._parse_json_response(result.stdout)
        except:
            stacktrace.log_exc_info(sys.exc_info(),
                                    logger='avocado.app.debug')
            raise exceptions.JobError(result.stdout)

        for t_dict in json_result['tests']:
            logdir = os.path.join(self.job.logdir, 'test-results')
            relative_path = astring.string_to_safe_path(str(t_dict['id']))
            logdir = os.path.join(logdir, relative_path)
            t_dict['logdir'] = logdir
            t_dict['logfile'] = os.path.join(logdir, 'debug.log')

        return json_result
Example #7
    def create_test_suite(references):
        """
        Creates the test suite for this Job

        This is a public Job API as part of the documented Job phases

        NOTE: This is similar to avocado.core.Job.create_test_suite
        """
        try:
            suite = loader.loader.discover(references)
        except loader.LoaderError as details:
            stacktrace.log_exc_info(sys.exc_info(), LOG_UI.getChild("debug"))
            raise exceptions.OptionValidationError(details)

        if not suite:
            if references:
                references = " ".join(references)
                e_msg = ("No tests found for given test references, try "
                         "'avocado list -V %s' for details" % references)
            else:
                e_msg = ("No test references provided nor any other arguments "
                         "resolved into tests. Please double check the "
                         "executed command.")
            raise exceptions.OptionValidationError(e_msg)

        return suite
Example #8
    def _run_test(self):
        """
        Auxiliary method to run setup and test method.
        """
        self._tag_start()
        testMethod = getattr(self, self._testMethodName)
        skip_test_condition = getattr(testMethod, '__skip_test_condition__', False)
        skip_test_condition_negate = getattr(testMethod, '__skip_test_condition_negate__', False)
        if skip_test_condition:
            if callable(skip_test_condition):
                if skip_test_condition_negate:
                    self.__skip_test = not bool(skip_test_condition(self))
                else:
                    self.__skip_test = bool(skip_test_condition(self))
            else:
                if skip_test_condition_negate:
                    self.__skip_test = not bool(skip_test_condition)
                else:
                    self.__skip_test = bool(skip_test_condition)
        else:
            self.__skip_test = bool(skip_test_condition)
        try:
            if self.__skip_test is False:
                self.__phase = 'SETUP'
                self.setUp()
        except exceptions.TestSkipError as details:
            self.__skip_test = True
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            raise exceptions.TestSkipError(details)
        except exceptions.TestCancel:
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            raise
        except:  # Old-style exceptions are not inherited from Exception()
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            details = sys.exc_info()[1]
            raise exceptions.TestSetupFail(details)
        else:
            try:
                self.__phase = 'TEST'
                if inspect.iscoroutinefunction(testMethod):
                    loop = asyncio.get_event_loop()
                    loop.run_until_complete(testMethod())
                else:
                    testMethod()
            except exceptions.TestCancel:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                raise
            except:  # Old-style exceptions are not inherited from Exception() pylint: disable=W0702
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                details = sys.exc_info()[1]
                if not isinstance(details, Exception):  # Avoid passing nasty exc
                    details = exceptions.TestError(f"{details!r}: {details}")
                self.log.debug("Local variables:")
                local_vars = inspect.trace()[1][0].f_locals
                for key, value in local_vars.items():
                    self.log.debug(' -> %s %s: %s', key, type(value), value)
                raise details

        self.__status = 'PASS'
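The `__skip_test_condition__` handling above is driven by decorators that attach these attributes to the test method. Below is a hedged sketch of such a decorator; the name `skip_if` and the example test are assumptions, only the two dunder attribute names come from the code above.

def skip_if(condition, negate=False):
    """`condition` may be a bool or a callable taking the test instance."""
    def decorator(test_method):
        test_method.__skip_test_condition__ = condition
        test_method.__skip_test_condition_negate__ = negate
        return test_method
    return decorator

class MyTest:                # stand-in for an avocado Test subclass
    @skip_if(lambda self: not hasattr(self, "needed_resource"))
    def test_feature(self):
        pass

# _run_test() reads the attributes from the (bound) test method, calls the
# condition with the test instance if it is callable, applies the optional
# negation, and skips setUp()/the test body when the result is truthy.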
Example #9
    def pre(self, job):
        try:
            if any(test_factory[0] is VirtTest
                   for test_factory in job.test_suite):
                self._lock(job)
        except Exception as detail:
            msg = "Failure trying to set Avocado-VT job lock: %s" % detail
            self.log.error(msg)
            log_exc_info(sys.exc_info(), self.log.name)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL | job.exitcode)
Example #10
    def pre(self, job):
        try:
            if any(test_factory[0] is VirtTest
                   for test_factory in job.test_suite):
                self._lock(job)
        except Exception as detail:
            msg = "Failure trying to set Avocado-VT job lock: %s" % detail
            self.log.error(msg)
            log_exc_info(sys.exc_info(), self.log.name)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL | job.exitcode)
Example #11
    def _run_avocado(self):
        testMethod = getattr(self, self._testMethodName)
        self._start_logging()
        self.sysinfo_logger.start_test_hook()
        test_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        try:
            self.setUp()
        except exceptions.TestSkipError as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSkipError(details)
Example #12
    def run_bandwidth_test(check_net=False, check_iface=False):
        """
        Test bandwidth option for network or interface by tc command.
        """
        iface_inbound = ast.literal_eval(iface_bandwidth_inbound)
        iface_outbound = ast.literal_eval(iface_bandwidth_outbound)
        net_inbound = ast.literal_eval(net_bandwidth_inbound)
        net_outbound = ast.literal_eval(net_bandwidth_outbound)
        net_bridge_name = ast.literal_eval(net_bridge)["name"]
        iface_name = libvirt.get_ifname_host(vm_name, iface_mac)

        try:
            if check_net and net_inbound:
                # Check qdisc rules
                cmd = "tc -d qdisc show dev %s" % net_bridge_name
                qdisc_output = process.system_output(cmd)
                logging.debug("Bandwidth qdisc output: %s", qdisc_output)
                if not qdisc_output.count("qdisc ingress ffff:"):
                    test.fail("Can't find ingress setting")
                check_class_rules(net_bridge_name, "1:1", {
                    "average": net_inbound["average"],
                    "peak": net_inbound["peak"]
                })
                check_class_rules(net_bridge_name, "1:2", net_inbound)

            # Check filter rules on bridge interface
            if check_net and net_outbound:
                check_filter_rules(net_bridge_name, net_outbound)

            # Check class rules on interface inbound settings
            if check_iface and iface_inbound:
                check_class_rules(
                    iface_name, "1:1", {
                        'average': iface_inbound['average'],
                        'peak': iface_inbound['peak'],
                        'burst': iface_inbound['burst']
                    })
                if "floor" in iface_inbound:
                    if not libvirt_version.version_compare(1, 0, 1):
                        test.cancel("Not supported Qos options 'floor'")

                    check_class_rules(net_bridge_name, "1:3",
                                      {'floor': iface_inbound["floor"]})

            # Check filter rules on interface outbound settings
            if check_iface and iface_outbound:
                check_filter_rules(iface_name, iface_outbound)
        except AssertionError:
            stacktrace.log_exc_info(sys.exc_info())
            test.fail("Failed to check network bandwidth")
Example #13
    def pre_tests(self, job):
        try:
            if job.test_suite is not None:
                if hasattr(job.test_suite, 'tests'):
                    tests = job.test_suite.tests
                else:
                    tests = job.test_suite
                if any(test_factory[0] is VirtTest for test_factory in tests):
                    self._lock(job)
        except Exception as detail:
            msg = "Failure trying to set Avocado-VT job lock: %s" % detail
            self.log.error(msg)
            log_exc_info(sys.exc_info(), self.log.name)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL | job.exitcode)
Example #14
    def run_test(self, references, timeout):
        """
        Run tests.

        :param references: a list of test references.
        :param timeout: maximum amount of time (in seconds) allowed for the
                        remote execution.
        :return: a dictionary with test results.
        """
        extra_params = []
        mux_files = getattr(self.job.args, 'mux_yaml') or []
        if mux_files:
            extra_params.append("-m %s" % " ".join(mux_files))

        if getattr(self.job.args, "dry_run", False):
            extra_params.append("--dry-run")
        references_str = " ".join(references)

        avocado_cmd = (
            'avocado run --force-job-id %s --json - '
            '--archive %s %s' %
            (self.job.unique_id, references_str, " ".join(extra_params)))
        try:
            result = self.remote.run(avocado_cmd,
                                     ignore_status=True,
                                     timeout=timeout)
            if result.exit_status & exit_codes.AVOCADO_JOB_FAIL:
                raise exceptions.JobError("Remote execution failed with: %s" %
                                          result.stderr)

        except CommandTimeout:
            raise exceptions.JobError("Remote execution took longer than "
                                      "specified timeout (%s). Interrupting." %
                                      (timeout))

        try:
            json_result = self._parse_json_response(result.stdout)
        except:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.debug')
            raise exceptions.JobError(result.stdout)

        for t_dict in json_result['tests']:
            logdir = os.path.join(self.job.logdir, 'test-results')
            relative_path = astring.string_to_safe_path(str(t_dict['test']))
            logdir = os.path.join(logdir, relative_path)
            t_dict['logdir'] = logdir
            t_dict['logfile'] = os.path.join(logdir, 'debug.log')

        return json_result
Example #15
    def run_bandwidth_test(check_net=False, check_iface=False):
        """
        Test bandwidth option for network or interface by tc command.
        """
        iface_inbound = ast.literal_eval(iface_bandwidth_inbound)
        iface_outbound = ast.literal_eval(iface_bandwidth_outbound)
        net_inbound = ast.literal_eval(net_bandwidth_inbound)
        net_outbound = ast.literal_eval(net_bandwidth_outbound)
        net_bridge_name = ast.literal_eval(net_bridge)["name"]
        iface_name = libvirt.get_ifname_host(vm_name, iface_mac)

        try:
            if check_net and net_inbound:
                # Check qdisc rules
                cmd = "tc -d qdisc show dev %s" % net_bridge_name
                qdisc_output = process.system_output(cmd)
                logging.debug("Bandwidth qdisc output: %s", qdisc_output)
                if not qdisc_output.count("qdisc ingress ffff:"):
                    test.fail("Can't find ingress setting")
                check_class_rules(net_bridge_name, "1:1",
                                  {"average": net_inbound["average"],
                                   "peak": net_inbound["peak"]})
                check_class_rules(net_bridge_name, "1:2", net_inbound)

            # Check filter rules on bridge interface
            if check_net and net_outbound:
                check_filter_rules(net_bridge_name, net_outbound)

            # Check class rules on interface inbound settings
            if check_iface and iface_inbound:
                check_class_rules(iface_name, "1:1",
                                  {'average': iface_inbound['average'],
                                   'peak': iface_inbound['peak'],
                                   'burst': iface_inbound['burst']})
                if "floor" in iface_inbound:
                    if not libvirt_version.version_compare(1, 0, 1):
                        test.cancel("Not supported Qos options 'floor'")

                    check_class_rules(net_bridge_name, "1:3",
                                      {'floor': iface_inbound["floor"]})

            # Check filter rules on interface outbound settings
            if check_iface and iface_outbound:
                check_filter_rules(iface_name, iface_outbound)
        except AssertionError:
            stacktrace.log_exc_info(sys.exc_info())
            test.fail("Failed to check network bandwidth")
Example #16
    def runTest(self, result=None):
        """
        Run test method, for compatibility with unittest.TestCase.

        :result: Unused param, compatibility with :class:`unittest.TestCase`.
        """
        self.start_logging()
        self.sysinfo_logger.start_test_hook()
        action_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        try:
            self.setup()
        except Exception as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSetupFail(details)
Example #17
    def runTest(self, result=None):
        """
        Run test method, for compatibility with unittest.TestCase.

        :result: Unused param, compatibility with :class:`unittest.TestCase`.
        """
        self.start_logging()
        self.sysinfo_logger.start_test_hook()
        action_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        try:
            self.setup()
        except Exception as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSetupFail(details)
Example #18
    def run_test(self, references, timeout):
        """
        Run tests.

        :param references: a list of test references.
        :param timeout: maximum amount of time (in seconds) allowed for the
                        remote execution.
        :return: a dictionary with test results.
        """
        extra_params = []
        mux_files = getattr(self.job.args, 'mux_yaml', [])
        if mux_files:
            extra_params.append("-m %s" % " ".join(mux_files))

        if getattr(self.job.args, "dry_run", False):
            extra_params.append("--dry-run")
        references_str = " ".join(references)

        avocado_cmd = ('avocado run --force-job-id %s --json - '
                       '--archive %s %s' % (self.job.unique_id,
                                            references_str, " ".join(extra_params)))
        try:
            result = self.remote.run(avocado_cmd, ignore_status=True,
                                     timeout=timeout)
            if result.exit_status & exit_codes.AVOCADO_JOB_FAIL:
                raise exceptions.JobError("Remote execution failed with: %s" % result.stderr)

        except CommandTimeout:
            raise exceptions.JobError("Remote execution took longer than "
                                      "specified timeout (%s). Interrupting."
                                      % (timeout))

        try:
            json_result = self._parse_json_response(result.stdout)
        except:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.debug')
            raise exceptions.JobError(result.stdout)

        for t_dict in json_result['tests']:
            logdir = os.path.join(self.job.logdir, 'test-results')
            relative_path = astring.string_to_safe_path(str(t_dict['id']))
            logdir = os.path.join(logdir, relative_path)
            t_dict['logdir'] = logdir
            t_dict['logfile'] = os.path.join(logdir, 'debug.log')

        return json_result
Example #19
def check_br_tap_params(br_name, tap_name, libvirt_manage=True):
    """
    When macTableManager is set to libvirt, libvirt disables kernel management
    of the MAC table (in the case of the Linux host bridge, this means enabling
    vlan_filtering on the bridge, and disabling learning and unicast_filter for
    all bridge ports), and explicitly adds/removes entries to the table
    according to the MAC addresses in the domain interface configurations.
    The bridge's vlan_filtering should be 1, and the tap device connected to
    this bridge has unicast_flood and learning set to '0'.

    :param br_name: string, name of the bridge
    :param tap_name: string, name of the tap device
    :param libvirt_manage: bool, whether MAC table is managed by libvirt
    :return: True or False
    """
    cmd1 = 'cat /sys/class/net/%s/bridge/vlan_filtering' % br_name
    cmd2 = 'cat /sys/class/net/%s/brif/%s/learning' % (br_name, tap_name)
    cmd3 = 'cat /sys/class/net/%s/brif/%s/unicast_flood' % (br_name, tap_name)
    try:
        vlan_filtering = process.run(cmd1, ignore_status=True,
                                     shell=True).stdout_text.strip()
        learning = process.run(cmd2, ignore_status=True,
                               shell=True).stdout_text.strip()
        unicast_flood = process.run(cmd3, ignore_status=True,
                                    shell=True).stdout_text.strip()
        logging.debug("bridge's vlan_filtering is {1}, {0} learning is {2}, "
                      "unicast_flood is {3}".format(tap_name, vlan_filtering,
                                                    learning, unicast_flood))
        if libvirt_manage:
            assert vlan_filtering == '1'
            assert learning == '0'
            assert unicast_flood == '0'
        else:
            assert vlan_filtering == '0'
            assert learning == '1'
            assert unicast_flood == '1'
    except AssertionError:
        stacktrace.log_exc_info(sys.exc_info())
        return False
    return True
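A short, hedged usage sketch; `br0`/`vnet0` are hypothetical device names and `test` stands for the avocado-vt test object available in that context.

if not check_br_tap_params("br0", "vnet0", libvirt_manage=True):
    test.fail("Bridge/tap MAC-table settings do not match "
              "macTableManager='libvirt'")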
Example #20
    def _run_avocado(self):
        """
        Auxiliary method to run_avocado.

        We have to override this method because the avocado-vt plugin
        has to override the behavior that tests shouldn't raise
        exceptions.TestSkipError by themselves in avocado. In the old
        avocado-vt case, that rule is not in place, so we have to be
        a little more lenient for correct test status reporting.
        """
        testMethod = getattr(self, self._testMethodName)
        self._start_logging()
        self.sysinfo_logger.start_test_hook()
        test_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        try:
            self.setUp()
        except exceptions.TestSkipError as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSkipError(details)
Example #21
    def map_method(self, method_name, *args):
        """
        Maps method_name on each extension in case the extension has the attr

        :param method_name: Name of the method to be called on each ext
        :param args: Arguments to be passed to all called functions
        """
        for ext in self.extensions:
            try:
                if hasattr(ext.obj, method_name):
                    method = getattr(ext.obj, method_name)
                    method(*args)
            except SystemExit:
                raise
            except KeyboardInterrupt:
                raise
            except:  # catch any exception pylint: disable=W0702
                stacktrace.log_exc_info(sys.exc_info(),
                                        logger='avocado.app.debug')
                LOG_UI.error('Error running method "%s" of plugin "%s": %s',
                             method_name, ext.name,
                             sys.exc_info()[1])
Example #22
    def _run_avocado(self):
        """
        Auxiliary method to run_avocado.

        We have to override this method because the avocado-vt plugin
        has to override the behavior that tests shouldn't raise
        exceptions.TestSkipError by themselves in avocado. In the old
        avocado-vt case, that rule is not in place, so we have to be
        a little more lenient for correct test status reporting.
        """
        testMethod = getattr(self, self._testMethodName)
        self._start_logging()
        self.sysinfo_logger.start_test_hook()
        test_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        try:
            self.setUp()
        except exceptions.TestSkipError as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSkipError(details)
Example #23
    def _tearDown(self):
        """
        Auxiliary method to run tearDown.
        """
        try:
            if self.__skip_test is False:
                self.__phase = 'TEARDOWN'
                self.tearDown()
        except exceptions.TestSkipError as details:
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            skip_illegal_msg = (f'Using skip decorators in tearDown() '
                                f'is not allowed in '
                                f'avocado, you must fix your '
                                f'test. Original skip exception: {details}')
            raise exceptions.TestError(skip_illegal_msg)
        except exceptions.TestCancel:
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            raise
        except:  # avoid old-style exception failures pylint: disable=W0702
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            details = sys.exc_info()[1]
            raise exceptions.TestSetupFail(details)
Example #24
    def run_suite(self,
                  test_suite,
                  variants,
                  timeout=0,
                  replay_map=None,
                  suite_order="variants-per-test"):
        """
        Run one or more tests and report with test result.

        :param test_suite: a test suite (unused here, self.job.references
                           is used instead).
        :param variants: a varianter iterator (unused here).

        :return: a set with types of test failures.
        """
        del test_suite  # using self.job.references instead
        del variants  # we're not using multiplexation here
        if suite_order != "variants-per-test" and suite_order is not None:
            raise exceptions.JobError("execution-order %s is not supported "
                                      "for remote execution." % suite_order)
        del suite_order  # suite_order is ignored for now
        if not timeout:  # avoid timeout = 0
            timeout = None
        summary = set()

        stdout_backup = sys.stdout
        stderr_backup = sys.stderr
        fabric_debugfile = os.path.join(self.job.logdir, 'remote.log')
        paramiko_logger = logging.getLogger('paramiko')
        fabric_logger = logging.getLogger('avocado.fabric')
        remote_logger = logging.getLogger('avocado.remote')
        app_logger = logging.getLogger('avocado.debug')
        fmt = ('%(asctime)s %(module)-10.10s L%(lineno)-.4d %('
               'levelname)-5.5s| %(message)s')
        formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')
        file_handler = logging.FileHandler(filename=fabric_debugfile)
        file_handler.setFormatter(formatter)
        fabric_logger.addHandler(file_handler)
        paramiko_logger.addHandler(file_handler)
        remote_logger.addHandler(file_handler)
        if self.job.args.show_job_log:
            output.add_log_handler(paramiko_logger.name)
        logger_list = [output.LOG_JOB]
        sys.stdout = output.LoggingFile(loggers=logger_list)
        sys.stderr = output.LoggingFile(loggers=logger_list)
        try:
            try:
                self.setup()
                avocado_installed, _ = self.check_remote_avocado()
                if not avocado_installed:
                    raise exceptions.JobError('Remote machine does not seem to'
                                              ' have avocado installed')
            except Exception as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                raise exceptions.JobError(details)
            results = self.run_test(self.job.references, timeout)
            remote_log_dir = os.path.dirname(results['debuglog'])
            self.result.tests_total = results['total']
            local_log_dir = self.job.logdir
            for tst in results['tests']:
                name = tst['test'].split('-', 1)
                name = [name[0]] + name[1].split(';')
                if len(name) == 3:
                    name[2] = {"variant_id": name[2]}
                name = TestID(*name, no_digits=-1)
                state = dict(name=name,
                             time_elapsed=tst['time'],
                             time_start=tst['start'],
                             time_end=tst['end'],
                             status=tst['status'],
                             logdir=tst['logdir'],
                             logfile=tst['logfile'],
                             fail_reason=tst['fail_reason'],
                             job_logdir=local_log_dir,
                             job_unique_id='')
                self.result.start_test(state)
                self.job._result_events_dispatcher.map_method(
                    'start_test', self.result, state)
                self.result.check_test(state)
                self.job._result_events_dispatcher.map_method(
                    'end_test', self.result, state)
                if state['status'] == "INTERRUPTED":
                    summary.add("INTERRUPTED")
                elif not status.mapping[state['status']]:
                    summary.add("FAIL")
            zip_filename = remote_log_dir + '.zip'
            zip_path_filename = os.path.join(local_log_dir,
                                             os.path.basename(zip_filename))
            self.remote.receive_files(local_log_dir, zip_filename)
            archive.uncompress(zip_path_filename, local_log_dir)
            os.remove(zip_path_filename)
            self.result.end_tests()
            self.job._result_events_dispatcher.map_method(
                'post_tests', self.job)
        finally:
            try:
                self.tear_down()
            except Exception as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                raise exceptions.JobError(details)
            sys.stdout = stdout_backup
            sys.stderr = stderr_backup
        return summary
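A standalone sketch of the remote test-id parsing done in run_suite() above, using a hypothetical id of the form `<index>-<name>;<variant>`.

tst_id = "1-passtest.py:PassTest.test;short-tree-0ead"

name = tst_id.split('-', 1)            # ['1', 'passtest.py:PassTest.test;short-tree-0ead']
name = [name[0]] + name[1].split(';')  # ['1', 'passtest.py:PassTest.test', 'short-tree-0ead']
if len(name) == 3:
    name[2] = {"variant_id": name[2]}  # third element becomes the variant payload
# run_suite() then unpacks this as TestID(*name, no_digits=-1)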
Example #25
    def _run_avocado(self):
        """
        Auxiliary method to run_avocado.
        """
        testMethod = getattr(self, self._testMethodName)
        if self._config.get("run.test_runner") != 'nrunner':
            self._start_logging()
        if self.__sysinfo_enabled:
            self.__sysinfo_logger.start()
        test_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        skip_test_condition = getattr(testMethod, '__skip_test_condition__',
                                      False)
        skip_test_condition_negate = getattr(testMethod,
                                             '__skip_test_condition_negate__',
                                             False)
        if skip_test_condition:
            if callable(skip_test_condition):
                if skip_test_condition_negate:
                    skip_test = not bool(skip_test_condition(self))
                else:
                    skip_test = bool(skip_test_condition(self))
            else:
                if skip_test_condition_negate:
                    skip_test = not bool(skip_test_condition)
                else:
                    skip_test = bool(skip_test_condition)
        else:
            skip_test = bool(skip_test_condition)
        try:
            if skip_test is False:
                self.__phase = 'SETUP'
                self.setUp()
        except exceptions.TestSkipError as details:
            skip_test = True
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            raise exceptions.TestSkipError(details)
        except exceptions.TestCancel as details:
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            raise
        except:  # Old-style exceptions are not inherited from Exception()
            stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
            details = sys.exc_info()[1]
            raise exceptions.TestSetupFail(details)
        else:
            try:
                self.__phase = 'TEST'
                if inspect.iscoroutinefunction(testMethod):
                    loop = asyncio.get_event_loop()
                    loop.run_until_complete(testMethod())
                else:
                    testMethod()
            except exceptions.TestCancel as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                raise
            except:  # Old-style exceptions are not inherited from Exception() pylint: disable=W0702
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                details = sys.exc_info()[1]
                if not isinstance(details,
                                  Exception):  # Avoid passing nasty exc
                    details = exceptions.TestError("%r: %s" %
                                                   (details, details))
                test_exception = details
                self.log.debug("Local variables:")
                local_vars = inspect.trace()[1][0].f_locals
                for key, value in local_vars.items():
                    self.log.debug(' -> %s %s: %s', key, type(value), value)
        finally:
            try:
                if skip_test is False:
                    self.__phase = 'TEARDOWN'
                    self.tearDown()
            except exceptions.TestSkipError as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                skip_illegal_msg = ('Using skip decorators in tearDown() '
                                    'is not allowed in '
                                    'avocado, you must fix your '
                                    'test. Original skip exception: %s' %
                                    details)
                raise exceptions.TestError(skip_illegal_msg)
            except exceptions.TestCancel as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                raise
            except:  # avoid old-style exception failures pylint: disable=W0702
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                details = sys.exc_info()[1]
                cleanup_exception = exceptions.TestSetupFail(details)

        whiteboard_file = os.path.join(self.logdir, 'whiteboard')
        genio.write_file(whiteboard_file, self.whiteboard)

        # pylint: disable=E0702
        if test_exception is not None:
            raise test_exception
        elif cleanup_exception is not None:
            raise cleanup_exception
        elif stdout_check_exception is not None:
            raise stdout_check_exception
        elif stderr_check_exception is not None:
            raise stderr_check_exception
        elif self.__log_warn_used:
            raise exceptions.TestWarn("Test passed but there were warnings "
                                      "during execution. Check the log for "
                                      "details.")

        self.__status = 'PASS'
Example #26
    def _runTest(self):
        params = self.params

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        self._log_parameters()

        # Warn of this special condition in related location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        subtest_dirs = self._get_subtest_dirs()

        # Get the test routine corresponding to the specified
        # test type
        logging.debug("Searching for test modules that match "
                      "'type = %s' and 'provider = %s' "
                      "on this cartesian dict",
                      params.get("type"),
                      params.get("provider", None))

        t_types = params.get("type").split()

        utils.insert_dirs_to_path(subtest_dirs)

        test_modules = utils.find_test_modules(t_types, subtest_dirs)

        # Open the environment file
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        if params.get_boolean("job_env_cleanup", "yes"):
            self.runner_queue.put({"func_at_exit": cleanup_env,
                                   "args": (env_filename, self.env_version),
                                   "once": True})

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Pre-process
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self._safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self._safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s" %
                                                  error_message)

                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self._safe_env_save(env)
                    raise

            finally:
                # Post-process
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except:  # nopep8 Old-style exceptions are not inherited from Exception()

                        stacktrace.log_exc_info(sys.exc_info(),
                                                'avocado.test')
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s",
                                      sys.exc_info()[1])
                finally:
                    if self._safe_env_save(env) or params.get("env_cleanup", "no") == "yes":
                        env.destroy()   # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)

        return test_passed
Example #27
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestSkipError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = list(params.keys())
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Warn of this special condition in related location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Find the test
        subtest_dirs = []
        test_filter = bootstrap.test_filter

        other_subtests_dirs = params.get("other_tests_dirs", "")
        for d in other_subtests_dirs.split():
            d = os.path.join(*d.split("/"))
            subtestdir = os.path.join(self.bindir, d, "tests")
            if not os.path.isdir(subtestdir):
                raise exceptions.TestError("Directory %s does not "
                                           "exist" % subtestdir)
            subtest_dirs += data_dir.SubdirList(subtestdir,
                                                test_filter)

        provider = params.get("provider", None)

        if provider is None:
            # Verify if we have the correspondent source file for
            # it
            generic_subdirs = asset.get_test_provider_subdirs(
                'generic')
            for generic_subdir in generic_subdirs:
                subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                    test_filter)
            specific_subdirs = asset.get_test_provider_subdirs(
                params.get("vm_type"))
            for specific_subdir in specific_subdirs:
                subtest_dirs += data_dir.SubdirList(
                    specific_subdir, bootstrap.test_filter)
        else:
            provider_info = asset.get_test_provider_info(provider)
            for key in provider_info['backends']:
                subtest_dirs += data_dir.SubdirList(
                    provider_info['backends'][key]['path'],
                    bootstrap.test_filter)

        subtest_dir = None

        # Get the test routine corresponding to the specified
        # test type
        logging.debug("Searching for test modules that match "
                      "'type = %s' and 'provider = %s' "
                      "on this cartesian dict",
                      params.get("type"),
                      params.get("provider", None))

        t_types = params.get("type").split()
        # Make sure we can load provider_lib in tests
        for s in subtest_dirs:
            if os.path.dirname(s) not in sys.path:
                sys.path.insert(0, os.path.dirname(s))

        test_modules = {}
        for t_type in t_types:
            for d in subtest_dirs:
                module_path = os.path.join(d, "%s.py" % t_type)
                if os.path.isfile(module_path):
                    logging.debug("Found subtest module %s",
                                  module_path)
                    subtest_dir = d
                    break
            if subtest_dir is None:
                msg = ("Could not find test file %s.py on test "
                       "dirs %s" % (t_type, subtest_dirs))
                raise exceptions.TestError(msg)
            # Load the test module
            f, p, d = imp.find_module(t_type, [subtest_dir])
            test_modules[t_type] = imp.load_module(t_type, f, p, d)
            f.close()

        # TODO: the environment file is deprecated code, and should be removed
        # in future versions. Right now, it's being created on an Avocado temp
        # dir that is only persisted during the runtime of one job, which is
        # different from the original idea of the environment file (which was
        # to persist information across virt-test/avocado-vt job runs)
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({"func_at_exit": cleanup_env,
                               "args": (env_filename, self.env_version),
                               "once": True})

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self.__safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self.__safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s" %
                                                  error_message)

                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self.__safe_env_save(env)
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except:  # nopep8 Old-style exceptions are not inherited from Exception()

                        stacktrace.log_exc_info(sys.exc_info(),
                                                'avocado.test')
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s",
                                      sys.exc_info()[1])
                finally:
                    if self.__safe_env_save(env) or params.get("env_cleanup", "no") == "yes":
                        env.destroy()   # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)

        return test_passed
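The `imp`-based module loading above is deprecated on Python 3; below is a hedged sketch of the same "find `<t_type>.py` under the subtest dirs and load it" step using `importlib` (the helper name and the RuntimeError are assumptions, not part of the snippet).

import importlib.util
import os

def load_test_module(t_type, subtest_dirs):
    for d in subtest_dirs:
        module_path = os.path.join(d, "%s.py" % t_type)
        if os.path.isfile(module_path):
            spec = importlib.util.spec_from_file_location(t_type, module_path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return module
    raise RuntimeError("Could not find test file %s.py in dirs %s"
                       % (t_type, subtest_dirs))

# e.g. test_modules[t_type] = load_test_module(t_type, subtest_dirs)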
Example #28
        a little more lenient for correct test status reporting.
        """
        testMethod = getattr(self, self._testMethodName)
        self._start_logging()
        self.sysinfo_logger.start_test_hook()
        test_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        try:
            self.setUp()
        except exceptions.TestSkipError as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSkipError(details)
        except Exception as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSetupFail(details)
        try:
            testMethod()
        except Exception as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            test_exception = details
        finally:
            try:
                self.tearDown()
            except Exception as details:
                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                cleanup_exception = details

        whiteboard_file = os.path.join(self.logdir, 'whiteboard')
        genio.write_file(whiteboard_file, self.whiteboard)
Example #29
    def run_suite(self, test_suite, variants, timeout=0, replay_map=None,
                  suite_order="variants-per-test"):
        """
        Run one or more tests and report with test result.

        :param test_suite: a test suite (unused here, self.job.references
                           is used instead).
        :param variants: a varianter iterator (unused here).

        :return: a set with types of test failures.
        """
        del test_suite     # using self.job.references instead
        del variants            # we're not using multiplexation here
        if suite_order != "variants-per-test" and suite_order is not None:
            raise exceptions.JobError("execution-order %s is not supported "
                                      "for remote execution." % suite_order)
        del suite_order     # suite_order is ignored for now
        if not timeout:     # avoid timeout = 0
            timeout = None
        summary = set()

        stdout_backup = sys.stdout
        stderr_backup = sys.stderr
        fabric_debugfile = os.path.join(self.job.logdir, 'remote.log')
        paramiko_logger = logging.getLogger('paramiko')
        fabric_logger = logging.getLogger('avocado.fabric')
        remote_logger = logging.getLogger('avocado.remote')
        fmt = ('%(asctime)s %(module)-10.10s L%(lineno)-.4d %('
               'levelname)-5.5s| %(message)s')
        formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')
        file_handler = logging.FileHandler(filename=fabric_debugfile)
        file_handler.setFormatter(formatter)
        fabric_logger.addHandler(file_handler)
        paramiko_logger.addHandler(file_handler)
        remote_logger.addHandler(file_handler)
        if self.job.args.show_job_log:
            output.add_log_handler(paramiko_logger.name)
        logger_list = [output.LOG_JOB]
        sys.stdout = output.LoggingFile(loggers=logger_list)
        sys.stderr = output.LoggingFile(loggers=logger_list)
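        # From this point on, anything written to sys.stdout/sys.stderr by
        # this process is routed into the job log rather than the console.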
        try:
            try:
                self.setup()
                avocado_installed, _ = self.check_remote_avocado()
                if not avocado_installed:
                    raise exceptions.JobError('Remote machine does not seem to'
                                              ' have avocado installed')
            except Exception as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                raise exceptions.JobError(details)
            results = self.run_test(self.job.references, timeout)
            remote_log_dir = os.path.dirname(results['debuglog'])
            self.result.tests_total = results['total']
            local_log_dir = self.job.logdir
            for tst in results['tests']:
                name = tst['id'].split('-', 1)
                name = [name[0]] + name[1].split(';')
                if len(name) == 3:
                    name[2] = {"variant_id": name[2]}
                name = TestID(*name, no_digits=-1)
                state = dict(name=name,
                             time_elapsed=tst['time'],
                             time_start=tst['start'],
                             time_end=tst['end'],
                             status=tst['status'],
                             logdir=tst['logdir'],
                             logfile=tst['logfile'],
                             fail_reason=tst['fail_reason'],
                             job_logdir=local_log_dir,
                             job_unique_id='')
                self.result.start_test(state)
                self.job._result_events_dispatcher.map_method('start_test',
                                                              self.result,
                                                              state)
                self.result.check_test(state)
                self.job._result_events_dispatcher.map_method('end_test',
                                                              self.result,
                                                              state)
                if state['status'] == "INTERRUPTED":
                    summary.add("INTERRUPTED")
                elif not status.mapping[state['status']]:
                    summary.add("FAIL")
            zip_filename = remote_log_dir + '.zip'
            zip_path_filename = os.path.join(local_log_dir,
                                             os.path.basename(zip_filename))
            self.remote.receive_files(local_log_dir, zip_filename)
            archive.uncompress(zip_path_filename, local_log_dir)
            os.remove(zip_path_filename)
            self.result.end_tests()
            self.job._result_events_dispatcher.map_method('post_tests',
                                                          self.job)
        finally:
            try:
                self.tear_down()
            except Exception as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                raise exceptions.JobError(details)
            sys.stdout = stdout_backup
            sys.stderr = stderr_backup
        return summary
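
In the result loop above, each remote test id is split into its parts before being handed to TestID: the leading sequence number is separated at the first '-', and an optional variant follows ';'. A standalone sketch of just that parsing step, assuming ids shaped like '1-passtest.py:PassTest.test;short' (the exact id format is an assumption here):

def split_remote_test_id(raw_id):
    """Split '<seq>-<name>[;<variant>]' into its components, mirroring
    the name handling in run_suite() above."""
    seq, rest = raw_id.split('-', 1)
    parts = [seq] + rest.split(';')
    if len(parts) == 3:
        parts[2] = {"variant_id": parts[2]}
    return parts

# split_remote_test_id('1-passtest.py:PassTest.test;short')
#   -> ['1', 'passtest.py:PassTest.test', {'variant_id': 'short'}]
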
Example #30
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        # Report cloud test version
        # logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = sorted(params.keys())
        for key in keys:
            if key != 'test_cases':
                self.log.info("    %s = %s", key, params[key])

        self.ct_type = self.params.get('ct_type')
        test_script = self.params.get('script')
        class_name = self.params.get('class_name')
        # Import the module
        mod_name = 'cloudtest.tests.ceph_api.tests.%s.%s' % \
                (params.get('sds_mgmt_test_type'), test_script)
        test_module = importlib.import_module(mod_name)

        for _, obj in inspect.getmembers(test_module):
            if (inspect.isclass(obj) and obj.__name__ == class_name
                    and inspect.getmodule(obj) == test_module):
                test_class = obj
                break
        self.log.info("Initialize test class: %s" % class_name)

        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({
            "func_at_exit": utils_env.cleanup_env,
            "args": (env_filename, self.env_version),
            "once": True
        })
        self.runner_queue.put({"func_at_exit": cleanup_token, "once": True})
        comp = test_class(params, env)

        test_passed = False
        try:
            try:
                try:
                    # Preprocess
                    # try:
                    #     params = env_process.preprocess(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)

                    # Run the test function
                    self.log.info("Start to run ceph management API test")
                    try:
                        comp.setup()
                        func = getattr(comp, params.get('func_name', 'test'))
                        # FIXME: workaround for a hang when creating monitors
                        if 'test_monitors' in test_script and \
                           'test_create' in params.get('func_name'):
                            t1 = threading.Thread(target=func)
                            t1.start()
                            time.sleep(20)
                        else:
                            func()
                    finally:
                        self.__safe_env_save(env)

                except Exception as e:
                    # try:
                    #     env_process.postprocess_on_error(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)
                    stacktrace.log_exc_info(sys.exc_info(),
                                            logger='avocado.test')
                    logging.debug("Exception happened during running test")
                    raise e

            finally:
                comp.teardown()
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        # env_process.postprocess(self, params, env)
                        error_message = funcatexit.run_exitfuncs(
                            env, self.ct_type)
                        if error_message:
                            logging.error(error_message)
                    except Exception as e:
                        if test_passed:
                            raise
                        self.log.error(
                            "Exception raised during "
                            "postprocessing: %s", e)

                finally:
                    if self.__safe_env_save(env):
                        env.destroy()  # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            self.log.info("Aborting job (%s)", e)
            raise exceptions.JobError("Aborted job as config specified.")
Example #31
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        utils_misc.set_openstack_environment()

        # Report cloud test version
        # logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = sorted(params.keys())
        for key in keys:
            if key != 'test_cases':
                self.log.info("    %s = %s", key, params[key])

        self.ct_type = self.params.get('ct_type')
        test_script = self.params.get('script')
        class_name = self.params.get('class_name')
        # Import the module
        mod_name = 'cloudtest.tests.nfv.%s' % test_script
        test_module = importlib.import_module(mod_name)

        for _, obj in inspect.getmembers(test_module):
            if (inspect.isclass(obj) and obj.__name__ == class_name
                    and inspect.getmodule(obj) == test_module):
                test_class = obj
                break
        self.log.info("Initialize test class: %s" % class_name)

        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({
            "func_at_exit": utils_env.cleanup_env,
            "args": (env_filename, self.env_version),
            "once": True
        })

        comp = test_class(params, env)

        test_passed = False
        try:
            try:
                try:
                    # Preprocess
                    # try:
                    #     params = env_process.preprocess(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)

                    quotas_to_update = params.get('quotas_need_to_update', '')
                    if quotas_to_update:
                        self.log.info('Need to update quotas before test: %s' %
                                      quotas_to_update)
                        compute_utils = Compute(self.params)
                        quotas_to_update = quotas_to_update.split(',')
                        expected_quota = dict()
                        for q in quotas_to_update:
                            k, v = q.split(':')
                            expected_quota[k] = v
                        compute_utils.update_quotas(expected_quota)

                    # Initialize injection tests including workload and fault
                    # Compare explicitly against 'true' so that empty or
                    # partial values do not accidentally enable injection
                    need_injection = (
                        self.params.get('workload_injection',
                                        'false') == 'true'
                        or self.params.get('fault_injection',
                                           'false') == 'true')

                    force_injection = (
                        self.params.get('workload_injection_force',
                                        'false') == 'true'
                        or self.params.get('fault_injection_force',
                                           'false') == 'true')

                    if need_injection:
                        injector = utils_injection.Injection(params, env)
                        if not injector:
                            self.log.error("Failed to initialize injection")
                            if force_injection:
                                raise Exception("Failed to inject"
                                                "workload and/or fault")

                        if not injector.start() and force_injection:
                            msg = "Failed to inject workload/fault"
                            raise exceptions.InjectionFail(msg)
                        # Sleep specified time after injection
                        delay = int(params.get('sleep_after_injection', 3))
                        logging.info("Sleep %d seconds before running test" %
                                     delay)
                        time.sleep(delay)

                    # Run the test function
                    self.log.info(
                        "Start to run NFV test: '%s:%s:%s'" %
                        (test_script, class_name, params.get('func_name')))
                    try:
                        comp.setup()
                        func = getattr(comp, params.get('func_name', 'test'))
                        func()
                    finally:
                        self.__safe_env_save(env)

                except Exception as e:
                    # try:
                    #     env_process.postprocess_on_error(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)
                    stacktrace.log_exc_info(sys.exc_info(),
                                            logger='avocado.test')
                    logging.debug("Exception happened during running test")
                    raise e

            finally:
                try:
                    comp.teardown()
                except Exception as e:
                    self.log.error("Exception happened during teardown: %s" %
                                   e)

                # Postprocess
                try:
                    try:
                        # Stop injection
                        if need_injection:
                            injector.stop()

                        params['test_passed'] = str(test_passed)
                        # env_process.postprocess(self, params, env)
                        error_message = funcatexit.run_exitfuncs(
                            env, self.ct_type)
                        if error_message:
                            logging.error(error_message)
                    except Exception as e:
                        if test_passed:
                            raise
                        self.log.error(
                            "Exception raised during "
                            "postprocessing: %s", e)

                finally:
                    if self.__safe_env_save(env):
                        env.destroy()  # Force-clean as it can't be stored
                    if params.get('sleep_after_test') is not None:
                        time.sleep(int(params.get('sleep_after_test')))

                    # Perform health check after test if needed
                    health_check = self.params.get(
                        "perform_health_check_after_case", "false")
                    health_check = health_check.lower() == "true"
                    if health_check:
                        self._stop_logging()
                        self._start_logging_hc()
                        cloud_manager = CloudManager(params, env)
                        nodes = cloud_manager.get_controller_node(
                            select_policy="all")
                        nodes.extend(
                            cloud_manager.get_compute_node(
                                select_policy="all"))
                        self._run_health_check(nodes)
                        self._stop_logging_hc()
                        self._start_logging()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            self.log.info("Aborting job (%s)", e)
Example #32
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestSkipError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = list(params.keys())
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Warn of this special condition in related location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Find the test
        subtest_dirs = []
        test_filter = bootstrap.test_filter

        other_subtests_dirs = params.get("other_tests_dirs", "")
        for d in other_subtests_dirs.split():
            d = os.path.join(*d.split("/"))
            subtestdir = os.path.join(self.bindir, d, "tests")
            if not os.path.isdir(subtestdir):
                raise exceptions.TestError("Directory %s does not "
                                           "exist" % subtestdir)
            subtest_dirs += data_dir.SubdirList(subtestdir,
                                                test_filter)

        provider = params.get("provider", None)

        if provider is None:
            # Verify if we have the corresponding source file for it
            generic_subdirs = asset.get_test_provider_subdirs(
                'generic')
            for generic_subdir in generic_subdirs:
                subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                    test_filter)
            specific_subdirs = asset.get_test_provider_subdirs(
                params.get("vm_type"))
            for specific_subdir in specific_subdirs:
                subtest_dirs += data_dir.SubdirList(
                    specific_subdir, bootstrap.test_filter)
        else:
            provider_info = asset.get_test_provider_info(provider)
            for key in provider_info['backends']:
                subtest_dirs += data_dir.SubdirList(
                    provider_info['backends'][key]['path'],
                    bootstrap.test_filter)

        subtest_dir = None

        # Get the test routine corresponding to the specified
        # test type
        logging.debug("Searching for test modules that match "
                      "'type = %s' and 'provider = %s' "
                      "on this cartesian dict",
                      params.get("type"),
                      params.get("provider", None))

        t_types = params.get("type").split()
        # Make sure we can load provider_lib in tests
        for s in subtest_dirs:
            if os.path.dirname(s) not in sys.path:
                sys.path.insert(0, os.path.dirname(s))

        test_modules = {}
        for t_type in t_types:
            for d in subtest_dirs:
                module_path = os.path.join(d, "%s.py" % t_type)
                if os.path.isfile(module_path):
                    logging.debug("Found subtest module %s",
                                  module_path)
                    subtest_dir = d
                    break
            if subtest_dir is None:
                msg = ("Could not find test file %s.py on test"
                       "dirs %s" % (t_type, subtest_dirs))
                raise exceptions.TestError(msg)
            # Load the test module
            f, p, d = imp.find_module(t_type, [subtest_dir])
            test_modules[t_type] = imp.load_module(t_type, f, p, d)
            f.close()

        # TODO: the environment file is deprecated code, and should be removed
        # in future versions. Right now, it's being created on an Avocado temp
        # dir that is only persisted during the runtime of one job, which is
        # different from the original idea of the environment file (which was
        # to persist information across virt-test/avocado-vt job runs)
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({"func_at_exit": cleanup_env,
                               "args": (env_filename, self.env_version),
                               "once": True})

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self.__safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self.__safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s" %
                                                  error_message)

                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self.__safe_env_save(env)
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except:  # nopep8 Old-style exceptions are not inherited from Exception()

                        stacktrace.log_exc_info(sys.exc_info(),
                                                'avocado.test')
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s",
                                      sys.exc_info()[1])
                finally:
                    if self.__safe_env_save(env):
                        env.destroy()   # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)

        return test_passed
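
The module lookup above uses the classic imp-based idiom. On Python 3 the same step, finding '<type>.py' in the candidate directories and importing it, can be sketched with importlib instead (a rough equivalent, not the project's code):

import importlib.util
import os

def load_test_module(t_type, subtest_dirs):
    """Locate '<t_type>.py' in the candidate directories and import it."""
    for d in subtest_dirs:
        module_path = os.path.join(d, "%s.py" % t_type)
        if not os.path.isfile(module_path):
            continue
        spec = importlib.util.spec_from_file_location(t_type, module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module
    raise RuntimeError("Could not find test file %s.py in %s"
                       % (t_type, subtest_dirs))
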
Example #33
class Test(unittest.TestCase):
    """
    Base implementation for the test class.

    You'll inherit from this to write your own tests. Typically you'll want
    to implement setUp(), runTest() and tearDown() methods on your own tests.
    """
    default_params = {}

    def __init__(self,
                 methodName='runTest',
                 name=None,
                 params=None,
                 base_logdir=None,
                 tag=None,
                 job=None,
                 runner_queue=None):
        """
        Initializes the test.

        :param methodName: Name of the main method to run. For the sake of
                           compatibility with the original unittest class,
                           you should not set this.
        :param name: Pretty name of the test. For normal tests, written
                     with the avocado API, this should not be set, this is
                     reserved for running random executables as tests.
        :param base_logdir: Directory where test logs should go. If None
                            provided, it'll use
                            :func:`avocado.core.data_dir.create_job_logs_dir`.
        :param tag: Tag that differentiates 2 executions of the same test name.
                    Example: 'long', 'short', so we can differentiate
                    'sleeptest.long' and 'sleeptest.short'.
        :param job: The job that this test is part of.
        """
        def record_and_warn(*args, **kwargs):
            """ Record call to this function and log warning """
            if not self.__log_warn_used:
                self.__log_warn_used = True
            return original_log_warn(*args, **kwargs)

        if name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__

        self.tag = tag or None

        self.job = job

        basename = os.path.basename(self.name)

        tmpdir = data_dir.get_tmp_dir()

        self.filename = inspect.getfile(self.__class__).rstrip('co')
        self.basedir = os.path.dirname(self.filename)
        self.datadir = self.filename + '.data'

        self.expected_stdout_file = os.path.join(self.datadir,
                                                 'stdout.expected')
        self.expected_stderr_file = os.path.join(self.datadir,
                                                 'stderr.expected')

        self.workdir = utils_path.init_dir(tmpdir, basename)
        self.srcdir = utils_path.init_dir(self.workdir, 'src')
        if base_logdir is None:
            base_logdir = data_dir.create_job_logs_dir()
        base_logdir = os.path.join(base_logdir, 'test-results')
        self.tagged_name = self.get_tagged_name(base_logdir)

        # Let's avoid trouble at logdir init time, since we're interested
        # in a relative directory here
        tagged_name = self.tagged_name
        if tagged_name.startswith('/'):
            tagged_name = tagged_name[1:]

        self.logdir = utils_path.init_dir(base_logdir, tagged_name)
        genio.set_log_file_dir(self.logdir)
        self.logfile = os.path.join(self.logdir, 'debug.log')

        self.stdout_file = os.path.join(self.logdir, 'stdout')
        self.stderr_file = os.path.join(self.logdir, 'stderr')

        self.outputdir = utils_path.init_dir(self.logdir, 'data')
        self.sysinfodir = utils_path.init_dir(self.logdir, 'sysinfo')
        self.sysinfo_logger = sysinfo.SysInfo(basedir=self.sysinfodir)

        self.log = logging.getLogger("avocado.test")
        original_log_warn = self.log.warning
        self.__log_warn_used = False
        self.log.warn = self.log.warning = record_and_warn

        self.stdout_log = logging.getLogger("avocado.test.stdout")
        self.stderr_log = logging.getLogger("avocado.test.stderr")

        mux_entry = ['/test/*']
        if isinstance(params, dict):
            self.default_params = self.default_params.copy()
            self.default_params.update(params)
            params = []
        elif params is None:
            params = []
        elif isinstance(params, tuple):
            params, mux_entry = params[0], params[1]
        self.params = multiplexer.AvocadoParams(params, self.name, self.tag,
                                                mux_entry, self.default_params)

        self.log.info('START %s', self.tagged_name)
        self.log.debug('')

        self.debugdir = None
        self.resultsdir = None
        self.status = None
        self.fail_reason = None
        self.fail_class = None
        self.traceback = None
        self.text_output = None

        self.whiteboard = ''

        self.running = False
        self.time_start = None
        self.time_end = None
        self.paused = False
        self.paused_msg = ''

        self.runner_queue = runner_queue

        self.time_elapsed = None
        unittest.TestCase.__init__(self, methodName=methodName)

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        return "Test(%r)" % self.tagged_name

    def tag_start(self):
        self.running = True
        self.time_start = time.time()

    def tag_end(self):
        self.running = False
        self.time_end = time.time()
        # for consistency's sake, always use the same method
        self.update_time_elapsed(self.time_end)

    def update_time_elapsed(self, current_time=None):
        if current_time is None:
            current_time = time.time()
        self.time_elapsed = current_time - self.time_start

    def report_state(self):
        """
        Send the current test state to the test runner process
        """
        if self.runner_queue is not None:
            self.runner_queue.put(self.get_state())

    def get_state(self):
        """
        Serialize selected attributes representing the test state

        :returns: a dictionary containing relevant test state data
        :rtype: dict
        """
        if self.running and self.time_start:
            self.update_time_elapsed()
        preserve_attr = [
            'basedir', 'debugdir', 'depsdir', 'fail_reason', 'logdir',
            'logfile', 'name', 'resultsdir', 'srcdir', 'status', 'sysinfodir',
            'tag', 'tagged_name', 'text_output', 'time_elapsed', 'traceback',
            'workdir', 'whiteboard', 'time_start', 'time_end', 'running',
            'paused', 'paused_msg', 'fail_class', 'params'
        ]
        state = dict([(key, self.__dict__.get(key)) for key in preserve_attr])
        state['class_name'] = self.__class__.__name__
        state['job_logdir'] = self.job.logdir
        state['job_unique_id'] = self.job.unique_id
        return state

    def get_data_path(self, basename):
        """
        Find a test dependency path inside the test data dir.

        This is a shorthand for an operation that will be commonly
        used on avocado tests, so we feel it deserves its own API.

        :param basename: Basename of the dep file. Ex: ``testsuite.tar.bz2``.

        :return: Path where dependency is supposed to be found.
        """
        return os.path.join(self.datadir, basename)

    def _register_log_file_handler(self,
                                   logger,
                                   formatter,
                                   filename,
                                   log_level=logging.DEBUG):
        file_handler = logging.FileHandler(filename=filename)
        file_handler.setLevel(log_level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        return file_handler

    def start_logging(self):
        """
        Simple helper for adding a file logger to the root logger.
        """
        self.file_handler = logging.FileHandler(filename=self.logfile)
        self.file_handler.setLevel(logging.DEBUG)

        fmt = '%(asctime)s %(levelname)-5.5s| %(message)s'
        formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')

        self.file_handler.setFormatter(formatter)
        self.log.addHandler(self.file_handler)

        stream_fmt = '%(message)s'
        stream_formatter = logging.Formatter(fmt=stream_fmt)

        self.stdout_file_handler = self._register_log_file_handler(
            self.stdout_log, stream_formatter, self.stdout_file)
        self.stderr_file_handler = self._register_log_file_handler(
            self.stderr_log, stream_formatter, self.stderr_file)

    def stop_logging(self):
        """
        Stop the logging activity of the test by cleaning the logger handlers.
        """
        self.log.removeHandler(self.file_handler)

    def get_tagged_name(self, logdir):
        """
        Get a test tagged name.

        Combines name + tag (if present) to obtain a unique name. When the
        associated directory already exists, appends ".$number" until an
        unused name is generated to avoid clashes.

        :param logdir: Log directory being in use for result storage.

        :return: Unique test name
        """
        name = self.name
        if self.tag is not None:
            name += ".%s" % self.tag
        tag = 0
        tagged_name = name
        while os.path.isdir(os.path.join(logdir, tagged_name)):
            tag += 1
            tagged_name = "%s.%s" % (name, tag)
        self.tag = "%s.%s" % (self.tag, tag) if self.tag else str(tag)

        return tagged_name

    def setUp(self):
        """
        Setup stage that the test needs before passing to the actual runTest.

        Must be implemented by tests if they want such a stage. Commonly we'll
        download/compile test suites, create files needed for a test, among
        other possibilities.
        """
        pass

    def runTest(self):
        """
        Actual test payload. Must be implemented by tests.

        In case of an existing test suite wrapper, it'll execute the suite,
        or perform a series of operations, and based on the results of the
        operations decide if the test passed (let the test complete) or
        failed (raise a test-related exception).
        """
        pass

    def tearDown(self):
        """
        Cleanup stage after the runTest is done.

        Examples of cleanup actions are deleting temporary files, restoring
        firewall configurations or other system settings that were changed
        in setUp.
        """
        pass

    def record_reference_stdout(self):
        utils_path.init_dir(self.datadir)
        shutil.copyfile(self.stdout_file, self.expected_stdout_file)

    def record_reference_stderr(self):
        utils_path.init_dir(self.datadir)
        shutil.copyfile(self.stderr_file, self.expected_stderr_file)

    def check_reference_stdout(self):
        if os.path.isfile(self.expected_stdout_file):
            expected = genio.read_file(self.expected_stdout_file)
            actual = genio.read_file(self.stdout_file)
            msg = ('Actual test stdout differs from expected one:\n'
                   'Actual:\n%s\nExpected:\n%s' % (actual, expected))
            self.assertEqual(expected, actual, msg)

    def check_reference_stderr(self):
        if os.path.isfile(self.expected_stderr_file):
            expected = genio.read_file(self.expected_stderr_file)
            actual = genio.read_file(self.stderr_file)
            msg = ('Actual test stderr differs from expected one:\n'
                   'Actual:\n%s\nExpected:\n%s' % (actual, expected))
            self.assertEqual(expected, actual, msg)

    def run(self, result=None):
        """
        Run test method, for compatibility with unittest.TestCase.

        :param result: Unused param, for compatibility with :class:`unittest.TestCase`.
        """
        testMethod = getattr(self, self._testMethodName)
        self.start_logging()
        self.sysinfo_logger.start_test_hook()
        runTest_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        try:
            self.setUp()
        except Exception as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSetupFail(details)
        try:
            testMethod()
        except Exception as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            runTest_exception = details
Example #34
        test_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        try:
            self.setUp()
        except exceptions.TestSkipError as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSkipError(details)
        except Exception as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSetupFail(details)
        try:
            testMethod()
        except Exception as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            test_exception = details
        finally:
            try:
                self.tearDown()
            except Exception as details:
                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
                cleanup_exception = details

        whiteboard_file = os.path.join(self.logdir, 'whiteboard')
        genio.write_file(whiteboard_file, self.whiteboard)

        # pylint: disable=E0702
        if test_exception is not None:
            raise test_exception
        elif cleanup_exception is not None:
Example #35
class HealthCheckTest(test.Test):
    """
    Main test class used to run a cloud test.
    """

    # env_version = utils_env.get_env_version()

    def __init__(self, methodName='runTest', name=None, params=None,
                 base_logdir=None, job=None, runner_queue=None,
                 ct_params=None):
        """
        :note: methodName, name, base_logdir, job and runner_queue params
               are inherited from test.Test
        :param params: avocado/multiplexer params stored as
                       `self.avocado_params`.
        :param ct_params: avocado-HealthCheckTest/cartesian_config params
                          stored as `self.params`.
        """
        self.bindir = data_dir.get_root_dir()

        self.iteration = 0
        self.resultsdir = None
        self.file_handler = None
        self.background_errors = Queue.Queue()
        self.whiteboard = None
        self.casename = name
        super(HealthCheckTest, self).__init__(methodName=methodName, name=name,
                                              params=params,
                                              base_logdir=base_logdir, job=job,
                                              runner_queue=runner_queue)
        self.tmpdir = os.path.dirname(self.workdir)
        # Move self.params to self.avocado_params and initialize TempestTest
        # (cartesian_config) params
        self.avocado_params = self.params
        self.params = utils_params.Params(ct_params)

        self.resultsdir = self.logdir
        self.reportsdir = os.path.join(self.logdir, 'healthcheck.log')
        self.timeout = ct_params.get("test_timeout", self.timeout)
        # utils_misc.set_log_file_dir(self.logdir)

    @property
    def datadir(self):
        """
        Returns the path to the directory that contains test data files

        For HealthCheckTest tests, this always returns None. The reason is that
        individual HealthCheckTest tests do not map 1:1 to a file and do not
        provide the concept of a datadir.
        """
        return None

    @property
    def filename(self):
        """
        Returns the name of the file (path) that holds the current test

        For HealthCheckTest tests, this always returns None. The reason is that
        individual HealthCheckTest tests do not map 1:1 to a file.
        """
        return None

    def _run_avocado(self):
        testMethod = getattr(self, self._testMethodName)
        self._start_logging()
        self.sysinfo_logger.start_test_hook()
        test_exception = None
        cleanup_exception = None
        stdout_check_exception = None
        stderr_check_exception = None
        try:
            self.setUp()
        except exceptions.TestSkipError as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSkipError(details)
        except Exception as details:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.test')
            raise exceptions.TestSetupFail(details)
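
The _run_avocado() prologue above, shared by several of these examples, maps setUp() failures onto two outcomes: an explicit skip propagates as TestSkipError, anything else becomes TestSetupFail. A minimal generic sketch of that mapping, with placeholder exception classes instead of the real avocado ones:

class SkipError(Exception):
    """Stand-in for exceptions.TestSkipError."""

class SetupFail(Exception):
    """Stand-in for exceptions.TestSetupFail."""

def run_setup_phase(setup):
    """Run setup(); skips propagate as SkipError, any other failure is
    wrapped in SetupFail, mirroring the prologue above."""
    try:
        setup()
    except SkipError as details:
        raise SkipError(details)
    except Exception as details:
        raise SetupFail(details)
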