Code example #1
File: __init__.py  Project: shanjinkui/avocado
    def run_test(self, references, timeout):
        """
        Run tests.

        :param references: a list of test references.
        :return: a dictionary with test results.
        """
        def arg_to_dest(arg):
            """
            Turn a long argparse option into its default dest name,
            e.g. "--mux-yaml" -> "mux_yaml"
            """
            return arg[2:].replace('-', '_')

        extra_params = []
        # bool or nargs
        for arg in ["--mux-yaml", "--dry-run",
                    "--filter-by-tags-include-empty"]:
            value = getattr(self.job.args, arg_to_dest(arg), None)
            if value is True:
                extra_params.append(arg)
            elif value:
                extra_params.append("%s %s" % (arg, " ".join(value)))
        # append
        for arg in ["--filter-by-tags"]:
            value = getattr(self.job.args, arg_to_dest(arg), None)
            if value:
                join = ' %s ' % arg
                extra_params.append("%s %s" % (arg, join.join(value)))

        references_str = " ".join(references)

        avocado_cmd = ('avocado run --force-job-id %s --json - '
                       '--archive %s %s' % (self.job.unique_id,
                                            references_str, " ".join(extra_params)))
        try:
            result = self.remote.run(avocado_cmd, ignore_status=True,
                                     timeout=timeout)
            if result.exit_status & exit_codes.AVOCADO_JOB_FAIL:
                raise exceptions.JobError("Remote execution failed with: %s" % result.stderr)

        except CommandTimeout:
            raise exceptions.JobError("Remote execution took longer than "
                                      "specified timeout (%s). Interrupting."
                                      % (timeout))

        try:
            json_result = self._parse_json_response(result.stdout)
        except Exception:
            stacktrace.log_exc_info(sys.exc_info(),
                                    logger='avocado.app.debug')
            raise exceptions.JobError(result.stdout)

        for t_dict in json_result['tests']:
            logdir = os.path.join(self.job.logdir, 'test-results')
            relative_path = astring.string_to_safe_path(str(t_dict['id']))
            logdir = os.path.join(logdir, relative_path)
            t_dict['logdir'] = logdir
            t_dict['logfile'] = os.path.join(logdir, 'debug.log')

        return json_result
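
Examples #1 and #3 call a `_parse_json_response()` helper that is not shown. Judging by the inline parsing in examples #2 and #6 below, it most likely scans the command output for the last line that looks like a JSON dictionary; a minimal sketch of that assumed helper (the real implementation may differ):

    def _parse_json_response(self, output):
        """
        Scan output for the last line that looks like a JSON object
        and decode it (sketch of the helper assumed above).
        """
        import json  # kept local so the sketch is self-contained

        json_result = None
        for line in output.splitlines():
            # 'avocado run --json -' emits the results as one JSON line
            if line.startswith('{') and line.endswith('}'):
                try:
                    json_result = json.loads(line)
                except ValueError:
                    pass  # a log line that merely looked like JSON
        if json_result is None:
            raise ValueError("Could not parse JSON from output:\n%s" % output)
        return json_result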
Code example #2
File: runner.py  Project: PyLearner/avocado
    def run_test(self, urls, timeout):
        """
        Run tests.

        :param urls: a list of test URLs.
        :return: a dictionary with test results.
        """
        avocado_installed_version = self.check_remote_avocado()
        if not avocado_installed_version[0]:
            raise exceptions.JobError('Remote machine does not seem to have '
                                      'avocado installed')

        urls_str = " ".join(urls)
        avocado_check_urls_cmd = ('cd %s; avocado list %s '
                                  '--paginator=off' %
                                  (self.remote_test_dir, urls_str))
        check_urls_result = self.result.remote.run(avocado_check_urls_cmd,
                                                   ignore_status=True,
                                                   timeout=60)
        if check_urls_result.exit_status != 0:
            raise exceptions.JobError(check_urls_result.stdout)

        avocado_cmd = (
            'cd %s; avocado run --force-job-id %s --json - '
            '--archive %s' %
            (self.remote_test_dir, self.result.stream.job_unique_id, urls_str))
        try:
            result = self.result.remote.run(avocado_cmd,
                                            ignore_status=True,
                                            timeout=timeout)
        except CommandTimeout:
            raise exceptions.JobError("Remote execution took longer than "
                                      "specified timeout (%s). Interrupting." %
                                      (timeout))
        json_result = None
        for json_output in result.stdout.splitlines():
            # We expect dictionary:
            if json_output.startswith('{') and json_output.endswith('}'):
                try:
                    json_result = json.loads(json_output)
                except ValueError:
                    pass

        if json_result is None:
            raise ValueError("Could not parse JSON from avocado remote output:"
                             "\n%s" % result.stdout)

        for t_dict in json_result['tests']:
            logdir = os.path.dirname(self.result.stream.debuglog)
            logdir = os.path.join(logdir, 'test-results')
            relative_path = t_dict['url'].lstrip('/')
            logdir = os.path.join(logdir, relative_path)
            t_dict['logdir'] = logdir
            t_dict['logfile'] = os.path.join(logdir, 'debug.log')

        return json_result
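
`check_remote_avocado()` (also used in example #8) apparently returns a tuple whose first element tells whether avocado is installed on the remote machine. The project's actual implementation is not shown; a hedged sketch, assuming a probe via `avocado -v` and a version string that looks like "Avocado 36.0":

    import re

    def check_remote_avocado(self):
        """
        Probe the remote machine for an avocado installation
        (assumed helper; the command and output format are guesses).

        :return: (True, version) when found, (False, None) otherwise.
        """
        result = self.result.remote.run('avocado -v', ignore_status=True,
                                        timeout=60)
        if result.exit_status == 127:  # shell code for "command not found"
            return (False, None)
        match = re.search(r'Avocado (\d[\d.]*)', result.stdout + result.stderr)
        if match:
            return (True, match.group(1))
        return (False, None)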
Code example #3
    def run_test(self, references, timeout):
        """
        Run tests.

        :param references: a list of test references.
        :return: a dictionary with test results.
        """
        extra_params = []
        mux_files = getattr(self.job.args, 'mux_yaml', None) or []
        if mux_files:
            extra_params.append("-m %s" % " ".join(mux_files))

        if getattr(self.job.args, "dry_run", False):
            extra_params.append("--dry-run")
        references_str = " ".join(references)

        avocado_cmd = (
            'avocado run --force-job-id %s --json - '
            '--archive %s %s' %
            (self.job.unique_id, references_str, " ".join(extra_params)))
        try:
            result = self.remote.run(avocado_cmd,
                                     ignore_status=True,
                                     timeout=timeout)
            if result.exit_status & exit_codes.AVOCADO_JOB_FAIL:
                raise exceptions.JobError("Remote execution failed with: %s" %
                                          result.stderr)

        except CommandTimeout:
            raise exceptions.JobError("Remote execution took longer than "
                                      "specified timeout (%s). Interrupting." %
                                      (timeout))

        try:
            json_result = self._parse_json_response(result.stdout)
        except Exception:
            stacktrace.log_exc_info(sys.exc_info(), logger='avocado.debug')
            raise exceptions.JobError(result.stdout)

        for t_dict in json_result['tests']:
            logdir = os.path.join(self.job.logdir, 'test-results')
            relative_path = astring.string_to_safe_path(str(t_dict['test']))
            logdir = os.path.join(logdir, relative_path)
            t_dict['logdir'] = logdir
            t_dict['logfile'] = os.path.join(logdir, 'debug.log')

        return json_result
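
Examples #1 and #3 test the job exit status with a bitwise AND rather than `==`. Avocado's exit codes are bit flags that can be OR-ed together, so one status integer can report several conditions at once; roughly (values as defined in avocado.core.exit_codes):

    AVOCADO_ALL_OK = 0x0000
    AVOCADO_TESTS_FAIL = 0x0001
    AVOCADO_JOB_FAIL = 0x0002
    AVOCADO_FAIL = 0x0004
    AVOCADO_JOB_INTERRUPTED = 0x0008

    # A run can have failing tests *and* a failed job at the same time:
    exit_status = AVOCADO_TESTS_FAIL | AVOCADO_JOB_FAIL  # == 0x0003
    assert exit_status & AVOCADO_JOB_FAIL  # why '&' is used, not '=='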
Code example #4
File: __init__.py  Project: qingemini/avocado
    def setup(self):
        """
        Initialize VM and establish connection
        """
        # Super called after VM is found and initialized
        stdout_claimed_by = self.job.config.get('stdout_claimed_by', None)
        if not stdout_claimed_by:
            self.job.log.info("DOMAIN     : %s",
                              self.job.config.get('vm_domain'))
        try:
            self.vm = vm_connect(self.job.config.get('vm_domain'),
                                 self.job.config.get('vm_hypervisor_uri'))
        except VirtError as exception:
            raise exceptions.JobError(exception)
        if self.vm.start() is False:
            e_msg = ("Could not start VM '%s'" %
                     self.job.config.get('vm_domain'))
            raise exceptions.JobError(e_msg)
        assert self.vm.domain.isActive() is not False
        # If hostname wasn't given, let's try to find out the IP address
        if self.job.config.get('vm_hostname') is None:
            self.job.config['vm_hostname'] = self.vm.ip_address()
            if self.job.config.get('vm_hostname') is None:
                e_msg = ("Could not find the IP address for VM '%s'. Please "
                         "set it explicitly with --vm-hostname" %
                         self.job.config.get('vm_domain'))
                raise exceptions.JobError(e_msg)
        if self.job.config.get('vm_cleanup') is True:
            self.vm.create_snapshot()
            if self.vm.snapshot is None:
                e_msg = ("Could not create snapshot on VM '%s'" %
                         self.job.config.get('vm_domain'))
                raise exceptions.JobError(e_msg)
        # Finish remote setup and copy the tests
        self.job.config['remote_hostname'] = self.job.config.get('vm_hostname')
        self.job.config['remote_port'] = self.job.config.get('vm_port')
        self.job.config['remote_username'] = self.job.config.get('vm_username')
        self.job.config['remote_password'] = self.job.config.get('vm_password')
        self.job.config['remote_key_file'] = self.job.config.get('vm_key_file')
        self.job.config['remote_timeout'] = self.job.config.get('vm_timeout')
        super(VMTestRunner, self).setup()
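
The six `vm_*` to `remote_*` assignments at the end are mechanical; the same mapping can be expressed as a loop. A minimal equivalent sketch:

    # Equivalent to the six explicit assignments above
    for key in ('hostname', 'port', 'username', 'password',
                'key_file', 'timeout'):
        self.job.config['remote_%s' % key] = self.job.config.get('vm_%s' % key)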
Code example #5
File: job.py  Project: PyLearner/avocado
    def _validate_test_suite(self, test_suite):
        try:
            # Do not attempt to validate the tests given on the command line if
            # the tests will not be copied from this system to a remote one
            # using the remote plugin features
            if not getattr(self.args, 'remote_no_copy', False):
                error_msg_parts = self.test_loader.validate_ui(test_suite)
            else:
                error_msg_parts = []
        except KeyboardInterrupt:
            raise exceptions.JobError('Command interrupted by user...')

        if error_msg_parts:
            self._remove_job_results()
            e_msg = '\n'.join(error_msg_parts)
            raise exceptions.OptionValidationError(e_msg)
Code example #6
    def run_test(self, urls):
        """
        Run tests.

        :param urls: a list of test URLs.
        :return: a dictionary with test results.
        """
        avocado_cmd = ('cd %s; avocado run --force-job-id %s --json - '
                       '--archive %s' %
                       (self.remote_test_dir, self.result.stream.job_unique_id,
                        " ".join(urls)))
        result = self.result.remote.run(avocado_cmd,
                                        ignore_status=True,
                                        timeout=None)
        if result.exit_status == 127:
            raise exceptions.JobError('Remote machine does not have avocado '
                                      'installed')
        json_result = None
        for json_output in result.stdout.splitlines():
            # We expect dictionary:
            if json_output.startswith('{') and json_output.endswith('}'):
                try:
                    json_result = json.loads(json_output)
                except ValueError:
                    pass

        if json_result is None:
            raise ValueError("Could not parse JSON from avocado remote output:"
                             "\n%s" % result.stdout)

        for t_dict in json_result['tests']:
            logdir = os.path.dirname(self.result.stream.debuglog)
            logdir = os.path.join(logdir, 'test-results')
            logdir = os.path.join(logdir, os.path.relpath(t_dict['url'], '/'))
            t_dict['logdir'] = logdir
            t_dict['logfile'] = os.path.join(logdir, 'debug.log')

        return json_result
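
The scanning loop keeps overwriting `json_result`, so when the remote output contains several dictionary-looking lines only the last parseable one wins. A small self-contained demonstration of that behavior (the sample output is made up):

    import json

    def last_json_line(stdout):
        """Return the last parseable single-line JSON object in stdout."""
        json_result = None
        for line in stdout.splitlines():
            if line.startswith('{') and line.endswith('}'):
                try:
                    json_result = json.loads(line)
                except ValueError:
                    pass  # dictionary-looking noise, e.g. log lines
        return json_result

    sample = 'INFO: job started\n{oops not json}\n{"tests": [], "total": 0}\n'
    print(last_json_line(sample))  # -> {'tests': [], 'total': 0}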
Code example #7
    def run_once(self, params):
        # Convert params to a Params object
        params = utils_params.Params(params)

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestSkipError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = sorted(params.keys())
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        utils_misc.set_log_file_dir(self.debugdir)

        # Open the environment file
        custom_env_path = params.get("custom_env_path", "")
        if custom_env_path:
            env_path = custom_env_path
        else:
            env_path = params.get("vm_type")
        env_filename = os.path.join(self.bindir, "backends", env_path,
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        other_subtests_dirs = params.get("other_tests_dirs", "")

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []
                    bin_dir = self.bindir

                    for d in other_subtests_dirs.split():
                        # Replace split char.
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(bin_dir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise exceptions.TestError("Directory %s does "
                                                       "not exist" % subtestdir)
                        subtest_dirs += data_dir.SubdirList(subtestdir,
                                                            bootstrap.test_filter)

                    # Verify that we have the corresponding source file for it
                    for generic_subdir in asset.get_test_provider_subdirs('generic'):
                        subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                            bootstrap.test_filter)
                    for multi_host_migration_subdir in asset.get_test_provider_subdirs(
                            'multi_host_migration'):
                        subtest_dirs += data_dir.SubdirList(multi_host_migration_subdir,
                                                            bootstrap.test_filter)

                    for specific_subdir in asset.get_test_provider_subdirs(params.get("vm_type")):
                        subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                            bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug("Searching for test modules that match "
                                  "'type = %s' and 'provider = %s' "
                                  "on this cartesian dict",
                                  params.get("type"), params.get("provider", None))

                    t_types = params.get("type").split()
                    provider = params.get("provider", None)
                    if provider is not None:
                        subtest_dirs = [
                            d for d in subtest_dirs if provider in d]
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on tests"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise exceptions.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s"
                                                  % error_message)

                except Exception as e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    logging.error("Test failed: %s: %s",
                                  e.__class__.__name__, e)
                    try:
                        env_process.postprocess_on_error(
                            self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)
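
This and several of the following examples load test modules with the long-deprecated `imp` module (removed in Python 3.12). An equivalent lookup written with `importlib`, as a rough sketch:

    import importlib.util
    import os

    def load_test_module(t_type, subtest_dir):
        """importlib equivalent of the imp.find_module() /
        imp.load_module() pair used above."""
        module_path = os.path.join(subtest_dir, "%s.py" % t_type)
        spec = importlib.util.spec_from_file_location(t_type, module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # actually executes the module code
        return module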
Code example #8
    def run_suite(self,
                  test_suite,
                  variants,
                  timeout=0,
                  replay_map=None,
                  suite_order="variants-per-test"):
        """
        Run one or more tests and report with test result.

        :param test_suite: a list of tests to run (ignored here in favor
                           of self.job.references)
        :param variants: A varianter iterator (unused here)

        :return: a set with types of test failures.
        """
        del test_suite  # using self.job.references instead
        del variants  # we're not using multiplexation here
        if suite_order != "variants-per-test" and suite_order is not None:
            raise exceptions.JobError("execution-order %s is not supported "
                                      "for remote execution." % suite_order)
        del suite_order  # suite_order is ignored for now
        if not timeout:  # avoid timeout = 0
            timeout = None
        summary = set()

        stdout_backup = sys.stdout
        stderr_backup = sys.stderr
        fabric_debugfile = os.path.join(self.job.logdir, 'remote.log')
        paramiko_logger = logging.getLogger('paramiko')
        fabric_logger = logging.getLogger('avocado.fabric')
        remote_logger = logging.getLogger('avocado.remote')
        app_logger = logging.getLogger('avocado.debug')
        fmt = ('%(asctime)s %(module)-10.10s L%(lineno)-.4d %('
               'levelname)-5.5s| %(message)s')
        formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S')
        file_handler = logging.FileHandler(filename=fabric_debugfile)
        file_handler.setFormatter(formatter)
        fabric_logger.addHandler(file_handler)
        paramiko_logger.addHandler(file_handler)
        remote_logger.addHandler(file_handler)
        if self.job.args.show_job_log:
            output.add_log_handler(paramiko_logger.name)
        logger_list = [output.LOG_JOB]
        sys.stdout = output.LoggingFile(loggers=logger_list)
        sys.stderr = output.LoggingFile(loggers=logger_list)
        try:
            try:
                self.setup()
                avocado_installed, _ = self.check_remote_avocado()
                if not avocado_installed:
                    raise exceptions.JobError('Remote machine does not seem to'
                                              ' have avocado installed')
            except Exception as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                raise exceptions.JobError(details)
            results = self.run_test(self.job.references, timeout)
            remote_log_dir = os.path.dirname(results['debuglog'])
            self.result.tests_total = results['total']
            local_log_dir = self.job.logdir
            for tst in results['tests']:
                name = tst['test'].split('-', 1)
                name = [name[0]] + name[1].split(';')
                if len(name) == 3:
                    name[2] = {"variant_id": name[2]}
                name = TestID(*name, no_digits=-1)
                state = dict(name=name,
                             time_elapsed=tst['time'],
                             time_start=tst['start'],
                             time_end=tst['end'],
                             status=tst['status'],
                             logdir=tst['logdir'],
                             logfile=tst['logfile'],
                             fail_reason=tst['fail_reason'],
                             job_logdir=local_log_dir,
                             job_unique_id='')
                self.result.start_test(state)
                self.job._result_events_dispatcher.map_method(
                    'start_test', self.result, state)
                self.result.check_test(state)
                self.job._result_events_dispatcher.map_method(
                    'end_test', self.result, state)
                if state['status'] == "INTERRUPTED":
                    summary.add("INTERRUPTED")
                elif not status.mapping[state['status']]:
                    summary.add("FAIL")
            zip_filename = remote_log_dir + '.zip'
            zip_path_filename = os.path.join(local_log_dir,
                                             os.path.basename(zip_filename))
            self.remote.receive_files(local_log_dir, zip_filename)
            archive.uncompress(zip_path_filename, local_log_dir)
            os.remove(zip_path_filename)
            self.result.end_tests()
            self.job._result_events_dispatcher.map_method(
                'post_tests', self.job)
        finally:
            try:
                self.tear_down()
            except Exception as details:
                stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB)
                raise exceptions.JobError(details)
            sys.stdout = stdout_backup
            sys.stderr = stderr_backup
        return summary
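
The name handling above assumes remote test IDs of the form `<serial>-<name>[;<variant>]`. A short illustration of the splitting on a hypothetical ID:

    # Hypothetical remote test id
    tst_id = "1-passtest.py:PassTest.test;short"
    name = tst_id.split('-', 1)
    name = [name[0]] + name[1].split(';')
    print(name)  # ['1', 'passtest.py:PassTest.test', 'short']
    # len(name) == 3, so the third field becomes {"variant_id": "short"}
    # before being handed to TestID(*name, no_digits=-1)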
Code example #9
    def _runTest(self):
        params = self.params

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        self._log_parameters()

        # Warn about this special condition in the output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        subtest_dirs = self._get_subtest_dirs()

        # Get the test routine corresponding to the specified
        # test type
        logging.debug("Searching for test modules that match "
                      "'type = %s' and 'provider = %s' "
                      "on this cartesian dict",
                      params.get("type"),
                      params.get("provider", None))

        t_types = params.get("type").split()

        utils.insert_dirs_to_path(subtest_dirs)

        test_modules = utils.find_test_modules(t_types, subtest_dirs)

        # Open the environment file
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        if params.get_boolean("job_env_cleanup", "yes"):
            self.runner_queue.put({"func_at_exit": cleanup_env,
                                   "args": (env_filename, self.env_version),
                                   "once": True})

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Pre-process
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self._safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self._safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s" %
                                                  error_message)

                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self._safe_env_save(env)
                    raise

            finally:
                # Post-process
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except:  # nopep8 Old-style exceptions are not inherited from Exception()

                        stacktrace.log_exc_info(sys.exc_info(),
                                                'avocado.test')
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s",
                                      sys.exc_info()[1])
                finally:
                    if self._safe_env_save(env) or params.get("env_cleanup", "no") == "yes":
                        env.destroy()   # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)

        return test_passed
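
`_safe_env_save()` is called throughout but never shown. Its apparent contract, given the `if self._safe_env_save(env) ...: env.destroy()` call above, is to attempt `env.save()` and signal failure instead of raising; a hedged sketch of such a helper (not the project's actual code):

    def _safe_env_save(self, env):
        """
        Try to save the environment; on failure, log the problem and
        return True so the caller knows the env file is unusable.
        """
        try:
            env.save()
        except Exception as details:
            logging.warning("Unable to save environment: %s (%s)", details,
                            env)
            return True  # caller should env.destroy()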
Code example #10
File: test.py  Project: neuswcjr/avocado-vt
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestSkipError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = list(params.keys())
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Warn about this special condition in the output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Find the test
        subtest_dirs = []
        test_filter = bootstrap.test_filter

        other_subtests_dirs = params.get("other_tests_dirs", "")
        for d in other_subtests_dirs.split():
            d = os.path.join(*d.split("/"))
            subtestdir = os.path.join(self.bindir, d, "tests")
            if not os.path.isdir(subtestdir):
                raise exceptions.TestError("Directory %s does not "
                                           "exist" % subtestdir)
            subtest_dirs += data_dir.SubdirList(subtestdir,
                                                test_filter)

        provider = params.get("provider", None)

        if provider is None:
            # Verify that we have the corresponding source file for it
            generic_subdirs = asset.get_test_provider_subdirs(
                'generic')
            for generic_subdir in generic_subdirs:
                subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                    test_filter)
            specific_subdirs = asset.get_test_provider_subdirs(
                params.get("vm_type"))
            for specific_subdir in specific_subdirs:
                subtest_dirs += data_dir.SubdirList(
                    specific_subdir, bootstrap.test_filter)
        else:
            provider_info = asset.get_test_provider_info(provider)
            for key in provider_info['backends']:
                subtest_dirs += data_dir.SubdirList(
                    provider_info['backends'][key]['path'],
                    bootstrap.test_filter)

        subtest_dir = None

        # Get the test routine corresponding to the specified
        # test type
        logging.debug("Searching for test modules that match "
                      "'type = %s' and 'provider = %s' "
                      "on this cartesian dict",
                      params.get("type"),
                      params.get("provider", None))

        t_types = params.get("type").split()
        # Make sure we can load provider_lib in tests
        for s in subtest_dirs:
            if os.path.dirname(s) not in sys.path:
                sys.path.insert(0, os.path.dirname(s))

        test_modules = {}
        for t_type in t_types:
            for d in subtest_dirs:
                module_path = os.path.join(d, "%s.py" % t_type)
                if os.path.isfile(module_path):
                    logging.debug("Found subtest module %s",
                                  module_path)
                    subtest_dir = d
                    break
            if subtest_dir is None:
                msg = ("Could not find test file %s.py on test"
                       "dirs %s" % (t_type, subtest_dirs))
                raise exceptions.TestError(msg)
            # Load the test module
            f, p, d = imp.find_module(t_type, [subtest_dir])
            test_modules[t_type] = imp.load_module(t_type, f, p, d)
            f.close()

        # TODO: the environment file is deprecated code, and should be removed
        # in future versions. Right now, it's being created on an Avocado temp
        # dir that is only persisted during the runtime of one job, which is
        # different from the original idea of the environment file (which was
        # persist information accross virt-test/avocado-vt job runs)
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({"func_at_exit": cleanup_env,
                               "args": (env_filename, self.env_version),
                               "once": True})

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self.__safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self.__safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s" %
                                                  error_message)

                except:  # nopep8 Old-style exceptions are not inherited from Exception()
                    stacktrace.log_exc_info(sys.exc_info(), 'avocado.test')
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self.__safe_env_save(env)
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except:  # nopep8 Old-style exceptions are not inherited from Exception()

                        stacktrace.log_exc_info(sys.exc_info(),
                                                'avocado.test')
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s",
                                      sys.exc_info()[1])
                finally:
                    if self.__safe_env_save(env):
                        env.destroy()   # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise exceptions.JobError("Abort requested (%s)" % e)

        return test_passed
Code example #11
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        # Report cloud test version
        # logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = sorted(params.keys())
        for key in keys:
            if key != 'test_cases':
                self.log.info("    %s = %s", key, params[key])

        self.ct_type = self.params.get('ct_type')
        test_script = self.params.get('script')
        class_name = self.params.get('class_name')
        # Import the module
        mod_name = 'cloudtest.tests.ceph_api.tests.%s.%s' % \
                (params.get('sds_mgmt_test_type'), test_script)
        test_module = importlib.import_module(mod_name)

        for _, obj in inspect.getmembers(test_module):
            if (inspect.isclass(obj) and obj.__name__ == class_name
                    and inspect.getmodule(obj) == test_module):
                test_class = obj
                break
        self.log.info("Initialize test class: %s" % class_name)

        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({
            "func_at_exit": utils_env.cleanup_env,
            "args": (env_filename, self.env_version),
            "once": True
        })
        self.runner_queue.put({"func_at_exit": cleanup_token, "once": True})
        comp = test_class(params, env)

        test_passed = False
        try:
            try:
                try:
                    # Preprocess
                    # try:
                    #     params = env_process.preprocess(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)

                    # Run the test function
                    self.log.info("Start to run ceph management API test")
                    try:
                        comp.setup()
                        func = getattr(comp, params.get('func_name', 'test'))
                        # fixme: To solve create monitor hang
                        if 'test_monitors' in test_script and \
                           'test_create' in params.get('func_name'):
                            t1 = threading.Thread(target=func)
                            t1.start()
                            time.sleep(20)
                        else:
                            func()
                    finally:
                        self.__safe_env_save(env)
                    test_passed = True

                except Exception as e:
                    # try:
                    #     env_process.postprocess_on_error(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)
                    stacktrace.log_exc_info(sys.exc_info(),
                                            logger='avocado.test')
                    logging.debug("Exception happened during running test")
                    raise e

            finally:
                comp.teardown()
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        # env_process.postprocess(self, params, env)
                        error_message = funcatexit.run_exitfuncs(
                            env, self.ct_type)
                        if error_message:
                            logging.error(error_message)
                    except Exception as e:
                        if test_passed:
                            raise
                        self.log.error(
                            "Exception raised during "
                            "postprocessing: %s", e)

                finally:
                    if self.__safe_env_save(env):
                        env.destroy()  # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            self.log.info("Aborting job (%s)", e)
            raise exceptions.JobError("Aborted job as config specified.")
Code example #12
    def run_once(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.info("Starting test %s", self.tag)
        logging.debug("Test parameters:")
        keys = sorted(params.keys())
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Warn about this special condition in the output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Open the environment file
        env_filename = os.path.join(
            data_dir.get_backend_dir(params.get("vm_type")),
            params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_types = None
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(self.bindir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise exceptions.TestError("Directory %s does not "
                                                       "exist" % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(subtestdir,
                                                            bootstrap.test_filter)

                    provider = params.get("provider", None)

                    if provider is None:
                        # Verify that we have the corresponding source file for it
                        for generic_subdir in asset.get_test_provider_subdirs('generic'):
                            subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                                bootstrap.test_filter)

                        for specific_subdir in asset.get_test_provider_subdirs(params.get("vm_type")):
                            subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                                bootstrap.test_filter)
                    else:
                        provider_info = asset.get_test_provider_info(provider)
                        for key in provider_info['backends']:
                            subtest_dirs += data_dir.SubdirList(
                                provider_info['backends'][key]['path'],
                                bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug("Searching for test modules that match "
                                  "'type = %s' and 'provider = %s' "
                                  "on this cartesian dict",
                                  params.get("type"), params.get("provider", None))

                    t_types = params.get("type").split()
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                logging.debug("Found subtest module %s",
                                              module_path)
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise exceptions.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise exceptions.TestWarn("funcatexit failed with: %s"
                                                  % error_message)

                except Exception as e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_qemu_command())
                raise exceptions.JobError("Abort requested (%s)" % e)
Code example #13
File: job.py  Project: FreeTommyLi/avocado
    def _run(self, urls=None, multiplex_files=None):
        """
        Unhandled job method. Runs a list of test URLs to its completion.

        :param urls: String with tests to run, separated by whitespace.
                     Optionally, a list of tests (each test a string).
        :param multiplex_files: File that multiplexes a given test url.

        :return: Integer with overall job status. See
                 :mod:`avocado.core.exit_codes` for more information.
        :raise: Any exception (avocado crashed), or
                :class:`avocado.core.exceptions.JobBaseException` errors,
                that configure a job failure.
        """
        if urls is None:
            if self.args and self.args.url is not None:
                urls = self.args.url

        if isinstance(urls, str):
            urls = urls.split()

        if not urls:
            e_msg = "Empty test ID. A test path or alias must be provided"
            raise exceptions.OptionValidationError(e_msg)

        self._make_test_loader()

        params_list = self.test_loader.discover_urls(urls)

        if multiplexer.MULTIPLEX_CAPABLE:
            if multiplex_files is None:
                if self.args and self.args.multiplex_files is not None:
                    multiplex_files = self.args.multiplex_files

            if multiplex_files is not None:
                params_list = self._multiplex_params_list(
                    params_list, multiplex_files)

        self._setup_job_results()

        try:
            test_suite = self.test_loader.discover(params_list)
            error_msg_parts = self.test_loader.validate_ui(test_suite)
        except KeyboardInterrupt:
            raise exceptions.JobError('Command interrupted by user...')

        if error_msg_parts:
            self._remove_job_results()
            e_msg = '\n'.join(error_msg_parts)
            raise exceptions.OptionValidationError(e_msg)

        if not test_suite:
            e_msg = ("No tests found within the specified path(s) "
                     "(Possible reasons: File ownership, permissions, typos)")
            raise exceptions.OptionValidationError(e_msg)

        if self.args is not None:
            self.args.test_result_total = len(test_suite)

        self._make_test_result()
        self._make_test_runner()
        self._start_sysinfo()

        self.view.start_file_logging(self.logfile, self.loglevel,
                                     self.unique_id)
        self.view.logfile = self.logfile
        failures = self.test_runner.run_suite(test_suite)
        self.view.stop_file_logging()
        self._update_latest_link()
        # If it's all good so far, set job status to 'PASS'
        if self.status == 'RUNNING':
            self.status = 'PASS'
        # Let's clean up test artifacts
        if self.args is not None:
            if self.args.archive:
                filename = self.logdir + '.zip'
                archive.create(filename, self.logdir)
            if not self.args.keep_tmp_files:
                data_dir.clean_tmp_files()

        tests_status = not bool(failures)
        if tests_status:
            return exit_codes.AVOCADO_ALL_OK
        else:
            return exit_codes.AVOCADO_TESTS_FAIL
Code example #14
File: job.py  Project: ypu/avocado
    def _run(self, urls=None):
        """
        Unhandled job method. Runs a list of test URLs to its completion.

        :param urls: String with tests to run, separated by whitespace.
                     Optionally, a list of tests (each test a string).
        :return: Integer with overall job status. See
                 :mod:`avocado.core.exit_codes` for more information.
        :raise: Any exception (avocado crashed), or
                :class:`avocado.core.exceptions.JobBaseException` errors,
                that configure a job failure.
        """
        if urls is None:
            urls = getattr(self.args, 'url', None)

        if isinstance(urls, str):
            urls = urls.split()

        if not urls:
            e_msg = "Empty test ID. A test path or alias must be provided"
            raise exceptions.OptionValidationError(e_msg)

        self._make_test_loader()

        params_list = self.test_loader.discover_urls(urls)

        mux = multiplexer.Mux(self.args)
        self._setup_job_results()

        try:
            test_suite = self.test_loader.discover(params_list)
            # Do not attempt to validate the tests given on the command line if
            # the tests will not be copied from this system to a remote one
            # using the remote plugin features
            if not getattr(self.args, 'remote_no_copy', False):
                error_msg_parts = self.test_loader.validate_ui(test_suite)
            else:
                error_msg_parts = []
        except KeyboardInterrupt:
            raise exceptions.JobError('Command interrupted by user...')

        if error_msg_parts:
            self._remove_job_results()
            e_msg = '\n'.join(error_msg_parts)
            raise exceptions.OptionValidationError(e_msg)

        if not test_suite:
            e_msg = ("No tests found within the specified path(s) "
                     "(Possible reasons: File ownership, permissions, typos)")
            raise exceptions.OptionValidationError(e_msg)

        self.args.test_result_total = mux.get_number_of_tests(test_suite)

        self._make_test_result()
        self._make_test_runner()
        self._start_sysinfo()

        self.view.start_file_logging(self.logfile, self.loglevel,
                                     self.unique_id)
        _TEST_LOGGER.info('Job ID: %s', self.unique_id)
        _TEST_LOGGER.info('')

        self.view.logfile = self.logfile
        failures = self.test_runner.run_suite(test_suite, mux)
        self.view.stop_file_logging()
        if not self.standalone:
            self._update_latest_link()
        # If it's all good so far, set job status to 'PASS'
        if self.status == 'RUNNING':
            self.status = 'PASS'
        # Let's clean up test artifacts
        if getattr(self.args, 'archive', False):
            filename = self.logdir + '.zip'
            archive.create(filename, self.logdir)
        if not settings.get_value(
                'runner.behavior', 'keep_tmp_files', key_type=bool,
                default=False):
            data_dir.clean_tmp_files()
        _TEST_LOGGER.info('Test results available in %s', self.logdir)

        tests_status = not bool(failures)
        if tests_status:
            return exit_codes.AVOCADO_ALL_OK
        else:
            return exit_codes.AVOCADO_TESTS_FAIL