Code example #1
File: kernel.py Project: song-buaa/autotest
    def get_kernel_build_ident(self):
        (release, version) = self.get_kernel_build_release()

        if not release or not version:
            raise error.JobError('kernel has no identity')

        return release + '::' + version
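
Note: the identifier built here is the same "release::version" string that post-boot verification (code example #6) matches with startswith(expected_id + '::'). A minimal sketch of composing and splitting that format; JobError is a stand-in for autotest's error.JobError so the snippet runs standalone:

class JobError(Exception):
    pass

def build_ident(release, version):
    # Mirrors get_kernel_build_ident() above.
    if not release or not version:
        raise JobError('kernel has no identity')
    return release + '::' + version

def split_ident(ident):
    # Inverse operation; the '::' prefix match in example #6 relies on this.
    release, _, version = ident.partition('::')
    return release, version

print(build_ident('3.10.0', '1234'))  # -> 3.10.0::1234
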
Code example #2
File: job.py Project: swapnakrishnan2k/autotest
    def _check_post_reboot(self, subdir, running_id=None):
        """
        Perform post-boot checks, such as whether the system configuration
        (specifically, CPUs and partitions) has changed across the reboot.

        @param subdir: The subdir to use in the job.record call.
        @param running_id: An optional running_id to include in the reboot
            failure log message

        @raise JobError: Raised if the current configuration does not match the
            pre-reboot configuration.
        """
        abort_on_mismatch = GLOBAL_CONFIG.get_config_value('CLIENT',
                                                           'abort_on_mismatch',
                                                           type=bool,
                                                           default=False)
        # check to see if any partitions have changed
        partition_list = partition_lib.get_partition_list(self,
                                                          exclude_swap=False)
        mount_info = partition_lib.get_mount_info(partition_list)
        old_mount_info = self._state.get('client', 'mount_info')
        if mount_info != old_mount_info:
            new_entries = mount_info - old_mount_info
            old_entries = old_mount_info - mount_info
            description = ("mounted partitions are different after reboot "
                           "(old entries: %s, new entries: %s)" %
                           (old_entries, new_entries))
            if abort_on_mismatch:
                self._record_reboot_failure(subdir, "reboot.verify_config",
                                            description, running_id=running_id)
                raise error.JobError("Reboot failed: %s" % description)
            else:
                logging.warning(description)

        # check to see if any CPUs have changed
        cpu_count = utils.count_cpus()
        old_count = self._state.get('client', 'cpu_count')
        if cpu_count != old_count:
            description = ('Number of CPUs changed after reboot '
                           '(old count: %d, new count: %d)' %
                           (old_count, cpu_count))
            if abort_on_mismatch:
                self._record_reboot_failure(subdir, 'reboot.verify_config',
                                            description, running_id=running_id)
                raise error.JobError('Reboot failed: %s' % description)
            else:
                logging.warning(description)
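
Note: both the partition and the CPU checks funnel into the same warn-or-abort gate controlled by the CLIENT/abort_on_mismatch config flag. A minimal sketch of that gate in isolation; the failure-recording callback and JobError class are stand-ins, not autotest's real API:

import logging

class JobError(Exception):
    pass

def check_config_mismatch(description, abort_on_mismatch, record_failure):
    # Warn by default; record the failure and abort only when the flag is set.
    if abort_on_mismatch:
        record_failure(description)
        raise JobError("Reboot failed: %s" % description)
    logging.warning(description)

check_config_mismatch("mounted partitions are different after reboot",
                      abort_on_mismatch=False,
                      record_failure=lambda d: None)
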
Code example #3
    def _init_on_demand(self):
        if not self.instantiated:
            try:
                install_grubby_if_necessary()
                Grubby.__init__(self, self.path)
                self.instantiated = True
            except Exception as e:
                raise error.JobError("Unable to instantiate boottool: %s" % e)
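
Note: example #3 is an init-on-demand guard: the expensive Grubby setup runs at most once, and any failure is normalized into a single job-level error type. A generic sketch of the same pattern, with hypothetical stand-ins for the setup call and JobError:

class JobError(Exception):
    pass

def expensive_setup():
    pass  # hypothetical stand-in for install_grubby_if_necessary()

class OnDemand(object):
    def __init__(self):
        self.instantiated = False

    def init_on_demand(self):
        if not self.instantiated:
            try:
                expensive_setup()
                self.instantiated = True
            except Exception as e:
                # Normalize any setup failure into one job-level error.
                raise JobError("Unable to instantiate boottool: %s" % e)

OnDemand().init_on_demand()
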
Code example #4
File: repair_nodes.py Project: KnightKu/Python_demos
    def run_once(self):
        logging.info("starting repair")
        need_hard_repair = []
        need_force_repair = []

        try:
            self.repair_prepare()
        except Exception as err:
            logging.error("failed to prepare: %s, %s" %
                          (str(err), traceback.format_exc()))
            raise error.JobError("failed to prepare")
Code example #5
File: job.py Project: swapnakrishnan2k/autotest
def runjob(control, drop_caches, options):
    """
    Run a job using the given control file.

    This is the main interface to this module.

    @see base_job.__init__ for parameter info.
    """
    control = os.path.abspath(control)
    state = control + '.state'
    # Ensure state file is cleaned up before the job starts to run if autotest
    # is not running with the --continue flag
    if not options.cont and os.path.isfile(state):
        os.remove(state)

    # instantiate the job object ready for the control file.
    myjob = None
    try:
        # Check that the control file is valid
        if not os.path.exists(control):
            raise error.JobError(control + ": control file not found")

        # When continuing, the job is complete when there is no
        # state file; ensure we don't try to continue.
        if options.cont and not os.path.exists(state):
            raise error.JobComplete("all done")

        myjob = job(control=control, drop_caches=drop_caches, options=options)

        # Load in the user's control file, which may do any one of:
        #  1) execute in toto
        #  2) define steps, and select the first via next_step()
        myjob.step_engine()

    except error.JobContinue:
        sys.exit(5)

    except error.JobComplete:
        sys.exit(1)

    except error.JobError as instance:
        logging.error("JOB ERROR: " + str(instance))
        if myjob:
            command = None
            if len(instance.args) > 1:
                command = instance.args[1]
                myjob.record('ABORT', None, command, str(instance))
            myjob.record('END ABORT', None, None, str(instance))
            assert myjob._record_indent == 0
            myjob.complete(1)
        else:
            sys.exit(1)
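
Note: runjob() encodes its outcome in the process exit status: 5 for JobContinue (more steps to run after a reboot), 1 for JobComplete or an abort, and 0 on a clean finish via myjob.complete(0) (see code example #20). A sketch of a caller interpreting those codes, assuming job.complete(status) terminates the process with that status; the wrapper itself is an assumption, only the code values come from the example above:

import subprocess

def interpret_runjob(cmd):
    rc = subprocess.call(cmd)
    if rc == 5:
        return 'continue'   # error.JobContinue: re-invoke with --continue
    if rc == 0:
        return 'good'       # job finished cleanly via complete(0)
    return 'aborted'        # JobComplete / JobError paths exit(1)
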
Code example #6
    def end_reboot_and_verify(self, expected_when, expected_id, subdir,
                              type='src', patches=[]):
        """ Check the passed kernel identifier against the command line
            and the running kernel, and abort the job on mismatch. """

        logging.info("POST BOOT: checking booted kernel "
                     "mark=%d identity='%s' type='%s'",
                     expected_when, expected_id, type)

        running_id = utils.running_os_ident()

        cmdline = utils.read_one_line("/proc/cmdline")

        find_sum = re.compile(r'.*IDENT=(\d+)')
        m = find_sum.match(cmdline)
        cmdline_when = -1
        if m:
            cmdline_when = int(m.groups()[0])

        # We have all the facts, see if they indicate we
        # booted the requested kernel or not.
        bad = False
        if (type == 'src' and expected_id != running_id or
            type == 'rpm' and
                not running_id.startswith(expected_id + '::')):
            logging.error("Kernel identifier mismatch")
            bad = True
        if expected_when != cmdline_when:
            logging.error("Kernel command line mismatch")
            bad = True

        if bad:
            logging.error("   Expected Ident: " + expected_id)
            logging.error("    Running Ident: " + running_id)
            logging.error("    Expected Mark: %d", expected_when)
            logging.error("Command Line Mark: %d", cmdline_when)
            logging.error("     Command Line: " + cmdline)

            self._record_reboot_failure(subdir, "reboot.verify", "boot failure",
                                        running_id=running_id)
            raise error.JobError("Reboot returned with the wrong kernel")

        self.record('GOOD', subdir, 'reboot.verify',
                    utils.running_os_full_version())
        self.end_reboot(subdir, expected_id, patches, running_id=running_id)
Code example #7
    def __mark(self, checkpoint):
        name = self.job.resultdir + '/sequence'
        with open(name) as fd:
            current = int(fd.readline())

        current += 1
        with open(name + '.new', 'w') as fd:
            fd.write('%d' % current)

        os.rename(name + '.new', name)

        logging.debug("checkpoint %d %d", current, checkpoint)

        if current != checkpoint:
            raise error.JobError("selftest: sequence was " +
                    "%d when %d expected" % (current, checkpoint))
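
Note: the write-to-'.new'-then-rename sequence in __mark is the standard atomic-update idiom: os.rename() atomically replaces the old file on POSIX, so a crash mid-write never leaves a half-written sequence file. The idiom as a standalone helper (assuming the counter file already exists and holds an integer):

import os

def bump_sequence(name):
    with open(name) as fd:
        current = int(fd.readline())
    current += 1
    # Write the new value to a sidecar file, then atomically swap it in.
    with open(name + '.new', 'w') as fd:
        fd.write('%d' % current)
    os.rename(name + '.new', name)
    return current
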
Code example #8
    def parallel(self, *tasklist):
        """Run tasks in parallel"""

        pids = []
        old_log_filename = self._logger.global_filename
        for i, task in enumerate(tasklist):
            assert isinstance(task, (tuple, list))
            self._logger.global_filename = old_log_filename + (".%d" % i)

            def task_func():
                # stub out _record_indent with a process-local one
                base_record_indent = self._record_indent
                proc_local = self._job_state.property_factory(
                    '_state', '_record_indent.%d' % os.getpid(),
                    base_record_indent, namespace='client')
                self.__class__._record_indent = proc_local
                task[0](*task[1:])
            pids.append(parallel.fork_start(self.resultdir, task_func))

        old_log_path = os.path.join(self.resultdir, old_log_filename)
        old_log = open(old_log_path, "a")
        exceptions = []
        for i, pid in enumerate(pids):
            # wait for the task to finish
            try:
                parallel.fork_waitfor(self.resultdir, pid)
            except Exception as e:
                exceptions.append(e)
            # copy the logs from the subtask into the main log
            new_log_path = old_log_path + (".%d" % i)
            if os.path.exists(new_log_path):
                new_log = open(new_log_path)
                old_log.write(new_log.read())
                new_log.close()
                old_log.flush()
                os.remove(new_log_path)
        old_log.close()

        self._logger.global_filename = old_log_filename

        # handle any exceptions raised by the parallel tasks
        if exceptions:
            msg = "%d task(s) failed in job.parallel" % len(exceptions)
            raise error.JobError(msg)
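
Note: job.parallel() defers failures: every forked child is waited for and every per-task log is merged back before a single aggregate JobError is raised. A simplified sketch of that collect-then-raise shape using the standard multiprocessing module (an analogy; autotest's own parallel module does the forking in the original):

import multiprocessing

class JobError(Exception):
    pass

def run_parallel(tasks):
    # Call this under an `if __name__ == '__main__':` guard on platforms
    # that spawn rather than fork.
    procs = [multiprocessing.Process(target=t) for t in tasks]
    for p in procs:
        p.start()
    failed = 0
    for p in procs:
        p.join()  # always reap every child before deciding the outcome
        if p.exitcode != 0:
            failed += 1
    if failed:
        raise JobError("%d task(s) failed in job.parallel" % failed)
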
Code example #9
File: server_job.py Project: andykobewu/autotest-1
    def _run_group(self, name, subdir, function, *args, **dargs):
        """
        Underlying method for running something inside of a group.
        """
        result, exc_info = None, None
        try:
            self.record('START', subdir, name)
            result = function(*args, **dargs)
        except error.TestBaseException as e:
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception as e:
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info
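
Note: _run_group brackets each unit of work with START / END <status> records: END GOOD on success, END <exit_status> for test-level failures (captured in exc_info rather than re-raised), and END ABORT plus a JobError for anything unexpected. A minimal sketch of that bracketing, collapsing the test-exception branch for brevity; record() is a stand-in for job.record():

import traceback

class JobError(Exception):
    pass

def run_group(record, name, function, *args, **dargs):
    record('START', name)
    try:
        result = function(*args, **dargs)
    except Exception:
        record('END ABORT', name)
        raise JobError(name + ' failed\n' + traceback.format_exc())
    record('END GOOD', name)
    return result

run_group(lambda *a: None, 'demo', lambda: 42)
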
Code example #10
    def run_once(self, machines, extra_params, cycles):
        VIRT_TYPE = 'qemu'
        VIRT_DIR = data_dir.get_root_dir()
        TEST_DIR = data_dir.get_backend_dir(VIRT_TYPE)
        PROV_DIR = data_dir.get_test_provider_dir('io-github-autotest-qemu')
        SHARED_DIR = os.path.join(VIRT_DIR, 'shared')
        PROV_DIR = os.path.join(PROV_DIR, VIRT_TYPE)

        asset.download_test_provider("io-github-autotest-qemu")
        bootstrap.create_config_files(TEST_DIR, SHARED_DIR, interactive=False)
        bootstrap.create_config_files(TEST_DIR, PROV_DIR, interactive=False)
        bootstrap.create_subtests_cfg(VIRT_TYPE)
        bootstrap.create_guest_os_cfg(VIRT_TYPE)

        sys.path.insert(0, VIRT_DIR)

        CONTROL_MAIN_PART = """
testname = "virt"
bindir = os.path.join(job.testdir, testname)
job.install_pkg(testname, 'test', bindir)

qemu_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'virt')
sys.path.append(qemu_test_dir)
"""
        logging.info("QEMU test running on hosts %s\n", machines)

        _hosts = {}
        for machine in machines:
            _hosts[machine] = Machines(hosts.create_host(machine))

        for host in _hosts.itervalues():
            host.at = autotest_remote.Autotest(host.host)

        cfg_file = os.path.join(TEST_DIR, "cfg", "multi-host-tests.cfg")
        logging.info("CONFIG FILE: '%s' is used for generating"
                     " configuration." % cfg_file)

        if not os.path.exists(cfg_file):
            specific_subdirs = asset.get_test_provider_subdirs(
                "multi_host_migration")[0]
            orig_cfg_file = os.path.join(specific_subdirs, "cfg",
                                         "multi-host-tests.cfg")
            if os.path.exists(orig_cfg_file):
                shutil.copy(orig_cfg_file, cfg_file)
            else:
                raise error.JobError("Config file %s was not found" % cfg_file)

        # Get test set (dictionary list) from the configuration file
        parser = cartesian_config.Parser()
        parser.parse_file(cfg_file)
        parser.parse_string(extra_params)
        test_dicts = parser.get_dicts()

        ips = []
        for machine in machines:
            host = _hosts[machine]
            ips.append(host.host.ip)

        logging.info("")
        for i, params in enumerate(test_dicts):
            logging.info("Test    %d:  %s" % (i, params.get("shortname")))
        logging.info("")

        test_dicts = parser.get_dicts()

        test_dicts_ar = [utils_params.Params(x) for x in test_dicts]

        if not test_dicts_ar:
            raise error.TestNAError("Impossible to start any test with "
                                    "this configuration.")

        for params in test_dicts_ar:

            params['hosts'] = ips

            for vm in params.get("vms").split():
                for nic in params.get('nics', "").split():
                    params['mac_%s_%s' % (nic, vm)] = generate_mac_address()

            params['master_images_clone'] = "image1"
            params['kill_vm'] = "yes"

            s_host = _hosts[machines[0]]
            s_host.params = params.object_params("host1")
            s_host.params['clone_master'] = "yes"
            s_host.params['hostid'] = ips[0]

            for host_id, machine in enumerate(machines[1:]):
                host = _hosts[machine]
                host.params = params.object_params("host%s" % (host_id + 2))
                params['not_preprocess'] = "yes"
                host.params['clone_master'] = "no"
                host.params['hostid'] = ips[host_id + 1]

            # Report the parameters we've received
            logging.debug("Test parameters:")
            for key in sorted(params.keys()):
                logging.debug("    %s = %s", key, params[key])

            for machine in machines:
                host = _hosts[machine]
                host.control = CONTROL_MAIN_PART

            for machine in machines:
                host = _hosts[machine]
                host.control += ("job.run_test('virt', tag='%s', params=%s)" %
                                 (host.params['shortname'], host.params))

            logging.debug('Master control file:\n%s', _hosts[machines[0]].control)
            for machine in machines[1:]:
                host = _hosts[machine]
                logging.debug('Slave control file:\n%s', host.control)

            commands = []

            for machine in machines:
                host = _hosts[machine]
                result_path = os.path.join(self.resultsdir,
                                           host.params["shortname"],
                                           host.host.hostname)
                commands.append(subcommand.subcommand(host.at.run,
                                                      [host.control,
                                                       result_path]))

            try:
                subcommand.parallel(commands)
            except error.AutoservError as e:
                logging.error(e)
Code example #11
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.
        """
        result, exc_info = None, None
        try:
            self.record('START', subdir, name)
            result = function(*args, **dargs)
        except error.TestBaseException as e:
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception as e:
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info


    def run_group(self, function, *args, **dargs):
        """\
        function:
                subroutine to run
        *args:
                arguments for the function
        """

        name = function.__name__
Code example #12
File: repair_nodes.py Project: KnightKu/Python_demos
        try:
            self.repair_prepare()
        except Exception as err:
            logging.error("failed to prepare: %s, %s" %
                          (str(err), traceback.format_exc()))
            raise error.JobError("failed to prepare")

        for vm_node in self.vm_nodes_list:
            try:
                if not self.soft_repair_vm(vm_node):
                    need_hard_repair.append(vm_node)
            except Exception as err:
                logging.error(
                    "failed to soft repair domain[%s]: %s, %s" %
                    (vm_node.hostname, str(err), traceback.format_exc()))
                raise error.JobError("failed to soft repair domain[%s]" %
                                     vm_node.hostname)

        if not need_hard_repair:
            return True

        for vm_node in need_hard_repair:
            try:
                if not self.hard_repair_vm(vm_node):
                    need_force_repair.append(vm_node)
            except Exception as err:
                logging.error(
                    "failed to hard repair domain[%s]: %s, %s" %
                    (vm_node.hostname, str(err), traceback.format_exc()))
                raise error.JobError("failed to hard repair domain[%s]" %
                                     vm_node.hostname)
Code example #13
File: test.py Project: quanwenli/avocado-vt
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        for key in sorted(params.keys()):
            logging.debug("    %s = %s", key, params[key])

        # Warn of this special condition in related location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Find the test
        subtest_dirs = []
        test_filter = bootstrap.test_filter

        other_subtests_dirs = params.get("other_tests_dirs", "")
        for d in other_subtests_dirs.split():
            d = os.path.join(*d.split("/"))
            subtestdir = os.path.join(self.bindir, d, "tests")
            if not os.path.isdir(subtestdir):
                raise error.TestError("Directory %s does not "
                                      "exist" % subtestdir)
            subtest_dirs += data_dir.SubdirList(subtestdir,
                                                test_filter)

        provider = params.get("provider", None)

        if provider is None:
            # Verify that we have the corresponding source file for it
            generic_subdirs = asset.get_test_provider_subdirs(
                'generic')
            for generic_subdir in generic_subdirs:
                subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                    test_filter)
            specific_subdirs = asset.get_test_provider_subdirs(
                params.get("vm_type"))
            for specific_subdir in specific_subdirs:
                subtest_dirs += data_dir.SubdirList(
                    specific_subdir, bootstrap.test_filter)
        else:
            provider_info = asset.get_test_provider_info(provider)
            for key in provider_info['backends']:
                subtest_dirs += data_dir.SubdirList(
                    provider_info['backends'][key]['path'],
                    bootstrap.test_filter)

        subtest_dir = None

        # Get the test routine corresponding to the specified
        # test type
        logging.debug("Searching for test modules that match "
                      "'type = %s' and 'provider = %s' "
                      "on this cartesian dict",
                      params.get("type"),
                      params.get("provider", None))

        t_types = params.get("type").split()
        # Make sure we can load provider_lib in tests
        for s in subtest_dirs:
            if os.path.dirname(s) not in sys.path:
                sys.path.insert(0, os.path.dirname(s))

        test_modules = {}
        for t_type in t_types:
            for d in subtest_dirs:
                module_path = os.path.join(d, "%s.py" % t_type)
                if os.path.isfile(module_path):
                    logging.debug("Found subtest module %s",
                                  module_path)
                    subtest_dir = d
                    break
            if subtest_dir is None:
                msg = ("Could not find test file %s.py on test "
                       "dirs %s" % (t_type, subtest_dirs))
                raise error.TestError(msg)
            # Load the test module
            f, p, d = imp.find_module(t_type, [subtest_dir])
            test_modules[t_type] = imp.load_module(t_type, f, p, d)
            f.close()

        # TODO: the environment file is deprecated code, and should be removed
        # in future versions. Right now, it's being created on an Avocado temp
        # dir that is only persisted during the runtime of one job, which is
        # different from the original idea of the environment file (which was
        # to persist information across virt-test/avocado-vt job runs)
        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({"func_at_exit": cleanup_env,
                               "args": (env_filename, self.env_version),
                               "once": True})

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        self.__safe_env_save(env)

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            self.__safe_env_save(env)
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s" %
                                             error_message)

                except Exception:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        self.__safe_env_save(env)
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    if self.__safe_env_save(env):
                        env.destroy()   # Force-clean as it can't be stored

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_create_command())
                raise error.JobError("Abort requested (%s)" % e)
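
Note: code examples #13, #15, #17 and #18 all share one skeleton: an inner layer preprocesses and runs the test, a middle finally guarantees postprocessing runs whatever happened, and an outer handler converts any failure into a job abort when abort_on_error is set. A sketch stripped to that skeleton (all callables and JobError are stand-ins):

class JobError(Exception):
    pass

def run_with_postprocess(preprocess, run, postprocess, abort_on_error=False):
    test_passed = False
    try:
        try:
            preprocess()
            run()
            test_passed = True
        finally:
            # Postprocessing always runs; its own errors are only fatal
            # if the test itself had passed.
            try:
                postprocess()
            except Exception:
                if test_passed:
                    raise
    except Exception as e:
        if not abort_on_error:
            raise
        raise JobError("Abort requested (%s)" % e)

run_with_postprocess(lambda: None, lambda: None, lambda: None)
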
Code example #14
    def run_once(self, machines, extra_params, cycles):
        VIRT_TYPE = 'qemu'
        VIRT_DIR = data_dir.get_root_dir()
        TEST_DIR = data_dir.get_backend_dir(VIRT_TYPE)
        PROV_DIR = data_dir.get_test_provider_dir('io-github-autotest-qemu')
        SHARED_DIR = os.path.join(VIRT_DIR, 'shared')
        PROV_DIR = os.path.join(PROV_DIR, VIRT_TYPE)

        asset.download_test_provider("io-github-autotest-qemu")
        bootstrap.create_config_files(TEST_DIR, SHARED_DIR, interactive=False)
        bootstrap.create_config_files(TEST_DIR, PROV_DIR, interactive=False)
        bootstrap.create_subtests_cfg(VIRT_TYPE)
        bootstrap.create_guest_os_cfg(VIRT_TYPE)

        sys.path.insert(0, VIRT_DIR)

        CONTROL_MAIN_PART = """
testname = "virt"
bindir = os.path.join(job.testdir, testname)
job.install_pkg(testname, 'test', bindir)

qemu_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'virt')
sys.path.append(qemu_test_dir)
"""
        logging.info("QEMU test running on hosts %s\n", machines)

        _hosts = {}
        for machine in machines:
            _hosts[machine] = Machines(hosts.create_host(machine))

        cpu_number = 2**31
        for host in _hosts.itervalues():
            host.at = autotest_remote.Autotest(host.host)
            cpu_number = min(host.host.get_num_cpu(), cpu_number)

        cfg_file = os.path.join(TEST_DIR, "cfg", "multi-host-tests.cfg")
        logging.info("CONFIG FILE: '%s' is used for generating"
                     " configuration." % cfg_file)

        if not os.path.exists(cfg_file):
            specific_subdirs = asset.get_test_provider_subdirs("qemu")[0]
            orig_cfg_file = os.path.join(specific_subdirs, "cfg",
                                         "multi-host-tests.cfg")
            if os.path.exists(orig_cfg_file):
                shutil.copy(orig_cfg_file, cfg_file)
            else:
                raise error.JobError("Config file %s was not found" % cfg_file)

        # Get test set (dictionary list) from the configuration file
        parser = cartesian_config.Parser()
        parser.parse_file(cfg_file)
        parser.parse_string(extra_params)
        test_dicts = parser.get_dicts()

        ips = []
        for machine in machines:
            host = _hosts[machine]
            ips.append(host.host.ip)

        machine_hold_vm = machines[0]

        logging.info("")
        for i, params in enumerate(test_dicts):
            logging.info("Test    %d:  %s" % (i, params.get("shortname")))
        logging.info("")

        test_dicts = parser.get_dicts()

        test_dicts_ar = [utils_params.Params(x) for x in test_dicts]

        if not test_dicts_ar:
            raise error.TestNAError("Impossible to start any test with "
                                    "this configuration.")

        keep_macs = {}
        random_cpu_number = random.randint(1, cpu_number)
        for params in test_dicts_ar:

            params['hosts'] = ips
            if params.get("use_randome_smp") == "yes":
                params['smp'] = random_cpu_number

            for vm in params.get("vms").split():
                for nic in params.get('nics', "").split():
                    if 'mac_%s_%s' % (nic, vm) not in keep_macs:
                        keep_macs['mac_%s_%s' %
                                  (nic, vm)] = generate_mac_address()
                    params['mac_%s_%s' % (nic, vm)] = keep_macs['mac_%s_%s' %
                                                                (nic, vm)]

            s_host = _hosts[machine_hold_vm]
            s_host.params = params.object_params("host1")
            s_host.params['clone_master'] = "yes"
            s_host.params['hostid'] = ips[machines.index(machine_hold_vm)]

            for host_id, machine in enumerate(machines):
                if machine != machine_hold_vm:
                    host = _hosts[machine]
                    host_name = "host%s" % (host_id + 2)
                    host.params = params.object_params(host_name)
                    params['not_preprocess'] = "yes"
                    host.params['clone_master'] = "no"
                    host.params['hostid'] = ips[host_id]

            # Report the parameters we've received
            logging.debug("Test parameters:")
            for key in sorted(params.keys()):
                logging.debug("    %s = %s", key, params[key])

            for machine in machines:
                host = _hosts[machine]
                host.control = CONTROL_MAIN_PART

            if params.get("need_multi_host") == "yes":
                for machine in machines:
                    host = _hosts[machine]
                    host.control += ("job.run_test('virt', tag='%s',"
                                     " params=%s)" %
                                     (host.params['shortname'], host.params))

                logging.debug('Master control file:\n%s',
                              _hosts[machine_hold_vm].control)
                for machine in machines:
                    if machine != machine_hold_vm:
                        host = _hosts[machine]
                        logging.debug('Slave control file:\n%s', host.control)

                commands = []

                for machine in machines:
                    host = _hosts[machine]
                    result_path = os.path.join(self.resultsdir,
                                               host.host.hostname,
                                               host.params["shortname"])
                    cmd = subcommand.subcommand(host.at.run,
                                                [host.control, result_path])
                    commands.append(cmd)
            else:
                host = _hosts[machine_hold_vm]
                result_path = os.path.join(self.resultsdir, host.host.hostname,
                                           host.params["shortname"])
                host.control += ("job.run_test('virt', tag='%s', params=%s)" %
                                 (host.params['shortname'], host.params))
                logging.debug("Run control file:\n %s", host.control)
                commands = [
                    subcommand.subcommand(host.at.run,
                                          [host.control, result_path])
                ]
            try:
                subcommand.parallel(commands)
                if params.get("vm_migrated") == "yes":
                    # This update is based on the logic of the test case
                    # migration_multi_host, which uses machines[0] as the src
                    # and machines[1] as the dst. This may need updating for a
                    # different design. Just keep the machines and ips lists
                    # in the right order for the following tests.
                    machine_hold_vm = machines[1]
                    ip_hold_vm = ips[1]
                    machines.remove(machine_hold_vm)
                    ips.remove(ip_hold_vm)

                    if params.get("random_dst_host") == "yes":
                        my_random = random.SystemRandom()
                        dst_machine = my_random.choice(machines)
                        dst_ip = ips[machines.index(dst_machine)]
                    else:
                        dst_machine = machines[0]
                        dst_ip = ips[0]
                    machines.remove(dst_machine)
                    ips.remove(dst_ip)

                    machines.insert(0, machine_hold_vm)
                    machines.insert(1, dst_machine)
                    ips.insert(0, ip_hold_vm)
                    ips.insert(1, dst_ip)

            except error.AutoservError as e:
                logging.error(e)
Code example #15
    def run_once(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.info("Starting test %s", self.tag)
        logging.debug("Test parameters:")
        for key in sorted(params.keys()):
            logging.debug("    %s = %s", key, params[key])

        # Open the environment file
        env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []
                    tests_dir = self.testdir

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(self.bindir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s does not "
                                                  "exist" % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(
                            subtestdir, bootstrap.test_filter)

                    # Verify that we have the corresponding source file for it
                    subtest_dirs += data_dir.SubdirList(
                        self.testdir, bootstrap.test_filter)
                    specific_testdir = os.path.join(self.bindir,
                                                    params.get("vm_type"),
                                                    "tests")
                    subtest_dirs += data_dir.SubdirList(
                        specific_testdir, bootstrap.test_filter)
                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug(
                        "Searching for test modules that match "
                        "param 'type = %s' on this cartesian dict",
                        params.get("type"))
                    t_types = params.get("type").split()
                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                logging.debug("Found subtest module %s",
                                              module_path)
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test "
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type, test_module in test_modules.items():
                        run_func = getattr(test_module, "run_%s" % t_type)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s" %
                                             error_message)

                except Exception as e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
Code example #16
            # copy the logs from the subtask into the main log
            new_log_path = old_log_path + (".%d" % i)
            if os.path.exists(new_log_path):
                new_log = open(new_log_path)
                old_log.write(new_log.read())
                new_log.close()
                old_log.flush()
                os.remove(new_log_path)
        old_log.close()

        self._logger.global_filename = old_log_filename

        # handle any exceptions raised by the parallel tasks
        if exceptions:
            msg = "%d task(s) failed in job.parallel" % len(exceptions)
            raise error.JobError(msg)

    def quit(self):
        # XXX: should have a better name.
        self.harness.run_pause()
        raise error.JobContinue("more to come")

    def complete(self, status):
        """Write pending TAP reports, clean up, and exit"""
        # write out TAP reports
        if self._tap.do_tap_report:
            self._tap.write()
            self._tap._write_tap_archive()

        # write out a job HTML report
        try:
Code example #17
File: virt.py Project: vi-patel/virt-test
    def run_once(self, params):
        # Convert params to a Params object
        params = utils_params.Params(params)

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        for key in sorted(params.keys()):
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        utils_misc.set_log_file_dir(self.debugdir)

        # Open the environment file
        env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False

        try:
            try:
                try:
                    subtest_dirs = []
                    tests_dir = self.job.testdir

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        # Replace split char.
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(tests_dir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s does not"
                                                  " exist." % (subtestdir))
                        subtest_dirs.append(subtestdir)
                    # Verify that we have the corresponding source file for it
                    virt_dir = os.path.dirname(self.virtdir)
                    subtest_dirs.append(os.path.join(virt_dir, "tests"))
                    subtest_dirs.append(
                        os.path.join(self.bindir, params.get("vm_type"),
                                     "tests"))
                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    t_types = params.get("type").split()
                    test_modules = []
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test "
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules.append(
                            (t_type, imp.load_module(t_type, f, p, d)))
                        f.close()
                    # Preprocess
                    try:
                        env_process.preprocess(self, params, env)
                    finally:
                        env.save()
                    # Run the test function
                    for t_type, test_module in test_modules:
                        msg = "Running function: %s.run_%s()" % (t_type,
                                                                 t_type)
                        logging.info(msg)
                        run_func = getattr(test_module, "run_%s" % t_type)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True

                except Exception as e:
                    logging.error("Test failed: %s: %s", e.__class__.__name__,
                                  e)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
Code example #18
File: virt.py Project: xutian/virt-test
    def run_once(self, params):
        # Convert params to a Params object
        params = utils_params.Params(params)

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        for key in sorted(params.keys()):
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        utils_misc.set_log_file_dir(self.debugdir)

        # Open the environment file
        custom_env_path = params.get("custom_env_path", "")
        if custom_env_path:
            env_path = custom_env_path
        else:
            env_path = params.get("vm_type")
        env_filename = os.path.join(self.bindir, "backends", env_path,
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        other_subtests_dirs = params.get("other_tests_dirs", "")

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []
                    bin_dir = self.bindir

                    for d in other_subtests_dirs.split():
                        # Replace split char.
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(bin_dir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s does not"
                                                  " exist." % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(
                            subtestdir, bootstrap.test_filter)

                    # Verify that we have the corresponding source file for it
                    for generic_subdir in asset.get_test_provider_subdirs(
                            'generic'):
                        subtest_dirs += data_dir.SubdirList(
                            generic_subdir, bootstrap.test_filter)

                    for specific_subdir in asset.get_test_provider_subdirs(
                            params.get("vm_type")):
                        subtest_dirs += data_dir.SubdirList(
                            specific_subdir, bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug(
                        "Searching for test modules that match "
                        "'type = %s' and 'provider = %s' "
                        "on this cartesian dict", params.get("type"),
                        params.get("provider", None))

                    t_types = params.get("type").split()
                    provider = params.get("provider", None)
                    if provider is not None:
                        subtest_dirs = [
                            d for d in subtest_dirs if provider in d
                        ]
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test "
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type, test_module in test_modules.items():
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s" %
                                             error_message)

                except Exception as e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    logging.error("Test failed: %s: %s", e.__class__.__name__,
                                  e)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
Code example #19
    def execute(self):
        raise error.JobError('Arrrrrrrrggggh. You are DOOOMED')
Code example #20
def runjob(control, drop_caches, options):
    """
    Run a job using the given control file.

    This is the main interface to this module.

    :see: base_job.__init__ for parameter info.
    """
    control = os.path.abspath(control)

    try:
        autodir = os.path.abspath(os.environ['AUTODIR'])
    except KeyError:
        autodir = settings.get_value('COMMON', 'autotest_top_path')

    tmpdir = os.path.join(autodir, 'tmp')
    tests_out_dir = settings.get_value('COMMON', 'test_output_dir',
                                       default=tmpdir)
    state = os.path.join(tests_out_dir, os.path.basename(control) + '.state')

    # Ensure state file is cleaned up before the job starts to run if autotest
    # is not running with the --continue flag
    if not options.cont and os.path.isfile(state):
        os.remove(state)

    # instantiate the job object ready for the control file.
    myjob = None
    try:
        # Check that the control file is valid
        if not os.path.exists(control):
            raise error.JobError(control + ": control file not found")

        # When continuing, the job is complete when there is no
        # state file; ensure we don't try to continue.
        if options.cont and not os.path.exists(state):
            raise error.JobComplete("all done")

        myjob = job(control=control, drop_caches=drop_caches, options=options)

        # Load in the user's control file, which may do any one of:
        #  1) execute in toto
        #  2) define steps, and select the first via next_step()
        myjob.step_engine()

    except error.JobContinue:
        sys.exit(5)

    except error.JobComplete:
        sys.exit(1)

    except error.JobError as instance:
        logging.error("JOB ERROR: " + str(instance))
        if myjob:
            command = None
            if len(instance.args) > 1:
                command = instance.args[1]
                myjob.record('ABORT', None, command, str(instance))
            myjob.record('END ABORT', None, None, str(instance))
            assert myjob._record_indent == 0
            myjob.complete(1)
        else:
            sys.exit(1)

    except Exception as e:
        # NOTE: job._run_step_fn and job.step_engine will turn things into
        # a JobError for us. If we get here, it's likely an autotest bug.
        msg = str(e) + '\n' + traceback.format_exc()
        logging.critical("JOB ERROR (autotest bug?): " + msg)
        if myjob:
            myjob.record('END ABORT', None, None, msg)
            assert myjob._record_indent == 0
            myjob.complete(1)
        else:
            sys.exit(1)

    # If we get here, then we assume the job is complete and good.
    myjob.record('END GOOD', None, None)
    assert myjob._record_indent == 0

    myjob.complete(0)