Example #1
    def _check_post_reboot(self, subdir, running_id=None):
        """
        Function to perform post-boot checks, such as whether the system
        configuration has changed across reboots (specifically, CPUs and partitions).

        @param subdir: The subdir to use in the job.record call.
        @param running_id: An optional running_id to include in the reboot
            failure log message

        @raise JobError: Raised if the current configuration does not match the
            pre-reboot configuration.
        """
        abort_on_mismatch = GLOBAL_CONFIG.get_config_value('CLIENT',
                                                           'abort_on_mismatch',
                                                           type=bool,
                                                           default=False)
        # check to see if any partitions have changed
        partition_list = partition_lib.get_partition_list(self,
                                                          exclude_swap=False)
        mount_info = partition_lib.get_mount_info(partition_list)
        old_mount_info = self._state.get('client', 'mount_info')
        if mount_info != old_mount_info:
            new_entries = mount_info - old_mount_info
            old_entries = old_mount_info - mount_info
            description = ("mounted partitions are different after reboot "
                           "(old entries: %s, new entries: %s)" %
                           (old_entries, new_entries))
            if abort_on_mismatch:
                self._record_reboot_failure(subdir,
                                            "reboot.verify_config",
                                            description,
                                            running_id=running_id)
                raise error.JobError("Reboot failed: %s" % description)
            else:
                logging.warning(description)

        # check to see if any CPUs have changed
        cpu_count = utils.count_cpus()
        old_count = self._state.get('client', 'cpu_count')
        if cpu_count != old_count:
            description = ('Number of CPUs changed after reboot '
                           '(old count: %d, new count: %d)' %
                           (old_count, cpu_count))
            if abort_on_mismatch:
                self._record_reboot_failure(subdir,
                                            'reboot.verify_config',
                                            description,
                                            running_id=running_id)
                raise error.JobError('Reboot failed: %s' % description)
            else:
                logging.warning(description)
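
The example above escalates a configuration change to a JobError only when the CLIENT/abort_on_mismatch setting is true; otherwise it just logs a warning. A minimal, self-contained sketch of that decide-then-raise pattern (the JobError class and the snapshot values below are stand-ins, not the Autotest originals):

import logging


class JobError(Exception):
    """Stand-in for autotest's error.JobError."""


def check_snapshot(old, new, what, abort_on_mismatch=False):
    """Compare two configuration snapshots; abort or warn on drift."""
    if new == old:
        return
    description = ("%s changed after reboot (removed: %s, added: %s)"
                   % (what, sorted(old - new), sorted(new - old)))
    if abort_on_mismatch:
        raise JobError("Reboot failed: %s" % description)
    logging.warning(description)


# Warns only, because abort_on_mismatch defaults to False (as in the
# config lookup above).
check_snapshot({"sda1", "sda2"}, {"sda1"}, "mounted partitions")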
Example #2
    def get_kernel_build_ident(self):
        (release, version) = self.get_kernel_build_release()

        if not release or not version:
            raise error.JobError('kernel has no identity')

        return release + '::' + version
Example #3
def runjob(control, drop_caches, options):
    """
    Run a job using the given control file.

    This is the main interface to this module.

    @see base_job.__init__ for parameter info.
    """
    control = os.path.abspath(control)
    state = control + '.state'
    # Ensure state file is cleaned up before the job starts to run if autotest
    # is not running with the --continue flag
    if not options.cont and os.path.isfile(state):
        logging.debug('Cleaning up previously found state file')
        os.remove(state)

    # instantiate the job object ready for the control file.
    myjob = None
    try:
        # Check that the control file is valid
        if not os.path.exists(control):
            raise error.JobError(control + ": control file not found")

        # When continuing, the job is complete when there is no
        # state file; ensure we don't try to continue.
        if options.cont and not os.path.exists(state):
            raise error.JobComplete("all done")

        myjob = job(control=control, drop_caches=drop_caches, options=options)

        # Load in the user's control file; it may do any one of:
        #  1) execute in toto
        #  2) define steps, and select the first via next_step()
        myjob.step_engine()

    except error.JobContinue:
        sys.exit(5)

    except error.JobComplete:
        sys.exit(1)

    except error.JobError, instance:
        logging.error("JOB ERROR: " + str(instance))
        if myjob:
            command = None
            if len(instance.args) > 1:
                command = instance.args[1]
                myjob.record('ABORT', None, command, str(instance))
            myjob.record('END ABORT', None, None, str(instance))
            assert myjob._record_indent == 0
            myjob.complete(1)
        else:
            sys.exit(1)
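
runjob() turns the job's control-flow exceptions into process exit codes: JobContinue exits with 5, while JobComplete and the JobError paths end with status 1. A hypothetical outer driver, assuming status 5 means "invoke the client again with --continue" (the flag mentioned in the state-file comment above):

import subprocess


def drive_job(client_bin, control):
    # Rerun the client while it keeps exiting via the JobContinue path.
    status = subprocess.call([client_bin, control])
    while status == 5:
        status = subprocess.call([client_bin, "--continue", control])
    return status

Because JobComplete and the JobError abort both map to status 1 in the example, a driver cannot distinguish a finished job from an aborted one by exit code alone.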
Example #4
    def end_reboot_and_verify(self,
                              expected_when,
                              expected_id,
                              subdir,
                              type='src',
                              patches=[]):
        """ Check the passed kernel identifier against the command line
            and the running kernel, abort the job on missmatch. """

        logging.info(
            "POST BOOT: checking booted kernel "
            "mark=%d identity='%s' type='%s'", expected_when, expected_id,
            type)

        running_id = utils.running_os_ident()

        cmdline = utils.read_one_line("/proc/cmdline")

        find_sum = re.compile(r'.*IDENT=(\d+)')
        m = find_sum.match(cmdline)
        cmdline_when = -1
        if m:
            cmdline_when = int(m.groups()[0])

        # We have all the facts, see if they indicate we
        # booted the requested kernel or not.
        bad = False
        if (type == 'src' and expected_id != running_id or type == 'rpm'
                and not running_id.startswith(expected_id + '::')):
            logging.error("Kernel identifier mismatch")
            bad = True
        if expected_when != cmdline_when:
            logging.error("Kernel command line mismatch")
            bad = True

        if bad:
            logging.error("   Expected Ident: " + expected_id)
            logging.error("    Running Ident: " + running_id)
            logging.error("    Expected Mark: %d", expected_when)
            logging.error("Command Line Mark: %d", cmdline_when)
            logging.error("     Command Line: " + cmdline)

            self._record_reboot_failure(subdir,
                                        "reboot.verify",
                                        "boot failure",
                                        running_id=running_id)
            raise error.JobError("Reboot returned with the wrong kernel")

        self.record('GOOD', subdir, 'reboot.verify',
                    utils.running_os_full_version())
        self.end_reboot(subdir, expected_id, patches, running_id=running_id)
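
The post-boot check relies on an IDENT=<mark> token having been planted on the kernel command line before the reboot, and on a matching kernel identity string. A small round-trip sketch of the marker; only the parsing side mirrors end_reboot_and_verify(), and the way the token gets onto the boot entry here is an assumption:

import re


def plant_ident(base_cmdline, when):
    # Assumed pre-reboot side: append the marker to the boot entry.
    return "%s IDENT=%d" % (base_cmdline, when)


def read_ident(cmdline):
    # Same parse as end_reboot_and_verify(); -1 means no marker was found.
    m = re.compile(r'.*IDENT=(\d+)').match(cmdline)
    return int(m.groups()[0]) if m else -1


cmdline = plant_ident("root=/dev/sda1 ro quiet", 2)
assert read_ident(cmdline) == 2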
Example #5
    def __mark(self, checkpoint):
        name = self.job.resultdir + '/sequence'
        fd = file(name, 'r')
        current = int(fd.readline())
        fd.close()

        current += 1
        fd = file(name + '.new', 'w')
        fd.write('%d' % current)
        fd.close()

        os.rename(name + '.new', name)

        logging.debug("checkpoint %d %d", current, checkpoint)

        if (current != checkpoint):
            raise error.JobError("selftest: sequence was " +
                                 "%d when %d expected" % (current, checkpoint))
Example #6
def _locate_test_control_file(dirpath, testname):
    """
    Locate the control file for the given test.

    @param dirpath: Root directory to search.
    @param testname: Name of test.

    @returns: Absolute path to the control file.
    @raise JobError: Raised if control file not found.
    """
    for dirpath, _dirnames, filenames in os.walk(dirpath):
        for filename in filenames:
            if 'control' not in filename:
                continue
            path = os.path.join(dirpath, filename)
            if _is_control_file_for_test(path, testname):
                return os.path.abspath(path)
    raise error.JobError('could not find client test control file', dirpath,
                         testname)
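
The walk itself is generic; which file actually counts as the control file for the test is decided by _is_control_file_for_test(), which is not shown here. A hypothetical implementation sketch, assuming the control file declares the test with a NAME = '<testname>' line (a common Autotest control-file convention; the real helper may check differently):

import re


def _is_control_file_for_test(path, testname):
    # Treat the file as the test's control file if it contains a
    # NAME = '<testname>' (or "<testname>") assignment.
    pattern = re.compile(r"NAME\s*=\s*['\"]" + re.escape(testname) + r"['\"]")
    with open(path) as control:
        return any(pattern.search(line) for line in control)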
Example #7
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.
        """
        result, exc_info = None, None
        try:
            self.record('START', subdir, name)
            result = function(*args, **dargs)
        except error.TestBaseException, e:
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info

    def run_group(self, function, *args, **dargs):
        """\
        function:
                subroutine to run
        *args:
                arguments for the function
        """

        name = function.__name__
            # copy the logs from the subtask into the main log
            new_log_path = old_log_path + (".%d" % i)
            if os.path.exists(new_log_path):
                new_log = open(new_log_path)
                old_log.write(new_log.read())
                new_log.close()
                old_log.flush()
                os.remove(new_log_path)
        old_log.close()

        self._logger.global_filename = old_log_filename

        # handle any exceptions raised by the parallel tasks
        if exceptions:
            msg = "%d task(s) failed in job.parallel" % len(exceptions)
            raise error.JobError(msg)


    def quit(self):
        # XXX: should have a better name.
        self.harness.run_pause()
        raise error.JobContinue("more to come")


    def complete(self, status):
        """Write pending reports, clean up, and exit"""
        # write out a job HTML report
        try:
            html_report.create_report(self.resultdir)
        except Exception, e:
            logging.error("Error writing job HTML report: %s", e)
Example #9
    def run_once(self, params):
        # Convert params to a Params object
        params = virt_utils.Params(params)

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        virt_utils.set_log_file_dir(self.debugdir)

        # Open the environment file
        env_filename = os.path.join(self.bindir, params.get("env", "env"))
        env = virt_utils.Env(env_filename, self.env_version)

        test_passed = False

        try:
            try:
                try:
                    # Get the test routine corresponding to the specified
                    # test type
                    t_type = params.get("type")
                    # Verify that we have the corresponding source file for it
                    virt_dir = os.path.dirname(virt_utils.__file__)
                    subtest_dir_common = os.path.join(virt_dir, "tests")
                    subtest_dir_test = os.path.join(self.bindir, "tests")
                    subtest_dir = None
                    for d in [subtest_dir_test, subtest_dir_common]:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        raise error.TestError(
                            "Could not find test file %s.py "
                            "on either %s or %s directory" %
                            (t_type, subtest_dir_test, subtest_dir_common))
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_module = imp.load_module(t_type, f, p, d)
                    f.close()

                    # Preprocess
                    try:
                        virt_env_process.preprocess(self, params, env)
                    finally:
                        env.save()
                    # Run the test function
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                    finally:
                        env.save()
                    test_passed = True

                except Exception, e:
                    logging.error("Test failed: %s: %s", e.__class__.__name__,
                                  e)
                    try:
                        virt_env_process.postprocess_on_error(
                            self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        virt_env_process.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "kvm":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info(
                            "'%s' has a %s monitor unix socket at: %s",
                            vm.name, m.protocol, m.filename)
                    logging.info(
                        "The command line used to start '%s' was:\n%s",
                        vm.name, vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
Example #10
    def run_once(self, params):
        # Convert params to a Params object
        params = kvm_utils.Params(params)

        # Report the parameters we've received and write them as keyvals
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])
            self.write_test_keyval({key: params[key]})

        # Set the log file dir for the logging mechanism used by kvm_subprocess
        # (this must be done before unpickling env)
        kvm_utils.set_log_file_dir(self.debugdir)

        # Open the environment file
        logging.info("Unpickling env. You may see some harmless error "
                     "messages.")
        env_filename = os.path.join(self.bindir, params.get("env", "env"))
        env = kvm_utils.Env(env_filename, self.env_version)

        test_passed = False

        try:
            try:
                try:
                    # Get the test routine corresponding to the specified
                    # test type
                    t_type = params.get("type")
                    # Verify that we have the corresponding source file for it
                    subtest_dir = os.path.join(self.bindir, "tests")
                    module_path = os.path.join(subtest_dir, "%s.py" % t_type)
                    if not os.path.isfile(module_path):
                        raise error.TestError("No %s.py test file found" %
                                              t_type)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_module = imp.load_module(t_type, f, p, d)
                    f.close()

                    # Preprocess
                    try:
                        kvm_preprocessing.preprocess(self, params, env)
                    finally:
                        env.save()
                    # Run the test function
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                    finally:
                        env.save()
                    test_passed = True

                except Exception, e:
                    logging.error("Test failed: %s: %s", e.__class__.__name__,
                                  e)
                    try:
                        kvm_preprocessing.postprocess_on_error(
                            self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        kvm_preprocessing.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("'%s' has a %s monitor unix socket at: %s",
                                 vm.name, m.protocol, m.filename)
                logging.info("The command line used to start '%s' was:\n%s",
                             vm.name, vm.make_qemu_command())
            raise error.JobError("Abort requested (%s)" % e)
Example #11
    def execute(self):
        raise error.JobError('Arrrrrrrrggggh. You are DOOOMED')
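
When a step like this raises JobError, the exception surfaces in the handler shown in runjob() above, which logs the error and records the abort. A tiny standalone sketch of that catch-and-report side (execute() and the logging here are stand-ins for the job machinery):

import logging


class JobError(Exception):
    pass


def execute():
    raise JobError('Arrrrrrrrggggh. You are DOOOMED')


try:
    execute()
except JobError as instance:
    logging.error("JOB ERROR: %s", instance)
    # a real job would also write an 'END ABORT' status record here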