Ejemplo n.º 1
0
    def test_select_ABAT(self):
        """harness.select('ABAT') must construct a harness_ABAT for the job."""
        self.god.stub_class(harness_ABAT, "harness_ABAT")
        fake_job = object()

        harness_ABAT.harness_ABAT.expect_new(fake_job)
        harness.select('ABAT', fake_job)
        self.god.check_playback()
Ejemplo n.º 2
0
    def test_select_standalone(self):
        """harness.select('standalone') must construct a harness_standalone."""
        self.god.stub_class(harness_standalone, "harness_standalone")
        fake_job = object()

        harness_standalone.harness_standalone.expect_new(fake_job)
        harness.select('standalone', fake_job)
        self.god.check_playback()
    def test_select_none(self):
        """Selecting None must fall back to the standalone harness."""
        self.god.stub_class(harness_standalone, "harness_standalone")
        fake_job = object()
        empty_args = ''

        harness_standalone.harness_standalone.expect_new(fake_job, empty_args)
        harness.select(None, fake_job, empty_args)
        self.god.check_playback()
Ejemplo n.º 4
0
    def test_select_ABAT(self):
        """harness.select('ABAT', ...) must forward the args to harness_ABAT."""
        self.god.stub_class(harness_ABAT, "harness_ABAT")
        fake_job = object()
        empty_args = ''

        harness_ABAT.harness_ABAT.expect_new(fake_job, empty_args)
        harness.select('ABAT', fake_job, empty_args)
        self.god.check_playback()
Ejemplo n.º 5
0
    def test_select_standalone(self):
        """harness.select('standalone', ...) must forward the args through."""
        self.god.stub_class(harness_standalone, "harness_standalone")
        fake_job = object()
        empty_args = ''

        harness_standalone.harness_standalone.expect_new(fake_job, empty_args)
        harness.select('standalone', fake_job, empty_args)
        self.god.check_playback()
Ejemplo n.º 6
0
Archivo: job.py Proyecto: ceph/autotest
    def _pre_record_init(self, control, options):
        """
        Initialization function that should perform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self._group_level,
        self.harness and of course self._logger.

        :param control: path to the control file driving this job.
        :param options: parsed command-line options; reads .cont, .verbose
                        and .harness here.
        """
        # A fresh (non-continuation) run starts from clean debug/result dirs.
        if not options.cont:
            self._cleanup_debugdir_files()
            self._cleanup_results_dir()

        logging_manager.configure_logging(
            client_logging_config.ClientLoggingConfig(),
            results_dir=self.resultdir,
            verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self._current_step_ancestry = []
        self._next_step_index = 0
        self._load_state()

        # harness is chosen by following rules:
        # 1. explicitly specified via command line
        # 2. harness stored in state file (if continuing job '-c')
        # 3. default harness
        selected_harness = None
        if options.harness:
            selected_harness = options.harness
            # Persist the explicit choice so a continuation reuses it.
            self._state.set('client', 'harness', selected_harness)
        else:
            stored_harness = self._state.get('client', 'harness', None)
            if stored_harness:
                selected_harness = stored_harness

        # None falls through to harness.select()'s default harness.
        self.harness = harness.select(selected_harness, self)

        # set up the status logger
        def client_job_record_hook(entry):
            """Forward each status-log entry to the harness and to stdout."""
            msg_tag = ''
            if '.' in self._logger.global_filename:
                # Anything after the first '.' of the log filename is the tag.
                msg_tag = self._logger.global_filename.split('.', 1)[1]
            # send the entry to the job harness
            message = '\n'.join([entry.message] + entry.extra_message_lines)
            rendered_entry = self._logger.render_entry(entry)
            self.harness.test_status_detail(entry.status_code, entry.subdir,
                                            entry.operation, message, msg_tag)
            self.harness.test_status(rendered_entry, msg_tag)
            # send the entry to stdout, if it's enabled
            logging.info(rendered_entry)
        self._logger = base_job.status_logger(
            self, status_indenter(self), record_hook=client_job_record_hook,
            tap_writer=self._tap)
Ejemplo n.º 7
0
    def _pre_record_init(self, control, options):
        """
        Initialization function that should perform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self._group_level,
        self.harness and of course self._logger.

        :param control: path to the control file driving this job.
        :param options: parsed command-line options; .cont, .verbose and
                        the persistent 'harness'/'harness_args' options
                        are read here.
        """
        # A fresh (non-continuation) run starts from clean debug/result dirs.
        if not options.cont:
            self._cleanup_debugdir_files()
            self._cleanup_results_dir()

        logging_manager.configure_logging(
            client_logging_config.ClientLoggingConfig(),
            results_dir=self.resultdir,
            verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self._current_step_ancestry = []
        self._next_step_index = 0
        self._load_state()

        # Persistent options survive a job continuation via the state file.
        _harness = self.handle_persistent_option(options, 'harness')
        _harness_args = self.handle_persistent_option(options, 'harness_args')

        self.harness = harness.select(_harness, self, _harness_args)

        if self.control:
            # Only the 'fast' flag is needed here, so control-file warnings
            # are not raised at this early stage.
            parsed_control = control_data.parse_control(self.control,
                                                        raise_warnings=False)
            self.fast = parsed_control.fast

        # set up the status logger
        def client_job_record_hook(entry):
            """Forward each status-log entry to the harness and to stdout."""
            msg_tag = ''
            if '.' in self._logger.global_filename:
                # Anything after the first '.' of the log filename is the tag.
                msg_tag = self._logger.global_filename.split('.', 1)[1]
            # send the entry to the job harness
            message = '\n'.join([entry.message] + entry.extra_message_lines)
            rendered_entry = self._logger.render_entry(entry)
            self.harness.test_status_detail(entry.status_code, entry.subdir,
                                            entry.operation, message, msg_tag,
                                            entry.fields)
            self.harness.test_status(rendered_entry, msg_tag)
            # send the entry to stdout, if it's enabled
            logging.info(rendered_entry)

        self._logger = base_job.status_logger(
            self, status_indenter(self), record_hook=client_job_record_hook)
Ejemplo n.º 8
0
    def _pre_record_init(self, control, options):
        """
        Initialization function that should perform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self._group_level,
        self.harness and of course self._logger.

        :param control: path to the control file driving this job.
        :param options: parsed command-line options; .cont, .verbose and
                        the persistent 'harness'/'harness_args' options
                        are read here.
        """
        # A fresh (non-continuation) run starts from clean debug/result dirs.
        if not options.cont:
            self._cleanup_debugdir_files()
            self._cleanup_results_dir()

        logging_manager.configure_logging(
            client_logging_config.ClientLoggingConfig(),
            results_dir=self.resultdir,
            verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self._current_step_ancestry = []
        self._next_step_index = 0
        self._load_state()

        # Persistent options survive a job continuation via the state file.
        _harness = self.handle_persistent_option(options, 'harness')
        _harness_args = self.handle_persistent_option(options, 'harness_args')

        self.harness = harness.select(_harness, self, _harness_args)

        # set up the status logger
        def client_job_record_hook(entry):
            """Forward each status-log entry to the harness and to stdout."""
            msg_tag = ''
            if '.' in self._logger.global_filename:
                # Anything after the first '.' of the log filename is the tag.
                msg_tag = self._logger.global_filename.split('.', 1)[1]
            # send the entry to the job harness
            message = '\n'.join([entry.message] + entry.extra_message_lines)
            rendered_entry = self._logger.render_entry(entry)
            self.harness.test_status_detail(entry.status_code, entry.subdir,
                                            entry.operation, message, msg_tag,
                                            entry.fields)
            self.harness.test_status(rendered_entry, msg_tag)
            # send the entry to stdout, if it's enabled
            logging.info(rendered_entry)
        self._logger = base_job.status_logger(
            self, status_indenter(self), record_hook=client_job_record_hook,
            tap_writer=self._tap)
Ejemplo n.º 9
0
    def _pre_record_init(self, control, options):
        """
        Initialization function that should perform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self.group_level,
        self.log_filename, self.harness.

        :param control: path to the control file driving this job.
        :param options: parsed command-line options; reads .tag, .cont,
                        .verbose and .harness here.
        """
        # AUTODIR is expected to be set by the autotest launcher environment.
        self.autodir = os.environ['AUTODIR']
        self.resultdir = os.path.join(self.autodir, 'results', options.tag)
        self.tmpdir = os.path.join(self.autodir, 'tmp')

        if not os.path.exists(self.resultdir):
            os.makedirs(self.resultdir)

        if not options.cont:
            self._cleanup_results_dir()
            # Don't cleanup the tmp dir (which contains the lockfile)
            # in the constructor, this would be a problem for multiple
            # jobs starting at the same time on the same client. Instead
            # do the delete at the server side. We simply create the tmp
            # directory here if it does not already exist.
            if not os.path.exists(self.tmpdir):
                os.mkdir(self.tmpdir)

        logging_manager.configure_logging(
                client_logging_config.ClientLoggingConfig(),
                results_dir=self.resultdir,
                verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        self.log_filename = self.DEFAULT_LOG_FILENAME

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        # Step-engine bookkeeping is persisted next to the control file.
        self.state_file = self.control + '.state'
        self.current_step_ancestry = []
        self.next_step_index = 0
        self.testtag = ''
        self._test_tag_prefix = ''
        self._load_state()

        self._init_group_level()

        self.harness = harness.select(options.harness, self)
Ejemplo n.º 10
0
 def harness_select(self, which, harness_args):
     """Switch this job to the harness named *which*.

     :param which: harness name understood by harness.select(); a falsy
                   value selects the default harness.
     :param harness_args: extra argument string forwarded to the harness.
     """
     self.harness = harness.select(which, self, harness_args)
Ejemplo n.º 11
0
 def harness_select(self, which):
     """Switch this job to the harness named *which*.

     :param which: harness name understood by harness.select(); a falsy
                   value selects the default harness.
     """
     self.harness = harness.select(which, self)
Ejemplo n.º 12
0
 def harness_select(self, which, harness_args):
     """Switch this job to the harness named *which*.

     :param which: harness name understood by harness.select(); a falsy
                   value selects the default harness.
     :param harness_args: extra argument string forwarded to the harness.
     """
     self.harness = harness.select(which, self, harness_args)
Ejemplo n.º 13
0
    def __init__(self, control, options, drop_caches=True,
                 extra_copy_cmdline=None):
        """
        Prepare a client side job object.

        @param control: The control file (pathname of).
        @param options: an object which includes:
                jobtag: The job tag string (eg "default").
                cont: If this is the continuation of this job.
                harness_type: An alternative server harness.  [None]
                use_external_logging: If true, the enable_external_logging
                          method will be called during construction.  [False]
        @param drop_caches: If true, utils.drop_caches() is called before and
                between all tests.  [True]
        @param extra_copy_cmdline: list of additional /proc/cmdline arguments to
                copy from the running kernel to all the installed kernels with
                this job
        """
        # Directory layout is rooted at AUTODIR, which the launcher
        # environment is expected to provide.
        self.autodir = os.environ['AUTODIR']
        self.bindir = os.path.join(self.autodir, 'bin')
        self.libdir = os.path.join(self.autodir, 'lib')
        self.testdir = os.path.join(self.autodir, 'tests')
        self.configdir = os.path.join(self.autodir, 'config')
        self.site_testdir = os.path.join(self.autodir, 'site_tests')
        self.profdir = os.path.join(self.autodir, 'profilers')
        self.tmpdir = os.path.join(self.autodir, 'tmp')
        self.toolsdir = os.path.join(self.autodir, 'tools')
        self.resultdir = os.path.join(self.autodir, 'results', options.tag)

        if not os.path.exists(self.resultdir):
            os.makedirs(self.resultdir)

        # A fresh (non-continuation) run starts from a clean results dir.
        if not options.cont:
            self._cleanup_results_dir()

        logging_manager.configure_logging(
                client_logging_config.ClientLoggingConfig(),
                results_dir=self.resultdir,
                verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        self.drop_caches_between_iterations = False
        self.drop_caches = drop_caches
        if self.drop_caches:
            logging.debug("Dropping caches")
            utils.drop_caches()

        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        # Step-engine bookkeeping is persisted next to the control file.
        self.state_file = self.control + '.state'
        self.current_step_ancestry = []
        self.next_step_index = 0
        self.testtag = ''
        self._test_tag_prefix = ''

        self._load_state()
        self.pkgmgr = packages.PackageManager(
            self.autodir, run_function_dargs={'timeout':3600})
        self.pkgdir = os.path.join(self.autodir, 'packages')
        # Honor a cleanup preference carried over in the state file.
        self.run_test_cleanup = self.get_state("__run_test_cleanup",
                                                default=True)

        self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self._load_sysinfo_state()

        self.last_boot_tag = self.get_state("__last_boot_tag", default=None)
        self.tag = self.get_state("__job_tag", default=None)

        if not options.cont:
            """
            Don't cleanup the tmp dir (which contains the lockfile)
            in the constructor, this would be a problem for multiple
            jobs starting at the same time on the same client. Instead
            do the delete at the server side. We simply create the tmp
            directory here if it does not already exist.
            """
            if not os.path.exists(self.tmpdir):
                os.mkdir(self.tmpdir)

            if not os.path.exists(self.pkgdir):
                os.mkdir(self.pkgdir)

            results = os.path.join(self.autodir, 'results')
            if not os.path.exists(results):
                os.mkdir(results)

            download = os.path.join(self.testdir, 'download')
            if not os.path.exists(download):
                os.mkdir(download)

            os.makedirs(os.path.join(self.resultdir, 'analysis'))

            # Keep a copy of the control file with the results for reference.
            shutil.copyfile(self.control,
                            os.path.join(self.resultdir, 'control'))


        # NOTE(review): self.control is reassigned here from the realpath
        # back to the caller-supplied path — confirm this is intentional.
        self.control = control
        self.jobtag = options.tag
        self.log_filename = self.DEFAULT_LOG_FILENAME

        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        self.logging.start_logging()

        self._init_group_level()

        self.config = config.config(self)
        self.harness = harness.select(options.harness, self)
        self.profilers = profilers.profilers(self)

        try:
            tool = self.config_get('boottool.executable')
            self.bootloader = boottool.boottool(tool)
        except:
            # NOTE(review): bare except silently skips bootloader setup on
            # ANY failure (including KeyboardInterrupt/SystemExit) and may
            # leave self.bootloader unset — consider narrowing.
            pass

        self.sysinfo.log_per_reboot_data()

        if not options.cont:
            self.record('START', None, None)
            self._increment_group_level()

        self.harness.run_start()

        if options.log:
            self.enable_external_logging()

        # load the max disk usage rate - default to no monitoring
        self.max_disk_usage_rate = self.get_state('__monitor_disk', default=0.0)

        copy_cmdline = set(['console'])
        if extra_copy_cmdline is not None:
            copy_cmdline.update(extra_copy_cmdline)

        # extract console= and other args from cmdline and add them into the
        # base args that we use for all kernels we install
        cmdline = utils.read_one_line('/proc/cmdline')
        kernel_args = []
        for karg in cmdline.split():
            for param in copy_cmdline:
                # Match exact flag ('console') or key=value ('console=...').
                if karg.startswith(param) and \
                    (len(param) == len(karg) or karg[len(param)] == '='):
                    kernel_args.append(karg)
        self.config_set('boot.default_args', ' '.join(kernel_args))