Example 1
File: job.py Project: ceph/autotest
    def _pre_record_init(self, control, options):
        """
        Initialization function that should perform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self._group_level,
        self.harness and of course self._logger.
        """
        if not options.cont:
            self._cleanup_debugdir_files()
            self._cleanup_results_dir()

        logging_manager.configure_logging(
            client_logging_config.ClientLoggingConfig(),
            results_dir=self.resultdir,
            verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self._current_step_ancestry = []
        self._next_step_index = 0
        self._load_state()

        # harness is chosen by following rules:
        # 1. explicitly specified via command line
        # 2. harness stored in state file (if continuing job '-c')
        # 3. default harness
        selected_harness = None
        if options.harness:
            selected_harness = options.harness
            self._state.set('client', 'harness', selected_harness)
        else:
            stored_harness = self._state.get('client', 'harness', None)
            if stored_harness:
                selected_harness = stored_harness

        self.harness = harness.select(selected_harness, self)

        # set up the status logger
        def client_job_record_hook(entry):
            msg_tag = ''
            if '.' in self._logger.global_filename:
                msg_tag = self._logger.global_filename.split('.', 1)[1]
            # send the entry to the job harness
            message = '\n'.join([entry.message] + entry.extra_message_lines)
            rendered_entry = self._logger.render_entry(entry)
            self.harness.test_status_detail(entry.status_code, entry.subdir,
                                            entry.operation, message, msg_tag)
            self.harness.test_status(rendered_entry, msg_tag)
            # send the entry to stdout, if it's enabled
            logging.info(rendered_entry)
        self._logger = base_job.status_logger(
            self, status_indenter(self), record_hook=client_job_record_hook,
            tap_writer=self._tap)
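A note on the selection block above: the precedence is the explicit command-line flag first, then the harness persisted in the state file (used when continuing a job with '-c'), then the framework default. A minimal standalone sketch of that pattern, with a plain dict standing in for the job's state object (the dict and function name are illustrative assumptions, not project code):

    # Sketch of the precedence above; `state` is a plain-dict stand-in
    # for self._state, used only for illustration.
    def pick_harness(cli_harness, state):
        if cli_harness:                      # 1. explicitly specified via command line
            state['harness'] = cli_harness   # persist for continued jobs ('-c')
            return cli_harness
        return state.get('harness')          # 2. stored harness, else None

Passing None through to harness.select() is what triggers rule 3, the default harness.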
Example 2
    def _pre_record_init(self, control, options):
        """
        Initialization function that should perform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self._group_level,
        self.harness and of course self._logger.
        """
        if not options.cont:
            self._cleanup_debugdir_files()
            self._cleanup_results_dir()

        logging_manager.configure_logging(
            client_logging_config.ClientLoggingConfig(),
            results_dir=self.resultdir,
            verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self._current_step_ancestry = []
        self._next_step_index = 0
        self._load_state()

        _harness = self.handle_persistent_option(options, 'harness')
        _harness_args = self.handle_persistent_option(options, 'harness_args')

        self.harness = harness.select(_harness, self, _harness_args)

        if self.control:
            parsed_control = control_data.parse_control(self.control,
                                                        raise_warnings=False)
            self.fast = parsed_control.fast

        # set up the status logger
        def client_job_record_hook(entry):
            msg_tag = ''
            if '.' in self._logger.global_filename:
                msg_tag = self._logger.global_filename.split('.', 1)[1]
            # send the entry to the job harness
            message = '\n'.join([entry.message] + entry.extra_message_lines)
            rendered_entry = self._logger.render_entry(entry)
            self.harness.test_status_detail(entry.status_code, entry.subdir,
                                            entry.operation, message, msg_tag,
                                            entry.fields)
            self.harness.test_status(rendered_entry, msg_tag)
            # send the entry to stdout, if it's enabled
            logging.info(rendered_entry)

        self._logger = base_job.status_logger(
            self, status_indenter(self), record_hook=client_job_record_hook)
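Examples 2 and 3 fold the precedence logic from Example 1 into self.handle_persistent_option. Its body lives elsewhere in job.py; the sketch below is an assumption about its shape, reconstructed from how Example 1 handles the 'harness' option, not the project's actual implementation:

    # Assumed shape of handle_persistent_option (not the project's code):
    # a command-line value wins and is persisted to the state file,
    # otherwise the previously stored value, if any, is returned.
    def handle_persistent_option(self, options, option_name):
        value = getattr(options, option_name, None)
        if value:
            self._state.set('client', option_name, value)
        else:
            value = self._state.get('client', option_name, None)
        return value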
Example 3
    def _pre_record_init(self, control, options):
        """
        Initialization function that should perform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self._group_level,
        self.harness and of course self._logger.
        """
        if not options.cont:
            self._cleanup_debugdir_files()
            self._cleanup_results_dir()

        logging_manager.configure_logging(
            client_logging_config.ClientLoggingConfig(),
            results_dir=self.resultdir,
            verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self._current_step_ancestry = []
        self._next_step_index = 0
        self._load_state()

        _harness = self.handle_persistent_option(options, 'harness')
        _harness_args = self.handle_persistent_option(options, 'harness_args')

        self.harness = harness.select(_harness, self, _harness_args)

        # set up the status logger
        def client_job_record_hook(entry):
            msg_tag = ''
            if '.' in self._logger.global_filename:
                msg_tag = self._logger.global_filename.split('.', 1)[1]
            # send the entry to the job harness
            message = '\n'.join([entry.message] + entry.extra_message_lines)
            rendered_entry = self._logger.render_entry(entry)
            self.harness.test_status_detail(entry.status_code, entry.subdir,
                                            entry.operation, message, msg_tag,
                                            entry.fields)
            self.harness.test_status(rendered_entry, msg_tag)
            # send the entry to stdout, if it's enabled
            logging.info(rendered_entry)
        self._logger = base_job.status_logger(
            self, status_indenter(self), record_hook=client_job_record_hook,
            tap_writer=self._tap)
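For orientation, this is how the record hook in the client examples plugs into the logging path. The call and the resulting flow are a hedged reconstruction from the hook body above, not traced output:

    # Assumed flow, reconstructed from client_job_record_hook above:
    # self.record(...) has the status_logger build an `entry`, then the
    # hook fans the entry out to the harness and to stdout.
    job.record('GOOD', None, 'boot', 'reboot complete')
    # -> entry.status_code='GOOD', entry.subdir=None,
    #    entry.operation='boot', entry.message='reboot complete'
    # -> harness.test_status_detail(...) and harness.test_status(...)
    # -> logging.info(rendered_entry) echoes the rendered line to stdout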
Example 4
    def __init__(self,
                 control,
                 args,
                 resultdir,
                 label,
                 user,
                 machines,
                 client=False,
                 parse_job='',
                 ssh_user='root',
                 ssh_port=22,
                 ssh_pass='',
                 group_name='',
                 tag='',
                 control_filename=SERVER_CONTROL_FILENAME):
        """
        Create a server side job object.

        @param control: The pathname of the control file.
        @param args: Passed to the control file.
        @param resultdir: Where to throw the results.
        @param label: Description of the job.
        @param user: Username for the job (email address).
        @param machines: A list of hostnames of the machines the job
                will run on.
        @param client: True if this is a client-side control file.
        @param parse_job: string; if supplied, it is the job execution tag
                with which the results will be passed to the TKO parser.
        @param ssh_user: The SSH username.  [root]
        @param ssh_port: The SSH port number.  [22]
        @param ssh_pass: The SSH passphrase, if needed.
        @param group_name: If supplied, this will be written out as
                host_group_name in the keyvals file for the parser.
        @param tag: The job execution tag from the scheduler.  [optional]
        @param control_filename: The filename where the server control file
                should be written in the results directory.
        """
        super(base_server_job, self).__init__(resultdir=resultdir)

        path = os.path.dirname(__file__)
        self.control = control
        self._uncollected_log_file = os.path.join(self.resultdir,
                                                  'uncollected_logs')
        debugdir = os.path.join(self.resultdir, 'debug')
        if not os.path.exists(debugdir):
            os.mkdir(debugdir)

        if user:
            self.user = user
        else:
            self.user = getpass.getuser()

        self.args = args
        self.machines = machines
        self._client = client
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self._ssh_user = ssh_user
        self._ssh_port = ssh_port
        self._ssh_pass = ssh_pass
        self.tag = tag
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches = False
        self.drop_caches_between_iterations = False
        self._control_filename = control_filename

        self.logging = logging_manager.get_logging_manager(
            manage_stdout_and_stderr=True, redirect_fds=True)
        subcommand.logging_manager_object = self.logging

        self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        job_data = {
            'label': label,
            'user': user,
            'hostname': ','.join(machines),
            'drone': platform.node(),
            'status_version': str(self._STATUS_VERSION),
            'job_started': str(int(time.time()))
        }
        if group_name:
            job_data['host_group_name'] = group_name

        # only write these keyvals out on the first job in a resultdir
        if 'job_started' not in utils.read_keyval(self.resultdir):
            job_data.update(get_site_job_data(self))
            utils.write_keyval(self.resultdir, job_data)

        self._parse_job = parse_job
        self._using_parser = (self._parse_job and len(machines) <= 1)
        self.pkgmgr = packages.PackageManager(
            self.autodir, run_function_dargs={'timeout': 600})
        self.num_tests_run = 0
        self.num_tests_failed = 0

        self._register_subcommand_hooks()

        # these components aren't usable on the server
        self.bootloader = None
        self.harness = None

        # set up the status logger
        self._indenter = status_indenter()
        self._logger = base_job.status_logger(
            self,
            self._indenter,
            'status.log',
            'status.log',
            record_hook=server_job_record_hook(self))
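The "first job in a resultdir" guard in Example 4 is worth isolating: job-level keyvals are written only when no earlier job has stamped 'job_started' in the results directory. A minimal sketch using the same utils helpers the example calls (the standalone wrapper is illustrative, not project code):

    # Sketch of the write-once guard above: read_keyval() returns the
    # existing keyvals (an empty dict when no keyval file exists yet),
    # so the write happens only for the first job in the resultdir.
    def write_job_keyvals_once(resultdir, job_data):
        existing = utils.read_keyval(resultdir)
        if 'job_started' not in existing:
            utils.write_keyval(resultdir, job_data)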
Example 5
    def __init__(
        self,
        control,
        args,
        resultdir,
        label,
        user,
        machines,
        client=False,
        parse_job="",
        ssh_user="******",
        ssh_port=22,
        ssh_pass="",
        group_name="",
        tag="",
        control_filename=SERVER_CONTROL_FILENAME,
    ):
        """
        Create a server side job object.

        @param control: The pathname of the control file.
        @param args: Passed to the control file.
        @param resultdir: Where to throw the results.
        @param label: Description of the job.
        @param user: Username for the job (email address).
        @param machines: A list of hostnames of the machines the job
                will run on.
        @param client: True if this is a client-side control file.
        @param parse_job: string; if supplied, it is the job execution tag
                with which the results will be passed to the TKO parser.
        @param ssh_user: The SSH username.  [root]
        @param ssh_port: The SSH port number.  [22]
        @param ssh_pass: The SSH passphrase, if needed.
        @param group_name: If supplied, this will be written out as
                host_group_name in the keyvals file for the parser.
        @param tag: The job execution tag from the scheduler.  [optional]
        @param control_filename: The filename where the server control file
                should be written in the results directory.
        """
        super(base_server_job, self).__init__(resultdir=resultdir)

        path = os.path.dirname(__file__)
        self.control = control
        self._uncollected_log_file = os.path.join(self.resultdir, "uncollected_logs")
        debugdir = os.path.join(self.resultdir, "debug")
        if not os.path.exists(debugdir):
            os.mkdir(debugdir)

        if user:
            self.user = user
        else:
            self.user = getpass.getuser()

        self.args = args
        self.machines = machines
        self._client = client
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self._ssh_user = ssh_user
        self._ssh_port = ssh_port
        self._ssh_pass = ssh_pass
        self.tag = tag
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches = False
        self.drop_caches_between_iterations = False
        self._control_filename = control_filename

        self.logging = logging_manager.get_logging_manager(manage_stdout_and_stderr=True, redirect_fds=True)
        subcommand.logging_manager_object = self.logging

        self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        job_data = {
            "label": label,
            "user": user,
            "hostname": ",".join(machines),
            "drone": platform.node(),
            "status_version": str(self._STATUS_VERSION),
            "job_started": str(int(time.time())),
        }
        if group_name:
            job_data["host_group_name"] = group_name

        # only write these keyvals out on the first job in a resultdir
        if "job_started" not in utils.read_keyval(self.resultdir):
            job_data.update(get_site_job_data(self))
            utils.write_keyval(self.resultdir, job_data)

        self._parse_job = parse_job
        self._using_parser = self._parse_job and len(machines) <= 1
        self.pkgmgr = packages.PackageManager(self.autodir, run_function_dargs={"timeout": 600})
        self.num_tests_run = 0
        self.num_tests_failed = 0

        self._register_subcommand_hooks()

        # these components aren't usable on the server
        self.bootloader = None
        self.harness = None

        # set up the status logger
        self._indenter = status_indenter()
        self._logger = base_job.status_logger(
            self, self._indenter, "status.log", "status.log", record_hook=server_job_record_hook(self)
        )
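Finally, a hedged construction example for the server job above; every argument value is a placeholder chosen for illustration, not taken from the source:

    # Hypothetical instantiation; all values below are placeholders.
    job = base_server_job(
        control='/tmp/control',           # pathname of the control file
        args='',                          # passed through to the control file
        resultdir='/tmp/results',         # where results are written
        label='example server job',
        user='autotest@example.com',
        machines=['host1.example.com'],
        client=False,                     # this is a server-side control file
        tag='123-autotest',               # scheduler execution tag
    )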