def __init__(self, *args, **dargs):
    """
    Initialize new subtest, passes all arguments through to parent class
    """
    super(Subtest, self).__init__(*args, **dargs)
    # The log indentation level is not directly exposed, so borrow the
    # job's status machinery to obtain a properly indented renderer.
    indenter = job.status_indenter(self.job)
    status_log = base_job.status_logger(self.job, indenter)
    self._re = status_log.render_entry  # returns string w/ proper indentation
    # Load this subtest's configuration section so subclasses don't have
    # to set it up every time.
    parser = config.Config()
    self.config = parser.get(self.config_section)
    if self.config is not None:
        # Version number used by one-time setup() test.test method
        self.version = version.str2int(self.config['config_version'])
        # Fail test if configuration being used doesn't match dockertest API
        version.check_version(self.config)
    else:
        logging.warning("No configuration section found '%s'",
                        self.config_section)
        self.config = parser['DEFAULTS']
        # No config means no version info; mark this to skip the check.
        self.config['config_version'] = version.NOVERSIONCHECK
        self.version = 0
    # Log original key/values before subtest could modify them
    self.write_test_keyval(self.config)
    # Optionally setup different iterations if option exists
    self.iterations = self.config.get('iterations', self.iterations)
    # subclasses can do whatever they like with this
    self.stuff = {}
def _pre_record_init(self, control, options):
    """
    Initialization function that should perform ONLY the required setup so
    that the self.record() method works.

    As of now self.record() needs self.resultdir, self._group_level,
    self.harness and of course self._logger.
    """
    if not options.cont:
        # Fresh (non-continuation) run: clear leftovers from prior runs.
        self._cleanup_debugdir_files()
        self._cleanup_results_dir()

    logging_manager.configure_logging(
        client_logging_config.ClientLoggingConfig(),
        results_dir=self.resultdir,
        verbose=options.verbose
    )
    logging.info("Writing results to %s", self.resultdir)

    # init_group_level needs the state
    self.control = os.path.realpath(control)
    self._is_continuation = options.cont
    self._current_step_ancestry = []
    self._next_step_index = 0
    self._load_state()

    # Harness choice (and its args) may be persisted across restarts.
    _harness = self.handle_persistent_option(options, "harness")
    _harness_args = self.handle_persistent_option(options, "harness_args")
    self.harness = harness.select(_harness, self, _harness_args)

    # set up the status logger
    def client_job_record_hook(entry):
        # Tag is the suffix of the global status filename, if any.
        msg_tag = ""
        if "." in self._logger.global_filename:
            msg_tag = self._logger.global_filename.split(".", 1)[1]
        # send the entry to the job harness
        message = "\n".join([entry.message] + entry.extra_message_lines)
        rendered_entry = self._logger.render_entry(entry)
        self.harness.test_status_detail(
            entry.status_code, entry.subdir, entry.operation, message,
            msg_tag, entry.fields
        )
        self.harness.test_status(rendered_entry, msg_tag)
        # send the entry to stdout, if it's enabled
        logging.info(rendered_entry)

    self._logger = base_job.status_logger(
        self, status_indenter(self), record_hook=client_job_record_hook,
        tap_writer=self._tap
    )
def _pre_record_init(self, control, options):
    """
    Set up only what self.record() requires.

    self.record() depends on self.resultdir, self._group_level,
    self.harness and self._logger, so this method prepares exactly
    those pieces of state and nothing more.
    """
    if not options.cont:
        self._cleanup_debugdir_files()
        self._cleanup_results_dir()

    logging_manager.configure_logging(
        client_logging_config.ClientLoggingConfig(),
        results_dir=self.resultdir, verbose=options.verbose)
    logging.info('Writing results to %s', self.resultdir)

    # init_group_level needs the state
    self._is_continuation = options.cont
    self.control = os.path.realpath(control)
    self._next_step_index = 0
    self._current_step_ancestry = []
    self._load_state()

    # The harness selection (and its arguments) can be persisted.
    selected = self.handle_persistent_option(options, 'harness')
    selected_args = self.handle_persistent_option(options, 'harness_args')
    self.harness = harness.select(selected, self, selected_args)

    def client_job_record_hook(entry):
        # Derive the message tag from the global status filename suffix.
        tag = ''
        if '.' in self._logger.global_filename:
            tag = self._logger.global_filename.split('.', 1)[1]
        full_message = '\n'.join([entry.message] +
                                 entry.extra_message_lines)
        rendered = self._logger.render_entry(entry)
        # Forward the entry to the job harness ...
        self.harness.test_status_detail(entry.status_code, entry.subdir,
                                        entry.operation, full_message,
                                        tag, entry.fields)
        self.harness.test_status(rendered, tag)
        # ... and echo it to stdout when logging is enabled.
        logging.info(rendered)

    # Wire the record hook into a fresh status logger.
    self._logger = base_job.status_logger(
        self, status_indenter(self),
        record_hook=client_job_record_hook,
        tap_writer=self._tap)
def __init__(self, control, args, resultdir, label, user, machines,
             client=False, parse_job='', ssh_user='root', ssh_port=22,
             ssh_pass='', group_name='', tag='',
             control_filename=SERVER_CONTROL_FILENAME):
    """
    Create a server side job object.

    @param control: The pathname of the control file.
    @param args: Passed to the control file.
    @param resultdir: Where to throw the results.
    @param label: Description of the job.
    @param user: Username for the job (email address).
    @param machines: The machines to run the job on.
    @param client: True if this is a client-side control file.
    @param parse_job: string, if supplied it is the job execution tag that
            the results will be passed through to the TKO parser with.
    @param ssh_user: The SSH username.  [root]
    @param ssh_port: The SSH port number.  [22]
    @param ssh_pass: The SSH passphrase, if needed.
    @param group_name: If supplied, this will be written out as
            host_group_name in the keyvals file for the parser.
    @param tag: The job execution tag from the scheduler.  [optional]
    @param control_filename: The filename where the server control file
            should be written in the results directory.
    """
    super(base_server_job, self).__init__(resultdir=resultdir)

    self.control = control
    self._uncollected_log_file = os.path.join(self.resultdir,
                                              'uncollected_logs')
    debugdir = os.path.join(self.resultdir, 'debug')
    if not os.path.exists(debugdir):
        os.mkdir(debugdir)

    # Fall back to the local account when no username was supplied.
    if user:
        self.user = user
    else:
        self.user = getpass.getuser()

    self.args = args
    self.machines = machines
    self._client = client
    self.warning_loggers = set()
    self.warning_manager = warning_manager()
    self._ssh_user = ssh_user
    self._ssh_port = ssh_port
    self._ssh_pass = ssh_pass
    self.tag = tag
    self.last_boot_tag = None
    self.hosts = set()
    self.drop_caches = False
    self.drop_caches_between_iterations = False
    self._control_filename = control_filename

    self.logging = logging_manager.get_logging_manager(
        manage_stdout_and_stderr=True, redirect_fds=True)
    subcommand.logging_manager_object = self.logging

    self.sysinfo = sysinfo.sysinfo(self.resultdir)
    self.profilers = profilers.profilers(self)

    job_data = {'label': label, 'user': user,
                'hostname': ','.join(machines),
                'drone': platform.node(),
                'status_version': str(self._STATUS_VERSION),
                'job_started': str(int(time.time()))}
    if group_name:
        job_data['host_group_name'] = group_name

    # only write these keyvals out on the first job in a resultdir
    if 'job_started' not in utils.read_keyval(self.resultdir):
        job_data.update(get_site_job_data(self))
        utils.write_keyval(self.resultdir, job_data)

    self._parse_job = parse_job
    # The TKO parser only handles single-machine jobs.
    self._using_parser = (self._parse_job and len(machines) <= 1)
    self.pkgmgr = packages.PackageManager(
        self.autodir, run_function_dargs={'timeout': 600})
    self.num_tests_run = 0
    self.num_tests_failed = 0

    self._register_subcommand_hooks()

    # these components aren't usable on the server
    self.bootloader = None
    self.harness = None

    # set up the status logger
    self._indenter = status_indenter()
    self._logger = base_job.status_logger(
        self, self._indenter, 'status.log', 'status.log',
        record_hook=server_job_record_hook(self))