def wrapper(machine):
    # Per-machine wrapper: record this machine's keyvals in the result
    # directory before invoking the wrapped function against it.
    self.push_execution_context(machine)
    os.chdir(self.resultdir)
    machine_data = {'hostname': machine,
                    'status_version': str(self._STATUS_VERSION)}
    utils.write_keyval(self.resultdir, machine_data)
    result = function(machine)
    return result

def wrapper(machine):
    # Per-machine wrapper for the single-machine case: also sets up and
    # tears down the TKO status-log parser for this machine's results.
    self._parse_job += "/" + machine
    self._using_parser = True
    self.machines = [machine]
    self.push_execution_context(machine)
    os.chdir(self.resultdir)
    utils.write_keyval(self.resultdir, {"hostname": machine})
    self.init_parser()
    result = function(machine)
    self.cleanup_parser()
    return result

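# The two wrapper variants above are inner functions of a closure that adapts
# a per-machine function for parallel execution: an outer helper captures
# `function` and job state and hands back a per-machine callable. The sketch
# below is a self-contained illustration of that pattern only; the helper
# names, keyval layout and dispatch loop are assumptions, not the project's
# actual API.
import os


def make_machine_wrapper(function, resultdir, status_version='1'):
    # Build a wrapper that records per-machine keyvals before delegating to
    # the wrapped function, mirroring the wrappers shown above.
    def wrapper(machine):
        machine_dir = os.path.join(resultdir, machine)
        if not os.path.isdir(machine_dir):
            os.makedirs(machine_dir)
        keyval = open(os.path.join(machine_dir, 'keyval'), 'a')
        try:
            keyval.write('hostname=%s\n' % machine)
            keyval.write('status_version=%s\n' % status_version)
        finally:
            keyval.close()
        return function(machine)
    return wrapper


def run_on_machines(function, machines, resultdir):
    # Sequential stand-in for the parallel dispatch the real job performs.
    wrapper = make_machine_wrapper(function, resultdir)
    return [wrapper(machine) for machine in machines]
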
def write_keyval(self, filename, dictionary, expected_filename=None,
                 type_tag=None):
    # Unit-test helper: stub out the file that utils.write_keyval() opens
    # for appending and return whatever it wrote to that file.
    if expected_filename is None:
        expected_filename = filename
    test_file = StringIO.StringIO()
    self.god.stub_function(test_file, "close")
    utils.open.expect_call(expected_filename, "a").and_return(test_file)
    test_file.close.expect_call()
    if type_tag is None:
        utils.write_keyval(filename, dictionary)
    else:
        utils.write_keyval(filename, dictionary, type_tag)
    return test_file.getvalue()

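# The test helper above captures whatever utils.write_keyval() writes to the
# stubbed file. For reference, the keyval format is plain 'key=value' lines
# appended to the target file, with a '{tag}' suffix on the key when a
# type_tag is passed (e.g. 'key{perf}=value'). The stand-in below reproduces
# only that observable format; the real utils.write_keyval also accepts a
# directory (writing to '<dir>/keyval') and validates key names, which this
# sketch omits.
def write_keyval_sketch(filename, dictionary, type_tag=None):
    keyval = open(filename, 'a')
    try:
        for key in sorted(dictionary):
            if type_tag is None:
                key_str = key
            else:
                key_str = '%s{%s}' % (key, type_tag)
            keyval.write('%s=%s\n' % (key_str, dictionary[key]))
    finally:
        keyval.close()
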
def record_summary(self, status_code, test_name, reason='', attributes=None,
                   distinguishing_attributes=(), child_test_ids=None):
    """Record a summary test result.

    :param status_code: status code string, see
            shared.log.is_valid_status()
    :param test_name: name of the test
    :param reason: (optional) string providing detailed reason for test
            outcome
    :param attributes: (optional) dict of string keyvals to associate with
            this result
    :param distinguishing_attributes: (optional) list of attribute names that
            should be used to distinguish identically-named test results.
            These attributes should be present in the attributes parameter.
            This is used to generate user-friendly subdirectory names.
    :param child_test_ids: (optional) list of test indices for test results
            used in generating this result.
    """
    subdirectory_name_parts = [test_name]
    for attribute in distinguishing_attributes:
        assert attributes
        assert attribute in attributes, '%s not in %s' % (attribute,
                                                          attributes)
        subdirectory_name_parts.append(attributes[attribute])
    base_subdirectory_name = '.'.join(subdirectory_name_parts)

    subdirectory = self._unique_subdirectory(base_subdirectory_name)
    subdirectory_path = os.path.join(self.resultdir, subdirectory)
    os.mkdir(subdirectory_path)

    self.record(status_code, subdirectory, test_name,
                status=reason, optional_fields={'is_summary': True})

    if attributes:
        utils.write_keyval(subdirectory_path, attributes)

    if child_test_ids:
        ids_string = ','.join(str(test_id) for test_id in child_test_ids)
        summary_data = {'child_test_ids': ids_string}
        utils.write_keyval(os.path.join(subdirectory_path, 'summary_data'),
                           summary_data)

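# Hypothetical call showing how distinguishing_attributes shape the summary
# subdirectory: with the values below the base name becomes
# 'power_Consumption.link', made unique via _unique_subdirectory(), and the
# attributes dict is written out as keyvals inside it. The job instance,
# test name and attribute values are made up for illustration.
job.record_summary('GOOD', 'power_Consumption',
                   attributes={'board': 'link', 'watts': '3.2'},
                   distinguishing_attributes=('board',),
                   child_test_ids=[17, 18])
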
def __init__(self, control, args, resultdir, label, user, machines,
             client=False, parse_job='',
             ssh_user='root', ssh_port=22, ssh_pass='',
             group_name='', tag='',
             control_filename=SERVER_CONTROL_FILENAME):
    """
    Create a server side job object.

    @param control: The pathname of the control file.
    @param args: Passed to the control file.
    @param resultdir: Where to throw the results.
    @param label: Description of the job.
    @param user: Username for the job (email address).
    @param machines: A list of hostnames of the machines to run the job on.
    @param client: True if this is a client-side control file.
    @param parse_job: string, if supplied it is the job execution tag that
            the results will be passed through to the TKO parser with.
    @param ssh_user: The SSH username. [root]
    @param ssh_port: The SSH port number. [22]
    @param ssh_pass: The SSH passphrase, if needed.
    @param group_name: If supplied, this will be written out as
            host_group_name in the keyvals file for the parser.
    @param tag: The job execution tag from the scheduler. [optional]
    @param control_filename: The filename where the server control file
            should be written in the results directory.
    """
    super(base_server_job, self).__init__(resultdir=resultdir)

    path = os.path.dirname(__file__)
    self.control = control
    self._uncollected_log_file = os.path.join(self.resultdir,
                                              'uncollected_logs')
    debugdir = os.path.join(self.resultdir, 'debug')
    if not os.path.exists(debugdir):
        os.mkdir(debugdir)

    if user:
        self.user = user
    else:
        self.user = getpass.getuser()

    self.args = args
    self.machines = machines
    self._client = client
    self.warning_loggers = set()
    self.warning_manager = warning_manager()
    self._ssh_user = ssh_user
    self._ssh_port = ssh_port
    self._ssh_pass = ssh_pass
    self.tag = tag
    self.last_boot_tag = None
    self.hosts = set()
    self.drop_caches = False
    self.drop_caches_between_iterations = False
    self._control_filename = control_filename

    self.logging = logging_manager.get_logging_manager(
            manage_stdout_and_stderr=True, redirect_fds=True)
    subcommand.logging_manager_object = self.logging

    self.sysinfo = sysinfo.sysinfo(self.resultdir)
    self.profilers = profilers.profilers(self)

    job_data = {'label': label, 'user': user,
                'hostname': ','.join(machines),
                'drone': platform.node(),
                'status_version': str(self._STATUS_VERSION),
                'job_started': str(int(time.time()))}
    if group_name:
        job_data['host_group_name'] = group_name

    # only write these keyvals out on the first job in a resultdir
    if 'job_started' not in utils.read_keyval(self.resultdir):
        job_data.update(get_site_job_data(self))
        utils.write_keyval(self.resultdir, job_data)

    self._parse_job = parse_job
    self._using_parser = (self._parse_job and len(machines) <= 1)
    self.pkgmgr = packages.PackageManager(
            self.autodir, run_function_dargs={'timeout': 600})
    self.num_tests_run = 0
    self.num_tests_failed = 0

    self._register_subcommand_hooks()

    # these components aren't usable on the server
    self.bootloader = None
    self.harness = None

    # set up the status logger
    self._indenter = status_indenter()
    self._logger = base_job.status_logger(
            self, self._indenter, 'status.log', 'status.log',
            record_hook=server_job_record_hook(self))

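# Hypothetical direct instantiation of the job object documented above; in
# practice the autoserv entry point constructs it, and all paths, hostnames
# and labels here are made up for illustration.
job = base_server_job(control='/tmp/control.srv',
                      args=[],
                      resultdir='/tmp/results/1-debug_user',
                      label='example-job',
                      user='debug_user@example.com',
                      machines=['host1.example.com', 'host2.example.com'],
                      client=False,
                      parse_job='',
                      ssh_user='root',
                      ssh_port=22,
                      group_name='example-group',
                      tag='1-debug_user/host1')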