def load_from_dir(dir, verify_ident=None):
    # try and load the booted kernel version
    attributes = False
    i = 1
    build_dir = os.path.join(dir, "build")
    while True:
        if not os.path.exists(build_dir):
            break
        build_log = os.path.join(build_dir, "debug", "build_log")
        attributes = kernel.load_from_build_log(build_log)
        if attributes:
            break
        i += 1
        build_dir = os.path.join(dir, "build.%d" % (i))

    if not attributes:
        if verify_ident:
            base = verify_ident
        else:
            base = kernel.load_from_sysinfo(dir)
        patches = []
        hashes = []
    else:
        base, patches, hashes = attributes
    tko_utils.dprint("kernel.__init__() found kernel version %s" % base)

    # compute the kernel hash
    if base == "UNKNOWN":
        kernel_hash = "UNKNOWN"
    else:
        kernel_hash = kernel.compute_hash(base, hashes)

    return {"base": base, "patches": patches, "kernel_hash": kernel_hash}

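# Usage sketch (not from the original module; the path and version below are
# invented): load_from_dir() probes <dir>/build, <dir>/build.2, ... for a
# parsable build_log, then falls back to verify_ident or the sysinfo files.
#
#   info = load_from_dir("/results/123-job/host1", verify_ident="3.10.0")
#   # => {"base": "3.10.0", "patches": [], "kernel_hash": "..."}
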
def put_back_line_and_abort(
        line_buffer, line, indent, subdir, timestamp, reason):
    tko_utils.dprint("Unexpected indent regression, aborting")
    line_buffer.put_back(line)
    abort = parser.make_dummy_abort(indent, subdir, subdir, timestamp, reason)
    line_buffer.put_back(abort)

def parse_line_into_dicts(line, attr_dict, perf_dict):
    key, val_type, value = "", "", ""

    # figure out what the key, value and keyval type are
    typed_match = re.search("^([^=]*)\{(\w*)\}=(.*)$", line)
    if typed_match:
        key, val_type, value = typed_match.groups()
    else:
        # old-fashioned untyped match, assume perf
        untyped_match = re.search("^([^=]*)=(.*)$", line)
        if untyped_match:
            key, value = untyped_match.groups()
            val_type = "perf"

    # parse the actual value into a dict
    try:
        if val_type == "attr":
            attr_dict[key] = value
        elif val_type == "perf":
            perf_dict[key] = float(value)
        else:
            raise ValueError
    except ValueError:
        msg = ("WARNING: line '%s' found in test "
               "iteration keyval could not be parsed")
        msg %= line
        tko_utils.dprint(msg)

def parse_test(cls, job, subdir, testname, status, reason, test_kernel,
               started_time, finished_time, existing_instance=None):
    """Given a job and the basic metadata about the test that can be
    extracted from the status logs, parse the test keyval files and
    use it to construct a complete test instance."""
    tko_utils.dprint("parsing test %s %s" % (subdir, testname))

    if subdir:
        # grab iterations from the results keyval
        iteration_keyval = os.path.join(job.dir, subdir,
                                        "results", "keyval")
        iterations = cls.load_iterations(iteration_keyval)

        # grab test attributes from the subdir keyval
        test_keyval = os.path.join(job.dir, subdir, "keyval")
        attributes = test.load_attributes(test_keyval)
    else:
        iterations = []
        attributes = {}

    # grab test+host attributes from the host keyval
    host_keyval = cls.parse_host_keyval(job.dir, job.machine)
    attributes.update(dict(("host-%s" % k, v)
                           for k, v in host_keyval.iteritems()))

    if existing_instance:
        def constructor(*args, **dargs):
            existing_instance.__init__(*args, **dargs)
            return existing_instance
    else:
        constructor = cls

    return constructor(subdir, testname, status, reason, test_kernel,
                       job.machine, started_time, finished_time,
                       iterations, attributes, [])

def load_from_dir(cls, dir):
    keyval = cls.read_keyval(dir)
    tko_utils.dprint(str(keyval))

    user = keyval.get("user", None)
    label = keyval.get("label", None)
    queued_time = tko_utils.get_timestamp(keyval, "job_queued")
    started_time = tko_utils.get_timestamp(keyval, "job_started")
    finished_time = tko_utils.get_timestamp(keyval, "job_finished")
    machine = cls.determine_hostname(keyval, dir)
    machine_group = cls.determine_machine_group(machine, dir)
    machine_owner = keyval.get("owner", None)

    aborted_by = keyval.get("aborted_by", None)
    aborted_at = tko_utils.get_timestamp(keyval, "aborted_on")

    return {"user": user, "label": label, "machine": machine,
            "queued_time": queued_time, "started_time": started_time,
            "finished_time": finished_time, "machine_owner": machine_owner,
            "machine_group": machine_group, "aborted_by": aborted_by,
            "aborted_on": aborted_at, "keyval_dict": keyval}

def parse_args():
    # build up our options parser and parse sys.argv
    parser = optparse.OptionParser()
    parser.add_option("-m", help="Send mail for FAILED tests",
                      dest="mailit", action="store_true")
    parser.add_option("-r", help="Reparse the results of a job",
                      dest="reparse", action="store_true")
    parser.add_option("-o", help="Parse a single results directory",
                      dest="singledir", action="store_true")
    parser.add_option("-l", help=("Levels of subdirectories to include "
                                  "in the job name"),
                      type="int", dest="level", default=1)
    parser.add_option("-n", help="No blocking on an existing parse",
                      dest="noblock", action="store_true")
    parser.add_option("-s", help="Database server hostname",
                      dest="db_host", action="store")
    parser.add_option("-u", help="Database username", dest="db_user",
                      action="store")
    parser.add_option("-p", help="Database password", dest="db_pass",
                      action="store")
    parser.add_option("-d", help="Database name", dest="db_name",
                      action="store")
    parser.add_option("--write-pidfile",
                      help="write pidfile (.parser_execute)",
                      dest="write_pidfile", action="store_true",
                      default=False)
    options, args = parser.parse_args()

    # we need a results directory
    if len(args) == 0:
        tko_utils.dprint("ERROR: at least one results directory must "
                         "be provided")
        parser.print_help()
        sys.exit(1)

    # pass the options back
    return options, args

def parse_partial_test(cls, job, subdir, testname, reason, test_kernel,
                       started_time):
    """Given a job and the basic metadata available when a test is
    started, create a test instance representing the partial result.
    Assume that since the test is not complete there are no results files
    actually available for parsing."""
    tko_utils.dprint("parsing partial test %s %s" % (subdir, testname))

    return cls(subdir, testname, "RUNNING", reason, test_kernel,
               job.machine, started_time, None, [], {}, [])

def process_lines(self, lines):
    """ Feed 'lines' into the parser state machine, and return
    a list of all the new test results produced."""
    self.line_buffer.put_multiple(lines)
    try:
        return self.state.next()
    except StopIteration:
        msg = ("WARNING: parser was called to process status "
               "lines after it was end()ed\n"
               "Current traceback:\n" +
               traceback.format_exc() +
               "\nCurrent stack:\n" +
               "".join(traceback.format_stack()))
        tko_utils.dprint(msg)
        return []

def determine_machine_group(cls, hostname, job_dir):
    machine_groups = set()
    for individual_hostname in hostname.split(","):
        host_keyval = models.test.parse_host_keyval(job_dir,
                                                    individual_hostname)
        if not host_keyval:
            tko_utils.dprint('Unable to parse host keyval for %s' %
                             individual_hostname)
        elif "platform" in host_keyval:
            machine_groups.add(host_keyval["platform"])
    machine_group = ",".join(sorted(machine_groups))
    tko_utils.dprint("MACHINE GROUP: %s" % machine_group)
    return machine_group

def _throttle_result_size(path):
    """Limit the total size of test results for the given path.

    @param path: Path of the result directory.
    """
    if not result_runner.ENABLE_RESULT_THROTTLING:
        tko_utils.dprint(
                'Result throttling is not enabled. Skipping throttling %s' %
                path)
        return

    max_result_size_KB = control_data.DEFAULT_MAX_RESULT_SIZE_KB
    # A client side test saves its test control to file `control`, while a
    # server side test saves its test control to file `control.srv`.
    for control_file in ['control', 'control.srv']:
        control = os.path.join(path, control_file)
        try:
            max_result_size_KB = control_data.parse_control(
                    control, raise_warnings=False).max_result_size_KB
            # Any value different from the default is considered to be the
            # one set in the test control file.
            if max_result_size_KB != control_data.DEFAULT_MAX_RESULT_SIZE_KB:
                break
        except IOError as e:
            tko_utils.dprint('Failed to access %s. Error: %s\nDetails %s' %
                             (control, e, traceback.format_exc()))
        except control_data.ControlVariableException as e:
            tko_utils.dprint('Failed to parse %s. Error: %s\nDetails %s' %
                             (control, e, traceback.format_exc()))

    try:
        result_utils.execute(path, max_result_size_KB)
    except:
        tko_utils.dprint('Failed to throttle result size of %s.\nDetails %s' %
                         (path, traceback.format_exc()))

def export_tko_job_to_file(job, jobname, filename):
    """Exports the tko job to disk file.

    @param job: database object.
    @param jobname: the job name as string.
    @param filename: The path of the file the serialized job is written to.
    """
    try:
        from autotest_lib.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        serializer.serialize_to_binary(job, jobname, filename)
    except ImportError:
        tko_utils.dprint("WARNING: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

def end(self, lines=[]):
    """ Feed 'lines' into the parser state machine, signal to the
    state machine that no more lines are forthcoming, and then return
    a list of all the new test results produced."""
    self.line_buffer.put_multiple(lines)
    # run the state machine to clear out the buffer
    self.finished = True
    try:
        return self.state.next()
    except StopIteration:
        msg = ("WARNING: parser was end()ed multiple times\n"
               "Current traceback:\n" +
               traceback.format_exc() +
               "\nCurrent stack:\n" +
               "".join(traceback.format_stack()))
        tko_utils.dprint(msg)
        return []

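# Driver sketch for the two entry points above, mirroring how parse_one
# below feeds the state machine. Incremental use of process_lines() is an
# assumption on my part; parse_one itself only calls start() and end().
# The results path is invented.
#
#   parser = status_lib.parser(status_version)
#   job = parser.make_job("/results/123-job/host1")
#   parser.start(job)
#   tests = []
#   for status_line in open("/results/123-job/host1/status.log"):
#       tests.extend(parser.process_lines([status_line]))
#   tests.extend(parser.end())
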
def find_hostname(path):
    hostname = os.path.join(path, "sysinfo", "hostname")
    try:
        machine = open(hostname).readline().rstrip()
        return machine
    except Exception:
        tko_utils.dprint("Could not read a hostname from "
                         "sysinfo/hostname")

    uname = os.path.join(path, "sysinfo", "uname_-a")
    try:
        machine = open(uname).readline().split()[1]
        return machine
    except Exception:
        tko_utils.dprint("Could not read a hostname from "
                         "sysinfo/uname_-a")

    raise NoHostnameError("Unable to find a machine name")

def parse_leaf_path(db, path, level, parse_options):
    """Parse a leaf path.

    @param db: database handle.
    @param path: The path to the results to be parsed.
    @param level: Integer, level of subdirectories to include in the job
                  name.
    @param parse_options: _ParseOptions instance.

    @returns: The job name of the parsed job, e.g. '123-chromeos-test/host1'
    """
    job_elements = path.split("/")[-level:]
    jobname = "/".join(job_elements)
    try:
        db.run_with_retry(parse_one, db, jobname, path, parse_options)
    except Exception as e:
        tko_utils.dprint("Error parsing leaf path: %s\nException:\n%s\n%s" %
                         (path, e, traceback.format_exc()))
    return jobname

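# A minimal sketch of the jobname derivation used in parse_leaf_path above
# (the path is invented): with level=2, the last two path components form
# the job name.
path = "/usr/local/autotest/results/123-chromeos-test/host1"
level = 2
jobname = "/".join(path.split("/")[-level:])
assert jobname == "123-chromeos-test/host1"
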
def put_back_line_and_abort(line_buffer, line, indent, subdir, timestamp,
                            reason):
    """ Appends a line to the line buffer and aborts.

    @param line_buffer: A line_buffer object.
    @param line: A line to append to the line buffer.
    @param indent: The number of indentation levels.
    @param subdir: The subdirectory name.
    @param timestamp: The timestamp value.
    @param reason: The reason string.
    """
    tko_utils.dprint('Unexpected indent: aborting log parse')
    line_buffer.put_back(line)
    abort = parser.make_dummy_abort(indent, subdir, subdir, timestamp, reason)
    line_buffer.put_back(abort)

def determine_hostname(cls, keyval, job_dir):
    host_group_name = keyval.get("host_group_name", None)
    machine = keyval.get("hostname", "")
    is_multimachine = "," in machine

    # determine what hostname to use
    if host_group_name:
        if is_multimachine or not machine:
            tko_utils.dprint("Using host_group_name %r instead of "
                             "machine name." % host_group_name)
            machine = host_group_name
    elif is_multimachine:
        try:
            machine = job.find_hostname(job_dir)  # find a unique hostname
        except NoHostnameError:
            pass  # just use the comma-separated name

    tko_utils.dprint("MACHINE NAME: %s" % machine)
    return machine

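# Behavior sketch for determine_hostname with invented keyvals:
#
#   keyval = {"hostname": "host1,host2", "host_group_name": "my_group"}
#   # -> "my_group" (host_group_name wins for multi-machine jobs)
#
#   keyval = {"hostname": "host1,host2"}
#   # -> a unique hostname recovered via job.find_hostname(job_dir),
#   #    or the comma-separated name if none can be found
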
def record_parsing(processed_jobs, duration_secs):
    """Record the time spent on parsing to metadata db.

    @param processed_jobs: A set of job names of the parsed jobs.
            set(['123-chromeos-test/host1', '123-chromeos-test/host2'])
    @param duration_secs: Total time spent on parsing, in seconds.
    """
    for job_name in processed_jobs:
        job_id, hostname = tko_utils.get_afe_job_id_and_hostname(job_name)
        if not job_id or not hostname:
            tko_utils.dprint('ERROR: can not parse job name %s, '
                             'will not send duration to metadata db.' %
                             job_name)
            continue
        job_overhead.record_state_duration(
                job_id, hostname, job_overhead.STATUS.PARSING, duration_secs)

def parse_host_keyval(job_dir, hostname):
    """ Parse host keyvals.

    @param job_dir: The string directory name of the associated job.
    @param hostname: The string hostname.

    @return A dictionary representing the host keyvals.
    """
    keyval_path = os.path.join('host_keyvals', hostname)
    # The host keyval is <job_dir>/host_keyvals/<hostname> if it exists.
    # Otherwise we're running on Skylab which uses hostinfo.
    if not os.path.exists(os.path.join(job_dir, keyval_path)):
        tko_utils.dprint("trying to use hostinfo")
        try:
            return _parse_hostinfo_keyval(job_dir, hostname)
        except Exception as e:
            # If anything goes wrong, log it and just use the old flow.
            tko_utils.dprint("tried using hostinfo: %s" % e)

    return test._parse_keyval(job_dir, keyval_path)

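import os

# Layout sketch for the legacy lookup above (POSIX paths; the job_dir and
# hostname are invented): the host keyval file is expected at
# <job_dir>/host_keyvals/<hostname>.
keyval_path = os.path.join("host_keyvals", "host1")
full_path = os.path.join("/results/123-job", keyval_path)
assert full_path == "/results/123-job/host_keyvals/host1"
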
def load_from_dir(cls, dir):
    keyval = cls.read_keyval(dir)
    tko_utils.dprint(str(keyval))

    user = keyval.get("user", None)
    label = keyval.get("label", None)
    queued_time = tko_utils.get_timestamp(keyval, "job_queued")
    started_time = tko_utils.get_timestamp(keyval, "job_started")
    finished_time = tko_utils.get_timestamp(keyval, "job_finished")
    machine = cls.determine_hostname(keyval, dir)
    machine_group = cls.determine_machine_group(machine, dir)
    machine_owner = keyval.get("owner", None)

    aborted_by = keyval.get("aborted_by", None)
    aborted_at = tko_utils.get_timestamp(keyval, "aborted_on")

    return {"user": user, "label": label, "machine": machine,
            "queued_time": queued_time, "started_time": started_time,
            "finished_time": finished_time, "machine_owner": machine_owner,
            "machine_group": machine_group, "aborted_by": aborted_by,
            "aborted_on": aborted_at}

def _throttle_result_size(path):
    """Limit the total size of test results for the given path.

    @param path: Path of the result directory.
    """
    if not result_runner.ENABLE_RESULT_THROTTLING:
        tko_utils.dprint(
                'Result throttling is not enabled. Skipping throttling %s' %
                path)
        return

    max_result_size_KB = _max_result_size_from_control(path)
    if max_result_size_KB is None:
        max_result_size_KB = control_data.DEFAULT_MAX_RESULT_SIZE_KB

    try:
        result_utils.execute(path, max_result_size_KB)
    except:
        tko_utils.dprint(
                'Failed to throttle result size of %s.\nDetails %s' %
                (path, traceback.format_exc()))

def determine_machine_group(cls, hostname, job_dir):
    machine_groups = set()
    for individual_hostname in hostname.split(","):
        host_keyval = models.test.parse_host_keyval(job_dir,
                                                    individual_hostname)
        if not host_keyval:
            tko_utils.dprint('Unable to parse host keyval for %s' %
                             individual_hostname)
        elif 'labels' in host_keyval:
            # Use the board label as the machine group. This is to avoid the
            # confusion of multiple boards mapping to the same platform in
            # wmatrix. With this change, wmatrix will group tests with the
            # same board, rather than the same platform.
            labels = host_keyval['labels'].split(',')
            board_labels = [l[8:] for l in labels
                            if l.startswith('board%3A')]
            if board_labels and len(board_labels) == 1:
                machine_groups.add(board_labels[0])
            else:
                error = ('Failed to retrieve board label from host labels: '
                         '%s' % host_keyval['labels'])
                tko_utils.dprint(error)
                raise BoardLabelError(error)
        elif "platform" in host_keyval:
            machine_groups.add(host_keyval["platform"])
    machine_group = ",".join(sorted(machine_groups))
    tko_utils.dprint("MACHINE GROUP: %s" % machine_group)
    return machine_group

def load_from_dir(cls, dir):
    keyval = cls.read_keyval(dir)
    tko_utils.dprint(str(keyval))

    user = keyval.get("user", None)
    label = keyval.get("label", None)
    host_group_name = keyval.get("host_group_name", None)
    machine = keyval.get("hostname", None)
    if not host_group_name and machine and "," in machine:
        try:
            machine = job.find_hostname(dir)  # find a unique hostname
        except NoHostnameError:
            pass  # just use the comma-separated name
    queued_time = tko_utils.get_timestamp(keyval, "job_queued")
    started_time = tko_utils.get_timestamp(keyval, "job_started")
    finished_time = tko_utils.get_timestamp(keyval, "job_finished")
    machine_owner = keyval.get("owner", None)

    aborted_by = keyval.get("aborted_by", None)
    aborted_at = tko_utils.get_timestamp(keyval, "aborted_on")

    tko_utils.dprint("MACHINE NAME: %s" % machine)
    if host_group_name and ((machine and "," in machine) or not machine):
        tko_utils.dprint("Using host_group_name %r instead of "
                         "machine name." % host_group_name)
        machine = host_group_name

    return {"user": user, "label": label, "machine": machine,
            "queued_time": queued_time, "started_time": started_time,
            "finished_time": finished_time, "machine_owner": machine_owner,
            "aborted_by": aborted_by, "aborted_on": aborted_at}

def parse_args():
    # build up our options parser and parse sys.argv
    parser = optparse.OptionParser()
    parser.add_option("-m", help="Send mail for FAILED tests",
                      dest="mailit", action="store_true")
    parser.add_option("-r", help="Reparse the results of a job",
                      dest="reparse", action="store_true")
    parser.add_option("-o", help="Parse a single results directory",
                      dest="singledir", action="store_true")
    parser.add_option("-l", help=("Levels of subdirectories to include "
                                  "in the job name"),
                      type="int", dest="level", default=1)
    parser.add_option("-n", help="No blocking on an existing parse",
                      dest="noblock", action="store_true")
    parser.add_option("-s", help="Database server hostname",
                      dest="db_host", action="store")
    parser.add_option("-u", help="Database username", dest="db_user",
                      action="store")
    parser.add_option("-p", help="Database password", dest="db_pass",
                      action="store")
    parser.add_option("-d", help="Database name", dest="db_name",
                      action="store")
    parser.add_option("-P", help="Run site post-processing",
                      dest="site_do_post", action="store_true",
                      default=False)
    parser.add_option("--write-pidfile",
                      help="write pidfile (.parser_execute)",
                      dest="write_pidfile", action="store_true",
                      default=False)
    options, args = parser.parse_args()

    # we need a results directory
    if len(args) == 0:
        tko_utils.dprint("ERROR: at least one results directory must "
                         "be provided")
        parser.print_help()
        sys.exit(1)

    # pass the options back
    return options, args

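# Invocation sketch (hypothetical command line; the script name and results
# path are assumptions): reparse a single results directory, keeping two
# path levels in the job name.
#
#   tko/parse -r -o -l 2 /usr/local/autotest/results/123-chromeos-test
#
# At least one results directory argument is required; otherwise the parser
# prints its help text and exits.
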
def print_ignored_lines():
    """ Prints the ignored_lines using the tko_utils.dprint method. """
    tko_utils.dprint('The following lines were ignored:')
    for line in ignored_lines:
        tko_utils.dprint(line)
    tko_utils.dprint('---------------------------------')

def _match_existing_tests(db, job):
    """Find entries in the DB corresponding to the job's tests, update job.

    @return: Any unmatched tests in the db.
    """
    old_job_idx = job.job_idx
    raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                              {"job_idx": old_job_idx})
    if raw_old_tests:
        old_tests = dict(((test, subdir), test_idx)
                         for test_idx, subdir, test in raw_old_tests)
    else:
        old_tests = {}

    for test in job.tests:
        test_idx = old_tests.pop((test.testname, test.subdir), None)
        if test_idx is not None:
            test.test_idx = test_idx
        else:
            tko_utils.dprint("! Reparse returned new test "
                             "testname=%r subdir=%r" %
                             (test.testname, test.subdir))
    return old_tests

def _max_result_size_from_control(path):
    """Gets the max result size set in a control file, if any.

    If no override is found, returns None.
    """
    for control_file in _HARDCODED_CONTROL_FILE_NAMES:
        control = os.path.join(path, control_file)
        if not os.path.exists(control):
            continue

        try:
            max_result_size_KB = control_data.parse_control(
                    control, raise_warnings=False).max_result_size_KB
            if max_result_size_KB != control_data.DEFAULT_MAX_RESULT_SIZE_KB:
                return max_result_size_KB
        except IOError as e:
            tko_utils.dprint(
                    'Failed to access %s. Error: %s\nDetails %s' %
                    (control, e, traceback.format_exc()))
        except control_data.ControlVariableException as e:
            tko_utils.dprint(
                    'Failed to parse %s. Error: %s\nDetails %s' %
                    (control, e, traceback.format_exc()))
    return None

def parse_line_into_dicts(line, attr_dict, perf_dict):
    key, val_type, value = "", "", ""

    # Figure out what the key, value and keyval type are.
    typed_match = re.search('^([^=]*)\{(\w*)\}=(.*)$', line)
    if typed_match:
        key, val_type, value = typed_match.groups()
    else:
        # Old-fashioned untyped match, assume perf.
        untyped_match = re.search('^([^=]*)=(.*)$', line)
        if untyped_match:
            key, value = untyped_match.groups()
            val_type = 'perf'

    # Parse the actual value into a dict.
    try:
        if val_type == 'attr':
            attr_dict[key] = value
        elif val_type == 'perf':
            # First check if the value is in the form 'mean+-deviation'.
            if isinstance(value, str):
                r = re.compile('(\d+.?\d*)\+-(\d+.?\d*)')
                match = r.match(value)
                if match:
                    perf_dict[key] = float(match.group(1))
                    perf_dict['%s_dev' % key] = float(match.group(2))
                    return
            # Otherwise, try to interpret it as a regular float.
            perf_dict[key] = float(value)
        else:
            raise ValueError
    except ValueError:
        msg = ('WARNING: line "%s" found in test '
               'iteration keyval could not be parsed')
        msg %= line
        tko_utils.dprint(msg)

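# A minimal sketch of the three keyval forms handled above (the sample
# lines are invented; assumes parse_line_into_dicts and this module's
# imports): a typed attr, a typed perf value in 'mean+-deviation' form,
# and a legacy untyped value that is assumed to be perf.
attrs, perf = {}, {}
parse_line_into_dicts("milestone{attr}=M100", attrs, perf)
parse_line_into_dicts("throughput{perf}=102.7+-3.1", attrs, perf)
parse_line_into_dicts("latency=12.5", attrs, perf)  # untyped, assumed perf
assert attrs == {"milestone": "M100"}
assert perf == {"throughput": 102.7, "throughput_dev": 3.1, "latency": 12.5}
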
def parse_partial_test(cls, job, subdir, testname, reason, test_kernel,
                       started_time):
    """ Create a test instance representing a partial test result.

    Given a job and the basic metadata available when a test is
    started, create a test instance representing the partial result.
    Assume that since the test is not complete there are no results files
    actually available for parsing.

    @param job: A job object.
    @param subdir: The string subdirectory name for the given test.
    @param testname: The name of the test.
    @param reason: The reason string for the test.
    @param test_kernel: The kernel of the test.
    @param started_time: The start time of the test.

    @return A test instance that has partial test information.
    """
    tko_utils.dprint('parsing partial test %s %s' % (subdir, testname))

    return cls(subdir, testname, 'RUNNING', reason, test_kernel,
               job.machine, started_time, None, [], {}, [], [])

def upload_test(job, test):
    """Uploads any perf data associated with a test to the perf dashboard.

    @param job: An autotest tko.models.job object that is associated with
        the given |test|.
    @param test: An autotest tko.models.test object that may or may not be
        associated with measured perf data.
    """
    if not test.perf_values:
        return

    # Aggregate values from multiple iterations together.
    perf_data = _aggregate_iterations(test.perf_values)

    # Compute averages and standard deviations as needed for measured perf
    # values that exist in multiple iterations. Ultimately, we only upload a
    # single measurement (with standard deviation) for every unique measured
    # perf metric.
    _compute_avg_stddev(perf_data)

    # Format the perf data for the upload, then upload it.
    test_name = test.testname
    platform_name = job.machine_group
    hardware_id = test.attributes.get('hwid', '')
    hardware_hostname = test.machine
    variant_name = test.attributes.get(constants.VARIANT_KEY, None)
    config_data = _parse_config_file(_PRESENTATION_CONFIG_FILE)
    try:
        shadow_config_data = _parse_config_file(
                _PRESENTATION_SHADOW_CONFIG_FILE)
        config_data.update(shadow_config_data)
    except ValueError as e:
        tko_utils.dprint('Failed to parse config file %s: %s.' %
                         (_PRESENTATION_SHADOW_CONFIG_FILE, e))
    try:
        cros_version, chrome_version = _get_version_numbers(test.attributes)
        presentation_info = _gather_presentation_info(config_data, test_name)
        formatted_data = _format_for_upload(
                platform_name, cros_version, chrome_version, hardware_id,
                variant_name, hardware_hostname, perf_data,
                presentation_info)
        _send_to_dashboard(formatted_data)
    except PerfUploadingError as e:
        tko_utils.dprint('Error when uploading perf data to the perf '
                         'dashboard for test %s: %s' % (test_name, e))
    else:
        tko_utils.dprint('Successfully uploaded perf data to the perf '
                         'dashboard for test %s.' % test_name)

def upload_test(job, test, jobname):
    """Uploads any perf data associated with a test to the perf dashboard.

    @param job: An autotest tko.models.job object that is associated with
        the given |test|.
    @param test: An autotest tko.models.test object that may or may not be
        associated with measured perf data.
    @param jobname: A string uniquely identifying the test run; this enables
        linking back from a test result to the logs of the test run.
    """
    # Format the perf data for the upload, then upload it.
    test_name = test.testname
    platform_name = job.machine_group
    # Append '.arc' to the platform name if the suffix of the control
    # filename is '.arc'.
    if job.label and re.match('.*\.arc$', job.label):
        platform_name += '.arc'
    hardware_id = test.attributes.get('hwid', '')
    hardware_hostname = test.machine
    variant_name = test.attributes.get(constants.VARIANT_KEY, None)
    config_data = _parse_config_file(_PRESENTATION_CONFIG_FILE)
    try:
        shadow_config_data = _parse_config_file(
                _PRESENTATION_SHADOW_CONFIG_FILE)
        config_data.update(shadow_config_data)
    except ValueError as e:
        tko_utils.dprint('Failed to parse config file %s: %s.' %
                         (_PRESENTATION_SHADOW_CONFIG_FILE, e))
    try:
        cros_version, chrome_version = _get_version_numbers(test.attributes)
        presentation_info = _gather_presentation_info(config_data, test_name)
        formatted_data = _format_for_upload(
                platform_name, cros_version, chrome_version, hardware_id,
                variant_name, hardware_hostname, test.perf_values,
                presentation_info, jobname)
        _send_to_dashboard(formatted_data)
    except PerfUploadingError as e:
        tko_utils.dprint('Error when uploading perf data to the perf '
                         'dashboard for test %s: %s' % (test_name, e))
    else:
        tko_utils.dprint('Successfully uploaded perf data to the perf '
                         'dashboard for test %s.' % test_name)

def determine_machine_group(cls, hostname, job_dir):
    machine_groups = set()
    for individual_hostname in hostname.split(","):
        host_keyval = models.test.parse_host_keyval(job_dir,
                                                    individual_hostname)
        if not host_keyval:
            tko_utils.dprint('Unable to parse host keyval for %s' %
                             individual_hostname)
        elif 'labels' in host_keyval:
            # Use the `model` label as the machine group. This is to avoid
            # the confusion of multiple boards mapping to the same platform
            # in wmatrix. With this change, wmatrix will group tests with
            # the same model, rather than the same platform.
            labels = host_keyval['labels'].split(',')
            board_labels = [l[8:] for l in labels
                            if l.startswith('model%3A')]
            # If the host doesn't have a `model:` label, fall back to the
            # `board:` label.
            if not board_labels:
                board_labels = [l[8:] for l in labels
                                if l.startswith('board%3A')]
            if board_labels:
                # Multiple board/model labels aren't supposed to happen,
                # but let's report something sane rather than just failing.
                machine_groups.add(','.join(board_labels))
            else:
                error = ('Failed to retrieve board label from host labels: '
                         '%s' % host_keyval['labels'])
                tko_utils.dprint(error)
                raise BoardLabelError(error)
        elif "platform" in host_keyval:
            machine_groups.add(host_keyval["platform"])
    machine_group = ",".join(sorted(machine_groups))
    tko_utils.dprint("MACHINE GROUP: %s" % machine_group)
    return machine_group

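# Label-decoding sketch for the grouping above (the keyval value is
# invented): host labels are comma-separated and use '%3A' as an encoded
# ':', and both 'model%3A' and 'board%3A' are 8-character prefixes.
labels = "board%3Aoctopus,model%3Aphaser360,pool%3Asuites".split(",")
model_labels = [l[8:] for l in labels if l.startswith("model%3A")]
board_labels = [l[8:] for l in labels if l.startswith("board%3A")]
assert model_labels == ["phaser360"]
assert board_labels == ["octopus"]
# determine_machine_group would report 'phaser360' as the machine group.
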
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """ Parse a single job. Optionally send email on failure. """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    old_tests = []
    if reparse and old_job_idx:
        tko_utils.dprint("! Deleting old copy of job results to "
                         "reparse it")
        old_tests = find_old_tests(db, old_job_idx)
        db.delete_job(jobname)
    if db.find_job(jobname):
        tko_utils.dprint("! Job is already parsed, done")
        return

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_lines = open(status_log).readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out
    # dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try and port labels over from the old tests, but if old tests stop
    # matching up with new ones just give up
    for test, old_test in zip(job.tests, old_tests):
        tests_are_the_same = (
                test.testname == old_test.testname and
                test.subdir == old_test.subdir and
                test.started_time == old_test.started_time and
                (test.finished_time == old_test.finished_time or
                 old_test.finished_time is None))
        if tests_are_the_same:
            test.labels = old_test.labels
        else:
            tko_utils.dprint("! Reparse returned new tests, "
                             "dropping old test labels")

    # check for failures
    message_lines = [""]
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s" %
                         (test.subdir, test.status, test.reason))
        if test.status in ("FAIL", "WARN"):
            message_lines.append(format_failure_message(
                    jobname, test.kernel.base, test.subdir,
                    test.status, test.reason))
    message = "\n".join(message_lines)

    # send out an email report of failure
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s" %
                         (jobname, job.user))
        mailfailure(jobname, job, message)

    # write the job into the database
    db.insert_job(jobname, job)
    db.commit()

def state_iterator(self, buffer):
    new_tests = []
    boot_count = 0
    group_subdir = None
    sought_level = 0
    stack = status_lib.status_stack()
    current_kernel = kernel(self.job)
    boot_in_progress = False
    alert_pending = None
    started_time = None

    while not self.finished or buffer.size():
        # stop processing once the buffer is empty
        if buffer.size() == 0:
            yield new_tests
            new_tests = []
            continue

        # parse the next line
        line = buffer.get()
        tko_utils.dprint('\nSTATUS: ' + line.strip())
        line = status_line.parse_line(line)
        if line is None:
            tko_utils.dprint('non-status line, ignoring')
            continue  # ignore non-status lines

        # have we hit the job start line?
        if (line.type == "START" and not line.subdir and
            not line.testname):
            sought_level = 1
            tko_utils.dprint("found job level start "
                             "marker, looking for level "
                             "1 groups now")
            continue

        # have we hit the job end line?
        if (line.type == "END" and not line.subdir and
            not line.testname):
            tko_utils.dprint("found job level end "
                             "marker, looking for level "
                             "0 lines now")
            sought_level = 0

        # START line, just push another layer on to the stack
        # and grab the start time if this is at the job level
        # we're currently seeking
        if line.type == "START":
            group_subdir = None
            stack.start()
            if line.indent == sought_level:
                started_time = tko_utils.get_timestamp(
                        line.optional_fields, "timestamp")
            tko_utils.dprint("start line, ignoring")
            continue
        # otherwise, update the status on the stack
        else:
            tko_utils.dprint("GROPE_STATUS: %s" %
                             [stack.current_status(), line.status,
                              line.subdir, line.testname, line.reason])
            stack.update(line.status)

        if line.status == "ALERT":
            tko_utils.dprint("job level alert, recording")
            alert_pending = line.reason
            continue

        # ignore Autotest.install => GOOD lines
        if (line.testname == "Autotest.install" and
            line.status == "GOOD"):
            tko_utils.dprint("Successful Autotest "
                             "install, ignoring")
            continue

        # ignore END lines for a reboot group
        if (line.testname == "reboot" and line.type == "END"):
            tko_utils.dprint("reboot group, ignoring")
            continue

        # convert job-level ABORTs into a 'CLIENT_JOB' test, and
        # ignore other job-level events
        if line.testname is None:
            if (line.status == "ABORT" and
                line.type != "END"):
                line.testname = "CLIENT_JOB"
            else:
                tko_utils.dprint("job level event, "
                                 "ignoring")
                continue

        # use the group subdir for END lines
        if line.type == "END":
            line.subdir = group_subdir

        # are we inside a block group?
        if (line.indent != sought_level and
            line.status != "ABORT" and
            not line.testname.startswith('reboot.')):
            if line.subdir:
                tko_utils.dprint("set group_subdir: " + line.subdir)
                group_subdir = line.subdir
            tko_utils.dprint("ignoring incorrect indent "
                             "level %d != %d," %
                             (line.indent, sought_level))
            continue

        # use the subdir as the testname, except for
        # boot.* and kernel.* tests
        if (line.testname is None or
            not re.search(r"^(boot(\.\d+)?$|kernel\.)", line.testname)):
            if line.subdir and '.' in line.subdir:
                line.testname = line.subdir

        # has a reboot started?
        if line.testname == "reboot.start":
            started_time = tko_utils.get_timestamp(
                    line.optional_fields, "timestamp")
            tko_utils.dprint("reboot start event, "
                             "ignoring")
            boot_in_progress = True
            continue

        # has a reboot finished?
        if line.testname == "reboot.verify":
            line.testname = "boot.%d" % boot_count
            tko_utils.dprint("reboot verified")
            boot_in_progress = False
            verify_ident = line.reason.strip()
            current_kernel = kernel(self.job, verify_ident)
            boot_count += 1

        if alert_pending:
            line.status = "ALERT"
            line.reason = alert_pending
            alert_pending = None

        # create the actual test object
        finished_time = tko_utils.get_timestamp(
                line.optional_fields, "timestamp")
        final_status = stack.end()
        tko_utils.dprint("Adding: "
                         "%s\nSubdir:%s\nTestname:%s\n%s" %
                         (final_status, line.subdir,
                          line.testname, line.reason))
        new_test = test.parse_test(self.job, line.subdir, line.testname,
                                   final_status, line.reason,
                                   current_kernel, started_time,
                                   finished_time)
        started_time = None
        new_tests.append(new_test)

    # the job is finished, but we never came back from reboot
    if boot_in_progress:
        testname = "boot.%d" % boot_count
        reason = "machine did not return from reboot"
        tko_utils.dprint(("Adding: ABORT\nSubdir:----\n"
                          "Testname:%s\n%s") % (testname, reason))
        new_test = test.parse_test(self.job, None, testname, "ABORT",
                                   reason, current_kernel, None, None)
        new_tests.append(new_test)
    yield new_tests

def __init__(self, spec, reference, hash):
    tko_utils.dprint("PATCH::%s %s %s" % (spec, reference, hash))
    super(patch, self).__init__(spec, reference, hash)
    self.spec = spec
    self.reference = reference
    self.hash = hash

def parse_one(db, jobname, path, reparse, mail_on_failure):
    """ Parse a single job. Optionally send email on failure. """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old_tests is a dict from tuple (test_name, subdir) to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_lines = open(status_log).readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out
    # dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try and port test_idx over from the old tests, but if old tests stop
    # matching up with new ones just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        for test_idx in old_tests.itervalues():
            where = {'test_idx': test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # check for failures
    message_lines = [""]
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s" %
                         (test.subdir, test.status, test.reason))
        if test.status in ("FAIL", "WARN"):
            message_lines.append(format_failure_message(
                    jobname, test.kernel.base, test.subdir,
                    test.status, test.reason))
    message = "\n".join(message_lines)

    # send out an email report of failure
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s" %
                         (jobname, job.user))
        mailfailure(jobname, job, message)

    # write the job into the database
    db.insert_job(jobname, job)

    # serialize the job into a binary file
    try:
        from autotest_lib.tko import tko_pb2
        from autotest_lib.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest_lib.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)
    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()

def state_iterator(self, buffer):
    line = None
    new_tests = []
    job_count, boot_count = 0, 0
    min_stack_size = 0
    stack = status_lib.status_stack()
    current_kernel = kernel("", [])  # UNKNOWN
    current_status = status_lib.statuses[-1]
    current_reason = None
    started_time_stack = [None]
    subdir_stack = [None]
    running_test = None
    running_reasons = set()
    yield []  # we're ready to start running

    # create a RUNNING SERVER_JOB entry to represent the entire test
    running_job = test.parse_partial_test(self.job, "----", "SERVER_JOB",
                                          "", current_kernel,
                                          self.job.started_time)
    new_tests.append(running_job)

    while True:
        # are we finished with parsing?
        if buffer.size() == 0 and self.finished:
            if stack.size() == 0:
                break
            # we have status lines left on the stack,
            # we need to implicitly abort them first
            tko_utils.dprint('\nUnexpected end of job, aborting')
            abort_subdir_stack = list(subdir_stack)
            if self.job.aborted_by:
                reason = "Job aborted by %s" % self.job.aborted_by
                reason += self.job.aborted_on.strftime(
                        " at %b %d %H:%M:%S")
            else:
                reason = "Job aborted unexpectedly"

            timestamp = line.optional_fields.get('timestamp')
            for i in reversed(xrange(stack.size())):
                if abort_subdir_stack:
                    subdir = abort_subdir_stack.pop()
                else:
                    subdir = None
                abort = self.make_dummy_abort(i, subdir, subdir,
                                              timestamp, reason)
                buffer.put(abort)

        # stop processing once the buffer is empty
        if buffer.size() == 0:
            yield new_tests
            new_tests = []
            continue

        # reinitialize the per-iteration state
        started_time = None
        finished_time = None

        # get the next line
        raw_line = status_lib.clean_raw_line(buffer.get())
        tko_utils.dprint('\nSTATUS: ' + raw_line.strip())
        line = status_line.parse_line(raw_line)
        if line is None:
            tko_utils.dprint('non-status line, ignoring')
            continue

        # do an initial sanity check of the indentation
        expected_indent = stack.size()
        if line.type == "END":
            expected_indent -= 1
        if line.indent < expected_indent:
            # ABORT the current level if indentation was unexpectedly low
            self.put_back_line_and_abort(
                    buffer, raw_line, stack.size() - 1, subdir_stack[-1],
                    line.optional_fields.get("timestamp"), line.reason)
            continue
        elif line.indent > expected_indent:
            # ignore the log if the indent was unexpectedly high
            tko_utils.dprint("unexpected extra indentation, ignoring")
            continue

        # initial line processing
        if line.type == "START":
            stack.start()
            started_time = line.get_timestamp()
            if (line.testname is None and line.subdir is None and
                not running_test):
                # we just started a client, all tests are relative to here
                min_stack_size = stack.size()
            elif stack.size() == min_stack_size + 1 and not running_test:
                # we just started a new test, insert a running record
                running_reasons = set()
                if line.reason:
                    running_reasons.add(line.reason)
                running_test = test.parse_partial_test(self.job,
                                                       line.subdir,
                                                       line.testname,
                                                       line.reason,
                                                       current_kernel,
                                                       started_time)
                msg = "RUNNING: %s\nSubdir: %s\nTestname: %s\n%s"
                msg %= (running_test.status, running_test.subdir,
                        running_test.testname, running_test.reason)
                tko_utils.dprint(msg)
                new_tests.append(running_test)
            started_time_stack.append(started_time)
            subdir_stack.append(line.subdir)
            continue
        elif line.type == "INFO":
            # update the current kernel if one is defined in the info
            if "kernel" in line.optional_fields:
                current_kernel = line.get_kernel()
            continue
        elif line.type == "STATUS":
            # update the stacks
            if line.subdir and stack.size() > min_stack_size:
                subdir_stack[-1] = line.subdir
            # update the status, start and finished times
            stack.update(line.status)
            if status_lib.is_worse_than_or_equal_to(line.status,
                                                    current_status):
                if line.reason:
                    # update the status of a currently running test
                    if running_test:
                        running_reasons.add(line.reason)
                        running_reasons = tko_utils.drop_redundant_messages(
                                running_reasons)
                        sorted_reasons = sorted(running_reasons)
                        running_test.reason = ", ".join(sorted_reasons)
                        current_reason = running_test.reason
                        new_tests.append(running_test)
                        msg = "update RUNNING reason: %s" % line.reason
                        tko_utils.dprint(msg)
                    else:
                        current_reason = line.reason
                current_status = stack.current_status()
            started_time = None
            finished_time = line.get_timestamp()
            # if this is a non-test entry there's nothing else to do
            if line.testname is None and line.subdir is None:
                continue
        elif line.type == "END":
            # grab the current subdir off of the subdir stack, or, if this
            # is the end of a job, just pop it off
            if (line.testname is None and line.subdir is None and
                not running_test):
                min_stack_size = stack.size() - 1
                subdir_stack.pop()
            else:
                line.subdir = subdir_stack.pop()
                if not subdir_stack[-1] and stack.size() > min_stack_size:
                    subdir_stack[-1] = line.subdir
            # update the status, start and finished times
            stack.update(line.status)
            current_status = stack.end()
            if stack.size() > min_stack_size:
                stack.update(current_status)
                current_status = stack.current_status()
            started_time = started_time_stack.pop()
            finished_time = line.get_timestamp()
            # update the current kernel
            if line.is_successful_reboot(current_status):
                current_kernel = line.get_kernel()
            # adjust the testname if this is a reboot
            if line.testname == "reboot" and line.subdir is None:
                line.testname = "boot.%d" % boot_count
        else:
            assert False

        # have we just finished a test?
        if stack.size() <= min_stack_size:
            # if there was no testname, just use the subdir
            if line.testname is None:
                line.testname = line.subdir
            # if there was no testname or subdir, use 'CLIENT_JOB'
            if line.testname is None:
                line.testname = "CLIENT_JOB.%d" % job_count
                job_count += 1
                if not status_lib.is_worse_than_or_equal_to(
                        current_status, "ABORT"):
                    # a job hasn't really failed just because some of the
                    # tests it ran have
                    current_status = "GOOD"

            if not current_reason:
                current_reason = line.reason
            new_test = test.parse_test(self.job, line.subdir,
                                       line.testname, current_status,
                                       current_reason, current_kernel,
                                       started_time, finished_time,
                                       running_test)
            running_test = None
            current_status = status_lib.statuses[-1]
            current_reason = None
            if new_test.testname == ("boot.%d" % boot_count):
                boot_count += 1
            msg = "ADD: %s\nSubdir: %s\nTestname: %s\n%s"
            msg %= (new_test.status, new_test.subdir,
                    new_test.testname, new_test.reason)
            tko_utils.dprint(msg)
            new_tests.append(new_test)

    # the job is finished, produce the final SERVER_JOB entry and exit
    final_job = test.parse_test(self.job, "----", "SERVER_JOB",
                                self.job.exit_status(), "",
                                current_kernel, self.job.started_time,
                                self.job.finished_time, running_job)
    new_tests.append(final_job)
    yield new_tests

def state_iterator(self, buffer):
    """ Yields a list of tests out of the buffer.

    @param buffer: a buffer object
    """
    line = None
    new_tests = []
    job_count, boot_count = 0, 0
    min_stack_size = 0
    stack = status_lib.status_stack()
    current_kernel = kernel("", [])  # UNKNOWN
    current_status = status_lib.statuses[-1]
    current_reason = None
    started_time_stack = [None]
    subdir_stack = [None]
    testname_stack = [None]
    running_test = None
    running_reasons = set()
    ignored_lines = []
    yield []  # We're ready to start running.

    def print_ignored_lines():
        """ Prints the ignored_lines using the tko_utils.dprint method. """
        tko_utils.dprint('The following lines were ignored:')
        for line in ignored_lines:
            tko_utils.dprint(line)
        tko_utils.dprint('---------------------------------')

    # Create a RUNNING SERVER_JOB entry to represent the entire test.
    running_job = test.parse_partial_test(self.job, '----', 'SERVER_JOB',
                                          '', current_kernel,
                                          self.job.started_time)
    new_tests.append(running_job)

    while True:
        # Are we finished with parsing?
        if buffer.size() == 0 and self.finished:
            if ignored_lines:
                print_ignored_lines()
                ignored_lines = []
            if stack.size() == 0:
                break
            # We have status lines left on the stack;
            # we need to implicitly abort them first.
            tko_utils.dprint('\nUnexpected end of job, aborting')
            abort_subdir_stack = list(subdir_stack)
            if self.job.aborted_by:
                reason = 'Job aborted by %s' % self.job.aborted_by
                reason += self.job.aborted_on.strftime(
                        ' at %b %d %H:%M:%S')
            else:
                reason = 'Job aborted unexpectedly'

            timestamp = line.optional_fields.get('timestamp')
            for i in reversed(xrange(stack.size())):
                if abort_subdir_stack:
                    subdir = abort_subdir_stack.pop()
                else:
                    subdir = None
                abort = self.make_dummy_abort(i, subdir, subdir,
                                              timestamp, reason)
                buffer.put(abort)

        # Stop processing once the buffer is empty.
        if buffer.size() == 0:
            yield new_tests
            new_tests = []
            continue

        # Reinitialize the per-iteration state.
        started_time = None
        finished_time = None

        # Get the next line.
        raw_line = status_lib.clean_raw_line(buffer.get())
        line = status_line.parse_line(raw_line)
        if line is None:
            ignored_lines.append(raw_line)
            continue
        elif ignored_lines:
            print_ignored_lines()
            ignored_lines = []

        # Do an initial sanity check of the indentation.
        expected_indent = stack.size()
        if line.type == 'END':
            expected_indent -= 1
        if line.indent < expected_indent:
            # ABORT the current level if indentation was unexpectedly low.
            self.put_back_line_and_abort(
                    buffer, raw_line, stack.size() - 1, subdir_stack[-1],
                    testname_stack[-1],
                    line.optional_fields.get('timestamp'), line.reason)
            continue
        elif line.indent > expected_indent:
            # Ignore the log if the indent was unexpectedly high.
            tko_utils.dprint('ignoring line because of extra indentation')
            continue

        # Initial line processing.
        if line.type == 'START':
            stack.start()
            started_time = line.get_timestamp()
            testname = None
            if (line.testname is None and line.subdir is None and
                not running_test):
                # We just started a client; all tests are relative to here.
                min_stack_size = stack.size()
                # Start a "RUNNING" CLIENT_JOB entry.
                job_name = 'CLIENT_JOB.%d' % job_count
                running_client = test.parse_partial_test(
                        self.job, None, job_name, '', current_kernel,
                        started_time)
                msg = 'RUNNING: %s\n%s\n'
                msg %= (running_client.status, running_client.testname)
                tko_utils.dprint(msg)
                new_tests.append(running_client)
                testname = running_client.testname
            elif stack.size() == min_stack_size + 1 and not running_test:
                # We just started a new test; insert a running record.
                running_reasons = set()
                if line.reason:
                    running_reasons.add(line.reason)
                running_test = test.parse_partial_test(
                        self.job, line.subdir, line.testname, line.reason,
                        current_kernel, started_time)
                msg = 'RUNNING: %s\nSubdir: %s\nTestname: %s\n%s'
                msg %= (running_test.status, running_test.subdir,
                        running_test.testname, running_test.reason)
                tko_utils.dprint(msg)
                new_tests.append(running_test)
                testname = running_test.testname
            started_time_stack.append(started_time)
            subdir_stack.append(line.subdir)
            testname_stack.append(testname)
            continue
        elif line.type == 'INFO':
            fields = line.optional_fields
            # Update the current kernel if one is defined in the info.
            if 'kernel' in fields:
                current_kernel = line.get_kernel()
            # Update the SERVER_JOB reason if one was logged for an abort.
            if 'job_abort_reason' in fields:
                running_job.reason = fields['job_abort_reason']
                new_tests.append(running_job)
            continue
        elif line.type == 'STATUS':
            # Update the stacks.
            if line.subdir and stack.size() > min_stack_size:
                subdir_stack[-1] = line.subdir
                testname_stack[-1] = line.testname
            # Update the status, start and finished times.
            stack.update(line.status)
            if status_lib.is_worse_than_or_equal_to(line.status,
                                                    current_status):
                if line.reason:
                    # Update the status of a currently running test.
                    if running_test:
                        running_reasons.add(line.reason)
                        running_reasons = tko_utils.drop_redundant_messages(
                                running_reasons)
                        sorted_reasons = sorted(running_reasons)
                        running_test.reason = ', '.join(sorted_reasons)
                        current_reason = running_test.reason
                        new_tests.append(running_test)
                        msg = 'update RUNNING reason: %s' % line.reason
                        tko_utils.dprint(msg)
                    else:
                        current_reason = line.reason
                current_status = stack.current_status()
            started_time = None
            finished_time = line.get_timestamp()
            # If this is a non-test entry there's nothing else to do.
            if line.testname is None and line.subdir is None:
                continue
        elif line.type == 'END':
            # Grab the current subdir off of the subdir stack, or, if this
            # is the end of a job, just pop it off.
            if (line.testname is None and line.subdir is None and
                not running_test):
                min_stack_size = stack.size() - 1
                subdir_stack.pop()
                testname_stack.pop()
            else:
                line.subdir = subdir_stack.pop()
                testname_stack.pop()
                if not subdir_stack[-1] and stack.size() > min_stack_size:
                    subdir_stack[-1] = line.subdir
            # Update the status, start and finished times.
            stack.update(line.status)
            current_status = stack.end()
            if stack.size() > min_stack_size:
                stack.update(current_status)
                current_status = stack.current_status()
            started_time = started_time_stack.pop()
            finished_time = line.get_timestamp()
            # Update the current kernel.
            if line.is_successful_reboot(current_status):
                current_kernel = line.get_kernel()
            # Adjust the testname if this is a reboot.
            if line.testname == 'reboot' and line.subdir is None:
                line.testname = 'boot.%d' % boot_count
        else:
            assert False

        # Have we just finished a test?
        if stack.size() <= min_stack_size:
            # If there was no testname, just use the subdir.
            if line.testname is None:
                line.testname = line.subdir
            # If there was no testname or subdir, use 'CLIENT_JOB'.
            if line.testname is None:
                line.testname = 'CLIENT_JOB.%d' % job_count
                running_test = running_client
                job_count += 1
                if not status_lib.is_worse_than_or_equal_to(
                        current_status, 'ABORT'):
                    # A job hasn't really failed just because some of the
                    # tests it ran have.
                    current_status = 'GOOD'

            if not current_reason:
                current_reason = line.reason
            new_test = test.parse_test(self.job, line.subdir,
                                       line.testname, current_status,
                                       current_reason, current_kernel,
                                       started_time, finished_time,
                                       running_test)
            running_test = None
            current_status = status_lib.statuses[-1]
            current_reason = None
            if new_test.testname == ('boot.%d' % boot_count):
                boot_count += 1
            msg = 'ADD: %s\nSubdir: %s\nTestname: %s\n%s'
            msg %= (new_test.status, new_test.subdir,
                    new_test.testname, new_test.reason)
            tko_utils.dprint(msg)
            new_tests.append(new_test)

    # The job is finished; produce the final SERVER_JOB entry and exit.
    final_job = test.parse_test(self.job, '----', 'SERVER_JOB',
                                self.job.exit_status(), running_job.reason,
                                current_kernel, self.job.started_time,
                                self.job.finished_time, running_job)
    new_tests.append(final_job)
    yield new_tests