def load_from_dir(cls, dir):
    """Read the job-level keyval file under dir and summarize it.

    Returns a dict of job metadata (user, label, machine info,
    queued/started/finished/aborted timestamps) plus the raw keyval
    dict itself under "keyval_dict".
    """
    keyval = cls.read_keyval(dir)
    tko_utils.dprint(str(keyval))

    job_user = keyval.get("user", None)
    job_label = keyval.get("label", None)
    time_queued = tko_utils.get_timestamp(keyval, "job_queued")
    time_started = tko_utils.get_timestamp(keyval, "job_started")
    time_finished = tko_utils.get_timestamp(keyval, "job_finished")
    # machine identity is derived from the keyval plus the results dir
    hostname = cls.determine_hostname(keyval, dir)
    hostname_group = cls.determine_machine_group(hostname, dir)
    owner = keyval.get("owner", None)
    abort_user = keyval.get("aborted_by", None)
    abort_time = tko_utils.get_timestamp(keyval, "aborted_on")

    return {
        "user": job_user,
        "label": job_label,
        "machine": hostname,
        "queued_time": time_queued,
        "started_time": time_started,
        "finished_time": time_finished,
        "machine_owner": owner,
        "machine_group": hostname_group,
        "aborted_by": abort_user,
        "aborted_on": abort_time,
        "keyval_dict": keyval,
    }
def put_back_line_and_abort(
        line_buffer, line, indent, subdir, timestamp, reason):
    """Return an unexpected line to the buffer along with a dummy ABORT.

    Used when a status line shows an indentation regression: the line is
    pushed back and a synthetic abort entry at the given indent level is
    pushed back after it.
    """
    tko_utils.dprint("Unexpected indent regression, aborting")
    line_buffer.put_back(line)
    dummy_abort = parser.make_dummy_abort(
        indent, subdir, subdir, timestamp, reason)
    line_buffer.put_back(dummy_abort)
def load_from_dir(dir, verify_ident=None):
    """Determine the booted kernel for a results directory.

    Scans build/, build.2/, build.3/, ... for a build_log that yields
    kernel attributes; otherwise falls back to verify_ident (if given)
    or the sysinfo data. Returns a dict with "base", "patches" and
    "kernel_hash" keys.
    """
    # try and load the booted kernel version
    attributes = False
    build_index = 1
    build_dir = os.path.join(dir, "build")
    while os.path.exists(build_dir):
        build_log = os.path.join(build_dir, "debug", "build_log")
        attributes = kernel.load_from_build_log(build_log)
        if attributes:
            break
        build_index += 1
        build_dir = os.path.join(dir, "build.%d" % build_index)

    if attributes:
        base, patches, hashes = attributes
    else:
        # no usable build log; fall back to the caller-supplied identity
        # or whatever sysinfo recorded
        base = verify_ident if verify_ident else kernel.load_from_sysinfo(dir)
        patches = []
        hashes = []
    tko_utils.dprint("kernel.__init__() found kernel version %s" % base)

    # compute the kernel hash
    if base == "UNKNOWN":
        kernel_hash = "UNKNOWN"
    else:
        kernel_hash = kernel.compute_hash(base, hashes)

    return {"base": base, "patches": patches, "kernel_hash": kernel_hash}
def parse_test(cls, job, subdir, testname, status, reason, test_kernel,
               started_time, finished_time, existing_instance=None):
    """Given a job and the basic metadata about the test that can
    be extracted from the status logs, parse the test keyval files and
    use it to construct a complete test instance."""
    tko_utils.dprint("parsing test %s %s" % (subdir, testname))

    if subdir:
        # grab iterations from the results keyval
        results_keyval_path = os.path.join(job.dir, subdir, "results",
                                           "keyval")
        iterations = cls.load_iterations(results_keyval_path)
        # grab test attributes from the subdir keyval
        subdir_keyval_path = os.path.join(job.dir, subdir, "keyval")
        attributes = test.load_attributes(subdir_keyval_path)
    else:
        iterations = []
        attributes = {}

    # grab test+host attributes from the host keyval
    machine_keyval = cls.parse_host_keyval(job.dir, job.machine)
    attributes.update({"host-%s" % key: value
                       for key, value in machine_keyval.iteritems()})

    if existing_instance:
        # re-initialize the caller-supplied instance in place instead of
        # building a brand-new test object
        def make_test(*args, **dargs):
            existing_instance.__init__(*args, **dargs)
            return existing_instance
    else:
        make_test = cls
    return make_test(subdir, testname, status, reason, test_kernel,
                     job.machine, started_time, finished_time,
                     iterations, attributes, [])
def parse_line_into_dicts(line, attr_dict, perf_dict):
    """Parse one iteration keyval line into attr_dict or perf_dict.

    Lines of the form "key{attr}=value" go into attr_dict as strings,
    "key{perf}=value" go into perf_dict as floats, and untyped
    "key=value" lines are assumed to be perf values. Unparseable lines
    are logged and skipped.
    """
    key, val_type, value = "", "", ""

    # figure out what the key, value and keyval type are
    # NOTE: raw strings — the original used "\{...\w...\}" which relies
    # on invalid escape sequences
    typed_match = re.search(r"^([^=]*)\{(\w*)\}=(.*)$", line)
    if typed_match:
        key, val_type, value = typed_match.groups()
    else:
        # old-fashioned untyped match, assume perf
        untyped_match = re.search(r"^([^=]*)=(.*)$", line)
        if untyped_match:
            key, value = untyped_match.groups()
            val_type = "perf"

    # parse the actual value into a dict
    try:
        if val_type == "attr":
            attr_dict[key] = value
        elif val_type == "perf":
            # non-numeric perf values fall through to the warning below
            perf_dict[key] = float(value)
        else:
            raise ValueError
    except ValueError:
        msg = ("WARNING: line '%s' found in test "
               "iteration keyval could not be parsed")
        msg %= line
        tko_utils.dprint(msg)
def parse_line_into_dicts(line, attr_dict, perf_dict):
    """Parse a single iteration keyval line into the proper dict.

    "key{attr}=value" lines are stored as strings in attr_dict,
    "key{perf}=value" lines as floats in perf_dict, and untyped
    "key=value" lines default to perf. Lines that cannot be parsed
    are logged via tko_utils.dprint and otherwise ignored.
    """
    key, val_type, value = "", "", ""

    # figure out what the key, value and keyval type are
    # fix: use raw strings for the regexes ("\{", "\w" and "\}" are
    # invalid escapes in a plain string literal)
    typed_match = re.search(r"^([^=]*)\{(\w*)\}=(.*)$", line)
    if typed_match:
        key, val_type, value = typed_match.groups()
    else:
        # old-fashioned untyped match, assume perf
        untyped_match = re.search(r"^([^=]*)=(.*)$", line)
        if untyped_match:
            key, value = untyped_match.groups()
            val_type = "perf"

    # parse the actual value into a dict
    try:
        if val_type == "attr":
            attr_dict[key] = value
        elif val_type == "perf":
            perf_dict[key] = float(value)
        else:
            # no recognizable key=value structure at all
            raise ValueError
    except ValueError:
        msg = ("WARNING: line '%s' found in test "
               "iteration keyval could not be parsed")
        msg %= line
        tko_utils.dprint(msg)
def parse_args():
    """Build the option parser for the results parser and parse sys.argv.

    Returns the (options, args) pair from optparse; exits with an error
    message and usage text if no results directory was supplied.
    """
    # build up our options parser and parse sys.argv
    option_parser = optparse.OptionParser()
    for flag, help_text, dest in [
            ("-m", "Send mail for FAILED tests", "mailit"),
            ("-r", "Reparse the results of a job", "reparse"),
            ("-o", "Parse a single results directory", "singledir")]:
        option_parser.add_option(flag, help=help_text, dest=dest,
                                 action="store_true")
    option_parser.add_option("-l",
                             help=("Levels of subdirectories to include "
                                   "in the job name"),
                             type="int", dest="level", default=1)
    option_parser.add_option("-n", help="No blocking on an existing parse",
                             dest="noblock", action="store_true")
    for flag, help_text, dest in [
            ("-s", "Database server hostname", "db_host"),
            ("-u", "Database username", "db_user"),
            ("-p", "Database password", "db_pass"),
            ("-d", "Database name", "db_name")]:
        option_parser.add_option(flag, help=help_text, dest=dest,
                                 action="store")
    option_parser.add_option("--write-pidfile",
                             help="write pidfile (.parser_execute)",
                             dest="write_pidfile", action="store_true",
                             default=False)
    options, args = option_parser.parse_args()

    # we need a results directory
    if not args:
        tko_utils.dprint("ERROR: at least one results directory must "
                         "be provided")
        option_parser.print_help()
        sys.exit(1)

    # pass the options back
    return options, args
def parse_partial_test(cls, job, subdir, testname, reason, test_kernel,
                       started_time):
    """Given a job and the basic metadata available when a test is
    started, create a test instance representing the partial result.
    Assume that since the test is not complete there are no results files
    actually available for parsing."""
    tko_utils.dprint("parsing partial test %s %s" % (subdir, testname))

    # status is fixed at RUNNING, finished time is unknown, and there are
    # no iterations/attributes/labels yet
    partial = cls(subdir, testname, "RUNNING", reason, test_kernel,
                  job.machine, started_time, None, [], {}, [])
    return partial
def determine_machine_group(cls, hostname, job_dir):
    """Compute the machine group string for a (possibly multi-host) job.

    hostname may be a comma-separated list; each host's keyval is
    consulted for a "platform" entry and the sorted, comma-joined set of
    platforms is returned.
    """
    platforms = set()
    for host in hostname.split(","):
        keyval = models.test.parse_host_keyval(job_dir, host)
        if not keyval:
            tko_utils.dprint('Unable to parse host keyval for %s' % host)
        elif "platform" in keyval:
            platforms.add(keyval["platform"])
    group = ",".join(sorted(platforms))
    tko_utils.dprint("MACHINE GROUP: %s" % group)
    return group
def determine_machine_group(cls, hostname, job_dir):
    """Return the comma-joined sorted set of platforms for hostname.

    Each individual host in the comma-separated hostname contributes
    the "platform" value from its host keyval, when one is present.
    """
    found_platforms = set()
    for single_host in hostname.split(","):
        host_keyval = models.test.parse_host_keyval(
            job_dir, single_host)
        if not host_keyval:
            tko_utils.dprint('Unable to parse host keyval for %s'
                             % single_host)
            continue
        if "platform" in host_keyval:
            found_platforms.add(host_keyval["platform"])
    machine_group = ",".join(sorted(found_platforms))
    tko_utils.dprint("MACHINE GROUP: %s" % machine_group)
    return machine_group
def process_lines(self, lines):
    """ Feed 'lines' into the parser state machine, and return
    a list of all the new test results produced."""
    self.line_buffer.put_multiple(lines)
    try:
        return self.state.next()
    except StopIteration:
        # the state generator was already exhausted by a prior end();
        # log where this stray call came from and return nothing new
        warning = ("WARNING: parser was called to process status "
                   "lines after it was end()ed\n"
                   "Current traceback:\n" +
                   traceback.format_exc() +
                   "\nCurrent stack:\n" +
                   "".join(traceback.format_stack()))
        tko_utils.dprint(warning)
        return []
def end(self, lines=None):
    """ Feed 'lines' into the parser state machine, signal to the
    state machine that no more lines are forthcoming, and then return
    a list of all the new test results produced."""
    # fix: avoid a mutable default argument ([]); use None as sentinel
    if lines is None:
        lines = []
    self.line_buffer.put_multiple(lines)
    # run the state machine to clear out the buffer
    self.finished = True
    try:
        return self.state.next()
    except StopIteration:
        # the generator is already exhausted — end() was called twice
        msg = ("WARNING: parser was end()ed multiple times\n"
               "Current traceback:\n" + traceback.format_exc() +
               "\nCurrent stack:\n" + "".join(traceback.format_stack()))
        tko_utils.dprint(msg)
        return []
def find_hostname(path):
    """Find the machine hostname recorded under a results directory.

    Tries sysinfo/hostname first, then falls back to the node name in
    sysinfo/uname_-a. Raises NoHostnameError if neither is readable.
    """
    # fix: use context managers so the file handles are always closed
    # (the original leaked them to the garbage collector)
    hostname = os.path.join(path, "sysinfo", "hostname")
    try:
        with open(hostname) as hostname_file:
            return hostname_file.readline().rstrip()
    except Exception:
        tko_utils.dprint("Could not read a hostname from "
                         "sysinfo/hostname")

    uname = os.path.join(path, "sysinfo", "uname_-a")
    try:
        # second field of `uname -a` output is the node name
        with open(uname) as uname_file:
            return uname_file.readline().split()[1]
    except Exception:
        tko_utils.dprint("Could not read a hostname from "
                         "sysinfo/uname_-a")

    raise NoHostnameError("Unable to find a machine name")
def parse_test(cls, job, subdir, testname, status, reason, test_kernel,
               started_time, finished_time, existing_instance=None):
    """Given a job and the basic metadata about the test that can
    be extracted from the status logs, parse the test keyval files and
    use it to construct a complete test instance."""
    tko_utils.dprint("parsing test %s %s" % (subdir, testname))

    if not subdir:
        # no results directory — nothing on disk to parse
        iterations = []
        attributes = {}
    else:
        # grab iterations from the results keyval
        iterations = cls.load_iterations(
            os.path.join(job.dir, subdir, "results", "keyval"))
        # grab test attributes from the subdir keyval
        attributes = test.load_attributes(
            os.path.join(job.dir, subdir, "keyval"))

    # grab test+host attributes from the host keyval
    host_keyval = cls.parse_host_keyval(job.dir, job.machine)
    host_attributes = dict(("host-%s" % key, value)
                           for key, value in host_keyval.iteritems())
    attributes.update(host_attributes)

    if existing_instance:
        # reuse the given instance by re-running its __init__
        def constructor(*args, **dargs):
            existing_instance.__init__(*args, **dargs)
            return existing_instance
    else:
        constructor = cls
    return constructor(subdir, testname, status, reason, test_kernel,
                       job.machine, started_time, finished_time,
                       iterations, attributes, [])
def determine_hostname(cls, keyval, job_dir):
    """Pick the machine name to record for a job.

    Prefers the keyval's host_group_name when the hostname is empty or
    comma-separated (multi-machine); otherwise, for multi-machine jobs,
    tries to recover a single hostname from the results directory.
    """
    group_name = keyval.get("host_group_name", None)
    machine = keyval.get("hostname", "")
    multiple_hosts = "," in machine

    # determine what hostname to use
    if group_name:
        if multiple_hosts or not machine:
            tko_utils.dprint("Using host_group_name %r instead of "
                             "machine name." % group_name)
            machine = group_name
    elif multiple_hosts:
        try:
            machine = job.find_hostname(job_dir)  # find a unique hostname
        except NoHostnameError:
            pass  # just use the comma-separated name

    tko_utils.dprint("MACHINE NAME: %s" % machine)
    return machine
def load_from_dir(cls, dir):
    """Build a dict of job metadata from the keyval file in dir.

    The returned dict carries the user/label, the resolved machine name
    and machine group, the owner, the job lifecycle timestamps, any
    abort information, and the full keyval under "keyval_dict".
    """
    keyval = cls.read_keyval(dir)
    tko_utils.dprint(str(keyval))

    submitter = keyval.get("user", None)
    job_label = keyval.get("label", None)
    queued = tko_utils.get_timestamp(keyval, "job_queued")
    started = tko_utils.get_timestamp(keyval, "job_started")
    finished = tko_utils.get_timestamp(keyval, "job_finished")
    machine_name = cls.determine_hostname(keyval, dir)
    group = cls.determine_machine_group(machine_name, dir)
    machine_owner = keyval.get("owner", None)
    aborted_by = keyval.get("aborted_by", None)
    aborted_at = tko_utils.get_timestamp(keyval, "aborted_on")

    return {"user": submitter,
            "label": job_label,
            "machine": machine_name,
            "queued_time": queued,
            "started_time": started,
            "finished_time": finished,
            "machine_owner": machine_owner,
            "machine_group": group,
            "aborted_by": aborted_by,
            "aborted_on": aborted_at,
            "keyval_dict": keyval}
def state_iterator(self, buffer):
    """Generator implementing the status-log parsing state machine.

    Consumes raw status lines from `buffer` and yields, on each resume,
    the list of new/updated test objects produced since the last yield.
    The generator tracks an indentation stack mirroring the nesting of
    START/STATUS/END records and synthesizes ABORT entries for any
    levels left open when the job ends unexpectedly.
    NOTE(review): order of operations here is load-bearing; do not
    reorder statements without tracing the upstream status-log format.
    """
    line = None
    new_tests = []
    job_count, boot_count = 0, 0
    min_stack_size = 0
    stack = status_lib.status_stack()
    current_kernel = kernel("", [])  # UNKNOWN
    current_status = status_lib.statuses[-1]
    current_reason = None
    started_time_stack = [None]
    subdir_stack = [None]
    running_test = None
    running_reasons = set()
    yield []  # we're ready to start running

    # create a RUNNING SERVER_JOB entry to represent the entire test
    running_job = test.parse_partial_test(self.job, "----", "SERVER_JOB", "", current_kernel, self.job.started_time)
    new_tests.append(running_job)

    while True:
        # are we finished with parsing?
        if buffer.size() == 0 and self.finished:
            if stack.size() == 0:
                break
            # we have status lines left on the stack,
            # we need to implicitly abort them first
            tko_utils.dprint("\nUnexpected end of job, aborting")
            abort_subdir_stack = list(subdir_stack)
            if self.job.aborted_by:
                reason = "Job aborted by %s" % self.job.aborted_by
                reason += self.job.aborted_on.strftime(" at %b %d %H:%M:%S")
            else:
                reason = "Job aborted unexpectedly"
            # `line` is the last line parsed; assumes at least one status
            # line was seen before the abort — TODO confirm
            timestamp = line.optional_fields.get("timestamp")
            # push one dummy abort per open nesting level, innermost first
            for i in reversed(xrange(stack.size())):
                if abort_subdir_stack:
                    subdir = abort_subdir_stack.pop()
                else:
                    subdir = None
                abort = self.make_dummy_abort(i, subdir, subdir, timestamp, reason)
                buffer.put(abort)

        # stop processing once the buffer is empty
        if buffer.size() == 0:
            yield new_tests
            new_tests = []
            continue

        # reinitialize the per-iteration state
        started_time = None
        finished_time = None

        # get the next line
        raw_line = status_lib.clean_raw_line(buffer.get())
        tko_utils.dprint("\nSTATUS: " + raw_line.strip())
        line = status_line.parse_line(raw_line)
        if line is None:
            tko_utils.dprint("non-status line, ignoring")
            continue

        # do an initial sanity check of the indentation
        expected_indent = stack.size()
        if line.type == "END":
            expected_indent -= 1
        if line.indent < expected_indent:
            # ABORT the current level if indentation was unexpectedly low
            self.put_back_line_and_abort(
                buffer, raw_line, stack.size() - 1, subdir_stack[-1],
                line.optional_fields.get("timestamp"), line.reason,
            )
            continue
        elif line.indent > expected_indent:
            # ignore the log if the indent was unexpectedly high
            tko_utils.dprint("unexpected extra indentation, ignoring")
            continue

        # initial line processing
        if line.type == "START":
            stack.start()
            started_time = line.get_timestamp()
            if line.testname is None and line.subdir is None and not running_test:
                # we just started a client, all tests are relative to here
                min_stack_size = stack.size()
                # start a "RUNNING" CLIENT_JOB entry
                job_name = "CLIENT_JOB.%d" % job_count
                running_client = test.parse_partial_test(self.job, None, job_name, "", current_kernel, started_time)
                msg = "RUNNING: %s\n%s\n"
                msg %= (running_client.status, running_client.testname)
                tko_utils.dprint(msg)
                new_tests.append(running_client)
            elif stack.size() == min_stack_size + 1 and not running_test:
                # we just started a new test, insert a running record
                running_reasons = set()
                if line.reason:
                    running_reasons.add(line.reason)
                running_test = test.parse_partial_test(
                    self.job, line.subdir, line.testname, line.reason,
                    current_kernel, started_time
                )
                msg = "RUNNING: %s\nSubdir: %s\nTestname: %s\n%s"
                msg %= (running_test.status, running_test.subdir,
                        running_test.testname, running_test.reason)
                tko_utils.dprint(msg)
                new_tests.append(running_test)
            started_time_stack.append(started_time)
            subdir_stack.append(line.subdir)
            continue
        elif line.type == "INFO":
            fields = line.optional_fields
            # update the current kernel if one is defined in the info
            if "kernel" in fields:
                current_kernel = line.get_kernel()
            # update the SERVER_JOB reason if one was logged for an abort
            if "job_abort_reason" in fields:
                running_job.reason = fields["job_abort_reason"]
                new_tests.append(running_job)
            continue
        elif line.type == "STATUS":
            # update the stacks
            if line.subdir and stack.size() > min_stack_size:
                subdir_stack[-1] = line.subdir
            # update the status, start and finished times
            stack.update(line.status)
            # only a status at least as bad as the current one may change
            # the recorded reason
            if status_lib.is_worse_than_or_equal_to(line.status, current_status):
                if line.reason:
                    # update the status of a currently running test
                    if running_test:
                        running_reasons.add(line.reason)
                        running_reasons = tko_utils.drop_redundant_messages(running_reasons)
                        sorted_reasons = sorted(running_reasons)
                        running_test.reason = ", ".join(sorted_reasons)
                        current_reason = running_test.reason
                        new_tests.append(running_test)
                        msg = "update RUNNING reason: %s" % line.reason
                        tko_utils.dprint(msg)
                    else:
                        current_reason = line.reason
                current_status = stack.current_status()
            started_time = None
            finished_time = line.get_timestamp()
            # if this is a non-test entry there's nothing else to do
            if line.testname is None and line.subdir is None:
                continue
        elif line.type == "END":
            # grab the current subdir off of the subdir stack, or, if this
            # is the end of a job, just pop it off
            if line.testname is None and line.subdir is None and not running_test:
                min_stack_size = stack.size() - 1
                subdir_stack.pop()
            else:
                line.subdir = subdir_stack.pop()
                if not subdir_stack[-1] and stack.size() > min_stack_size:
                    subdir_stack[-1] = line.subdir
            # update the status, start and finished times
            stack.update(line.status)
            current_status = stack.end()
            if stack.size() > min_stack_size:
                stack.update(current_status)
                current_status = stack.current_status()
            started_time = started_time_stack.pop()
            finished_time = line.get_timestamp()
            # update the current kernel
            if line.is_successful_reboot(current_status):
                current_kernel = line.get_kernel()
            # adjust the testname if this is a reboot
            if line.testname == "reboot" and line.subdir is None:
                line.testname = "boot.%d" % boot_count
        else:
            # parse_line only emits START/INFO/STATUS/END
            assert False

        # have we just finished a test?
        if stack.size() <= min_stack_size:
            # if there was no testname, just use the subdir
            if line.testname is None:
                line.testname = line.subdir
            # if there was no testname or subdir, use 'CLIENT_JOB'
            if line.testname is None:
                line.testname = "CLIENT_JOB.%d" % job_count
                running_test = running_client
                job_count += 1
                if not status_lib.is_worse_than_or_equal_to(current_status, "ABORT"):
                    # a job hasn't really failed just because some of the
                    # tests it ran have
                    current_status = "GOOD"

            if not current_reason:
                current_reason = line.reason
            new_test = test.parse_test(
                self.job, line.subdir, line.testname, current_status,
                current_reason, current_kernel, started_time, finished_time,
                running_test,
            )
            running_test = None
            current_status = status_lib.statuses[-1]
            current_reason = None
            if new_test.testname == ("boot.%d" % boot_count):
                boot_count += 1
            msg = "ADD: %s\nSubdir: %s\nTestname: %s\n%s"
            msg %= (new_test.status, new_test.subdir, new_test.testname,
                    new_test.reason)
            tko_utils.dprint(msg)
            new_tests.append(new_test)

    # the job is finished, produce the final SERVER_JOB entry and exit
    final_job = test.parse_test(
        self.job, "----", "SERVER_JOB", self.job.exit_status(),
        running_job.reason, current_kernel, self.job.started_time,
        self.job.finished_time, running_job,
    )
    new_tests.append(final_job)
    yield new_tests
def state_iterator(self, buffer):
    """Generator implementing the version-0 status-log state machine.

    Pulls raw lines from `buffer` and yields, whenever the buffer
    drains, the list of completed test objects parsed since the last
    yield. Tracks the "sought" indent level (0 outside a job-level
    group, 1 inside), a status stack for nested groups, reboot
    progress, and pending job-level ALERTs.
    NOTE(review): heavily order-dependent; kept byte-for-byte, comments
    only added.
    """
    new_tests = []
    boot_count = 0
    group_subdir = None
    sought_level = 0
    stack = status_lib.status_stack()
    current_kernel = kernel(self.job)
    boot_in_progress = False
    alert_pending = None
    started_time = None

    while not self.finished or buffer.size():
        # stop processing once the buffer is empty
        if buffer.size() == 0:
            yield new_tests
            new_tests = []
            continue

        # parse the next line
        line = buffer.get()
        tko_utils.dprint('\nSTATUS: ' + line.strip())
        line = status_line.parse_line(line)
        if line is None:
            tko_utils.dprint('non-status line, ignoring')
            continue  # ignore non-status lines

        # have we hit the job start line?
        if (line.type == "START" and not line.subdir and
                not line.testname):
            sought_level = 1
            tko_utils.dprint("found job level start "
                             "marker, looking for level "
                             "1 groups now")
            continue

        # have we hit the job end line?
        if (line.type == "END" and not line.subdir and
                not line.testname):
            tko_utils.dprint("found job level end "
                             "marker, looking for level "
                             "0 lines now")
            sought_level = 0

        # START line, just push another layer on to the stack
        # and grab the start time if this is at the job level
        # we're currently seeking
        if line.type == "START":
            group_subdir = None
            stack.start()
            if line.indent == sought_level:
                started_time = \
                    tko_utils.get_timestamp(
                        line.optional_fields, "timestamp")
            tko_utils.dprint("start line, ignoring")
            continue
        # otherwise, update the status on the stack
        else:
            tko_utils.dprint("GROPE_STATUS: %s" %
                             [stack.current_status(),
                              line.status, line.subdir,
                              line.testname, line.reason])
            stack.update(line.status)

        # job-level alert: remember the reason so it can be attached to
        # the next test that completes
        if line.status == "ALERT":
            tko_utils.dprint("job level alert, recording")
            alert_pending = line.reason
            continue

        # ignore Autotest.install => GOOD lines
        if (line.testname == "Autotest.install" and
                line.status == "GOOD"):
            tko_utils.dprint("Successful Autotest "
                             "install, ignoring")
            continue

        # ignore END lines for a reboot group
        if (line.testname == "reboot" and line.type == "END"):
            tko_utils.dprint("reboot group, ignoring")
            continue

        # convert job-level ABORTs into a 'CLIENT_JOB' test, and
        # ignore other job-level events
        if line.testname is None:
            if (line.status == "ABORT" and
                    line.type != "END"):
                line.testname = "CLIENT_JOB"
            else:
                tko_utils.dprint("job level event, "
                                 "ignoring")
                continue

        # use the group subdir for END lines
        if line.type == "END":
            line.subdir = group_subdir

        # are we inside a block group?
        if (line.indent != sought_level and
                line.status != "ABORT" and
                not line.testname.startswith('reboot.')):
            if line.subdir:
                tko_utils.dprint("set group_subdir: " + line.subdir)
                group_subdir = line.subdir
            tko_utils.dprint("ignoring incorrect indent "
                             "level %d != %d," %
                             (line.indent, sought_level))
            continue

        # use the subdir as the testname, except for
        # boot.* and kernel.* tests
        if (line.testname is None or
                not re.search(r"^(boot(\.\d+)?$|kernel\.)",
                              line.testname)):
            if line.subdir and '.' in line.subdir:
                line.testname = line.subdir

        # has a reboot started?
        if line.testname == "reboot.start":
            started_time = tko_utils.get_timestamp(
                line.optional_fields, "timestamp")
            tko_utils.dprint("reboot start event, "
                             "ignoring")
            boot_in_progress = True
            continue

        # has a reboot finished?
        if line.testname == "reboot.verify":
            line.testname = "boot.%d" % boot_count
            tko_utils.dprint("reboot verified")
            boot_in_progress = False
            # the verify line's reason identifies the kernel we booted
            verify_ident = line.reason.strip()
            current_kernel = kernel(self.job, verify_ident)
            boot_count += 1

        # attach any recorded job-level alert to this test
        if alert_pending:
            line.status = "ALERT"
            line.reason = alert_pending
            alert_pending = None

        # create the actual test object
        finished_time = tko_utils.get_timestamp(
            line.optional_fields, "timestamp")
        final_status = stack.end()
        tko_utils.dprint("Adding: "
                         "%s\nSubdir:%s\nTestname:%s\n%s" %
                         (final_status, line.subdir,
                          line.testname, line.reason))
        new_test = test.parse_test(self.job, line.subdir, line.testname,
                                   final_status, line.reason,
                                   current_kernel, started_time,
                                   finished_time)
        started_time = None
        new_tests.append(new_test)

    # the job is finished, but we never came back from reboot
    if boot_in_progress:
        testname = "boot.%d" % boot_count
        reason = "machine did not return from reboot"
        tko_utils.dprint(("Adding: ABORT\nSubdir:----\n"
                          "Testname:%s\n%s") %
                         (testname, reason))
        new_test = test.parse_test(self.job, None, testname, "ABORT",
                                   reason, current_kernel, None, None)
        new_tests.append(new_test)
    yield new_tests
def __init__(self, spec, reference, hash):
    """Record the metadata for a single kernel patch.

    spec/reference/hash are stored on the instance after delegating to
    the parent class constructor with the same arguments.
    """
    tko_utils.dprint("PATCH::%s %s %s" % (spec, reference, hash))
    super(patch, self).__init__(spec, reference, hash)
    self.spec, self.reference, self.hash = spec, reference, hash
def state_iterator(self, buffer):
    """Status-log state machine generator (pep8 variant).

    Yields, each time the input buffer drains, the list of test objects
    created or updated since the previous yield. Maintains an
    indentation stack matching nested START/STATUS/END records and,
    when the job ends with levels still open, injects synthetic ABORT
    lines to close them out.
    NOTE(review): statement order is load-bearing; code left untouched,
    comments only added.
    """
    line = None
    new_tests = []
    job_count, boot_count = 0, 0
    min_stack_size = 0
    stack = status_lib.status_stack()
    current_kernel = kernel("", [])  # UNKNOWN
    current_status = status_lib.statuses[-1]
    current_reason = None
    started_time_stack = [None]
    subdir_stack = [None]
    running_test = None
    running_reasons = set()
    yield []  # we're ready to start running

    # create a RUNNING SERVER_JOB entry to represent the entire test
    running_job = test.parse_partial_test(self.job, "----", "SERVER_JOB",
                                          "", current_kernel,
                                          self.job.started_time)
    new_tests.append(running_job)

    while True:
        # are we finished with parsing?
        if buffer.size() == 0 and self.finished:
            if stack.size() == 0:
                break
            # we have status lines left on the stack,
            # we need to implicitly abort them first
            tko_utils.dprint('\nUnexpected end of job, aborting')
            abort_subdir_stack = list(subdir_stack)
            if self.job.aborted_by:
                reason = "Job aborted by %s" % self.job.aborted_by
                reason += self.job.aborted_on.strftime(
                    " at %b %d %H:%M:%S")
            else:
                reason = "Job aborted unexpectedly"
            # timestamp of the last parsed line; assumes at least one
            # status line preceded the abort — TODO confirm
            timestamp = line.optional_fields.get('timestamp')
            # inject one dummy abort for every still-open level
            for i in reversed(xrange(stack.size())):
                if abort_subdir_stack:
                    subdir = abort_subdir_stack.pop()
                else:
                    subdir = None
                abort = self.make_dummy_abort(
                    i, subdir, subdir, timestamp, reason)
                buffer.put(abort)

        # stop processing once the buffer is empty
        if buffer.size() == 0:
            yield new_tests
            new_tests = []
            continue

        # reinitialize the per-iteration state
        started_time = None
        finished_time = None

        # get the next line
        raw_line = status_lib.clean_raw_line(buffer.get())
        tko_utils.dprint('\nSTATUS: ' + raw_line.strip())
        line = status_line.parse_line(raw_line)
        if line is None:
            tko_utils.dprint('non-status line, ignoring')
            continue

        # do an initial sanity check of the indentation
        expected_indent = stack.size()
        if line.type == "END":
            expected_indent -= 1
        if line.indent < expected_indent:
            # ABORT the current level if indentation was unexpectedly low
            self.put_back_line_and_abort(
                buffer, raw_line, stack.size() - 1, subdir_stack[-1],
                line.optional_fields.get("timestamp"), line.reason)
            continue
        elif line.indent > expected_indent:
            # ignore the log if the indent was unexpectedly high
            tko_utils.dprint("unexpected extra indentation, ignoring")
            continue

        # initial line processing
        if line.type == "START":
            stack.start()
            started_time = line.get_timestamp()
            if (line.testname is None and line.subdir is None and
                    not running_test):
                # we just started a client, all tests are relative to here
                min_stack_size = stack.size()
                # start a "RUNNING" CLIENT_JOB entry
                job_name = "CLIENT_JOB.%d" % job_count
                running_client = test.parse_partial_test(self.job, None,
                                                         job_name, "",
                                                         current_kernel,
                                                         started_time)
                msg = "RUNNING: %s\n%s\n"
                msg %= (running_client.status, running_client.testname)
                tko_utils.dprint(msg)
                new_tests.append(running_client)
            elif stack.size() == min_stack_size + 1 and not running_test:
                # we just started a new test, insert a running record
                running_reasons = set()
                if line.reason:
                    running_reasons.add(line.reason)
                running_test = test.parse_partial_test(self.job,
                                                       line.subdir,
                                                       line.testname,
                                                       line.reason,
                                                       current_kernel,
                                                       started_time)
                msg = "RUNNING: %s\nSubdir: %s\nTestname: %s\n%s"
                msg %= (running_test.status, running_test.subdir,
                        running_test.testname, running_test.reason)
                tko_utils.dprint(msg)
                new_tests.append(running_test)
            started_time_stack.append(started_time)
            subdir_stack.append(line.subdir)
            continue
        elif line.type == "INFO":
            fields = line.optional_fields
            # update the current kernel if one is defined in the info
            if "kernel" in fields:
                current_kernel = line.get_kernel()
            # update the SERVER_JOB reason if one was logged for an abort
            if "job_abort_reason" in fields:
                running_job.reason = fields["job_abort_reason"]
                new_tests.append(running_job)
            continue
        elif line.type == "STATUS":
            # update the stacks
            if line.subdir and stack.size() > min_stack_size:
                subdir_stack[-1] = line.subdir
            # update the status, start and finished times
            stack.update(line.status)
            # reasons are only recorded for statuses at least as bad as
            # the current one
            if status_lib.is_worse_than_or_equal_to(line.status,
                                                    current_status):
                if line.reason:
                    # update the status of a currently running test
                    if running_test:
                        running_reasons.add(line.reason)
                        running_reasons = tko_utils.drop_redundant_messages(
                            running_reasons)
                        sorted_reasons = sorted(running_reasons)
                        running_test.reason = ", ".join(sorted_reasons)
                        current_reason = running_test.reason
                        new_tests.append(running_test)
                        msg = "update RUNNING reason: %s" % line.reason
                        tko_utils.dprint(msg)
                    else:
                        current_reason = line.reason
                current_status = stack.current_status()
            started_time = None
            finished_time = line.get_timestamp()
            # if this is a non-test entry there's nothing else to do
            if line.testname is None and line.subdir is None:
                continue
        elif line.type == "END":
            # grab the current subdir off of the subdir stack, or, if this
            # is the end of a job, just pop it off
            if (line.testname is None and line.subdir is None and
                    not running_test):
                min_stack_size = stack.size() - 1
                subdir_stack.pop()
            else:
                line.subdir = subdir_stack.pop()
                if not subdir_stack[-1] and stack.size() > min_stack_size:
                    subdir_stack[-1] = line.subdir
            # update the status, start and finished times
            stack.update(line.status)
            current_status = stack.end()
            if stack.size() > min_stack_size:
                stack.update(current_status)
                current_status = stack.current_status()
            started_time = started_time_stack.pop()
            finished_time = line.get_timestamp()
            # update the current kernel
            if line.is_successful_reboot(current_status):
                current_kernel = line.get_kernel()
            # adjust the testname if this is a reboot
            if line.testname == "reboot" and line.subdir is None:
                line.testname = "boot.%d" % boot_count
        else:
            # parse_line only yields START/INFO/STATUS/END lines
            assert False

        # have we just finished a test?
        if stack.size() <= min_stack_size:
            # if there was no testname, just use the subdir
            if line.testname is None:
                line.testname = line.subdir
            # if there was no testname or subdir, use 'CLIENT_JOB'
            if line.testname is None:
                line.testname = "CLIENT_JOB.%d" % job_count
                running_test = running_client
                job_count += 1
                if not status_lib.is_worse_than_or_equal_to(
                        current_status, "ABORT"):
                    # a job hasn't really failed just because some of the
                    # tests it ran have
                    current_status = "GOOD"

            if not current_reason:
                current_reason = line.reason
            new_test = test.parse_test(self.job, line.subdir,
                                       line.testname, current_status,
                                       current_reason, current_kernel,
                                       started_time, finished_time,
                                       running_test)
            running_test = None
            current_status = status_lib.statuses[-1]
            current_reason = None
            if new_test.testname == ("boot.%d" % boot_count):
                boot_count += 1
            msg = "ADD: %s\nSubdir: %s\nTestname: %s\n%s"
            msg %= (new_test.status, new_test.subdir, new_test.testname,
                    new_test.reason)
            tko_utils.dprint(msg)
            new_tests.append(new_test)

    # the job is finished, produce the final SERVER_JOB entry and exit
    final_job = test.parse_test(self.job, "----", "SERVER_JOB",
                                self.job.exit_status(),
                                running_job.reason, current_kernel,
                                self.job.started_time,
                                self.job.finished_time,
                                running_job)
    new_tests.append(final_job)
    yield new_tests
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """
    Parse a single job. Optionally send email on failure.

    :param db: open tko database handle (find_job/select/delete/
        insert_job/commit are used).
    :param jobname: the job tag used to identify the job in the db.
    :param path: the job results directory.
    :param reparse: if True, re-parse an already-parsed job, carrying
        the old test_idx values over where tests still match.
    :param mail_on_failure: if True, email a report of FAIL/WARN tests.
    """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old tests is a dict from tuple (test_name, subdir) to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    # fix: close the status log promptly instead of leaking the handle
    with open(status_log) as status_file:
        status_lines = status_file.readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out
    # dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try and port test_idx over from the old tests, but if old tests stop
    # matching up with new ones just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        # any leftover old tests no longer exist; purge their rows
        for test_idx in old_tests.itervalues():
            where = {'test_idx': test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # check for failures
    message_lines = [""]
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s"
                         % (test.subdir, test.status, test.reason))
        if test.status in ("FAIL", "WARN"):
            message_lines.append(format_failure_message(
                jobname, test.kernel.base, test.subdir,
                test.status, test.reason))
    message = "\n".join(message_lines)

    # send out a email report of failure
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s"
                         % (jobname, job.user))
        mailfailure(jobname, job, message)

    # write the job into the database
    db.insert_job(jobname, job)

    # Serializing job into a binary file
    try:
        from autotest.tko import tko_pb2
        from autotest.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        # protobuf bindings are optional; skip serialization when absent
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()
def put_back_line_and_abort(line_buffer, line, indent, subdir, timestamp, reason): tko_utils.dprint("Unexpected indent regression, aborting") line_buffer.put_back(line) abort = parser.make_dummy_abort(indent, subdir, subdir, timestamp, reason) line_buffer.put_back(abort)
def state_iterator(self, buffer):
    """Generator that incrementally parses status-log lines from *buffer*.

    Yields a (possibly empty) list of newly completed test objects each
    time the buffer drains. Tracks reboot progress, pending ALERTs, and
    the indent level ("sought_level") at which test groups are expected.
    """
    new_tests = []           # tests completed since the last yield
    boot_count = 0           # number of verified reboots seen so far
    group_subdir = None      # subdir remembered for END lines of a group
    sought_level = 0         # indent level at which test events are taken
    stack = status_lib.status_stack()
    current_kernel = kernel(self.job)
    boot_in_progress = False  # True between reboot.start and reboot.verify
    alert_pending = None      # reason from a job-level ALERT, applied later
    started_time = None

    while not self.finished or buffer.size():
        # stop processing once the buffer is empty
        if buffer.size() == 0:
            yield new_tests
            new_tests = []
            continue

        # parse the next line
        line = buffer.get()
        tko_utils.dprint('\nSTATUS: ' + line.strip())
        line = status_line.parse_line(line)
        if line is None:
            tko_utils.dprint('non-status line, ignoring')
            continue   # ignore non-status lines

        # have we hit the job start line?
        if (line.type == "START" and not line.subdir and
            not line.testname):
            sought_level = 1
            tko_utils.dprint("found job level start "
                             "marker, looking for level "
                             "1 groups now")
            continue

        # have we hit the job end line?
        if (line.type == "END" and not line.subdir and
            not line.testname):
            tko_utils.dprint("found job level end "
                             "marker, looking for level "
                             "0 lines now")
            sought_level = 0

        # START line, just push another layer on to the stack
        # and grab the start time if this is at the job level
        # we're currently seeking
        if line.type == "START":
            group_subdir = None
            stack.start()
            if line.indent == sought_level:
                started_time = \
                    tko_utils.get_timestamp(
                        line.optional_fields, "timestamp")
            tko_utils.dprint("start line, ignoring")
            continue
        # otherwise, update the status on the stack
        else:
            tko_utils.dprint("GROPE_STATUS: %s" %
                             [stack.current_status(),
                              line.status, line.subdir,
                              line.testname, line.reason])
            stack.update(line.status)

        # remember a job-level ALERT; it is applied to the next test below
        if line.status == "ALERT":
            tko_utils.dprint("job level alert, recording")
            alert_pending = line.reason
            continue

        # ignore Autotest.install => GOOD lines
        if (line.testname == "Autotest.install" and
            line.status == "GOOD"):
            tko_utils.dprint("Successful Autotest "
                             "install, ignoring")
            continue

        # ignore END lines for a reboot group
        if (line.testname == "reboot" and line.type == "END"):
            tko_utils.dprint("reboot group, ignoring")
            continue

        # convert job-level ABORTs into a 'CLIENT_JOB' test, and
        # ignore other job-level events
        if line.testname is None:
            if (line.status == "ABORT" and
                line.type != "END"):
                line.testname = "CLIENT_JOB"
            else:
                tko_utils.dprint("job level event, "
                                 "ignoring")
                continue

        # use the group subdir for END lines
        if line.type == "END":
            line.subdir = group_subdir

        # are we inside a block group?
        if (line.indent != sought_level and
            line.status != "ABORT" and
            not line.testname.startswith('reboot.')):
            if line.subdir:
                tko_utils.dprint("set group_subdir: "
                                 + line.subdir)
                group_subdir = line.subdir
            tko_utils.dprint("ignoring incorrect indent "
                             "level %d != %d," %
                             (line.indent, sought_level))
            continue

        # use the subdir as the testname, except for
        # boot.* and kernel.* tests
        if (line.testname is None or
            not re.search(r"^(boot(\.\d+)?$|kernel\.)",
                          line.testname)):
            if line.subdir and '.' in line.subdir:
                line.testname = line.subdir

        # has a reboot started?
        if line.testname == "reboot.start":
            started_time = tko_utils.get_timestamp(
                line.optional_fields, "timestamp")
            tko_utils.dprint("reboot start event, "
                             "ignoring")
            boot_in_progress = True
            continue

        # has a reboot finished?
        if line.testname == "reboot.verify":
            # record the reboot as a synthetic boot.N test and switch to
            # the kernel identified by the verify reason
            line.testname = "boot.%d" % boot_count
            tko_utils.dprint("reboot verified")
            boot_in_progress = False
            verify_ident = line.reason.strip()
            current_kernel = kernel(self.job, verify_ident)
            boot_count += 1

        # apply a previously recorded job-level ALERT to this test
        if alert_pending:
            line.status = "ALERT"
            line.reason = alert_pending
            alert_pending = None

        # create the actual test object
        finished_time = tko_utils.get_timestamp(
            line.optional_fields, "timestamp")
        final_status = stack.end()
        tko_utils.dprint("Adding: "
                         "%s\nSubdir:%s\nTestname:%s\n%s" %
                         (final_status, line.subdir,
                          line.testname, line.reason))
        new_test = test.parse_test(self.job, line.subdir,
                                   line.testname, final_status,
                                   line.reason, current_kernel,
                                   started_time, finished_time)
        started_time = None
        new_tests.append(new_test)

    # the job is finished, but we never came back from reboot
    if boot_in_progress:
        testname = "boot.%d" % boot_count
        reason = "machine did not return from reboot"
        tko_utils.dprint(("Adding: ABORT\nSubdir:----\n"
                          "Testname:%s\n%s") %
                         (testname, reason))
        new_test = test.parse_test(self.job, None, testname,
                                   "ABORT", reason,
                                   current_kernel, None, None)
        new_tests.append(new_test)
    yield new_tests
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """
    Parse a single job's results directory and write it into the database.

    :param db: tko database connection object (provides find_job, select,
        delete, insert_job, commit).
    :param jobname: the job tag used as the database key.
    :param path: filesystem path of the job results directory.
    :param reparse: if True, re-parse a job that is already in the database,
        porting over existing test_idx values where tests still match.
    :param mail_on_failure: if True, email a report when any test FAILed
        or WARNed.
    :return: None. Side effects: database rows are inserted/updated/deleted,
        a job.serialize binary may be written, and email may be sent.
    """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old tests is a dict from tuple (test_name, subdir) to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    # prefer "status.log", fall back to the older bare "status" file name
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    # BUGFIX: use a context manager so the status file handle is always
    # closed (the original open(...).readlines() leaked the descriptor)
    with open(status_log) as status_file:
        status_lines = status_file.readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try and port test_idx over from the old tests, but if old tests stop
    # matching up with new ones just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        # any old tests not matched by the reparse are stale: purge their
        # dependent rows first, then the test rows themselves
        for test_idx in old_tests.itervalues():
            where = {'test_idx': test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # check for failures
    message_lines = [""]
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s"
                         % (test.subdir, test.status, test.reason))
        if test.status in ("FAIL", "WARN"):
            message_lines.append(
                format_failure_message(jobname, test.kernel.base,
                                       test.subdir, test.status,
                                       test.reason))
    message = "\n".join(message_lines)

    # send out a email report of failure
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s"
                         % (jobname, job.user))
        mailfailure(jobname, job, message)

    # write the job into the database
    db.insert_job(jobname, job)

    # Serializing job into a binary file
    try:
        # tko_pb2 is imported only as an existence check: if the protobuf
        # module was never generated, skip serialization entirely
        from autotest.tko import tko_pb2
        from autotest.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()