def init_parser(self):
    """
    Start the continuous parsing of self.resultdir. This sets up
    the database connection and inserts the basic job object into
    the database if necessary.
    """
    # Continuous parsing is opt-in; bail out early when disabled.
    if not self._using_parser:
        return

    # Send parser debugging output to a per-results-dir log file.
    # NOTE(review): buffering=0 on a text-mode file is Python 2 only;
    # under Python 3 this raises ValueError — confirm target version.
    debug_log_path = os.path.join(self.resultdir, '.parse.log')
    debug_log = open(debug_log_path, 'w', 0)
    tko_utils.redirect_parser_debugging(debug_log)

    # Open the results db and build the parser + job model pair.
    self.results_db = tko_db.db(autocommit=True)
    self.parser = status_lib.parser(self._STATUS_VERSION)
    self.job_model = self.parser.make_job(self.resultdir)
    self.parser.start(self.job_model)

    # Insert the job into the db unless it is already recorded, in
    # which case reuse the existing job/machine indices.
    job_idx = self.results_db.find_job(self._parse_job)
    if job_idx is None:
        self.results_db.insert_job(self._parse_job, self.job_model)
    else:
        machine_idx = self.results_db.lookup_machine(self.job_model.machine)
        self.job_model.index = job_idx
        self.job_model.machine_idx = machine_idx
def new_parser_harness(results_dirpath):
    """Ensure sane environment and create new parser with wrapper.

    Args:
        results_dirpath: str; Path to job results directory

    Returns:
        ParserHarness;

    Raises:
        BadResultsDirectoryError; If results dir does not exist or is
            malformed.
    """
    # Guard: the results directory itself must exist.
    if not path.exists(results_dirpath):
        raise BadResultsDirectoryError

    # Pull the status-log format version out of the job keyval file.
    job_keyval = utils.read_keyval(path.join(results_dirpath, KEYVAL))
    status_version = job_keyval[STATUS_VERSION]

    # Build a version-appropriate parser and its job object.
    parser = status_lib.parser(status_version)
    job = parser.make_job(results_dirpath)

    # Guard: a results dir without a status.log is malformed.
    status_log_filepath = path.join(results_dirpath, 'status.log')
    if not path.exists(status_log_filepath):
        raise BadResultsDirectoryError

    return ParserHarness(parser, job, job_keyval, status_version,
                         status_log_filepath)
def new_parser_harness(results_dirpath):
    """Ensure sane environment and create new parser with wrapper.

    Args:
        results_dirpath: str; Path to job results directory

    Returns:
        ParserHarness;

    Raises:
        BadResultsDirectoryError; If results dir does not exist or is
            malformed.
    """
    if not path.exists(results_dirpath):
        # No such directory at all.
        raise BadResultsDirectoryError

    # The job keyval supplies the status-log format version.
    keyval_path = path.join(results_dirpath, KEYVAL)
    job_keyval = utils.read_keyval(keyval_path)
    status_version = job_keyval[STATUS_VERSION]

    # Construct the matching parser and seed it with a job object.
    parser = status_lib.parser(status_version)
    job = parser.make_job(results_dirpath)

    status_log_filepath = path.join(results_dirpath, 'status.log')
    if not path.exists(status_log_filepath):
        # Directory exists but is missing its status log: malformed.
        raise BadResultsDirectoryError

    return ParserHarness(
        parser, job, job_keyval, status_version, status_log_filepath)
def init_parser(self):
    """
    Start the continuous parsing of self.resultdir. This sets up
    the database connection and inserts the basic job object into
    the database if necessary.
    """
    # Nothing to do when continuous parsing was not requested.
    if not self._using_parser:
        return

    # Build the parser and its job model for this results dir.
    self.parser = status_lib.parser(self._STATUS_VERSION)
    self.job_model = self.parser.make_job(self.resultdir)
    self.parser.start(self.job_model)

    # Insert the job unless it already exists in the db; otherwise
    # attach the existing job/machine indices to the model.
    job_idx = models_utils.job_get_idx_by_tag(self._parse_job)
    if job_idx is None:
        dbutils.insert_job(self._parse_job, self.job_model)
    else:
        machine_idx = models_utils.machine_get_idx_by_hostname(
            self.job_model.machine)
        self.job_model.index = job_idx
        self.job_model.machine_idx = machine_idx
def test_can_import_available_versions(self):
    """Every advertised status version must yield a usable parser.

    Bug fix: the loop previously instantiated status_lib.parser(0)
    regardless of the loop variable, so only version 0 was ever
    exercised. Pass the actual version under test instead.
    """
    for version in self.available_versions:
        p = status_lib.parser(version)
        self.assertNotEqual(p, None)
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """
    Parse a single job. Optionally send email on failure.
    """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)

    # Map (test_name, subdir) -> test_idx for any previously parsed tests
    # so indices can be carried over on a reparse.
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return
        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # Determine the status-log format version from the job keyval.
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # Build the parser/job pair; fall back from status.log to status.
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    for log_name in ("status.log", "status"):
        status_log = os.path.join(path, log_name)
        if os.path.exists(status_log):
            break
    else:
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # Feed the status log through the parser.
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_lines = open(status_log).readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times; keep only
    # the first occurrence of each, preserving order.
    job.tests = []
    seen = set()
    for test in tests:
        if test in seen:
            continue
        seen.add(test)
        job.tests.append(test)

    # Try to carry test_idx over from the old tests; any new test that
    # does not match is logged, and leftover old rows are purged.
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is None:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r"
                                 % (test.testname, test.subdir))
            else:
                test.test_idx = test_idx
        for test_idx in old_tests.itervalues():
            where = {'test_idx': test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # Collect failure messages for any FAIL/WARN test with a subdir.
    message_lines = [""]
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s"
                         % (test.subdir, test.status, test.reason))
        if test.status in ("FAIL", "WARN"):
            message_lines.append(format_failure_message(
                jobname, test.kernel.base, test.subdir,
                test.status, test.reason))
    message = "\n".join(message_lines)

    # Send out an email report of failure when requested.
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s"
                         % (jobname, job.user))
        mailfailure(jobname, job, message)

    # Write the job into the database.
    db.insert_job(jobname, job)

    # Serialize the job into a binary file; silently skipped (with a
    # debug note) when the generated protobuf module is absent.
    try:
        from autotest.tko import tko_pb2
        from autotest.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)
    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """
    Parse a single job. Optionally send email on failure.
    """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)

    # (test_name, subdir) -> test_idx for previously parsed tests,
    # used to keep indices stable across a reparse.
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return
        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # Look up the status-log format version.
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # Create the parser and job; prefer status.log, fall back to status.
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    for candidate in ("status.log", "status"):
        status_log = os.path.join(path, candidate)
        if os.path.exists(status_log):
            break
    else:
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # Run the status lines through the parser.
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_lines = open(status_log).readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end may yield duplicates; keep first occurrences in order.
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # Port test_idx from the old rows where possible; log tests that
    # have no old counterpart and delete old rows left unmatched.
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is None:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r"
                                 % (test.testname, test.subdir))
            else:
                test.test_idx = test_idx
        for test_idx in old_tests.itervalues():
            where = {'test_idx': test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # Accumulate a failure report for FAIL/WARN tests.
    message_lines = [""]
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s"
                         % (test.subdir, test.status, test.reason))
        if test.status in ("FAIL", "WARN"):
            message_lines.append(
                format_failure_message(jobname, test.kernel.base,
                                       test.subdir, test.status,
                                       test.reason))
    message = "\n".join(message_lines)

    # Email the failure report when enabled and non-empty.
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s"
                         % (jobname, job.user))
        mailfailure(jobname, job, message)

    # Persist the parsed job.
    db.insert_job(jobname, job)

    # Serialize the job to a binary file; a missing generated protobuf
    # module downgrades this to a debug message.
    try:
        from autotest.tko import tko_pb2
        from autotest.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)
    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()