def export_tko_job_to_file(job, jobname, filename):
    """Exports the tko job to disk file.

    @param job: database object.
    @param jobname: the job name as string.
    @param filename: The path to the results to be parsed.
    """
    from autotest_lib.tko import job_serializer

    serializer = job_serializer.JobSerializer()
    serializer.serialize_to_binary(job, jobname, filename)
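
# Hedged usage sketch for export_tko_job_to_file: "job" is assumed to be a
# tko job object produced by the status-log parser (see parse_one below),
# and the results path is hypothetical.
#
#     binary_file_name = os.path.join('/tmp/results', 'job.serialize')
#     export_tko_job_to_file(job, '1234-chromeos-test/host1', binary_file_name)
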
    def setUp(self):
        """Serializes pb_job to a temp file and reads it back for the tests."""
        super(ReadBackGetterTest, self).setUp()

        temp_binary = NamedTemporaryFile(mode='wb')
        try:
            temp_binary.write(self.pb_job.SerializeToString())
            temp_binary.flush()

            js = job_serializer.JobSerializer()
            self.from_pb_job = js.deserialize_from_binary(temp_binary.name)
        finally:
            temp_binary.close()
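
# The ReadBackGetterTest snippet above assumes imports that are not shown in
# this excerpt, most likely:
#     from tempfile import NamedTemporaryFile
#     from autotest_lib.tko import job_serializer
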
def export_tko_job_to_file(job, jobname, filename):
    """Exports the tko job to disk file.

    @param job: database object.
    @param jobname: the job name as string.
    @param filename: The path to the results to be parsed.
    """
    try:
        from autotest_lib.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        serializer.serialize_to_binary(job, jobname, filename)
    except ImportError:
        tko_utils.dprint("WARNING: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")
    def setUp(self):
        """Builds a representative tko job and serializes it into a pb job."""
        tko_patches = []
        tko_patches.append(models.patch('New spec!', 'Reference?', 123456))

        tko_kernel = models.kernel('My Computer', tko_patches, '1234567')
        tko_time = datetime.now()

        tko_job = models.job('/tmp/', 'autotest', 'test', 'My Computer',
                             tko_time, tko_time, tko_time, 'root', 'www',
                             'No one', tko_time, {'1+1': 2})
        tko_job.afe_parent_job_id = '111'
        tko_job.build_version = 'R1-1.0.0'
        tko_job.suite = 'bvt'
        tko_job.board = 'alex'
        tko_job.index = 2

        tko_iteration = models.iteration(0, {
            '2+2': 4,
            '3+3': 6
        }, {
            '4+4': 8,
            '5+5': 10,
            '6+6': 12
        })

        tko_labels = ['unittest', 'dummy test', 'autotest']

        # See the comment in job_serializer.py about the models.test
        # constructor.
        tko_test = models.test('/tmp/', 'mocktest', 'Completed', 'N/A',
                               tko_kernel, 'My Computer', tko_time, tko_time,
                               [tko_iteration, tko_iteration, tko_iteration],
                               {'abc': 'def'}, [], tko_labels)
        tko_test.test_idx = 3
        self.tko_job = tko_job
        self.tko_job.tests = [tko_test, tko_test, tko_test]

        self.pb_job = tko_pb2.Job()
        self.tag = '1-abc./.'
        self.expected_afe_job_id = '1'

        js = job_serializer.JobSerializer()
        js.set_pb_job(self.tko_job, self.pb_job, self.tag)
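
# Hedged follow-on sketch: the serializer methods shown elsewhere in this
# file can round-trip the job built above; the temp path is hypothetical.
#
#     js.serialize_to_binary(self.tko_job, self.tag, '/tmp/job.serialize')
#     readback = js.deserialize_from_binary('/tmp/job.serialize')
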
    def setUp(self):
        """Builds a representative tko job and serializes it into a pb job."""
        tko_patches = []
        tko_patches.append(models.patch('New spec!', 'Reference?', 123456))

        tko_kernel = models.kernel('My Computer', tko_patches, '1234567')
        tko_time = datetime.now()

        tko_job = models.job('/tmp/', 'autotest', 'test', 'My Computer',
                             tko_time, tko_time, tko_time, 'root', 'www',
                             'No one', tko_time, {'1+1': 2})

        tko_iteration = models.iteration(0, {
            '2+2': 4,
            '3+3': 6
        }, {
            '4+4': 8,
            '5+5': 10,
            '6+6': 12
        })

        tko_labels = ['unittest', 'dummy test', 'autotest']

        tko_test = models.test('/tmp/', 'mocktest', 'Completed', 'N/A',
                               tko_kernel, 'My Computer', tko_time, tko_time,
                               [tko_iteration, tko_iteration, tko_iteration],
                               {'abc': 'def'}, tko_labels)

        self.tko_job = tko_job
        self.tko_job.tests = [tko_test, tko_test, tko_test]

        self.pb_job = tko_pb2.Job()
        self.tag = '1-abc./.'
        self.expected_afe_job_id = '1'

        js = job_serializer.JobSerializer()
        js.set_pb_job(self.tko_job, self.pb_job, self.tag)
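
# Note: this older setUp variant calls models.test with one argument fewer
# than the variant above (the extra empty-list argument is absent), matching
# an earlier models.test signature.
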
def parse_one(db, jobname, path, parse_options):
    """Parse a single job. Optionally send email on failure.

    @param db: database object.
    @param jobname: the tag used to search for existing job in db,
                    e.g. '1234-chromeos-test/host1'
    @param path: The path to the results to be parsed.
    @param parse_options: _ParseOptions instance.
    """
    reparse = parse_options.reparse
    mail_on_failure = parse_options.mail_on_failure
    dry_run = parse_options.dry_run
    suite_report = parse_options.suite_report
    datastore_creds = parse_options.datastore_creds
    export_to_gcloud_path = parse_options.export_to_gcloud_path

    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old_tests maps (test_name, subdir) tuples to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = parser_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    with open(status_log) as status_file:
        status_lines = status_file.readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try to port test_idx over from the old tests, but if the old tests stop
    # matching up with the new ones just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        if not dry_run:
            for test_idx in old_tests.itervalues():
                where = {'test_idx': test_idx}
                db.delete('tko_iteration_result', where)
                db.delete('tko_iteration_perf_value', where)
                db.delete('tko_iteration_attributes', where)
                db.delete('tko_test_attributes', where)
                db.delete('tko_test_labels_tests', {'test_id': test_idx})
                db.delete('tko_tests', where)

    job.build = None
    job.board = None
    job.build_version = None
    job.suite = None
    if job.label:
        label_info = site_utils.parse_job_name(job.label)
        if label_info:
            job.build = label_info.get('build', None)
            job.build_version = label_info.get('build_version', None)
            job.board = label_info.get('board', None)
            job.suite = label_info.get('suite', None)

    # Upload job details to Sponge.
    if not dry_run:
        sponge_url = sponge_utils.upload_results(job, log=tko_utils.dprint)
        if sponge_url:
            job.keyval_dict['sponge_url'] = sponge_url

    # check for failures
    message_lines = [""]
    job_successful = True
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s" %
                         (test.subdir, test.status, test.reason))
        if test.status != 'GOOD':
            job_successful = False
            message_lines.append(
                format_failure_message(jobname, test.kernel.base, test.subdir,
                                       test.status, test.reason))
    try:
        message = "\n".join(message_lines)

        if not dry_run:
            # send out an email report of the failure
            if len(message) > 2 and mail_on_failure:
                tko_utils.dprint(
                    "Sending email report of failure on %s to %s" %
                    (jobname, job.user))
                mailfailure(jobname, job, message)

            # write the job into the database.
            job_data = db.insert_job(jobname,
                                     job,
                                     parent_job_id=job_keyval.get(
                                         constants.PARENT_JOB_ID, None))

            # Upload perf values to the perf dashboard, if applicable.
            for test in job.tests:
                perf_uploader.upload_test(job, test, jobname)

            # Although the cursor has autocommit, we still need to force it to
            # commit existing changes before we can use django models; otherwise
            # it will deadlock when django models try to start a new
            # transaction while the current one has not finished yet.
            db.commit()

            # Handle retry job.
            orig_afe_job_id = job_keyval.get(constants.RETRY_ORIGINAL_JOB_ID,
                                             None)
            if orig_afe_job_id:
                orig_job_idx = tko_models.Job.objects.get(
                    afe_job_id=orig_afe_job_id).job_idx
                _invalidate_original_tests(orig_job_idx, job.index)
    except Exception as e:
        metadata = {
            'path': path,
            'error': str(e),
            'details': traceback.format_exc()
        }
        tko_utils.dprint("Hit exception while uploading to tko db:\n%s" %
                         traceback.format_exc())
        autotest_es.post(use_http=True,
                         type_str='parse_failure',
                         metadata=metadata)
        raise

    # Serialize the job into a binary file.
    try:
        from autotest_lib.tko import tko_pb2
        from autotest_lib.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest_lib.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    if not dry_run:
        db.commit()

    # Generate a suite report.
    # Check whether this is a suite job: a suite job is hostless, its jobname
    # has the form <JOB_ID>-<USERNAME>/hostless, and its suite field is not
    # NULL. Only generate the timeline report when datastore_parent_key is
    # given.
    try:
        datastore_parent_key = job_keyval.get('datastore_parent_key', None)
        if (suite_report and jobname.endswith('/hostless')
                and job_data['suite'] and datastore_parent_key):
            tko_utils.dprint('Start dumping suite timing report...')
            timing_log = os.path.join(path, 'suite_timing.log')
            dump_cmd = (
                "%s/site_utils/dump_suite_report.py %s "
                "--output='%s' --debug" %
                (common.autotest_dir, job_data['afe_job_id'], timing_log))
            subprocess.check_output(dump_cmd, shell=True)
            tko_utils.dprint('Successfully finished dumping suite timing '
                             'report')

            if (datastore_creds and export_to_gcloud_path
                    and os.path.exists(export_to_gcloud_path)):
                upload_cmd = [
                    export_to_gcloud_path, datastore_creds, timing_log,
                    '--parent_key',
                    repr(tuple(datastore_parent_key))
                ]
                tko_utils.dprint('Start exporting timeline report to gcloud')
                subprocess.check_output(upload_cmd)
                tko_utils.dprint('Successfully exported timeline report to '
                                 'gcloud')
            else:
                tko_utils.dprint('DEBUG: skipping export of the suite '
                                 'timeline to gcloud, because either the '
                                 'gcloud creds or the export_to_gcloud '
                                 'script was not found.')
    except Exception as e:
        tko_utils.dprint("WARNING: fail to dump/export suite report. "
                         "Error:\n%s" % e)

    # Mark GS_OFFLOADER_NO_OFFLOAD in gs_offloader_instructions at the end of
    # the function, so that any failure (e.g., a db connection error) prevents
    # gs_offloader_instructions from being updated and the logs can still be
    # uploaded for troubleshooting.
    if job_successful:
        # Check if we should not offload this test's results.
        if job_keyval.get(constants.JOB_OFFLOAD_FAILURES_KEY, False):
            # Update the gs_offloader_instructions json file.
            gs_instructions_file = os.path.join(
                path, constants.GS_OFFLOADER_INSTRUCTIONS)
            gs_offloader_instructions = {}
            if os.path.exists(gs_instructions_file):
                with open(gs_instructions_file, 'r') as f:
                    gs_offloader_instructions = json.load(f)

            gs_offloader_instructions[constants.GS_OFFLOADER_NO_OFFLOAD] = True
            with open(gs_instructions_file, 'w') as f:
                json.dump(gs_offloader_instructions, f)
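
# Hedged usage sketch for parse_one above. The _ParseOptions shape is an
# assumption inferred from the attribute reads at the top of the function;
# the db handle and results path are hypothetical.
import collections

_ParseOptions = collections.namedtuple(
    '_ParseOptions', ['reparse', 'mail_on_failure', 'dry_run', 'suite_report',
                      'datastore_creds', 'export_to_gcloud_path'])

# parse_one(db, '1234-chromeos-test/host1',
#           '/usr/local/autotest/results/1234-chromeos-test',
#           _ParseOptions(reparse=False, mail_on_failure=False, dry_run=True,
#                         suite_report=False, datastore_creds=None,
#                         export_to_gcloud_path=None))
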
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """Parse a single job. Optionally send email on failure.

    @param db: database object.
    @param jobname: the tag used to search for existing job in db,
                    e.g. '1234-chromeos-test/host1'
    @param path: The path to the results to be parsed.
    @param reparse: True/False, whether this is a reparse of the job.
    @param mail_on_failure: whether to send email on a FAILED test.
    """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old_tests maps (test_name, subdir) tuples to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    with open(status_log) as status_file:
        status_lines = status_file.readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try to port test_idx over from the old tests, but if the old tests stop
    # matching up with the new ones just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        for test_idx in old_tests.itervalues():
            where = {'test_idx': test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_perf_value', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # check for failures
    message_lines = [""]
    job_successful = True
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s" %
                         (test.subdir, test.status, test.reason))
        if test.status != 'GOOD':
            job_successful = False
            message_lines.append(
                format_failure_message(jobname, test.kernel.base, test.subdir,
                                       test.status, test.reason))

    message = "\n".join(message_lines)

    # send out an email report of the failure
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s" %
                         (jobname, job.user))
        mailfailure(jobname, job, message)

    # write the job into the database.
    db.insert_job(jobname,
                  job,
                  parent_job_id=job_keyval.get(constants.PARENT_JOB_ID, None))

    # Upload perf values to the perf dashboard, if applicable.
    for test in job.tests:
        perf_uploader.upload_test(job, test)

    # Although the cursor has autocommit, we still need to force it to commit
    # existing changes before we can use django models; otherwise it will
    # deadlock when django models try to start a new transaction while the
    # current one has not finished yet.
    db.commit()

    # Handle retry job.
    orig_afe_job_id = job_keyval.get(constants.RETRY_ORIGINAL_JOB_ID, None)
    if orig_afe_job_id:
        orig_job_idx = tko_models.Job.objects.get(
            afe_job_id=orig_afe_job_id).job_idx
        _invalidate_original_tests(orig_job_idx, job.index)

    # Serialize the job into a binary file.
    try:
        from autotest_lib.tko import tko_pb2
        from autotest_lib.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest_lib.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()

    # Mark GS_OFFLOADER_NO_OFFLOAD in gs_offloader_instructions at the end of
    # the function, so that any failure (e.g., a db connection error) prevents
    # gs_offloader_instructions from being updated and the logs can still be
    # uploaded for troubleshooting.
    if job_successful:
        # Check if we should not offload this test's results.
        if job_keyval.get(constants.JOB_OFFLOAD_FAILURES_KEY, False):
            # Update the gs_offloader_instructions json file.
            gs_instructions_file = os.path.join(
                path, constants.GS_OFFLOADER_INSTRUCTIONS)
            gs_offloader_instructions = {}
            if os.path.exists(gs_instructions_file):
                with open(gs_instructions_file, 'r') as f:
                    gs_offloader_instructions = json.load(f)

            gs_offloader_instructions[constants.GS_OFFLOADER_NO_OFFLOAD] = True
            with open(gs_instructions_file, 'w') as f:
                json.dump(gs_offloader_instructions, f)
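
# Note: this parse_one variant predates the _ParseOptions object used above;
# it takes reparse and mail_on_failure directly and has no dry-run, Sponge
# upload, or suite-report handling.
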
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """
    Parse a single job. Optionally send email on failure.
    """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old_tests maps (test_name, subdir) tuples to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    with open(status_log) as status_file:
        status_lines = status_file.readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try to port test_idx over from the old tests, but if the old tests stop
    # matching up with the new ones just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        for test_idx in old_tests.itervalues():
            where = {'test_idx': test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # check for failures
    message_lines = [""]
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s"
                         % (test.subdir, test.status, test.reason))
        if test.status in ("FAIL", "WARN"):
            message_lines.append(format_failure_message(
                jobname, test.kernel.base, test.subdir,
                test.status, test.reason))
    message = "\n".join(message_lines)

    # send out an email report of the failure
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s"
                         % (jobname, job.user))
        mailfailure(jobname, job, message)

    # write the job into the database
    db.insert_job(jobname, job)

    # Serialize the job into a binary file.
    try:
        from autotest_lib.tko import tko_pb2
        from autotest_lib.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest_lib.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()
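
# Note: this oldest parse_one variant flags only FAIL and WARN statuses as
# failures, whereas the newer variants above treat any non-GOOD status as a
# failure and also track overall job success for the gs_offloader
# instructions.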