def new_parser_harness(results_dirpath):
    """Ensure sane environment and create new parser with wrapper.

    Args:
      results_dirpath: str; Path to job results directory

    Returns:
      ParserHarness;

    Raises:
      BadResultsDirectoryError; If results dir does not exist or is malformed.
    """
    if not path.exists(results_dirpath):
        raise BadResultsDirectoryError

    keyval_path = path.join(results_dirpath, KEYVAL)
    job_keyval = utils.read_keyval(keyval_path)
    status_version = job_keyval[STATUS_VERSION]
    parser = parser_lib.parser(status_version)
    job = parser.make_job(results_dirpath)
    status_log_filepath = path.join(results_dirpath, 'status.log')
    if not path.exists(status_log_filepath):
        raise BadResultsDirectoryError

    return ParserHarness(parser, job, job_keyval, status_version,
                         status_log_filepath)
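For orientation, here is a minimal usage sketch of the harness returned above. It assumes the ParserHarness attributes simply mirror the constructor arguments (parser, job, job_keyval, status_version, status_log_filepath); the results path is a placeholder, and the start/end calls mirror the way the parser is driven in the parse_one examples below.

# Hypothetical usage sketch; the attribute names and the results path below
# are assumptions, not taken from the function above.
harness = new_parser_harness('/usr/local/autotest/results/123-debug_user')

with open(harness.status_log_filepath) as status_log:
    status_lines = status_log.readlines()

harness.parser.start(harness.job)          # prime the state-machine parser
tests = harness.parser.end(status_lines)   # feed all lines, collect tests
print('parsed %d tests with status_version %s'
      % (len(tests), harness.status_version))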
Example #2
def parse_one(db, jobname, path, parse_options):
    """Parse a single job. Optionally send email on failure.

    @param db: database object.
    @param jobname: the tag used to search for existing job in db,
                    e.g. '1234-chromeos-test/host1'
    @param path: The path to the results to be parsed.
    @param parse_options: _ParseOptions instance.
    """
    reparse = parse_options.reparse
    mail_on_failure = parse_options.mail_on_failure
    dry_run = parse_options.dry_run
    suite_report = parse_options.suite_report
    datastore_creds = parse_options.datastore_creds
    export_to_gcloud_path = parse_options.export_to_gcloud_path

    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old_tests maps (test_name, subdir) tuples to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = parser_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_lines = open(status_log).readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # Try to carry test_idx over from the old tests, but if an old test no
    # longer matches a new one, just give up on it.
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        if not dry_run:
            for test_idx in old_tests.itervalues():
                where = {'test_idx': test_idx}
                db.delete('tko_iteration_result', where)
                db.delete('tko_iteration_perf_value', where)
                db.delete('tko_iteration_attributes', where)
                db.delete('tko_test_attributes', where)
                db.delete('tko_test_labels_tests', {'test_id': test_idx})
                db.delete('tko_tests', where)

    job.build = None
    job.board = None
    job.build_version = None
    job.suite = None
    if job.label:
        label_info = site_utils.parse_job_name(job.label)
        if label_info:
            job.build = label_info.get('build', None)
            job.build_version = label_info.get('build_version', None)
            job.board = label_info.get('board', None)
            job.suite = label_info.get('suite', None)

    # Upload job details to Sponge.
    if not dry_run:
        sponge_url = sponge_utils.upload_results(job, log=tko_utils.dprint)
        if sponge_url:
            job.keyval_dict['sponge_url'] = sponge_url

    # check for failures
    message_lines = [""]
    job_successful = True
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s" %
                         (test.subdir, test.status, test.reason))
        if test.status != 'GOOD':
            job_successful = False
            message_lines.append(
                format_failure_message(jobname, test.kernel.base, test.subdir,
                                       test.status, test.reason))
    try:
        message = "\n".join(message_lines)

        if not dry_run:
            # send out an email report of failure
            if len(message) > 2 and mail_on_failure:
                tko_utils.dprint(
                    "Sending email report of failure on %s to %s" %
                    (jobname, job.user))
                mailfailure(jobname, job, message)

            # write the job into the database.
            job_data = db.insert_job(jobname,
                                     job,
                                     parent_job_id=job_keyval.get(
                                         constants.PARENT_JOB_ID, None))

            # Upload perf values to the perf dashboard, if applicable.
            for test in job.tests:
                perf_uploader.upload_test(job, test, jobname)

            # Although the cursor has autocommit, we still need to force it to
            # commit existing changes before we can use django models; otherwise
            # it will deadlock when django models try to start a new
            # transaction while the current one has not finished yet.
            db.commit()

            # Handle retry job.
            orig_afe_job_id = job_keyval.get(constants.RETRY_ORIGINAL_JOB_ID,
                                             None)
            if orig_afe_job_id:
                orig_job_idx = tko_models.Job.objects.get(
                    afe_job_id=orig_afe_job_id).job_idx
                _invalidate_original_tests(orig_job_idx, job.index)
    except Exception as e:
        metadata = {
            'path': path,
            'error': str(e),
            'details': traceback.format_exc()
        }
        tko_utils.dprint("Hit exception while uploading to tko db:\n%s" %
                         traceback.format_exc())
        autotest_es.post(use_http=True,
                         type_str='parse_failure',
                         metadata=metadata)
        raise e

    # Serialize the job into a binary file
    try:
        from autotest_lib.tko import tko_pb2
        from autotest_lib.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest_lib.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    if not dry_run:
        db.commit()

    # Generate a suite report.
    # Check whether this is a suite job: a suite job is hostless, its jobname
    # has the form '<JOB_ID>-<USERNAME>/hostless', and its suite field is not
    # NULL. Only generate a timeline report when datastore_parent_key is given.
    try:
        datastore_parent_key = job_keyval.get('datastore_parent_key', None)
        if (suite_report and jobname.endswith('/hostless')
                and job_data['suite'] and datastore_parent_key):
            tko_utils.dprint('Start dumping suite timing report...')
            timing_log = os.path.join(path, 'suite_timing.log')
            dump_cmd = (
                "%s/site_utils/dump_suite_report.py %s "
                "--output='%s' --debug" %
                (common.autotest_dir, job_data['afe_job_id'], timing_log))
            subprocess.check_output(dump_cmd, shell=True)
            tko_utils.dprint('Successfully finished dumping suite timing report')

            if (datastore_creds and export_to_gcloud_path
                    and os.path.exists(export_to_gcloud_path)):
                upload_cmd = [
                    export_to_gcloud_path, datastore_creds, timing_log,
                    '--parent_key',
                    repr(tuple(datastore_parent_key))
                ]
                tko_utils.dprint('Start exporting timeline report to gcloud')
                subprocess.check_output(upload_cmd)
                tko_utils.dprint('Successfully exported timeline report to '
                                 'gcloud')
            else:
                tko_utils.dprint('DEBUG: skipping export of suite timeline to '
                                 'gcloud because either the gcloud creds or '
                                 'the export_to_gcloud script is missing.')
    except Exception as e:
        tko_utils.dprint("WARNING: fail to dump/export suite report. "
                         "Error:\n%s" % e)

    # Mark GS_OFFLOADER_NO_OFFLOAD in gs_offloader_instructions at the end of
    # the function, so any failure, e.g., db connection error, will stop
    # gs_offloader_instructions being updated, and logs can be uploaded for
    # troubleshooting.
    if job_successful:
        # Check if we should not offload this test's results.
        if job_keyval.get(constants.JOB_OFFLOAD_FAILURES_KEY, False):
            # Update the gs_offloader_instructions json file.
            gs_instructions_file = os.path.join(
                path, constants.GS_OFFLOADER_INSTRUCTIONS)
            gs_offloader_instructions = {}
            if os.path.exists(gs_instructions_file):
                with open(gs_instructions_file, 'r') as f:
                    gs_offloader_instructions = json.load(f)

            gs_offloader_instructions[constants.GS_OFFLOADER_NO_OFFLOAD] = True
            with open(gs_instructions_file, 'w') as f:
                json.dump(gs_offloader_instructions, f)
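The _ParseOptions instance mentioned in the docstring is not defined on this page; it is presumably a simple container with exactly the six fields read at the top of parse_one. A hedged sketch of how a caller might build it:

# Sketch only: the field list is inferred from the attribute reads at the top
# of parse_one(); the real definition may differ.
import collections

_ParseOptions = collections.namedtuple(
    '_ParseOptions',
    ['reparse', 'mail_on_failure', 'dry_run', 'suite_report',
     'datastore_creds', 'export_to_gcloud_path'])

options = _ParseOptions(reparse=False, mail_on_failure=True, dry_run=False,
                        suite_report=False, datastore_creds=None,
                        export_to_gcloud_path=None)

# With a TKO database handle obtained elsewhere, the call would then be:
#   parse_one(db, '1234-chromeos-test/host1',
#             '/results/1234-chromeos-test/host1', options)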
Example #3
def parse_one(db, pid_file_manager, jobname, path, parse_options):
    """Parse a single job. Optionally send email on failure.

    @param db: database object.
    @param pid_file_manager: pidfile.PidFileManager object.
    @param jobname: the tag used to search for existing job in db,
                    e.g. '1234-chromeos-test/host1'
    @param path: The path to the results to be parsed.
    @param parse_options: _ParseOptions instance.
    """
    reparse = parse_options.reparse
    mail_on_failure = parse_options.mail_on_failure
    dry_run = parse_options.dry_run
    suite_report = parse_options.suite_report
    datastore_creds = parse_options.datastore_creds
    export_to_gcloud_path = parse_options.export_to_gcloud_path

    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    if old_job_idx is not None and not reparse:
        tko_utils.dprint("! Job is already parsed, done")
        return

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    parser = parser_lib.parser(status_version)
    job = parser.make_job(path)
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_log_path = _find_status_log_path(path)
    if not status_log_path:
        tko_utils.dprint("! Unable to parse job, no status file")
        return
    _parse_status_log(parser, job, status_log_path)

    if old_job_idx is not None:
        job.job_idx = old_job_idx
        unmatched_tests = _match_existing_tests(db, job)
        if not dry_run:
            _delete_tests_from_db(db, unmatched_tests)

    job.afe_job_id = tko_utils.get_afe_job_id(jobname)
    job.skylab_task_id = tko_utils.get_skylab_task_id(jobname)
    job.afe_parent_job_id = job_keyval.get(constants.PARENT_JOB_ID)
    job.skylab_parent_task_id = job_keyval.get(constants.PARENT_JOB_ID)
    job.build = None
    job.board = None
    job.build_version = None
    job.suite = None
    if job.label:
        label_info = site_utils.parse_job_name(job.label)
        if label_info:
            job.build = label_info.get('build', None)
            job.build_version = label_info.get('build_version', None)
            job.board = label_info.get('board', None)
            job.suite = label_info.get('suite', None)

    result_utils_lib.LOG = tko_utils.dprint
    _throttle_result_size(path)

    # Record test result sizes in job_keyvals
    start_time = time.time()
    result_size_info = site_utils.collect_result_sizes(
            path, log=tko_utils.dprint)
    tko_utils.dprint('Finished collecting result sizes after %s seconds' %
                     (time.time()-start_time))
    job.keyval_dict.update(result_size_info.__dict__)

    # TODO(dshi): Update sizes with sponge_invocation.xml and throttle it.

    # check for failures
    message_lines = [""]
    job_successful = True
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, subdir, status, reason: %s %s %s %s"
                         % (test.testname, test.subdir, test.status,
                            test.reason))
        if test.status not in ('GOOD', 'WARN'):
            job_successful = False
            pid_file_manager.num_tests_failed += 1
            message_lines.append(format_failure_message(
                jobname, test.kernel.base, test.subdir,
                test.status, test.reason))

    message = "\n".join(message_lines)

    if not dry_run:
        # send out an email report of failure
        if len(message) > 2 and mail_on_failure:
            tko_utils.dprint("Sending email report of failure on %s to %s"
                                % (jobname, job.user))
            mailfailure(jobname, job, message)

        # Upload perf values to the perf dashboard, if applicable.
        for test in job.tests:
            perf_uploader.upload_test(job, test, jobname)

        # Upload job details to Sponge.
        sponge_url = sponge_utils.upload_results(job, log=tko_utils.dprint)
        if sponge_url:
            job.keyval_dict['sponge_url'] = sponge_url

        _write_job_to_db(db, jobname, job)

        # Verify the job data is written to the database.
        if job.tests:
            tests_in_db = db.find_tests(job.job_idx)
            tests_in_db_count = len(tests_in_db) if tests_in_db else 0
            if tests_in_db_count != len(job.tests):
                tko_utils.dprint(
                        'Failed to find enough tests for job_idx: %d. The '
                        'job should have %d tests, only found %d tests.' %
                        (job.job_idx, len(job.tests), tests_in_db_count))
                metrics.Counter(
                        'chromeos/autotest/result/db_save_failure',
                        description='The number of times parse failed to '
                        'save job to TKO database.').increment()

        # Although the cursor has autocommit, we still need to force it to
        # commit existing changes before we can use django models; otherwise
        # it will deadlock when django models try to start a new
        # transaction while the current one has not finished yet.
        db.commit()

        # Handle retry job.
        orig_afe_job_id = job_keyval.get(constants.RETRY_ORIGINAL_JOB_ID,
                                            None)
        if orig_afe_job_id:
            orig_job_idx = tko_models.Job.objects.get(
                    afe_job_id=orig_afe_job_id).job_idx
            _invalidate_original_tests(orig_job_idx, job.job_idx)

    # Serialize the job into a binary file
    export_tko_to_file = global_config.global_config.get_config_value(
            'AUTOSERV', 'export_tko_job_to_file', type=bool, default=False)

    binary_file_name = os.path.join(path, "job.serialize")
    if export_tko_to_file:
        export_tko_job_to_file(job, jobname, binary_file_name)

    if not dry_run:
        db.commit()

    # Generate a suite report.
    # Check whether this is a suite job: a suite job is hostless, its jobname
    # has the form '<JOB_ID>-<USERNAME>/hostless', and its suite field is not
    # NULL. Only generate a timeline report when datastore_parent_key is given.
    datastore_parent_key = job_keyval.get('datastore_parent_key', None)
    provision_job_id = job_keyval.get('provision_job_id', None)
    if (suite_report and jobname.endswith('/hostless')
        and job.suite and datastore_parent_key):
        tko_utils.dprint('Start dumping suite timing report...')
        timing_log = os.path.join(path, 'suite_timing.log')
        dump_cmd = ("%s/site_utils/dump_suite_report.py %s "
                    "--output='%s' --debug" %
                    (common.autotest_dir, job.afe_job_id,
                        timing_log))

        if provision_job_id is not None:
            dump_cmd += " --provision_job_id=%d" % int(provision_job_id)

        subprocess.check_output(dump_cmd, shell=True)
        tko_utils.dprint('Successfully finished dumping suite timing report')

        if (datastore_creds and export_to_gcloud_path
            and os.path.exists(export_to_gcloud_path)):
            upload_cmd = [export_to_gcloud_path, datastore_creds,
                            timing_log, '--parent_key',
                            datastore_parent_key]
            tko_utils.dprint('Start exporting timeline report to gcloud')
            subprocess.check_output(upload_cmd)
            tko_utils.dprint('Successfully exported timeline report to '
                             'gcloud')
        else:
            tko_utils.dprint('DEBUG: skipping export of suite timeline to '
                             'gcloud because either the gcloud creds or '
                             'the export_to_gcloud script is missing.')

    # Mark GS_OFFLOADER_NO_OFFLOAD in gs_offloader_instructions at the end of
    # the function, so any failure, e.g., db connection error, will stop
    # gs_offloader_instructions being updated, and logs can be uploaded for
    # troubleshooting.
    if job_successful:
        # Check if we should not offload this test's results.
        if job_keyval.get(constants.JOB_OFFLOAD_FAILURES_KEY, False):
            # Update the gs_offloader_instructions json file.
            gs_instructions_file = os.path.join(
                    path, constants.GS_OFFLOADER_INSTRUCTIONS)
            gs_offloader_instructions = {}
            if os.path.exists(gs_instructions_file):
                with open(gs_instructions_file, 'r') as f:
                    gs_offloader_instructions = json.load(f)

            gs_offloader_instructions[constants.GS_OFFLOADER_NO_OFFLOAD] = True
            with open(gs_instructions_file, 'w') as f:
                json.dump(gs_offloader_instructions, f)
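Example #3 factors the inline status-log handling of Example #2 into helpers (_find_status_log_path, _parse_status_log) that are not shown on this page. A plausible reconstruction that mirrors Example #2's inline logic (fall back from status.log to status, then start/end plus de-duplication) could look like this; treat it as a sketch rather than the actual implementation.

import os


def _find_status_log_path(path):
    """Return the path of the status log under |path|, or '' if none exists."""
    status_log_path = os.path.join(path, 'status.log')
    if os.path.exists(status_log_path):
        return status_log_path
    status_log_path = os.path.join(path, 'status')
    if os.path.exists(status_log_path):
        return status_log_path
    return ''


def _parse_status_log(parser, job, status_log_path):
    """Feed the status log through |parser| and attach de-duplicated tests."""
    with open(status_log_path) as status_log:
        status_lines = status_log.readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups.
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)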
Example #4
def test_can_import_available_versions(self):
    for version in self.available_versions:
        p = parser_lib.parser(version)
        self.assertNotEqual(p, None)
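The fixture that populates self.available_versions is not shown. A minimal, hypothetical harness that would make Example #4 runnable might look like the following; the version list [0, 1] is purely an illustrative assumption (the examples above only confirm that 0 is the default status_version).

import unittest

from autotest_lib.tko import parser_lib


class ParserLibTest(unittest.TestCase):
    """Hypothetical fixture for Example #4; the version list is assumed."""

    def setUp(self):
        # The real fixture presumably enumerates the parser versions shipped
        # with the tree; [0, 1] is only an illustrative guess.
        self.available_versions = [0, 1]

    def test_can_import_available_versions(self):
        # Repeated from Example #4 so this sketch is self-contained.
        for version in self.available_versions:
            p = parser_lib.parser(version)
            self.assertNotEqual(p, None)


if __name__ == '__main__':
    unittest.main()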