Example #1
def store_failure_lines(project, job_guid, job_log, priority):
    """This task is a wrapper for the store_failure_lines command."""
    logger.debug('Running store_failure_lines for job %s' % job_guid)
    failureline.store_failure_lines(project, job_guid, job_log)
    if settings.AUTOCLASSIFY_JOBS:
        autoclassify.apply_async(args=[project, job_guid],
                                 routing_key="autoclassify.%s" % priority)
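
These snippets are Celery tasks from Treeherder's log-parsing pipeline. As a minimal sketch of how such a task might be declared and enqueued, assuming a hypothetical app and broker URL (the arguments below are placeholders, not Treeherder's real configuration):

from celery import Celery

app = Celery("treeherder", broker="amqp://localhost//")  # hypothetical broker

@app.task(name="store-failure-lines")
def store_failure_lines(project, job_guid, job_log, priority):
    ...  # body as in the examples above

# Callers route follow-up work onto a priority-specific queue, just as the
# examples do for autoclassify:
store_failure_lines.apply_async(
    args=["my-project", "some-job-guid", None, "normal"],  # placeholder args
    routing_key="store_failure_lines.normal",
)
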
Example #2
def store_failure_lines(job_log, priority):
    """Store the failure lines from a log corresponding to the structured
    errorsummary file."""
    logger.debug('Running store_failure_lines for job %s' % job_log.job.id)
    failureline.store_failure_lines(job_log)
    if settings.AUTOCLASSIFY_JOBS:
        autoclassify.apply_async(args=[job_log.job.id],
                                 routing_key="autoclassify.%s" % priority)
Example #3
def crossreference_error_lines(job_id, priority):
    """Match structured (FailureLine) and unstructured (TextLogError) lines
    for a job."""
    newrelic.agent.add_custom_parameter("job_id", job_id)
    logger.debug("Running crossreference-error-lines for job %s" % job_id)
    job = Job.objects.get(id=job_id)
    has_lines = crossreference_job(job)
    if has_lines and settings.AUTOCLASSIFY_JOBS:
        autoclassify.apply_async(args=[job_id],
                                 routing_key="autoclassify.%s" % priority)
Example #4
def store_failure_lines(project, job_guid, job_log, priority):
    """This task is a wrapper for the store_failure_lines command."""
    try:
        logger.debug('Running store_failure_lines for job %s' % job_guid)
        failureline.store_failure_lines(project, job_guid, job_log)
        if settings.AUTOCLASSIFY_JOBS:
            autoclassify.apply_async(args=[project, job_guid],
                                     routing_key="autoclassify.%s" % priority)

    except Exception as e:
        store_failure_lines.retry(exc=e, countdown=(1 + store_failure_lines.request.retries) * 60)
Example #5
def crossreference_error_lines(job_id, priority):
    """Match structured (FailureLine) and unstructured (TextLogError) lines
    for a job."""
    newrelic.agent.add_custom_parameter("job_id", job_id)
    logger.debug("Running crossreference-error-lines for job %s" % job_id)
    job = Job.objects.get(id=job_id)
    has_lines = crossreference_job(job)
    if has_lines and settings.AUTOCLASSIFY_JOBS:
        autoclassify.apply_async(
            args=[job_id],
            routing_key="autoclassify.%s" % priority)
Example #6
def store_failure_lines(project, job_guid, job_log, priority):
    """This task is a wrapper for the store_failure_lines command."""
    try:
        logger.debug('Running store_failure_lines for job %s' % job_guid)
        failureline.store_failure_lines(project, job_guid, job_log)
        if settings.AUTOCLASSIFY_JOBS:
            autoclassify.apply_async(args=[project, job_guid],
                                     routing_key="autoclassify.%s" % priority)

    except Exception as e:
        store_failure_lines.retry(
            exc=e, countdown=(1 + store_failure_lines.request.retries) * 60)
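
Examples #4 and #6 retry with linear backoff: each failed attempt re-enqueues the task 60 seconds later than the previous one (60s, 120s, 180s, ...). A sketch of the same idiom using Celery's bound-task form; the max_retries value is an assumption, not taken from the examples:

from celery import Celery

app = Celery("treeherder")

@app.task(bind=True, max_retries=3)  # assumed retry cap
def store_failure_lines(self, project, job_guid, job_log, priority):
    try:
        ...  # parse and store the failure lines
    except Exception as e:
        # Linear backoff: 60s after the first failure, 120s after the second.
        raise self.retry(exc=e, countdown=(1 + self.request.retries) * 60)
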
Example #7
def parse_logs(job_id, job_log_ids, priority):
    newrelic.agent.add_custom_parameter("job_id", str(job_id))

    job = Job.objects.get(id=job_id)
    job_logs = JobLog.objects.filter(id__in=job_log_ids,
                                     job=job)

    if len(job_log_ids) != len(job_logs):
        logger.warning("Failed to load all expected job ids: %s", ", ".join(job_log_ids))

    parser_tasks = {
        "errorsummary_json": store_failure_lines,
        "buildbot_text": parse_unstructured_log,
        "builds-4h": parse_unstructured_log
    }

    completed_names = set()
    exceptions = []
    for job_log in job_logs:
        parser = parser_tasks.get(job_log.name)
        if parser:
            try:
                parser(job_log)
            except Exception as e:
                exceptions.append(e)
            else:
                completed_names.add(job_log.name)

    if exceptions:
        raise exceptions[0]

    if ("errorsummary_json" in completed_names and
        ("buildbot_text" in completed_names or
         "builds-4h" in completed_names)):

        success = crossreference_error_lines(job)

        if success:
            logger.debug("Scheduling autoclassify for job %i", job_id)
            autoclassify.apply_async(
                args=[job_id],
                routing_key="autoclassify.%s" % priority)
        else:
            job.autoclassify_status = Job.SKIPPED
    else:
        job.autoclassify_status = Job.SKIPPED
    job.save()
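
Every parse_logs variant uses the same dispatch-table pattern: a dict maps a log's name to the parser that handles it, and unknown log types are skipped rather than treated as errors. A self-contained sketch with stand-in parsers (the real ones are store_failure_lines and parse_unstructured_log):

def parse_errorsummary(job_log):
    print("structured parse of", job_log)

def parse_unstructured(job_log):
    print("unstructured parse of", job_log)

PARSER_TASKS = {
    "errorsummary_json": parse_errorsummary,
    "buildbot_text": parse_unstructured,
}

def dispatch(job_log_name, job_log):
    parser = PARSER_TASKS.get(job_log_name)
    if parser is None:
        return False  # unknown log type: skip, don't fail
    parser(job_log)
    return True
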
Example #8
def parse_logs(job_id, job_log_ids, priority):
    newrelic.agent.add_custom_parameter("job_id", str(job_id))

    job = Job.objects.get(id=job_id)
    job_logs = JobLog.objects.filter(id__in=job_log_ids, job=job)

    if len(job_log_ids) != len(job_logs):
        logger.warning("Failed to load all expected job ids: %s" %
                       ", ".join(job_log_ids))

    parser_tasks = {
        "errorsummary_json": store_failure_lines,
        "buildbot_text": parse_unstructured_log,
        "builds-4h": parse_unstructured_log
    }

    completed_names = set()
    exceptions = []
    for job_log in job_logs:
        parser = parser_tasks.get(job_log.name)
        if parser:
            try:
                parser(job_log)
            except Exception as e:
                exceptions.append(e)
            else:
                completed_names.add(job_log.name)

    if exceptions:
        raise exceptions[0]

    if ("errorsummary_json" in completed_names
            and ("buildbot_text" in completed_names
                 or "builds-4h" in completed_names)):

        success = crossreference_error_lines(job)

        if success and settings.AUTOCLASSIFY_JOBS:
            logger.debug("Scheduling autoclassify for job %i" % job_id)
            autoclassify.apply_async(args=[job_id],
                                     routing_key="autoclassify.%s" % priority)
        else:
            job.autoclassify_status = Job.SKIPPED
    else:
        job.autoclassify_status = Job.SKIPPED
    job.save()
Example #9
def crossreference_error_lines(job_id, priority):
    """Match structured (FailureLine) and unstructured (TextLogError) lines
    for a job."""
    newrelic.agent.add_custom_parameter("job_id", job_id)
    logger.debug("Running crossreference-error-lines for job %s" % job_id)
    job = Job.objects.get(id=job_id)
    has_lines = crossreference_job(job)
    if has_lines and settings.AUTOCLASSIFY_JOBS:
        logger.debug("Scheduling autoclassify for job %i" % job_id)
        autoclassify.apply_async(
            args=[job_id],
            routing_key="autoclassify.%s" % priority)
    elif not settings.AUTOCLASSIFY_JOBS:
        job.autoclassify_status = Job.SKIPPED
        job.save(update_fields=['autoclassify_status'])
    else:
        logger.debug("Job %i didn't have any crossreferenced lines, skipping autoclassify " % job_id)
Example #10
def crossreference_error_lines(job_id, priority):
    """Match structured (FailureLine) and unstructured (TextLogError) lines
    for a job."""
    newrelic.agent.add_custom_parameter("job_id", job_id)
    logger.debug("Running crossreference-error-lines for job %s" % job_id)
    job = Job.objects.get(id=job_id)
    has_lines = crossreference_job(job)
    if has_lines and settings.AUTOCLASSIFY_JOBS:
        logger.debug("Scheduling autoclassify for job %i" % job_id)
        autoclassify.apply_async(args=[job_id],
                                 routing_key="autoclassify.%s" % priority)
    elif not settings.AUTOCLASSIFY_JOBS:
        job.autoclassify_status = Job.SKIPPED
        job.save(update_fields=['autoclassify_status'])
    else:
        logger.debug(
            "Job %i didn't have any crossreferenced lines, skipping autoclassify",
            job_id)
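
Examples #9 and #10 persist the status with save(update_fields=['autoclassify_status']), which limits the UPDATE to that one column. A brief sketch of why, reusing the Job model name from the examples:

# job.save() with no arguments rewrites every column from the in-memory
# instance, so a worker holding stale data can clobber a concurrent update.
# Restricting the write keeps the statement to the one field that changed:
job.autoclassify_status = Job.SKIPPED
job.save(update_fields=["autoclassify_status"])
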
Example #11
def parse_logs(job_id, job_log_ids, priority):
    newrelic.agent.add_custom_parameter("job_id", str(job_id))

    job = Job.objects.get(id=job_id)
    job_logs = JobLog.objects.filter(id__in=job_log_ids, job=job)

    if len(job_log_ids) != len(job_logs):
        logger.warning("Failed to load all expected job ids: %s",
                       ", ".join(job_log_ids))

    parser_tasks = {
        "errorsummary_json": store_failure_lines,
        "builds-4h": parse_unstructured_log
    }

    # We don't want to stop parsing logs for most Exceptions however we still
    # need to know one occurred so we can skip further steps and reraise to
    # trigger the retry decorator.
    first_exception = None
    completed_names = set()
    for job_log in job_logs:
        newrelic.agent.add_custom_parameter("job_log_%s_url" % job_log.name,
                                            job_log.url)
        logger.debug("parser_task for %s", job_log.id)

        # Only parse logs which haven't yet been processed or else failed on the last attempt.
        if job_log.status not in (JobLog.PENDING, JobLog.FAILED):
            logger.info(
                'Skipping parsing for job %s since log already processed. Log status: %s',
                job_log.id, job_log.status,
            )
            continue

        parser = parser_tasks.get(job_log.name)
        if not parser:
            continue

        try:
            parser(job_log)
        except Exception as e:
            if isinstance(e, SoftTimeLimitExceeded):
                # stop parsing further logs but raise so NewRelic and
                # Papertrail will still show output
                raise

            if first_exception is None:
                first_exception = e

            # track the exception on NewRelic but don't stop parsing future
            # log lines.
            newrelic.agent.record_exception()
        else:
            completed_names.add(job_log.name)

    # Raise so we trigger the retry decorator.
    if first_exception:
        raise first_exception

    if "errorsummary_json" in completed_names and "builds-4h" in completed_names:

        success = crossreference_job(job)

        if success:
            logger.debug("Scheduling autoclassify for job %i", job_id)
            # TODO: Replace the use of different queues for failures vs not with the
            # RabbitMQ priority feature (since the idea behind separate queues was
            # only to ensure failures are dealt with first if there is a backlog).
            queue = 'log_autoclassify_fail' if priority == 'failures' else 'log_autoclassify'
            autoclassify.apply_async(args=[job_id], queue=queue)
        else:
            job.autoclassify_status = Job.SKIPPED
    else:
        job.autoclassify_status = Job.SKIPPED
    job.save()
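
The TODO in Examples #11 and #12 suggests replacing the separate failure queue with RabbitMQ's message-priority feature. A hedged sketch of what that could look like in Celery; the priority values and the x-max-priority bound are assumptions:

from celery import Celery
from kombu import Queue

app = Celery("treeherder")
app.conf.task_queues = [
    # x-max-priority enables RabbitMQ per-message priorities on this queue.
    Queue("log_autoclassify", queue_arguments={"x-max-priority": 10}),
]

@app.task(name="autoclassify")
def autoclassify(job_id):
    ...  # stand-in for the real task

def schedule_autoclassify(job_id, priority):
    # One queue; failures jump ahead of the backlog instead of being routed
    # to a separate log_autoclassify_fail queue.
    autoclassify.apply_async(
        args=[job_id],
        queue="log_autoclassify",
        priority=9 if priority == "failures" else 0,
    )
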
Example #12
def parse_logs(job_id, job_log_ids, priority):
    newrelic.agent.add_custom_parameter("job_id", str(job_id))

    job = Job.objects.get(id=job_id)
    job_logs = JobLog.objects.filter(id__in=job_log_ids,
                                     job=job)

    if len(job_log_ids) != len(job_logs):
        logger.warning("Failed to load all expected job ids: %s", ", ".join(job_log_ids))

    parser_tasks = {
        "errorsummary_json": store_failure_lines,
        "buildbot_text": parse_unstructured_log,
        "builds-4h": parse_unstructured_log
    }

    # We don't want to stop parsing logs for most Exceptions however we still
    # need to know one occurred so we can skip further steps and reraise to
    # trigger the retry decorator.
    first_exception = None
    completed_names = set()
    for job_log in job_logs:
        newrelic.agent.add_custom_parameter("job_log_%s_url" % job_log.name, job_log.url)
        logger.debug("parser_task for %s", job_log.id)

        # Only parse logs which haven't yet been processed or else failed on the last attempt.
        if job_log.status not in (JobLog.PENDING, JobLog.FAILED):
            logger.info('Skipping parsing for job %s since log already processed', job_log.id)
            continue

        parser = parser_tasks.get(job_log.name)
        if not parser:
            continue

        try:
            parser(job_log)
        except Exception as e:
            if isinstance(e, SoftTimeLimitExceeded):
                # stop parsing further logs but raise so NewRelic and
                # Papertrail will still show output
                raise

            if first_exception is None:
                first_exception = e

            # track the exception on NewRelic but don't stop parsing future
            # log lines.
            newrelic.agent.record_exception()
        else:
            completed_names.add(job_log.name)

    # Raise so we trigger the retry decorator.
    if first_exception:
        raise first_exception

    if ("errorsummary_json" in completed_names and
        ("buildbot_text" in completed_names or
         "builds-4h" in completed_names)):

        success = crossreference_job(job)

        if success:
            logger.debug("Scheduling autoclassify for job %i", job_id)
            # TODO: Replace the use of different queues for failures vs not with the
            # RabbitMQ priority feature (since the idea behind separate queues was
            # only to ensure failures are dealt with first if there is a backlog).
            queue = 'log_autoclassify_fail' if priority == 'failures' else 'log_autoclassify'
            autoclassify.apply_async(args=[job_id], queue=queue)
        else:
            job.autoclassify_status = Job.SKIPPED
    else:
        job.autoclassify_status = Job.SKIPPED
    job.save()
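
Examples #11 through #14 share a deliberate error-handling shape: remember the first exception, keep parsing the remaining logs, then re-raise at the end so the task's retry decorator still fires. The pattern in isolation:

def run_all(tasks):
    """Run every task; re-raise the first failure only after all have run."""
    first_exception = None
    for task in tasks:
        try:
            task()
        except Exception as e:
            if first_exception is None:
                first_exception = e  # remember it, but keep going
    if first_exception is not None:
        raise first_exception
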
Example #13
def parse_logs(job_id, job_log_ids, priority):
    newrelic.agent.add_custom_parameter("job_id", str(job_id))

    job = Job.objects.get(id=job_id)
    job_logs = JobLog.objects.filter(id__in=job_log_ids, job=job)

    if len(job_log_ids) != len(job_logs):
        logger.warning("Failed to load all expected job ids: %s",
                       ", ".join(job_log_ids))

    parser_tasks = {
        "errorsummary_json": store_failure_lines,
        "buildbot_text": parse_unstructured_log,
        "builds-4h": parse_unstructured_log
    }

    # We don't want to stop parsing logs for most Exceptions however we still
    # need to know one occurred so we can skip further steps and reraise to
    # trigger the retry decorator.
    first_exception = None
    completed_names = set()
    for job_log in job_logs:
        newrelic.agent.add_custom_parameter("job_log_%s_url" % job_log.name,
                                            job_log.url)
        logger.debug("parser_task for %s", job_log.id)

        # Don't parse jobs which have already been parsed.
        if job_log.status == JobLog.PARSED:
            logger.info("%s log already parsed", job_log.id)
            continue

        parser = parser_tasks.get(job_log.name)
        if not parser:
            continue

        try:
            parser(job_log)
        except Exception as e:
            if isinstance(e, SoftTimeLimitExceeded):
                # stop parsing further logs but raise so NewRelic and
                # Papertrail will still show output
                raise

            if first_exception is None:
                first_exception = e

            # track the exception on NewRelic but don't stop parsing future
            # log lines.
            newrelic.agent.record_exception()
        else:
            completed_names.add(job_log.name)

    # Raise so we trigger the retry decorator.
    if first_exception:
        raise first_exception

    if ("errorsummary_json" in completed_names
            and ("buildbot_text" in completed_names
                 or "builds-4h" in completed_names)):

        success = crossreference_job(job)

        if success:
            logger.debug("Scheduling autoclassify for job %i", job_id)
            autoclassify.apply_async(args=[job_id],
                                     routing_key="autoclassify.%s" % priority)
        else:
            job.autoclassify_status = Job.SKIPPED
    else:
        job.autoclassify_status = Job.SKIPPED
    job.save()
Example #14
def parse_logs(job_id, job_log_ids, priority):
    newrelic.agent.add_custom_parameter("job_id", str(job_id))

    job = Job.objects.get(id=job_id)
    job_logs = JobLog.objects.filter(id__in=job_log_ids,
                                     job=job)

    if len(job_log_ids) != len(job_logs):
        logger.warning("Failed to load all expected job ids: %s", ", ".join(job_log_ids))

    parser_tasks = {
        "errorsummary_json": store_failure_lines,
        "buildbot_text": parse_unstructured_log,
        "builds-4h": parse_unstructured_log
    }

    # We don't want to stop parsing logs for most Exceptions however we still
    # need to know one occurred so we can skip further steps and reraise to
    # trigger the retry decorator.
    first_exception = None
    completed_names = set()
    for job_log in job_logs:
        newrelic.agent.add_custom_parameter("job_log_%s_url" % job_log.name, job_log.url)
        logger.debug("parser_task for %s", job_log.id)

        # Don't parse jobs which have already been parsed.
        if job_log.status == JobLog.PARSED:
            logger.info("%s log already parsed", job_log.id)
            continue

        parser = parser_tasks.get(job_log.name)
        if not parser:
            continue

        try:
            parser(job_log)
        except Exception as e:
            if isinstance(e, SoftTimeLimitExceeded):
                # stop parsing further logs but raise so NewRelic and
                # Papertrail will still show output
                raise

            if first_exception is None:
                first_exception = e

            # track the exception on NewRelic but don't stop parsing future
            # log lines.
            newrelic.agent.record_exception()
        else:
            completed_names.add(job_log.name)

    # Raise so we trigger the retry decorator.
    if first_exception:
        raise first_exception

    if ("errorsummary_json" in completed_names and
        ("buildbot_text" in completed_names or
         "builds-4h" in completed_names)):

        success = crossreference_job(job)

        if success:
            logger.debug("Scheduling autoclassify for job %i", job_id)
            autoclassify.apply_async(
                args=[job_id],
                routing_key="autoclassify.%s" % priority)
        else:
            job.autoclassify_status = Job.SKIPPED
    else:
        job.autoclassify_status = Job.SKIPPED
    job.save()
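
The SoftTimeLimitExceeded special case in Examples #11 through #14 relies on Celery's two-stage time limits: the soft limit raises inside the task so it can clean up or log, while the hard limit kills the worker process outright. A minimal sketch; the limit values are assumptions:

from celery import Celery
from celery.exceptions import SoftTimeLimitExceeded

app = Celery("treeherder")

@app.task(soft_time_limit=300, time_limit=360)  # assumed limits, in seconds
def parse_logs(job_id, job_log_ids, priority):
    try:
        ...  # long-running log parsing
    except SoftTimeLimitExceeded:
        # Re-raise instead of swallowing, so monitoring (e.g. New Relic)
        # still records the timeout, the same choice the examples make.
        raise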