Example #1
def create_build_report(job,
                        branch,
                        kernel,
                        email_format,
                        db_options,
                        mail_options=None):
    """Create the build report email to be sent.

    :param job: The name of the job.
    :type job: str
    :param branch: The name of the git branch.
    :type branch: str
    :param kernel: The name of the kernel.
    :type kernel: str
    :param email_format: The email format to send.
    :type email_format: list
    :param db_options: The mongodb database connection parameters.
    :type db_options: dict
    :param mail_options: The options necessary to connect to the SMTP server.
    :type mail_options: dict
    :return: A tuple with the TXT email body, the HTML email body, the
    subject (strings or None) and the custom email headers (dict).
    """
    kwargs = {}
    txt_body = None
    html_body = None
    subject = None
    # This is used to provide a footer note in the email report.
    info_email = None

    fail_count = total_count = 0
    errors_count = warnings_count = 0
    fail_results = []

    if mail_options:
        info_email = mail_options.get("info_email", None)

    spec = {
        models.JOB_KEY: job,
        models.GIT_BRANCH_KEY: branch,
        models.KERNEL_KEY: kernel
    }

    database = utils.db.get_db_connection(db_options)
    total_results, total_count = utils.db.find_and_count(
        database[models.BUILD_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BUILD_SEARCH_FIELDS)

    total_unique_data = rcommon.get_unique_data(
        total_results.clone(), unique_keys=[models.ARCHITECTURE_KEY])

    spec[models.STATUS_KEY] = models.FAIL_STATUS

    fail_results, fail_count = utils.db.find_and_count(
        database[models.BUILD_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BUILD_SEARCH_FIELDS,
        sort=BUILD_SEARCH_SORT)

    failed_data = _parse_build_data(fail_results.clone())

    # Retrieve the parsed errors/warnings/mismatches summary and then
    # the details.
    errors_spec = {
        models.JOB_KEY: job,
        models.GIT_BRANCH_KEY: branch,
        models.KERNEL_KEY: kernel
    }
    errors_summary = utils.db.find_one2(
        database[models.ERRORS_SUMMARY_COLLECTION],
        errors_spec,
        fields=[models.ERRORS_KEY, models.WARNINGS_KEY, models.MISMATCHES_KEY])

    error_details = utils.db.find(database[models.ERROR_LOGS_COLLECTION],
                                  0,
                                  0,
                                  spec=errors_spec,
                                  sort=[(models.DEFCONFIG_FULL_KEY, 1)])
    error_details = [d for d in error_details.clone()]

    err_data, errors_count, warnings_count = _get_errors_count(error_details)

    kwargs = {
        "base_url": rcommon.DEFAULT_BASE_URL,
        "build_url": rcommon.DEFAULT_BUILD_URL,
        "email_format": email_format,
        "error_data": err_data,
        "error_details": error_details,
        "errors_count": errors_count,
        "errors_summary": errors_summary,
        "fail_count": fail_count,
        "failed_data": failed_data,
        "info_email": info_email,
        "pass_count": total_count - fail_count,
        "storage_url": rcommon.DEFAULT_STORAGE_URL,
        "total_count": total_count,
        "total_unique_data": total_unique_data,
        "warnings_count": warnings_count,
        "git_branch": branch,
        models.JOB_KEY: job,
        models.KERNEL_KEY: kernel,
    }

    kwargs["git_commit"], kwargs["git_url"] = \
        rcommon.get_git_data(job, branch, kernel, db_options)

    custom_headers = {
        rcommon.X_REPORT: rcommon.BUILD_REPORT_TYPE,
        rcommon.X_BRANCH: branch,
        rcommon.X_TREE: job,
        rcommon.X_KERNEL: kernel,
    }

    if all([fail_count == 0, total_count == 0]):
        utils.LOG.warn(
            "Nothing found for '%s-%s-%s': no build email report sent", job,
            branch, kernel)
    else:
        txt_body, html_body, subject = _create_build_email(**kwargs)

    return txt_body, html_body, subject, custom_headers
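
A minimal usage sketch for the function above, assuming hypothetical db_options and mail_options values and a hypothetical send_email helper; only the call signature and the four-element return tuple come from the code itself.

# Hedged sketch: the option dictionaries and send_email() are assumptions
# made for illustration; create_build_report() is the function shown above.
db_options = {"host": "localhost", "port": 27017}      # hypothetical values
mail_options = {"info_email": "info@example.org"}      # hypothetical values

txt_body, html_body, subject, headers = create_build_report(
    "next", "master", "next-20200101",
    ["txt", "html"], db_options, mail_options=mail_options)

if subject:
    send_email(subject, txt_body, html_body, headers)  # hypothetical helper
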
Example #2
def get_boot_data(db_options, job, branch, kernel, lab_name):
    total_count, total_unique_data = rcommon.get_total_results(
        job,
        branch,
        kernel,
        models.BOOT_COLLECTION,
        db_options,
        lab_name
    )

    total_builds = rcommon.get_total_results(
        job,
        branch,
        kernel,
        models.BUILD_COLLECTION,
        db_options
    )[0]

    git_commit, git_url = rcommon.get_git_data(job, branch, kernel, db_options)

    spec = {
        models.JOB_KEY: job,
        models.GIT_BRANCH_KEY: branch,
        models.KERNEL_KEY: kernel,
        models.STATUS_KEY: models.OFFLINE_STATUS
    }

    if lab_name:
        spec[models.LAB_NAME_KEY] = lab_name

    database = utils.db.get_db_connection(db_options)

    offline_results, offline_count = utils.db.find_and_count(
        database[models.BOOT_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BOOT_SEARCH_FIELDS,
        sort=BOOT_SEARCH_SORT
    )

    # MongoDB cursor gets overwritten somehow by the next query. Extract the
    # data before this happens.
    offline_data = None
    if offline_count > 0:
        offline_data = _parse_boot_results(offline_results.clone())[0]

    spec[models.STATUS_KEY] = {
        "$in": [models.UNTRIED_STATUS, models.UNKNOWN_STATUS]
    }
    untried_count = utils.db.find_and_count(
        database[models.BOOT_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BOOT_SEARCH_FIELDS,
        sort=BOOT_SEARCH_SORT
    )[1]

    spec[models.STATUS_KEY] = models.FAIL_STATUS
    fail_results, fail_count = utils.db.find_and_count(
        database[models.BOOT_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BOOT_SEARCH_FIELDS,
        sort=BOOT_SEARCH_SORT
    )

    # Calculate the PASS count based on the previously obtained values.
    pass_count = total_count - fail_count - offline_count - untried_count

    # Fill the boot data structure
    data = {
        "base_url": rcommon.DEFAULT_BASE_URL,
        "boot_url": rcommon.DEFAULT_BOOT_URL,
        "build_url": rcommon.DEFAULT_BUILD_URL,
        "conflict_count": 0,
        "conflict_data": None,
        "fail_results": fail_results,
        "fail_count": fail_count,
        "failed_data": None,
        "git_branch": branch,
        "git_commit": git_commit,
        "git_url": git_url,
        "offline_count": offline_count,
        "offline_data": offline_data,
        "pass_count": pass_count,
        "total_builds": total_builds,
        "total_count": total_count,
        "total_unique_data": total_unique_data,
        "untried_count": untried_count,
        "red": rcommon.HTML_RED,
        "boot_id_url": rcommon.BOOT_ID_URL,
        models.JOB_KEY: job,
        models.KERNEL_KEY: kernel,
        models.LAB_NAME_KEY: lab_name
    }

    # Get the regressions and determine which bisections to run.
    regressions_doc = database[models.BOOT_REGRESSIONS_COLLECTION].find_one(
        {models.JOB_KEY: job, models.KERNEL_KEY: kernel})

    if regressions_doc:
        data["regressions"], data["bisections"] = parse_regressions(
            regressions_doc[models.REGRESSIONS_KEY], data, db_options)
    else:
        data["regressions"], data["bisections"] = None, None

    if fail_count > 0:
        _update_boot_conflicts(data, spec, database)

    return data
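
A short, hedged example of how get_boot_data might be called and its result inspected; the connection parameters and the job/branch/kernel names are made up, while the dictionary keys read below are the ones the function fills in.

# Hedged sketch: connection parameters and tree/branch/kernel names are
# hypothetical; the keys read from the result are set by get_boot_data().
data = get_boot_data(
    {"host": "localhost", "port": 27017},    # hypothetical db_options
    "next", "master", "next-20200101",
    None)                                    # lab_name=None: all labs

print("boots: %d total, %d failed, %d passed" %
      (data["total_count"], data["fail_count"], data["pass_count"]))
if data["regressions"]:
    print("bisections to run: %r" % (data["bisections"],))
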
Example #3
def create_boot_report(job,
                       kernel,
                       lab_name,
                       email_format,
                       db_options,
                       mail_options=None):
    """Create the boot report email to be sent.

    If lab_name is not None, it will trigger a boot report only for that
    specified lab.

    :param job: The name of the job.
    :type job: string
    :param kernel: The name of the kernel.
    :type kernel: string
    :param lab_name: The name of the lab.
    :type lab_name: string
    :param email_format: The email format to send.
    :type email_format: list
    :param db_options: The mongodb database connection parameters.
    :type db_options: dict
    :param mail_options: The options necessary to connect to the SMTP server.
    :type mail_options: dict
    :return: A tuple with the TXT email body, the HTML email body, the
    subject (strings or None) and the custom email headers (dict).
    """
    kwargs = {}
    # Email TXT and HTML body.
    txt_body = None
    html_body = None
    subject = None
    # This is used to provide a footer note in the email report.
    info_email = None

    if mail_options:
        info_email = mail_options.get("info_email", None)

    total_count, total_unique_data = rcommon.get_total_results(
        job,
        kernel,
        models.BOOT_COLLECTION,
        db_options,
        lab_name=lab_name
    )

    total_builds, _ = rcommon.get_total_results(
        job,
        kernel,
        models.DEFCONFIG_COLLECTION,
        db_options
    )

    git_commit, git_url, git_branch = rcommon.get_git_data(
        job, kernel, db_options)

    spec = {
        models.JOB_KEY: job,
        models.KERNEL_KEY: kernel,
        models.STATUS_KEY: models.OFFLINE_STATUS
    }

    if lab_name is not None:
        spec[models.LAB_NAME_KEY] = lab_name

    database = utils.db.get_db_connection(db_options)
    offline_results, offline_count = utils.db.find_and_count(
        database[models.BOOT_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BOOT_SEARCH_FIELDS,
        sort=BOOT_SEARCH_SORT
    )

    spec[models.STATUS_KEY] = {
        "$in": [models.UNTRIED_STATUS, models.UNKNOWN_STATUS]
    }
    untried_count = 0
    _, untried_count = utils.db.find_and_count(
        database[models.BOOT_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BOOT_SEARCH_FIELDS,
        sort=BOOT_SEARCH_SORT
    )

    # MongoDB cursor gets overwritten somehow by the next query. Extract the
    # data before this happens.
    offline_data = None
    if offline_count > 0:
        offline_data, _, _, _ = _parse_boot_results(offline_results.clone())

    spec[models.STATUS_KEY] = models.FAIL_STATUS

    fail_results, fail_count = utils.db.find_and_count(
        database[models.BOOT_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BOOT_SEARCH_FIELDS,
        sort=BOOT_SEARCH_SORT
    )

    failed_data = None
    conflict_data = None
    conflict_count = 0

    # Calculate the PASS count based on the previously obtained values.
    pass_count = total_count - fail_count - offline_count - untried_count

    # Fill the data structure for the email report creation.
    kwargs = {
        "base_url": rcommon.DEFAULT_BASE_URL,
        "boot_url": rcommon.DEFAULT_BOOT_URL,
        "build_url": rcommon.DEFAULT_BUILD_URL,
        "conflict_count": conflict_count,
        "conflict_data": conflict_data,
        "email_format": email_format,
        "fail_count": fail_count - conflict_count,
        "failed_data": failed_data,
        "git_branch": git_branch,
        "git_commit": git_commit,
        "git_url": git_url,
        "info_email": info_email,
        "offline_count": offline_count,
        "offline_data": offline_data,
        "pass_count": pass_count,
        "total_builds": total_builds,
        "total_count": total_count,
        "total_unique_data": total_unique_data,
        "untried_count": untried_count,
        models.JOB_KEY: job,
        models.KERNEL_KEY: kernel,
        models.LAB_NAME_KEY: lab_name
    }

    custom_headers = {
        rcommon.X_REPORT: rcommon.BOOT_REPORT_TYPE,
        rcommon.X_BRANCH: git_branch,
        rcommon.X_TREE: job,
        rcommon.X_KERNEL: kernel,
    }
    if lab_name:
        custom_headers[rcommon.X_LAB] = lab_name

    if fail_count > 0:
        failed_data, _, _, unique_data = \
            _parse_boot_results(fail_results.clone(), get_unique=True)

        # Copy the failed results here. The mongodb Cursor, for some
        # reason, gets overwritten.
        fail_results = [x for x in fail_results.clone()]

        conflict_data = None
        if all([fail_count != total_count, lab_name is None]):
            # If the number of failed boots differs from the total number of
            # boot reports, check if we have conflicting reports. We look
            # for boot reports that have the same attributes as the failed
            # ones but that indicate a PASS status.
            spec[models.STATUS_KEY] = models.PASS_STATUS
            for key, val in unique_data.iteritems():
                spec[key] = {"$in": val}
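            # Illustration with hypothetical values: if unique_data were
            # {"board": ["board-a", "board-b"]}, the spec would now match
            # PASS boots with "board" in ["board-a", "board-b"], i.e.
            # passing boots on the same boards that also reported failures.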

            if pass_count > 0:
                # If we have such boot reports, filter and aggregate them
                # together.
                def _conflicting_data():
                    """Local generator function to search conflicting data.

                    This is used to provide a filter mechanism during the list
                    comprehension in order to exclude `None` values.
                    """
                    for failed, passed in itertools.product(
                            fail_results, pass_results.clone()):
                        yield _search_conflicts(failed, passed)

                pass_results = utils.db.find(
                    database[models.BOOT_COLLECTION],
                    0,
                    0,
                    spec=spec,
                    fields=BOOT_SEARCH_FIELDS,
                    sort=BOOT_SEARCH_SORT
                )

                # zip() is its own inverse, when using the * operator.
                # We get back (failed,passed) tuples during the list
                # comprehension, but we need a list of values not tuples.
                # unzip it, and then chain the two resulting tuples together.
                conflicting_tuples = zip(*(
                    x for x in _conflicting_data()
                    if x is not None
                ))
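                # Illustration of the unzip step with hypothetical values:
                # zip(*[(f1, p1), (f2, p2)]) == [(f1, f2), (p1, p2)], so
                # index 0 collects the failed documents and index 1 the
                # passing ones before they are chained back together below.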

                # Make sure we do not have an empty list here after filtering.
                if conflicting_tuples:
                    conflicts = itertools.chain(
                        conflicting_tuples[0], conflicting_tuples[1])
                    conflict_data, failed_data, conflict_count, _ = \
                        _parse_boot_results(conflicts,
                                            intersect_results=failed_data)

        # Update the necessary data to create the email report.
        kwargs["failed_data"] = failed_data
        kwargs["conflict_count"] = conflict_count
        kwargs["conflict_data"] = conflict_data
        kwargs["fail_count"] = fail_count - conflict_count

        txt_body, html_body, subject = _create_boot_email(**kwargs)
    elif fail_count == 0 and total_count > 0:
        txt_body, html_body, subject = _create_boot_email(**kwargs)
    elif fail_count == 0 and total_count == 0:
        utils.LOG.warn(
            "Nothing found for '%s-%s': no email report sent", job, kernel)

    return txt_body, html_body, subject, custom_headers
Example #4
def create_build_report(job,
                        kernel,
                        email_format,
                        db_options,
                        mail_options=None):
    """Create the build report email to be sent.

    :param job: The name of the job.
    :type job: str
    :param kernel: The name of the kernel.
    :type kernel: str
    :param email_format: The email format to send.
    :type email_format: list
    :param db_options: The mongodb database connection parameters.
    :type db_options: dict
    :param mail_options: The options necessary to connect to the SMTP server.
    :type mail_options: dict
    :return: A tuple with the TXT email body, the HTML email body, the
    subject (strings or None) and the custom email headers (dict).
    """
    kwargs = {}
    txt_body = None
    html_body = None
    subject = None
    # This is used to provide a footer note in the email report.
    info_email = None

    fail_count = total_count = 0
    errors_count = warnings_count = 0
    fail_results = []

    if mail_options:
        info_email = mail_options.get("info_email", None)

    spec = {
        models.JOB_KEY: job,
        models.KERNEL_KEY: kernel
    }

    database = utils.db.get_db_connection(db_options)
    total_results, total_count = utils.db.find_and_count(
        database[models.DEFCONFIG_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BUILD_SEARCH_FIELDS
    )

    err_data, errors_count, warnings_count = _get_errors_count(
        total_results.clone())

    unique_keys = [models.ARCHITECTURE_KEY]
    total_unique_data = rcommon.get_unique_data(
        total_results.clone(), unique_keys=unique_keys)

    git_commit, git_url, git_branch = rcommon.get_git_data(
        job, kernel, db_options)

    spec[models.STATUS_KEY] = models.FAIL_STATUS

    fail_results, fail_count = utils.db.find_and_count(
        database[models.DEFCONFIG_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BUILD_SEARCH_FIELDS,
        sort=BUILD_SEARCH_SORT)

    failed_data = _parse_build_data(fail_results.clone())

    # Retrieve the parsed errors/warnings/mismatches summary and then
    # the details.
    errors_spec = {
        models.JOB_KEY: job,
        models.KERNEL_KEY: kernel
    }
    summary_fields = [
        models.ERRORS_KEY, models.WARNINGS_KEY, models.MISMATCHES_KEY
    ]
    errors_summary = utils.db.find_one2(
        database[models.ERRORS_SUMMARY_COLLECTION],
        errors_spec,
        summary_fields
    )

    error_details = utils.db.find(
        database[models.ERROR_LOGS_COLLECTION],
        0,
        0,
        spec=errors_spec,
        sort=[(models.DEFCONFIG_FULL_KEY, 1)]
    )
    error_details = [d for d in error_details.clone()]

    kwargs = {
        "base_url": rcommon.DEFAULT_BASE_URL,
        "build_url": rcommon.DEFAULT_BUILD_URL,
        "email_format": email_format,
        "error_data": err_data,
        "error_details": error_details,
        "errors_count": errors_count,
        "errors_summary": errors_summary,
        "fail_count": fail_count,
        "failed_data": failed_data,
        "git_branch": git_branch,
        "git_commit": git_commit,
        "git_url": git_url,
        "info_email": info_email,
        "pass_count": total_count - fail_count,
        "storage_url": rcommon.DEFAULT_STORAGE_URL,
        "total_count": total_count,
        "total_unique_data": total_unique_data,
        "warnings_count": warnings_count,
        models.JOB_KEY: job,
        models.KERNEL_KEY: kernel,
    }

    custom_headers = {
        rcommon.X_REPORT: rcommon.BUILD_REPORT_TYPE,
        rcommon.X_BRANCH: git_branch,
        rcommon.X_TREE: job,
        rcommon.X_KERNEL: kernel,
    }

    if all([fail_count == 0, total_count == 0]):
        utils.LOG.warn(
            "Nothing found for '%s-%s': no build email report sent",
            job, kernel)
    else:
        txt_body, html_body, subject = _create_build_email(**kwargs)

    return txt_body, html_body, subject, custom_headers
Example #5
def create_boot_report(job,
                       branch,
                       kernel,
                       lab_name,
                       email_format,
                       db_options,
                       mail_options=None):
    """Create the boot report email to be sent.

    If lab_name is not None, it will trigger a boot report only for that
    specified lab.

    :param job: The name of the job.
    :type job: string
    :param branch: The name of the git branch.
    :type branch: string
    :param kernel: The name of the kernel.
    :type kernel: string
    :param lab_name: The name of the lab.
    :type lab_name: string
    :param email_format: The email format to send.
    :type email_format: list
    :param db_options: The mongodb database connection parameters.
    :type db_options: dict
    :param mail_options: The options necessary to connect to the SMTP server.
    :type mail_options: dict
    :return: A tuple with the TXT email body, the HTML email body, the
    subject (strings or None) and the custom email headers (dict).
    """
    kwargs = {}
    # Email TXT and HTML body.
    txt_body = None
    html_body = None
    subject = None
    # This is used to provide a footer note in the email report.
    info_email = None

    if mail_options:
        info_email = mail_options.get("info_email", None)

    total_count, total_unique_data = rcommon.get_total_results(
        job,
        branch,
        kernel,
        models.BOOT_COLLECTION,
        db_options,
        lab_name=lab_name)

    total_builds, _ = rcommon.get_total_results(job, branch, kernel,
                                                models.BUILD_COLLECTION,
                                                db_options)

    git_commit, git_url = rcommon.get_git_data(job, branch, kernel, db_options)

    spec = {
        models.JOB_KEY: job,
        models.GIT_BRANCH_KEY: branch,
        models.KERNEL_KEY: kernel,
        models.STATUS_KEY: models.OFFLINE_STATUS
    }

    if lab_name is not None:
        spec[models.LAB_NAME_KEY] = lab_name

    database = utils.db.get_db_connection(db_options)
    offline_results, offline_count = utils.db.find_and_count(
        database[models.BOOT_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BOOT_SEARCH_FIELDS,
        sort=BOOT_SEARCH_SORT)

    # MongoDB cursor gets overwritten somehow by the next query. Extract the
    # data before this happens.
    offline_data = None
    if offline_count > 0:
        offline_data, _, _, _ = _parse_boot_results(offline_results.clone())

    spec[models.STATUS_KEY] = {
        "$in": [models.UNTRIED_STATUS, models.UNKNOWN_STATUS]
    }
    untried_count = 0
    _, untried_count = utils.db.find_and_count(
        database[models.BOOT_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BOOT_SEARCH_FIELDS,
        sort=BOOT_SEARCH_SORT)

    spec[models.STATUS_KEY] = models.FAIL_STATUS

    fail_results, fail_count = utils.db.find_and_count(
        database[models.BOOT_COLLECTION],
        0,
        0,
        spec=spec,
        fields=BOOT_SEARCH_FIELDS,
        sort=BOOT_SEARCH_SORT)

    failed_data = None
    conflict_data = None
    conflict_count = 0

    # Calculate the PASS count based on the previously obtained values.
    pass_count = total_count - fail_count - offline_count - untried_count

    # Get the regressions.
    regressions = database[models.BOOT_REGRESSIONS_COLLECTION].find_one(
        {models.JOB_KEY: job, models.KERNEL_KEY: kernel})

    # Fill the data structure for the email report creation.
    kwargs = {
        "base_url": rcommon.DEFAULT_BASE_URL,
        "boot_url": rcommon.DEFAULT_BOOT_URL,
        "build_url": rcommon.DEFAULT_BUILD_URL,
        "conflict_count": conflict_count,
        "conflict_data": conflict_data,
        "email_format": email_format,
        "fail_count": fail_count - conflict_count,
        "failed_data": failed_data,
        "git_branch": branch,
        "git_commit": git_commit,
        "git_url": git_url,
        "info_email": info_email,
        "offline_count": offline_count,
        "offline_data": offline_data,
        "pass_count": pass_count,
        "total_builds": total_builds,
        "total_count": total_count,
        "total_unique_data": total_unique_data,
        "untried_count": untried_count,
        "regressions": regressions,
        "red": rcommon.HTML_RED,
        "boot_id_url": rcommon.BOOT_ID_URL,
        models.JOB_KEY: job,
        models.KERNEL_KEY: kernel,
        models.LAB_NAME_KEY: lab_name
    }

    custom_headers = {
        rcommon.X_REPORT: rcommon.BOOT_REPORT_TYPE,
        rcommon.X_BRANCH: branch,
        rcommon.X_TREE: job,
        rcommon.X_KERNEL: kernel,
    }
    if lab_name:
        custom_headers[rcommon.X_LAB] = lab_name

    if fail_count > 0:
        failed_data, _, _, unique_data = \
            _parse_boot_results(fail_results.clone(), get_unique=True)

        # Copy the failed results here. The mongodb Cursor, for some
        # reason, gets overwritten.
        fail_results = [x for x in fail_results.clone()]

        conflict_data = None
        if all([fail_count != total_count, lab_name is None]):
            # If the number of failed boots differs from the total number of
            # boot reports, check if we have conflicting reports. We look
            # for boot reports that have the same attributes as the failed
            # ones but that indicate a PASS status.
            spec[models.STATUS_KEY] = models.PASS_STATUS
            for key, val in unique_data.iteritems():
                spec[key] = {"$in": val}

            if pass_count > 0:
                # If we have such boot reports, filter and aggregate them
                # together.
                def _conflicting_data():
                    """Local generator function to search conflicting data.

                    This is used to provide a filter mechanism during the list
                    comprehension in order to exclude `None` values.
                    """
                    for failed, passed in itertools.product(
                            fail_results, pass_results.clone()):
                        yield _search_conflicts(failed, passed)

                pass_results = utils.db.find(database[models.BOOT_COLLECTION],
                                             0,
                                             0,
                                             spec=spec,
                                             fields=BOOT_SEARCH_FIELDS,
                                             sort=BOOT_SEARCH_SORT)

                # zip() is its own inverse, when using the * operator.
                # We get back (failed,passed) tuples during the list
                # comprehension, but we need a list of values not tuples.
                # unzip it, and then chain the two resulting tuples together.
                conflicting_tuples = zip(*(x for x in _conflicting_data()
                                           if x is not None))

                # Make sure we do not have an empty list here after filtering.
                if conflicting_tuples:
                    conflicts = itertools.chain(conflicting_tuples[0],
                                                conflicting_tuples[1])
                    conflict_data, failed_data, conflict_count, _ = \
                        _parse_boot_results(conflicts,
                                            intersect_results=failed_data)

        # Update the necessary data to create the email report.
        kwargs["failed_data"] = failed_data
        kwargs["conflict_count"] = conflict_count
        kwargs["conflict_data"] = conflict_data
        kwargs["fail_count"] = fail_count - conflict_count

        txt_body, html_body, subject = _create_boot_email(**kwargs)
    elif fail_count == 0 and total_count > 0:
        txt_body, html_body, subject = _create_boot_email(**kwargs)
    elif fail_count == 0 and total_count == 0:
        utils.LOG.warn("Nothing found for '%s-%s-%s': no email report sent",
                       job, branch, kernel)

    return txt_body, html_body, subject, custom_headers
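
Across these examples, each report builder returns the text body, the HTML body, the subject and a dictionary of custom headers. Below is a minimal sketch of how those four values could be assembled into a multipart message with the standard email package; build_message is a hypothetical helper and the SMTP sending step is intentionally left out.

# Hedged sketch: build_message() is not part of the project; it only shows
# one way to assemble the values returned by create_boot_report() or
# create_build_report() using the standard library.
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText


def build_message(txt_body, html_body, subject, custom_headers):
    """Assemble a multipart/alternative message from the report parts."""
    msg = MIMEMultipart("alternative")
    msg["Subject"] = subject
    for name, value in custom_headers.items():
        msg[name] = value
    if txt_body:
        msg.attach(MIMEText(txt_body, "plain", "utf-8"))
    if html_body:
        msg.attach(MIMEText(html_body, "html", "utf-8"))
    return msg
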