Example No. 1
def _save_summary(errors, warnings, mismatches, job_id, build_doc, db_options):
    """Save the summary for errors/warnings/mismatches found."""
    ret_val = 200
    if (errors or warnings or mismatches):
        prev_doc = None
        database = utils.db.get_db_connection(db_options)
        redis_conn = redisdb.get_db_connection(db_options)
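        # MongoDB stores the summary documents; the Redis connection is only
        # used for the distributed lock taken below.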
        prev_spec = {
            models.JOB_ID_KEY: job_id,
            models.JOB_KEY: build_doc.job,
            models.KERNEL_KEY: build_doc.kernel,
            models.GIT_BRANCH_KEY: build_doc.git_branch
        }

        # We might be importing documents and parsing build logs from
        # multiple processes.
        # To avoid storing inconsistent data in the database, take a lock
        # here before looking for the previous summary.
        lock_key = "log-parser-{:s}".format(str(job_id))
        with redis.lock.Lock(redis_conn, lock_key, timeout=5):
            prev_doc = utils.db.find_one2(
                database[models.ERRORS_SUMMARY_COLLECTION], prev_spec)

            if prev_doc:
                ret_val = _update_prev_summary(prev_doc, errors, warnings,
                                               mismatches, database)
            else:
                ret_val = _create_new_summary(errors, warnings, mismatches,
                                              job_id, build_doc, database)

    return ret_val
Example No. 2
def _save_summary(
        errors, warnings, mismatches, job_id, build_doc, db_options):
    """Save the summary for errors/warnings/mismatches found."""
    ret_val = 200
    if (errors or warnings or mismatches):
        prev_doc = None
        database = utils.db.get_db_connection(db_options)
        redis_conn = redisdb.get_db_connection(db_options)
        prev_spec = {
            models.JOB_ID_KEY: job_id,
            models.JOB_KEY: build_doc.job,
            models.KERNEL_KEY: build_doc.kernel,
            models.GIT_BRANCH_KEY: build_doc.git_branch
        }

        # We might be importing documents and parsing build logs from
        # multiple processes.
        # To avoid storing inconsistent data in the database, take a lock
        # here before looking for the previous summary.
        lock_key = "log-parser-{:s}".format(str(job_id))
        with redis.lock.Lock(redis_conn, lock_key, timeout=5):
            prev_doc = utils.db.find_one2(
                database[models.ERRORS_SUMMARY_COLLECTION], prev_spec)

            if prev_doc:
                ret_val = _update_prev_summary(
                    prev_doc, errors, warnings, mismatches, database)
            else:
                ret_val = _create_new_summary(
                    errors,
                    warnings, mismatches, job_id, build_doc, database)

    return ret_val
Example No. 3
def _get_or_create_job(job, kernel, git_branch, database, db_options):
    """Get or create a job in the database.

    :param job: The name of the job.
    :type job: str
    :param kernel: The name of the kernel.
    :type kernel: str
    :param git_branch: The name of the git branch.
    :type git_branch: str
    :param database: The mongodb database connection.
    :param db_options: The database connection options.
    :type db_options: dict
    :return: A 3-tuple: return value, job document and job ID.
    """
    ret_val = 201
    job_doc = None
    job_id = None

    redis_conn = redisdb.get_db_connection(db_options)

    # We might be importing builds in parallel through multiple processes.
    # Keep a lock here when looking for a job or we might end up creating
    # the same job multiple times.
    lock_key = "build-import-{}-{}-{}".format(job, kernel, git_branch)
    with redis.lock.Lock(redis_conn, lock_key, timeout=5):
        p_doc = utils.db.find_one2(
            database[models.JOB_COLLECTION],
            {
                models.JOB_KEY: job,
                models.KERNEL_KEY: kernel,
                models.GIT_BRANCH_KEY: git_branch
            })

        if p_doc:
            job_doc = mjob.JobDocument.from_json(p_doc)
            job_id = job_doc.id
        else:
            job_doc = mjob.JobDocument(job, kernel, git_branch)
            job_doc.status = models.BUILD_STATUS
            job_doc.created_on = datetime.datetime.now(tz=bson.tz_util.utc)
            ret_val, job_id = utils.db.save(
                database, job_doc, manipulate=True)
            job_doc.id = job_id

    return ret_val, job_doc, job_id
Example No. 4
def _get_or_create_job(job, kernel, git_branch, database, db_options):
    """Get or create a job in the database.

    :param job: The name of the job.
    :type job: str
    :param kernel: The name of the kernel.
    :type kernel: str
    :param git_branch: The name of the git branch.
    :type git_branch: str
    :param database: The mongodb database connection.
    :param db_options: The database connection options.
    :type db_options: dict
    :return: A 3-tuple: return value, job document and job ID.
    """
    ret_val = 201
    job_doc = None
    job_id = None

    redis_conn = redisdb.get_db_connection(db_options)

    # We might be importing builds in parallel through multiple processes.
    # Keep a lock here when looking for a job or we might end up creating
    # the same job multiple times.
    lock_key = "build-import-{}-{}-{}".format(job, kernel, git_branch)
    with redis.lock.Lock(redis_conn, lock_key, timeout=5):
        p_doc = utils.db.find_one2(
            database[models.JOB_COLLECTION], {
                models.JOB_KEY: job,
                models.KERNEL_KEY: kernel,
                models.GIT_BRANCH_KEY: git_branch
            })

        if p_doc:
            job_doc = mjob.JobDocument.from_json(p_doc)
            job_id = job_doc.id
        else:
            job_doc = mjob.JobDocument(job, kernel, git_branch)
            job_doc.status = models.BUILD_STATUS
            job_doc.created_on = datetime.datetime.now(tz=bson.tz_util.utc)
            ret_val, job_id = utils.db.save(database, job_doc)
            job_doc.id = job_id

    return ret_val, job_doc, job_id
Example No. 5
    def __init__(self):

        db_options = {
            "mongodb_host": topt.options.mongodb_host,
            "mongodb_password": topt.options.mongodb_password,
            "mongodb_pool": topt.options.mongodb_pool,
            "mongodb_port": topt.options.mongodb_port,
            "mongodb_user": topt.options.mongodb_user,
            "redis_db": topt.options.redis_db,
            "redis_host": topt.options.redis_host,
            "redis_password": topt.options.redis_password,
            "redis_port": topt.options.redis_port
        }

        if not self.database:
            self.database = utils.db.get_db_connection(db_options)

        if not self.redis_con:
            self.redis_con = redisdb.get_db_connection(db_options)

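        # Settings handed to the parent application class along with the URL
        # routes: the shared database and Redis connections, a thread pool
        # for blocking work, and options taken from the command line.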
        settings = {
            "database": self.database,
            "redis_connection": self.redis_con,
            "dboptions": db_options,
            "default_handler_class": happ.AppHandler,
            "executor": concurrent.futures.ThreadPoolExecutor(
                topt.options.max_workers),
            "gzip": topt.options.gzip,
            "debug": topt.options.debug,
            "master_key": topt.options.master_key,
            "autoreload": topt.options.autoreload,
            "senddelay": topt.options.send_delay,
            "storage_url": topt.options.storage_url,
            "max_buffer_size": topt.options.buffer_size
        }

        hdbindexes.ensure_indexes(self.database)

        super(KernelCiBackend, self).__init__(urls.APP_URLS, **settings)
Example No. 6
def find(group_id, db_options={}, db=None):
    """Find the regression starting from a single test group document.

    :param group_id: The id of the test group document.
    :type group_id: str
    :param db_options: The database connection options.
    :type db_options: dict
    :param db: The database connection.
    :type db: Database connection object.
    :return: A 2-tuple: the return value, which can be 200 (success), 201
    (document saved) or 500 (error); and a list with the IDs of the test
    regression documents, or None.
    """
    if not group_id:
        utils.LOG.warn("Not searching regressions as no test group ID")
        return (200, None)

    utils.LOG.info("Searching test regressions for '{}'".format(group_id))

    if db is None:
        db = utils.db.get_db_connection(db_options)
    collection = db[models.TEST_GROUP_COLLECTION]
    group = utils.db.find_one2(collection, group_id)

    if not group:
        utils.LOG.warn("Test group not found: {}".format(group_id))
        return (500, None)

    spec = {k: group[k] for k in TEST_GROUP_SPEC_KEYS}
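    # Find the most recent group created before this one that matches the
    # same spec regardless of kernel: it is the baseline to compare against.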
    last_spec = {k: v for k, v in spec.iteritems() if k != models.KERNEL_KEY}
    last_spec[models.CREATED_KEY] = {"$lt": group[models.CREATED_KEY]}
    last = collection.find_one(last_spec, sort=[(models.CREATED_KEY, -1)])
    redis_conn = redisdb.get_db_connection(db_options)
    lock_key = "-".join(group[k] for k in TEST_GROUP_SPEC_KEYS)
    # Hold a lock as multiple group results may be imported in parallel
    with redis.lock.Lock(redis_conn, lock_key, timeout=5):
        regr_ids = _add_test_group_regressions(group, last, db, spec)
    return (200, regr_ids)
Example No. 7
def track_regression(boot_doc, pass_doc, old_regr, db_options):
    """Track the regression for the provided boot report.

    :param boot_doc: The actual boot document where we have a regression.
    :type boot_doc: dict
    :param pass_doc: The previous boot document, when we start tracking a
    regression.
    :type pass_doc: dict
    :param old_regr: The previous regressions document.
    :type old_regr: 2-tuple
    :param db_options: The database connection parameters.
    :type db_options: dict
    :return: A 2-tuple: the status code (200, 201, 500) and the regression
    document id.
    """
    ret_val = 201
    doc_id = None

    regr_key = create_regressions_key(boot_doc)

    b_get = boot_doc.get
    boot_id = b_get(models.ID_KEY)
    arch = b_get(models.ARCHITECTURE_KEY)
    b_instance = sanitize_key(str(b_get(models.BOARD_INSTANCE_KEY)).lower())
    board = sanitize_key(b_get(models.BOARD_KEY))
    build_env = b_get(models.BUILD_ENVIRONMENT_KEY)
    compiler_name = b_get(models.COMPILER_KEY)
    compiler_version = b_get(models.COMPILER_VERSION_KEY)
    compiler = sanitize_key(
        "-".join([compiler_name, compiler_version])
        if compiler_version else compiler_name
    )
    defconfig = sanitize_key(b_get(models.DEFCONFIG_FULL_KEY))
    job = b_get(models.JOB_KEY)
    job_id = b_get(models.JOB_ID_KEY)
    kernel = b_get(models.KERNEL_KEY)
    lab = b_get(models.LAB_NAME_KEY)
    created_on = b_get(models.CREATED_KEY)
    branch = b_get(models.GIT_BRANCH_KEY)

    # We might be importing boots in parallel through multiple processes.
    # Keep a lock here when looking for the previous regressions or we might
    # end up creating multiple boot regression documents.
    redis_conn = redisdb.get_db_connection(db_options)
    lock_key = LOCK_KEY_FMT.format(job, branch, kernel)

    with redis.lock.Lock(redis_conn, lock_key, timeout=5):
        # Do we have "old" regressions?
        regr_docs = []
        if old_regr and old_regr[0]:
            regr_docs = get_regressions_by_key(regr_key, old_regr[1])

        if pass_doc:
            regr_docs.append(pass_doc)

        # Append the actual fail boot report to the list.
        regr_docs.append(boot_doc)

        # Do we already have a regression registered for this job_id,
        # job, kernel?
        prev_reg_doc = check_prev_regression(boot_doc, boot_doc, db_options)
        if prev_reg_doc[0]:
            doc_id = prev_reg_doc[0]

            regr_data_key = \
                REGRESSION_DOT_FMT.format(models.REGRESSIONS_KEY, regr_key)

            if prev_reg_doc[1]:
                # If we also have the same key in the document, append the
                # new boot report.
                document = {"$addToSet": {regr_data_key: boot_doc}}
            else:
                # Otherwise just set the new key.
                document = {"$set": {regr_data_key: regr_docs}}

            ret_val = utils.db.update3(
                models.BOOT_REGRESSIONS_COLLECTION,
                {models.ID_KEY: prev_reg_doc[0]},
                document,
                db_options=db_options
            )
        else:
            regression_doc = {
                models.CREATED_KEY: created_on,
                models.GIT_BRANCH_KEY: branch,
                models.JOB_ID_KEY: job_id,
                models.JOB_KEY: job,
                models.KERNEL_KEY: kernel
            }

            # The regression data structure.
            # A dictionary with nested keys, in this order:
            # lab name
            # architecture type
            # board name
            # board instance or the string "none"
            # defconfig full string
            # build environment
            # compiler string (compiler + version)
            # The regressions are stored in a list as the value of the
            # "compiler" key.
            regression_doc[models.REGRESSIONS_KEY] = {
                lab: {
                    arch: {
                        board: {
                            b_instance: {
                                defconfig: {
                                    build_env: {
                                        compiler: regr_docs
                                    }
                                }
                            }
                        }
                    }
                }
            }

            ret_val, doc_id = \
                utils.db.save3(
                    models.BOOT_REGRESSIONS_COLLECTION, regression_doc,
                    db_options=db_options)

        # Save the regressions id and boot id in an index collection.
        if ret_val in (200, 201) and doc_id:
            utils.db.save3(
                models.BOOT_REGRESSIONS_BY_BOOT_COLLECTION,
                {
                    models.BOOT_ID_KEY: boot_id,
                    models.BOOT_REGRESSIONS_ID_KEY: doc_id,
                    models.CREATED_KEY: created_on
                },
                db_options=db_options
            )

    return ret_val, doc_id
Example No. 8
def create_bisect_report(data,
                         email_options,
                         db_options,
                         base_path=utils.BASE_PATH):
    """Create the bisection report email to be sent.

    :param data: The meta-data for the bisection job.
    :type data: dictionary
    :param email_options: The email options.
    :type email_options: dict
    :param db_options: The mongodb database connection parameters.
    :type db_options: dict
    :param base_path: Path to the top-level storage directory.
    :type base_path: string
    :return: A tuple with the TXT email body and the headers as a dictionary.
    If an error occurred, None.
    """
    db = utils.db.get_db_connection(db_options)

    job, branch, kernel, test_case_path, lab, target = (data[k] for k in [
        models.JOB_KEY,
        models.GIT_BRANCH_KEY,
        models.KERNEL_KEY,
        models.TEST_CASE_PATH_KEY,
        models.LAB_NAME_KEY,
        models.DEVICE_TYPE_KEY,
    ])

    email_format, email_subject = (email_options[k] for k in [
        "format",
        "subject",
    ])

    specs = {
        x: data[x]
        for x in [
            models.TYPE_KEY,
            models.ARCHITECTURE_KEY,
            models.DEFCONFIG_FULL_KEY,
            models.BUILD_ENVIRONMENT_KEY,
            models.JOB_KEY,
            models.KERNEL_KEY,
            models.GIT_BRANCH_KEY,
            models.LAB_NAME_KEY,
            models.DEVICE_TYPE_KEY,
            models.BISECT_GOOD_COMMIT_KEY,
            models.BISECT_BAD_COMMIT_KEY,
            models.TEST_CASE_PATH_KEY,
        ]
    }
    doc = utils.db.find_one2(db[models.BISECT_COLLECTION], specs)
    if not doc:
        utils.LOG.warning("Failed to find bisection document")
        return None

    report_hashable_str = "-".join(
        str(x) for x in [
            doc[models.BISECT_FOUND_SUMMARY_KEY],
            doc[models.KERNEL_KEY],
        ])
    report_hash = hashlib.sha1(report_hashable_str).hexdigest()
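    # The hash acts as a de-duplication marker in Redis: if it already
    # exists, this report was sent recently; otherwise record it for 24
    # hours (86400 seconds).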
    redisdb_conn = redisdb.get_db_connection(db_options)
    if redisdb_conn.exists(report_hash):
        utils.LOG.info("Bisection report already sent for {}: {}".format(
            doc[models.KERNEL_KEY], doc[models.BISECT_FOUND_SUMMARY_KEY]))
        return None
    redisdb_conn.set(report_hash, "bisection-report", ex=86400)

    headers = {
        rcommon.X_REPORT: rcommon.BISECT_REPORT_TYPE,
        rcommon.X_BRANCH: branch,
        rcommon.X_TREE: job,
        rcommon.X_KERNEL: kernel,
        rcommon.X_LAB: lab,
    }

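    # Relative path matching the storage layout:
    # <job>/<branch>/<kernel>/<arch>/<defconfig>/<build_environment>/<lab>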
    rel_path = '/'.join((job, branch, kernel) + tuple(data[k] for k in [
        models.ARCHITECTURE_KEY,
        models.DEFCONFIG_FULL_KEY,
        models.BUILD_ENVIRONMENT_KEY,
        models.LAB_NAME_KEY,
    ]))

    log_path = os.path.join(base_path, rel_path, data[models.BISECT_LOG_KEY])
    with open(log_path) as log_file:
        log_data = json.load(log_file)

    regr = utils.db.find_one2(db[models.TEST_REGRESSION_COLLECTION],
                              doc[models.REGRESSION_ID_KEY])
    test_case = utils.db.find_one2(
        db[models.TEST_CASE_COLLECTION],
        regr[models.REGRESSIONS_KEY][-1][models.TEST_CASE_ID_KEY])
    test_group = utils.db.find_one2(db[models.TEST_GROUP_COLLECTION],
                                    test_case[models.TEST_GROUP_ID_KEY])

    # Disabled until we have a working Tests view on the frontend
    # bad_details_url = '/'.join([
    #   rcommon.DEFAULT_BASE_URL, "boot", "id", str(boot_data["FAIL"]["_id"])])

    log_url_txt, log_url_html = ('/'.join([
        rcommon.DEFAULT_STORAGE_URL, rel_path, test_group[k]
    ]) for k in [models.BOOT_LOG_KEY, models.BOOT_LOG_HTML_KEY])

    cc = doc[models.COMPILER_KEY]
    cc_ver = doc[models.COMPILER_VERSION_KEY]
    compiler_str = "-".join([cc, cc_ver]) if cc_ver else cc

    template_data = {
        "subject_str": email_subject,
        "bad": doc[models.BISECT_BAD_SUMMARY_KEY],
        # "bad_details_url": bad_details_url,
        "log_url_txt": log_url_txt,
        "log_url_html": log_url_html,
        "found": doc[models.BISECT_FOUND_SUMMARY_KEY],
        "checks": doc[models.BISECT_CHECKS_KEY],
        "tree": job,
        "git_url": doc[models.GIT_URL_KEY],
        "branch": branch,
        "target": doc[models.DEVICE_TYPE_KEY],
        "arch": doc[models.ARCHITECTURE_KEY],
        "lab_name": lab,
        "defconfig": doc[models.DEFCONFIG_FULL_KEY],
        "compiler": compiler_str,
        "test_case_path": doc[models.TEST_CASE_PATH_KEY],
        "show": log_data["show"],
        "log": log_data["log"],
    }

    body = rcommon.create_txt_email("bisect.txt", **template_data)

    return body, headers