Example #1
def schedule_tests(branch, rev):
    from bugbug_http.app import JobInfo
    from bugbug_http import REPO_DIR

    job = JobInfo(schedule_tests, branch, rev)
    LOGGER.debug(f"Processing {job}")

    # Load the full stack of patches leading to that revision
    try:
        stack = get_hgmo_stack(branch, rev)
    except requests.exceptions.RequestException:
        LOGGER.warning(f"Push not found for {branch} @ {rev}!")
        return "NOK"

    # Apply the stack to the local repository
    try:
        revs = repository.apply_stack(REPO_DIR, stack, branch)
    except Exception as e:
        LOGGER.warning(f"Failed to apply stack {branch} @ {rev}: {e}")
        return "NOK"

    test_selection_threshold = float(
        os.environ.get("TEST_SELECTION_CONFIDENCE_THRESHOLD", 0.3))

    # Analyze patches.
    commits = repository.download_commits(REPO_DIR,
                                          revs=revs,
                                          save=False,
                                          use_single_process=True)

    tasks = MODEL_CACHE.get("testlabelselect").select_tests(
        commits, test_selection_threshold)

    reduced = MODEL_CACHE.get("testlabelselect").reduce(
        set(t for t, c in tasks.items() if c >= 0.7), 1.0)

    data = {
        "tasks": tasks,
        "groups": MODEL_CACHE.get("testgroupselect").select_tests(
            commits, test_selection_threshold
        ),
        "reduced_tasks": {t: c for t, c in tasks.items() if t in reduced},
    }
    setkey(job.result_key, orjson.dumps(data))

    return "OK"
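
A small sketch of the final filtering step, using hypothetical task names and a stand-in for the model's reduce() output; only tasks that survive reduction keep their confidence in reduced_tasks:

# Hypothetical confidences from the label-selection model.
tasks = {"test-linux64/opt-mochitest": 0.92, "test-windows10/debug-xpcshell": 0.75}
# Stand-in for MODEL_CACHE.get("testlabelselect").reduce(...): the second task is
# assumed redundant and dropped.
reduced = {"test-linux64/opt-mochitest"}

reduced_tasks = {t: c for t, c in tasks.items() if t in reduced}
print(reduced_tasks)  # {'test-linux64/opt-mochitest': 0.92}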
Example #2
def schedule_tests(branch: str, rev: str) -> str:
    from bugbug_http.app import JobInfo
    from bugbug_http import REPO_DIR

    job = JobInfo(schedule_tests, branch, rev)
    LOGGER.info(f"Processing {job}...")

    # Pull the revision to the local repository
    LOGGER.info("Pulling commits from the remote repository...")
    repository.pull(REPO_DIR, branch, rev)

    # Load the full stack of patches leading to that revision
    LOGGER.info("Loading commits to analyze using automationrelevance...")
    try:
        revs = get_hgmo_stack(branch, rev)
    except requests.exceptions.RequestException:
        LOGGER.warning(f"Push not found for {branch} @ {rev}!")
        return "NOK"

    test_selection_threshold = float(
        os.environ.get("TEST_SELECTION_CONFIDENCE_THRESHOLD", 0.5)
    )

    # Analyze patches.
    commits = repository.download_commits(
        REPO_DIR, revs=revs, save=False, use_single_process=True, include_no_bug=True
    )

    # Only run the selection models when the stack contains commits to analyze.
    if len(commits) > 0:
        testlabelselect_model = MODEL_CACHE.get("testlabelselect")
        testgroupselect_model = MODEL_CACHE.get("testgroupselect")

        tasks = testlabelselect_model.select_tests(commits, test_selection_threshold)

        reduced = testlabelselect_model.reduce(
            set(t for t, c in tasks.items() if c >= 0.8), 1.0
        )

        reduced_higher = testlabelselect_model.reduce(
            set(t for t, c in tasks.items() if c >= 0.9), 1.0
        )

        groups = testgroupselect_model.select_tests(commits, test_selection_threshold)

        # Map the selected groups to the configurations they should run on.
        config_groups = testgroupselect_model.select_configs(groups.keys(), 1.0)
    else:
        tasks = {}
        reduced = {}
        groups = {}
        config_groups = {}

    data = {
        "tasks": tasks,
        "groups": groups,
        "config_groups": config_groups,
        "reduced_tasks": {t: c for t, c in tasks.items() if t in reduced},
        "reduced_tasks_higher": {t: c for t, c in tasks.items() if t in reduced_higher},
        "known_tasks": get_known_tasks(),
    }
    setkey(job.result_key, orjson.dumps(data), compress=True)

    return "OK"
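
A minimal usage sketch with a hypothetical branch and revision hash; in the deployed service this function typically runs inside a background worker, but a direct call shows the return contract ("OK" on success, "NOK" when the push cannot be found):

result = schedule_tests("autoland", "0123456789abcdef")
if result == "NOK":
    print("Push not found; no result was stored under the job's result key.")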
Example #3
def schedule_tests(branch, rev):
    from bugbug_http.app import JobInfo
    from bugbug_http import REPO_DIR

    job = JobInfo(schedule_tests, branch, rev)
    LOGGER.debug(f"Processing {job}")

    # Load the full stack of patches leading to that revision
    try:
        stack = get_hgmo_stack(branch, rev)
    except requests.exceptions.RequestException:
        LOGGER.warning(f"Push not found for {branch} @ {rev}!")
        return "NOK"

    # Apply the stack to the local repository
    try:
        revs = repository.apply_stack(REPO_DIR, stack, branch)
    except Exception as e:
        LOGGER.warning(f"Failed to apply stack {branch} @ {rev}: {e}")
        return "NOK"

    test_selection_threshold = float(
        os.environ.get("TEST_SELECTION_CONFIDENCE_THRESHOLD", 0.3))

    # Analyze patches.
    commits = repository.download_commits(REPO_DIR,
                                          revs=revs,
                                          save=False,
                                          use_single_process=True)

    commit_data = commit_features.merge_commits(commits)

    def get_runnables(granularity):
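        """Select runnables of this granularity above the confidence threshold."""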
        past_failures_data = test_scheduling.get_past_failures(granularity)

        push_num = past_failures_data["push_num"]
        all_runnables = past_failures_data["all_runnables"]

        commit_tests = []
        for data in test_scheduling.generate_data(past_failures_data,
                                                  commit_data, push_num,
                                                  all_runnables, [], []):
            if granularity == "label" and not data["name"].startswith("test-"):
                continue

            commit_test = commit_data.copy()
            commit_test["test_job"] = data
            commit_tests.append(commit_test)

        probs = MODEL_CACHE.get(f"test{granularity}select").classify(
            commit_tests, probabilities=True
        )
        selected_indexes = np.argwhere(probs[:, 1] > test_selection_threshold)[:, 0]
        return {
            commit_tests[i]["test_job"]["name"]: math.floor(probs[i, 1] * 100) / 100
            for i in selected_indexes
        }

    data = {
        "tasks": get_runnables("label"),
        "groups": get_runnables("group"),
    }
    setkey(job.result_key, orjson.dumps(data))

    return "OK"
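
The selection inside get_runnables is plain NumPy thresholding; a self-contained sketch with made-up probabilities shows how indices above the threshold are picked and how confidences are truncated (not rounded) to two decimals:

import math
import numpy as np

# Made-up classifier output: one row per candidate, columns are [P(not needed), P(needed)].
probs = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4], [0.05, 0.95]])
names = ["test-a", "test-b", "test-c", "test-d"]
threshold = 0.3

selected = np.argwhere(probs[:, 1] > threshold)[:, 0]
print({names[i]: math.floor(probs[i, 1] * 100) / 100 for i in selected})
# {'test-b': 0.8, 'test-c': 0.4, 'test-d': 0.95}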