Example #1
0
def test_all(g: Graph) -> None:
    """Check that reduction keeps one task per connected component.

    Every edge of the hypothesis-generated graph is recorded as a pair of
    tasks that always fail together (regression probability 1.0), so
    ``reduce_configs`` should collapse each connected component of the
    graph down to a single representative task.
    """
    tasks = [f"windows10/opt-{chr(index)}" for index in range(len(g.vs))]

    try:
        test_scheduling.close_failing_together_db("label")
    except AssertionError:
        # Closing is best-effort: the DB might not be open yet.
        pass
    test_scheduling.remove_failing_together_db("label")

    # TODO: Also add some couples that are *not* failing together.
    ft: dict[str, dict[str, tuple[float, float]]] = {}

    for edge in g.es:
        first, second = tasks[edge.tuple[0]], tasks[edge.tuple[1]]
        # igraph edges are stored with the smaller vertex id first, which
        # maps to lexicographic order of the generated task names.
        assert first < second
        ft.setdefault(first, {})[second] = (0.1, 1.0)

    failing_together = test_scheduling.get_failing_together_db("label", False)
    for task, partners in ft.items():
        failing_together[task.encode("ascii")] = pickle.dumps(partners)

    test_scheduling.close_failing_together_db("label")

    result = testselect.reduce_configs(tasks, 1.0)
    hypothesis.note(f"Result: {sorted(result)}")
    assert len(result) == len(g.components())
Example #2
0
def test_reduce13(failing_together: LMDBDict) -> None:
    """Two overlapping equivalence chains: either minimal pair is acceptable."""
    stats = {
        b"windows10/opt-2": {
            "windows10/opt-3": (0.1, 0.0),
            "windows10/opt-4": (0.1, 1.0),
            "windows10/opt-5": (0.1, 0.0),
        },
        b"windows10/opt-3": {
            "windows10/opt-4": (0.1, 0.0),
            "windows10/opt-5": (0.1, 1.0),
        },
    }
    for key, value in stats.items():
        failing_together[key] = pickle.dumps(value)

    configs = {f"windows10/opt-{n}" for n in range(2, 6)}
    result = testselect.reduce_configs(configs, 1.0, True)

    # opt-2/opt-4 and opt-3/opt-5 are equivalent pairs, so either
    # complementary selection covers everything.
    acceptable = (
        {"windows10/opt-2", "windows10/opt-5"},
        {"windows10/opt-3", "windows10/opt-4"},
    )
    assert result in acceptable
Example #3
0
def test_reduce6(failing_together: LMDBDict) -> None:
    """a, c and d all fail together with d; b and e are independent.

    Any one of {a, c, d} can represent the group, so three selections are
    equally valid, each also keeping the unrelated b and e.
    """
    for key in (b"windows10/opt-a", b"windows10/opt-c"):
        failing_together[key] = pickle.dumps({
            "windows10/opt-d": (0.1, 1.0),
        })

    all_configs = {f"windows10/opt-{letter}" for letter in "abcde"}
    result = testselect.reduce_configs(all_configs, 1.0)

    acceptable = [
        {f"windows10/opt-{letter}", "windows10/opt-b", "windows10/opt-e"}
        for letter in "acd"
    ]
    assert result in acceptable
Example #4
0
def test_reduce1(failing_together: LMDBDict) -> None:
    """Pairwise reduction honours the threshold and tolerates missing stats."""
    failing_together[b"test-linux1804-64/debug"] = pickle.dumps({
        "test-windows10/debug": (0.1, 1.0),
        "test-windows10/opt": (0.1, 1.0),
        "test-linux1804-64/opt": (0.1, 1.0),
    })
    failing_together[b"test-linux1804-64/opt"] = pickle.dumps({
        "test-windows10/opt": (0.1, 0.91),
    })
    failing_together[b"test-linux1804-64-asan/debug"] = pickle.dumps({
        "test-linux1804-64/debug": (0.1, 1.0),
    })

    # (input configs, confidence threshold, expected reduced set)
    cases = [
        ({"test-linux1804-64/debug"}, 1.0, {"test-linux1804-64/debug"}),
        ({"test-linux1804-64/debug", "test-windows10/debug"}, 1.0,
         {"test-linux1804-64/debug"}),
        ({"test-linux1804-64/debug", "test-windows10/opt"}, 1.0,
         {"test-linux1804-64/debug"}),
        # 0.91 < 1.0, so both configs are kept at the strict threshold...
        ({"test-linux1804-64/opt", "test-windows10/opt"}, 1.0,
         {"test-linux1804-64/opt", "test-windows10/opt"}),
        # ...but collapse once the threshold drops to 0.9.
        ({"test-linux1804-64/opt", "test-windows10/opt"}, 0.9,
         {"test-linux1804-64/opt"}),
        ({"test-linux1804-64/opt", "test-linux1804-64/debug"}, 1.0,
         {"test-linux1804-64/opt"}),
        ({"test-linux1804-64-asan/debug", "test-linux1804-64/debug"}, 1.0,
         {"test-linux1804-64/debug"}),
        # Second task not present in the failing together stats of the first.
        ({"test-linux1804-64-asan/debug", "test-windows10/opt"}, 1.0,
         {"test-linux1804-64-asan/debug", "test-windows10/opt"}),
        # A task not present at all in the failing together DB.
        ({"test-linux1804-64-qr/debug", "test-windows10/opt"}, 1.0,
         {"test-linux1804-64-qr/debug", "test-windows10/opt"}),
    ]
    for configs, threshold, expected in cases:
        assert testselect.reduce_configs(configs, threshold) == expected
Example #5
0
def test_reduce5(failing_together: LMDBDict) -> None:
    """opt-d fails together with both other configs, so it alone remains."""
    equivalences = {
        b"linux1804-64/opt-a": {"windows10/opt-d": (0.1, 1.0)},
        b"windows10/opt-c": {"windows10/opt-d": (0.1, 1.0)},
    }
    for key, stats in equivalences.items():
        failing_together[key] = pickle.dumps(stats)

    selected = testselect.reduce_configs(
        {"linux1804-64/opt-a", "windows10/opt-c", "windows10/opt-d"}, 1.0)
    assert selected == {"windows10/opt-d"}
Example #6
0
def test_reduce2(failing_together: LMDBDict) -> None:
    """opt-b transitively covers all four configs, so it alone survives."""
    stats = {
        b"windows10/opt-a": {
            "windows10/opt-b": (0.1, 1.0),
            "windows10/opt-c": (0.1, 0.3),
            "windows10/opt-d": (0.1, 1.0),
        },
        b"windows10/opt-b": {
            "windows10/opt-c": (0.1, 1.0),
            "windows10/opt-d": (0.1, 0.3),
        },
    }
    for key, value in stats.items():
        failing_together[key] = pickle.dumps(value)
    test_scheduling.close_failing_together_db("label")

    configs = {f"windows10/opt-{letter}" for letter in "abcd"}
    assert testselect.reduce_configs(configs, 1.0) == {"windows10/opt-b"}
Example #7
0
def schedule_tests(branch: str, rev: str) -> str:
    """Analyze the pushed revision and store test selection results.

    Pulls the revision from the remote repository, loads the stack of
    patches via automationrelevance, runs the test selection models and
    stores the selected/reduced tasks, groups and config-group pairs under
    the job's result key.

    Args:
        branch: name of the repository branch the push landed on.
        rev: revision hash identifying the push head.

    Returns:
        "OK" on success, "NOK" when the push could not be found upstream.
    """
    from bugbug_http import REPO_DIR
    from bugbug_http.app import JobInfo

    job = JobInfo(schedule_tests, branch, rev)
    LOGGER.info(f"Processing {job}...")

    # Pull the revision to the local repository
    LOGGER.info("Pulling commits from the remote repository...")
    repository.pull(REPO_DIR, branch, rev)

    # Load the full stack of patches leading to that revision
    LOGGER.info("Loading commits to analyze using automationrelevance...")
    try:
        revs = get_hgmo_stack(branch, rev)
    except requests.exceptions.RequestException:
        LOGGER.warning(f"Push not found for {branch} @ {rev}!")
        return "NOK"

    test_selection_threshold = float(
        os.environ.get("TEST_SELECTION_CONFIDENCE_THRESHOLD", 0.5))

    # Analyze patches.
    commits = repository.download_commits(REPO_DIR,
                                          revs=revs,
                                          save=False,
                                          use_single_process=True,
                                          include_no_bug=True)

    if len(commits) > 0:
        testlabelselect_model = MODEL_CACHE.get("testlabelselect")
        testgroupselect_model = MODEL_CACHE.get("testgroupselect")

        tasks = testlabelselect_model.select_tests(commits,
                                                   test_selection_threshold)

        # Two reduction passes at different confidence cut-offs, so the
        # consumer can choose between a broader and a stricter task set.
        reduced = testselect.reduce_configs(
            set(t for t, c in tasks.items() if c >= 0.8), 1.0)

        reduced_higher = testselect.reduce_configs(
            set(t for t, c in tasks.items() if c >= 0.9), 1.0)

        groups = testgroupselect_model.select_tests(commits,
                                                    test_selection_threshold)

        config_groups = testselect.select_configs(groups.keys(), 0.9)
    else:
        tasks = {}
        reduced = set()
        # BUGFIX: reduced_higher was previously left unbound on this branch,
        # raising NameError below when there were no commits to analyze.
        reduced_higher = set()
        groups = {}
        config_groups = {}

    data = {
        "tasks": tasks,
        "groups": groups,
        "config_groups": config_groups,
        "reduced_tasks": {t: c
                          for t, c in tasks.items() if t in reduced},
        "reduced_tasks_higher":
        {t: c
         for t, c in tasks.items() if t in reduced_higher},
        "known_tasks": get_known_tasks(),
    }
    setkey(job.result_key, orjson.dumps(data), compress=True)

    return "OK"