Example No. 1
def test_classify_good_push_only_intermittent_failures(monkeypatch):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    test_selection_data = {"groups": {"group1": 0.7, "group2": 0.3}}
    likely_regressions = {"group3", "group4"}
    are_cross_config = [False] * len(GROUP_SUMMARIES_DEFAULT)
    generate_mocks(
        monkeypatch,
        push,
        test_selection_data,
        likely_regressions,
        are_cross_config,
    )

    assert push.classify() == (
        PushStatus.GOOD,
        Regressions(
            real={},
            # None of the groups are cross-config failures, and each was either
            # selected by bugbug with low confidence or not at all (no confidence)
            intermittent={
                "group1": make_tasks("group1"),
                "group2": make_tasks("group2"),
                "group3": make_tasks("group3"),
                "group4": make_tasks("group4"),
                "group5": make_tasks("group5"),
            },
            unknown={},
        ),
    )
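The classification examples in this listing (this one and Nos. 7, 9, 12 and 24, among others) rely on helpers the listing never shows. A minimal sketch of what they might look like, reconstructed from the call sites alone, follows; the names appear in the examples, but the bodies and exact signatures below are assumptions (some examples, e.g. Nos. 12 and 24, pass generate_mocks an extra positional argument, so the signature evidently varies across versions of the test suite).

def make_tasks(group_name):
    # Hypothetical body: return the failing tasks that the shared
    # GROUP_SUMMARIES_DEFAULT fixture records for this group.
    return GROUP_SUMMARIES_DEFAULT[group_name].tasks


def generate_mocks(monkeypatch, push, test_selection_data,
                   likely_regressions, are_cross_config):
    # Hypothetical body: stub the Push entry points that classify()
    # consults so each test fully controls its inputs.
    monkeypatch.setattr(Push, "get_test_selection_data",
                        lambda self, *args, **kwargs: test_selection_data)
    monkeypatch.setattr(Push, "get_likely_regressions",
                        lambda self, *args, **kwargs: likely_regressions)
    # ...plus a stub that marks each group summary in GROUP_SUMMARIES_DEFAULT
    # as a cross-config failure (or not) according to are_cross_config.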
Example No. 2
def test_create_push(responses):
    responses.add(
        responses.GET,
        "https://hg.mozilla.org/integration/autoland/json-pushes?version=2&startID=122&endID=123",
        json={
            "pushes": {
                "123": {
                    "changesets": ["123456"],
                    "date": 1213174092,
                    "user": "******",
                },
            },
        },
        status=200,
    )
    responses.add(
        responses.GET,
        HGMO.JSON_TEMPLATE.format(branch="integration/autoland", rev="abcdef"),
        json={"node": "abcdef"},
        status=200,
    )
    responses.add(
        responses.GET,
        HGMO.JSON_TEMPLATE.format(branch="integration/autoland", rev="123456"),
        json={"node": "123456"},
        status=200,
    )

    p1 = Push("abcdef")
    p2 = p1.create_push(123)
    assert p2.rev == "123456"
    assert p2.id == 123
    assert p2.date == 1213174092
Example No. 3
def test_push_child_raises(responses):
    rev = "a" * 40

    # Try and mozilla-unified are not supported.
    for branch in ("try", "mozilla-unified"):
        push = Push(rev, branch=branch)
        with pytest.raises(ChildPushNotFound):
            push.child

    # A push with no children raises.
    push = Push(rev, branch="integration/autoland")
    push._id = 100
    url = HgRev.JSON_PUSHES_TEMPLATE.format(
        branch=push.branch,
        push_id_start=push.id,
        push_id_end=push.id + 1,
    )
    responses.add(
        responses.GET,
        url,
        json={
            "lastpushid": push.id,
            "pushes": {}
        },
        status=200,
    )

    with pytest.raises(ChildPushNotFound):
        push.child
Example No. 4
def test_get_test_selection_data_from_bugbug_handle_exceeded_timeout(
        responses, monkeypatch):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    task_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/index/v1/task/gecko.v2.{branch}.revision.{rev}.taskgraph.decision"
    responses.add(responses.GET,
                  task_url,
                  status=200,
                  json={"taskId": "a" * 10})

    cache_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/queue/v1/task/aaaaaaaaaa/artifacts/public/bugbug-push-schedules.json"
    responses.add(responses.GET, cache_url, status=404)

    url = f"{bugbug.BUGBUG_BASE_URL}/push/{branch}/{rev}/schedules"
    responses.add(responses.GET, url, status=202)

    monkeypatch.setattr(bugbug, "DEFAULT_RETRY_TIMEOUT", 3)
    monkeypatch.setattr(bugbug, "DEFAULT_RETRY_INTERVAL", 1)
    with pytest.raises(bugbug.BugbugTimeoutException) as e:
        push.get_test_selection_data()
    assert str(
        e.value) == "Timed out waiting for result from Bugbug HTTP Service"

    assert len(responses.calls) == 5
    assert [(call.request.method, call.request.url)
            for call in responses.calls] == [
                ("GET", task_url),
                ("GET", cache_url),
                # The call to the Bugbug HTTP service is retried 3 times
                ("GET", url),
                ("GET", url),
                ("GET", url),
            ]
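Why five recorded calls and three retries? A minimal sketch of the polling loop this test exercises, assuming DEFAULT_RETRY_TIMEOUT and DEFAULT_RETRY_INTERVAL are consumed roughly like this (the real loop lives in mozci's bugbug module and may differ):

import time


class BugbugTimeoutException(Exception):
    # Stand-in for the exception the tests import from the bugbug module.
    pass


def wait_for_schedules(fetch, timeout=3, interval=1):
    # fetch() performs one GET against the /schedules endpoint; bugbug
    # answers 202 while the result is still being computed.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        response = fetch()
        if response.status_code == 200:
            return response.json()
        time.sleep(interval)
    # With timeout=3 and interval=1 the loop attempts at t=0, 1 and 2:
    # exactly the three retried GETs the assertion above counts.
    raise BugbugTimeoutException(
        "Timed out waiting for result from Bugbug HTTP Service")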
Example No. 5
def test_push_does_not_exist(responses):
    # We hit hgmo when 'rev' is less than 40 characters.
    rev = "foobar"
    responses.add(
        responses.GET,
        HgRev.AUTOMATION_RELEVANCE_TEMPLATE.format(
            branch="integration/autoland", rev="foobar"),
        json={"error": f"unknown revision '{rev}'"},
        status=404,
    )

    with pytest.raises(PushNotFound):
        Push(rev)

    # Otherwise we need to hit hgmo some other way.
    rev = "a" * 40
    responses.add(
        responses.GET,
        HgRev.AUTOMATION_RELEVANCE_TEMPLATE.format(
            branch="integration/autoland", rev=rev),
        json={"error": f"unknown revision '{rev}'"},
        status=404,
    )
    p = Push(rev)
    with pytest.raises(PushNotFound):
        p.id
Example No. 6
def test_push_parent_on_autoland(responses):
    ctx = {
        "branch": "integration/autoland",
        "push_id_start": "121",
        "push_id_end": "122",
    }
    responses.add(
        responses.GET,
        HgRev.JSON_PUSHES_TEMPLATE.format(**ctx),
        json={
            "pushes": {
                "122": {
                    "changesets": [{
                        "node": "b" * 40
                    }],
                    "date": 1213174092,
                    "user": "******",
                },
            },
        },
        status=200,
    )

    p1 = Push("a" * 40)
    p1._id = 123
    parent = p1.parent

    assert parent.id == 122
    assert parent.rev == "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
Example No. 7
def test_classify_almost_good_push(monkeypatch, test_selection_data,
                                   are_cross_config):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)
    generate_mocks(
        monkeypatch,
        push,
        test_selection_data,
        set(),
        are_cross_config,
    )

    assert push.classify(
        unknown_from_regressions=False,
        consistent_failures_counts=None,
        consider_children_pushes_configs=False,
    ) == (
        PushStatus.UNKNOWN,
        Regressions(
            real={},
            intermittent={},
            unknown={
                "group1": make_tasks("group1"),
                "group2": make_tasks("group2"),
                "group3": make_tasks("group3"),
                "group4": make_tasks("group4"),
                "group5": make_tasks("group5"),
            },
        ),
    )
Example No. 8
def test_get_test_selection_data_from_bugbug(responses):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    task_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/index/v1/task/gecko.v2.{branch}.revision.{rev}.taskgraph.decision"
    responses.add(responses.GET,
                  task_url,
                  status=200,
                  json={"taskId": "a" * 10})

    cache_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/queue/v1/task/aaaaaaaaaa/artifacts/public/bugbug-push-schedules.json"
    responses.add(responses.GET, cache_url, status=404)

    url = f"{bugbug.BUGBUG_BASE_URL}/push/{branch}/{rev}/schedules"
    responses.add(responses.GET, url, status=200, json=SCHEDULES_EXTRACT)

    data = push.get_test_selection_data()
    assert data == SCHEDULES_EXTRACT

    assert len(responses.calls) == 3
    assert [(call.request.method, call.request.url)
            for call in responses.calls] == [
                ("GET", task_url),
                ("GET", cache_url),
                ("GET", url),
            ]
Example No. 9
def test_classify_almost_bad_push(monkeypatch, test_selection_data,
                                  likely_regressions, are_cross_config):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)
    generate_mocks(
        monkeypatch,
        push,
        test_selection_data,
        likely_regressions,
        are_cross_config,
    )

    assert push.classify() == (
        PushStatus.UNKNOWN,
        Regressions(
            real={},
            intermittent={},
            unknown={
                "group1": make_tasks("group1"),
                "group2": make_tasks("group2"),
                "group3": make_tasks("group3"),
                "group4": make_tasks("group4"),
                "group5": make_tasks("group5"),
            },
        ),
    )
Example No. 10
def test_generate_all_shadow_scheduler_tasks(responses):
    rev = "a" * 40
    shadow_schedulers = (
        (
            "bar",
            ["task-1", "task-3", "task-4"],
        ),  # names will be generated alphabetically
        ("foo", ["task-2", "task-4"]),
    )

    push = Push(rev)
    responses.add(
        responses.GET,
        get_index_url(push.index + ".taskgraph.decision"),
        json={"taskId": 1},
        status=200,
    )

    ids = count(2)
    responses.add(
        responses.GET,
        get_artifact_url(1, "public/task-graph.json"),
        json={
            next(ids): {"label": f"source-test-shadow-scheduler-{s[0]}"}
            for s in shadow_schedulers
        },
        status=200,
    )

    ids = count(2)
    for ss in shadow_schedulers:
        s_id = next(ids)
        responses.add(
            responses.GET,
            get_index_url(f"{push.index}.source.shadow-scheduler-{ss[0]}"),
            json={"taskId": s_id},
            status=200,
        )

        responses.add(
            responses.GET,
            get_artifact_url(s_id,
                             "public/shadow-scheduler/optimized-tasks.json"),
            stream=True,
            json={next(ids): {"label": task} for task in ss[1]},
            status=200,
        )

    # retrieve the data
    for i, (name, tasks) in enumerate(
            push.generate_all_shadow_scheduler_tasks()):
        print(i, name, tasks)
        assert name == shadow_schedulers[i][0]
        assert tasks == set(shadow_schedulers[i][1])
Example No. 11
def test_classify(monkeypatch, classify_regressions_return_value,
                  expected_result):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    def mock_return(self, *args, **kwargs):
        return classify_regressions_return_value

    monkeypatch.setattr(Push, "classify_regressions", mock_return)
    assert push.classify()[0] == expected_result
Example No. 12
def test_classify_bad_push_some_real_failures(monkeypatch):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    test_selection_data = {
        "groups": {
            "group1": 0.99,
            "group2": 0.95,
            "group3": 0.91
        }
    }
    likely_regressions = {"group1", "group2", "group3"}
    are_cross_config = [
        i % 2 == 0 for i in range(len(GROUP_SUMMARIES_DEFAULT))
    ]
    generate_mocks(
        monkeypatch,
        push,
        test_selection_data,
        likely_regressions,
        set(),
        are_cross_config,
    )

    assert push.classify(
        unknown_from_regressions=False, consider_children_pushes_configs=False
    ) == (
        PushStatus.BAD,
        Regressions(
            # group1 & group3 were both selected by bugbug with high confidence,
            # are likely to regress, and are cross-config failures
            real={
                "group1": make_tasks("group1"),
                "group3": make_tasks("group3")
            },
            # group4 isn't a cross config failure and was not selected by bugbug (no confidence)
            intermittent={"group4": make_tasks("group4")},
            # group2 isn't a cross config failure but was selected with high confidence by bugbug
            # group5 is a cross config failure but was not selected by bugbug nor likely to regress
            unknown={
                "group2": make_tasks("group2"),
                "group5": make_tasks("group5")
            },
        ),
        ToRetriggerOrBackfill(
            real_retrigger={"group2": make_tasks("group2")},
            intermittent_retrigger={"group5": make_tasks("group5")},
            backfill={},
        ),
    )
Example No. 13
def test_classify(monkeypatch, classify_regressions_return_value,
                  expected_result):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    def mock_return(self, *args, **kwargs):
        return classify_regressions_return_value, ToRetriggerOrBackfill(
            real_retrigger={},
            intermittent_retrigger={},
            backfill={},
        )

    monkeypatch.setattr(Push, "classify_regressions", mock_return)
    assert push.classify()[0] == expected_result
Example No. 14
def test_backfill_incomplete_secret(responses, secret_content, create_task):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)
    decision_task_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/index/v1/task/gecko.v2.{branch}.revision.{rev}.taskgraph.decision"
    responses.add(
        responses.GET, decision_task_url, status=200, json={"taskId": "a" * 10}
    )

    responses.add(
        responses.GET,
        get_artifact_url(push.decision_task.id, "public/actions.json"),
        status=200,
        json=ACTIONS_ARTIFACT_EXTRACT,
    )

    # Update configuration
    config._config["taskcluster_firefox_ci"] = secret_content

    task = create_task(label="foobar")
    with pytest.raises(
        AssertionError,
        match="Missing Taskcluster Firefox CI credentials in mozci config secret",
    ):
        task.backfill(push)
Example No. 15
def test_push_parent_on_try_fails_when_not_a_push_head(responses,
                                                       create_changesets):
    changesets = create_changesets(3)
    head = changesets[-1]["node"]
    ctx = {
        "branch": "try",
        "rev": head,
    }
    responses.add(
        responses.GET,
        HgRev.AUTOMATION_RELEVANCE_TEMPLATE.format(**ctx),
        json={"changesets": changesets},
        status=200,
    )

    # We raise if rev is not found or a push head anywhere.
    ctx["rev"] = changesets[0]["parents"][0]
    for branch in (
            "mozilla-central",
            "mozilla-beta",
            "mozilla-release",
            "integration/autoland",
    ):
        ctx["branch"] = branch
        responses.add(
            responses.GET,
            HgRev.AUTOMATION_RELEVANCE_TEMPLATE.format(**ctx),
            json={"changesets": changesets},
            status=200,
        )

    push = Push(head, branch="try")
    with pytest.raises(ParentPushNotFound):
        push.parent
Example No. 16
def test_caching_of_push(cache):
    # A recent push will have almost no tasks in AD; a few days later all of
    # its data will come from AD, and after 6 weeks it will have no data
    # there. Results data for a task comes either via AD or through the
    # errorsummary artifact via Taskcluster. Regardless of which source was
    # used, we store the same data in the cache.

    # Once this push is older than a year, update the revision.
    # Once this push is older than 6 weeks, the test will run slower because
    # all test task results will come from Taskcluster.
    REV = "08c29f9d87799463cdf99ab81f08f62339b49328"  # Push from Jul. 23, 2020.
    BRANCH = "mozilla-central"
    TASKS_KEY = "{}/{}/tasks".format(BRANCH, REV)

    # Making sure there's nothing left in the cache
    if cache.get(TASKS_KEY):
        cache.forget(TASKS_KEY)
    assert cache.get(TASKS_KEY) is None

    push = Push(REV, branch=BRANCH)
    # Q: Calling push.tasks a second time would hit the cache; Should we test that scenario?
    assert len(push.tasks) > 0
    cached_tasks = cache.get(TASKS_KEY)
    assert cached_tasks is not None
    TOTAL_TEST_TASKS = 3517
    # Testing that the tasks associated to a push have been cached
    assert len(cached_tasks) == TOTAL_TEST_TASKS
    assert len(cached_tasks) == len(push.tasks)
    assert cached_tasks == push.tasks
Example No. 17
def test_push_bugs_multiple(responses):
    rev = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

    responses.add(
        responses.GET,
        f"https://hg.mozilla.org/integration/autoland/json-automationrelevance/{rev}",
        json={
            "changesets": [
                {
                    "bugs": [{
                        "no": "1617050"
                    }, {
                        "no": "123"
                    }]
                },
                {
                    "bugs": [{
                        "no": "1617050"
                    }]
                },
                {
                    "bugs": [{
                        "no": "456"
                    }]
                },
            ]
        },
        status=200,
    )

    p = Push(rev)
    assert p.bugs == {"123", "456", "1617050"}
Example No. 18
def test_push_tasks_with_cached_completed_tasks(monkeypatch, responses):
    rev = "abcdef"
    branch = "autoland"

    cached_tasks = [
        Task.create(id=1,
                    label="test-task",
                    result="passed",
                    state="completed")
    ]
    monkeypatch.setattr(config.cache, "get", lambda x: cached_tasks)

    responses.add(
        responses.GET,
        f"https://hg.mozilla.org/integration/autoland/json-automationrelevance/{rev}",
        json={"changesets": [{
            "node": rev,
            "pushdate": [1638349140]
        }]},
        status=200,
    )

    responses.add(
        responses.GET,
        "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.autoland.revision.abcdef.taskgraph.decision",
        json={"taskId": 1},
        status=200,
    )

    push = Push(rev, branch)
    tasks = push.tasks
    assert len(tasks) == 1
Example No. 19
def test_backfill_trigger_hook_error(responses, create_task):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)
    decision_task_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/index/v1/task/gecko.v2.{branch}.revision.{rev}.taskgraph.decision"
    responses.add(
        responses.GET, decision_task_url, status=200, json={"taskId": "a" * 10}
    )

    responses.add(
        responses.GET,
        get_artifact_url(push.decision_task.id, "public/actions.json"),
        status=200,
        json=ACTIONS_ARTIFACT_EXTRACT,
    )

    config._config["taskcluster_firefox_ci"] = {
        "client_id": "a client id",
        "access_token": "an access token",
    }

    hookGroupId = ACTIONS_ARTIFACT_EXTRACT["actions"][0]["hookGroupId"]
    hookId = ACTIONS_ARTIFACT_EXTRACT["actions"][0]["hookId"].replace("/", "%2F")
    responses.add(
        responses.POST,
        f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/hooks/v1/hooks/{hookGroupId}/{hookId}/trigger",
        status=500,
    )

    task = create_task(label="foobar")
    with pytest.raises(TaskclusterRestFailure):
        task.backfill(push)
Example No. 20
def test_retrigger_should_not_retrigger(responses, create_task):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    task = create_task(label="foobar")
    task.retrigger(push)
Example No. 21
def run(args):
    manifestsA = get_manifests_by_task(Push(args.revA, branch=args.branch))
    manifestsB = get_manifests_by_task(Push(args.revB, branch=args.branch))

    labels = sorted(set(list(manifestsA.keys()) + list(manifestsB.keys())))
    if args.task_filter:
        fltr = re.compile(args.task_filter)
        labels = filter(fltr.search, labels)

    for label in labels:
        logger.info(f"Processing {label}")

        if label not in manifestsA:
            logger.warning(f"{label} not run in rev1!")
            continue

        if label not in manifestsB:
            logger.warning(f"{label} not run in rev2!")
            continue

        groupsA = sorted(manifestsA[label])
        groupsB = sorted(manifestsB[label])
        if groupsA == groupsB:
            logger.info(f"{label} matches!")
            continue

        logger.warning(f"{label} doesn't match!")
        out = unified_diff(groupsA,
                           groupsB,
                           fromfile=f'Rev 1: {args.revA}',
                           tofile=f'Rev 2: {args.revB}',
                           n=8)
        diff = []
        for line in out:
            line = line.rstrip()
            if line.startswith('+'):
                line = f"<green>{line}</green>"

            elif line.startswith('-'):
                line = f"<red>{line}</red>"

            diff.append(line)

        diff = '\n'.join(diff)
        logger.opt(ansi=True).info("Diff:\n" + f"{diff}")

    return []
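A possible command-line wiring for run(); the argparse setup and the defaults below are guesses, and only the attribute names (revA, revB, branch, task_filter) are taken from the function body:

import argparse

parser = argparse.ArgumentParser(
    description="Diff the manifests run per task between two revisions")
parser.add_argument("revA")
parser.add_argument("revB")
parser.add_argument("--branch", default="autoland")
parser.add_argument("--task-filter", default=None,
                    help="regular expression applied to task labels")

if __name__ == "__main__":
    run(parser.parse_args())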
Example No. 22
File: push.py Project: jmaher/mozci
    def handle(self):
        push = Push(self.argument("rev"), self.argument("branch"))

        table = []
        for task in sorted(push.tasks, key=lambda t: t.label):
            table.append([task.label, task.result or "running"])

        self.line(tabulate(table, headers=["Label", "Result"]))
Example No. 23
def test_iterate_parents(responses):
    rev = "a" * 40
    branch = "integration/autoland"
    push = Push(rev, branch)

    push_id = 10
    depth = 5

    responses.add(
        responses.GET,
        f"https://hg.mozilla.org/{branch}/json-automationrelevance/{rev}",
        json={"changesets": [
            {
                "pushid": push_id
            },
        ]},
        status=200,
    )

    responses.add(
        responses.GET,
        f"https://hg.mozilla.org/{branch}/json-pushes?version=2&full=1&startID={push_id-2-depth}&endID={push_id-1}",
        json={
            "pushes": {
                push_id - i: {
                    "changesets": [{
                        "node": chr(ord("a") + i) * 40,
                        "desc": "A nice description about Bug 1234567",
                    }],
                    "date": 1,
                }
                for i in range(1, depth + 2)
            }
        },
        status=200,
    )

    for other in push._iterate_parents(depth):
        assert other.id == push_id
        push_id -= 1
Example No. 24
def test_classify_almost_bad_push(monkeypatch, test_selection_data,
                                  likely_regressions, are_cross_config,
                                  to_retrigger):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)
    generate_mocks(
        monkeypatch,
        push,
        test_selection_data,
        likely_regressions,
        set(),
        are_cross_config,
    )

    to_retrigger_or_backfill = {
        "real_retrigger": {},
        "intermittent_retrigger": {},
        "backfill": {},
    }
    for key, groups in to_retrigger.items():
        to_retrigger[key] = {group: make_tasks(group) for group in groups}
    to_retrigger_or_backfill.update(to_retrigger)

    assert push.classify(
        unknown_from_regressions=False,
        consistent_failures_counts=None,
        consider_children_pushes_configs=False,
    ) == (
        PushStatus.UNKNOWN,
        Regressions(
            real={},
            intermittent={},
            unknown={
                "group1": make_tasks("group1"),
                "group2": make_tasks("group2"),
                "group3": make_tasks("group3"),
                "group4": make_tasks("group4"),
                "group5": make_tasks("group5"),
            },
        ),
        ToRetriggerOrBackfill(**to_retrigger_or_backfill),
    )
Example No. 25
def main(argv):
    if len(argv) != 3:
        print(f"{sys.argv[0]} branch revs")
        return 1

    branch = argv[1]
    rev = argv[2].split(",")[0]  # only the first revision is used

    # {
    #   (test, subtest): {
    #       config: {
    #           status: int
    #       }
    #   }
    # }
    results = collections.defaultdict(
        lambda: collections.defaultdict(lambda: collections.defaultdict(int)))

    push = Push(rev, branch)
    for (config, group), summary in push.config_group_summaries.items():
        if not ("-fis-" in config
                and group.startswith("testing/web-platform")):
            continue

        print(config, group)

        for task in summary.tasks:
            paths = [a for a in task.artifacts if a.endswith("wptreport.json")]
            for path in paths:
                run = task.get_artifact(path)

                for result in run["results"]:
                    name = (result["test"], "")
                    status = result["status"]
                    results[name][config][status] += 1

                    for subtest in result["subtests"]:
                        name = (result["test"], subtest["name"])
                        status = subtest["status"]
                        results[name][config][status] += 1

    # figure out what other statuses exist
    print("test,subtest,config,OK,PASS,SKIP,FAIL")
    for (test, subtest), configs in results.items():
        for config, statuses in configs.items():
            line = [
                test,
                subtest,
                config,
                *[str(statuses[k]) for k in ["OK", "PASS", "SKIP", "FAIL"]],
            ]
            print(",".join(line))

    return 0
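The comment before the header row hints that statuses other than OK/PASS/SKIP/FAIL (e.g. ERROR or TIMEOUT) may occur. One way to discover the full set from the collected results before hardcoding the columns, as a sketch that would slot in right before the print loop:

all_statuses = sorted({
    status
    for configs in results.values()
    for statuses in configs.values()
    for status in statuses
})
print("test,subtest,config," + ",".join(all_statuses))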
Example No. 26
def run(args):
    push = Push(args.rev)

    num_scheduled = len(push.scheduled_task_labels)
    num_total = len(push.target_task_labels)
    percentage = round(float(num_scheduled) / num_total * 100, 1)
    all_regressions = push.get_possible_regressions("label") | push.get_likely_regressions("label")

    return [[
        'Tasks Scheduled',
        'Tasks Total',
        'Percentage',
        'Total Hours (scheduled)',
        'Backed Out',
        'Regressions (possible)',
        'Regressions (likely)',
        'Caught',
        'Missed',
    ], [
        num_scheduled,
        num_total,
        percentage,
        push.scheduled_duration,
        push.backedout,
        len(push.get_possible_regressions("label")),
        len(push.get_likely_regressions("label")),
        len(all_regressions & push.scheduled_task_labels),
        len(all_regressions - push.scheduled_task_labels),
    ]]
Example No. 27
def test_get_shadow_scheduler_tasks_fallback(responses):
    rev = "a" * 40
    ss = ("foo", ["task-2", "task-4"])
    ss_id = 1

    push = Push(rev)
    responses.add(
        responses.GET,
        get_index_url(f"{push.index}.source.shadow-scheduler-{ss[0]}"),
        json={"taskId": ss_id},
        status=200,
    )

    responses.add(
        responses.GET,
        get_artifact_url(ss_id,
                         "public/shadow-scheduler/optimized-tasks.json"),
        status=404,
    )

    # utility file will also try the old deployment
    responses.add(
        responses.GET,
        get_artifact_url(ss_id,
                         "public/shadow-scheduler/optimized-tasks.json",
                         old_deployment=True),
        status=404,
    )

    responses.add(
        responses.GET,
        get_artifact_url(ss_id,
                         "public/shadow-scheduler/optimized_tasks.list"),
        stream=True,
        body="\n".join(ss[1]),
        status=200,
    )

    assert push.get_shadow_scheduler_tasks(ss[0]) == set(ss[1])
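The mocked responses above encode a three-step fallback: the JSON artifact on the current deployment, the same artifact on the old deployment, then the legacy plain-text list. As a sketch, assuming direct requests calls and reusing the test helper get_artifact_url (the real lookup happens inside mozci's utility code):

import requests


def fetch_optimized_tasks(ss_id):
    # First the JSON artifact, on the current deployment then the old one.
    for kwargs in ({}, {"old_deployment": True}):
        response = requests.get(get_artifact_url(
            ss_id, "public/shadow-scheduler/optimized-tasks.json", **kwargs))
        if response.ok:
            return {task["label"] for task in response.json().values()}
    # Last resort: the legacy newline-separated task list.
    response = requests.get(get_artifact_url(
        ss_id, "public/shadow-scheduler/optimized_tasks.list"))
    return set(response.text.splitlines())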
Example No. 28
def run_combinations_for_push(push):
    push = Push(push["rev"], branch=push["branch"])

    push_dir = f"{BASE_OUTPUT_DIR}/{push.id}"
    if not os.path.exists(push_dir):
        os.makedirs(push_dir)

    csv_rows = []
    for parameters in PARAMETERS_COMBINATIONS:
        run_id = uuid.uuid4()

        start = time.time()
        try:
            classification, regressions = push.classify(**parameters)
            end = time.time()

            # Only save results to a JSON file if the execution was successful
            classification_name = classification.name
            create_json_file(push, run_id, classification_name, regressions)
        except Exception as e:
            end = time.time()
            classification_name = "SYSTEM_ERROR"
            logger.error(
                f"An error occurred during the classification of push {push.push_uuid}: {e}"
            )

        csv_rows.append(
            {
                "run_uuid": run_id,
                "push_uuid": push.push_uuid,
                **parameters,
                "classification": classification_name,
                "time_spent": round(end - start, 3),
                "now": datetime.datetime.now(),
            }
        )

    return csv_rows
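A minimal sketch of the create_json_file helper called above; the name and arguments come from the call site, while the output path and JSON shape are assumptions:

import json
import os


def create_json_file(push, run_id, classification_name, regressions):
    # Hypothetical layout: one JSON file per run inside the per-push
    # directory that run_combinations_for_push() creates.
    path = os.path.join(BASE_OUTPUT_DIR, str(push.id), f"{run_id}.json")
    with open(path, "w") as fh:
        json.dump(
            {
                "push_uuid": push.push_uuid,
                "classification": classification_name,
                "regressions": {
                    "real": sorted(regressions.real),
                    "intermittent": sorted(regressions.intermittent),
                    "unknown": sorted(regressions.unknown),
                },
            },
            fh,
            indent=2,
        )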
Example No. 29
def test_create_push(responses):
    ctx = {
        "branch": "integration/autoland",
        "push_id_start": "122",
        "push_id_end": "123",
    }
    responses.add(
        responses.GET,
        HGMO.JSON_PUSHES_TEMPLATE.format(**ctx),
        json={
            "pushes": {
                "123": {
                    "changesets": ["123456"],
                    "date": 1213174092,
                    "user": "******",
                },
            },
        },
        status=200,
    )
    responses.add(
        responses.GET,
        HGMO.JSON_TEMPLATE.format(branch="integration/autoland", rev="abcdef"),
        json={"node": "abcdef"},
        status=200,
    )
    responses.add(
        responses.GET,
        HGMO.JSON_TEMPLATE.format(branch="integration/autoland", rev="123456"),
        json={"node": "123456"},
        status=200,
    )

    p1 = Push("abcdef")
    p2 = p1.create_push(123)
    assert p2.rev == "123456"
    assert p2.id == 123
    assert p2.date == 1213174092
Example No. 30
def test_get_test_selection_data_from_bugbug_handle_errors(
        responses, monkeypatch):
    rev = "a" * 40
    branch = "autoland"
    push = Push(rev, branch)

    task_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/index/v1/task/gecko.v2.{branch}.revision.{rev}.taskgraph.decision"
    responses.add(responses.GET,
                  task_url,
                  status=200,
                  json={"taskId": "a" * 10})

    cache_url = f"{PRODUCTION_TASKCLUSTER_ROOT_URL}/api/queue/v1/task/aaaaaaaaaa/artifacts/public/bugbug-push-schedules.json"
    responses.add(responses.GET, cache_url, status=404)

    url = f"{bugbug.BUGBUG_BASE_URL}/push/{branch}/{rev}/schedules"
    responses.add(responses.GET, url, status=500)

    monkeypatch.setattr(bugbug, "DEFAULT_RETRY_TIMEOUT", 3)
    monkeypatch.setattr(bugbug, "DEFAULT_RETRY_INTERVAL", 1)
    with pytest.raises(SourcesNotFound) as e:
        push.get_test_selection_data()
    assert (
        e.value.msg ==
        "No registered sources were able to fulfill 'push_test_selection_data'!"
    )

    assert len(responses.calls) == 5
    assert [(call.request.method, call.request.url)
            for call in responses.calls] == [
                ("GET", task_url),
                ("GET", cache_url),
                # The call to the Bugbug HTTP service is retried 3 times
                ("GET", url),
                ("GET", url),
                ("GET", url),
            ]