Example #1
def test_fixed_by_commit_push_wasnt_backedout(monkeypatch, create_pushes):
    """
    Tests the scenario where a task succeeded in a parent push, didn't run in the
    push of interest and failed in a following push, with 'fixed by commit' information
    pointing to a back-out of another push.
    """
    monkeypatch.setattr(HGMO, "is_backout", property(lambda cls: True))

    p = create_pushes(4)
    i = 1  # the index of the push we are mainly interested in

    p[i - 1].tasks = [
        Task.create(id="1", label="test-failure-current", result="success")
    ]
    p[i + 1].tasks = [
        Task.create(
            id="1",
            label="test-failure-current",
            result="testfailed",
            classification="fixed by commit",
            classification_note="xxx",
        )
    ]
    p[i + 1].backedoutby = "012c3f1626b3e9bcd803d19aaf9584a81c5c95de"

    assert p[i].get_regressions("label") == {}
    assert p[i + 1].get_regressions("label") == {}
Example #2
def test_far_intermittent_without_classification_and_not_backedout(
    monkeypatch, create_pushes
):
    """
    Tests the scenario where a task succeeded in a parent push, didn't run in the
    push of interest, and was intermittent in a following push which was not
    backed out and didn't have a classification.
    """
    monkeypatch.setattr(HGMO, "is_backout", property(lambda cls: True))

    p = create_pushes(4)
    i = 1  # the index of the push we are mainly interested in

    p[i - 1].tasks = [Task.create(id="1", label="test-intermittent", result="success")]
    p[i + 1].tasks = [
        Task.create(id="1", label="test-intermittent", result="success"),
        Task.create(
            id="2",
            label="test-intermittent",
            result="testfailed",
            classification="not classified",
        ),
    ]

    assert p[i].get_regressions("label") == {"test-intermittent": 4}
    assert p[i + 1].get_regressions("label") == {"test-intermittent": 4}
Example #3
def test_intermittent_fixed_by_commit(monkeypatch, create_pushes):
    """
    Tests the scenario where a task succeeded in a parent push, didn't run in the
    push of interest, and was intermittent in a following push which was
    backed out and had a 'fixed by commit' classification.
    """
    monkeypatch.setattr(HGMO, "is_backout", property(lambda cls: True))

    p = create_pushes(5)
    i = 2  # the index of the push we are mainly interested in

    p[i - 2].tasks = [Task.create(id="1", label="test-intermittent", result="success")]
    p[i - 2].backedoutby = None
    p[i].backedoutby = "d25e5c66de225e2d1b989af61a0420874707dd14"
    p[i + 1].tasks = [
        Task.create(id="1", label="test-intermittent", result="success"),
        Task.create(
            id="2",
            label="test-intermittent",
            result="testfailed",
            classification="fixed by commit",
            classification_note="d25e5c66de225e2d1b989af61a0420874707dd14",
        ),
    ]
    p[i + 1].backedoutby = "012c3f1626b3e9bcd803d19aaf9584a81c5c95de"

    assert p[i].get_regressions("label") == {"test-intermittent": 0}
    assert p[i + 1].get_regressions("label") == {}
Example #4
def test_intermittent_classification(monkeypatch, create_pushes):
    """
    Tests the scenario where a task succeeded in a parent push, didn't run in the
    push of interest, and failed in a following push which was
    backed out and had an 'intermittent' classification.
    """
    monkeypatch.setattr(HGMO, "is_backout", property(lambda cls: True))

    p = create_pushes(5)
    i = 2  # the index of the push we are mainly interested in

    p[i - 1].tasks = [Task.create(id="1", label="test-intermittent", result="success")]
    p[i].backedoutby = "xxx"
    p[i + 1].tasks = [
        Task.create(
            id="1",
            label="test-intermittent",
            result="testfailed",
            classification="intermittent",
        )
    ]
    p[i + 1].backedoutby = "yyy"

    assert p[i].get_regressions("label") == {}
    assert p[i + 1].get_regressions("label") == {}
Example #5
def test_succeeded_in_parent_didnt_run_in_current_passed_in_child_failed_in_grandchild(
    create_pushes,
):
    """
    Tests the scenario where a task succeeded in a parent push, didn't run in the
    push of interest, succeeded in a following push, and failed in a second
    following push.
    """
    p = create_pushes(7)
    i = 3  # the index of the push we are mainly interested in

    p[i - 1].tasks = [Task.create(id="1", label="test-prova", result="success")]
    p[i + 1].tasks = [Task.create(id="1", label="test-prova", result="success")]
    p[i + 2].tasks = [
        Task.create(
            id="1",
            label="test-prova",
            result="testfailed",
            classification="not classified",
        )
    ]
    p[i + 2].backedoutby = "xxx"

    assert p[i - 2].get_regressions("label") == {}
    assert p[i - 1].get_regressions("label") == {}
    assert p[i].get_regressions("label") == {}
    assert p[i + 1].get_regressions("label") == {}
    assert p[i + 2].get_regressions("label") == {"test-prova": 0}
Example #6
    def get_shadow_scheduler_tasks(self, name):
        """Returns all tasks the given shadow scheduler would have scheduled,
        or None if the given scheduler didn't run.

        Args:
            name (str): The name of the shadow scheduler to query.

        Returns:
            set: All task labels that would have been scheduled.
        """
        index = self.index + ".source.shadow-scheduler-{}".format(name)
        task = Task(id=find_task_id(index))
        labels = task.get_artifact("public/shadow-scheduler/optimized_tasks.list")
        return set(labels.splitlines())
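
A minimal usage sketch for this method; the revision, branch, and scheduler name below are placeholder assumptions for illustration, not values from the source:

# Hypothetical usage: "abcdef"/"autoland" and the scheduler name
# "relevant_tests" are placeholders, not taken from the source.
push = Push("abcdef", "autoland")
labels = push.get_shadow_scheduler_tasks("relevant_tests")
print(f"{len(labels)} tasks would have been scheduled")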
Example #7
def test_push_tasks_with_cached_completed_tasks(monkeypatch, responses):
    rev = "abcdef"
    branch = "autoland"

    cached_tasks = [
        Task.create(id=1,
                    label="test-task",
                    result="passed",
                    state="completed")
    ]
    monkeypatch.setattr(config.cache, "get", lambda x: cached_tasks)

    responses.add(
        responses.GET,
        f"https://hg.mozilla.org/integration/autoland/json-automationrelevance/{rev}",
        json={"changesets": [{
            "node": rev,
            "pushdate": [1638349140]
        }]},
        status=200,
    )

    responses.add(
        responses.GET,
        "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.autoland.revision.abcdef.taskgraph.decision",
        json={"taskId": 1},
        status=200,
    )

    push = Push(rev, branch)
    tasks = push.tasks
    assert len(tasks) == 1
Example #8
def test_fixed_by_commit_task_didnt_run_in_parents(monkeypatch, create_pushes):
    """
    Tests the scenario where a task didn't run in a parent push, didn't run in the
    push of interest and failed in a following push, with 'fixed by commit' information
    pointing to the back-out of the push of interest.
    """
    monkeypatch.setattr(HGMO, "is_backout", property(lambda cls: True))

    p = create_pushes(4)
    i = 1  # the index of the push we are mainly interested in

    p[i].backedoutby = "d25e5c66de225e2d1b989af61a0420874707dd14"

    p[i + 1].tasks = [
        Task.create(
            id="1",
            label="test-failure-current",
            result="testfailed",
            classification="fixed by commit",
            classification_note="d25e5c66de225e2d1b989af61a0420874707dd14",
        )
    ]
    p[i + 1].backedoutby = "012c3f1626b3e9bcd803d19aaf9584a81c5c95de"

    assert p[i].get_regressions("label") == {"test-failure-current": 0}
    assert p[i + 1].get_regressions("label") == {}
Example #9
def test_fixed_by_commit_no_backout(monkeypatch, create_pushes):
    """
    Tests the scenario where two tasks succeeded in a parent push, didn't run in the
    push of interest and failed in a following push, with 'fixed by commit' information
    pointing to a bustage fix.
    """

    def mock_is_backout(cls):
        if cls.context["rev"] == "xxx":
            return False

        return True

    monkeypatch.setattr(HGMO, "is_backout", property(mock_is_backout))

    p = create_pushes(4)
    i = 1  # the index of the push we are mainly interested in

    p[i - 1].tasks = [
        Task.create(id="1", label="test-failure-current", result="success"),
        Task.create(id="1", label="test-failure-next", result="success"),
    ]
    p[i].backedoutby = "d25e5c66de225e2d1b989af61a0420874707dd14"

    p[i + 1].tasks = [
        Task.create(
            id="1",
            label="test-failure-current",
            result="testfailed",
            classification="fixed by commit",
            classification_note="xxx",
        ),
        Task.create(
            id="1",
            label="test-failure-next",
            result="testfailed",
            classification="fixed by commit",
            classification_note="012c3f1626b3",
        ),
    ]
    p[i + 1].backedoutby = "012c3f1626b3e9bcd803d19aaf9584a81c5c95de"

    assert p[i].get_regressions("label") == {"test-failure-current": 1}
    assert p[i + 1].get_regressions("label") == {
        "test-failure-current": 1,
        "test-failure-next": 0,
    }
Example #10
    def decision_task(self):
        """A representation of the decision task.

        Returns:
            Task: A `Task` instance representing the decision task.
        """
        index = self.index + ".taskgraph.decision"
        return Task.create(index=index)
Example #11
    def decision_task(self):
        """A representation of the decision task.

        Returns:
            Task: A `Task` instance representing the decision task.
        """
        index = self.index + ".taskgraph.decision"
        task_id = find_task_id(index)
        return Task(id=task_id)
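
Examples #10 and #11 are two variants of the same accessor: both resolve the push's ".taskgraph.decision" index to a Task, one via Task.create(index=...) and one via find_task_id plus the Task constructor. A minimal usage sketch, assuming the accessor is exposed as a property (the decorator is not shown in the snippets) and using placeholder revision/branch values:

# Hypothetical usage: "abcdef"/"autoland" are placeholder values.
push = Push("abcdef", "autoland")
decision = push.decision_task
print(decision.id)  # the Taskcluster id of the decision task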
Example #12
def test_failed_and_not_backedout(create_pushes):
    """
    Tests the scenario where a task failed in a push which was not backed out.
    """
    p = create_pushes(3)
    i = 1  # the index of the push we are mainly interested in

    p[i - 1].tasks = [Task.create(id="1", label="test-prova", result="success")]
    p[i].tasks = [
        Task.create(
            id="1",
            label="test-prova",
            result="testfailed",
            classification="not classified",
        )
    ]

    assert p[i].get_regressions("label") == {"test-prova": 0}
Example #13
def test_far_child_failed_and_backedout(create_pushes):
    """
    Tests the scenario where a task didn't run in the push of interest, which was not
    backed out, and failed in a far-away following push.
    """
    p = create_pushes(3 + (MAX_DEPTH // 2 + 1))
    i = 1  # the index of the push we are mainly interested in

    p[i - 1].tasks = [Task.create(id="1", label="test-prova", result="success")]
    p[len(p) - 2].tasks = [
        Task.create(
            id="1",
            label="test-prova",
            result="testfailed",
            classification="not classified",
        )
    ]

    assert p[i].get_regressions("label") == {}
Example #14
def test_succeeded_and_backedout(create_pushes):
    """
    Tests the scenario where a task succeeded in a push which was backed out.
    """
    p = create_pushes(3)
    i = 1  # the index of the push we are mainly interested in

    p[i].tasks = [Task.create(id="1", label="test-prova", result="success")]
    p[i].backedoutby = "xxx"

    assert p[i].get_regressions("label") == {}
Example #15
def test_configuration():
    assert (Task.create(
        id=1, label="test-windows7-32/debug-reftest-gpu-e10s-1").configuration
            == "test-windows7-32/debug-*-gpu-e10s")
    assert (Task.create(
        id=1,
        label="test-linux1804-64/debug-mochitest-plain-gpu-e10s").configuration
            == "test-linux1804-64/debug-*-e10s")
    assert (Task.create(
        id=1,
        label=
        "test-macosx1014-64-shippable/opt-web-platform-tests-wdspec-headless-e10s-1",
    ).configuration == "test-macosx1014-64-shippable/opt-*-headless-e10s")
    assert (Task.create(
        id=1, label="test-linux1804-64-asan/opt-web-platform-tests-e10s-3").
            configuration == "test-linux1804-64-asan/opt-*-e10s")
    assert (Task.create(
        id=1,
        label="test-linux1804-64-qr/debug-web-platform-tests-wdspec-fis-e10s-1",
    ).configuration == "test-linux1804-64-qr/debug-*-fis-e10s")
    assert (Task.create(
        id=1,
        label=
        "test-windows7-32-shippable/opt-firefox-ui-functional-remote-e10s",
    ).configuration == "test-windows7-32-shippable/opt-*-e10s")
Example #16
def test_results_for_incomplete_task(responses):
    push = FakePush("autoland", "rev")

    for state in ["running", "pending", "unscheduled", "exception"]:
        task = Task.create(
            id=1,
            label="test-task",
            state="running",
        )
        task.retrieve_results(push)
        assert task.results == []

    responses.add(
        responses.GET,
        "https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task/1/artifacts",
        json={
            "artifacts": [{"name": "errorsummary.log"}],
        },
        status=200,
    )

    responses.add(
        responses.GET,
        "https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task/1/artifacts/errorsummary.log",
        body=r"""
            {"action": "test_groups", "line": 3, "groups": ["layout/base/tests/browser.ini"]}
            {"status": "OK", "duration": 12430, "line": 4465, "group": "layout/base/tests/browser.ini", "action": "group_result"}
        """.strip(),
        status=200,
    )

    task = Task.create(
        id=1,
        label="test-task",
        state="completed",
    )
    task.retrieve_results(push)
    assert task.results == [
        GroupResult(group="layout/base/tests/browser.ini", ok=True, duration=12430),
    ]
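
The errorsummary.log artifact mocked above is newline-delimited JSON. Below is a minimal sketch of turning such lines into GroupResult objects, based only on the two record shapes shown in the fixture; it is an illustration, not the actual mozci parser, and it assumes GroupResult is in scope as in the test:

import json

def parse_group_results(body):
    # Each line is one JSON record; only "group_result" records carry a
    # per-group outcome. "status" == "OK" is treated as a pass, matching
    # the fixture above.
    results = []
    for line in body.splitlines():
        line = line.strip()
        if not line:
            continue
        record = json.loads(line)
        if record.get("action") == "group_result":
            results.append(
                GroupResult(
                    group=record["group"],
                    ok=record["status"] == "OK",
                    duration=record.get("duration"),
                )
            )
    return results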
Example #17
def test_to_json():
    kwargs = {
        "id": 1,
        "label": "foobar",
        "result": "pass",
        "duration": 100,
    }
    task = Task.create(**kwargs)
    result = task.to_json()
    json.dumps(result)  # assert doesn't raise

    for k, v in kwargs.items():
        assert k in result
        assert result[k] == v
Example #18
File: push.py Project: ahal/mozci
    def get_shadow_scheduler_tasks(self, name: str) -> List[dict]:
        """Returns all tasks the given shadow scheduler would have scheduled,
        or None if the given scheduler didn't run.

        Args:
            name (str): The name of the shadow scheduler to query.

        Returns:
            list: All tasks that would have been scheduled.
        """
        index = self.index + ".source.shadow-scheduler-{}".format(name)
        task = Task.create(index=index)

        optimized = task.get_artifact(
            "public/shadow-scheduler/optimized-tasks.json")
        return list(optimized.values())
Example #19
    def _normalized_tasks(tasks):
        # If we are missing one of these keys, discard the task.
        required_keys = (
            "id",
            "label",
            "result",
        )

        # Normalize and validate.
        normalized_tasks = []
        for task in tasks.values():
            missing = [k for k in required_keys if k not in task]
            taskstr = task.get("label", task["id"])

            if missing:
                logger.trace(
                    f"Skipping task '{taskstr}' because it is missing "
                    f"the following attributes: {', '.join(missing)}")
                continue

            if task.get("tags"):
                task["tags"] = {t["name"]: t["value"] for t in task["tags"]}

            if task.get("classification_note"):
                if isinstance(task["classification_note"], list):
                    task["classification_note"] = task["classification_note"][
                        -1]

            groups = task.pop("_result_group", None)
            oks = task.pop("_result_ok", None)

            if groups is not None:
                if oks:
                    task["_results"] = [
                        GroupResult(group=group, ok=ok)
                        for group, ok in zip(groups, oks)
                    ]

            normalized_tasks.append(task)

        return [Task.create(**task) for task in normalized_tasks]
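
A hedged illustration of the input shape this helper expects: a mapping of task id to raw task data, with per-group outcomes arriving as parallel "_result_group"/"_result_ok" lists. The sample data is invented for illustration, and the direct call assumes the helper is reachable in scope:

raw_tasks = {
    "abc123": {
        "id": "abc123",
        "label": "test-task",
        "result": "testfailed",
        "_result_group": ["group1", "group2"],
        "_result_ok": [False, True],
    },
    # Missing "label" and "result", so it is skipped with a trace log.
    "def456": {"id": "def456"},
}

tasks = _normalized_tasks(raw_tasks)
assert [t.label for t in tasks] == ["test-task"]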
Example #20
    def get_shadow_scheduler_tasks(self, name: str) -> Set[str]:
        """Returns all tasks the given shadow scheduler would have scheduled,
        or None if the given scheduler didn't run.

        Args:
            name (str): The name of the shadow scheduler to query.

        Returns:
            set: All task labels that would have been scheduled.
        """
        index = self.index + ".source.shadow-scheduler-{}".format(name)
        task = Task.create(index=index)

        try:
            optimized = task.get_artifact(
                "public/shadow-scheduler/optimized-tasks.json")
            return set(t["label"] for t in optimized.values())
        except ArtifactNotFound:
            # TODO Legacy artifact format, remove after Jan 1st 2021.
            labels = task.get_artifact(
                "public/shadow-scheduler/optimized_tasks.list")
            return set(labels.splitlines())
Example #21
def test_finalized_push_tasks_with_cache(monkeypatch, responses):
    rev = "abcdef"
    branch = "autoland"

    cached_tasks = [Task.create(id=1, label="test-task", result="passed")]
    monkeypatch.setattr(config.cache, "get", lambda x: cached_tasks)
    monkeypatch.setattr(Push, "is_finalized", True)

    responses.add(
        responses.GET,
        f"https://hg.mozilla.org/integration/autoland/json-automationrelevance/{rev}",
        json={"changesets": [{
            "node": rev,
            "pushdate": [1638349140]
        }]},
        status=200,
    )

    push = Push(rev, branch)
    tasks = push.tasks
    assert len(tasks) == 1
    assert tasks == cached_tasks
Example #22
def test_create(responses):
    # Creating a task with just a label doesn't work.
    with pytest.raises(TypeError):
        Task.create(label="foobar")

    # Specifying an id works with or without label.
    assert Task.create(id=1, label="foobar").label == "foobar"
    assert Task.create(id=1).label is None

    # Can also specify an index.
    index = "index.path"
    responses.add(
        responses.GET,
        get_index_url(index),
        json={"taskId": 1},
        status=200,
    )
    assert Task.create(index=index, label="foobar").label == "foobar"
    assert Task.create(index=index).label is None

    # Specifying non-existent task index raises.
    responses.replace(responses.GET, get_index_url(index), status=404)
    with pytest.raises(TaskNotFound):
        Task.create(index=index)
Example #23
    task.retrieve_results(push)
    assert task.results == [
        GroupResult(group="layout/base/tests/browser.ini", ok=True),
    ]


@pytest.mark.parametrize(
    "group_summary, expected_result",
    [
        (
            GroupSummary(
                "group1",
                [
                    Task.create(
                        id=i,
                        label=f"test-task{i}",
                        _results=[GroupResult(group="group1", ok=False), GR_2, GR_3],
                    )
                    for i in range(1, 11)
                ],
            ),
            False,
        ),  # All related tasks failed
        (
            GroupSummary(
                "group1",
                [
                    Task.create(
                        id=i,
                        label=f"test-task{i}",
                        _results=[
Example #24
    def tasks(self):
        """All tasks that ran on the push, including retriggers and backfills.

        Returns:
            list: A list of `Task` objects.
        """
        # Gather information from the treeherder table.
        tasks = []
        try:
            tasks = data.handler.get("push_tasks",
                                     branch=self.branch,
                                     rev=self.rev)
        except MissingDataError:
            pass

        # Gather task tags from the task table.
        try:
            tags_by_task = data.handler.get("push_tasks_tags",
                                            branch=self.branch,
                                            rev=self.rev)
            for task in tasks:
                tags = tags_by_task.get(task["id"])
                if tags:
                    if "tags" not in task:
                        task["tags"] = {}
                    task["tags"].update(tags)
        except MissingDataError:
            pass

        # Let's gather error/results from cache or AD/Taskcluster
        test_tasks_results = config.cache.get(self.push_uuid, {})
        was_cached = len(test_tasks_results.keys()) != 0
        groups = None
        if not was_cached:
            # Gather information from the unittest table. We allow missing data for this table because
            # ActiveData only holds very recent data in it, but we have fallbacks on Taskcluster
            # artifacts.
            try:
                groups = data.handler.get("push_test_groups",
                                          branch=self.branch,
                                          rev=self.rev)
                for task in tasks:
                    results = groups.get(task["id"])
                    if results is not None:
                        task["_results"] = [
                            GroupResult(group=group, ok=ok)
                            for group, ok in results.items()
                        ]

            except MissingDataError:
                pass

        tasks = [Task.create(**task) for task in tasks]

        # Add any data available in the cache
        if was_cached:
            # Let's add cached error summaries to TestTasks
            for t in tasks:
                if isinstance(t, TestTask):
                    error_summary = test_tasks_results.get(t.id)
                    # Only Test tasks have stored error summary information in the cache
                    if error_summary:
                        t._errors = error_summary["errors"]
                        t._results = error_summary["results"]
            logger.debug(
                "Fetched tasks errors/results for {} from the cache".format(
                    self.push_uuid))

        # Gather group data which could have been missing in ActiveData.
        concurrent.futures.wait(
            [
                Push.THREAD_POOL_EXECUTOR.submit(lambda task: task.groups,
                                                 task)
                for task in tasks if isinstance(task, TestTask)
            ],
            return_when=concurrent.futures.FIRST_EXCEPTION,
        )

        # Now we can cache the results.
        self._cache_test_tasks(tasks)

        return tasks
Example #25
        "test-windows10-64-2004-qr/debug-web-platform-tests-swr-e10s-9",
        "test-windows10-64-2004-qr/debug-mochitest-devtools-chrome-fis-e10s-1",
    ],
}

NUMBER_OF_DEFAULT_GROUPS = 5
NUMBER_OF_INTERMITTENT_GROUPS_IN_DEFAULT = 2
GROUP_SUMMARIES_DEFAULT = {
    group.name: group
    for group in [
        GroupSummary(
            f"group{i}",
            [
                Task.create(
                    id=j,
                    label=f"test-task{j}",
                    result="failed",
                    _results=[GroupResult(group=f"group{i}", ok=False)],
                ) for j in range(1, 4)
            ] + ([
                Task.create(
                    id=4,
                    label="test-task1",
                    result="passed",
                    _results=[GroupResult(group=f"group{i}", ok=True)],
                )
            ] if i <= NUMBER_OF_INTERMITTENT_GROUPS_IN_DEFAULT else []),
        ) for i in range(1, NUMBER_OF_DEFAULT_GROUPS + 1)
    ]
}

Example #26
def test_push_tasks_with_cached_uncompleted_tasks(monkeypatch, responses):
    rev = "abcdef"
    branch = "autoland"

    cached_tasks = [Task.create(id=1, label="test-task", state="running")]
    monkeypatch.setattr(config.cache, "get", lambda x: cached_tasks)

    responses.add(
        responses.GET,
        f"https://hg.mozilla.org/integration/autoland/json-automationrelevance/{rev}",
        json={"changesets": [{
            "node": rev,
            "pushdate": [1638349140]
        }]},
        status=200,
    )

    responses.add(
        responses.GET,
        "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.autoland.revision.abcdef.taskgraph.decision",
        json={"taskId": 1},
        status=200,
    )

    responses.add(
        responses.GET,
        "https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task/1",
        json={"taskGroupId": "xyz789"},
        status=200,
    )

    responses.add(
        responses.GET,
        "https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task-group/xyz789/list",
        json={
            "tasks": [
                {
                    "task": {
                        "extra": {
                            "treeherder": {
                                "tier": 3
                            },
                        },
                        "metadata": {
                            "name": "task-A",
                        },
                        "tags": {
                            "name": "tag-A"
                        },
                    },
                    "status": {
                        "taskId": "abc13",
                        "state": "unscheduled",
                    },
                },
                {
                    "task": {
                        "extra": {
                            "treeherder": {
                                "tier": 1
                            },
                        },
                        "metadata": {
                            "name": "task-B",
                        },
                        "tags": {
                            "name": "tag-A"
                        },
                    },
                    "status": {
                        "taskId": "abc123",
                        "state": "unscheduled",
                    },
                },
            ]
        },
        status=200,
    )

    responses.add(
        responses.GET,
        "https://treeherder.mozilla.org/api/project/autoland/note/push_notes/?revision=abcdef&format=json",
        json={},
        status=200,
    )

    push = Push(rev, branch)
    tasks = push.tasks
    assert len(tasks) == 1
Example #27
        GroupResult(group="layout/base/tests/browser.ini", ok=True, duration=12430),
    ]


@pytest.mark.parametrize(
    "group_summary, expected_result",
    [
        (
            GroupSummary(
                "group1",
                [
                    Task.create(
                        id=1,
                        label="test-task1",
                        _results=[
                            GroupResult(group="group1", ok=False, duration=42),
                            GR_2,
                            GR_3,
                        ],
                    )
                ],
            ),
            None,
        ),  # Only one task run and failed
        (
            GroupSummary(
                "group1",
                [
                    Task.create(
                        id=1,
                        label="test-linux1804-64/opt-xpcshell-e10s-1",
Example #28
def test_GroupSummary_classifications():
    task1 = Task.create(
        id=1,
        label="test-task1",
        result="failed",
        classification="fixed by commit",
        classification_note="xxx",
    )
    task1._results = [GroupResult("group1", False, duration=42)]
    assert GroupSummary("group1", [task1]).classifications == [
        ("fixed by commit", "xxx")
    ]
    with pytest.raises(AssertionError):
        GroupSummary("group2", [task1])

    task1 = Task.create(
        id=1,
        label="test-task1",
        result="failed",
        classification="fixed by commit",
        classification_note="xxx",
    )
    task1._results = [
        GroupResult("group1", False, duration=42),
        GroupResult("group2", False, duration=42),
    ]
    assert GroupSummary("group1", [task1]).classifications == [
        ("fixed by commit", "xxx")
    ]
    assert GroupSummary("group2", [task1]).classifications == [
        ("fixed by commit", "xxx")
    ]

    task1 = Task.create(
        id=1, label="test-task1", result="failed", classification="intermittent"
    )
    task1._results = [
        GroupResult("group1", False, duration=42),
        GroupResult("group2", False, duration=42),
    ]
    assert GroupSummary("group1", [task1]).classifications == [("intermittent", None)]
    assert GroupSummary("group2", [task1]).classifications == [("intermittent", None)]

    task1 = Task.create(
        id=1,
        label="test-task1",
        result="failed",
        classification="fixed by commit",
        classification_note="xxx",
    )
    task1._results = [
        GroupResult("group1", True, duration=42),
        GroupResult("group2", False, duration=42),
    ]
    assert GroupSummary("group1", [task1]).classifications == []
    assert GroupSummary("group2", [task1]).classifications == [
        ("fixed by commit", "xxx")
    ]

    task1 = Task.create(
        id=1,
        label="test-task1",
        result="failed",
        classification="fixed by commit",
        classification_note="xxx",
    )
    task1._results = [
        GroupResult("group1", True, duration=42),
        GroupResult("group2", False, duration=42),
    ]
    task2 = Task.create(
        id=1, label="test-task1", result="failed", classification="intermittent"
    )
    task2._results = [
        GroupResult("group1", False, duration=42),
        GroupResult("group2", False, duration=42),
    ]
    assert GroupSummary("group1", [task1, task2]).classifications == [
        ("intermittent", None)
    ]
    assert GroupSummary("group2", [task1, task2]).classifications == [
        ("fixed by commit", "xxx"),
        ("intermittent", None),
    ]
Example #29
    def tasks(self):
        """All tasks that ran on the push, including retriggers and backfills.

        Returns:
            list: A list of `Task` objects.
        """

        args = Namespace(rev=self.rev, branch=self.branch)
        tasks = defaultdict(dict)
        retries = defaultdict(int)

        list_keys = (
            "_result_ok",
            "_result_group",
        )

        def add(result):
            if "header" in result:
                result["data"] = [
                    {
                        field: value
                        for field, value in zip(result["header"], entry)
                        if value is not None
                    }
                    for entry in result["data"]
                ]

            for task in result["data"]:
                if "id" not in task:
                    logger.trace(f"Skipping {task} because of missing id.")
                    continue

                task_id = task["id"]

                # If a task is re-run, use the data from the last run.
                if "retry_id" in task:
                    if task["retry_id"] < retries[task_id]:
                        logger.trace(
                            f"Skipping {task} because there is a newer run of it."
                        )
                        continue

                    retries[task_id] = task["retry_id"]

                    # We don't need to store the retry ID.
                    del task["retry_id"]

                cur_task = tasks[task_id]

                for key, val in task.items():
                    if key in list_keys:
                        if key not in cur_task:
                            cur_task[key] = []

                        cur_task[key].append(val)
                    else:
                        cur_task[key] = val

        # Gather information from the treeherder table.
        try:
            add(run_query("push_tasks_from_treeherder", args))
        except MissingDataError:
            pass

        # Gather information from the unittest table. We allow missing data for this table because
        # ActiveData only holds very recent data in it, but we have fallbacks on Taskcluster
        # artifacts.
        # TODO: We have fallbacks for groups and results, but not for kind.
        try:
            add(run_query("push_tasks_results_from_unittest", args))
        except MissingDataError:
            pass

        try:
            add(run_query("push_tasks_groups_from_unittest", args))
        except MissingDataError:
            pass

        # If we are missing one of these keys, discard the task.
        required_keys = (
            "id",
            "label",
        )

        # Normalize and validate.
        normalized_tasks = []
        for task in tasks.values():
            missing = [k for k in required_keys if k not in task]
            taskstr = task.get("label", task["id"])

            if missing:
                logger.trace(
                    f"Skipping task '{taskstr}' because it is missing "
                    f"the following attributes: {', '.join(missing)}"
                )
                continue

            if task.get("tags"):
                task["tags"] = {t["name"]: t["value"] for t in task["tags"]}

            if task.get("classification_note"):
                if isinstance(task["classification_note"], list):
                    task["classification_note"] = task["classification_note"][-1]

            if task.get("_groups"):
                if isinstance(task["_groups"], str):
                    task["_groups"] = [task["_groups"]]

            if task.get("_result_ok"):
                oks = task.pop("_result_ok")

                if task.get("_result_group"):
                    groups = task.pop("_result_group")

                    task["_results"] = [
                        GroupResult(group=group, ok=ok)
                        for group, ok in zip(groups, oks)
                    ]

            normalized_tasks.append(task)

        return [Task.create(**task) for task in normalized_tasks]
Example #30
    def inner(**kwargs):
        nonlocal id
        task = Task.create(id=id, **kwargs)
        id += 1
        return task
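
This inner function is the factory half of a pytest fixture: the `nonlocal id` hands out unique, increasing task ids across calls. A minimal sketch of the enclosing fixture; the fixture name "create_task" and the starting id are assumptions, not taken from the source:

import pytest

# Hypothetical enclosing fixture; the name and starting id are assumed.
@pytest.fixture
def create_task():
    id = 1  # counter shared with the inner factory via `nonlocal`

    def inner(**kwargs):
        nonlocal id
        task = Task.create(id=id, **kwargs)
        id += 1
        return task

    return inner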