Code Example #1
File: workflow.py Project: Daggron/code-review
    def __init__(self, cache_root):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        self.workflow = CodeReview(
            api_key=taskcluster_config.secrets["PHABRICATOR"]["api_key"],
            url=taskcluster_config.secrets["PHABRICATOR"]["url"],
            publish=taskcluster_config.secrets["PHABRICATOR"].get(
                "publish", False),
            risk_analysis_reviewers=taskcluster_config.secrets.get(
                "risk_analysis_reviewers", []),
            community_config=taskcluster_config.secrets.get(
                "taskcluster_community"),
        )
        self.workflow.register(self.bus)

        # Build mercurial worker and queue
        self.mercurial = MercurialWorker(
            QUEUE_MERCURIAL,
            QUEUE_PHABRICATOR_RESULTS,
            repositories=self.workflow.get_repositories(
                taskcluster_config.secrets["repositories"], cache_root),
        )
        self.mercurial.register(self.bus)

        # Create web server
        self.webserver = WebServer(QUEUE_WEB_BUILDS)
        self.webserver.register(self.bus)

        # Setup monitoring for newly created tasks
        self.monitoring = Monitoring(QUEUE_MONITORING,
                                     taskcluster_config.secrets["admins"],
                                     MONITORING_PERIOD)
        self.monitoring.register(self.bus)
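
All the workflow constructors in these examples follow the same pattern: build one shared MessageBus, then let every worker register() its named queues on it. As a rough, hypothetical sketch of that pattern (a toy stand-in for illustration, not the real libmozevent API; only the method names mirror the excerpts):

import asyncio


class ToyMessageBus:
    """Toy stand-in for MessageBus: a dict of named asyncio queues."""

    def __init__(self):
        self.queues = {}

    def add_queue(self, name):
        self.queues.setdefault(name, asyncio.Queue())

    async def send(self, name, payload):
        await self.queues[name].put(payload)

    async def receive(self, name):
        return await self.queues[name].get()


class ToyWorker:
    """Hypothetical worker: consumes one queue and publishes to another."""

    def __init__(self, input_queue, output_queue):
        self.input_queue = input_queue
        self.output_queue = output_queue

    def register(self, bus):
        # Declare the queues this worker needs on the shared bus.
        self.bus = bus
        bus.add_queue(self.input_queue)
        bus.add_queue(self.output_queue)

    async def run(self):
        # Forward every received payload, forever.
        while True:
            payload = await self.bus.receive(self.input_queue)
            await self.bus.send(self.output_queue, payload)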
Code Example #2
File: test_mercurial.py Project: mozilla/libmozevent
async def test_failure_general(PhabricatorMock, mock_mc):
    """
    Run mercurial worker on a single diff
    and check the treeherder link publication as an artifact
    Use a Python common exception to trigger a broken build
    """
    diff = {
        "phid": "PHID-DIFF-test123",
        "id": 1234,
        "baseRevision": None,
        "revisionPHID": "PHID-DREV-deadbeef",
    }
    build = MockBuild(1234, "PHID-REPO-mc", 5678, "PHID-somehash", diff)
    with PhabricatorMock as phab:
        phab.load_patches_stack(build)

    bus = MessageBus()
    bus.add_queue("phabricator")

    # Get initial tip commit in repo
    initial = mock_mc.repo.tip()

    # The patched and config files should not exist at first
    repo_dir = mock_mc.repo.root().decode("utf-8")
    config = os.path.join(repo_dir, "try_task_config.json")
    target = os.path.join(repo_dir, "test.txt")
    assert not os.path.exists(target)
    assert not os.path.exists(config)

    # Raise an exception during the workflow to trigger a broken build
    def boom(*args):
        raise Exception("Boom")

    mock_mc.apply_build = boom

    worker = MercurialWorker(
        "mercurial", "phabricator", repositories={"PHID-REPO-mc": mock_mc}
    )
    worker.register(bus)
    assert len(worker.repositories) == 1

    await bus.send("mercurial", build)
    assert bus.queues["mercurial"].qsize() == 1
    task = asyncio.create_task(worker.run())

    # Check the unit result was published
    mode, out_build, details = await bus.receive("phabricator")
    assert mode == "fail:general"
    assert out_build == build
    assert details["duration"] > 0
    assert details["message"] == "Boom"
    task.cancel()

    # Clone should not be modified
    tip = mock_mc.repo.tip()
    assert tip.node == initial.node
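
These tests are plain coroutines driven by pytest through an asyncio plugin, with PhabricatorMock and mock_mc supplied as fixtures. A minimal harness of the same shape, assuming pytest and pytest-asyncio are installed (the fixture below is a placeholder, not the real mocks):

import asyncio

import pytest


@pytest.fixture
def payload():
    # Placeholder fixture; the real tests use PhabricatorMock / mock_mc instead.
    return {"id": 1234, "phid": "PHID-DIFF-test123"}


@pytest.mark.asyncio
async def test_queue_roundtrip(payload):
    # Push a build-like payload through a queue and read it back,
    # mirroring the bus.send(...) / bus.receive(...) steps above.
    queue = asyncio.Queue()
    await queue.put(payload)
    assert queue.qsize() == 1
    assert await queue.get() == payload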
Code Example #3
File: test_mercurial.py Project: mozilla/libmozevent
async def test_treeherder_link(PhabricatorMock, mock_mc):
    """
    Run mercurial worker on a single diff
    and check the treeherder link publication as an artifact
    """
    # Preload the build
    diff = {
        "phid": "PHID-DIFF-test123",
        "revisionPHID": "PHID-DREV-deadbeef",
        "id": 1234,
        "baseRevision": "abcdef12345",
    }
    build = MockBuild(1234, "PHID-REPO-mc", 5678, "PHID-HMBT-somehash", diff)
    with PhabricatorMock as phab:
        phab.load_patches_stack(build)

    bus = MessageBus()
    bus.add_queue("phabricator")

    # Get initial tip commit in repo
    initial = mock_mc.repo.tip()

    # The patched and config files should not exist at first
    repo_dir = mock_mc.repo.root().decode("utf-8")
    config = os.path.join(repo_dir, "try_task_config.json")
    target = os.path.join(repo_dir, "test.txt")
    assert not os.path.exists(target)
    assert not os.path.exists(config)

    worker = MercurialWorker(
        "mercurial", "phabricator", repositories={"PHID-REPO-mc": mock_mc}
    )
    worker.register(bus)
    assert len(worker.repositories) == 1

    await bus.send("mercurial", build)
    assert bus.queues["mercurial"].qsize() == 1
    task = asyncio.create_task(worker.run())

    # Check the treeherder link was queued
    mode, out_build, details = await bus.receive("phabricator")
    tip = mock_mc.repo.tip()
    assert mode == "success"
    assert out_build == build
    assert details[
        "treeherder_url"
    ] == "https://treeherder.mozilla.org/#/jobs?repo=try&revision={}".format(
        tip.node.decode("utf-8")
    )
    assert details["revision"] == tip.node.decode("utf-8")
    task.cancel()

    # Tip should be updated
    assert tip.node != initial.node
Code Example #4
File: test_mercurial.py Project: mozilla/libmozevent
async def test_failure_mercurial(PhabricatorMock, mock_mc):
    """
    Run mercurial worker on a single diff
    and check the treeherder link publication as an artifact
    Apply a bad mercurial patch to trigger a mercurial fail
    """
    diff = {
        "revisionPHID": "PHID-DREV-666",
        "baseRevision": "missing",
        "phid": "PHID-DIFF-666",
        "id": 666,
    }
    build = MockBuild(1234, "PHID-REPO-mc", 5678, "PHID-build-666", diff)
    with PhabricatorMock as phab:
        phab.load_patches_stack(build)

    bus = MessageBus()
    bus.add_queue("phabricator")

    # Get initial tip commit in repo
    initial = mock_mc.repo.tip()

    # The patched and config files should not exist at first
    repo_dir = mock_mc.repo.root().decode("utf-8")
    config = os.path.join(repo_dir, "try_task_config.json")
    target = os.path.join(repo_dir, "test.txt")
    assert not os.path.exists(target)
    assert not os.path.exists(config)

    worker = MercurialWorker(
        "mercurial", "phabricator", repositories={"PHID-REPO-mc": mock_mc}
    )
    worker.register(bus)
    assert len(worker.repositories) == 1

    await bus.send("mercurial", build)
    assert bus.queues["mercurial"].qsize() == 1
    task = asyncio.create_task(worker.run())

    # Check the mercurial failure result was queued
    mode, out_build, details = await bus.receive("phabricator")
    assert mode == "fail:mercurial"
    assert out_build == build
    assert details["duration"] > 0
    assert details["message"] == MERCURIAL_FAILURE
    task.cancel()

    # Clone should not be modified
    tip = mock_mc.repo.tip()
    assert tip.node == initial.node
Code Example #5
File: workflow.py Project: tp-tc/code-review
    def __init__(self, cache_root):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        self.workflow = CodeReview(
            api_key=taskcluster_config.secrets["PHABRICATOR"]["api_key"],
            url=taskcluster_config.secrets["PHABRICATOR"]["url"],
            publish=taskcluster_config.secrets["PHABRICATOR"].get(
                "publish", False),
            risk_analysis_reviewers=taskcluster_config.secrets.get(
                "risk_analysis_reviewers", []),
            community_config=taskcluster_config.secrets.get(
                "taskcluster_community"),
            user_blacklist=taskcluster_config.secrets["user_blacklist"],
        )
        self.workflow.register(self.bus)

        # Build mercurial worker and queue
        self.mercurial = MercurialWorker(
            QUEUE_MERCURIAL,
            QUEUE_PHABRICATOR_RESULTS,
            repositories=self.workflow.get_repositories(
                taskcluster_config.secrets["repositories"], cache_root),
        )
        self.mercurial.register(self.bus)

        # Create web server
        self.webserver = WebServer(QUEUE_WEB_BUILDS)
        self.webserver.register(self.bus)

        # Setup monitoring for newly created tasks
        self.monitoring = Monitoring(QUEUE_MONITORING,
                                     taskcluster_config.secrets["admins"],
                                     MONITORING_PERIOD)
        self.monitoring.register(self.bus)

        # Create pulse listener for unit test failures
        if self.workflow.publish:
            self.pulse = PulseListener(
                QUEUE_PULSE,
                "exchange/taskcluster-queue/v1/task-completed",
                "*.*.gecko-level-3._",
                taskcluster_config.secrets["pulse_user"],
                taskcluster_config.secrets["pulse_password"],
            )
            self.pulse.register(self.bus)
Code Example #6
File: test_mercurial.py Project: mozilla/libmozevent
async def test_crash_utf8_author(PhabricatorMock, mock_mc):
    """
    Run mercurial worker on a single diff
    but the patch author has utf-8 chars in its name
    """
    diff = {
        "revisionPHID": "PHID-DREV-badutf8",
        "baseRevision": "missing",
        "phid": "PHID-DIFF-badutf8",
        "id": 555,
    }
    build = MockBuild(4444, "PHID-REPO-mc", 5555, "PHID-build-badutf8", diff)
    with PhabricatorMock as phab:
        phab.load_patches_stack(build)

    bus = MessageBus()
    bus.add_queue("phabricator")

    # The patched and config files should not exist at first
    repo_dir = mock_mc.repo.root().decode("utf-8")
    config = os.path.join(repo_dir, "try_task_config.json")
    target = os.path.join(repo_dir, "test.txt")
    assert not os.path.exists(target)
    assert not os.path.exists(config)

    worker = MercurialWorker(
        "mercurial", "phabricator", repositories={"PHID-REPO-mc": mock_mc}
    )
    worker.register(bus)
    assert len(worker.repositories) == 1

    await bus.send("mercurial", build)
    assert bus.queues["mercurial"].qsize() == 1

    # Run the mercurial worker on that patch only
    task = asyncio.create_task(worker.run())
    mode, out_build, details = await bus.receive("phabricator")
    task.cancel()

    # Check we have the patch with utf-8 author properly applied
    assert [(c.author, c.desc) for c in mock_mc.repo.log()] == [
        (
            b"libmozevent <*****@*****.**>",
            b"try_task_config for code-review\n"
            b"Differential Diff: PHID-DIFF-badutf8",
        ),
        (
            b"Andr\xc3\xa9 XXXX <*****@*****.**>",
            b"This patch has an author with utf8 chars\n"
            b"Differential Diff: PHID-DIFF-badutf8",
        ),
        (b"test", b"Readme"),
    ]

    # The phab output should be successful
    assert mode == "success"
    assert out_build == build
    assert details[
        "treeherder_url"
    ] == "https://treeherder.mozilla.org/#/jobs?repo=try&revision={}".format(
        mock_mc.repo.tip().node.decode("utf-8")
    )
    assert details["revision"] == mock_mc.repo.tip().node.decode("utf-8")
Code Example #7
File: test_mercurial.py Project: mozilla/libmozevent
async def test_push_to_try_nss(PhabricatorMock, mock_nss):
    """
    Run mercurial worker on a single diff
    with a push to try server, but with NSS support (try syntax)
    """
    diff = {
        "phid": "PHID-DIFF-test123",
        "revisionPHID": "PHID-DREV-deadbeef",
        "id": 1234,
        # Revision does not exist, will apply on tip
        "baseRevision": "abcdef12345",
    }
    build = MockBuild(1234, "PHID-REPO-nss", 5678, "PHID-HMBT-deadbeef", diff)
    with PhabricatorMock as phab:
        phab.load_patches_stack(build)

    bus = MessageBus()
    bus.add_queue("phabricator")

    # Get initial tip commit in repo
    initial = mock_nss.repo.tip()

    # The patched and config files should not exist at first
    repo_dir = mock_nss.repo.root().decode("utf-8")
    config = os.path.join(repo_dir, "try_task_config.json")
    target = os.path.join(repo_dir, "test.txt")
    assert not os.path.exists(target)
    assert not os.path.exists(config)

    worker = MercurialWorker(
        "mercurial", "phabricator", repositories={"PHID-REPO-nss": mock_nss}
    )
    worker.register(bus)
    assert len(worker.repositories) == 1

    await bus.send("mercurial", build)
    assert bus.queues["mercurial"].qsize() == 1
    task = asyncio.create_task(worker.run())

    # Check the treeherder link was queued
    mode, out_build, details = await bus.receive("phabricator")
    tip = mock_nss.repo.tip()
    assert mode == "success"
    assert out_build == build
    assert details[
        "treeherder_url"
    ] == "https://treeherder.mozilla.org/#/jobs?repo=try&revision={}".format(
        tip.node.decode("utf-8")
    )
    assert details["revision"] == tip.node.decode("utf-8")
    task.cancel()

    # The target should have content now
    assert os.path.exists(target)
    assert open(target).read() == "First Line\nSecond Line\n"

    # The config should have content now
    assert os.path.exists(config)
    assert json.load(open(config)) == {
        "version": 2,
        "parameters": {
            "code-review": {"phabricator-build-target": "PHID-HMBT-deadbeef"}
        },
    }

    # Get tip commit in repo
    # It should be different from the initial one (patches + config have been applied)
    assert tip.node != initial.node

    # Check all commits messages
    assert [c.desc for c in mock_nss.repo.log()] == [
        b"try: -a -b XXX -c YYY",
        b"Bug XXX - A second commit message\nDifferential Diff: PHID-DIFF-test123",
        b"Bug XXX - A first commit message\nDifferential Diff: PHID-DIFF-xxxx",
        b"Readme",
    ]

    # Check the push to try has been called
    # with tip commit
    ssh_conf = 'ssh -o StrictHostKeyChecking="no" -o User="******" -o IdentityFile="{}"'.format(
        mock_nss.ssh_key_path
    )
    mock_nss.repo.push.assert_called_with(
        dest=b"http://nss/try", force=True, rev=tip.node, ssh=ssh_conf.encode("utf-8")
    )
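
The json.load(open(config)) assertions above check the try_task_config.json the worker writes before pushing. A small, self-contained round trip of a payload shaped like the one asserted here (the path and values are illustrative only):

import json
import os
import tempfile

payload = {
    "version": 2,
    "parameters": {
        "code-review": {"phabricator-build-target": "PHID-HMBT-deadbeef"}
    },
}
config = os.path.join(tempfile.mkdtemp(), "try_task_config.json")
with open(config, "w") as f:
    json.dump(payload, f, indent=4, sort_keys=True)
with open(config) as f:
    assert json.load(f) == payload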
Code Example #8
File: test_mercurial.py Project: mozilla/libmozevent
async def test_dont_push_skippable_files_to_try(PhabricatorMock, mock_mc):
    """
    Run mercurial worker on a single diff
    that skips the push to try server
    """
    bus = MessageBus()
    bus.add_queue("phabricator")

    # Preload the build
    diff = {
        "phid": "PHID-DIFF-test123",
        "revisionPHID": "PHID-DREV-deadbeef",
        "id": 1234,
        # Revision does not exist, will apply on tip
        "baseRevision": "abcdef12345",
    }
    build = MockBuild(1234, "PHID-REPO-mc", 5678, "PHID-HMBT-deadbeef", diff)
    with PhabricatorMock as phab:
        phab.load_patches_stack(build)

    # Get initial tip commit in repo
    initial = mock_mc.repo.tip()

    # The patched and config files should not exist at first
    repo_dir = mock_mc.repo.root().decode("utf-8")
    config = os.path.join(repo_dir, "try_task_config.json")
    target = os.path.join(repo_dir, "test.txt")
    assert not os.path.exists(target)
    assert not os.path.exists(config)

    worker = MercurialWorker(
        "mercurial",
        "phabricator",
        repositories={"PHID-REPO-mc": mock_mc},
        skippable_files=["test.txt"],
    )
    worker.register(bus)
    assert len(worker.repositories) == 1

    await bus.send("mercurial", build)
    assert bus.queues["mercurial"].qsize() == 1
    task = asyncio.create_task(worker.run())

    # Check the ineligible failure result was queued
    mode, out_build, details = await bus.receive("phabricator")
    tip = mock_mc.repo.tip()
    assert mode == "fail:ineligible"
    assert out_build == build
    assert (
        details["message"]
        == "Modified files match skippable internal configuration files"
    )
    task.cancel()

    # The target should have content now
    assert os.path.exists(target)
    assert open(target).read() == "First Line\nSecond Line\n"

    # Get tip commit in repo
    # It should be different from the initial one (patches + config have been applied)
    assert tip.node != initial.node

    # Check all commits messages
    assert [c.desc for c in mock_mc.repo.log()] == [
        b"Bug XXX - A second commit message\nDifferential Diff: PHID-DIFF-test123",
        b"Bug XXX - A first commit message\nDifferential Diff: PHID-DIFF-xxxx",
        b"Readme",
    ]

    # Check all commits authors
    assert [c.author for c in mock_mc.repo.log()] == [
        b"John Doe <*****@*****.**>",
        b"randomUsername <random>",
        b"test",
    ]

    # Check the push to try has not been called
    mock_mc.repo.push.assert_not_called()
Code Example #9
File: test_mercurial.py Project: mozilla/libmozevent
async def test_push_to_try_existing_rev(PhabricatorMock, mock_mc):
    """
    Run mercurial worker on a single diff
    with a push to try server
    but applying on an existing revision
    """
    bus = MessageBus()
    bus.add_queue("phabricator")
    repo_dir = mock_mc.repo.root().decode("utf-8")

    def _readme(content):
        # Make a commit on README.md in the repo
        readme = os.path.join(repo_dir, "README.md")
        with open(readme, "a") as f:
            f.write(content)
        _, rev = mock_mc.repo.commit(message=content.encode("utf-8"), user=b"test")
        return rev

    # Make two commits, the first one is our base
    base = _readme("Local base for diffs")
    extra = _readme("EXTRA")

    # Preload the build
    diff = {
        "phid": "PHID-DIFF-solo",
        "revisionPHID": "PHID-DREV-solo",
        "id": 9876,
        # Apply on the existing base revision created above
        "baseRevision": base,
    }
    build = MockBuild(1234, "PHID-REPO-mc", 5678, "PHID-HMBT-deadbeef", diff)
    build.revision_url = "http://phab.test/D1234"
    with PhabricatorMock as phab:
        phab.load_patches_stack(build)

    # The patched and config files should not exist at first
    target = os.path.join(repo_dir, "solo.txt")
    config = os.path.join(repo_dir, "try_task_config.json")
    assert not os.path.exists(target)
    assert not os.path.exists(config)

    worker = MercurialWorker(
        "mercurial", "phabricator", repositories={"PHID-REPO-mc": mock_mc}
    )
    worker.register(bus)
    assert len(worker.repositories) == 1

    await bus.send("mercurial", build)
    assert bus.queues["mercurial"].qsize() == 1
    task = asyncio.create_task(worker.run())

    # Check the treeherder link was queued
    mode, out_build, details = await bus.receive("phabricator")
    tip = mock_mc.repo.tip()
    assert mode == "success"
    assert out_build == build
    assert details[
        "treeherder_url"
    ] == "https://treeherder.mozilla.org/#/jobs?repo=try&revision={}".format(
        tip.node.decode("utf-8")
    )
    assert details["revision"] == tip.node.decode("utf-8")
    task.cancel()

    # The target should have content now
    assert os.path.exists(target)
    assert open(target).read() == "Solo PATCH\n"

    # Check the try_task_config file
    assert os.path.exists(config)
    assert json.load(open(config)) == {
        "version": 2,
        "parameters": {
            "target_tasks_method": "codereview",
            "optimize_target_tasks": True,
            "phabricator_diff": "PHID-HMBT-deadbeef",
        },
    }

    # Get tip commit in repo
    # It should be different from the initial one (patches and config have been applied)
    assert tip.node != base
    assert (
        tip.desc
        == b"""try_task_config for http://phab.test/D1234
Differential Diff: PHID-DIFF-solo"""
    )

    # Check the push to try has been called
    # with tip commit
    ssh_conf = 'ssh -o StrictHostKeyChecking="no" -o User="******" -o IdentityFile="{}"'.format(
        mock_mc.ssh_key_path
    )
    mock_mc.repo.push.assert_called_with(
        dest=b"http://mozilla-central/try",
        force=True,
        rev=tip.node,
        ssh=ssh_conf.encode("utf-8"),
    )

    # Check the parent is the solo patch commit
    parents = mock_mc.repo.parents(tip.node)
    assert len(parents) == 1
    parent = parents[0]
    assert (
        parent.desc
        == b"A nice human readable commit message\nDifferential Diff: PHID-DIFF-solo"
    )

    # Check the grand parent is the base, not extra
    great_parents = mock_mc.repo.parents(parent.node)
    assert len(great_parents) == 1
    great_parent = great_parents[0]
    assert great_parent.node == base

    # Extra commit should not appear
    assert parent.node != extra
    assert great_parent.node != extra
    assert "EXTRA" not in open(os.path.join(repo_dir, "README.md")).read()
Code Example #10
File: workflow.py Project: sylvestre/code-review
    def __init__(self, cache_root):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        publish = taskcluster_config.secrets["PHABRICATOR"].get(
            "publish", False)

        # Check the redis support is enabled on Heroku
        if heroku.in_dyno():
            assert self.bus.redis_enabled is True, "Need Redis on Heroku"

        community_config = taskcluster_config.secrets.get(
            "taskcluster_community")
        test_selection_enabled = taskcluster_config.secrets.get(
            "test_selection_enabled", False)

        # Run webserver & pulse on web dyno or single instance
        if not heroku.in_dyno() or heroku.in_web_dyno():

            # Create web server
            self.webserver = WebServer(QUEUE_WEB_BUILDS)
            self.webserver.register(self.bus)

            # Create pulse listener
            exchanges = {}
            if taskcluster_config.secrets["autoland_enabled"]:
                logger.info("Autoland ingestion is enabled")
                # autoland ingestion
                exchanges[QUEUE_PULSE_AUTOLAND] = [(PULSE_TASK_GROUP_RESOLVED,
                                                    ["#.gecko-level-3.#"])]

            # Create pulse listeners for bugbug test selection task and unit test failures.
            if community_config is not None and test_selection_enabled:
                exchanges[QUEUE_PULSE_TRY_TASK_END] = [
                    (PULSE_TASK_COMPLETED, ["#.gecko-level-1.#"]),
                    (PULSE_TASK_FAILED, ["#.gecko-level-1.#"]),
                    # https://bugzilla.mozilla.org/show_bug.cgi?id=1599863
                    # (
                    #    "exchange/taskcluster-queue/v1/task-exception",
                    #    ["#.gecko-level-1.#"],
                    # ),
                ]

                self.community_pulse = PulseListener(
                    {
                        QUEUE_PULSE_BUGBUG_TEST_SELECT: [(
                            "exchange/taskcluster-queue/v1/task-completed",
                            ["route.project.relman.bugbug.test_select"],
                        )]
                    },
                    taskcluster_config.secrets["communitytc_pulse_user"],
                    taskcluster_config.secrets["communitytc_pulse_password"],
                    "communitytc",
                )
                # Manually register to set queue as redis
                self.community_pulse.bus = self.bus
                self.bus.add_queue(QUEUE_PULSE_BUGBUG_TEST_SELECT, redis=True)
                self.bus.add_queue(QUEUE_PULSE_TRY_TASK_END, redis=True)
            else:
                self.community_pulse = None

            if exchanges:
                self.pulse = PulseListener(
                    exchanges,
                    taskcluster_config.secrets["pulse_user"],
                    taskcluster_config.secrets["pulse_password"],
                )
                # Manually register to set queue as redis
                self.pulse.bus = self.bus
                self.bus.add_queue(QUEUE_PULSE_AUTOLAND, redis=True)
            else:
                self.pulse = None

        else:
            self.bugbug_utils = None
            self.webserver = None
            self.pulse = None
            self.community_pulse = None
            logger.info("Skipping webserver, bugbug and pulse consumers")

            # Register queues for workers
            self.bus.add_queue(QUEUE_PULSE_AUTOLAND, redis=True)
            self.bus.add_queue(QUEUE_PULSE_BUGBUG_TEST_SELECT, redis=True)
            self.bus.add_queue(QUEUE_PULSE_TRY_TASK_END, redis=True)
            self.bus.add_queue(QUEUE_WEB_BUILDS, redis=True)

        # Run work processes on worker dyno or single instance
        if not heroku.in_dyno() or heroku.in_worker_dyno():
            self.workflow = CodeReview(
                api_key=taskcluster_config.secrets["PHABRICATOR"]["api_key"],
                url=taskcluster_config.secrets["PHABRICATOR"]["url"],
                publish=publish,
                user_blacklist=taskcluster_config.secrets["user_blacklist"],
            )
            self.workflow.register(self.bus)

            # Build mercurial worker and queue
            self.mercurial = MercurialWorker(
                QUEUE_MERCURIAL,
                QUEUE_MERCURIAL_APPLIED,
                repositories=self.workflow.get_repositories(
                    taskcluster_config.secrets["repositories"],
                    cache_root,
                    default_ssh_key=taskcluster_config.secrets["ssh_key"],
                ),
            )
            self.mercurial.register(self.bus)

            # Setup monitoring for newly created tasks
            self.monitoring = Monitoring(
                taskcluster_config,
                QUEUE_MONITORING,
                taskcluster_config.secrets["admins"],
                MONITORING_PERIOD,
            )
            self.monitoring.register(self.bus)

            # Setup monitoring for newly created community tasks
            if community_config is not None:
                self.community_monitoring = Monitoring(
                    community_taskcluster_config,
                    QUEUE_MONITORING_COMMUNITY,
                    taskcluster_config.secrets["admins"],
                    MONITORING_PERIOD,
                )
                self.community_monitoring.register(self.bus)
            else:
                self.community_monitoring = None

            self.bugbug_utils = BugbugUtils(self.workflow.api)
            self.bugbug_utils.register(self.bus)
        else:
            self.workflow = None
            self.mercurial = None
            self.monitoring = None
            self.community_monitoring = None
            self.bugbug_utils = None
            logger.info("Skipping workers consumers")
Code Example #11
File: workflow.py Project: sylvestre/code-review
class Events(object):
    """
    Listen to HTTP notifications from phabricator and trigger new try jobs
    """
    def __init__(self, cache_root):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        publish = taskcluster_config.secrets["PHABRICATOR"].get(
            "publish", False)

        # Check the redis support is enabled on Heroku
        if heroku.in_dyno():
            assert self.bus.redis_enabled is True, "Need Redis on Heroku"

        community_config = taskcluster_config.secrets.get(
            "taskcluster_community")
        test_selection_enabled = taskcluster_config.secrets.get(
            "test_selection_enabled", False)

        # Run webserver & pulse on web dyno or single instance
        if not heroku.in_dyno() or heroku.in_web_dyno():

            # Create web server
            self.webserver = WebServer(QUEUE_WEB_BUILDS)
            self.webserver.register(self.bus)

            # Create pulse listener
            exchanges = {}
            if taskcluster_config.secrets["autoland_enabled"]:
                logger.info("Autoland ingestion is enabled")
                # autoland ingestion
                exchanges[QUEUE_PULSE_AUTOLAND] = [(PULSE_TASK_GROUP_RESOLVED,
                                                    ["#.gecko-level-3.#"])]

            # Create pulse listeners for bugbug test selection task and unit test failures.
            if community_config is not None and test_selection_enabled:
                exchanges[QUEUE_PULSE_TRY_TASK_END] = [
                    (PULSE_TASK_COMPLETED, ["#.gecko-level-1.#"]),
                    (PULSE_TASK_FAILED, ["#.gecko-level-1.#"]),
                    # https://bugzilla.mozilla.org/show_bug.cgi?id=1599863
                    # (
                    #    "exchange/taskcluster-queue/v1/task-exception",
                    #    ["#.gecko-level-1.#"],
                    # ),
                ]

                self.community_pulse = PulseListener(
                    {
                        QUEUE_PULSE_BUGBUG_TEST_SELECT: [(
                            "exchange/taskcluster-queue/v1/task-completed",
                            ["route.project.relman.bugbug.test_select"],
                        )]
                    },
                    taskcluster_config.secrets["communitytc_pulse_user"],
                    taskcluster_config.secrets["communitytc_pulse_password"],
                    "communitytc",
                )
                # Manually register to set queue as redis
                self.community_pulse.bus = self.bus
                self.bus.add_queue(QUEUE_PULSE_BUGBUG_TEST_SELECT, redis=True)
                self.bus.add_queue(QUEUE_PULSE_TRY_TASK_END, redis=True)
            else:
                self.community_pulse = None

            if exchanges:
                self.pulse = PulseListener(
                    exchanges,
                    taskcluster_config.secrets["pulse_user"],
                    taskcluster_config.secrets["pulse_password"],
                )
                # Manually register to set queue as redis
                self.pulse.bus = self.bus
                self.bus.add_queue(QUEUE_PULSE_AUTOLAND, redis=True)
            else:
                self.pulse = None

        else:
            self.bugbug_utils = None
            self.webserver = None
            self.pulse = None
            self.community_pulse = None
            logger.info("Skipping webserver, bugbug and pulse consumers")

            # Register queues for workers
            self.bus.add_queue(QUEUE_PULSE_AUTOLAND, redis=True)
            self.bus.add_queue(QUEUE_PULSE_BUGBUG_TEST_SELECT, redis=True)
            self.bus.add_queue(QUEUE_PULSE_TRY_TASK_END, redis=True)
            self.bus.add_queue(QUEUE_WEB_BUILDS, redis=True)

        # Run work processes on worker dyno or single instance
        if not heroku.in_dyno() or heroku.in_worker_dyno():
            self.workflow = CodeReview(
                api_key=taskcluster_config.secrets["PHABRICATOR"]["api_key"],
                url=taskcluster_config.secrets["PHABRICATOR"]["url"],
                publish=publish,
                user_blacklist=taskcluster_config.secrets["user_blacklist"],
            )
            self.workflow.register(self.bus)

            # Build mercurial worker and queue
            self.mercurial = MercurialWorker(
                QUEUE_MERCURIAL,
                QUEUE_MERCURIAL_APPLIED,
                repositories=self.workflow.get_repositories(
                    taskcluster_config.secrets["repositories"],
                    cache_root,
                    default_ssh_key=taskcluster_config.secrets["ssh_key"],
                ),
            )
            self.mercurial.register(self.bus)

            # Setup monitoring for newly created tasks
            self.monitoring = Monitoring(
                taskcluster_config,
                QUEUE_MONITORING,
                taskcluster_config.secrets["admins"],
                MONITORING_PERIOD,
            )
            self.monitoring.register(self.bus)

            # Setup monitoring for newly created community tasks
            if community_config is not None:
                self.community_monitoring = Monitoring(
                    community_taskcluster_config,
                    QUEUE_MONITORING_COMMUNITY,
                    taskcluster_config.secrets["admins"],
                    MONITORING_PERIOD,
                )
                self.community_monitoring.register(self.bus)
            else:
                self.community_monitoring = None

            self.bugbug_utils = BugbugUtils(self.workflow.api)
            self.bugbug_utils.register(self.bus)
        else:
            self.workflow = None
            self.mercurial = None
            self.monitoring = None
            self.community_monitoring = None
            self.bugbug_utils = None
            logger.info("Skipping workers consumers")

    def run(self):
        consumers = []

        # Code review main workflow
        if self.workflow:
            consumers += [
                # Process Phabricator build received from webserver
                self.bus.run(self.workflow.process_build,
                             QUEUE_WEB_BUILDS,
                             sequential=False),
                # Publish results on Phabricator
                self.bus.run(
                    self.workflow.publish_results,
                    QUEUE_PHABRICATOR_RESULTS,
                    sequential=False,
                ),
                # Trigger autoland tasks
                self.bus.run(
                    self.workflow.trigger_autoland,
                    QUEUE_PULSE_AUTOLAND,
                    sequential=False,
                ),
                # Send to phabricator results publication for normal processing and to bugbug for further analysis
                self.bus.dispatch(
                    QUEUE_MERCURIAL_APPLIED,
                    [QUEUE_PHABRICATOR_RESULTS, QUEUE_BUGBUG_TRY_PUSH],
                ),
            ]

        if self.bugbug_utils:
            consumers += [
                self.bus.run(self.bugbug_utils.process_build,
                             QUEUE_BUGBUG,
                             sequential=False),
                self.bus.run(
                    self.bugbug_utils.process_push,
                    QUEUE_BUGBUG_TRY_PUSH,
                    sequential=False,
                ),
                self.bus.run(
                    self.bugbug_utils.got_try_task_end,
                    QUEUE_PULSE_TRY_TASK_END,
                    sequential=False,
                ),
                self.bus.run(
                    self.bugbug_utils.got_bugbug_test_select_end,
                    QUEUE_PULSE_BUGBUG_TEST_SELECT,
                    sequential=False,
                ),
            ]

        # Add mercurial task
        if self.mercurial:
            consumers.append(self.mercurial.run())

        # Add monitoring task
        if self.monitoring:
            consumers.append(self.monitoring.run())

        # Add community monitoring task
        if self.community_monitoring:
            consumers.append(self.community_monitoring.run())

        # Add pulse listener for task results.
        if self.pulse:
            consumers.append(self.pulse.run())

        # Add communitytc pulse listener for test selection results.
        if self.community_pulse:
            consumers.append(self.community_pulse.run())

        # Start the web server in its own process
        if self.webserver:
            self.webserver.start()

        if consumers:
            # Run all tasks concurrently
            run_tasks(consumers)
        else:
            # Keep the web server process running
            asyncio.get_event_loop().run_forever()

        # Make sure any pending task is run.
        run_tasks(asyncio.Task.all_tasks())

        # Stop the webserver when other async processes are stopped
        if self.webserver:
            self.webserver.stop()
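
run_tasks(consumers) drives all the consumer coroutines on one event loop until they complete. A minimal sketch of such a helper (an assumption for illustration, not the real libmozevent implementation), using the same get_event_loop style as the run() method above:

import asyncio


def run_tasks(awaitables):
    # Run every consumer concurrently and block until they all complete.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.gather(*awaitables))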
Code Example #12
File: workflow.py Project: Daggron/code-review
class Events(object):
    """
    Listen to HTTP notifications from phabricator and trigger new try jobs
    """
    def __init__(self, cache_root):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        self.workflow = CodeReview(
            api_key=taskcluster_config.secrets["PHABRICATOR"]["api_key"],
            url=taskcluster_config.secrets["PHABRICATOR"]["url"],
            publish=taskcluster_config.secrets["PHABRICATOR"].get(
                "publish", False),
            risk_analysis_reviewers=taskcluster_config.secrets.get(
                "risk_analysis_reviewers", []),
            community_config=taskcluster_config.secrets.get(
                "taskcluster_community"),
        )
        self.workflow.register(self.bus)

        # Build mercurial worker and queue
        self.mercurial = MercurialWorker(
            QUEUE_MERCURIAL,
            QUEUE_PHABRICATOR_RESULTS,
            repositories=self.workflow.get_repositories(
                taskcluster_config.secrets["repositories"], cache_root),
        )
        self.mercurial.register(self.bus)

        # Create web server
        self.webserver = WebServer(QUEUE_WEB_BUILDS)
        self.webserver.register(self.bus)

        # Setup monitoring for newly created tasks
        self.monitoring = Monitoring(QUEUE_MONITORING,
                                     taskcluster_config.secrets["admins"],
                                     MONITORING_PERIOD)
        self.monitoring.register(self.bus)

    def run(self):
        consumers = [
            # Code review main workflow
            self.workflow.run(),
            # Add mercurial task
            self.mercurial.run(),
            # Add monitoring task
            self.monitoring.run(),
        ]

        # Publish results on Phabricator
        if self.workflow.publish:
            consumers.append(
                self.bus.run(self.workflow.publish_results,
                             QUEUE_PHABRICATOR_RESULTS))

        # Start the web server in its own process
        self.webserver.start()

        # Run all tasks concurrently
        run_tasks(consumers)

        # Stop the webserver when other async processes are stopped
        self.webserver.stop()