Example #1
    def __init__(self):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        # Build code coverage workflow
        self.workflow = CodeCoverage(
            taskcluster_config.secrets["hook_id"],
            taskcluster_config.secrets["hook_group_id"],
            self.bus,
        )

        # Setup monitoring for newly created tasks
        self.monitoring = Monitoring(
            taskcluster_config,
            QUEUE_MONITORING,
            taskcluster_config.secrets["admins"],
            7 * 3600,
        )
        self.monitoring.register(self.bus)

        # Create pulse listener for code coverage
        self.pulse = PulseListener(
            {
                QUEUE_PULSE: [
                    ("exchange/taskcluster-queue/v1/task-group-resolved", ["#"])
                ]
            },
            taskcluster_config.secrets["pulse_user"],
            taskcluster_config.secrets["pulse_password"],
        )
        self.pulse.register(self.bus)
Example #2
class Events(object):
    """
    Listen to pulse events and trigger new code coverage tasks
    """

    def __init__(self):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        # Build code coverage workflow
        self.workflow = CodeCoverage(
            taskcluster_config.secrets["hook_id"],
            taskcluster_config.secrets["hook_group_id"],
            self.bus,
        )

        # Setup monitoring for newly created tasks
        self.monitoring = Monitoring(
            taskcluster_config,
            QUEUE_MONITORING,
            taskcluster_config.secrets["admins"],
            7 * 3600,
        )
        self.monitoring.register(self.bus)

        # Create pulse listener for code coverage
        self.pulse = PulseListener(
            {
                QUEUE_PULSE: [
                    ("exchange/taskcluster-queue/v1/task-group-resolved", ["#"])
                ]
            },
            taskcluster_config.secrets["pulse_user"],
            taskcluster_config.secrets["pulse_password"],
        )
        self.pulse.register(self.bus)

    def run(self):

        consumers = [
            # Code coverage main workflow
            self.workflow.run(),
            # Add monitoring task
            self.monitoring.run(),
            # Add pulse task
            self.pulse.run(),
        ]

        # Run all tasks concurrently
        run_tasks(consumers)
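
The examples above hand their consumer coroutines to a run_tasks helper that is not shown on this page. A minimal sketch of what such a helper could look like, assuming it simply gathers the awaitables on the current event loop (an illustrative assumption, not the project's actual implementation):

import asyncio


def run_tasks(awaitables):
    # Illustrative assumption: gather every consumer coroutine and block
    # until they all complete. The real helper lives in the project's event
    # library and may add error handling and cancellation on top of this.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.gather(*awaitables))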
Example #3
    def __init__(self, cache_root):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        self.workflow = CodeReview(
            api_key=taskcluster_config.secrets["PHABRICATOR"]["api_key"],
            url=taskcluster_config.secrets["PHABRICATOR"]["url"],
            publish=taskcluster_config.secrets["PHABRICATOR"].get(
                "publish", False),
            risk_analysis_reviewers=taskcluster_config.secrets.get(
                "risk_analysis_reviewers", []),
            community_config=taskcluster_config.secrets.get(
                "taskcluster_community"),
            user_blacklist=taskcluster_config.secrets["user_blacklist"],
        )
        self.workflow.register(self.bus)

        # Build mercurial worker and queue
        self.mercurial = MercurialWorker(
            QUEUE_MERCURIAL,
            QUEUE_PHABRICATOR_RESULTS,
            repositories=self.workflow.get_repositories(
                taskcluster_config.secrets["repositories"], cache_root),
        )
        self.mercurial.register(self.bus)

        # Create web server
        self.webserver = WebServer(QUEUE_WEB_BUILDS)
        self.webserver.register(self.bus)

        # Setup monitoring for newly created tasks
        self.monitoring = Monitoring(QUEUE_MONITORING,
                                     taskcluster_config.secrets["admins"],
                                     MONITORING_PERIOD)
        self.monitoring.register(self.bus)

        # Create pulse listener for unit test failures
        if self.workflow.publish:
            self.pulse = PulseListener(
                QUEUE_PULSE,
                "exchange/taskcluster-queue/v1/task-completed",
                "*.*.gecko-level-3._",
                taskcluster_config.secrets["pulse_user"],
                taskcluster_config.secrets["pulse_password"],
            )
            self.pulse.register(self.bus)
Example #4
def test_bugbug_routes():
    """
    Test the bugbug routes match our expected configuration
    """
    config = {
        "bugbug_firefox": [
            ("exchange/taskcluster-queue/v1/task-completed", ["#.gecko-level-1.#"]),
            ("exchange/taskcluster-queue/v1/task-failed", ["#.gecko-level-1.#"]),
        ],
        "bugbug_community": [
            (
                "exchange/taskcluster-queue/v1/task-completed",
                ["route.project.relman.bugbug.test_select"],
            )
        ],
    }
    pulse = PulseListener(config, "user", "password")

    assert (
        pulse.find_matching_queues(
            "exchange/taskcluster-queue/v1/task-completed",
            [
                b"primary.fUAKaIdkSF6K1NlOgx7-LA.0.aws.i-0a45c84b1709af6a7.gecko-t.t-win10-64.gecko-level-1.RHY-YSgBQ7KlTAaQ5ZWP5g._",
                b"route.tc-treeherder.v2.try.028980a035fb3e214f7645675a01a52234aad0fe.455891",
            ],
        )
        == {"bugbug_firefox"}
    )

    assert (
        pulse.find_matching_queues(
            "exchange/taskcluster-queue/v1/task-completed",
            [
                b"primary.OhtlizLqT9ah2jVkUL-yvg.0.community-tc-workers-google.8155538221748661937.proj-relman.compute-large.-.OhtlizLqT9ah2jVkUL-yvg._",
                b"[email protected]",
                b"route.notify.irc-channel.#bugbug.on-failed",
                b"route.index.project.relman.bugbug.test_select.latest",
                b"route.index.project.relman.bugbug.test_select.diff.196676",
                b"route.project.relman.bugbug.test_select",
            ],
        )
        == {"bugbug_community"}
    )
Example #5
def test_matching_routes():
    """
    Test the queue detection for a pulse message
    """
    config = {
        "queue_A": [("exchange/x", ["#"])],
        "queue_B": [("exchange/x", ["prefix.#"]), ("exchange/y", ["#-gecko-#"])],
    }
    pulse = PulseListener(config, "user", "password")

    # Simple test case
    assert pulse.find_matching_queues("exchange/x", [b"whatever"]) == {"queue_A"}

    # Bad exchange
    assert not pulse.find_matching_queues("exchange/YY", [b"whatever"])

    # get A and B
    assert pulse.find_matching_queues("exchange/x", [b"prefix.sometask.XYZ"]) == {
        "queue_A",
        "queue_B",
    }

    # Only gecko
    assert pulse.find_matching_queues("exchange/y", [b"something-gecko-123"]) == {
        "queue_B"
    }
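
The routing-key patterns in the two tests above follow AMQP-style topic syntax: "#" matches any sequence of characters and "*" matches a single dot-separated word. A hypothetical helper illustrating that matching rule (not the actual PulseListener.find_matching_queues implementation):

import re


def route_matches(pattern, route):
    # Hypothetical illustration of the matching rule assumed by the tests:
    # "#" matches any sequence of characters, "*" a single dot-separated
    # word, and everything else is literal.
    regex = "".join(
        ".*" if char == "#" else "[^.]+" if char == "*" else re.escape(char)
        for char in pattern
    )
    return re.fullmatch(regex, route) is not None


assert route_matches("#", "whatever")
assert route_matches("prefix.#", "prefix.sometask.XYZ")
assert route_matches("#-gecko-#", "something-gecko-123")
assert not route_matches("prefix.#", "other.task")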
Example #6
    def __init__(self, cache_root):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        publish = taskcluster_config.secrets["PHABRICATOR"].get(
            "publish", False)

        # Check that Redis support is enabled on Heroku
        if heroku.in_dyno():
            assert self.bus.redis_enabled is True, "Need Redis on Heroku"

        community_config = taskcluster_config.secrets.get(
            "taskcluster_community")
        test_selection_enabled = taskcluster_config.secrets.get(
            "test_selection_enabled", False)

        # Run webserver & pulse on web dyno or single instance
        if not heroku.in_dyno() or heroku.in_web_dyno():

            # Create web server
            self.webserver = WebServer(QUEUE_WEB_BUILDS)
            self.webserver.register(self.bus)

            # Create pulse listener
            exchanges = {}
            if taskcluster_config.secrets["autoland_enabled"]:
                logger.info("Autoland ingestion is enabled")
                # autoland ingestion
                exchanges[QUEUE_PULSE_AUTOLAND] = [(PULSE_TASK_GROUP_RESOLVED,
                                                    ["#.gecko-level-3.#"])]

            # Create pulse listeners for bugbug test selection task and unit test failures.
            if community_config is not None and test_selection_enabled:
                exchanges[QUEUE_PULSE_TRY_TASK_END] = [
                    (PULSE_TASK_COMPLETED, ["#.gecko-level-1.#"]),
                    (PULSE_TASK_FAILED, ["#.gecko-level-1.#"]),
                    # https://bugzilla.mozilla.org/show_bug.cgi?id=1599863
                    # (
                    #    "exchange/taskcluster-queue/v1/task-exception",
                    #    ["#.gecko-level-1.#"],
                    # ),
                ]

                self.community_pulse = PulseListener(
                    {
                        QUEUE_PULSE_BUGBUG_TEST_SELECT: [(
                            "exchange/taskcluster-queue/v1/task-completed",
                            ["route.project.relman.bugbug.test_select"],
                        )]
                    },
                    taskcluster_config.secrets["communitytc_pulse_user"],
                    taskcluster_config.secrets["communitytc_pulse_password"],
                    "communitytc",
                )
                # Manually register to set queue as redis
                self.community_pulse.bus = self.bus
                self.bus.add_queue(QUEUE_PULSE_BUGBUG_TEST_SELECT, redis=True)
                self.bus.add_queue(QUEUE_PULSE_TRY_TASK_END, redis=True)
            else:
                self.community_pulse = None

            if exchanges:
                self.pulse = PulseListener(
                    exchanges,
                    taskcluster_config.secrets["pulse_user"],
                    taskcluster_config.secrets["pulse_password"],
                )
                # Manually register to set queue as redis
                self.pulse.bus = self.bus
                self.bus.add_queue(QUEUE_PULSE_AUTOLAND, redis=True)
            else:
                self.pulse = None

        else:
            self.bugbug_utils = None
            self.webserver = None
            self.pulse = None
            self.community_pulse = None
            logger.info("Skipping webserver, bugbug and pulse consumers")

            # Register queues for workers
            self.bus.add_queue(QUEUE_PULSE_AUTOLAND, redis=True)
            self.bus.add_queue(QUEUE_PULSE_BUGBUG_TEST_SELECT, redis=True)
            self.bus.add_queue(QUEUE_PULSE_TRY_TASK_END, redis=True)
            self.bus.add_queue(QUEUE_WEB_BUILDS, redis=True)

        # Run work processes on worker dyno or single instance
        if not heroku.in_dyno() or heroku.in_worker_dyno():
            self.workflow = CodeReview(
                api_key=taskcluster_config.secrets["PHABRICATOR"]["api_key"],
                url=taskcluster_config.secrets["PHABRICATOR"]["url"],
                publish=publish,
                user_blacklist=taskcluster_config.secrets["user_blacklist"],
            )
            self.workflow.register(self.bus)

            # Build mercurial worker and queue
            self.mercurial = MercurialWorker(
                QUEUE_MERCURIAL,
                QUEUE_MERCURIAL_APPLIED,
                repositories=self.workflow.get_repositories(
                    taskcluster_config.secrets["repositories"],
                    cache_root,
                    default_ssh_key=taskcluster_config.secrets["ssh_key"],
                ),
            )
            self.mercurial.register(self.bus)

            # Setup monitoring for newly created tasks
            self.monitoring = Monitoring(
                taskcluster_config,
                QUEUE_MONITORING,
                taskcluster_config.secrets["admins"],
                MONITORING_PERIOD,
            )
            self.monitoring.register(self.bus)

            # Setup monitoring for newly created community tasks
            if community_config is not None:
                self.community_monitoring = Monitoring(
                    community_taskcluster_config,
                    QUEUE_MONITORING_COMMUNITY,
                    taskcluster_config.secrets["admins"],
                    MONITORING_PERIOD,
                )
                self.community_monitoring.register(self.bus)
            else:
                self.community_monitoring = None

            self.bugbug_utils = BugbugUtils(self.workflow.api)
            self.bugbug_utils.register(self.bus)
        else:
            self.workflow = None
            self.mercurial = None
            self.monitoring = None
            self.community_monitoring = None
            self.bugbug_utils = None
            logger.info("Skipping workers consumers")
Example #7
class Events(object):
    """
    Listen to HTTP notifications from phabricator and trigger new try jobs
    """
    def __init__(self, cache_root):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        publish = taskcluster_config.secrets["PHABRICATOR"].get(
            "publish", False)

        # Check that Redis support is enabled on Heroku
        if heroku.in_dyno():
            assert self.bus.redis_enabled is True, "Need Redis on Heroku"

        community_config = taskcluster_config.secrets.get(
            "taskcluster_community")
        test_selection_enabled = taskcluster_config.secrets.get(
            "test_selection_enabled", False)

        # Run webserver & pulse on web dyno or single instance
        if not heroku.in_dyno() or heroku.in_web_dyno():

            # Create web server
            self.webserver = WebServer(QUEUE_WEB_BUILDS)
            self.webserver.register(self.bus)

            # Create pulse listener
            exchanges = {}
            if taskcluster_config.secrets["autoland_enabled"]:
                logger.info("Autoland ingestion is enabled")
                # autoland ingestion
                exchanges[QUEUE_PULSE_AUTOLAND] = [(PULSE_TASK_GROUP_RESOLVED,
                                                    ["#.gecko-level-3.#"])]

            # Create pulse listeners for bugbug test selection task and unit test failures.
            if community_config is not None and test_selection_enabled:
                exchanges[QUEUE_PULSE_TRY_TASK_END] = [
                    (PULSE_TASK_COMPLETED, ["#.gecko-level-1.#"]),
                    (PULSE_TASK_FAILED, ["#.gecko-level-1.#"]),
                    # https://bugzilla.mozilla.org/show_bug.cgi?id=1599863
                    # (
                    #    "exchange/taskcluster-queue/v1/task-exception",
                    #    ["#.gecko-level-1.#"],
                    # ),
                ]

                self.community_pulse = PulseListener(
                    {
                        QUEUE_PULSE_BUGBUG_TEST_SELECT: [(
                            "exchange/taskcluster-queue/v1/task-completed",
                            ["route.project.relman.bugbug.test_select"],
                        )]
                    },
                    taskcluster_config.secrets["communitytc_pulse_user"],
                    taskcluster_config.secrets["communitytc_pulse_password"],
                    "communitytc",
                )
                # Manually register to set queue as redis
                self.community_pulse.bus = self.bus
                self.bus.add_queue(QUEUE_PULSE_BUGBUG_TEST_SELECT, redis=True)
                self.bus.add_queue(QUEUE_PULSE_TRY_TASK_END, redis=True)
            else:
                self.community_pulse = None

            if exchanges:
                self.pulse = PulseListener(
                    exchanges,
                    taskcluster_config.secrets["pulse_user"],
                    taskcluster_config.secrets["pulse_password"],
                )
                # Manually register to set queue as redis
                self.pulse.bus = self.bus
                self.bus.add_queue(QUEUE_PULSE_AUTOLAND, redis=True)
            else:
                self.pulse = None

        else:
            self.bugbug_utils = None
            self.webserver = None
            self.pulse = None
            self.community_pulse = None
            logger.info("Skipping webserver, bugbug and pulse consumers")

            # Register queues for workers
            self.bus.add_queue(QUEUE_PULSE_AUTOLAND, redis=True)
            self.bus.add_queue(QUEUE_PULSE_BUGBUG_TEST_SELECT, redis=True)
            self.bus.add_queue(QUEUE_PULSE_TRY_TASK_END, redis=True)
            self.bus.add_queue(QUEUE_WEB_BUILDS, redis=True)

        # Run work processes on worker dyno or single instance
        if not heroku.in_dyno() or heroku.in_worker_dyno():
            self.workflow = CodeReview(
                api_key=taskcluster_config.secrets["PHABRICATOR"]["api_key"],
                url=taskcluster_config.secrets["PHABRICATOR"]["url"],
                publish=publish,
                user_blacklist=taskcluster_config.secrets["user_blacklist"],
            )
            self.workflow.register(self.bus)

            # Build mercurial worker and queue
            self.mercurial = MercurialWorker(
                QUEUE_MERCURIAL,
                QUEUE_MERCURIAL_APPLIED,
                repositories=self.workflow.get_repositories(
                    taskcluster_config.secrets["repositories"],
                    cache_root,
                    default_ssh_key=taskcluster_config.secrets["ssh_key"],
                ),
            )
            self.mercurial.register(self.bus)

            # Setup monitoring for newly created tasks
            self.monitoring = Monitoring(
                taskcluster_config,
                QUEUE_MONITORING,
                taskcluster_config.secrets["admins"],
                MONITORING_PERIOD,
            )
            self.monitoring.register(self.bus)

            # Setup monitoring for newly created community tasks
            if community_config is not None:
                self.community_monitoring = Monitoring(
                    community_taskcluster_config,
                    QUEUE_MONITORING_COMMUNITY,
                    taskcluster_config.secrets["admins"],
                    MONITORING_PERIOD,
                )
                self.community_monitoring.register(self.bus)
            else:
                self.community_monitoring = None

            self.bugbug_utils = BugbugUtils(self.workflow.api)
            self.bugbug_utils.register(self.bus)
        else:
            self.workflow = None
            self.mercurial = None
            self.monitoring = None
            self.community_monitoring = None
            self.bugbug_utils = None
            logger.info("Skipping workers consumers")

    def run(self):
        consumers = []

        # Code review main workflow
        if self.workflow:
            consumers += [
                # Process Phabricator build received from webserver
                self.bus.run(self.workflow.process_build,
                             QUEUE_WEB_BUILDS,
                             sequential=False),
                # Publish results on Phabricator
                self.bus.run(
                    self.workflow.publish_results,
                    QUEUE_PHABRICATOR_RESULTS,
                    sequential=False,
                ),
                # Trigger autoland tasks
                self.bus.run(
                    self.workflow.trigger_autoland,
                    QUEUE_PULSE_AUTOLAND,
                    sequential=False,
                ),
                # Send to phabricator results publication for normal processing and to bugbug for further analysis
                self.bus.dispatch(
                    QUEUE_MERCURIAL_APPLIED,
                    [QUEUE_PHABRICATOR_RESULTS, QUEUE_BUGBUG_TRY_PUSH],
                ),
            ]

        if self.bugbug_utils:
            consumers += [
                self.bus.run(self.bugbug_utils.process_build,
                             QUEUE_BUGBUG,
                             sequential=False),
                self.bus.run(
                    self.bugbug_utils.process_push,
                    QUEUE_BUGBUG_TRY_PUSH,
                    sequential=False,
                ),
                self.bus.run(
                    self.bugbug_utils.got_try_task_end,
                    QUEUE_PULSE_TRY_TASK_END,
                    sequential=False,
                ),
                self.bus.run(
                    self.bugbug_utils.got_bugbug_test_select_end,
                    QUEUE_PULSE_BUGBUG_TEST_SELECT,
                    sequential=False,
                ),
            ]

        # Add mercurial task
        if self.mercurial:
            consumers.append(self.mercurial.run())

        # Add monitoring task
        if self.monitoring:
            consumers.append(self.monitoring.run())

        # Add community monitoring task
        if self.community_monitoring:
            consumers.append(self.community_monitoring.run())

        # Add pulse listener for task results.
        if self.pulse:
            consumers.append(self.pulse.run())

        # Add communitytc pulse listener for test selection results.
        if self.community_pulse:
            consumers.append(self.community_pulse.run())

        # Start the web server in its own process
        if self.webserver:
            self.webserver.start()

        if consumers:
            # Run all tasks concurrently
            run_tasks(consumers)
        else:
            # Keep the web server process running
            asyncio.get_event_loop().run_forever()

        # Make sure any pending task is run.
        run_tasks(asyncio.Task.all_tasks())

        # Stop the webserver when other async processes are stopped
        if self.webserver:
            self.webserver.stop()
Example #8
class Events(object):
    """
    Listen to HTTP notifications from phabricator and trigger new try jobs
    """
    def __init__(self, cache_root):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        self.workflow = CodeReview(
            api_key=taskcluster_config.secrets["PHABRICATOR"]["api_key"],
            url=taskcluster_config.secrets["PHABRICATOR"]["url"],
            publish=taskcluster_config.secrets["PHABRICATOR"].get(
                "publish", False),
            risk_analysis_reviewers=taskcluster_config.secrets.get(
                "risk_analysis_reviewers", []),
            community_config=taskcluster_config.secrets.get(
                "taskcluster_community"),
            user_blacklist=taskcluster_config.secrets["user_blacklist"],
        )
        self.workflow.register(self.bus)

        # Build mercurial worker and queue
        self.mercurial = MercurialWorker(
            QUEUE_MERCURIAL,
            QUEUE_PHABRICATOR_RESULTS,
            repositories=self.workflow.get_repositories(
                taskcluster_config.secrets["repositories"], cache_root),
        )
        self.mercurial.register(self.bus)

        # Create web server
        self.webserver = WebServer(QUEUE_WEB_BUILDS)
        self.webserver.register(self.bus)

        # Setup monitoring for newly created tasks
        self.monitoring = Monitoring(QUEUE_MONITORING,
                                     taskcluster_config.secrets["admins"],
                                     MONITORING_PERIOD)
        self.monitoring.register(self.bus)

        # Create pulse listener for unit test failures
        if self.workflow.publish:
            self.pulse = PulseListener(
                QUEUE_PULSE,
                "exchange/taskcluster-queue/v1/task-completed",
                "*.*.gecko-level-3._",
                taskcluster_config.secrets["pulse_user"],
                taskcluster_config.secrets["pulse_password"],
            )
            self.pulse.register(self.bus)

    def run(self):
        consumers = [
            # Code review main workflow
            self.workflow.run(),
            # Add mercurial task
            self.mercurial.run(),
            # Add monitoring task
            self.monitoring.run(),
        ]

        # Publish results on Phabricator
        if self.workflow.publish:
            consumers.append(
                self.bus.run(self.workflow.publish_results,
                             QUEUE_PHABRICATOR_RESULTS))

            consumers.append(self.pulse.run())
            consumers.append(
                self.bus.run(self.workflow.parse_pulse, QUEUE_PULSE))

        # Start the web server in its own process
        self.webserver.start()

        # Run all tasks concurrently
        run_tasks(consumers)

        # Stop the webserver when other async processes are stopped
        self.webserver.stop()
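
A hypothetical entry point for the Events class above, assuming secrets have already been loaded into taskcluster_config by a CLI bootstrap; the argument name and default cache path are illustrative assumptions:

import argparse


def main():
    # Hypothetical bootstrap; the real project wires this up in its own CLI.
    parser = argparse.ArgumentParser(description="Run the code review events listener")
    parser.add_argument("--cache-root", default="/tmp/code-review",
                        help="Directory used for the mercurial repository cache")
    args = parser.parse_args()

    # Build the shared message bus, workers and listeners, then run them.
    events = Events(cache_root=args.cache_root)
    events.run()


if __name__ == "__main__":
    main()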