Example #1
async def test_conversion():
    """
    Test message conversion between 2 queues
    """
    bus = MessageBus()
    bus.add_queue("input")
    bus.add_queue(
        "output",
        maxsize=3)  # limit size to immediately stop execution for unit test
    assert isinstance(bus.queues["input"], asyncio.Queue)
    assert isinstance(bus.queues["output"], asyncio.Queue)
    assert bus.queues["input"].qsize() == 0
    assert bus.queues["output"].qsize() == 0

    await bus.send("input", "test x")
    await bus.send("input", "hello world.")
    await bus.send("output", "lowercase")

    # Convert all strings from the input queue to uppercase
    assert bus.queues["input"].qsize() == 2
    task = asyncio.create_task(
        bus.run(lambda x: x.upper(), "input", ["output"]))

    assert await bus.receive("output") == "lowercase"
    assert await bus.receive("output") == "TEST X"
    assert await bus.receive("output") == "HELLO WORLD."
    task.cancel()
    assert bus.queues["input"].qsize() == 0
    assert bus.queues["output"].qsize() == 0
Example #2
async def test_run_async_parallel():
    """
    Test using run to consume messages from a queue with an async function executed in parallel
    """
    bus = MessageBus()
    bus.add_queue("input")
    bus.add_queue("end", maxsize=1)
    assert isinstance(bus.queues["input"], asyncio.Queue)
    assert bus.queues["input"].qsize() == 0

    await bus.send("input", 0)
    await bus.send("input", 1)
    await bus.send("input", 2)

    assert bus.queues["input"].qsize() == 3

    done = {0: False, 1: False, 2: False}

    async def torun(count):
        await asyncio.sleep(0)

        if count == 0:
            # Wait for 7 seconds; in the meantime other tasks will be scheduled
            # and executed.
            await asyncio.sleep(7)
        elif count == 1:
            pass
        elif count == 2:
            await bus.send("end", count)
        else:
            assert False

        done[count] = True

    task = asyncio.create_task(bus.run(torun, "input", sequential=False))

    await bus.receive("end") == 1
    task.cancel()
    assert bus.queues["input"].qsize() == 0
    assert bus.queues["end"].qsize() == 0
    assert done == {0: False, 1: True, 2: True}
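
The key behaviour checked above is that, with sequential=False, the slow handler for message 0 does not block the handlers for messages 1 and 2. A standalone sketch using plain asyncio tasks (not part of libmozevent) reproduces the same scheduling effect:

import asyncio

done = {0: False, 1: False, 2: False}


async def handler(count):
    if count == 0:
        # Slow handler: stays pending while the others complete
        await asyncio.sleep(7)
    done[count] = True


async def main():
    # Schedule every handler concurrently, mirroring what the test
    # expects from bus.run(..., sequential=False)
    tasks = [asyncio.create_task(handler(i)) for i in range(3)]
    await asyncio.sleep(0.1)
    assert done == {0: False, 1: True, 2: True}
    for task in tasks:
        task.cancel()


asyncio.run(main())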
Example #3
async def test_run_async_without_output_queue():
    """
    Test using run to consume messages from a queue with an async function and no output queue
    """
    bus = MessageBus()
    bus.add_queue("input")
    bus.add_queue("end", maxsize=1)
    assert isinstance(bus.queues["input"], asyncio.Queue)
    assert bus.queues["input"].qsize() == 0

    await bus.send("input", "test x")
    await bus.send("input", "hello world.")

    assert bus.queues["input"].qsize() == 2

    count = 0

    async def torun(payload):
        nonlocal count

        if count == 0:
            assert payload == "test x"
        elif count == 1:
            assert payload == "hello world."
            await bus.send("end", count)
        else:
            assert False

        count += 1

    task = asyncio.create_task(bus.run(torun, "input"))

    await bus.receive("end") == 1
    task.cancel()
    assert bus.queues["input"].qsize() == 0
    assert bus.queues["end"].qsize() == 0
Example #4
class Events(object):
    """
    Listen to HTTP notifications from phabricator and trigger new try jobs
    """
    def __init__(self, cache_root):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        publish = taskcluster_config.secrets["PHABRICATOR"].get(
            "publish", False)

        # Check that Redis support is enabled on Heroku
        if heroku.in_dyno():
            assert self.bus.redis_enabled is True, "Need Redis on Heroku"

        community_config = taskcluster_config.secrets.get(
            "taskcluster_community")
        test_selection_enabled = taskcluster_config.secrets.get(
            "test_selection_enabled", False)

        # Run webserver & pulse on web dyno or single instance
        if not heroku.in_dyno() or heroku.in_web_dyno():

            # Create web server
            self.webserver = WebServer(QUEUE_WEB_BUILDS)
            self.webserver.register(self.bus)

            # Create pulse listener
            exchanges = {}
            if taskcluster_config.secrets["autoland_enabled"]:
                logger.info("Autoland ingestion is enabled")
                # autoland ingestion
                exchanges[QUEUE_PULSE_AUTOLAND] = [(PULSE_TASK_GROUP_RESOLVED,
                                                    ["#.gecko-level-3.#"])]

            # Create pulse listeners for bugbug test selection task and unit test failures.
            if community_config is not None and test_selection_enabled:
                exchanges[QUEUE_PULSE_TRY_TASK_END] = [
                    (PULSE_TASK_COMPLETED, ["#.gecko-level-1.#"]),
                    (PULSE_TASK_FAILED, ["#.gecko-level-1.#"]),
                    # https://bugzilla.mozilla.org/show_bug.cgi?id=1599863
                    # (
                    #    "exchange/taskcluster-queue/v1/task-exception",
                    #    ["#.gecko-level-1.#"],
                    # ),
                ]

                self.community_pulse = PulseListener(
                    {
                        QUEUE_PULSE_BUGBUG_TEST_SELECT: [(
                            "exchange/taskcluster-queue/v1/task-completed",
                            ["route.project.relman.bugbug.test_select"],
                        )]
                    },
                    taskcluster_config.secrets["communitytc_pulse_user"],
                    taskcluster_config.secrets["communitytc_pulse_password"],
                    "communitytc",
                )
                # Manually register to set queue as redis
                self.community_pulse.bus = self.bus
                self.bus.add_queue(QUEUE_PULSE_BUGBUG_TEST_SELECT, redis=True)
                self.bus.add_queue(QUEUE_PULSE_TRY_TASK_END, redis=True)
            else:
                self.community_pulse = None

            if exchanges:
                self.pulse = PulseListener(
                    exchanges,
                    taskcluster_config.secrets["pulse_user"],
                    taskcluster_config.secrets["pulse_password"],
                )
                # Manually register to set queue as redis
                self.pulse.bus = self.bus
                self.bus.add_queue(QUEUE_PULSE_AUTOLAND, redis=True)
            else:
                self.pulse = None

        else:
            self.bugbug_utils = None
            self.webserver = None
            self.pulse = None
            self.community_pulse = None
            logger.info("Skipping webserver, bugbug and pulse consumers")

            # Register queues for workers
            self.bus.add_queue(QUEUE_PULSE_AUTOLAND, redis=True)
            self.bus.add_queue(QUEUE_PULSE_BUGBUG_TEST_SELECT, redis=True)
            self.bus.add_queue(QUEUE_PULSE_TRY_TASK_END, redis=True)
            self.bus.add_queue(QUEUE_WEB_BUILDS, redis=True)

        # Run work processes on worker dyno or single instance
        if not heroku.in_dyno() or heroku.in_worker_dyno():
            self.workflow = CodeReview(
                api_key=taskcluster_config.secrets["PHABRICATOR"]["api_key"],
                url=taskcluster_config.secrets["PHABRICATOR"]["url"],
                publish=publish,
                user_blacklist=taskcluster_config.secrets["user_blacklist"],
            )
            self.workflow.register(self.bus)

            # Build mercurial worker and queue
            self.mercurial = MercurialWorker(
                QUEUE_MERCURIAL,
                QUEUE_MERCURIAL_APPLIED,
                repositories=self.workflow.get_repositories(
                    taskcluster_config.secrets["repositories"],
                    cache_root,
                    default_ssh_key=taskcluster_config.secrets["ssh_key"],
                ),
            )
            self.mercurial.register(self.bus)

            # Setup monitoring for newly created tasks
            self.monitoring = Monitoring(
                taskcluster_config,
                QUEUE_MONITORING,
                taskcluster_config.secrets["admins"],
                MONITORING_PERIOD,
            )
            self.monitoring.register(self.bus)

            # Setup monitoring for newly created community tasks
            if community_config is not None:
                self.community_monitoring = Monitoring(
                    community_taskcluster_config,
                    QUEUE_MONITORING_COMMUNITY,
                    taskcluster_config.secrets["admins"],
                    MONITORING_PERIOD,
                )
                self.community_monitoring.register(self.bus)
            else:
                self.community_monitoring = None

            self.bugbug_utils = BugbugUtils(self.workflow.api)
            self.bugbug_utils.register(self.bus)
        else:
            self.workflow = None
            self.mercurial = None
            self.monitoring = None
            self.community_monitoring = None
            self.bugbug_utils = None
            logger.info("Skipping workers consumers")

    def run(self):
        consumers = []

        # Code review main workflow
        if self.workflow:
            consumers += [
                # Process Phabricator build received from webserver
                self.bus.run(self.workflow.process_build,
                             QUEUE_WEB_BUILDS,
                             sequential=False),
                # Publish results on Phabricator
                self.bus.run(
                    self.workflow.publish_results,
                    QUEUE_PHABRICATOR_RESULTS,
                    sequential=False,
                ),
                # Trigger autoland tasks
                self.bus.run(
                    self.workflow.trigger_autoland,
                    QUEUE_PULSE_AUTOLAND,
                    sequential=False,
                ),
                # Send applied mercurial pushes to Phabricator results publication for normal processing and to bugbug for further analysis
                self.bus.dispatch(
                    QUEUE_MERCURIAL_APPLIED,
                    [QUEUE_PHABRICATOR_RESULTS, QUEUE_BUGBUG_TRY_PUSH],
                ),
            ]

        if self.bugbug_utils:
            consumers += [
                self.bus.run(self.bugbug_utils.process_build,
                             QUEUE_BUGBUG,
                             sequential=False),
                self.bus.run(
                    self.bugbug_utils.process_push,
                    QUEUE_BUGBUG_TRY_PUSH,
                    sequential=False,
                ),
                self.bus.run(
                    self.bugbug_utils.got_try_task_end,
                    QUEUE_PULSE_TRY_TASK_END,
                    sequential=False,
                ),
                self.bus.run(
                    self.bugbug_utils.got_bugbug_test_select_end,
                    QUEUE_PULSE_BUGBUG_TEST_SELECT,
                    sequential=False,
                ),
            ]

        # Add mercurial task
        if self.mercurial:
            consumers.append(self.mercurial.run())

        # Add monitoring task
        if self.monitoring:
            consumers.append(self.monitoring.run())

        # Add community monitoring task
        if self.community_monitoring:
            consumers.append(self.community_monitoring.run())

        # Add pulse listener for task results.
        if self.pulse:
            consumers.append(self.pulse.run())

        # Add communitytc pulse listener for test selection results.
        if self.community_pulse:
            consumers.append(self.community_pulse.run())

        # Start the web server in its own process
        if self.webserver:
            self.webserver.start()

        if consumers:
            # Run all tasks concurrently
            run_tasks(consumers)
        else:
            # Keep the web server process running
            asyncio.get_event_loop().run_forever()

        # Make sure any pending task is run.
        run_tasks(asyncio.Task.all_tasks())

        # Stop the webserver when other async processes are stopped
        if self.webserver:
            self.webserver.stop()
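
run_tasks and the QUEUE_* constants come from the surrounding project and are not shown here. As a rough idea of what such a helper could do, a hypothetical equivalent (an assumption, not the project's actual implementation) would simply gather the consumer coroutines on the event loop:

import asyncio


def run_tasks(awaitables):
    # Hypothetical stand-in for the project's run_tasks helper: run all
    # consumer coroutines concurrently and block until they finish
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.gather(*awaitables))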
Example #5
class Events(object):
    """
    Listen to HTTP notifications from phabricator and trigger new try jobs
    """
    def __init__(self, cache_root):
        # Create message bus shared amongst processes
        self.bus = MessageBus()

        self.workflow = CodeReview(
            api_key=taskcluster_config.secrets["PHABRICATOR"]["api_key"],
            url=taskcluster_config.secrets["PHABRICATOR"]["url"],
            publish=taskcluster_config.secrets["PHABRICATOR"].get(
                "publish", False),
            risk_analysis_reviewers=taskcluster_config.secrets.get(
                "risk_analysis_reviewers", []),
            community_config=taskcluster_config.secrets.get(
                "taskcluster_community"),
        )
        self.workflow.register(self.bus)

        # Build mercurial worker and queue
        self.mercurial = MercurialWorker(
            QUEUE_MERCURIAL,
            QUEUE_PHABRICATOR_RESULTS,
            repositories=self.workflow.get_repositories(
                taskcluster_config.secrets["repositories"], cache_root),
        )
        self.mercurial.register(self.bus)

        # Create web server
        self.webserver = WebServer(QUEUE_WEB_BUILDS)
        self.webserver.register(self.bus)

        # Setup monitoring for newly created tasks
        self.monitoring = Monitoring(QUEUE_MONITORING,
                                     taskcluster_config.secrets["admins"],
                                     MONITORING_PERIOD)
        self.monitoring.register(self.bus)

    def run(self):
        consumers = [
            # Code review main workflow
            self.workflow.run(),
            # Add mercurial task
            self.mercurial.run(),
            # Add monitoring task
            self.monitoring.run(),
        ]

        # Publish results on Phabricator
        if self.workflow.publish:
            consumers.append(
                self.bus.run(self.workflow.publish_results,
                             QUEUE_PHABRICATOR_RESULTS))

        # Start the web server in its own process
        self.webserver.start()

        # Run all tasks concurrently
        run_tasks(consumers)

        # Stop the webserver when other async processes are stopped
        self.webserver.stop()