Ejemplo n.º 1
0
 async def inner():
     """A second locker contending for an already-locked workflow must raise.

     NOTE(review): appears to be a test-case body; `self` is presumably the
     enclosing TestCase — confirm against the surrounding file.
     """
     async with PgRenderLocker() as locker1:
         async with PgRenderLocker() as locker2:
             # locker1 holds the render lock on workflow 1...
             async with locker1.render_lock(1) as lock1:
                 with self.assertRaises(WorkflowAlreadyLocked):
                     # ...so locker2's attempt on the SAME workflow id
                     # must fail fast instead of blocking.
                     async with locker2.render_lock(1) as lock2:
                         await lock2.stall_others()
                 # Original holder still works after the failed contender.
                 await lock1.stall_others()
Ejemplo n.º 2
0
 async def inner():
     """Locks on distinct workflow ids do not contend across lockers."""
     async with PgRenderLocker() as first_locker:
         async with PgRenderLocker() as second_locker:
             async with first_locker.render_lock(1) as outer_lock:
                 # do not raise WorkflowAlreadyLocked here: it's a
                 # different workflow
                 async with second_locker.render_lock(2) as other_lock:
                     await other_lock.stall_others()
                 await outer_lock.stall_others()
Ejemplo n.º 3
0
        async def inner():
            """Releasing a render lock makes the workflow lockable again."""
            async with PgRenderLocker() as locker_a:
                async with PgRenderLocker() as locker_b:
                    async with locker_a.render_lock(1) as held:
                        await held.stall_others()

                    # do not raise WorkflowAlreadyLocked here
                    async with locker_b.render_lock(1) as held:
                        await held.stall_others()
Ejemplo n.º 4
0
 async def inner():
     """A locker recovers after WorkflowAlreadyLocked and can lock again.

     NOTE(review): appears to be a test-case body; `self` is presumably the
     enclosing TestCase — confirm against the surrounding file.
     """
     async with PgRenderLocker() as locker1:
         async with PgRenderLocker() as locker2:
             async with locker1.render_lock(1) as lock1:
                 # "break" locker2: make it raise an exception
                 with self.assertRaises(WorkflowAlreadyLocked):
                     async with locker2.render_lock(1) as lock2:
                         await lock2.stall_others()
                 await lock1.stall_others()
             # now locker2 should be reset to its original state --
             # meaning it can acquire a lock just fine
             async with locker2.render_lock(1) as lock2:
                 await lock2.stall_others()
Ejemplo n.º 5
0
 async def inner():
     """Run five concurrent use_lock() calls against one shared locker."""
     async with PgRenderLocker() as locker:
         # NOTE(review): passing bare coroutines to asyncio.wait() is
         # deprecated since Python 3.8 and removed in newer versions
         # (tasks are expected) — confirm the target Python version.
         done, _ = await asyncio.wait(
             {use_lock(locker, i) for i in range(5)}
         )
         for task in done:
             task.result()  # throw error, if any
Ejemplo n.º 6
0
async def main():
    """Queue fetches for users' "automatic updates".

    Run this forever, as a singleton daemon.

    Raises on RabbitMQ failure, and raises RuntimeError if the connection
    ever closes cleanly (which should never happen for this daemon).
    """
    from .autoupdate import queue_fetches  # AFTER django.setup()
    from cjwstate import rabbitmq
    from cjwstate.rabbitmq.connection import open_global_connection

    # Hold one render locker and one RabbitMQ connection for the daemon's
    # entire lifetime.
    async with PgRenderLocker() as pg_render_locker, open_global_connection(
    ) as rabbitmq_connection:
        await rabbitmq_connection.exchange_declare(rabbitmq.GroupsExchange)
        await rabbitmq_connection.queue_declare(rabbitmq.Fetch, durable=True)

        # Loop until the connection's `closed` future resolves (RabbitMQ
        # died or disconnected).
        while not rabbitmq_connection.closed.done():
            t1 = time.time()  # wall-clock start of this pass

            await benchmark(logger, queue_fetches(pg_render_locker),
                            "queue_fetches()")

            # Try to fetch at the beginning of each interval. Canonical example
            # is FetchInterval=60: queue all our fetches as soon as the minute
            # hand of the clock moves.

            next_t = (math.floor(t1 / FetchInterval) + 1) * FetchInterval
            delay = max(0, next_t - time.time())
            # Sleep ... or die, if RabbitMQ dies.
            # NOTE(review): asyncio.wait() on a bare future is deprecated
            # since Python 3.8 (tasks expected) — confirm target version.
            await asyncio.wait({rabbitmq_connection.closed},
                               timeout=delay)  # raise
        # Connection closed: propagate its failure, if any...
        await rabbitmq_connection.closed  # raise on failure
        # Now, raise on _success_! We should never get here
        raise RuntimeError(
            "RabbitMQ closed successfully. That's strange because cron never closes it."
        )
Ejemplo n.º 7
0
        async def inner():
            """One locker may hold distinct workflows at once and re-lock
            a workflow it has already released."""
            async with PgRenderLocker() as locker:
                async with locker.render_lock(1) as outer:
                    # Nested lock on a different workflow: no conflict.
                    async with locker.render_lock(2) as nested:
                        await nested.stall_others()

                    # Workflow 2 was released above, so this succeeds.
                    async with locker.render_lock(2) as nested:
                        await nested.stall_others()
                    await outer.stall_others()
Ejemplo n.º 8
0
 async def inner():
     """A contender for a stalled workflow waits instead of failing.

     NOTE(review): appears to be a test-case body; `self` is presumably the
     enclosing TestCase — confirm against the surrounding file. The exact
     create_task/sleep(0) ordering below is load-bearing.
     """
     async with PgRenderLocker() as locker1:
         async with PgRenderLocker() as locker2:
             last_line = 'the initial value'
             async with locker1.render_lock(1) as lock1:
                 await lock1.stall_others()
                 async def stalling_op():
                     # Records progress so the test can observe where
                     # this coroutine is blocked.
                     nonlocal last_line
                     async with locker2.render_lock(1) as lock2:
                         last_line = 'entered stalling_op'
                         await lock2.stall_others()
                     last_line = 'exited stalling_op'
                 task = asyncio.create_task(stalling_op())
                 # Yield once so stalling_op() gets a chance to start.
                 await asyncio.sleep(0)
                 # Even though we started stalling_op(), it will stall
                 # rather than acquire a lock.
                 self.assertEqual(last_line, 'the initial value')
             # lock1 released: stalling_op() can now run to completion.
             await task
             self.assertEqual(last_line, 'exited stalling_op')
Ejemplo n.º 9
0
async def main_loop():
    """
    Run fetchers and renderers, forever.
    """
    # Share one render locker across every render handled by this daemon.
    async with PgRenderLocker() as pg_render_locker:

        @rabbitmq.manual_acking_callback
        async def render_callback(message, ack):
            # `ack` is forwarded so handle_render controls acknowledgement.
            return await handle_render(message, ack, pg_render_locker)

        connection = rabbitmq.get_connection()
        connection.declare_queue_consume(rabbitmq.Render, render_callback)
        # Run forever
        # NOTE(review): this relies on the connection's private
        # `_closed_event` attribute — fragile across library upgrades.
        await connection._closed_event.wait()
Ejemplo n.º 10
0
async def queue_fetches_forever():
    """Queue users' automatic fetches once per FetchInterval, forever."""
    async with PgRenderLocker() as pg_render_locker:
        while True:
            started_at = time.time()

            await benchmark(logger, queue_fetches(pg_render_locker),
                            'queue_fetches()')

            # Try to fetch at the beginning of each interval. Canonical example
            # is FetchInterval=60: queue all our fetches as soon as the minute
            # hand of the clock moves.

            next_interval_index = math.floor(started_at / FetchInterval) + 1
            wake_at = next_interval_index * FetchInterval
            await asyncio.sleep(max(0, wake_at - time.time()))
Ejemplo n.º 11
0
async def main():
    """Run fetchers and renderers, forever."""
    # import AFTER django.setup()
    import cjwstate.modules
    from cjworkbench.pg_render_locker import PgRenderLocker
    from cjwstate import rabbitmq
    from cjwstate.rabbitmq.connection import open_global_connection
    from .render import handle_render

    cjwstate.modules.init_module_system()

    # One locker + one RabbitMQ connection for the daemon's lifetime.
    async with PgRenderLocker() as pg_render_locker, open_global_connection() as rabbitmq_connection:
        await rabbitmq_connection.queue_declare(rabbitmq.Render, durable=True)
        await rabbitmq_connection.exchange_declare(rabbitmq.GroupsExchange)
        # Render; ack; render; ack ... forever.
        async with rabbitmq_connection.acking_consumer(rabbitmq.Render) as consumer:
            async for message_bytes in consumer:
                # NOTE(review): presumably messages are msgpack-encoded
                # dicts produced by the queueing side — verify schema there.
                message = msgpack.unpackb(message_bytes)
                # Crash on error, and don't ack.
                await handle_render(message, pg_render_locker)