Ejemplo n.º 1
0
 async def inner():
     """Locking workflow 1 on a second connection must raise WorkflowAlreadyLocked."""
     async with PgLocker() as holder, PgLocker() as contender:
         async with holder.render_lock(1):
             # The other connection sees workflow 1 as already locked.
             with self.assertRaises(WorkflowAlreadyLocked):
                 async with contender.render_lock(1):
                     pass
Ejemplo n.º 2
0
 async def inner():
     """A lock on workflow 1 must not block another connection locking workflow 2."""
     async with PgLocker() as holder, PgLocker() as contender:
         async with holder.render_lock(1):
             # Different workflow id, so no WorkflowAlreadyLocked is expected.
             async with contender.render_lock(2):
                 pass
Ejemplo n.º 3
0
        async def inner():
            """Once a connection releases workflow 1's lock, another may take it."""
            async with PgLocker() as releaser, PgLocker() as acquirer:
                async with releaser.render_lock(1):
                    pass

                # The lock above has been released, so this acquisition must
                # not raise WorkflowAlreadyLocked.
                async with acquirer.render_lock(1):
                    pass
Ejemplo n.º 4
0
 async def inner():
     """Run three concurrent lock users and re-raise any exception they hit.

     `asyncio.wait()` stopped accepting bare coroutines (deprecated in
     Python 3.8, removed in 3.11), so wrap each coroutine in a Future
     before waiting on the set.
     """
     async with PgLocker() as locker:
         tasks = {asyncio.ensure_future(use_lock(locker, i))
                  for i in range(3)}
         done, _ = await asyncio.wait(tasks)
         for task in done:
             task.result()  # throw error, if any
Ejemplo n.º 5
0
        async def inner():
            """One connection may re-acquire a lock it has already released."""
            async with PgLocker() as conn:
                async with conn.render_lock(1):
                    # Take lock 2 nested inside lock 1...
                    async with conn.render_lock(2):
                        pass

                    # ...then take lock 2 again after releasing it, still on
                    # the same connection and still inside lock 1.
                    async with conn.render_lock(2):
                        pass
Ejemplo n.º 6
0
async def queue_fetches_forever():
    """Loop forever, queueing pending fetches once per FetchInterval.

    Each pass is aligned to the start of an interval. Canonical example is
    FetchInterval=60: queue all our fetches as soon as the minute hand of
    the clock moves.
    """
    async with PgLocker() as pg_locker:
        while True:
            started_at = time.time()

            await benchmark(queue_fetches(pg_locker), 'queue_fetches()')

            # Sleep until the next interval boundary. If queueing overran the
            # interval, the delay clamps to 0 and we go again immediately.
            interval_end = (math.floor(started_at / FetchInterval) + 1) \
                * FetchInterval
            await asyncio.sleep(max(0, interval_end - time.time()))
Ejemplo n.º 7
0
async def queue_fetches(pg_locker: PgLocker):
    """
    Queue all pending fetches in RabbitMQ.

    We'll set is_busy=True as we queue them, so we don't send double-fetches.
    """
    wf_modules = await load_pending_wf_modules()

    for workflow_id, wf_module in wf_modules:
        # Don't schedule a fetch if we're currently rendering.
        #
        # This still lets us schedule a fetch if a render is _queued_, so it
        # doesn't solve any races. But it should lower the number of fetches of
        # resource-intensive workflows.
        #
        # Using pg_locker means we can only queue a fetch _between_ renders.
        # The render queue may be non-empty (we aren't testing that); but we're
        # giving the workers a chance to tackle some of the backlog.
        #
        # Keep the try body minimal: only render_lock() is expected to raise
        # WorkflowAlreadyLocked. The original wrapped the whole queueing
        # sequence, which would have silently swallowed that exception even if
        # it leaked from the busy-flag/websocket/rabbitmq calls after work had
        # partially completed.
        try:
            async with pg_locker.render_lock(workflow_id):
                # At this moment, the workflow isn't rendering. Let's pass
                # through and queue the fetch.
                pass
        except WorkflowAlreadyLocked:
            # Don't queue a fetch. We'll revisit this WfModule next time we
            # query for pending fetches.
            continue

        logger.info('Queue fetch of wf_module(%d, %d)', workflow_id,
                    wf_module.id)
        await set_wf_module_busy(wf_module)
        await websockets.ws_client_send_delta_async(
            workflow_id, {
                'updateWfModules': {
                    str(wf_module.id): {
                        'is_busy': True,
                        'fetch_error': ''
                    }
                }
            })
        await rabbitmq.queue_fetch(wf_module)