Example #1
def named_queue(url):
    """Named queues facilitate discrete queues on a single backend."""

    foo_state = []

    def foo_func(arg):
        foo_state.append(arg)

    foo_broker = get_broker(url, 'foo')
    foo_broker.expose(foo_func)

    bar_state = []

    def bar_func(arg):
        bar_state.append(arg)

    bar_broker = get_broker(url, 'bar')
    bar_broker.expose(bar_func)

    with thread_worker(foo_broker), thread_worker(bar_broker):

        # -- task-invoking code, usually another process --
        f = get_queue(url, 'foo')
        f.foo_func(1)

        b = get_queue(url, 'bar')
        b.bar_func(2)

        eventually((lambda: (foo_state, bar_state)), ([1], [2]))
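
The examples on this page use names such as get_broker, get_queue, TaskSpace, Task, and const without showing their imports, along with test helpers like eventually, eq_, thread_worker, TimeoutLock, and WAIT. As a hedged sketch, the imports are assumed to look roughly like the following; the helper import path in particular is an assumption and may differ in your checkout:

# Assumed imports (a sketch, not taken from this page).
from worq import get_broker, get_queue, TaskSpace
from worq.task import Task
from worq import const

# Test helpers -- assumed to live in the package's test utilities.
from worq.tests.util import (eventually, eq_, thread_worker, TimeoutLock,
                             WAIT)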
Example #2
def result_status(url):
    """Deferred results can be queried for task status.

    A lock is used to control state interactions between the producer
    and the worker for illustration purposes only. This type of
    lock-step interaction is not normally needed or even desired.
    """
    lock = TimeoutLock(locked=True)

    def func(arg):
        lock.acquire()
        return arg

    broker = get_broker(url)
    broker.expose(func)
    with thread_worker(broker, lock):

        # -- task-invoking code, usually another process --
        q = get_queue(url)

        res = q.func('arg')

        eventually((lambda: res.status), const.ENQUEUED)
        eq_(repr(res), "<Deferred func [default:%s] enqueued>" % res.id)

        lock.release()
        eventually((lambda: res.status), const.PROCESSING)
        eq_(repr(res), "<Deferred func [default:%s] processing>" % res.id)

        lock.release()
        assert res.wait(WAIT), repr(res)
        eq_(repr(res), "<Deferred func [default:%s] success>" % res.id)

        eq_(res.value, 'arg')
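
The lock-step choreography above exists only to make each status observable. In application code the producer would normally just block on the deferred result; a minimal sketch, assuming the same broker/worker setup and exposed func as in the example:

def wait_for_result(url, timeout=30):
    # Enqueue the exposed ``func`` task and block until it completes.
    q = get_queue(url)
    res = q.func('arg')
    if res.wait(timeout):          # True once the task has finished
        return res.value           # 'arg'
    raise RuntimeError('task did not finish; status=%s' % res.status)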
Example #3
def expose_method(url):
    """Object methods can be exposed too, not just functions."""
    class Database(object):
        """stateful storage"""
        value = None

        def update(self, value):
            self.value = value

    class TaskObj(object):
        """object with task definitions"""
        def __init__(self, db):
            self.db = db

        def update_value(self, value):
            self.db.update(value)

    db = Database()
    obj = TaskObj(db)
    broker = get_broker(url)
    broker.expose(obj.update_value)
    with thread_worker(broker):

        # -- task-invoking code, usually another process --
        q = get_queue(url)
        q.update_value(2)
        eventually((lambda: db.value), 2)
Example #4
def task_namespaces(url):
    """Task namepsaces are used to arrange tasks similar to the Python
    package/module hierarchy.
    """

    state = set()
    __name__ = 'module.path'

    ts = TaskSpace(__name__)

    @ts.task
    def foo():
        state.add('foo')

    @ts.task
    def bar(arg):
        state.add(arg)

    broker = get_broker(url)
    broker.expose(ts)
    with thread_worker(broker):

        # -- task-invoking code, usually another process --
        q = get_queue(url)

        q.module.path.foo()
        q.module.path.bar(1)

        eventually((lambda: state), {'foo', 1})
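
The __name__ = 'module.path' assignment above only fakes a module name inside a test function. In a real project the TaskSpace would typically live at module level and pick up the module's own __name__; a sketch under that assumption (the module name and task are hypothetical):

# myapp/tasks.py -- hypothetical module; after broker.expose(ts), callers
# would invoke it as get_queue(url).myapp.tasks.send_report(day).
from worq import TaskSpace

ts = TaskSpace(__name__)

@ts.task
def send_report(day):
    print('reporting for', day)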
Example #5
    def func(arg, lock=None):
        # signal func started
        touch(join(tmp, 'func.started'))

        # wait for unlock
        eventually(reader(tmp, 'func.unlock'), '')

        # write data to file
        touch(join(tmp, 'func.out'), arg)

        log.debug('func complete')
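
This fragment is the task body used by the worker-pool tests below: it drops a 'func.started' marker, blocks until 'func.unlock' appears, then writes its argument to 'func.out'. The touch and reader file helpers it relies on are not shown here; hypothetical stand-ins consistent with that usage:

import os
from os.path import join

def touch(path, data=''):
    # Create (or overwrite) a small marker/data file.
    with open(path, 'w') as fh:
        fh.write(data)

def reader(dirpath, name):
    # Return a zero-argument callable suited to eventually(): it yields the
    # file's contents once the file exists, and None before that.
    def read():
        path = join(dirpath, name)
        if not os.path.exists(path):
            return None
        with open(path) as fh:
            return fh.read()
    return read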
Example #6
def simple(url):
    """A simple example demonstrating WorQ mechanics"""
    state = []

    def func(arg):
        state.append(arg)

    broker = get_broker(url)
    broker.expose(func)
    with thread_worker(broker):

        # -- task-invoking code, usually another process --
        q = get_queue(url)

        q.func('arg')

        eventually((lambda: state), ['arg'])
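
eventually, used throughout these examples, is a polling assertion: it repeatedly calls a zero-argument callable until the return value equals the expected one or a timeout elapses (the timeout and poll_interval keywords appear in later examples). A rough, hypothetical stand-in:

import time

def eventually(get_value, expected, timeout=5.0, poll_interval=0.05):
    # Poll get_value() until it equals expected; fail loudly on timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if get_value() == expected:
            return
        time.sleep(poll_interval)
    raise AssertionError('timed out waiting for %r; last value was %r'
                         % (expected, get_value()))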
Example #7
def more_namespaces(url):
    state = set()

    foo = TaskSpace('foo')
    bar = TaskSpace('foo.bar')
    baz = TaskSpace('foo.bar.baz')

    @foo.task
    def join(arg):
        state.add('foo-join %s' % arg)

    @bar.task
    def kick(arg):
        state.add('bar-kick %s' % arg)

    @baz.task
    def join(arg):
        state.add('baz-join %s' % arg)

    @baz.task
    def kick(arg):
        state.add('baz-kick %s' % arg)

    broker = get_broker(url)
    broker.expose(foo)
    broker.expose(bar)
    broker.expose(baz)
    with thread_worker(broker):

        # -- task-invoking code, usually another process --
        q = get_queue(url)

        q.foo.join(1)
        q.foo.bar.kick(2)
        q.foo.bar.baz.join(3)
        q.foo.bar.baz.kick(4)

        eventually((lambda: state), {
            'foo-join 1',
            'bar-kick 2',
            'baz-join 3',
            'baz-kick 4',
        })
Example #8
def test_Queue_len(url):
    lock = TimeoutLock(locked=True)

    def func(arg=None):
        pass

    broker = get_broker(url)
    broker.expose(func)
    with thread_worker(broker, lock):
        q = get_queue(url)
        eq_(len(q), 0)

        r0 = q.func()
        eq_(len(q), 1)

        r1 = q.func()
        r2 = q.func(r1)
        eq_(len(q), 3)

        eventually((lambda: lock.locked), True)
        lock.release()
        assert r0.wait(timeout=WAIT), repr(r0)
        eq_(len(q), 2)

        eventually((lambda: lock.locked), True)
        lock.release()
        eventually((lambda: lock.locked), True)
        lock.release()
        assert r2.wait(timeout=WAIT), repr(r2)
        eq_(len(q), 0)
Example #9
def test_WorkerPool_worker_shutdown_on_parent_die(url):
    with tempdir() as tmp:

        logpath = join(tmp, 'output.log')
        proc = run_in_subprocess(worker_pool, url,
            WorkerPool_worker_shutdown_on_parent_die_init,
            (tmp, logpath))

        with printlog(logpath), force_kill_on_exit(proc):

            res = get_queue(url).getpid()
            assert res.wait(WAIT), repr(res)

            os.kill(proc.pid, signal.SIGKILL)  # force kill pool master
            eventually(proc.is_alive, False, timeout=WAIT)

        try:
            eventually(pid_running(res.value), False,
                timeout=WAIT, poll_interval=0.1)
        except Exception:
            os.kill(res.value, signal.SIGTERM)  # clean up
            raise
Example #10
def ignore_result(url):
    """Tell the queue to ignore the task result when the result is not
    important. This is done by creating a ``Task`` object with custom
    options for more efficient queue operation.
    """
    state = []

    def func(arg):
        state.append(arg)

    broker = get_broker(url)
    broker.expose(func)
    with thread_worker(broker):

        # -- task-invoking code, usually another process --
        q = get_queue(url)

        f = Task(q.func, ignore_result=True)
        res = f(3)

        eq_(res, None)  # verify that we did not get a deferred result
        eventually((lambda: state), [3])
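
For contrast, a Task built without ignore_result behaves like a plain queue call and hands back a Deferred; a sketch assuming the same setup as the example above:

def compare_task_options(url):
    q = get_queue(url)
    deferred = Task(q.func)(3)                       # Deferred result
    nothing = Task(q.func, ignore_result=True)(3)    # None
    assert nothing is None
    return deferred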
Example #11
def Broker_duplicate_task_id(url, identifier):
    lock = TimeoutLock(locked=True)
    state = []

    def func(arg):
        lock.acquire()
        return arg

    broker = get_broker(url)
    broker.expose(func)
    with thread_worker(broker, lock):
        q = get_queue(url)

        task = Task(q.func, id=identifier)
        res = task(1)

        eventually((lambda: res.status), const.ENQUEUED)
        msg = 'func [default:int] cannot enqueue task with duplicate id'
        with assert_raises(DuplicateTask, msg):
            task(2)

        lock.release()
        eventually((lambda: res.status), const.PROCESSING)
        msg = 'func [default:int] cannot enqueue task with duplicate id'
        with assert_raises(DuplicateTask, msg):
            task(3)

        lock.release()
        assert res.wait(timeout=WAIT), repr(res)
        eq_(res.value, 1)

        res = task(4)
        eventually((lambda: res.status), const.ENQUEUED)
        lock.release()
        eventually((lambda: res.status), const.PROCESSING)
        lock.release()
        assert res.wait(timeout=WAIT), repr(res)
        eq_(res.value, 4)
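
Outside of tests, DuplicateTask lends itself to idempotent enqueueing: reuse a stable task id and treat the conflict as "already scheduled". A hedged sketch; the DuplicateTask import path and the id scheme are assumptions:

from worq.task import Task, DuplicateTask   # assumed import path

def enqueue_once(q, day):
    # With a fixed id the same job cannot be queued twice while a previous
    # run is still enqueued or processing, as demonstrated above.
    task = Task(q.func, id='daily-%s' % day)
    try:
        return task(day)          # Deferred result
    except DuplicateTask:
        return None               # already enqueued or in progress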
Example #12
def test_WorkerPool_sigterm(url):
    with tempdir() as tmp:

        logpath = join(tmp, 'output.log')
        proc = run_in_subprocess(worker_pool, url,
            WorkerPool_sigterm_init, (tmp, logpath), workers=3)

        with printlog(logpath), force_kill_on_exit(proc):

            q = get_queue(url)

            q.func('text')

            eventually(reader(tmp, 'func.started'), '')

            proc.terminate()  # signal pool shutdown
            touch(join(tmp, 'func.unlock'))  # allow func to proceed

            eventually(reader(tmp, 'func.out'), 'text')
            eventually(verify_shutdown(proc), True, timeout=WAIT)