Пример #1
0
    def _generate_test_chain(n):
        """Build a chain of n+1 ``add`` tasks whose final result is 0+1+...+n.

        Returns a ``(chain_signature, expected_result)`` tuple; the expected
        result is the closed-form sum n*(n+1)/2.
        """
        gt_result = n * (1 + n) / 2

        # Seed the chain with add(0, 0); each later partial signature receives
        # the running total as its first argument.
        first = add.s(0, 0)
        # BUG FIX: the original iterated over the undefined name `N`
        # (NameError at runtime) instead of the parameter `n`.
        body = [add.s(i + 1) for i in range(n)]
        c = chain(first, *body)
        return c, gt_result
Пример #2
0
def test_delay():
    """Exercise three equivalent ways of dispatching `add` asynchronously."""
    # Direct delay() shortcut.
    result = add.delay(2, 2).get()
    # BUG FIX: log message read 'add.delay 2, 2)' — missing opening paren.
    log.info('add.delay(2, 2) = %s' % result)

    # Build a signature first, then dispatch it with delay().
    result = add.s(2, 2).delay().get()
    log.info('add.s(2, 2).delay().get() = %s' % result)

    # Partial signature: the missing argument is supplied at delay() time.
    result = add.s(2).delay(12).get()
    print(result)
Пример #3
0
def test_grouping():
    """Demonstrate group, chain, chord, and group|callback composition."""
    # Ten adds in parallel; .get() returns the list of results.
    print(group(add.s(n, n) for n in range(10, 20))().get())

    # (2 + 2) then * 4, via an explicit chain of a piped signature.
    print(chain(add.s(2, 2) | mul.s(4))().get())

    # Square 0..9 in parallel, then feed all squares to xsum.
    print(chord((mul.s(n, n) for n in range(10)), xsum.s())().get())

    # Equivalent chord spelled as group | callback.
    print((group(mul.s(n, n) for n in range(10)) | xsum.s())().get())
Пример #4
0
def main():
    """Periodically enqueue an add -> step2 workflow and print its results."""
    while True:
        time.sleep(5)
        print("Adding work to queue")
        # BUG FIX: the local was named `chain`, shadowing the celery `chain`
        # helper that files like this conventionally import.
        workflow = add.s(1, 2) | step2.s()
        res = workflow()
        # intermediate=True also yields the results of non-final tasks.
        for result, value in res.collect(intermediate=True):
            print(value)
Пример #5
0
def test_group_results():
    """Fire three heterogeneous tasks as a group and inspect the result set."""
    signatures = [
        add.s(2, 4),
        mul.s(4, 9),
        xsum.s([2, 3, 5]),
    ]
    async_result = group(signatures).apply_async()
    # ready()/successful() poll without blocking; join()/get() block for all.
    print(async_result.ready())
    print(async_result.successful())
    print(async_result.join())
    print(async_result.get())
Пример #6
0
    def _generate_test_chord(n):
        """Build a recursively nested chord of depth n.

        Returns a ``(chord_signature, expected_result)`` tuple.
        NOTE(review): the expected result is assumed to be n — confirm
        against the task definitions, which are not visible here.
        """
        gt_result = n
        # Immutable signature (.si) so the enclosing chord's result is not
        # injected as an argument when the clone runs.
        task = add.si(1, 0)

        def _generate_chord_inner(n):
            # Base case: the innermost body simply sums its inputs.
            if n <= 0:
                return tsum.s()
            else:
                # Each level sums the previous level's output alongside one
                # extra cloned add task, then recurses for the body.
                return chord(header=[tsum.s(), task.clone()],
                             body=_generate_chord_inner(n - 1))

        return chord(header=add.s(0, 1),
                     body=_generate_chord_inner(n - 1)), gt_result
Пример #7
0
def arrange_tasks():
    """Showcase the different ways to dispatch the `add` task."""
    # normal call (runs synchronously, no broker involved)
    # add(0, 10)

    # send task message
    add.delay(1, 10)
    add.apply_async((2, 10))

    # executes 5 seconds from now (countdown is a relative delay in seconds)
    add.apply_async((3, 10), countdown=5)

    # executes 6 seconds from now - eta is an absolute timestamp
    add.apply_async((4, 10), eta=datetime.utcnow() + timedelta(seconds=6))

    # linking (callbacks / errbacks): on success, feed the result to add.s(7)
    add.apply_async((5, 10), link=add.s(7))
Пример #8
0
    def do_job1(self, feature_hash):
        """Mock a specific job by routing its task chain through WORKER1.

        Picks a worker based on some policy (hard-coded here) and computes
        (2 + 2) * 3 - 6 == 6 on the queue named after ``feature_hash``.

        @param {string} feature_hash - the feature hash that representing a series of tasks
        """
        self._configure_routing(feature_hash, WORKERS["WORKER1"])

        # print("job1: %s" % self._celery_app.control.inspect().active())

        # Every link of the chain is pinned to the feature-hash queue/route.
        route = dict(queue=feature_hash, routing_key=feature_hash)
        pipeline = chain(
            add.s(2, 2).set(**route),
            mult.s(3).set(**route),
            minus.s(6).set(**route),
        )
        pipeline.delay()
Пример #9
0
from tasks import add
from celery import group
# fire 100 tasks one by one and wait for each result
for i in range(1, 100):
    result = add.apply_async((i, i))
    print(i, result.get())
# fire 4 tasks inside one group, on the high-priority queue
numbers = [(2, 2), (4, 4), (8, 8), (16, 16)]
res = group(add.s(i, j) for i, j in numbers).apply_async(queue='priority.high',
                                                         serializer="json")
# collect the group's results here
print(res.get())
# raise exception still can run
# res = (
#     group(
#         (add.s(4, 4) | mul.s(2)), # 16
#         (add.s(5, 4) | mul.s(3)), # 27
#         (add.s(5, 5) | mul.s(4))  # 40
#     )
#     | xsum.s() # 83
# )
# res.apply_async()

# raise exception still can run, and call handle error function
res = (
    group(
        (add.s(4, 4) | mul.s(2)),  # 16
        (add.s(5, 4) | mul.s(3)),  # 27
        (add.s(5, 5) | mul.s(4))  # 40
    )
    | xsum.s()  # 83
)
# BUG FIX: link_error expects a signature; `report_error()` would invoke the
# task locally right here instead of registering an error callback.
res.apply_async(link_error=report_error.s())

# raise exception then it's run forever
# res = (
#     group(
#         (add.s(4, 4) | mul.s(2)), # 16
#         (add.s(5, 4) | mul.s(3)), # 27
#         (add.s(5, 5) | mul.s(4))  # 40
#     )
#     | xsum.s() # 83
Пример #11
0
from tasks import add
from celery import group

# Dispatch two parallel add tasks and block until both results are in.
res = group([add.s(i, i) for i in range(2)])()
print(res.get())
Пример #12
0
def grouped():
    """Fan out ten add tasks in parallel (fire-and-forget) and return a tag.

    The group runs concurrently according to the celery concurrency config.
    """
    # FIX: range() works on both Python 2 and 3; xrange is Python-2-only.
    group(add.s(i, i) for i in range(10))()
    return "grouped"
Пример #13
0
from celery import chord
from tasks import add, tsum

# NOTE(review): Python 2 syntax (print statement, xrange).
# Run 100 parallel adds, then sum every result via the tsum callback.
print chord(add.s(i, i) for i in xrange(100))(tsum.s()).get()
Пример #14
0
from tasks import add, mul, build_insight
from celery import chain

# (4 + 4) * 4, dispatched asynchronously via an explicit chain.
res = chain(add.s(4, 4), mul.s(4)).apply_async()
# build_insight.delay()

Пример #15
0
from tasks import add
from celery import group

# WARNING: this enqueues 1,000,000 tasks and then blocks on ALL results —
# useful only as a broker/backend stress test.
r = group(add.s(i, i) for i in range(1000000)).apply_async()
print(r.get())
Пример #16
0
from tasks import add, xsum

# Basic example: delay() is the simple dispatch shortcut
res = add.delay(5, 5)
print(res.get())

# Same call expressed through apply_async
res = add.apply_async((5, 5))
print(res.get())

# Signature: build the call description first, then dispatch it
signature = add.s(5, 5)
res = signature.apply_async()
# res = signature.delay()
print(res.get())

# Callback Example: on success, the result is fed to add.s(8)
res = add.apply_async((2, 2), link=add.s(8))
print(res.get())

# Chaining: each task's result becomes the next task's first argument
from celery import chain
chaining = chain(add.s(2, 2), add.s(4), add.s(8))  # 2 + 2 + 4 + 8
res = chaining.delay()
print(res.get())

# The | operator is shorthand for chain()
chaining = add.s(2, 2) | add.s(4) | add.s(8)
res = chaining.delay()
print(res.get())

# Grouping (Parallel)
Пример #17
0
from celery import group, chord
from tasks import add, reduce_
import time

# Ten parallel adds whose results are reduced by a single callback task.
header_tasks = [add.s(i, i) for i in range(10)]
reducer = reduce_.s()

chord_result = chord(header_tasks)(reducer)


# Block at most one second waiting for the reduced value.
value = chord_result.get(timeout=1)

print(value)
Пример #18
0
# FIX: `throw` was imported twice (once alone, once with add); merged into
# a single import line without dropping any name.
from celery import chord, group
from tasks import add, throw

# Chord header with a failing task in the middle; no body attached yet.
c = chord([add.s(4, 4), throw.s(), add.s(8, 8)])

Пример #19
0
def test():
    """Dispatch add(2, 2), wait briefly, then report its state and result."""
    async_result = add.s(2, 2).delay()
    time.sleep(1)
    # state is e.g. PENDING/SUCCESS; result is the value (or None if pending).
    print(async_result.state)
    print(async_result.result)
Пример #20
0
# run_chain.py
from tasks import add
from celery import chain

# build the individual task signatures
sig1 = add.s(3, 5)
sig2 = add.s(8)
sig3 = add.s(2)

# build the chain signature: ((3 + 5) + 8) + 2
sig_chain = chain(sig1, sig2, sig3)
print(f"chain: {sig_chain}")

# launch the work asynchronously
res = sig_chain()

# look up the task id
print(" 작업 ID 조회 ".center(50, "="))
print(f"  chain: {res.id}")

# check whether the work has completed (non-blocking)
print(" 작업 완료여부 조회 ".center(50, "="))
print(f"  chain: {res.ready()}")

# fetch the result (blocks until the chain completes)
print(" 결과 데이터 조회 (완료될때까지 Pause) ".center(50, "="))
print(f"  chain: {res.get()}")

# check completion again — now True after get() returned
print(" 작업 완료여부 조회 ".center(50, "="))
print(f"  chain: {res.ready()}")
Пример #21
0
from celery import chain, group, chord

from tasks import add, xsum, raise_error, on_chord_error


if __name__ == '__main__':
    ## chain primitive: each result feeds the next task's first argument
    res = chain(add.s(2, 2), add.s(4), add.s(8))()
    print('Chain result: %s' % res.get())

    # shortcut of above: | is the chain operator
    res = (add.s(2, 2) | add.s(4) | add.s(8))()
    print('Chain shortcut: %s' % res.get())

    # .si() makes immutable signatures, so results are NOT passed along;
    # each link's own result stays reachable through .parent
    res = chain(add.si(2, 2), add.si(4, 5), add.si(8, 8))()
    print('Chain with independent task: %s' % res.get())           # 8 + 8
    print('Parent result: %s' % res.parent.get())                  # 4 + 5
    print('Parent of parent result: %s' % res.parent.parent.get()) # 2 + 2


    ## group primitive: run tasks in parallel, collect a list of results
    res = group(add.s(i, i) for i in range(10))()
    print('Group result: %s' % res.get())


    ## chord primitive: a group plus a callback over all of its results
    res = chord((add.s(i, i) for i in range(10)), xsum.s())()
    # is equal to: group(add.s(i, i) for i in range(10)) | xsum.s()
    print('Chord result: %s' % res.get())

    # raise_error fails mid-header; demonstrates chord error behavior
    res = chord([add.s(2, 2), raise_error.s(), add.s(4, 4)], xsum.s())()
Пример #22
0
import json  # BUG FIX: json.dumps below was used without importing json

from celery import signature

from tasks import add

if __name__ == '__main__':
    # shortcut: add.s(2, 2)
    # or: signature('tasks.add', args=(2, 2), countdown=10)
    sig = add.signature((2, 2), countdown=3)
    print(sig, json.dumps(sig))

    # call local (runs synchronously in this process, no broker)
    ret = sig()
    print('Call local: %s' % ret)

    # call remote
    # NOTE: ~sig is equal to sig.delay().get()
    async_ret = sig.delay()
    ret = async_ret.get()
    print('Call remote: %s' % ret)

    # partial: the missing argument is supplied at delay() time
    partial = add.s(2)
    async_ret = partial.delay(3)
    ret = async_ret.get()
    print('Partial: %s' % ret)

    # immutable link callback: ignores the parent task's result
    async_ret = add.apply_async((2, 2),
                                link=add.signature((3, 3), immutable=True))
    ret = async_ret.get()
    print('Use signature in task link: %s' % ret)
Пример #23
0
# run_group.py
from tasks import add
from celery import group

# build the task signatures: add(0, 1), add(1, 2), ... add(4, 5)
sigs = [add.s(i, i + 1) for i in range(5)]

# build the group signature
job = group(sigs)
print(f"group: {job}")

res = job()

# look up the group result
print(" 작업 조회 ".center(50, "="))
print(f"  task id: {res.id}")
print(f"  result type: {type(res)}")

# check status (non-blocking)
print(" 상태 조회 ".center(50, "="))
print(f"  task ready: {res.ready()}")

# fetch the results
print(" 결과 반환 ".center(50, "="))
print(f"  task completed: {res.completed_count()}")  # number of finished subtasks
print(f"  task results: {res.get()}")
print(f"  task results: {res.join()}")  # results in the order they were called
print(f"  task completed: {res.completed_count()}")

# check status again
print(" 상태 조회 ".center(50, "="))
print(f"  task ready: {res.ready()}")
print(f"  task successful: {res.successful()}")  # True iff every subtask succeeded
Пример #24
0
from tasks import add, append, xsum
from celery import chord, group

print("HERE")

# chord: ten parallel adds, summed by xsum once all of them complete
chord_res = chord((add.s(n, n) for n in range(10)), xsum.s())()
print(chord_res.get())

# plain group: same header tasks, results returned as a list
group_res = group(add.s(n, n) for n in range(10))()
print(group_res.get())

# chord with an immutable body callback; poll readiness every second
append_res = chord(add.s(x, x) for x in range(7))(append.si('complete and '),
                                                  interval=1)
print(append_res.get())
Пример #25
0
#!/usr/bin/env python
# -*- coding:utf-8 -*-

from tasks import add
import celery

# Build 1000 signatures; each add() call receives two small lists.
# (Idiom fix: list(range(...)) and a comprehension replace append loops.)
ids = list(range(1000))

rs = [add.s([i, 2], [i + 1, 3]) for i in ids]

# Dispatch them all as one parallel group and block for every result.
jobs = celery.group(rs)
results = jobs.apply_async()
print(results.get())  # parenthesized form is valid on Python 2 and 3
Пример #26
0
def chained():
    """Run add(1, 2) -> +1 -> +2 as a chain (fire-and-forget), return a tag."""
    # chain() around an already-piped signature is redundant but harmless.
    workflow = chain(add.s(1, 2) | add.s(1) | add.s(2))
    workflow()
    return "chained"
Пример #27
0
Файл: foo.py Проект: demien/test
from tasks import add, sleep, xsum, error, dummy
from celery import chord
from celery import Celery

# NOTE(review): Python 2 syntax (print statement).
if __name__ == '__main__':
    # x = chord((add.s(i, i) for i in xrange(10)), sleep.si())
    # print type(x)
    # x()
    # x = add.delay(1, 2).get()
    # Chain: (1 + 2) -> +3 -> error task -> dummy; error-propagation demo.
    c = add.s(1, 2) | add.s(3) | error.s() | dummy.s()
    print c()
    # sleep.delay()
    # chain()
    # print add.name
    # add.delay(4, 4)
    # app = Celery('tasks', broker='redis://localhost:6379/0')
    # print app.control.inspect().active_queues()
    # app.control.add_consumer('add', reply=True)
Пример #28
0
import time

from celery import group, chain, chord

from tasks import sendmail, add, take, no_argument, xsum

# With a single default celery queue, tasks execute one after another,
# because there is only one queue to consume from.
result = sendmail.delay(dict(to='www.com'))
# result1 = sendmail.apply_async(args=(dict(to='*****@*****.**'), ))
# result2 = add.apply_async(args=(2, 3))

# To run multiple queues in parallel, start multiple routes and pass the
# queue name via the `queue` argument.

result3 = take.apply_async(args=(10, 1), queue='web_tasks')
# Linked task: the previous task's result is passed into the next task.
result4 = add.apply_async(args=(2, 2), link=add.s(3), queue='default')
# # Linked task: the previous task's result is passed into the next task.
# result5 = add.apply_async(args=(2, 2), link=add.s(4), queue='default')
# # Linked task: no_argument.si() is immutable, so it ignores the result.
result6 = add.apply_async(args=(2, 2), link=no_argument.si(), queue='default')
# # Linked task: immutable signature form, equivalent to .si() above.
result7 = add.apply_async(args=(2, 2), link=no_argument.signature(immutable=True), queue='default')
# Expiration: discard the task if not executed within 10 seconds.
result8 = add.apply_async(args=(2, 3), expires=10, queue='default')
# Parallel dispatch; the collected result is a list.
result9 = group(add.s(i, i) for i in range(10))(queue='default')
# Serial dispatch; the result is 16.
result10 = chain(add.s(2, 2), add.s(4), add.s(8))()
# chord - a group with a callback over all the group's results
#result11 = chord((add.s(i, i) for i in range(10)), xsum.s())(queue='default')