Example #1
def pusher(task_queue: Queue, event: Event, broker: Broker = None):
    """
    Pulls tasks off the broker and puts them in the task queue
    :type broker: Broker
    :type task_queue: multiprocessing.Queue
    :type event: multiprocessing.Event
    """
    if not broker:
        broker = get_broker()
    logger.info(_(f"{current_process().name} pushing tasks at {current_process().pid}"))
    while True:
        try:
            task_set = broker.dequeue()
        except Exception as e:
            logger.error(f"{e} : {traceback.format_exc()}")
            # broker probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task_set:
            for task in task_set:
                ack_id = task[0]
                # unpack the task
                try:
                    task = SignedPackage.loads(task[1])
                except (TypeError, BadSignature) as e:
                    logger.error(f"{e} : {traceback.format_exc()}")
                    broker.fail(ack_id)
                    continue
                task["ack_id"] = ack_id
                task_queue.put(task)
            logger.debug(_(f"queueing from {broker.list_key}"))
        if event.is_set():
            break
    logger.info(_(f"{current_process().name} stopped pushing tasks"))
Example #2
 def __init__(
     self,
     stop_event,
     start_event,
     cluster_id,
     broker=None,
     timeout=Conf.TIMEOUT,
     start=True,
 ):
     # Make sure we catch signals for the pool
     signal.signal(signal.SIGINT, signal.SIG_IGN)
     signal.signal(signal.SIGTERM, signal.SIG_DFL)
     self.pid = current_process().pid
     self.cluster_id = cluster_id
     self.parent_pid = get_ppid()
     self.name = current_process().name
     self.broker = broker or get_broker()
     self.reincarnations = 0
     self.tob = timezone.now()
     self.stop_event = stop_event
     self.start_event = start_event
     self.pool_size = Conf.WORKERS
     self.pool = []
     self.timeout = timeout
     self.task_queue = (Queue(
         maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue())
     self.result_queue = Queue()
     self.event_out = Event()
     self.monitor = None
     self.pusher = None
     if start:
         self.start()
Example #3
def _sync(pack):
    """Simulate a package travelling through the cluster."""
    task_queue = Queue()
    result_queue = Queue()
    task = signing.SignedPackage.loads(pack)
    task_queue.put(task)
    task_queue.put('STOP')
    cluster.worker(task_queue, result_queue, Value('f', -1))
    result_queue.put('STOP')
    cluster.monitor(result_queue)
    return task['id']
Example #4
def worker(task_queue: Queue,
           result_queue: Queue,
           timer: Value,
           timeout: int = Conf.TIMEOUT):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :param timeout: number of seconds to wait for a worker to finish.
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """
    name = current_process().name
    logger.info(_(f"{name} ready for work at {current_process().pid}"))
    task_count = 0
    if timeout is None:
        timeout = -1
    # Start reading the task queue
    for task in iter(task_queue.get, "STOP"):
        result = None
        timer.value = -1  # Idle
        task_count += 1
        # Get the function from the task
        logger.info(_(f'{name} processing [{task["name"]}]'))
        f = task["func"]
        # if it's not an instance try to get it from the string
        if not callable(task["func"]):
            f = pydoc.locate(f)
        close_old_django_connections()
        timer_value = task.pop("timeout", timeout)
        # signal execution
        pre_execute.send(sender="django_q", func=f, task=task)
        # execute the payload
        timer.value = timer_value  # Busy
        try:
            res = f(*task["args"], **task["kwargs"])
            result = (res, True)
        except Exception as e:
            result = (f"{e} : {traceback.format_exc()}", False)
            if error_reporter:
                error_reporter.report()
            if task.get("sync", False):
                raise
        with timer.get_lock():
            # Process result
            task["result"] = result[0]
            task["success"] = result[1]
            task["stopped"] = timezone.now()
            result_queue.put(task)
            timer.value = -1  # Idle
            # Recycle
            if task_count == Conf.RECYCLE or rss_check():
                timer.value = -2  # Recycled
                break
    logger.info(_(f"{name} stopped doing work"))
Example #5
def test_cached(broker):
    broker.purge_queue()
    broker.cache.clear()
    group = 'cache_test'
    # queue the tests
    # note: async() is the pre-1.0 django-q spelling of async_task(); it was
    # renamed because async became a reserved word in Python 3.7
    task_id = async('math.copysign', 1, -1, cached=True, broker=broker)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.popysign', 1, -1, cached=True, broker=broker, group=group)  # misspelled on purpose: yields the group's single failure
    iter_id = async_iter('math.floor', [i for i in range(10)], cached=True)
    # test wait on cache
    # test wait timeout
    assert result(task_id, wait=10, cached=True) is None
    assert fetch(task_id, wait=10, cached=True) is None
    assert result_group(group, wait=10, cached=True) is None
    assert result_group(group, count=2, wait=10, cached=True) is None
    assert fetch_group(group, wait=10, cached=True) is None
    assert fetch_group(group, count=2, wait=10, cached=True) is None
    # run a single inline cluster
    task_count = 17
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # assert results
    assert result(task_id, wait=500, cached=True) == -1
    assert fetch(task_id, wait=500, cached=True).result == -1
    # make sure it's not in the db backend
    assert fetch(task_id) is None
    # assert group
    assert count_group(group, cached=True) == 6
    assert count_group(group, cached=True, failures=True) == 1
    assert result_group(group, cached=True) == [-1, -1, -1, -1, -1]
    assert len(result_group(group, cached=True, failures=True)) == 6
    assert len(fetch_group(group, cached=True)) == 6
    assert len(fetch_group(group, cached=True, failures=False)) == 5
    delete_group(group, cached=True)
    assert count_group(group, cached=True) is None
    delete_cached(task_id)
    assert result(task_id, cached=True) is None
    assert fetch(task_id, cached=True) is None
    # iter cached
    assert result(iter_id) is None
    assert result(iter_id, cached=True) is not None
    broker.cache.clear()
Example #6
def _sync(pack):
    # Python 2.6 cannot handle this import at the top of the file
    # because it would create a circular dependency between tasks and cluster
    from django_q.cluster import worker, monitor
    """Simulate a package travelling through the cluster."""
    task_queue = Queue()
    result_queue = Queue()
    task = SignedPackage.loads(pack)
    task_queue.put(task)
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    result_queue.put('STOP')
    monitor(result_queue)
    return task['id']
Example #7
    def test_post_execute_signal(self, broker):
        broker.list_key = "post_execute_test:q"
        broker.delete_queue()
        self.signal_was_called: bool = False
        self.task: Optional[dict] = None
        self.func = None

        def handler(sender, task, **kwargs):
            self.signal_was_called = True
            self.task = task

        post_execute.connect(handler)
        task_id = async_task("math.copysign", 1, -1, broker=broker)
        task_queue = Queue()
        result_queue = Queue()
        event = Event()
        event.set()
        pusher(task_queue, event, broker=broker)
        task_queue.put("STOP")
        worker(task_queue, result_queue, Value("f", -1))
        result_queue.put("STOP")
        monitor(result_queue, broker)
        broker.delete_queue()
        assert self.signal_was_called is True
        assert self.task.get("id") == task_id
        assert self.task.get("result") == -1
        post_execute.disconnect(handler)
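The worker (Examples #4 and #20) also sends a pre_execute signal just before running the payload; below is a hedged receiver sketch in the same style as the post_execute handler above (the django_q.signals import path is an assumption inferred from the signal usage shown here).

from django_q.signals import pre_execute  # assumed module path

def log_start(sender, func, task, **kwargs):
    print(f"about to run [{task['name']}]")

pre_execute.connect(log_start)
# ... enqueue and run tasks ...
pre_execute.disconnect(log_start)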
Example #8
def test_acknowledge_failure_override():
    class VerifyAckMockBroker(Broker):
        def __init__(self, *args, **kwargs):
            super(VerifyAckMockBroker, self).__init__(*args, **kwargs)
            self.acknowledgements = {}

        def acknowledge(self, task_id):
            count = self.acknowledgements.get(task_id, 0)
            self.acknowledgements[task_id] = count + 1

    tag = uuid()
    task_fail_ack = {
        "id": tag[1],
        "name": tag[0],
        "ack_id": "test_fail_ack_id",
        "ack_failure": True,
        "func": "math.copysign",
        "args": (1, -1),
        "kwargs": {},
        "started": timezone.now(),
        "stopped": timezone.now(),
        "success": False,
        "result": None,
    }

    tag = uuid()
    task_fail_no_ack = task_fail_ack.copy()
    task_fail_no_ack.update({
        "id": tag[1],
        "name": tag[0],
        "ack_id": "test_fail_no_ack_id"
    })
    del task_fail_no_ack["ack_failure"]

    tag = uuid()
    task_success_ack = task_fail_ack.copy()
    task_success_ack.update({
        "id": tag[1],
        "name": tag[0],
        "ack_id": "test_success_ack_id",
        "success": True,
    })
    del task_success_ack["ack_failure"]

    result_queue = Queue()
    result_queue.put(task_fail_ack)
    result_queue.put(task_fail_no_ack)
    result_queue.put(task_success_ack)
    result_queue.put("STOP")
    broker = VerifyAckMockBroker(list_key="key")

    monitor(result_queue, broker)

    assert broker.acknowledgements.get("test_fail_ack_id") == 1
    assert broker.acknowledgements.get("test_fail_no_ack_id") is None
    assert broker.acknowledgements.get("test_success_ack_id") == 1
Example #9
def test_acknowledge_failure_override():
    class VerifyAckMockBroker(Broker):
        def __init__(self, *args, **kwargs):
            super(VerifyAckMockBroker, self).__init__(*args, **kwargs)
            self.acknowledgements = {}

        def acknowledge(self, task_id):
            count = self.acknowledgements.get(task_id, 0)
            self.acknowledgements[task_id] = count + 1

    tag = uuid()
    task_fail_ack = {
        'id': tag[1],
        'name': tag[0],
        'ack_id': 'test_fail_ack_id',
        'ack_failure': True,
        'func': 'math.copysign',
        'args': (1, -1),
        'kwargs': {},
        'started': timezone.now(),
        'stopped': timezone.now(),
        'success': False,
        'result': None
    }

    tag = uuid()
    task_fail_no_ack = task_fail_ack.copy()
    task_fail_no_ack.update({
        'id': tag[1],
        'name': tag[0],
        'ack_id': 'test_fail_no_ack_id'
    })
    del task_fail_no_ack['ack_failure']

    tag = uuid()
    task_success_ack = task_fail_ack.copy()
    task_success_ack.update({
        'id': tag[1],
        'name': tag[0],
        'ack_id': 'test_success_ack_id',
        'success': True,
    })
    del task_success_ack['ack_failure']

    result_queue = Queue()
    result_queue.put(task_fail_ack)
    result_queue.put(task_fail_no_ack)
    result_queue.put(task_success_ack)
    result_queue.put('STOP')
    broker = VerifyAckMockBroker(list_key='key')

    monitor(result_queue, broker)

    assert broker.acknowledgements.get('test_fail_ack_id') == 1
    assert broker.acknowledgements.get('test_fail_no_ack_id') is None
    assert broker.acknowledgements.get('test_success_ack_id') == 1
Example #10
def test_bad_secret(broker, monkeypatch):
    broker.list_key = "test_bad_secret:q"
    async_task("math.copysign", 1, -1, broker=broker)
    stop_event = Event()
    stop_event.set()
    start_event = Event()
    cluster_id = uuidlib.uuid4()
    s = Sentinel(stop_event,
                 start_event,
                 cluster_id=cluster_id,
                 broker=broker,
                 start=False)
    Stat(s).save()
    # change the SECRET
    monkeypatch.setattr(Conf, "SECRET_KEY", "OOPS")
    stat = Stat.get_all()
    assert len(stat) == 0
    assert Stat.get(pid=s.parent_pid, cluster_id=cluster_id) is None
    task_queue = Queue()
    pusher(task_queue, stop_event, broker=broker)
    result_queue = Queue()
    task_queue.put("STOP")
    worker(
        task_queue,
        result_queue,
        Value("f", -1),
    )
    assert result_queue.qsize() == 0
    broker.delete_queue()
Example #11
def test_max_rss(broker, monkeypatch):
    # set up the Sentinel
    broker.list_key = 'test_max_rss_test:q'
    async_task('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    start_event = Event()
    stop_event = Event()
    cluster_id = uuidlib.uuid4()
    # override settings
    monkeypatch.setattr(Conf, 'MAX_RSS', 40000)
    monkeypatch.setattr(Conf, 'WORKERS', 1)
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, cluster_id=cluster_id, broker=broker)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async_task('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    task_queue = Queue()
    result_queue = Queue()
    # push the task
    pusher(task_queue, stop_event, broker=broker)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value('f', -1))
    # check if the work has been done
    assert result_queue.qsize() == 1
    # save_limit test
    monkeypatch.setattr(Conf, 'SAVE_LIMIT', 1)
    result_queue.put('STOP')
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    broker.delete_queue()
Example #12
 def __init__(self, stop_event, start_event, broker=None, timeout=Conf.TIMEOUT, start=True):
     # Make sure we catch signals for the pool
     signal.signal(signal.SIGINT, signal.SIG_IGN)
     signal.signal(signal.SIGTERM, signal.SIG_DFL)
     self.pid = current_process().pid
     self.parent_pid = get_ppid()
     self.name = current_process().name
     self.broker = broker or get_broker()
     self.reincarnations = 0
     self.tob = timezone.now()
     self.stop_event = stop_event
     self.start_event = start_event
     self.pool_size = Conf.WORKERS
     self.pool = []
     self.timeout = timeout
     self.task_queue = Queue(maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue()
     self.result_queue = Queue()
     self.event_out = Event()
     self.monitor = None
     self.pusher = None
     if start:
         self.start()
Example #13
def test_bad_secret(broker, monkeypatch):
    broker.list_key = 'test_bad_secret:q'
    async_task('math.copysign', 1, -1, broker=broker)
    stop_event = Event()
    stop_event.set()
    start_event = Event()
    s = Sentinel(stop_event, start_event, broker=broker, start=False)
    Stat(s).save()
    # change the SECRET
    monkeypatch.setattr(Conf, "SECRET_KEY", "OOPS")
    stat = Stat.get_all()
    assert len(stat) == 0
    assert Stat.get(s.parent_pid) is None
    task_queue = Queue()
    pusher(task_queue, stop_event, broker=broker)
    result_queue = Queue()
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == 0
    broker.delete_queue()
Example #14
def test_recycle(broker, monkeypatch):
    # set up the Sentinel
    broker.list_key = "test_recycle_test:q"
    async_task("django_q.tests.tasks.multiply", 2, 2, broker=broker)
    async_task("django_q.tests.tasks.multiply", 2, 2, broker=broker)
    async_task("django_q.tests.tasks.multiply", 2, 2, broker=broker)
    start_event = Event()
    stop_event = Event()
    cluster_id = uuidlib.uuid4()
    # override settings
    monkeypatch.setattr(Conf, "RECYCLE", 2)
    monkeypatch.setattr(Conf, "WORKERS", 1)
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, cluster_id=cluster_id, broker=broker)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async_task("django_q.tests.tasks.multiply", 2, 2, broker=broker)
    async_task("django_q.tests.tasks.multiply", 2, 2, broker=broker)
    task_queue = Queue()
    result_queue = Queue()
    # push two tasks
    pusher(task_queue, stop_event, broker=broker)
    pusher(task_queue, stop_event, broker=broker)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value("f", -1))
    # check if the work has been done
    assert result_queue.qsize() == 2
    # save_limit test
    monkeypatch.setattr(Conf, "SAVE_LIMIT", 1)
    result_queue.put("STOP")
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    broker.delete_queue()
Example #15
def _sync(pack):
    """Simulate a package travelling through the cluster."""
    from django_q.cluster import monitor, worker

    task_queue = Queue()
    result_queue = Queue()
    task = SignedPackage.loads(pack)
    task_queue.put(task)
    task_queue.put("STOP")
    worker(task_queue, result_queue, Value("f", -1))
    result_queue.put("STOP")
    monitor(result_queue)
    task_queue.close()
    task_queue.join_thread()
    result_queue.close()
    result_queue.join_thread()
    return task["id"]
Example #16
def test_enqueue(broker, admin_user):
    broker.list_key = 'cluster_test:q'
    broker.delete_queue()
    a = async_task('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, hook='django_q.tests.test_cluster.assert_result',
                   broker=broker)
    b = async_task('django_q.tests.tasks.count_letters2', WordClass(), hook='django_q.tests.test_cluster.assert_result',
                   broker=broker)
    # unknown argument
    c = async_task('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, 'oneargumentoomany',
                   hook='django_q.tests.test_cluster.assert_bad_result', broker=broker)
    # unknown function
    d = async_task('django_q.tests.tasks.does_not_exist', WordClass(), hook='django_q.tests.test_cluster.assert_bad_result',
                   broker=broker)
    # function without result
    e = async_task('django_q.tests.tasks.countdown', 100000, broker=broker)
    # function as instance
    f = async_task(multiply, 753, 2, hook=assert_result, broker=broker)
    # model as argument
    g = async_task('django_q.tests.tasks.get_task_name', Task(name='John'), broker=broker)
    # args,kwargs, group and broken hook
    h = async_task('django_q.tests.tasks.word_multiply', 2, word='django', hook='fail.me', broker=broker)
    # args unpickle test
    j = async_task('django_q.tests.tasks.get_user_id', admin_user, broker=broker, group='test_j')
    # q_options and save opt_out test
    k = async_task('django_q.tests.tasks.get_user_id', admin_user,
                   q_options={'broker': broker, 'group': 'test_k', 'save': False, 'timeout': 90})
    # test unicode
    assert Task(name='Amalia').__str__() == 'Amalia'
    # check if everything has a task id
    assert isinstance(a, str)
    assert isinstance(b, str)
    assert isinstance(c, str)
    assert isinstance(d, str)
    assert isinstance(e, str)
    assert isinstance(f, str)
    assert isinstance(g, str)
    assert isinstance(h, str)
    assert isinstance(j, str)
    assert isinstance(k, str)
    # run the cluster to execute the tasks
    task_count = 10
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push the tasks
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    # test wait timeout
    assert result(j, wait=10) is None
    assert fetch(j, wait=10) is None
    assert result_group('test_j', wait=10) is None
    assert result_group('test_j', count=2, wait=10) is None
    assert fetch_group('test_j', wait=10) is None
    assert fetch_group('test_j', count=2, wait=10) is None
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # Check the results
    # task a
    result_a = fetch(a)
    assert result_a is not None
    assert result_a.success is True
    assert result(a) == 1506
    # task b
    result_b = fetch(b)
    assert result_b is not None
    assert result_b.success is True
    assert result(b) == 1506
    # task c
    result_c = fetch(c)
    assert result_c is not None
    assert result_c.success is False
    # task d
    result_d = fetch(d)
    assert result_d is not None
    assert result_d.success is False
    # task e
    result_e = fetch(e)
    assert result_e is not None
    assert result_e.success is True
    assert result(e) is None
    # task f
    result_f = fetch(f)
    assert result_f is not None
    assert result_f.success is True
    assert result(f) == 1506
    # task g
    result_g = fetch(g)
    assert result_g is not None
    assert result_g.success is True
    assert result(g) == 'John'
    # task h
    result_h = fetch(h)
    assert result_h is not None
    assert result_h.success is True
    assert result(h) == 12
    # task j
    result_j = fetch(j)
    assert result_j is not None
    assert result_j.success is True
    assert result_j.result == result_j.args[0].id
    # check fetch, result by name
    assert fetch(result_j.name) == result_j
    assert result(result_j.name) == result_j.result
    # groups
    assert result_group('test_j')[0] == result_j.result
    assert result_j.group_result()[0] == result_j.result
    assert result_group('test_j', failures=True)[0] == result_j.result
    assert result_j.group_result(failures=True)[0] == result_j.result
    assert fetch_group('test_j')[0].id == result_j.id
    assert fetch_group('test_j', failures=False)[0].id == result_j.id
    assert count_group('test_j') == 1
    assert result_j.group_count() == 1
    assert count_group('test_j', failures=True) == 0
    assert result_j.group_count(failures=True) == 0
    assert delete_group('test_j') == 1
    assert result_j.group_delete() == 0
    deleted_group = delete_group('test_j', tasks=True)
    assert deleted_group is None or deleted_group[0] == 0  # Django 1.9
    deleted_group = result_j.group_delete(tasks=True)
    assert deleted_group is None or deleted_group[0] == 0  # Django 1.9
    # task k should not have been saved
    assert fetch(k) is None
    assert fetch(k, 100) is None
    assert result(k, 100) is None
    broker.delete_queue()
Example #17
def test_scheduler(broker, monkeypatch):
    broker.list_key = "scheduler_test:q"
    broker.delete_queue()
    schedule = create_schedule(
        "math.copysign",
        1,
        -1,
        name="test math",
        hook="django_q.tests.tasks.result",
        schedule_type=Schedule.HOURLY,
        repeats=1,
    )
    assert schedule.last_run() is None
    # check duplicate constraint
    with pytest.raises(IntegrityError):
        schedule = create_schedule(
            "math.copysign",
            1,
            -1,
            name="test math",
            hook="django_q.tests.tasks.result",
            schedule_type=Schedule.HOURLY,
            repeats=1,
        )
    # run scheduler
    scheduler(broker=broker)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, broker=broker)
    assert task_queue.qsize() == 1
    assert broker.queue_size() == 0
    task_queue.put("STOP")
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value("b", -1))
    assert result_queue.qsize() == 1
    result_queue.put("STOP")
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.repeats == 0
    assert schedule.last_run() is not None
    assert schedule.success() is True
    assert schedule.next_run < arrow.get(timezone.now()).shift(hours=+1)
    task = fetch(schedule.task)
    assert task is not None
    assert task.success is True
    assert task.result < 0
    # Once schedule with delete
    once_schedule = create_schedule(
        "django_q.tests.tasks.word_multiply",
        2,
        word="django",
        schedule_type=Schedule.ONCE,
        repeats=-1,
        hook="django_q.tests.tasks.result",
    )
    assert hasattr(once_schedule, "pk") is True
    # negative repeats
    always_schedule = create_schedule(
        "django_q.tests.tasks.word_multiply",
        2,
        word="django",
        schedule_type=Schedule.DAILY,
        repeats=-1,
        hook="django_q.tests.tasks.result",
    )
    assert hasattr(always_schedule, "pk") is True
    # Minute schedule
    minute_schedule = create_schedule(
        "django_q.tests.tasks.word_multiply",
        2,
        word="django",
        schedule_type=Schedule.MINUTES,
        minutes=10,
    )
    assert hasattr(minute_schedule, "pk") is True
    # Cron schedule
    cron_schedule = create_schedule(
        "django_q.tests.tasks.word_multiply",
        2,
        word="django",
        schedule_type=Schedule.CRON,
        cron="0 22 * * 1-5",
    )
    assert hasattr(cron_schedule, "pk") is True
    assert cron_schedule.full_clean() is None
    assert cron_schedule.__str__() == "django_q.tests.tasks.word_multiply"
    with pytest.raises(ValidationError):
        create_schedule(
            "django_q.tests.tasks.word_multiply",
            2,
            word="django",
            schedule_type=Schedule.CRON,
            cron="0 22 * * 1-12",
        )
    # All other types
    for t in Schedule.TYPE:
        if t[0] == Schedule.CRON:
            continue
        schedule = create_schedule(
            "django_q.tests.tasks.word_multiply",
            2,
            word="django",
            schedule_type=t[0],
            repeats=1,
            hook="django_q.tests.tasks.result",
        )
        assert schedule is not None
        assert schedule.last_run() is None
        scheduler(broker=broker)
    # via model
    Schedule.objects.create(
        func="django_q.tests.tasks.word_multiply",
        args="2",
        kwargs='word="django"',
        schedule_type=Schedule.DAILY,
    )
    # scheduler
    scheduler(broker=broker)
    # ONCE schedule should be deleted
    assert Schedule.objects.filter(pk=once_schedule.pk).exists() is False
    # Catch up On
    monkeypatch.setattr(Conf, "CATCH_UP", True)
    now = timezone.now()
    schedule = create_schedule(
        "django_q.tests.tasks.word_multiply",
        2,
        word="catch_up",
        schedule_type=Schedule.HOURLY,
        next_run=timezone.now() - timedelta(hours=12),
        repeats=-1,
    )
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run < now
    # Catch up off
    monkeypatch.setattr(Conf, "CATCH_UP", False)
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run > now
    # Done
    broker.delete_queue()

    monkeypatch.setattr(Conf, "PREFIX", "some_cluster_name")
    # create a schedule on another cluster
    schedule = create_schedule(
        "math.copysign",
        1,
        -1,
        name="test schedule on a another cluster",
        hook="django_q.tests.tasks.result",
        schedule_type=Schedule.HOURLY,
        cluster="some_other_cluster_name",
        repeats=1,
    )
    # run scheduler
    scheduler(broker=broker)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, broker=broker)

    # queue must be empty
    assert task_queue.qsize() == 0

    monkeypatch.setattr(Conf, "PREFIX", "default")
    # create a schedule on the same cluster
    schedule = create_schedule(
        "math.copysign",
        1,
        -1,
        name="test schedule with no cluster",
        hook="django_q.tests.tasks.result",
        schedule_type=Schedule.HOURLY,
        cluster="default",
        repeats=1,
    )
    # run scheduler
    scheduler(broker=broker)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, broker=broker)

    # queue must contain a task
    assert task_queue.qsize() == 1
Example #18
def test_scheduler(broker, monkeypatch):
    broker.list_key = 'scheduler_test:q'
    broker.delete_queue()
    schedule = create_schedule('math.copysign',
                               1, -1,
                               name='test math',
                               hook='django_q.tests.tasks.result',
                               schedule_type=Schedule.HOURLY,
                               repeats=1)
    assert schedule.last_run() is None
    # check duplicate constraint
    with pytest.raises(IntegrityError):
        schedule = create_schedule('math.copysign',
                                   1, -1,
                                   name='test math',
                                   hook='django_q.tests.tasks.result',
                                   schedule_type=Schedule.HOURLY,
                                   repeats=1)
    # run scheduler
    scheduler(broker=broker)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, broker=broker)
    assert task_queue.qsize() == 1
    assert broker.queue_size() == 0
    task_queue.put('STOP')
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value('b', -1))
    assert result_queue.qsize() == 1
    result_queue.put('STOP')
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.repeats == 0
    assert schedule.last_run() is not None
    assert schedule.success() is True
    assert schedule.next_run < arrow.get(timezone.now()).shift(hours=+1)
    task = fetch(schedule.task)
    assert task is not None
    assert task.success is True
    assert task.result < 0
    # Once schedule with delete
    once_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                    2,
                                    word='django',
                                    schedule_type=Schedule.ONCE,
                                    repeats=-1,
                                    hook='django_q.tests.tasks.result'
                                    )
    assert hasattr(once_schedule, 'pk') is True
    # negative repeats
    always_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                      2,
                                      word='django',
                                      schedule_type=Schedule.DAILY,
                                      repeats=-1,
                                      hook='django_q.tests.tasks.result'
                                      )
    assert hasattr(always_schedule, 'pk') is True
    # Minute schedule
    minute_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                      2,
                                      word='django',
                                      schedule_type=Schedule.MINUTES,
                                      minutes=10)
    assert hasattr(minute_schedule, 'pk') is True
    # Cron schedule
    cron_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                    2,
                                    word='django',
                                    schedule_type=Schedule.CRON,
                                    cron="0 22 * * 1-5")
    assert hasattr(cron_schedule, 'pk') is True
    assert cron_schedule.full_clean() is None
    assert cron_schedule.__str__() == 'django_q.tests.tasks.word_multiply'
    with pytest.raises(ValidationError):
        create_schedule('django_q.tests.tasks.word_multiply',
                        2,
                        word='django',
                        schedule_type=Schedule.CRON,
                        cron="0 22 * * 1-12")
    # All other types
    for t in Schedule.TYPE:
        if t[0] == Schedule.CRON:
            continue
        schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                   2,
                                   word='django',
                                   schedule_type=t[0],
                                   repeats=1,
                                   hook='django_q.tests.tasks.result'
                                   )
        assert schedule is not None
        assert schedule.last_run() is None
        scheduler(broker=broker)
    # via model
    Schedule.objects.create(func='django_q.tests.tasks.word_multiply',
                            args='2',
                            kwargs='word="django"',
                            schedule_type=Schedule.DAILY
                            )
    # scheduler
    scheduler(broker=broker)
    # ONCE schedule should be deleted
    assert Schedule.objects.filter(pk=once_schedule.pk).exists() is False
    # Catch up On
    monkeypatch.setattr(Conf, 'CATCH_UP', True)
    now = timezone.now()
    schedule = create_schedule('django_q.tests.tasks.word_multiply',
                               2,
                               word='catch_up',
                               schedule_type=Schedule.HOURLY,
                               next_run=timezone.now() - timedelta(hours=12),
                               repeats=-1
                               )
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run < now
    # Catch up off
    monkeypatch.setattr(Conf, 'CATCH_UP', False)
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run > now
    # Done
    broker.delete_queue()
Example #19
def _sync(pack):
    """Simulate a package travelling through the cluster."""
    task_queue = Queue()
    result_queue = Queue()
    task = SignedPackage.loads(pack)
    task_queue.put(task)
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    result_queue.put('STOP')
    monitor(result_queue)
    task_queue.close()
    task_queue.join_thread()
    result_queue.close()
    result_queue.join_thread()
    return task['id']
Example #20
def worker(id: str,
           cluster_id: str,
           task_queue: Queue,
           result_queue: Queue,
           timer: Value,
           timeout: int = Conf.TIMEOUT):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :param timeout: number of seconds to wait for a worker to finish.
    :type id: str
    :type cluster_id: str
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """
    name = current_process().name
    logger.info(_(f"{name} ready for work at {current_process().pid}"))
    # Create Worker model
    model = WorkerModel.objects.create(id=id,
                                       cluster_id=cluster_id,
                                       pid=current_process().pid,
                                       task=None)
    task_count = 0
    if timeout is None:
        timeout = -1
    # Start reading the task queue
    for task in iter(
            task_queue.get, "STOP"
    ):  # Task should be provided as task ID which is then retrieved
        result = None
        timer.value = -1  # Idle
        task_count += 1
        # Get the function from the task
        logger.info(_(f'{name} processing [{task["name"]}]'))
        f = task["func"]
        # if it's not an instance try to get it from the string
        if not callable(task["func"]):
            try:
                module, func = f.rsplit(".", 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError) as e:
                result = (e, False)
                if error_reporter:
                    error_reporter.report()
        # We're still going
        if not result:
            close_old_django_connections()
            # Set worker task details
            # model.task = get_task_representation(task)
            # model.save(update_fields=['task'])
            timer_value = task.pop("timeout", timeout)
            # signal execution
            pre_execute.send(sender="django_q", func=f, task=task)
            # execute the payload
            timer.value = timer_value  # Busy
            try:
                res = f(*task["args"], **task["kwargs"])
                result = (res, True)
            except Exception as e:
                result = (f"{e} : {traceback.format_exc()}", False)
                if error_reporter:
                    error_reporter.report()
                if task.get("sync", False):
                    raise
            # Clear task details
            # model.task = None
            # model.save(update_fields=['task'])
        with timer.get_lock():
            # Process result
            task["result"] = result[0]
            task["success"] = result[1]
            task["stopped"] = timezone.now()
            result_queue.put(task)
            timer.value = -1  # Idle
            # Recycle
            if task_count == Conf.RECYCLE or rss_check():
                timer.value = -2  # Recycled
                break
    logger.info(_(f"{name} stopped doing work"))
Example #21
def test_simple_async_report_send(rf, admin_user):
    broker = get_broker()
    assert broker.queue_size() == 0

    request = rf.get("/")
    request.query_params = {}
    request.user = admin_user

    report = LeaseStatisticReport()
    response = report.get_response(request)
    assert response.data
    assert broker.queue_size() == 1

    # Run async task
    task_queue = Queue()
    result_queue = Queue()
    event = Event()
    event.set()
    pusher(task_queue, event, broker=broker)
    assert task_queue.qsize() == 1
    assert queue_size(broker=broker) == 0
    task_queue.put("STOP")
    worker(task_queue, result_queue, Value("f", -1))
    assert task_queue.qsize() == 0
    assert result_queue.qsize() == 1
    result_queue.put("STOP")
    monitor(result_queue)
    assert result_queue.qsize() == 0
    broker.delete_queue()

    # Test report file have been sent via email
    assert len(mail.outbox) == 1
    assert len(mail.outbox[0].attachments) == 1
Example #22
class Sentinel(object):
    def __init__(
        self,
        stop_event,
        start_event,
        cluster_id,
        broker=None,
        timeout=Conf.TIMEOUT,
        start=True,
    ):
        # Make sure we catch signals for the pool
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        self.pid = current_process().pid
        self.cluster_id = cluster_id
        self.parent_pid = get_ppid()
        self.name = current_process().name
        self.broker = broker or get_broker()
        self.reincarnations = 0
        self.tob = timezone.now()
        self.stop_event = stop_event
        self.start_event = start_event
        self.pool_size = Conf.WORKERS
        self.pool = []
        self.timeout = timeout
        self.task_queue = (Queue(
            maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue())
        self.result_queue = Queue()
        self.event_out = Event()
        self.monitor = None
        self.pusher = None
        if start:
            self.start()

    def start(self):
        self.broker.ping()
        self.spawn_cluster()
        self.guard()

    def status(self):
        if not self.start_event.is_set() and not self.stop_event.is_set():
            return Conf.STARTING
        elif self.start_event.is_set() and not self.stop_event.is_set():
            if self.result_queue.empty() and self.task_queue.empty():
                return Conf.IDLE
            return Conf.WORKING
        elif self.stop_event.is_set() and self.start_event.is_set():
            if self.monitor.is_alive() or self.pusher.is_alive() or len(
                    self.pool) > 0:
                return Conf.STOPPING
            return Conf.STOPPED

    def spawn_process(self, target, *args):
        """
        :type target: function or class
        """
        p = Process(target=target, args=args)
        p.daemon = True
        if target == worker:
            p.daemon = Conf.DAEMONIZE_WORKERS
            p.timer = args[2]
            self.pool.append(p)
        p.start()
        return p

    def spawn_pusher(self):
        return self.spawn_process(pusher, self.task_queue, self.event_out,
                                  self.broker)

    def spawn_worker(self):
        self.spawn_process(worker, self.task_queue, self.result_queue,
                           Value("f", -1), self.timeout)

    def spawn_monitor(self):
        return self.spawn_process(monitor, self.result_queue, self.broker)

    def reincarnate(self, process):
        """
        :param process: the process to reincarnate
        :type process: Process or None
        """
        close_old_django_connections()
        if process == self.monitor:
            self.monitor = self.spawn_monitor()
            logger.error(
                _(f"reincarnated monitor {process.name} after sudden death"))
        elif process == self.pusher:
            self.pusher = self.spawn_pusher()
            logger.error(
                _(f"reincarnated pusher {process.name} after sudden death"))
        else:
            self.pool.remove(process)
            self.spawn_worker()
            if process.timer.value == 0:
                # only need to terminate on timeout, otherwise we risk destabilizing the queues
                process.terminate()
                logger.warning(
                    _(f"reincarnated worker {process.name} after timeout"))
            elif int(process.timer.value) == -2:
                logger.info(_(f"recycled worker {process.name}"))
            else:
                logger.error(
                    _(f"reincarnated worker {process.name} after death"))

        self.reincarnations += 1

    def spawn_cluster(self):
        self.pool = []
        Stat(self).save()
        close_old_django_connections()
        # spawn worker pool
        for __ in range(self.pool_size):
            self.spawn_worker()
        # spawn auxiliary
        self.monitor = self.spawn_monitor()
        self.pusher = self.spawn_pusher()
        # set worker cpu affinity if needed
        if psutil and Conf.CPU_AFFINITY:
            set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])

    def guard(self):
        logger.info(
            _(f"{current_process().name} guarding cluster {humanize(self.cluster_id.hex)}"
              ))
        self.start_event.set()
        Stat(self).save()
        logger.info(_(f"Q Cluster {humanize(self.cluster_id.hex)} running."))
        counter = 0
        cycle = Conf.GUARD_CYCLE  # guard loop sleep in seconds
        # Guard loop. Runs at least once
        while not self.stop_event.is_set() or not counter:
            # Check Workers
            for p in self.pool:
                with p.timer.get_lock():
                    # Are you alive?
                    if not p.is_alive() or p.timer.value == 0:
                        self.reincarnate(p)
                        continue
                    # Decrement timer if work is being done
                    if p.timer.value > 0:
                        p.timer.value -= cycle
            # Check Monitor
            if not self.monitor.is_alive():
                self.reincarnate(self.monitor)
            # Check Pusher
            if not self.pusher.is_alive():
                self.reincarnate(self.pusher)
            # Call scheduler once a minute (or so)
            counter += cycle
            if counter >= 30 and Conf.SCHEDULER:
                counter = 0
                scheduler(broker=self.broker)
            # Save current status
            Stat(self).save()
            sleep(cycle)
        self.stop()

    def stop(self):
        Stat(self).save()
        name = current_process().name
        logger.info(_(f"{name} stopping cluster processes"))
        # Stopping pusher
        self.event_out.set()
        # Wait for it to stop
        while self.pusher.is_alive():
            sleep(0.1)
            Stat(self).save()
        # Put poison pills in the queue
        for __ in range(len(self.pool)):
            self.task_queue.put("STOP")
        self.task_queue.close()
        # wait for the task queue to empty
        self.task_queue.join_thread()
        # Wait for all the workers to exit
        while len(self.pool):
            for p in self.pool:
                if not p.is_alive():
                    self.pool.remove(p)
            sleep(0.1)
            Stat(self).save()
        # Finally stop the monitor
        self.result_queue.put("STOP")
        self.result_queue.close()
        # Wait for the result queue to empty
        self.result_queue.join_thread()
        logger.info(_(f"{name} waiting for the monitor."))
        # Wait for everything to close or time out
        count = 0
        if not self.timeout:
            self.timeout = 30
        while self.status() == Conf.STOPPING and count < self.timeout * 10:
            sleep(0.1)
            Stat(self).save()
            count += 1
        # Final status
        Stat(self).save()
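A minimal construction sketch, assuming a configured Django project with django_q installed; it follows the start=False pattern from test_bad_secret above, where the Sentinel is built without spawning any processes.

import uuid
from multiprocessing import Event

s = Sentinel(Event(), Event(), cluster_id=uuid.uuid4(), start=False)
assert s.status() == Conf.STARTING  # neither start_event nor stop_event is set yet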
Example #23
def test_cluster(broker):
    broker.list_key = 'cluster_test:q'
    broker.delete_queue()
    task = async_task('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, broker=broker)
    assert broker.queue_size() == 1
    task_queue = Queue()
    assert task_queue.qsize() == 0
    result_queue = Queue()
    assert result_queue.qsize() == 0
    event = Event()
    event.set()
    # Test push
    pusher(task_queue, event, broker=broker)
    assert task_queue.qsize() == 1
    assert queue_size(broker=broker) == 0
    # Test work
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    assert task_queue.qsize() == 0
    assert result_queue.qsize() == 1
    # Test monitor
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # check result
    assert result(task) == 1506
    broker.delete_queue()
Example #24
class Sentinel(object):
    def __init__(self, stop_event, start_event, broker=None, timeout=Conf.TIMEOUT, start=True):
        # Make sure we catch signals for the pool
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        self.pid = current_process().pid
        self.parent_pid = get_ppid()
        self.name = current_process().name
        self.broker = broker or get_broker()
        self.reincarnations = 0
        self.tob = timezone.now()
        self.stop_event = stop_event
        self.start_event = start_event
        self.pool_size = Conf.WORKERS
        self.pool = []
        self.timeout = timeout
        self.task_queue = Queue(maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue()
        self.result_queue = Queue()
        self.event_out = Event()
        self.monitor = None
        self.pusher = None
        if start:
            self.start()

    def start(self):
        self.broker.ping()
        self.spawn_cluster()
        self.guard()

    def status(self):
        if not self.start_event.is_set() and not self.stop_event.is_set():
            return Conf.STARTING
        elif self.start_event.is_set() and not self.stop_event.is_set():
            if self.result_queue.empty() and self.task_queue.empty():
                return Conf.IDLE
            return Conf.WORKING
        elif self.stop_event.is_set() and self.start_event.is_set():
            if self.monitor.is_alive() or self.pusher.is_alive() or len(self.pool) > 0:
                return Conf.STOPPING
            return Conf.STOPPED

    def spawn_process(self, target, *args):
        """
        :type target: function or class
        """
        p = Process(target=target, args=args)
        p.daemon = True
        if target == worker:
            p.daemon = Conf.DAEMONIZE_WORKERS
            p.timer = args[2]
            self.pool.append(p)
        p.start()
        return p

    def spawn_pusher(self):
        return self.spawn_process(pusher, self.task_queue, self.event_out, self.broker)

    def spawn_worker(self):
        self.spawn_process(worker, self.task_queue, self.result_queue, Value('f', -1), self.timeout)

    def spawn_monitor(self):
        return self.spawn_process(monitor, self.result_queue, self.broker)

    def reincarnate(self, process):
        """
        :param process: the process to reincarnate
        :type process: Process or None
        """
        db.connections.close_all()  # Close any old connections
        if process == self.monitor:
            self.monitor = self.spawn_monitor()
            logger.error(_("reincarnated monitor {} after sudden death").format(process.name))
        elif process == self.pusher:
            self.pusher = self.spawn_pusher()
            logger.error(_("reincarnated pusher {} after sudden death").format(process.name))
        else:
            self.pool.remove(process)
            self.spawn_worker()
            if self.timeout and int(process.timer.value) == 0:
                # only need to terminate on timeout, otherwise we risk destabilizing the queues
                process.terminate()
                logger.warn(_("reincarnated worker {} after timeout").format(process.name))
            elif int(process.timer.value) == -2:
                logger.info(_("recycled worker {}").format(process.name))
            else:
                logger.error(_("reincarnated worker {} after death").format(process.name))

        self.reincarnations += 1

    def spawn_cluster(self):
        self.pool = []
        Stat(self).save()
        db.connection.close()
        # spawn worker pool
        for __ in range(self.pool_size):
            self.spawn_worker()
        # spawn auxiliary
        self.monitor = self.spawn_monitor()
        self.pusher = self.spawn_pusher()
        # set worker cpu affinity if needed
        if psutil and Conf.CPU_AFFINITY:
            set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])

    def guard(self):
        logger.info(_('{} guarding cluster at {}').format(current_process().name, self.pid))
        self.start_event.set()
        Stat(self).save()
        logger.info(_('Q Cluster-{} running.').format(self.parent_pid))
        scheduler(broker=self.broker)
        counter = 0
        cycle = Conf.GUARD_CYCLE  # guard loop sleep in seconds
        # Guard loop. Runs at least once
        while not self.stop_event.is_set() or not counter:
            # Check Workers
            for p in self.pool:
                # Are you alive?
                if not p.is_alive() or (self.timeout and p.timer.value == 0):
                    self.reincarnate(p)
                    continue
                # Decrement timer if work is being done
                if self.timeout and p.timer.value > 0:
                    p.timer.value -= cycle
            # Check Monitor
            if not self.monitor.is_alive():
                self.reincarnate(self.monitor)
            # Check Pusher
            if not self.pusher.is_alive():
                self.reincarnate(self.pusher)
            # Call scheduler once a minute (or so)
            counter += cycle
            if counter >= 30 and Conf.SCHEDULER:
                counter = 0
                scheduler(broker=self.broker)
            # Save current status
            Stat(self).save()
            sleep(cycle)
        self.stop()

    def stop(self):
        Stat(self).save()
        name = current_process().name
        logger.info(_('{} stopping cluster processes').format(name))
        # Stopping pusher
        self.event_out.set()
        # Wait for it to stop
        while self.pusher.is_alive():
            sleep(0.1)
            Stat(self).save()
        # Put poison pills in the queue
        for __ in range(len(self.pool)):
            self.task_queue.put('STOP')
        self.task_queue.close()
        # wait for the task queue to empty
        self.task_queue.join_thread()
        # Wait for all the workers to exit
        while len(self.pool):
            for p in self.pool:
                if not p.is_alive():
                    self.pool.remove(p)
            sleep(0.1)
            Stat(self).save()
        # Finally stop the monitor
        self.result_queue.put('STOP')
        self.result_queue.close()
        # Wait for the result queue to empty
        self.result_queue.join_thread()
        logger.info(_('{} waiting for the monitor.').format(name))
        # Wait for everything to close or time out
        count = 0
        if not self.timeout:
            self.timeout = 30
        while self.status() == Conf.STOPPING and count < self.timeout * 10:
            sleep(0.1)
            Stat(self).save()
            count += 1
        # Final status
        Stat(self).save()
Example #25
def test_cached(broker):
    broker.purge_queue()
    broker.cache.clear()
    group = 'cache_test'
    # queue the tests
    task_id = async_task('math.copysign', 1, -1, cached=True, broker=broker)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.popysign', 1, -1, cached=True, broker=broker, group=group)  # misspelled on purpose: yields the group's single failure
    iter_id = async_iter('math.floor', [i for i in range(10)], cached=True)
    # test wait on cache
    # test wait timeout
    assert result(task_id, wait=10, cached=True) is None
    assert fetch(task_id, wait=10, cached=True) is None
    assert result_group(group, wait=10, cached=True) is None
    assert result_group(group, count=2, wait=10, cached=True) is None
    assert fetch_group(group, wait=10, cached=True) is None
    assert fetch_group(group, count=2, wait=10, cached=True) is None
    # run a single inline cluster
    task_count = 17
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # assert results
    assert result(task_id, wait=500, cached=True) == -1
    assert fetch(task_id, wait=500, cached=True).result == -1
    # make sure it's not in the db backend
    assert fetch(task_id) is None
    # assert group
    assert count_group(group, cached=True) == 6
    assert count_group(group, cached=True, failures=True) == 1
    assert result_group(group, cached=True) == [-1, -1, -1, -1, -1]
    assert len(result_group(group, cached=True, failures=True)) == 6
    assert len(fetch_group(group, cached=True)) == 6
    assert len(fetch_group(group, cached=True, failures=False)) == 5
    delete_group(group, cached=True)
    assert count_group(group, cached=True) is None
    delete_cached(task_id)
    assert result(task_id, cached=True) is None
    assert fetch(task_id, cached=True) is None
    # iter cached
    assert result(iter_id) is None
    assert result(iter_id, cached=True) is not None
    broker.cache.clear()
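Distilled, the cached happy path this test walks through comes down to a few calls. A short sketch mirroring the assertions above, assuming a running qcluster instead of the inline pusher/worker/monitor calls:

# Sketch: cache-backed results (assumes a running qcluster and a
# configured Django cache; nothing is written to the ORM backend).
from django_q.tasks import async_task, result, fetch

task_id = async_task('math.copysign', 1, -1, cached=True)
# wait up to 500 ms for the cached result; returns None on timeout
assert result(task_id, wait=500, cached=True) == -1
# the full task object also comes from the cache ...
task = fetch(task_id, wait=500, cached=True)
# ... and was never saved to the database backend
assert fetch(task_id) is None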
Example #26
class Conf:
    """
    Configuration class
    """

    try:
        conf = settings.Q_CLUSTER
    except AttributeError:
        conf = {}

    # Redis server configuration. Follows standard redis keywords
    REDIS = conf.get("redis", {})

    # Support for Django-Redis connections
    DJANGO_REDIS = conf.get("django_redis", None)

    # Disque broker
    DISQUE_NODES = conf.get("disque_nodes", None)

    # Optional Authentication
    DISQUE_AUTH = conf.get("disque_auth", None)

    # Optional Fast acknowledge
    DISQUE_FASTACK = conf.get("disque_fastack", False)

    # IronMQ broker
    IRON_MQ = conf.get("iron_mq", None)

    # SQS broker
    SQS = conf.get("sqs", None)

    # ORM broker
    ORM = conf.get("orm", None)

    # ORM support for read/write replicas
    HAS_REPLICA = conf.get("has_replica", False)

    # Custom broker class
    BROKER_CLASS = conf.get("broker_class", None)

    # Database broker poll interval in seconds
    POLL = conf.get("poll", 0.2)

    # MongoDB broker
    MONGO = conf.get("mongo", None)
    MONGO_DB = conf.get("mongo_db", None)

    # Name of the cluster or site. For when you run multiple sites on one redis server
    PREFIX = conf.get("name", "default")

    # Log output level
    LOG_LEVEL = conf.get("log_level", "INFO")

    # Maximum number of successful tasks kept in the database. 0 saves everything. -1 saves none
    # Failures are always saved
    SAVE_LIMIT = conf.get("save_limit", 250)

    # Guard loop sleep in seconds. Should be between 0 and 60 seconds.
    GUARD_CYCLE = conf.get("guard_cycle", 0.5)

    # Disable the scheduler
    SCHEDULER = conf.get("scheduler", True)

    # Number of workers in the pool. Default is cpu count if implemented, otherwise 4.
    WORKERS = conf.get("workers", False)
    if not WORKERS:
        try:
            WORKERS = cpu_count()
            # in rare cases this might fail
        except NotImplementedError:
            # try psutil
            if psutil:
                WORKERS = psutil.cpu_count() or 4
            else:
                # sensible default
                WORKERS = 4

    # Option to undaemonize the workers and allow them to spawn child processes
    DAEMONIZE_WORKERS = conf.get("daemonize_workers", True)

    # Maximum number of tasks that each cluster can work on
    QUEUE_LIMIT = conf.get("queue_limit", int(WORKERS)**2)

    # Sets compression of redis packages
    COMPRESSED = conf.get("compress", False)

    # Number of tasks each worker can handle before it gets recycled. Useful for releasing memory
    RECYCLE = conf.get("recycle", 500)

    # The maximum resident set size in kilobytes before a worker will recycle. Useful for limiting memory usage
    # Not available on all platforms
    MAX_RSS = conf.get("max_rss", None)

    # Number of seconds to wait for a worker to finish.
    TIMEOUT = conf.get("timeout", None)

    # Whether to acknowledge unsuccessful tasks.
    # This causes failed tasks to be considered delivered, thereby removing them from
    # the task queue. Defaults to False.
    ACK_FAILURES = conf.get("ack_failures", False)

    # Number of seconds to wait for acknowledgement before retrying a task
    # Only works with brokers that guarantee delivery. Defaults to 60 seconds.
    RETRY = conf.get("retry", 60)

    # Verify that the retry and timeout settings are consistent
    if not TIMEOUT or (TIMEOUT > RETRY):
        warn(
            """Retry and timeout are misconfigured. Set retry larger than timeout;
            failing to do so will cause tasks to be retriggered before completion.
            See https://django-q.readthedocs.io/en/latest/configure.html#retry for details."""
        )

    # Sets the number of tasks the cluster will try to pop off the broker
    # at once, if the broker supports bulk gets.
    BULK = conf.get("bulk", 1)

    # The Django Admin label for this app
    LABEL = conf.get("label", "Django Q")

    # Sets the number of processors for each worker, defaults to all.
    CPU_AFFINITY = conf.get("cpu_affinity", 0)

    # Global sync option for debugging
    SYNC = conf.get("sync", False)

    # The Django cache to use
    CACHE = conf.get("cache", "default")

    # Use the cache as the result backend. Can be True or an integer representing the global cache timeout,
    # e.g. 'cached: 60' will make all results go to the cache and expire in 60 seconds.
    CACHED = conf.get("cached", False)

    # If set to False the scheduler won't execute tasks in the past.
    # Instead it will run once and reschedule the next run in the future. Defaults to True.
    CATCH_UP = conf.get("catch_up", True)

    # Use the secret key for package signing
    # Django itself should raise an error if it's not configured
    SECRET_KEY = settings.SECRET_KEY

    # The redis stats key
    Q_STAT = f"django_q:{PREFIX}:cluster"

    # Optional error reporting setup
    ERROR_REPORTER = conf.get("error_reporter", {})

    # Optional attempt count. Set to 0 for infinite attempts
    MAX_ATTEMPTS = conf.get("max_attempts", 0)

    # OSX doesn't implement qsize because of missing sem_getvalue()
    try:
        QSIZE = Queue().qsize() == 0
    except (NotImplementedError, OSError):
        QSIZE = False

    # Getting the signal names
    SIGNAL_NAMES = dict((getattr(signal, n), n) for n in dir(signal)
                        if n.startswith("SIG") and "_" not in n)

    # Translators: Cluster status descriptions
    STARTING = _("Starting")
    WORKING = _("Working")
    IDLE = _("Idle")
    STOPPED = _("Stopped")
    STOPPING = _("Stopping")

    # to manage workarounds during testing
    TESTING = conf.get("testing", False)
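Every `conf.get` above reads a key from `settings.Q_CLUSTER`. A minimal sketch of a settings block covering the common options (all values illustrative; any omitted key falls back to the defaults shown in the class):

# settings.py -- illustrative values only
Q_CLUSTER = {
    'name': 'default',        # -> Conf.PREFIX
    'workers': 4,             # -> Conf.WORKERS (default: cpu_count())
    'timeout': 30,            # -> Conf.TIMEOUT
    'retry': 60,              # keep retry > timeout to avoid the warning above
    'queue_limit': 16,        # -> Conf.QUEUE_LIMIT (default: workers ** 2)
    'save_limit': 250,        # -> Conf.SAVE_LIMIT
    'cached': 60,             # cache results and expire them after 60 seconds
    'redis': {'host': '127.0.0.1', 'port': 6379, 'db': 0},
}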
Example #27
def test_scheduler(broker, monkeypatch):
    broker.list_key = 'scheduler_test:q'
    broker.delete_queue()
    schedule = create_schedule('math.copysign',
                               1, -1,
                               name='test math',
                               hook='django_q.tests.tasks.result',
                               schedule_type=Schedule.HOURLY,
                               repeats=1)
    assert schedule.last_run() is None
    # check duplicate constraint
    with pytest.raises(IntegrityError):
        schedule = create_schedule('math.copysign',
                                   1, -1,
                                   name='test math',
                                   hook='django_q.tests.tasks.result',
                                   schedule_type=Schedule.HOURLY,
                                   repeats=1)
    # run scheduler
    scheduler(broker=broker)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, broker=broker)
    assert task_queue.qsize() == 1
    assert broker.queue_size() == 0
    task_queue.put('STOP')
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value('b', -1))
    assert result_queue.qsize() == 1
    result_queue.put('STOP')
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.repeats == 0
    assert schedule.last_run() is not None
    assert schedule.success() is True
    # arrow >= 0.14 uses shift() for relative offsets instead of the removed plural replace()
    assert schedule.next_run < arrow.get(timezone.now()).shift(hours=+1)
    task = fetch(schedule.task)
    assert task is not None
    assert task.success is True
    assert task.result < 0
    # Once schedule with delete
    once_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                    2,
                                    word='django',
                                    schedule_type=Schedule.ONCE,
                                    repeats=-1,
                                    hook='django_q.tests.tasks.result'
                                    )
    assert hasattr(once_schedule, 'pk') is True
    # negative repeats
    always_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                      2,
                                      word='django',
                                      schedule_type=Schedule.DAILY,
                                      repeats=-1,
                                      hook='django_q.tests.tasks.result'
                                      )
    assert hasattr(always_schedule, 'pk') is True
    # Minute schedule
    minute_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                      2,
                                      word='django',
                                      schedule_type=Schedule.MINUTES,
                                      minutes=10)
    assert hasattr(minute_schedule, 'pk') is True
    # All other types
    for t in Schedule.TYPE:
        schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                   2,
                                   word='django',
                                   schedule_type=t[0],
                                   repeats=1,
                                   hook='django_q.tests.tasks.result'
                                   )
        assert schedule is not None
        assert schedule.last_run() is None
        scheduler(broker=broker)
    # via model
    Schedule.objects.create(func='django_q.tests.tasks.word_multiply',
                            args='2',
                            kwargs='word="django"',
                            schedule_type=Schedule.DAILY
                            )
    # scheduler
    scheduler(broker=broker)
    # ONCE schedule should be deleted
    assert Schedule.objects.filter(pk=once_schedule.pk).exists() is False
    # Catch up On
    monkeypatch.setattr(Conf, 'CATCH_UP', True)
    now = timezone.now()
    schedule = create_schedule('django_q.tests.tasks.word_multiply',
                               2,
                               word='catch_up',
                               schedule_type=Schedule.HOURLY,
                               next_run=timezone.now() - timedelta(hours=12),
                               repeats=-1
                               )
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run < now
    # Catch up off
    monkeypatch.setattr(Conf, 'CATCH_UP', False)
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run > now
    # Done
    broker.delete_queue()
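Outside of tests, the same recurring task can be declared through the public schedule helper, which is equivalent to the model-based creation above (values illustrative):

# Sketch: declaring a recurring task via the public API
from django_q.models import Schedule
from django_q.tasks import schedule

schedule('django_q.tests.tasks.word_multiply',
         2,
         word='django',
         schedule_type=Schedule.DAILY,
         repeats=-1)  # negative repeats run forever, as in the test above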
Example #28
    def run_synchronously(pack):
        """Method to run a task synchoronously"""

        from django_tenant_schemas_q.cluster import worker, monitor

        task_queue = Queue()
        result_queue = Queue()
        task = SignedPackage.loads(pack)
        task_queue.put(task)
        task_queue.put("STOP")
        worker(task_queue, result_queue, Value("f", -1))
        result_queue.put("STOP")
        monitor(result_queue)
        task_queue.close()
        task_queue.join_thread()
        result_queue.close()
        result_queue.join_thread()
        return task["id"]
Example #29
class Conf(object):
    """
    Configuration class
    """
    try:
        conf = settings.Q_CLUSTER
    except AttributeError:
        conf = {}

    # Redis server configuration. Follows standard redis keywords
    REDIS = conf.get('redis', {})

    # Support for Django-Redis connections
    DJANGO_REDIS = conf.get('django_redis', None)

    # Disque broker
    DISQUE_NODES = conf.get('disque_nodes', None)

    # Optional Authentication
    DISQUE_AUTH = conf.get('disque_auth', None)

    # Optional Fast acknowledge
    DISQUE_FASTACK = conf.get('disque_fastack', False)

    # IronMQ broker
    IRON_MQ = conf.get('iron_mq', None)

    # SQS broker
    SQS = conf.get('sqs', None)

    # ORM broker
    ORM = conf.get('orm', None)

    # Custom broker class
    BROKER_CLASS = conf.get('broker_class', None)

    # Database broker poll interval in seconds
    POLL = conf.get('poll', 0.2)

    # MongoDB broker
    MONGO = conf.get('mongo', None)
    MONGO_DB = conf.get('mongo_db', None)

    # Name of the cluster or site. For when you run multiple sites on one redis server
    PREFIX = conf.get('name', 'default')

    # Log output level
    LOG_LEVEL = conf.get('log_level', 'INFO')

    # Maximum number of successful tasks kept in the database. 0 saves everything. -1 saves none
    # Failures are always saved
    SAVE_LIMIT = conf.get('save_limit', 250)

    # Guard loop sleep in seconds. Should be between 0 and 60 seconds.
    GUARD_CYCLE = conf.get('guard_cycle', 0.5)
    
    # Disable the scheduler
    SCHEDULER = conf.get('scheduler', True)

    # Number of workers in the pool. Default is cpu count if implemented, otherwise 4.
    WORKERS = conf.get('workers', False)
    if not WORKERS:
        try:
            WORKERS = cpu_count()
            # in rare cases this might fail
        except NotImplementedError:
            # try psutil
            if psutil:
                WORKERS = psutil.cpu_count() or 4
            else:
                # sensible default
                WORKERS = 4

    # Option to undaemonize the workers and allow them to spawn child processes
    DAEMONIZE_WORKERS = conf.get('daemonize_workers', True)

    # Maximum number of tasks that each cluster can work on
    QUEUE_LIMIT = conf.get('queue_limit', int(WORKERS) ** 2)

    # Sets compression of redis packages
    COMPRESSED = conf.get('compress', False)

    # Number of tasks each worker can handle before it gets recycled. Useful for releasing memory
    RECYCLE = conf.get('recycle', 500)

    # Number of seconds to wait for a worker to finish.
    TIMEOUT = conf.get('timeout', None)

    # Whether to acknowledge unsuccessful tasks.
    # This causes failed tasks to be considered delivered, thereby removing them from
    # the task queue. Defaults to False.
    ACK_FAILURES = conf.get('ack_failures', False)

    # Number of seconds to wait for acknowledgement before retrying a task
    # Only works with brokers that guarantee delivery. Defaults to 60 seconds.
    RETRY = conf.get('retry', 60)

    # Sets the number of tasks the cluster will try to pop off the broker
    # at once, if the broker supports bulk gets.
    BULK = conf.get('bulk', 1)

    # The Django Admin label for this app
    LABEL = conf.get('label', 'Django Q')

    # Sets the number of processors for each worker, defaults to all.
    CPU_AFFINITY = conf.get('cpu_affinity', 0)

    # Global sync option for debugging
    SYNC = conf.get('sync', False)

    # The Django cache to use
    CACHE = conf.get('cache', 'default')

    # Use the cache as the result backend. Can be True or an integer representing the global cache timeout,
    # e.g. 'cached: 60' will make all results go to the cache and expire in 60 seconds.
    CACHED = conf.get('cached', False)

    # If set to False the scheduler won't execute tasks in the past.
    # Instead it will run once and reschedule the next run in the future. Defaults to True.
    CATCH_UP = conf.get('catch_up', True)

    # Use the secret key for package signing
    # Django itself should raise an error if it's not configured
    SECRET_KEY = settings.SECRET_KEY

    # The redis stats key
    Q_STAT = 'django_q:{}:cluster'.format(PREFIX)

    # Optional error reporting setup
    ERROR_REPORTER = conf.get('error_reporter', {})

    # OSX doesn't implement qsize because of missing sem_getvalue()
    try:
        QSIZE = Queue().qsize() == 0
    except NotImplementedError:
        QSIZE = False

    # Getting the signal names
    SIGNAL_NAMES = dict((getattr(signal, n), n) for n in dir(signal) if n.startswith('SIG') and '_' not in n)

    # Translators: Cluster status descriptions
    STARTING = _('Starting')
    WORKING = _('Working')
    IDLE = _("Idle")
    STOPPED = _('Stopped')
    STOPPING = _('Stopping')

    # to manage workarounds during testing
    TESTING = conf.get('testing', False)