Example #1
def test_bad_secret(broker, monkeypatch):
    broker.list_key = "test_bad_secret:q"
    async_task("math.copysign", 1, -1, broker=broker)
    stop_event = Event()
    stop_event.set()
    start_event = Event()
    cluster_id = uuidlib.uuid4()
    s = Sentinel(stop_event,
                 start_event,
                 cluster_id=cluster_id,
                 broker=broker,
                 start=False)
    Stat(s).save()
    # change the SECRET
    monkeypatch.setattr(Conf, "SECRET_KEY", "OOPS")
    stat = Stat.get_all()
    assert len(stat) == 0
    assert Stat.get(pid=s.parent_pid, cluster_id=cluster_id) is None
    task_queue = Queue()
    pusher(task_queue, stop_event, broker=broker)
    result_queue = Queue()
    task_queue.put("STOP")
    worker(
        task_queue,
        result_queue,
        Value("f", -1),
    )
    assert result_queue.qsize() == 0
    broker.delete_queue()
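What this test demonstrates, in miniature: django-q signs broker payloads and Stat entries with Django's SECRET_KEY, so anything saved under one key fails signature verification under another. A rough sketch using django.core.signing directly (illustrative, not the library's exact code path):

from django.core import signing

saved = signing.dumps({"pid": 123}, key="original-secret")
try:
    signing.loads(saved, key="OOPS")
except signing.BadSignature:
    # this is why Stat.get_all() returns nothing and the pusher
    # fails the queued task after the SECRET_KEY is patched
    pass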
Example #2
def test_simple_async_report_send(rf, admin_user):
    broker = get_broker()
    assert broker.queue_size() == 0

    request = rf.get("/")
    request.query_params = {}
    request.user = admin_user

    report = LeaseStatisticReport()
    response = report.get_response(request)
    assert response.data
    assert broker.queue_size() == 1

    # Run async task
    task_queue = Queue()
    result_queue = Queue()
    event = Event()
    event.set()
    pusher(task_queue, event, broker=broker)
    assert task_queue.qsize() == 1
    assert queue_size(broker=broker) == 0
    task_queue.put("STOP")
    worker(task_queue, result_queue, Value("f", -1))
    assert task_queue.qsize() == 0
    assert result_queue.qsize() == 1
    result_queue.put("STOP")
    monitor(result_queue)
    assert result_queue.qsize() == 0
    broker.delete_queue()

    # Test that the report file has been sent via email
    assert len(mail.outbox) == 1
    assert len(mail.outbox[0].attachments) == 1
Example #3
def test_max_rss(broker, monkeypatch):
    # set up the Sentinel
    broker.list_key = 'test_max_rss_test:q'
    async_task('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    start_event = Event()
    stop_event = Event()
    cluster_id = uuidlib.uuid4()
    # override settings
    monkeypatch.setattr(Conf, 'MAX_RSS', 40000)
    monkeypatch.setattr(Conf, 'WORKERS', 1)
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, cluster_id=cluster_id, broker=broker)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async_task('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    task_queue = Queue()
    result_queue = Queue()
    # push the task
    pusher(task_queue, stop_event, broker=broker)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value('f', -1))
    # check if the work has been done
    assert result_queue.qsize() == 1
    # save_limit test
    monkeypatch.setattr(Conf, 'SAVE_LIMIT', 1)
    result_queue.put('STOP')
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    broker.delete_queue()
Example #4
    def test_post_execute_signal(self, broker):
        broker.list_key = "post_execute_test:q"
        broker.delete_queue()
        self.signal_was_called: bool = False
        self.task: Optional[dict] = None
        self.func = None

        def handler(sender, task, **kwargs):
            self.signal_was_called = True
            self.task = task

        post_execute.connect(handler)
        task_id = async_task("math.copysign", 1, -1, broker=broker)
        task_queue = Queue()
        result_queue = Queue()
        event = Event()
        event.set()
        pusher(task_queue, event, broker=broker)
        task_queue.put("STOP")
        worker(task_queue, result_queue, Value("f", -1))
        result_queue.put("STOP")
        monitor(result_queue, broker)
        broker.delete_queue()
        assert self.signal_was_called is True
        assert self.task.get("id") == task_id
        assert self.task.get("result") == -1
        post_execute.disconnect(handler)
Example #5
def test_cached(broker):
    broker.purge_queue()
    broker.cache.clear()
    group = 'cache_test'
    # queue the tests
    task_id = async_task('math.copysign', 1, -1, cached=True, broker=broker)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.popysign', 1, -1, cached=True, broker=broker, group=group)  # deliberately misspelled to produce one failure
    iter_id = async_iter('math.floor', [i for i in range(10)], cached=True)
    # test wait on cache
    # test wait timeout
    assert result(task_id, wait=10, cached=True) is None
    assert fetch(task_id, wait=10, cached=True) is None
    assert result_group(group, wait=10, cached=True) is None
    assert result_group(group, count=2, wait=10, cached=True) is None
    assert fetch_group(group, wait=10, cached=True) is None
    assert fetch_group(group, count=2, wait=10, cached=True) is None
    # run a single inline cluster
    task_count = 17
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # assert results
    assert result(task_id, wait=500, cached=True) == -1
    assert fetch(task_id, wait=500, cached=True).result == -1
    # make sure it's not in the db backend
    assert fetch(task_id) is None
    # assert group
    assert count_group(group, cached=True) == 6
    assert count_group(group, cached=True, failures=True) == 1
    assert result_group(group, cached=True) == [-1, -1, -1, -1, -1]
    assert len(result_group(group, cached=True, failures=True)) == 6
    assert len(fetch_group(group, cached=True)) == 6
    assert len(fetch_group(group, cached=True, failures=False)) == 5
    delete_group(group, cached=True)
    assert count_group(group, cached=True) is None
    delete_cached(task_id)
    assert result(task_id, cached=True) is None
    assert fetch(task_id, cached=True) is None
    # iter cached
    assert result(iter_id) is None
    assert result(iter_id, cached=True) is not None
    broker.cache.clear()
Example #6
def test_cluster(broker):
    broker.list_key = 'cluster_test:q'
    broker.delete_queue()
    task = async_task('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, broker=broker)
    assert broker.queue_size() == 1
    task_queue = Queue()
    assert task_queue.qsize() == 0
    result_queue = Queue()
    assert result_queue.qsize() == 0
    event = Event()
    event.set()
    # Test push
    pusher(task_queue, event, broker=broker)
    assert task_queue.qsize() == 1
    assert queue_size(broker=broker) == 0
    # Test work
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    assert task_queue.qsize() == 0
    assert result_queue.qsize() == 1
    # Test monitor
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # check result
    assert result(task) == 1506
    broker.delete_queue()
Example #7
def pusher(task_queue: Queue, event: Event, broker: Broker = None):
    """
    Pulls tasks off the broker and puts them in the task queue
    :type broker: brokers.Broker
    :type task_queue: multiprocessing.Queue
    :type event: multiprocessing.Event
    """
    if not broker:
        broker = get_broker()
    logger.info(_(f"{current_process().name} pushing tasks at {current_process().pid}"))
    while True:
        try:
            task_set = broker.dequeue()
        except Exception as e:
            logger.error(e, traceback.format_exc())
            # broker probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task_set:
            for task in task_set:
                ack_id = task[0]
                # unpack the task
                try:
                    task = SignedPackage.loads(task[1])
                except (TypeError, BadSignature) as e:
                    logger.error(e, traceback.format_exc())
                    broker.fail(ack_id)
                    continue
                task["ack_id"] = ack_id
                task_queue.put(task)
            logger.debug(_(f"queueing from {broker.list_key}"))
        if event.is_set():
            break
    logger.info(_(f"{current_process().name} stopped pushing tasks"))
Example #8
def test_cached(broker):
    broker.purge_queue()
    broker.cache.clear()
    group = 'cache_test'
    # queue the tests (pre-1.0 API: `async` was later renamed `async_task`
    # once `async` became a reserved keyword in Python 3.7)
    task_id = async('math.copysign', 1, -1, cached=True, broker=broker)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.popysign', 1, -1, cached=True, broker=broker, group=group)  # deliberately misspelled to produce one failure
    iter_id = async_iter('math.floor', [i for i in range(10)], cached=True)
    # test wait on cache
    # test wait timeout
    assert result(task_id, wait=10, cached=True) is None
    assert fetch(task_id, wait=10, cached=True) is None
    assert result_group(group, wait=10, cached=True) is None
    assert result_group(group, count=2, wait=10, cached=True) is None
    assert fetch_group(group, wait=10, cached=True) is None
    assert fetch_group(group, count=2, wait=10, cached=True) is None
    # run a single inline cluster
    task_count = 17
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # assert results
    assert result(task_id, wait=500, cached=True) == -1
    assert fetch(task_id, wait=500, cached=True).result == -1
    # make sure it's not in the db backend
    assert fetch(task_id) is None
    # assert group
    assert count_group(group, cached=True) == 6
    assert count_group(group, cached=True, failures=True) == 1
    assert result_group(group, cached=True) == [-1, -1, -1, -1, -1]
    assert len(result_group(group, cached=True, failures=True)) == 6
    assert len(fetch_group(group, cached=True)) == 6
    assert len(fetch_group(group, cached=True, failures=False)) == 5
    delete_group(group, cached=True)
    assert count_group(group, cached=True) is None
    delete_cached(task_id)
    assert result(task_id, cached=True) is None
    assert fetch(task_id, cached=True) is None
    # iter cached
    assert result(iter_id) is None
    assert result(iter_id, cached=True) is not None
    broker.cache.clear()
Example #9
def worker(task_queue: Queue,
           result_queue: Queue,
           timer: Value,
           timeout: int = Conf.TIMEOUT):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :param timeout: number of seconds to wait for a worker to finish a task.
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """
    name = current_process().name
    logger.info(_(f"{name} ready for work at {current_process().pid}"))
    task_count = 0
    if timeout is None:
        timeout = -1
    # Start reading the task queue
    for task in iter(task_queue.get, "STOP"):
        result = None
        timer.value = -1  # Idle
        task_count += 1
        # Get the function from the task
        logger.info(_(f'{name} processing [{task["name"]}]'))
        f = task["func"]
        # if it's not an instance try to get it from the string
        if not callable(task["func"]):
            f = pydoc.locate(f)
        close_old_django_connections()
        timer_value = task.pop("timeout", timeout)
        # signal execution
        pre_execute.send(sender="django_q", func=f, task=task)
        # execute the payload
        timer.value = timer_value  # Busy
        try:
            res = f(*task["args"], **task["kwargs"])
            result = (res, True)
        except Exception as e:
            result = (f"{e} : {traceback.format_exc()}", False)
            if error_reporter:
                error_reporter.report()
            if task.get("sync", False):
                raise
        with timer.get_lock():
            # Process result
            task["result"] = result[0]
            task["success"] = result[1]
            task["stopped"] = timezone.now()
            result_queue.put(task)
            timer.value = -1  # Idle
            # Recycle
            if task_count == Conf.RECYCLE or rss_check():
                timer.value = -2  # Recycled
                break
    logger.info(_(f"{name} stopped doing work"))
Example #10
def test_bad_secret(broker, monkeypatch):
    broker.list_key = 'test_bad_secret:q'
    async_task('math.copysign', 1, -1, broker=broker)
    stop_event = Event()
    stop_event.set()
    start_event = Event()
    s = Sentinel(stop_event, start_event, broker=broker, start=False)
    Stat(s).save()
    # change the SECRET
    monkeypatch.setattr(Conf, "SECRET_KEY", "OOPS")
    stat = Stat.get_all()
    assert len(stat) == 0
    assert Stat.get(s.parent_pid) is None
    task_queue = Queue()
    pusher(task_queue, stop_event, broker=broker)
    result_queue = Queue()
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == 0
    broker.delete_queue()
Example #11
def test_acknowledge_failure_override():
    class VerifyAckMockBroker(Broker):
        def __init__(self, *args, **kwargs):
            super(VerifyAckMockBroker, self).__init__(*args, **kwargs)
            self.acknowledgements = {}

        def acknowledge(self, task_id):
            count = self.acknowledgements.get(task_id, 0)
            self.acknowledgements[task_id] = count + 1

    tag = uuid()
    task_fail_ack = {
        "id": tag[1],
        "name": tag[0],
        "ack_id": "test_fail_ack_id",
        "ack_failure": True,
        "func": "math.copysign",
        "args": (1, -1),
        "kwargs": {},
        "started": timezone.now(),
        "stopped": timezone.now(),
        "success": False,
        "result": None,
    }

    tag = uuid()
    task_fail_no_ack = task_fail_ack.copy()
    task_fail_no_ack.update({
        "id": tag[1],
        "name": tag[0],
        "ack_id": "test_fail_no_ack_id"
    })
    del task_fail_no_ack["ack_failure"]

    tag = uuid()
    task_success_ack = task_fail_ack.copy()
    task_success_ack.update({
        "id": tag[1],
        "name": tag[0],
        "ack_id": "test_success_ack_id",
        "success": True,
    })
    del task_success_ack["ack_failure"]

    result_queue = Queue()
    result_queue.put(task_fail_ack)
    result_queue.put(task_fail_no_ack)
    result_queue.put(task_success_ack)
    result_queue.put("STOP")
    broker = VerifyAckMockBroker(list_key="key")

    monitor(result_queue, broker)

    assert broker.acknowledgements.get("test_fail_ack_id") == 1
    assert broker.acknowledgements.get("test_fail_no_ack_id") is None
    assert broker.acknowledgements.get("test_success_ack_id") == 1
Example #12
def test_recycle(broker, monkeypatch):
    # set up the Sentinel
    broker.list_key = "test_recycle_test:q"
    async_task("django_q.tests.tasks.multiply", 2, 2, broker=broker)
    async_task("django_q.tests.tasks.multiply", 2, 2, broker=broker)
    async_task("django_q.tests.tasks.multiply", 2, 2, broker=broker)
    start_event = Event()
    stop_event = Event()
    cluster_id = uuidlib.uuid4()
    # override settings
    monkeypatch.setattr(Conf, "RECYCLE", 2)
    monkeypatch.setattr(Conf, "WORKERS", 1)
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, cluster_id=cluster_id, broker=broker)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async_task("django_q.tests.tasks.multiply", 2, 2, broker=broker)
    async_task("django_q.tests.tasks.multiply", 2, 2, broker=broker)
    task_queue = Queue()
    result_queue = Queue()
    # push two tasks
    pusher(task_queue, stop_event, broker=broker)
    pusher(task_queue, stop_event, broker=broker)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value("f", -1))
    # check if the work has been done
    assert result_queue.qsize() == 2
    # save_limit test
    monkeypatch.setattr(Conf, "SAVE_LIMIT", 1)
    result_queue.put("STOP")
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    broker.delete_queue()
Example #13
def _sync(pack):
    """Simulate a package travelling through the cluster."""
    task_queue = Queue()
    result_queue = Queue()
    task = signing.SignedPackage.loads(pack)
    task_queue.put(task)
    task_queue.put('STOP')
    cluster.worker(task_queue, result_queue, Value('f', -1))
    result_queue.put('STOP')
    cluster.monitor(result_queue)
    return task['id']
Example #14
def _sync(pack):
    """Simulate a package travelling through the cluster."""
    # Python 2.6 is unable to handle this import at the top of the file
    # because it creates a circular dependency between tasks and cluster
    from django_q.cluster import worker, monitor
    task_queue = Queue()
    result_queue = Queue()
    task = SignedPackage.loads(pack)
    task_queue.put(task)
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    result_queue.put('STOP')
    monitor(result_queue)
    return task['id']
Example #15
File: tasks.py Project: Koed00/django-q
def _sync(pack):
    """Simulate a package travelling through the cluster."""
    task_queue = Queue()
    result_queue = Queue()
    task = SignedPackage.loads(pack)
    task_queue.put(task)
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    result_queue.put('STOP')
    monitor(result_queue)
    task_queue.close()
    task_queue.join_thread()
    result_queue.close()
    result_queue.join_thread()
    return task['id']
Example #16
File: tasks.py Project: randlet/django-q
def _sync(pack):
    """Simulate a package travelling through the cluster."""
    from django_q.cluster import worker, monitor
    task_queue = Queue()
    result_queue = Queue()
    task = SignedPackage.loads(pack)
    task_queue.put(task)
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    result_queue.put('STOP')
    monitor(result_queue)
    task_queue.close()
    task_queue.join_thread()
    result_queue.close()
    result_queue.join_thread()
    return task['id']
Example #17
    def run_synchronously(pack):
        """Method to run a task synchoronously"""

        from django_tenant_schemas_q.cluster import worker, monitor

        task_queue = Queue()
        result_queue = Queue()
        task = SignedPackage.loads(pack)
        task_queue.put(task)
        task_queue.put("STOP")
        worker(task_queue, result_queue, Value("f", -1))
        result_queue.put("STOP")
        monitor(result_queue)
        task_queue.close()
        task_queue.join_thread()
        result_queue.close()
        result_queue.join_thread()
        return task["id"]
Example #18
def test_scheduler(broker, monkeypatch):
    broker.list_key = "scheduler_test:q"
    broker.delete_queue()
    schedule = create_schedule(
        "math.copysign",
        1,
        -1,
        name="test math",
        hook="django_q.tests.tasks.result",
        schedule_type=Schedule.HOURLY,
        repeats=1,
    )
    assert schedule.last_run() is None
    # check duplicate constraint
    with pytest.raises(IntegrityError):
        schedule = create_schedule(
            "math.copysign",
            1,
            -1,
            name="test math",
            hook="django_q.tests.tasks.result",
            schedule_type=Schedule.HOURLY,
            repeats=1,
        )
    # run scheduler
    scheduler(broker=broker)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, broker=broker)
    assert task_queue.qsize() == 1
    assert broker.queue_size() == 0
    task_queue.put("STOP")
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value("b", -1))
    assert result_queue.qsize() == 1
    result_queue.put("STOP")
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.repeats == 0
    assert schedule.last_run() is not None
    assert schedule.success() is True
    assert schedule.next_run < arrow.get(timezone.now()).shift(hours=+1)
    task = fetch(schedule.task)
    assert task is not None
    assert task.success is True
    assert task.result < 0
    # Once schedule with delete
    once_schedule = create_schedule(
        "django_q.tests.tasks.word_multiply",
        2,
        word="django",
        schedule_type=Schedule.ONCE,
        repeats=-1,
        hook="django_q.tests.tasks.result",
    )
    assert hasattr(once_schedule, "pk") is True
    # negative repeats
    always_schedule = create_schedule(
        "django_q.tests.tasks.word_multiply",
        2,
        word="django",
        schedule_type=Schedule.DAILY,
        repeats=-1,
        hook="django_q.tests.tasks.result",
    )
    assert hasattr(always_schedule, "pk") is True
    # Minute schedule
    minute_schedule = create_schedule(
        "django_q.tests.tasks.word_multiply",
        2,
        word="django",
        schedule_type=Schedule.MINUTES,
        minutes=10,
    )
    assert hasattr(minute_schedule, "pk") is True
    # Cron schedule
    cron_schedule = create_schedule(
        "django_q.tests.tasks.word_multiply",
        2,
        word="django",
        schedule_type=Schedule.CRON,
        cron="0 22 * * 1-5",
    )
    assert hasattr(cron_schedule, "pk") is True
    assert cron_schedule.full_clean() is None
    assert cron_schedule.__str__() == "django_q.tests.tasks.word_multiply"
    with pytest.raises(ValidationError):
        create_schedule(
            "django_q.tests.tasks.word_multiply",
            2,
            word="django",
            schedule_type=Schedule.CRON,
            cron="0 22 * * 1-12",
        )
    # All other types
    for t in Schedule.TYPE:
        if t[0] == Schedule.CRON:
            continue
        schedule = create_schedule(
            "django_q.tests.tasks.word_multiply",
            2,
            word="django",
            schedule_type=t[0],
            repeats=1,
            hook="django_q.tests.tasks.result",
        )
        assert schedule is not None
        assert schedule.last_run() is None
        scheduler(broker=broker)
    # via model
    Schedule.objects.create(
        func="django_q.tests.tasks.word_multiply",
        args="2",
        kwargs='word="django"',
        schedule_type=Schedule.DAILY,
    )
    # scheduler
    scheduler(broker=broker)
    # ONCE schedule should be deleted
    assert Schedule.objects.filter(pk=once_schedule.pk).exists() is False
    # Catch up On
    monkeypatch.setattr(Conf, "CATCH_UP", True)
    now = timezone.now()
    schedule = create_schedule(
        "django_q.tests.tasks.word_multiply",
        2,
        word="catch_up",
        schedule_type=Schedule.HOURLY,
        next_run=timezone.now() - timedelta(hours=12),
        repeats=-1,
    )
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run < now
    # Catch up off
    monkeypatch.setattr(Conf, "CATCH_UP", False)
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run > now
    # Done
    broker.delete_queue()

    monkeypatch.setattr(Conf, "PREFIX", "some_cluster_name")
    # create a schedule on another cluster
    schedule = create_schedule(
        "math.copysign",
        1,
        -1,
        name="test schedule on a another cluster",
        hook="django_q.tests.tasks.result",
        schedule_type=Schedule.HOURLY,
        cluster="some_other_cluster_name",
        repeats=1,
    )
    # run scheduler
    scheduler(broker=broker)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, broker=broker)

    # queue must be empty
    assert task_queue.qsize() == 0

    monkeypatch.setattr(Conf, "PREFIX", "default")
    # create a schedule on the same cluster
    schedule = create_schedule(
        "math.copysign",
        1,
        -1,
        name="test schedule with no cluster",
        hook="django_q.tests.tasks.result",
        schedule_type=Schedule.HOURLY,
        cluster="default",
        repeats=1,
    )
    # run scheduler
    scheduler(broker=broker)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, broker=broker)

    # queue must contain a task
    assert task_queue.qsize() == 1
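The ValidationError above comes from the cron expression's fifth field: it is day-of-week (0-7), so 1-5 is valid while 1-12 is out of range. A quick check with croniter, which django-q relies on for CRON schedules (a sketch):

from croniter import croniter

assert croniter.is_valid("0 22 * * 1-5")
assert not croniter.is_valid("0 22 * * 1-12")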
Example #19
class Sentinel(object):
    def __init__(
        self,
        stop_event,
        start_event,
        cluster_id,
        broker=None,
        timeout=Conf.TIMEOUT,
        start=True,
    ):
        # Make sure we catch signals for the pool
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        self.pid = current_process().pid
        self.cluster_id = cluster_id
        self.parent_pid = get_ppid()
        self.name = current_process().name
        self.broker = broker or get_broker()
        self.reincarnations = 0
        self.tob = timezone.now()
        self.stop_event = stop_event
        self.start_event = start_event
        self.pool_size = Conf.WORKERS
        self.pool = []
        self.timeout = timeout
        self.task_queue = (Queue(
            maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue())
        self.result_queue = Queue()
        self.event_out = Event()
        self.monitor = None
        self.pusher = None
        if start:
            self.start()

    def start(self):
        self.broker.ping()
        self.spawn_cluster()
        self.guard()

    def status(self):
        if not self.start_event.is_set() and not self.stop_event.is_set():
            return Conf.STARTING
        elif self.start_event.is_set() and not self.stop_event.is_set():
            if self.result_queue.empty() and self.task_queue.empty():
                return Conf.IDLE
            return Conf.WORKING
        elif self.stop_event.is_set() and self.start_event.is_set():
            if self.monitor.is_alive() or self.pusher.is_alive() or len(
                    self.pool) > 0:
                return Conf.STOPPING
            return Conf.STOPPED

    def spawn_process(self, target, *args):
        """
        :type target: function or class
        """
        p = Process(target=target, args=args)
        p.daemon = True
        if target == worker:
            p.daemon = Conf.DAEMONIZE_WORKERS
            p.timer = args[2]
            self.pool.append(p)
        p.start()
        return p

    def spawn_pusher(self):
        return self.spawn_process(pusher, self.task_queue, self.event_out,
                                  self.broker)

    def spawn_worker(self):
        self.spawn_process(worker, self.task_queue, self.result_queue,
                           Value("f", -1), self.timeout)

    def spawn_monitor(self):
        return self.spawn_process(monitor, self.result_queue, self.broker)

    def reincarnate(self, process):
        """
        :param process: the process to reincarnate
        :type process: Process or None
        """
        close_old_django_connections()
        if process == self.monitor:
            self.monitor = self.spawn_monitor()
            logger.error(
                _(f"reincarnated monitor {process.name} after sudden death"))
        elif process == self.pusher:
            self.pusher = self.spawn_pusher()
            logger.error(
                _(f"reincarnated pusher {process.name} after sudden death"))
        else:
            self.pool.remove(process)
            self.spawn_worker()
            if process.timer.value == 0:
                # only need to terminate on timeout, otherwise we risk destabilizing the queues
                process.terminate()
                logger.warning(
                    _(f"reincarnated worker {process.name} after timeout"))
            elif int(process.timer.value) == -2:
                logger.info(_(f"recycled worker {process.name}"))
            else:
                logger.error(
                    _(f"reincarnated worker {process.name} after death"))

        self.reincarnations += 1

    def spawn_cluster(self):
        self.pool = []
        Stat(self).save()
        close_old_django_connections()
        # spawn worker pool
        for __ in range(self.pool_size):
            self.spawn_worker()
        # spawn auxiliary
        self.monitor = self.spawn_monitor()
        self.pusher = self.spawn_pusher()
        # set worker cpu affinity if needed
        if psutil and Conf.CPU_AFFINITY:
            set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])

    def guard(self):
        logger.info(
            _(f"{current_process().name} guarding cluster {humanize(self.cluster_id.hex)}"
              ))
        self.start_event.set()
        Stat(self).save()
        logger.info(_(f"Q Cluster {humanize(self.cluster_id.hex)} running."))
        counter = 0
        cycle = Conf.GUARD_CYCLE  # guard loop sleep in seconds
        # Guard loop. Runs at least once
        while not self.stop_event.is_set() or not counter:
            # Check Workers
            for p in self.pool:
                with p.timer.get_lock():
                    # Are you alive?
                    if not p.is_alive() or p.timer.value == 0:
                        self.reincarnate(p)
                        continue
                    # Decrement timer if work is being done
                    if p.timer.value > 0:
                        p.timer.value -= cycle
            # Check Monitor
            if not self.monitor.is_alive():
                self.reincarnate(self.monitor)
            # Check Pusher
            if not self.pusher.is_alive():
                self.reincarnate(self.pusher)
            # Call scheduler once a minute (or so)
            counter += cycle
            if counter >= 30 and Conf.SCHEDULER:
                counter = 0
                scheduler(broker=self.broker)
            # Save current status
            Stat(self).save()
            sleep(cycle)
        self.stop()

    def stop(self):
        Stat(self).save()
        name = current_process().name
        logger.info(_(f"{name} stopping cluster processes"))
        # Stopping pusher
        self.event_out.set()
        # Wait for it to stop
        while self.pusher.is_alive():
            sleep(0.1)
            Stat(self).save()
        # Put poison pills in the queue
        for __ in range(len(self.pool)):
            self.task_queue.put("STOP")
        self.task_queue.close()
        # wait for the task queue to empty
        self.task_queue.join_thread()
        # Wait for all the workers to exit
        while len(self.pool):
            for p in self.pool:
                if not p.is_alive():
                    self.pool.remove(p)
            sleep(0.1)
            Stat(self).save()
        # Finally stop the monitor
        self.result_queue.put("STOP")
        self.result_queue.close()
        # Wait for the result queue to empty
        self.result_queue.join_thread()
        logger.info(_(f"{name} waiting for the monitor."))
        # Wait for everything to close or time out
        count = 0
        if not self.timeout:
            self.timeout = 30
        while self.status() == Conf.STOPPING and count < self.timeout * 10:
            sleep(0.1)
            Stat(self).save()
            count += 1
        # Final status
        Stat(self).save()
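The recycle and max-rss tests earlier show the standard way to exercise a Sentinel without spawning a real cluster: start it with a timer that fires the stop event. Distilled (a sketch, assuming the django_q imports below):

import threading
import uuid as uuidlib
from multiprocessing import Event

from django_q.brokers import get_broker
from django_q.cluster import Sentinel
from django_q.conf import Conf

broker = get_broker()
start_event, stop_event = Event(), Event()
threading.Timer(3, stop_event.set).start()  # end the guard loop after ~3s
s = Sentinel(stop_event, start_event, cluster_id=uuidlib.uuid4(), broker=broker)
# Sentinel(start=True) blocks in guard() until stop_event fires, then stops
assert s.status() == Conf.STOPPED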
Example #20
def worker(id: str,
           cluster_id: str,
           task_queue: Queue,
           result_queue: Queue,
           timer: Value,
           timeout: int = Conf.TIMEOUT):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :param timeout: number of seconds to wait for a worker to finish a task.
    :type id: str
    :type cluster_id: str
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """
    name = current_process().name
    logger.info(_(f"{name} ready for work at {current_process().pid}"))
    # Create Worker model
    model = WorkerModel.objects.create(id=id,
                                       cluster_id=cluster_id,
                                       pid=current_process().pid,
                                       task=None)
    task_count = 0
    if timeout is None:
        timeout = -1
    # Start reading the task queue
    for task in iter(
            task_queue.get, "STOP"
    ):  # Task should be provided as task ID which is then retrieved
        result = None
        timer.value = -1  # Idle
        task_count += 1
        # Get the function from the task
        logger.info(_(f'{name} processing [{task["name"]}]'))
        f = task["func"]
        # if it's not an instance try to get it from the string
        if not callable(task["func"]):
            try:
                module, func = f.rsplit(".", 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError) as e:
                result = (e, False)
                if error_reporter:
                    error_reporter.report()
        # We're still going
        if not result:
            close_old_django_connections()
            # Set worker task details
            # model.task = get_task_representation(task)
            # model.save(update_fields=['task'])
            timer_value = task.pop("timeout", timeout)
            # signal execution
            pre_execute.send(sender="django_q", func=f, task=task)
            # execute the payload
            timer.value = timer_value  # Busy
            try:
                res = f(*task["args"], **task["kwargs"])
                result = (res, True)
            except Exception as e:
                result = (f"{e} : {traceback.format_exc()}", False)
                if error_reporter:
                    error_reporter.report()
                if task.get("sync", False):
                    raise
            # Clear task details
            # model.task = None
            # model.save(update_fields=['task'])
        with timer.get_lock():
            # Process result
            task["result"] = result[0]
            task["success"] = result[1]
            task["stopped"] = timezone.now()
            result_queue.put(task)
            timer.value = -1  # Idle
            # Recycle
            if task_count == Conf.RECYCLE or rss_check():
                timer.value = -2  # Recycled
                break
    logger.info(_(f"{name} stopped doing work"))
Example #21
def test_enqueue(broker, admin_user):
    broker.list_key = 'cluster_test:q'
    broker.delete_queue()
    a = async_task('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, hook='django_q.tests.test_cluster.assert_result',
                   broker=broker)
    b = async_task('django_q.tests.tasks.count_letters2', WordClass(), hook='django_q.tests.test_cluster.assert_result',
                   broker=broker)
    # unknown argument
    c = async_task('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, 'oneargumentoomany',
                   hook='django_q.tests.test_cluster.assert_bad_result', broker=broker)
    # unknown function
    d = async_task('django_q.tests.tasks.does_not_exist', WordClass(), hook='django_q.tests.test_cluster.assert_bad_result',
                   broker=broker)
    # function without result
    e = async_task('django_q.tests.tasks.countdown', 100000, broker=broker)
    # function as instance
    f = async_task(multiply, 753, 2, hook=assert_result, broker=broker)
    # model as argument
    g = async_task('django_q.tests.tasks.get_task_name', Task(name='John'), broker=broker)
    # args,kwargs, group and broken hook
    h = async_task('django_q.tests.tasks.word_multiply', 2, word='django', hook='fail.me', broker=broker)
    # args unpickle test
    j = async_task('django_q.tests.tasks.get_user_id', admin_user, broker=broker, group='test_j')
    # q_options and save opt_out test
    k = async_task('django_q.tests.tasks.get_user_id', admin_user,
                   q_options={'broker': broker, 'group': 'test_k', 'save': False, 'timeout': 90})
    # test unicode
    assert Task(name='Amalia').__str__() == 'Amalia'
    # check if everything has a task id
    assert isinstance(a, str)
    assert isinstance(b, str)
    assert isinstance(c, str)
    assert isinstance(d, str)
    assert isinstance(e, str)
    assert isinstance(f, str)
    assert isinstance(g, str)
    assert isinstance(h, str)
    assert isinstance(j, str)
    assert isinstance(k, str)
    # run the cluster to execute the tasks
    task_count = 10
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push the tasks
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    # test wait timeout
    assert result(j, wait=10) is None
    assert fetch(j, wait=10) is None
    assert result_group('test_j', wait=10) is None
    assert result_group('test_j', count=2, wait=10) is None
    assert fetch_group('test_j', wait=10) is None
    assert fetch_group('test_j', count=2, wait=10) is None
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # Check the results
    # task a
    result_a = fetch(a)
    assert result_a is not None
    assert result_a.success is True
    assert result(a) == 1506
    # task b
    result_b = fetch(b)
    assert result_b is not None
    assert result_b.success is True
    assert result(b) == 1506
    # task c
    result_c = fetch(c)
    assert result_c is not None
    assert result_c.success is False
    # task d
    result_d = fetch(d)
    assert result_d is not None
    assert result_d.success is False
    # task e
    result_e = fetch(e)
    assert result_e is not None
    assert result_e.success is True
    assert result(e) is None
    # task f
    result_f = fetch(f)
    assert result_f is not None
    assert result_f.success is True
    assert result(f) == 1506
    # task g
    result_g = fetch(g)
    assert result_g is not None
    assert result_g.success is True
    assert result(g) == 'John'
    # task h
    result_h = fetch(h)
    assert result_h is not None
    assert result_h.success is True
    assert result(h) == 12
    # task j
    result_j = fetch(j)
    assert result_j is not None
    assert result_j.success is True
    assert result_j.result == result_j.args[0].id
    # check fetch, result by name
    assert fetch(result_j.name) == result_j
    assert result(result_j.name) == result_j.result
    # groups
    assert result_group('test_j')[0] == result_j.result
    assert result_j.group_result()[0] == result_j.result
    assert result_group('test_j', failures=True)[0] == result_j.result
    assert result_j.group_result(failures=True)[0] == result_j.result
    assert fetch_group('test_j')[0].id == [result_j][0].id
    assert fetch_group('test_j', failures=False)[0].id == [result_j][0].id
    assert count_group('test_j') == 1
    assert result_j.group_count() == 1
    assert count_group('test_j', failures=True) == 0
    assert result_j.group_count(failures=True) == 0
    assert delete_group('test_j') == 1
    assert result_j.group_delete() == 0
    deleted_group = delete_group('test_j', tasks=True)
    assert deleted_group is None or deleted_group[0] == 0  # Django 1.9
    deleted_group = result_j.group_delete(tasks=True)
    assert deleted_group is None or deleted_group[0] == 0  # Django 1.9
    # task k should not have been saved
    assert fetch(k) is None
    assert fetch(k, 100) is None
    assert result(k, 100) is None
    broker.delete_queue()
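Task k above exercises q_options, which bundles django-q's own keyword options so they cannot collide with the task function's kwargs. These two calls are equivalent (names taken from the test):

async_task('django_q.tests.tasks.get_user_id', admin_user,
           broker=broker, group='test_k', save=False, timeout=90)
async_task('django_q.tests.tasks.get_user_id', admin_user,
           q_options={'broker': broker, 'group': 'test_k',
                      'save': False, 'timeout': 90})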
Example #22
File: cluster.py Project: bob-r/django-q
class Sentinel(object):
    def __init__(self, stop_event, start_event, broker=None, timeout=Conf.TIMEOUT, start=True):
        # Make sure we catch signals for the pool
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        self.pid = current_process().pid
        self.parent_pid = get_ppid()
        self.name = current_process().name
        self.broker = broker or get_broker()
        self.reincarnations = 0
        self.tob = timezone.now()
        self.stop_event = stop_event
        self.start_event = start_event
        self.pool_size = Conf.WORKERS
        self.pool = []
        self.timeout = timeout
        self.task_queue = Queue(maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue()
        self.result_queue = Queue()
        self.event_out = Event()
        self.monitor = None
        self.pusher = None
        if start:
            self.start()

    def start(self):
        self.broker.ping()
        self.spawn_cluster()
        self.guard()

    def status(self):
        if not self.start_event.is_set() and not self.stop_event.is_set():
            return Conf.STARTING
        elif self.start_event.is_set() and not self.stop_event.is_set():
            if self.result_queue.empty() and self.task_queue.empty():
                return Conf.IDLE
            return Conf.WORKING
        elif self.stop_event.is_set() and self.start_event.is_set():
            if self.monitor.is_alive() or self.pusher.is_alive() or len(self.pool) > 0:
                return Conf.STOPPING
            return Conf.STOPPED

    def spawn_process(self, target, *args):
        """
        :type target: function or class
        """
        p = Process(target=target, args=args)
        p.daemon = True
        if target == worker:
            p.daemon = Conf.DAEMONIZE_WORKERS
            p.timer = args[2]
            self.pool.append(p)
        p.start()
        return p

    def spawn_pusher(self):
        return self.spawn_process(pusher, self.task_queue, self.event_out, self.broker)

    def spawn_worker(self):
        self.spawn_process(worker, self.task_queue, self.result_queue, Value('f', -1), self.timeout)

    def spawn_monitor(self):
        return self.spawn_process(monitor, self.result_queue, self.broker)

    def reincarnate(self, process):
        """
        :param process: the process to reincarnate
        :type process: Process or None
        """
        db.connections.close_all()  # Close any old connections
        if process == self.monitor:
            self.monitor = self.spawn_monitor()
            logger.error(_("reincarnated monitor {} after sudden death").format(process.name))
        elif process == self.pusher:
            self.pusher = self.spawn_pusher()
            logger.error(_("reincarnated pusher {} after sudden death").format(process.name))
        else:
            self.pool.remove(process)
            self.spawn_worker()
            if self.timeout and int(process.timer.value) == 0:
                # only need to terminate on timeout, otherwise we risk destabilizing the queues
                process.terminate()
                logger.warning(_("reincarnated worker {} after timeout").format(process.name))
            elif int(process.timer.value) == -2:
                logger.info(_("recycled worker {}").format(process.name))
            else:
                logger.error(_("reincarnated worker {} after death").format(process.name))

        self.reincarnations += 1

    def spawn_cluster(self):
        self.pool = []
        Stat(self).save()
        db.connection.close()
        # spawn worker pool
        for __ in range(self.pool_size):
            self.spawn_worker()
        # spawn auxiliary
        self.monitor = self.spawn_monitor()
        self.pusher = self.spawn_pusher()
        # set worker cpu affinity if needed
        if psutil and Conf.CPU_AFFINITY:
            set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])

    def guard(self):
        logger.info(_('{} guarding cluster at {}').format(current_process().name, self.pid))
        self.start_event.set()
        Stat(self).save()
        logger.info(_('Q Cluster-{} running.').format(self.parent_pid))
        scheduler(broker=self.broker)
        counter = 0
        cycle = Conf.GUARD_CYCLE  # guard loop sleep in seconds
        # Guard loop. Runs at least once
        while not self.stop_event.is_set() or not counter:
            # Check Workers
            for p in self.pool:
                # Are you alive?
                if not p.is_alive() or (self.timeout and p.timer.value == 0):
                    self.reincarnate(p)
                    continue
                # Decrement timer if work is being done
                if self.timeout and p.timer.value > 0:
                    p.timer.value -= cycle
            # Check Monitor
            if not self.monitor.is_alive():
                self.reincarnate(self.monitor)
            # Check Pusher
            if not self.pusher.is_alive():
                self.reincarnate(self.pusher)
            # Call scheduler once a minute (or so)
            counter += cycle
            if counter >= 30 and Conf.SCHEDULER:
                counter = 0
                scheduler(broker=self.broker)
            # Save current status
            Stat(self).save()
            sleep(cycle)
        self.stop()

    def stop(self):
        Stat(self).save()
        name = current_process().name
        logger.info(_('{} stopping cluster processes').format(name))
        # Stopping pusher
        self.event_out.set()
        # Wait for it to stop
        while self.pusher.is_alive():
            sleep(0.1)
            Stat(self).save()
        # Put poison pills in the queue
        for __ in range(len(self.pool)):
            self.task_queue.put('STOP')
        self.task_queue.close()
        # wait for the task queue to empty
        self.task_queue.join_thread()
        # Wait for all the workers to exit
        while len(self.pool):
            for p in self.pool:
                if not p.is_alive():
                    self.pool.remove(p)
            sleep(0.1)
            Stat(self).save()
        # Finally stop the monitor
        self.result_queue.put('STOP')
        self.result_queue.close()
        # Wait for the result queue to empty
        self.result_queue.join_thread()
        logger.info(_('{} waiting for the monitor.').format(name))
        # Wait for everything to close or time out
        count = 0
        if not self.timeout:
            self.timeout = 30
        while self.status() == Conf.STOPPING and count < self.timeout * 10:
            sleep(0.1)
            Stat(self).save()
            count += 1
        # Final status
        Stat(self).save()
Example #23
def test_scheduler(broker, monkeypatch):
    broker.list_key = 'scheduler_test:q'
    broker.delete_queue()
    schedule = create_schedule('math.copysign',
                               1, -1,
                               name='test math',
                               hook='django_q.tests.tasks.result',
                               schedule_type=Schedule.HOURLY,
                               repeats=1)
    assert schedule.last_run() is None
    # check duplicate constraint
    with pytest.raises(IntegrityError):
        schedule = create_schedule('math.copysign',
                                   1, -1,
                                   name='test math',
                                   hook='django_q.tests.tasks.result',
                                   schedule_type=Schedule.HOURLY,
                                   repeats=1)
    # run scheduler
    scheduler(broker=broker)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, broker=broker)
    assert task_queue.qsize() == 1
    assert broker.queue_size() == 0
    task_queue.put('STOP')
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value('b', -1))
    assert result_queue.qsize() == 1
    result_queue.put('STOP')
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.repeats == 0
    assert schedule.last_run() is not None
    assert schedule.success() is True
    assert schedule.next_run < arrow.get(timezone.now()).shift(hours=+1)
    task = fetch(schedule.task)
    assert task is not None
    assert task.success is True
    assert task.result < 0
    # Once schedule with delete
    once_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                    2,
                                    word='django',
                                    schedule_type=Schedule.ONCE,
                                    repeats=-1,
                                    hook='django_q.tests.tasks.result'
                                    )
    assert hasattr(once_schedule, 'pk') is True
    # negative repeats
    always_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                      2,
                                      word='django',
                                      schedule_type=Schedule.DAILY,
                                      repeats=-1,
                                      hook='django_q.tests.tasks.result'
                                      )
    assert hasattr(always_schedule, 'pk') is True
    # Minute schedule
    minute_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                      2,
                                      word='django',
                                      schedule_type=Schedule.MINUTES,
                                      minutes=10)
    assert hasattr(minute_schedule, 'pk') is True
    # Cron schedule
    cron_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                    2,
                                    word='django',
                                    schedule_type=Schedule.CRON,
                                    cron="0 22 * * 1-5")
    assert hasattr(cron_schedule, 'pk') is True
    assert cron_schedule.full_clean() is None
    assert cron_schedule.__str__() == 'django_q.tests.tasks.word_multiply'
    with pytest.raises(ValidationError):
        create_schedule('django_q.tests.tasks.word_multiply',
                        2,
                        word='django',
                        schedule_type=Schedule.CRON,
                        cron="0 22 * * 1-12")
    # All other types
    for t in Schedule.TYPE:
        if t[0] == Schedule.CRON:
            continue
        schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                   2,
                                   word='django',
                                   schedule_type=t[0],
                                   repeats=1,
                                   hook='django_q.tests.tasks.result'
                                   )
        assert schedule is not None
        assert schedule.last_run() is None
        scheduler(broker=broker)
    # via model
    Schedule.objects.create(func='django_q.tests.tasks.word_multiply',
                            args='2',
                            kwargs='word="django"',
                            schedule_type=Schedule.DAILY
                            )
    # scheduler
    scheduler(broker=broker)
    # ONCE schedule should be deleted
    assert Schedule.objects.filter(pk=once_schedule.pk).exists() is False
    # Catch up On
    monkeypatch.setattr(Conf, 'CATCH_UP', True)
    now = timezone.now()
    schedule = create_schedule('django_q.tests.tasks.word_multiply',
                               2,
                               word='catch_up',
                               schedule_type=Schedule.HOURLY,
                               next_run=timezone.now() - timedelta(hours=12),
                               repeats=-1
                               )
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run < now
    # Catch up off
    monkeypatch.setattr(Conf, 'CATCH_UP', False)
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run > now
    # Done
    broker.delete_queue()
Example #24
def test_scheduler(broker, monkeypatch):
    broker.list_key = 'scheduler_test:q'
    broker.delete_queue()
    schedule = create_schedule('math.copysign',
                               1, -1,
                               name='test math',
                               hook='django_q.tests.tasks.result',
                               schedule_type=Schedule.HOURLY,
                               repeats=1)
    assert schedule.last_run() is None
    # check duplicate constraint
    with pytest.raises(IntegrityError):
        schedule = create_schedule('math.copysign',
                                   1, -1,
                                   name='test math',
                                   hook='django_q.tests.tasks.result',
                                   schedule_type=Schedule.HOURLY,
                                   repeats=1)
    # run scheduler
    scheduler(broker=broker)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, broker=broker)
    assert task_queue.qsize() == 1
    assert broker.queue_size() == 0
    task_queue.put('STOP')
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value('b', -1))
    assert result_queue.qsize() == 1
    result_queue.put('STOP')
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.repeats == 0
    assert schedule.last_run() is not None
    assert schedule.success() is True
    assert schedule.next_run < arrow.get(timezone.now()).shift(hours=+1)
    task = fetch(schedule.task)
    assert task is not None
    assert task.success is True
    assert task.result < 0
    # Once schedule with delete
    once_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                    2,
                                    word='django',
                                    schedule_type=Schedule.ONCE,
                                    repeats=-1,
                                    hook='django_q.tests.tasks.result'
                                    )
    assert hasattr(once_schedule, 'pk') is True
    # negative repeats
    always_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                      2,
                                      word='django',
                                      schedule_type=Schedule.DAILY,
                                      repeats=-1,
                                      hook='django_q.tests.tasks.result'
                                      )
    assert hasattr(always_schedule, 'pk') is True
    # Minute schedule
    minute_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                      2,
                                      word='django',
                                      schedule_type=Schedule.MINUTES,
                                      minutes=10)
    assert hasattr(minute_schedule, 'pk') is True
    # All other types
    for t in Schedule.TYPE:
        schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                   2,
                                   word='django',
                                   schedule_type=t[0],
                                   repeats=1,
                                   hook='django_q.tests.tasks.result'
                                   )
        assert schedule is not None
        assert schedule.last_run() is None
        scheduler(broker=broker)
    # via model
    Schedule.objects.create(func='django_q.tests.tasks.word_multiply',
                            args='2',
                            kwargs='word="django"',
                            schedule_type=Schedule.DAILY
                            )
    # scheduler
    scheduler(broker=broker)
    # ONCE schedule should be deleted
    assert Schedule.objects.filter(pk=once_schedule.pk).exists() is False
    # Catch up On
    monkeypatch.setattr(Conf, 'CATCH_UP', True)
    now = timezone.now()
    schedule = create_schedule('django_q.tests.tasks.word_multiply',
                               2,
                               word='catch_up',
                               schedule_type=Schedule.HOURLY,
                               next_run=timezone.now() - timedelta(hours=12),
                               repeats=-1
                               )
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run < now
    # Catch up off
    monkeypatch.setattr(Conf, 'CATCH_UP', False)
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run > now
    # Done
    broker.delete_queue()