Example #1
File: cluster.py Project: lucemia/django-q
def set_cpu_affinity(n, process_ids, actual=not Conf.TESTING):
    """
    Sets the cpu affinity for the supplied processes.
    Requires the optional psutil module.
    :param int n: number of cpus to assign to each process
    :param list process_ids: a list of pids
    :param bool actual: Test workaround for Travis not supporting cpu affinity
    """
    # check if we have the psutil module
    if not psutil:
        logger.warning('Skipping cpu affinity because psutil was not found.')
        return
    # check if the platform supports cpu_affinity
    if actual and not hasattr(psutil.Process(process_ids[0]), 'cpu_affinity'):
        logger.warning('Faking cpu affinity because it is not supported on this platform')
        actual = False
    # get the available processors
    cpu_list = list(range(psutil.cpu_count()))
    # an affinity of 0, or one >= the cpu count, means no affinity
    if not n or n >= len(cpu_list):
        return
    # spread the workers over the available processors.
    index = 0
    for pid in process_ids:
        affinity = []
        for k in range(n):
            if index == len(cpu_list):
                index = 0
            affinity.append(cpu_list[index])
            index += 1
        if psutil.pid_exists(pid):
            p = psutil.Process(pid)
            if actual:
                p.cpu_affinity(affinity)
            logger.info(_('{} will use cpu {}').format(pid, affinity))
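
A note on the distribution: the loop hands each pid a block of n cpus and wraps around when the cpu list runs out. A minimal standalone sketch of just that round-robin logic, with an illustrative function name that is not part of django-q:

def spread_affinity(n, process_ids, cpu_count):
    # replicate the round-robin assignment from set_cpu_affinity above
    index = 0
    assignments = {}
    for pid in process_ids:
        affinity = []
        for _ in range(n):
            if index == cpu_count:
                index = 0
            affinity.append(index)
            index += 1
        assignments[pid] = affinity
    return assignments

# four workers, two cpus each, on a four-core machine:
# -> {11: [0, 1], 12: [2, 3], 13: [0, 1], 14: [2, 3]}
print(spread_affinity(2, [11, 12, 13, 14], 4))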
Example #2
def test_mongo(monkeypatch):
    monkeypatch.setattr(Conf, 'MONGO', {'host': '127.0.0.1', 'port': 27017})
    # check broker
    broker = get_broker(list_key='mongo_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    tasks = []
    for i in range(5):
        tasks.append(broker.dequeue()[0])
    assert broker.lock_size() == 5
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test lock size
    assert broker.lock_size() == 0
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    broker.delete_queue()
    assert broker.queue_size() == 0
    # check close and autoreconnect
    broker.close()
    assert broker.ping() is True
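
The test walks the whole broker contract: enqueue returns a task id, dequeue returns a list of (ack_id, payload) tuples, and an unacknowledged task reappears after Conf.RETRY seconds. A minimal round trip, assuming a reachable MongoDB and a configured Django settings module:

from django_q.brokers import get_broker

broker = get_broker(list_key='example_q')  # list_key names the queue
broker.enqueue('payload')
ack_id, payload = broker.dequeue()[0]      # dequeue returns a list of (ack_id, payload) tuples
broker.acknowledge(ack_id)                 # acknowledge, so the task is not retried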
Example #3
def test_cached(broker):
    broker.purge_queue()
    broker.cache.clear()
    group = 'cache_test'
    # queue the tests
    task_id = async('math.copysign', 1, -1, cached=True, broker=broker)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.popysign', 1, -1, cached=True, broker=broker, group=group)  # deliberately misspelled: supplies the group's single failure
    iter_id = async_iter('math.floor', [i for i in range(10)], cached=True)
    # test wait on cache
    # test wait timeout
    assert result(task_id, wait=10, cached=True) is None
    assert fetch(task_id, wait=10, cached=True) is None
    assert result_group(group, wait=10, cached=True) is None
    assert result_group(group, count=2, wait=10, cached=True) is None
    assert fetch_group(group, wait=10, cached=True) is None
    assert fetch_group(group, count=2, wait=10, cached=True) is None
    # run a single inline cluster
    task_count = 17
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # assert results
    assert result(task_id, wait=500, cached=True) == -1
    assert fetch(task_id, wait=500, cached=True).result == -1
    # make sure it's not in the db backend
    assert fetch(task_id) is None
    # assert group
    assert count_group(group, cached=True) == 6
    assert count_group(group, cached=True, failures=True) == 1
    assert result_group(group, cached=True) == [-1, -1, -1, -1, -1]
    assert len(result_group(group, cached=True, failures=True)) == 6
    assert len(fetch_group(group, cached=True)) == 6
    assert len(fetch_group(group, cached=True, failures=False)) == 5
    delete_group(group, cached=True)
    assert count_group(group, cached=True) is None
    delete_cached(task_id)
    assert result(task_id, cached=True) is None
    assert fetch(task_id, cached=True) is None
    # iter cached
    assert result(iter_id) is None
    assert result(iter_id, cached=True) is not None
    broker.cache.clear()
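
Two notes on this example: 'math.popysign' is the deliberate failure flagged above, and async is the pre-1.0 django-q API; it was renamed async_task once async became a Python keyword. With cached=True the result is written to the cache backend instead of the database, so result and fetch need cached=True as well. A hedged sketch against the renamed API:

from django_q.tasks import async_task, fetch, result  # 'async' was renamed 'async_task' in django-q 1.0

task_id = async_task('math.copysign', 1, -1, cached=True)
value = result(task_id, wait=500, cached=True)  # wait is in milliseconds
task = fetch(task_id, wait=500, cached=True)    # full Task object, from the cache only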
Example #4
def test_mongo(monkeypatch):
    monkeypatch.setattr(Conf, 'MONGO', {'host': '127.0.0.1', 'port': 27017})
    # check broker
    broker = get_broker(list_key='mongo_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    tasks = []
    for i in range(5):
        tasks.append(broker.dequeue()[0])
    assert broker.lock_size() == 5
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test lock size
    assert broker.lock_size() == 0
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    broker.delete_queue()
    assert broker.queue_size() == 0
Example #5
File: test_monitor.py Project: za/django-q
def test_info():
    info()
    do_sync()
    info()
    for _ in range(24):
        do_sync()
    info()
Example #6
def test_disque(monkeypatch):
    monkeypatch.setattr(Conf, 'DISQUE_NODES', ['127.0.0.1:7711'])
    # check broker
    broker = get_broker(list_key='disque_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    monkeypatch.setattr(Conf, 'DISQUE_FASTACK', True)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.delete_queue()
    assert broker.queue_size() == 0
    # check close and autoreconnect
    broker.close()
    assert broker.ping() is True
    # connection test
    monkeypatch.setattr(Conf, 'DISQUE_NODES',
                        ['127.0.0.1:7798', '127.0.0.1:7799'])
    with pytest.raises(redis.exceptions.ConnectionError):
        broker.get_connection()
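
Outside of tests, the monkeypatched Conf attributes come from the Q_CLUSTER dictionary in Django settings. A hedged sketch of the equivalent static configuration, with values taken from the test and an illustrative cluster name:

# settings.py
Q_CLUSTER = {
    'name': 'disque_example',            # illustrative name
    'disque_nodes': ['127.0.0.1:7711'],
    'disque_fastack': True,              # use Disque's fast acknowledgement mode
    'retry': 1,                          # seconds before an unacknowledged task is requeued
    'bulk': 5,                           # tasks reserved per dequeue call
}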
Example #7
def test_iter(broker):
    broker.purge_queue()
    broker.cache.clear()
    it = [i for i in range(10)]
    it2 = [(1, -1), (2, -1), (3, -4), (5, 6)]
    it3 = (1, 2, 3, 4, 5)
    t = async_iter('math.floor', it, sync=True)
    t2 = async_iter('math.copysign', it2, sync=True)
    t3 = async_iter('math.floor', it3, sync=True)
    t4 = async_iter('math.floor', (1,), sync=True)
    result_t = result(t)
    assert result_t is not None
    task_t = fetch(t)
    assert task_t.result == result_t
    assert result(t2) is not None
    assert result(t3) is not None
    assert result(t4)[0] == 1
    # test iter class
    i = Iter('math.copysign', sync=True, cached=True)
    i.append(1, -1)
    i.append(2, -1)
    i.append(3, -4)
    i.append(5, 6)
    assert i.started is False
    assert i.length() == 4
    assert i.run() is not None
    assert len(i.result()) == 4
    assert len(i.fetch().result) == 4
    i.append(1, -7)
    assert i.result() is None
    i.run()
    assert len(i.result()) == 5
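
async_iter maps a single function over every item and stores the collected results under one task id; Iter is the class-based equivalent that lets you append argument sets before calling run(). A hedged sketch:

from django_q.tasks import async_iter, result

# sync=True executes inline instead of waiting for a cluster
task_id = async_iter('math.floor', [1.5, 2.5, 3.5], sync=True)
assert result(task_id) == [1, 2, 3]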
Example #8
def test_ironmq(monkeypatch):
    monkeypatch.setattr(
        Conf, 'IRON_MQ', {
            'token': os.getenv('IRON_MQ_TOKEN'),
            'project_id': os.getenv('IRON_MQ_PROJECT_ID')
        })
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    # initialize the queue
    broker.enqueue('test')
    # clear before we start
    broker.purge_queue()
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    # monkeypatch.setattr(Conf, 'RETRY', 1)
    # broker.enqueue('test')
    # assert broker.dequeue() is not None
    # sleep(3)
    # assert broker.dequeue() is not None
    # task = broker.dequeue()[0]
    # assert len(task) > 0
    # broker.acknowledge(task[0])
    # sleep(3)
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    assert broker.dequeue() is None
    broker.delete_queue()
    # check close and autoreconnect
    broker.close()
    assert broker.ping() is True
Example #9
def canceled_sqs(monkeypatch):
    monkeypatch.setattr(Conf, 'SQS', {'aws_region': os.getenv('AWS_REGION'),
                                      'aws_access_key_id': os.getenv('AWS_ACCESS_KEY_ID'),
                                      'aws_secret_access_key': os.getenv('AWS_SECRET_ACCESS_KEY')})
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    sleep(2)
    # Sometimes SQS is not linear
    task = broker.dequeue()
    if not task:
        pytest.skip('SQS being weird')
    task = task[0]
    assert len(task) > 0
    broker.acknowledge(task[0])
    sleep(2)
    # delete job
    monkeypatch.setattr(Conf, 'RETRY', 60)
    broker.enqueue('test')
    sleep(1)
    task = broker.dequeue()
    if not task:
        pytest.skip('SQS being weird')
    task_id = task[0][0]
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    broker.enqueue('test')
    task = None  # reset the stale task from the delete test above
    while task is None:
        dequeued = broker.dequeue()  # SQS may take a moment to surface the message
        if dequeued:
            task = dequeued[0]
    broker.fail(task[0])
    # bulk test
    for i in range(10):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 12)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    assert broker.lock_size() == 0
    # delete queue
    broker.enqueue('test')
    broker.purge_queue()
    broker.delete_queue()
Example #10
def test_orm(monkeypatch):
    monkeypatch.setattr(Conf, 'ORM', 'default')
    # check broker
    broker = get_broker(list_key='orm_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    tasks = broker.dequeue()
    assert broker.lock_size() == Conf.BULK
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test lock size
    assert broker.lock_size() == 0
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.delete_queue()
    assert broker.queue_size() == 0
    # check close and autoreconnect
    broker.close()
    assert broker.ping() is True
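
With Conf.BULK set, a single dequeue() reserves up to that many tasks at once and lock_size() counts the reserved-but-unacknowledged ones, which is what the assertions above verify. A hedged sketch of the same flow:

from django_q.brokers import get_broker
from django_q.conf import Conf

Conf.BULK = 5                         # normally Q_CLUSTER['bulk'] in settings
broker = get_broker(list_key='bulk_example')
for _ in range(5):
    broker.enqueue('payload')
tasks = broker.dequeue()              # up to 5 (ack_id, payload) tuples in one call
assert broker.lock_size() == 5        # reserved but not yet acknowledged
for ack_id, payload in tasks:
    broker.acknowledge(ack_id)
assert broker.lock_size() == 0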
Example #11
def test_disque(monkeypatch):
    monkeypatch.setattr(Conf, 'DISQUE_NODES', ['127.0.0.1:7711'])
    # check broker
    broker = get_broker(list_key='disque_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    monkeypatch.setattr(Conf, 'DISQUE_FASTACK', True)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.delete_queue()
    assert broker.queue_size() == 0
    # connection test
    monkeypatch.setattr(Conf, 'DISQUE_NODES', ['127.0.0.1:7798', '127.0.0.1:7799'])
    with pytest.raises(redis.exceptions.ConnectionError):
        broker.get_connection()
Example #12
def test_orm(monkeypatch):
    monkeypatch.setattr(Conf, 'ORM', 'default')
    # check broker
    broker = get_broker(list_key='orm_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    tasks = broker.dequeue()
    assert broker.lock_size() == Conf.BULK
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test lock size
    assert broker.lock_size() == 0
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.delete_queue()
    assert broker.queue_size() == 0
Example #13
def test_ironmq(monkeypatch):
    monkeypatch.setattr(Conf, 'IRON_MQ', {'token': os.getenv('IRON_MQ_TOKEN'),
                                          'project_id': os.getenv('IRON_MQ_PROJECT_ID')})
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    # initialize the queue
    broker.enqueue('test')
    # clear before we start
    broker.purge_queue()
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    # monkeypatch.setattr(Conf, 'RETRY', 1)
    # broker.enqueue('test')
    # assert broker.dequeue() is not None
    # sleep(3)
    # assert broker.dequeue() is not None
    # task = broker.dequeue()[0]
    # assert len(task) > 0
    # broker.acknowledge(task[0])
    # sleep(3)
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    assert broker.dequeue() is None
    broker.delete_queue()
Example #14
File: cluster.py Project: lucemia/django-q
def spawn_cluster(self):
    self.pool = []
    Stat(self).save()
    db.connection.close()
    # spawn worker pool
    for __ in range(self.pool_size):
        self.spawn_worker()
    # spawn auxiliary
    self.monitor = self.spawn_monitor()
    self.pusher = self.spawn_pusher()
    # set worker cpu affinity if needed
    if psutil and Conf.CPU_AFFINITY:
        set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])
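
Conf.CPU_AFFINITY and the pool size come from the same Q_CLUSTER settings dictionary. A hedged sketch of a configuration that would exercise the affinity branch above (psutil must be installed; the name is illustrative):

# settings.py
Q_CLUSTER = {
    'name': 'mycluster',    # illustrative name
    'workers': 4,           # pool_size: number of worker processes to spawn
    'cpu_affinity': 1,      # cpus per worker, applied via set_cpu_affinity
}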
Example #15
File: cluster.py Project: lucemia/django-q
def stop(self):
    Stat(self).save()
    name = current_process().name
    logger.info(_('{} stopping cluster processes').format(name))
    # Stopping pusher
    self.event_out.set()
    # Wait for it to stop
    while self.pusher.is_alive():
        sleep(0.1)
        Stat(self).save()
    # Put poison pills in the queue
    for __ in range(len(self.pool)):
        self.task_queue.put('STOP')
    self.task_queue.close()
    # wait for the task queue to empty
    self.task_queue.join_thread()
    # Wait for all the workers to exit
    while len(self.pool):
        for p in self.pool:
            if not p.is_alive():
                self.pool.remove(p)
        sleep(0.1)
        Stat(self).save()
    # Finally stop the monitor
    self.result_queue.put('STOP')
    self.result_queue.close()
    # Wait for the result queue to empty
    self.result_queue.join_thread()
    logger.info(_('{} waiting for the monitor.').format(name))
    # Wait for everything to close or time out
    count = 0
    if not self.timeout:
        self.timeout = 30
    while self.status() == Conf.STOPPING and count < self.timeout * 10:
        sleep(0.1)
        Stat(self).save()
        count += 1
    # Final status
    Stat(self).save()
Example #16
def test_async(broker, admin_user):
    broker.list_key = 'cluster_test:q'
    broker.delete_queue()
    a = async('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, hook='django_q.tests.test_cluster.assert_result',
              broker=broker)
    b = async('django_q.tests.tasks.count_letters2', WordClass(), hook='django_q.tests.test_cluster.assert_result',
              broker=broker)
    # unknown argument
    c = async('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, 'oneargumentoomany',
              hook='django_q.tests.test_cluster.assert_bad_result', broker=broker)
    # unknown function
    d = async('django_q.tests.tasks.does_not_exist', WordClass(), hook='django_q.tests.test_cluster.assert_bad_result',
              broker=broker)
    # function without result
    e = async('django_q.tests.tasks.countdown', 100000, broker=broker)
    # function as instance
    f = async(multiply, 753, 2, hook=assert_result, broker=broker)
    # model as argument
    g = async('django_q.tests.tasks.get_task_name', Task(name='John'), broker=broker)
    # args, kwargs, group and broken hook
    h = async('django_q.tests.tasks.word_multiply', 2, word='django', hook='fail.me', broker=broker)
    # args unpickle test
    j = async('django_q.tests.tasks.get_user_id', admin_user, broker=broker, group='test_j')
    # q_options and save opt_out test
    k = async('django_q.tests.tasks.get_user_id', admin_user,
              q_options={'broker': broker, 'group': 'test_k', 'save': False, 'timeout': 90})
    # check if everything has a task id
    assert isinstance(a, str)
    assert isinstance(b, str)
    assert isinstance(c, str)
    assert isinstance(d, str)
    assert isinstance(e, str)
    assert isinstance(f, str)
    assert isinstance(g, str)
    assert isinstance(h, str)
    assert isinstance(j, str)
    assert isinstance(k, str)
    # run the cluster to execute the tasks
    task_count = 10
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push the tasks
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    # test wait timeout
    assert result(j, wait=10) is None
    assert fetch(j, wait=10) is None
    assert result_group('test_j', wait=10) is None
    assert result_group('test_j', count=2, wait=10) is None
    assert fetch_group('test_j', wait=10) is None
    assert fetch_group('test_j', count=2, wait=10) is None
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # Check the results
    # task a
    result_a = fetch(a)
    assert result_a is not None
    assert result_a.success is True
    assert result(a) == 1506
    # task b
    result_b = fetch(b)
    assert result_b is not None
    assert result_b.success is True
    assert result(b) == 1506
    # task c
    result_c = fetch(c)
    assert result_c is not None
    assert result_c.success is False
    # task d
    result_d = fetch(d)
    assert result_d is not None
    assert result_d.success is False
    # task e
    result_e = fetch(e)
    assert result_e is not None
    assert result_e.success is True
    assert result(e) is None
    # task f
    result_f = fetch(f)
    assert result_f is not None
    assert result_f.success is True
    assert result(f) == 1506
    # task g
    result_g = fetch(g)
    assert result_g is not None
    assert result_g.success is True
    assert result(g) == 'John'
    # task h
    result_h = fetch(h)
    assert result_h is not None
    assert result_h.success is True
    assert result(h) == 12
    # task j
    result_j = fetch(j)
    assert result_j is not None
    assert result_j.success is True
    assert result_j.result == result_j.args[0].id
    # check fetch, result by name
    assert fetch(result_j.name) == result_j
    assert result(result_j.name) == result_j.result
    # groups
    assert result_group('test_j')[0] == result_j.result
    assert result_j.group_result()[0] == result_j.result
    assert result_group('test_j', failures=True)[0] == result_j.result
    assert result_j.group_result(failures=True)[0] == result_j.result
    assert fetch_group('test_j')[0].id == [result_j][0].id
    assert fetch_group('test_j', failures=False)[0].id == [result_j][0].id
    assert count_group('test_j') == 1
    assert result_j.group_count() == 1
    assert count_group('test_j', failures=True) == 0
    assert result_j.group_count(failures=True) == 0
    assert delete_group('test_j') == 1
    assert result_j.group_delete() == 0
    deleted_group = delete_group('test_j', tasks=True)
    assert deleted_group is None or deleted_group[0] == 0  # Django 1.9
    deleted_group = result_j.group_delete(tasks=True)
    assert deleted_group is None or deleted_group[0] == 0  # Django 1.9
    # task k should not have been saved
    assert fetch(k) is None
    assert fetch(k, 100) is None
    assert result(k, 100) is None
    broker.delete_queue()
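
The middle of this test is one cluster iteration run inline: pusher moves tasks from the broker onto a multiprocessing queue, worker executes them, monitor persists the results, and 'STOP' is the poison pill that ends each stage. A condensed, hedged sketch of that pipeline (broker as provided by the fixture):

from multiprocessing import Event, Queue, Value

from django_q.cluster import monitor, pusher, worker

task_queue, result_queue = Queue(), Queue()
stop_event = Event()
stop_event.set()                                  # pre-set, so pusher does a single pass
pusher(task_queue, stop_event, broker=broker)     # broker -> task_queue
task_queue.put('STOP')                            # poison pill for the worker
worker(task_queue, result_queue, Value('f', -1))  # execute tasks, emit results
result_queue.put('STOP')                          # poison pill for the monitor
monitor(result_queue)                             # persist results to the backend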
Example #17
def canceled_sqs(monkeypatch):
    monkeypatch.setattr(
        Conf, 'SQS', {
            'aws_region': os.getenv('AWS_REGION'),
            'aws_access_key_id': os.getenv('AWS_ACCESS_KEY_ID'),
            'aws_secret_access_key': os.getenv('AWS_SECRET_ACCESS_KEY')
        })
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    sleep(2)
    # Sometimes SQS is not linear
    task = broker.dequeue()
    if not task:
        pytest.skip('SQS being weird')
    task = task[0]
    assert len(task) > 0
    broker.acknowledge(task[0])
    sleep(2)
    # delete job
    monkeypatch.setattr(Conf, 'RETRY', 60)
    broker.enqueue('test')
    sleep(1)
    task = broker.dequeue()
    if not task:
        pytest.skip('SQS being weird')
    task_id = task[0][0]
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    broker.enqueue('test')
    task = None  # reset the stale task from the delete test above
    while task is None:
        dequeued = broker.dequeue()  # SQS may take a moment to surface the message
        if dequeued:
            task = dequeued[0]
    broker.fail(task[0])
    # bulk test
    for i in range(10):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 12)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    assert broker.lock_size() == 0
    # delete queue
    broker.enqueue('test')
    broker.purge_queue()
    broker.delete_queue()
    # check close and autoreconnect
    broker.close()
    assert broker.ping() is True