Example #1
def test_redis(monkeypatch):
    monkeypatch.setattr(Conf, 'DJANGO_REDIS', None)
    broker = get_broker()
    assert broker.ping() is True
    assert broker.info() is not None
    monkeypatch.setattr(Conf, 'REDIS', {'host': '127.0.0.1', 'port': 7799})
    broker = get_broker()
    with pytest.raises(Exception):
        broker.ping()
Example #3
def test_redis():
    Conf.DJANGO_REDIS = None
    broker = get_broker()
    assert broker.ping() is True
    assert broker.info() is not None
    Conf.REDIS = {"host": "127.0.0.1", "port": 7799}
    broker = get_broker()
    with pytest.raises(Exception):
        broker.ping()
    Conf.REDIS = None
    Conf.DJANGO_REDIS = "default"
Example #4
def test_redis():
    Conf.DJANGO_REDIS = None
    broker = get_broker()
    assert broker.ping() is True
    assert broker.info() is not None
    Conf.REDIS = {'host': '127.0.0.1', 'port': 7799}
    broker = get_broker()
    with pytest.raises(Exception):
        broker.ping()
    Conf.REDIS = None
    Conf.DJANGO_REDIS = 'default'
Example #6
def test_redis(monkeypatch):
    monkeypatch.setattr(Conf, "DJANGO_REDIS", None)
    broker = get_broker()
    assert broker.ping() is True
    assert broker.info() is not None
    monkeypatch.setattr(Conf, "REDIS", {"host": "127.0.0.1", "port": 7799})
    broker = get_broker()
    with pytest.raises(Exception):
        broker.ping()
    monkeypatch.setattr(Conf, "REDIS", "redis://127.0.0.1:7799")
    broker = get_broker()
    with pytest.raises(Exception):
        broker.ping()
Example #7
 def __init__(self, chain=None, group=None, cached=Conf.CACHED, sync=Conf.SYNC):
     self.chain = chain or []
     self.group = group or ''
     self.broker = get_broker()
     self.cached = cached
     self.sync = sync
     self.started = False
Example #8
def delete_cached(task_id, broker=None):
    """
    Delete a task from the cache backend
    """
    if not broker:
        broker = get_broker()
    return broker.cache.delete('{}:{}'.format(broker.list_key, task_id))
Example #9
def fetch_group_cached(group_id, failures=True, wait=0, count=None, broker=None):
    """
    Return a list of Tasks for a task group in the cache backend
    """
    if not broker:
        broker = get_broker()
    start = time()
    if count:
        while True:
            if count_group_cached(group_id) == count or wait and (time() - start) * 1000 >= wait >= 0:
                break
            sleep(0.01)
    while True:
        group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))
        if group_list:
            task_list = []
            for task_key in group_list:
                task = SignedPackage.loads(broker.cache.get(task_key))
                if task['success'] or failures:
                    t = Task(id=task['id'],
                             name=task['name'],
                             func=task['func'],
                             hook=task.get('hook'),
                             args=task['args'],
                             kwargs=task['kwargs'],
                             started=task['started'],
                             stopped=task['stopped'],
                             result=task['result'],
                             group=task.get('group'),
                             success=task['success'])
                    task_list.append(t)
            return task_list
        if (time() - start) * 1000 >= wait >= 0:
            break
        sleep(0.01)
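A minimal usage sketch for the function above, assuming a group id of 'daily-reports' and four queued tasks; note that wait is measured in milliseconds, as the (time() - start) * 1000 comparison shows:

# Hypothetical usage: block up to 500 ms for a cached group of 4 tasks.
tasks = fetch_group_cached('daily-reports', failures=True, wait=500, count=4)
if tasks:
    for t in tasks:
        print(t.id, t.success, t.result)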
Example #10
def fetch_cached(task_id, wait=0, broker=None):
    """
    Return the processed task from the cache backend
    """
    if not broker:
        broker = get_broker()
    start = time()
    while True:
        r = broker.cache.get('{}:{}'.format(broker.list_key, task_id))
        if r:
            task = SignedPackage.loads(r)
            t = Task(id=task['id'],
                     name=task['name'],
                     func=task['func'],
                     hook=task.get('hook'),
                     args=task['args'],
                     kwargs=task['kwargs'],
                     started=task['started'],
                     stopped=task['stopped'],
                     result=task['result'],
                     success=task['success'])
            return t
        if (time() - start) * 1000 >= wait >= 0:
            break
        sleep(0.01)
Example #11
def async(func, *args, **kwargs):
    """Queue a task for the cluster."""
    keywords = kwargs.copy()
    opt_keys = ('hook', 'group', 'save', 'sync', 'cached', 'iter_count',
                'iter_cached', 'chain', 'broker')
    q_options = keywords.pop('q_options', None)
    # get an id
    tag = uuid()
    # build the task package
    task = {'id': tag[1], 'name': tag[0], 'func': func, 'args': args}
    # push optionals
    for key in opt_keys:
        if q_options and key in q_options:
            task[key] = q_options[key]
        elif key in keywords:
            task[key] = keywords.pop(key)
    # don't serialize the broker
    broker = task.pop('broker', get_broker())
    # overrides
    if 'cached' not in task and Conf.CACHED:
        task['cached'] = Conf.CACHED
    if 'sync' not in task and Conf.SYNC:
        task['sync'] = Conf.SYNC
    # finalize
    task['kwargs'] = keywords
    task['started'] = timezone.now()
    # sign it
    pack = signing.SignedPackage.dumps(task)
    if task.get('sync', False):
        return _sync(pack)
    # push it
    broker.enqueue(pack)
    logger.debug('Pushed {}'.format(tag))
    return task['id']
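A minimal usage sketch of this legacy API (later django-q releases rename it async_task, since async became a reserved word in Python 3.7, so this call form only parses on older interpreters); the target function and options are illustrative assumptions:

# Hypothetical usage (pre-Python 3.7): queue math.sqrt(16) on the default broker.
from math import sqrt

task_id = async(sqrt, 16, q_options={'group': 'roots', 'save': True})
print(task_id)  # the id half of the uuid tag generated above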
Example #12
def test_disque(monkeypatch):
    monkeypatch.setattr(Conf, "DISQUE_NODES", ["127.0.0.1:7711"])
    # check broker
    broker = get_broker(list_key="disque_test")
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # async_task
    broker.enqueue("test")
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == "test"
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, "RETRY", 1)
    broker.enqueue("test")
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue("test")
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue("test")
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue("test")
    monkeypatch.setattr(Conf, "BULK", 5)
    monkeypatch.setattr(Conf, "DISQUE_FASTACK", True)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue("test")
    broker.enqueue("test")
    broker.delete_queue()
    assert broker.queue_size() == 0
    # connection test
    monkeypatch.setattr(Conf, "DISQUE_NODES", ["127.0.0.1:7798", "127.0.0.1:7799"])
    with pytest.raises(redis.exceptions.ConnectionError):
        broker.get_connection()
    # connection test with no nodes
    monkeypatch.setattr(Conf, "DISQUE_NODES", None)
    with pytest.raises(redis.exceptions.ConnectionError):
        broker.get_connection()
Example #13
def broker():
    Conf.DISQUE_NODES = None
    Conf.IRON_MQ = None
    Conf.SQS = None
    Conf.ORM = None
    Conf.DJANGO_REDIS = 'default'
    return get_broker()
Example #14
def test_custom():
    Conf.BROKER_CLASS = 'brokers.redis_broker.Redis'
    broker = get_broker()
    assert broker.ping() is True
    assert broker.info() is not None
    assert broker.__class__.__name__ == 'Redis'
    Conf.BROKER_CLASS = None
Example #15
def monitor(result_queue, broker=None):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    if not broker:
        broker = get_broker()
    name = current_process().name
    logger.info(_("{} monitoring at {}").format(name, current_process().pid))
    for task in iter(result_queue.get, 'STOP'):
        # save the result
        if task.get('cached', False):
            save_cached(task, broker)
        else:
            save_task(task, broker)
        # acknowledge and log the result
        if task['success']:
            # acknowledge
            ack_id = task.pop('ack_id', False)
            if ack_id:
                broker.acknowledge(ack_id)
            # log success
            logger.info(_("Processed [{}]").format(task['name']))
        else:
            # log failure
            logger.error(_("Failed [{}] - {}").format(task['name'], task['result']))
    logger.info(_("{} stopped monitoring results").format(name))
Example #16
def result_group_cached(group_id,
                        failures=False,
                        wait=0,
                        count=None,
                        broker=None):
    """
    Return a list of results for a task group from the cache backend
    """
    if not broker:
        broker = get_broker()
    start = time()
    if count:
        while True:
            if count_group_cached(group_id) == count or wait and (
                    time() - start) * 1000 >= wait > 0:
                break
            sleep(0.01)
    while True:
        group_list = broker.cache.get('{}:{}:keys'.format(
            broker.list_key, group_id))
        if group_list:
            result_list = []
            for task_key in group_list:
                task = SignedPackage.loads(broker.cache.get(task_key))
                if task['success'] or failures:
                    result_list.append(task['result'])
            return result_list
        if (time() - start) * 1000 >= wait >= 0:
            break
        sleep(0.01)
Example #17
def async_iter(func, args_iter, **kwargs):
    """
    enqueues a function with iterable arguments
    """
    iter_count = len(args_iter)
    iter_group = uuid()[1]
    # clean up the kwargs
    options = kwargs.get("q_options", kwargs)
    options.pop("hook", None)
    options["broker"] = options.get("broker", get_broker())
    options["group"] = iter_group
    options["iter_count"] = iter_count
    if options.get("cached", None):
        options["iter_cached"] = options["cached"]
    options["cached"] = True
    # save the original arguments
    broker = options["broker"]
    broker.cache.set(
        f"{broker.list_key}:{iter_group}:args", SignedPackage.dumps(args_iter)
    )
    for args in args_iter:
        if not isinstance(args, tuple):
            args = (args,)
        async_task(func, *args, **options)
    return iter_group
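A minimal usage sketch: async_iter fans one function out over an iterable of argument sets and returns the group id under which results are cached; math.sqrt and the result_group_cached follow-up are illustrative assumptions:

# Hypothetical usage: run sqrt over four inputs as one cached group.
from math import sqrt

group_id = async_iter(sqrt, [4, 9, 16, 25])
# results can later be collected by group id, e.g.:
# result_group_cached(group_id, count=4, wait=1000)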
Example #18
def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, broker=get_broker('sentinel_test:q'))
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Example #19
def fetch_cached(task_id, wait=0, broker=None):
    """
    Return the processed task from the cache backend
    """
    if not broker:
        broker = get_broker()
    start = time()
    while True:
        r = broker.cache.get(f"{broker.list_key}:{task_id}")
        if r:
            task = SignedPackage.loads(r)
            return Task(
                id=task["id"],
                name=task["name"],
                func=task["func"],
                hook=task.get("hook"),
                args=task["args"],
                kwargs=task["kwargs"],
                started=task["started"],
                stopped=task["stopped"],
                result=task["result"],
                success=task["success"],
            )
        if (time() - start) * 1000 >= wait >= 0:
            break
        sleep(0.01)
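A minimal usage sketch, assuming task_id came from an earlier async_task call; wait is in milliseconds here, and None comes back on timeout:

# Hypothetical usage: poll the cache for a finished task for up to 1 second.
task = fetch_cached(task_id, wait=1000)
if task is not None and task.success:
    print(task.result)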
Example #20
def delete_cached(task_id, broker=None):
    """
    Delete a task from the cache backend
    """
    if not broker:
        broker = get_broker()
    return broker.cache.delete(f"{broker.list_key}:{task_id}")
Example #21
def async_task(func, *args, **kwargs):
    """Queue a task for the cluster."""
    keywords = kwargs.copy()
    opt_keys = (
        "hook",
        "group",
        "save",
        "sync",
        "cached",
        "ack_failure",
        "iter_count",
        "iter_cached",
        "chain",
        "broker",
        "timeout",
    )
    q_options = keywords.pop("q_options", {})
    # get an id
    tag = uuid()
    # build the task package
    task = {
        "id": tag[1],
        "name": keywords.pop("task_name", None)
        or q_options.pop("task_name", None)
        or tag[0],
        "func": func,
        "args": args,
    }
    # push optionals
    for key in opt_keys:
        if q_options and key in q_options:
            task[key] = q_options[key]
        elif key in keywords:
            task[key] = keywords.pop(key)
    # don't serialize the broker
    broker = task.pop("broker", get_broker())
    # overrides
    if "cached" not in task and Conf.CACHED:
        task["cached"] = Conf.CACHED
    if "sync" not in task and Conf.SYNC:
        task["sync"] = Conf.SYNC
    if "ack_failure" not in task and Conf.ACK_FAILURES:
        task["ack_failure"] = Conf.ACK_FAILURES
    # finalize
    task["kwargs"] = keywords
    task["started"] = timezone.now()
    # signal it
    pre_enqueue.send(sender="django_q", task=task)
    # sign it
    pack = SignedPackage.dumps(task)
    if task.get("sync", False):
        return _sync(pack)
    # push it
    enqueue_id = broker.enqueue(pack)
    logger.info(f"Enqueued {enqueue_id}")
    logger.debug(f"Pushed {tag}")
    return task["id"]
Example #23
def test_simple_async_report_send(rf, admin_user):
    broker = get_broker()
    assert broker.queue_size() == 0

    request = rf.get("/")
    request.query_params = {}
    request.user = admin_user

    report = LeaseStatisticReport()
    response = report.get_response(request)
    assert response.data
    assert broker.queue_size() == 1

    # Run async task
    task_queue = Queue()
    result_queue = Queue()
    event = Event()
    event.set()
    pusher(task_queue, event, broker=broker)
    assert task_queue.qsize() == 1
    assert queue_size(broker=broker) == 0
    task_queue.put("STOP")
    worker(task_queue, result_queue, Value("f", -1))
    assert task_queue.qsize() == 0
    assert result_queue.qsize() == 1
    result_queue.put("STOP")
    monitor(result_queue)
    assert result_queue.qsize() == 0
    broker.delete_queue()

    # Test report file have been sent via email
    assert len(mail.outbox) == 1
    assert len(mail.outbox[0].attachments) == 1
Example #24
 def handle(self, *args, **options):
     self.stdout.write(self.style.SUCCESS(
         'Starting qcluster for queue {!r}'.format(options['queue'])))
     q = Cluster(get_broker(options['queue']))
     q.start()
     if options.get('run_once', False):
         q.stop()
Example #25
def pusher(task_queue: Queue, event: Event, broker: Broker = None):
    """
    Pulls tasks off the broker and puts them in the task queue
    :type broker:
    :type task_queue: multiprocessing.Queue
    :type event: multiprocessing.Event
    """
    if not broker:
        broker = get_broker()
    logger.info(_(f"{current_process().name} pushing tasks at {current_process().pid}"))
    while True:
        try:
            task_set = broker.dequeue()
        except Exception as e:
            logger.error(e, traceback.format_exc())
            # broker probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task_set:
            for task in task_set:
                ack_id = task[0]
                # unpack the task
                try:
                    task = SignedPackage.loads(task[1])
                except (TypeError, BadSignature) as e:
                    logger.error(e, traceback.format_exc())
                    broker.fail(ack_id)
                    continue
                task["ack_id"] = ack_id
                task_queue.put(task)
            logger.debug(_(f"queueing from {broker.list_key}"))
        if event.is_set():
            break
    logger.info(_(f"{current_process().name} stopped pushing tasks"))
Example #27
 def spawn_eligible(self):
     for fileset in self._enum_eligible_filesets():
         async_task('planb.tasks.conditional_run',
                    fileset.pk,
                    broker=get_broker(settings.Q_MAIN_QUEUE),
                    q_options={'hook': 'planb.tasks.finalize_run'})
         logger.info('[%s] Scheduled backup', fileset)
Example #28
 def create_async_tasks_chain(chain,
                              group=None,
                              cached=Conf.CACHED,
                              sync=Conf.SYNC,
                              broker=None):
     """
     Wrapper method around async_chain that enqueues a chain of tasks.
     The chain must be in the format [(func,(args),{kwargs}),(func,(args),{kwargs})]
     """
     if not group:
         group = uuid()[1]
     args = ()
     kwargs = {}
     task = chain.pop(0)
     if type(task) is not tuple:
         task = (task, )
     if len(task) > 1:
         args = task[1]
     if len(task) > 2:
         kwargs = task[2]
     kwargs["chain"] = chain
     kwargs["group"] = group
     kwargs["cached"] = cached
     kwargs["sync"] = sync
     kwargs["broker"] = broker or get_broker()
     QUtilities.add_async_task(task[0], *args, **kwargs)
     return group
Example #29
def monitor(result_queue, broker=None):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    if not broker:
        broker = get_broker()
    name = current_process().name
    logger.info(_("{} monitoring at {}").format(name, current_process().pid))
    for task in iter(result_queue.get, 'STOP'):
        # acknowledge
        ack_id = task.pop('ack_id', False)
        if ack_id:
            broker.acknowledge(ack_id)
        # save the result
        if task.get('cached', False):
            save_cached(task, broker)
        else:
            save_task(task, broker)
        # log the result
        if task['success']:
            logger.info(_("Processed [{}]").format(task['name']))
        else:
            logger.error(
                _("Failed [{}] - {}").format(task['name'], task['result']))
    logger.info(_("{} stopped monitoring results").format(name))
Example #30
def fetch_group_cached(group_id, failures=True, wait=0, count=None, broker=None):
    """
    Return a list of Tasks for a task group in the cache backend
    """
    if not broker:
        broker = get_broker()
    start = time.time()
    if count:
        while True:
            if count_group_cached(group_id) == count or wait and (time.time() - start) * 1000 >= wait >= 0:
                break
            time.sleep(0.01)
    while True:
        group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))
        if group_list:
            task_list = []
            for task_key in group_list:
                task = signing.SignedPackage.loads(broker.cache.get(task_key))
                if task['success'] or failures:
                    t = Task(id=task['id'],
                             name=task['name'],
                             func=task['func'],
                             hook=task.get('hook'),
                             args=task['args'],
                             kwargs=task['kwargs'],
                             started=task['started'],
                             stopped=task['stopped'],
                             result=task['result'],
                             group=task.get('group'),
                             success=task['success'])
                    task_list.append(t)
            return task_list
        if (time.time() - start) * 1000 >= wait >= 0:
            break
        time.sleep(0.01)
Example #31
 def __init__(self,
              stop_event,
              start_event,
              broker=None,
              timeout=Conf.TIMEOUT,
              start=True):
     # Make sure we catch signals for the pool
     signal.signal(signal.SIGINT, signal.SIG_IGN)
     signal.signal(signal.SIGTERM, signal.SIG_DFL)
     self.pid = current_process().pid
     self.parent_pid = get_ppid()
     self.name = current_process().name
     self.broker = broker or get_broker()
     self.reincarnations = 0
     self.tob = timezone.now()
     self.stop_event = stop_event
     self.start_event = start_event
     self.pool_size = Conf.WORKERS
     self.pool = []
     self.timeout = timeout
     self.task_queue = Queue(
         maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue()
     self.result_queue = Queue()
     self.event_out = Event()
     self.monitor = None
     self.pusher = None
     if start:
         self.start()
Example #33
def async_chain(chain,
                group=None,
                cached=Conf.CACHED,
                sync=Conf.SYNC,
                broker=None):
    """
    Enqueues a chain of tasks.
    The chain must be in the format [(func,(args),{kwargs}),(func,(args),{kwargs})]
    """
    if not group:
        group = uuid()[1]
    args = ()
    kwargs = {}
    task = chain.pop(0)
    if type(task) is not tuple:
        task = (task, )
    if len(task) > 1:
        args = task[1]
    if len(task) > 2:
        kwargs = task[2]
    kwargs['chain'] = chain
    kwargs['group'] = group
    kwargs['cached'] = cached
    kwargs['sync'] = sync
    kwargs['broker'] = broker or get_broker()
    async_task(task[0], *args, **kwargs)
    return group
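A minimal sketch of the chain format the docstring describes; the stdlib functions are illustrative assumptions, and bare callables are also accepted since non-tuple entries are wrapped above:

# Hypothetical usage: each entry is (func, (args), {kwargs}); args and kwargs
# may be omitted from the tail of an entry.
from math import copysign, sqrt

group_id = async_chain([
    (copysign, (2, -1), {}),
    (sqrt, (4,)),
    sqrt,
])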
Example #34
def test_disque(monkeypatch):
    monkeypatch.setattr(Conf, 'DISQUE_NODES', ['127.0.0.1:7711'])
    # check broker
    broker = get_broker(list_key='disque_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    monkeypatch.setattr(Conf, 'DISQUE_FASTACK', True)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.delete_queue()
    assert broker.queue_size() == 0
    # check close and autoreconnect
    broker.close()
    assert broker.ping() is True
    # connection test
    monkeypatch.setattr(Conf, 'DISQUE_NODES',
                        ['127.0.0.1:7798', '127.0.0.1:7799'])
    with pytest.raises(redis.exceptions.ConnectionError):
        broker.get_connection()
Example #36
def fetch_cached(task_id, wait=0, broker=None):
    """
    Return the processed task from the cache backend
    """
    if not broker:
        broker = get_broker()
    start = time.time()
    while True:
        r = broker.cache.get('{}:{}'.format(broker.list_key, task_id))
        if r:
            task = signing.SignedPackage.loads(r)
            t = Task(id=task['id'],
                     name=task['name'],
                     func=task['func'],
                     hook=task.get('hook'),
                     args=task['args'],
                     kwargs=task['kwargs'],
                     started=task['started'],
                     stopped=task['stopped'],
                     result=task['result'],
                     success=task['success'])
            return t
        if (time.time() - start) * 1000 >= wait >= 0:
            break
        time.sleep(0.01)
Example #37
def test_monitor(monkeypatch):
    cluster_id = uuid.uuid4()
    assert Stat.get(pid=0, cluster_id=cluster_id).sentinel == 0
    c = Cluster()
    c.start()
    stats = monitor(run_once=True)
    assert get_ids() is True
    c.stop()
    assert len(stats) > 0
    found_c = False
    for stat in stats:
        if stat.cluster_id == c.cluster_id:
            found_c = True
            assert stat.uptime() > 0
            assert stat.empty_queues() is True
            break
    assert found_c is True
    # test lock size
    monkeypatch.setattr(Conf, 'ORM', 'default')
    b = get_broker('monitor_test')
    b.enqueue('test')
    b.dequeue()
    assert b.lock_size() == 1
    monitor(run_once=True, broker=b)
    b.delete_queue()
Example #38
def monitor(result_queue, broker=None):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    if not broker:
        broker = get_broker()
    name = current_process().name
    logger.info(_(f"{name} monitoring at {current_process().pid}"))
    for task in iter(result_queue.get, "STOP"):
        # save the result
        if task.get("cached", False):
            save_cached(task, broker)
        else:
            save_task(task, broker)
        # acknowledge result
        ack_id = task.pop("ack_id", False)
        if ack_id and (task["success"] or task.get("ack_failure", False)):
            broker.acknowledge(ack_id)
        # log the result
        if task["success"]:
            # log success
            logger.info(_(f"Processed [{task['name']}]"))
        else:
            # log failure
            logger.error(_(f"Failed [{task['name']}] - {task['result']}"))
    logger.info(_(f"{name} stopped monitoring results"))
Example #39
def pusher(task_queue, event, broker=None):
    """
    Pulls tasks off the broker and puts them in the task queue
    :type task_queue: multiprocessing.Queue
    :type event: multiprocessing.Event
    """
    if not broker:
        broker = get_broker()
    logger.info(_('{} pushing tasks at {}').format(current_process().name, current_process().pid))
    while True:
        try:
            task_set = broker.dequeue()
        except Exception as e:
            logger.error(e)
            # broker probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task_set:
            for task in task_set:
                ack_id = task[0]
                # unpack the task
                try:
                    task = signing.SignedPackage.loads(task[1])
                except (TypeError, signing.BadSignature) as e:
                    logger.error(e)
                    broker.fail(ack_id)
                    continue
                task['ack_id'] = ack_id
                task_queue.put(task)
            logger.debug(_('queueing from {}').format(broker.list_key))
        if event.is_set():
            break
    logger.info(_("{} stopped pushing tasks").format(current_process().name))
Example #40
def monitor(result_queue, broker=None):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    if not broker:
        broker = get_broker()
    name = current_process().name
    logger.info(_("{} monitoring at {}").format(name, current_process().pid))
    for task in iter(result_queue.get, "STOP"):
        # acknowledge
        ack_id = task.pop("ack_id", False)
        if ack_id:
            broker.acknowledge(ack_id)
        # save the result
        if task.get("cached", False):
            save_cached(task, broker)
        else:
            save_task(task)
        # log the result
        if task["success"]:
            logger.info(_("Processed [{}]").format(task["name"]))
        else:
            logger.error(_("Failed [{}] - {}").format(task["name"], task["result"]))
    logger.info(_("{} stopped monitoring results").format(name))
Example #41
def pusher(task_queue, event, broker=None):
    """
    Pulls tasks off the broker and puts them in the task queue
    :type task_queue: multiprocessing.Queue
    :type event: multiprocessing.Event
    """
    if not broker:
        broker = get_broker()
    logger.info(
        _('{} pushing tasks at {}').format(current_process().name,
                                           current_process().pid))
    while True:
        try:
            task_set = broker.dequeue()
        except Exception as e:
            logger.error(e)
            # broker probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task_set:
            for task in task_set:
                ack_id = task[0]
                # unpack the task
                try:
                    task = signing.SignedPackage.loads(task[1])
                except (TypeError, signing.BadSignature) as e:
                    logger.error(e)
                    broker.fail(ack_id)
                    continue
                task['ack_id'] = ack_id
                task_queue.put(task)
            logger.debug(_('queueing from {}').format(broker.list_key))
        if event.is_set():
            break
    logger.info(_("{} stopped pushing tasks").format(current_process().name))
def test_mongo(monkeypatch):
    monkeypatch.setattr(Conf, 'MONGO', {'host': '127.0.0.1', 'port': 27017})
    # check broker
    broker = get_broker(list_key='mongo_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    tasks = []
    for i in range(5):
        tasks.append(broker.dequeue()[0])
    assert broker.lock_size() == 5
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test lock size
    assert broker.lock_size() == 0
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    broker.delete_queue()
    assert broker.queue_size() == 0
    # check close and autoreconnect
    broker.close()
    assert broker.ping() is True
Example #43
def test_disque():
    Conf.DISQUE_NODES = ['127.0.0.1:7711']
    # check broker
    broker = get_broker(list_key='disque_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    Conf.RETRY = 1
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    Conf.BULK = 5
    Conf.DISQUE_FASTACK = True
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.delete_queue()
    assert broker.queue_size() == 0
    # connection test
    Conf.DISQUE_NODES = ['127.0.0.1:7798', '127.0.0.1:7799']
    with pytest.raises(redis.exceptions.ConnectionError):
        broker.get_connection()
    # back to django-redis
    Conf.DISQUE_NODES = None
    Conf.DISQUE_FASTACK = False
Example #44
def canceled_sqs(monkeypatch):
    monkeypatch.setattr(Conf, 'SQS', {'aws_region': os.getenv('AWS_REGION'),
                                      'aws_access_key_id': os.getenv('AWS_ACCESS_KEY_ID'),
                                      'aws_secret_access_key': os.getenv('AWS_SECRET_ACCESS_KEY')})
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    sleep(2)
    # Sometimes SQS is not linear
    task = broker.dequeue()
    if not task:
        pytest.skip('SQS being weird')
    task = task[0]
    assert len(task) > 0
    broker.acknowledge(task[0])
    sleep(2)
    # delete job
    monkeypatch.setattr(Conf, 'RETRY', 60)
    broker.enqueue('test')
    sleep(1)
    task = broker.dequeue()
    if not task:
        pytest.skip('SQS being weird')
    task_id = task[0][0]
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    broker.enqueue('test')
    while task is None:
        task = broker.dequeue()[0]
    broker.fail(task[0])
    # bulk test
    for i in range(10):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 12)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    assert broker.lock_size() == 0
    # delete queue
    broker.enqueue('test')
    broker.purge_queue()
    broker.delete_queue()
Example #45
def test_sqs():
    Conf.IRON_MQ = None
    Conf.DISQUE_NODES = None
    Conf.SQS = {
        "aws_region": os.getenv("AWS_REGION"),
        "aws_access_key_id": os.getenv("AWS_ACCESS_KEY_ID"),
        "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"),
    }
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue("test")
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == "test"
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    Conf.RETRY = 1
    broker.enqueue("test")
    assert broker.dequeue() is not None
    sleep(1.5)
    task = broker.dequeue()[0]
    assert len(task) > 0
    broker.acknowledge(task[0])
    sleep(1.5)
    # delete job
    broker.enqueue("test")
    task_id = broker.dequeue()[0][0]
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    broker.enqueue("test")
    while task is None:
        task = broker.dequeue()[0]
    broker.fail(task[0])
    # bulk test
    for i in range(10):
        broker.enqueue("test")
    Conf.BULK = 12
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    assert broker.lock_size() == 0
    # delete queue
    broker.enqueue("test")
    broker.purge_queue()
    broker.delete_queue()
    # back to django-redis
    Conf.SQS = None
    Conf.BULK = 1
    Conf.DJANGO_REDIS = "default"
def test_mongo():
    Conf.MONGO = {'host': '127.0.0.1', 'port': 27017}
    # check broker
    broker = get_broker(list_key='mongo_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    Conf.RETRY = 1
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    tasks = []
    for i in range(5):
        tasks.append(broker.dequeue()[0])
    assert broker.lock_size() == 5
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test lock size
    assert broker.lock_size() == 0
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    broker.delete_queue()
    assert broker.queue_size() == 0
    # back to django-redis
    Conf.ORM = None
Example #47
def test_ironmq():
    Conf.DISQUE_NODES = None
    Conf.SQS = None
    Conf.IRON_MQ = {'token': os.getenv('IRON_MQ_TOKEN'),
                    'project_id': os.getenv('IRON_MQ_PROJECT_ID')}
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    # initialize the queue
    broker.enqueue('test')
    # clear before we start
    broker.purge_queue()
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    #Conf.RETRY = 1
    #broker.enqueue('test')
    #assert broker.dequeue() is not None
    #sleep(3)
    # assert broker.dequeue() is not None
    #task = broker.dequeue()[0]
    #assert len(task) > 0
    #broker.acknowledge(task[0])
    #sleep(3)
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    Conf.BULK = 5
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    assert broker.dequeue() is None
    broker.delete_queue()
    # back to django-redis
    Conf.IRON_MQ = None
    Conf.DJANGO_REDIS = 'default'
Example #48
 def __init__(self, func=None, args=None, kwargs=None, cached=Conf.CACHED, sync=Conf.SYNC, broker=None):
     self.func = func
     self.args = args or []
     self.kwargs = kwargs or {}
     self.id = ''
     self.broker = broker or get_broker()
     self.cached = cached
     self.sync = sync
     self.started = False
Example #49
def test_disque(monkeypatch):
    monkeypatch.setattr(Conf, 'DISQUE_NODES', ['127.0.0.1:7711'])
    # check broker
    broker = get_broker(list_key='disque_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    monkeypatch.setattr(Conf, 'DISQUE_FASTACK', True)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.delete_queue()
    assert broker.queue_size() == 0
    # connection test
    monkeypatch.setattr(Conf, 'DISQUE_NODES', ['127.0.0.1:7798', '127.0.0.1:7799'])
    with pytest.raises(redis.exceptions.ConnectionError):
        broker.get_connection()
Example #50
def test_orm():
    Conf.ORM = 'default'
    # check broker
    broker = get_broker(list_key='orm_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    Conf.RETRY = 1
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    Conf.BULK = 5
    tasks = broker.dequeue()
    assert broker.lock_size() == Conf.BULK
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test lock size
    assert broker.lock_size() == 0
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.delete_queue()
    assert broker.queue_size() == 0
    # back to django-redis
    Conf.ORM = None
Example #51
def test_mongo(monkeypatch):
    monkeypatch.setattr(Conf, 'MONGO', {'host': '127.0.0.1', 'port': 27017})
    # check broker
    broker = get_broker(list_key='mongo_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # async_task
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    tasks = []
    for i in range(5):
        tasks.append(broker.dequeue()[0])
    assert broker.lock_size() == 5
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test lock size
    assert broker.lock_size() == 0
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    broker.delete_queue()
    assert broker.queue_size() == 0
Example #52
def delete_group_cached(group_id, broker=None):
    """
    Delete a group from the cache backend
    """
    if not broker:
        broker = get_broker()
    group_key = '{}:{}:keys'.format(broker.list_key, group_id)
    group_list = broker.cache.get(group_key)
    broker.cache.delete_many(group_list)
    broker.cache.delete(group_key)
Example #53
 def __init__(self, broker=None):
     self.broker = broker or get_broker()
     self.sentinel = None
     self.stop_event = None
     self.start_event = None
     self.pid = current_process().pid
     self.host = socket.gethostname()
     self.timeout = Conf.TIMEOUT
     signal.signal(signal.SIGTERM, self.sig_handler)
     signal.signal(signal.SIGINT, self.sig_handler)
Example #54
def test_ironmq():
    Conf.DISQUE_NODES = None
    Conf.SQS = None
    Conf.IRON_MQ = {"token": os.getenv("IRON_MQ_TOKEN"), "project_id": os.getenv("IRON_MQ_PROJECT_ID")}
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    # initialize the queue
    broker.enqueue("test")
    # clear before we start
    broker.purge_queue()
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue("test")
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == "test"
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    Conf.RETRY = 1
    broker.enqueue("test")
    assert broker.dequeue() is not None
    sleep(1.5)
    task = broker.dequeue()[0]
    assert len(task) > 0
    broker.acknowledge(task[0])
    sleep(1.5)
    # delete job
    task_id = broker.enqueue("test")
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue("test")
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue("test")
    Conf.BULK = 5
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue("test")
    broker.enqueue("test")
    broker.purge_queue()
    assert broker.dequeue() is None
    broker.delete_queue()
    # back to django-redis
    Conf.IRON_MQ = None
    Conf.DJANGO_REDIS = "default"
Example #55
def test_orm(monkeypatch):
    monkeypatch.setattr(Conf, 'ORM', 'default')
    # check broker
    broker = get_broker(list_key='orm_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    tasks = broker.dequeue()
    assert broker.lock_size() == Conf.BULK
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test lock size
    assert broker.lock_size() == 0
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.delete_queue()
    assert broker.queue_size() == 0
Example #56
def queue_size(broker=None):
    """
    Returns the current queue size.
    Note that this doesn't count any tasks currently being processed by workers.

    :param broker: optional broker
    :return: current queue size
    :rtype: int
    """
    if not broker:
        broker = get_broker()
    return broker.queue_size()
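A minimal usage sketch: since queue_size only counts waiting tasks, it makes a cheap backlog check; the threshold is an illustrative assumption:

# Hypothetical usage: warn when the broker backlog grows past a threshold.
import logging

backlog = queue_size()
if backlog > 100:
    logging.warning('django-q backlog: %s tasks waiting', backlog)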
Example #57
def test_ironmq(monkeypatch):
    monkeypatch.setattr(Conf, 'IRON_MQ', {'token': os.getenv('IRON_MQ_TOKEN'),
                                          'project_id': os.getenv('IRON_MQ_PROJECT_ID')})
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    # initialize the queue
    broker.enqueue('test')
    # clear before we start
    broker.purge_queue()
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    # monkeypatch.setattr(Conf, 'RETRY', 1)
    # broker.enqueue('test')
    # assert broker.dequeue() is not None
    # sleep(3)
    # assert broker.dequeue() is not None
    # task = broker.dequeue()[0]
    # assert len(task) > 0
    # broker.acknowledge(task[0])
    # sleep(3)
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    assert broker.dequeue() is None
    broker.delete_queue()
Example #58
def result_cached(task_id, wait=0, broker=None):
    """
    Return the result from the cache backend
    """
    if not broker:
        broker = get_broker()
    start = time()
    while True:
        r = broker.cache.get('{}:{}'.format(broker.list_key, task_id))
        if r:
            return SignedPackage.loads(r)['result']
        if (time() - start) * 1000 >= wait >= 0:
            break
        sleep(0.01)
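A minimal usage sketch: unlike fetch_cached above, which rebuilds a full Task, this returns just the result payload, or None if the wait (in milliseconds) elapses:

# Hypothetical usage: grab only the result value of a cached task.
value = result_cached(task_id, wait=250)
if value is None:
    print('task not finished within 250 ms')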
Example #59
 def get(cluster_id, broker=None):
     """
     gets the current status for the cluster
     :param cluster_id: id of the cluster
     :return: Stat or Status
     """
     if not broker:
         broker = get_broker()
     pack = broker.get_stat(Stat.get_key(cluster_id))
     if pack:
         try:
             return signing.SignedPackage.loads(pack)
         except signing.BadSignature:
             return None
     return Status(cluster_id)
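A minimal usage sketch against the signature shown here (another snippet above calls Stat.get with a keyword pid argument from a different version); the cluster id is an illustrative assumption:

# Hypothetical usage: a Status placeholder comes back when no stat
# package is stored under the cluster's key.
stat = Stat.get('cluster-1234')
print(stat.cluster_id)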