Example 1
def test_acknowledge_failure_override():
    class VerifyAckMockBroker(Broker):
        def __init__(self, *args, **kwargs):
            super(VerifyAckMockBroker, self).__init__(*args, **kwargs)
            self.acknowledgements = {}

        def acknowledge(self, task_id):
            count = self.acknowledgements.get(task_id, 0)
            self.acknowledgements[task_id] = count + 1

    tag = uuid()
    task_fail_ack = {
        "id": tag[1],
        "name": tag[0],
        "ack_id": "test_fail_ack_id",
        "ack_failure": True,
        "func": "math.copysign",
        "args": (1, -1),
        "kwargs": {},
        "started": timezone.now(),
        "stopped": timezone.now(),
        "success": False,
        "result": None,
    }

    tag = uuid()
    task_fail_no_ack = task_fail_ack.copy()
    task_fail_no_ack.update({
        "id": tag[1],
        "name": tag[0],
        "ack_id": "test_fail_no_ack_id"
    })
    del task_fail_no_ack["ack_failure"]

    tag = uuid()
    task_success_ack = task_fail_ack.copy()
    task_success_ack.update({
        "id": tag[1],
        "name": tag[0],
        "ack_id": "test_success_ack_id",
        "success": True,
    })
    del task_success_ack["ack_failure"]

    result_queue = Queue()
    result_queue.put(task_fail_ack)
    result_queue.put(task_fail_no_ack)
    result_queue.put(task_success_ack)
    result_queue.put("STOP")
    broker = VerifyAckMockBroker(list_key="key")

    monitor(result_queue, broker)

    assert broker.acknowledgements.get("test_fail_ack_id") == 1
    assert broker.acknowledgements.get("test_fail_no_ack_id") is None
    assert broker.acknowledgements.get("test_success_ack_id") == 1
Example 2
def test_acknowledge_failure_override():
    class VerifyAckMockBroker(Broker):
        def __init__(self, *args, **kwargs):
            super(VerifyAckMockBroker, self).__init__(*args, **kwargs)
            self.acknowledgements = {}

        def acknowledge(self, task_id):
            count = self.acknowledgements.get(task_id, 0)
            self.acknowledgements[task_id] = count + 1

    tag = uuid()
    task_fail_ack = {
        'id': tag[1],
        'name': tag[0],
        'ack_id': 'test_fail_ack_id',
        'ack_failure': True,
        'func': 'math.copysign',
        'args': (1, -1),
        'kwargs': {},
        'started': timezone.now(),
        'stopped': timezone.now(),
        'success': False,
        'result': None
    }

    tag = uuid()
    task_fail_no_ack = task_fail_ack.copy()
    task_fail_no_ack.update({
        'id': tag[1],
        'name': tag[0],
        'ack_id': 'test_fail_no_ack_id'
    })
    del task_fail_no_ack['ack_failure']

    tag = uuid()
    task_success_ack = task_fail_ack.copy()
    task_success_ack.update({
        'id': tag[1],
        'name': tag[0],
        'ack_id': 'test_success_ack_id',
        'success': True,
    })
    del task_success_ack['ack_failure']

    result_queue = Queue()
    result_queue.put(task_fail_ack)
    result_queue.put(task_fail_no_ack)
    result_queue.put(task_success_ack)
    result_queue.put('STOP')
    broker = VerifyAckMockBroker(list_key='key')

    monitor(result_queue, broker)

    assert broker.acknowledgements.get('test_fail_ack_id') == 1
    assert broker.acknowledgements.get('test_fail_no_ack_id') is None
    assert broker.acknowledgements.get('test_success_ack_id') == 1
Example 3
def test_admin_views(admin_client):
    s = schedule('sched.test')
    tag = uuid()
    f = Task.objects.create(
        id=tag[1],
        name=tag[0],
        func='test.fail',
        started=timezone.now(),
        stopped=timezone.now(),
        success=False)
    tag = uuid()
    t = Task.objects.create(
        id=tag[1],
        name=tag[0],
        func='test.success',
        started=timezone.now(),
        stopped=timezone.now(),
        success=True)
    admin_urls = (
        # schedule
        reverse('admin:django_q_schedule_changelist'),
        reverse('admin:django_q_schedule_add'),
        reverse('admin:django_q_schedule_change', args=(s.id,)),
        reverse('admin:django_q_schedule_history', args=(s.id,)),
        reverse('admin:django_q_schedule_delete', args=(s.id,)),
        # success
        reverse('admin:django_q_success_changelist'),
        reverse('admin:django_q_success_change', args=(t.id,)),
        reverse('admin:django_q_success_history', args=(t.id,)),
        reverse('admin:django_q_success_delete', args=(t.id,)),
        # failure
        reverse('admin:django_q_failure_changelist'),
        reverse('admin:django_q_failure_change', args=(f.id,)),
        reverse('admin:django_q_failure_history', args=(f.id,)),
        reverse('admin:django_q_failure_delete', args=(f.id,)),

    )
    for url in admin_urls:
        response = admin_client.get(url)
        assert response.status_code == 200

    # resubmit the failure
    url = reverse('admin:django_q_failure_changelist')
    data = {'action': 'retry_failed',
            '_selected_action': [f.pk]}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    assert Failure.objects.filter(name=f.id).exists() is False
Example 4
def import_urls(user, fresh_urls, mark_read):
    group = uuid()
    size = len(fresh_urls)
    for url in fresh_urls:
        async(subscribe_to_imported_url, user, url, mark_read, group=group)
    start = time.time()
    while True:
        # print("Time", time.time() - start, "count", count_group(group))
        if (time.time() - start) > IMPORT_WAIT:
            # print("TIME!")
            break
        if count_group(group) == size:
            # print("COUNT!")
            break
        time.sleep(1)
    import_results = Counter(result_group(group))
    pretty_results = ', '.join("{}: {}".format(*x) for x in import_results.items())
    num_added = import_results['added']
    num_existed = import_results['existed']
    num_errors = import_results['error']
    if num_added:
        async_messages.success(user, "Import complete - you subscribed to {sub} feed{s}.".format(sub=num_added, s=pluralize(num_added)))
    else:
        async_messages.info(user, "Import complete - no new subscriptions were added.")
    if num_existed:
        async_messages.info(user,
                            "You were already subscribed to {sub_exists} imported feed{s}.".format(sub_exists=num_existed, s=pluralize(num_existed)))
    if num_errors:
        async_messages.error(user, "There was an error subscribing to {errors} imported feed{s}.".format(errors=num_errors, s=pluralize(num_errors)))
    logger.info('User %(user)s OPML import complete - %(results)s', {'user': user, 'results': pretty_results})
    delete_group(group, tasks=True)
    return pretty_results
Example 5
def async_chain(chain,
                group=None,
                cached=Conf.CACHED,
                sync=Conf.SYNC,
                broker=None):
    """
    enqueues a chain of tasks
    the chain must be in the format [(func,(args),{kwargs}),(func,(args),{kwargs})]
    """
    if not group:
        group = uuid()[1]
    args = ()
    kwargs = {}
    task = chain.pop(0)
    if type(task) is not tuple:
        task = (task, )
    if len(task) > 1:
        args = task[1]
    if len(task) > 2:
        kwargs = task[2]
    kwargs['chain'] = chain
    kwargs['group'] = group
    kwargs['cached'] = cached
    kwargs['sync'] = sync
    kwargs['broker'] = broker or get_broker()
    async_task(task[0], *args, **kwargs)
    return group
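
A minimal usage sketch for the async_chain API above, assuming django-q's documented django_q.tasks import path and its result_group helper; the math functions are only placeholders:

# Usage sketch (assumptions: django_q.tasks exports async_chain and result_group).
from django_q.tasks import async_chain, result_group

# Each chain element is (func, (args), {kwargs}); args/kwargs may be omitted.
group_id = async_chain([
    ('math.copysign', (1, -1)),
    ('math.floor', (1.5,)),
])
# Once a running cluster has processed both links, the grouped results can be
# collected, e.g. result_group(group_id, count=2).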
Example 6
def async(func, *args, **kwargs):
    """Send a task to the cluster."""
    # get options from q_options dict or direct from kwargs
    options = kwargs.pop('q_options', kwargs)
    hook = options.pop('hook', None)
    list_key = options.pop('list_key', Conf.Q_LIST)
    redis = options.pop('redis', redis_client)
    sync = options.pop('sync', False)
    group = options.pop('group', None)
    save = options.pop('save', None)
    # get an id
    tag = uuid()
    # build the task package
    task = {'id': tag[1], 'name': tag[0],
            'func': func,
            'args': args,
            'kwargs': kwargs,
            'started': timezone.now()}
    # add optionals
    if hook:
        task['hook'] = hook
    if group:
        task['group'] = group
    if save is not None:
        task['save'] = save
    # sign it
    pack = signing.SignedPackage.dumps(task)
    if sync or Conf.SYNC:
        return _sync(pack)
    # push it
    redis.rpush(list_key, pack)
    logger.debug('Pushed {}'.format(tag))
    return task['id']
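
A usage sketch for this early Redis-based async() signature; hook, group and sync are the keyword options popped above, and the hook path shown is hypothetical:

# Usage sketch for the legacy async() call defined above.
task_id = async('math.copysign', 1, -1,
                hook='hooks.print_result',  # hypothetical hook module path
                group='signs')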
Example 7
def async_task(func, *args, **kwargs):
    """Queue a task for the cluster."""
    keywords = kwargs.copy()
    opt_keys = (
        "hook",
        "group",
        "save",
        "sync",
        "cached",
        "ack_failure",
        "iter_count",
        "iter_cached",
        "chain",
        "broker",
        "timeout",
    )
    q_options = keywords.pop("q_options", {})
    # get an id
    tag = uuid()
    # build the task package
    task = {
        "id": tag[1],
        "name": keywords.pop("task_name", None) or q_options.pop("task_name", None) or tag[0],
        "func": func,
        "args": args,
    }
    # push optionals
    for key in opt_keys:
        if q_options and key in q_options:
            task[key] = q_options[key]
        elif key in keywords:
            task[key] = keywords.pop(key)
    # don't serialize the broker
    broker = task.pop("broker", get_broker())
    # overrides
    if "cached" not in task and Conf.CACHED:
        task["cached"] = Conf.CACHED
    if "sync" not in task and Conf.SYNC:
        task["sync"] = Conf.SYNC
    if "ack_failure" not in task and Conf.ACK_FAILURES:
        task["ack_failure"] = Conf.ACK_FAILURES
    # finalize
    task["kwargs"] = keywords
    task["started"] = timezone.now()
    # signal it
    pre_enqueue.send(sender="django_q", task=task)
    # sign it
    pack = SignedPackage.dumps(task)
    if task.get("sync", False):
        return _sync(pack)
    # push it
    enqueue_id = broker.enqueue(pack)
    logger.info(f"Enqueued {enqueue_id}")
    logger.debug(f"Pushed {tag}")
    return task["id"]
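
A usage sketch for async_task with q_options; the option keys mirror the opt_keys tuple above, and the task name and timeout values are illustrative:

# Usage sketch (assumptions: django_q.tasks exports async_task as above).
from django_q.tasks import async_task

task_id = async_task(
    'math.copysign', 1, -1,
    q_options={
        'task_name': 'copysign-example',  # illustrative custom name
        'group': 'signs',
        'timeout': 30,
        'ack_failure': True,
    },
)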
Example 8
def test_attempt_count(broker, monkeypatch):
    monkeypatch.setattr(Conf, 'MAX_ATTEMPTS', 3)
    tag = uuid()
    task = {'id': tag[1],
            'name': tag[0],
            'func': 'math.copysign',
            'args': (1, -1),
            'kwargs': {},
            'started': timezone.now(),
            'stopped': timezone.now(),
            'success': False,
            'result': None}
    # initial save - no success
    save_task(task, broker)
    assert Task.objects.filter(id=task['id']).exists()
    saved_task = Task.objects.get(id=task['id'])
    assert saved_task.attempt_count == 1
    sleep(0.5)
    # second save
    old_stopped = task['stopped']
    task['stopped'] = timezone.now()
    save_task(task, broker)
    saved_task = Task.objects.get(id=task['id'])
    assert saved_task.attempt_count == 2
    # third save -
    task['stopped'] = timezone.now()
    save_task(task, broker)
    saved_task = Task.objects.get(id=task['id'])
    assert saved_task.attempt_count == 3
    # task should be removed from queue
    assert broker.queue_size() == 0
Example 9
def test_attempt_count(broker, monkeypatch):
    monkeypatch.setattr(Conf, "MAX_ATTEMPTS", 3)
    tag = uuid()
    task = {
        "id": tag[1],
        "name": tag[0],
        "func": "math.copysign",
        "args": (1, -1),
        "kwargs": {},
        "started": timezone.now(),
        "stopped": timezone.now(),
        "success": False,
        "result": None,
    }
    # initial save - no success
    save_task(task, broker)
    assert Task.objects.filter(id=task["id"]).exists()
    saved_task = Task.objects.get(id=task["id"])
    assert saved_task.attempt_count == 1
    sleep(0.5)
    # second save
    old_stopped = task["stopped"]
    task["stopped"] = timezone.now()
    save_task(task, broker)
    saved_task = Task.objects.get(id=task["id"])
    assert saved_task.attempt_count == 2
    # third save -
    task["stopped"] = timezone.now()
    save_task(task, broker)
    saved_task = Task.objects.get(id=task["id"])
    assert saved_task.attempt_count == 3
    # task should be removed from queue
    assert broker.queue_size() == 0
Example 10
def async(func, *args, **kwargs):
    """Queue a task for the cluster."""
    keywords = kwargs.copy()
    opt_keys = ('hook', 'group', 'save', 'sync', 'cached', 'iter_count',
                'iter_cached', 'chain', 'broker')
    q_options = keywords.pop('q_options', None)
    # get an id
    tag = uuid()
    # build the task package
    task = {'id': tag[1], 'name': tag[0], 'func': func, 'args': args}
    # push optionals
    for key in opt_keys:
        if q_options and key in q_options:
            task[key] = q_options[key]
        elif key in keywords:
            task[key] = keywords.pop(key)
    # don't serialize the broker
    broker = task.pop('broker', get_broker())
    # overrides
    if 'cached' not in task and Conf.CACHED:
        task['cached'] = Conf.CACHED
    if 'sync' not in task and Conf.SYNC:
        task['sync'] = Conf.SYNC
    # finalize
    task['kwargs'] = keywords
    task['started'] = timezone.now()
    # sign it
    pack = signing.SignedPackage.dumps(task)
    if task.get('sync', False):
        return _sync(pack)
    # push it
    broker.enqueue(pack)
    logger.debug('Pushed {}'.format(tag))
    return task['id']
Example 11
def create_async_tasks_chain(chain,
                             group=None,
                             cached=Conf.CACHED,
                             sync=Conf.SYNC,
                             broker=None):
    """
    Wrapper method around async_chain that enqueues a chain of tasks
    the chain must be in the format [(func,(args),{kwargs}),(func,(args),{kwargs})]
    """
    if not group:
        group = uuid()[1]
    args = ()
    kwargs = {}
    task = chain.pop(0)
    if type(task) is not tuple:
        task = (task, )
    if len(task) > 1:
        args = task[1]
    if len(task) > 2:
        kwargs = task[2]
    kwargs["chain"] = chain
    kwargs["group"] = group
    kwargs["cached"] = cached
    kwargs["sync"] = sync
    kwargs["broker"] = broker or get_broker()
    QUtilities.add_async_task(task[0], *args, **kwargs)
    return group
Example 12
def async(func, *args, **kwargs):
    """
    Sends a task to the cluster
    """
    # optional hook
    hook = kwargs.pop('hook', None)
    # optional list_key
    list_key = kwargs.pop('list_key', Conf.Q_LIST)
    # optional redis connection
    redis = kwargs.pop('redis', redis_client)
    # optional sync mode
    sync = kwargs.pop('sync', False)
    # optional group
    group = kwargs.pop('group', None)
    # get an id
    tag = uuid()
    # build the task package
    task = {'id': tag[1], 'name': tag[0], 'func': func, 'args': args, 'kwargs': kwargs,
            'started': timezone.now()}
    # add optionals
    if hook:
        task['hook'] = hook
    if group:
        task['group'] = group
    # sign it
    pack = signing.SignedPackage.dumps(task)
    if sync:
        return _sync(task['id'], pack)
    # push it
    redis.rpush(list_key, pack)
    logger.debug('Pushed {}'.format(tag))
    return task['id']
Example 13
def async_iter(func, args_iter, **kwargs):
    """
    enqueues a function with iterable arguments
    """
    iter_count = len(args_iter)
    iter_group = uuid()[1]
    # clean up the kwargs
    options = kwargs.get("q_options", kwargs)
    options.pop("hook", None)
    options["broker"] = options.get("broker", get_broker())
    options["group"] = iter_group
    options["iter_count"] = iter_count
    if options.get("cached", None):
        options["iter_cached"] = options["cached"]
    options["cached"] = True
    # save the original arguments
    broker = options["broker"]
    broker.cache.set(
        f"{broker.list_key}:{iter_group}:args", SignedPackage.dumps(args_iter)
    )
    for args in args_iter:
        if not isinstance(args, tuple):
            args = (args,)
        async_task(func, *args, **options)
    return iter_group
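
A usage sketch for async_iter; the returned group id identifies the collated result once a cluster has processed every iteration (the exact retrieval call depends on the cache/result configuration, so it is only hinted at in a comment):

# Usage sketch (assumption: django_q.tasks exports async_iter as above).
from django_q.tasks import async_iter

iter_id = async_iter('math.floor', [1.5, 2.5, 3.5])
# After all three calls finish, the collated result is stored under iter_id,
# e.g. result(iter_id) with the appropriate cached setting.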
Example 14
def test_ironmq():
    Conf.DISQUE_NODES = None
    Conf.SQS = None
    Conf.IRON_MQ = {
        'token': os.getenv('IRON_MQ_TOKEN'),
        'project_id': os.getenv('IRON_MQ_PROJECT_ID')
    }
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    # initialize the queue
    broker.enqueue('test')
    # clear before we start
    broker.purge_queue()
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    # Conf.RETRY = 1
    # broker.enqueue('test')
    # assert broker.dequeue() is not None
    # sleep(3)
    # assert broker.dequeue() is not None
    # task = broker.dequeue()[0]
    # assert len(task) > 0
    # broker.acknowledge(task[0])
    # sleep(3)
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    Conf.BULK = 5
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    assert broker.dequeue() is None
    broker.delete_queue()
    # back to django-redis
    Conf.IRON_MQ = None
    Conf.DJANGO_REDIS = 'default'
Example 15
def test_sqs():
    Conf.IRON_MQ = None
    Conf.DISQUE_NODES = None
    Conf.SQS = {
        "aws_region": os.getenv("AWS_REGION"),
        "aws_access_key_id": os.getenv("AWS_ACCESS_KEY_ID"),
        "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"),
    }
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue("test")
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == "test"
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    Conf.RETRY = 1
    broker.enqueue("test")
    assert broker.dequeue() is not None
    sleep(1.5)
    task = broker.dequeue()[0]
    assert len(task) > 0
    broker.acknowledge(task[0])
    sleep(1.5)
    # delete job
    broker.enqueue("test")
    task_id = broker.dequeue()[0][0]
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    broker.enqueue("test")
    while task is None:
        task = broker.dequeue()[0]
    broker.fail(task[0])
    # bulk test
    for i in range(10):
        broker.enqueue("test")
    Conf.BULK = 12
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    assert broker.lock_size() == 0
    # delete queue
    broker.enqueue("test")
    broker.purge_queue()
    broker.delete_queue()
    # back to django-redis
    Conf.SQS = None
    Conf.BULK = 1
    Conf.DJANGO_REDIS = "default"
Example 16
def test_ironmq(monkeypatch):
    monkeypatch.setattr(
        Conf,
        "IRON_MQ",
        {
            "token": os.getenv("IRON_MQ_TOKEN"),
            "project_id": os.getenv("IRON_MQ_PROJECT_ID"),
        },
    )
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    # initialize the queue
    broker.enqueue("test")
    # clear before we start
    broker.purge_queue()
    assert broker.queue_size() == 0
    # async_task
    broker.enqueue("test")
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == "test"
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    # monkeypatch.setattr(Conf, 'RETRY', 1)
    # broker.async_task('test')
    # assert broker.dequeue() is not None
    # sleep(3)
    # assert broker.dequeue() is not None
    # task = broker.dequeue()[0]
    # assert len(task) > 0
    # broker.acknowledge(task[0])
    # sleep(3)
    # delete job
    task_id = broker.enqueue("test")
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue("test")
    broker.fail(task_id)
    # bulk test
    for _ in range(5):
        broker.enqueue("test")
    monkeypatch.setattr(Conf, "BULK", 5)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue("test")
    broker.enqueue("test")
    broker.purge_queue()
    assert broker.dequeue() is None
    broker.delete_queue()
Example 17
def test_ironmq(monkeypatch):
    monkeypatch.setattr(
        Conf, 'IRON_MQ', {
            'token': os.getenv('IRON_MQ_TOKEN'),
            'project_id': os.getenv('IRON_MQ_PROJECT_ID')
        })
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    # initialize the queue
    broker.enqueue('test')
    # clear before we start
    broker.purge_queue()
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    # monkeypatch.setattr(Conf, 'RETRY', 1)
    # broker.enqueue('test')
    # assert broker.dequeue() is not None
    # sleep(3)
    # assert broker.dequeue() is not None
    # task = broker.dequeue()[0]
    # assert len(task) > 0
    # broker.acknowledge(task[0])
    # sleep(3)
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    assert broker.dequeue() is None
    broker.delete_queue()
    # check close and autoreconnect
    broker.close()
    assert broker.ping() is True
Example 18
def canceled_sqs(monkeypatch):
    monkeypatch.setattr(Conf, 'SQS', {'aws_region': os.getenv('AWS_REGION'),
                                      'aws_access_key_id': os.getenv('AWS_ACCESS_KEY_ID'),
                                      'aws_secret_access_key': os.getenv('AWS_SECRET_ACCESS_KEY')})
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    assert broker.queue_size() == 0
    # async_task
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    sleep(2)
    # Sometimes SQS is not linear
    task = broker.dequeue()
    if not task:
        pytest.skip('SQS being weird')
    task = task[0]
    assert len(task) > 0
    broker.acknowledge(task[0])
    sleep(2)
    # delete job
    monkeypatch.setattr(Conf, 'RETRY', 60)
    broker.enqueue('test')
    sleep(1)
    task = broker.dequeue()
    if not task:
        pytest.skip('SQS being weird')
    task_id = task[0][0]
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    broker.enqueue('test')
    while task is None:
        task = broker.dequeue()[0]
    broker.fail(task[0])
    # bulk test
    for i in range(10):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 12)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    assert broker.lock_size() == 0
    # delete queue
    broker.enqueue('test')
    broker.purge_queue()
    broker.delete_queue()
Example 19
def test_sqs():
    Conf.IRON_MQ = None
    Conf.DISQUE_NODES = None
    Conf.SQS = {
        'aws_region': os.getenv('AWS_REGION'),
        'aws_access_key_id': os.getenv('AWS_ACCESS_KEY_ID'),
        'aws_secret_access_key': os.getenv('AWS_SECRET_ACCESS_KEY')
    }
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    Conf.RETRY = 1
    broker.enqueue('test')
    assert broker.dequeue() is not None
    sleep(2)
    task = broker.dequeue()[0]
    assert len(task) > 0
    broker.acknowledge(task[0])
    sleep(2)
    # delete job
    broker.enqueue('test')
    task_id = broker.dequeue()[0][0]
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    broker.enqueue('test')
    while task is None:
        task = broker.dequeue()[0]
    broker.fail(task[0])
    # bulk test
    for i in range(10):
        broker.enqueue('test')
    Conf.BULK = 12
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    assert broker.lock_size() == 0
    # delete queue
    broker.enqueue('test')
    broker.purge_queue()
    broker.delete_queue()
    # back to django-redis
    Conf.SQS = None
    Conf.BULK = 1
    Conf.DJANGO_REDIS = 'default'
Example 20
def canceled_sqs(monkeypatch):
    monkeypatch.setattr(Conf, 'SQS', {'aws_region': os.getenv('AWS_REGION'),
                                      'aws_access_key_id': os.getenv('AWS_ACCESS_KEY_ID'),
                                      'aws_secret_access_key': os.getenv('AWS_SECRET_ACCESS_KEY')})
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    monkeypatch.setattr(Conf, 'RETRY', 1)
    broker.enqueue('test')
    sleep(2)
    # Sometimes SQS is not linear
    task = broker.dequeue()
    if not task:
        pytest.skip('SQS being weird')
    task = task[0]
    assert len(task) > 0
    broker.acknowledge(task[0])
    sleep(2)
    # delete job
    monkeypatch.setattr(Conf, 'RETRY', 60)
    broker.enqueue('test')
    sleep(1)
    task = broker.dequeue()
    if not task:
        pytest.skip('SQS being weird')
    task_id = task[0][0]
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    broker.enqueue('test')
    while task is None:
        task = broker.dequeue()[0]
    broker.fail(task[0])
    # bulk test
    for i in range(10):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 12)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    assert broker.lock_size() == 0
    # delete queue
    broker.enqueue('test')
    broker.purge_queue()
    broker.delete_queue()
Example 21
def test_ironmq():
    Conf.DISQUE_NODES = None
    Conf.SQS = None
    Conf.IRON_MQ = {'token': os.getenv('IRON_MQ_TOKEN'),
                    'project_id': os.getenv('IRON_MQ_PROJECT_ID')}
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    # initialize the queue
    broker.enqueue('test')
    # clear before we start
    broker.purge_queue()
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    # Conf.RETRY = 1
    # broker.enqueue('test')
    # assert broker.dequeue() is not None
    # sleep(3)
    # assert broker.dequeue() is not None
    # task = broker.dequeue()[0]
    # assert len(task) > 0
    # broker.acknowledge(task[0])
    # sleep(3)
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    Conf.BULK = 5
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    assert broker.dequeue() is None
    broker.delete_queue()
    # back to django-redis
    Conf.IRON_MQ = None
    Conf.DJANGO_REDIS = 'default'
Example 22
def make_async_call(target_func, *args, **kwargs):
    if platform.system() == "Windows":
        task_uuid = uuid()
        kwargs['uuid'] = task_uuid
        p = Process(target=windows_handle_async_call, args=(target_func,) + args, kwargs=kwargs)
        p.start()
        return task_uuid[1]
    else:
        return async(target_func, *args, **kwargs)
Example 23
def test_ironmq():
    Conf.DISQUE_NODES = None
    Conf.SQS = None
    Conf.IRON_MQ = {"token": os.getenv("IRON_MQ_TOKEN"), "project_id": os.getenv("IRON_MQ_PROJECT_ID")}
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    # initialize the queue
    broker.enqueue("test")
    # clear before we start
    broker.purge_queue()
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue("test")
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == "test"
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    Conf.RETRY = 1
    broker.enqueue("test")
    assert broker.dequeue() is not None
    sleep(1.5)
    task = broker.dequeue()[0]
    assert len(task) > 0
    broker.acknowledge(task[0])
    sleep(1.5)
    # delete job
    task_id = broker.enqueue("test")
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue("test")
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue("test")
    Conf.BULK = 5
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue("test")
    broker.enqueue("test")
    broker.purge_queue()
    assert broker.dequeue() is None
    broker.delete_queue()
    # back to django-redis
    Conf.IRON_MQ = None
    Conf.DJANGO_REDIS = "default"
Example 24
def make_async_call(target_func, *args, **kwargs):
    if platform.system() == "Windows":
        task_uuid = uuid()
        kwargs['uuid'] = task_uuid
        p = Process(target=windows_handle_async_call,
                    args=(target_func, ) + args,
                    kwargs=kwargs)
        p.start()
        return task_uuid[1]
    else:
        return async(target_func, *args, **kwargs)
Example 25
def test_ironmq(monkeypatch):
    monkeypatch.setattr(Conf, 'IRON_MQ', {'token': os.getenv('IRON_MQ_TOKEN'),
                                          'project_id': os.getenv('IRON_MQ_PROJECT_ID')})
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    # initialize the queue
    broker.enqueue('test')
    # clear before we start
    broker.purge_queue()
    assert broker.queue_size() == 0
    # enqueue
    broker.enqueue('test')
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    # monkeypatch.setattr(Conf, 'RETRY', 1)
    # broker.enqueue('test')
    # assert broker.dequeue() is not None
    # sleep(3)
    # assert broker.dequeue() is not None
    # task = broker.dequeue()[0]
    # assert len(task) > 0
    # broker.acknowledge(task[0])
    # sleep(3)
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    monkeypatch.setattr(Conf, 'BULK', 5)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.purge_queue()
    assert broker.dequeue() is None
    broker.delete_queue()
Example 26
def async(func, *args, **kwargs):
    """Queue a task for the cluster."""
    keywords = kwargs.copy()
    opt_keys = ('hook', 'group', 'save', 'sync', 'cached', 'iter_count',
                'iter_cached', 'chain', 'broker', 'progress_updates')
    q_options = keywords.pop('q_options', {})
    # get an id
    tag = keywords.pop('uuid', None) or uuid()
    # build the task package
    task = {
        'id': tag[1],
        'name': keywords.pop('task_name', None) or q_options.pop('task_name', None) or tag[0],
        'func': func,
        'args': args
    }
    # push optionals
    for key in opt_keys:
        if q_options and key in q_options:
            task[key] = q_options[key]
        elif key in keywords:
            task[key] = keywords.pop(key)
    # don't serialize the broker
    broker = task.pop('broker', get_broker())
    # overrides
    if 'cached' not in task and Conf.CACHED:
        task['cached'] = Conf.CACHED
    if 'sync' not in task and Conf.SYNC:
        task['sync'] = Conf.SYNC
    # finalize
    task['kwargs'] = keywords
    task['started'] = timezone.now()
    task['is_progress_updating'] = bool(task.get('progress_updates', False))
    task['success'] = False
    task['stopped'] = None
    task['result'] = None
    task['task_status'] = Task.PENDING

    # sign it
    pack = signing.SignedPackage.dumps(task)
    if task.get('sync', False):
        return _sync(pack)
    # push it
    broker.enqueue(pack)
    logger.debug('Pushed {}'.format(tag))
    # create initial task result entry
    cluster.save_task(task, broker)
    return task['id']
Example 27
def async(func, *args, **kwargs):
    """Queue a task for the cluster."""
    # get options from q_options dict or direct from kwargs
    options = kwargs.pop('q_options', kwargs)
    hook = options.pop('hook', None)
    broker = options.pop('broker', get_broker())
    sync = options.pop('sync', False)
    group = options.pop('group', None)
    save = options.pop('save', None)
    cached = options.pop('cached', Conf.CACHED)
    iter_count = options.pop('iter_count', None)
    iter_cached = options.pop('iter_cached', None)
    # get an id
    tag = uuid()
    # build the task package
    task = {
        'id': tag[1],
        'name': tag[0],
        'func': func,
        'args': args,
        'kwargs': kwargs,
        'started': timezone.now()
    }
    # add optionals
    if hook:
        task['hook'] = hook
    if group:
        task['group'] = group
    if save is not None:
        task['save'] = save
    if cached:
        task['cached'] = cached
    if iter_count:
        task['iter_count'] = iter_count
    if iter_cached:
        task['iter_cached'] = iter_cached
    # sign it
    pack = signing.SignedPackage.dumps(task)
    if sync or Conf.SYNC:
        return _sync(pack)
    # push it
    broker.enqueue(pack)
    logger.debug('Pushed {}'.format(tag))
    return task['id']
Example 28
def async_task(func, *args, **kwargs):
    """Queue a task for the cluster."""
    keywords = kwargs.copy()
    opt_keys = ('hook', 'group', 'save', 'sync', 'cached', 'ack_failure',
                'iter_count', 'iter_cached', 'chain', 'broker', 'timeout')
    q_options = keywords.pop('q_options', {})
    # get an id
    tag = uuid()
    # build the task package
    task = {'id': tag[1],
            'name': keywords.pop('task_name', None) or q_options.pop('task_name', None) or tag[0],
            'func': func,
            'args': args}
    # push optionals
    for key in opt_keys:
        if q_options and key in q_options:
            task[key] = q_options[key]
        elif key in keywords:
            task[key] = keywords.pop(key)
    # don't serialize the broker
    broker = task.pop('broker', get_broker())
    # overrides
    if 'cached' not in task and Conf.CACHED:
        task['cached'] = Conf.CACHED
    if 'sync' not in task and Conf.SYNC:
        task['sync'] = Conf.SYNC
    if 'ack_failure' not in task and Conf.ACK_FAILURES:
        task['ack_failure'] = Conf.ACK_FAILURES
    # finalize
    task['kwargs'] = keywords
    task['started'] = timezone.now()
    # signal it
    pre_enqueue.send(sender="django_q", task=task)
    # sign it
    pack = SignedPackage.dumps(task)
    if task.get('sync', False):
        return _sync(pack)
    # push it
    enqueue_id = broker.enqueue(pack)
    logger.info('Enqueued {}'.format(enqueue_id))
    logger.debug('Pushed {}'.format(tag))
    return task['id']
Example 29
def async_task(func, *args, **kwargs):
    """Queue a task for the cluster."""
    keywords = kwargs.copy()
    opt_keys = ('hook', 'group', 'save', 'sync', 'cached', 'ack_failure',
                'iter_count', 'iter_cached', 'chain', 'broker')
    q_options = keywords.pop('q_options', {})
    # get an id
    tag = uuid()
    # build the task package
    task = {'id': tag[1],
            'name': keywords.pop('task_name', None) or q_options.pop('task_name', None) or tag[0],
            'func': func,
            'args': args}
    # push optionals
    for key in opt_keys:
        if q_options and key in q_options:
            task[key] = q_options[key]
        elif key in keywords:
            task[key] = keywords.pop(key)
    # don't serialize the broker
    broker = task.pop('broker', get_broker())
    # overrides
    if 'cached' not in task and Conf.CACHED:
        task['cached'] = Conf.CACHED
    if 'sync' not in task and Conf.SYNC:
        task['sync'] = Conf.SYNC
    if 'ack_failure' not in task and Conf.ACK_FAILURES:
        task['ack_failure'] = Conf.ACK_FAILURES
    # finalize
    task['kwargs'] = keywords
    task['started'] = timezone.now()
    # signal it
    pre_enqueue.send(sender="django_q", task=task)
    # sign it
    pack = SignedPackage.dumps(task)
    if task.get('sync', False):
        return _sync(pack)
    # push it
    enqueue_id = broker.enqueue(pack)
    logger.info('Enqueued {}'.format(enqueue_id))
    logger.debug('Pushed {}'.format(tag))
    return task['id']
Example 30
def test_update_failed(broker):
    tag = uuid()
    task = {
        'id': tag[1],
        'name': tag[0],
        'func': 'math.copysign',
        'args': (1, -1),
        'kwargs': {},
        'started': timezone.now(),
        'stopped': timezone.now(),
        'success': False,
        'result': None
    }
    # initial save - no success
    save_task(task, broker)
    assert Task.objects.filter(id=task['id']).exists()
    saved_task = Task.objects.get(id=task['id'])
    assert saved_task.success is False
    sleep(0.5)
    # second save - no success
    old_stopped = task['stopped']
    task['stopped'] = timezone.now()
    save_task(task, broker)
    saved_task = Task.objects.get(id=task['id'])
    assert saved_task.stopped > old_stopped
    # third save - success
    task['stopped'] = timezone.now()
    task['result'] = 'result'
    task['success'] = True
    save_task(task, broker)
    saved_task = Task.objects.get(id=task['id'])
    assert saved_task.success is True
    # fourth save - no success
    task['result'] = None
    task['success'] = False
    task['stopped'] = old_stopped
    save_task(task, broker)
    # should not overwrite success
    saved_task = Task.objects.get(id=task['id'])
    assert saved_task.success is True
    assert saved_task.result == 'result'
Example 31
def test_update_failed(broker):
    tag = uuid()
    task = {
        "id": tag[1],
        "name": tag[0],
        "func": "math.copysign",
        "args": (1, -1),
        "kwargs": {},
        "started": timezone.now(),
        "stopped": timezone.now(),
        "success": False,
        "result": None,
    }
    # initial save - no success
    save_task(task, broker)
    assert Task.objects.filter(id=task["id"]).exists()
    saved_task = Task.objects.get(id=task["id"])
    assert saved_task.success is False
    sleep(0.5)
    # second save - no success
    old_stopped = task["stopped"]
    task["stopped"] = timezone.now()
    save_task(task, broker)
    saved_task = Task.objects.get(id=task["id"])
    assert saved_task.stopped > old_stopped
    # third save - success
    task["stopped"] = timezone.now()
    task["result"] = "result"
    task["success"] = True
    save_task(task, broker)
    saved_task = Task.objects.get(id=task["id"])
    assert saved_task.success is True
    # fourth save - no success
    task["result"] = None
    task["success"] = False
    task["stopped"] = old_stopped
    save_task(task, broker)
    # should not overwrite success
    saved_task = Task.objects.get(id=task["id"])
    assert saved_task.success is True
    assert saved_task.result == "result"
Example 32
def async_iter(func, args_iter, **kwargs):
    iter_count = len(args_iter)
    iter_group = uuid()[1]
    # clean up the kwargs
    options = kwargs.get('q_options', kwargs)
    options.pop('hook', None)
    options['broker'] = options.get('broker', get_broker())
    options['group'] = iter_group
    options['iter_count'] = iter_count
    if options.get('cached', None):
        options['iter_cached'] = options['cached']
    options['cached'] = True
    # save the original arguments
    broker = options['broker']
    broker.cache.set('{}:{}:args'.format(broker.list_key, iter_group),
                     signing.SignedPackage.dumps(args_iter))
    for args in args_iter:
        if type(args) is not tuple:
            args = (args, )
        async(func, *args, **options)
    return iter_group
Example 33
def test_update_failed(broker):
    tag = uuid()
    task = {'id': tag[1],
            'name': tag[0],
            'func': 'math.copysign',
            'args': (1, -1),
            'kwargs': {},
            'started': timezone.now(),
            'stopped': timezone.now(),
            'success': False,
            'result': None}
    # initial save - no success
    save_task(task, broker)
    assert Task.objects.filter(id=task['id']).exists()
    saved_task = Task.objects.get(id=task['id'])
    assert saved_task.success is False
    sleep(0.5)
    # second save - no success
    old_stopped = task['stopped']
    task['stopped'] = timezone.now()
    save_task(task, broker)
    saved_task = Task.objects.get(id=task['id'])
    assert saved_task.stopped > old_stopped
    # third save - success
    task['stopped'] = timezone.now()
    task['result'] = 'result'
    task['success'] = True
    save_task(task, broker)
    saved_task = Task.objects.get(id=task['id'])
    assert saved_task.success is True
    # fourth save - no success
    task['result'] = None
    task['success'] = False
    task['stopped'] = old_stopped
    save_task(task, broker)
    # should not overwrite success
    saved_task = Task.objects.get(id=task['id'])
    assert saved_task.success is True
    assert saved_task.result == 'result'
Example 34
def async_iter(func, args_iter, **kwargs):
    """
    enqueues a function with iterable arguments
    """
    iter_count = len(args_iter)
    iter_group = uuid()[1]
    # clean up the kwargs
    options = kwargs.get('q_options', kwargs)
    options.pop('hook', None)
    options['broker'] = options.get('broker', get_broker())
    options['group'] = iter_group
    options['iter_count'] = iter_count
    if options.get('cached', None):
        options['iter_cached'] = options['cached']
    options['cached'] = True
    # save the original arguments
    broker = options['broker']
    broker.cache.set('{}:{}:args'.format(broker.list_key, iter_group), SignedPackage.dumps(args_iter))
    for args in args_iter:
        if not isinstance(args, tuple):
            args = (args,)
        async_task(func, *args, **options)
    return iter_group
Example 35
def async_chain(chain, group=None, cached=Conf.CACHED, sync=Conf.SYNC, broker=None):
    """
    enqueues a chain of tasks
    the chain must be in the format [(func,(args),{kwargs}),(func,(args),{kwargs})]
    """
    if not group:
        group = uuid()[1]
    args = ()
    kwargs = {}
    task = chain.pop(0)
    if type(task) is not tuple:
        task = (task,)
    if len(task) > 1:
        args = task[1]
    if len(task) > 2:
        kwargs = task[2]
    kwargs['chain'] = chain
    kwargs['group'] = group
    kwargs['cached'] = cached
    kwargs['sync'] = sync
    kwargs['broker'] = broker or get_broker()
    async_task(task[0], *args, **kwargs)
    return group
Example 36
def async_task(func,
               *pos_args,
               args=None,
               kwargs=None,
               name=None,
               hook=None,
               group=None,
               timeout=None,
               **q_options):
    """
    Queue a task for the cluster.
    :param func: Callable function object or string representation of module.function
    :param pos_args: Positional arguments to provide to func
    :param args: Positional arguments to provide to func
    :param kwargs: Keyword arguments to provide to func
    :param name: Optional custom name of task
    :param hook: Function to call after task complete (provided Task instance as argument)
    :param str group: Group identifier (to correlate related tasks)
    """
    func = validate_function(func)
    hook = validate_function(hook)

    args = tuple(pos_args or args or tuple())

    keywords = (kwargs or {}).copy()  # kwargs defaults to None, so guard before copying
    opt_keys = (
        "hook",
        "group",
        "save",
        "sync",  # Whether to run the task synchronously
        "cached",  # Remove
        "ack_failure",  # Causes failed tasks to still mark status as complete
        "iter_count",  # Remove
        "iter_cached",  # Remove
        "chain",  # Use prerequisite instead of chain
        "broker",  # dont need
        "timeout",
    )
    q_options = keywords.pop("q_options", {})
    # get an id
    tag = uuid()
    # Create task instance
    task = Task.objects.create(
        id=tag[1],
        name=name or tag[0],
        func=func,
        args=args,
        kwargs=kwargs,
        hook=hook,
        group=group,
    )
    # push optionals
    for key in opt_keys:
        if q_options and key in q_options:
            task[key] = q_options[key]
        elif key in keywords:
            task[key] = keywords.pop(key)
    # don't serialize the broker
    broker = task.pop("broker", get_broker())
    # overrides
    if "cached" not in task and Conf.CACHED:
        task["cached"] = Conf.CACHED
    if "sync" not in task and Conf.SYNC:
        task["sync"] = Conf.SYNC
    if "ack_failure" not in task and Conf.ACK_FAILURES:
        task["ack_failure"] = Conf.ACK_FAILURES
    # finalize
    task["kwargs"] = keywords
    task["started"] = timezone.now()
    # signal it
    pre_enqueue.send(sender="django_q", task=task)
    # sign it
    pack = SignedPackage.dumps(task)
    if task.get("sync", False):
        return _sync(pack)
    # push it
    enqueue_id = broker.enqueue(pack)
    logger.info(f"Enqueued {enqueue_id}")
    logger.debug(f"Pushed {tag}")
    return task["id"]
Example 37
def canceled_sqs(monkeypatch):
    monkeypatch.setattr(
        Conf,
        "SQS",
        {
            "aws_region": os.getenv("AWS_REGION"),
            "aws_access_key_id": os.getenv("AWS_ACCESS_KEY_ID"),
            "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"),
        },
    )
    # check broker
    broker = get_broker(list_key=uuid()[0])
    assert broker.ping() is True
    assert broker.info() is not None
    assert broker.queue_size() == 0
    # async_task
    broker.enqueue("test")
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == "test"
    broker.acknowledge(task[0])
    assert broker.dequeue() is None
    # Retry test
    monkeypatch.setattr(Conf, "RETRY", 1)
    broker.enqueue("test")
    sleep(2)
    # Sometimes SQS is not linear
    task = broker.dequeue()
    if not task:
        pytest.skip("SQS being weird")
    task = task[0]
    assert len(task) > 0
    broker.acknowledge(task[0])
    sleep(2)
    # delete job
    monkeypatch.setattr(Conf, "RETRY", 60)
    broker.enqueue("test")
    sleep(1)
    task = broker.dequeue()
    if not task:
        pytest.skip("SQS being weird")
    task_id = task[0][0]
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    broker.enqueue("test")
    while task is None:
        task = broker.dequeue()[0]
    broker.fail(task[0])
    # bulk test
    for _ in range(10):
        broker.enqueue("test")
    monkeypatch.setattr(Conf, "BULK", 12)
    tasks = broker.dequeue()
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # duplicate acknowledge
    broker.acknowledge(task[0])
    assert broker.lock_size() == 0
    # delete queue
    broker.enqueue("test")
    broker.purge_queue()
    broker.delete_queue()
Example 38
def test_admin_views(admin_client, monkeypatch):
    monkeypatch.setattr(Conf, 'ORM', 'default')
    s = schedule('schedule.test')
    tag = uuid()
    f = Task.objects.create(
        id=tag[1],
        name=tag[0],
        func='test.fail',
        started=timezone.now(),
        stopped=timezone.now(),
        success=False)
    tag = uuid()
    t = Task.objects.create(
        id=tag[1],
        name=tag[0],
        func='test.success',
        started=timezone.now(),
        stopped=timezone.now(),
        success=True)
    q = OrmQ.objects.create(
        key='test',
        payload=SignedPackage.dumps({'id': 1, 'func': 'test', 'name': 'test'}))
    admin_urls = (
        # schedule
        reverse('admin:django_q_schedule_changelist'),
        reverse('admin:django_q_schedule_add'),
        reverse('admin:django_q_schedule_change', args=(s.id,)),
        reverse('admin:django_q_schedule_history', args=(s.id,)),
        reverse('admin:django_q_schedule_delete', args=(s.id,)),
        # success
        reverse('admin:django_q_success_changelist'),
        reverse('admin:django_q_success_change', args=(t.id,)),
        reverse('admin:django_q_success_history', args=(t.id,)),
        reverse('admin:django_q_success_delete', args=(t.id,)),
        # failure
        reverse('admin:django_q_failure_changelist'),
        reverse('admin:django_q_failure_change', args=(f.id,)),
        reverse('admin:django_q_failure_history', args=(f.id,)),
        reverse('admin:django_q_failure_delete', args=(f.id,)),
        # orm queue
        reverse('admin:django_q_ormq_changelist'),
        reverse('admin:django_q_ormq_change', args=(q.id,)),
        reverse('admin:django_q_ormq_history', args=(q.id,)),
        reverse('admin:django_q_ormq_delete', args=(q.id,)),

    )
    for url in admin_urls:
        response = admin_client.get(url)
        assert response.status_code == 200

    # resubmit the failure
    url = reverse('admin:django_q_failure_changelist')
    data = {'action': 'retry_failed',
            '_selected_action': [f.pk]}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    assert Failure.objects.filter(name=f.id).exists() is False
    # change q
    url = reverse('admin:django_q_ormq_change', args=(q.id,))
    data = {'key': 'default', 'payload': 'test', 'lock_0': '2015-09-17', 'lock_1': '14:31:51', '_save': 'Save'}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    # delete q
    url = reverse('admin:django_q_ormq_delete', args=(q.id,))
    data = {'post': 'yes'}
    response = admin_client.post(url, data)
    assert response.status_code == 302
Example 39
def test_admin_views(admin_client, monkeypatch):
    monkeypatch.setattr(Conf, "ORM", "default")
    s = schedule("schedule.test")
    tag = uuid()
    f = Task.objects.create(
        id=tag[1],
        name=tag[0],
        func="test.fail",
        started=timezone.now(),
        stopped=timezone.now(),
        success=False,
    )
    tag = uuid()
    t = Task.objects.create(
        id=tag[1],
        name=tag[0],
        func="test.success",
        started=timezone.now(),
        stopped=timezone.now(),
        success=True,
    )
    q = OrmQ.objects.create(
        key="test",
        payload=SignedPackage.dumps({
            "id": 1,
            "func": "test",
            "name": "test"
        }),
    )
    admin_urls = (
        # schedule
        reverse("admin:django_q_schedule_changelist"),
        reverse("admin:django_q_schedule_add"),
        reverse("admin:django_q_schedule_change", args=(s.id, )),
        reverse("admin:django_q_schedule_history", args=(s.id, )),
        reverse("admin:django_q_schedule_delete", args=(s.id, )),
        # success
        reverse("admin:django_q_success_changelist"),
        reverse("admin:django_q_success_change", args=(t.id, )),
        reverse("admin:django_q_success_history", args=(t.id, )),
        reverse("admin:django_q_success_delete", args=(t.id, )),
        # failure
        reverse("admin:django_q_failure_changelist"),
        reverse("admin:django_q_failure_change", args=(f.id, )),
        reverse("admin:django_q_failure_history", args=(f.id, )),
        reverse("admin:django_q_failure_delete", args=(f.id, )),
        # orm queue
        reverse("admin:django_q_ormq_changelist"),
        reverse("admin:django_q_ormq_change", args=(q.id, )),
        reverse("admin:django_q_ormq_history", args=(q.id, )),
        reverse("admin:django_q_ormq_delete", args=(q.id, )),
    )
    for url in admin_urls:
        response = admin_client.get(url)
        assert response.status_code == 200

    # resubmit the failure
    url = reverse("admin:django_q_failure_changelist")
    data = {"action": "retry_failed", "_selected_action": [f.pk]}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    assert Failure.objects.filter(name=f.id).exists() is False
    # change q
    url = reverse("admin:django_q_ormq_change", args=(q.id, ))
    data = {
        "key": "default",
        "payload": "test",
        "lock_0": "2015-09-17",
        "lock_1": "14:31:51",
        "_save": "Save",
    }
    response = admin_client.post(url, data)
    assert response.status_code == 302
    # delete q
    url = reverse("admin:django_q_ormq_delete", args=(q.id, ))
    data = {"post": "yes"}
    response = admin_client.post(url, data)
    assert response.status_code == 302