def test_cached(broker):
    broker.purge_queue()
    broker.cache.clear()
    group = 'cache_test'
    # queue the tests
    task_id = async_task('math.copysign', 1, -1, cached=True, broker=broker)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async_task('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    # deliberately misspelled function name, so the group gets exactly one failure
    async_task('math.popysign', 1, -1, cached=True, broker=broker, group=group)
    iter_id = async_iter('math.floor', list(range(10)), cached=True)
    # test wait timeout on the cache backend
    assert result(task_id, wait=10, cached=True) is None
    assert fetch(task_id, wait=10, cached=True) is None
    assert result_group(group, wait=10, cached=True) is None
    assert result_group(group, count=2, wait=10, cached=True) is None
    assert fetch_group(group, wait=10, cached=True) is None
    assert fetch_group(group, count=2, wait=10, cached=True) is None
    # run a single inline cluster
    task_count = 17
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # assert results
    assert result(task_id, wait=500, cached=True) == -1
    assert fetch(task_id, wait=500, cached=True).result == -1
    # make sure it's not in the db backend
    assert fetch(task_id) is None
    # assert group
    assert count_group(group, cached=True) == 6
    assert count_group(group, cached=True, failures=True) == 1
    assert result_group(group, cached=True) == [-1, -1, -1, -1, -1]
    assert len(result_group(group, cached=True, failures=True)) == 6
    assert len(fetch_group(group, cached=True)) == 6
    assert len(fetch_group(group, cached=True, failures=False)) == 5
    delete_group(group, cached=True)
    assert count_group(group, cached=True) is None
    delete_cached(task_id)
    assert result(task_id, cached=True) is None
    assert fetch(task_id, cached=True) is None
    # iter cached
    assert result(iter_id) is None
    assert result(iter_id, cached=True) is not None
    broker.cache.clear()
def parzen_async():
    # clear the previous results
    delete_group('parzen', cached=True)
    mu_vec = numpy.array([0, 0])
    cov_mat = numpy.array([[1, 0], [0, 1]])
    sample = numpy.random.multivariate_normal(mu_vec, cov_mat, 10000)
    widths = numpy.linspace(1.0, 1.2, 100)
    x = numpy.array([[0], [0]])
    # async_task them with a group label to the cache backend
    for w in widths:
        async_task(parzen_estimation, sample, x, w, group='parzen', cached=True)
    # return after 100 results
    return result_group('parzen', count=100, cached=True)
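# NOTE: parzen_estimation is referenced above but not defined in this section.
# The sketch below is a minimal, illustrative version assuming a hypercube
# (uniform) Parzen-window kernel and the (sample, x, w) argument order used
# above; it is not necessarily the actual implementation behind the example.
import numpy


def parzen_estimation(x_samples, point_x, h):
    # count the samples that fall inside a hypercube with edge length h
    # centred on point_x, then normalise by n * h^d (the window volume)
    k_n = 0
    for row in x_samples:
        # after scaling by h, the sample is inside the window if every
        # coordinate offset lies within +/- 0.5
        if numpy.all(numpy.abs((row - point_x.ravel()) / h) <= 0.5):
            k_n += 1
    dimensions = point_x.shape[0]
    # return the width together with the estimate, so grouped results can be
    # matched back to the window width that produced them
    return h, k_n / (len(x_samples) * h ** dimensions)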
def test_enqueue(broker, admin_user):
    broker.list_key = 'cluster_test:q'
    broker.delete_queue()
    a = async_task('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST,
                   hook='django_q.tests.test_cluster.assert_result', broker=broker)
    b = async_task('django_q.tests.tasks.count_letters2', WordClass(),
                   hook='django_q.tests.test_cluster.assert_result', broker=broker)
    # unknown argument
    c = async_task('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, 'oneargumentoomany',
                   hook='django_q.tests.test_cluster.assert_bad_result', broker=broker)
    # unknown function
    d = async_task('django_q.tests.tasks.does_not_exist', WordClass(),
                   hook='django_q.tests.test_cluster.assert_bad_result', broker=broker)
    # function without result
    e = async_task('django_q.tests.tasks.countdown', 100000, broker=broker)
    # function as instance
    f = async_task(multiply, 753, 2, hook=assert_result, broker=broker)
    # model as argument
    g = async_task('django_q.tests.tasks.get_task_name', Task(name='John'), broker=broker)
    # args, kwargs, group and broken hook
    h = async_task('django_q.tests.tasks.word_multiply', 2, word='django', hook='fail.me', broker=broker)
    # args unpickle test
    j = async_task('django_q.tests.tasks.get_user_id', admin_user, broker=broker, group='test_j')
    # q_options and save opt-out test
    k = async_task('django_q.tests.tasks.get_user_id', admin_user,
                   q_options={'broker': broker, 'group': 'test_k', 'save': False, 'timeout': 90})
    # test unicode
    assert Task(name='Amalia').__str__() == 'Amalia'
    # check if everything has a task id
    assert isinstance(a, str)
    assert isinstance(b, str)
    assert isinstance(c, str)
    assert isinstance(d, str)
    assert isinstance(e, str)
    assert isinstance(f, str)
    assert isinstance(g, str)
    assert isinstance(h, str)
    assert isinstance(j, str)
    assert isinstance(k, str)
    # run the cluster to execute the tasks
    task_count = 10
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push the tasks
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    # test wait timeout
    assert result(j, wait=10) is None
    assert fetch(j, wait=10) is None
    assert result_group('test_j', wait=10) is None
    assert result_group('test_j', count=2, wait=10) is None
    assert fetch_group('test_j', wait=10) is None
    assert fetch_group('test_j', count=2, wait=10) is None
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # check the results
    # task a
    result_a = fetch(a)
    assert result_a is not None
    assert result_a.success is True
    assert result(a) == 1506
    # task b
    result_b = fetch(b)
    assert result_b is not None
    assert result_b.success is True
    assert result(b) == 1506
    # task c
    result_c = fetch(c)
    assert result_c is not None
    assert result_c.success is False
    # task d
    result_d = fetch(d)
    assert result_d is not None
    assert result_d.success is False
    # task e
    result_e = fetch(e)
    assert result_e is not None
    assert result_e.success is True
    assert result(e) is None
    # task f
    result_f = fetch(f)
    assert result_f is not None
    assert result_f.success is True
    assert result(f) == 1506
    # task g
    result_g = fetch(g)
    assert result_g is not None
    assert result_g.success is True
    assert result(g) == 'John'
    # task h
    result_h = fetch(h)
    assert result_h is not None
    assert result_h.success is True
    assert result(h) == 12
    # task j
    result_j = fetch(j)
    assert result_j is not None
    assert result_j.success is True
    assert result_j.result == result_j.args[0].id
    # check fetch, result by name
    assert fetch(result_j.name) == result_j
    assert result(result_j.name) == result_j.result
    # groups
    assert result_group('test_j')[0] == result_j.result
    assert result_j.group_result()[0] == result_j.result
    assert result_group('test_j', failures=True)[0] == result_j.result
    assert result_j.group_result(failures=True)[0] == result_j.result
    assert fetch_group('test_j')[0].id == result_j.id
    assert fetch_group('test_j', failures=False)[0].id == result_j.id
    assert count_group('test_j') == 1
    assert result_j.group_count() == 1
    assert count_group('test_j', failures=True) == 0
    assert result_j.group_count(failures=True) == 0
    assert delete_group('test_j') == 1
    assert result_j.group_delete() == 0
    deleted_group = delete_group('test_j', tasks=True)
    assert deleted_group is None or deleted_group[0] == 0  # Django 1.9
    deleted_group = result_j.group_delete(tasks=True)
    assert deleted_group is None or deleted_group[0] == 0  # Django 1.9
    # task k should not have been saved
    assert fetch(k) is None
    assert fetch(k, 100) is None
    assert result(k, 100) is None
    broker.delete_queue()
# imports assume django-tenants; django-tenant-schemas exposes the same
# connection attribute and schema_context helper
from django.db import connection
from django_tenants.utils import schema_context

from django_q.conf import Conf
from django_q.tasks import delete_group


def delete_task_group(group_id, tasks=False, cached=Conf.CACHED):
    # wrapper around delete_group that is aware of the active tenant schema:
    # re-activating connection.schema_name ensures the deletes run against
    # the caller's tenant rather than the public schema
    schema_name = connection.schema_name
    with schema_context(schema_name):
        return delete_group(group_id, tasks, cached)
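# For illustration, a hypothetical call site; the schema name 'acme' and the
# 'parzen' group are made up for this example.
with schema_context('acme'):
    # delete_task_group re-reads connection.schema_name, so the deletes run
    # against the 'acme' tenant that is active here
    delete_task_group('parzen', tasks=True)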