def test_chord_on_error(self, manager):
    """Integration test: a chord whose header contains a failing task.

    Verifies that when one header task fails, the body's ``on_error``
    callback runs and the results of the *successful* header tasks are
    still preserved in the redis result backend.

    Requires a live redis result backend and running workers; skipped
    otherwise.
    """
    from celery import states
    from .tasks import ExpectedException
    import time

    # Only the redis backend preserves the group's ".j" key inspected below.
    if not manager.app.conf.result_backend.startswith('redis'):
        raise pytest.skip('Requires redis result backend.')

    # Run the chord and wait for the error callback to finish.
    c1 = chord(
        header=[add.s(1, 2), add.s(3, 4), fail.s()],
        body=print_unicode.s('This should not be called').on_error(
            chord_error.s()),
    )
    res = c1()
    try:
        res.wait(propagate=False)
    except ExpectedException:
        pass
    # Got to wait for children to populate.
    while not res.children:
        time.sleep(0.1)
    try:
        res.children[0].children[0].wait(propagate=False)
    except ExpectedException:
        pass

    # Extract the results of the successful tasks from the chord.
    #
    # We could do this inside the error handler, and probably would in a
    # real system, but for the purposes of the test it's obnoxious to get
    # data out of the error handler.
    #
    # So for clarity of our test, we instead do it here.

    # Use the error callback's result to find the failed task.
    # NOTE(review): splitting the error message and taking token 3 assumes
    # a fixed message format ("Callback error: <task_id> ...") — fragile;
    # confirm against the chord_error task's message.
    error_callback_result = AsyncResult(
        res.children[0].children[0].result[0])
    failed_task_id = error_callback_result.result.args[0].split()[3]

    # Use new group_id result metadata to get group ID.
    # _get_task_meta() is a private AsyncResult API; used here only for test
    # introspection.
    failed_task_result = AsyncResult(failed_task_id)
    original_group_id = failed_task_result._get_task_meta()['group_id']

    # Use group ID to get preserved group result.
    backend = fail.app.backend
    j_key = backend.get_key_for_group(original_group_id, '.j')
    redis_connection = get_redis_connection()
    chord_results = [
        backend.decode(t) for t in redis_connection.lrange(j_key, 0, 3)
    ]

    # Validate group result: the two successful add() results survive...
    assert [cr[3] for cr in chord_results if cr[2] == states.SUCCESS] == \
        [3, 7]

    # ...and exactly one entry (the fail() task) is non-successful.
    assert len([cr for cr in chord_results if cr[2] != states.SUCCESS]) == 1
def test_chord_on_error(self, manager):
    """Integration test: chord error handling with the redis backend.

    Runs a chord with one deliberately failing header task and asserts
    that the group results of the successful header tasks remain
    available in redis after the error callback has fired.

    NOTE(review): this appears to duplicate another test of the same
    name elsewhere in the file — worth confirming only one is intended.
    """
    from celery import states
    from .tasks import ExpectedException
    import time

    # The assertions below read redis keys directly, so only the redis
    # result backend is supported.
    if not manager.app.conf.result_backend.startswith('redis'):
        raise pytest.skip('Requires redis result backend.')

    # Run the chord and wait for the error callback to finish.
    c1 = chord(
        header=[add.s(1, 2), add.s(3, 4), fail.s()],
        body=print_unicode.s('This should not be called').on_error(
            chord_error.s()),
    )
    res = c1()
    try:
        res.wait(propagate=False)
    except ExpectedException:
        pass
    # Got to wait for children to populate.
    while not res.children:
        time.sleep(0.1)
    try:
        res.children[0].children[0].wait(propagate=False)
    except ExpectedException:
        pass

    # Extract the results of the successful tasks from the chord.
    #
    # We could do this inside the error handler, and probably would in a
    # real system, but for the purposes of the test it's obnoxious to get
    # data out of the error handler.
    #
    # So for clarity of our test, we instead do it here.

    # Use the error callback's result to find the failed task.
    # NOTE(review): taking word 3 of the error message assumes a fixed
    # message layout from the error callback — verify against chord_error.
    error_callback_result = AsyncResult(
        res.children[0].children[0].result[0])
    failed_task_id = error_callback_result.result.args[0].split()[3]

    # Use new group_id result metadata to get group ID.
    failed_task_result = AsyncResult(failed_task_id)
    original_group_id = failed_task_result._get_task_meta()['group_id']

    # Use group ID to get preserved group result.
    backend = fail.app.backend
    j_key = backend.get_key_for_group(original_group_id, '.j')
    redis_connection = get_redis_connection()
    chord_results = [backend.decode(t)
                     for t in redis_connection.lrange(j_key, 0, 3)]

    # Validate group result: successful add() results are preserved...
    assert [cr[3] for cr in chord_results if cr[2] == states.SUCCESS] == \
        [3, 7]

    # ...and exactly one non-successful entry (the failing task) exists.
    assert len([cr for cr in chord_results if cr[2] != states.SUCCESS]
               ) == 1
def retrieve_execution(self, identifier):
    """Return the status/result payload for the task with *identifier*.

    Waits up to one second for the task to finish; three outcomes:

    * finished  -> ``{"task_id": ..., "data": {"result": ..., <meta>}}``
    * still running (timeout) -> ``{"data": {"running": True, <meta>}}``
    * failed -> ``{"data": {"error": <message>, <meta>}}`` with the stored
      exception stringified so the payload stays JSON-serializable.
    """
    res = AsyncResult(identifier)
    try:
        result = res.get(timeout=1)
        data = {"task_id": identifier,
                "data": {"result": result, **res._get_task_meta()}}
    except TimeoutError:
        # Task hasn't finished within the 1s window — report it as running.
        data = {"task_id": identifier,
                "data": {"running": True, **res._get_task_meta()}}
    except Exception as e:
        meta = res._get_task_meta()
        # The backend stores the raised exception under 'result'; replace it
        # with its string form so the response can be serialized.
        # (Previously this rebound ``res`` itself, shadowing the AsyncResult.)
        if isinstance(meta['result'], BaseException):
            stored_exc = meta.pop('result')
            meta['result'] = str(stored_exc)
        # e.args may be empty (e.g. ``raise ValueError()``) — fall back to
        # str(e) instead of crashing with IndexError.
        data = {"task_id": identifier,
                "data": {"error": e.args[0] if e.args else str(e),
                         **meta}}
    return data
def get(self, request, *args, **kwargs):
    """Start or report on a cart-zipping task.

    This method is expecting one of two possible query parameters:

    task=[uuid]: Return information on the status of a cart zipping
    task, including a path to download the zip if it is done.

    extensions[]: Start a new cart-zipping task for the requesting
    user. Return a task_id, which can be used in the above query.
    """
    if request.GET.get('task'):
        task_id = request.GET['task']
        task = AsyncResult(task_id)
        # Use task.status consistently (the original mixed .status and its
        # alias .state).
        if task.status == "PENDING":
            return Response({'ready': task.ready(),
                             'status': task.status,
                             'progress': 0})
        elif task.status == "SUCCESS":
            # task.result holds the path to the finished zip.
            return Response({'ready': task.ready(),
                             'status': "SUCCESS",
                             'progress': 100,
                             'path': task.result})
        elif task.status == "FAILURE":
            server_error = status.HTTP_500_INTERNAL_SERVER_ERROR
            return Response({'ready': task.ready(),
                             'status': "FAILURE"},
                            status=server_error)
        else:
            # In-progress: the task stores {'progress': N} in its result
            # metadata while running.
            meta = task._get_task_meta()
            progress = meta.get('result', {}).get('progress', 0)
            return Response({'ready': task.ready(),
                             'progress': progress,
                             'status': "PROGRESS"})
    if request.GET.get('extensions[]'):
        extensions = request.GET.getlist('extensions[]')
        # Any value other than the literal string 'false' (including an
        # absent parameter) enables directory creation — same behavior as
        # the original if/else ladder.
        make_dirs = request.GET.get('make_dirs') != 'false'
        cart = request.session.get("cart", {})
        task_id = str(uuid.uuid4())
        tasks.zip_files.apply_async(
            args=[cart, extensions, request.user.username, make_dirs],
            task_id=task_id)
        return Response({"task": task_id}, status=status.HTTP_200_OK)
    # No recognized query parameter: reply with an empty 200.
    return Response(status=status.HTTP_200_OK)
class QueryTask(object):
    """Wraps a Celery ``AsyncResult`` for a query-execution job and maps
    Celery task states onto the legacy numeric Job statuses expected by
    the client."""

    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        'PENDING': 1,
        'STARTED': 2,
        'SUCCESS': 3,
        'FAILURE': 4,
        'REVOKED': 4
    }

    def __init__(self, job_id=None, async_result=None):
        # Accept either an existing AsyncResult or a job id to look one up.
        if async_result:
            self._async_result = async_result
        else:
            self._async_result = AsyncResult(job_id, app=celery)

    @property
    def id(self):
        return self._async_result.id

    def to_dict(self):
        """Return a JSON-serializable summary of the task's state."""
        task_info = self._async_result._get_task_meta()
        result, task_status = task_info['result'], task_info['status']
        if task_status == 'STARTED':
            # While running, the task stores progress metadata in 'result'.
            updated_at = result.get('start_time', 0)
        else:
            updated_at = 0

        status = self.STATUSES[task_status]

        if isinstance(result, (TimeLimitExceeded, SoftTimeLimitExceeded)):
            error = "Query exceeded Redash query execution time limit."
            status = 4
        elif isinstance(result, Exception):
            # Use str(result): Exception.message was removed in Python 3
            # (and deprecated since 2.6).
            error = str(result)
            status = 4
        elif task_status == 'REVOKED':
            error = 'Query execution cancelled.'
        else:
            error = ''

        if task_status == 'SUCCESS' and not error:
            # On success 'result' is the stored query_result id.
            query_result_id = result
        else:
            query_result_id = None

        return {
            'id': self._async_result.id,
            'updated_at': updated_at,
            'status': status,
            'error': error,
            'query_result_id': query_result_id,
        }

    @property
    def is_cancelled(self):
        return self._async_result.status == 'REVOKED'

    @property
    def celery_status(self):
        return self._async_result.status

    def ready(self):
        return self._async_result.ready()

    def cancel(self):
        # SIGINT lets the worker raise inside the task for a clean abort.
        return self._async_result.revoke(terminate=True, signal='SIGINT')
def test_chord_on_error(self, manager):
    """Integration test: a chord with a failing header task preserves the
    successful header results in the redis backend.

    Runs the chord, waits for the error callback to complete, recovers
    the failed task's group id from the callback's exception message,
    and inspects the preserved group result directly in redis.
    """
    from celery import states
    from .tasks import ExpectedException
    import time

    # The assertions below read redis keys directly, so only the redis
    # result backend is supported.
    if not manager.app.conf.result_backend.startswith('redis'):
        raise pytest.skip('Requires redis result backend.')

    # Run the chord and wait for the error callback to finish.
    c1 = chord(
        header=[add.s(1, 2), add.s(3, 4), fail.s()],
        body=print_unicode.s('This should not be called').on_error(
            chord_error.s()),
    )
    res = c1()
    with pytest.raises(ExpectedException):
        res.get(propagate=True)

    # Got to wait for children to populate.
    check = (
        lambda: res.children,
        lambda: res.children[0].children,
        lambda: res.children[0].children[0].result,
    )
    while not all(f() for f in check):
        # Sleep briefly instead of busy-spinning so we don't peg a CPU
        # core while the workers catch up.
        time.sleep(0.1)

    # Extract the results of the successful tasks from the chord.
    #
    # We could do this inside the error handler, and probably would in a
    # real system, but for the purposes of the test it's obnoxious to get
    # data out of the error handler.
    #
    # So for clarity of our test, we instead do it here.

    # Use the error callback's result to find the failed task.
    uuid_patt = re.compile(
        r"[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}")
    callback_chord_exc = AsyncResult(
        res.children[0].children[0].result[0]).result
    failed_task_id = uuid_patt.search(str(callback_chord_exc))
    # Fixed: the failure message previously referenced the undefined name
    # ``callback_exc``, which raised NameError instead of showing the
    # assertion message.
    assert (failed_task_id is not None), \
        "No task ID in %r" % callback_chord_exc
    failed_task_id = failed_task_id.group()

    # Use new group_id result metadata to get group ID.
    failed_task_result = AsyncResult(failed_task_id)
    original_group_id = failed_task_result._get_task_meta()['group_id']

    # Use group ID to get preserved group result.
    backend = fail.app.backend
    j_key = backend.get_key_for_group(original_group_id, '.j')
    redis_connection = get_redis_connection()
    # The redis key is either a list or zset depending on configuration
    if manager.app.conf.result_backend_transport_options.get(
            'result_chord_ordered', True):
        job_results = redis_connection.zrange(j_key, 0, 3)
    else:
        job_results = redis_connection.lrange(j_key, 0, 3)
    chord_results = [backend.decode(t) for t in job_results]

    # Validate group result: the two successful add() results survive...
    assert [cr[3] for cr in chord_results if cr[2] == states.SUCCESS] == \
        [3, 7]

    # ...and exactly one non-successful entry (the failing task) exists.
    assert len([cr for cr in chord_results if cr[2] != states.SUCCESS]) == 1
def test_create_vm_task(self):
    """Tests the create vm task for monthly subscription

    This test is supposed to validate the proper execution of celery
    create_vm_task on production, as we have no other way to do this.
    """
    # We create a VM from the first template available to DCL
    vm_template = VMTemplate.objects.all().first()
    template_data = VMTemplateSerializer(vm_template).data

    # The specs of VM that we want to create
    # (cpu cores, memory in GB, disk in GB, price in CHF — presumably;
    # confirm units against get_vm_price.)
    specs = {'cpu': 1, 'memory': 2, 'disk_size': 10, 'price': 15}

    stripe_customer = StripeCustomer.get_or_create(
        email=self.customer_email,
        token=self.token)
    card_details = self.stripe_utils.get_card_details(
        stripe_customer.stripe_id)
    # get_card_details returns a dict; a non-None 'error' key means the
    # card lookup failed.
    card_details_dict = card_details.get('error')
    self.assertEquals(card_details_dict, None)
    billing_address_data = {
        'cardholder_name': self.customer_name,
        'postal_code': '1231',
        'country': 'CH',
        'token': self.token,
        'street_address': 'Monty\'s Street',
        'city': 'Hollywood'
    }
    vm_template_id = template_data.get('id', 1)
    cpu = specs.get('cpu')
    memory = specs.get('memory')
    disk_size = specs.get('disk_size')
    amount_to_be_charged = get_vm_price(cpu=cpu, memory=memory,
                                        disk_size=disk_size)
    plan_name = StripeUtils.get_stripe_plan_name(cpu=cpu,
                                                 memory=memory,
                                                 disk_size=disk_size)
    stripe_plan_id = StripeUtils.get_stripe_plan_id(cpu=cpu,
                                                    ram=memory,
                                                    ssd=disk_size,
                                                    version=1,
                                                    app='dcl')
    stripe_plan = self.stripe_utils.get_or_create_stripe_plan(
        amount=amount_to_be_charged,
        name=plan_name,
        stripe_plan_id=stripe_plan_id)
    subscription_result = self.stripe_utils.subscribe_customer_to_plan(
        stripe_customer.stripe_id,
        [{"plan": stripe_plan.get('response_object').stripe_plan_id}])
    stripe_subscription_obj = subscription_result.get('response_object')
    # Check if the subscription was approved and is active
    if stripe_subscription_obj is None \
            or stripe_subscription_obj.status != 'active':
        msg = subscription_result.get('error')
        raise Exception("Creating subscription failed: {}".format(msg))
    billing_address = BillingAddress(
        cardholder_name=billing_address_data['cardholder_name'],
        street_address=billing_address_data['street_address'],
        city=billing_address_data['city'],
        postal_code=billing_address_data['postal_code'],
        country=billing_address_data['country'])
    billing_address.save()
    # vm_id=0 marks the order as not yet bound to a real VM.
    order = HostingOrder.create(price=specs['price'],
                                vm_id=0,
                                customer=stripe_customer,
                                billing_address=billing_address)
    async_task = create_vm_task.delay(vm_template_id, self.user,
                                      specs,
                                      template_data,
                                      order.id)
    new_vm_id = 0
    res = None
    # Poll the celery result every 5s; the task's result is the created
    # VM's id (> 0) once provisioning succeeds.
    for i in range(0, 10):
        sleep(5)
        res = AsyncResult(async_task.task_id)
        if res.result is not None and res.result > 0:
            new_vm_id = res.result
            break
    # We expect a VM to be created within 50 seconds
    self.assertGreater(
        new_vm_id, 0,
        "VM could not be created. res._get_task_meta() = {}".format(
            res._get_task_meta()))
# Ad-hoc script: configure a Celery app against the cabbage broker, route
# the TestIcTask to the 'test2' queue, fire one task and dump its state.
# (Print calls are written as single-argument calls so the script parses
# on both Python 2 and Python 3.)
# from test_nfs_task import TestNfsTask
test = "amqp://*****:*****@172.16.4.134:5672/cabbage_vhost"
app = Celery('cabbage', broker=test)
app.config_from_object("cabbage.cabbage_celery.celeryconfig")
# import celeryconfig
# app.config_from_object(celeryconfig)

# Dump the effective configuration for debugging.
for k, v in app.conf.items():
    print("{0} {1}".format(k, v))

app.conf.update(CELERY_ROUTES={
    'test_ic_task.TestIcTask': {'queue': 'test2', 'routing_key': 'test2'},
    # 'product_list_crawler.ProductListCrawlerTask': {'queue': 'celery', 'routing_key': 'celery'}
})

state = app.events.State()
print(app.tasks())
# print state.tasks.get(taskId)

# send_task returns an AsyncResult directly; the original discarded it and
# built AsyncResult(taskId) from a name whose definition was commented out
# (NameError). Also fixed: kwargs was a *set* literal {"jobId"}, not a dict.
res = app.send_task("test_ic_task.TestIcTask",
                    kwargs={"jobId": None})  # TODO: supply a real job id
print(res._get_task_meta())
print(res.ready())
print(res.result)
print(dir(res))
print(res.task_name)
class QueryTask(object):
    """Adapter over a Celery ``AsyncResult`` that exposes query-job state
    using the legacy numeric status codes the client still expects."""

    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        "PENDING": 1,
        "STARTED": 2,
        "SUCCESS": 3,
        "FAILURE": 4,
        "REVOKED": 4
    }

    def __init__(self, job_id=None, async_result=None):
        # Wrap a supplied AsyncResult, or look one up by job id.
        if async_result:
            self._async_result = async_result
        else:
            self._async_result = AsyncResult(job_id, app=celery)

    @property
    def id(self):
        return self._async_result.id

    def to_dict(self):
        """Summarize the task as a JSON-serializable dict."""
        meta = self._async_result._get_task_meta()
        outcome = meta["result"]
        state = meta["status"]

        # While STARTED, the task keeps progress info (incl. start_time)
        # in its result payload.
        updated_at = outcome.get("start_time", 0) if state == "STARTED" else 0

        numeric_status = self.STATUSES[state]
        error = ""
        if isinstance(outcome, (TimeLimitExceeded, SoftTimeLimitExceeded)):
            error = TIMEOUT_MESSAGE
            numeric_status = 4
        elif isinstance(outcome, Exception):
            error = str(outcome)
            numeric_status = 4
        elif state == "REVOKED":
            error = "Query execution cancelled."

        # On clean success the result payload is the stored query_result id.
        query_result_id = (
            outcome if state == "SUCCESS" and not error else None
        )

        return {
            "id": self._async_result.id,
            "updated_at": updated_at,
            "status": numeric_status,
            "error": error,
            "query_result_id": query_result_id,
        }

    @property
    def is_cancelled(self):
        return self._async_result.status == "REVOKED"

    @property
    def celery_status(self):
        return self._async_result.status

    def ready(self):
        return self._async_result.ready()

    def cancel(self):
        # SIGINT terminates the worker process running the query cleanly.
        return self._async_result.revoke(terminate=True, signal="SIGINT")