def test_prepare_value_serializes_group_result(self):
    self.b.serializer = 'json'
    g = self.app.GroupResult('group_id', [self.app.AsyncResult('foo')])
    v = self.b.prepare_value(g)
    self.assertIsInstance(v, (list, tuple))
    self.assertEqual(result_from_tuple(v, app=self.app), g)
    v2 = self.b.prepare_value(g[0])
    self.assertIsInstance(v2, (list, tuple))
    self.assertEqual(result_from_tuple(v2, app=self.app), g[0])
    self.b.serializer = 'pickle'
    self.assertIsInstance(self.b.prepare_value(g), self.app.GroupResult)

def test_prepare_value_serializes_group_result(self):
    self.b.serializer = 'json'
    g = self.app.GroupResult('group_id', [self.app.AsyncResult('foo')])
    v = self.b.prepare_value(g)
    assert isinstance(v, (list, tuple))
    assert result_from_tuple(v, app=self.app) == g
    v2 = self.b.prepare_value(g[0])
    assert isinstance(v2, (list, tuple))
    assert result_from_tuple(v2, app=self.app) == g[0]
    self.b.serializer = 'pickle'
    assert isinstance(self.b.prepare_value(g), self.app.GroupResult)

def test_prepare_value_serializes_group_result(self): self.b.serializer = "json" g = self.app.GroupResult("group_id", [self.app.AsyncResult("foo")]) v = self.b.prepare_value(g) self.assertIsInstance(v, (list, tuple)) self.assertEqual(result_from_tuple(v, app=self.app), g) v2 = self.b.prepare_value(g[0]) self.assertIsInstance(v2, (list, tuple)) self.assertEqual(result_from_tuple(v2, app=self.app), g[0]) self.b.serializer = "pickle" self.assertIsInstance(self.b.prepare_value(g), self.app.GroupResult)
def test_with_parent(self):
    x = self.app.AsyncResult(uuid())
    x.parent = self.app.AsyncResult(uuid())
    y = result_from_tuple(x.as_tuple(), self.app)
    self.assertEqual(y, x)
    self.assertEqual(y.parent, x.parent)
    self.assertIsInstance(y.parent, AsyncResult)

def test_with_parent(self):
    x = self.app.AsyncResult(uuid())
    x.parent = self.app.AsyncResult(uuid())
    y = result_from_tuple(x.as_tuple(), self.app)
    assert y == x
    assert y.parent == x.parent
    assert isinstance(y.parent, AsyncResult)

def test_GroupResult_with_parent(self):
    parent = self.app.AsyncResult(uuid())
    result = self.app.GroupResult(
        uuid(),
        [self.app.AsyncResult(uuid()) for _ in range(10)],
        parent,
    )
    second_result = result_from_tuple(result.as_tuple(), self.app)
    assert second_result == result
    assert second_result.parent == parent

def unlock_chord(self, group_id, callback, interval=None, propagate=None,
                 max_retries=None, result=None,
                 Result=app.AsyncResult, GroupResult=app.GroupResult,
                 result_from_tuple=result_from_tuple):
    # if propagate is disabled exceptions raised by chord tasks
    # will be sent as part of the result list to the chord callback.
    # Since 3.1 propagate will be enabled by default, and instead
    # the chord callback changes state to FAILURE with the
    # exception set to ChordError.
    propagate = default_propagate if propagate is None else propagate
    if interval is None:
        interval = self.default_retry_delay

    # check if the task group is ready, and if so apply the callback.
    callback = maybe_signature(callback, app)
    deps = GroupResult(
        group_id,
        [result_from_tuple(r, app=app) for r in result],
        app=app,
    )
    j = deps.join_native if deps.supports_native_join else deps.join

    try:
        ready = deps.ready()
    except Exception as exc:
        raise self.retry(
            exc=exc, countdown=interval, max_retries=max_retries,
        )
    else:
        if not ready:
            raise self.retry(countdown=interval, max_retries=max_retries)

    callback = maybe_signature(callback, app=app)
    try:
        with allow_join_result():
            ret = j(timeout=3.0, propagate=propagate)
    except Exception as exc:
        try:
            culprit = next(deps._failed_join_report())
            reason = 'Dependency {0.id} raised {1!r}'.format(
                culprit, exc,
            )
        except StopIteration:
            reason = repr(exc)
        logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
        app.backend.chord_error_from_stack(callback, ChordError(reason))
    else:
        try:
            callback.delay(ret)
        except Exception as exc:
            logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
            app.backend.chord_error_from_stack(
                callback,
                exc=ChordError('Callback error: {0!r}'.format(exc)),
            )

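For the chord-unlock variants above, the `result` argument is assumed to arrive as a list of `as_tuple()` serializations of the header tasks after JSON round-tripping. A minimal sketch of that shape (the task ids are made up for illustration):

# Assumed wire shape of `result` for a chord header of two tasks;
# each entry is AsyncResult.as_tuple() decoded from JSON.
result = [
    [['task-id-1', None], None],
    [['task-id-2', None], None],
]
# result_from_tuple(r, app=app) turns each entry back into an
# AsyncResult, so the GroupResult can be rebuilt inside the task.
deps = [result_from_tuple(r, app=app) for r in result]
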
def _restore_group(self, group_id):
    """Get meta-data for a group by id."""
    meta = self.get(self.get_key_for_group(group_id))
    # previously this was always pickled, but later this
    # was extended to support other serializers, so the
    # structure is kind of weird.
    if meta:
        meta = self.decode(meta)
        result = meta['result']
        meta['result'] = result_from_tuple(result, self.app)
        return meta

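For context, the companion save path would store the group in its tuple form under the same key, which is why `_restore_group` can hand the decoded value to `result_from_tuple`. A rough sketch of such a writer (not part of the original excerpt):

def _save_group(self, group_id, result):
    # Sketch of the likely counterpart: persist the GroupResult in its
    # as_tuple() form so _restore_group can rebuild it with
    # result_from_tuple() regardless of the configured serializer.
    self.set(self.get_key_for_group(group_id),
             self.encode({'result': result.as_tuple()}))
    return result
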
def _restore_group(self, group_id): """return result value for a group by id.""" group_result = self.GroupModel._default_manager.get_group(group_id) if group_result: res = group_result.as_dict() decoded_result = self.decode_content(group_result, res["result"]) res["result"] = None if decoded_result: res["result"] = result_from_tuple(decoded_result, app=self.app) return res
def group_result(self, app=None):
    """Return the GroupResult of self.

    Arguments:
    ---------
        app (Celery): app instance to create the GroupResult with.
    """
    return CeleryGroupResult(self.group_id, [
        result_from_tuple(r, app=app) for r in json.loads(self.sub_tasks)
    ], app=app)

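A hypothetical counterpart that populates `sub_tasks`, assuming the field holds a JSON list of `as_tuple()` forms (the function and attribute names here are illustrative, not from the original):

import json

def set_group_result(instance, group_result):
    # Hypothetical writer: store each member in its as_tuple() form so
    # group_result() above can rebuild it with result_from_tuple().
    instance.group_id = group_result.id
    instance.sub_tasks = json.dumps(
        [r.as_tuple() for r in group_result.results])
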
def unlock_chord(self, group_id, callback, interval=None,
                 max_retries=MAX_RETRIES, result=None,
                 Result=app.AsyncResult, GroupResult=app.GroupResult,
                 result_from_tuple=result_from_tuple, **kwargs):
    if interval is None:
        interval = self.default_retry_delay

    # check if the task group is ready, and if so apply the callback.
    callback = maybe_signature(callback, app)
    deps = GroupResult(
        group_id,
        [result_from_tuple(r, app=app) for r in result],
        app=app,
    )
    j = deps.join_native if deps.supports_native_join else deps.join

    try:
        ready = deps.ready()
    except Exception as exc:
        raise self.retry(
            exc=exc, countdown=interval, max_retries=max_retries,
            exchange=MAGIC_EXCHANGE, routing_key=choose_worker(),
        )
    else:
        if not ready:
            raise self.retry(countdown=interval, max_retries=max_retries,
                             exchange=MAGIC_EXCHANGE,
                             routing_key=choose_worker())

    callback = maybe_signature(callback, app=app)
    try:
        with allow_join_result():
            ret = j(timeout=3.0, propagate=True)
    except Exception as exc:
        try:
            culprit = next(deps._failed_join_report())
            reason = 'Dependency {0.id} raised {1!r}'.format(
                culprit, exc,
            )
        except StopIteration:
            reason = repr(exc)
        logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
        app.backend.chord_error_from_stack(callback, ChordError(reason))
    else:
        try:
            callback.delay(ret)
        except Exception as exc:
            logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
            app.backend.chord_error_from_stack(
                callback,
                exc=ChordError('Callback error: {0!r}'.format(exc)),
            )

def unlock_chord(group_id, callback, interval=None, propagate=None,
                 max_retries=None, result=None,
                 Result=app.AsyncResult, GroupResult=app.GroupResult,
                 result_from_tuple=result_from_tuple):
    # if propagate is disabled exceptions raised by chord tasks
    # will be sent as part of the result list to the chord callback.
    # Since 3.1 propagate will be enabled by default, and instead
    # the chord callback changes state to FAILURE with the
    # exception set to ChordError.
    propagate = default_propagate if propagate is None else propagate
    if interval is None:
        interval = unlock_chord.default_retry_delay

    # check if the task group is ready, and if so apply the callback.
    deps = GroupResult(
        group_id,
        [result_from_tuple(r, app=app) for r in result],
    )
    j = deps.join_native if deps.supports_native_join else deps.join

    if deps.ready():
        callback = signature(callback, app=app)
        try:
            with allow_join_result():
                ret = j(timeout=3.0, propagate=propagate)
        except Exception as exc:
            try:
                culprit = next(deps._failed_join_report())
                reason = 'Dependency {0.id} raised {1!r}'.format(
                    culprit, exc,
                )
            except StopIteration:
                reason = repr(exc)
            app.backend.chord_error_from_stack(callback, ChordError(reason))
        else:
            try:
                callback.delay(ret)
            except Exception as exc:
                app.backend.chord_error_from_stack(
                    callback,
                    exc=ChordError('Callback error: {0!r}'.format(exc)),
                )
    else:
        raise unlock_chord.retry(countdown=interval,
                                 max_retries=max_retries)

def run(self, tasks, result, group_id, partial_args, add_to_parent=True):
    app = self.app
    result = result_from_tuple(result, app)
    # any partial args are added to all tasks in the group
    taskit = (signature(task, app=app).clone(partial_args)
              for i, task in enumerate(tasks))
    if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
        return app.GroupResult(result.id,
                               [stask.apply(group_id=group_id)
                                for stask in taskit])
    with app.producer_or_acquire() as pub:
        [stask.apply_async(group_id=group_id, producer=pub,
                           add_to_parent=False) for stask in taskit]
    parent = get_current_worker_task()
    if add_to_parent and parent:
        parent.add_trail(result)
    return result

def group(self, tasks, result, group_id, partial_args, add_to_parent=True):
    app = self.app
    result = result_from_tuple(result, app)
    # any partial args are added to all tasks in the group
    taskit = (maybe_signature(task, app=app).clone(partial_args)
              for i, task in enumerate(tasks))
    with app.producer_or_acquire() as producer:
        [stask.apply_async(group_id=group_id, producer=producer,
                           add_to_parent=False) for stask in taskit]
    parent = app.current_worker_task
    if add_to_parent and parent:
        parent.add_trail(result)
    return result

def unlock_chord(self, group_id, callback, interval=None,
                 max_retries=None, result=None,
                 Result=app.AsyncResult, GroupResult=app.GroupResult,
                 result_from_tuple=result_from_tuple, **kwargs):
    if interval is None:
        interval = self.default_retry_delay

    # check if the task group is ready, and if so apply the callback.
    callback = maybe_signature(callback, app)
    deps = GroupResult(
        group_id,
        [result_from_tuple(r, app=app) for r in result],
        app=app,
    )
    j = deps.join_native if deps.supports_native_join else deps.join

    try:
        ready = deps.ready()
    except Exception as exc:
        raise self.retry(
            exc=exc, countdown=interval, max_retries=max_retries,
        )
    else:
        if not ready:
            raise self.retry(countdown=interval, max_retries=max_retries)

    callback = maybe_signature(callback, app=app)
    try:
        with allow_join_result():
            ret = j(timeout=3.0, propagate=True)
    except Exception as exc:
        try:
            culprit = next(deps._failed_join_report())
            reason = 'Dependency {0.id} raised {1!r}'.format(
                culprit, exc,
            )
        except StopIteration:
            reason = repr(exc)
        logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
        app.backend.chord_error_from_stack(callback, ChordError(reason))
    else:
        try:
            callback.delay(ret)
        except Exception as exc:
            logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
            app.backend.chord_error_from_stack(
                callback,
                exc=ChordError('Callback error: {0!r}'.format(exc)),
            )

def unlock_chord(group_id, callback, interval=None, propagate=None,
                 max_retries=None, result=None,
                 Result=app.AsyncResult, GroupResult=app.GroupResult,
                 result_from_tuple=result_from_tuple):
    # if propagate is disabled exceptions raised by chord tasks
    # will be sent as part of the result list to the chord callback.
    # Since 3.1 propagate will be enabled by default, and instead
    # the chord callback changes state to FAILURE with the
    # exception set to ChordError.
    propagate = default_propagate if propagate is None else propagate
    if interval is None:
        interval = unlock_chord.default_retry_delay

    # check if the task group is ready, and if so apply the callback.
    deps = GroupResult(
        group_id,
        [result_from_tuple(r, app=app) for r in result],
    )
    j = deps.join_native if deps.supports_native_join else deps.join

    if deps.ready():
        callback = signature(callback, app=app)
        try:
            with allow_join_result():
                ret = j(propagate=propagate)
        except Exception as exc:
            try:
                culprit = next(deps._failed_join_report())
                reason = 'Dependency {0.id} raised {1!r}'.format(
                    culprit, exc,
                )
            except StopIteration:
                reason = repr(exc)
            app._tasks[callback.task].backend.fail_from_current_stack(
                callback.id, exc=ChordError(reason),
            )
        else:
            try:
                callback.delay(ret)
            except Exception as exc:
                app._tasks[callback.task].backend.fail_from_current_stack(
                    callback.id,
                    exc=ChordError('Callback error: {0!r}'.format(exc)),
                )
    else:
        raise unlock_chord.retry(countdown=interval,
                                 max_retries=max_retries)

def run(self, tasks, result, group_id, partial_args):
    app = self.app
    result = result_from_tuple(result, app)
    # any partial args are added to all tasks in the group
    taskit = (signature(task, app=app).clone(partial_args)
              for i, task in enumerate(tasks))
    if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
        return app.GroupResult(
            result.id,
            [stask.apply(group_id=group_id) for stask in taskit],
        )
    with app.producer_or_acquire() as pub:
        [stask.apply_async(group_id=group_id, publisher=pub,
                           add_to_parent=False) for stask in taskit]
    parent = get_current_worker_task()
    if parent:
        parent.add_trail(result)
    return result

def test_GroupResult(self):
    x = self.app.GroupResult(
        uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)],
    )
    self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app))
    self.assertEqual(x, result_from_tuple(x, self.app))

def taskFromId(id):
    t = pickle.loads(base64.urlsafe_b64decode(id))
    return result_from_tuple(t)

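The matching encoder is not shown in the original. Assuming the id is a base64-encoded pickle of the result's `as_tuple()` form (which is what the decoder above implies), a companion helper could look like this; the name `idFromTask` is made up:

import base64
import pickle

def idFromTask(result):
    # Hypothetical inverse of taskFromId: serialize the result to its
    # tuple form, pickle it, and base64-encode it into an opaque id.
    return base64.urlsafe_b64encode(pickle.dumps(result.as_tuple()))
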
def test_GroupResult(self):
    x = self.app.GroupResult(
        uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)],
    )
    assert x == result_from_tuple(x.as_tuple(), self.app)
    assert x == result_from_tuple(x, self.app)

def test_compat(self):
    uid = uuid()
    x = result_from_tuple([uid, []], app=self.app)
    assert x.id == uid

def test_AsyncResult(self):
    x = self.app.AsyncResult(uuid())
    assert x == result_from_tuple(x.as_tuple(), self.app)
    assert x == result_from_tuple(x, self.app)

def test_compat(self):
    uid = uuid()
    x = result_from_tuple([uid, []], app=self.app)
    self.assertEqual(x.id, uid)

def test_AsyncResult(self):
    x = self.app.AsyncResult(uuid())
    self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app))
    self.assertEqual(x, result_from_tuple(x, self.app))