def create_task(fn, *args, **kwargs):
    task_state = defer(fn, *args, **kwargs)
    task_pickle = deferred.serialize(
        task_wrapper,
        task_state.key.id(),
        deferred.serialize(fn, *args, **strip_defer_kwargs(kwargs)),
        kwargs["task_reference"],
    )
    return task_state, task_pickle
def __getstate__(self):
    l = []
    for callback_data, errback_data in self.callback_pairs:
        if callback_data:
            callback_data = serialize(callback_data[0], *callback_data[1], **callback_data[2])
        if errback_data:
            errback_data = serialize(errback_data[0], *errback_data[1], **errback_data[2])
        if callback_data is None and errback_data is None:
            raise ValueError("Both callbacks are empty; something is wrong")
        l.append((callback_data, errback_data))
    return dict(_raise_exception_types=self._raise_exception_types, callback_pairs=l)
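# A minimal round-trip sketch for the serialize() calls above: deferred.run()
# is the inverse helper, unpickling the payload and invoking the callable with
# its saved args/kwargs. _example_roundtrip is illustrative only, not part of
# the original class.
import logging
from google.appengine.ext.deferred import serialize, run

def _example_roundtrip():
    payload = serialize(logging.info, "callback ran with %s", "args")
    run(payload)  # executes logging.info("callback ran with %s", "args")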
def _new_deferred_defer(obj, *args, **kwargs):
    # Sets the current user, and fixes an issue where the transactional argument
    # wasn't forwarded when the task was too large.
    from rogerthat.rpc import users
    from mcfw.consts import MISSING
    if users.get_current_deferred_user() == MISSING:
        kwargs['__user'] = users.get_current_user()
    else:
        kwargs['__user'] = users.get_current_deferred_user()
    taskargs = dict((x, kwargs.pop("_%s" % x, None))
                    for x in ("countdown", "eta", "name", "target", "retry_options"))
    taskargs["url"] = kwargs.pop("_url", deferred.deferred._DEFAULT_URL)
    transactional = kwargs.pop("_transactional", False)
    taskargs["headers"] = dict(deferred.deferred._TASKQUEUE_HEADERS)
    taskargs["headers"].update(kwargs.pop("_headers", {}))
    queue = kwargs.pop("_queue", deferred.deferred._DEFAULT_QUEUE)
    pickled = deferred.serialize(obj, *args, **kwargs)
    try:
        task = taskqueue.Task(payload=pickled, **taskargs)
        return task.add(queue, transactional=transactional)
    except taskqueue.TaskTooLargeError:
        key = deferred.deferred._DeferredTaskEntity(data=pickled).put()
        pickled = deferred.deferred.serialize(deferred.deferred.run_from_datastore, str(key))
        task = taskqueue.Task(payload=pickled, **taskargs)
        # This is the patched line (transactional=transactional).
        return task.add(queue, transactional=transactional)
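# A hedged sketch of how the patched defer above might be installed at import
# time. The patch location is an assumption; only _new_deferred_defer comes
# from the snippet.
from google.appengine.ext import deferred

def _patch_deferred():
    # Route all deferred.defer() calls through the patched version so the
    # TaskTooLargeError fallback also honours the transactional flag.
    deferred.defer = _new_deferred_defer
    deferred.deferred.defer = _new_deferred_defer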
def test_no_task_state(self):
    noop_pickle = deferred.serialize(noop)
    request = self.make_request("/", 'task1', 'default', POST=noop_pickle)
    response = request.get_response(application)
    self.assertEqual(response.status_int, 200)
def test_failure(self):
    task_state = defer(noop_fail)
    noop_pickle = deferred.serialize(noop_fail)
    request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle)
    response = request.get_response(application)
    self.assertEqual(response.status_int, 500)
    task_state = self.reload(task_state)
    self.assertFalse(task_state.is_complete)
    self.assertFalse(task_state.is_running)
    self.assertFalse(task_state.is_permanently_failed)
def test_retry_success(self):
    task_state = defer(noop)
    noop_pickle = deferred.serialize(noop)
    request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle, retries=2)
    response = request.get_response(application)
    self.assertEqual(response.status_int, 200)
    task_state = self.reload(task_state)
    self.assertEqual(task_state.retry_count, 2)
    self.assertTrue(task_state.is_complete)
    self.assertFalse(task_state.is_running)
    self.assertFalse(task_state.is_permanently_failed)
def defer(obj, *args, **kwargs):
    from .handler import task_wrapper
    unique = kwargs.pop('unique', False)
    task_reference = kwargs.pop('task_reference', None)
    if unique:
        assert task_reference, "a task_reference must be passed"
        if UniqueTaskMarker.get_by_id(task_reference):
            logging.warning(
                "Did not defer task with reference {0} - task already present".format(task_reference))
            return
        else:
            UniqueTaskMarker(id=task_reference).put()
    defer_kwargs = get_defer_kwargs(kwargs)
    obj_kwargs = strip_defer_kwargs(kwargs)
    # We have to pickle the callable ourselves and hand the pickle to the wrapper:
    # the special treatment deferred.serialize uses to make things like instance
    # methods picklable applies only to the deferred callable, not to its arguments.
    pickled_obj = deferred.serialize(obj, *args, **obj_kwargs)
    task_state = TaskState(
        task_reference=task_reference,
        unique=unique,
        queue_name=kwargs.get('_queue', 'default'),
        pickle=pickled_obj
    )
    try:
        task_state.deferred_args = unicode(args)
        task_state.deferred_kwargs = unicode(strip_defer_kwargs(kwargs))
        task_state.deferred_function = get_func_repr(obj)
    except Exception:
        pass
    task_state.put()
    task = deferred.defer(task_wrapper, task_state.key.id(), pickled_obj, task_reference,
                          _transactional=True, **defer_kwargs)
    return task_state
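# A usage sketch for the unique-task variant of defer() above. The workload
# send_welcome_mail and the reference format are hypothetical; only the
# unique/task_reference/_queue keywords come from the implementation.
def send_welcome_mail(user_id):
    pass  # hypothetical workload

def queue_welcome_mail(user_id):
    # A second call with the same task_reference is dropped (with a warning)
    # until the UniqueTaskMarker is cleared, so the mail is queued only once.
    return defer(send_welcome_mail, user_id,
                 unique=True,
                 task_reference='welcome-mail-{0}'.format(user_id),
                 _queue='default')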
def _doWait(aKey, aObj, *args, **kwargs):
    lneedsRun = False
    lsem = db.get(aKey)
    if not lsem:
        raise Exception("Internal: failed to retrieve semaphore in _doWait")
    if lsem._counter > 0:
        lsem._counter -= 1
        logging.debug("counter: %s" % lsem._counter)
        lneedsRun = True
    else:
        logging.debug("about to defer")
        pickled = deferred.serialize(aObj, *args, **kwargs)
        pickled = base64.encodestring(pickled)
        logging.debug("after defer, pickled=%s" % pickled)
        lsem._suspendList.append(pickled)
    lsem.put()
    return lneedsRun
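# A hedged sketch of the matching signal path, assuming suspended payloads are
# drained from _suspendList. _doSignal is a hypothetical name, and a real
# implementation would likely enqueue the work rather than run it inline.
import base64
from google.appengine.ext import db, deferred

def _doSignal(aKey):
    lsem = db.get(aKey)
    if lsem._suspendList:
        # Undo the base64 wrapping applied in _doWait and run the pickled call.
        pickled = base64.decodestring(lsem._suspendList.pop(0))
        deferred.run(pickled)
    else:
        lsem._counter += 1
    lsem.put()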
def test_retry_max_retries(self):
    task_state = defer(noop_fail)
    # Give the task an old age. Tasks must fail both the retry and age
    # conditions (if specified).
    task_state.first_run = datetime.datetime.utcnow() - datetime.timedelta(days=2)
    task_state.put()
    noop_pickle = deferred.serialize(noop_fail)
    request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle, retries=8)
    response = request.get_response(application)
    self.assertEqual(response.status_int, 500)
    task_state = self.reload(task_state)
    self.assertEqual(task_state.retry_count, 8)
    self.assertTrue(task_state.is_complete)
    self.assertFalse(task_state.is_running)
    self.assertTrue(task_state.is_permanently_failed)
def _log(self, level, message, *args, **kwargs):
    """ Logs the message to the normal logging module and also queues a Task to create an _FantasmLog

    @param level:
    @param message:
    @param args:
    @param kwargs:

    NOTE: we are not using the deferred module, to reduce dependencies, but we are re-using the
          helper functions .serialize() and .run() - see handler.py
    """
    if not (self.level <= level <= self.maxLevel):
        return

    namespace = kwargs.pop('namespace', None)
    tags = kwargs.pop('tags', None)

    self.getLoggingMap()[level](message, *args, **kwargs)

    if not self.persistentLogging:
        return

    stack = None
    if 'exc_info' in kwargs:
        f = StringIO.StringIO()
        traceback.print_exc(25, f)
        stack = f.getvalue()

    # This _log method requires everything to be serializable, which is not the case for the
    # logging module. If message is not a basestring, we simply cast it to a string so that
    # _something_ can be logged in the deferred task.
    if not isinstance(message, basestring):
        try:
            message = str(message)
        except Exception:
            message = LOG_ERROR_MESSAGE
            if args:
                args = []
            logging.warning(message, exc_info=True)

    taskName = (self.__obj or {}).get(constants.TASK_NAME_PARAM)

    stateName = None
    if self.context.currentState:
        stateName = self.context.currentState.name

    transitionName = None
    if self.context.startingState and self.context.startingEvent:
        transitionName = self.context.startingState.getTransition(self.context.startingEvent).name

    actionName = None
    if self.context.currentAction:
        actionName = self.context.currentAction.__class__.__name__

    # In immediateMode, tack the messages onto obj so that they can be returned
    # in the http response in handler.py.
    if self.__obj is not None:
        if self.__obj.get(constants.IMMEDIATE_MODE_PARAM):
            try:
                self.__obj[constants.MESSAGES_PARAM].append(message % args)
            except TypeError:
                self.__obj[constants.MESSAGES_PARAM].append(message)

    serialized = deferred.serialize(_log,
                                    taskName,
                                    self.context.instanceName,
                                    self.context.machineName,
                                    stateName,
                                    actionName,
                                    transitionName,
                                    level,
                                    namespace,
                                    (self.tags or []) + (tags or []),
                                    message,
                                    stack,
                                    datetime.datetime.now(),  # FIXME: call .utcnow() instead?
                                    *args, **kwargs)

    try:
        task = taskqueue.Task(url=constants.DEFAULT_LOG_URL,
                              payload=serialized,
                              retry_options=taskqueue.TaskRetryOptions(task_retry_limit=20))
        # FIXME: a batch add may be more optimal, but there are quite a few more corners to deal with
        taskqueue.Queue(name=constants.DEFAULT_LOG_QUEUE_NAME).add(task)
    except taskqueue.TaskTooLargeError:
        logging.warning("fantasm log message too large - skipping persistent storage")
    except taskqueue.Error:
        logging.warning("error queuing log message Task - skipping persistent storage", exc_info=True)
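# A sketch of the receiving end at DEFAULT_LOG_URL. Fantasm ships its own
# handler (see handler.py, per the docstring above); this LogHandler is an
# illustrative stand-in showing the serialize()/run() pairing.
import webapp2
from google.appengine.ext.deferred import run

class LogHandler(webapp2.RequestHandler):
    def post(self):
        # The POST body is the deferred.serialize() payload built in _log;
        # run() unpickles it and calls _log(...) to write the _FantasmLog.
        run(self.request.body)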
def callback_function(self, func, *args, **kwds):
    from google.appengine.ext.deferred import serialize
    self.callback_fn = serialize(func, *args, **kwds)
    return self.callback_fn
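# A short usage sketch, written as a companion method on the same class: the
# pickle stored by callback_function can later be executed with deferred.run().
# fire_callback is a hypothetical name, not part of the original class.
def fire_callback(self):
    from google.appengine.ext.deferred import run
    if self.callback_fn:
        # Unpickles the stored payload and invokes func(*args, **kwds).
        return run(self.callback_fn)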