def run(self, *args, **kwargs):
    """Execute the task, publish its result to the user's AMQP exchange
    and, when polling, schedule the next iteration of the sequence.

    args[0] must be the email of the user waiting for the result.
    Successful results are cached in memcache under a key derived from
    the task arguments; failures are tracked under ``<key>error`` so
    that ``self.error_rerun_handler`` can decide a retry backoff.
    """
    email = args[0]
    # seq_id is an id for the sequence of periodic tasks, to avoid
    # running multiple concurrent sequences of the same task with the
    # same arguments. it is empty on first run, constant afterwards
    seq_id = kwargs.pop('seq_id', '')
    id_str = json.dumps([self.task_key, args, kwargs])
    cache_key = b64encode(id_str)
    cached_err = self.memcache.get(cache_key + 'error')
    if cached_err:
        # task has been failing recently
        if seq_id != cached_err['seq_id']:
            if seq_id:
                # another sequence is already handling this error flow;
                # this (stale) sequence must stop iterating.
                # FIX: previously this fell through ("pass"), letting
                # multiple sequences retry the same failing task.
                return
            else:
                # fresh external submission: take over from the other
                # sequence. The stale error record is deleted or
                # overwritten below, so just drop our reference to it.
                cached_err = None
    if not amqp_user_listening(email):
        # noone is waiting for the result, stop trying, but flush any
        # cached error state (unconditionally, so a record orphaned by a
        # takeover above is also purged; deleting a missing key is a no-op)
        self.memcache.delete(cache_key + 'error')
        return
    # check cache to stop iteration if another sequence has started
    cached = self.memcache.get(cache_key)
    if cached:
        if seq_id and seq_id != cached['seq_id']:
            amqp_log("%s: found new cached seq_id [%s], "
                     "stopping iteration of [%s]" % (id_str,
                                                     cached['seq_id'],
                                                     seq_id))
            return
        elif not seq_id and time() - cached['timestamp'] < self.result_fresh:
            amqp_log("%s: fresh task submitted with fresh cached result "
                     ", dropping" % id_str)
            return
    if not seq_id:
        # this task is called externally, not a rerun, create a seq_id
        # (note: seq_id is still empty in this log line)
        amqp_log("%s: fresh task submitted [%s]" % (id_str, seq_id))
        seq_id = uuid4().hex
    # actually run the task
    try:
        data = self.execute(*args, **kwargs)
    except Exception as exc:
        # error handling: record the failure timestamp and let the
        # handler decide whether (and when) to retry
        now = time()
        if not cached_err:
            cached_err = {'seq_id': seq_id, 'timestamps': []}
        cached_err['timestamps'].append(now)
        # express failure times relative to the first one in the series
        x0 = cached_err['timestamps'][0]
        rel_points = [x - x0 for x in cached_err['timestamps']]
        rerun = self.error_rerun_handler(exc, rel_points, *args, **kwargs)
        if rerun is not None:
            self.memcache.set(cache_key + 'error', cached_err)
            kwargs['seq_id'] = seq_id
            self.apply_async(args, kwargs, countdown=rerun)
        else:
            # handler gave up: clear the error record
            self.memcache.delete(cache_key + 'error')
        amqp_log("%s: error %r, rerun %s" % (id_str, exc, rerun))
        return
    else:
        # success: clear any error state left by previous failing runs
        if cached_err:
            self.memcache.delete(cache_key + 'error')
    cached = {'timestamp': time(), 'payload': data, 'seq_id': seq_id}
    ok = amqp_publish_user(email, routing_key=self.task_key, data=data)
    if not ok:
        # exchange closed, noone is listening, stop repeating
        amqp_log("%s: exchange closed" % id_str)
        return
    kwargs['seq_id'] = seq_id
    self.memcache.set(cache_key, cached)
    if self.polling:
        amqp_log("%s: will rerun in %d secs [%s]" % (id_str,
                                                     self.result_fresh,
                                                     seq_id))
        self.apply_async(args, kwargs, countdown=self.result_fresh)
def run(self, *args, **kwargs):
    """Execute the task, publish its result to the user's AMQP exchange
    and, when polling, schedule the next iteration of the sequence.

    args[0] must be the email of the user waiting for the result.
    Successful results are cached in memcache under a key derived from
    the task arguments; failures are tracked under ``<key>error`` so
    that ``self.error_rerun_handler`` can decide a retry backoff.
    """
    email = args[0]
    # seq_id is an id for the sequence of periodic tasks, to avoid
    # running multiple concurrent sequences of the same task with the
    # same arguments. it is empty on first run, constant afterwards
    seq_id = kwargs.pop('seq_id', '')
    id_str = json.dumps([self.task_key, args, kwargs])
    cache_key = b64encode(id_str)
    cached_err = self.memcache.get(cache_key + 'error')
    if cached_err:
        # task has been failing recently
        if seq_id != cached_err['seq_id']:
            if seq_id:
                # another sequence is already handling this error flow;
                # this (stale) sequence must stop iterating.
                # FIX: previously this fell through ("pass"), letting
                # multiple sequences retry the same failing task.
                return
            else:
                # fresh external submission: take over from the other
                # sequence. The stale error record is deleted or
                # overwritten below, so just drop our reference to it.
                cached_err = None
    if not amqp_user_listening(email):
        # noone is waiting for the result, stop trying, but flush any
        # cached error state (unconditionally, so a record orphaned by a
        # takeover above is also purged; deleting a missing key is a no-op)
        self.memcache.delete(cache_key + 'error')
        return
    # check cache to stop iteration if another sequence has started
    cached = self.memcache.get(cache_key)
    if cached:
        if seq_id and seq_id != cached['seq_id']:
            amqp_log("%s: found new cached seq_id [%s], "
                     "stopping iteration of [%s]" % (id_str,
                                                     cached['seq_id'],
                                                     seq_id))
            return
        elif not seq_id and time() - cached['timestamp'] < self.result_fresh:
            amqp_log("%s: fresh task submitted with fresh cached result "
                     ", dropping" % id_str)
            return
    if not seq_id:
        # this task is called externally, not a rerun, create a seq_id
        # (note: seq_id is still empty in this log line)
        amqp_log("%s: fresh task submitted [%s]" % (id_str, seq_id))
        seq_id = uuid4().hex
    # actually run the task
    try:
        data = self.execute(*args, **kwargs)
    except Exception as exc:
        # error handling: record the failure timestamp and let the
        # handler decide whether (and when) to retry
        now = time()
        if not cached_err:
            cached_err = {'seq_id': seq_id, 'timestamps': []}
        cached_err['timestamps'].append(now)
        # express failure times relative to the first one in the series
        x0 = cached_err['timestamps'][0]
        rel_points = [x - x0 for x in cached_err['timestamps']]
        rerun = self.error_rerun_handler(exc, rel_points, *args, **kwargs)
        if rerun is not None:
            self.memcache.set(cache_key + 'error', cached_err)
            kwargs['seq_id'] = seq_id
            self.apply_async(args, kwargs, countdown=rerun)
        else:
            # handler gave up: clear the error record
            self.memcache.delete(cache_key + 'error')
        amqp_log("%s: error %r, rerun %s" % (id_str, exc, rerun))
        return
    else:
        # success: clear any error state left by previous failing runs
        if cached_err:
            self.memcache.delete(cache_key + 'error')
    cached = {'timestamp': time(), 'payload': data, 'seq_id': seq_id}
    ok = amqp_publish_user(email, routing_key=self.task_key, data=data)
    if not ok:
        # exchange closed, noone is listening, stop repeating
        amqp_log("%s: exchange closed" % id_str)
        return
    kwargs['seq_id'] = seq_id
    self.memcache.set(cache_key, cached)
    if self.polling:
        amqp_log("%s: will rerun in %d secs [%s]" % (id_str,
                                                     self.result_fresh,
                                                     seq_id))
        self.apply_async(args, kwargs, countdown=self.result_fresh)
def run(self, *args, **kwargs):
    """Run the task once, publish the outcome to the user's AMQP
    exchange and, when polling, schedule the next run of the sequence.

    args[0] is the email of the user awaiting the result. A successful
    payload is cached in memcache; failures are tracked under a
    companion ``<key>error`` record whose timestamps feed
    ``self.error_rerun_handler`` to pick a retry countdown.
    """
    email = args[0]
    # seq_id is an id for the sequence of periodic tasks, to avoid
    # running multiple concurrent sequences of the same task with the
    # same arguments. it is empty on first run, constant afterwards
    seq_id = kwargs.pop("seq_id", "")
    id_str = json.dumps([self.task_key, args, kwargs])
    cache_key = b64encode(id_str)
    err_key = cache_key + "error"
    err_state = self.memcache.get(err_key)
    # the task has been failing recently if an error record exists
    if err_state and seq_id != err_state["seq_id"]:
        if seq_id:
            # some other sequence of tasks has taken over this flow
            return
        # fresh external submission taking over from the other sequence;
        # the stale record will be deleted or overwritten further down
        err_state = None
    if not amqp_user_listening(email):
        # noone is waiting for the result: stop trying and flush any
        # cached error state
        self.memcache.delete(err_key)
        return
    # stop this iteration if another sequence has started meanwhile
    prev = self.memcache.get(cache_key)
    if prev:
        if seq_id and seq_id != prev["seq_id"]:
            amqp_log(
                "%s: found new cached seq_id [%s], "
                "stopping iteration of [%s]" % (id_str, prev["seq_id"], seq_id)
            )
            return
        if not seq_id and time() - prev["timestamp"] < self.result_fresh:
            # fresh external call while a recent result is still cached
            amqp_log("%s: fresh task submitted with fresh cached result "
                     ", dropping" % id_str)
            return
    if not seq_id:
        # called externally (not a rerun): start a brand new sequence
        amqp_log("%s: fresh task submitted [%s]" % (id_str, seq_id))
        seq_id = uuid4().hex
    try:
        data = self.execute(*args, **kwargs)
    except Exception as exc:
        if isinstance(exc, SoftTimeLimitExceeded):
            log.error("SoftTimeLimitExceeded: %s", id_str)
        # accumulate failure timestamps and let the handler decide
        # whether (and in how many seconds) to retry
        if not err_state:
            err_state = {"seq_id": seq_id, "timestamps": []}
        err_state["timestamps"].append(time())
        first_ts = err_state["timestamps"][0]
        offsets = [ts - first_ts for ts in err_state["timestamps"]]
        rerun = self.error_rerun_handler(exc, offsets, *args, **kwargs)
        if rerun is None:
            # handler gave up: drop the error record
            self.memcache.delete(err_key)
        else:
            self.memcache.set(err_key, err_state)
            kwargs["seq_id"] = seq_id
            self.apply_async(args, kwargs, countdown=rerun)
        amqp_log("%s: error %r, rerun %s" % (id_str, exc, rerun))
        return
    # success: clear error state from any previous failing runs
    self.memcache.delete(err_key)
    entry = {"timestamp": time(), "payload": data, "seq_id": seq_id}
    if not amqp_publish_user(email, routing_key=self.task_key, data=data):
        # the exchange is closed, noone is listening: stop repeating
        amqp_log("%s: exchange closed" % id_str)
        return
    kwargs["seq_id"] = seq_id
    self.memcache.set(cache_key, entry)
    if self.polling:
        amqp_log("%s: will rerun in %d secs [%s]"
                 % (id_str, self.result_fresh, seq_id))
        self.apply_async(args, kwargs, countdown=self.result_fresh)