def perform(self, parent):
    """
    Scan the received file with ClamAV; return False (refuse to forward)
    when a virus is detected, True otherwise.
    """
    logger = parent.logger
    msg = parent.msg

    # it is possible to optimize scanning by not scanning every block...
    # FIXME! this is not tested. not sure how PS!
    if hasattr(parent, 'part_clamav_maxblock'):
        if parent.current_block > parent.part_clamav_maxblock[0]:
            # fixed: original referenced end/start before assignment and passed
            # two arguments to a single %s, so this log line itself raised.
            logger.info("part_clamav_scan scan skipped, too far into file %s" % msg.new_file)
            return True

    # scanner wants an absolute path name... dunno why.
    if msg.new_file[0] != '/':
        scanfn = os.getcwd() + '/' + msg.new_file
    else:
        scanfn = msg.new_file

    # worried about how long the scan will take.
    start = nowflt()
    virus_found = self.av.scan_file(scanfn)
    end = nowflt()

    if virus_found:
        logger.error("part_clamav_scan took %g not forwarding, virus detected in %s" %
                     (end - start, msg.new_file))
        return False

    logger.info("part_clamav_scan took %g seconds, no viruses in %s" %
                (end - start, msg.new_file))
    return True
def __init__(self, parent):
    """ Register msg_total options and establish their defaults (config may override). """
    logger = parent.logger

    # make parent know about these possible options
    parent.declare_option('msg_total_interval')
    parent.declare_option('msg_total_maxlag')

    # options arrive as lists of strings; normalize to int, or default when absent.
    if hasattr(parent, 'msg_total_maxlag'):
        if type(parent.msg_total_maxlag) is list:
            parent.msg_total_maxlag = int(parent.msg_total_maxlag[0])
    else:
        parent.msg_total_maxlag = 60

    if hasattr(parent, 'msg_total_interval'):
        if type(parent.msg_total_interval) is list:
            parent.msg_total_interval = int(parent.msg_total_interval[0])
    else:
        parent.msg_total_interval = 5

    now = nowflt()
    parent.msg_total_last = now
    parent.msg_total_start = now

    # cumulative counters, never reset.
    parent.msg_total_msgcount = 0
    parent.msg_total_bytecount = 0
    parent.msg_total_lag = 0

    logger.debug("msg_total: initialized, interval=%d, maxlag=%d" %
                 (parent.msg_total_interval, parent.msg_total_maxlag))

    parent.msg_total_cache_file = parent.user_cache_dir + os.sep \
        + 'msg_total_plugin_%.4d.vars' % parent.instance
def __init__(self, parent):
    """ Establish defaults for the speedo options; config files may override them. """
    logger = parent.logger

    # options come in as lists of strings; coerce to int, or fall back to defaults.
    if hasattr(parent, 'msg_speedo_maxlag'):
        if type(parent.msg_speedo_maxlag) is list:
            parent.msg_speedo_maxlag = int(parent.msg_speedo_maxlag[0])
    else:
        parent.msg_speedo_maxlag = 60

    logger.debug("speedo init: 2 ")

    if hasattr(parent, 'msg_speedo_interval'):
        if type(parent.msg_speedo_interval) is list:
            parent.msg_speedo_interval = int(parent.msg_speedo_interval[0])
    else:
        parent.msg_speedo_interval = 5

    # counters for the current reporting window.
    parent.msg_speedo_last = nowflt()
    parent.msg_speedo_msgcount = 0
    parent.msg_speedo_bytecount = 0
def perform(self, parent):
    """ Heartbeat hook: persist the parent's cache to disk, logging entry counts. """
    self.logger = parent.logger

    # caching disabled on this component: nothing to persist.
    if not hasattr(parent, "cache"):
        self.logger.info("hb_cache: off ")
        return True

    if not parent.cache_stat:
        parent.cache.save()
        self.logger.info("hb_cache saved (%d)" % len(parent.cache.cache_dict))
        return True

    # stats mode: report growth since the previous heartbeat.
    before = parent.cache.count
    parent.cache.save()
    now = nowflt()
    after = parent.cache.count
    self.logger.info(
        "hb_cache was %d, but since %5.2f sec, increased up to %d, now saved %d entries"
        % (self.last_count, now - self.last_time, before, after))
    self.last_time = now
    self.last_count = after
    return True
def check_expire(self):
    """ Run clean() once more than self.expire seconds have passed since the last pass. """
    self.logger.debug("sr_cache check_expire")
    now = nowflt()
    if now - self.last_expire > self.expire:
        self.last_expire = now
        self.clean()
def on_message(self, parent):
    """ Track the worst (largest) lag observed between message begin time and now. """
    if hasattr(parent, 'msg'):
        lag = nowflt() - parent.msg.tbegin
        if lag > self.msg_maxlag:
            self.msg_maxlag = lag
    return True
def on_message(self, parent):
    """
    Accumulate message count, (estimated) byte count, and lag; log a cumulative
    summary every msg_total_interval seconds, warn when lag exceeds
    msg_total_maxlag, and exit the process once msg_total_count messages
    (if configured) have been seen.
    """
    logger = parent.logger
    msg = parent.msg

    if msg.isRetry:
        return True

    # removed unused calendar/sys imports; humanize/datetime used below.
    import humanize
    import datetime

    if (parent.msg_total_msgcount == 0):
        logger.info(
            "msg_total: 0 messages received: 0 msg/s, 0.0 bytes/s, lag: 0.0 s (RESET)"
        )

    msgtime = timestr2flt(msg.pubtime)
    now = nowflt()

    parent.msg_total_msgcount = parent.msg_total_msgcount + 1

    lag = now - msgtime
    parent.msg_total_lag = parent.msg_total_lag + lag

    # guess the size of the message payload, ignoring overheads.
    parent.msg_total_bytecount += (len(parent.msg.exchange) +
                                   len(parent.msg.topic) +
                                   len(parent.msg.notice) +
                                   len(parent.msg.hdrstr))

    # not time to report yet.
    if parent.msg_total_interval > now - parent.msg_total_last:
        return True

    logger.info(
        "msg_total: %3d messages received: %5.2g msg/s, %s bytes/s, lag: %4.2g s"
        % (parent.msg_total_msgcount,
           parent.msg_total_msgcount / (now - parent.msg_total_start),
           humanize.naturalsize(
               parent.msg_total_bytecount / (now - parent.msg_total_start),
               binary=True, gnu=True),
           parent.msg_total_lag / parent.msg_total_msgcount))

    # Set the maximum age, in seconds, of a message to retrieve.
    if lag > parent.msg_total_maxlag:
        logger.warn("total: Excessive lag! Messages posted %s " %
                    humanize.naturaltime(datetime.timedelta(seconds=lag)))

    parent.msg_total_last = now

    # fixed: msg_total_count is not set by this plugin's __init__, so reading it
    # unconditionally could raise AttributeError; treat "unset" as 0 (disabled).
    msg_total_count = getattr(parent, 'msg_total_count', 0)
    if (msg_total_count > 0) and (parent.msg_total_msgcount >= msg_total_count):
        os._exit(0)

    return True
def perform(self, parent):
    """
    Throttle posting to parent.post_rate_limit messages per second by
    sleeping out the remainder of the current one-second window.
    """
    import time

    now = nowflt()
    time_elapsed = now - parent.post_rate_limit_since
    parent.post_rate_limit_msgcount += 1

    if (parent.post_rate_limit_msgcount > parent.post_rate_limit):
        parent.logger.info("post_rate_limit %d messages/second, sleeping" %
                           parent.post_rate_limit)
        # fixed: when more than a second has already elapsed, 1 - time_elapsed
        # is negative and time.sleep() raises ValueError; clamp at zero.
        time.sleep(max(0, 1 - time_elapsed))
        time_elapsed = 1

    # window expired: start a fresh one-second accounting window.
    if time_elapsed >= 1:
        parent.post_rate_limit_msgcount = 0
        parent.post_rate_limit_since = nowflt()

    return True
def load(self):
    """
    Rebuild the in-memory cache (self.cache_dict / self.count) from
    self.cache_file, discarding entries older than self.expire seconds.
    The file handle self.fp is left open (r+) so later check()/clean()
    calls can append to it.
    """
    self.logger.debug("sr_cache load")
    self.cache_dict = {}
    self.count = 0

    # create file if not existing
    if not os.path.isfile(self.cache_file) :
        self.fp = open(self.cache_file,'w')
        self.fp.close()

    # set time once; all expiry decisions below use this same instant.
    now = nowflt()

    # open file (read/append)...
    # read through
    # keep open to append entries
    self.fp = open(self.cache_file,'r+')
    lineno=0
    while True :
        # read line, parse words
        line = self.fp.readline()
        if not line : break
        lineno += 1

        # words = [ sum, time, path, part ]
        try:
            words = line.split()
            key = words[0]
            ctime = float(words[1])
            qpath = words[2]
            path = urllib.parse.unquote(qpath)
            part = words[3]
            value = '%s*%s' % (path,part)

            # skip expired entry
            ttl = now - ctime
            if ttl > self.expire : continue

        except Exception as err:
            # corrupted line: log it and keep going; one bad line must not
            # invalidate the rest of the cache file.
            err_msg_fmt = "load corrupted: lineno={}, cache_file={}, err={}"
            self.logger.error(err_msg_fmt.format(lineno, self.cache_file, err))
            self.logger.debug('Exception details:', exc_info=True)
            continue

        # add info in cache; later lines for the same (key, value) simply
        # refresh the stored time.
        if key in self.cache_dict : kdict = self.cache_dict[key]
        else: kdict = {}

        if not value in kdict : self.count += 1

        kdict[value] = ctime
        self.cache_dict[key] = kdict
def on_message(self, parent):
    """
    Enforce a minimum age (parent.msg_fdelay) on both the message and the
    file it refers to; anything too fresh is pushed back onto the retry
    queue (return False) to be reprocessed later.
    """
    import os
    import stat

    # Prepare msg delay test
    if parent.msg.sumflg == 'R':  # 'R' msg will be removed by itself
        return False

    # Test msg delay
    elapsedtime = nowflt() - timestr2flt(parent.msg.headers['pubTime'])
    if elapsedtime < parent.msg_fdelay:
        # fixed: the format string had one placeholder but two arguments, so it
        # logged the elapsed time where the remaining wait time was intended.
        dbg_msg = "message {:.3f}s old, not old enough; retrying in {:.3f} seconds"
        parent.logger.debug(
            dbg_msg.format(elapsedtime, parent.msg_fdelay - elapsedtime))
        parent.consumer.sleep_now = parent.consumer.sleep_min
        parent.consumer.msg_to_retry()
        parent.msg.isRetry = False
        return False

    # Prepare file delay test
    if '/cfr/' in parent.msg.new_dir:
        f = os.path.join(parent.msg.new_dir, parent.msg.new_file)
    else:
        f = parent.msg.relpath
    if not os.path.exists(f):
        parent.logger.error("did not find file {}".format(f))
        return False

    # Test file delay
    filetime = os.stat(f)[stat.ST_MTIME]
    elapsedtime = nowflt() - filetime
    if elapsedtime < parent.msg_fdelay:
        # same placeholder fix as the message-delay branch above.
        dbg_msg = "file {:.3f}s old, not old enough; retrying in {:.3f} seconds"
        parent.logger.debug(
            dbg_msg.format(elapsedtime, parent.msg_fdelay - elapsedtime))
        parent.consumer.sleep_now = parent.consumer.sleep_min
        parent.consumer.msg_to_retry()
        parent.msg.isRetry = False
        return False

    return True
def perform(self, parent):
    """ Log the age (seconds between the mtime header and now) of the received file. """
    # removed unused 'import time'.

    # mtime header is optional; nothing to report without it.
    if 'mtime' not in parent.msg.headers:
        return True

    now = nowflt()
    mtime = timestr2flt(parent.msg.headers['mtime'])
    age = now - mtime
    parent.logger.info("file_age %g seconds for %s" % (age, parent.msg.new_file))
    return True
def on_message(self, parent):
    """ Log how long the message took to arrive (post time vs. now). """
    # removed unused 'import calendar'.
    logger = parent.logger
    msg = parent.msg

    then = timestr2flt(msg.pubtime)
    now = nowflt()

    logger.info("print_lag, posted: %s, lag: %g sec. to deliver: %s, " %
                (msg.pubtime, (now - then), msg.new_file))
    return True
def __init__(self, parent):
    """ Initialize the posting rate limit (messages/second) and its counters. """
    if hasattr(parent, 'post_rate_limit'):
        # option values arrive as a list of strings; keep only the first, as int.
        if type(parent.post_rate_limit) is list:
            parent.post_rate_limit = int(parent.post_rate_limit[0])
    else:
        # default: one message per second.
        parent.post_rate_limit = 1

    parent.post_rate_limit_msgcount = 0
    parent.post_rate_limit_since = nowflt()
    parent.logger.info("post_rate_limit set to %d messages/second" %
                       parent.post_rate_limit)
def on_file(self, parent):
    """
    Accumulate downloaded-file counts, bytes, and lag; every file_total_interval
    seconds log a cumulative reception summary, warning when lag exceeds
    file_total_maxlag.
    """
    logger = parent.logger
    msg = parent.msg

    # removed unused 'import calendar'.
    import humanize
    import datetime
    from sarra.sr_util import timestr2flt

    if (parent.file_total_bytecount == 0):
        logger.info(
            "file_total: 0 files received: 0 msg/s, 0.0 bytes/s, lag: 0.0 s (RESET)"
        )

    msgtime = timestr2flt(msg.pubtime)
    now = nowflt()

    parent.file_total_msgcount = parent.file_total_msgcount + 1

    lag = now - msgtime
    parent.file_total_lag = parent.file_total_lag + lag

    (method, psize, ptot, prem, pno) = msg.partstr.split(',')
    parent.file_total_bytecount = parent.file_total_bytecount + int(psize)

    # not time to report yet.
    if parent.file_total_interval > now - parent.file_total_last:
        return True

    logger.info(
        "file_total: %3d files received: %5.2g msg/s, %s bytes/s, lag: %4.2g s"
        % (parent.file_total_msgcount,
           parent.file_total_msgcount / (now - parent.file_total_start),
           humanize.naturalsize(parent.file_total_bytecount /
                                (now - parent.file_total_start),
                                binary=True, gnu=True),
           parent.file_total_lag / parent.file_total_msgcount))

    # Set the maximum age, in seconds, of a message to retrieve.
    if lag > parent.file_total_maxlag:
        logger.warn(
            "total: Excessive lag! downloading too slowly/late %s behind" %
            humanize.naturaltime(datetime.timedelta(seconds=lag)))

    parent.file_total_last = now
    return True
def on_start(self, parent):
    """
    Open an MQTT connection for publishing: pick a broker for this instance,
    look up its credentials, connect, and start the client's network loop.
    """
    import paho.mqtt.client as mqtt
    # removed unused 'import time'.

    if not hasattr(parent, 'exp_2mqtt_post_broker'):
        parent.exp_2mqtt_post_broker = ['mqtt://localhost']

    logger = parent.logger

    if parent.no < 1:
        # randomize broker used in foreground testing.
        i = int(nowflt() % len(parent.exp_2mqtt_post_broker))
    else:
        # instances are numbered from 1; spread them over the broker list.
        i = (parent.no - 1) % len(parent.exp_2mqtt_post_broker)

    logger.info( "using: %s parent.no=%d exp_2mqtt_broker=%s" % \
        (parent.exp_2mqtt_post_broker[i], parent.no, parent.exp_2mqtt_post_broker) )

    ok, details = parent.credentials.get(parent.exp_2mqtt_post_broker[i])
    if ok:
        parent.mqtt_bs = details
    else:
        logger.error("exp_2mqtt: post_broker credential lookup failed for %s" %
                     parent.exp_2mqtt_post_broker)
        return

    if not hasattr(parent, 'post_exchange'):
        logger.error("exp_2mqtt: defaulting post_exchange to xpublic")
        parent.post_exchange = 'xpublic'

    u = parent.mqtt_bs.url
    if u.port == None:
        port = 1883
    else:
        port = u.port

    # https://www.iso.org/standard/69466.html - ISO standard v3.1.1 is most common.
    parent.mqtt_client = mqtt.Client(protocol=mqtt.MQTTv311)

    if u.username != None:
        logger.error("exp_2mqtt: authenticating as %s " % (u.username))
        parent.mqtt_client.username_pw_set(u.username, u.password)

    parent.mqtt_client.connect(u.hostname, port)
    parent.mqtt_client.loop_start()
    return True
def on_message(self, parent):
    """ Reject (skip download of) messages older than msg_skip_threshold seconds. """
    # removed unused 'import calendar'.
    logger = parent.logger
    msg = parent.msg

    then = timestr2flt(msg.pubtime)
    now = nowflt()

    # Set the maximum age, in seconds, of a message to retrieve.
    lag = now - then
    if lag > int(parent.msg_skip_threshold):
        logger.info("msg_skip_old, Excessive lag: %g sec. Skipping download of: %s, "
                    % (lag, msg.new_file))
        return False

    return True
def __init__(self, parent):
    """ Set up an empty duplicate-suppression cache bound to its parent config. """
    parent.logger.debug("sr_cache init")
    self.parent = parent
    self.logger = parent.logger

    # seconds an entry may live before being expired.
    self.expire = parent.caching

    self.cache_dict = {}     # checksum -> { "path*part": time }
    self.count = 0           # number of distinct entries held
    self.cache_file = None   # backing file, set by open/load elsewhere
    self.cache_hit = None    # last duplicate value found by check()
    self.fp = None
    self.cache_basis = parent.cache_basis

    self.last_expire = nowflt()
def on_message(self, parent):
    """
    Accumulate per-window message and byte counts; every msg_speedo_interval
    seconds log the reception rate and current lag, then reset the window
    counters.
    """
    logger = parent.logger
    msg = parent.msg

    # removed unused 'import calendar'.
    import humanize
    import datetime

    msgtime = timestr2flt(msg.pubtime)
    now = nowflt()

    parent.msg_speedo_msgcount = parent.msg_speedo_msgcount + 1

    (method, psize, ptot, prem, pno) = msg.partstr.split(',')
    parent.msg_speedo_bytecount = parent.msg_speedo_bytecount + int(psize)

    # not time to report yet.
    if parent.msg_speedo_interval > now - parent.msg_speedo_last:
        return True

    lag = now - msgtime
    logger.info(
        "speedo: %3d messages received: %5.2g msg/s, %s bytes/s, lag: %4.2g s"
        % (parent.msg_speedo_msgcount,
           parent.msg_speedo_msgcount / (now - parent.msg_speedo_last),
           humanize.naturalsize(parent.msg_speedo_bytecount /
                                (now - parent.msg_speedo_last),
                                binary=True, gnu=True), lag))

    # Set the maximum age, in seconds, of a message to retrieve.
    if lag > parent.msg_speedo_maxlag:
        logger.warn("speedo: Excessive lag! Messages posted %s " %
                    humanize.naturaltime(datetime.timedelta(seconds=lag)))

    # reset the reporting window.
    parent.msg_speedo_last = now
    parent.msg_speedo_msgcount = 0
    parent.msg_speedo_bytecount = 0

    return True
def perform(self, parent):
    """
    Accumulate posted-message counts and lag; every post_total_interval seconds
    log a cumulative posting-rate summary, warning when lag exceeds
    post_total_maxlag.
    """
    logger = parent.logger
    msg = parent.msg

    # removed unused 'import calendar'.
    import humanize
    import datetime
    from sarra.sr_util import timestr2flt

    if parent.post_total_msgcount == 0:
        logger.info(
            "post_total: 0 messages posted: 0 msg/s, 0.0 bytes/s, lag: 0.0 s (RESET)"
        )

    msgtime = timestr2flt(msg.pubtime)
    now = nowflt()

    parent.post_total_msgcount = parent.post_total_msgcount + 1

    lag = now - msgtime
    parent.post_total_lag = parent.post_total_lag + lag

    # byte accounting intentionally left disabled upstream:
    #(method,psize,ptot,prem,pno) = msg.partstr.split(',')
    #parent.post_total_bytecount = parent.post_total_bytecount + int(psize)

    # not time to report yet.
    if parent.post_total_interval > now - parent.post_total_last:
        return True

    logger.info(
        "post_total: %3d messages posted: %5.2g msg/s, lag: %4.2g s" %
        (parent.post_total_msgcount,
         parent.post_total_msgcount / (now - parent.post_total_start),
         parent.post_total_lag / parent.post_total_msgcount))

    # Set the maximum age, in seconds, of a message to retrieve.
    if lag > parent.post_total_maxlag:
        logger.warn("total: Excessive lag! Messages posted %s " %
                    humanize.naturaltime(datetime.timedelta(seconds=lag)))

    parent.post_total_last = now
    return True
def on_message(self, parent):
    """
    Enforce a minimum message age (parent.msg_delay): messages younger than
    that are sent back to the retry queue (return False) for later retry.
    """
    # stamp first sight of the message so delay can be tracked downstream.
    if not 'delay' in parent.msg.headers:
        parent.msg.headers['delay'] = nowstr()

    # Test msg delay
    elapsedtime = nowflt() - timestr2flt(parent.msg.headers['pubTime'])
    if 0 < elapsedtime < 1:
        parent.logger.debug("msg_delay received msg")
    else:
        parent.logger.info("trying msg with {:.3f}s elapsed".format(elapsedtime))

    if elapsedtime < parent.msg_delay:
        # fixed: the format string had one placeholder but two arguments, so it
        # logged the elapsed time where the remaining wait time was intended.
        dbg_msg = "message {:.3f}s old, not old enough; retrying in {:.3f} seconds"
        parent.logger.debug(
            dbg_msg.format(elapsedtime, parent.msg_delay - elapsedtime))
        parent.consumer.sleep_now = parent.consumer.sleep_min
        parent.consumer.msg_to_retry()
        parent.msg.isRetry = False
        return False

    return True
def clean(self, persist=False, delpath=None):
    """
    Rebuild the cache without expired entries (and without any entry whose
    path matches delpath); when persist is True, write survivors to the open
    cache file as we go.
    """
    self.logger.debug("sr_cache clean")

    now = nowflt()
    qdelpath = urllib.parse.quote(delpath) if delpath is not None else None

    rebuilt = {}
    self.count = 0

    # cache_dict maps checksum -> { "path*part": time }
    for checksum in self.cache_dict.keys():
        entries = self.cache_dict[checksum]
        survivors = {}

        for entry in entries:
            stamp = entries[entry]

            # expired: drop silently.
            if now - stamp > self.expire:
                continue

            pieces = entry.split('*')
            path = pieces[0]
            qpath = urllib.parse.quote(path)
            part = pieces[1]

            # explicitly targeted for deletion: drop as well.
            if qpath == qdelpath:
                continue

            survivors[entry] = stamp
            self.count += 1

            if persist:
                self.fp.write("%s %f %s %s\n" % (checksum, stamp, qpath, part))

        if len(survivors) > 0:
            rebuilt[checksum] = survivors

    # swap in the cleaned cache.
    self.cache_dict = rebuilt
def perform(self, parent):
    """
    Compare the checksum computed on the fly against the one announced in the
    message. A mismatch on a fresh message is rejected; a mismatch on a
    message older than part_check_lag_threshold is assumed to refer to an
    older version of the file and only warned about.
    """
    logger = parent.logger
    msg = parent.msg

    then = timestr2flt(msg.pubtime)
    now = nowflt()
    lag = now - then

    if msg.onfly_checksum != msg.checksum:
        # fixed: the original rebound the name 'msg' to this string (shadowing
        # the message object) and read onfly_checksum from parent — which does
        # not carry it — instead of from msg.
        detail = "checksum differ - %s - %s msg %s" % (
            msg.new_file, msg.onfly_checksum, msg.checksum)
        if lag > parent.part_check_lag_threshold:
            logger.warning(
                "part_check might just be referring to an older version of file, but "
                + detail)
            return True
        else:
            logger.error("part_check rejecting " + detail)
            return False

    logger.info("part_check Checksum matched for : %s" % msg.new_file)
    return True
def __init__(self, parent):
    """ Register the post_total options and establish their defaults (config may override). """
    logger = parent.logger

    # make parent know about these possible options
    parent.declare_option('post_total_interval')
    parent.declare_option('post_total_maxlag')

    if hasattr(parent, 'post_total_maxlag'):
        if type(parent.post_total_maxlag) is list:
            parent.post_total_maxlag = int(parent.post_total_maxlag[0])
    else:
        parent.post_total_maxlag = 60

    # removed copy-pasted 'speedo init' debug line from the msg_speedo plugin.

    if hasattr(parent, 'post_total_interval'):
        if type(parent.post_total_interval) is list:
            parent.post_total_interval = int(parent.post_total_interval[0])
    else:
        parent.post_total_interval = 5

    now = nowflt()
    parent.post_total_last = now
    parent.post_total_start = now

    # fixed: msgcount/bytecount were initialized twice; once is enough.
    parent.post_total_msgcount = 0
    parent.post_total_bytecount = 0
    parent.post_total_lag = 0

    logger.debug( "post_total: initialized, interval=%d, maxlag=%d" % \
        ( parent.post_total_interval, parent.post_total_maxlag ) )
def on_file(self, parent):
    """
    Deliver an EGC bulletin to a Land Earth Station over telnet: log in,
    send the EGC command and the reformatted bulletin, then confirm the
    station's 'Storing/Submitted/Reference' acknowledgment before unlinking
    the local file.
    """
    # removed unused logging/sys/time imports.
    import telnetlib, os, stat

    logger = parent.logger

    # Grabs credentials from credentials.conf that were given in a config option
    ok, details = parent.credentials.get(parent.file_send_egc_les_telnet[0])
    if ok:
        setting = details.url
        user = setting.username
        password = setting.password
        server = setting.hostname
        port = setting.port
        logger.debug("file_send_egc_les telnet credentials valid")
    else:
        logger.error("file_send_egc_les telnet credentials invalid")
        return False

    timeout = int(parent.file_send_egc_les_timeout[0])
    if not port:
        port = 23

    # Read in the bulletin and replace any instances of .S with S, \n with \r\n
    # and add .S\r\n at the end indicating 'store and submit'
    filepath = parent.msg.new_relpath
    with open(filepath, 'r') as f:
        data = f.read()
    data = data.replace('.S', ' S')
    bul = data.replace('\n', '\r\n') + '.S\r\n'

    # Find the 2 header components: T1T2A1A2ii CCCC
    parts = data.split(' ')
    HDR = parts[0]
    CCCC = parts[1]

    # Decide which egc it should be transmitted with
    egc = self.find_egc(HDR, CCCC, logger)
    if egc is None:
        return False

    # For FQ of CWAO if 'PAN PAN' in message then increase priority
    if HDR[:2] == 'FQ' and CCCC == 'CWAO' and 'PAN PAN' in data:
        egc = egc.replace(' 1 4 ', ' 2 4 ')

    # Transmit the file over telnet
    # NOTE(review): Python 3 telnetlib expects bytes, not str, for read_until/
    # write — these str calls suggest Python 2 heritage; confirm target runtime.
    try:
        start = nowflt()
        tn = telnetlib.Telnet(server, port, timeout)
        # fixed: the login sequence was corrupted into invalid syntax
        # ("username:"******"\r\n"); reconstructed as prompt-then-send using the
        # user/password pulled from credentials above.
        tn.read_until("username:", timeout)
        tn.write(user + "\r\n")
        tn.read_until("password:", timeout)
        tn.write(password + "\r\n")
        tn.read_until(">", timeout)
        tn.write(egc)
        tn.read_until("Text:", timeout)
        tn.write(bul)
        tn.write("quit\r\n")
        info = tn.read_all()
        tn.close()

        nbBytes = os.stat(filepath)[stat.ST_SIZE]
        end = nowflt()

        logger.info(
            "file_send_egc_les: ({0} bytes) file {1} delivered to: {2}, took {3}s"
            .format(nbBytes, os.path.basename(filepath), setting, end - start))
        logger.info("file_send_egc_les: egc used: %s" % egc)
        logger.info("file_send_egc_les: return message: %s" % info)

        if 'Storing' in info and 'Submitted' in info and 'Reference' in info:
            # station acknowledged: the local copy is no longer needed.
            os.unlink(filepath)
            return True
        else:
            logger.error(
                "file_send_egc_les: error with return info from file: %s" % filepath)
            return False
    except:
        logger.error("file_send_egc_les/on_file: error sending over telnet")
        logger.debug('Exception details: ', exc_info=True)
        return False
def __init__(self, parent):
    """ Start from an empty sample: zero entries seen, clock set to now. """
    self.last_count = 0
    self.last_time = nowflt()
def init(self, parent):
    """ Snapshot the parent's counters so later deltas can be computed. """
    self.last_time = nowflt()
    # counter values as of this snapshot
    self.last_message_count = parent.message_count
    self.last_publish_count = parent.publish_count
    self.last_pulse_count = parent.pulse_count
def check(self, key, path, part):
    """
    Record (key, path, part) in the cache and report whether it is new.

    Returns True when the entry was not already cached (caller should
    proceed) and False for a duplicate, in which case self.cache_hit is set
    to the matching cached value. Every call appends a line to the cache
    file and refreshes the entry's timestamp, duplicate or not.
    """
    self.logger.debug("sr_cache check basis=%s" % self.cache_basis )

    # not found (yet): cleared before every lookup.
    self.cache_hit = None

    # set time and value
    now = nowflt()
    relpath = self.__get_relpath(path)
    qpath = urllib.parse.quote(relpath)
    value = '%s*%s' % (relpath, part)

    # checksum never seen: record it and accept.
    if key not in self.cache_dict :
        self.logger.debug("adding a new entry in cache")
        kdict = {}
        kdict[value] = now
        self.cache_dict[key] = kdict
        self.fp.write("%s %f %s %s\n"%(key,now,qpath,part))
        self.count += 1
        return True

    self.logger.debug("sum already in cache: key={}".format(key))
    kdict = self.cache_dict[key]
    present = value in kdict
    kdict[value] = now

    # differ or newer, write to file
    self.fp.write("%s %f %s %s\n"%(key,now,qpath,part))
    self.count += 1

    if present:
        # exact (path, part) already cached: duplicate.
        self.logger.debug("updated time of old entry: value={}".format(value))
        self.cache_hit = value
        return False
    else:
        self.logger.debug("added value={}".format(value))

    # non-partitioned entries ('p'/'i' prefixes mark parts) need no further checks.
    if part is None or part[0] not in "pi":
        self.logger.debug("new entry, not a part: part={}".format(part))
        return True

    ptoken = part.split(',')
    if len(ptoken) < 4 :
        self.logger.debug("new entry, weird part: ptoken={}".format(ptoken))
        return True

    # build a wiser dict value without
    # block_count and remainder (ptoken 2 and 3)
    # FIXME the remainder of this method is wrong. It will trivially have a match because we just
    # added the entry in kdict, then we will always find the value and return false. It will not change
    # anything at all though. Worst, the cache hit will falsely indicate that we hit an old entry. Then,
    # partitioned files would be lost. And why are we removing blktot and brem to do such a check.
    pvalue = value
    pvalue = pvalue.replace(','+ptoken[2],'',10)
    pvalue = pvalue.replace(','+ptoken[3],'',10)

    # check every key in kdict
    # build a similar value to compare with pvalue
    for value in kdict :
        kvalue = value
        kvalue = kvalue.replace(','+ptoken[2],'',10)
        kvalue = kvalue.replace(','+ptoken[3],'',10)

        # if we find the value... its in cache... its old
        if pvalue == kvalue :
            self.cache_hit = value
            return False

    # FIXME variable value was overwritten by loop variable value. Using pvalue is safer here
    # for when the loop bug will be fixed.
    self.logger.debug("did not find it... its new: pvalue={}".format(pvalue))
    return True
def on_message(self, parent):
    """
    Flow-test plugin (f90 stage of the pclean test chain).

    Checks that the announced file was propagated into every other fXX test
    directory and matches the f20 origin copy, then stages the next test by
    creating a symlink / hardlink / renamed copy with a randomly chosen test
    extension. Returns False (reject the message) when a check fails.
    """
    import filecmp
    import os
    import random
    from difflib import Differ

    parent.logger.info("msg_pclean_f90.py on_message")
    result = True
    msg_relpath = parent.msg.relpath
    # derive the f20 (origin) path by swapping the directory component.
    f20_path = msg_relpath.replace("{}/".format(self.all_fxx_dirs[1]),
                                   self.all_fxx_dirs[0])
    path_dict = self.build_path_dict(self.all_fxx_dirs[2:], msg_relpath)
    ext = self.get_extension(msg_relpath)
    for fxx_dir, path in path_dict.items():  # f90 test
        if not os.path.exists(path):
            # propagation check to all path except f20 which is the origin
            err_msg = "file not in folder {} with {:.3f}s elapsed"
            lag = nowflt() - timestr2flt(parent.msg.headers['pubTime'])
            parent.logger.error(err_msg.format(fxx_dir, lag))
            parent.logger.debug("file missing={}".format(path))
            result = False
            break
        elif ext not in self.test_extension_list and not filecmp.cmp(
                f20_path, path):
            # file differ check: f20 against others — warn and show the diff,
            # but do not fail the message for a content mismatch.
            parent.logger.warning(
                "skipping, file differs from f20 file: {}".format(path))
            with open(f20_path, 'r', encoding='iso-8859-1') as f:
                f20_lines = f.readlines()
            with open(path, 'r', encoding='iso-8859-1') as f:
                f_lines = f.readlines()
            diff = Differ().compare(f20_lines, f_lines)
            diff = [d for d in diff if d[0] != ' ']  # Diffs without context
            parent.logger.debug("diffs found:\n{}".format("".join(diff)))
    if ext not in self.test_extension_list:
        # prepare next f90 test: pick one test identified by file extension
        test_extension = random.choice(self.test_extension_list)
        src = msg_relpath  # src file is in f30 dir
        # format input file for extension test (next f90)
        dest = "{}{}".format(src, test_extension)
        try:
            if test_extension == '.slink':
                os.symlink(src, dest)
            elif test_extension == '.hlink':
                os.link(src, dest)
            elif test_extension == '.moved':
                os.rename(src, dest)
            else:
                parent.logger.error(
                    "test '{}' is not supported".format(test_extension))
        except FileNotFoundError as err:  # src is not there
            parent.logger.error("test failed: {}".format(err))
            parent.logger.debug("Exception details:", exc_info=True)
            result = False
        except FileExistsError as err:  # dest is already there
            parent.logger.error(
                'skipping, found a moving target {}'.format(err))
            parent.logger.debug("Exception details:", exc_info=True)
            result = False
    if 'toolong' in parent.msg.headers:  # cleanup
        del parent.msg.headers['toolong']
    return result