def __init__(self, slice=None, numslices=1):
    """Set up this runner's switchboards and child-process table.

    slice/numslices select which portion of the queue's hash space this
    runner instance services.
    """
    # Nothing spawned yet, and the stop flag starts cleared.
    self._stop = False
    self._kids = {}
    # Bypass the switchboard cache: it cannot carry the slice/numslices
    # arguments this runner needs for its own switchboard.
    self._switchboard = Switchboard(self.QDIR, slice, numslices, True)
    # Messages that fail processing get shunted here for human inspection.
    self._shunt = Switchboard(config.SHUNTQUEUE_DIR)
def __init__(self, slice=None, numslices=1):
    """Initialize the runner's queue and shunt switchboards.

    slice/numslices describe this runner's share of the queue's hash space.
    """
    self._kids = {}
    self._stop = False
    # The switchboard cache can't be used here because it doesn't accept
    # slice and numslices arguments, so build a dedicated switchboard.
    self._switchboard = Switchboard(self.QDIR, slice, numslices, True)
    # The shunt queue receives messages that couldn't be processed.
    self._shunt = Switchboard(mm_cfg.SHUNTQUEUE_DIR)
def __init__(self, slice=None, numslices=1):
    """Initialize the outgoing runner: base runner + bounce state, and
    resolve the delivery handler function once at startup.
    """
    Runner.__init__(self, slice, numslices)
    BounceMixin.__init__(self)
    # We look this function up only at startup time.
    modname = 'Mailman.Handlers.' + mm_cfg.DELIVERY_MODULE
    # __import__ returns the top-level package, so fetch the fully dotted
    # module from sys.modules.  (Fixed: the original bound the unused
    # local `mod` to the return value.)
    __import__(modname)
    self._func = getattr(sys.modules[modname], 'process')
    # This prevents smtp server connection problems from filling up the
    # error log.  It gets reset if the message was successfully sent, and
    # set if there was a socket.error.
    self.__logged = False
    self.__retryq = Switchboard(mm_cfg.RETRYQUEUE_DIR)
class RetryRunner(Runner):
    """Move messages from the retry queue back to the outgoing queue.

    Messages land here after temporary SMTP failures; this runner hands
    them back to the outgoing queue for another delivery attempt.
    """

    QDIR = mm_cfg.RETRYQUEUE_DIR
    SLEEPTIME = mm_cfg.minutes(15)

    def __init__(self, slice=None, numslices=1):
        Runner.__init__(self, slice, numslices)
        self.__outq = Switchboard(mm_cfg.OUTQUEUE_DIR)

    def _dispose(self, mlist, msg, msgdata):
        # Fixed: honor the message's scheduled redelivery time.  The
        # original immediately re-enqueued to the outgoing queue, which
        # could bounce not-yet-due messages between the two queues (see
        # the sibling RetryRunner variant in this file, which checks
        # deliver_after).  Returning True keeps it in the retry queue.
        deliver_after = msgdata.get('deliver_after', 0)
        if time.time() < deliver_after:
            return True
        # Due (or never scheduled): move it to the out queue for another
        # delivery attempt.
        self.__outq.enqueue(msg, msgdata)
        return False

    def _snooze(self, filecnt):
        # We always want to snooze, regardless of how many files the last
        # pass processed (the base class only sleeps when idle).
        time.sleep(self.SLEEPTIME)
class RetryRunner(Runner):
    """Feed due messages from the retry queue back to the outgoing queue."""

    QDIR = mm_cfg.RETRYQUEUE_DIR
    SLEEPTIME = mm_cfg.minutes(15)

    def __init__(self, slice=None, numslices=1):
        Runner.__init__(self, slice, numslices)
        self.__outq = Switchboard(mm_cfg.OUTQUEUE_DIR)

    def _dispose(self, mlist, msg, msgdata):
        """Re-enqueue the message for delivery once its retry time arrives.

        Returns True to keep the message in this queue, False once it has
        been handed to the outgoing queue.
        """
        when = msgdata.get('deliver_after', 0)
        if time.time() >= when:
            # Due (or never scheduled): hand it back to the outgoing queue.
            self.__outq.enqueue(msg, msgdata)
            return False
        # Not yet due; leave it in the retry queue.
        return True

    def _snooze(self, filecnt):
        # Always sleep between passes, even when the last one had work.
        time.sleep(self.SLEEPTIME)
class Runner:
    # Subclass responsibility: the queue directory this runner services.
    QDIR = None
    # Seconds to sleep between idle passes (see _snooze()).
    SLEEPTIME = config.QRUNNER_SLEEP_TIME

    def __init__(self, slice=None, numslices=1):
        self._kids = {}
        # Create our own switchboard.  Don't use the switchboard cache because
        # we want to provide slice and numslice arguments.
        self._switchboard = Switchboard(self.QDIR, slice, numslices, True)
        # Create the shunt switchboard
        self._shunt = Switchboard(config.SHUNTQUEUE_DIR)
        self._stop = False

    def __repr__(self):
        return "<%s at %s>" % (self.__class__.__name__, id(self))

    def stop(self):
        # Ask run()'s main loop to exit after the current pass completes.
        self._stop = True

    def run(self):
        # Start the main loop for this queue runner.
        try:
            try:
                while True:
                    # Once through the loop that processes all the files in
                    # the queue directory.
                    filecnt = self._oneloop()
                    # Do the periodic work for the subclass.  BAW: this
                    # shouldn't be called here.  There should be one more
                    # _doperiodic() call at the end of the _oneloop() loop.
                    self._doperiodic()
                    # If the stop flag is set, we're done.
                    if self._stop:
                        break
                    # Give the runner an opportunity to snooze for a while,
                    # but pass it the file count so it can decide whether to
                    # do more work now or not.
                    self._snooze(filecnt)
            except KeyboardInterrupt:
                pass
        finally:
            # We've broken out of our main loop, so we want to reap all the
            # subprocesses we've created and do any other necessary cleanups.
            self._cleanup()

    def _oneloop(self):
        # First, list all the files in our queue directory.
        # Switchboard.files() is guaranteed to hand us the files in FIFO
        # order.  Return an integer count of the number of files that were
        # available for this qrunner to process.
        #
        # NOTE(review): in this variant `files` is accessed as an attribute,
        # not called -- confirm this Switchboard exposes it as a property.
        files = self._switchboard.files
        for filebase in files:
            try:
                # Ask the switchboard for the message and metadata objects
                # associated with this filebase.
                msg, msgdata = self._switchboard.dequeue(filebase)
            except Exception, e:
                # This used to just catch email.Errors.MessageParseError,
                # but other problems can occur in message parsing, e.g.
                # ValueError, and exceptions can occur in unpickling too.
                # We don't want the runner to die, so we just log and skip
                # this entry, but preserve it for analysis.
                self._log(e)
                log.error("Skipping and preserving unparseable message: %s",
                          filebase)
                self._switchboard.finish(filebase, preserve=True)
                continue
            try:
                self._onefile(msg, msgdata)
                self._switchboard.finish(filebase)
            except Exception, e:
                # All runners that implement _dispose() must guarantee that
                # exceptions are caught and dealt with properly.  Still, there
                # may be a bug in the infrastructure, and we do not want those
                # to cause messages to be lost.  Any uncaught exceptions will
                # cause the message to be stored in the shunt queue for human
                # intervention.
                self._log(e)
                # Put a marker in the metadata for unshunting
                msgdata["whichq"] = self._switchboard.queue_directory
                # It is possible that shunting can throw an exception, e.g. a
                # permissions problem or a MemoryError due to a really large
                # message.  Try to be graceful.
                try:
                    new_filebase = self._shunt.enqueue(msg, msgdata)
                    log.error("SHUNTING: %s", new_filebase)
                    self._switchboard.finish(filebase)
                except Exception, e:
                    # The message wasn't successfully shunted.  Log the
                    # exception and try to preserve the original queue entry
                    # for possible analysis.
                    self._log(e)
                    log.error("SHUNTING FAILED, preserving original entry: %s",
                              filebase)
                    self._switchboard.finish(filebase, preserve=True)
                    # NOTE(review): this chunk ends mid-method; the per-file
                    # periodic work and the final `return len(files)` lie
                    # outside this view.
class Runner:
    # Subclass responsibility: the queue directory this runner services.
    QDIR = None
    # Seconds to sleep between idle passes (see _snooze()).
    SLEEPTIME = mm_cfg.QRUNNER_SLEEP_TIME

    def __init__(self, slice=None, numslices=1):
        self._kids = {}
        # Create our own switchboard.  Don't use the switchboard cache because
        # we want to provide slice and numslice arguments.
        self._switchboard = Switchboard(self.QDIR, slice, numslices, True)
        # Create the shunt switchboard
        self._shunt = Switchboard(mm_cfg.SHUNTQUEUE_DIR)
        self._stop = False

    def __repr__(self):
        return '<%s at %s>' % (self.__class__.__name__, id(self))

    def stop(self):
        # Ask run()'s main loop to exit after the current pass completes.
        self._stop = True

    def run(self):
        # Start the main loop for this queue runner.
        try:
            try:
                while True:
                    # Once through the loop that processes all the files in
                    # the queue directory.
                    filecnt = self._oneloop()
                    # Do the periodic work for the subclass.  BAW: this
                    # shouldn't be called here.  There should be one more
                    # _doperiodic() call at the end of the _oneloop() loop.
                    self._doperiodic()
                    # If the stop flag is set, we're done.
                    if self._stop:
                        break
                    # Give the runner an opportunity to snooze for a while,
                    # but pass it the file count so it can decide whether to
                    # do more work now or not.
                    self._snooze(filecnt)
            except KeyboardInterrupt:
                pass
        finally:
            # We've broken out of our main loop, so we want to reap all the
            # subprocesses we've created and do any other necessary cleanups.
            self._cleanup()

    def _oneloop(self):
        # First, list all the files in our queue directory.
        # Switchboard.files() is guaranteed to hand us the files in FIFO
        # order.  Return an integer count of the number of files that were
        # available for this qrunner to process.
        files = self._switchboard.files()
        for filebase in files:
            try:
                # Ask the switchboard for the message and metadata objects
                # associated with this filebase.
                msg, msgdata = self._switchboard.dequeue(filebase)
            except Exception, e:
                # This used to just catch email.Errors.MessageParseError,
                # but other problems can occur in message parsing, e.g.
                # ValueError, and exceptions can occur in unpickling too.
                # We don't want the runner to die, so we just log and skip
                # this entry, but maybe preserve it for analysis.
                self._log(e)
                if mm_cfg.QRUNNER_SAVE_BAD_MESSAGES:
                    syslog('error',
                           'Skipping and preserving unparseable message: %s',
                           filebase)
                    preserve = True
                else:
                    syslog('error',
                           'Ignoring unparseable message: %s',
                           filebase)
                    preserve = False
                self._switchboard.finish(filebase, preserve=preserve)
                continue
            try:
                self._onefile(msg, msgdata)
                self._switchboard.finish(filebase)
            except Exception, e:
                # All runners that implement _dispose() must guarantee that
                # exceptions are caught and dealt with properly.  Still, there
                # may be a bug in the infrastructure, and we do not want those
                # to cause messages to be lost.  Any uncaught exceptions will
                # cause the message to be stored in the shunt queue for human
                # intervention.
                self._log(e)
                # Put a marker in the metadata for unshunting
                msgdata['whichq'] = self._switchboard.whichq()
                # It is possible that shunting can throw an exception, e.g. a
                # permissions problem or a MemoryError due to a really large
                # message.  Try to be graceful.
                try:
                    new_filebase = self._shunt.enqueue(msg, msgdata)
                    syslog('error', 'SHUNTING: %s', new_filebase)
                    self._switchboard.finish(filebase)
                except Exception, e:
                    # The message wasn't successfully shunted.  Log the
                    # exception and try to preserve the original queue entry
                    # for possible analysis.
                    self._log(e)
                    syslog('error',
                           'SHUNTING FAILED, preserving original entry: %s',
                           filebase)
                    self._switchboard.finish(filebase, preserve=True)
                    # NOTE(review): this chunk ends mid-method; the per-file
                    # periodic work and the final `return len(files)` lie
                    # outside this view.
def __init__(self, slice=None, numslices=1):
    """Initialize the base runner, then open the outgoing-queue switchboard.

    slice/numslices are forwarded unchanged to the base Runner.
    """
    Runner.__init__(self, slice, numslices)
    # Due messages are handed back to the outgoing queue through this
    # switchboard for another delivery attempt.
    self.__outq = Switchboard(mm_cfg.OUTQUEUE_DIR)
def get_switchboard(qdir):
    """Return the cached Switchboard for qdir, creating it on first use.

    Fixed: the original used _sbcache.setdefault(qdir, Switchboard(qdir)),
    which constructs (and discards) a new Switchboard on every cache hit.
    Check the cache first so construction happens at most once per qdir.
    """
    switchboard = _sbcache.get(qdir)
    if switchboard is None:
        switchboard = _sbcache[qdir] = Switchboard(qdir)
    return switchboard
class Runner:
    """Abstract base class for queue runners: drains a queue directory,
    dispatching each message to the subclass's _dispose() and shunting
    anything that raises."""

    # Subclass responsibility: the queue directory this runner services.
    QDIR = None
    # Seconds to sleep between idle passes (see _snooze()).
    SLEEPTIME = mm_cfg.QRUNNER_SLEEP_TIME

    def __init__(self, slice=None, numslices=1):
        self._kids = {}
        # Create our own switchboard.  Don't use the switchboard cache because
        # we want to provide slice and numslice arguments.
        self._switchboard = Switchboard(self.QDIR, slice, numslices, True)
        # Create the shunt switchboard
        self._shunt = Switchboard(mm_cfg.SHUNTQUEUE_DIR)
        self._stop = False

    def __repr__(self):
        return '<%s at %s>' % (self.__class__.__name__, id(self))

    def stop(self):
        # Ask run()'s main loop to exit after the current pass completes.
        self._stop = True

    def run(self):
        # Start the main loop for this queue runner.
        try:
            try:
                while True:
                    # Once through the loop that processes all the files in
                    # the queue directory.
                    filecnt = self._oneloop()
                    # Do the periodic work for the subclass.  BAW: this
                    # shouldn't be called here.  There should be one more
                    # _doperiodic() call at the end of the _oneloop() loop.
                    self._doperiodic()
                    # If the stop flag is set, we're done.
                    if self._stop:
                        break
                    # Give the runner an opportunity to snooze for a while,
                    # but pass it the file count so it can decide whether to
                    # do more work now or not.
                    self._snooze(filecnt)
            except KeyboardInterrupt:
                pass
        finally:
            # We've broken out of our main loop, so we want to reap all the
            # subprocesses we've created and do any other necessary cleanups.
            self._cleanup()

    def _oneloop(self):
        # First, list all the files in our queue directory.
        # Switchboard.files() is guaranteed to hand us the files in FIFO
        # order.  Return an integer count of the number of files that were
        # available for this qrunner to process.
        files = self._switchboard.files()
        for filebase in files:
            try:
                # Ask the switchboard for the message and metadata objects
                # associated with this filebase.
                msg, msgdata = self._switchboard.dequeue(filebase)
            except Exception as e:
                # This used to just catch email.errors.MessageParseError,
                # but other problems can occur in message parsing, e.g.
                # ValueError, and exceptions can occur in unpickling too.
                # We don't want the runner to die, so we just log and skip
                # this entry, but maybe preserve it for analysis.
                self._log(e)
                if mm_cfg.QRUNNER_SAVE_BAD_MESSAGES:
                    syslog('error',
                           'Skipping and preserving unparseable message: %s',
                           filebase)
                    preserve = True
                else:
                    syslog('error',
                           'Ignoring unparseable message: %s',
                           filebase)
                    preserve = False
                self._switchboard.finish(filebase, preserve=preserve)
                continue
            try:
                self._onefile(msg, msgdata)
                self._switchboard.finish(filebase)
            except Exception as e:
                # All runners that implement _dispose() must guarantee that
                # exceptions are caught and dealt with properly.  Still, there
                # may be a bug in the infrastructure, and we do not want those
                # to cause messages to be lost.  Any uncaught exceptions will
                # cause the message to be stored in the shunt queue for human
                # intervention.
                self._log(e)
                # Put a marker in the metadata for unshunting
                msgdata['whichq'] = self._switchboard.whichq()
                # It is possible that shunting can throw an exception, e.g. a
                # permissions problem or a MemoryError due to a really large
                # message.  Try to be graceful.
                try:
                    new_filebase = self._shunt.enqueue(msg, msgdata)
                    syslog('error', 'SHUNTING: %s', new_filebase)
                    self._switchboard.finish(filebase)
                except Exception as e:
                    # The message wasn't successfully shunted.  Log the
                    # exception and try to preserve the original queue entry
                    # for possible analysis.
                    self._log(e)
                    syslog('error',
                           'SHUNTING FAILED, preserving original entry: %s',
                           filebase)
                    self._switchboard.finish(filebase, preserve=True)
            # Other work we want to do each time through the loop
            Utils.reap(self._kids, once=True)
            self._doperiodic()
            if self._shortcircuit():
                break
        return len(files)

    def _onefile(self, msg, msgdata):
        # Do some common sanity checking on the message metadata.  It's got to
        # be destined for a particular mailing list.  This switchboard is used
        # to shunt off badly formatted messages.  We don't want to just trash
        # them because they may be fixable with human intervention.  Just get
        # them out of our site though.
        #
        # Find out which mailing list this message is destined for.
        listname = msgdata.get('listname')
        if not listname:
            # No explicit destination: fall back to the site list.
            listname = mm_cfg.MAILMAN_SITE_LIST
        mlist = self._open_list(listname)
        if not mlist:
            syslog('error',
                   'Dequeuing message destined for missing list: %s',
                   listname)
            self._shunt.enqueue(msg, msgdata)
            return
        # Now process this message, keeping track of any subprocesses that may
        # have been spawned.  We'll reap those later.
        #
        # We also want to set up the language context for this message.  The
        # context will be the preferred language for the user if a member of
        # the list, or the list's preferred language.  However, we must take
        # special care to reset the defaults, otherwise subsequent messages
        # may be translated incorrectly.  BAW: I'm not sure I like this
        # approach, but I can't think of anything better right now.
        otranslation = i18n.get_translation()
        sender = msg.get_sender()
        if mlist:
            lang = mlist.getMemberLanguage(sender)
        else:
            lang = mm_cfg.DEFAULT_SERVER_LANGUAGE
        i18n.set_language(lang)
        msgdata['lang'] = lang
        try:
            keepqueued = self._dispose(mlist, msg, msgdata)
        finally:
            # Always restore the previous translation context, even if
            # _dispose() raised.
            i18n.set_translation(otranslation)
        # Keep tabs on any child processes that got spawned.
        kids = msgdata.get('_kids')
        if kids:
            self._kids.update(kids)
        if keepqueued:
            # The subclass asked for the message to stay queued; put it back.
            self._switchboard.enqueue(msg, msgdata)

    def _open_list(self, listname):
        # We no longer cache the list instances.  Because of changes to
        # MailList.py needed to avoid not reloading an updated list, caching
        # is not as effective as it once was.  Also, with OldStyleMemberships
        # as the MemberAdaptor, there was a self-reference to the list which
        # kept all lists in the cache.  Changing this reference to a
        # weakref.proxy created other issues.
        try:
            mlist = MailList.MailList(listname, lock=False)
        except Errors.MMListError as e:
            syslog('error', 'error opening list: %s\n%s', listname, e)
            return None
        return mlist

    def _log(self, exc):
        # Log the exception plus its full traceback to the error log.
        syslog('error', 'Uncaught runner exception: %s', exc)
        s = StringIO()
        traceback.print_exc(file=s)
        syslog('error', s.getvalue())

    #
    # Subclasses can override these methods.
    #
    def _cleanup(self):
        """Clean up upon exit from the main processing loop.

        Called when the Runner's main loop is stopped, this should perform
        any necessary resource deallocation.  Its return value is
        irrelevant.
        """
        Utils.reap(self._kids)

    def _dispose(self, mlist, msg, msgdata):
        """Dispose of a single message destined for a mailing list.

        Called for each message that the Runner is responsible for, this is
        the primary overridable method for processing each message.
        Subclasses, must provide implementation for this method.

        mlist is the MailList instance this message is destined for.

        msg is the Message object representing the message.

        msgdata is a dictionary of message metadata.
        """
        raise NotImplementedError

    def _doperiodic(self):
        """Do some processing `every once in a while'.

        Called every once in a while both from the Runner's main loop, and
        from the Runner's hash slice processing loop.  You can do whatever
        special periodic processing you want here, and the return value is
        irrelevant.
        """
        pass

    def _snooze(self, filecnt):
        """Sleep for a little while.

        filecnt is the number of messages in the queue the last time
        through.  Sub-runners can decide to continue to do work, or sleep
        for a while based on this value.  By default, we only snooze if
        there was nothing to do last time around.
        """
        if filecnt or self.SLEEPTIME <= 0:
            return
        time.sleep(self.SLEEPTIME)

    def _shortcircuit(self):
        """Return a true value if the individual file processing loop should
        exit before it's finished processing each message in the current
        slice of hash space.  A false value tells _oneloop() to continue
        processing until the current snapshot of hash space is exhausted.

        You could, for example, implement a throttling algorithm here.
        """
        return self._stop
class OutgoingRunner(Runner, BounceMixin): QDIR = mm_cfg.OUTQUEUE_DIR def __init__(self, slice=None, numslices=1): Runner.__init__(self, slice, numslices) BounceMixin.__init__(self) # We look this function up only at startup time modname = 'Mailman.Handlers.' + mm_cfg.DELIVERY_MODULE mod = __import__(modname) self._func = getattr(sys.modules[modname], 'process') # This prevents smtp server connection problems from filling up the # error log. It gets reset if the message was successfully sent, and # set if there was a socket.error. self.__logged = False self.__retryq = Switchboard(mm_cfg.RETRYQUEUE_DIR) def _dispose(self, mlist, msg, msgdata): # See if we should retry delivery of this message again. deliver_after = msgdata.get('deliver_after', 0) if time.time() < deliver_after: return True # Make sure we have the most up-to-date state mlist.Load() try: pid = os.getpid() self._func(mlist, msg, msgdata) # Failsafe -- a child may have leaked through. if pid <> os.getpid(): syslog('error', 'child process leaked thru: %s', modname) os._exit(1) self.__logged = False except socket.error: # There was a problem connecting to the SMTP server. Log this # once, but crank up our sleep time so we don't fill the error # log. port = mm_cfg.SMTPPORT if port == 0: port = 'smtp' # Log this just once. if not self.__logged: syslog('error', 'Cannot connect to SMTP server %s on port %s', mm_cfg.SMTPHOST, port) self.__logged = True self._snooze(0) return True except Errors.SomeRecipientsFailed, e: # Handle local rejects of probe messages differently. if msgdata.get('probe_token') and e.permfailures: self._probe_bounce(mlist, msgdata['probe_token']) else: # Delivery failed at SMTP time for some or all of the # recipients. Permanent failures are registered as bounces, # but temporary failures are retried for later. # # BAW: msg is going to be the original message that failed # delivery, not a bounce message. This may be confusing if # this is what's sent to the user in the probe message. 
Maybe # we should craft a bounce-like message containing information # about the permanent SMTP failure? if e.permfailures: self._queue_bounces(mlist.internal_name(), e.permfailures, msg) # Move temporary failures to the qfiles/retry queue which will # occasionally move them back here for another shot at # delivery. if e.tempfailures: now = time.time() recips = e.tempfailures last_recip_count = msgdata.get('last_recip_count', 0) deliver_until = msgdata.get('deliver_until', now) if len(recips) == last_recip_count: # We didn't make any progress, so don't attempt # delivery any longer. BAW: is this the best # disposition? if now > deliver_until: return False else: # Keep trying to delivery this message for a while deliver_until = now + mm_cfg.DELIVERY_RETRY_PERIOD msgdata['last_recip_count'] = len(recips) msgdata['deliver_until'] = deliver_until msgdata['recips'] = recips self.__retryq.enqueue(msg, msgdata) # We've successfully completed handling of this message return False