def setUp(self):
    """Create the team/list fixture and start from a truncated xmlrpc log."""
    super(TestHandleProxyError, self).setUp()
    team, mailing_list = self.factory.makeTeamAndMailingList(
        'team-1', 'team-1-owner')
    self.team = team
    self.mailing_list = mailing_list
    self.mm_list = self.makeMailmanList(mailing_list)
    # Write one entry first so the 'xmlrpc' log file is guaranteed to exist
    # before reset_log() tries to truncate it.
    syslog.write_ex('xmlrpc', 'Ensure the log is open.')
    self.reset_log()
def reset_log(self):
    """Truncate the log."""
    xmlrpc_log = syslog._logfiles['xmlrpc']
    # The logger does not expose its path publicly; reach into the
    # name-mangled attribute to find the file on disk.
    path = xmlrpc_log._Logger__filename
    xmlrpc_log.close()
    # 'w' mode already truncates; the explicit truncate() makes the
    # intent unmistakable.
    with open(path, 'w') as handle:
        handle.truncate()
    syslog.write_ex('xmlrpc', 'Reset by test.')
def setUp(self):
    """Start with a clean xmlrpc log and a runner wired to the test proxy."""
    super(OopsReportingTestCase, self).setUp()
    self.mm_list = None
    # Write one entry first so the 'xmlrpc' log file is guaranteed to
    # exist before reset_log() truncates it.
    syslog.write_ex('xmlrpc', 'Ensure the log is open.')
    self.reset_log()
    runner = XMLRPCRunner()
    # MailmanTestCase's setup of the test proxy is ignored because the
    # runner captured a reference to the true proxy in its __init__, so
    # swap in the test proxy explicitly.
    runner._proxy = get_mailing_list_api_test_proxy()
    self.runner = runner
def setUp(self):
    """Start with a clean xmlrpc log and a runner wired to the test proxy."""
    super(TestXMLRPCRunnerHeatBeat, self).setUp()
    self.mm_list = None
    # Write one entry first so the 'xmlrpc' log file is guaranteed to
    # exist before reset_log() truncates it.
    syslog.write_ex('xmlrpc', 'Ensure the log is open.')
    self.reset_log()
    runner = XMLRPCRunner()
    # MailmanTestCase's setup of the test proxy is ignored because the
    # runner captured a reference to the true proxy in its __init__, so
    # swap in the test proxy explicitly.
    runner._proxy = get_mailing_list_api_test_proxy()
    self.runner = runner
def process(mlist, msg, msgdata):
    """Deliver msg to the addresses in msgdata['recips'].

    Recipients are split into chunks (one recipient per chunk when
    personalizing/VERPing, otherwise up to SMTP_MAX_RCPTS per chunk) and
    handed to either verpdeliver or bulkdeliver over a single SMTP
    connection.  Progress is checkpointed in msgdata['undelivered'] so a
    shunted message can resume where it left off.  Raises
    Errors.SomeRecipientsFailed if any recipient was refused.
    """
    recips = msgdata.get('recips')
    if not recips:
        # Nobody to deliver to!
        return
    # Calculate the non-VERP envelope sender.
    envsender = msgdata.get('envsender')
    if envsender is None:
        if mlist:
            envsender = mlist.GetBouncesEmail()
        else:
            envsender = Utils.get_site_email(extra='bounces')
    # Time to split up the recipient list.  If we're personalizing or VERPing
    # then each chunk will have exactly one recipient.  We'll then hand craft
    # an envelope sender and stitch a message together in memory for each one
    # separately.  If we're not VERPing, then we'll chunkify based on
    # SMTP_MAX_RCPTS.  Note that most MTAs have a limit on the number of
    # recipients they'll swallow in a single transaction.
    deliveryfunc = None
    if ('personalize' not in msgdata or msgdata['personalize']) and (
            msgdata.get('verp') or mlist.personalize):
        chunks = [[recip] for recip in recips]
        msgdata['personalize'] = 1
        deliveryfunc = verpdeliver
    elif mm_cfg.SMTP_MAX_RCPTS <= 0:
        chunks = [recips]
    else:
        chunks = chunkify(recips, mm_cfg.SMTP_MAX_RCPTS)
    # See if this is an unshunted message for which some were undelivered
    if 'undelivered' in msgdata:
        chunks = msgdata['undelivered']
    # If we're doing bulk delivery, then we can stitch up the message now.
    if deliveryfunc is None:
        # Be sure never to decorate the message more than once!
        if not msgdata.get('decorated'):
            Decorate.process(mlist, msg, msgdata)
            msgdata['decorated'] = True
        deliveryfunc = bulkdeliver
    refused = {}
    t0 = time.time()
    # Open the initial connection
    origrecips = msgdata['recips']
    # MAS: get the message sender now for logging.  If we're using 'sender'
    # and not 'from', bulkdeliver changes it for bounce processing.  If we're
    # VERPing, it doesn't matter because bulkdeliver is working on a copy, but
    # otherwise msg gets changed.  If the list is anonymous, the original
    # sender is long gone, but Cleanse.py has logged it.
    origsender = msgdata.get('original_sender', msg.get_sender())
    # `undelivered' is a copy of chunks that we pop from to do deliveries.
    # This seems like a good tradeoff between robustness and resource
    # utilization.  If delivery really fails (i.e. qfiles/shunt type
    # failures), then we'll pick up where we left off with `undelivered'.
    # This means at worst, the last chunk for which delivery was attempted
    # could get duplicates but not every one, and no recips should miss the
    # message.
    conn = Connection()
    try:
        msgdata['undelivered'] = chunks
        while chunks:
            chunk = chunks.pop()
            msgdata['recips'] = chunk
            try:
                deliveryfunc(mlist, msg, msgdata, envsender, refused, conn)
            except Exception:
                # If /anything/ goes wrong, push the last chunk back on the
                # undelivered list and re-raise the exception.  We don't know
                # how many of the last chunk might receive the message, so at
                # worst, everyone in this chunk will get a duplicate.  Sigh.
                chunks.append(chunk)
                raise
        del msgdata['undelivered']
    finally:
        # Always close the connection and restore the full recipient list
        # for any downstream handlers.
        conn.quit()
        msgdata['recips'] = origrecips
    # Log the successful post
    t1 = time.time()
    d = MsgSafeDict(msg, {'time'    : t1-t0,
                          # BAW: Urg.  This seems inefficient.
                          'size'    : len(msg.as_string()),
                          '#recips' : len(recips),
                          '#refused': len(refused),
                          'listname': mlist.internal_name(),
                          'sender'  : origsender,
                          })
    # We have to use the copy() method because extended call syntax requires a
    # concrete dictionary object; it does not allow a generic mapping.  It's
    # still worthwhile doing the interpolation in syslog() because it'll catch
    # any catastrophic exceptions due to bogus format strings.
    if mm_cfg.SMTP_LOG_EVERY_MESSAGE:
        syslog.write_ex(mm_cfg.SMTP_LOG_EVERY_MESSAGE[0],
                        mm_cfg.SMTP_LOG_EVERY_MESSAGE[1], kws=d)
    if refused:
        if mm_cfg.SMTP_LOG_REFUSED:
            syslog.write_ex(mm_cfg.SMTP_LOG_REFUSED[0],
                            mm_cfg.SMTP_LOG_REFUSED[1], kws=d)
    elif msgdata.get('tolist'):
        # Log the successful post, but only if it really was a post to the
        # mailing list.  Don't log sends to the -owner, or -admin addrs.
        # -request addrs should never get here.  BAW: it may be useful to log
        # the other messages, but in that case, we should probably have a
        # separate configuration variable to control that.
        if mm_cfg.SMTP_LOG_SUCCESS:
            syslog.write_ex(mm_cfg.SMTP_LOG_SUCCESS[0],
                            mm_cfg.SMTP_LOG_SUCCESS[1], kws=d)
    # Process any failed deliveries.
    tempfailures = []
    permfailures = []
    for recip, (code, smtpmsg) in list(refused.items()):
        # DRUMS is an internet draft, but it says:
        #
        #    [RFC-821] incorrectly listed the error where an SMTP server
        #    exhausts its implementation limit on the number of RCPT commands
        #    ("too many recipients") as having reply code 552.  The correct
        #    reply code for this condition is 452.  Clients SHOULD treat a 552
        #    code in this case as a temporary, rather than permanent failure
        #    so the logic below works.
        #
        if code >= 500 and code != 552:
            # A permanent failure
            permfailures.append(recip)
        else:
            # Deal with persistent transient failures by queuing them up for
            # future delivery.  TBD: this could generate lots of log entries!
            tempfailures.append(recip)
        if mm_cfg.SMTP_LOG_EACH_FAILURE:
            d.update({'recipient': recip,
                      'failcode' : code,
                      'failmsg'  : smtpmsg})
            syslog.write_ex(mm_cfg.SMTP_LOG_EACH_FAILURE[0],
                            mm_cfg.SMTP_LOG_EACH_FAILURE[1], kws=d)
    # Return the results
    if tempfailures or permfailures:
        raise Errors.SomeRecipientsFailed(tempfailures, permfailures)
def process(mlist, msg, msgdata):
    """Deliver msg to the addresses in msgdata['recips'].

    Recipients are split into chunks (one recipient per chunk when
    personalizing/VERPing, otherwise up to SMTP_MAX_RCPTS per chunk) and
    handed to either verpdeliver or bulkdeliver over a single SMTP
    connection.  Progress is checkpointed in msgdata['undelivered'] so a
    shunted message can resume where it left off.  Raises
    Errors.SomeRecipientsFailed if any recipient was refused.

    Fixed for Python 3 compatibility (and consistency with the sibling
    copy of this handler): dict.has_key() was removed in Python 3 and the
    `<>` operator is a syntax error there; use the `in` operator and `!=`
    instead, and snapshot refused.items() with list() before mutating d
    inside the loop.
    """
    recips = msgdata.get('recips')
    if not recips:
        # Nobody to deliver to!
        return
    # Calculate the non-VERP envelope sender.
    envsender = msgdata.get('envsender')
    if envsender is None:
        if mlist:
            envsender = mlist.GetBouncesEmail()
        else:
            envsender = Utils.get_site_email(extra='bounces')
    # Time to split up the recipient list.  If we're personalizing or VERPing
    # then each chunk will have exactly one recipient.  We'll then hand craft
    # an envelope sender and stitch a message together in memory for each one
    # separately.  If we're not VERPing, then we'll chunkify based on
    # SMTP_MAX_RCPTS.  Note that most MTAs have a limit on the number of
    # recipients they'll swallow in a single transaction.
    deliveryfunc = None
    # 'personalize' not in msgdata replaces msgdata.has_key('personalize'),
    # which no longer exists in Python 3.
    if ('personalize' not in msgdata or msgdata['personalize']) and (
            msgdata.get('verp') or mlist.personalize):
        chunks = [[recip] for recip in recips]
        msgdata['personalize'] = 1
        deliveryfunc = verpdeliver
    elif mm_cfg.SMTP_MAX_RCPTS <= 0:
        chunks = [recips]
    else:
        chunks = chunkify(recips, mm_cfg.SMTP_MAX_RCPTS)
    # See if this is an unshunted message for which some were undelivered
    if 'undelivered' in msgdata:
        chunks = msgdata['undelivered']
    # If we're doing bulk delivery, then we can stitch up the message now.
    if deliveryfunc is None:
        # Be sure never to decorate the message more than once!
        if not msgdata.get('decorated'):
            Decorate.process(mlist, msg, msgdata)
            msgdata['decorated'] = True
        deliveryfunc = bulkdeliver
    refused = {}
    t0 = time.time()
    # Open the initial connection
    origrecips = msgdata['recips']
    # MAS: get the message sender now for logging.  If we're using 'sender'
    # and not 'from', bulkdeliver changes it for bounce processing.  If we're
    # VERPing, it doesn't matter because bulkdeliver is working on a copy, but
    # otherwise msg gets changed.  If the list is anonymous, the original
    # sender is long gone, but Cleanse.py has logged it.
    origsender = msgdata.get('original_sender', msg.get_sender())
    # `undelivered' is a copy of chunks that we pop from to do deliveries.
    # This seems like a good tradeoff between robustness and resource
    # utilization.  If delivery really fails (i.e. qfiles/shunt type
    # failures), then we'll pick up where we left off with `undelivered'.
    # This means at worst, the last chunk for which delivery was attempted
    # could get duplicates but not every one, and no recips should miss the
    # message.
    conn = Connection()
    try:
        msgdata['undelivered'] = chunks
        while chunks:
            chunk = chunks.pop()
            msgdata['recips'] = chunk
            try:
                deliveryfunc(mlist, msg, msgdata, envsender, refused, conn)
            except Exception:
                # If /anything/ goes wrong, push the last chunk back on the
                # undelivered list and re-raise the exception.  We don't know
                # how many of the last chunk might receive the message, so at
                # worst, everyone in this chunk will get a duplicate.  Sigh.
                chunks.append(chunk)
                raise
        del msgdata['undelivered']
    finally:
        # Always close the connection and restore the full recipient list
        # for any downstream handlers.
        conn.quit()
        msgdata['recips'] = origrecips
    # Log the successful post
    t1 = time.time()
    d = MsgSafeDict(msg, {'time'    : t1-t0,
                          # BAW: Urg.  This seems inefficient.
                          'size'    : len(msg.as_string()),
                          '#recips' : len(recips),
                          '#refused': len(refused),
                          'listname': mlist.internal_name(),
                          'sender'  : origsender,
                          })
    # We have to use the copy() method because extended call syntax requires a
    # concrete dictionary object; it does not allow a generic mapping.  It's
    # still worthwhile doing the interpolation in syslog() because it'll catch
    # any catastrophic exceptions due to bogus format strings.
    if mm_cfg.SMTP_LOG_EVERY_MESSAGE:
        syslog.write_ex(mm_cfg.SMTP_LOG_EVERY_MESSAGE[0],
                        mm_cfg.SMTP_LOG_EVERY_MESSAGE[1], kws=d)
    if refused:
        if mm_cfg.SMTP_LOG_REFUSED:
            syslog.write_ex(mm_cfg.SMTP_LOG_REFUSED[0],
                            mm_cfg.SMTP_LOG_REFUSED[1], kws=d)
    elif msgdata.get('tolist'):
        # Log the successful post, but only if it really was a post to the
        # mailing list.  Don't log sends to the -owner, or -admin addrs.
        # -request addrs should never get here.  BAW: it may be useful to log
        # the other messages, but in that case, we should probably have a
        # separate configuration variable to control that.
        if mm_cfg.SMTP_LOG_SUCCESS:
            syslog.write_ex(mm_cfg.SMTP_LOG_SUCCESS[0],
                            mm_cfg.SMTP_LOG_SUCCESS[1], kws=d)
    # Process any failed deliveries.
    tempfailures = []
    permfailures = []
    for recip, (code, smtpmsg) in list(refused.items()):
        # DRUMS is an internet draft, but it says:
        #
        #    [RFC-821] incorrectly listed the error where an SMTP server
        #    exhausts its implementation limit on the number of RCPT commands
        #    ("too many recipients") as having reply code 552.  The correct
        #    reply code for this condition is 452.  Clients SHOULD treat a 552
        #    code in this case as a temporary, rather than permanent failure
        #    so the logic below works.
        #
        # `!=` replaces the Python-2-only `<>` operator.
        if code >= 500 and code != 552:
            # A permanent failure
            permfailures.append(recip)
        else:
            # Deal with persistent transient failures by queuing them up for
            # future delivery.  TBD: this could generate lots of log entries!
            tempfailures.append(recip)
        if mm_cfg.SMTP_LOG_EACH_FAILURE:
            d.update({'recipient': recip,
                      'failcode' : code,
                      'failmsg'  : smtpmsg})
            syslog.write_ex(mm_cfg.SMTP_LOG_EACH_FAILURE[0],
                            mm_cfg.SMTP_LOG_EACH_FAILURE[1], kws=d)
    # Return the results
    if tempfailures or permfailures:
        raise Errors.SomeRecipientsFailed(tempfailures, permfailures)
def debug(msg, *args, **kws):
    """Write msg to the 'debug' log, but only when DEBUG == 1."""
    if DEBUG != 1:
        return
    syslog.write_ex('debug', msg, args, kws)