Example #1
 def add(self, store, pendable, lifetime=None):
     verifyObject(IPendable, pendable)
     # Calculate the token and the lifetime.
     if lifetime is None:
         lifetime = as_timedelta(config.mailman.pending_request_life)
     # Calculate a unique token.  Algorithm vetted by the Timbot.  time()
     # has high resolution on Linux, clock() on Windows.  random gives us
     # about 45 bits in Python 2.2, 53 bits on Python 2.3.  The time and
     # clock values basically help obscure the random number generator, as
     # does the hash calculation.  The integral parts of the time values
     # are discarded because they're the most predictable bits.
     for attempts in range(3):
         right_now = time.time()
         x = random.random() + right_now % 1.0 + time.clock() % 1.0
         # Use sha1 because it produces shorter strings.
         token = hashlib.sha1(repr(x).encode('utf-8')).hexdigest()
         # In practice, we'll never get a duplicate, but we'll be anal
         # about checking anyway.
         if store.query(Pended).filter_by(token=token).count() == 0:
             break
     else:
         raise RuntimeError('Could not find a valid pendings token')
     # Create the record, and then the individual key/value pairs.
     pending = Pended(token=token, expiration_date=now() + lifetime)
     for key, value in pendable.items():
         # Both keys and values must be strings.
         if isinstance(key, bytes):
             key = key.decode('utf-8')
         if isinstance(value, bytes):
             # Make sure we can turn this back into a bytes.
             value = dict(__encoding__='utf-8', value=value.decode('utf-8'))
         keyval = PendedKeyValue(key=key, value=json.dumps(value))
         pending.key_values.append(keyval)
     store.add(pending)
     return token
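Note: this token scheme predates Python 3 in places (time.clock() was removed in Python 3.8), and later examples replace it with token_factory.new(). A minimal modern sketch, assuming the standard-library secrets module is acceptable, that yields a 40-character hex token of the same shape as a sha1 hexdigest:

    import secrets

    def new_token():
        # 20 random bytes -> 40 hex characters, the same width as a
        # sha1 hexdigest, but drawn from a cryptographically strong source.
        return secrets.token_hex(20)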
Example #2
 def add(self, store, pendable, lifetime=None):
     verifyObject(IPendable, pendable)
     # Calculate the token and the lifetime.
     if lifetime is None:
         lifetime = as_timedelta(config.mailman.pending_request_life)
     # Calculate a unique token.  Algorithm vetted by the Timbot.  time()
     # has high resolution on Linux, clock() on Windows.  random gives us
     # about 45 bits in Python 2.2, 53 bits on Python 2.3.  The time and
     # clock values basically help obscure the random number generator, as
     # does the hash calculation.  The integral parts of the time values
     # are discarded because they're the most predictable bits.
     for attempts in range(3):
         right_now = time.time()
         x = random.random() + right_now % 1.0 + time.clock() % 1.0
         # Use sha1 because it produces shorter strings.
         token = hashlib.sha1(repr(x).encode("utf-8")).hexdigest()
         # In practice, we'll never get a duplicate, but we'll be anal
         # about checking anyway.
         if store.query(Pended).filter_by(token=token).count() == 0:
             break
     else:
         raise RuntimeError("Could not find a valid pendings token")
     # Create the record, and then the individual key/value pairs.
     pending = Pended(token=token, expiration_date=now() + lifetime)
     for key, value in pendable.items():
         # Both keys and values must be strings.
         if isinstance(key, bytes):
             key = key.decode("utf-8")
         if isinstance(value, bytes):
             # Make sure we can turn this back into a bytes.
             value = dict(__encoding__="utf-8", value=value.decode("utf-8"))
         keyval = PendedKeyValue(key=key, value=json.dumps(value))
         pending.key_values.append(keyval)
     store.add(pending)
     return token
Example #3
 def throttle_date(self):
     """See `IDirectEmailAuthorization`."""
     now = datetime.now(pytz.timezone('UTC'))
     after = now - as_timedelta(
         config.launchpad.user_to_user_throttle_interval)
     throttlers = self._getThrottlers(after)
     # We now have the set of emails that would throttle delivery.  If the
     # configuration variable has changed, this could produce more or less
     # than the now-allowed number of throttlers.  We should never get here
     # if it's less because the contact would have been allowed.
     #
     # If it's more, then we really want to count back from the sorted end,
     # because when /that/ contact record expires, they'll be able to
     # resend.  Here are two examples.
     #
     # affecters = A B C
     # max allowed = 3
     # index = len(affecters) - 3 == 0 == A
     # when A's date < the interval, they can try again
     #
     # affecters = A B C D E F G
     # max allowed (now) = 3
     # index = len(affecters) - 3 = 4 == E (counting from zero)
     # when E's date < the interval, they can try again
     affecters = sorted(throttlers, key=attrgetter('date_sent'))
     max_throttlers = config.launchpad.user_to_user_max_messages
     expiry = len(affecters) - max_throttlers
     if expiry < 0:
         # There were fewer affecters than are now allowed, so they can
         # retry immediately.  Remember that the caller adds the interval
         # back, so this would give us 'now'.
         return after
     return affecters[expiry].date_sent
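The index arithmetic in the comment can be checked in isolation. A small illustrative sketch (the letters stand in for contact records, as in the comment above):

    # affecters sorted oldest-first; e.g. 7 entries, 3 now allowed.
    affecters = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
    max_throttlers = 3
    expiry = len(affecters) - max_throttlers    # 4, counting from zero
    assert affecters[expiry] == 'E'
    # When E's date falls outside the interval, the sender is back
    # under the limit and can try again.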
Example #4
 def add(self, store, pendable, lifetime=None):
     verifyObject(IPendable, pendable)
     # Calculate the token and the lifetime.
     if lifetime is None:
         lifetime = as_timedelta(config.mailman.pending_request_life)
     for attempts in range(3):
         token = token_factory.new()
         # In practice, we'll never get a duplicate, but we'll be anal
         # about checking anyway.
         if store.query(Pended).filter_by(token=token).count() == 0:
             break
     else:
         raise RuntimeError('Could not find a valid pendings token')
     # Create the record, and then the individual key/value pairs.
     pending = Pended(token=token, expiration_date=now() + lifetime)
     pendable_type = pendable.get('type', pendable.PEND_TYPE)
     pending.key_values.append(
         PendedKeyValue(key='type', value=pendable_type))
     for key, value in pendable.items():
         # The type has been handled above.
         if key == 'type':
             continue
         # Both keys and values must be strings.
         if isinstance(key, bytes):
             key = key.decode('utf-8')
         if isinstance(value, bytes):
             # Make sure we can turn this back into a bytes.
             value = dict(__encoding__='utf-8', value=value.decode('utf-8'))
         keyval = PendedKeyValue(key=key, value=json.dumps(value))
         pending.key_values.append(keyval)
     store.add(pending)
     return token
Example #5
File: pending.py (Project: aswinpj/Mailman)
 def add(self, store, pendable, lifetime=None):
     verifyObject(IPendable, pendable)
     # Calculate the token and the lifetime.
     if lifetime is None:
         lifetime = as_timedelta(config.mailman.pending_request_life)
     for attempts in range(3):
         token = token_factory.new()
         # In practice, we'll never get a duplicate, but we'll be anal
         # about checking anyway.
         if store.query(Pended).filter_by(token=token).count() == 0:
             break
     else:
         raise RuntimeError('Could not find a valid pendings token')
     # Create the record, and then the individual key/value pairs.
     pending = Pended(
         token=token,
         expiration_date=now() + lifetime)
     pendable_type = pendable.get('type', pendable.PEND_TYPE)
     pending.key_values.append(
         PendedKeyValue(key='type', value=pendable_type))
     for key, value in pendable.items():
         # The type has been handled above.
         if key == 'type':
             continue
         # Both keys and values must be strings.
         if isinstance(key, bytes):
             key = key.decode('utf-8')
         if isinstance(value, bytes):
             # Make sure we can turn this back into a bytes.
             value = dict(__encoding__='utf-8',
                          value=value.decode('utf-8'))
         keyval = PendedKeyValue(key=key, value=json.dumps(value))
         pending.key_values.append(keyval)
     store.add(pending)
     return token
Example #6
    def __init__(self, name, slice=None):
        """Create a runner.

        :param slice: The slice number for this runner.  This is passed
            directly to the underlying `ISwitchboard` object.  This is ignored
            for runners that don't manage a queue.
        :type slice: int or None
        """
        # Grab the configuration section.
        self.name = name
        section = getattr(config, 'runner.' + name)
        substitutions = config.paths
        substitutions['name'] = name
        numslices = int(section.instances)
        # Check whether the runner is a queue runner or not; a non-queue
        # runner should not have a queue_directory or switchboard instance.
        if self.is_queue_runner:
            self.queue_directory = expand(section.path, substitutions)
            self.switchboard = Switchboard(
                name, self.queue_directory, slice, numslices, True)
        else:
            self.queue_directory = None
            self.switchboard = None
        self.sleep_time = as_timedelta(section.sleep_time)
        # sleep_time is a timedelta; turn it into a float for time.sleep().
        self.sleep_float = (86400 * self.sleep_time.days +
                            self.sleep_time.seconds +
                            self.sleep_time.microseconds / 1.0e6)
        self.max_restarts = int(section.max_restarts)
        self.start = as_boolean(section.start)
        self._stop = False
        self.status = 0
Example #7
    def __init__(self, name, slice=None):
        """Create a runner.

        :param slice: The slice number for this runner.  This is passed
            directly to the underlying `ISwitchboard` object.  This is ignored
            for runners that don't manage a queue.
        :type slice: int or None
        """
        # Grab the configuration section.
        self.name = name
        section = getattr(config, 'runner.' + name)
        substitutions = config.paths
        substitutions['name'] = name
        numslices = int(section.instances)
        # Check whether the runner is a queue runner or not; a non-queue
        # runner should not have a queue_directory or switchboard instance.
        if self.is_queue_runner:
            self.queue_directory = expand(section.path, None, substitutions)
            self.switchboard = Switchboard(name, self.queue_directory, slice,
                                           numslices, True)
        else:
            self.queue_directory = None
            self.switchboard = None
        self.sleep_time = as_timedelta(section.sleep_time)
        # sleep_time is a timedelta; turn it into a float for time.sleep().
        self.sleep_float = (86400 * self.sleep_time.days +
                            self.sleep_time.seconds +
                            self.sleep_time.microseconds / 1.0e6)
        self.max_restarts = int(section.max_restarts)
        self.start = as_boolean(section.start)
        self._stop = False
        self.status = 0
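The hand-rolled float conversion above computes exactly what timedelta.total_seconds() returns, so on any current Python the two are interchangeable; a quick check:

    from datetime import timedelta

    delta = timedelta(days=1, seconds=30, microseconds=500000)
    manual = (86400 * delta.days + delta.seconds +
              delta.microseconds / 1.0e6)
    assert manual == delta.total_seconds() == 86430.5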
Example #8
 def is_allowed(self):
     """See `IDirectEmailAuthorization`."""
     # Users are only allowed to send X number of messages in a certain
     # period of time.  Both the number of messages and the time period
     # are configurable.
     now = datetime.now(pytz.timezone('UTC'))
     after = now - as_timedelta(
         config.launchpad.user_to_user_throttle_interval)
     return self._isAllowedAfter(after)
Example #9
 def test_cached_copy_is_missing(self):
     cache_path = os.path.join(config.VAR_DIR, dmarc.LOCAL_FILE_NAME)
     self.assertFalse(os.path.exists(cache_path))
     new_path = dmarc.ensure_current_suffix_list()
     self.assertEqual(cache_path, new_path)
     with open(cache_path, 'r', encoding='utf-8') as fp:
         contents = fp.read()
     self.assertEqual(contents, 'abc')
     self.assertEqual(
         os.stat(new_path).st_mtime,
         (now() + as_timedelta(config.dmarc.cache_lifetime)).timestamp())
Example #10
def wait_for_webservice(hostname=None, port=None):
    """Wait for the REST server to start serving requests."""
    hostname = config.webservice.hostname if hostname is None else hostname
    port = int(config.webservice.port) if port is None else port
    until = datetime.datetime.now() + as_timedelta(config.devmode.wait)
    while datetime.datetime.now() < until:
        try:
            socket.socket().connect((hostname, port))
        except ConnectionRefusedError:
            time.sleep(0.1)
        else:
            break
    else:
        raise RuntimeError('Connection refused')
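The while ... else shape used here is easy to misread: the else suite runs only when the loop condition goes false without a break, i.e. on timeout. A generic sketch of the pattern (wait_until and its arguments are illustrative names, not Mailman APIs):

    import time
    from datetime import datetime, timedelta

    def wait_until(predicate, timeout, poll=0.1):
        """Poll predicate() until it returns True or timeout expires."""
        until = datetime.now() + timeout
        while datetime.now() < until:
            if predicate():
                break
            time.sleep(poll)
        else:
            # Reached only when the deadline passed without a break.
            raise RuntimeError('timed out')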
Example #11
def ensure_current_suffix_list():
    # Read and parse the organizational domain suffix list.  First look in the
    # cache directory to see if we already have a valid copy of it.
    cached_copy_path = os.path.join(config.VAR_DIR, LOCAL_FILE_NAME)
    lifetime = as_timedelta(config.dmarc.cache_lifetime)
    download = False
    try:
        mtime = os.stat(cached_copy_path).st_mtime
    except FileNotFoundError:
        vlog.info('No cached copy of the public suffix list found')
        download = True
        cache_found = False
    else:
        cache_found = True
        # Is the cached copy out-of-date?  Note that when we write a new cache
        # version we explicitly set its mtime to the time in the future when
        # the cache will expire.
        if mtime < now().timestamp():
            download = True
            vlog.info('Cached copy of public suffix list is out of date')
    if download:
        try:
            content = get(config.dmarc.org_domain_data_url)
        except (URLError, HTTPError) as error:
            elog.error('Unable to retrieve public suffix list from %s: %s',
                       config.dmarc.org_domain_data_url,
                       getattr(error, 'reason', str(error)))
            if cache_found:
                vlog.info('Using out of date public suffix list')
                content = None
            else:
                # We couldn't access the URL and didn't even have an out of
                # date suffix list cached.  Use the shipped version.
                content = resource_bytes('mailman.rules.data', LOCAL_FILE_NAME)
        if content is not None:
            # Content is either a string or UTF-8 encoded bytes.
            if isinstance(content, bytes):
                content = content.decode('utf-8')
            # Write the cache atomically.
            new_path = cached_copy_path + '.new'
            with open(new_path, 'w', encoding='utf-8') as fp:
                fp.write(content)
            # Set the expiry time to the future.
            mtime = (now() + lifetime).timestamp()
            os.utime(new_path, (mtime, mtime))
            # Flip the new file into the cached location.  This does not
            # modify the mtime.
            os.rename(new_path, cached_copy_path)
    return cached_copy_path
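The freshness trick here is that the expiry time is stored in the cache file's own mtime, so a staleness check is a single stat call. A minimal sketch of just that mechanism, independent of the suffix-list logic:

    import os
    import time

    def write_with_expiry(path, content, lifetime_seconds):
        with open(path, 'w', encoding='utf-8') as fp:
            fp.write(content)
        # Push the mtime into the future; the file counts as fresh until then.
        expires = time.time() + lifetime_seconds
        os.utime(path, (expires, expires))

    def is_expired(path):
        return os.stat(path).st_mtime < time.time()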
Example #12
def wait_for_webservice():
    """Wait for the REST server to start serving requests."""
    until = datetime.datetime.now() + as_timedelta(config.devmode.wait)
    while datetime.datetime.now() < until:
        try:
            socket.socket().connect((config.webservice.hostname,
                                    int(config.webservice.port)))
        except socket.error as error:
            if error[0] == errno.ECONNREFUSED:
                time.sleep(0.1)
            else:
                raise
        else:
            break
    else:
        raise RuntimeError("Connection refused")
Example #13
 def _wait_for_rest_server():
     until = datetime.datetime.now() + as_timedelta(config.devmode.wait)
     while datetime.datetime.now() < until:
         try:
             request = Request("http://localhost:9001/3.0/system")
             basic_auth = "{0}:{1}".format(config.webservice.admin_user, config.webservice.admin_pass)
             request.add_header("Authorization", "Basic " + b64encode(basic_auth))
             fp = urlopen(request)
         except URLError:
             pass
         else:
             fp.close()
             break
     else:
         raise RuntimeError("REST server did not start up")
Example #14
 def test_cached_copy_is_missing_download_404s(self):
     # There's no cached file and we'll get a 404 with the .err file so
     # we'll have to fall back to our internal copy.
     cache_path = os.path.join(config.VAR_DIR, dmarc.LOCAL_FILE_NAME)
     self.assertFalse(os.path.exists(cache_path))
     new_path = dmarc.ensure_current_suffix_list()
     self.assertEqual(cache_path, new_path)
     with open(cache_path, 'r', encoding='utf-8') as fp:
         contents = fp.read()
     # The contents are *not* equal to our dummy test data, but don't tie it
     # too closely to the in-tree file contents since that might change
     # when and if we update that.
     self.assertNotEqual(contents, 'abc')
     self.assertEqual(
         os.stat(new_path).st_mtime,
         (now() + as_timedelta(config.dmarc.cache_lifetime)).timestamp())
Example #15
 def test_cached_copy_is_expired(self):
     cache_path = os.path.join(config.VAR_DIR, dmarc.LOCAL_FILE_NAME)
     with open(cache_path, 'w', encoding='utf-8') as fp:
         print('xyz', end='', file=fp)
     # Expire the cache file.  That way the current cached file will be
     # invalid and a new one will be downloaded.
     expires = (now() - timedelta(days=1)).timestamp()
     os.utime(cache_path, (expires, expires))
     new_path = dmarc.ensure_current_suffix_list()
     self.assertEqual(cache_path, new_path)
     with open(cache_path, 'r', encoding='utf-8') as fp:
         contents = fp.read()
     self.assertEqual(contents, 'abc')
     self.assertEqual(
         os.stat(new_path).st_mtime,
         (now() + as_timedelta(config.dmarc.cache_lifetime)).timestamp())
Example #16
def wait_for_webservice():
    """Wait for the REST server to start serving requests."""
    until = datetime.datetime.now() + as_timedelta(config.devmode.wait)
    while datetime.datetime.now() < until:
        try:
            socket.socket().connect((config.webservice.hostname,
                                    int(config.webservice.port)))
        except IOError as error:
            if error.errno == errno.ECONNREFUSED:
                time.sleep(0.1)
            else:
                raise
        else:
            break
    else:
        raise RuntimeError('Connection refused')
Example #17
 def add(self, store, pendable, lifetime=None):
     verifyObject(IPendable, pendable)
     # Calculate the token and the lifetime.
     if lifetime is None:
         lifetime = as_timedelta(config.mailman.pending_request_life)
     # Calculate a unique token.  Algorithm vetted by the Timbot.  time()
     # has high resolution on Linux, clock() on Windows.  random gives us
     # about 45 bits in Python 2.2, 53 bits on Python 2.3.  The time and
     # clock values basically help obscure the random number generator, as
     # does the hash calculation.  The integral parts of the time values
     # are discarded because they're the most predictable bits.
     for attempts in range(3):
         right_now = time.time()
         x = random.random() + right_now % 1.0 + time.clock() % 1.0
         # Use sha1 because it produces shorter strings.
         token = hashlib.sha1(repr(x)).hexdigest()
         # In practice, we'll never get a duplicate, but we'll be anal
         # about checking anyway.
         if store.query(Pended).filter_by(token=token).count() == 0:
             break
     else:
         raise AssertionError('Could not find a valid pendings token')
     # Create the record, and then the individual key/value pairs.
     pending = Pended(
         token=token,
         expiration_date=now() + lifetime)
     for key, value in pendable.items():
         if isinstance(key, bytes):
             key = key.decode('utf-8')
         if isinstance(value, bytes):
             value = value.decode('utf-8')
         elif type(value) is int:
             value = '__builtin__.int\1%s' % value
         elif type(value) is float:
             value = '__builtin__.float\1%s' % value
         elif type(value) is bool:
             value = '__builtin__.bool\1%s' % value
         elif type(value) is list:
             # We expect this to be a list of strings.
             value = ('mailman.model.pending.unpack_list\1' +
                      '\2'.join(value))
         keyval = PendedKeyValue(key=key, value=value)
         pending.key_values.append(keyval)
     store.add(pending)
     return token
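This older variant flattens typed values into strings: a loader name, a \1 (chr(1)) separator, then the payload, with list items joined by \2 (chr(2)). The unpack_list function is only referenced here; a plausible counterpart under that encoding (hypothetical, not the project's actual code) would be:

    def unpack_list(payload):
        # Hypothetical inverse of the '\2'.join(value) packing above:
        # split the packed payload back into its list of strings.
        return payload.split('\x02')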
Example #18
 def add(self, store, key, contents, lifetime=None):
     """See `ICacheManager`."""
     if lifetime is None:
         lifetime = as_timedelta(config.mailman.cache_life)
     is_bytes = isinstance(contents, bytes)
     file_id = self._key_to_file_id(key)
     # Is there already an unexpired entry under this id in the database?
     # If the entry doesn't exist, create it.  If it does, overwrite both
     # the contents and the lifetime.
     entry = store.query(CacheEntry).filter(
         CacheEntry.key == key).one_or_none()
     if entry is None:
         entry = CacheEntry(key, file_id, is_bytes, lifetime)
         store.add(entry)
     else:
         entry.update(is_bytes, lifetime)
     self._write_contents(file_id, contents, is_bytes)
     return file_id
Example #19
 def test_one_temporary_failure(self):
     # The first time there are temporary failures, the message just gets
     # put in the retry queue, but with some metadata to prevent infinite
     # retries.
     temporary_failures.append('*****@*****.**')
     self._outq.enqueue(self._msg, {}, listid='test.example.com')
     self._runner.run()
     events = list(self._processor.unprocessed)
     self.assertEqual(len(events), 0)
     items = get_queue_messages('retry', expected_count=1)
     self.assertEqual(self._msg.as_string(), items[0].msg.as_string())
     # The metadata has three keys which are used to decide whether the
     # next temporary failure should be retried.
     self.assertEqual(items[0].msgdata['last_recip_count'], 1)
     deliver_until = (datetime(2005, 8, 1, 7, 49, 23) +
                      as_timedelta(config.mta.delivery_retry_period))
     self.assertEqual(items[0].msgdata['deliver_until'], deliver_until)
     self.assertEqual(items[0].msgdata['recipients'], ['*****@*****.**'])
Example #20
 def test_no_progress_on_retries_within_retry_period(self):
     # Temporary failures cause queuing for a retry later on, unless no
     # progress is being made on the retries and we've tried for the
     # specified delivery retry period.  This test ensures that even if no
     # progress is made, if the retry period hasn't expired, the message
     # will be requeued.
     temporary_failures.append('*****@*****.**')
     temporary_failures.append('*****@*****.**')
     deliver_until = (datetime(2005, 8, 1, 7, 49, 23) +
                      as_timedelta(config.mta.delivery_retry_period))
     msgdata = dict(last_recip_count=2, deliver_until=deliver_until)
     self._outq.enqueue(self._msg, msgdata, listid='test.example.com')
     self._runner.run()
     # The retry queue should have our message waiting to be retried.
     items = get_queue_messages('retry', expected_count=1)
     self.assertEqual(items[0].msgdata['deliver_until'], deliver_until)
     self.assertEqual(items[0].msgdata['recipients'],
                      ['*****@*****.**', '*****@*****.**'])
Example #21
def get_lmtp_client(quiet=False):
    """Return a connected LMTP client."""
    # It's possible the process has started but is not yet accepting
    # connections.  Wait a little while.
    lmtp = LMTP()
    # lmtp.debuglevel = 1
    until = datetime.datetime.now() + as_timedelta(config.devmode.wait)
    while datetime.datetime.now() < until:
        try:
            response = lmtp.connect(config.mta.lmtp_host,
                                    int(config.mta.lmtp_port))
            if not quiet:
                print(response)
            return lmtp
        except ConnectionRefusedError:
            time.sleep(0.1)
    else:
        raise RuntimeError('Connection refused')
Example #22
 def test_no_progress_on_retries_within_retry_period(self):
     # Temporary failures cause queuing for a retry later on, unless no
     # progress is being made on the retries and we've tried for the
     # specified delivery retry period.  This test ensures that even if no
     # progress is made, if the retry period hasn't expired, the message
     # will be requeued.
     temporary_failures.append('*****@*****.**')
     temporary_failures.append('*****@*****.**')
     deliver_until = (datetime(2005, 8, 1, 7, 49, 23) +
                      as_timedelta(config.mta.delivery_retry_period))
     msgdata = dict(last_recip_count=2,
                    deliver_until=deliver_until)
     self._outq.enqueue(self._msg, msgdata, listid='test.example.com')
     self._runner.run()
     # The retry queue should have our message waiting to be retried.
     items = get_queue_messages('retry', expected_count=1)
     self.assertEqual(items[0].msgdata['deliver_until'], deliver_until)
     self.assertEqual(items[0].msgdata['recipients'],
                      ['*****@*****.**', '*****@*****.**'])
Example #23
def get_lmtp_client(quiet=False):
    """Return a connected LMTP client."""
    # It's possible the process has started but is not yet accepting
    # connections.  Wait a little while.
    lmtp = LMTP()
    # lmtp.debuglevel = 1
    until = datetime.datetime.now() + as_timedelta(config.devmode.wait)
    while datetime.datetime.now() < until:
        try:
            response = lmtp.connect(config.mta.lmtp_host, int(config.mta.lmtp_port))
            if not quiet:
                print(response)
            return lmtp
        except socket.error as error:
            if error[0] == errno.ECONNREFUSED:
                time.sleep(0.1)
            else:
                raise
    else:
        raise RuntimeError("Connection refused")
Example #24
def _should_clobber(msg, msgdata, archiver):
    """Should the Date header in the original message get clobbered?"""
    # Calculate the Date header of the message as a datetime.  What if there
    # are multiple Date headers, even in violation of the RFC?  For now, take
    # the first one.  If there are no Date headers, then definitely clobber.
    original_date = msg.get('date')
    if original_date is None:
        return True
    section = getattr(config.archiver, archiver, None)
    if section is None:
        log.error('No archiver config section found: {0}'.format(archiver))
        return False
    try:
        clobber = ClobberDate[section.clobber_date]
    except ValueError:
        log.error('Invalid clobber_date for "{0}": {1}'.format(
            archiver, section.clobber_date))
        return False
    if clobber is ClobberDate.always:
        return True
    elif clobber is ClobberDate.never:
        return False
    # Maybe we'll clobber the date.  Let's see if it's farther off from now
    # than the skew period.
    skew = as_timedelta(section.clobber_skew)
    try:
        time_tuple = parsedate_tz(original_date)
    except (ValueError, OverflowError):
        # The likely cause of this is that the year in the Date: field is
        # horribly incorrect, e.g. (from SF bug # 571634):
        #
        # Date: Tue, 18 Jun 0102 05:12:09 +0500
        #
        # Obviously clobber such dates.
        return True
    if time_tuple is None:
        # There was some other bogosity in the Date header.
        return True
    claimed_date = datetime.fromtimestamp(mktime_tz(time_tuple))
    return (abs(now() - claimed_date) > skew)
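The skew test relies on email.utils.parsedate_tz and mktime_tz, which turn an RFC 2822 Date header into a POSIX timestamp. A short standalone sketch, assuming a one-hour skew window:

    from datetime import datetime, timedelta
    from email.utils import mktime_tz, parsedate_tz

    header = 'Tue, 18 Jun 2024 05:12:09 +0500'
    time_tuple = parsedate_tz(header)       # None if the header is bogus
    claimed = datetime.fromtimestamp(mktime_tz(time_tuple))
    clobber = abs(datetime.now() - claimed) > timedelta(hours=1)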
Example #25
File: archive.py (Project: aswinpj/Mailman)
def _should_clobber(msg, msgdata, archiver):
    """Should the Date header in the original message get clobbered?"""
    # Calculate the Date header of the message as a datetime.  What if there
    # are multiple Date headers, even in violation of the RFC?  For now, take
    # the first one.  If there are no Date headers, then definitely clobber.
    original_date = msg.get('date')
    if original_date is None:
        return True
    section = getattr(config.archiver, archiver, None)
    if section is None:
        log.error('No archiver config section found: {}'.format(archiver))
        return False
    try:
        clobber = ClobberDate[section.clobber_date]
    except ValueError:
        log.error('Invalid clobber_date for "{}": {}'.format(
            archiver, section.clobber_date))
        return False
    if clobber is ClobberDate.always:
        return True
    elif clobber is ClobberDate.never:
        return False
    # Maybe we'll clobber the date.  Let's see if it's farther off from now
    # than the skew period.
    skew = as_timedelta(section.clobber_skew)
    try:
        time_tuple = parsedate_tz(original_date)
    except (ValueError, OverflowError):
        # The likely cause of this is that the year in the Date: field is
        # horribly incorrect, e.g. (from SF bug # 571634):
        #
        # Date: Tue, 18 Jun 0102 05:12:09 +0500
        #
        # Obviously clobber such dates.
        return True
    if time_tuple is None:
        # There was some other bogosity in the Date header.
        return True
    claimed_date = datetime.fromtimestamp(mktime_tz(time_tuple))
    return (abs(now() - claimed_date) > skew)
Example #26
    def __init__(self, name, slice=None):
        """Create a runner.

        :param slice: The slice number for this runner.  This is passed
            directly to the underlying `ISwitchboard` object.  This is ignored
            for runners that don't manage a queue.
        :type slice: int or None
        """
        # Grab the configuration section.
        self.name = name
        section = getattr(config, "runner." + name)
        substitutions = config.paths
        substitutions["name"] = name
        self.queue_directory = expand(section.path, substitutions)
        numslices = int(section.instances)
        self.switchboard = Switchboard(name, self.queue_directory, slice,
                                       numslices, True)
        self.sleep_time = as_timedelta(section.sleep_time)
        # sleep_time is a timedelta; turn it into a float for time.sleep().
        self.sleep_float = (86400 * self.sleep_time.days +
                            self.sleep_time.seconds +
                            self.sleep_time.microseconds / 1.0e6)
        self.max_restarts = int(section.max_restarts)
        self.start = as_boolean(section.start)
        self._stop = False
Example #27
 def test_no_progress_on_retries_with_expired_retry_period(self):
     # We've had temporary failures with no progress, and the retry period
     # has expired.  In that case, a log entry is written and message is
     # discarded.  There's nothing more that can be done.
     temporary_failures.append('*****@*****.**')
     temporary_failures.append('*****@*****.**')
     retry_period = as_timedelta(config.mta.delivery_retry_period)
     deliver_until = datetime(2005, 8, 1, 7, 49, 23) + retry_period
     msgdata = dict(last_recip_count=2, deliver_until=deliver_until)
     self._outq.enqueue(self._msg, msgdata, listid='test.example.com')
     # Before the runner runs, several days pass.
     factory.fast_forward(retry_period.days + 1)
     mark = LogFileMark('mailman.smtp')
     self._runner.run()
     # There should be no message in the retry or outgoing queues.
     get_queue_messages('retry', expected_count=0)
     get_queue_messages('out', expected_count=0)
     # There should be a log message in the smtp log indicating that the
     # message has been discarded.
     line = mark.readline()
     self.assertEqual(
         line[-63:-1],
         'Discarding message with persistent temporary failures: <first>')
Example #28
 def test_no_progress_on_retries_with_expired_retry_period(self):
     # We've had temporary failures with no progress, and the retry period
     # has expired.  In that case, a log entry is written and message is
     # discarded.  There's nothing more that can be done.
     temporary_failures.append('*****@*****.**')
     temporary_failures.append('*****@*****.**')
     retry_period = as_timedelta(config.mta.delivery_retry_period)
     deliver_until = datetime(2005, 8, 1, 7, 49, 23) + retry_period
     msgdata = dict(last_recip_count=2,
                    deliver_until=deliver_until)
     self._outq.enqueue(self._msg, msgdata, listid='test.example.com')
     # Before the runner runs, several days pass.
     factory.fast_forward(retry_period.days + 1)
     mark = LogFileMark('mailman.smtp')
     self._runner.run()
     # There should be no message in the retry or outgoing queues.
     get_queue_messages('retry', expected_count=0)
     get_queue_messages('out', expected_count=0)
     # There should be a log message in the smtp log indicating that the
     # message has been discarded.
     line = mark.readline()
     self.assertEqual(
         line[-63:-1],
         'Discarding message with persistent temporary failures: <first>')
Example #29
 def _dispose(self, mlist, msg, msgdata):
     # See if we should retry delivery of this message again.
     deliver_after = msgdata.get('deliver_after', datetime.fromtimestamp(0))
     if now() < deliver_after:
         return True
     # Calculate whether we should VERP this message or not.  The results of
     # this set the 'verp' key in the message metadata.
     interval = int(config.mta.verp_delivery_interval)
     if 'verp' in msgdata:
         # Honor existing settings.
         pass
     # If personalization is enabled for this list and we've configured
     # Mailman to always VERP personalized deliveries, then yes we VERP it.
     # Also, if personalization is /not/ enabled, but
     # verp_delivery_interval is set (and we've hit this interval), then
     # again, this message should be VERP'd. Otherwise, no.
     elif mlist.personalize != Personalization.none:
         if as_boolean(config.mta.verp_personalized_deliveries):
             msgdata['verp'] = True
     elif interval == 0:
         # Never VERP.
         msgdata['verp'] = False
     elif interval == 1:
         # VERP every time.
         msgdata['verp'] = True
     else:
         # VERP every 'interval' number of times.
         msgdata['verp'] = (mlist.post_id % interval == 0)
     try:
         debug_log.debug('[outgoing] {}: {}'.format(
             self._func, msg.get('message-id', 'n/a')))
         self._func(mlist, msg, msgdata)
         self._logged = False
     except socket.error:
         # There was a problem connecting to the SMTP server.  Log this
         # once, but crank up our sleep time so we don't fill the error
         # log.
         port = int(config.mta.smtp_port)
         if port == 0:
             port = 'smtp'  # Log this just once.
         if not self._logged:
             log.error('Cannot connect to SMTP server %s on port %s',
                       config.mta.smtp_host, port)
             self._logged = True
         return True
     except SomeRecipientsFailed as error:
         processor = getUtility(IBounceProcessor)
         # BAW: msg is the original message that failed delivery, not a
         # bounce message.  This may be confusing if this is what's sent to
         # the user in the probe message.  Maybe we should craft a
         # bounce-like message containing information about the permanent
         # SMTP failure?
         if 'probe_token' in msgdata:
             # This is a failure of our local MTA to deliver to a probe
             # message recipient.  Register the bounce event for permanent
             # failures.  Start by grabbing and confirming (i.e. removing)
             # the pendable record associated with this bounce token,
             # regardless of what address was actually failing.
             if len(error.permanent_failures) > 0:
                 pended = getUtility(IPendings).confirm(
                     msgdata['probe_token'])
                 # It's possible the token has been confirmed out of the
                 # database.  Just ignore that.
                 if pended is not None:
                     # The UUID had to be pended as a unicode.
                     member = getUtility(ISubscriptionService).get_member(
                         UUID(hex=pended['member_id']))
                     processor.register(mlist, member.address.email, msg,
                                        BounceContext.probe)
         else:
             # Delivery failed at SMTP time for some or all of the
             # recipients.  Permanent failures are registered as bounces,
             # but temporary failures are retried for later.
             for email in error.permanent_failures:
                 processor.register(mlist, email, msg, BounceContext.normal)
             # Move temporary failures to the qfiles/retry queue which will
             # occasionally move them back here for another shot at
             # delivery.
             if error.temporary_failures:
                 current_time = now()
                 recipients = error.temporary_failures
                 last_recip_count = msgdata.get('last_recip_count', 0)
                 deliver_until = msgdata.get('deliver_until', current_time)
                 if len(recipients) == last_recip_count:
                     # We didn't make any progress.  If we've exceeded the
                     # configured retry period, log this failure and
                     # discard the message.
                     if current_time > deliver_until:
                         smtp_log.error('Discarding message with '
                                        'persistent temporary failures: '
                                        '{}'.format(msg['message-id']))
                         return False
                 else:
                     # We made some progress, so keep trying to deliver
                     # this message for a while longer.
                     deliver_until = current_time + as_timedelta(
                         config.mta.delivery_retry_period)
                 msgdata['last_recip_count'] = len(recipients)
                 msgdata['deliver_until'] = deliver_until
                 msgdata['recipients'] = recipients
                 self._retryq.enqueue(msg, msgdata)
     # We've successfully completed handling of this message.
     return False
Example #30
def is_reject_or_quarantine(mlist, email, dmarc_domain, org=False):
    # This takes a mailing list, an email address as in the From: header, the
    # _dmarc host name for the domain in question, and a flag stating whether
    # we should check the organizational domains.  It returns one of three
    # values:
    # * True if the DMARC policy is reject or quarantine;
    # * False if is not;
    # * A special sentinel if we should continue looking
    resolver = dns.resolver.Resolver()
    resolver.timeout = as_timedelta(
        config.dmarc.resolver_timeout).total_seconds()
    resolver.lifetime = as_timedelta(
        config.dmarc.resolver_lifetime).total_seconds()
    try:
        txt_recs = resolver.query(dmarc_domain, dns.rdatatype.TXT)
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        return KEEP_LOOKING
    except (dns.resolver.NoNameservers):
        elog.error('DNSException: No Nameservers available for %s (%s).',
                   email, dmarc_domain)
        # Typically this means a dnssec validation error.  Clients that don't
        # perform validation *may* successfully see a _dmarc RR whereas a
        # validating mailman server won't see the _dmarc RR.  We should
        # mitigate this email to be safe.
        return True
    except DNSException as error:
        elog.error(
            'DNSException: Unable to query DMARC policy for %s (%s). %s',
            email, dmarc_domain, error.__doc__)
        # While we can't be sure what caused the error, there is potentially
        # a DMARC policy record that we missed and that a receiver of the mail
        # might see.  Thus, we should err on the side of caution and mitigate.
        return True
    # Be as robust as possible in parsing the result.
    results_by_name = {}
    cnames = {}
    want_names = set([dmarc_domain + '.'])
    # Check all the TXT records returned by DNS.  Keep track of the CNAMEs for
    # checking later on.  Ignore any other non-TXT records.
    for txt_rec in txt_recs.response.answer:
        if txt_rec.rdtype == dns.rdatatype.CNAME:
            cnames[txt_rec.name.to_text()] = (
                txt_rec.items[0].target.to_text())
        if txt_rec.rdtype != dns.rdatatype.TXT:
            continue
        result = EMPTYSTRING.join(
            str(record, encoding='utf-8')
            for record in txt_rec.items[0].strings)
        name = txt_rec.name.to_text()
        results_by_name.setdefault(name, []).append(result)
    expands = list(want_names)
    seen = set(expands)
    while expands:
        item = expands.pop(0)
        if item in cnames:
            if cnames[item] in seen:
                # CNAME loop.
                continue
            expands.append(cnames[item])
            seen.add(cnames[item])
            want_names.add(cnames[item])
            want_names.discard(item)
    assert len(want_names) == 1, (
        'Error in CNAME processing for {}; want_names != 1.'.format(
            dmarc_domain))
    for name in want_names:
        if name not in results_by_name:
            continue
        dmarcs = [
            record for record in results_by_name[name]
            if record.startswith('v=DMARC1;')
        ]
        if len(dmarcs) == 0:
            return KEEP_LOOKING
        if len(dmarcs) > 1:
            elog.error(
                'RRset of TXT records for %s has %d v=DMARC1 entries; '
                'testing them all', dmarc_domain, len(dmarcs))
        for entry in dmarcs:
            mo = re.search(r'\bsp=(\w*)\b', entry, re.IGNORECASE)
            if org and mo:
                policy = mo.group(1).lower()
            else:
                mo = re.search(r'\bp=(\w*)\b', entry, re.IGNORECASE)
                if mo:
                    policy = mo.group(1).lower()
                else:
                    # This continue does actually get covered by
                    # TestDMARCRules.test_domain_with_subdomain_policy() and
                    # TestDMARCRules.test_no_policy() but because of
                    # Coverage BitBucket issue #198 and
                    # http://bugs.python.org/issue2506 coverage cannot report
                    # it as such, so just pragma it away.
                    continue  # pragma: missed
            if policy in ('reject', 'quarantine'):
                vlog.info('%s: DMARC lookup for %s (%s) found p=%s in %s = %s',
                          mlist.list_name, email, dmarc_domain, policy, name,
                          entry)
                return True
    return False
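The policy extraction at the end reduces to two regular expressions over each TXT record: sp= (the subdomain policy) wins when checking the organizational domain, otherwise p= applies. A small sketch over a sample record, assuming standard DMARC TXT syntax:

    import re

    entry = 'v=DMARC1; p=quarantine; sp=none; rua=mailto:agg@example.com'
    org = True    # checking the organizational domain
    mo = re.search(r'\bsp=(\w*)\b', entry, re.IGNORECASE)
    if org and mo:
        policy = mo.group(1).lower()
    else:
        mo = re.search(r'\bp=(\w*)\b', entry, re.IGNORECASE)
        policy = mo.group(1).lower() if mo else None
    assert policy == 'none'    # sp=none overrides p=quarantine here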
Example #31
File: outgoing.py (Project: aregee/Mailman)
 def _dispose(self, mlist, msg, msgdata):
     # See if we should retry delivery of this message again.
     deliver_after = msgdata.get('deliver_after', datetime.fromtimestamp(0))
     if now() < deliver_after:
         return True
     # Calculate whether we should VERP this message or not.  The results of
     # this set the 'verp' key in the message metadata.
     interval = int(config.mta.verp_delivery_interval)
     if 'verp' in msgdata:
         # Honor existing settings.
         pass
     # If personalization is enabled for this list and we've configured
     # Mailman to always VERP personalized deliveries, then yes we VERP it.
     # Also, if personalization is /not/ enabled, but
     # verp_delivery_interval is set (and we've hit this interval), then
     # again, this message should be VERP'd. Otherwise, no.
     elif mlist.personalize != Personalization.none:
         if as_boolean(config.mta.verp_personalized_deliveries):
             msgdata['verp'] = True
     elif interval == 0:
         # Never VERP.
         msgdata['verp'] = False
     elif interval == 1:
         # VERP every time.
         msgdata['verp'] = True
     else:
         # VERP every 'interval' number of times.
         msgdata['verp'] = (mlist.post_id % interval == 0)
     try:
         debug_log.debug('[outgoing] {0}: {1}'.format(
             self._func, msg.get('message-id', 'n/a')))
         self._func(mlist, msg, msgdata)
         self._logged = False
     except socket.error:
         # There was a problem connecting to the SMTP server.  Log this
         # once, but crank up our sleep time so we don't fill the error
         # log.
         port = int(config.mta.smtp_port)
         if port == 0:
             port = 'smtp'            # Log this just once.
         if not self._logged:
             log.error('Cannot connect to SMTP server %s on port %s',
                       config.mta.smtp_host, port)
             self._logged = True
         return True
     except SomeRecipientsFailed as error:
         processor = getUtility(IBounceProcessor)
         # BAW: msg is the original message that failed delivery, not a
         # bounce message.  This may be confusing if this is what's sent to
         # the user in the probe message.  Maybe we should craft a
         # bounce-like message containing information about the permanent
         # SMTP failure?
         if 'probe_token' in msgdata:
             # This is a failure of our local MTA to deliver to a probe
             # message recipient.  Register the bounce event for permanent
             # failures.  Start by grabbing and confirming (i.e. removing)
             # the pendable record associated with this bounce token,
             # regardless of what address was actually failing.
             if len(error.permanent_failures) > 0:
                 pended = getUtility(IPendings).confirm(
                     msgdata['probe_token'])
                 # It's possible the token has been confirmed out of the
                 # database.  Just ignore that.
                 if pended is not None:
                     # The UUID had to be pended as a unicode.
                     member = getUtility(ISubscriptionService).get_member(
                         UUID(hex=pended['member_id']))
                     processor.register(
                         mlist, member.address.email, msg,
                         BounceContext.probe)
         else:
             # Delivery failed at SMTP time for some or all of the
             # recipients.  Permanent failures are registered as bounces,
             # but temporary failures are retried for later.
             for email in error.permanent_failures:
                 processor.register(mlist, email, msg, BounceContext.normal)
             # Move temporary failures to the qfiles/retry queue which will
             # occasionally move them back here for another shot at
             # delivery.
             if error.temporary_failures:
                 current_time = now()
                 recipients = error.temporary_failures
                 last_recip_count = msgdata.get('last_recip_count', 0)
                 deliver_until = msgdata.get('deliver_until', current_time)
                 if len(recipients) == last_recip_count:
                     # We didn't make any progress.  If we've exceeded the
                     # configured retry period, log this failure and
                     # discard the message.
                     if current_time > deliver_until:
                         smtp_log.error('Discarding message with '
                                        'persistent temporary failures: '
                                        '{0}'.format(msg['message-id']))
                         return False
                 else:
                     # We made some progress, so keep trying to deliver
                     # this message for a while longer.
                     deliver_until = current_time + as_timedelta(
                         config.mta.delivery_retry_period)
                 msgdata['last_recip_count'] = len(recipients)
                 msgdata['deliver_until'] = deliver_until
                 msgdata['recipients'] = recipients
                 self._retryq.enqueue(msg, msgdata)
     # We've successfully completed handling of this message.
     return False