class MDA(MTA):
    """Mail Delivery Agent.

    Accepts inbound mail on port 25 (TLS via the configured certificate),
    applies header and mailing-list policies, and delivers locally through
    dovecot's LDA.  Local delivery failures are not retried.
    """

    def __init__(self, msa, mail_list_url, mda_domain, list_subject_prefix=None):
        # msa is kept so list mail can be re-submitted for outbound delivery.
        self.msa = msa

        # Relay: local delivery through the dovecot LDA binary.
        relay = DovecotLdaRelay(config['LDA']['dovecot_path'], timeout=10.0)

        # Queue: persistent on-disk storage (python-slimta-diskstorage).
        # No backoff function is supplied, so a failed local delivery fails
        # immediately instead of being retried.
        storage = DiskStorage(config['MDA']['ds_env'], config['MDA']['ds_meta'])
        self.queue = Queue(storage, relay)
        self.queue.start()

        # Header policies:
        self.queue.add_policy(AddDateHeader())
        self.queue.add_policy(AddMessageIdHeader())
        self.queue.add_policy(AddReceivedHeader())

        # Mailing list distribution (uses the MSA to send copies back out):
        self.queue.add_policy(MailingListDistribution(
            self.msa, mail_list_url, mda_domain, list_subject_prefix))

        # SpamAssassin: currently disabled.
        #self.queue.add_policy(SpamAssassin())

        # Edge: public SMTP listener.  Per-edge tls args were replaced by an
        # SSLContext; see https://docs.slimta.org/en/latest/blog/2016-11-14.html
        # NOTE(review): PROTOCOL_SSLv23 is a deprecated alias of PROTOCOL_TLS;
        # consider PROTOCOL_TLS_SERVER when the top-of-file import can change.
        ssl = SSLContext(PROTOCOL_SSLv23)
        ssl.load_cert_chain(config['SSL']['certificate_path'], config['SSL']['key_path'])
        self.edge = SmtpEdge(('0.0.0.0', 25), self.queue,
                             validator_class=MDA_Validators,
                             hostname=mda_domain, context=ssl)
        self.edge.start()
def _start_outbound_queue(args, relay, inbound_queue):
    """Create and start the outbound queue.

    Uses in-memory (DictStorage) persistence, bounces through the inbound
    queue, and applies the standard header policies plus recipient-domain
    splitting.  Returns the started Queue.
    """
    from slimta.queue.dict import DictStorage
    from slimta.queue import Queue
    from slimta.policy.headers import AddDateHeader, \
        AddMessageIdHeader, AddReceivedHeader
    from slimta.policy.split import RecipientDomainSplit

    store = DictStorage({}, {})
    outbound = Queue(store, relay, bounce_queue=inbound_queue)
    outbound.start()

    for policy in (AddDateHeader(), AddMessageIdHeader(),
                   AddReceivedHeader(), RecipientDomainSplit()):
        outbound.add_policy(policy)
    return outbound
def _start_inbound_queue(args, relay):
    """Create and start the inbound queue.

    Persists envelopes/metadata in shelve databases named by ``args``,
    and applies the standard header policies plus SpamAssassin.
    Returns the started Queue.
    """
    from slimta.queue.dict import DictStorage
    from slimta.queue import Queue
    from slimta.policy.headers import AddDateHeader, \
        AddMessageIdHeader, AddReceivedHeader
    from slimta.policy.spamassassin import SpamAssassin
    import shelve

    store = DictStorage(shelve.open(args.envelope_db),
                        shelve.open(args.meta_db))
    inbound = Queue(store, relay)
    inbound.start()

    for policy in (AddDateHeader(), AddMessageIdHeader(),
                   AddReceivedHeader(), SpamAssassin()):
        inbound.add_policy(policy)
    return inbound
def _start_inbound_queue(args, relay):
    """Create and start the inbound queue (in-memory storage).

    Applies the standard header policies, plus SpamAssassin when enabled
    via ``args.spamassassin``.  Returns the started Queue.
    """
    from slimta.queue.dict import DictStorage
    from slimta.queue import Queue
    from slimta.policy.headers import AddDateHeader, \
        AddMessageIdHeader, AddReceivedHeader
    from slimta.policy.spamassassin import SpamAssassin

    store = DictStorage({}, {})
    inbound = Queue(store, relay)
    inbound.start()

    policies = [AddDateHeader(), AddMessageIdHeader(), AddReceivedHeader()]
    if args.spamassassin:
        policies.append(SpamAssassin())
    for policy in policies:
        inbound.add_policy(policy)
    return inbound
class MSA(MTA):
    """Mail Submission Agent.

    Accepts submissions on localhost:587, applies header and split policies,
    and relays outbound mail directly to recipient MX hosts, retrying failed
    deliveries on a quadratic backoff schedule.
    """

    def __init__(self):
        # Relay: direct-to-MX outbound delivery.
        # NOTE(review): PROTOCOL_SSLv23 is a deprecated alias of PROTOCOL_TLS;
        # consider PROTOCOL_TLS_CLIENT when the top-of-file import can change.
        ssl = SSLContext(PROTOCOL_SSLv23)
        ssl.load_cert_chain(config['SSL']['certificate_path'], config['SSL']['key_path'])
        self.relay = MxSmtpRelay(context=ssl, connect_timeout=20,
                                 command_timeout=10, data_timeout=20,
                                 idle_timeout=30)

        # Queue: persistent on-disk storage (python-slimta-diskstorage).
        storage = DiskStorage(config['MSA']['ds_env'], config['MSA']['ds_meta'])

        def retry_backoff(envelope, attempts):
            # Retry at increasingly long intervals (60s, 240s, 540s, ...);
            # returning None gives up after 10 tries (~100 minutes total).
            if attempts < 10:
                return 60 * attempts * attempts
            return None

        self.queue = Queue(storage, self.relay, backoff=retry_backoff)
        self.queue.start()

        # Header and split policies:
        self.queue.add_policy(AddDateHeader())
        self.queue.add_policy(AddMessageIdHeader())
        self.queue.add_policy(AddReceivedHeader())
        self.queue.add_policy(RecipientDomainSplit())
        # !!! Add Forward policy here, to manage general forwarding
        # (but not list distribution - do that in mda!)

        # Edge: submission listener, localhost only; auth/validation
        # currently disabled.
        # ?!!! auth=True, validator_class=MSA_Validators,
        #      context=ssl, tls_immediately=True
        self.edge = SmtpEdge(('localhost', 587), self.queue, auth=False)
        self.edge.start()
from slimta.relay import RelayError
from slimta.relay.smtp.static import StaticSmtpRelay


def backoff(envelope, attempts):
    """Retry every ``5 * attempts`` seconds; give up (None) after 5 tries."""
    if attempts <= 5:
        return 5.0 * attempts
    return None  # was implicit; None tells the queue to stop retrying


# Relay every message through a fixed smarthost, at most 2 connections.
relay = StaticSmtpRelay('mail.example.com', 25, pool_size=2)

# Queue storage persisted in shelve databases on disk.
env_db = shelve.open('envelope.db')
meta_db = shelve.open('meta.db')
queue_storage = DictStorage(env_db, meta_db)
queue = Queue(queue_storage, relay, backoff)

edge = SmtpEdge(('127.0.0.1', 1337), queue)
edge.start()
queue.start()

try:
    edge.get()  # block until interrupted
except KeyboardInterrupt:
    # Fix: these were Python 2 print statements, a syntax error under
    # Python 3; converted to print() calls.
    print()
finally:
    # Dump any queued message ids before closing the databases.
    for key in env_db.keys():
        print('env', key)
    for key in meta_db.keys():
        print('meta', key)
    env_db.close()
    meta_db.close()

# vim:et:fdm=marker:sts=4:sw=4:ts=4
def _start_queue(self, name, options=None):
    """Create, start, cache, and return the queue named ``name``.

    ``options`` defaults to ``self.cfg.queue.<name>``.  The queue's relay
    and bounce queue are started on demand.  Supported ``options.type``
    values: memory, disk, redis, rackspace, aws, proxy, custom.

    Raises ConfigValidationError for an unrecognized queue type.
    """
    if name in self.queues:
        return self.queues[name]
    if not options:
        options = getattr(self.cfg.queue, name)
    from .helpers import add_queue_policies, build_backoff_function
    new_queue = None
    relay_name = options.get('relay')
    relay = self._start_relay(relay_name) if relay_name else None
    bounce_queue_name = options.get('bounce_queue', name)
    bounce_queue = self._start_queue(bounce_queue_name) \
        if bounce_queue_name != name else None
    if options.type == 'memory':
        from slimta.queue import Queue
        from slimta.queue.dict import DictStorage
        store = DictStorage()
        backoff = build_backoff_function(options.get('retry'))
        new_queue = Queue(store, relay, backoff=backoff,
                          bounce_queue=bounce_queue)
        new_queue.start()
    elif options.type == 'disk':
        from slimta.queue import Queue
        from slimta.diskstorage import DiskStorage
        env_dir = options.envelope_dir
        meta_dir = options.meta_dir
        tmp_dir = options.get('tmp_dir')
        store = DiskStorage(env_dir, meta_dir, tmp_dir)
        backoff = build_backoff_function(options.get('retry'))
        new_queue = Queue(store, relay, backoff=backoff,
                          bounce_queue=bounce_queue)
        new_queue.start()
    elif options.type == 'redis':
        from slimta.queue import Queue
        from slimta.redisstorage import RedisStorage
        kwargs = {}
        if 'host' in options:
            kwargs['host'] = options.host
        if 'port' in options:
            kwargs['port'] = int(options.port)
        if 'db' in options:
            kwargs['db'] = int(options.db)
        if 'password' in options:
            kwargs['password'] = options.password
        if 'socket_timeout' in options:
            kwargs['socket_timeout'] = float(options.socket_timeout)
        if 'prefix' in options:
            kwargs['prefix'] = options.prefix
        store = RedisStorage(**kwargs)
        backoff = build_backoff_function(options.get('retry'))
        new_queue = Queue(store, relay, backoff=backoff,
                          bounce_queue=bounce_queue)
        new_queue.start()
    elif options.type == 'rackspace':
        from slimta.queue import Queue
        from slimta.cloudstorage import CloudStorage
        from slimta.cloudstorage.rackspace import RackspaceCloudAuth, \
            RackspaceCloudFiles, RackspaceCloudQueues
        credentials = {'username': options.username}
        if 'password' in options:
            credentials['password'] = options.password
        if 'api_key' in options:
            credentials['api_key'] = options.api_key
        if 'tenant_id' in options:
            credentials['tenant_id'] = options.tenant_id
        auth_kwargs = {'region': options.get('region'), 'timeout': 10.0}
        if 'endpoint' in options:
            auth_kwargs['endpoint'] = options.endpoint
        auth = RackspaceCloudAuth(credentials, **auth_kwargs)
        cloud_files = RackspaceCloudFiles(
            auth, container=options.container_name, timeout=20.0)
        cloud_queues = None
        if 'queue_name' in options:
            cloud_queues = RackspaceCloudQueues(
                auth, queue_name=options.queue_name, timeout=10.0)
        store = CloudStorage(cloud_files, cloud_queues)
        # Fix: this branch previously referenced `backoff` without ever
        # defining it, raising NameError at runtime.
        backoff = build_backoff_function(options.get('retry'))
        new_queue = Queue(store, relay, backoff=backoff,
                          bounce_queue=bounce_queue)
        new_queue.start()
    elif options.type == 'aws':
        from slimta.queue import Queue
        from slimta.cloudstorage import CloudStorage
        from slimta.cloudstorage.aws import SimpleStorageService, \
            SimpleQueueService
        import boto
        if 'access_key_id' in options:
            from boto.s3.connection import S3Connection
            s3_conn = S3Connection(options.access_key_id,
                                   options.secret_access_key)
        else:
            s3_conn = boto.connect_s3()
        s3_bucket = s3_conn.get_bucket(options.bucket_name)
        s3 = SimpleStorageService(s3_bucket, timeout=20.0)
        sqs = None
        if 'queue_name' in options:
            from boto.sqs import connect_to_region
            region = options.get('queue_region', 'us-west-2')
            if 'access_key_id' in options:
                sqs_conn = connect_to_region(
                    region,
                    aws_access_key_id=options.access_key_id,
                    aws_secret_access_key=options.secret_access_key)
            else:
                sqs_conn = connect_to_region(region)
            sqs_queue = sqs_conn.create_queue(options.queue_name)
            sqs = SimpleQueueService(sqs_queue, timeout=10.0)
        store = CloudStorage(s3, sqs)
        # Fix: this branch previously referenced `backoff` without ever
        # defining it, raising NameError at runtime.
        backoff = build_backoff_function(options.get('retry'))
        new_queue = Queue(store, relay, backoff=backoff,
                          bounce_queue=bounce_queue)
        new_queue.start()
    elif options.type == 'proxy':
        from slimta.queue.proxy import ProxyQueue
        new_queue = ProxyQueue(relay)
    elif options.type == 'custom':
        new_queue = self._load_from_custom(options, relay)
    else:
        msg = 'queue type does not exist: ' + options.type
        # Fix: the message was computed but discarded; pass it along
        # (matching the sibling implementation of this method).
        raise ConfigValidationError(msg)
    add_queue_policies(new_queue, options.get('policies', []))
    self.queues[name] = new_queue
    return new_queue
def _start_queue(self, name, options=None):
    """Create, start, cache, and return the queue named ``name``.

    ``options`` defaults to ``self.cfg.queue.<name>``.  The queue's relay
    and bounce queue are started on demand.  Supported ``options.type``
    values: memory, disk, redis, rackspace, aws, proxy, custom.

    Raises ConfigValidationError for an unrecognized queue type.
    """
    if name in self.queues:
        return self.queues[name]
    if not options:
        options = getattr(self.cfg.queue, name)
    from .helpers import add_queue_policies, build_backoff_function
    new_queue = None
    relay_name = options.relay
    relay = self._start_relay(relay_name) if relay_name else None
    bounce_queue_name = options.get('bounce_queue', name)
    bounce_queue = self._start_queue(bounce_queue_name) \
        if bounce_queue_name != name else None
    if options.type == 'memory':
        from slimta.queue import Queue
        from slimta.queue.dict import DictStorage
        store = DictStorage()
        backoff = build_backoff_function(options.retry)
        new_queue = Queue(store, relay, backoff=backoff,
                          bounce_queue=bounce_queue)
        new_queue.start()
    elif options.type == 'disk':
        from slimta.queue import Queue
        from slimta.diskstorage import DiskStorage
        env_dir = options.envelope_dir
        meta_dir = options.meta_dir
        tmp_dir = options.tmp_dir
        store = DiskStorage(env_dir, meta_dir, tmp_dir)
        backoff = build_backoff_function(options.retry)
        new_queue = Queue(store, relay, backoff=backoff,
                          bounce_queue=bounce_queue)
        new_queue.start()
    elif options.type == 'redis':
        from slimta.queue import Queue
        from slimta.redisstorage import RedisStorage
        kwargs = {}
        if 'host' in options:
            kwargs['host'] = options.host
        if 'port' in options:
            kwargs['port'] = int(options.port)
        if 'db' in options:
            kwargs['db'] = int(options.db)
        if 'password' in options:
            kwargs['password'] = options.password
        if 'socket_timeout' in options:
            kwargs['socket_timeout'] = float(options.socket_timeout)
        if 'prefix' in options:
            kwargs['prefix'] = options.prefix
        store = RedisStorage(**kwargs)
        backoff = build_backoff_function(options.retry)
        new_queue = Queue(store, relay, backoff=backoff,
                          bounce_queue=bounce_queue)
        new_queue.start()
    elif options.type == 'rackspace':
        from slimta.queue import Queue
        from slimta.cloudstorage import CloudStorage
        from slimta.cloudstorage.rackspace import RackspaceCloudAuth, \
            RackspaceCloudFiles, RackspaceCloudQueues
        credentials = {'username': options.username}
        if 'password' in options:
            credentials['password'] = options.password
        if 'api_key' in options:
            credentials['api_key'] = options.api_key
        if 'tenant_id' in options:
            credentials['tenant_id'] = options.tenant_id
        auth_kwargs = {'region': options.region, 'timeout': 10.0}
        if 'endpoint' in options:
            auth_kwargs['endpoint'] = options.endpoint
        auth = RackspaceCloudAuth(credentials, **auth_kwargs)
        cloud_files = RackspaceCloudFiles(
            auth, container=options.container_name, timeout=20.0)
        cloud_queues = None
        if 'queue_name' in options:
            cloud_queues = RackspaceCloudQueues(
                auth, queue_name=options.queue_name, timeout=10.0)
        store = CloudStorage(cloud_files, cloud_queues)
        # Fix: this branch previously referenced `backoff` without ever
        # defining it, raising NameError at runtime.
        backoff = build_backoff_function(options.retry)
        new_queue = Queue(store, relay, backoff=backoff,
                          bounce_queue=bounce_queue)
        new_queue.start()
    elif options.type == 'aws':
        from slimta.queue import Queue
        from slimta.cloudstorage import CloudStorage
        from slimta.cloudstorage.aws import SimpleStorageService, \
            SimpleQueueService
        import boto
        if 'access_key_id' in options:
            from boto.s3.connection import S3Connection
            s3_conn = S3Connection(options.access_key_id,
                                   options.secret_access_key)
        else:
            s3_conn = boto.connect_s3()
        s3_bucket = s3_conn.get_bucket(options.bucket_name)
        s3 = SimpleStorageService(s3_bucket, timeout=20.0)
        sqs = None
        if 'queue_name' in options:
            from boto.sqs import connect_to_region
            region = options.get('queue_region', 'us-west-2')
            if 'access_key_id' in options:
                sqs_conn = connect_to_region(
                    region,
                    aws_access_key_id=options.access_key_id,
                    aws_secret_access_key=options.secret_access_key)
            else:
                sqs_conn = connect_to_region(region)
            sqs_queue = sqs_conn.create_queue(options.queue_name)
            sqs = SimpleQueueService(sqs_queue, timeout=10.0)
        store = CloudStorage(s3, sqs)
        # Fix: this branch previously referenced `backoff` without ever
        # defining it, raising NameError at runtime.
        backoff = build_backoff_function(options.retry)
        new_queue = Queue(store, relay, backoff=backoff,
                          bounce_queue=bounce_queue)
        new_queue.start()
    elif options.type == 'proxy':
        from slimta.queue.proxy import ProxyQueue
        new_queue = ProxyQueue(relay)
    elif options.type == 'custom':
        new_queue = custom_factory(options, relay)
    else:
        msg = 'queue type does not exist: ' + options.type
        raise ConfigValidationError(msg)
    add_queue_policies(new_queue, options.get('policies', []))
    self.queues[name] = new_queue
    return new_queue