def test_pop_messages(self):
    """ MESSAGE (CORE): Test retrieve and delete messages """
    truncate_messages()
    for i in range(10):
        add_message(event_type='TEST',
                    payload={'foo': True,
                             'monty': 'python',
                             'number': i})
    tmp = retrieve_messages(10)
    to_delete = []
    for i in tmp:
        assert isinstance(i['payload'], dict)
        assert i['payload']['foo'] is True
        assert i['payload']['monty'] == 'python'
        assert i['payload']['number'] in list(range(100))
        to_delete.append({'id': i['id'],
                          'created_at': i['created_at'],
                          'updated_at': i['created_at'],
                          'payload': str(i['payload']),
                          'event_type': i['event_type']})
    delete_messages(to_delete)
    assert retrieve_messages() == []
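# A minimal usage sketch of the message-table API exercised by the test above.
# The import path is an assumption (these helpers appear to live in
# rucio.core.message); the round trip itself mirrors exactly what the test does:
#
#     from rucio.core.message import add_message, delete_messages, retrieve_messages
#
#     add_message(event_type='TEST', payload={'foo': True})
#     msgs = retrieve_messages(10)                      # fetch up to 10 queued messages
#     delete_messages([{'id': m['id'],                  # archive-and-delete bookkeeping:
#                       'created_at': m['created_at'],  # the full row is passed along so
#                       'updated_at': m['created_at'],  # it can be copied to the history
#                       'payload': str(m['payload']),
#                       'event_type': m['event_type']} for m in msgs])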
def test_pop_messages(self):
    """ MESSAGE (CORE): Test retrieve and delete messages """
    truncate_messages()
    for i in xrange(10):
        add_message(event_type='TEST',
                    payload={'foo': True,
                             'monty': 'python',
                             'number': i})
    tmp = retrieve_messages(10)
    to_delete = []
    for i in tmp:
        assert_is_instance(i['payload'], dict)
        assert_equal(i['payload']['foo'], True)
        assert_equal(i['payload']['monty'], 'python')
        assert_in(i['payload']['number'], xrange(100))
        to_delete.append(i['id'])
    delete_messages(to_delete)
    assert_equal(retrieve_messages(), [])
def deliver_emails(once=False, send_email=True, thread=0, bulk=1000, delay=10):
    '''
    Main loop to deliver emails via SMTP.
    '''
    logging.info('[email] starting - threads (%i) bulk (%i)', thread, bulk)

    executable = 'hermes [email]'
    hostname = socket.getfqdn()
    pid = os.getpid()
    heartbeat_thread = threading.current_thread()

    sanity_check(executable=executable, hostname=hostname)

    # Make an initial heartbeat so that all daemons have the correct worker number on the next try
    live(executable=executable, hostname=hostname, pid=pid, thread=heartbeat_thread)
    GRACEFUL_STOP.wait(1)

    email_from = config_get('messaging-hermes', 'email_from')

    while not GRACEFUL_STOP.is_set():
        heartbeat = live(executable, hostname, pid, heartbeat_thread)
        logging.debug('[email] %i:%i - bulk %i', heartbeat['assign_thread'],
                      heartbeat['nr_threads'], bulk)

        t_start = time.time()

        messages = retrieve_messages(bulk=bulk,
                                     thread=heartbeat['assign_thread'],
                                     total_threads=heartbeat['nr_threads'],
                                     event_type='email')

        if messages != []:
            to_delete = []
            for message in messages:
                logging.debug('[email] %i:%i - submitting: %s', heartbeat['assign_thread'],
                              heartbeat['nr_threads'], str(message))

                if PY2:
                    msg = MIMEText(message['payload']['body'].encode('utf-8'))
                else:
                    msg = MIMEText(message['payload']['body'])

                msg['From'] = email_from
                msg['To'] = ', '.join(message['payload']['to'])
                # Encode the subject only on Python 2; on Python 3 header values must stay str
                if PY2:
                    msg['Subject'] = message['payload']['subject'].encode('utf-8')
                else:
                    msg['Subject'] = message['payload']['subject']

                if send_email:
                    smtp = smtplib.SMTP()
                    smtp.connect()
                    smtp.sendmail(msg['From'], message['payload']['to'], msg.as_string())
                    smtp.quit()

                to_delete.append({'id': message['id'],
                                  'created_at': message['created_at'],
                                  'updated_at': message['created_at'],
                                  'payload': str(message['payload']),
                                  'event_type': 'email'})

                logging.debug('[email] %i:%i - submitting done: %s', heartbeat['assign_thread'],
                              heartbeat['nr_threads'], str(message['id']))

            delete_messages(to_delete)
            logging.info('[email] %i:%i - submitted %i messages', heartbeat['assign_thread'],
                         heartbeat['nr_threads'], len(to_delete))

        if once:
            break

        t_delay = delay - (time.time() - t_start)
        t_delay = t_delay if t_delay > 0 else 0
        if t_delay:
            logging.debug('[email] %i:%i - sleeping %s seconds', heartbeat['assign_thread'],
                          heartbeat['nr_threads'], t_delay)
            time.sleep(t_delay)

    logging.debug('[email] %i:%i - graceful stop requested', heartbeat['assign_thread'],
                  heartbeat['nr_threads'])

    die(executable, hostname, pid, heartbeat_thread)

    logging.debug('[email] %i:%i - graceful stop done', heartbeat['assign_thread'],
                  heartbeat['nr_threads'])
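# For reference, a sketch of the rucio.cfg options read by the delivery loops in
# this file. The option names are taken from the config_get() calls above and
# below; the values shown are illustrative only, not defaults:
#
#     [messaging-hermes]
#     email_from = rucio@example.org            # sender address for deliver_emails
#     destination = /topic/rucio.events         # broker destination for deliver_messages
#     use_ssl = True                            # SSL cert/key vs. username/password auth
#     port = 61613                              # broker port when use_ssl is True
#     nonssl_port = 61613                       # broker port when use_ssl is False
#     username = hermes                         # only read when use_ssl is False
#     password = secret                         # only read when use_ssl is False
#     ssl_key_file = /etc/grid-security/hostkey.pem
#     ssl_cert_file = /etc/grid-security/hostcert.pem
#     broker_virtual_host = rucio               # optional STOMP virtual host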
def deliver_messages(once=False, brokers_resolved=None, thread=0, bulk=1000, delay=10,
                     broker_timeout=3, broker_retry=3):
    '''
    Main loop to deliver messages to a broker.
    '''
    logging.info('[broker] starting - threads (%i) bulk (%i)', thread, bulk)

    if not brokers_resolved:
        logging.fatal('No brokers resolved.')
        return

    if not broker_timeout:  # Allow zero in config
        broker_timeout = None

    logging.info('[broker] checking authentication method')
    use_ssl = True
    try:
        use_ssl = config_get_bool('messaging-hermes', 'use_ssl')
    except:
        logging.info('[broker] could not find use_ssl in configuration -- please update your rucio.cfg')

    port = config_get_int('messaging-hermes', 'port')
    vhost = config_get('messaging-hermes', 'broker_virtual_host', raise_exception=False)
    if not use_ssl:
        username = config_get('messaging-hermes', 'username')
        password = config_get('messaging-hermes', 'password')
        port = config_get_int('messaging-hermes', 'nonssl_port')

    conns = []
    for broker in brokers_resolved:
        if not use_ssl:
            logging.info('[broker] setting up username/password authentication: %s' % broker)
            con = stomp.Connection12(host_and_ports=[(broker, port)],
                                     vhost=vhost,
                                     keepalive=True,
                                     timeout=broker_timeout)
        else:
            logging.info('[broker] setting up ssl cert/key authentication: %s' % broker)
            con = stomp.Connection12(host_and_ports=[(broker, port)],
                                     use_ssl=True,
                                     ssl_key_file=config_get('messaging-hermes', 'ssl_key_file'),
                                     ssl_cert_file=config_get('messaging-hermes', 'ssl_cert_file'),
                                     vhost=vhost,
                                     keepalive=True,
                                     timeout=broker_timeout)

        con.set_listener('rucio-hermes',
                         HermesListener(con.transport._Transport__host_and_ports[0]))
        conns.append(con)

    destination = config_get('messaging-hermes', 'destination')

    executable = 'hermes [broker]'
    hostname = socket.getfqdn()
    pid = os.getpid()
    heartbeat_thread = threading.current_thread()

    # Make an initial heartbeat so that all daemons have the correct worker number on the next try
    sanity_check(executable=executable, hostname=hostname, pid=pid, thread=heartbeat_thread)
    GRACEFUL_STOP.wait(1)

    while not GRACEFUL_STOP.is_set():
        try:
            t_start = time.time()

            heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=heartbeat_thread)

            logging.debug('[broker] %i:%i - using: %s', heartbeat['assign_thread'],
                          heartbeat['nr_threads'],
                          [conn.transport._Transport__host_and_ports[0][0] for conn in conns])

            messages = retrieve_messages(bulk=bulk,
                                         thread=heartbeat['assign_thread'],
                                         total_threads=heartbeat['nr_threads'])

            if messages:
                logging.debug('[broker] %i:%i - retrieved %i messages',
                              heartbeat['assign_thread'], heartbeat['nr_threads'], len(messages))
                to_delete = []
                for message in messages:
                    try:
                        conn = random.sample(conns, 1)[0]
                        if not conn.is_connected():
                            host_and_ports = conn.transport._Transport__host_and_ports[0][0]
                            record_counter('daemons.hermes.reconnect.%s' % host_and_ports.split('.')[0])

                            conn.start()
                            if not use_ssl:
                                logging.info('[broker] %i:%i - connecting with USERPASS to %s',
                                             heartbeat['assign_thread'], heartbeat['nr_threads'],
                                             host_and_ports)
                                conn.connect(username, password, wait=True)
                            else:
                                logging.info('[broker] %i:%i - connecting with SSL to %s',
                                             heartbeat['assign_thread'], heartbeat['nr_threads'],
                                             host_and_ports)
                                conn.connect(wait=True)

                        conn.send(body=json.dumps({'event_type': str(message['event_type']).lower(),
                                                   'payload': message['payload'],
                                                   'created_at': str(message['created_at'])}),
                                  destination=destination,
                                  headers={'persistent': 'true',
                                           'event_type': str(message['event_type']).lower()})

                        to_delete.append({'id': message['id'],
                                          'created_at': message['created_at'],
                                          'updated_at': message['created_at'],
                                          'payload': json.dumps(message['payload']),
                                          'event_type': message['event_type']})
                    except ValueError:
                        logging.warn('Cannot serialize payload to JSON: %s', str(message['payload']))
                        to_delete.append({'id': message['id'],
                                          'created_at': message['created_at'],
                                          'updated_at': message['created_at'],
                                          'payload': str(message['payload']),
                                          'event_type': message['event_type']})
                        continue
                    except stomp.exception.NotConnectedException as error:
                        logging.warn('Could not deliver message due to NotConnectedException: %s',
                                     str(error))
                        continue
                    except stomp.exception.ConnectFailedException as error:
                        logging.warn('Could not deliver message due to ConnectFailedException: %s',
                                     str(error))
                        continue
                    except Exception as error:
                        logging.warn('Could not deliver message: %s', str(error))
                        logging.critical(traceback.format_exc())
                        continue

                    if str(message['event_type']).lower().startswith('transfer') or \
                       str(message['event_type']).lower().startswith('stagein'):
                        logging.debug('[broker] %i:%i - event_type: %s, scope: %s, name: %s, rse: %s, request-id: %s, transfer-id: %s, created_at: %s',
                                      heartbeat['assign_thread'], heartbeat['nr_threads'],
                                      str(message['event_type']).lower(),
                                      message['payload'].get('scope', None),
                                      message['payload'].get('name', None),
                                      message['payload'].get('dst-rse', None),
                                      message['payload'].get('request-id', None),
                                      message['payload'].get('transfer-id', None),
                                      str(message['created_at']))
                    elif str(message['event_type']).lower().startswith('dataset'):
                        logging.debug('[broker] %i:%i - event_type: %s, scope: %s, name: %s, rse: %s, rule-id: %s, created_at: %s)',
                                      heartbeat['assign_thread'], heartbeat['nr_threads'],
                                      str(message['event_type']).lower(),
                                      message['payload']['scope'],
                                      message['payload']['name'],
                                      message['payload']['rse'],
                                      message['payload']['rule_id'],
                                      str(message['created_at']))
                    elif str(message['event_type']).lower().startswith('deletion'):
                        if 'url' not in message['payload']:
                            message['payload']['url'] = 'unknown'
                        logging.debug('[broker] %i:%i - event_type: %s, scope: %s, name: %s, rse: %s, url: %s, created_at: %s)',
                                      heartbeat['assign_thread'], heartbeat['nr_threads'],
                                      str(message['event_type']).lower(),
                                      message['payload']['scope'],
                                      message['payload']['name'],
                                      message['payload']['rse'],
                                      message['payload']['url'],
                                      str(message['created_at']))
                    else:
                        logging.debug('[broker] %i:%i - other message: %s',
                                      heartbeat['assign_thread'], heartbeat['nr_threads'], message)

                delete_messages(to_delete)
                logging.info('[broker] %i:%i - submitted %i messages',
                             heartbeat['assign_thread'], heartbeat['nr_threads'], len(to_delete))

            if once:
                break

        except NoResultFound:
            # silence this error: https://its.cern.ch/jira/browse/RUCIO-1699
            pass
        except:
            logging.critical(traceback.format_exc())

        t_delay = delay - (time.time() - t_start)
        t_delay = t_delay if t_delay > 0 else 0
        if t_delay:
            logging.debug('[broker] %i:%i - sleeping %s seconds',
                          heartbeat['assign_thread'], heartbeat['nr_threads'], t_delay)
            time.sleep(t_delay)

    for conn in conns:
        try:
            conn.disconnect()
        except Exception:
            pass

    logging.debug('[broker] %i:%i - graceful stop requested',
                  heartbeat['assign_thread'], heartbeat['nr_threads'])

    die(executable, hostname, pid, heartbeat_thread)

    logging.debug('[broker] %i:%i - graceful stop done',
                  heartbeat['assign_thread'], heartbeat['nr_threads'])
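# deliver_messages() above registers a HermesListener on each connection, but the
# class is not shown in this section. A minimal sketch of what such a stomp.py
# listener could look like, assuming it only needs to log broker-side events; the
# constructor argument is the (host, port) tuple taken from the connection, and
# the on_error(headers, body) signature matches the stomp.py 4.x API that the
# Connection12/conn.start() calls above imply:

class HermesListener(stomp.ConnectionListener):
    '''Listener logging STOMP frames received from the broker.'''

    def __init__(self, broker):
        self.__broker = broker

    def on_error(self, headers, body):
        # Called by stomp.py when the broker sends an ERROR frame.
        logging.error('[broker] [%s]: %s', self.__broker, body)

    def on_disconnected(self):
        # Called by stomp.py when the TCP connection is lost; the main loop
        # reconnects lazily via conn.is_connected().
        logging.warning('[broker] [%s]: disconnected', self.__broker)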
def hermes2(once=False, thread=0, bulk=1000, sleep_time=10):
    """
    Creates a Hermes2 Worker that can submit messages to different services
    (InfluXDB, ElasticSearch, ActiveMQ).
    The list of services needs to be defined in the config service in the hermes section.
    The list of endpoints needs to be defined in rucio.cfg in the hermes section.

    :param once:       Run only once.
    :param thread:     Thread number at startup.
    :param bulk:       The number of requests to process.
    :param sleep_time: Time between two cycles.
    """
    executable = 'hermes2'
    hostname = socket.getfqdn()
    pid = os.getpid()
    hb_thread = threading.current_thread()
    heartbeat.sanity_check(executable=executable, hostname=hostname, pid=pid, thread=hb_thread)
    heart_beat = heartbeat.live(executable, hostname, pid, hb_thread)

    # Make an initial heartbeat so that all daemons have the correct worker number on the next try
    GRACEFUL_STOP.wait(10)
    heart_beat = heartbeat.live(executable, hostname, pid, hb_thread, older_than=3600)
    prepend_str = 'hermes2[%i/%i] : ' % (heart_beat['assign_thread'], heart_beat['nr_threads'])
    logger = formatted_logger(logging.log, prepend_str + '%s')

    try:
        services_list = get('hermes', 'services_list')
        services_list = services_list.split(',')
    except ConfigNotFound:
        logger(logging.DEBUG, 'No services found, exiting')
        sys.exit(1)

    if 'influx' in services_list:
        try:
            influx_endpoint = config_get('hermes', 'influxdb_endpoint', False, None)
            if not influx_endpoint:
                logger(logging.ERROR,
                       'InfluxDB defined in the services list, but no endpoint can be found. Exiting')
                sys.exit(1)
        except Exception as err:
            logger(logging.ERROR, str(err))

    if 'elastic' in services_list:
        try:
            elastic_endpoint = config_get('hermes', 'elastic_endpoint', False, None)
            if not elastic_endpoint:
                logger(logging.ERROR,
                       'Elastic defined in the services list, but no endpoint can be found. Exiting')
                sys.exit(1)
        except Exception as err:
            logger(logging.ERROR, str(err))

    if 'activemq' in services_list:
        try:
            # activemq_endpoint = config_get('hermes', 'activemq_endpoint', False, None)
            conns, destination, username, password, use_ssl = setup_activemq(logger)
            if not conns:
                logger(logging.ERROR, 'ActiveMQ defined in the services list, cannot be setup')
                sys.exit(1)
        except Exception as err:
            logger(logging.ERROR, str(err))

    while not GRACEFUL_STOP.is_set():
        message_status = copy.deepcopy(services_list)
        message_statuses = {}
        stime = time.time()
        try:
            start_time = time.time()
            heart_beat = heartbeat.live(executable, hostname, pid, hb_thread, older_than=3600)
            prepend_str = 'hermes2[%i/%i] : ' % (heart_beat['assign_thread'],
                                                 heart_beat['nr_threads'])
            logger = formatted_logger(logging.log, prepend_str + '%s')
            messages = retrieve_messages(bulk=bulk,
                                         thread=heart_beat['assign_thread'],
                                         total_threads=heart_beat['nr_threads'])

            if messages:
                for message in messages:
                    message_statuses[message['id']] = copy.deepcopy(services_list)
                logger(logging.DEBUG, 'Retrieved %i messages in %s seconds',
                       len(messages), time.time() - start_time)

                if 'influx' in message_status:
                    t_time = time.time()
                    logger(logging.DEBUG, 'Will submit to influxDB')
                    try:
                        state = aggregate_to_influx(messages=messages,
                                                    bin_size='1m',
                                                    endpoint=influx_endpoint,
                                                    logger=logger)
                    except Exception as error:
                        logger(logging.ERROR, 'Error sending to InfluxDB : %s', str(error))
                        state = 500
                    if state in [204, 200]:
                        logger(logging.INFO,
                               'Messages successfully submitted to influxDB in %s seconds',
                               time.time() - t_time)
                        for message in messages:
                            message_statuses[message['id']].remove('influx')
                    else:
                        logger(logging.INFO, 'Failure to submit to influxDB')

                if 'elastic' in message_status:
                    t_time = time.time()
                    try:
                        state = submit_to_elastic(messages=messages,
                                                  endpoint=elastic_endpoint,
                                                  logger=logger)
                    except Exception as error:
                        logger(logging.ERROR, 'Error sending to Elastic : %s', str(error))
                        state = 500
                    if state in [200, 204]:
                        logger(logging.INFO,
                               'Messages successfully submitted to elastic in %s seconds',
                               time.time() - t_time)
                        for message in messages:
                            message_statuses[message['id']].remove('elastic')
                    else:
                        logger(logging.INFO, 'Failure to submit to elastic')

                if 'emails' in message_status:
                    t_time = time.time()
                    try:
                        to_delete = deliver_emails(messages=messages, logger=logger)
                        logger(logging.INFO,
                               'Messages successfully submitted by emails in %s seconds',
                               time.time() - t_time)
                        for message_id in to_delete:
                            message_statuses[message_id].remove('emails')
                    except Exception as error:
                        logger(logging.ERROR, 'Error sending email : %s', str(error))

                if 'activemq' in message_status:
                    t_time = time.time()
                    try:
                        to_delete = deliver_to_activemq(messages=messages,
                                                        conns=conns,
                                                        destination=destination,
                                                        username=username,
                                                        password=password,
                                                        use_ssl=use_ssl,
                                                        logger=logger)
                        logger(logging.INFO,
                               'Messages successfully submitted to ActiveMQ in %s seconds',
                               time.time() - t_time)
                        for message_id in to_delete:
                            message_statuses[message_id].remove('activemq')
                    except Exception as error:
                        logger(logging.ERROR, 'Error sending to ActiveMQ : %s', str(error))

                to_delete = []
                to_update = {}
                for message in messages:
                    status = message_statuses[message['id']]
                    if not status:
                        to_delete.append({'id': message['id'],
                                          'created_at': message['created_at'],
                                          'updated_at': message['created_at'],
                                          'payload': str(message['payload']),
                                          'event_type': message['event_type']})
                    else:
                        status = ",".join(status)
                        if status not in to_update:
                            to_update[status] = []
                        to_update[status].append({'id': message['id'],
                                                  'created_at': message['created_at'],
                                                  'updated_at': message['created_at'],
                                                  'payload': str(message['payload']),
                                                  'event_type': message['event_type']})
                logger(logging.INFO, 'Deleting %s messages', len(to_delete))
                delete_messages(messages=to_delete)
                for status in to_update:
                    logger(logging.INFO,
                           'Failure to submit %s messages to %s. Will update the message status',
                           str(len(to_update[status])), status)
                    update_messages_services(messages=to_update[status], services=status)

            if once:
                break
            daemon_sleep(start_time=stime, sleep_time=sleep_time,
                         graceful_stop=GRACEFUL_STOP, logger=logger)

        except Exception:
            logger(logging.ERROR, "Failed to submit messages", exc_info=True)
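# A self-contained illustration (plain Python, hypothetical helper name) of the
# delete-vs-update decision at the end of a hermes2() cycle: a message is deleted
# only once every service has accepted it; otherwise it is re-queued for just the
# services that still failed, which is what update_messages_services() receives.

def route_messages(message_statuses, messages):
    """Split messages into fully delivered (delete) and partially failed (update)."""
    to_delete, to_update = [], {}
    for message in messages:
        remaining = message_statuses[message['id']]
        if not remaining:  # every service accepted the message
            to_delete.append(message['id'])
        else:              # retry only the services still listed
            to_update.setdefault(','.join(remaining), []).append(message['id'])
    return to_delete, to_update

# Example: with services_list = ['influx', 'activemq'], a message that InfluxDB
# accepted but ActiveMQ did not ends the cycle with status ['activemq']:
#     route_messages({1: [], 2: ['activemq']}, [{'id': 1}, {'id': 2}])
#     -> ([1], {'activemq': [2]})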
def deliver_messages(once=False, brokers_resolved=None, process=0, total_processes=1,
                     thread=0, total_threads=1, bulk=1000):
    """
    Main loop to deliver messages to a broker.
    """
    logging.info('hermes starting - process (%i/%i) thread (%i/%i) bulk (%i)'
                 % (process, total_processes, thread, total_threads, bulk))

    conns = []
    for broker in brokers_resolved:
        conns.append(stomp.Connection(host_and_ports=[(broker, config_get_int('messaging-hermes', 'port'))],
                                      use_ssl=True,
                                      ssl_key_file=config_get('messaging-hermes', 'ssl_key_file'),
                                      ssl_cert_file=config_get('messaging-hermes', 'ssl_cert_file'),
                                      ssl_version=ssl.PROTOCOL_TLSv1))

    logging.info('hermes started - process (%i/%i) thread (%i/%i) bulk (%i)'
                 % (process, total_processes, thread, total_threads, bulk))

    while not graceful_stop.is_set():
        try:
            for conn in conns:
                if not conn.is_connected():
                    logging.info('connecting to %s' % conn.transport._Transport__host_and_ports[0][0])
                    record_counter('daemons.hermes.reconnect.%s'
                                   % conn.transport._Transport__host_and_ports[0][0].split('.')[0])
                    conn.start()
                    conn.connect()

            tmp = retrieve_messages(bulk=bulk,
                                    process=process,
                                    total_processes=total_processes,
                                    thread=thread,
                                    total_threads=total_threads)
            if tmp == []:
                time.sleep(1)
            else:
                to_delete = []
                for t in tmp:
                    try:
                        random.sample(conns, 1)[0].send(body=json.dumps({'event_type': str(t['event_type']).lower(),
                                                                         'payload': t['payload'],
                                                                         'created_at': str(t['created_at'])}),
                                                        destination=config_get('messaging-hermes', 'destination'))
                    except ValueError:
                        logging.warn('Cannot serialize payload to JSON: %s' % str(t['payload']))
                        continue
                    except Exception as e:
                        logging.warn('Could not deliver message: %s' % str(e))
                        continue

                    to_delete.append(t['id'])

                    if str(t['event_type']).lower().startswith("transfer"):
                        logging.debug('%i:%i - event_type: %s, scope: %s, name: %s, rse: %s, request-id: %s, transfer-id: %s, created_at: %s'
                                      % (process, thread, str(t['event_type']).lower(),
                                         t['payload']['scope'], t['payload']['name'],
                                         t['payload']['dst-rse'], t['payload']['request-id'],
                                         t['payload']['transfer-id'], str(t['created_at'])))
                    elif str(t['event_type']).lower().startswith("dataset"):
                        logging.debug('%i:%i - event_type: %s, scope: %s, name: %s, rse: %s, rule-id: %s, created_at: %s)'
                                      % (process, thread, str(t['event_type']).lower(),
                                         t['payload']['scope'], t['payload']['name'],
                                         t['payload']['rse'], t['payload']['rule_id'],
                                         str(t['created_at'])))
                    elif str(t['event_type']).lower().startswith("deletion"):
                        if 'url' not in t['payload']:
                            t['payload']['url'] = 'unknown'
                        logging.debug('%i:%i - event_type: %s, scope: %s, name: %s, rse: %s, url: %s, created_at: %s)'
                                      % (process, thread, str(t['event_type']).lower(),
                                         t['payload']['scope'], t['payload']['name'],
                                         t['payload']['rse'], t['payload']['url'],
                                         str(t['created_at'])))
                    else:
                        logging.debug('%i:%i - other message: %s' % (process, thread, t))

                delete_messages(to_delete)
        except:
            logging.critical(traceback.format_exc())

    logging.debug('%i:%i - graceful stop requested' % (process, thread))

    for conn in conns:
        try:
            conn.disconnect()
        except:
            pass

    logging.debug('%i:%i - graceful stop done' % (process, thread))
def hermes2(once=False, thread=0, bulk=1000, sleep_time=10):
    """
    Creates a Hermes2 Worker that can submit messages to different services
    (InfluXDB, ElasticSearch, ActiveMQ).
    The list of services needs to be defined in the config service in the hermes section.
    The list of endpoints needs to be defined in rucio.cfg in the hermes section.

    :param once:       Run only once.
    :param thread:     Thread number at startup.
    :param bulk:       The number of requests to process.
    :param sleep_time: Time between two cycles.
    """
    try:
        services_list = get('hermes', 'services_list')
        services_list = services_list.split(',')
    except ConfigNotFound:
        logging.debug('No services found, exiting')
        sys.exit(1)

    if 'influx' in services_list:
        try:
            influx_endpoint = config_get('hermes', 'influxdb_endpoint', False, None)
            if not influx_endpoint:
                logging.error('InfluxDB defined in the services list, but no endpoint can be found. Exiting')
                sys.exit(1)
        except Exception as err:
            logging.error(err)

    if 'elastic' in services_list:
        try:
            elastic_endpoint = config_get('hermes', 'elastic_endpoint', False, None)
            if not elastic_endpoint:
                logging.error('Elastic defined in the services list, but no endpoint can be found. Exiting')
                sys.exit(1)
        except Exception as err:
            logging.error(err)

    if 'activemq' in services_list:
        try:
            activemq_endpoint = config_get('hermes', 'activemq_endpoint', False, None)
            if not activemq_endpoint:
                logging.error('ActiveMQ defined in the services list, but no endpoint can be found. Exiting')
                sys.exit(1)
        except Exception as err:
            logging.error(err)

    executable = 'hermes2'
    hostname = socket.getfqdn()
    pid = os.getpid()
    hb_thread = threading.current_thread()
    heartbeat.sanity_check(executable=executable, hostname=hostname, pid=pid, thread=hb_thread)
    heart_beat = heartbeat.live(executable, hostname, pid, hb_thread)
    prepend_str = 'Thread [%i/%i] : ' % (heart_beat['assign_thread'], heart_beat['nr_threads'])

    # Make an initial heartbeat so that all daemons have the correct worker number on the next try
    GRACEFUL_STOP.wait(10)
    heart_beat = heartbeat.live(executable, hostname, pid, hb_thread, older_than=3600)

    while not GRACEFUL_STOP.is_set():
        message_status = deepcopy(services_list)
        stime = time.time()
        try:
            start_time = time.time()
            heart_beat = heartbeat.live(executable, hostname, pid, hb_thread, older_than=3600)
            prepend_str = 'Thread [%i/%i] : ' % (heart_beat['assign_thread'],
                                                 heart_beat['nr_threads'])
            messages = retrieve_messages(bulk=bulk,
                                         thread=heart_beat['assign_thread'],
                                         total_threads=heart_beat['nr_threads'])

            if messages:
                logging.debug('%s Retrieved %i messages in %s seconds',
                              prepend_str, len(messages), time.time() - start_time)

                if 'influx' in message_status:
                    logging.debug('%s Will submit to influxDB', prepend_str)
                    state = aggregate_to_influx(messages,
                                                bin_size='1m',
                                                endpoint=influx_endpoint,
                                                prepend_str=prepend_str)
                    if state in [204, 200]:
                        logging.info('%s Messages successfully submitted to influxDB', prepend_str)
                        message_status.remove('influx')
                    else:
                        logging.info('%s Failure to submit to influxDB', prepend_str)

                if 'elastic' in message_status:
                    state = submit_to_elastic(messages,
                                              endpoint=elastic_endpoint,
                                              prepend_str=prepend_str)
                    if state in [200, 204]:
                        logging.info('%s Messages successfully submitted to elastic', prepend_str)
                        message_status.remove('elastic')
                    else:
                        logging.info('%s Failure to submit to elastic', prepend_str)

                to_delete_or_update = []
                for message in messages:
                    # Keep the message's own event_type for the history bookkeeping
                    to_delete_or_update.append({'id': message['id'],
                                                'created_at': message['created_at'],
                                                'updated_at': message['created_at'],
                                                'payload': str(message['payload']),
                                                'event_type': message['event_type']})
                if message_status == []:
                    delete_messages(messages=to_delete_or_update)
                else:
                    logging.info('%s Failure to submit to one service. Will update the message status',
                                 prepend_str)
                    update_messages_services(messages=to_delete_or_update,
                                             services=",".join(message_status))

            if once:
                break

            tottime = time.time() - stime
            if tottime < sleep_time:
                logging.info('%s Will sleep for %s seconds', prepend_str, sleep_time - tottime)
                time.sleep(sleep_time - tottime)

        except:
            logging.critical(traceback.format_exc())
def deliver_emails(once=False, send_email=True, thread=0, bulk=1000, delay=60, sleep_time=60):
    '''
    Main loop to deliver emails via SMTP.
    '''
    logging.info('[email] starting - threads (%i) bulk (%i)', thread, bulk)

    # Backwards-compatibility: if only the deprecated delay parameter was
    # changed from its default, honour it as the sleep time
    if sleep_time == deliver_emails.__defaults__[5] and delay != deliver_emails.__defaults__[4]:
        sleep_time = delay

    executable = 'hermes [email]'
    hostname = socket.getfqdn()
    pid = os.getpid()
    heartbeat_thread = threading.current_thread()

    sanity_check(executable=executable, hostname=hostname)

    # Make an initial heartbeat so that all daemons have the correct worker number on the next try
    heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=heartbeat_thread)
    prepend_str = 'hermes-email [%i/%i] : ' % (heartbeat['assign_thread'], heartbeat['nr_threads'])
    logger = formatted_logger(logging.log, prepend_str + '%s')

    GRACEFUL_STOP.wait(1)

    email_from = config_get('messaging-hermes', 'email_from')

    while not GRACEFUL_STOP.is_set():
        heartbeat = live(executable, hostname, pid, heartbeat_thread)
        prepend_str = 'hermes-email [%i/%i] : ' % (heartbeat['assign_thread'],
                                                   heartbeat['nr_threads'])
        logger = formatted_logger(logging.log, prepend_str + '%s')
        logger(logging.DEBUG, 'bulk %i', bulk)

        t_start = time.time()

        messages = retrieve_messages(bulk=bulk,
                                     thread=heartbeat['assign_thread'],
                                     total_threads=heartbeat['nr_threads'],
                                     event_type='email')

        if messages != []:
            to_delete = []
            for message in messages:
                logger(logging.DEBUG, 'submitting: %s', str(message))

                if PY2:
                    msg = MIMEText(message['payload']['body'].encode('utf-8'))
                else:
                    msg = MIMEText(message['payload']['body'])

                msg['From'] = email_from
                msg['To'] = ', '.join(message['payload']['to'])
                if PY2:
                    msg['Subject'] = message['payload']['subject'].encode('utf-8')
                else:
                    msg['Subject'] = message['payload']['subject']

                if send_email:
                    smtp = smtplib.SMTP()
                    smtp.connect()
                    smtp.sendmail(msg['From'], message['payload']['to'], msg.as_string())
                    smtp.quit()

                to_delete.append({'id': message['id'],
                                  'created_at': message['created_at'],
                                  'updated_at': message['created_at'],
                                  'payload': str(message['payload']),
                                  'event_type': 'email'})

                logger(logging.DEBUG, 'submitting done: %s', str(message['id']))

            delete_messages(to_delete)
            logger(logging.INFO, 'submitted %i messages', len(to_delete))

        if once:
            break

        if len(messages) < bulk:
            logger(logging.INFO, 'Only %d messages, which is less than the bulk %d, will sleep',
                   len(messages), bulk)
            daemon_sleep(start_time=t_start, sleep_time=sleep_time, graceful_stop=GRACEFUL_STOP)

    logger(logging.DEBUG, 'graceful stop requested')

    die(executable, hostname, pid, heartbeat_thread)

    logger(logging.DEBUG, 'graceful stop done')
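# The delivery loops in this file are typically driven by a run()/stop() pair in
# Rucio daemons. A minimal sketch of that pattern, modelled on other Rucio
# daemons rather than taken from this section (the thread fan-out and parameter
# names here are assumptions):

def stop(signum=None, frame=None):
    '''Graceful exit: make every loop leave its "while not GRACEFUL_STOP.is_set()" condition.'''
    GRACEFUL_STOP.set()

def run(once=False, threads=1, bulk=1000):
    '''Spawn one deliver_emails worker per requested thread and wait for them.'''
    if once:
        deliver_emails(once=True, bulk=bulk)
        return
    workers = [threading.Thread(target=deliver_emails,
                                kwargs={'thread': i, 'bulk': bulk})
               for i in range(threads)]
    for worker in workers:
        worker.start()
    # Join with a timeout so a stop() triggered by a signal is honoured promptly
    while any(worker.is_alive() for worker in workers):
        for worker in workers:
            worker.join(timeout=3.14)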