def emails_run(qEmails):
    """Consume events from *qEmails* and send notification emails.

    Runs forever: pulls raw activity payloads off the queue, wraps each in an
    Event, works out recipients / subject / template / link according to the
    event type and status, sends the email, and dispatches the event (with
    ``notify`` cleared) back to the DB so it is only emailed once.

    :param qEmails: queue of raw event payloads (as accepted by ``Event``).
    """
    # db connection is shared between threads
    dbconnection = connect()
    while True:
        try:
            event = Event(qEmails.get())
        except Exception as e:
            logger.error("Problem with event: {}".format(e))
            continue
        else:
            if event.notify:
                # We try to send the email only once
                event.notify = False
                dispatch(dbconnection, event)

                # Recipients
                # TODO: Send specific emails
                # Status did NOT change:
                # comments about an event carrying a message
                if event.status == event.previous_status:
                    logger.warning("TODO: send specific emails with messages")

                recipients = set()
                url = s.web['url']
                # str() guards against an int port in the config; the bare
                # concatenation raised TypeError in that case.
                if s.web['port'] and s.web['port'] != 80:
                    url = url + ':' + str(s.web['port'])
                buttonLabel = "View details"

                if event.object.type == ObjectType.PASSWORD:
                    recipients.add(User(event.object.id))
                    url = url + '/password/' + event.data['hashing']
                    subject, template = build_subject_and_template('password', event)
                    buttonLabel = "Change password"
                else:
                    if event.isPending():
                        # Find the authority of the event object.
                        # Then, according to the authority, collect the PI
                        # users as recipients (admins if no authority found).
                        try:
                            authority_id = event.data['authority']
                        except KeyError:
                            msg = 'Authority id not specified ({})'.format(event.id)
                            logger.error(msg)
                            event.logWarning('Authority not specified in event {}, email not sent'.format(event.id))
                        else:
                            authority = Authority(db.get(dbconnection, table='authorities', id=authority_id))
                            if not authority:
                                # get admin users
                                users = db.get(dbconnection, table='users')
                                for u in users:
                                    user = User(u)
                                    if user.isAdmin():
                                        logger.debug("user %s is admin" % user.id)
                                        recipients.add(user)
                            else:
                                for pi_id in authority.pi_users:
                                    recipients.add(User(pi_id))
                            if not recipients:
                                msg = 'Emails cannot be sent because no one is the PI of {}'.format(event.object.id)
                                logger.error(msg)
                                event.logWarning('No recipients could be found for event {}, email not sent'.format(event.id))
                        subject, template = build_subject_and_template('request', event)
                        buttonLabel = "Approve / Deny"
                        url = url + '/activity'
                    else:
                        if event.user:
                            recipients.add(User(event.user))
                        else:
                            # Look for the user email in the Event
                            if event.object.type == ObjectType.USER:
                                recipients.add(User({'email': event.data['email'],
                                                     'first_name': event.data['first_name'],
                                                     'last_name': event.data['last_name']}))
                            elif event.object.type == ObjectType.AUTHORITY:
                                for user in event.data['users']:
                                    recipients.add(User(user))
                            else:
                                for user in event.data['pi_users']:
                                    recipients.add(User(user))
                        # NOTE(review): if the event is neither success nor
                        # denied, `subject`/`template` stay unbound here and
                        # sendEmail() below raises NameError (caught by the
                        # except). Confirm whether other statuses can reach
                        # this branch.
                        if event.isSuccess():
                            subject, template = build_subject_and_template('approve', event)
                        elif event.isDenied():
                            subject, template = build_subject_and_template('deny', event)

                try:
                    sendEmail(event, recipients, subject, template, url, buttonLabel)
                except Exception as e:
                    import traceback
                    traceback.print_exc()
                    # Bug fix: the original interpolated traceback.print_exc(),
                    # which returns None; format_exc() returns the text.
                    msg = "Error in event {} while trying to send an email: {} {}".format(
                        event.id, e, traceback.format_exc())
                    logger.error(msg)
                    event.logWarning(msg)
                    continue
                finally:
                    # Persist the event (notify=False) whether or not the
                    # email went out.
                    dispatch(dbconnection, event)
def run():
    """Service entry point.

    Starts the email worker threads, subscribes to the 'emails' topic on the
    local ZMQ bus, and routes every notifiable event to the appropriate
    queue. Blocks forever on the subscription loop.
    """
    # Terminate cleanly on the usual service signals.
    for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGHUP):
        signal.signal(sig, receive_signal)

    threads = []

    def _spawn(worker, queue, count=1):
        # Start `count` daemon threads consuming from `queue`.
        for _ in range(count):
            worker_thread = threading.Thread(target=worker, args=(queue, ))
            worker_thread.daemon = True
            threads.append(worker_thread)
            worker_thread.start()

    qEmails = Queue()
    _spawn(manageEmails, qEmails)

    qConfirmEmails = Queue()
    _spawn(confirmEmails, qConfirmEmails)

    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.setsockopt_string(zmq.SUBSCRIBE, 'emails')
    socket.connect("tcp://localhost:6002")
    logger.info("[emails] Collecting updates from ZMQ bus for activity")

    while True:
        logger.debug("[emails]Change in emails feed")
        topic, zmqmessage = socket.recv_multipart()
        # NOTE(review): pickle.loads on bus messages is only safe if the bus
        # is fully trusted — confirm no untrusted publisher can reach :6002.
        activity = pickle.loads(zmqmessage)
        logger.debug("[emails]{0}: {1}".format(topic, activity))
        try:
            event = Event(activity['new_val'])
        except Exception as e:
            logger.error("Problem with event: {}".format(e))
            continue
        else:
            # Events with notify unset are ignored entirely.
            if not event.notify:
                continue
            if event.isConfirm():
                logger.debug("Add event %s to Confirm Email queue" % (event.id))
                qConfirmEmails.put(event)
            elif event.isPending():
                logger.debug("Add event %s to Email queue" % (event.id))
                qEmails.put(event)
            elif event.isDenied():
                logger.info("event {} is denied".format(event.id))
                logger.debug("Add event %s to Email queue" % (event.id))
                qEmails.put(event)
            elif event.isSuccess():
                logger.debug("Add event %s to Email queue" % (event.id))
                qEmails.put(event)

    # NOTE(review): unreachable — the loop above never breaks.
    logger.critical("Service emails stopped")
    # waits for the thread to finish
    for worker_thread in threads:
        worker_thread.join()
def post(self, id=None):
    """ ONLY FOR DEBUG

    Decode an 'event' payload from the request body, dispatch it to the
    activity table, then watch the changefeed until the dispatched event
    resolves and answer with its status.
    """
    # TODO: get user id from user logged in
    # NOTE: checks are done by the service, here we only dispatch the event
    try:
        # KeyError added: a JSON body without an 'event' key is just as
        # malformed as invalid JSON and was previously uncaught.
        data = escape.json_decode(self.request.body)['event']
    except (json.decoder.JSONDecodeError, KeyError) as e:
        logger.error(self.request.body)
        logger.exception("malformed request")
        self.set_status(400)
        self.finish(
            json.dumps({
                "return": {
                    "status": "error",
                    "messages": "malformed request"
                }
            }))
        # Bug fix: without this return the handler fell through with `data`
        # unbound and raised a NameError after the response was sent.
        return
    try:
        event = Event(data)
    except Exception as e:
        logger.exception("error in post activity")
        import traceback
        traceback.print_exc()
        self.set_status(500)
        # Bug fix: Exception has no `.message` attribute on Python 3;
        # str(e) is the portable spelling.
        self.finish(
            json.dumps(
                {"return": {
                    "status": "error",
                    "messages": str(e)
                }}))
    else:
        try:
            # XXX If watching all events, is scalability an issue?
            # changes sends back all the events that occured since it started...
            feed = yield changes(dbconnection=self.dbconnection, table='activity')
            # We need to watch the changes before dispatch, because the
            # service writing into the DB is faster than this process
            result = yield dispatch(self.dbconnection, event)
            event_id = result['generated_keys'][0]
            while (yield feed.fetch_next()):
                item = yield feed.next()
                ev = Event(item['new_val'])
                if ev.id == event_id:
                    # XXX trying to cleanup the Cursor, but it is not Working
                    # <class 'rethinkdb.net_tornado.TornadoCursor'>
                    # https://github.com/rethinkdb/rethinkdb/blob/next/drivers/python/rethinkdb/tornado_net/net_tornado.py
                    # https://github.com/rethinkdb/rethinkdb/blob/next/drivers/python/rethinkdb/net.py
                    # yield feed.close()
                    if ev.isError() or ev.isWarning():
                        self.set_status(500)
                        self.finish(
                            json.dumps(
                                {
                                    "return": {
                                        "status": ev.status,
                                        "messages": ev
                                    }
                                },
                                cls=myJSONEncoder))
                        # Bug fix: stop consuming the changefeed once the
                        # response is sent — the loop previously ran forever
                        # ("items are piling up...").
                        return
                    elif ev.isSuccess() or ev.isPending():
                        self.set_status(200)
                        self.finish(
                            json.dumps(
                                {
                                    "return": {
                                        "status": ev.status,
                                        "messages": ev
                                    }
                                },
                                cls=myJSONEncoder))
                        return
        except Exception as e:
            logger.exception("error in post activity")
            import traceback
            traceback.print_exc()
            self.set_status(500)
            # yield feed.close()
            # NOTE(review): serializing the raw exception relies on
            # myJSONEncoder knowing how to encode it — confirm, or use str(e).
            self.finish(
                json.dumps(
                    {
                        "return": {
                            "status": EventStatus.ERROR,
                            "messages": e
                        }
                    },
                    cls=myJSONEncoder))