def load_complete_events(store, events_limit=GLSettings.notification_limit):
    """
    Load pending (mail_sent == False) EventLogs and assemble them as OD()
    descriptions ready for the notification loop.

    This function does not serialize: it repackages the stored event
    description into an OD(). events_limit is the maximum number of events
    this function may return; which events actually get notified is decided
    later.

    :param store: the Storm store used for all queries
    :param events_limit: hard cap on the number of events returned
    :return: a list of OD() event descriptions
    """
    node_desc = db_admin_serialize_node(store, GLSettings.memory_copy.default_language)

    event_list = []

    # One result set reused for both the total count and the bounded slice
    # (previously two identical store.find() calls).
    pending = store.find(EventLogs, EventLogs.mail_sent == False)
    totaleventinqueue = pending.count()
    # Protection limit: scan at most 3x the notifiable amount, because some
    # events are filtered out below (receiver opted out of tip notification).
    storedevnts = pending[:events_limit * 3]

    # The Notification row is a singleton: fetch it once, not once per event
    # inside the loop (the per-event query was pure overhead).
    notification = store.find(Notification).one()

    debug_event_counter = {}
    for stev in storedevnts:
        if len(event_list) == events_limit:
            log.debug("Reached maximum number of event notifications doable on a single loop %d" % events_limit)
            break

        debug_event_counter.setdefault(stev.event_reference["kind"], 0)
        debug_event_counter[stev.event_reference["kind"]] += 1

        # Skip receivers that disabled tip notification.
        if not stev.description["receiver_info"]["tip_notification"]:
            continue

        eventcomplete = OD()

        # node level information are not stored in the node, but fetched now
        eventcomplete.notification_settings = admin_serialize_notification(
            notification, stev.description["receiver_info"]["language"]
        )
        eventcomplete.node_info = node_desc

        # event level information are decoded from DB in the old 'Event' namedtuple format:
        eventcomplete.receiver_info = stev.description["receiver_info"]
        eventcomplete.tip_info = stev.description["tip_info"]
        eventcomplete.subevent_info = stev.description["subevent_info"]
        eventcomplete.context_info = stev.description["context_info"]

        eventcomplete.type = stev.description["type"]  # 'Tip', 'Comment'
        eventcomplete.trigger = stev.event_reference["kind"]  # 'blah' ...

        eventcomplete.orm_id = stev.id

        event_list.append(eventcomplete)

    if debug_event_counter:
        if totaleventinqueue > (events_limit * 3):
            log.debug("load_complete_events: %s from %d Events" % (debug_event_counter, totaleventinqueue))
        else:
            log.debug(
                "load_complete_events: %s from %d Events, with a protection limit of %d"
                % (debug_event_counter, totaleventinqueue, events_limit * 3)
            )

    return event_list
def load_complete_events(store, events_limit=GLSettings.notification_limit):
    """
    Load pending (mail_sent == False) EventLogs and assemble them as OD()
    descriptions ready for the notification loop.

    This function does not serialize: it repackages the stored event
    description into an OD(). events_limit is the maximum number of events
    this function may return; which events actually get notified is decided
    later.

    :param store: the Storm store used for all queries
    :param events_limit: hard cap on the number of events returned
    :return: a list of OD() event descriptions
    """
    node_desc = db_admin_serialize_node(store, GLSettings.defaults.language)

    event_list = []

    # One result set reused for both the total count and the bounded slice
    # (previously two identical store.find() calls).
    pending = store.find(EventLogs, EventLogs.mail_sent == False)
    totaleventinqueue = pending.count()
    # Protection limit: scan at most 3x the notifiable amount, because some
    # events are filtered out below (receiver opted out of tip notification).
    storedevnts = pending[:events_limit * 3]

    # The Notification row is a singleton: fetch it once, not once per event
    # inside the loop (the per-event query was pure overhead).
    notification = store.find(Notification).one()

    debug_event_counter = {}
    for i, stev in enumerate(storedevnts):
        if len(event_list) == events_limit:
            log.debug("Maximum number of notification event reach (Mailflush) %d, after %d" %
                      (events_limit, i))
            break

        debug_event_counter.setdefault(stev.event_reference['kind'], 0)
        debug_event_counter[stev.event_reference['kind']] += 1

        # Skip receivers that disabled tip notification.
        if not stev.description['receiver_info']['tip_notification']:
            continue

        eventcomplete = OD()

        # node level information are not stored in the node, but fetched now
        eventcomplete.notification_settings = admin_serialize_notification(
            notification, stev.description['receiver_info']['language']
        )
        eventcomplete.node_info = node_desc

        # event level information are decoded from DB in the old 'Event' namedtuple format:
        eventcomplete.receiver_info = stev.description['receiver_info']
        eventcomplete.tip_info = stev.description['tip_info']
        eventcomplete.subevent_info = stev.description['subevent_info']
        eventcomplete.context_info = stev.description['context_info']

        eventcomplete.type = stev.description['type']  # 'Tip', 'Comment'
        eventcomplete.trigger = stev.event_reference['kind']  # 'blah' ...

        eventcomplete.orm_id = stev.id

        event_list.append(eventcomplete)

    if debug_event_counter:
        if totaleventinqueue > (events_limit * 3):
            log.debug("load_complete_events: %s from %d Events" %
                      (debug_event_counter, totaleventinqueue))
        else:
            log.debug("load_complete_events: %s from %d Events, with a protection limit of %d" %
                      (debug_event_counter, totaleventinqueue, events_limit * 3))

    return event_list
def load_complete_events(store, event_number=GLSettings.notification_limit):
    """
    Load pending (mail_sent == False) EventLogs, oldest first, and assemble
    them as OD() descriptions ready for the notification loop.

    _complete_ is explicit because this does not serialize: it repackages the
    stored event description into an OD(). event_number is the maximum number
    of events this function may return; which events actually get notified is
    decided later.

    :param store: the Storm store used for all queries
    :param event_number: hard cap on the number of events returned
    :return: a list of OD() event descriptions
    """
    node_desc = db_admin_serialize_node(store, GLSettings.defaults.language)

    event_list = []

    storedevnts = store.find(EventLogs, EventLogs.mail_sent == False)
    # Oldest events first, so notifications go out in arrival order.
    storedevnts.order_by(Asc(EventLogs.creation_date))

    # The Notification row is a singleton: fetch it once, not once per event
    # inside the loop (the per-event query was pure overhead).
    notification = store.find(Notification).one()

    debug_event_counter = {}
    for i, stev in enumerate(storedevnts):
        if len(event_list) == event_number:
            log.debug("Maximum number of notification event reach (Mailflush) %d, after %d" %
                      (event_number, i))
            break

        debug_event_counter.setdefault(stev.event_reference['kind'], 0)
        debug_event_counter[stev.event_reference['kind']] += 1

        # Skip receivers that disabled tip notification.
        if not stev.description['receiver_info']['tip_notification']:
            continue

        eventcomplete = OD()

        # node level information are not stored in the node, but fetched now
        eventcomplete.notification_settings = admin_serialize_notification(
            notification, stev.description['receiver_info']['language']
        )
        eventcomplete.node_info = node_desc

        # event level information are decoded from DB in the old 'Event' namedtuple format:
        eventcomplete.receiver_info = stev.description['receiver_info']
        eventcomplete.tip_info = stev.description['tip_info']
        eventcomplete.subevent_info = stev.description['subevent_info']
        eventcomplete.context_info = stev.description['context_info']

        eventcomplete.type = stev.description['type']  # 'Tip', 'Comment'
        eventcomplete.trigger = stev.event_reference['kind']  # 'blah' ...

        eventcomplete.orm_id = stev.id

        event_list.append(eventcomplete)

    if debug_event_counter:
        log.debug("load_complete_events: %s" % debug_event_counter)

    return event_list
def generate_anomaly_email(self, plausible_event):
    """
    Build a synthetic 'receiver_notification_limit_reached' event,
    reusing the node/receiver context of an existing plausible event.

    :param plausible_event: a complete event whose settings are borrowed
    :return: an OD() describing the anomaly notification
    """
    anomalevent = OD()
    # Table-driven construction; key order matches the historical layout.
    for key, value in (
        ('type', u'receiver_notification_limit_reached'),
        ('notification_settings', plausible_event.notification_settings),
        ('node_info', plausible_event.node_info),
        ('context_info', None),
        ('receiver_info', plausible_event.receiver_info),
        ('tip_info', None),
        ('subevent_info', None),
        ('orm_id', 0),  # sentinel: not backed by a stored EventLogs row
    ):
        setattr(anomalevent, key, value)
    return anomalevent
def filter_notification_event(notifque):
    """
    :param notifque: the current notification event queue
    :return: a 2-tuple (filtered event list, list of Storm EventLogs.id to be
             marked as sent without mailing)

    Basically performs two filterings; they are defined in:
      1) issue #444 — suppress File notifications that belong to a submission
         whose Tip notification is already queued
      2) issue #798 — enforce the per-receiver hourly email threshold
    """
    # Here we collect the Storm event of Files having as key the Tip
    files_event_by_tip = {}

    _tmp_list = []
    return_filtered_list = []
    # to be smoked Storm.id
    orm_id_to_be_skipped = []

    # First pass: record every Tip that has its own 'Tip' event queued.
    for ne in notifque:
        if ne['trigger'] == u'Tip':
            files_event_by_tip[ne['tip_info']['id']] = []

    log.debug("Filtering function: iterating over %d Tip" % len(files_event_by_tip))

    # Loop-invariant settings hoisted out of the per-event loops.
    notifications_disabled = GLSettings.memory_copy.disable_receiver_notification_emails
    threshold = GLSettings.memory_copy.notification_threshold_per_hour

    # files_event_by_tip now contains N keys with an empty list; a second
    # pass is needed because dict ordering cannot be relied upon.
    for ne in notifque:
        if notifications_disabled:
            orm_id_to_be_skipped.append(ne['orm_id'])
            continue

        if ne['trigger'] != u'File':
            _tmp_list.append(ne)
            continue

        # File event: drop it when a Tip event for the same submission exists.
        if ne['tip_info']['id'] in files_event_by_tip:
            orm_id_to_be_skipped.append(ne['orm_id'])
        else:
            _tmp_list.append(ne)

    if orm_id_to_be_skipped:
        if notifications_disabled:
            log.debug("All the %d mails will be marked as sent because the admin has disabled receivers notifications" %
                      len(orm_id_to_be_skipped))
        else:
            log.debug("Filtering function: Marked %d Files notification to be suppressed cause part of a submission" %
                      len(orm_id_to_be_skipped))

    # Second filtering (issue #798): per-receiver hourly threshold.
    for ne in _tmp_list:
        receiver_id = ne['receiver_info']['id']
        sent_emails = GLSettings.get_mail_counter(receiver_id)

        if sent_emails >= threshold:
            log.debug("Discarding email for receiver %s due to threshold already exceeded for the current hour" %
                      receiver_id)
            orm_id_to_be_skipped.append(ne['orm_id'])
            continue

        GLSettings.increment_mail_counter(receiver_id)
        if sent_emails + 1 >= threshold:
            log.info("Reached threshold of %d emails with limit of %d for receiver %s" % (
                sent_emails, threshold, receiver_id))

            # The last allowed email slot is used for an anomaly notification
            # instead of the triggering event, which is marked as skipped.
            return_filtered_list.append(_receiver_limit_anomaly(ne))
            orm_id_to_be_skipped.append(ne['orm_id'])
            continue

        return_filtered_list.append(ne)

    log.debug("Mails filtering completed passing from #%d to #%d events" %
              (len(notifque), len(return_filtered_list)))

    # return the new list of event and the list of Storm.id
    return return_filtered_list, orm_id_to_be_skipped


def _receiver_limit_anomaly(ne):
    """
    Build the synthetic 'receiver_notification_limit_reached' event derived
    from the event *ne* that exhausted the receiver's hourly email quota.
    """
    anomalyevent = OD()
    anomalyevent.type = u'receiver_notification_limit_reached'
    anomalyevent.notification_settings = ne.notification_settings
    anomalyevent.node_info = ne.node_info
    anomalyevent.context_info = None
    anomalyevent.receiver_info = ne.receiver_info
    anomalyevent.tip_info = None
    anomalyevent.subevent_info = None
    # Integer sentinel, consistent with generate_anomaly_email()
    # (previously the string '0').
    anomalyevent.orm_id = 0
    return anomalyevent