Example #1
def get_data(self, **kwargs):
    c = EventClass.objects.filter(name="Unknown | SNMP Trap").first()
    # Count active events per trap OID in a single server-side pass
    pipeline = [
        {"$match": {"event_class": c.id}},
        {"$project": {"vars": 1}},
        {"$group": {"_id": "$vars.trap_oid", "count": {"$sum": 1}}},
    ]
    oids = ActiveEvent._get_collection().aggregate(pipeline)
    data = [(e["_id"], MIB.get_name(e["_id"]), e["count"]) for e in oids]
    # Most frequent OIDs first
    data = sorted(data, key=lambda x: -x[2])
    return self.from_dataset(
        title=self.title,
        columns=[
            "OID",
            "Name",
            TableColumn("Count", format="integer", align="right", total="sum"),
        ],
        data=data,
    )
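
The three stages above do all of the counting server-side: $match keeps one event class, $project trims each document down to its vars, and $group counts events per trap OID. A minimal standalone sketch of the same pipeline against plain pymongo (the client, database, collection, and event class id below are placeholders; the report above goes through ActiveEvent._get_collection() instead):

from bson import ObjectId
from pymongo import MongoClient

coll = MongoClient()["noc"]["events"]  # placeholder database/collection
event_class_id = ObjectId("0" * 24)    # placeholder event class id
pipeline = [
    {"$match": {"event_class": event_class_id}},
    {"$project": {"vars": 1}},
    {"$group": {"_id": "$vars.trap_oid", "count": {"$sum": 1}}},
]
for row in coll.aggregate(pipeline):
    print(row["_id"], row["count"])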
Example #2
def event(request):
    path, cfg = request.param
    coll = cfg.get("$collection", COLLECTION_NAME)
    assert coll == COLLECTION_NAME, "Invalid collection %s" % coll
    ec = EventClass.get_by_name(
        cfg.get("eventclass__name", DEFAULT_EVENT_CLASS)
    )
    mo = ManagedObject(
        id=MO_ID,
        name=MO_NAME,
        address=MO_ADDRESS,
        profile=Profile.get_by_name(cfg.get("profile__name", DEFAULT_PROFILE))
    )
    now = datetime.datetime.now()
    data = cfg.get("data", {})
    source = data.pop("source", "other")
    event = ActiveEvent(
        timestamp=now,
        start_timestamp=now,
        managed_object=mo,
        source=source,
        raw_vars=data,
        repeats=1
    )
    request.fixturename = "events-%s" % cfg.get("uuid")
    # request.fspath = path
    return event, ec, cfg.get("vars", {})
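
The fixture returns an (event, event_class, expected_vars) triple, so a consuming test unpacks all three. A minimal sketch of such a test (the test body is hypothetical; MO_NAME comes from the fixture module):

def test_event_fixture(event):
    active_event, event_class, expected_vars = event
    # The fixture builds a fully populated synthetic event
    assert active_event.managed_object.name == MO_NAME
    assert active_event.repeats == 1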
Example #3
def get_data(self, **kwargs):
    c = EventClass.objects.filter(name="Unknown | SNMP Trap").first()
    # TODO: rework via aggregate. The function counts the OIDs in event
    # variables and checks whether they resolve to a known name
    pipeline = [
        {"$match": {"event_class": c.id}},
        {"$project": {"vars": 1}},
        {"$group": {"_id": "$vars.trap_oid", "count": {"$sum": 1}}},
    ]
    oids = ActiveEvent._get_collection().aggregate(pipeline)
    d = [(e["_id"], MIB.get_name(e["_id"]), e["count"]) for e in oids]
    # Keep only OIDs whose resolved name still looks unclassified
    data = [(o, n, c) for o, n, c in d if self.rx_unclassified.search(n)]
    return self.from_dataset(
        title=self.title,
        columns=[
            "OID",
            "Name",
            TableColumn("Count", format="integer", align="right", total="sum"),
        ],
        data=data,
    )
Example #4
async def raise_abduct_event(self, event: ActiveEvent) -> None:
    """
    Create a Cable Abduct event and dispose it to the correlator
    :param event: source event the abduct event is derived from
    :return:
    """
    if not self.cable_abduct_ecls:
        # Resolve the event class once and cache it
        self.cable_abduct_ecls = EventClass.get_by_name(CABLE_ABDUCT)
    abd_event = ActiveEvent(
        timestamp=event.timestamp,
        start_timestamp=event.timestamp,
        managed_object=event.managed_object,
        source=event.source,
        repeats=1,
        event_class=self.cable_abduct_ecls,
    )
    abd_event.save()
    await self.dispose_event(abd_event)
Example #5
def read_syslog(self, f):
    now = datetime.datetime.now()
    for line in f:
        # One ActiveEvent per syslog line; strip the trailing newline
        yield ActiveEvent(
            timestamp=now,
            start_timestamp=now,
            managed_object=self.managed_object,
            source="syslog",
            raw_vars={"collector": "default", "message": line[:-1]},
            repeats=1,
        )
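
A hedged usage sketch: reader stands in for an instance of the class that defines read_syslog, and the log path is arbitrary.

with open("/var/log/messages") as f:  # any syslog capture file
    for ev in reader.read_syslog(f):
        # Each yielded ActiveEvent carries the raw line in raw_vars["message"]
        print(ev.raw_vars["message"])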
Example #6
def get_event_from_hint(self, hint):
    """
    Get ActiveEvent from JSON hint
    :param hint:
    :return:
    """
    metrics["event_hints"] += 1
    e = ActiveEvent.from_json(hint)
    # Prevent TypeError: can't compare offset-naive and offset-aware datetimes
    # when calculating alarm timestamp
    e.timestamp = e.timestamp.replace(tzinfo=None)
    return e
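
The tzinfo reset above guards against a real stdlib pitfall: Python refuses to order offset-aware and offset-naive datetimes. The snippet below reproduces it in isolation:

import datetime

aware = datetime.datetime.now(datetime.timezone.utc)  # offset-aware
naive = datetime.datetime.now()                       # offset-naive
try:
    aware < naive
except TypeError:
    pass  # "can't compare offset-naive and offset-aware datetimes"
_ = aware.replace(tzinfo=None) < naive  # comparable once both are naive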
Example #7
def suppress_repeats(self, event: ActiveEvent) -> bool:
    """
    Suppress repeated events
    :param event:
    :return: True if the event was suppressed
    """
    se_id = self.suppress_filter.find(event)
    if not se_id:
        return False
    self.logger.info(
        "[%s|%s|%s] Suppressed by event %s",
        event.id,
        event.managed_object.name,
        event.managed_object.address,
        se_id,
    )
    # Update suppressing event
    ActiveEvent.log_suppression(se_id, event.timestamp)
    # Delete suppressed event
    metrics[CR_SUPPRESSED] += 1
    return True
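
The mechanics of self.suppress_filter are outside this snippet. A generic sketch of the underlying idea (the first event seen for a suppression key wins; later ones report the suppressor); this is an illustration, not NOC's actual filter:

seen = {}  # suppression key -> id of the first (suppressing) event

def find_suppressor(key, event_id):
    """Return the id of the suppressing event, or None if this one is first."""
    if key in seen:
        return seen[key]
    seen[key] = event_id
    return None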
Example #8
File: events.py Project: 0pt1on/noc
def handle_clean(self, options, events):
    before = options.get("before")
    if before:
        before = datetime.datetime.strptime(before, "%Y-%m-%d")
    else:
        self.print("Before is not set, using default")
        before = datetime.datetime.now() - DEFAULT_CLEAN
    force = options.get("force")
    aa = ActiveAlarm._get_collection()
    ah = ArchivedAlarm._get_collection()
    ae = ActiveEvent._get_collection()
    event_ts = ae.find_one({"timestamp": {"$lte": before}}, sort=[("timestamp", 1)])
    if not event_ts:
        self.print("No events before %s, nothing to clean" % before)
        return
    event_ts = event_ts["timestamp"]
    self.print("[events] Cleaning before %s ..." % before)
    bulk = []
    window = CLEAN_WINDOW
    while event_ts < before:
        # Collect event ids still referenced by active or archived alarms
        refer_event_ids = []
        for e in [aa, ah]:
            for ee in e.find(
                {"timestamp": {"$gte": event_ts, "$lte": event_ts + CLEAN_WINDOW}},
                {"opening_event": 1, "closing_event": 1},
            ):
                if "opening_event" in ee:
                    refer_event_ids += [ee["opening_event"]]
                if "closing_event" in ee:
                    refer_event_ids += [ee["closing_event"]]
        try:
            clear_qs = {
                "timestamp": {"$gte": event_ts, "$lte": event_ts + CLEAN_WINDOW},
                "_id": {"$nin": refer_event_ids},
            }
            self.print(
                "Interval: %s, %s; Count: %d"
                % (event_ts, event_ts + CLEAN_WINDOW, ae.count_documents(clear_qs))
            )
            bulk += [DeleteMany(clear_qs)]
            event_ts += window
            if window != CLEAN_WINDOW:
                window = CLEAN_WINDOW
        except DocumentTooLarge:
            # Too many referenced ids for one query document:
            # retry the same interval with a smaller window
            window = window // 2
            if window < datetime.timedelta(hours=1):
                self.die("Too many events to delete in interval %s" % window)
    if force:
        self.print("All data before %s will be removed from active events..." % before)
        for i in reversed(range(1, 10)):
            self.print("%d" % i)
            time.sleep(1)
        if bulk:
            ae.bulk_write(bulk)
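
The batching above relies on pymongo's bulk write API: DeleteMany operations are queued and flushed in one round trip. A self-contained illustration (collection name and cutoff are placeholders):

import datetime

from pymongo import DeleteMany, MongoClient

coll = MongoClient()["test"]["events"]  # placeholder collection
cutoff = datetime.datetime.now() - datetime.timedelta(days=30)
bulk = [DeleteMany({"timestamp": {"$lte": cutoff}})]
if bulk:  # bulk_write raises InvalidOperation on an empty list
    coll.bulk_write(bulk)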
Example #9
def lookup_event(self, event_id):
    """
    Lookup event by id.
    Relies on the cache already heated by the classifier
    :param event_id:
    :return: ActiveEvent instance or None
    """
    self.logger.info("[%s] Lookup event", event_id)
    e = ActiveEvent.get_by_id(event_id)
    if not e:
        self.logger.info("[%s] Event not found, skipping", event_id)
        metrics["event_lookup_failed"] += 1
    metrics["event_lookups"] += 1
    return e
Example #10
async def on_event(self, msg: Message):
    # Decode message
    event = orjson.loads(msg.value)
    object = event.get("object")
    data = event.get("data")
    # Event timestamp
    event_ts = datetime.datetime.fromtimestamp(event.get("ts"))
    # Generate or reuse existing object id
    event_id = ObjectId(event.get("id"))
    # Calculate message processing delay
    lag = (time.time() - float(msg.timestamp) / NS) * 1000
    metrics["lag_us"] = int(lag * 1000)
    self.logger.debug("[%s] Receiving new event: %s (Lag: %.2fms)",
                      event_id, data, lag)
    metrics[CR_PROCESSED] += 1
    # Resolve managed object
    mo = ManagedObject.get_by_id(object)
    if not mo:
        self.logger.info("[%s] Unknown managed object id %s. Skipping",
                         event_id, object)
        metrics[CR_UOBJECT] += 1
        return
    self.logger.info("[%s|%s|%s] Managed object found", event_id, mo.name,
                     mo.address)
    # Process event
    source = data.pop("source", "other")
    event = ActiveEvent(
        id=event_id,
        timestamp=event_ts,
        start_timestamp=event_ts,
        managed_object=mo,
        source=source,
        repeats=1,
    )  # raw_vars will be filled by classify_event()
    # Ignore event
    if self.patternset.find_ignore_rule(event, data):
        self.logger.debug("Ignored event %s vars %s", event, data)
        metrics[CR_IGNORED] += 1
        return
    # Classify event
    try:
        await self.classify_event(event, data)
    except Exception as e:
        self.logger.error("[%s|%s|%s] Failed to process event: %s",
                          event.id, mo.name, mo.address, e)
        metrics[CR_FAILED] += 1
        return
    self.logger.info("[%s|%s|%s] Event processed successfully", event.id,
                     mo.name, mo.address)
Example #11
def on_event(self,
             message,
             ts=None,
             object=None,
             data=None,
             id=None,
             *args,
             **kwargs):
    event_ts = datetime.datetime.fromtimestamp(ts)
    # Generate or reuse existing object id
    event_id = ObjectId(id)
    # Calculate message processing delay
    lag = (time.time() - ts) * 1000
    metrics["lag_us"] = int(lag * 1000)
    self.logger.debug("[%s] Receiving new event: %s (Lag: %.2fms)",
                      event_id, data, lag)
    metrics[CR_PROCESSED] += 1
    # Resolve managed object
    mo = ManagedObject.get_by_id(object)
    if not mo:
        self.logger.info("[%s] Unknown managed object id %s. Skipping",
                         event_id, object)
        metrics[CR_UOBJECT] += 1
        return True
    self.logger.info("[%s|%s|%s] Managed object found", event_id, mo.name,
                     mo.address)
    # Process event
    source = data.pop("source", "other")
    event = ActiveEvent(
        id=event_id,
        timestamp=event_ts,
        start_timestamp=event_ts,
        managed_object=mo,
        source=source,
        repeats=1,
    )  # raw_vars will be filled by classify_event()
    # Classify event
    try:
        self.classify_event(event, data)
    except Exception as e:
        self.logger.error("[%s|%s|%s] Failed to process event: %s",
                          event.id, mo.name, mo.address, e)
        metrics[CR_FAILED] += 1
        return False
    self.logger.info("[%s|%s|%s] Event processed successfully", event.id,
                     mo.name, mo.address)
    return True
Example #12
async def on_dispose_event(self, msg: Message) -> None:
    """
    Called on new dispose message
    """
    data = orjson.loads(msg.value)
    event_id = data["event_id"]
    hint = data["event"]
    self.logger.info("[%s] Receiving message", event_id)
    metrics["alarm_dispose"] += 1
    try:
        event = ActiveEvent.from_json(hint)
        event.timestamp = event.timestamp.replace(tzinfo=None)
        await self.dispose_event(event)
    except Exception:
        metrics["alarm_dispose_error"] += 1
        error_report()
    finally:
        if self.topo_rca_lock:
            # Release pending RCA Lock
            await self.topo_rca_lock.release()
            self.topo_rca_lock = None
Example #13
import re

from noc.core.mongo.connection import connect
from noc.fm.models.activeevent import ActiveEvent

connect()

# Drop login/logout events mentioning the given address
re_events = re.compile(r".+(LOGIN|LOGOUT|login|logout).+217\.76\.35\.203")
for event in ActiveEvent.objects():
    if "message" in event.raw_vars:
        if re_events.match(event.raw_vars["message"]):
            event.delete()

# Drop events whose raw SNMP variable references the same address
re2_events = re.compile(r".+217\.76\.35\.203.+")
for event in ActiveEvent.objects():
    if "1.3.6.1.2.1.16.9.1.1.2.178" in event.raw_vars:
        if re2_events.match(event.raw_vars["1.3.6.1.2.1.16.9.1.1.2.178"]):
            event.delete()
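
Both loops above pull every event to the client just to regex-match one field. A hedged alternative sketch that pushes the match to the server (mongoengine accepts a compiled regex as a query value; the raw_vars__message path assumes the same dict layout as the script):

pattern = re.compile(r"(?i)(login|logout).+217\.76\.35\.203")
# Server-side regex match on raw_vars["message"], deleted in bulk
ActiveEvent.objects(raw_vars__message=pattern).delete()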