Example #1
 def extract(self):
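     # Latest GridVCS config revision timestamp ("ts") per object in sync_ids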
     value = (get_db()["noc.gridvcs.config.files"].with_options(
         read_preference=ReadPreference.SECONDARY_PREFERRED).aggregate([
             {
                 "$match": {
                     "object": {
                         "$in": self.sync_ids
                     }
                 }
             },
             {
                 "$group": {
                     "_id": "$object",
                     "last_ts": {
                         "$max": "$ts"
                     }
                 }
             },
             {
                 "$sort": {
                     "_id": 1
                 }
             },
         ]))
     for v in value:
         if not v["_id"]:
             continue
         yield v["_id"], v["last_ts"]
Example #2
 def _3_isp(self, index):
     # Profile detection problems
     match = {
         "$or": [
             {
                 "problems.suggest_snmp.": {
                     "$regex": "Failed to guess SNMP community",
                     "$options": "i",
                 }
             },
             {
                 "problems.profile.":
                 "Cannot fetch snmp data, check device for SNMP access"
             },
             {
                 "problems.profile.": "Cannot detect profile"
             },
         ]
     }
     c = set(
         int(r["_id"].rsplit("-")[-1])
         for r in get_db()["noc.joblog"].with_options(
             read_preference=ReadPreference.SECONDARY_PREFERRED).find(
                 match))
     if index == "0":
         return self.common_filter - c
     return c
Example #3
    def extract(self):
        match = {"data.management.managed_object": {"$exists": True}}
        if self.sync_ids:
            match = {"data.management.managed_object": {"$in": self.sync_ids}}
        value = (
            get_db()["noc.objects"]
            .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
            .aggregate(
                [
                    {"$match": match},
                    {"$sort": {"data.management.managed_object": 1}},
                    {
                        "$lookup": {
                            "from": "noc.objects",
                            "localField": "container",
                            "foreignField": "_id",
                            "as": "cont",
                        }
                    },
                    {"$project": {"data": 1, "cont.data": 1}},
                ]
            )
        )

        for v in value:
            r = {}
            if "asset" in v["data"]:
                # r[v["data"]["management"]["managed_object"]].update(v["data"]["asset"])
                r.update(v["data"]["asset"])
            if v["cont"]:
                if "data" in v["cont"][0]:
                    # r[v["data"]["management"]["managed_object"]].update(v["cont"][0]["data"].get("address", {}))
                    r.update(v["cont"][0]["data"].get("address", {}))
            yield v["data"]["management"]["managed_object"], r
Example #4
File: bi.py Project: 0pt1on/noc
 def get_last_extract(self, name):
     coll = get_db()["noc.bi_timestamps"]
     d = coll.find_one({"_id": name})
     if d:
         return d["last_extract"]
     else:
         return None
Example #5
    def archive(self):
        """
        Move data to archive collection
        :return:
        """

        def spool(collection_name):
            coll = db[collection_name]
            try:
                r = coll.insert_many(data[collection_name])
            except BulkWriteError as e:
                print(e.details)
                return None
            return r

        self.require_db_connect()
        db = get_db()
        # Compile name template
        tpl = self.get_archived_template()
        # collection name -> data
        data = defaultdict(list)
        # Collect data and spool full batches
        for d in self.iter_archived_items():
            cname = str(tpl.render({"doc": d}))
            data[cname] += [d]
            if len(data[cname]) >= self.archive_batch_limit:
                result = spool(cname)
                if result:
                    data[cname] = []
                else:
                    break
        # Spool remaining incomplete batches
        for cname in data:
            if data[cname]:
                spool(cname)
Example #6
 def extract(self):
     i_type = "physical"
     match = {"type": i_type}
     if len(self.sync_ids) < 20000:
         # @todo Very large lists slow down encoding; needs research
         match = {"type": i_type, "managed_object": {"$in": self.sync_ids}}
     value = (get_db()["noc.interfaces"].with_options(
         read_preference=ReadPreference.SECONDARY_PREFERRED).aggregate(
             [
                 {
                     "$match": match
                 },
                 {
                     "$group": {
                         "_id": "$managed_object",
                         "count": {
                             "$sum": 1
                         }
                     }
                 },
                 {
                     "$sort": {
                         "_id": 1
                     }
                 },
             ],
             allowDiskUse=True,
         ))
     for v in value:
         if not v["_id"]:
             continue
         yield v["_id"], v["count"]
Example #7
 def _2_isp(self, index):
     # CLI Problem
     match = {
         "$and": [
             {
                 "problems.profile.": {
                     "$ne": "Cannot fetch snmp data, check device for SNMP access"
                 }
             },
             {"problems.profile.": {"$ne": "Cannot detect profile"}},
             {
                 "$or": [
                     {"problems.version.": {"$regex": r"Remote error code \d+"}},
                     {
                         "problems.suggest_cli.": {
                             "$regex": r"Failed to guess CLI credentials",
                             "$options": "i",
                         }
                     },
                 ]
             },
         ]
     }
     c = set(
         int(r["_id"].rsplit("-")[-1])
         for r in get_db()["noc.joblog"]
         .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
         .find(match)
     )
     if index == "0":
         return self.common_filter - c
     return c
Example #8
 def handle(self, host=None, port=None, *args, **options):
     connect()
     db = get_db()
     collections = set(db.list_collection_names())
     for model_id in iter_model_id():
         model = get_model(model_id)
         if not model:
             self.die("Invalid model: %s" % model_id)
         if not is_document(model):
             continue
         # Rename collections when necessary
         legacy_collections = model._meta.get("legacy_collections", [])
         for old_name in legacy_collections:
             if old_name in collections:
                 new_name = model._meta["collection"]
                 self.print("[%s] Renaming %s to %s" %
                            (model_id, old_name, new_name))
                 db[old_name].rename(new_name)
                 break
         # Ensure only documents with auto_create_index == False
         if model._meta.get("auto_create_index", True):
             continue
         # Index model
         self.index_model(model_id, model)
     # Index datastreams
     self.index_datastreams()
     # Index GridVCS
     self.index_gridvcs()
     # Index mongo cache
     self.index_cache()
     # Index datasource cache
     self.index_datasource_cache()
     # @todo: Detect changes
     self.print("OK")
Example #9
 def has_asset_members(cls):
     """
     Returns True if cache contains asset.members
     :return:
     """
     db = get_db()
     collection = db.noc.whois.asset.members
     return bool(collection.count_documents({}))
Example #10
 def __iter__(self):
     for p in self.mos_pools:
         r = (get_db()[self.coll_name % p.name].with_options(
             read_preference=ReadPreference.SECONDARY_PREFERRED).aggregate(
                 self.pipelines.get(p.name, self.pipeline())))
         for x in r:
             # @todo Append info for MO
             yield x
Example #11
 def get_collection(cls) -> pymongo.collection.Collection:
     """
     Get pymongo collection instance
     :return:
     """
     if not cls._coll:
         cls._coll = get_db()[cls.COLL_NAME]
     return cls._coll
Example #12
 def has_origin_routes(cls):
     """
     Returns True if cache contains origin.routes
     :return:
     """
     db = get_db()
     collection = db.noc.whois.origin.route
     return bool(collection.count_documents({}))
Example #13
def fix():
    # Initialize with distinct values
    coll = get_db()["noc.platforms"]
    bulk = []
    for d in coll.find({"full_name": {"$exists": False}}, {"_id": 1}):
        bulk += [UpdateOne({"_id": d["_id"]}, {"$set": {"full_name": str(d["_id"])}})]
    if bulk:
        coll.bulk_write(bulk)
    fix_full_name()
Example #14
 def get_collection(self):
     """
     Returns mongo collection instance
     """
     if not self.collection:
         self.logger.debug("Open collection %s", self.collection_name)
         self.collection = get_db()[self.collection_name]
         self.bulk = []
     return self.collection
Example #15
 def extract(self):
     match = {"data.address.text": {"$exists": True}}
     # if self.sync_ids:
     #     containers = dict(ManagedObject.objects.filter(id__in=self.sync_ids).values_list("id", "container"))
     #     match = {"_id": {"$in": list(containers)}}
     # if self.sync_ids:
     #     match = {"data.management.managed_object": {"$in": self.sync_ids}}
     value = (
         get_db()["noc.objects"]
         .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
         .aggregate(
             [
                 {"$match": match},
                 # {"$sort": {"_id": 1}},
                 {
                     "$project": {
                         "parent_address": "$data.address.text",
                         "parent_name": 1,
                         "_id": 1,
                     }
                 },
                 {
                     "$lookup": {
                         "from": "noc.objects",
                         "localField": "_id",
                         "foreignField": "container",
                         "as": "child_cont",
                     }
                 },
                 {
                     "$project": {
                         "parent_address": 1,
                         "parent_name": 1,
                         "_id": 1,
                         "child_cont._id": 1,
                         "child_cont.name": 1,
                     }
                 },
                 {"$unwind": {"path": "$child_cont", "preserveNullAndEmptyArrays": True}},
             ]
         )
     )
     r = {}
     for v in value:
         cid = str(v["_id"])
         if "child_cont" in v:
             # ccid = str(v["child_cont"]["_id"])
             r[str(v["child_cont"]["_id"])] = v["parent_address"].strip()
         if cid not in r:
             r[cid] = v["parent_address"].strip()
     for mo_id, container in (
         ManagedObject.objects.filter(id__in=self.sync_ids)
         .values_list("id", "container")
         .order_by("id")
     ):
         if container in r:
             yield mo_id, r[container]
Example #16
    def extract(self):
        # @todo Make reports field
        """
        { "_id" : { "managed_object" : 6757 }, "count_in_speed" : 3 }
        { "_id" : { "oper_status" : true, "in_speed" : 10000, "managed_object" : 6757 }, "count_in_speed" : 2 }
        { "_id" : { "oper_status" : true, "in_speed" : 1000000, "managed_object" : 6757 }, "count_in_speed" : 11 }
        { "_id" : { "oper_status" : false, "in_speed" : 1000000, "managed_object" : 6757 }, "count_in_speed" : 62 }
        { "_id" : { "oper_status" : true, "in_speed" : 10000000, "managed_object" : 6757 }, "count_in_speed" : 5 }
        { "_id" : { "oper_status" : false, "in_speed" : 100000, "managed_object" : 6757 }, "count_in_speed" : 1 }
        :return:
        """

        def humanize_speed(speed):
            if not speed:
                return "-"
            for t, n in [(1000000, "G"), (1000, "M"), (1, "k")]:
                if speed >= t:
                    if speed // t * t == speed:
                        return "%d%s" % (speed // t, n)
                    else:
                        return "%.2f%s" % (float(speed) / t, n)
            return str(speed)

        oper = True
        group = {"in_speed": "$in_speed", "managed_object": "$managed_object"}
        if oper:
            group["oper_status"] = "$oper_status"

        match = {"type": "physical"}
        if self.sync_ids:
            match = {"type": "physical", "managed_object": {"$in": self.sync_ids}}
        value = (
            get_db()["noc.interfaces"]
            .with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
            .aggregate(
                [{"$match": match}, {"$group": {"_id": group, "count": {"$sum": 1}}}],
                allowDiskUse=True,
            )
        )
        r = defaultdict(lambda: [""] * len(self.ATTRS))
        # @todo Fix Down
        for v in value:
            c = (
                {True: "Up", False: "Down", None: "-"}[v["_id"].get("oper_status", None)]
                if oper
                else ""
            )

            if v["_id"].get("in_speed", None):
                c += "/" + humanize_speed(v["_id"]["in_speed"])
            else:
                c += "/-"
            # r[v["_id"]["managed_object"]].append((c, v["count"]))
            if c in self.ATTRS:
                r[v["_id"]["managed_object"]][self.ATTRS.index(c)] = v["count"]
        for val in r:
            yield val, r[val]
Example #17
 def get_collection(cls):
     """
     Get pymongo Collection object
     :return:
     """
     coll = getattr(cls, "_collection", None)
     if not coll:
         coll = get_db()[cls.get_collection_name()]
         cls._collection = coll
     return coll
Example #18
 def _resolve_as_set_prefixes(cls, as_set):
     db = get_db()
     collection = db.noc.whois.origin.route
     # Resolve
     prefixes = set()
     for a in cls.resolve_as_set(as_set):
         o = collection.find_one({"_id": a}, {"routes": 1})
         if o:
             prefixes.update(o["routes"])
     return prefixes
Example #19
 def _4_isp(self, index):
     # Undefined profiles
     match = {"problems.profile.": {"$regex": "Not find profile for OID"}}
     c = set(
         int(r["_id"].rsplit("-")[-1])
         for r in get_db()["noc.joblog"].with_options(
             read_preference=ReadPreference.SECONDARY_PREFERRED).find(
                 match))
     if index == "0":
         return self.common_filter - c
     return c
Example #20
 def api_job_log(self, request, id, job):
     o = self.get_object_or_404(ManagedObject, id=id)
     if not o.has_access(request.user):
         return self.response_forbidden("Access denied")
     # fs = gridfs.GridFS(get_db(), "noc.joblog")
     key = "discovery-%s-%s" % (job, o.id)
     d = get_db()["noc.joblog"].find_one({"_id": key})
     if d and d["log"]:
         return self.render_plain_text(zlib.decompress(smart_bytes(d["log"])))
     else:
         return self.render_plain_text("No data")
Example #21
    def extract(self):
        # load(self, mo_ids, attributes):
        # Namedtuple caps, for save
        Caps = namedtuple("Caps", list(self.ATTRS))
        Caps.__new__.__defaults__ = ("", ) * len(Caps._fields)

        mo_ids = self.sync_ids[:]
        while mo_ids:
            chunk, mo_ids = mo_ids[:self.CHUNK_SIZE], mo_ids[self.CHUNK_SIZE:]
            match = {"_id": {"$in": chunk}}
            value = (get_db()["noc.sa.objectcapabilities"].with_options(
                read_preference=ReadPreference.SECONDARY_PREFERRED).aggregate([
                    {
                        "$match": match
                    },
                    {
                        "$unwind": "$caps"
                    },
                    {
                        "$match": {
                            "caps.source": {
                                "$in": ["caps", "manual"]
                            }
                        }
                    },
                    {
                        "$project": {
                            "key": "$caps.capability",
                            "value": "$caps.value"
                        }
                    },
                    {
                        "$group": {
                            "_id": "$_id",
                            "cap": {
                                "$push": {
                                    "item": "$key",
                                    "val": "$value"
                                }
                            },
                        }
                    },
                    {
                        "$sort": {
                            "_id": 1
                        }
                    },
                ]))
            for v in value:
                r = {
                    f'c_{ll["item"]}': ll["val"]
                    for ll in v["cap"] if f'c_{ll["item"]}' in self.ATTRS
                }
                yield v["_id"], Caps(**r)
Example #22
 def extract(self):
     db = get_db()["noc.objectfacts"]
     mos_filter = {"label": "system"}
     if self.sync_ids:
         mos_filter["object"] = {"$in": list(self.sync_ids)}
     for val in (
         db.with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
         .find(mos_filter, {"_id": 0, "object": 1, "attrs.hostname": 1})
         .sort("object")
     ):
         yield val["object"], val["attrs"].get("hostname", self.unknown_value)
Example #23
 def has_asset(cls, as_set):
     """
     Returns true if as-set has members in cache
     :param as_set:
     :return:
     """
     if is_asn(as_set[2:]):
         return True
     db = get_db()
     collection = db.noc.whois.asset.members
     return bool(collection.find_one({"_id": as_set}, {"_id": 1}))
Example #24
        def load(mo_ids):
            # match = {"links.mo": {"$in": mo_ids}}
            match = {"int.managed_object": {"$in": mo_ids}}
            group = {
                "_id": "$_id",
                "links": {
                    "$push": {
                        "iface_n": "$int.name",
                        # "iface_id": "$int._id",
                        # "iface_descr": "$int.description",
                        # "iface_speed": "$int.in_speed",
                        # "dis_method": "$discovery_method",
                        # "last_seen": "$last_seen",
                        "mo": "$int.managed_object",
                        "linked_obj": "$linked_objects",
                    }
                },
            }
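            # Unwind link interfaces and join noc.interfaces to resolve interface names and owning objects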
            value = (get_db()["noc.links"].with_options(
                read_preference=ReadPreference.SECONDARY_PREFERRED).aggregate(
                    [
                        {
                            "$unwind": "$interfaces"
                        },
                        {
                            "$lookup": {
                                "from": "noc.interfaces",
                                "localField": "interfaces",
                                "foreignField": "_id",
                                "as": "int",
                            }
                        },
                        {
                            "$match": match
                        },
                        {
                            "$group": group
                        },
                    ],
                    allowDiskUse=True,
                ))

            res = defaultdict(dict)

            for v in value:
                if v["_id"]:
                    for vv in v["links"]:
                        if len(vv["linked_obj"]) == 2:
                            mo = vv["mo"][0]
                            iface = vv["iface_n"]
                            for i in vv["linked_obj"]:
                                if mo != i:
                                    res[mo][i] = iface[0]
            return res
Example #25
 def _0_isp(self, index):
     # Common Problem
     match = {"problems": {"$exists": True}}
     c = set(
         int(r["_id"].rsplit("-")[-1])
         for r in get_db()["noc.joblog"].with_options(
             read_preference=ReadPreference.SECONDARY_PREFERRED).find(
                 match))
     if index == "0":
         return self.common_filter - c
     else:
         return c
Example #26
    def is_p(self, num, value, inverse=False):
        """
        Problem match
        :param num: Problem class number ("1" - SNMP, "2" - CLI)
        :param value:
        :param inverse: Return objects without the matched problem when True
        :return:
        """
        match = {"problems": {"$exists": True}}
        if num == "1":
            # SNMP Problem
            match = {
                "problems": {
                    "$exists": True
                },
                "problems.suggest_snmp.": {
                    "$regex": "Failed to guess SNMP community",
                    "$options": "i",
                },
            }
        if num == "2":
            # CLI Problem
            match = {
                "$and": [
                    {
                        "problems.profile.": {
                            "$ne":
                            "Cannot fetch snmp data, check device for SNMP access"
                        }
                    },
                    {
                        "problems.profile.": {
                            "$ne": "Cannot detect profile"
                        }
                    },
                    {
                        "problems.version.": {
                            "$regex": "/Remote error code \d+/"
                        }
                    },
                ]
            }

        c = set(
            int(r["_id"].rsplit("-")[-1])
            for r in get_db()["noc.joblog"].with_options(
                read_preference=ReadPreference.SECONDARY_PREFERRED).find(
                    match))
        if inverse:
            return set(ManagedObject.objects.filter().values_list(
                "id", flat=True)) - c
        else:
            return c
Example #27
 def get_collection(cls,
                    fmt: Optional[str] = None
                    ) -> pymongo.collection.Collection:
     """
     Get pymongo Collection object
     :return:
     """
     c_name = cls.get_collection_name(fmt)
     coll = cls._collections.get(c_name)
     if not coll:
         coll = get_db()[c_name]
         cls._collections[c_name] = coll
     return coll
Example #28
 def get_format_role(cls, fmt: str) -> Optional[str]:
     """
     Returns format role, if any
     :param fmt:
     :return:
     """
     doc = get_db()["datastreamconfigs"].find_one({"name": cls.name})
     if not doc:
         return None
     for f in doc.get("formats", []):
         if f.get("name") == fmt:
             return f.get("role") or None
     return None
Example #29
 def api_job_log(self, request, id):
     o = self.get_object_or_404(ManagedObject, id=id)
     if not o.has_access(request.user):
         return self.response_forbidden("Access denied")
     r = []
     for job in self.job_map:
         key = "discovery-%s-%s" % (job, o.id)
         d = get_db()["noc.joblog"].find_one({"_id": key})
         if d and d["log"]:
             r += [b"\n", smart_bytes(job), b"\n"]
             r += [zlib.decompress(smart_bytes((d["log"])))]
     if r:
         return self.render_plain_text(b"".join(r))
     else:
         return self.render_plain_text("No data")
Example #30
    def _refresh_labels(self):
        """
        Recalculate labels on model
        :return:
        """
        from django.db import connection

        for scope in self.iter_scopes():
            labels = [f"noc::rxfilter::{self.name}::{scope}::="] + (self.labels
                                                                    or [])
            model, field = scope.split("_")
            if model == "managedobject":
                # Cleanup current labels
                logger.info("[%s] Cleanup ManagedObject effective labels: %s",
                            self.name, labels)
                Label.reset_model_labels("sa.ManagedObject", labels)
                # Apply new rule
                logger.info("[%s] Apply new regex '%s' labels", self.name,
                            self.regexp)
                sql = f"""
                UPDATE sa_managedobject
                SET effective_labels=ARRAY (
                SELECT DISTINCT e FROM unnest(effective_labels || %s::varchar[]) AS a(e)
                )
                WHERE {field} ~ %s
                """
                cursor = connection.cursor()
                cursor.execute(sql, [labels, self.regexp])
            elif model == "interface":
                # Cleanup current labels
                logger.info("[%s] Cleanup Interface effective labels: %s",
                            self.name, labels)
                Label.reset_model_labels("inv.Interface", labels)
                # Apply new rule
                coll = get_db()["noc.interfaces"]
                coll.bulk_write(
                    [
                        UpdateMany(
                            {field: {"$regex": self.regexp}},
                            {"$addToSet": {"effective_labels": {"$each": labels}}},
                        )
                    ]
                )