Example #1
 def forwards(self):
     handlers = set()
     for h, in db.execute(
             "SELECT DISTINCT config_filter_handler FROM sa_managedobject"):
         if h:
             handlers.add(h)
     for h, in db.execute(
             "SELECT DISTINCT config_diff_filter_handler FROM sa_managedobject"
     ):
         if h:
             handlers.add(h)
     if handlers:
         coll = get_db()["handlers"]
         for h in handlers:
             name = h.split(".")[-2]
             coll.insert({
                 "_id": h,
                 "name": name,
                 "allow_config_filter": True
             })
     handlers = set()
     for h, in db.execute(
             "SELECT DISTINCT config_validation_handler FROM sa_managedobject"
     ):
         if h:
             handlers.add(h)
     if handlers:
         coll = get_db()["handlers"]
         for h in handlers:
             name = h.split(".")[-2]
             coll.insert({
                 "_id": h,
                 "name": name,
                 "allow_config_validation": True
             })
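The migrations above use the legacy pymongo insert() call. As a minimal sketch, the same handler import could be written against the newer insert_many() API; the migrate_handlers helper below is illustrative only and assumes, as in the surrounding examples, that get_db() returns a standard pymongo Database:

from noc.lib.nosql import get_db  # same helper as in the examples above


def migrate_handlers(rows, flag):
    """Insert distinct handler paths into the "handlers" collection.

    rows: an iterable of 1-tuples, e.g. the result of db.execute(...)
    flag: the capability field to set, e.g. "allow_config_filter"
    """
    handlers = {h for (h,) in rows if h}
    if not handlers:
        return
    coll = get_db()["handlers"]
    coll.insert_many([
        {"_id": h, "name": h.split(".")[-2], flag: True}
        for h in handlers
    ])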
Example #2
 def schedule_next(self, status):
     if self.check_timings:
         self.logger.info(
             "Timings: %s", ", ".join("%s = %.2fms" % (n, t * 1000)
                                      for n, t in self.check_timings))
     super(MODiscoveryJob, self).schedule_next(status)
     # Update alarm statuses
     self.update_alarms()
     # Write job log
     key = "discovery-%s-%s" % (self.attrs[self.ATTR_CLASS],
                                self.attrs[self.ATTR_KEY])
     problems = {}
     for p in self.problems:
         if p["check"] in problems and p["path"]:
             problems[p["check"]][p["path"]] = p["message"]
         elif p["check"] in problems and not p["path"]:
             # p["path"] == ""
             problems[p["check"]][p["path"]] += "; %s" % p["message"]
         else:
             problems[p["check"]] = {p["path"]: p["message"]}
     get_db()["noc.joblog"].update({"_id": key}, {
         "$set": {
             "log": bson.Binary(zlib.compress(self.out_buffer.getvalue())),
             "problems": problems
         }
     },
                                   upsert=True)
 def forwards(self):
     # Get profile record mappings
     pcoll = get_db()["noc.profiles"]
     acoll = get_db()["noc.actioncommands"]
     pmap = {}  # name -> id
     for d in pcoll.find({}, {"_id": 1, "name": 1}):
         pmap[d["name"]] = d["_id"]
     # Update
     for p in pmap:
         acoll.update_many({"profile": p}, {"$set": {"profile": pmap[p]}})
Example #4
 def get_last_extract(self, name):
     coll = get_db()["noc.bi_timestamps"]
     d = coll.find_one({"_id": name})
     if d:
         return d["last_extract"]
     else:
         return None
 def forwards(self):
     PROBEUSER = "******"
     mdb = get_db()
     # Check the probe has no storage or credentials
     if mdb.noc.pm.probe.count() != 1:
         return
     p = mdb.noc.pm.probe.find_one({})
     if p.get("storage") or p.get("user"):
         return
     # Check the probe user does not exist yet
     if db.execute("SELECT COUNT(*) FROM auth_user WHERE username=%s",
                   [PROBEUSER])[0][0] > 0:
         return
     # Check the ./noc user command exists
     if not os.path.exists("main/management/commands/user.py"):
         return
     # Check ./scripts/set-conf.py exists
     if not os.path.exists("scripts/set-conf.py"):
         return
     # Check the probe config exists
     if not os.path.exists("etc/noc-probe.conf"):
         return
     # Create user and set config
     os.system("./scripts/set-conf.py etc/noc-probe.conf autoconf user %s" %
               PROBEUSER)
     os.system(
         "PW=`./noc user --add --username=%s [email protected] --template=probe --pwgen` &&"
         "./scripts/set-conf.py etc/noc-probe.conf autoconf passwd $PW" %
         PROBEUSER)
     uid = db.execute("SELECT id FROM auth_user WHERE username=%s",
                      [PROBEUSER])[0][0]
     sid = mdb.noc.pm.storages.find_one({})["_id"]
     mdb.noc.pm.probe.update({}, {"$set": {"storage": sid, "user": uid}})
Example #6
 def forwards(self):
     db = get_db()
     if db.noc.pm.db.count() == 0:
         # Create PMDB
         db.noc.pm.db.insert({
             "name": "default",
             "database": db.name,
             "host": db.connection.host,
             "port": db.connection.port,
             "user": config.get("nosql_database", "user"),
             "password": config.get("nosql_database", "password")
         })
         # Create PMStorage
         db_id = db.noc.pm.db.find()[0]["_id"]
         db.noc.pm.storage.insert({
             "db": db_id,
             "name": "default",
             "collection": "noc.ts.default",
             "raw_retention": 86400
         })
         # Create PMProbe
         db.noc.pm.probe.insert({
             "name": "default",
             "is_active": True
         })
Example #7
    def forwards(self):
        def convert(doc):
            def convert_caps(ci):
                return {
                    "capability": ci["capability"],
                    "value": ci["discovered_value"],
                    "source": sources.get(ci["capability"], "caps")
                }

            return {
                "_id": doc["object"],
                "caps": [convert_caps(c) for c in doc["caps"]]
            }

        db = get_db()
        caps = db["noc.sa.objectcapabilities"]
        if not caps.count_documents({}):
            return
        caps.rename("noc.sa.objectcapabilities_old", dropTarget=True)
        old_caps = db["noc.sa.objectcapabilities_old"]
        new_caps = db["noc.sa.objectcapabilities"]
        sources = {}
        d = db["noc.inv.capabilities"].find_one({"name": "DB | Interfaces"})
        if d:
            sources[d["_id"]] = "interface"

        CHUNK = 500
        data = [convert(x) for x in old_caps.find({}) if x.get("object")]
        while data:
            chunk, data = data[:CHUNK], data[CHUNK:]
            new_caps.insert(chunk)
Example #8
 def forwards(self):
     db = get_db()
     for c in (db.noc.alarmclasses, db.noc.eventclasses):
         bulk = []
         for d in c.find({}):
             text = d["text"]["en"]
             bulk += [
                 UpdateOne({"_id": d["_id"]}, {
                     "$set": {
                         "subject_template": text["subject_template"],
                         "body_template": text["body_template"],
                         "symptoms": text["symptoms"],
                         "probable_causes": text["probable_causes"],
                         "recommended_actions": text["recommended_actions"]
                     },
                     "$unset": {
                         "text": ""
                     }
                 })
             ]
         if bulk:
             print("Commiting changes to database")
             try:
                 db.noc.fm.uptimes.bulk_write(bulk)
                 print("Database has been synced")
             except BulkWriteError as e:
                 print("Bulk write error: '%s'", e.details)
                 print("Stopping check")
 def extract(self):
     i_type = "physical"
     match = {"type": i_type}
     if self.sync_ids:
         match = {"type": i_type, "managed_object": {"$in": self.sync_ids}}
     value = get_db()["noc.interfaces"].with_options(
         read_preference=ReadPreference.SECONDARY_PREFERRED).aggregate([{
             "$match":
             match
         }, {
             "$group": {
                 "_id": "$managed_object",
                 "count": {
                     "$sum": 1
                 }
             }
         }, {
             "$sort": {
                 "_id": 1
             }
         }])
     for v in value:
         if not v["_id"]:
             continue
         yield v["_id"], v["count"]
Example #10
 def __init__(self, name, cleanup=None, reset_running=False,
              initial_submit=False, max_threads=None,
              preserve_order=False, max_faults=None,
              mrt_limit=None):
     self.logger = PrefixLoggerAdapter(logger, name)
     self.name = name
     self.job_classes = {}
     self.collection_name = self.COLLECTION_BASE + self.name
     self.collection = get_db()[self.collection_name]
     self.active_mrt = {}  # ReduceTask -> Job instance
     self.cleanup_callback = cleanup
     self.reset_running = reset_running
     self.ignored = []
     self.initial_submit = initial_submit
     self.initial_submit_next_check = {}  # job class -> timestamp
     self.max_threads = max_threads
     self.preserve_order = preserve_order
     self.max_faults = max_faults
     self.mrt_limit = mrt_limit
     self.mrt_overload = False
     self.running_lock = threading.Lock()
     self.running_count = defaultdict(int)  # Group -> Count
     self.log_jobs = None
     self.metrics = MetricsHub(
         "noc.scheduler.%s" % name,
         "jobs.count",
         "jobs.success",
         "jobs.failed",
         "jobs.dereference.count",
         "jobs.dereference.success",
         "jobs.dereference.failed",
         "jobs.time"
     )
 def forwards(self):
     # Get profile record mappings
     pcoll = get_db()["noc.profiles"]
     pmap = {}  # name -> id
     for d in pcoll.find({}, {"_id": 1, "name": 1}):
         pmap[d["name"]] = str(d["_id"])
     # Create .profile column
     db.add_column(
         "peer_peeringpoint",
         "profile",
         DocumentReferenceField(
             "sa.Profile", null=True, blank=True
         )
     )
     # Update profiles
     for p, in list(db.execute("SELECT DISTINCT profile_name FROM peer_peeringpoint")):
         db.execute("""
         UPDATE peer_peeringpoint
         SET profile = %s
         WHERE profile_name = %s
         """, [pmap[p], p])
     # Set profile as not null
     db.execute("ALTER TABLE peer_peeringpoint ALTER profile SET NOT NULL")
     # Drop legacy profile_name
     db.delete_column("peer_peeringpoint", "profile_name")
Example #12
    def get_data(self, request, repo="config", days=1, **kwargs):
        data = []
        baseline = datetime.datetime.now() - datetime.timedelta(days=days)
        if repo == "config":
            mos = ManagedObject.objects.filter()
            if not request.user.is_superuser:
                mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
            mos = dict(mos.values_list("id", "name"))
            config_db = get_db()["noc.gridvcs.config.files"].with_options(
                read_preference=ReadPreference.SECONDARY_PREFERRED)
            pipeline = [{"$match": {"ts": {"$gte": baseline}}},
                        {"$group": {"_id": "$object", "last_ts": {"$max": "$ts"}}},
                        {"$sort": {"_id": 1}}]
            for value in config_db.aggregate(pipeline):
                if value["_id"] not in mos:
                    continue
                data += [(mos[value["_id"]], value["last_ts"])]

        else:
            oc = Object.get_object_class(repo)
            data = [(o, o.last_modified) for o in
                    oc.objects.filter(last_modified__gte=baseline).order_by("-last_modified")]
        return self.from_dataset(
            title="%s: %s in %d days" % (self.title, repo, days),
            columns=[
                "Object",
                TableColumn(_("Last Changed"), format="datetime")],
            data=data,
            enumerate=True)
 def extract(self):
     sync_ids_set = set(self.sync_ids)
     value = get_db()["noc.links"].with_options(
         read_preference=ReadPreference.SECONDARY_PREFERRED).aggregate([
             {
                 "$unwind": "$interfaces"
             },
             {
                 "$lookup": {
                     "from": "noc.interfaces",
                     "localField": "interfaces",
                     "foreignField": "_id",
                     "as": "int"
                 }
             },
             # {"$match": {"int.managed_object": {"$in": self.sync_ids}}},
             {
                 "$group": {
                     "_id": "$int.managed_object",
                     "count": {
                         "$sum": 1
                     }
                 }
             },
             {
                 "$sort": {
                     "_id": 1
                 }
             }
         ])
     for v in value:
         if not v["_id"] or v["_id"][0] not in sync_ids_set:
             continue
         yield v["_id"][0], v["count"]
Example #14
    def archive(self):
        """
        Move data to archive collection
        :return:
        """
        def spool(collection_name):
            coll = db[collection_name]
            try:
                r = coll.insert_many(data[collection_name])
            except BulkWriteError as e:
                print(e.details)
                return None
            return r

        db = get_db()
        # Compile name template
        tpl = self.get_archived_template()
        # collection name -> data
        data = defaultdict(list)
        # Collect data and spool full batches
        for d in self.iter_archived_items():
            cname = str(tpl.render({"doc": d}))
            data[cname] += [d]
            if len(data[cname]) >= self.archive_batch_limit:
                result = spool(cname)
                if result:
                    data[cname] = []
                else:
                    break
        # Spool remaining incomplete batches
        for cname in data:
            if data[cname]:
                spool(cname)
Example #15
 def forwards(self):
     if db.execute("""
         select count(*) from pg_class where relname='gis_geodata'
         """)[0][0] == 0:
         return  # No PostGIS
     c = get_db().noc.geodata
     bulk = []
     for layer, label, object, data in db.execute("""
         SELECT layer, label, object, ST_AsGeoJSON(data)
         FROM gis_geodata
     """):
         data = json.loads(data)
         bulk += [
             InsertOne({
                 "layer": ObjectId(layer),
                 "object": ObjectId(object),
                 "label": label,
                 "data": data
             })
         ]
     if bulk:
         print("Commiting changes to database")
         try:
             c.bulk_write(bulk)
             print("Database has been synced")
         except BulkWriteError as e:
             print("Bulk write error: '%s'", e.details)
             print("Stopping check")
Example #16
 def forwards(self):
     # Update mongodb collections
     mdb = get_db()
     for coll_name in ["noc.firmwares",
                       "noc.interface_profiles",
                       "noc.networksegments",
                       "noc.networksegmentprofiles",
                       "noc.objects",
                       "noc.platforms",
                       "noc.vendors"]:
         coll = mdb[coll_name]
         updates = []
         for d in coll.find({"bi_id": {"$exists": False}},
                            {"_id": 1}):
             updates += [
                 UpdateOne({
                     "_id": d["_id"]
                 }, {
                     "$set": {
                         "bi_id": bson.Int64(bi_hash(d["_id"]))
                     }
                 })
             ]
             if len(updates) >= MONGO_CHUNK:
                 coll.bulk_write(updates)
                 updates = []
         if updates:
             coll.bulk_write(updates)
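Example #16 batches UpdateOne operations and flushes them whenever the buffer reaches MONGO_CHUNK documents. A minimal, reusable sketch of that chunked bulk_write pattern (the bulk_write_chunked helper and its default chunk size are illustrative, not part of NOC):

def bulk_write_chunked(coll, ops, chunk_size=500):
    """Apply bulk write operations (e.g. pymongo UpdateOne) in fixed-size batches."""
    for i in range(0, len(ops), chunk_size):
        batch = ops[i:i + chunk_size]
        if batch:
            coll.bulk_write(batch)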
Example #17
    def handle(self, host=None, port=None, *args, **options):
        from noc.lib.nosql import get_db

        db = get_db()
        collections = set(db.list_collection_names())
        for model_id in iter_model_id():
            model = get_model(model_id)
            if not model:
                self.die("Invalid model: %s" % model_id)
            if not is_document(model):
                continue
            # Rename collections when necessary
            legacy_collections = model._meta.get("legacy_collections", [])
            for old_name in legacy_collections:
                if old_name in collections:
                    new_name = model._meta["collection"]
                    self.print("[%s] Renaming %s to %s" %
                               (model_id, old_name, new_name))
                    db[old_name].rename(new_name)
                    break
            # Only ensure indexes for documents with auto_create_index == False
            if model._meta.get('auto_create_index', True):
                continue
            # Index model
            self.index_model(model_id, model)
        # Index datastreams
        self.index_datastreams()
        # @todo: Detect changes
        self.print("OK")
Example #18
 def forwards(self):
     db = get_db()
     for om in db.noc.objectmodels.find():
         if "data" not in om:
             continue
         if "asset" not in om["data"]:
             continue
         part_no = []
         order_part_no = []
         uso = {}
         so = {}
         for k in om["data"]["asset"]:
             if k.startswith("part_no") and k != "part_no":
                 part_no += [om["data"]["asset"][k]]
                 uso["data.asset.%s" % k] = ""
             elif k.startswith("order_part_no") and k != "order_part_no":
                 order_part_no += [om["data"]["asset"][k]]
                 uso["data.asset.%s" % k] = ""
         if not part_no and not order_part_no:
             continue
         if part_no:
             so["data.asset.part_no"] = part_no
         if order_part_no:
             so["data.asset.order_part_no"] = order_part_no
         db.noc.objectmodels.update({"_id": om["_id"]}, {
             "$set": so,
             "$unset": uso
         })
 def forwards(self):
     c = get_db().noc.subinterfaces
     for i in ("is_ipv4_1", "is_ipv6_1", "is_bridge_1"):
         try:
             c.drop_index(i)
         except Exception:
             pass
Example #20
 def forwards(self):
     db = get_db()
     metrics = db.noc.ts.metrics
     has_children = {}
     for m in metrics.find({}).sort("name", 1):
         has_children[m["name"]] = False
         if "." in m["name"]:
             parent = ".".join(m["name"].split(".")[:-1])
             has_children[parent] = True
     if has_children:
         bulk = []
         for name in has_children:
             bulk += [
                 UpdateOne({"name": name},
                           {"$set": {
                               "has_children": has_children[name]
                           }})
             ]
         if bulk:
             print("Commiting changes to database")
             try:
                 metrics.bulk_write(bulk)
                 print("Database has been synced")
             except BulkWriteError as e:
                 print("Bulk write error: '%s'", e.details)
                 print("Stopping check")
Example #21
 def __init__(self):
     self.mhashes = {}  # metric -> metric hash
     #
     self.hash_width = config.getint("pm_storage", "hash_width")
     self.key_mask = "!%dsL" % self.hash_width
     # Set key-value store class
     kvtype = config.get("pm_storage", "type")
     logger.info("Initializing %s storage. %d-byte wide hash", kvtype,
                 self.hash_width)
     self.kvcls = load_name("noc.pm.db.storage", kvtype, KVStorage)
     if not self.kvcls:
         logger.critical("Invalid storage type: '%s'", kvtype)
         raise ValueError("Invalid storage type: '%s'" % kvtype)
     # Set partitioning scheme
     ps = config.get("pm_storage", "partition")
     logger.info("Setting %s partitioning scheme", ps)
     self.partition = load_name("noc.pm.db.partition", ps, Partition)
     # Index collection
     self.metrics = get_db()["noc.ts.metrics"]
     self.metrics_batch = self.metrics.initialize_ordered_bulk_op()
     self.new_metrics = 0
     self.flush_lock = threading.Lock()
     self.epoch = int(
         time.mktime(
             time.strptime(config.get("pm_storage", "epoch"), "%Y-%m-%d")))
     self.zero_hash = Binary("\x00" * self.hash_width)
Example #22
 def forwards(self):
     now = datetime.datetime.now()
     sc = get_db()["noc.synccaches"]
     for zone_id, sync_id in db.execute(
             "SELECT z.id, s.sync "
             "FROM "
             "    dns_dnszone z JOIN dns_dnszoneprofile p ON (z.profile_id = p.id) "
             "    JOIN dns_dnszoneprofile_masters m ON (m.dnszoneprofile_id = p.id) "
             "    JOIN dns_dnsserver s ON (s.id = m.dnsserver_id) "
             "WHERE "
             "    z.is_auto_generated = TRUE "
             "    AND s.sync IS NOT NULL"):
         if not sc.count_documents({
                 "sync_id": str(sync_id),
                 "model_id": "dns.DNSZone",
                 "object_id": str(zone_id)
         }):
             sc.insert({
                 "uuid": str(uuid.uuid4()),
                 "model_id": "dns.DNSZone",
                 "object_id": str(zone_id),
                 "sync_id": str(sync_id),
                 "instance_id": 0,
                 "changed": now,
                 "expire": now
             })
Example #23
 def forwards(self):
     phash = {}
     db = get_db()
     metrics = db.noc.ts.metrics
     bulk = []
     for m in metrics.find({}).sort("name", 1):
         phash[m["name"]] = m["hash"]
         if "." in m["name"]:
             pn = ".".join(m["name"].split(".")[:-1])
             parent = phash[pn]
         else:
             parent = Binary("\x00" * 8)
         bulk += [
             UpdateOne({"_id": m["_id"]}, {
                 "$set": {
                     "local": m["name"].split(".")[-1],
                     "parent": parent
                 }
             })
         ]
     if bulk:
         print("Commiting changes to database")
         try:
             metrics.bulk_write(bulk)
             print("Database has been synced")
         except BulkWriteError as e:
             print("Bulk write error: '%s'", e.details)
             print("Stopping check")
Example #24
 def forwards(self):
     def process_event(event_id, alarm_id):
         e = None
         for c in (active_events, archived_events):
             e = c.find_one(event_id)
             if e:
                 break
         if not e:
             return
         assert e["_id"] == event_id
         alarms = e.get("alarms", [])
         if alarm_id not in alarms:
             alarms += [alarm_id]
             e["alarms"] = alarms
             c.save(e)
     
     def process_alarm(collection, doc):
         if "events" not in doc:
             return
         a_id = doc["_id"]
         for e_id in doc["events"]:
             process_event(e_id, a_id)
         del doc["events"]
         collection.save(doc)
     
     db = get_db()
     active_alarms = db.noc.alarms.active
     archived_alarms = db.noc.alarms.archive
     active_events = db.noc.events.active
     archived_events = db.noc.events.archive
     
     for ac in (active_alarms, archived_alarms):
         for doc in ac.find():
             process_alarm(ac, doc)
Example #25
    def forwards(self):
        db = get_db()
        coll = db["noc.networksegmentprofiles"]
        result = coll.insert({
            "name": "default",
            "description": "Default segment profile",
            "discovery_interval": 0,
            "mac_restrict_to_management_vlan": False,
            "enable_lost_redundancy": True,
            "topology_methods": [
                {"method": m, "is_active": True}
                for m in [
                    "oam", "lacp", "udld", "lldp", "cdp", "huawei_ndp",
                    "stp", "nri"
                ]
            ]
        })
        if isinstance(result, bson.ObjectId):
            profile_id = result
        else:
            profile_id = result.inserted_id

        db["noc.networksegments"].update_many(
            {}, {"$set": {
                "profile": profile_id
            }})
Example #26
def sliding_job(scheduler_name,
                job_class,
                key=None,
                ts=None,
                delta=None,
                data=None,
                cutoff_delta=0):
    #
    if ts is None:
        ts = datetime.datetime.now()
        if delta:
            ts += datetime.timedelta(seconds=delta)
    # Check the job exists
    now = datetime.datetime.now()
    c = get_db()["noc.schedules.%s" % scheduler_name]
    j = c.find_one({Scheduler.ATTR_CLASS: job_class, Scheduler.ATTR_KEY: key})
    if j:
        cutoff = j[Scheduler.ATTR_SCHEDULE].get("cutoff")
        if not cutoff or ts <= cutoff:
            # Slide job
            c.update({"_id": j["_id"]}, {"$set": {Scheduler.ATTR_TS: ts}})
    else:
        # Submit job
        cutoff = now + datetime.timedelta(seconds=cutoff_delta)
        c.insert({
            Scheduler.ATTR_CLASS: job_class,
            Scheduler.ATTR_KEY: key,
            Scheduler.ATTR_STATUS: Scheduler.S_WAIT,
            Scheduler.ATTR_TS: ts,
            Scheduler.ATTR_DATA: data,
            Scheduler.ATTR_SCHEDULE: {
                "cutoff": cutoff
            }
        })
Example #27
 def __iter__(self):
     for p in self.mos_pools:
         r = get_db()[self.coll_name % p.name].with_options(
             read_preference=ReadPreference.SECONDARY_PREFERRED).aggregate(
                 self.pipelines.get(p.name, self.pipeline()))
         for x in r:
             # @todo Append info for MO
             yield x
Example #28
def check_mongo():
    import sys
    from noc.lib.nosql import get_db
    import pymongo
    try:
        db = get_db()
    except pymongo.errors.OperationFailure as why:
        sys.stderr.write("ERROR: %s\n" % why)
        sys.exit(1)
Example #29
 def has_origin_routes(cls):
     """
     Returns True if cache contains origin.routes
     :return:
     """
     db = nosql.get_db()
     collection = db.noc.whois.origin.route
     return bool(collection.count_documents({}))
Example #30
 def has_asset_members(cls):
     """
     Returns True if cache contains asset.members
     :return:
     """
     db = nosql.get_db()
     collection = db.noc.whois.asset.members
     return bool(collection.count_documents({}))
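Examples #29 and #30 follow the same pattern: fetch a collection through get_db() and report whether it holds any document. A minimal sketch of that check as a generic helper (the has_documents name is illustrative); passing limit=1 lets MongoDB stop counting after the first match:

def has_documents(collection):
    """Return True if the given pymongo collection contains at least one document."""
    return bool(collection.count_documents({}, limit=1))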