def event(request):
    """
    Build an (ActiveEvent, EventClass, expected vars) triple from a JSON case.

    ``request.param`` is a (path, cfg) pair produced by the collector;
    ``cfg`` holds the event class, profile and raw data for the case.
    """
    path, cfg = request.param
    collection = cfg.get("$collection", COLLECTION_NAME)
    assert collection == COLLECTION_NAME, "Invalid collection %s" % collection
    event_class = EventClass.get_by_name(cfg.get("eventclass__name", DEFAULT_EVENT_CLASS))
    managed_object = ManagedObject(
        id=MO_ID,
        name=MO_NAME,
        address=MO_ADDRESS,
        profile=Profile.get_by_name(cfg.get("profile__name", DEFAULT_PROFILE)),
    )
    now = datetime.datetime.now()
    raw = cfg.get("data", {})
    # "source" travels separately; the rest stays in raw_vars
    source = raw.pop("source", "other")
    active = ActiveEvent(
        timestamp=now,
        start_timestamp=now,
        managed_object=managed_object,
        source=source,
        raw_vars=raw,
        repeats=1,
    )
    request.fixturename = "events-%s" % cfg.get("uuid")
    # request.fspath = path
    return active, event_class, cfg.get("vars", {})
def handle_prefix_list(self, as_set, name=None, profile=None, *args, **options):
    """
    Resolve an AS-set through the whois cache and print the generated
    prefix-list for the given profile.
    """
    from noc.peer.models.whoiscache import WhoisCache
    from noc.sa.models.profile import Profile

    prof = Profile.get_by_name(profile)
    if not prof:
        self.die("Invalid profile %s" % profile)
    prefixes = WhoisCache.resolve_as_set_prefixes_maxlen(as_set[0])
    self.print(prof.get_profile().generate_prefix_list(name, prefixes))
class AttributeIsolator(IsolatorClass):
    """Isolate managed objects by numeric attribute selectors."""

    name = "attribute"

    # Attribute number -> {raw value -> ORM value}.
    # "ne" lists raw values that must be matched with a negated Q;
    # "model" marks attributes that cross-link to another model.
    OP_ATTR_MAP = {
        "2": {"1": False, "2": True},
        "7": {
            "1": str(Profile.get_by_name("Generic.Host").id),
            "2": str(Profile.get_by_name("Generic.Host").id),
            "ne": ["2"],
        },
        "13020": {"1": False, "2": True, "model": ManagedObjectProfile},
        "1403": {"5": "S", "model": AuthProfile},
    }

    # Positional field names of ManagedObject, addressed by attribute number
    fields = [n.name for n in ManagedObject._meta._fields()]

    def default(self, num, index):
        # Unknown operations fall back to plain attribute isolation
        return self.f_attribute(num, index)

    def f_attribute(self, num, value):
        """
        Isolate objects by attribute number.

        :param num: Attribute number (a "0" inside marks a cross link)
        :param value: Attribute value
        :return: Django Q expression
        """
        if "0" in num:
            # Cross link: "<n1>0<n2>" addresses field n2 of the related model
            left, right = num.split("0", 1)
            related = self.OP_ATTR_MAP[num]["model"]
            sub_field = [n.name for n in related._meta._fields()][int(right)]
            field = "%s__%s" % (self.fields[int(left)], sub_field)
        else:
            field = self.fields[int(num)]
        q = d_Q(**{field: self.OP_ATTR_MAP[num][value]})
        if value in self.OP_ATTR_MAP[num].get("ne", []):
            return ~q
        return q
def clean(self, row):
    """
    Clean a loader row: map the pool name to a Pool object, split the tag
    string, resolve the profile and wrap static group references in lists.
    """
    v = super(ManagedObjectLoader, self).clean(row)
    v["pool"] = self.pools[v["pool"]]
    if "tags" in v:
        if v["tags"]:
            v["tags"] = [t.strip().strip('"') for t in v["tags"].split(",") if t.strip()]
        else:
            v["tags"] = []
    v["profile"] = Profile.get_by_name(v["profile"])
    # Static group references arrive as scalars; the model expects lists
    for group_field in ("static_client_groups", "static_service_groups"):
        v[group_field] = [v[group_field]] if v[group_field] else []
    return v
def handle(self, *args, **options):
    """
    Build a prefix list from the given expression and write it to the
    output file named in options.

    :raises CommandError: on missing expression, unknown profile or
        unwritable output file
    """
    # Check expression
    if len(args) != 1:
        raise CommandError("No expression given")
    expression = args[0]
    # Process profile
    profile = None
    if options["profile"]:
        profile = Profile.get_by_name(options["profile"])
        if not profile:
            raise CommandError("Invalid profile: %s" % options["profile"])
    # Create output
    try:
        out = open(options["output"], "w")
    except IOError as e:
        raise CommandError(str(e))
    # FIX: context manager guarantees the file is closed even if
    # build_prefix_list raises (the original leaked the handle on error)
    with out:
        self.build_prefix_list(out, expression, options["name"], profile)
def update_spec(self, name, script):
    """
    Update named spec with CLI commands collected from the session spans.

    :param name: Spec name
    :param script: BaseScript instance
    :return: None
    """
    from noc.dev.models.quiz import Quiz
    from noc.dev.models.spec import Spec, SpecAnswer
    from noc.sa.models.profile import Profile

    self.print("Updating spec: %s" % name)
    spec = Spec.get_by_name(name)
    changed = False
    if not spec:
        self.print(" Spec not found. Creating")
        # Get Ad-Hoc quiz
        quiz = Quiz.get_by_name("Ad-Hoc")
        if not quiz:
            self.print(" 'Ad-Hoc' quiz not found. Skipping")
            return
        # Create Ad-Hoc spec for profile
        spec = Spec(
            name,
            description="Auto-generated Ad-Hoc spec for %s profile" % script.profile.name,
            revision=1,
            quiz=quiz,
            author="NOC",
            profile=Profile.get_by_name(script.profile.name),
            changes=[],
            answers=[],
        )
        changed = True
    # Fetch commands from spans (column 6 is the service, column 12 the command)
    cli_svc = {"beef_cli", "cli", "telnet", "ssh"}
    commands = set()
    for sd in get_spans():
        row = sd.split("\t")
        if row[6] not in cli_svc:
            continue
        # NOTE(review): the "string_escape" codec is Python 2-only — verify
        # this path is not reached on Python 3
        commands.add(row[12].decode("string_escape").strip())
    # Update specs
    s_name = "cli_%s" % script.name.rsplit(".", 1)[-1]
    names = set()
    for ans in spec.answers:
        if (ans.name == s_name or ans.name.startswith(s_name + ".")) and ans.type == "cli":
            names.add(ans.name)
            if ans.value in commands:
                # Already recorded
                commands.remove(ans.value)
    if commands:
        # New commands left; find the highest suffix already in use
        max_n = 0
        for n in names:
            if "." in n:
                nn = int(n.rsplit(".", 1)[-1])
                if nn > max_n:
                    max_n = nn
        #
        ntpl = "%s.%%d" % s_name
        for nn, cmd in enumerate(sorted(commands)):
            # FIX: number new answers after the highest existing suffix.
            # The original computed max_n but never used it, numbering new
            # answers from 1 and colliding with existing names.
            spec.answers += [SpecAnswer(name=ntpl % (max_n + nn + 1), type="cli", value=cmd)]
        changed = True
    if changed:
        spec.save()
class Command(BaseCommand):
    """
    Import managed objects and their config history from rancid.

    Reads router.db / .cloginrc / hosts files, creates ManagedObjects and
    imports CVS config revisions into GridVCS.
    """
    logger = logging.getLogger("main")
    TZ = pytz.timezone(noc.settings.TIME_ZONE)
    TMP = "/tmp/noc-import-rancid"
    help = "Import data from rancid"

    def add_arguments(self, parser):
        # FIX: the original nested every add_argument() call inside an outer
        # parser.add_argument(...), passing their None results as positional
        # arguments — broken at runtime. Register each option directly.
        parser.add_argument("--routerdb", action="append", dest="routerdb",
                            help="Path to the router.db")
        parser.add_argument("--cloginrc", action="append", dest="cloginrc",
                            help="Path to the .cloginrc")
        parser.add_argument("--hosts", action="append", dest="hosts",
                            help="Path to the /etc/hosts")
        parser.add_argument("--repo", action="store", dest="repo",
                            help="CVS repository")
        parser.add_argument("--repo-prefix", action="store", dest="repoprefix",
                            help="File path prefix to checkout from repo")
        parser.add_argument("--dry-run", action="store_true", dest="dry_run",
                            help="Check only, do not write to database")
        parser.add_argument("--tags", action="append", dest="tags",
                            help="Mark created managed objects by tags")
        parser.add_argument("--profile", action="store", dest="object_profile",
                            help="Set managed object profile for created objects",
                            default="default")
        parser.add_argument("--domain", action="store", dest="domain",
                            help="Set administrative domain for created objects",
                            default="default")
        parser.add_argument("--pool", action="store", dest="pool",
                            help="Set pool for created objects",
                            default="default")
        parser.add_argument("--shard", action="store", dest="shard",
                            help="Shard import to separate processes")

    # rancid device type -> NOC profile name.
    # FIX: the original stored Profile.get_by_name(...) results, resolving
    # profiles at import time — before connect() — and then re-resolved the
    # already-resolved objects in handle(). Store names; resolve lazily.
    PROFILE_MAP = {
        "cisco": "Cisco.IOS",
        "juniper": "Juniper.JUNOS",
    }

    rx_f = re.compile(r"^RCS file: (?P<file>.+?),v$", re.MULTILINE | re.DOTALL)
    rx_fn = re.compile(r"^Working file: (?P<fn>.+?)$", re.MULTILINE | re.DOTALL)
    rx_rev = re.compile(
        r"^-----+\n"
        r"revision (?P<rev>\S+)\n"
        r"date: (?P<date>\S+ \S+(?: \S+)?);.+?\nstate: (?P<state>\S+)",
        re.MULTILINE | re.DOTALL,
    )
    # Regular expression to split config
    # Config is a final part
    SPLIT_MAP = {
        "Cisco.IOS": re.compile(r"^\n", re.MULTILINE),
        "Juniper.JUNOS": re.compile(r"^#[^#>\n]+> show configuration\s*\n", re.MULTILINE),
    }

    def parse_hosts(self, hosts):
        """
        Parse /etc/hosts
        :returns: dict of name -> ip
        """
        r = {}
        for path in hosts:
            self.logger.info("Reading hosts from %s", path)
            with open(path) as f:
                for line in f.readlines():
                    if "#" in line:
                        line = line.split("#", 1)[0]
                    line = line.strip()
                    if not line:
                        continue
                    if ":" in line:
                        # Skip IPv6
                        continue
                    parts = line.split()
                    # Every alias maps to the first (address) column
                    for p in parts[1:]:
                        r[p] = parts[0]
        return r

    def parse_routerdb(self, routerdb):
        """
        Parse router.db files.
        :returns: dict of host name -> profile name ("up" hosts only)
        """
        rdb = {}  # Name -> profile
        for path in routerdb:
            self.logger.info("Reading routers from %s", path)
            with open(path) as f:
                for line in f.readlines():
                    if "#" in line:
                        line = line.split("#", 1)[0]
                    line = line.strip()
                    if not line:
                        continue
                    r = line.split(":")
                    if len(r) != 3:
                        continue
                    name, t, s = r
                    if s != "up":
                        self.logger.debug("Skipping %s", name)
                        continue
                    p = self.PROFILE_MAP.get(t)
                    if not p:
                        self.logger.info("Unknown type '%s'. Skipping", t)
                        continue
                    rdb[name] = p
        return rdb

    def parse_cloginrc(self, cloginrc):
        """
        Parse .cloginrc files.
        :returns: (login, defaults) where login is host -> {var -> value}
            and defaults is var -> value for 3-token lines
        """
        login = {}
        defaults = {}
        for path in cloginrc:
            self.logger.info("Reading cloginrc from %s", path)
            with open(path) as f:
                for line in f.readlines():
                    if "#" in line:
                        line = line.split("#", 1)[0]
                    line = line.strip()
                    if not line:
                        continue
                    line = line.replace("\t", " ")
                    r = line.split()
                    if len(r) > 4:
                        # Value contains spaces
                        op, v, host = r[:3]
                        value = " ".join(r[3:])
                    elif len(r) == 3:
                        # No host: a default setting
                        op, v, value = r
                        defaults[v] = value
                        continue
                    else:
                        op, v, host, value = r
                    if op != "add":
                        continue
                    if host not in login:
                        login[host] = {}
                    login[host][v] = value
        return login, defaults

    def index_cvs(self, repo):
        """
        Index CVS repository.
        :returns: dict of file name -> list of (revision, tz-aware datetime)
        """
        r = {}  # path -> (revision, date)
        p = subprocess.Popen(["cvs", "log"], cwd=repo, stdout=subprocess.PIPE)
        data = p.stdout.read()
        parts = self.rx_f.split(data)[2::2]
        for data in parts:
            match = self.rx_fn.search(data)
            if not match:
                continue
            fn = match.group("fn")
            r[fn] = []
            for match in self.rx_rev.finditer(data):
                # if match.group("state").lower() == "dead":
                #     continue  # Ignore object replacement
                rev = match.group("rev")
                date = match.group("date")
                ds = date.split()
                # CVS emits either slash- or dash-separated dates
                if "/" in ds[0]:
                    fmt = "%Y/%m/%d %H:%M:%S"
                else:
                    fmt = "%Y-%m-%d %H:%M:%S"
                if len(ds) == 3:
                    # Date with TZ: drop the trailing zone token
                    date = "%s %s" % (ds[0], ds[1])
                r[fn] += [(
                    rev,
                    self.TZ.normalize(
                        pytz.utc.localize(datetime.datetime.strptime(date, fmt))
                    ),
                )]
        return r

    def handle(self, *args, **options):
        connect()
        if options["verbosity"] >= 2:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)
        for h in self.logger.handlers:
            h.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
        # Validate options
        if not options["routerdb"]:
            raise CommandError("No routerdb given")
        if not options["cloginrc"]:
            raise CommandError("No cloginrc given")
        if not options["hosts"]:
            options["hosts"] = ["/etc/hosts"]
        if not options["repo"]:
            raise CommandError("No CVS repository")
        repo_prefix = options.get("repoprefix") or ""
        if not options["object_profile"]:
            raise CommandError("No object profile set")
        try:
            object_profile = ManagedObjectProfile.objects.get(
                name=options["object_profile"].strip())
        except ManagedObjectProfile.DoesNotExist:
            raise CommandError("Invalid object profile: %s" % options["object_profile"])
        if not options["domain"]:
            raise CommandError("No administrative domain set")
        try:
            domain = AdministrativeDomain.objects.get(name=options["domain"].strip())
        except AdministrativeDomain.DoesNotExist:
            raise CommandError("Invalid administrative domain: %s" % options["domain"])
        if not options["pool"]:
            raise CommandError("No pool set")
        try:
            # FIX: the original looked the pool up by options["domain"]
            pool = Pool.objects.get(name=options["pool"].strip())
        except Pool.DoesNotExist:
            raise CommandError("Invalid pool: %s" % options["pool"])
        shard_member = 0
        shard_members = 0
        if options.get("shard"):
            shard = options["shard"]
            if "/" not in shard:
                raise CommandError("Shard must be <member>/<members>")
            shard_member, shard_members = [int(x) for x in shard.split("/")]
        tags = []
        if options["tags"]:
            for t in options["tags"]:
                tags += [x.strip() for x in t.split(",")]
        self.dry_run = bool(options["dry_run"])
        #
        if not os.path.isdir(self.TMP):
            os.mkdir(self.TMP)
        #
        revisions = self.index_cvs(options["repo"])
        # Read configs
        hosts = self.parse_hosts(options["hosts"])
        rdb = self.parse_routerdb(options["routerdb"])
        login, ldefaults = self.parse_cloginrc(options["cloginrc"])
        # Process data
        n = 0
        count = len(rdb)
        for name in sorted(rdb):
            if shard_members:
                if n % shard_members != shard_member:
                    n += 1
                    continue  # Processed by other shard
            self.logger.debug("[%s/%s] Processing host %s", n, count, name)
            n += 1
            profile = Profile.get_by_name(rdb[name])
            address = hosts.get(name)
            if not address:
                # @todo: Resolve
                self.logger.info("Cannot resolve address for %s. Skipping", name)
                continue
            ld = login.get(name, ldefaults)
            if not ld:
                self.logger.info("No credentials for %s. Skipping", name)
                continue
            user = ld.get("user")
            password = ld.get("password")
            if "method" in ld and "ssh" in ld["method"]:
                method = "ssh"
            else:
                method = "telnet"
            self.logger.info(
                "Managed object found: %s (%s, %s://%s@%s/)",
                name, profile.name, method, user, address,
            )
            if not self.dry_run:
                try:
                    mo = ManagedObject.objects.get(Q(name=name) | Q(address=address))
                    self.logger.info("Already in the database")
                except ManagedObject.DoesNotExist:
                    self.logger.info("Creating managed object %s", name)
                    mo = ManagedObject(
                        name=name,
                        object_profile=object_profile,
                        administrative_domain=domain,
                        pool=pool,
                        scheme=SSH if method == "ssh" else TELNET,
                        address=address,
                        profile=profile,
                        user=user,
                        password=password,
                        trap_source_ip=address,
                        tags=tags,
                    )
                    mo.save()
            if name not in revisions:
                self.logger.error("Cannot find config for %s", name)
                continue
            if not self.dry_run:
                self.import_revisions(options["repo"], repo_prefix, mo, name,
                                      revisions[name])

    def get_diff(self, repo, name, r0, r1):
        """Return the CVS diff of *name* between revisions r0 and r1."""
        p = subprocess.Popen(
            ["cvs", "diff", "-r%s" % r0, "-r%s" % r1, name],
            cwd=repo, stdout=subprocess.PIPE)
        return p.stdout.read()

    def import_revisions(self, repo, repo_prefix, mo, name, revisions):
        """
        Import CVS file revisions into GridVCS, oldest first.
        """
        # NOTE: an unused nested write_file() helper was removed here
        path = os.path.join(self.TMP, name)
        lr = len(revisions)
        n = 1
        gridvcs = GridVCS("config")
        split_re = self.SPLIT_MAP[mo.profile.name]
        for rev, ts in reversed(revisions):
            self.logger.debug("%s: importing rev %s [%s/%s]", name, rev, n, lr)
            n += 1
            try:
                subprocess.check_call(
                    "cvs co -p -r%s -f %s > %s" % (
                        rev, os.path.join(repo_prefix, name), path),
                    cwd=repo,
                    shell=True,
                )
            except subprocess.CalledProcessError as e:
                self.logger.error("Failed to import %s@%s. Skipping", name, rev)
                self.logger.error("CVS reported: %s", e)
                continue
            if not self.dry_run:
                with open(path, "r") as f:
                    data = f.read()
                # Strip everything before the config itself
                data = split_re.split(data, 1)[-1]
                # Save to GridVCS
                gridvcs.put(mo.id, data, ts=ts)
        if os.path.exists(path):
            os.unlink(path)
def handle(self, *args, **options):
    """
    Import managed objects and config revisions from rancid data files.

    Validates options, indexes the CVS repository, then creates a
    ManagedObject per "up" router and imports its config history.
    :raises CommandError: on missing or invalid options
    """
    connect()
    if options["verbosity"] >= 2:
        self.logger.setLevel(logging.DEBUG)
    else:
        self.logger.setLevel(logging.INFO)
    for h in self.logger.handlers:
        h.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
    # Validate options
    if not options["routerdb"]:
        raise CommandError("No routerdb given")
    if not options["cloginrc"]:
        raise CommandError("No cloginrc given")
    if not options["hosts"]:
        options["hosts"] = ["/etc/hosts"]
    if not options["repo"]:
        raise CommandError("No CVS repository")
    repo_prefix = options.get("repoprefix") or ""
    if not options["object_profile"]:
        raise CommandError("No object profile set")
    try:
        object_profile = ManagedObjectProfile.objects.get(
            name=options["object_profile"].strip())
    except ManagedObjectProfile.DoesNotExist:
        raise CommandError("Invalid object profile: %s" % options["object_profile"])
    if not options["domain"]:
        raise CommandError("No administrative domain set")
    try:
        domain = AdministrativeDomain.objects.get(name=options["domain"].strip())
    except AdministrativeDomain.DoesNotExist:
        raise CommandError("Invalid administrative domain: %s" % options["domain"])
    if not options["pool"]:
        raise CommandError("No pool set")
    try:
        # FIX: the original looked the pool up by options["domain"],
        # while the error message below reports options["pool"]
        pool = Pool.objects.get(name=options["pool"].strip())
    except Pool.DoesNotExist:
        raise CommandError("Invalid pool: %s" % options["pool"])
    shard_member = 0
    shard_members = 0
    if options.get("shard"):
        shard = options["shard"]
        if "/" not in shard:
            raise CommandError("Shard must be <member>/<members>")
        shard_member, shard_members = [int(x) for x in shard.split("/")]
    tags = []
    if options["tags"]:
        for t in options["tags"]:
            tags += [x.strip() for x in t.split(",")]
    self.dry_run = bool(options["dry_run"])
    #
    if not os.path.isdir(self.TMP):
        os.mkdir(self.TMP)
    #
    revisions = self.index_cvs(options["repo"])
    # Read configs
    hosts = self.parse_hosts(options["hosts"])
    rdb = self.parse_routerdb(options["routerdb"])
    login, ldefaults = self.parse_cloginrc(options["cloginrc"])
    # Process data
    n = 0
    count = len(rdb)
    for name in sorted(rdb):
        if shard_members:
            if n % shard_members != shard_member:
                n += 1
                continue  # Processed by other shard
        self.logger.debug("[%s/%s] Processing host %s", n, count, name)
        n += 1
        profile = Profile.get_by_name(rdb[name])
        address = hosts.get(name)
        if not address:
            # @todo: Resolve
            self.logger.info("Cannot resolve address for %s. Skipping", name)
            continue
        ld = login.get(name, ldefaults)
        if not ld:
            self.logger.info("No credentials for %s. Skipping", name)
            continue
        user = ld.get("user")
        password = ld.get("password")
        if "method" in ld and "ssh" in ld["method"]:
            method = "ssh"
        else:
            method = "telnet"
        self.logger.info(
            "Managed object found: %s (%s, %s://%s@%s/)",
            name, profile.name, method, user, address,
        )
        if not self.dry_run:
            try:
                mo = ManagedObject.objects.get(Q(name=name) | Q(address=address))
                self.logger.info("Already in the database")
            except ManagedObject.DoesNotExist:
                self.logger.info("Creating managed object %s", name)
                mo = ManagedObject(
                    name=name,
                    object_profile=object_profile,
                    administrative_domain=domain,
                    pool=pool,
                    scheme=SSH if method == "ssh" else TELNET,
                    address=address,
                    profile=profile,
                    user=user,
                    password=password,
                    trap_source_ip=address,
                    tags=tags,
                )
                mo.save()
        if name not in revisions:
            self.logger.error("Cannot find config for %s", name)
            continue
        if not self.dry_run:
            self.import_revisions(options["repo"], repo_prefix, mo, name,
                                  revisions[name])
def get_data(self, request, **kwargs):
    """
    Report link counts per pool: how many polled objects have 0, 1, 2
    or more than 2 links, with percentages of all polled objects.
    """
    data = []
    # Count links per managed object from the links collection
    value = get_db()["noc.links"].with_options(
        read_preference=ReadPreference.SECONDARY_PREFERRED
    ).aggregate([
        {"$unwind": "$interfaces"},
        {"$lookup": {
            "from": "noc.interfaces",
            "localField": "interfaces",
            "foreignField": "_id",
            "as": "int",
        }},
        {"$group": {"_id": "$int.managed_object", "count": {"$sum": 1}}},
    ])
    # Buckets: 0/1/2 links and >2 links (bucket 3)
    count = {0: set([]), 1: set([]), 2: set([]), 3: set([])}
    ap = AuthProfile.objects.filter(name__startswith="TG")
    for v in value:
        if v["count"] > 2:
            count[3].add(v["_id"][0])
            continue
        if not v["_id"]:
            self.logger.warning("No IDS in response query")
            continue
        count[v["count"]].add(v["_id"][0])
    for p in Pool.objects.order_by("name"):
        if p.name == "P0001":
            continue
        data += [SectionRow(name=p.name)]
        smos = set(
            ManagedObject.objects.filter(pool=p, is_managed=True)
            .exclude(profile=Profile.get_by_name(GENERIC_PROFILE))
            .exclude(auth_profile__in=ap)
            .values_list('id', flat=True)
        )
        all_p = 100.0 / len(smos) if len(smos) else 1.0
        data += [("All polling", len(smos))]
        for c in count:
            in_pool = len(count[c].intersection(smos))
            if c == 3:
                data += [("More 3", in_pool, "%.2f %%" % round(in_pool * all_p, 2))]
                continue
            # FIX: the closing paren was misplaced — round() was called
            # without its precision argument and the stray "2" became a
            # fourth tuple element, breaking the 3-column layout
            data += [(c, in_pool, "%.2f %%" % round(in_pool * all_p, 2))]
        # 0 links - All discovering - summary with links
        s0 = len(smos) - sum([d[1] for d in data[-3:]])
        data.pop(-4)
        data.insert(-3, (0, s0, "%.2f %%" % round(s0 * all_p, 2)))
    return self.from_dataset(
        title=self.title,
        columns=[_("Links count"), _("MO Count"), _("Percent at All")],
        data=data)
def handle(
    self, paths, profile, format, report=None, reject=None, progress=False, *args, **options
):
    """
    Classify events read from the given files and print per-class statistics,
    optionally writing a CSV report and a reject list of unclassified messages.
    """
    connect()
    assert profile_loader.has_profile(profile), "Invalid profile: %s" % profile
    if report:
        report_writer = csv.writer(
            report, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
        )
        report_writer.writerow(["message", "event class", "rule name", "vars"])
    started = time.time()
    ruleset = RuleSet()
    ruleset.load()
    self.print("Ruleset load in %.2fms" % ((time.time() - started) * 1000))
    reader = getattr(self, "read_%s" % format, None)
    assert reader, "Invalid format %s" % format
    # Synthetic object all parsed events are attributed to
    self.managed_object = ManagedObject(
        id=1, name="test", address="127.0.0.1", profile=Profile.get_by_name(profile)
    )
    started = time.time()
    stats = defaultdict(int)
    total = 0
    for p in paths:
        if not os.path.isfile(p):
            continue
        for f in iter_open(p):
            for event in reader(f):
                e_vars = event.raw_vars.copy()
                if event.source == "SNMP Trap":
                    e_vars.update(MIB.resolve_vars(event.raw_vars))
                rule, r_vars = ruleset.find_rule(event, e_vars)
                if report:
                    report_writer.writerow(
                        [event.raw_vars["message"], rule.event_class.name, rule.name, r_vars]
                    )
                if reject and rule.is_unknown:
                    reject.write(f'{event.raw_vars["message"]}\n')
                stats[rule.event_class.name] += 1
                total += 1
                if progress and total % 1000 == 0:
                    self.print("%d records processed" % total)
    dt = time.time() - started
    self.print(
        "%d events processed in %.2fms (%.fevents/sec)"
        % (total, dt * 1000, float(total) / dt)
    )
    if not stats:
        return
    # Prepare statistics, most frequent classes first
    ranked = sorted(stats.items(), key=operator.itemgetter(1), reverse=True)
    s_total = sum(v for k, v in stats.items() if not self.is_ignored(k))
    table = [["Events", "%", "Event class"]]
    for ecls, qty in ranked:
        table += [[str(qty), "%3.2f%%" % (float(stats[ecls] * 100) / float(total)), ecls]]
    # Calculate classification quality
    table += [["", "%3.2f%%" % (float(s_total * 100) / total), "Classification Quality"]]
    # Ruleset hit rate
    rs_rate = float(metrics["rules_checked"].value) / float(total)
    table += [["", "%.2f" % rs_rate, "Rule checks per event"]]
    # Dump table
    self.print("Event classes summary:")
    self.print(format_table([4, 6, 10], table))
def get_profile(name):
    """Resolve and return the SA Profile registered under *name*."""
    return Profile.get_by_name(name)