class VersionInventoryJob(MODiscoveryJob):
    """
    Discovery job storing platform/version data gathered by the
    get_version map task into a VersionReport.
    """
    name = "version_inventory"
    map_task = "get_version"
    system_notification = "sa.version_inventory"
    ignored = not config.getboolean("version_inventory", "enabled")
    to_save = config.getboolean("version_inventory", "save")

    def handler(self, object, result):
        """
        Flatten the get_version result (inlining the nested
        "attributes" dict into the top level) and submit it.
        :param object: managed object
        :param result: get_version script output
        :return: True on success
        """
        flat = {}
        for key, value in result.items():
            if key == "attributes":
                # Inline the nested attributes dict
                flat.update(value)
            else:
                flat[key] = value
        self.report = VersionReport(self, to_save=self.to_save)
        self.report.submit(flat)
        self.report.send()
        return True

    def can_run(self):
        # Run only when enabled in the object profile
        if not super(VersionInventoryJob, self).can_run():
            return False
        return self.object.object_profile.enable_version_inventory

    def get_failed_interval(self):
        return self.object.object_profile.version_inventory_min_interval
class VLANDiscoveryJob(MODiscoveryJob):
    """
    Discovery job feeding VLANs reported by get_vlans into the
    object's VC domain via a VLANReport.
    """
    name = "vlan_discovery"
    map_task = "get_vlans"
    ignored = not config.getboolean("vlan_discovery", "enabled")
    to_save = config.getboolean("vlan_discovery", "save")

    def handler(self, object, result):
        """
        Submit every reported VLAN into the object's VC domain.
        :param object: managed object
        :param result: list of {"vlan_id": ..., "name": ...} dicts
        :return: True on success
        """
        domain = object.vc_domain
        self.report = VLANReport(self, to_save=self.to_save)
        for vlan in result:
            self.report.submit(vc_domain=domain,
                               l1=vlan["vlan_id"],
                               name=vlan.get("name"))
        self.report.send()
        return True

    def can_run(self):
        # Requires profile flag AND an assigned VC domain
        if not super(VLANDiscoveryJob, self).can_run():
            return False
        return (self.object.object_profile.enable_vlan_discovery and
                self.object.vc_domain)

    def get_failed_interval(self):
        return self.object.object_profile.vlan_discovery_min_interval
def execute(self):
    """
    Run prefix-list download passes and report overall success.
    :return: True when all downloads succeeded, False otherwise
    """
    self._url_cache = {}  # URL -> data
    self.download_status = True
    # Read data-source switches from config
    for source in ("ripe", "arin", "radb"):
        option = "use_%s" % source
        setattr(self, option, config.getboolean("peer", option))
    # Run processing passes
    self.process_as_set_members()
    self.process_origin_route()
    return self.download_status
def rpsl(self):
    """
    Render RPSL import/export statements for this peer.
    :return: RPSL text (import line, newline, export line)
    """
    out = []
    # import: clause
    out.append("import: from AS%d" % self.remote_asn)
    out.append(" at %s" % self.peering_point.hostname)
    import_actions = []
    local_pref = self.effective_local_pref
    if local_pref:
        # Select pref meaning: RPSL-style pref is inverted
        # (lower is better) relative to BGP local-pref
        if config.getboolean("peer", "rpsl_inverse_pref_style"):
            import_actions.append("pref=%d;" % (65535 - local_pref))
        else:
            import_actions.append("pref=%d;" % local_pref)
    import_med = self.effective_import_med
    if import_med:
        import_actions.append("med=%d;" % import_med)
    if import_actions:
        out.append(" action " + " ".join(import_actions))
    out.append(" accept %s\n" % self.import_filter)
    # export: clause
    out.append("export: to AS%s at %s" % (self.remote_asn,
                                          self.peering_point.hostname))
    export_actions = []
    export_med = self.effective_export_med
    if export_med:
        export_actions.append("med=%d;" % export_med)
    if export_actions:
        out.append(" action " + " ".join(export_actions))
    out.append(" announce %s" % self.export_filter)
    return "".join(out)
def __init__(self, *args, **kwargs): ExtApplication.__init__(self, *args, **kwargs) # # Parse themes self.default_theme = config.get("customization", "default_theme") self.themes = {} # id -> {name: , css:} for o in config.options("themes"): if o.endswith(".name"): theme_id = o[:-5] nk = "%s.name" % theme_id ek = "%s.enabled" % theme_id if (config.has_option("themes", nk) and config.has_option("themes", ek) and config.getboolean("themes", ek)): self.themes[theme_id] = { "id": theme_id, "name": config.get("themes", nk).strip(), "css": "/static/pkg/extjs/packages/ext-theme-%s/build/resources/ext-theme-%s-all.css" % (theme_id, theme_id), "js": "/static/pkg/extjs/packages/ext-theme-%s/build/ext-theme-%s.js" % (theme_id, theme_id) } # Login restrictions self.restrict_to_group = self.get_group( config.get("authentication", "restrict_to_group")) self.single_session_group = self.get_group( config.get("authentication", "single_session_group")) self.mutual_exclusive_group = self.get_group( config.get("authentication", "mutual_exclusive_group")) self.idle_timeout = config.getint("authentication", "idle_timeout")
class STPLinkDiscoveryJob(LinkDiscoveryJob):
    """
    Link discovery via spanning tree: designated ports are remembered
    locally, ROOT/ALTERNATE ports are submitted as candidates keyed by
    the remote designated port id and resolved in a later pass.
    """
    name = "stp_discovery"
    map_task = "get_spanning_tree"
    method = "stp"
    ignored = not config.getboolean("stp_discovery", "enabled")
    # Candidates are stored as raw port ids, resolved only in
    # process_pending_checks, so strict checking must be off
    strict_pending_candidates_check = False

    def convert_port_id(self, port_id):
        """
        Normalize a dotted STP port id ("<priority>.<port>") to a hex
        string: priority is divided by 16 (presumably to drop the low
        nibble -- TODO confirm) and packed above the 12-bit port number.
        :param port_id: "<priority>.<port>" string
        :return: hex string without leading zeroes
        """
        l, r = [int(x) for x in port_id.split(".")]
        l //= 16
        return "%x" % ((l << 12) + r)

    def process_result(self, object, result):
        """
        Collect designated port ids and submit ROOT/ALTERNATE ports
        as pending link candidates.
        :param object: local managed object
        :param result: get_spanning_tree output
        """
        self.n_cache = {}  # bridge_id -> object
        self.desg_port_id = {}  # port_id -> name
        for i in result["instances"]:
            for iface in i["interfaces"]:
                if iface["role"] == "designated":
                    # Store designated port id for pending link processing
                    pi = self.convert_port_id(iface["port_id"])
                    self.desg_port_id[pi] = iface["interface"]
                elif iface["role"] in ("root", "alternate"):
                    # ROOT and ALTERNATE ports are pending check candidates
                    # Get remote object by bridge id
                    remote_object = self.get_neighbor_by_mac(
                        iface["designated_bridge_id"])
                    if not remote_object:
                        continue
                    # Commit remote port id instead of
                    # interface name.
                    # Will be resolved later in load_pending_checks
                    pi = self.convert_port_id(iface["designated_port_id"])
                    self.submit_candidate(iface["interface"],
                                          remote_object, pi)

    def process_pending_checks(self, object):
        """
        Resolve pending candidates: map stored port ids to this
        object's designated interfaces and submit the links.
        :param object: local managed object
        """
        for remote_object in self.p_candidates:
            for local_port_id, remote_interface in self.p_candidates[remote_object]:
                local_interface = self.desg_port_id.get(local_port_id)
                if local_interface:
                    self.submit_link(
                        object, local_interface,
                        remote_object, remote_interface)
                    self.submited.add(
                        (local_port_id, remote_object, remote_interface))
                else:
                    self.debug("Designated port %s is not found in %s" % (
                        local_port_id, ", ".join(self.desg_port_id.keys())))

    def resolve_self_links(self, object):
        """
        Submit links between two ports of the same object,
        deduplicating symmetric (l, r)/(r, l) pairs.
        :param object: local managed object
        """
        if object in self.candidates:
            sl = set()
            for l, r in self.candidates[object]:
                if (l and r and l != r and
                        (l, r) not in sl and (r, l) not in sl):
                    sl.add((l, r))
            for l, r in sl:
                self.submit_link(object, l, object, self.desg_port_id[r])
class OAMLinkDiscoveryJob(LinkDiscoveryJob):
    """
    OAM Link Discovery
    """
    name = "oam_discovery"
    map_task = "get_oam_status"
    method = "oam"
    ignored = not config.getboolean("oam_discovery", "enabled")

    def process_result(self, object, result):
        """
        Build a remote-MAC -> local-interface map from OAM neighbors
        with the "L" capability and submit link candidates when both
        sides resolve unambiguously.
        :param object: local managed object
        :param result: get_oam_status output
        """
        # remote mac -> list of local interfaces which see it
        mac_map = defaultdict(list)
        for neighbor in result:
            if "L" in neighbor["caps"]:
                mac_map[neighbor["remote_mac"]].append(neighbor["interface"])
        # Resolve links
        for rmac, local_ifaces in mac_map.items():
            if len(local_ifaces) != 1:
                continue  # Ambiguous on the local side
            # Try to find remote interface by MAC
            matches = list(Interface.objects.filter(mac=rmac,
                                                    type="physical"))
            if len(matches) != 1:
                continue  # No exact match by MAC
            remote_iface = matches[0]
            self.submit_candidate(
                local_interface=local_ifaces[0],
                remote_object=remote_iface.managed_object,
                remote_interface=remote_iface.name
            )
class UDLDLinkDiscoveryJob(LinkDiscoveryJob):
    """
    UDLD Link Discovery
    """
    name = "udld_discovery"
    map_task = "get_udld_neighbors"
    method = "udld"
    ignored = not config.getboolean("udld_discovery", "enabled")

    def process_result(self, object, result):
        """
        Submit a link candidate for every UDLD neighbor resolvable
        by device id; remember the local device id for registration.
        :param object: local managed object
        :param result: get_udld_neighbors output
        """
        self.n_cache = {}  # device_id -> object
        local_id = None  # Local IDs
        for n in result:
            local_id = n["local_device"]
            self.n_cache[local_id] = object
            remote_object = self.get_neighbor(n["remote_device"])
            if not remote_object:
                continue
            self.submit_candidate(
                n["local_interface"],
                remote_object,
                remote_object.profile.convert_interface_name(
                    n["remote_interface"]))
        # Update UDLD id
        if local_id:
            self.update_udld_id(object, local_id)

    def get_neighbor(self, device_id):
        """
        Find neighbor by chassis id and chassis subtype
        :param device_id: remote UDLD device id
        :return: managed object or None
        """
        # Get cached
        n = self.n_cache.get(device_id)
        if n:
            return n
        n = DiscoveryID.objects.filter(udld_id=device_id).first()
        if n:
            n = n.object
        # Negative results are cached too (n may be None)
        self.n_cache[device_id] = n
        return n

    def update_udld_id(self, object, local_id):
        """
        Update UDLD id if necessary
        :param object: local managed object
        :param local_id: UDLD device id reported by the object
        :return:
        """
        n = DiscoveryID.objects.filter(object=object.id).first()
        if n:
            # Found
            if n.udld_id != local_id:
                self.info("Setting local UDLD id to '%s'" % local_id)
                n.udld_id = local_id
                n.save()
        else:
            # Not Found
            self.info("Setting local UDLD id to '%s'" % local_id)
            DiscoveryID(object=object, udld_id=local_id).save()
def solutions_roots():
    """
    Generator returning active solutions roots
    (relative paths "solutions/<vendor>/<name>").
    """
    for option in config.options("solutions"):
        if not config.getboolean("solutions", option):
            continue  # Solution disabled
        vendor, name = option.split(".", 1)
        yield os.path.join("solutions", vendor, name)
def read_solutions_configs(config, name):
    """
    Overlay config files of every enabled solution onto *config*.
    :param config: ConfigParser-like object listing solutions
    :param name: base config file name (extension is ignored)
    """
    base = os.path.splitext(name)[0]
    # Update config with solution's one
    for option in config.options("solutions"):
        if not config.getboolean("solutions", option):
            continue  # Solution disabled
        vendor, solution = option.split(".", 1)
        path = os.path.join("solutions", vendor, solution, "etc", base)
        config.read([path + ".defaults", path + ".conf"])
def resolve_as_set_prefixes(cls, as_set, optimize=None):
    """
    Resolve an as-set to its prefixes.
    :param as_set: as-set name
    :param optimize: force optimization on/off; None selects the
        config-driven policy (optimize when enabled and the prefix
        count reaches the configured threshold)
    :return: optimized set of prefixes, or the raw prefixes
    """
    prefixes = cls._resolve_as_set_prefixes(as_set)
    if optimize is None:
        # Fall back to config-driven policy
        optimize = (
            config.getboolean("peer", "prefix_list_optimization") and
            len(prefixes) >= config.getint(
                "peer", "prefix_list_optimization_threshold"))
    if optimize:
        return set(optimize_prefix_list(prefixes))
    return prefixes
def init_solutions():
    """
    Initialize solutions and load modules
    """
    from noc.main.models import CustomField

    CustomField.install_fields()
    enabled = [sn for sn in config.options("solutions")
               if config.getboolean("solutions", sn)]
    for sn in enabled:
        load_solution(sn)
class CapsDiscoveryJob(MODiscoveryJob):
    """
    Discovery job updating object capabilities from the
    get_capabilities map task result.
    """
    name = "caps_discovery"
    map_task = "get_capabilities"
    ignored = not config.getboolean("caps_discovery", "enabled")
    to_save = config.getboolean("caps_discovery", "save")

    def handler(self, object, result):
        """
        Store discovered capabilities on the managed object.
        :param object: managed object
        :param result: capabilities mapping from get_capabilities
        :return: True on success
        """
        self.logger.info("Set capabilities: %s", result)
        self.object.update_caps(result)
        return True

    def can_run(self):
        # Run only when enabled in the object profile
        return (super(CapsDiscoveryJob, self).can_run() and
                self.object.object_profile.enable_caps_discovery)

    def get_failed_interval(self):
        # NOTE(review): reuses version_inventory_min_interval -- looks like
        # a copy-paste from VersionInventoryJob; confirm whether a dedicated
        # caps discovery interval was intended
        return self.object.object_profile.version_inventory_min_interval
def __init__(self):
    """
    Initialize application site registry: URL patterns, menu,
    reports, named-view proxy and logging flags.
    """
    self.apps = {}  # app_id -> app instance
    # Install admin: namespace
    # for model applications
    self.admin_patterns = patterns("")
    # Django 1.4 compatibility.
    # (Removed dead code: urlpatterns was first assigned patterns("")
    # and immediately overwritten here without ever being read.)
    self.urlpatterns = [
        RegexURLResolver("^admin/", self.admin_patterns, namespace="admin")
    ]
    self.urlresolvers = {}  # (module, app) -> RegexURLResolver
    self.menu = []
    self.menu_index = {}  # id -> menu item
    self.reports = []  # app_id -> title
    self.views = ProxyNode()  # Named views proxy
    self.testing_mode = hasattr(settings, "IS_TEST")
    # log_api_calls may be missing from older configs: treat absent as off
    self.log_api_calls = (config.has_option("main", "log_api_calls") and
                          config.getboolean("main", "log_api_calls"))
    self.log_sql_statements = config.getboolean("main",
                                                "log_sql_statements")
    self.app_contributors = defaultdict(set)
class AssetDiscoveryJob(MODiscoveryJob):
    """
    AssetDiscovery: collects inventory items reported by the
    get_inventory map task and feeds them into an AssetReport.
    """
    name = "asset_discovery"
    map_task = "get_inventory"
    ignored = not config.getboolean("asset_discovery", "enabled")
    to_save = config.getboolean("asset_discovery", "save")

    def handler(self, object, result):
        """
        Submit every discovered inventory item and stack membership
        to the asset report.
        :param object: managed object
        :param result: list of inventory item dicts from get_inventory
        :return: True on success
        """
        self.report = AssetReport(self, to_save=self.to_save)
        # self.report.find_managed()
        # Submit objects
        for o in result:
            self.debug("Submit %s" % str_dict(o))
            self.report.submit(
                type=o["type"],
                number=o.get("number"),
                builtin=o["builtin"],
                vendor=o.get("vendor"),
                part_no=o["part_no"],
                revision=o.get("revision"),
                serial=o.get("serial"),
                description=o.get("description")
            )
        # Assign stack members
        self.report.submit_stack_members()
        # self.report.submit_connections()
        # self.report.check_management()
        # Finish
        self.report.send()
        return True

    def can_run(self):
        # Fixed: direct attribute access instead of a redundant
        # getattr() with a constant attribute name
        return (super(AssetDiscoveryJob, self).can_run() and
                self.object.object_profile.enable_asset_discovery)
class IPDiscoveryJob(MODiscoveryJob):
    """
    Discovery job feeding per-VRF IP addresses reported by
    get_ip_discovery into the IPAM via an IPReport.
    """
    name = "ip_discovery"
    map_task = "get_ip_discovery"
    ignored = not config.getboolean("ip_discovery", "enabled")
    to_save = config.getboolean("ip_discovery", "save")

    def handler(self, object, result):
        """
        Submit every address of every known VRF; unknown VRFs are
        logged and skipped.
        :param object: managed object
        :param result: list of per-VRF dicts from get_ip_discovery
        :return: True on success
        """
        self.report = IPReport(self, to_save=self.to_save,
                               allow_prefix_restrictions=True)
        for vrf_data in result:
            vrf = vrf_cache.get_or_create(
                object, vrf_data["name"], vrf_data.get("rd", "0:0"))
            if vrf is None:
                self.info("Skipping unknown VRF '%s'" % vrf_data["name"])
                continue
            for addr in vrf_data["addresses"]:
                self.report.submit(
                    vrf=vrf,
                    address=addr["ip"],
                    interface=addr["interface"],
                    mac=addr["mac"])
        self.report.send()
        return True

    def can_run(self):
        if not super(IPDiscoveryJob, self).can_run():
            return False
        return self.object.object_profile.enable_ip_discovery

    def get_failed_interval(self):
        return self.object.object_profile.ip_discovery_min_interval
class ConfigDiscoveryJob(MODiscoveryJob):
    """
    Discovery job fetching device configuration via get_config
    and archiving it on the managed object.
    """
    name = "config_discovery"
    map_task = "get_config"
    threaded = True
    ignored = not config.getboolean("config_discovery", "enabled")
    to_save = config.getboolean("config_discovery", "save")

    def handler(self, object, result):
        """
        Archive the fetched configuration when saving is enabled.
        :param object: managed object
        :param result: configuration text from get_config
        :return: True on success
        """
        if self.to_save:
            object.save_config(result)
        return True

    def can_run(self):
        if not super(ConfigDiscoveryJob, self).can_run():
            return False
        return self.object.object_profile.enable_config_discovery

    def get_failed_interval(self):
        return self.object.object_profile.config_discovery_min_interval
def __init__(self):
    """
    Read LDAP authentication settings from the "authentication"
    config section.
    """
    super(NOCLDAPBackend, self).__init__()
    # Connection settings
    self.server = config.get("authentication", "ldap_server")
    self.bind_method = config.get("authentication", "ldap_bind_method")
    self.bind_dn = config.get("authentication", "ldap_bind_dn")
    self.bind_password = config.get("authentication", "ldap_bind_password")
    # User search settings
    self.users_base = config.get("authentication", "ldap_users_base")
    self.users_filter = config.get("authentication", "ldap_users_filter")
    self.required_group = config.get("authentication",
                                     "ldap_required_group")
    # Fixed misspelled attribute name; the old "requred_filter" is
    # kept as an alias for backward compatibility with existing callers
    self.required_filter = config.get("authentication",
                                      "ldap_required_filter")
    self.requred_filter = self.required_filter
    self.superuser_group = config.get("authentication",
                                      "ldap_superuser_group")
    self.superuser_filter = config.get("authentication",
                                       "ldap_superuser_filter")
    self.start_tls = config.getboolean("authentication", "ldap_start_tls")
def setup(cls):
    """
    Load i18n collection settings from config.

    Options have the form:
      collections.<name> = <comma-separated translation list>
      collections.<name>.allow_fuzzy = <bool>
    The collection name "global" maps to the None key.
    """
    from noc.settings import config
    for opt in config.options("i18n"):
        if not opt.startswith("collections."):
            continue
        cn = opt[12:]  # Strip "collections." prefix
        if cn.endswith(".allow_fuzzy"):
            # Fixed: was `cn = opt[:-12]`, which left the
            # "collections." prefix in the key, so ALLOW_FUZZY keys
            # never matched the TRANSLATIONS keys (or "global")
            cn = cn[:-12]  # Strip ".allow_fuzzy" suffix
            if cn == "global":
                cn = None
            cls.ALLOW_FUZZY[cn] = config.getboolean("i18n", opt)
        else:
            if cn == "global":
                cn = None
            tr = [
                x.strip()
                for x in config.get("i18n", opt).split(",")
            ]
            # English is always present as a fallback
            if "en" not in tr:
                tr += ["en"]
            cls.TRANSLATIONS[cn] = tr
class CDPLinkDiscoveryJob(LinkDiscoveryJob):
    """
    CDP Link Discovery
    """
    name = "cdp_discovery"
    map_task = "get_cdp_neighbors"
    method = "cdp"
    ignored = not config.getboolean("cdp_discovery", "enabled")

    def process_result(self, object, result):
        """
        Submit a link candidate for every CDP neighbor whose
        device id resolves to a managed object.
        :param object: local managed object
        :param result: get_cdp_neighbors output
        """
        self.n_cache = {}  # device_id -> object
        for n in result["neighbors"]:
            remote_object = self.get_neighbor(n["device_id"])
            if not remote_object:
                continue
            # Normalize interface name with the remote platform profile
            remote_port = remote_object.profile.convert_interface_name(
                n["remote_interface"])
            self.submit_candidate(n["local_interface"], remote_object,
                                  remote_port)

    def get_neighbor(self, device_id):
        """
        Find neighbor by chassis id and chassis subtype
        :param device_id: CDP device id (hostname)
        :return: managed object or None
        """
        # Get cached
        n = self.n_cache.get(device_id)
        if n:
            return n
        n = DiscoveryID.objects.filter(hostname=device_id).first()
        if n:
            n = n.object
        elif "." not in device_id:
            # Sometimes, domain part is truncated.
            # Try to resolve anyway
            m = list(
                DiscoveryID.objects.filter(
                    hostname__startswith=device_id + "."))
            if len(m) == 1:
                n = m[0].object  # Exact match
        # Negative results are cached too (n may be None)
        self.n_cache[device_id] = n
        return n
def resolve_as_set_prefixes_maxlen(cls, as_set, optimize=None):
    """
    Generate prefixes for as-sets.
    Returns a list of (prefix, min length, max length) tuples,
    dropping prefixes longer than the configured max_prefix_length.
    :param as_set: as-set name
    :param optimize: force optimization on/off; None selects the
        config-driven policy
    """
    prefixes = cls._resolve_as_set_prefixes(as_set)
    max_len = config.getint("peer", "max_prefix_length")
    if optimize is None:
        # Fall back to config-driven policy
        optimize = (
            config.getboolean("peer", "prefix_list_optimization") and
            len(prefixes) >= config.getint(
                "peer", "prefix_list_optimization_threshold"))
    if optimize:
        # Optimization is enabled
        return [(p.prefix, p.mask, m)
                for p, m in optimize_prefix_list_maxlen(prefixes)
                if p.mask <= max_len]
    # Optimization is disabled
    return [(x.prefix, x.mask, x.mask)
            for x in sorted(IP.prefix(p) for p in prefixes)
            if x.mask <= max_len]
class UptimeDiscoveryJob(MODiscoveryJob):
    """
    Discovery job polling device uptime via get_uptime and
    registering it for reboot detection.
    """
    name = "uptime_discovery"
    map_task = "get_uptime"
    ignored = not config.getboolean("uptime_discovery", "enabled")

    def handler(self, object, result):
        """
        Register reported uptime; falsy results are ignored.
        :param object: managed object
        :param result: uptime value from get_uptime
        :return: True on success
        """
        self.logger.info("Received uptime %s", result)
        if result:
            Uptime.register(self.object, result)
        return True

    def can_run(self):
        if not super(UptimeDiscoveryJob, self).can_run():
            return False
        return self.object.object_profile.enable_uptime_discovery

    def get_failed_interval(self):
        return self.object.object_profile.uptime_discovery_min_interval
def view_desktop(self, request):
    """
    Render application root template
    with the collected application list and UI settings.
    :param request: HTTP request
    :return: rendered desktop.html response
    """
    cp = CPClient()
    # Collect Ext/Model applications registered on the site
    ext_apps = [a for a in self.site.apps
                if isinstance(self.site.apps[a], ExtApplication) or
                isinstance(self.site.apps[a], ModelApplication)]
    apps = [a.split(".") for a in sorted(ext_apps)]
    # Prepare settings
    favicon_url = config.get("customization", "favicon_url")
    # Guess favicon MIME type from the file extension; None when unknown
    if favicon_url.endswith(".png"):
        favicon_mime = "image/png"
    elif favicon_url.endswith(".jpg") or favicon_url.endswith(".jpeg"):
        favicon_mime = "image/jpeg"
    else:
        favicon_mime = None
    # Settings dict exposed to the desktop template
    setup = {
        "system_uuid": cp.system_uuid,
        "installation_name": config.get("customization",
                                        "installation_name"),
        "logo_url": config.get("customization", "logo_url"),
        "logo_width": config.get("customization", "logo_width"),
        "logo_height": config.get("customization", "logo_height"),
        "branding_color": config.get("customization", "branding_color"),
        "branding_background_color": config.get(
            "customization", "branding_background_color"),
        "favicon_url": favicon_url,
        "favicon_mime": favicon_mime,
        "debug_js": config.getboolean("main", "debug_js"),
        "install_collection": config.getboolean("develop",
                                                "install_collection"),
        "enable_gis_base_osm": config.getboolean("gis", "enable_osm"),
        "enable_gis_base_google_sat": config.getboolean(
            "gis", "enable_google_sat"),
        "enable_gis_base_google_roadmap": config.getboolean(
            "gis", "enable_google_roadmap"),
        "trace_extjs_events": config.getboolean("main",
                                                "trace_extjs_events"),
        "preview_theme": self.get_preview_theme(request)
    }
    theme = self.get_theme(request)
    return self.render(
        request, "desktop.html",
        apps=apps,
        setup=setup,
        theme=theme,
        theme_css=self.themes[theme]["css"],
        theme_js=self.themes[theme]["js"]
    )
class IDDiscoveryJob(MODiscoveryJob):
    """
    Discovery job registering object identity (chassis MAC ranges,
    hostname, router-id) from the get_discovery_id map task.
    """
    name = "id_discovery"
    map_task = "get_discovery_id"
    ignored = not config.getboolean("id_discovery", "enabled")

    def handler(self, object, result):
        """
        Log discovered identity and submit it to DiscoveryID.
        :param object: managed object
        :param result: get_discovery_id output
        :return: True on success
        """
        hostname = result.get("hostname")
        router_id = result.get("router_id")
        chassis_mac = result.get("chassis_mac")
        # Human-readable MAC range list for the log message
        if chassis_mac:
            mac_info = ", ".join(
                "%s - %s" % (m["first_chassis_mac"], m["last_chassis_mac"])
                for m in chassis_mac
            )
        else:
            mac_info = chassis_mac
        self.info(
            "Identity found: Chassis MACs = %s, hostname = %s, router-id = %s" % (
                mac_info, hostname, router_id))
        DiscoveryID.submit(object=object,
                           chassis_mac=chassis_mac,
                           hostname=hostname,
                           router_id=router_id)
        return True

    def can_run(self):
        if not super(IDDiscoveryJob, self).can_run():
            return False
        return self.object.object_profile.enable_id_discovery

    def get_failed_interval(self):
        return self.object.object_profile.id_discovery_min_interval
class MACDiscoveryJob(MODiscoveryJob):
    """
    MAC-table based discovery: stores dynamic MACs in the MAC DB and
    tries to derive point-to-point links from ports that see exactly
    one MAC per VLAN.
    """
    name = "mac_discovery"
    map_task = "get_mac_address_table"
    ignored = not config.getboolean("mac_discovery", "enabled")
    to_save = config.getboolean("mac_discovery", "save")

    def handler(self, object, result):
        """
        Store dynamic MACs and run topology checks on suitable ports.
        :param object: managed object
        :param result: get_mac_address_table output
        :return: True on success
        """
        seen = {}  # MAC -> vlan
        dups = set()
        # Detect SVI addresses seen in multiple vlans
        for v in result:
            if v["type"] == "D":
                mac = v["mac"]
                vlan = v["vlan_id"]
                if mac in seen and seen[mac] != vlan:
                    # Duplicated
                    dups.add(mac)
                else:
                    seen[mac] = vlan
        # Fill report
        port_macs = defaultdict(
            lambda: defaultdict(list))  # port -> vlan -> [macs]
        self.report = MACReport(self, to_save=self.to_save)
        vc_domain = VCDomain.get_for_object(self.object)
        for v in result:
            if v["type"] == "D" and v["interfaces"]:
                iface = v["interfaces"][0]
                port_macs[iface][v["vlan_id"]] += [v["mac"]]
                if v["mac"] not in dups:
                    # Save to MAC DB
                    self.report.submit(mac=v["mac"], vc_domain=vc_domain,
                                       vlan=v["vlan_id"],
                                       managed_object=object,
                                       if_name=iface)
        # Submit found MACs to database
        self.report.send()
        # Discover topology
        # Find suitable ports
        for port in port_macs:
            vlans = port_macs[port]
            if any(1 for vlan in vlans if len(vlans[vlan]) != 1):
                continue
            # Suitable port found, only one MAC in each vlan
            macs = [(vlan, vlans[vlan][0]) for vlan in vlans]
            self.check_port(port, macs)
        return True

    @classmethod
    def can_submit(cls, object):
        """
        Check object has bridge interfaces
        :param cls:
        :param object:
        :return:
        """
        return object.is_managed and bool(
            SubInterface.objects.filter(managed_object=object.id,
                                        enabled_afi="BRIDGE").first())

    def can_run(self):
        # Requires profile flag and at least one dereferencable
        # BRIDGE subinterface
        if not super(MACDiscoveryJob, self).can_run():
            return False
        if not self.object.object_profile.enable_mac_discovery:
            return False
        # Check object has bridge interfaces
        for si in SubInterface.objects.filter(
                managed_object=self.object.id, enabled_afi="BRIDGE"):
            try:
                iface = si.interface
            except Exception:
                continue  # Dereference failed
            # NOTE: per-interface profile check is disabled; any bridge
            # subinterface currently qualifies the object
            #if iface.profile.mac_discovery:
            #    return True
            return True
        # No suitable interfaces
        return False

    def get_failed_interval(self):
        return self.object.object_profile.mac_discovery_min_interval

    def can_link(self, iface):
        """
        Check interface is suitable for linking
        """
        return iface.type in ("physical", "management", "aggregated")

    def check_port(self, port, macs):
        """
        Check link candidate and submit link if any
        :param port: Local port name
        :param macs: [(vlan, mac), ...]
        :return:
        """
        # Local interface
        iface = self.get_interface_by_name(self.object, port)
        if not iface:
            return  # Not found
        # Check interface can be linked at all
        if not self.can_link(iface):
            return  # Not suitable type
        # Check interface is still unlinked
        if iface.is_linked:
            return  # Already linked
        # Find BRIDGE sub
        local_sub = iface.subinterface_set.filter(
            enabled_afi="BRIDGE").first()
        if not local_sub:
            return
        #
        if not local_sub.tagged_vlans:
            # Untagged port: match the single MAC against L3
            # subinterfaces carrying that MAC
            mac = macs[0][1]
            subs = [
                sub
                for sub in SubInterface.objects.filter(
                    enabled_afi__in=["IPv4", "IPv6"], mac=mac)
                if self.can_link(sub.interface)
            ]
            if len(subs) == 1:
                r_iface = subs[0].interface
                if not r_iface.is_linked:
                    self.submit_link(iface, r_iface)
        else:
            # Tagged port: every (vlan, mac) pair must resolve to the
            # same remote interface, covering all vlans of each MAC
            mac_vlans = defaultdict(list)
            for vlan, mac in macs:
                mac_vlans[mac] += [vlan]
            #
            r_iface = None
            for mac in mac_vlans:
                left = set(mac_vlans[mac])
                for sub in (SubInterface.objects.filter(
                        enabled_afi__in=["IPv4", "IPv6"], mac=mac)):
                    if not sub.vlan_ids:
                        break
                    if not self.can_link(sub.interface):
                        break
                    vlan = sub.vlan_ids[0]
                    if vlan in left:
                        if r_iface is None:
                            r_iface = sub.interface
                        elif r_iface != sub.interface:
                            return  # Interface mismatch
                        left.remove(vlan)
                if left:
                    return  # Not all vlans found
            if r_iface and not r_iface.is_linked:
                self.submit_link(iface, r_iface)

    def submit_link(self, local_iface, remote_iface):
        """
        Create a point-to-point link between two interfaces,
        skipping self-links and logging linking errors.
        """
        if local_iface.id == remote_iface.id:
            return
        self.debug("Linking %s and %s" % (local_iface, remote_iface))
        try:
            local_iface.link_ptp(remote_iface, method="mac")
        except ValueError, why:
            self.error("Error linking %s with %s: %s" % (
                local_iface, remote_iface, why))
class LLDPLinkDiscoveryJob(LinkDiscoveryJob):
    """
    LLDP link discovery: resolves neighbors by chassis id subtype and
    remote ports by port id subtype, with several fallback strategies.
    """
    name = "lldp_discovery"
    map_task = "get_lldp_neighbors"
    method = "lldp"
    ignored = not config.getboolean("lldp_discovery", "enabled")

    def process_result(self, object, result):
        """
        Submit a link candidate for every local port reporting
        exactly one LLDP neighbor.
        :param object: local managed object
        :param result: get_lldp_neighbors output
        """
        self.n_cache = {}  # (chassis_id, chassis_subtype) -> object
        for n in result:
            if len(n["neighbors"]) != 1:
                ## Not direct link
                continue
            # Resolve remote object
            ni = n["neighbors"][0]
            remote_object = self.get_neighbor(
                ni["remote_chassis_id"],
                ni["remote_chassis_id_subtype"])
            self.debug("get_neighbor(%s, %s) -> %s" % (
                ni["remote_chassis_id"],
                ni["remote_chassis_id_subtype"],
                remote_object))
            if not remote_object:
                # Object not found
                continue
            # Resolve remote interface
            remote_port = self.get_remote_port(remote_object,
                                               ni["remote_port"],
                                               ni["remote_port_subtype"])
            self.submit_candidate(
                n["local_interface"], remote_object, remote_port)

    def get_neighbor(self, chassis_id, chassis_subtype):
        """
        Find neighbor by chassis id and chassis subtype
        :param chassis_id:
        :param chassis_subtype: LLDP chassis id subtype code
        :return: managed object or None
        """
        # Get cached
        n = self.n_cache.get((chassis_id, chassis_subtype))
        if n:
            return n
        # Find by id; dispatch on chassis id subtype
        f = {
            4: self.get_neighbor_by_mac,  # macAddress(4)
            5: self.get_neighbor_by_ip,  # networkAddress(5)
            7: self.get_neighbor_by_local  # local(7)
        }.get(chassis_subtype)
        if f:
            n = f(chassis_id)
        else:
            n = None
        self.n_cache[(chassis_id, chassis_subtype)] = n
        return n

    def get_neighbor_by_ip(self, ip):
        """
        Resolve neighbor by router-id stored in DiscoveryID.
        :param ip: network address from the chassis id TLV
        :return: managed object or None
        """
        d = DiscoveryID.objects.filter(router_id=ip).first()
        if d:
            return d.object
        else:
            return None

    def get_neighbor_by_local(self, local):
        # local(7) chassis ids are not resolvable here
        pass

    def get_remote_port(self, object, remote_port, remote_port_subtype):
        """
        Resolve remote port name, dispatching on port id subtype.
        :param object: remote managed object
        :param remote_port: raw port id from the TLV
        :param remote_port_subtype: LLDP port id subtype code
        :return: port name, or None for unsupported subtypes
        """
        f = {
            1: self.get_remote_port_by_description,  # interfaceAlias(1)
            3: self.get_remote_port_by_mac,  # macAddress(3)
            5: self.get_remote_port_by_name,  # interfaceName(5)
            7: self.get_remote_port_by_local,  # local(7)
            128: self.get_remote_port_unspecified  # undetermined
        }.get(remote_port_subtype)
        if f:
            return f(object, remote_port)
        else:
            self.info(
                "Unsupported remote port subtype "
                "from %s. value=%s subtype=%s" % (
                    object, remote_port, remote_port_subtype))
            return None

    def get_remote_port_by_name(self, object, port):
        """
        Normalize remote port name with the remote platform profile.
        """
        self.debug("Remote port name: %s" % port)
        return object.profile.convert_interface_name(port)

    def get_remote_port_by_description(self, object, port):
        """
        Find remote port by interface description.
        :param object:
        :param port:
        :return: port name if found, None otherwise.
        """
        self.debug("Remote port description: %s" % port)
        try:
            i = Interface.objects.filter(
                managed_object=object.id,
                description=port).first()
            if i:
                return i.name
            else:
                return None
        except:  # NOTE(review): bare except swallows all errors here
            return None

    def get_remote_port_by_local(self, object, port):
        """
        Try to guess remote port from free-form description
        :param object:
        :param port:
        :return: resolved name, or the raw port id when undecodable
        """
        self.debug("Remote port local: %s" % port)
        # Try ifindex
        if is_int(port):
            i = Interface.objects.filter(
                managed_object=object.id, ifindex=int(port)).first()
            if i:
                return i.name
        # Try interface name
        try:
            n_port = object.profile.convert_interface_name(port)
            i = Interface.objects.filter(
                managed_object=object.id, name=n_port).first()
            if i:
                return n_port
            for p in object.profile.get_interface_names(n_port):
                i = Interface.objects.filter(
                    managed_object=object.id, name=p).first()
                if i:
                    return p
        except InterfaceTypeError:
            pass
        # Unable to decode
        self.info("Unable to decode local subtype port id %s at %s" % (
            port, object))
        return port

    def get_remote_port_by_mac(self, object, mac):
        """
        Find remote port by interface MAC address.
        :return: port name if found, None otherwise.
        """
        self.debug("Remote port mac: %s" % mac)
        i = Interface.objects.filter(managed_object=object.id,
                                     mac=mac).first()
        if i:
            return i.name
        else:
            return None

    def get_remote_port_unspecified(self, object, port):
        """
        Try to guess remote port from description of undetermined subtype.
        Tries name, then MAC, then description, then local heuristics.
        :param object:
        :param port:
        :return:
        """
        self.debug("Remote port unspecified: %s" % port)
        # Try to find interface with given name.
        try:
            n_port = self.get_remote_port_by_name(object, port)
        except:  # NOTE(review): bare except swallows all errors here
            n_port = None
        iface = None
        # Check whether returned port name exists. Return it if yes.
        if n_port:
            i = Interface.objects.filter(
                managed_object=object.id, name=n_port).first()
            if i:
                iface = n_port
        if iface:
            return iface
        # Try to find interface with given MAC address. TODO: clean MAC.
        try:
            iface = self.get_remote_port_by_mac(object, port)
        except:  # NOTE(review): bare except swallows all errors here
            iface = None
        if iface:
            return iface
        # Try to find interface with given description.
        iface = self.get_remote_port_by_description(object, port)
        if iface:
            return iface
        # Use algorithms from get_remote_port_by_local as last resort.
        return self.get_remote_port_by_local(object, port)
class InterfaceDiscoveryJob(MODiscoveryJob):
    """
    Discovery job synchronizing forwarding instances, interfaces and
    subinterfaces from the get_interfaces map task, with optional
    rule-based interface profile classification.
    """
    name = "interface_discovery"
    map_task = "get_interfaces"
    ignored = not config.getboolean("interface_discovery", "enabled")
    to_save = config.getboolean("interface_discovery", "save")  # @todo: Ignored
    # Related reports
    ip_discovery_enable = config.getboolean("ip_discovery", "enabled")
    ip_discovery_save = config.getboolean("ip_discovery", "save")
    prefix_discovery_enable = config.getboolean("prefix_discovery",
                                                "enabled")
    prefix_discovery_save = config.getboolean("prefix_discovery", "save")

    @classmethod
    def initialize(cls, scheduler):
        """
        Compile the configured interface-classification solution
        once per scheduler process (daemon mode only).
        :param scheduler: owning scheduler
        """
        super(InterfaceDiscoveryJob, cls).initialize(scheduler)
        cls.get_interface_profile = None
        if scheduler.daemon:
            # Compile classification rules
            sol = config.get("interface_discovery",
                             "get_interface_profile")
            if sol:
                cls.get_interface_profile = staticmethod(get_solution(sol))

    def handler(self, object, result):
        """
        Submit discovered forwarding instances, interfaces and
        subinterfaces; prune entries no longer reported.
        :param object: managed object
        :param result: get_interfaces output
        :return: True on success
        """
        self.profiles_cache = {}
        self.report = InterfaceReport(self, to_save=self.to_save)
        self.seen_interfaces = []
        # Process forwarding instances
        for fi in result:
            forwarding_instance = self.report.submit_forwarding_instance(
                instance=fi["forwarding_instance"],
                type=fi["type"],
                rd=fi.get("rd"),
                vr=fi.get("virtual_router"))
            # Move LAG members to the end
            # to make use of cache
            ifaces = sorted(
                fi["interfaces"],
                key=lambda x: ("aggregated_interface" in x and
                               bool(x["aggregated_interface"])))
            icache = {}
            for i in ifaces:
                # Get LAG
                agg = None
                if ("aggregated_interface" in i and
                        bool(i["aggregated_interface"])):
                    agg = icache.get(i["aggregated_interface"])
                    if not agg:
                        self.error(
                            "Cannot find aggregated interface '%s'. "
                            "Skipping %s" % (
                                i["aggregated_interface"], i["name"]))
                        continue
                # Submit discovered interface
                iface = self.report.submit_interface(
                    name=i["name"],
                    type=i["type"],
                    mac=i.get("mac"),
                    description=i.get("description"),
                    aggregated_interface=agg,
                    enabled_protocols=i.get("enabled_protocols", []),
                    ifindex=i.get("snmp_ifindex"))
                icache[i["name"]] = iface
                # Submit subinterfaces
                for si in i["subinterfaces"]:
                    self.report.submit_subinterface(
                        forwarding_instance=forwarding_instance,
                        interface=iface,
                        name=si["name"],
                        description=si.get("description"),
                        mac=si.get("mac", i.get("mac")),
                        vlan_ids=si.get("vlan_ids", []),
                        enabled_afi=si.get("enabled_afi", []),
                        ipv4_addresses=si.get("ipv4_addresses", []),
                        ipv6_addresses=si.get("ipv6_addresses", []),
                        iso_addresses=si.get("iso_addresses", []),
                        vpi=si.get("vpi"),
                        vci=si.get("vci"),
                        enabled_protocols=si.get("enabled_protocols", []),
                        untagged_vlan=si.get("untagged_vlan"),
                        tagged_vlans=si.get("tagged_vlans", []),
                        # ip_unnumbered_subinterface
                        ifindex=si.get("snmp_ifindex"))
                # Delete hanging subinterfaces
                self.report.submit_subinterfaces(
                    forwarding_instance, iface,
                    [si["name"] for si in i["subinterfaces"]])
                # Perform interface classification
                self.interface_classification(iface)
            # Delete hanging interfaces
            self.seen_interfaces += [i["name"] for i in fi["interfaces"]]
        # Delete hanging interfaces
        self.report.submit_interfaces(self.seen_interfaces)
        # Delete hanging forwarding instances
        self.report.submit_forwarding_instances(
            fi["forwarding_instance"] for fi in result)
        self.report.refine_ifindexes()
        self.report.send()
        return True

    def interface_classification(self, iface):
        """
        Perform interface classification
        :param iface: Interface instance
        :return:
        """
        # Skip when no classifier is configured or the profile is locked
        if not self.get_interface_profile or iface.profile_locked:
            return
        p_name = self.get_interface_profile(iface)
        if p_name and p_name != iface.profile.name:
            # Change profile
            p = self.profiles_cache.get(p_name)
            if p is None:
                p = InterfaceProfile.objects.filter(name=p_name).first()
                if p:
                    self.profiles_cache[p_name] = p
                else:
                    self.error(
                        "Invalid interface profile '%s' for interface '%s'" % (
                            p_name, iface.name))
            if p and p != iface.profile:
                self.info("Interface %s has been classified as '%s'" % (
                    iface.name, p_name))
                iface.profile = p
                iface.save()

    def can_run(self):
        return (super(InterfaceDiscoveryJob, self).can_run() and
                self.object.object_profile.enable_interface_discovery)

    def get_failed_interval(self):
        return self.object.object_profile.interface_discovery_min_interval
class BFDLinkDiscoveryJob(LinkDiscoveryJob):
    """
    BFD protocol link discovery
    """
    name = "bfd_discovery"
    map_task = "get_bfd_sessions"
    method = "bfd"
    ignored = not config.getboolean("bfd_discovery", "enabled")
    # Candidates are keyed by remote discriminator and resolved later,
    # so strict pending-candidate checking must be off
    strict_pending_candidates_check = False

    def process_result(self, object, result):
        """
        Record local discriminators and submit L2 BFD sessions as
        link candidates keyed by the remote discriminator.
        :param object: local managed object
        :param result: get_bfd_sessions output
        """
        self.ld = {}  # local discriminator -> port name
        self.n_cache = {}  # address -> neighbor
        for session in result:
            if "L2" not in session["clients"]:
                continue
            self.ld[session["local_discriminator"]] = session["local_interface"]
            remote_object = self.get_neighbor(session["remote_address"])
            if not remote_object:
                continue
            self.submit_candidate(session["local_interface"],
                                  remote_object,
                                  str(session["remote_discriminator"])
            )
        self.debug("Candidates: %s" % self.candidates)

    def process_pending_checks(self, object):
        """
        Resolve pending candidates: map remote discriminators back to
        local interfaces and submit the links.
        :param object: local managed object
        """
        self.debug("Process pending checks: %s" % self.p_candidates)
        for remote_object in self.p_candidates:
            for disc, remote_interface in self.p_candidates[remote_object]:
                disc = int(disc)
                local_interface = self.ld.get(disc)
                if local_interface:
                    self.submit_link(
                        object, local_interface,
                        remote_object, remote_interface)
                    self.submited.add(
                        (str(disc), remote_object, remote_interface))
                else:
                    self.debug("Local discriminator %d is not found in %s" % (
                        disc, ", ".join(str(k) for k in self.ld)))

    def get_neighbor(self, address):
        """
        Find neighbor by ip interface
        :param address: remote IPv4 address of the BFD session
        :return: managed object or None
        """
        # Get cached
        n = self.n_cache.get(address)
        if n:
            return n
        # @todo: Optimize search
        subs = list(SubInterface.objects.filter(
            enabled_afi="IPv4",
            ipv4_addresses__startswith="%s/" % address))
        if len(subs) == 1:
            # Exact match
            n = subs[0].managed_object
        else:
            n = None
        # Negative results are cached too (n may be None)
        self.n_cache[address] = n
        return n
def rpsl(self):
    """
    Render this AS as an RPSL aut-num object.

    Emits aut-num/as-name/descr/org headers, optional header remarks,
    one import/export statement pair per distinct peering policy,
    contacts, maintainers and optional footer remarks, then passes the
    assembled text through rpsl_format.

    :return: Formatted RPSL text
    """
    # Visual separator line between peer-group sections
    sep = "remarks: %s" % ("-" * 72)
    s = []
    s += ["aut-num: AS%s" % self.asn]
    if self.as_name:
        s += ["as-name: %s" % self.as_name]
    if self.description:
        s += ["descr: %s" % x for x in self.description.split("\n")]
    s += ["org: %s" % self.organisation.organisation]
    # Add header remarks
    if self.header_remarks:
        s += ["remarks: %s" % x for x in self.header_remarks.split("\n")]
    # Find AS peers
    # pg: Peer Group -> remote AS -> peering point ->
    #     [(import, export, localpref, import_med, export_med, remark)]
    pg = {}
    for peer in self.peer_set.filter(status="A"):
        if peer.peer_group not in pg:
            pg[peer.peer_group] = {}
        if peer.remote_asn not in pg[peer.peer_group]:
            pg[peer.peer_group][peer.remote_asn] = {}
        if peer.peering_point not in pg[peer.peer_group][peer.remote_asn]:
            pg[peer.peer_group][peer.remote_asn][peer.peering_point] = []
        to_skip = False
        e_import_med = peer.effective_import_med
        e_export_med = peer.effective_export_med
        # Deduplicate policies: a peer is skipped when an already-collected
        # entry has the same filters and MEDs.
        # NOTE(review): localpref and remark are deliberately excluded from
        # this comparison — presumably duplicates differing only in those
        # fields collapse to the first one seen; confirm intent.
        for R in pg[peer.peer_group][peer.remote_asn][peer.peering_point]:
            p_import, p_export, localpref, import_med, export_med, remark = R
            if (peer.import_filter == p_import and
                    peer.export_filter == p_export and
                    e_import_med == import_med and
                    e_export_med == export_med):
                to_skip = True
                break
        if not to_skip:
            pg[peer.peer_group][peer.remote_asn][peer.peering_point] += \
                [(peer.import_filter, peer.export_filter,
                  peer.effective_local_pref, e_import_med, e_export_med,
                  peer.rpsl_remark)]
    # Build RPSL
    # When set, pref is emitted as 65535 - localpref (RPSL "lower wins")
    inverse_pref = config.getboolean("peer", "rpsl_inverse_pref_style")
    for peer_group in pg:
        s += [sep]
        # NOTE(review): assumes peer_group.description is a non-empty
        # string — a None description would raise here; confirm model
        s += [
            "remarks: -- %s" % x
            for x in peer_group.description.split("\n")
        ]
        s += [sep]
        for asn in sorted(pg[peer_group]):
            # Qualify with "at <hostname>" only when the AS peers at
            # more than one peering point
            add_at = len(pg[peer_group][asn]) != 1
            for pp in pg[peer_group][asn]:
                for R in pg[peer_group][asn][pp]:
                    import_filter, export_filter, localpref, import_med, \
                        export_med, remark = R
                    # Prepend import and export with remark when given
                    if remark:
                        s += ["remarks: # %s" % remark]
                    # Build import statement
                    i_s = "import: from AS%d" % asn
                    if add_at:
                        i_s += " at %s" % pp.hostname
                    actions = []
                    if localpref:
                        pref = (65535 - localpref) if inverse_pref else localpref
                        actions += ["pref=%d;" % pref]
                    if import_med:
                        actions += ["med=%d;" % import_med]
                    if actions:
                        i_s += " action " + " ".join(actions)
                    i_s += " accept %s" % import_filter
                    s += [i_s]
                    # Build export statement
                    e_s = "export: to AS%d" % asn
                    if add_at:
                        e_s += " at %s" % pp.hostname
                    if export_med:
                        e_s += " action med=%d;" % export_med
                    e_s += " announce %s" % export_filter
                    s += [e_s]
    # Add contacts
    for c in self.administrative_contacts.order_by("nic_hdl"):
        s += ["admin-c: %s" % c.nic_hdl]
    for c in self.tech_contacts.order_by("nic_hdl"):
        s += ["tech-c: %s" % c.nic_hdl]
    # Add maintainers
    for m in self.maintainers.all():
        s += ["mnt-by: %s" % m.maintainer]
    for m in self.routes_maintainers.all():
        s += ["mnt-routes: %s" % m.maintainer]
    # Add footer remarks
    if self.footer_remarks:
        s += ["remarks: %s" % x for x in self.footer_remarks.split("\n")]
    return rpsl_format("\n".join(s))