def rebase(self, vrf, new_prefix):
    """
    Rebase prefix to a new location, moving all nested prefixes,
    addresses and access permissions along with it.

    :param vrf: Target VRF
    :param new_prefix: New prefix as a string
    :return: Rebased Prefix instance, reloaded from the database
    :raises ValueError: on rebase to self, address-family change,
        or a target prefix smaller than the source
    """
    # Was commented out, leaving `b` undefined (NameError on first use below)
    b = IP.prefix(self.prefix)
    nb = IP.prefix(new_prefix)
    # Validation
    if vrf == self.vrf and self.prefix == new_prefix:
        raise ValueError("Cannot rebase to self")
    if b.afi != nb.afi:
        raise ValueError("Cannot change address family during rebase")
    if b.mask < nb.mask:
        raise ValueError("Cannot rebase to prefix of lesser size")
    # Rebase prefix and all nested prefixes
    # Parents are left untouched
    for p in Prefix.objects.filter(vrf=self.vrf, afi=self.afi).extra(
            where=["prefix <<= %s"], params=[self.prefix]):
        np = IP.prefix(p.prefix).rebase(b, nb).prefix
        # Use instance .save() instead of bulk .update() so model
        # signal handlers fire
        p.prefix = np
        p.vrf = vrf
        p.save()  # Raise events
    # Rebase addresses
    # Parents are left untouched
    for a in Address.objects.filter(vrf=self.vrf, afi=self.afi).extra(
            where=["address <<= %s"], params=[self.prefix]):
        na = IP.prefix(a.address).rebase(b, nb).address
        a.address = na
        a.vrf = vrf
        a.save()  # Raise events
    # Rebase permissions
    # move all permissions to the nested blocks
    for pa in PrefixAccess.objects.filter(vrf=self.vrf).extra(
            where=["prefix <<= %s"], params=[self.prefix]):
        np = IP.prefix(pa.prefix).rebase(b, nb).prefix
        PrefixAccess.objects.filter(pk=pa.pk).update(prefix=np, vrf=vrf)
    # create permissions for covered blocks
    for pa in PrefixAccess.objects.filter(vrf=self.vrf).extra(
            where=["prefix >> %s"], params=[self.prefix]):
        PrefixAccess(
            user=pa.user,
            vrf=vrf,
            afi=pa.afi,
            prefix=new_prefix,
            can_view=pa.can_view,
            can_change=pa.can_change,
        ).save()
    # @todo: Rebase bookmarks
    # @todo: Update caches
    # Return rebased prefix
    return Prefix.objects.get(pk=self.pk)  # Updated object
def get_data(self, vrf, afi, prefix, **kwargs):
    """
    Report free blocks within the given prefix.

    :param vrf: VRF instance
    :param afi: Address family ("4" or "6")
    :param prefix: Prefix instance
    """
    p = IP.prefix(prefix.prefix)
    # Translate the template first, then interpolate — the original
    # interpolated before _(), so the msgid never matched the
    # translation catalog (compare the summary report, which uses
    # _(...) % {...})
    return self.from_dataset(
        title=_("Free blocks in VRF %(vrf)s (IPv%(afi)s), %(prefix)s") % {
            "vrf": vrf.name,
            "afi": afi,
            "prefix": prefix.prefix
        },
        columns=["Free Blocks"],
        data=[[unicode(f)] for f in p.iter_free(
            [IP.prefix(c.prefix) for c in prefix.children_set.all()])])
def clean(self, data):
    """
    Validate an address-range form: address syntax, ordering,
    action-specific fields, nameserver list and overlap with other
    locked ranges.

    :param data: cleaned form data dict
    :return: validated data dict (with is_locked possibly forced on)
    :raises ValueError: on any validation failure
    """
    data = super(AddressRangeApplication, self).clean(data)
    afi = data["afi"]
    from_address = data["from_address"]
    to_address = data["to_address"]
    # Check AFI
    address_validator = is_ipv4 if afi == "4" else is_ipv6
    if not address_validator(from_address):
        raise ValueError("Invalid IPv%(afi)s 'From Address'" % {"afi": afi})
    if not address_validator(to_address):
        raise ValueError("Invalid IPv%(afi)s 'To Address'" % {"afi": afi})
    # Check from address not greater than to address
    if IP.prefix(from_address) > IP.prefix(to_address):
        raise ValueError(
            "'To Address' must be greater or equal than 'From Address'")
    # Check for valid "action" combination
    if "fqdn_template" in data and data[
            "fqdn_template"] and data["action"] != "G":
        raise ValueError(
            "'FQDN Template' must be clean for selected 'Action'")
    if "reverse_nses" in data and data[
            "reverse_nses"] and data["action"] != "D":
        raise ValueError(
            "'Reverse NSes' must be clean for selected 'Action'")
    # Set range as locked for "G" and "D" actions
    if data["action"] != "N":
        data["is_locked"] = True
    # @todo: check FQDN template
    # Check reverse_nses is a list of FQDNs or IPs
    if "reverse_nses" in data and data["reverse_nses"]:
        reverse_nses = data["reverse_nses"]
        for ns in reverse_nses.split(","):
            ns = ns.strip()
            if not is_ipv4(ns) and not is_ipv6(ns) and not is_fqdn(ns):
                raise ValueError("%s is invalid nameserver" % ns)
    # Check no locked range overlaps another locked range
    if data["is_locked"]:
        r = [
            r for r in AddressRange.get_overlapping_ranges(
                data["vrf"], data["afi"], data["from_address"],
                data["to_address"])
            if r.is_locked is True and r.name != data["name"]
        ]
        if r:
            # Fixed typo in the user-facing message ("ahother")
            raise ValueError(
                "Locked range overlaps with another locked range: %s"
                % unicode(r[0]))
    return data
def iter_lazy_labels(cls, prefix: str):
    """
    Yield prefix-filter labels for every PrefixTable entry covering `prefix`.

    Yields a `::<` label for each covering entry, plus a `::=` label
    when the entry matches `prefix` exactly.
    """
    afi = IP.prefix(prefix).afi
    covering = PrefixTablePrefix.objects.filter(afi=afi).extra(
        where=["%s <<= prefix"], params=[prefix]
    )
    for entry in covering:
        base = f"noc::prefixfilter::{entry.table.name}"
        yield f"{base}::<"
        if entry.prefix == prefix:
            yield f"{base}::="
def resolve_as_set_prefixes_maxlen(cls, as_set, optimize=None):
    """
    Generate prefixes for as-sets.
    Returns a list of (prefix, min length, max length)
    """
    prefixes = cls._resolve_as_set_prefixes(as_set)
    max_len = config.peer.max_prefix_length
    # Explicit `optimize` wins; with None, fall back to config policy
    use_optimization = optimize
    if use_optimization is None:
        use_optimization = (
            config.peer.prefix_list_optimization
            and len(prefixes) >= config.peer.prefix_list_optimization_threshold
        )
    if use_optimization:
        # Optimization is enabled
        result = []
        for p, m in optimize_prefix_list_maxlen(prefixes):
            if p.mask <= max_len:
                result.append((p.prefix, p.mask, m))
        return result
    # Optimization is disabled
    parsed = sorted(IP.prefix(p) for p in prefixes)
    return [(x.prefix, x.mask, x.mask) for x in parsed if x.mask <= max_len]
def clean_args(self, obj, **kwargs):
    """
    Validate and convert raw keyword arguments against self.params.

    :param obj: Managed object (used to normalize interface names)
    :param kwargs: Raw parameter values keyed by parameter name
    :return: dict of cleaned values keyed by parameter name
    :raises ValueError: on a missing required parameter or an
        invalid/unconvertible value
    """
    args = {}
    for p in self.params:
        # A parameter is missing only when no default is defined:
        # was `not p.default`, which wrongly rejected falsy defaults
        # such as 0 or ""
        if p.name not in kwargs and p.is_required and p.default is None:
            raise ValueError("Required parameter '%s' is missed" % p.name)
        v = kwargs.get(p.name, p.default)
        if v is None:
            continue
        if p.type == "int":
            # Integer type
            try:
                v = int(v)
            except ValueError:
                raise ValueError(
                    "Invalid integer in parameter '%s': '%s'" % (p.name, v))
        elif p.type == "float":
            # Float type
            try:
                v = float(v)
            except ValueError:
                raise ValueError("Invalid float in parameter '%s': '%s'"
                                 % (p.name, v))
        elif p.type == "interface":
            # Interface: normalize name via the object's SA profile
            try:
                v = obj.get_profile().convert_interface_name(v)
            except Exception:
                raise ValueError(
                    "Invalid interface name in parameter '%s': '%s'"
                    % (p.name, v))
        elif p.type == "ip":
            # IP address
            try:
                v = IP.prefix(v)
            except ValueError:
                raise ValueError("Invalid ip in parameter '%s': '%s'"
                                 % (p.name, v))
        elif p.type == "vrf":
            # Accept a VRF instance, a numeric id, or a name
            if isinstance(v, VRF):
                pass
            elif isinstance(v, six.integer_types):
                try:
                    v = VRF.objects.get(id=v)
                except VRF.DoesNotExist:
                    raise ValueError(
                        "Unknown VRF in parameter '%s': '%s'" % (p.name, v))
            elif isinstance(v, six.string_types):
                try:
                    v = VRF.objects.get(name=v)
                except VRF.DoesNotExist:
                    raise ValueError(
                        "Unknown VRF in parameter '%s': '%s'" % (p.name, v))
            else:
                raise ValueError("Unknown VRF in parameter '%s': '%s'"
                                 % (p.name, v))
        args[str(p.name)] = v
    return args
def clean(self, data):
    """
    Validate peer address fields: prefix syntax, paired backup
    addresses, and a single address family across all link addresses.
    """
    data = super(PeerApplication, self).clean(data)
    # Check address fields
    if not is_prefix(data["local_ip"]):
        raise ValueError("Invalid 'Local IP Address', must be in x.x.x.x/x form or IPv6 prefix")
    if not is_prefix(data["remote_ip"]):
        raise ValueError(
            "Invalid 'Remote IP Address', must be in x.x.x.x/x form or IPv6 prefix")
    if data.get("local_backup_ip") and not is_prefix(data["local_backup_ip"]):
        raise ValueError(
            "Invalid 'Local Backup IP Address', must be in x.x.x.x/x form or IPv6 prefix")
    if data.get("remote_backup_ip") and not is_prefix(data["remote_backup_ip"]):
        raise ValueError(
            "Invalid 'Remote Backup IP Address', must be in x.x.x.x/x form or IPv6 prefix")
    # Check no or both backup addresses given
    has_local_backup = bool(data.get("local_backup_ip"))
    has_remote_backup = bool(data.get("remote_backup_ip"))
    if has_local_backup != has_remote_backup:
        raise ValueError("One of backup addresses given. Set peer address")
    # Check all link addresses belongs to one AFI
    afis = set(
        IP.prefix(data[field]).afi
        for field in ("local_ip", "remote_ip",
                      "local_backup_ip", "remote_backup_ip")
        if data.get(field)
    )
    if len(afis) > 1:
        raise ValueError("All neighboring addresses must have same address family")
    return data
def get_interface_prefixes(self):
    """
    Get prefixes from interface discovery artifact
    :return: list of DiscoveredPrefix
    """
    def get_vlan(data):
        # A vlan binding is meaningful only for single-vlan subinterfaces
        vlans = data.get("vlan_ids")
        return vlans[0] if vlans and len(vlans) == 1 else None

    self.logger.debug("Getting interface prefixes")
    profile = self.object.object_profile.prefix_profile_interface
    if not profile:
        self.logger.info("Default interface prefix profile is not set. Skipping interface prefix discovery")
        return []
    prefixes = self.get_artefact("interface_prefix")
    if not prefixes:
        self.logger.info("No interface_prefix artefact, skipping interface prefixes")
        return []
    result = []
    for p in prefixes:
        result.append(DiscoveredPrefix(
            vpn_id=p.get("vpn_id", GLOBAL_VRF) or GLOBAL_VRF,
            prefix=str(IP.prefix(p["address"]).first),
            profile=profile,
            source=SRC_INTERFACE,
            description=p["description"],
            subinterface=p["subinterface"],
            vlan=get_vlan(p),
            asn=None
        ))
    return result
def upload_axfr(data):
    """
    Parse AXFR zone-dump lines and sync PTR records into Address.

    Uses `prefix`, `vrf` and `afi` from the enclosing scope.
    Returns the number of created or updated addresses.
    """
    base = IP.prefix(prefix.prefix)
    count = 0
    for line in data:
        line = line.strip()
        # Skip blanks and zone-file comments
        if line == "" or line.startswith(";"):
            continue
        fields = line.split()
        # Only 5-field "IN PTR" records are interesting
        # (the original re-checked fields[3] == "PTR" afterwards,
        # which is always true here)
        if len(fields) != 5 or fields[2] != "IN" or fields[3] != "PTR":
            continue
        # @todo: IPv6
        # Reverse the first four labels of the PTR owner back into an address
        octets = fields[0].split(".")
        ip = "%s.%s.%s.%s" % (octets[3], octets[2], octets[1], octets[0])
        fqdn = fields[4]
        if fqdn.endswith("."):
            fqdn = fqdn[:-1]
        # Leave only addresses residing into "prefix"
        # To prevent uploading to not-owned blocks
        if not base.contains(IPv4(ip)):
            continue
        a, changed = Address.objects.get_or_create(vrf=vrf, afi=afi, address=ip)
        if a.fqdn != fqdn:
            a.fqdn = fqdn
            changed = True
        if changed:
            a.save()
            count += 1
    return count
def api_suggest_free(self, request, prefix_id):
    """
    Suggest free blocks of different sizes

    :param request: HTTP request
    :param prefix_id: Prefix primary key
    :return: list of {"prefix": ..., "size": ...} suggestions,
        one per candidate mask, smallest-fitting free block first
    """
    prefix = self.get_object_or_404(Prefix, id=int(prefix_id))
    suggestions = []
    p_mask = int(prefix.prefix.split("/")[1])
    free = sorted(
        IP.prefix(prefix.prefix).iter_free(
            [pp.prefix for pp in prefix.children_set.all()]),
        key=attrgetter("mask"),
        reverse=True,
    )
    if not free:
        # Fully allocated prefix: nothing to suggest
        # (previously raised IndexError on free[-1] below)
        return suggestions
    for mask in range(30 if prefix.is_ipv4 else 64,
                      max(p_mask + 1, free[-1].mask) - 1, -1):
        # Find smallest free block possible
        for p in free:
            if p.mask <= mask:
                suggestions += [{
                    "prefix": "%s/%d" % (p.address, mask),
                    "size": 2 ** (32 - mask) if prefix.is_ipv4 else None,
                }]
                break
    return suggestions
def iter_free(self):
    """
    Generator returning all available free prefixes inside
    :return: yields free prefixes as strings
    """
    allocated = [child.prefix for child in self.children_set.all()]
    for free_block in IP.prefix(self.prefix).iter_free(allocated):
        yield str(free_block)
def get_data(self, request):
    """
    Build the peer "cone power" report: per active peer, the number of
    /24-equivalents covered by its import filter, and how many /24
    blocks are unique to that peer.
    """
    def ppower(prefix):
        # Number of /`powermask` blocks contained in `prefix`.
        # Was `2 * (powermask - m)`, which undercounts (and yields 0
        # for a /24 itself); a /m prefix contains 2 ** (powermask - m)
        # blocks of /powermask
        m = int(prefix.split("/")[1])
        if m <= powermask:
            return long(2 ** (powermask - m))
        else:
            return 0

    powermask = 24
    r = []  # (Description, as, filter, cone)
    peers = {}  # peer id -> peer
    cone_powers = {}  # peer id -> power
    uniq_powers = {}  # peer id -> power
    prefixes = {}  # Prefix -> set(peer ids)
    for p in Peer.objects.filter(status="A").exclude(import_filter="ANY"):
        peers[p.id] = p
        cone_powers[p.id] = 0
        for cp in WhoisCache.resolve_as_set_prefixes(p.import_filter, optimize=True):
            # Get powers
            cone_powers[p.id] += ppower(cp)
            # Assign to prefixes
            for i in IP.prefix(cp).iter_cover(powermask):
                pfx = i.prefix
                try:
                    prefixes[pfx].add(p.id)
                except KeyError:
                    prefixes[pfx] = set([p.id])
    # Calculate unique powers
    for pfx in prefixes:
        pfx_peers = prefixes[pfx]
        if len(pfx_peers) == 1:
            # Unique
            peer = list(pfx_peers)[0]
            try:
                uniq_powers[peer] += 1
            except KeyError:
                uniq_powers[peer] = 1
    # Build result
    for peer_id in peers:
        p = peers[peer_id]
        r += [(p.description, "AS%d" % p.remote_asn, p.import_filter,
               cone_powers.get(peer_id, 0), uniq_powers.get(peer_id, 0))]
    # Sort by unique cone power, descending
    r = sorted(r, key=lambda x: -x[4])
    return self.from_dataset(title=self.title, columns=[
        "Peer", "ASN", "Import Filter",
        TableColumn("Cone Power", format="numeric", align="right"),
        TableColumn("Uniq. Cone Power", format="numeric", align="right"),
    ], data=r)
def get_interfaces(self, afi, rd, exclude=None):
    """
    Returns a list of SI

    Yields SI tuples for subinterfaces with addresses in the given
    address family and route distinguisher, skipping excluded and
    already-seen prefixes.
    """
    def check_ipv4(a):
        # Reject loopback, link-local, host routes and unspecified
        return not (a.startswith("127.")
                    or a.startswith("169.254")
                    or a.endswith("/32")
                    or a.startswith("0.0.0.0"))

    def check_ipv6(a):
        return a != "::1"

    exclude = exclude or []
    si_fields = {
        "_id": 0,
        "name": 1,
        "forwarding_instance": 1,
        "managed_object": 1
    }
    if afi == self.IPv4:
        check = check_ipv4
        addr_field = "ipv4_addresses"
        AFI = "IPv4"
    elif afi == self.IPv6:
        check = check_ipv6
        addr_field = "ipv6_addresses"
        AFI = "IPv6"
    else:
        raise NotImplementedError()
    si_fields[addr_field] = 1
    for si in SubInterface._get_collection().find({"enabled_afi": AFI},
                                                  si_fields):
        if rd != self.get_rd(si["managed_object"],
                             si.get("forwarding_instance")):
            continue
        seen = set(exclude)
        for a in si.get(addr_field, []):
            if not check(a):
                continue
            prefix = str(IP.prefix(a).first)
            if prefix in seen:
                continue
            seen.add(prefix)
            self.p_power[prefix] += 1
            yield self.SI(si["managed_object"], si["name"],
                          si.get("forwarding_instance"), a, prefix)
def get_vc_prefixes(self, vc_id):
    """
    Return network addresses (sorted IPv4 first, then IPv6) seen on
    subinterfaces bound to the VC's vlan within its VC domain.
    """
    vc = VC.get_by_id(vc_id)
    if not vc:
        return []
    objects = vc.vc_domain.managedobject_set.values_list("id", flat=True)
    ipv4 = set()
    ipv6 = set()
    # @todo: Exact match on vlan_ids
    query = (Q(managed_object__in=objects)
             & Q(vlan_ids=vc.l1)
             & (Q(enabled_afi=["IPv4"]) | Q(enabled_afi=["IPv6"])))
    for si in SubInterface.objects.filter(query).only(
            "enabled_afi", "ipv4_addresses", "ipv6_addresses"):
        if "IPv4" in si.enabled_afi:
            for ip in si.ipv4_addresses:
                ipv4.add(IP.prefix(ip).first)
        if "IPv6" in si.enabled_afi:
            for ip in si.ipv6_addresses:
                ipv6.add(IP.prefix(ip).first)
    result = [str(x.first) for x in sorted(ipv4)]
    result += [str(x.first) for x in sorted(ipv6)]
    return result
def get_data(self, vrf, afi, prefix, **kwargs):
    """
    Summary report for a prefix: allocation and free-space statistics.

    Detailed size statistics are only computed for IPv4; for other
    address families the report is returned with an empty data set.

    :param vrf: VRF instance
    :param afi: Address family ("4" or "6")
    :param prefix: Prefix instance
    """
    p = IP.prefix(prefix.prefix)
    allocated = [IP.prefix(a.prefix) for a in prefix.children_set.all()]
    free = list(p.iter_free(allocated))
    # Was undefined for IPv6, raising NameError at from_dataset below
    data = []
    if afi == "4":
        allocated_30 = [a for a in allocated if a.mask == 30]
        allocated_size = sum([a.size for a in allocated])
        allocated_30_size = sum([a.size for a in allocated_30])
        free_size = sum([a.size for a in free])
        total = p.size
        data = [
            ("Allocated addresses", allocated_size,
             float(allocated_size) * 100 / float(total)),
            (".... in /30", allocated_30_size,
             float(allocated_30_size) * 100 / float(total)),
            ("Free addresses", free_size,
             float(free_size) * 100 / float(total)),
            ("Total addresses", total, 1.0),
        ]
        a_s = len(allocated)
        if a_s:
            avg_allocated_size = allocated_size / a_s
            avg_allocated_mask = 32 - int(
                math.ceil(math.log(avg_allocated_size, 2)))
            data += [
                ("Average allocated block", avg_allocated_size, ""),
                ("Average allocated mask", avg_allocated_mask, ""),
            ]
    return self.from_dataset(
        title=_("Summary for VRF %(vrf)s (IPv%(afi)s): %(prefix)s") % {
            "vrf": vrf.name,
            "afi": afi,
            "prefix": p.prefix
        },
        columns=[
            "",
            TableColumn(_("Size"), format="numeric", align="right"),
            TableColumn(_("%"), format="percent", align="right"),
        ],
        data=data,
    )
def get_uplinks_minaddr(self):
    """
    Segment's Object with lesser address is uplink
    :return:
    """
    ranked = sorted(
        (IP.prefix(self.G.nodes[n].get("address")), n)
        for n in self.G.nodes
        if self.G.nodes[n].get("role") == "segment"
    )
    lowest = next(iter(ranked))
    return [lowest[1]]
def get_uplinks_maxaddr(self):
    """
    Segment's Object with greater address is uplink
    :return:
    """
    # Use G.nodes, matching get_uplinks_minaddr: the old G.node view
    # was removed in networkx 2.4 and raises AttributeError there
    s = next(
        reversed(
            sorted((IP.prefix(self.G.nodes[i].get("address")), i)
                   for i in self.G.nodes
                   if self.G.nodes[i].get("role") == "segment")))
    return [s[1]]
def match(self, prefix):
    """
    Check the prefix is inside Prefix Table
    :param prefix: Prefix
    :type prefix: str
    :rtype: bool
    """
    afi = IP.prefix(prefix).afi
    qs = PrefixTablePrefix.objects.filter(table=self, afi=afi)
    return qs.extra(where=["%s <<= prefix"], params=[prefix]).exists()
def get_prefix_spot(self, prefix, sep=True, extra=None):
    """
    Return addresses around existing ones
    """
    extra = extra or []
    p = IP.prefix(prefix.prefix)
    # Small IPv4 networks are shown in full; otherwise only a window
    # of ADDRESS_SPOT_DIST around used addresses
    if prefix.afi == "4" and len(p) <= self.MAX_IPv4_NET_SIZE:
        dist = self.MAX_IPv4_NET_SIZE
    else:
        dist = self.ADDRESS_SPOT_DIST
    occupied = [a.address for a in prefix.address_set.all()]
    return p.area_spot(occupied + extra, dist=dist, sep=sep)
def compile_ip_eq(self, f_name):
    """
    Compile an interface-classification predicate named `f_name`
    checking whether any subinterface address equals self.value
    (full prefix when a mask is given, bare address otherwise).
    Returns the generated function source as a string.
    """
    v = IP.prefix(self.value)
    lines = [
        "def %s(iface):" % f_name,
        " a = [si.ipv%(afi)s_addresses for si in iface.subinterface_set.filter(enabled_afi='IPv%(afi)s')]" % {"afi": v.afi},
        " a = sum(a, [])",
    ]
    if "/" in self.value:
        # Compare prefixes
        lines.append(" return any(x for x in a if x == %r)" % v.prefix)
    else:
        # Compare addresses
        addr = v.prefix.split("/")[0]
        lines.append(" return any(x for x in a if x.split('/')[0] == %r)" % addr)
    return "\n".join(lines)
def fn_MatchPrefix(self, _input, prefix, address):
    """
    Check `address` is within prefix
    :param _input: iterable of contexts
    :param prefix: variable reference or literal prefix
    :param address: variable reference or literal address
    :return: yields contexts whose address lies inside the prefix
    """
    for ctx in _input:
        # Resolve into locals: the original rebound the `prefix` and
        # `address` parameters, so from the second context onwards it
        # resolved the already-resolved value instead of the original
        # reference
        p = self.resolve_var(ctx, prefix)
        if not p:
            continue
        a = self.resolve_var(ctx, address)
        if not a:
            continue
        if a in IP.prefix(str(p)):
            yield ctx
def get_data(self, **kwargs):
    """
    Report prefix usage per VRF, with a section row per VRF.
    Usage figures are computed for IPv4 and for IPv6 prefixes
    of /96 or longer; otherwise "-" placeholders are emitted.
    """
    from django.db import connection

    data = []
    last_vrf = None
    cursor = connection.cursor()
    cursor.execute(self.QUERY)
    for vrf, rd, afi, prefix, description, used in cursor:
        if last_vrf != vrf:
            # New VRF: start a new report section
            data += [SectionRow("%s (%s)" % (vrf, rd))]
            last_vrf = vrf
        p = IP.prefix(prefix)
        if afi == "4":
            total = p.size
            if p.mask < 31 and total - used >= 2:
                # Exclude network and broadcast
                total = p.size - 2
            free = total - used
            percent = used * 100 / total
        elif afi == "6":
            if p.mask >= 96:
                total = 2 ** (128 - p.mask)
                free = total - used
                percent = used * 100 / total
            else:
                total, free, percent = "-", "-", "-"
        data += [[prefix, description, used, free, total, percent]]
    return self.from_dataset(
        title=self.title,
        columns=[
            "Prefix",
            "Description",
            TableColumn("IP Used", align="right", format="numeric"),
            TableColumn("IP Free", align="right", format="numeric"),
            TableColumn("IP Total", align="right", format="numeric"),
            TableColumn("% Used", align="right", format="percent"),
        ],
        data=data)
def update_usage(self):
    """
    Recalculate the `used` percentage for an IPv4 prefix node.
    Child prefixes are measured against the full size; bare addresses
    against the usable size (network/broadcast excluded when possible).
    """
    if self.prefix.afi != "4":
        return
    size = IP.prefix(self.prefix.prefix).size
    if size < 2:
        return
    if self.children:
        # Count prefixes
        consumed = sum(c.size for c in self.children)
        percent = min(int(float(consumed) * 100 / float(size)), 100)
    else:
        # Count addresses
        consumed = self.app.ip_usage.get(self.prefix.id, 0)
        usable = size - 2 if size > 2 else size
        percent = int(float(consumed) * 100 / float(usable))
    self.used = percent
def append_prefix(self, prefix):
    """Append a prefix, encoded as its sequence of bits."""
    bits = list(IP.prefix(prefix).iter_bits())
    self.append_binary_prefix(bits)
def save(self, *args, **kwargs):
    """Derive `afi` from the prefix, then delegate to the parent save."""
    # Set AFI
    parsed = IP.prefix(self.prefix)
    self.afi = parsed.afi
    return super(PrefixTablePrefix, self).save(*args, **kwargs)
def addresses(self):
    """
    Generator returning all addresses in range
    """
    start = IP.prefix(self.from_address)
    stop = IP.prefix(self.to_address)
    return start.iter_address(until=stop)
def save(self, *args, **kwargs):
    """
    Save the address range, keeping auto-generated FQDN addresses in
    sync with the range's boundaries and action:

    * new "G" range — generate addresses;
    * action switched away from "G" — delete generated addresses;
    * action switched to "G" — generate addresses;
    * boundaries changed on a "G" range — delete addresses fallen out
      of range, then regenerate.
    """
    def generate_fqdns():
        # Prepare FQDN template
        t = Template(self.fqdn_template)
        # Sync FQDNs
        sn = 0
        for ip in self.addresses:
            # Generate FQDN (renamed from `vars`, which shadowed the builtin)
            tpl_vars = {"afi": self.afi, "vrf": self.vrf, "range": self, "n": sn}
            sn += 1
            if self.afi == "4":
                octets = ip.address.split(".")
                tpl_vars["ip"] = octets  # ip.0 .. ip.3
                # ip1, ip2, ip3, ip4 for backward compatibility
                for n, octet in enumerate(octets):
                    tpl_vars["ip%d" % (n + 1)] = octet
            elif self.afi == "6":
                tpl_vars["ip"] = ip.digits  # ip.0 .. ip.31
            fqdn = t.render(Context(tpl_vars))
            description = "Generated by address range '%s'" % self.name
            # Create or update address record when necessary
            a, created = Address.objects.get_or_create(
                vrf=self.vrf, afi=self.afi, address=ip.address
            )
            if created:
                a.fqdn = fqdn
                a.description = description
                a.save()
            elif a.fqdn != fqdn or a.description != description:
                # Was `a.description != a.description` (always False),
                # so description changes were never written back
                a.fqdn = fqdn
                a.description = description
                a.save()

    created = self.id is None
    if not created:
        # Get old values
        old = AddressRange.objects.get(id=self.id)
    super(AddressRange, self).save(*args, **kwargs)
    if created:
        # New
        if self.action == "G":
            generate_fqdns()
    else:
        # Changed
        if old.action == "G" and self.action != "G":
            # Drop all auto-generated IPs
            Address.objects.filter(
                vrf=self.vrf,
                afi=self.afi,
                address__gte=self.from_address,
                address__lte=self.to_address,
            ).delete()
        elif old.action != "G" and self.action == "G":
            # Generate IPs
            generate_fqdns()
        elif self.action == "G":
            # Check for boundaries change
            if IP.prefix(old.from_address) < IP.prefix(self.from_address):
                # Lower boundary raised up. Clean up addresses fallen
                # out of range: [old.from_address, self.from_address).
                # Was `address__lt=self.to_address`, which deleted
                # nearly the whole current range
                Address.objects.filter(
                    vrf=self.vrf,
                    afi=self.afi,
                    address__gte=old.from_address,
                    address__lt=self.from_address,
                ).delete()
            if IP.prefix(old.to_address) > IP.prefix(self.to_address):
                # Upper boundary is lowered. Clean up addresses fallen out of range
                Address.objects.filter(
                    vrf=self.vrf,
                    afi=self.afi,
                    address__gt=self.to_address,
                    address__lte=old.to_address,
                ).delete()
            # Finally recheck FQDNs
            generate_fqdns()
def view_vrf_index(self, request, vrf_id, afi, prefix):
    """
    Display VRF Index

    Renders vrf_index.html.j2 for one prefix: its parent path, nested
    (and free) prefixes, addresses, info table, range "slot" layout
    for rendering overlapping address ranges, address spot and CSS
    styles.

    :param request: HTTP request
    :param vrf_id: VRF primary key (string)
    :param afi: Address family, "4" or "6"
    :param prefix: Prefix string, replaced below by the Prefix object
    """
    # Validate
    vrf = self.get_object_or_404(VRF, id=int(vrf_id))
    # NOTE(review): due to and/or precedence, `not vrf.afi_ipv4` here is
    # evaluated on its own, so a VRF without IPv4 is rejected even for
    # afi == "6"; intent was probably
    # afi == "4" and (not is_ipv4_prefix(prefix) or not vrf.afi_ipv4) —
    # confirm before changing
    if (afi == "4" and (not is_ipv4_prefix(prefix)) or not vrf.afi_ipv4) or (
        afi == "6" and (not is_ipv6_prefix(prefix) or not vrf.afi_ipv6)
    ):
        return self.response_forbidden("Invalid prefix")
    prefix = self.get_object_or_404(Prefix, vrf=vrf, afi=afi, prefix=prefix)
    # Get prefix path (root-first chain of parents)
    path = []
    p = prefix.parent
    while p:
        path = [p] + path
        p = p.parent
    # List of nested prefixes
    # @todo: prefetch_related
    prefixes = list(prefix.children_set.select_related().order_by("prefix"))
    # Bulk utilization
    Prefix.update_prefixes_usage(prefixes)
    # Get permissions
    user = request.user
    can_view = prefix.can_view(user)
    can_change = prefix.can_change(user)
    can_bind_vc = can_change and Permission.has_perm(user, "ip:ipam:bind_vc")
    can_change_maintainers = user.is_superuser
    can_add_prefix = can_change
    # Addresses may only be added to leaf prefixes
    can_add_address = can_change and len(prefixes) == 0
    can_edit_special = prefix.effective_prefix_special_address == "I"
    # Bookmarks
    has_bookmark = prefix.has_bookmark(user)
    bookmarks = PrefixBookmark.user_bookmarks(user, vrf=vrf, afi=afi)
    s_bookmarks = set(b.prefix for b in bookmarks)
    # Add free prefixes
    free_prefixes = list(IP.prefix(prefix.prefix).iter_free([pp.prefix for pp in prefixes]))
    # Merge allocated and free prefixes into one sorted display list:
    # (is_allocated, IP, Prefix-or-None, is_bookmarked-or-None)
    l_prefixes = sorted(
        (
            [(True, IP.prefix(pp.prefix), pp, pp.prefix in s_bookmarks) for pp in prefixes]
            + [(False, pp, None, None) for pp in free_prefixes]
        ),
        key=lambda x: x[1],
    )
    # List of nested addresses
    # @todo: prefetch_related
    addresses = list(prefix.address_set.select_related().order_by("address"))
    # Prepare block info (label/value pairs for the info table)
    prefix_info = [("Network", prefix.prefix)]
    if afi == "4":
        prefix_info += [
            ("Broadcast", prefix.broadcast),
            ("Netmask", prefix.netmask),
            # NOTE(review): "Widlcard" is a typo in the displayed label;
            # left unchanged here as it is runtime-visible text
            ("Widlcard", prefix.wildcard),
            ("Size", prefix.size),
            ("Usage", prefix.usage_percent),
            ("Usage Address", prefix.address_usage_percent),
        ]
    if addresses:
        prefix_info += [("Used addresses", len(addresses))]
        if afi == "4":
            free = prefix.size - len(addresses)
            # Reserve network/broadcast when there is room for them
            prefix_info += [("Free addresses", free - 2 if free >= 2 else free)]
    # Prefix discovery
    dmap = {"E": "Enabled", "D": "Disabled"}
    if prefix.prefix_discovery_policy == "P":
        t = "Profile (%s)" % dmap[prefix.profile.prefix_discovery_policy]
    else:
        t = dmap[prefix.prefix_discovery_policy]
    prefix_info += [("Prefix Discovery", t)]
    # Address discovery
    if prefix.address_discovery_policy == "P":
        t = "Profile (%s)" % dmap[prefix.profile.address_discovery_policy]
    else:
        t = dmap[prefix.address_discovery_policy]
    prefix_info += [("Address Discovery", t)]
    # Source
    prefix_info += [
        (
            "Source",
            {"M": "Manual", "i": "Interface", "w": "Whois Route", "n": "Neighbor"}.get(
                prefix.source, "-"
            ),
        )
    ]
    #
    # Add custom fields
    for f in CustomField.table_fields("ip_prefix"):
        v = getattr(prefix, f.name)
        prefix_info += [(f.label, v if v is not None else "")]
    # Ranges: lay out possibly-overlapping address ranges into parallel
    # "slots" (columns) so they can be rendered side by side
    ranges = []
    rs = []
    max_slots = 0
    r_spots = []
    if addresses:
        # Assign ranges colors
        ranges = list(prefix.address_ranges)
        for r, c in zip(ranges, get_colors(len(ranges))):
            r.color = c
        # Schedule ranges
        r_changes = {}  # Address -> (set of entering ranges, set of leaving ranges)
        for r in ranges:
            if r.from_address not in r_changes:
                r_changes[r.from_address] = (set(), set())
            if r.to_address not in r_changes:
                r_changes[r.to_address] = (set(), set())
            r_changes[r.from_address][0].add(r)
            r_changes[r.to_address][1].add(r)
            # <!> ensure a change point exists just past the range end,
            # so the slot is released on the following address
            n = (IP.prefix(r.to_address) + 1).address
            if n not in r_changes:
                r_changes[n] = (set(), set())
        r_spots = list(six.iterkeys(r_changes))
        # Allocate slots: greedily reuse freed slot numbers as ranges
        # enter/leave, tracking the maximum concurrency in max_slots
        used_slots = set()
        free_slots = set()
        r_slots = {}  # Range -> slot
        max_slots = 0
        rs = sorted(
            ([IP.prefix(i), d, []] for i, d in six.iteritems(r_changes)), key=itemgetter(0)
        )
        for address, d, x in rs:
            entering, leaving = d
            for r in entering:
                if not free_slots:
                    free_slots.add(max_slots)
                    max_slots += 1
                s = free_slots.pop()
                used_slots.add(s)
                r_slots[r] = s
            for r in leaving:
                s = r_slots[r]
                used_slots.remove(s)
                free_slots.add(s)
        # Assign ranges to slots: record a snapshot of active ranges
        # per slot at each change point
        slots = [None] * max_slots
        for r in rs:
            address, [entering, leaving], _ = r
            for e in entering:
                slots[r_slots[e]] = e
            r[2] = slots[:]
            for l in leaving:
                slots[r_slots[l]] = None
        # Assign slots to addresses: walk change points and addresses
        # in parallel, attaching the current slot snapshot to each address
        c = [None] * max_slots
        rrs = rs[:]
        cr = rrs.pop(0) if rrs else None
        for a in addresses:
            address = IP.prefix(a.address)
            while cr and address >= cr[0]:
                c = cr[2]
                if rrs:
                    cr = rrs.pop(0)
                else:
                    break
            a.slots = c
    # Address spot: JSON list of (address, slot-ids, is-special) tuples
    # for the "add address" picker
    if can_add_address:
        special_addr = IP.prefix(prefix.prefix).special_addresses
        c = [None] * max_slots
        rrs = rs[:]
        if rrs:
            cr = rrs.pop(0)
        else:
            cr = None
        spot = []
        for a in self.get_prefix_spot(prefix, extra=r_spots):
            if cr and a is not None and a == cr[0]:
                c = [None if cc is None else cc.id for cc in cr[2]]
                if rrs:
                    cr = rrs.pop(0)
            spot += [(None if a is None else a.address, c, a in special_addr)]
        spot = ujson.dumps(spot)
    else:
        spot = None
    can_ping = spot is not None and len([a for a in addresses if a.managed_object]) > 0
    # Build custom styles (deduplicated by CSS class name)
    styles = {}
    if prefix.profile.style:
        styles[prefix.profile.style.css_class_name] = prefix.profile.style.css
    for p in prefixes:
        if p.profile.style and p.profile.style.css_class_name not in styles:
            styles[p.profile.style.css_class_name] = p.profile.style.css
    for a in addresses:
        if a.profile.style and a.profile.style.css_class_name not in styles:
            styles[a.profile.style.css_class_name] = a.profile.style.css
    styles = "\n".join(six.itervalues(styles))
    # Render
    return self.render(
        request,
        "vrf_index.html.j2",
        user=request.user,
        vrf=vrf,
        prefix=prefix,
        path=path,
        prefixes=prefixes,
        addresses=addresses,
        prefix_info=prefix_info,
        display_empty_message=not addresses and not prefixes,
        can_view=can_view,
        can_change=can_change,
        can_bind_vc=can_bind_vc,
        can_change_maintainers=can_change_maintainers,
        can_add_prefix=can_add_prefix,
        can_add_address=can_add_address,
        can_edit_special=can_edit_special,
        has_bookmark=has_bookmark,
        bookmarks=bookmarks,
        spot=spot,
        can_ping=can_ping,
        styles=styles,
        ranges=ranges,
        max_slots=max_slots,
        l_prefixes=l_prefixes,
    )
def test_ip_prefix(prefix, result):
    """Parametrized check: repr of the parsed prefix matches `result`."""
    parsed = IP.prefix(prefix)
    assert repr(parsed) == result
def test_ip_prefix():
    """Check IP.prefix() parsing and canonical repr for v4 and v6 inputs."""
    assert repr(IP.prefix("192.168.0.1")) == "<IPv4 192.168.0.1/32>"
    # Was `assert repr(IP.prefix("::/0")), "<IPv6 ::/0>"` — the comma made
    # the string an assert *message*, so the check could never fail
    assert repr(IP.prefix("::/0")) == "<IPv6 ::/0>"
    assert repr(IP.prefix("2001:db8::/32")) == "<IPv6 2001:db8::/32>"
    assert repr(IP.prefix("::ffff:192.168.0.1")) == "<IPv6 ::ffff:192.168.0.1/128>"