def rebase(self, vrf, new_prefix):
    """
    Rebase prefix and all nested prefixes/addresses to a new location.

    :param vrf: Target VRF
    :param new_prefix: New prefix (must be same AFI, not a shorter mask)
    :return: Rebased Prefix instance, refetched from database
    :raises ValueError: On rebase to self, AFI change, or lesser-size prefix
    """
    # NOTE(review): `b` was commented out but is used everywhere below,
    # so every call raised NameError. Restored.
    b = IP.prefix(self.prefix)
    nb = IP.prefix(new_prefix)
    # Validation
    if vrf == self.vrf and self.prefix == new_prefix:
        raise ValueError("Cannot rebase to self")
    if b.afi != nb.afi:
        raise ValueError("Cannot change address family during rebase")
    if b.mask < nb.mask:
        raise ValueError("Cannot rebase to prefix of lesser size")
    # Rebase prefix and all nested prefixes
    # Parents are left untouched
    for p in Prefix.objects.filter(vrf=self.vrf, afi=self.afi).extra(
            where=["prefix <<= %s"], params=[self.prefix]):
        np = IP.prefix(p.prefix).rebase(b, nb).prefix
        # Save through the model (not .update()) so model events fire
        p.prefix = np
        p.vrf = vrf
        p.save()  # Raise events
    # Rebase addresses
    # Parents are left untouched
    for a in Address.objects.filter(vrf=self.vrf, afi=self.afi).extra(
            where=["address <<= %s"], params=[self.prefix]):
        na = IP.prefix(a.address).rebase(b, nb).address
        a.address = na
        a.vrf = vrf
        a.save()  # Raise events
    # Rebase permissions
    # move all permissions to the nested blocks
    for pa in PrefixAccess.objects.filter(vrf=self.vrf).extra(
            where=["prefix <<= %s"], params=[self.prefix]):
        np = IP.prefix(pa.prefix).rebase(b, nb).prefix
        PrefixAccess.objects.filter(pk=pa.pk).update(prefix=np, vrf=vrf)
    # create permissions for covered blocks
    for pa in PrefixAccess.objects.filter(vrf=self.vrf).extra(
            where=["prefix >> %s"], params=[self.prefix]):
        PrefixAccess(
            user=pa.user,
            vrf=vrf,
            afi=pa.afi,
            prefix=new_prefix,
            can_view=pa.can_view,
            can_change=pa.can_change,
        ).save()
    # @todo: Rebase bookmarks
    # @todo: Update caches
    # Return rebased prefix
    return Prefix.objects.get(pk=self.pk)  # Updated object
def get_data(self, vrf, afi, prefix, **kwargs):
    """
    Build "free blocks" report for a prefix.

    :param vrf: VRF object (``.name`` used in the title)
    :param afi: Address family, "4" or "6"
    :param prefix: Prefix object whose free sub-blocks are listed
    :return: Report dataset with a single "Free Blocks" column
    """
    p = IP.prefix(prefix.prefix)
    return self.from_dataset(
        # NOTE(review): was `_("..." % {...})` -- translating the already
        # interpolated string never matches the message catalog.
        # Interpolate after translation, as the summary report does.
        title=_("Free blocks in VRF %(vrf)s (IPv%(afi)s), %(prefix)s") % {
            "vrf": vrf.name,
            "afi": afi,
            "prefix": prefix.prefix
        },
        columns=["Free Blocks"],
        data=[[unicode(f)] for f in p.iter_free(
            [IP.prefix(c.prefix) for c in prefix.children_set.all()])])
def clean(self, data):
    """
    Validate address range form data.

    :param data: Cleaned form data dict
    :return: Validated (and possibly amended) data dict
    :raises ValueError: On any validation failure
    """
    data = super(AddressRangeApplication, self).clean(data)
    afi = data["afi"]
    from_address = data["from_address"]
    to_address = data["to_address"]
    # Check AFI
    address_validator = is_ipv4 if afi == "4" else is_ipv6
    if not address_validator(from_address):
        raise ValueError("Invalid IPv%(afi)s 'From Address'" % {"afi": afi})
    if not address_validator(to_address):
        raise ValueError("Invalid IPv%(afi)s 'To Address'" % {"afi": afi})
    # Check from address not greater than to address
    if IP.prefix(from_address) > IP.prefix(to_address):
        raise ValueError(
            "'To Address' must be greater or equal than 'From Address'")
    # Check for valid "action" combination
    if "fqdn_template" in data and data[
            "fqdn_template"] and data["action"] != "G":
        raise ValueError(
            "'FQDN Template' must be clean for selected 'Action'")
    if "reverse_nses" in data and data[
            "reverse_nses"] and data["action"] != "D":
        raise ValueError(
            "'Reverse NSes' must be clean for selected 'Action'")
    # Set range as locked for "G" and "D" actions
    if data["action"] != "N":
        data["is_locked"] = True
    # @todo: check FQDN template
    # Check reverse_nses is a list of FQDNs or IPs
    if "reverse_nses" in data and data["reverse_nses"]:
        reverse_nses = data["reverse_nses"]
        for ns in reverse_nses.split(","):
            ns = ns.strip()
            if not is_ipv4(ns) and not is_ipv6(ns) and not is_fqdn(ns):
                raise ValueError("%s is invalid nameserver" % ns)
    # Check no locked range overlaps another locked range
    if data["is_locked"]:
        r = [
            r for r in AddressRange.get_overlapping_ranges(
                data["vrf"], data["afi"], data["from_address"],
                data["to_address"])
            if r.is_locked is True and r.name != data["name"]
        ]
        if r:
            # NOTE(review): fixed "ahother" typo in the error message
            raise ValueError(
                "Locked range overlaps with another locked range: %s" %
                unicode(r[0]))
    return data
def sync_prefixes(self, prefixes):
    """
    Apply prefixes to database

    :param prefixes: dict of (vpn_id, prefix) => DiscoveredPrefix
    :return:
    """
    # vpn_id -> [prefix, ]
    vrf_prefixes = defaultdict(list)
    for vpn_id, p in prefixes:
        vrf_prefixes[vpn_id] += [p]
    # build vpn_id -> VRF mapping
    self.logger.debug("Building VRF map")
    vrfs = {}
    for vpn_id in vrf_prefixes:
        vrf = VRF.get_by_vpn_id(vpn_id)
        if vrf:
            vrfs[vpn_id] = vrf
    missed_vpn_id = set(vrf_prefixes) - set(vrfs)
    if missed_vpn_id:
        # NOTE(review): message said "RD" though the listed values are
        # VPN IDs; aligned wording with sync_addresses
        self.logger.info(
            "VPN ID missed in VRF database and to be ignored: %s",
            ", ".join(missed_vpn_id))
    #
    self.logger.debug("Getting prefixes to synchronize")
    for vpn_id in vrfs:
        vrf = vrfs[vpn_id]
        seen = set()
        for p in Prefix.objects.filter(vrf=vrf,
                                       prefix__in=vrf_prefixes[vpn_id]):
            norm_prefix = IP.expand(p.prefix)
            # Confirmed prefix, apply changes and touch
            prefix = prefixes[vpn_id, norm_prefix]
            self.apply_prefix_changes(p, prefix)
            seen.add(norm_prefix)
        for p in set(vrf_prefixes[vpn_id]) - seen:
            # New prefix, create
            self.create_prefix(prefixes[vpn_id, p])
def get_interface_prefixes(self):
    """
    Collect prefixes from the "interface_prefix" discovery artefact.

    :return: List of DiscoveredPrefix (empty when no profile or artefact)
    """
    def extract_vlan(d):
        # A VLAN is meaningful only when exactly one is assigned
        vlans = d.get("vlan_ids")
        return vlans[0] if vlans and len(vlans) == 1 else None

    self.logger.debug("Getting interface prefixes")
    profile = self.object.object_profile.prefix_profile_interface
    if not profile:
        self.logger.info("Default interface prefix profile is not set. Skipping interface prefix discovery")
        return []
    artefact = self.get_artefact("interface_prefix")
    if not artefact:
        self.logger.info("No interface_prefix artefact, skipping interface prefixes")
        return []
    result = []
    for item in artefact:
        result += [
            DiscoveredPrefix(
                vpn_id=item.get("vpn_id", GLOBAL_VRF) or GLOBAL_VRF,
                prefix=str(IP.prefix(item["address"]).first),
                profile=profile,
                source=SRC_INTERFACE,
                description=item["description"],
                subinterface=item["subinterface"],
                vlan=extract_vlan(item),
                asn=None,
            )
        ]
    return result
def upload_axfr(data):
    """
    Import PTR records from a zone transfer (AXFR) dump into the address
    database.

    NOTE(review): relies on `prefix`, `vrf` and `afi` from the enclosing
    scope -- presumably a closure over the upload view; confirm against
    the caller.

    :param data: Iterable of zone-file text lines
    :return: Number of Address records created or updated
    """
    p = IP.prefix(prefix.prefix)
    count = 0
    for row in data:
        row = row.strip()
        # Skip blank lines and zone-file comments
        if row == "" or row.startswith(";"):
            continue
        row = row.split()
        # Only 5-field "IN PTR" records are processed
        if len(row) != 5 or row[2] != "IN" or row[3] != "PTR":
            continue
        # NOTE(review): removed redundant `if row[3] == "PTR"` check --
        # the filter above already guarantees it
        # @todo: IPv6
        x = row[0].split(".")
        # in-addr.arpa labels are reversed octets
        ip = "%s.%s.%s.%s" % (x[3], x[2], x[1], x[0])
        fqdn = row[4]
        if fqdn.endswith("."):
            fqdn = fqdn[:-1]
        # Leave only addresses residing into "prefix"
        # To prevent uploading to not-owned blocks
        if not p.contains(IPv4(ip)):
            continue
        a, changed = Address.objects.get_or_create(vrf=vrf, afi=afi,
                                                   address=ip)
        if a.fqdn != fqdn:
            a.fqdn = fqdn
            changed = True
        if changed:
            a.save()
            count += 1
    return count
def api_suggest_free(self, request, prefix_id):
    """
    Suggest free blocks of different sizes

    :param request: HTTP request (unused beyond framework contract)
    :param prefix_id: Prefix primary key
    :return: List of {"prefix": ..., "size": ...} suggestions
    """
    prefix = self.get_object_or_404(Prefix, id=int(prefix_id))
    p_mask = int(prefix.prefix.split("/")[1])
    # Free blocks, largest (numerically smallest mask) last
    free = sorted(
        IP.prefix(prefix.prefix).iter_free(
            [pp.prefix for pp in prefix.children_set.all()]),
        key=attrgetter("mask"),
        reverse=True,
    )
    if not free:
        # NOTE(review): fully-allocated prefix raised IndexError on
        # free[-1]; nothing to suggest
        return []
    suggestions = []
    # Walk masks from the smallest practical block down to the largest
    # that still fits under this prefix
    for mask in range(30 if prefix.is_ipv4 else 64,
                      max(p_mask + 1, free[-1].mask) - 1, -1):
        # Find smallest free block possible
        for p in free:
            if p.mask <= mask:
                suggestions += [{
                    "prefix": "%s/%d" % (p.address, mask),
                    "size": 2 ** (32 - mask) if prefix.is_ipv4 else None,
                }]
                break
    return suggestions
def iter_lazy_labels(cls, prefix: str):
    """
    Yield prefix-table filter labels for tables containing *prefix*.
    """
    parsed = IP.prefix(prefix)
    rows = PrefixTablePrefix.objects.filter(afi=parsed.afi).extra(
        where=["%s <<= prefix"], params=[prefix]
    )
    for row in rows:
        table_name = row.table.name
        yield f"noc::prefixfilter::{table_name}::<"
        if prefix == row.prefix:
            # Exact match gets the equality label as well
            yield f"noc::prefixfilter::{table_name}::="
def clean_args(self, obj, **kwargs):
    """
    Validate and coerce script parameters according to ``self.params``.

    :param obj: Managed object (used for profile-specific conversions)
    :param kwargs: Raw parameter values
    :return: dict of cleaned parameter values keyed by parameter name
    :raises ValueError: On missing required parameter or invalid value
    """
    args = {}
    for p in self.params:
        # NOTE(review): idiom fix -- was `not p.name in kwargs`
        if p.name not in kwargs and p.is_required and not p.default:
            raise ValueError("Required parameter '%s' is missed" % p.name)
        v = kwargs.get(p.name, p.default)
        if v is None:
            continue
        if p.type == "int":
            # Integer type
            try:
                v = int(v)
            except ValueError:
                raise ValueError(
                    "Invalid integer in parameter '%s': '%s'" % (p.name, v))
        elif p.type == "float":
            # Float type
            try:
                v = float(v)
            except ValueError:
                raise ValueError("Invalid float in parameter '%s': '%s'" %
                                 (p.name, v))
        elif p.type == "interface":
            # Interface
            try:
                v = obj.get_profile().convert_interface_name(v)
            except Exception:
                raise ValueError(
                    "Invalid interface name in parameter '%s': '%s'" %
                    (p.name, v))
        elif p.type == "ip":
            # IP address
            try:
                v = IP.prefix(v)
            except ValueError:
                raise ValueError("Invalid ip in parameter '%s': '%s'" %
                                 (p.name, v))
        elif p.type == "vrf":
            # Accept a VRF instance, a numeric id, or a name
            if isinstance(v, VRF):
                pass
            elif isinstance(v, six.integer_types):
                try:
                    v = VRF.objects.get(id=v)
                except VRF.DoesNotExist:
                    raise ValueError("Unknown VRF in parameter '%s': '%s'" %
                                     (p.name, v))
            elif isinstance(v, six.string_types):
                try:
                    v = VRF.objects.get(name=v)
                except VRF.DoesNotExist:
                    raise ValueError("Unknown VRF in parameter '%s': '%s'" %
                                     (p.name, v))
            else:
                raise ValueError("Unknown VRF in parameter '%s': '%s'" %
                                 (p.name, v))
        args[str(p.name)] = v
    return args
def resolve_as_set_prefixes_maxlen(cls, as_set, optimize=None):
    """
    Generate prefixes for as-sets.
    Returns a list of (prefix, min length, max length)
    """
    prefixes = cls._resolve_as_set_prefixes(as_set)
    max_len = config.peer.max_prefix_length
    # Explicit flag wins; otherwise consult configuration thresholds
    use_optimization = optimize
    if use_optimization is None:
        use_optimization = (
            config.peer.prefix_list_optimization
            and len(prefixes) >= config.peer.prefix_list_optimization_threshold
        )
    if use_optimization:
        # Optimization is enabled
        return [
            (p.prefix, p.mask, m)
            for p, m in optimize_prefix_list_maxlen(prefixes)
            if p.mask <= max_len
        ]
    # Optimization is disabled
    parsed = sorted(IP.prefix(p) for p in prefixes)
    return [(x.prefix, x.mask, x.mask) for x in parsed if x.mask <= max_len]
def clean(self, data):
    """
    Validate peering address fields of the form data.

    :param data: Cleaned form data dict
    :return: Validated data dict
    :raises ValueError: On invalid or inconsistent addresses
    """
    data = super(PeerApplication, self).clean(data)
    # Check address fields
    if not is_prefix(data["local_ip"]):
        raise ValueError("Invalid 'Local IP Address', must be in x.x.x.x/x form or IPv6 prefix")
    if not is_prefix(data["remote_ip"]):
        raise ValueError(
            "Invalid 'Remote IP Address', must be in x.x.x.x/x form or IPv6 prefix")
    if "local_backup_ip" in data and data["local_backup_ip"]:
        if not is_prefix(data["local_backup_ip"]):
            raise ValueError(
                "Invalid 'Local Backup IP Address', must be in x.x.x.x/x form or IPv6 prefix")
    if "remote_backup_ip" in data and data["remote_backup_ip"]:
        if not is_prefix(data["remote_backup_ip"]):
            raise ValueError(
                "Invalid 'Remote Backup IP Address', must be in x.x.x.x/x form or IPv6 prefix")
    # Check no or both backup addresses given
    has_local_backup = bool("local_backup_ip" in data and data["local_backup_ip"])
    has_remote_backup = bool("remote_backup_ip" in data and data["remote_backup_ip"])
    if has_local_backup != has_remote_backup:
        raise ValueError("One of backup addresses given. Set peer address")
    # Check all link addresses belongs to one AFI
    afi_set = set(
        IP.prefix(data[field]).afi
        for field in ["local_ip", "remote_ip",
                      "local_backup_ip", "remote_backup_ip"]
        if field in data and data[field]
    )
    if len(afi_set) > 1:
        raise ValueError("All neighboring addresses must have same address family")
    return data
def sync_addresses(self, addresses):
    """
    Apply addresses to database
    :param addresses: dict of (vpn_id, address) => DiscoveredAddress
    :return:
    """
    # Group discovered addresses by vpn_id
    per_vpn = defaultdict(list)
    for vpn_id, addr in addresses:
        per_vpn[vpn_id].append(addr)
    # build vpn_id -> VRF mapping
    self.logger.debug("Building VRF map")
    vrf_map = {}
    for vpn_id in per_vpn:
        resolved = VRF.get_by_vpn_id(vpn_id)
        if resolved:
            vrf_map[vpn_id] = resolved
    unresolved = set(per_vpn) - set(vrf_map)
    if unresolved:
        self.logger.info(
            "VPN ID are missed in VRF database and to be ignored: %s",
            ", ".join(unresolved))
    #
    self.logger.debug("Getting addresses to synchronize")
    for vpn_id in vrf_map:
        vrf = vrf_map[vpn_id]
        confirmed = set()
        for rec in Address.objects.filter(vrf=vrf,
                                          address__in=per_vpn[vpn_id]):
            norm = IP.expand(rec.address)
            # Confirmed address, apply changes and touch
            self.apply_address_changes(rec, addresses[vpn_id, norm])
            confirmed.add(norm)
        for addr in set(per_vpn[vpn_id]) - confirmed:
            # New address, create
            self.create_address(addresses[vpn_id, addr])
    # Detaching hanging addresses
    self.logger.debug("Checking for hanging addresses")
    for rec in Address.objects.filter(managed_object=self.object):
        norm = IP.expand(rec.address)
        found = addresses.get((rec.vrf.vpn_id, norm))
        if not found or found.source not in LOCAL_SRC:
            self.logger.info("Detaching %s:%s", rec.vrf.name, rec.address)
            rec.managed_object = None
            rec.save()
def iter_free(self):
    """
    Generator returning all available free prefixes inside
    :return:
    """
    occupied = [c.prefix for c in self.children_set.all()]
    for block in IP.prefix(self.prefix).iter_free(occupied):
        yield str(block)
def get_data(self, request):
    """
    Build the "cone power" report for active peers with explicit
    import filters.

    Cone power: number of /24-equivalents covered by a peer's import
    filter; unique cone power: /24 blocks covered by that peer only.
    """
    def ppower(prefix):
        # Number of /<powermask> blocks equivalent to the prefix
        m = int(prefix.split("/")[1])
        if m <= powermask:
            # NOTE(review): was `2 * (powermask - m)` (linear), but the
            # unique-power counter below counts 1 per /<powermask> block,
            # so the correct weight is 2 ** (powermask - m)
            return long(2 ** (powermask - m))
        else:
            return 0

    powermask = 24
    r = []  # (Description, as, filter, cone)
    peers = {}  # peer id -> peer
    cone_powers = {}  # peer id -> power
    uniq_powers = {}  # peer id -> power
    prefixes = {}  # Prefix -> set(peer ids)
    for p in Peer.objects.filter(status="A").exclude(import_filter="ANY"):
        peers[p.id] = p
        cone_powers[p.id] = 0
        for cp in WhoisCache.resolve_as_set_prefixes(p.import_filter,
                                                     optimize=True):
            # Get powers
            cone_powers[p.id] += ppower(cp)
            # Assign to prefixes
            for i in IP.prefix(cp).iter_cover(powermask):
                pfx = i.prefix
                try:
                    prefixes[pfx].add(p.id)
                except KeyError:
                    prefixes[pfx] = set([p.id])
    # Calculate unique powers
    for pfx in prefixes:
        pfx_peers = prefixes[pfx]
        if len(pfx_peers) == 1:
            # Unique
            peer = list(pfx_peers)[0]
            try:
                uniq_powers[peer] += 1
            except KeyError:
                uniq_powers[peer] = 1
    # Build result
    for peer_id in peers:
        p = peers[peer_id]
        r += [(p.description, "AS%d" % p.remote_asn, p.import_filter,
               cone_powers.get(peer_id, 0), uniq_powers.get(peer_id, 0))]
    r = sorted(r, key=lambda x: -x[4])
    return self.from_dataset(title=self.title, columns=[
        "Peer", "ASN", "Import Filter",
        TableColumn("Cone Power", format="numeric", align="right"),
        TableColumn("Uniq. Cone Power", format="numeric", align="right"),
    ], data=r)
def get_interfaces(self, afi, rd, exclude=None):
    """
    Returns a list of SI
    """
    def usable_v4(a):
        # Reject loopback, link-local, host /32 and unspecified addresses
        return not (a.startswith("127.") or a.startswith("169.254")
                    or a.endswith("/32") or a.startswith("0.0.0.0"))

    def usable_v6(a):
        # Reject loopback only
        return a != "::1"

    exclude = exclude or []
    si_fields = {
        "_id": 0,
        "name": 1,
        "forwarding_instance": 1,
        "managed_object": 1
    }
    if afi == self.IPv4:
        check = usable_v4
        addr_field = "ipv4_addresses"
        AFI = "IPv4"
    elif afi == self.IPv6:
        check = usable_v6
        addr_field = "ipv6_addresses"
        AFI = "IPv6"
    else:
        raise NotImplementedError()
    si_fields[addr_field] = 1
    for si in SubInterface._get_collection().find({"enabled_afi": AFI},
                                                  si_fields):
        if rd != self.get_rd(si["managed_object"],
                             si.get("forwarding_instance")):
            continue
        # Deduplicate prefixes per subinterface
        seen = set(exclude)
        for a in si.get(addr_field, []):
            if not check(a):
                continue
            prefix = str(IP.prefix(a).first)
            if prefix in seen:
                continue
            seen.add(prefix)
            self.p_power[prefix] += 1
            yield self.SI(si["managed_object"], si["name"],
                          si.get("forwarding_instance"), a, prefix)
def get_vc_prefixes(self, vc_id):
    """
    Return string network addresses of prefixes seen on subinterfaces
    belonging to the VC's VLAN, IPv4 first, then IPv6, each sorted.
    """
    vc = VC.get_by_id(vc_id)
    if not vc:
        return []
    objects = vc.vc_domain.managedobject_set.values_list("id", flat=True)
    v4 = set()
    v6 = set()
    # @todo: Exact match on vlan_ids
    qs = SubInterface.objects.filter(
        Q(managed_object__in=objects) & Q(vlan_ids=vc.l1)
        & (Q(enabled_afi=["IPv4"]) | Q(enabled_afi=["IPv6"]))
    ).only("enabled_afi", "ipv4_addresses", "ipv6_addresses")
    for si in qs:
        if "IPv4" in si.enabled_afi:
            v4.update(IP.prefix(ip).first for ip in si.ipv4_addresses)
        if "IPv6" in si.enabled_afi:
            v6.update(IP.prefix(ip).first for ip in si.ipv6_addresses)
    result = [str(x.first) for x in sorted(v4)]
    result += [str(x.first) for x in sorted(v6)]
    return result
def get_data(self, vrf, afi, prefix, **kwargs):
    """
    Summary report for a prefix: allocated / free / total address counts.

    :param vrf: VRF object (``.name`` used in the title)
    :param afi: Address family, "4" or "6"
    :param prefix: Prefix object to summarize
    :return: Report dataset
    """
    p = IP.prefix(prefix.prefix)
    allocated = [IP.prefix(a.prefix) for a in prefix.children_set.all()]
    free = list(p.iter_free(allocated))
    # NOTE(review): `data` was only assigned inside the IPv4 branch,
    # raising NameError for IPv6 requests. Initialize it up front.
    data = []
    if afi == "4":
        allocated_30 = [a for a in allocated if a.mask == 30]
        allocated_size = sum(a.size for a in allocated)
        allocated_30_size = sum(a.size for a in allocated_30)
        free_size = sum(a.size for a in free)
        total = p.size
        data = [
            ("Allocated addresses", allocated_size,
             float(allocated_size) * 100 / float(total)),
            (".... in /30", allocated_30_size,
             float(allocated_30_size) * 100 / float(total)),
            ("Free addresses", free_size,
             float(free_size) * 100 / float(total)),
            # NOTE(review): 1.0 looks inconsistent with the 0..100 scale
            # used above -- confirm against the percent column renderer
            ("Total addresses", total, 1.0),
        ]
        a_s = len(allocated)
        if a_s:
            avg_allocated_size = allocated_size / a_s
            avg_allocated_mask = 32 - int(
                math.ceil(math.log(avg_allocated_size, 2)))
            data += [
                ("Average allocated block", avg_allocated_size, ""),
                ("Average allocated mask", avg_allocated_mask, ""),
            ]
    return self.from_dataset(
        title=_("Summary for VRF %(vrf)s (IPv%(afi)s): %(prefix)s") % {
            "vrf": vrf.name,
            "afi": afi,
            "prefix": p.prefix
        },
        columns=[
            "",
            TableColumn(_("Size"), format="numeric", align="right"),
            TableColumn(_("%"), format="percent", align="right"),
        ],
        data=data,
    )
def get_uplinks_maxaddr(self):
    """
    Segment's Object with greater address is uplink
    :return: Single-element list with the chosen node id
    """
    # NOTE(review): sibling get_uplinks_minaddr uses the networkx 2.x
    # `G.nodes` view; use it here too instead of the deprecated `G.node`
    s = next(
        reversed(
            sorted((IP.prefix(self.G.nodes[i].get("address")), i)
                   for i in self.G.nodes
                   if self.G.nodes[i].get("role") == "segment")))
    return [s[1]]
def get_uplinks_minaddr(self):
    """
    Segment's Object with lesser address is uplink
    :return: Single-element list with the chosen node id
    """
    candidates = sorted(
        (IP.prefix(self.G.nodes[n].get("address")), n)
        for n in self.G.nodes
        if self.G.nodes[n].get("role") == "segment"
    )
    lowest = next(iter(candidates))
    return [lowest[1]]
def match(self, prefix):
    """
    Check the prefix is inside Prefix Table
    :param prefix: Prefix
    :type prefix: str
    :rtype: bool
    """
    parsed = IP.prefix(prefix)
    qs = PrefixTablePrefix.objects.filter(table=self, afi=parsed.afi)
    qs = qs.extra(where=["%s <<= prefix"], params=[prefix])
    return qs.exists()
def get_prefix_spot(self, prefix, sep=True, extra=None):
    """
    Return addresses around existing ones
    """
    extra = extra or []
    net = IP.prefix(prefix.prefix)
    # Small IPv4 networks are shown in full; larger ones only around
    # the already-known addresses
    if prefix.afi == "4" and len(net) <= self.MAX_IPv4_NET_SIZE:
        dist = self.MAX_IPv4_NET_SIZE
    else:
        dist = self.ADDRESS_SPOT_DIST
    known = [a.address for a in prefix.address_set.all()]
    return net.area_spot(known + extra, dist=dist, sep=sep)
def compile_ip_eq(self, f_name):
    """
    Compile an "ip equals" interface classification rule into Python
    source text.

    :param f_name: Name of the generated function
    :return: Generated Python source as a single string
    """
    ip = IP.prefix(self.value)
    lines = ["def %s(iface):" % f_name]
    lines += [
        " a = [si.ipv%(afi)s_addresses for si in iface.subinterface_set.filter(enabled_afi='IPv%(afi)s')]"
        % {"afi": ip.afi}
    ]
    lines += [" a = sum(a, [])"]
    if "/" in self.value:
        # Compare prefixes
        lines += [" return any(x for x in a if x == %r)" % ip.prefix]
    else:
        # Compare addresses
        bare = ip.prefix.split("/")[0]
        lines += [" return any(x for x in a if x.split('/')[0] == %r)" % bare]
    return "\n".join(lines)
def fn_MatchPrefix(self, _input, prefix, address):
    """
    Check `address` is within prefix

    :param _input: Iterable of contexts
    :param prefix: Variable reference, resolved per context
    :param address: Variable reference, resolved per context
    :return: Yields contexts whose resolved address lies inside the
        resolved prefix
    """
    for ctx in _input:
        # NOTE(review): resolve into locals -- the original rebound the
        # `prefix`/`address` parameters, so every context after the first
        # was resolved against already-resolved values instead of the
        # original variable references
        p = self.resolve_var(ctx, prefix)
        if not p:
            continue
        a = self.resolve_var(ctx, address)
        if not a:
            continue
        if a in IP.prefix(str(p)):
            yield ctx
def get_data(self, **kwargs):
    """
    Build the prefix-usage report, grouped into sections per VRF.
    """
    from django.db import connection

    rows = []
    prev_vrf = None
    cursor = connection.cursor()
    cursor.execute(self.QUERY)
    for vrf, rd, afi, prefix, description, used in cursor:
        if vrf != prev_vrf:
            rows += [SectionRow("%s (%s)" % (vrf, rd))]
            prev_vrf = vrf
        p = IP.prefix(prefix)
        if afi == "4":
            total = p.size
            if p.mask < 31 and total - used >= 2:
                # Exclude network and broadcast
                total = p.size - 2
            free = total - used
            percent = used * 100 / total
        elif afi == "6":
            if p.mask >= 96:
                total = 2 ** (128 - p.mask)
                free = total - used
                percent = used * 100 / total
            else:
                # Huge IPv6 blocks: counts are not meaningful
                total = free = percent = "-"
        rows += [[prefix, description, used, free, total, percent]]
    return self.from_dataset(
        title=self.title,
        columns=[
            "Prefix",
            "Description",
            TableColumn("IP Used", align="right", format="numeric"),
            TableColumn("IP Free", align="right", format="numeric"),
            TableColumn("IP Total", align="right", format="numeric"),
            TableColumn("% Used", align="right", format="percent"),
        ],
        data=rows,
    )
def apply_addresses(addresses, discovered_addresses):
    """
    Apply list of discovered addresses to addresses dict
    :param addresses: dict of (vpn_id, address) => DiscoveredAddress
    :param discovered_addresses: List of [DiscoveredAddress]
    :returns: Resulted addresses
    """
    for da in discovered_addresses:
        key = (da.vpn_id, IP.expand(da.address))
        current = addresses.get(key)
        if not current:
            # Not seen yet
            addresses[key] = da
        elif AddressCheck.is_preferred(current.source, da.source):
            # New address is preferable, replace
            addresses[key] = da
    return addresses
def apply_prefixes(prefixes, discovered_prefixes):
    """
    Apply list of discovered prefixes to prefix dict
    :param prefixes: dict of (vpn_id, prefix) => DiscoveredAddress
    :param discovered_prefixes: List of [DiscoveredAddress]
    :returns: Resulted prefixes
    """
    for dp in discovered_prefixes:
        key = (dp.vpn_id, IP.expand(dp.prefix))
        current = prefixes.get(key)
        if not current:
            # Not seen yet
            prefixes[key] = dp
        elif PrefixCheck.is_preferred(current.source, dp.source):
            # New prefix is preferable, replace
            prefixes[key] = dp
    return prefixes
def update_usage(self):
    """
    Recalculate the usage percentage for an IPv4 prefix node and store
    it in ``self.used``.
    """
    if self.prefix.afi != "4":
        return
    net = IP.prefix(self.prefix.prefix)
    size = net.size
    if size < 2:
        return
    if self.children:
        # Count prefixes
        covered = sum(c.size for c in self.children)
        usage = min(int(float(covered) * 100 / float(size)), 100)
    else:
        # Count addresses
        used = self.app.ip_usage.get(self.prefix.id, 0)
        # Networks larger than a /31 exclude network/broadcast addresses
        denominator = size - 2 if size > 2 else size
        usage = int(float(used) * 100 / float(denominator))
    self.used = usage
def save(self, *args, **kwargs):
    """
    Save address range, keeping auto-generated FQDN addresses in sync.

    For the "G" (generate) action, an Address record with a templated
    FQDN is created/updated for every address in the range; on action
    or boundary changes, addresses that fell out of the range are
    removed.
    """
    def generate_fqdns():
        # Prepare FQDN template
        t = Template(self.fqdn_template)
        # Sync FQDNs
        sn = 0
        for ip in self.addresses:
            # Generate FQDN (renamed from `vars` -- shadowed builtin)
            tpl_vars = {"afi": self.afi, "vrf": self.vrf,
                        "range": self, "n": sn}
            sn += 1
            if self.afi == "4":
                octets = ip.address.split(".")
                tpl_vars["ip"] = octets  # ip.0 .. ip.3
                # ip1, ip2, ip3, ip4 for backward compatibility
                for n, octet in enumerate(octets):
                    tpl_vars["ip%d" % (n + 1)] = octet
            elif self.afi == "6":
                tpl_vars["ip"] = ip.digits  # ip.0 .. ip.31
            fqdn = t.render(Context(tpl_vars))
            description = "Generated by address range '%s'" % self.name
            # Create or update address record when necessary
            a, created = Address.objects.get_or_create(
                vrf=self.vrf, afi=self.afi, address=ip.address
            )
            if created:
                a.fqdn = fqdn
                a.description = description
                a.save()
            # NOTE(review): was `a.description != a.description` (always
            # False), so description-only changes were never saved
            elif a.fqdn != fqdn or a.description != description:
                a.fqdn = fqdn
                a.description = description
                a.save()

    created = self.id is None
    if not created:
        # Get old values
        old = AddressRange.objects.get(id=self.id)
    super(AddressRange, self).save(*args, **kwargs)
    if created:
        # New
        if self.action == "G":
            generate_fqdns()
    else:
        # Changed
        if old.action == "G" and self.action != "G":
            # Drop all auto-generated IPs
            Address.objects.filter(
                vrf=self.vrf,
                afi=self.afi,
                address__gte=self.from_address,
                address__lte=self.to_address,
            ).delete()
        elif old.action != "G" and self.action == "G":
            # Generate IPs
            generate_fqdns()
        elif self.action == "G":
            # Check for boundaries change
            if IP.prefix(old.from_address) < IP.prefix(self.from_address):
                # Lower boundary raised up. Clean up addresses that fell
                # out of range.
                # NOTE(review): was `address__lt=self.to_address`, which
                # would wipe nearly the whole range; only addresses below
                # the new lower boundary must go
                Address.objects.filter(
                    vrf=self.vrf,
                    afi=self.afi,
                    address__gte=old.from_address,
                    address__lt=self.from_address,
                ).delete()
            if IP.prefix(old.to_address) > IP.prefix(self.to_address):
                # Upper boundary is lowered. Clean up addresses that fell
                # out of range
                Address.objects.filter(
                    vrf=self.vrf,
                    afi=self.afi,
                    address__gt=self.to_address,
                    address__lte=old.to_address,
                ).delete()
            # Finally recheck FQDNs
            generate_fqdns()
def addresses(self):
    """
    Generator returning all addresses in range
    """
    start = IP.prefix(self.from_address)
    stop = IP.prefix(self.to_address)
    return start.iter_address(until=stop)
def save(self, *args, **kwargs):
    """
    Persist the record, deriving the address family from the prefix.
    """
    # Keep AFI consistent with the stored prefix
    parsed = IP.prefix(self.prefix)
    self.afi = parsed.afi
    return super(PrefixTablePrefix, self).save(*args, **kwargs)