def test_get_probe_timeout(self):
    """
    CreateContainer probe times-out if get_probe runs too long.
    """
    clock = Clock()
    node_id = uuid4()
    node = Node(uuid=node_id, public_address=IPAddress('10.0.0.1'))
    control_service = FakeFlockerClient([node], node_id)
    cluster = BenchmarkCluster(
        IPAddress('10.0.0.1'),
        lambda reactor: control_service,
        {},
        None,
    )
    operation = CreateContainer(clock, cluster)
    d = operation.get_probe()
    clock.advance(DEFAULT_TIMEOUT.total_seconds())

    # No control_service.synchronize_state() call, so cluster state
    # never shows container is created.

    # The Deferred fails if container not created within 10 minutes.
    self.failureResultOf(d)
class _Namespace(object):
    """
    Implementation helper for :py:func:`create_network_namespace`.

    :ivar ADDRESSES: List of :py:class:`IPAddress`es in the created
        namespace.
    """
    # https://clusterhq.atlassian.net/browse/FLOC-135
    # Don't hardcode addresses in the created namespace
    ADDRESSES = [IPAddress('127.0.0.1'), IPAddress('10.0.0.1')]

    def create(self):
        """
        Create a new network namespace, and populate it with some addresses.
        """
        self.fd = open('/proc/self/ns/net')
        unshare(CLONE_NEWNET)
        check_call(['ip', 'link', 'set', 'up', 'lo'])
        check_call(['ip', 'link', 'add', 'eth0', 'type', 'dummy'])
        check_call(['ip', 'link', 'set', 'eth0', 'up'])
        check_call(['ip', 'addr', 'add', '10.0.0.1/8', 'dev', 'eth0'])

    def restore(self):
        """
        Restore the original network namespace.
        """
        setns(self.fd.fileno(), CLONE_NEWNET)
        self.fd.close()
def test_read_request_load_succeeds(self, _logger):
    """
    ``read_request_load_scenario`` starts and stops without collapsing.
    """
    c = Clock()
    node1 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.1'))
    node2 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.2'))
    cluster = BenchmarkCluster(
        node1.public_address,
        lambda reactor: FakeFlockerClient([node1, node2]),
        {node1.public_address, node2.public_address},
        default_volume_size=DEFAULT_VOLUME_SIZE)
    sample_size = 5
    s = read_request_load_scenario(c, cluster, sample_size=sample_size)

    d = s.start()

    # Request rate samples are recorded every second and we need to
    # collect enough samples to establish the rate which is defined
    # by `sample_size`. Therefore, advance the clock by
    # `sample_size` seconds to obtain enough samples.
    c.pump(repeat(1, sample_size))
    s.maintained().addBoth(lambda x: self.fail())
    d.addCallback(lambda ignored: s.stop())
    c.pump(repeat(1, sample_size))

    self.successResultOf(d)
def setUp(self):
    super(InterfaceTests, self).setUp()
    self.node_1 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.1'))
    self.node_2 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.2'))
    self.client = self.create_client()
def test_read_request_load_start_stop_start_succeeds(self, _logger):
    """
    ``read_request_load_scenario`` starts, stops and starts without
    collapsing.
    """
    c = Clock()
    node1 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.1'))
    node2 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.2'))
    cluster = BenchmarkCluster(
        node1.public_address,
        lambda reactor: FakeFlockerClient([node1, node2]),
        {node1.public_address, node2.public_address},
        default_volume_size=DEFAULT_VOLUME_SIZE)
    sample_size = 5
    s = read_request_load_scenario(c, cluster, sample_size=sample_size)

    # Start and stop
    s.start()
    c.pump(repeat(1, sample_size))
    s.stop()

    # Start again and verify the scenario succeeds
    d = s.start()
    c.pump(repeat(1, sample_size))
    s.maintained().addBoth(lambda x: self.fail())
    d.addCallback(lambda ignored: s.stop())
    c.pump(repeat(1, sample_size))
    self.successResultOf(d)
def ipranges_to_ipobjects(ranges):
    """
    Parse a list of ip ranges (texts, e.g. "12.12.12.12-12.12.12.34/32")
    and return a (compressed) list of IPNetwork objects.

    Everything beyond a hash ('#') is ignored. Malformed lines are also
    ignored.
    """
    re_ip4range = re.compile(r"""
        ^ \s*
        (?:(?P<start>\d+\.\d+\.\d+\.\d+)-)?
        (?P<end>\d+\.\d+\.\d+\.\d+)(?:/(?P<mask>\d+))?
        \s* (?:\#.*)? $
        """, re.VERBOSE).match

    addresses = list()
    for iprange in ranges:
        # just skip this line if the regex doesn't match
        iprange = re_ip4range(iprange)
        if not iprange:
            continue
        iprange = iprange.groupdict()

        # extract end and (optional) start as IPAddresses
        end = IPAddress(iprange["end"])
        address = IPAddress(iprange["start"] or end)
        assert address <= end

        # turn the mask into a string-format-function
        mask_fmt = "{{}}/{}".format(
            iprange["mask"] or str(end.max_prefixlen)).format

        # computation of the size of each subnet is hard, so we prefer
        # while over for
        while address <= end:
            addresses.append(IPNetwork(mask_fmt(str(address))))
            address += int(addresses[-1].broadcast) - int(addresses[-1].network) + 1

    return tuple(collapse_address_list(addresses))
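# Usage sketch (hypothetical input; assumes the ipaddr-style IPAddress,
# IPNetwork and collapse_address_list used above): a dashed range expands
# into per-address /32 networks, which collapse_address_list then merges
# into the smallest covering set.
#
#   ipranges_to_ipobjects(["10.0.0.0-10.0.0.255  # lab range"])
#   # -> (IPNetwork('10.0.0.0/24'),)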
def process_ipv4_csv(filename, asn_name_map):
    """ process csv for IPv4 input file """
    ip_family = IP_FAMILY_IPV4
    csv_rows = []
    input_fieldnames = [MIN_IP_INT_COLUMN, MAX_IP_INT_COLUMN, ASN_STRING_COLUMN]

    with open(filename, 'rb') as csvfile:
        reader = csv.DictReader(csvfile, fieldnames=input_fieldnames)
        for row in reader:
            min_ip = IPAddress(v4_int_to_packed(int(row[MIN_IP_INT_COLUMN])))
            max_ip = IPAddress(v4_int_to_packed(int(row[MAX_IP_INT_COLUMN])))
            (asn_number, asn_name) = get_asn_number_name(row[ASN_STRING_COLUMN])
            row[ASN_NUMBER_COLUMN] = asn_number
            row[ASN_NAME_COLUMN] = (
                asn_name_map[asn_number] if asn_number in asn_name_map else asn_name)
            row[MIN_IP_HEX_COLUMN] = hex_encode_ip(min_ip)
            row[MAX_IP_HEX_COLUMN] = hex_encode_ip(max_ip)
            row[MIN_IP_COLUMN] = str(min_ip)
            row[MAX_IP_COLUMN] = str(max_ip)
            row[IP_FAMILY_COLUMN] = ip_family
            del row[MIN_IP_INT_COLUMN]
            del row[MAX_IP_INT_COLUMN]
            csv_rows.append(row)

    # Write the new CSV file with new columns
    with open(OUTPUT_FILENAME, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=OUTPUT_FIELDNAMES)
        writer.writeheader()
        for row in csv_rows:
            writer.writerow(row)
def process_ipv6_csv(filename, asn_name_map):
    """ process csv for IPv6 input file """
    ip_family = IP_FAMILY_IPV6
    csv_rows = []
    input_fieldnames = [ASN_STRING_COLUMN, MIN_IP_COLUMN, MAX_IP_COLUMN, 'unused']

    with open(filename, 'rb') as csvfile:
        reader = csv.DictReader(csvfile, fieldnames=input_fieldnames)
        for row in reader:
            min_ip = IPAddress(row[MIN_IP_COLUMN])
            max_ip = IPAddress(row[MAX_IP_COLUMN])
            (asn_number, asn_name) = get_asn_number_name(row[ASN_STRING_COLUMN])
            row[ASN_NUMBER_COLUMN] = asn_number
            row[ASN_NAME_COLUMN] = (
                asn_name_map[asn_number] if asn_number in asn_name_map else asn_name)
            row[MIN_IP_HEX_COLUMN] = hex_encode_ip(min_ip)
            row[MAX_IP_HEX_COLUMN] = hex_encode_ip(max_ip)
            row[MIN_IP_COLUMN] = str(min_ip)
            row[MAX_IP_COLUMN] = str(max_ip)
            row[IP_FAMILY_COLUMN] = ip_family
            del row['unused']
            csv_rows.append(row)

    # Append the CSV file with new rows for IPv6 (do not write a new csv header)
    with open(OUTPUT_FILENAME, 'a') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=OUTPUT_FIELDNAMES)
        for row in csv_rows:
            writer.writerow(row)
def from_cluster_yaml(cls, path):
    """
    Create a cluster from Quick Start Installer files.

    :param FilePath path: directory containing Quick Start Installer
        ``cluster.yml`` and certificate files.
    :return: A ``BenchmarkCluster`` instance.
    """
    with path.child('cluster.yml').open() as f:
        cluster = yaml.safe_load(f)
    validate_cluster_configuration(cluster)
    control_node_address = cluster['control_node']
    public_addresses = {
        IPAddress(node['private']): IPAddress(node['public'])
        for node in cluster['agent_nodes']
    }
    control_service = partial(
        FlockerClient,
        host=control_node_address,
        port=4523,
        ca_cluster_path=path.child('cluster.crt'),
        cert_path=path.child('user.crt'),
        key_path=path.child('user.key'))
    return cls(
        IPAddress(control_node_address),
        control_service,
        public_addresses,
        None,
    )
def test_only_specified_proxy_deleted(self):
    """
    Proxies other than a deleted proxy are still listed.
    """
    proxy_one = self.network.create_proxy_to(IPAddress("10.0.0.1"), 1)
    proxy_two = self.network.create_proxy_to(IPAddress("10.0.0.2"), 2)
    self.network.delete_proxy(proxy_one)
    self.assertEqual([proxy_two], self.network.enumerate_proxies())
def __makeflow(self):
    if haveIPAddrGen:
        srcip = str(IPv4Address(ipaddrgen.generate_addressv4(self.ipsrcgen)))
        dstip = str(IPv4Address(ipaddrgen.generate_addressv4(self.ipdstgen)))
    else:
        srcip = str(IPAddress(int(self.ipsrc) + random.randint(0, self.ipsrc.numhosts - 1)))
        dstip = str(IPAddress(int(self.ipdst) + random.randint(0, self.ipdst.numhosts - 1)))

    ipproto = self.ipproto
    sport = dport = 0
    if ipproto == IPPROTO_ICMP:
        # std way that netflow encodes icmp type/code:
        # type in high-order byte of dport,
        # code in low-order byte
        t = next(self.icmptype)
        c = next(self.icmpcode)
        dport = t << 8 | c
        # print 'icmp t,c,dport',hex(t),hex(c),hex(dport)
    else:
        if self.sport:
            sport = next(self.sport)
        if self.dport:
            dport = next(self.dport)

    flet = Flowlet(FlowIdent(srcip, dstip, ipproto, sport, dport))
    flet.iptos = next(self.iptos)
    flet.flowstart = flet.flowend = fscore().now

    if flet.ipproto == IPPROTO_TCP:
        flet.ackflow = not self.autoack

        tcpflags = next(self.tcpflags)
        flaglist = tcpflags.split('|')
        xtcpflags = 0x0
        for f in flaglist:
            if f == 'FIN':
                xtcpflags |= 0x01
            elif f == 'SYN':
                xtcpflags |= 0x02
            elif f == 'RST':
                xtcpflags |= 0x04
            elif f == 'PUSH' or f == 'PSH':
                xtcpflags |= 0x08
            elif f == 'ACK':
                xtcpflags |= 0x10
            elif f == 'URG':
                xtcpflags |= 0x20
            elif f == 'ECE':
                xtcpflags |= 0x40
            elif f == 'CWR':
                xtcpflags |= 0x80
            else:
                raise InvalidFlowConfiguration('Invalid TCP flags mnemonic ' + f)
        flet.tcpflags = xtcpflags

    return flet
def setUp(self):
    self.node_1 = Node(
        uuid=uuid4(), public_address=IPAddress('10.0.0.1')
    )
    self.node_2 = Node(
        uuid=uuid4(), public_address=IPAddress('10.0.0.2')
    )
    self.client = self.create_client()
def from_acceptance_test_env(cls, env):
    """
    Create a cluster from acceptance test environment variables. See the
    Flocker documentation acceptance testing page for more details.

    :param dict env: Dictionary mapping acceptance test environment names
        to values.
    :return: A ``BenchmarkCluster`` instance.
    :raise KeyError: if expected environment variables do not exist.
    :raise ValueError: if environment variables are malformed.
    :raise jsonschema.ValidationError: if host mapping is not a valid
        format.
    """
    control_node_address = env['FLOCKER_ACCEPTANCE_CONTROL_NODE']
    certs = FilePath(env['FLOCKER_ACCEPTANCE_API_CERTIFICATES_PATH'])
    try:
        host_to_public = json.loads(
            env['FLOCKER_ACCEPTANCE_HOSTNAME_TO_PUBLIC_ADDRESS'])
        validate_host_mapping(host_to_public)
        public_addresses = {
            IPAddress(k): IPAddress(v) for k, v in host_to_public.items()
        }
    except ValueError as e:
        raise type(e)(
            ': '.join(('FLOCKER_ACCEPTANCE_HOSTNAME_TO_PUBLIC_ADDRESS',) + e.args))
    control_service = partial(
        FlockerClient,
        host=control_node_address,
        port=4523,
        ca_cluster_path=certs.child('cluster.crt'),
        cert_path=certs.child('user.crt'),
        key_path=certs.child('user.key'))
    try:
        control_node_ip = IPAddress(control_node_address)
    except ValueError as e:
        raise type(e)(
            ': '.join(('FLOCKER_ACCEPTANCE_CONTROL_NODE',) + e.args))
    try:
        default_volume_size = int(
            env['FLOCKER_ACCEPTANCE_DEFAULT_VOLUME_SIZE'])
    except ValueError as e:
        raise type(e)(
            ': '.join(('FLOCKER_ACCEPTANCE_DEFAULT_VOLUME_SIZE',) + e.args))
    return cls(
        control_node_ip,
        control_service,
        public_addresses,
        default_volume_size,
    )
def lint_ipaddresses(doc, cluster_network=IPNetwork('10.61.34.0/24')):
    if doc.xpath('/coherence'):
        # The Coherence configuration element is "well-known-addresses"
        # (the original xpath said "wll-known-addresses", which matches nothing).
        wka_addresses = doc.xpath(
            '/coherence/cluster-config/unicast-listener/'
            'well-known-addresses/socket-address')
        for wka_address in wka_addresses:
            address = IPAddress(wka_address.xpath('./address/text()')[0])
            port = int(wka_address.xpath('./port/text()')[0])
            assert port in (35501, 35502, 35503)
            assert address.is_private()
            assert address in cluster_network
def find(self, ipstr):
    ''' return the postcode of the province in which the ip addr resides '''
    ip = IPAddress(ipstr)
    pos = self.__binary_search(ip._ip)
    ip_start = IPAddress(self.__ip_list_start[pos])
    ip_end = IPAddress(self.__ip_list_end[pos][0])
    if ip > ip_start and ip < ip_end:
        if self.__ip_list_end[pos][1] < 0:
            raise Exception(ipstr + " not found")
        return self.__ip_list_end[pos][1]
    raise Exception(ipstr + " not found")
def __getitem__(self, n):
    network = int(self.network)
    broadcast = int(self.broadcast)
    if n >= 0:
        if network + n > broadcast:
            raise IndexError
        return IPAddress(network + n, version=self._version)
    else:
        n += 1
        if broadcast + n < network:
            raise IndexError
        return IPAddress(broadcast + n, version=self._version)
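# Usage sketch (assuming this __getitem__ belongs to an ipaddr-style
# IPNetwork class): non-negative indices count up from the network
# address, negative indices count back from the broadcast address, and
# anything outside the network raises IndexError.
#
#   net = IPNetwork('10.0.0.0/24')
#   net[0]     # IPAddress('10.0.0.0')
#   net[-1]    # IPAddress('10.0.0.255')
#   net[300]   # IndexError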
def exportflow(self, ts, flet):
    flowrec = cflow.packrecord(
        srcaddr=int(IPAddress(flet.srcaddr)),
        dstaddr=int(IPAddress(flet.dstaddr)),
        pkts=flet.pkts,
        bytes=flet.size,
        start=int(flet.flowstart),
        end=int(flet.flowend),
        srcport=flet.srcport,
        dstport=flet.dstport,
        tcpflags=flet.tcpflags,
        ipproto=flet.ipproto,
        iptos=flet.iptos)
    self.outfile.write(flowrec)
def ScopedIPAddress(*args, **kwargs):
    zone_index = None
    if kwargs.get("version") == 6 and kwargs.pop("allow_zone_index", False):
        value = args[0]
        if value.count("%") == 1:
            value, zone_index = value.split("%")
            args = (value,) + args[1:]
    result = IPAddress(*args, **kwargs)
    result.zone_index = zone_index
    result._string_from_ip_int = lambda ip_int: (
        result.__class__._string_from_ip_int(result, ip_int) +
        (("%" + result.zone_index) if result.zone_index is not None else ""))
    return result
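# Usage sketch (hypothetical values; assumes the IPAddress factory used
# above accepts a ``version`` keyword and tolerates extra instance
# attributes): the zone index survives parsing and is re-appended when
# the address is rendered back to a string.
#
#   addr = ScopedIPAddress('fe80::1%eth0', version=6, allow_zone_index=True)
#   addr.zone_index   # 'eth0'
#   str(addr)         # 'fe80::1%eth0'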
def make_cluster(self, make_flocker_client):
    """
    Create a cluster that can be used by the scenario tests.
    """
    node1 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.1'))
    node2 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.2'))
    return BenchmarkCluster(
        node1.public_address,
        lambda reactor: make_flocker_client(
            FakeFlockerClient([node1, node2]), reactor),
        {node1.public_address, node2.public_address},
        default_volume_size=DEFAULT_VOLUME_SIZE,
    )
def get_geo_for_ip(ip_address):
    try:
        ip_address = IPAddress(ip_address)
        if not ip_address.is_private:
            asn_info = ASN_READER.asn(ip_address)
            city_info = CITY_READER.city(ip_address)
            zip_code = city_info.postal.code
            return dict(
                ip=ip_address,
                asn=asn_info.autonomous_system_organization,
                country_code=city_info.country.iso_code,
                city=city_info.city.names["en"] if city_info.city.names else None,
                zip_code=zip_code,
                country=(city_info.registered_country.names["en"]
                         if city_info.registered_country.names else None),
                continent=(city_info.continent.names["en"]
                           if city_info.continent.names else None))
        raise Exception("e")
    except Exception as e:
        return dict(ip=ip_address, asn=None, country_code=None, city=None,
                    state=None, country=None, continent=None)
def _getCmdLineArgs(self):
    if not self.cmdLineArgs:
        return ()
    version = str(self.snmpVersion).lstrip('v')
    if version == '2':
        version += 'c'
    if '%' in self.ip:
        address, interface = self.ip.split('%')
    else:
        address = self.ip
        interface = None
    log.debug("AgentProxy._getCmdLineArgs: using google ipaddr on %s" % address)
    ipobj = IPAddress(address)
    agent = _get_agent_spec(ipobj, interface, self.port)
    cmdLineArgs = list(self.cmdLineArgs) + [
        '-v', str(version),
        '-c', self.community,
        '-t', str(self.timeout),
        '-r', str(self.tries),
        agent,
    ]
    return cmdLineArgs
def getAllBridgeHistory(self):
    cur = self._cur
    v = cur.execute("SELECT * FROM BridgeHistory")
    if v is None:
        return
    for h in v:
        yield BridgeHistory(h[0], IPAddress(h[1]), h[2], h[3], h[4], h[5],
                            h[6], h[7], h[8], h[9], h[10])
def validate_ip(value, required=True):
    if not required and not value:
        return
    # will raise a ValueError
    IPAddress(value)
    return value
def call_proxy_url(request, uuid):
    context = request.GET.get('context')
    ip = IPAddress(request.META['REMOTE_ADDR'])
    for subnet in TrustedSubnet.objects.all():
        if ip in IPNetwork(subnet.subnet):
            break
    else:
        return HttpResponseForbidden()

    proxy_url = get_object_or_404(
        ProxyUrl.objects.filter(uuid=uuid, context=context, subnet=subnet))
    proxy_url.last_access = datetime.now()
    proxy_url.save()

    view, args, kwargs = resolve(proxy_url.url)
    user = proxy_url.user
    user.backend = proxy_url.user_backend or \
        settings.AUTHENTICATION_BACKENDS[0]
    login(request, user)
    request.proxy_url = proxy_url
    kwargs['request'] = request
    return view(*args, **kwargs)
def IPSocket(string):
    u"""
    This helper creates a dictionary containing address and port from a
    parsed IP address string.
    Throws ValueError in case of failure (e.g. string is not a valid IP
    address).

    **Example usage**

    >>> IPSocket('gaga:gogo')
    Traceback (most recent call last):
        ...
    ValueError: 'gaga:gogo' is not a valid IP socket.

    >>> IPSocket('239.232.0.222:5004')
    {'ip': '239.232.0.222', 'port': 5004}

    .. warning::

        TODO IPv6 ready : >>> IPSocket('[2001:0db8:0000:0000:0000:ff00:0042]:8329')
    """
    try:
        (ip, port) = string.rsplit(':', 1)
        #ip = ip.translate(None, '[]')
        IPAddress(ip)  # Seem not IPv6 ready
        port = int(port)
    except Exception:
        raise ValueError("%r is not a valid IP socket." % string)
    return {'ip': ip, 'port': port}
def __init__(self, base_path):
    """
    :param FilePath base_path: The path beneath which all of the temporary
        SSH server-related files will be created.  An ``ssh`` directory
        will be created as a child of this directory to hold the key pair
        that is generated.  An ``sshd`` directory will also be created
        here to hold the generated host key.  A ``home`` directory is
        also created here and used as the home directory for shell logins
        to the server.
    """
    self.home = base_path.child(b"home")
    self.home.makedirs()

    ssh_path = base_path.child(b"ssh")
    ssh_path.makedirs()
    self.key_path = ssh_path.child(b"key")
    key = generate_ssh_key(self.key_path)

    sshd_path = base_path.child(b"sshd")
    sshd_path.makedirs()
    self.host_key_path = sshd_path.child(b"ssh_host_key")
    generate_ssh_key(self.host_key_path)

    factory = OpenSSHFactory()
    realm = _UnixSSHRealm(self.home)
    checker = _InMemoryPublicKeyChecker(public_key=key.public())
    factory.portal = Portal(realm, [checker])
    factory.dataRoot = sshd_path.path
    factory.moduliRoot = b"/etc/ssh"

    self._port = reactor.listenTCP(0, factory, interface=b"127.0.0.1")
    self.ip = IPAddress(self._port.getHost().host)
    self.port = self._port.getHost().port
def bytesToCanonIp(byteString):
    """
    SNMP provides either a 4-byte or 16-byte index to table items, where
    the index represents an IPV4 or IPV6 address, respectively.
    Raises a ValueError for incompatible types.

    >>> bytesToCanonIp(['254','128','0','0','0','0','0','0','2','80','86','255','254','138','46','210'])
    'fe80::250:56ff:fe8a:2ed2'
    >>> bytesToCanonIp(['253','0','0','0','0','0','0','0','0','0','0','0','10','175','210','5'])
    'fd00::aaf:d205'
    >>> bytesToCanonIp(['hello','world'])
    Traceback (most recent call last):
        ...
    ValueError: Unsupported IP Address: hello.world
    >>> bytesToCanonIp(['253','0','0','0','0','0','0','0','0','0','0','0','10','175','210','5'])
    'fd00::aaf:d205'
    """
    byteStringLen = len(byteString)
    if byteStringLen == IPV4_ADDR_LEN:
        rawIpStr = '.'.join(byteString)
    elif byteStringLen == IPV6_ADDR_LEN:
        rawIpStr = _bytesToCanonIpv6(byteString)
    else:
        raise ValueError("Unsupported IP Address: %s" % '.'.join(byteString))
    return str(IPAddress(rawIpStr))
def bytesToCanonIpv6(byteString):
    """
    SNMP provides a 16-byte index to table items, where the index
    represents the IPv6 address.
    Return an empty string or the canonicalized IPv6 address.

    >>> bytesToCanonIpv6(['254','128','0','0','0','0','0','0','2','80','86','255','254','138','46','210'])
    'fe80::250:56ff:fe8a:2ed2'
    >>> bytesToCanonIpv6(['253','0','0','0','0','0','0','0','0','0','0','0','10','175','210','5'])
    'fd00::aaf:d205'
    >>> bytesToCanonIpv6(['hello','world'])
    ''
    >>> bytesToCanonIpv6(['253','0','0','0','0','0','0','0','0','0','0','0','10','175','210','5'])
    'fd00::aaf:d205'
    """
    # To form an IPv6 address, need to combine pairs of octets (in hex)
    # and join them with a colon
    try:
        left = map(int, byteString[::2])
        right = map(int, byteString[1::2])
    except ValueError:
        return ''

    ipv6 = ':'.join("%x%02x" % tuple(x) for x in zip(left, right))

    # Now canonicalize the IP
    ip = ''
    try:
        ip = str(IPAddress(ipv6))
    except ValueError:
        pass
    return ip
def get_ip_obj(cls, module_name, ifname, addr):
    if addr:
        try:
            return IPNetwork(addr) if '/' in addr else IPAddress(addr)
        except Exception as e:
            cls.logger.warning('%s: %s: %s' % (module_name, ifname, str(e)))
    return None
def get_paths(self, client, src, dst, max_hop):
    '''
    calc paths from src to dst using backtracking. can add memoization to
    convert to dynamic programming for better scalability when network
    is large.
    '''
    prefix_dbs = client.get_prefix_dbs()
    dst_addr = dst
    # if dst is node, we get its loopback addr
    if ':' not in dst:
        dst_addr = self.get_loopback_addr(prefix_dbs, dst).split('/')[0]
    try:
        IPAddress(dst_addr)
    except ValueError:
        print("node name or ip address not valid.")
        sys.exit(1)

    adj_dbs = client.get_adj_dbs()
    if2node = self.get_if2node_map(adj_dbs)
    fib_routes = defaultdict(list)

    paths = []

    def _backtracking(cur, path, hop, visited, in_fib):
        if hop > max_hop:
            return

        cur_lpm_len = self.get_lpm_len_from_node(cur, dst_addr, prefix_dbs)
        next_hop_nodes = self.get_nexthop_nodes(
            client.get_route_db(cur), dst_addr, cur_lpm_len,
            if2node, fib_routes, in_fib)

        if len(next_hop_nodes) == 0:
            if hop != 1:
                paths.append((in_fib, path[:]))
            return

        for next_hop_node in next_hop_nodes:
            next_hop_node_name = next_hop_node[0]
            # prevent loops
            if next_hop_node_name in visited:
                return

            path.append([hop] + next_hop_node)
            visited.add(next_hop_node_name)

            # check if next hop node is in fib path
            is_nexthop_in_fib_path = False
            for nexthop in fib_routes[cur]:
                if next_hop_node[3] == utils.sprint_addr(nexthop.addr) and \
                        next_hop_node[1] == nexthop.ifName:
                    is_nexthop_in_fib_path = True

            _backtracking(next_hop_node_name, path, hop + 1, visited,
                          is_nexthop_in_fib_path and in_fib)
            visited.remove(next_hop_node_name)
            path.pop()

    _backtracking(src, [], 1, set([src]), True)
    return paths
def _contain_public_ip(data, _used_networks):
    _has_public_ip = False
    _ip_regex = (r'\b((\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])\.){3}'
                 r'(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])\b')
    _not_public_regex = [
        r'\b10(\.\d{1,3}){3}',
        r'\b127(\.\d{1,3}){3}',
        r'\b169\.254(\.\d{1,3}){2}',
        r'172\.(1[6-9]|2[0-9]|3[0-1])(\.\d{1,3}){2}',
        r'192\.168(\.\d{1,3}){2}',
        r'2(2[4-9]|[3-5][0-9])(\.\d{1,3}){3}'
    ]
    for _match in re.finditer(_ip_regex, data):
        # If the IP address isn't public and doesn't belong to the pools
        # defined for deployment (e.g. admin, public, storage), then skip it
        if any(re.search(_r, _match.group()) for _r in _not_public_regex) \
                and not any(IPAddress(_match.group()) in IPNetwork(net)
                            for net in _used_networks):
            continue
        logger.debug('Usage statistics with public IP(s):\n {0}'.format(data))
        logger.error('Found public IP in usage statistics: "{0}"'.format(
            _match.group()))
        _has_public_ip = True
    return _has_public_ip