def compare(self, all_ip=IPSet(), input_ip=IPSet()) -> None:
    '''Compare the input IP ranges against the complete IP ranges and output the missing parts.'''
    t0 = datetime.now()
    self.result = '\n'.join([x.strCompressed() for x in (all_ip - input_ip)])
    t1 = (datetime.now() - t0).total_seconds()
    pft(str_blue_green('Elapsed time: ', '{:4.2f} s'.format(t1)))
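# A minimal sketch of the IPSet difference that compare() relies on, without the
# project-local helpers (pft, str_blue_green); prefixes here are illustrative.
from IPy import IP, IPSet

all_ip = IPSet([IP('192.0.2.0/24')])
input_ip = IPSet([IP('192.0.2.0/25')])
# IPSet subtraction splits covering prefixes as needed; the missing half remains.
print('\n'.join(x.strCompressed() for x in (all_ip - input_ip)))  # 192.0.2.128/25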
def not_in_use(self):
    "Show unused subnets in the CIDR range"
    if self.is_leaf() or self.net_type != "scope":
        return []
    base_set = IPSet([self.ip])
    for child in self.children:
        base_set.discard(child.ip)
    return base_set.prefixes
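# Standalone sketch of the arithmetic behind not_in_use(): discarding children
# from a scope IPSet leaves the unused gaps. The prefix values are illustrative.
from IPy import IP, IPSet

scope = IPSet([IP('10.0.0.0/8')])
for child in (IP('10.0.0.0/9'), IP('10.192.0.0/10')):
    scope.discard(child)  # discard() splits the covering prefix as needed
print(scope.prefixes)     # [IP('10.128.0.0/10')]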
def to_python(self, value):
    if value is None:
        return value
    if isinstance(value, IPSet):
        return value
    try:
        ip_list = json.loads(value)
        ip_set = IPSet([IP(ip) for ip in ip_list])
        ip_set.optimize()
        return ip_set
    except (TypeError, json.decoder.JSONDecodeError):
        # invalid json data in db
        raise ValidationError(self.default_error_messages['invalid'])
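# Round-trip sketch of the conversion done by to_python() above, with the Django
# field machinery stripped away; only the JSON-to-IPSet step is shown.
import json
from IPy import IP, IPSet

stored = json.dumps(['192.0.2.0/25', '192.0.2.128/25'])
ip_set = IPSet([IP(ip) for ip in json.loads(stored)])
ip_set.optimize()       # idempotent here: IPSet already optimizes on construction
print(ip_set.prefixes)  # [IP('192.0.2.0/24')]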
def fetch():
    a = IPSet([])
    totalnoofnewprefixes = 0
    for blocklist in blocklists:
        syslog.syslog('{0} fetching prefixes from: {1}'.format(syslogprefix, blocklist))
        r = requests.get(blocklist)
        for line in r.iter_lines():
            line = line.decode()  # requests yields bytes; decode before comparing to str
            # Skip overly broad bogon/multicast supernets.
            if line not in ("0.0.0.0/8", "240.0.0.0/4", "224.0.0.0/4"):
                filtered = linefilter(line)
                if filtered:
                    a.add(makeprefix(filtered))
        # `a.__len__` (without parentheses) was a bound method, not a count; use the
        # prefix list length, and report only what this list added (approximate,
        # since IPSet aggregates adjacent prefixes).
        noofprefixes = len(a.prefixes) - totalnoofnewprefixes
        syslog.syslog('{0} got {1} prefixes from: {2}'.format(syslogprefix, noofprefixes, blocklist))
        totalnoofnewprefixes = len(a.prefixes)
    syslog.syslog('{0} got {1} prefixes in total from upstream'.format(syslogprefix, totalnoofnewprefixes))
    # Withdraw routes that were announced before but are no longer upstream.
    for prefix in b:
        if b.len() > 0 and prefix in b and prefix not in a:
            a.discard(prefix)
            stdout.write('withdraw route ' + str(prefix) + nexthop)
            stdout.flush()
    # Announce routes that are new in this fetch.
    for prefix in a:
        if prefix in a and prefix not in b:
            stdout.write('announce route ' + str(prefix) + nexthop)
            stdout.flush()
    b.add(a)
def fetch():
    a = IPSet([])
    for blocklist in blocklists:
        syslog.syslog('generate-blacklist.py - fetching prefixes from: %s' % blocklist)
        r = requests.get(blocklist)
        for line in r.iter_lines():
            line = line.decode()  # requests yields bytes; decode before comparing to str
            # Skip overly broad bogon/multicast supernets.
            if line not in ("0.0.0.0/8", "240.0.0.0/4", "224.0.0.0/4"):
                if linefilter(line):
                    a.add(makeprefix(linefilter(line)))
    for prefix in b:
        if b.len() > 0 and prefix in b and prefix not in a:
            a.discard(prefix)
            stdout.write('withdraw route ' + str(prefix) + nexthop)
            stdout.flush()
    for prefix in a:
        if prefix in a and prefix not in b:
            stdout.write('announce route ' + str(prefix) + nexthop)
            stdout.flush()
    b.add(a)
def main():
    original_file_path = r'D:\xuexi\cheshiwenjian\哈尔滨CR全路由表.txt'
    print(f'Metro CR routing table (original) file path: {original_file_path}')
    format_file_path = r'D:\xuexi\cheshiwenjian\哈尔滨CR全路清理后文件.txt'
    print(f'Metro CR routing table (cleaned) file path: {format_file_path}')
    # usable_ip_str_path = input('Usable IP range file path: ')
    usable_ip_str_path = r'D:\xuexi\cheshiwenjian\哈尔滨城域网可用汇总路由.txt'
    print(f'Metro usable IP ranges file path: {usable_ip_str_path}')
    used_ip_str_path = r'D:\xuexi\cheshiwenjian\哈尔滨城域网已使用IP网段.txt'
    print(f'Metro used IP ranges file path: {used_ip_str_path}')
    # not_used_ip_str_path = input('Unused IP output file path: ')
    not_used_ip_str_path = r'D:\xuexi\cheshiwenjian\哈尔滨城域网未使用IP网段.txt'
    print(f'Metro unused IP ranges file path: {not_used_ip_str_path}')
    ip_net_mask = '22'
    not_used_ip_mask_str_path = r'D:\xuexi\cheshiwenjian\哈尔滨城域网未使用IP网段({}位以上).txt'.format(ip_net_mask)
    print(f'Harbin metro unused IP ranges (/{ip_net_mask} and above): {not_used_ip_mask_str_path}')
    usable_ip_str = read_file(usable_ip_str_path)
    usable_ip_list = data_format(usable_ip_str)
    usable_ip_list = col_data_format(usable_ip_list)
    usable_ip_set = ip_format(usable_ip_list).copy()
    conn_ip = IPSet(usable_ip_set)
    route_ip_list = format_routing_table(original_file_path, format_file_path)
    used_ip_list = extract_used_ip(route_ip_list, usable_ip_set)
    used_ip_str = produce_output_str(used_ip_list)
    output_file(used_ip_str_path, used_ip_str)
    used_ip_list = col_data_format(used_ip_list)
    used_ip_net_set = ip_format(used_ip_list).copy()
    for used_ip_net in used_ip_net_set:
        conn_ip.discard(IP(used_ip_net))
    not_used_ip_str = produce_output_str(conn_ip)
    output_file(not_used_ip_str_path, not_used_ip_str)
    not_used_ip_filter_list = ip_mask_filter(conn_ip, ip_net_mask)
    not_used_ip_filter_str = produce_output_str(not_used_ip_filter_list)
    output_file(not_used_ip_mask_str_path, not_used_ip_filter_str)
def make_prefix_heap(prefixes, initial_children=None, family=None,
                     sort_fn=None, show_available=False, show_unused=False):
    """Return a prefix heap of all prefixes. Might optionally filter out IPv4
    and IPv6 as needed.

    Args:
        prefixes: a queryset for, or list of, nav.models.manage.Prefix
        initial_children: a list of IPNode to initialize the root with
        family: a list of address types to allow (of "ipv4", "ipv6", "rfc1918")
        sort_fn: function used to sort the children upon serialization
        show_available: whether or not to create "fake" children for all ranges
            not spanned by the node's children or found elsewhere in NAV
        show_unused: like above, but only creates such nodes for prefixes that
            are in fact found within NAV

    Returns:
        A prefix heap (tree)

    """
    rfc1918 = IPSet([IP("10.0.0.0/8"), IP("172.16.0.0/12"), IP("192.168.0.0/16")])

    def accept(prefix):
        "Helper function for filtering prefixes by IP family"
        ip_addr = IP(prefix.net_address)
        if "ipv4" in family and ip_addr.version() == 4 and ip_addr not in rfc1918:
            return True
        if "ipv6" in family and ip_addr.version() == 6:
            return True
        if "rfc1918" in family and ip_addr in rfc1918:
            return True
        return False

    heap = PrefixHeap(initial_children)
    filtered = [prefix for prefix in prefixes if accept(prefix)]
    nodes = [PrefixNode(prefix, sort_fn=sort_fn) for prefix in filtered]
    for node in sorted(nodes, reverse=False):
        heap.add(node)

    # Add marker nodes for available ranges/prefixes
    if show_available:
        scopes = (child for child in heap.walk_roots()
                  if child.net_type in ["scope"])
        subnets = (get_available_nodes([scope.ip]) for scope in scopes)
        for subnet in subnets:
            heap.add_many(subnet)

    # Add marker nodes for empty ranges, e.g. ranges not spanned by the
    # children of a node. This is useful for aligning visualizations and so on.
    if show_unused:
        unused_prefixes = (child.not_in_use() for child in heap.walk())
        for unused_prefix in unused_prefixes:
            nodes = nodes_from_ips(unused_prefix, klass="empty")
            heap.add_many(nodes)

    return heap
def test_get_available_subnets(self):
    base = ["10.0.0.0/8"]
    used = ["10.0.0.0/9"]
    available_subnets = _get_available_subnets(base, used)
    # sanity check: only 10.128.0.0/9 should be available
    self.assertTrue(IP("10.128.0.0/9") in IPSet(available_subnets))
    self.assertEqual(len(available_subnets), 1)
    self.assertEqual(available_subnets, sorted(available_subnets))
def ip_list2c(ip_list):
    # Map each address into its /24 network.
    nets = [IP(i + '/24', make_net=True) for i in ip_list]
    # Keep only the /24s that occur at least twice. The original removed items
    # from the list while iterating over it, which silently skips elements.
    kept = [net for net in nets if nets.count(net) >= 2]
    return IPSet(kept)
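# Hypothetical call to ip_list2c(): only /24 networks hit at least twice survive.
print(ip_list2c(['198.51.100.1', '198.51.100.9', '203.0.113.5']).prefixes)
# [IP('198.51.100.0/24')]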
def partition_subnet(size, prefix):
    "Partition prefix into subnets with room for at least `size` hosts"
    subnet_size = math.ceil(math.log(size, 2))
    chunk_size = 2 ** subnet_size
    _iter = iter(IP(prefix))
    chunk = list(islice(_iter, chunk_size))
    while chunk:
        yield IPSet(chunk).prefixes[0]
        chunk = list(islice(_iter, chunk_size))
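# Usage sketch for partition_subnet(): room for 50 hosts needs 2**6 = 64
# addresses, so a /24 is carved into four /26 subnets.
print(list(partition_subnet(50, '192.0.2.0/24')))
# [IP('192.0.2.0/26'), IP('192.0.2.64/26'), IP('192.0.2.128/26'), IP('192.0.2.192/26')]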
def main():
    # used_ip_str_path = input('Used-IP statistics file path: ')
    used_ip_str_path = r'D:\xuexi\cheshiwenjian\哈尔滨CR全路由提取.txt'
    print(used_ip_str_path)
    # usable_ip_str_path = input('Usable IP range file path: ')
    usable_ip_str_path = r'D:\xuexi\cheshiwenjian\哈尔滨总IP1.txt'
    print(usable_ip_str_path)
    # not_used_ip_str_path = input('Unused IP output file path: ')
    not_used_ip_str_path = r'D:\xuexi\cheshiwenjian\哈尔滨未用IP.txt'
    print(not_used_ip_str_path)
    with open(used_ip_str_path, 'r', encoding='utf-8-sig') as file_used_ip:
        used_ip_str = file_used_ip.read()
    with open(usable_ip_str_path, 'r', encoding='utf-8-sig') as file_usable_ip:
        usable_ip_str = file_usable_ip.read()
    used_ip_list = data_format(used_ip_str)
    used_ip_list = col_data_format(used_ip_list)
    used_ip_net_set = ip_format(used_ip_list).copy()
    usable_ip_list = data_format(usable_ip_str)
    usable_ip_list = col_data_format(usable_ip_list)
    usable_ip_set = ip_format(usable_ip_list).copy()
    conn_ip = IPSet(usable_ip_set)
    # conn_ip = IPSet([IP('112.100.107.0/24'), IP('112.100.14.0/24')])
    for used_ip_net in used_ip_net_set:
        conn_ip.discard(IP(used_ip_net))
    not_used_ip_str = ""
    for not_used_ip in conn_ip:
        not_used_ip_str = not_used_ip_str + str(not_used_ip) + "\n"
    with open(not_used_ip_str_path, 'w') as file_not_used_ip:
        file_not_used_ip.write(not_used_ip_str)
def aggregate_prefix_list(plist, t=False):
    '''Aggregate a list of prefixes into the smallest covering set.

    t: if True, truncate the network portion to match the netmask
       (i.e. 1.2.3.4/24 becomes 1.2.3.0/24, and
       2001:db8:abcd:1234::dead:beef/64 becomes 2001:db8:abcd:1234::/64)
    '''
    s = IPSet()  # the set of aggregated addresses
    for p in plist:
        try:
            # read the line as an IP network; truncate if t was specified
            ip = IP(p, make_net=t)
        except ValueError as err:
            # the line can't be parsed as an IP prefix
            raise errors.IPparseError(err)
        # add the IP into the set, automatically aggregating as necessary
        s.add(ip)
    return [str(prefix) for prefix in s]
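# Happy-path example for aggregate_prefix_list(); errors.IPparseError belongs to
# the surrounding project and is not exercised here.
print(aggregate_prefix_list(['192.0.2.0/25', '192.0.2.128/25', '198.51.100.64/26']))
# ['192.0.2.0/24', '198.51.100.64/26']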
def aggregate_nets(nets):
    def flush(ipset):
        for ip in ipset:
            addr, prefix = (ip.strNormal(), '32') \
                if ip.prefixlen() == 32 else \
                ip.strNormal().split('/')
            mask = ip.strNetmask()
            yield (addr, mask, prefix)

    i, limit = 0, 300
    ips = IPSet()
    for starting_ip, mask, prefix in nets:
        ip = IP("{}/{}".format(starting_ip, prefix))
        ips.add(ip)
        i += 1
        if i >= limit:
            i = 0
            yield from flush(ips)
            ips = IPSet()
    yield from flush(ips)
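# Illustrative call to aggregate_nets(); note the mask element of each input
# tuple is not used by the function itself, so any placeholder works.
nets = [('192.0.2.0', '255.255.255.128', '25'),
        ('192.0.2.128', '255.255.255.128', '25')]
for addr, mask, prefix in aggregate_nets(nets):
    print(addr, mask, prefix)  # 192.0.2.0 255.255.255.0 24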
def __init__(self, reactor, cache_size, cache_expire, timeout,
             disallowed=None, allowed=None):
    super(Resolver, self).__init__(reactor)
    self.disallowed = IPSet(
        [IP(ip) for ip in (disallowed if disallowed is not None else [])])
    self.allowed = IPSet(
        [IP(ip) for ip in (allowed if allowed is not None else [])])
    self.cache_allowed = self.cache_disallowed = None
    if cache_size > 0:
        self.cache_allowed = ExpiringDict(max_len=cache_size,
                                          max_age_seconds=cache_expire)
        self.cache_disallowed = ExpiringDict(max_len=cache_size,
                                             max_age_seconds=cache_expire)
    self.timeout = timeout if isinstance(timeout, tuple) else (timeout,)
    self.logger = logging.getLogger(self.__class__.__name__)
def _get_available_subnets(prefix_or_prefixes, used_prefixes):
    """Get available prefixes within a list of CIDR addresses, based on what
    prefixes are in use. E.g. this is `get_available_subnets`, but with
    explicit dependency injection.

    Args:
        prefix_or_prefixes: a single or a list of prefixes ("10.0.0.0/8")
            or IPy.IP objects
        used_prefixes: prefixes that are in use

    Returns:
        A sorted list of available IPy.IP prefixes within prefix_or_prefixes

    """
    if not isinstance(prefix_or_prefixes, list):
        prefix_or_prefixes = [prefix_or_prefixes]
    base_prefixes = [str(prefix) for prefix in prefix_or_prefixes]
    acc = IPSet([IP(prefix) for prefix in prefix_or_prefixes])
    used_prefixes = IPSet([IP(used) for used in used_prefixes])
    # remove used prefixes
    acc.discard(used_prefixes)
    # filter away original prefixes
    return sorted([ip for ip in acc if str(ip) not in base_prefixes])
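# Usage sketch mirroring the unit test earlier in this section: carving the used
# half out of 10.0.0.0/8 leaves the other half available.
print(_get_available_subnets(["10.0.0.0/8"], ["10.0.0.0/9"]))
# [IP('10.128.0.0/9')]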
class RouteSet(collections.abc.MutableSet):  # collections.MutableSet was removed in Python 3.10

    def __init__(self, routes=(), aggregate=True):
        if aggregate:
            self.ip4 = IPSet()
            self.ip6 = IPSet()
        else:
            self.ip4 = set()
            self.ip6 = set()
        if routes:
            for r in routes:
                self.add(r)

    def __len__(self):
        """
        IPy doesn't provide a len() for the number of prefixes, but that's
        really what we're interested in here, so override their semantics.
        """
        if isinstance(self.ip4, IPSet):
            return len(self.ip4.prefixes) + len(self.ip6.prefixes)
        else:
            return len(self.ip4) + len(self.ip6)

    def __iter__(self):
        return itertools.chain(self.ip4, self.ip6)

    def __contains__(self, item):
        return item in self.ip4 or item in self.ip6

    def add(self, item):
        ip = IP(item, make_net=True)
        if ip.version() == 4:
            self.ip4.add(ip)
        elif ip.version() == 6:
            self.ip6.add(ip)

    def discard(self, item):
        ip = IP(item, make_net=True)
        self.ip4.discard(ip)
        self.ip6.discard(ip)
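# Hypothetical RouteSet usage: with aggregation enabled, adjacent v4 routes
# collapse into one prefix, and __len__ counts prefixes per family.
rs = RouteSet(aggregate=True)
rs.add('192.0.2.0/25')
rs.add('192.0.2.128/25')
rs.add('2001:db8::/32')
print(len(rs))  # 2: one aggregated IPv4 prefix plus one IPv6 prefix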
def fetch():
    a = IPSet([])
    for blocklist in blocklists:
        r = requests.get(blocklist)
        for line in r.iter_lines():
            line = line.decode()  # requests yields bytes; assuming linefilter expects str
            if linefilter(line):
                a.add(makeprefix(linefilter(line)))
    for prefix in b:
        if b.len() > 0 and prefix in b and prefix not in a:
            a.discard(prefix)
            stdout.write('withdraw route ' + str(prefix) + nexthop)
            stdout.flush()
    for prefix in a:
        if prefix in a and prefix not in b:
            stdout.write('announce route ' + str(prefix) + nexthop)
            stdout.flush()
    b.add(a)
def compress_cidr_list(cidrList):
    """Return the compressed cidr list and the total ipCount."""
    from IPy import IP, IPSet
    cidrHash = {}
    compressedCidrList = []
    ipCount = 0
    # Group the CIDRs by prefix length.
    for cidr in cidrList:
        ip, prefix = cidr.strip().split('/')
        if prefix not in cidrHash:  # dict.has_key() is Python 2 only
            cidrHash[prefix] = []
        cidrHash[prefix].append(cidr)
    # Aggregate each group and count the covered addresses.
    for prefix, cidrs in cidrHash.items():
        ipSet = IPSet([IP(cidr, make_net=True) for cidr in cidrs])
        for compressedCidr in ipSet:
            compressedCidrList.append(str(compressedCidr))
            ipCount = ipCount + compressedCidr.len()
    return compressedCidrList, ipCount
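# Example call for compress_cidr_list(): same-length neighbours merge, and the
# second return value counts the covered addresses.
cidrs, count = compress_cidr_list(['192.0.2.0/25', '192.0.2.128/25'])
print(cidrs, count)  # ['192.0.2.0/24'] 256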
def __init__(self, rpki_json_str: str, slurm_json_str: Optional[str],
             database_handler: DatabaseHandler):
    self.roa_objs: List[ROA] = []
    self._filtered_asns: Set[str] = set()
    self._filtered_prefixes: IPSet = IPSet()
    self._filtered_combined: Dict[str, IPSet] = defaultdict(IPSet)
    self._load_roa_dicts(rpki_json_str)
    if slurm_json_str:
        self._load_slurm(slurm_json_str)
    scopefilter_validator = ScopeFilterValidator()
    for roa_dict in self._roa_dicts:
        try:
            asn = roa_dict['asn']
            prefix = IP(roa_dict['prefix'])
            ta = roa_dict['ta']
            if ta != SLURM_TRUST_ANCHOR:
                if asn in self._filtered_asns:
                    continue
                if prefix in self._filtered_prefixes:
                    continue
                if prefix in self._filtered_combined.get(asn, []):
                    continue
            roa_obj = ROA(prefix, asn, roa_dict['maxLength'], ta)
        except KeyError as ke:
            msg = f'Unable to parse ROA record: missing key {ke} -- full record: {roa_dict}'
            logger.error(msg)
            raise ROAParserException(msg)
        except ValueError as ve:
            msg = f'Invalid value in ROA or SLURM: {ve}'
            logger.error(msg)
            raise ROAParserException(msg)
        roa_obj.save(database_handler, scopefilter_validator)
        self.roa_objs.append(roa_obj)
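# The SLURM filtering above hinges on IPSet membership tests; a standalone
# sketch of that check (the AS number and prefixes are made up for illustration):
from collections import defaultdict
from IPy import IP, IPSet

filtered_combined = defaultdict(IPSet)
filtered_combined['AS64496'].add(IP('192.0.2.0/24'))
print(IP('192.0.2.128/25') in filtered_combined['AS64496'])  # True: more-specifics match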
def ip_merge(_input: str):
    '''Merge and aggregate IP addresses.'''
    res = IPSet()
    if _input == '-f':
        # a file name was passed in
        ip_list = _read_file(argv[3])
    else:
        # individual IPs were passed on the command line
        ip_list = argv[2:]
    _ip_list = []
    for x in ip_list:
        if x.find('-') != -1:
            # convert ip_start-ip_end ranges to ip/mask form first
            _ip_list.extend(ip_from_last(x.strip(), True))
        else:
            res.add(IP(x.strip()))
    for xx in _ip_list:
        res.add(xx)
    return '\n'.join([x.strCompressed() for x in res])
def get_input_ip(self, input_ip_file) -> IPSet:
    '''Read the input IPs from a file.'''
    input_ip_str = _read_file(input_ip_file)
    # A one-liner like the following is avoided so that try/except can skip
    # malformed lines without aborting the whole run:
    # input_ip = IPSet([IP(x.strip(), make_net=True) for x in input_ip_str.split('\n')])
    input_ip = IPSet()
    for x in input_ip_str.split('\n'):  # iterate lines, not characters
        try:
            # first try to parse as ip/mask
            _i = IP(x.strip(), make_net=True)
            input_ip.add(_i)
        except Exception:
            # then try to parse as ip_start-ip_end
            try:
                _i = ip_from_last(x.strip(), True)
                for xx in _i:
                    input_ip.add(xx)
            except Exception as err:
                pft(str_red('Ignoring invalid ip: {},\n\treason: {}'.format(x, err)))
    return input_ip
def ip_merge(ip_list):
    ip_set = []
    for ip_element in ip_list:
        ip_set.append(IP(ip_element))
    ip_set_agg = IPSet(ip_set)
    return ip_set_agg
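# Example call: IPSet aggregates the parsed prefixes on construction.
print(ip_merge(['10.0.0.0/9', '10.128.0.0/9']).prefixes)  # [IP('10.0.0.0/8')]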
#! /usr/bin/env python3
# _*_ coding: utf-8 _*_
from IPy import IP, IPSet

iplist = []
filename = '/Users/heping/Documents/ProgramCodingBackup/Python/ip.txt'
'''On Windows the path can be written as 'D:\\ip.txt'.'''
with open(filename) as ipfile:
    for line in ipfile:
        line = line.strip()
        if line:
            iplist.append(IP(line))
ipa = IPSet(iplist)  # IPSet aggregates adjacent prefixes on construction
for x in ipa:
    print(x)
import argparse
import fileinput
import sys

from IPy import IP, IPSet

parser = argparse.ArgumentParser(conflict_handler='resolve')
parser.add_argument('-t', '--truncate', action='store_true', dest='truncate',
                    help='truncate the network portion of the prefix to match the netmask (i.e. 192.0.2.10/24 becomes 192.0.2.0/24)',
                    default=False)
parser.add_argument('-h', '--host', action='store_true', dest='host',
                    help='convert host addresses w/o masks to IPv4 /32 or IPv6 /128 networks',
                    default=None)
parser.add_argument('-c', '--cidr', dest='cidr', action='store_true',
                    help='add netmasks to classful IPv4 network definitions')
parser.add_argument('-i', '--ignore-errors', dest='ignore', action='store_true',
                    help='discard malformed input lines instead of exiting',
                    default=False)
version = parser.add_mutually_exclusive_group()
version.add_argument('-4', '--ipv4', dest='ipv6', action='store_false',
                     help='display only IPv4 prefixes (default: show IPv4 & IPv6)',
                     default=True)
version.add_argument('-6', '--ipv6', dest='ipv4', action='store_false',
                     help='display only IPv6 prefixes (default: show IPv4 & IPv6)',
                     default=True)
parser.add_argument('args', nargs=argparse.REMAINDER, help='<input file> or STDIN')
args = parser.parse_args()

s = IPSet()  # the set of aggregated addresses

try:
    for line in fileinput.input(args.args):
        try:
            # read the line as an IP network; truncate if -t was specified
            ip = IP(line, make_net=args.truncate)
        except ValueError as err:
            # the line can't be parsed as an IP prefix
            if not args.ignore:
                print(err)
                print("exiting...")
                sys.exit(1)
            continue  # -i given: skip the malformed line

        # process -c option
        if args.cidr:
            if ip in IP('0.0.0.0/1') and (ip.int() & 0x00ffffff == 0x00000000) and ip.prefixlen() == 32:
                ip = ip.make_net(8)
def in_use(self):
    "Show allocated (used) subnets in the CIDR range"
    return IPSet([child.ip for child in self.children])
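# Stand-in sketch for in_use(): any objects exposing an `ip` attribute work, and
# IPSet merges adjacent child prefixes. The Child namedtuple is illustrative only.
from collections import namedtuple
from IPy import IP, IPSet

Child = namedtuple('Child', 'ip')
children = [Child(IP('10.0.0.0/24')), Child(IP('10.0.1.0/24'))]
print(IPSet([c.ip for c in children]).prefixes)  # [IP('10.0.0.0/23')]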
print("Usage: wordcount <file>", file=sys.stderr) exit(-1) appName ="jhl_spark_1" master = "local" conf = SparkConf().setAppName(appName).setMaster(master) sc = SparkContext(conf=conf) # textFile读取外部数据 rdd = sc.textFile(sys.argv[1])# 以行为单位读取外部文件,并转化为RDD rdd2 = rdd.flatMap(handle) \ .distinct() result = rdd2.collect() print("********************************\n\n") ret = IPSet() for y in result: ret.add(IP(y, make_net = True)) #print(ret)#合并后的网段 dict = {} for z in result: dec = IP(z).int() dict[z] = dec a_start = IP('1.0.0.0').int() a_end = IP('126.0.0.0').int() b_start = IP('128.0.0.0').int() b_end = IP('191.255.255.255').int() c_start = IP('192.0.0.0').int() c_end = IP('223.255.255.255').int()
print(IP('127.0.0.1').make_net('255.0.0.0'))
print('=============================================')
print('Several ways to render an IP address as a string:')
ip5 = IP('10.0.0.0/32')
ip6 = IP('10.0.0.0/24')
ip7 = IP('10.0.0.0')
print(ip5.strNormal())
print(ip6.strNormal())
print(ip6.strNormal(0))
print(ip6.strNormal(1))
print(ip6.strNormal(2))
print(ip6.strNormal(3))
print(ip7)
ip7.NoPrefixForSingleIp = None
print(ip7)
ip7.WantPrefixLen = 3
print(ip7)
print('=============================================')
print(IP('10.0.0.0/22') - IP('10.0.2.0/24'))
print(IPSet([IP('10.0.0.0/23'), IP('10.0.3.0/24'), IP('10.0.2.0/24')]))
s = IPSet([IP('10.0.0.0/22')])
s.add(IP('192.168.1.2'))
print(s)
s.discard(IP('192.168.1.2'))
print(s)
tableInfo = {
    'tb': 'networkresource_publicipsegment',
    # 'host': 'localhost',
    'host': '10.201.163.202',
    'port': 9003,
    'user': '******',
    'password': '******',
    'db': 'omni_agent',
}

# Public network address segments
SegmentTable = SqlTable(**tableInfo)
cmd = 'select * from {}'.format(SegmentTable._tb)
segdata = SegmentTable.queryResult(cmd)

ipsetDict = {}  # segment type -> IPSet(...)
for gw in segdata:
    if str(gw[3]) not in ipsetDict:
        ipsetDict[str(gw[3])] = IPSet()
    subnet = getSubnet(gw[1], gw[2])
    ipsetDict[str(gw[3])].add(subnet)

# IP inventory
tableInfo['db'] = 'omni_agent'
tableInfo['tb'] = 'networkresource_iprecord'
IPTable = SqlTable(**tableInfo)
cmd = ('select device_ip, ip_mask, device_name, logic_port, svlan, cvlan, '
       'ip_description, gateway, ip_func from {}').format(IPTable._tb)
ips = IPTable.queryResult(cmd)

result = {}
for segment_type in ipsetDict:
    result[segment_type] = []
if '-1' not in result:
sys.stdout = codecs.getwriter('utf8')(sys.stdout.buffer)
start_time = time.time()

# Get the POSTed data.
form = cgi.FieldStorage()
IPS = form.getvalue('IPS')

print("Content-type: application/json")
print()

# Count the entries before processing.
IPS = json.loads(IPS)
process_before = len(IPS)

result = IPSet()
for item in IPS:
    try:
        result.add(IP(item))
    except ValueError as e:
        result_json = {}
        result_json['ips'] = ['{0}'.format(e)]
        result_json['process_before'] = '0'
        result_json['process_after'] = '0'
        result_json['elapsed'] = '0.0000'
        # Emit the JSON response.
        print(json.dumps(result_json))
        sys.exit()

ips = []
for item in result:
"""
exabgp: aggregate_requests.py based partially on aggregate.py
from https://github.com/infowolfe/exabgp-edgerouter/blob/master/blocklists_simple.py
"""
from IPy import IP, IPSet
import requests
import socket
from sys import stdout
from time import sleep
import syslog
import os

# syslogprefix = sys.argv[0] + '[' + os.getpid() + ']:'
syslogprefix = __file__ + '[' + str(os.getpid()) + ']:'

a = IPSet()
b = IPSet()

# how long should we sleep in minutes?
mins = 10
expires = ''
nexthop = ' next-hop 169.254.1.1 community [65535:65281]\n'
# nexthop = ' next-hop self community [64512:666]\n'

blocklists = [
    'https://www.spamhaus.org/drop/drop.txt',
    'https://www.spamhaus.org/drop/edrop.txt',
    'https://rules.emergingthreats.net/fwrules/emerging-Block-IPs.txt',
    'http://www.team-cymru.org/Services/Bogons/fullbogons-ipv4.txt',
]