def fetch():
    """Fetch prefixes from all configured blocklists and emit ExaBGP
    announce/withdraw commands for the delta against the previous run.

    Relies on module globals: blocklists, b (previously announced IPSet),
    nexthop, linefilter, makeprefix, stdout, syslog, requests.
    """
    a = IPSet([])
    for blocklist in blocklists:
        syslog.syslog('generate-blacklist.py - fetching prefixes from: %s ' % blocklist)
        r = requests.get(blocklist)
        for line in r.iter_lines():
            # Skip reserved/multicast ranges that upstream lists sometimes include.
            if line not in ("0.0.0.0/8", "240.0.0.0/4", "224.0.0.0/4"):
                # Hoist the filter result instead of calling linefilter() twice.
                filtered = linefilter(line)
                if filtered:
                    a.add(makeprefix(filtered))

    # Withdraw prefixes announced previously but gone upstream. Iterating b
    # already guarantees membership, so the original b.len()/__contains__
    # guards were redundant.
    for prefix in b:
        if prefix not in a:
            a.discard(prefix)  # no-op if prefix is absent from a
            stdout.write('withdraw route ' + str(prefix) + nexthop)
            stdout.flush()

    # Announce prefixes that are new relative to the previous run.
    for prefix in a:
        if prefix not in b:
            stdout.write('announce route ' + str(prefix) + nexthop)
            stdout.flush()

    # Fold this run's prefixes into the accumulated announced set.
    b.add(a)
def fetch():
    """Fetch prefixes from all configured blocklists, log per-list and total
    counts, and emit ExaBGP announce/withdraw commands for the delta against
    the previous run.

    Relies on module globals: blocklists, b (previously announced IPSet),
    nexthop, syslogprefix, linefilter, makeprefix, stdout, syslog, requests.
    """
    a = IPSet([])
    totalnoofnewprefixes = 0
    for blocklist in blocklists:
        syslog.syslog('{0} fetching prefixes from: {1}'.format(syslogprefix, blocklist))
        r = requests.get(blocklist)
        before = len(a)
        for line in r.iter_lines():
            # Skip reserved/multicast ranges that upstream lists sometimes include.
            if line not in ("0.0.0.0/8", "240.0.0.0/4", "224.0.0.0/4"):
                # Hoist the filter result instead of calling linefilter() twice.
                filtered = linefilter(line)
                if filtered:
                    a.add(makeprefix(filtered))
        # BUG FIX: the original did `noofprefixes = a.__len__` (bound method,
        # never called) and logged the undefined name `totalnoofprefixes`.
        # Count only what this list contributed, matching the log message.
        noofprefixes = len(a) - before
        syslog.syslog('{0} got {1} prefixes from: {2}'.format(syslogprefix, noofprefixes, blocklist))
        totalnoofnewprefixes += noofprefixes
    syslog.syslog('{0} got {1} prefixes in total from upstream'.format(syslogprefix, totalnoofnewprefixes))

    # Withdraw prefixes announced previously but gone upstream. Iterating b
    # already guarantees membership, so the extra guards were redundant.
    for prefix in b:
        if prefix not in a:
            a.discard(prefix)
            stdout.write('withdraw route ' + str(prefix) + nexthop)
            stdout.flush()

    # Announce prefixes that are new relative to the previous run.
    for prefix in a:
        if prefix not in b:
            stdout.write('announce route ' + str(prefix) + nexthop)
            stdout.flush()

    # Fold this run's prefixes into the accumulated announced set.
    b.add(a)
def fetch():
    """Pull prefixes from every configured blocklist, diff against the
    previously announced set ``b``, and emit ExaBGP announce/withdraw lines.

    Uses module globals: blocklists, b, nexthop, linefilter, makeprefix,
    stdout, requests.
    """
    a = IPSet([])
    for blocklist in blocklists:
        r = requests.get(blocklist)
        for line in r.iter_lines():
            # Hoist the filter result instead of calling linefilter() twice.
            filtered = linefilter(line)
            if filtered:
                a.add(makeprefix(filtered))

    # Withdraw prefixes that disappeared upstream. Iterating b already
    # guarantees membership, so the original b.len()/__contains__ guards
    # were redundant.
    for prefix in b:
        if prefix not in a:
            a.discard(prefix)
            stdout.write('withdraw route ' + str(prefix) + nexthop)
            stdout.flush()

    # Announce prefixes that are new relative to the previous run.
    for prefix in a:
        if prefix not in b:
            stdout.write('announce route ' + str(prefix) + nexthop)
            stdout.flush()

    # Fold this run's prefixes into the accumulated announced set.
    b.add(a)
def ip_merge(_input: str) -> str:
    """Merge/aggregate IP addresses into the minimal set of CIDR networks.

    If ``_input`` is '-f', addresses are read from the file named in argv[3];
    otherwise every argv entry from index 2 on is treated as an address.
    Entries of the form "start-end" are converted to networks first via
    ip_from_last(). Returns the aggregated networks, one compressed string
    per line.
    """
    res = IPSet()
    if _input == '-f':
        # A filename was passed on the command line.
        ip_list = _read_file(argv[3])
    else:
        # Individual IPs were passed as arguments.
        ip_list = argv[2:]
    for raw in ip_list:
        entry = raw.strip()
        if '-' in entry:
            # Range form "start-end": convert to ip/mask networks first.
            for net in ip_from_last(entry, True):
                res.add(net)
        else:
            res.add(IP(entry))
    return '\n'.join(net.strCompressed() for net in res)
import collections.abc  # the 'collections.MutableSet' alias was removed in Python 3.10


class RouteSet(collections.abc.MutableSet):
    """A mutable set of IPv4/IPv6 routes, bucketed by address family.

    With aggregate=True (default) prefixes are stored in IPy IPSet objects,
    which merge adjacent/overlapping networks automatically; with
    aggregate=False plain sets are used and no aggregation happens.
    """

    def __init__(self, routes=(), aggregate=True):
        if aggregate:
            self.ip4 = IPSet()
            self.ip6 = IPSet()
        else:
            self.ip4 = set()
            self.ip6 = set()
        if routes:
            for r in routes:
                self.add(r)

    def __len__(self):
        """
        IPy doesn't provide a len() for number of prefixes but that's
        really what we're interested in here, so override their semantics.
        """
        if isinstance(self.ip4, IPSet):
            return len(self.ip4.prefixes) + len(self.ip6.prefixes)
        return len(self.ip4) + len(self.ip6)

    def __iter__(self):
        # v4 prefixes first, then v6.
        return itertools.chain(self.ip4, self.ip6)

    def __contains__(self, item):
        return item in self.ip4 or item in self.ip6

    def add(self, item):
        """Parse item as a network (make_net=True) and file it by IP version."""
        ip = IP(item, make_net=True)
        if ip.version() == 4:
            self.ip4.add(ip)
        elif ip.version() == 6:
            self.ip6.add(ip)

    def discard(self, item):
        """Remove item (if present) from both family buckets."""
        ip = IP(item, make_net=True)
        self.ip4.discard(ip)
        self.ip6.discard(ip)
def aggregate_prefix_list(plist, t=False):
    """Aggregate a list of IP prefixes into the minimal covering set.

    t: if True then truncate, meaning the network portion is pruned to match
    the netmask (i.e. 1.2.3.4/24 becomes 1.2.3.0/24; 2001:db8:abcd:1234::dead:beef/64
    becomes 2001:db8:abcd:1234::/64).

    Raises errors.IPparseError if any entry cannot be parsed as an IP prefix.
    Returns the aggregated prefixes as strings.
    """
    s = IPSet()  # the set of aggregated addresses
    for p in plist:
        try:
            # Read the entry as an IP network; truncate if t was specified.
            ip = IP(p, make_net=t)
        except ValueError as err:
            # Chain the original parse failure for easier debugging.
            raise errors.IPparseError(err) from err
        s.add(ip)  # adding to the IPSet aggregates automatically
    # Comprehension instead of the manual append loop.
    return [str(prefix) for prefix in s]
def fetch():
    """Download prefixes from each blocklist URL, then reconcile with the
    previously announced IPSet ``b``: withdraw what vanished, announce what
    is new, and record the result back into ``b``.

    Uses module globals: blocklists, b, nexthop, linefilter, makeprefix,
    stdout, requests.
    """
    a = IPSet([])
    for blocklist in blocklists:
        r = requests.get(blocklist)
        for line in r.iter_lines():
            # Evaluate the filter once; the original called it twice per line.
            filtered = linefilter(line)
            if filtered:
                a.add(makeprefix(filtered))

    # Prefixes present last run but absent now: withdraw. Membership in b is
    # implied by the iteration, so no len()/__contains__ guard is needed.
    for prefix in b:
        if prefix not in a:
            a.discard(prefix)
            stdout.write('withdraw route ' + str(prefix) + nexthop)
            stdout.flush()

    # Prefixes new this run: announce.
    for prefix in a:
        if prefix not in b:
            stdout.write('announce route ' + str(prefix) + nexthop)
            stdout.flush()

    # Accumulate everything announced so far.
    b.add(a)
def aggregate_nets(nets):
    """Aggregate (starting_ip, mask, prefix) triples in batches.

    Networks are accumulated into an IPSet (which merges adjacent and
    overlapping ranges); every 300 inputs the batch is flushed, yielding
    (address, netmask, prefixlen) string triples for each aggregated network.
    """
    BATCH_LIMIT = 300

    def emit(collected):
        # Render each aggregated network back as string parts.
        for net in collected:
            if net.prefixlen() == 32:
                addr, plen = net.strNormal(), '32'
            else:
                addr, plen = net.strNormal().split('/')
            yield (addr, net.strNetmask(), plen)

    pending = IPSet()
    count = 0
    for start_ip, _mask, plen in nets:
        pending.add(IP("{}/{}".format(start_ip, plen)))
        count += 1
        if count >= BATCH_LIMIT:
            count = 0
            yield from emit(pending)
            pending = IPSet()

    # Flush whatever remains after the last full batch.
    yield from emit(pending)
def get_input_ip(self, input_ip_file) -> IPSet:
    """Read IPs from the given input file and return them as an IPSet.

    Each entry is first parsed as ip/mask; on failure it is retried as an
    "ip_start-ip_end" range via ip_from_last(); entries that fail both are
    reported and skipped.
    """
    input_ip_str = _read_file(input_ip_file)
    # (translated) The one-liner below is deliberately not used so that a
    # try/except inside the loop can skip malformed entries without aborting
    # the whole run.
    # input_ip = IPSet([IP(x.strip(), make_net=True) for x in input_ip_str.split('\n')])
    input_ip = IPSet()
    # NOTE(review): this iterates input_ip_str directly; if _read_file returns
    # a plain string this walks characters, not lines — presumably it returns
    # an iterable of lines. TODO confirm against _read_file.
    for x in input_ip_str:
        try:
            # Try to parse as ip/mask.
            _i = IP(x.strip(), make_net=True)
            input_ip.add(_i)
        except Exception as err:
            # Fall back to parsing as an ip_start-ip_end range.
            try:
                _i = ip_from_last(x.strip(), True)
                for xx in _i:
                    input_ip.add(xx)
            except Exception as err:
                # Report and skip the invalid entry (runtime string left as-is).
                pft(str_red('忽略无效 ip:{},\n\t错误原因:{}'.format(x, err)))
    return input_ip
# (translated) Read the POSTed form data.
form = cgi.FieldStorage()
IPS = form.getvalue('IPS')
print("Content-type: application/json")
print()
# (translated) Count the entries before aggregation.
IPS = json.loads(IPS)
process_before = len(IPS)
result = IPSet()
for item in IPS:
    try:
        result.add(IP(item))
    except ValueError as e:
        # On the first bad entry, emit a JSON error payload and stop.
        result_json = {}
        result_json['ips'] = ['{0}'.format(e)]
        result_json['process_before'] = '0'
        result_json['process_after'] = '0'
        result_json['elapsed'] = '0.0000'
        # (translated) Emit the JSON response.
        print(json.dumps(result_json))
        sys.exit()
ips = []
for item in result:
    # BUG FIX: the original conditional expression had no "else" clause
    # (SyntaxError). Append "/32" only to bare host addresses; also hoist
    # the repeated strNormal(1) call.
    rendered = item.strNormal(1)
    ips.append(rendered + '/32' if '/' not in rendered else rendered)
end_time = time.time()
# Demo of IPy IP/IPSet formatting and set arithmetic (Python 2 print statements).

# Build a network from a host address plus a netmask.
print IP('127.0.0.1').make_net('255.0.0.0')
print '============================================='
# (comment) The printed string below is a runtime literal and is left untouched;
# it reads "several ways to turn an IP address into a string".
print u'IP地址转字符串的几种方式:'
ip5 = IP('10.0.0.0/32')
ip6 = IP('10.0.0.0/24')
ip7 = IP('10.0.0.0')
# strNormal() with different mode arguments renders the same network in
# different textual forms (presumably: bare address, /prefixlen, netmask,
# first-last range — verify against the IPy documentation).
print ip5.strNormal()
print ip6.strNormal()
print ip6.strNormal(0)
print ip6.strNormal(1)
print ip6.strNormal(2)
print ip6.strNormal(3)
print ip7
# NOTE(review): setting NoPrefixForSingleIp / WantPrefixLen changes how the
# single address is rendered below — exact semantics per IPy docs.
ip7.NoPrefixForSingleIp = None
print(ip7)
ip7.WantPrefixLen = 3
print ip7
print '============================================='
# IPSet arithmetic: subtracting a subnet splits the network; constructing an
# IPSet aggregates adjacent networks.
print IP('10.0.0.0/22') - IP('10.0.2.0/24')
print IPSet([IP('10.0.0.0/23'), IP('10.0.3.0/24'), IP('10.0.2.0/24')])
s = IPSet([IP('10.0.0.0/22')])
# Adding then discarding a single host shows aggregation and re-splitting.
s.add(IP('192.168.1.2'))
print s
s.discard(IP('192.168.1.2'))
print s
# Spark job: read IP/network strings from a text file, dedupe them, and
# aggregate into networks with IPy.
appName = "jhl_spark_1"
master = "local"
conf = SparkConf().setAppName(appName).setMaster(master)
sc = SparkContext(conf=conf)
# (translated) textFile reads the external file line-by-line into an RDD.
rdd = sc.textFile(sys.argv[1])
rdd2 = rdd.flatMap(handle) \
    .distinct()
result = rdd2.collect()
print("********************************\n\n")
ret = IPSet()
for y in result:
    ret.add(IP(y, make_net = True))
# (translated) print(ret) would show the aggregated networks.
#print(ret)
# NOTE(review): "dict" shadows the builtin; left as-is because the rest of the
# script (outside this excerpt) may reference it.
dict = {}
for z in result:
    # Map each entry to its integer address value.
    dec = IP(z).int()
    dict[z] = dec
# Integer boundaries of the classful A/B/C address ranges.
a_start = IP('1.0.0.0').int()
a_end = IP('126.0.0.0').int()
b_start = IP('128.0.0.0').int()
b_end = IP('191.255.255.255').int()
c_start = IP('192.0.0.0').int()
c_end = IP('223.255.255.255').int()
a_count = 0
b_count = 0
10.0.0.0/32 >>> ip.WantPrefixLen = 3 >>> print(ip) 10.0.0.0-10.0.0.0 Work with multiple networks Simple addition of neighboring netblocks that can be aggregated will yield a parent network of both, but more complex range mapping and aggregation is available with the IPSet class, which will hold any number of unique address ranges and will aggregate overlapping ranges. >>> from IPy import IP, IPSet >>> IP('10.0.0.0/22') - IP('10.0.2.0/24') IPSet([IP('10.0.0.0/23'), IP('10.0.3.0/24')]) >>> IPSet([IP('10.0.0.0/23'), IP('10.0.3.0/24'), IP('10.0.2.0/24')]) IPSet([IP('10.0.0.0/22')]) >>> s = IPSet([IP('10.0.0.0/22')]) >>> s.add(IP('192.168.1.0/29')) >>> s IPSet([IP('10.0.0.0/22'), IP('192.168.1.0/29')]) >>> s.discard(IP('192.168.1.2')) >>> s IPSet([IP('10.0.0.0/22'), IP('192.168.1.0/31'), IP('192.168.1.3'), IP('192.168.1.4/30')]) IPSet supports the set method isdisjoint: >>> s.isdisjoint(IPSet([IP('192.168.0.0/16')])) False >>> s.isdisjoint(IPSet([IP('172.16.0.0/12')])) True IPSet supports intersection:
# NOTE(review): collapsed excerpt from a Python 2 IP-aggregation CLI. It begins
# inside exception handlers whose matching try: statements lie outside this
# excerpt, so the original one-line layout is preserved rather than re-indented.
# Logic visible here: -c snaps /32 hosts on classful boundaries to /8, /16 or
# /24 via make_net(); -h forces prefix display for single IPs; results in the
# IPSet `s` are printed per address family (args.ipv4 / args.ipv6).
print(err) print "exiting..." sys.exit(1) # process -c option if args.cidr: if ip in IP('0.0.0.0/1') and ( ip.int() & 0x00ffffff == 0x00000000 ) and ip.prefixlen() == 32: ip=ip.make_net(8) if ip in IP('128.0.0.0/2') and ( ip.int() & 0x0000ffff == 0x00000000 ) and ip.prefixlen() == 32: ip=ip.make_net(16) if ip in IP('192.0.0.0/3') and ( ip.int() & 0x000000ff == 0x00000000 ) and ip.prefixlen() == 32: ip=ip.make_net(24) # process -h option if args.host: ip.NoPrefixForSingleIp = None s.add(ip) # add the IP into the set, automatically aggregating as necessary except KeyboardInterrupt: # show usage if user exits w/ CTRL-C print parser.print_help() sys.exit(1) # send the results to STDOUT for prefix in s: if args.ipv4 & (prefix.version() == 4): print (prefix) if args.ipv6 & (prefix.version() == 6): print (prefix)