def block_ip(self, ip, ipset_name=None, reload=True):
    block_ipset = self.get_block_ipset_for_ip(ip, ipset_name)
    if not block_ipset:
        # TODO err: unsupported protocol
        raise Exception('Unsupported protocol')
    self.ensure_block_ipset_in_drop_zone(block_ipset)
    log.info('Adding IP address {} to block set {}'.format(
        ip, block_ipset.get_property('name')))
    try:
        # If aggregate6 is available, re-aggregate the whole set so that
        # adjacent and overlapping entries collapse into fewer prefixes.
        from aggregate6 import aggregate
        entries = []
        for entry in block_ipset.getEntries():
            entries.append(str(entry))
        entries.append(str(ip))
        block_ipset.setEntries(aggregate(entries))
    except ImportError:
        # Fall back to appending the single entry unaggregated.
        block_ipset.addEntry(str(ip))
    if reload:
        log.info('Reloading FirewallD to apply permanent configuration')
        self.fw.reload()
    log.info('Breaking connection with {}'.format(ip))
    from subprocess import CalledProcessError, check_output, STDOUT
    try:
        check_output(["/sbin/conntrack", "-D", "-s", str(ip)],
                     stderr=STDOUT)
    except CalledProcessError:
        # conntrack exits non-zero when no matching flows exist
        pass
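# A minimal standalone sketch (not part of the class above) of the
# behaviour block_ip relies on: aggregate6.aggregate collapses adjacent
# and overlapping entries into the shortest equivalent prefix list.
from aggregate6 import aggregate

entries = ["198.51.100.0/25", "198.51.100.128/25", "198.51.100.64/26"]
print(aggregate(entries))  # ['198.51.100.0/24']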
def test_05__lot_of_ipv4_holes(self):
    pfxs = []
    for i in range(5, 200):
        pfxs.append("{}.0.0.0/8".format(i))
    outcome = ["5.0.0.0/8", "6.0.0.0/7", "8.0.0.0/5", "16.0.0.0/4",
               "32.0.0.0/3", "64.0.0.0/2", "128.0.0.0/2", "192.0.0.0/5"]
    self.assertEqual(aggregate(pfxs), outcome)
def do_prefixes_merge(s_cone_asn_prefixes):
    """
    Receive an (ASN, prefix list) cone definition tuple and aggregate it.

    Return: dict mapping the cone ASN to its aggregated prefix list.
    """
    d_cone_def_merged = dict()
    id_asn_cone = s_cone_asn_prefixes[0]
    l_prefixes = s_cone_asn_prefixes[1]
    d_cone_def_merged[id_asn_cone] = prefixagg.aggregate(l_prefixes)
    return d_cone_def_merged
def _parse_rirstats(self, text: str):
    prefixes = []
    for ip_version, start_ip, size in self._rirstats_lines(text):
        if ip_version == 4:
            # RIR stats describe IPv4 blocks as a start address plus an
            # address count; convert the range into CIDR prefixes.
            first_ip = ipaddress.ip_address(start_ip)
            last = int(first_ip) + int(size) - 1
            last_ip = ipaddress.ip_address(last)
            cidrs = ipaddress.summarize_address_range(first_ip, last_ip)
            for prefix in cidrs:
                prefixes.append(str(prefix))
        else:
            # For IPv6 the "size" field is already a prefix length.
            prefixes.append(f"{start_ip}/{size}")
    return aggregate6.aggregate(prefixes)
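# A small illustration of the IPv4 branch above: an address range need
# not align to a single CIDR block, and summarize_address_range splits
# it into the fewest covering prefixes.
import ipaddress

start = ipaddress.ip_address("10.0.0.0")
count = 768  # covers 10.0.0.0 - 10.0.2.255
end = ipaddress.ip_address(int(start) + count - 1)
print([str(p) for p in ipaddress.summarize_address_range(start, end)])
# ['10.0.0.0/23', '10.0.2.0/24']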
def do_prefixes_merge(chunk_line):
    """
    Receive a whitespace-separated "<asn> <prefix> <prefix> ..." line
    describing a prefix cone and aggregate it.

    Return: dict mapping the cone ASN to its aggregated prefix list.
    """
    d_cone_def_merged = dict()
    l_stripped_line = chunk_line.strip().split(" ")
    id_asn_cone = l_stripped_line[0]
    l_prefixes = l_stripped_line[1:]
    d_cone_def_merged[id_asn_cone] = prefixagg.aggregate(l_prefixes)
    return d_cone_def_merged
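# A hedged driver sketch for the line-based worker above; the input file
# name and pool size are illustrative assumptions, not from the source,
# and prefixagg is assumed to be aggregate6 imported under an alias.
from multiprocessing import Pool

def merge_cone_file(path="cones.txt"):  # hypothetical input file
    with open(path) as f:
        lines = [line for line in f if line.strip()]
    # Each line is independent, so the merge parallelizes trivially.
    with Pool(4) as pool:
        return pool.map(do_prefixes_merge, lines)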
def country_delegation_statistics(origin_ases_delegation_cc_df):
    """
    Compute the aggregated delegated address space per (country, origin
    ASN) pair and return it as a DataFrame.
    """
    output_list = []
    for origin_asn, cc in origin_ases_delegation_cc_df.drop_duplicates(
            ['origin-asn', 'cc'])[['origin-asn', 'cc']].values:
        prefix_list = origin_ases_delegation_cc_df.loc[
            (origin_ases_delegation_cc_df['origin-asn'] == origin_asn) &
            (origin_ases_delegation_cc_df['cc'] == cc)]['pfix'].values.tolist()
        # Aggregate first so overlapping prefixes are not double-counted.
        aggregated_prefix_list = aggregate(prefix_list)
        ip_cnt = 0
        for prefix in aggregated_prefix_list:
            ip_cnt += prefix_addr_space(prefix)
        output_list.append((cc, origin_asn, ip_cnt))
    # Generate output DataFrame
    delegated_addr_space_by_as_cc_df = pd.DataFrame(
        output_list, columns=['cc', 'origin-asn', 'ip-cnt'])
    return delegated_addr_space_by_as_cc_df
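# prefix_addr_space is referenced above but not shown; a plausible
# one-liner (an assumption, not the source's code) counts the addresses
# covered by a prefix.
import ipaddress

def prefix_addr_space(prefix):
    return ipaddress.ip_network(prefix).num_addresses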
def ip_networks_aggregates(prefixes: List[IPNetwork]):
    inputs = [str(prefix) for prefix in prefixes]
    return [ip_network(prefix) for prefix in aggregate(inputs)]
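# Usage sketch for the wrapper above, assuming IPNetwork aliases the
# ipaddress network types: it round-trips typed network objects through
# the string-based aggregate6 API.
nets = [ip_network("10.0.0.0/8"), ip_network("11.0.0.0/8")]
print(ip_networks_aggregates(nets))  # [IPv4Network('10.0.0.0/7')]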
def test_07__non_ip_input(self):
    with self.assertRaises(Exception) as context:
        aggregate(["this_is_no_prefix", "10.0.0.0/24"])
    self.assertTrue('ERROR, invalid IP prefix: this_is_no_prefix'
                    in str(context.exception))
def test_06__reduce_dups(self):
    self.assertEqual(aggregate(["2001:db8::/32", "2001:db8::/32"]),
                     ["2001:db8::/32"])
def test_04__lot_of_ipv4(self):
    pfxs = []
    for i in range(0, 256):
        pfxs.append("{}.0.0.0/8".format(i))
    self.assertEqual(aggregate(pfxs), ["0.0.0.0/0"])
def test_03__mix_v4_v6_default(self):
    self.assertEqual(aggregate(["0.0.0.0/0", "::/0"]),
                     ["0.0.0.0/0", "::/0"])
def test_01__join_two(self):
    self.assertEqual(aggregate(["10.0.0.0/8", "11.0.0.0/8"]),
                     ["10.0.0.0/7"])
def test_00__default_wins(self):
    self.assertEqual(aggregate(["0.0.0.0/0", "10.0.0.0/16"]),
                     ["0.0.0.0/0"])
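# The assertions above come from a unittest.TestCase; a minimal harness
# sketch to run a comparable check standalone (class and test names are
# assumptions, not from the source suite):
import unittest
from aggregate6 import aggregate

class TestAggregateExamples(unittest.TestCase):
    def test_join_two_v6(self):
        # Two adjacent /33 halves collapse into their covering /32.
        self.assertEqual(aggregate(["2001:db8::/33", "2001:db8:8000::/33"]),
                         ["2001:db8::/32"])

if __name__ == "__main__":
    unittest.main()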
ripe_url = "http://ftp.ripe.net/ripe/stats/delegated-ripencc-latest"
r = requests.get(ripe_url)

pfx_list = []
for entry in str(r.text).split('\n'):
    if not entry:
        continue
    try:
        afi, start_ip, count = entry.split('|')[2:5]
    except ValueError:
        print(entry)
        sys.exit(1)
    ip_type = entry.split('|')[-1]
    if ip_type not in ["allocated", "assigned"]:
        continue
    if afi == "ipv4":
        # IPv4 delegations list a start address and an address count;
        # expand the range into CIDR prefixes.
        first_ip = ipaddress.ip_address(start_ip)
        last = int(first_ip) + int(count) - 1
        last_ip = ipaddress.ip_address(last)
        cidrs = ipaddress.summarize_address_range(first_ip, last_ip)
        for prefix in cidrs:
            pfx_list.append(str(prefix))
    if afi == "ipv6":
        # IPv6 delegations already carry a prefix length.
        pfx_list.append("{}/{}".format(start_ip, count))

for prefix in aggregate(pfx_list):
    print(prefix)
def aggregated_roas_covered_space(self):
    prefixes = []
    for pref_len in self.rpki_roas:
        for roa in self.rpki_roas[pref_len]:
            prefixes.append(roa["prefix"])
    return aggregate(prefixes)
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-c', dest='cache',
                        default="https://rpki.gin.ntt.net/api/export.json",
                        type=str,
                        help="""Location of the RPKI Cache in JSON format
(default: https://rpki.gin.ntt.net/api/export.json)""")
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s ' + rpki_ov_route_map.__version__)
    args = parser.parse_args()

    if 'http' in args.cache:
        r = requests.get(args.cache, headers={'Accept': 'text/json'})
        validator_export = r.json()
    else:
        validator_export = json.load(open(args.cache, "r"))

    print("""!
ip bgp-community new-format
no ip community-list rpki-not-found
ip community-list standard rpki-not-found permit 65000:0
no ip community-list rpki-valid
ip community-list standard rpki-valid permit 65000:1
no ip community-list rpki-invalid
ip community-list standard rpki-invalid permit 65000:2
no ip community-list rpki
ip community-list expanded rpki permit 65000:[123]
!""")

    data = dict()
    data['vrps'] = load_vrp_list(validator_export)
    data['origins'] = collections.defaultdict(set)
    covered_space = set()

    for vrp in data['vrps']:
        covered_space.add(vrp['prefix'])
        if vrp['prefixlen'] == vrp['maxlen']:
            entry = vrp['prefix']
        else:
            entry = "{} le {}".format(vrp['prefix'], vrp['maxlen'])
        data['origins'][vrp['origin']].add(entry)

    print("no ip prefix-list rpki-covered-space-v4")
    for i in aggregate(covered_space):
        print("ip prefix-list rpki-covered-space-v4 permit {} le 32".format(i))
    print("!")

    for origin, prefixes in data['origins'].items():
        if origin == 0:
            continue
        print("!")
        print("no ip prefix-list rpki-origin-AS{}".format(origin))
        for prefix in prefixes:
            print("ip prefix-list rpki-origin-AS{} permit {}".format(
                origin, prefix))
        print("!")
        print("no ip as-path access-list {}".format(origin))
        print("ip as-path access-list {} permit _{}$".format(origin, origin))

    print("""!
! test whether BGP NLRI is covered by RPKI ROA or not
route-map rpki-ov permit 1
 match ip address prefix-list rpki-covered-space-v4
 set comm-list rpki delete
 continue 3
!
! BGP announcement is not covered by RPKI ROA, mark as not-found and exit
route-map rpki-ov permit 2
 set comm-list rpki delete
 set community 65000:0 additive
!
! find RPKI valids""")

    n = 3
    for origin in data['origins'].keys():
        if origin == 0:
            continue
        print("!")
        print("route-map rpki-ov permit {}".format(n))
        print(" match ip prefix-list rpki-origin-AS{}".format(origin))
        print(" match as-path {}".format(origin))
        print(" set community 65000:1")
        n += 1
    print("!")
    print("! Reject RPKI Invalid BGP announcements")
    print("route-map rpki-ov deny {}".format(n))
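# load_vrp_list is imported from the surrounding project and not shown.
# A hedged sketch, assuming an NTT-style export with a top-level "roas"
# list of {prefix, maxLength, asn} objects; the real helper may filter
# and validate entries beyond what is shown here.
def load_vrp_list(export):
    vrps = []
    for roa in export["roas"]:
        prefix = roa["prefix"]
        vrps.append({
            "prefix": prefix,
            "prefixlen": int(prefix.split("/")[1]),
            "maxlen": int(roa["maxLength"]),
            "origin": int(str(roa["asn"]).lstrip("AS")),
        })
    return vrps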
stream = pybgpstream.BGPStream(from_time=time_init, until_time=time_end,
                               filter="type ribs and collector %s"
                                      % (collector, ))

rib = dict()
for elem in stream:
    if elem.type == 'R':  # RIB entry
        rib[elem.fields["prefix"] + elem.fields["next-hop"]] = elem.fields

# remove duplicates: group prefixes by (origin AS, next hop) so each
# group can be aggregated independently
prefixes_by_as = dict()
for rib_elem in rib.values():
    as_origin = rib_elem["as-path"].split(" ")[-1]
    next_hop = rib_elem["next-hop"]
    key = as_origin + next_hop
    if key not in prefixes_by_as:
        prefixes_by_as[key] = list()
    prefixes_by_as[key].append(rib_elem["prefix"])

aggregated_rib_len = 0
for prefixes in prefixes_by_as.values():
    aggregated_rib_len += len(aggregate(prefixes))

pprint.pprint("Full RIB length: %d" % len(rib))
pprint.pprint("Aggregated RIB length: %d" % aggregated_rib_len)
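# A small follow-on sketch (not in the source): reporting how much the
# per-origin aggregation shrank the table, using the variables computed
# above.
ratio = aggregated_rib_len / float(len(rib)) if rib else 0.0
pprint.pprint("Aggregation ratio: %.2f" % ratio)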