def __init__(self):
    self.asns = csv_writer("asns.csv")
    self.prefixes = csv_writer("prefixes.csv")
    for fn in self.filenames:
        f = gzip.open(fn)
        self.statement = ""
        self.cur = None
        for line in f:
            # Strip comments and the trailing newline; expand tabs so the
            # continuation test below only sees plain characters.
            line = line.expandtabs().partition("#")[0].rstrip("\n")
            if line and not line[0].isalpha():
                # Continuation line: "+" or leading whitespace extends the
                # statement currently being accumulated.
                self.statement += line[1:] if line[0] == "+" else line
            else:
                # New attribute line (or blank line): flush what we have.
                self.finish_statement(not line)
                self.statement = line
        self.finish_statement(True)
        f.close()
    self.asns.close()
    self.prefixes.close()
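# Hedged sketch of the finish_statement() method the fragment above
# calls but does not show.  Assumptions: self.statement holds a single
# RPSL-style "attribute: value" line (continuations already joined),
# self.cur is a dict collecting the current object's attributes, and a
# blank line or end of file (done == True) flushes the object.
def finish_statement(self, done):
    if self.statement:
        attr, sep, value = self.statement.partition(":")
        if sep:
            if self.cur is None:
                self.cur = {}
            self.cur.setdefault(attr.strip().lower(), value.strip())
    if done and self.cur:
        # Which attributes identify an ASN or prefix object is also an
        # assumption; adjust to the real RPSL object types as needed.
        if "aut-num" in self.cur:
            self.asns.writerow((self.cur.get("mnt-by", ""), self.cur["aut-num"]))
        elif "inetnum" in self.cur or "inet6num" in self.cur:
            self.prefixes.writerow((self.cur.get("mnt-by", ""),
                                    self.cur.get("inetnum", self.cur.get("inet6num"))))
        self.cur = None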
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.

"""
Parse APNIC "Extended Allocation and Assignment" reports and write out
(just) the RPKI-relevant fields in myrpki-format CSV syntax.
"""

from rpki.csv_utils import csv_writer
from rpki.ipaddrs import v4addr

asns = csv_writer("asns.csv")
prefixes = csv_writer("prefixes.csv")

for line in open("delegated-apnic-extended-latest"):
    line = line.rstrip()
    if not line.startswith("apnic|") or line.endswith("|summary"):
        continue
    try:
        registry, cc, rectype, start, value, date, status, opaque_id = line.split("|")
    except ValueError:
        continue
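    # Hedged sketch of how the loop body might continue (not shown in the
    # fragment above).  Per the RIR extended-stats format, "value" is an
    # address count for ipv4 records, a prefix length for ipv6 records,
    # and an ASN count for asn records; treating v4addr as an integer-like
    # type whose str() form is a dotted quad is an assumption.
    if not opaque_id:
        continue
    if rectype == "asn":
        asns.writerow((opaque_id, "%s-%s" % (start, int(start) + int(value) - 1)))
    elif rectype == "ipv4":
        prefixes.writerow((opaque_id, "%s-%s" % (start, v4addr(v4addr(start) + int(value) - 1))))
    elif rectype == "ipv6":
        prefixes.writerow((opaque_id, "%s/%s" % (start, value)))

asns.close()
prefixes.close()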
for netblock in node.iter(tag_netBlock):
    tag = find(netblock, tag_type)
    startAddress = find(netblock, tag_startAddress)
    endAddress = find(netblock, tag_endAddress)
    # Skip anything that doesn't start and end on a whole last-octet
    # (IPv4) or last-group (IPv6) boundary.
    if not startAddress.endswith(".000") and not startAddress.endswith(":0000"):
        continue
    if not endAddress.endswith(".255") and not endAddress.endswith(":FFFF"):
        continue
    if tag in ("DS", "DA", "IU"):
        prefixes.writerow((handle, "%s-%s" % (startAddress, endAddress)))
    elif tag in erx_table:
        erx.writerow((erx_table[tag], "%s-%s" % (startAddress, endAddress)))

dispatch = { tag_asn : do_asn, tag_net : do_net }

asns = csv_writer("asns.csv")
prefixes = csv_writer("prefixes.csv")
erx = csv_writer("erx.csv")

root = None

# Stream the bulk XML from stdin; only dispatch on elements that are
# direct children of the document root.
for event, node in lxml.etree.iterparse(sys.stdin):
    if root is None:
        root = node
        while root.getparent() is not None:
            root = root.getparent()
    if node.getparent() is root:
        if node.tag in dispatch:
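            # Hedged sketch, not part of the fragment above: a dispatch
            # loop like this typically calls the matching handler and then
            # clears the processed element so lxml's incremental parser
            # does not hold the whole bulk dump in memory.  The trailing
            # close() calls are likewise an assumption.
            dispatch[node.tag](node)
        node.clear()

asns.close()
prefixes.close()
erx.close()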
tag_description = ns("description")
tag_designation = ns("designation")
tag_record = ns("record")
tag_number = ns("number")
tag_prefix = ns("prefix")
tag_status = ns("status")

handles = {}
rirs = { "legacy" : resource_bag() }

# Map every spelling IANA uses for an RIR ("APNIC", "Assigned by APNIC",
# "Administered by APNIC", ...) to a single lowercase handle, and give
# each RIR an empty resource bag in which to accumulate its /8s.
for rir in ("AfriNIC", "APNIC", "ARIN", "LACNIC", "RIPE NCC"):
    handle = rir.split()[0].lower()
    handles[rir] = handles["Assigned by %s" % rir] = handles["Administered by %s" % rir] = handle
    rirs[handle] = resource_bag()

asns = csv_writer("asns.csv")
prefixes = csv_writer("prefixes.csv")

for record in iterate_xml("as-numbers.xml", tag_record):
    description = record.findtext(tag_description)
    if description in handles:
        asns.writerow((handles[description], record.findtext(tag_number)))

for record in iterate_xml("ipv4-address-space.xml", tag_record):
    designation = record.findtext(tag_designation)
    if record.findtext(tag_status) != "RESERVED":
        prefix, prefixlen = [int(i) for i in record.findtext(tag_prefix).split("/")]
        if prefixlen != 8:
            raise ValueError("%s violated /8 assumption" % record.findtext(tag_prefix))
        rirs[handles.get(designation, "legacy")] |= resource_bag.from_str("%d.0.0.0/8" % prefix)
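# Hedged sketch of the two helpers the fragment above uses but does not
# define.  The IANA registry XML files use the namespace
# "http://www.iana.org/assignments"; ns() is assumed to qualify a tag
# name with that namespace, and iterate_xml() to stream matching
# elements out of a registry file.

import lxml.etree

def ns(tag):
    return "{http://www.iana.org/assignments}" + tag

def iterate_xml(filename, tag):
    return (elem for event, elem in lxml.etree.iterparse(filename)
            if elem.tag == tag)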
program for two reasons:

- Conversion of some of the RIR data is a very slow process, and it's
  both annoying and unnecessary to run it every time we add a new
  participant to the testbed.

- This handle translation business now has fingers into half a dozen
  scripts, so it needs refactoring in any case, either as a common
  library function or as a separate script.

This program takes a list of .CSV files on its command line, and
rewrites them as needed after performing the translation.
"""

import os
import sys

from rpki.csv_utils import csv_reader, csv_writer

translations = dict((src, dst) for src, dst in csv_reader("translations.csv", columns = 2))

for filename in sys.argv[1:]:
    f = csv_writer(filename)
    for cols in csv_reader(filename):
        if cols[0] in translations:
            cols[0] = translations[cols[0]]
        f.writerow(cols)
    f.close()
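# Hedged, self-contained illustration of the translation step (the
# sample handles are invented, and rpki.csv_utils.csv_reader/csv_writer
# are assumed to behave like plain delimited-text readers/writers):
demo_translations = dict([("APNIC-EXAMPLE", "alice"), ("ARIN-EXAMPLE", "bob")])
demo_rows = [["APNIC-EXAMPLE", "AS64512"], ["UNRELATED", "AS64513"]]
for cols in demo_rows:
    if cols[0] in demo_translations:
        cols[0] = demo_translations[cols[0]]
print(demo_rows)   # [['alice', 'AS64512'], ['UNRELATED', 'AS64513']]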