def validate_networks(
    value: Union[bool, Sequence[str]]
) -> Union[bool, List[Union[IPv6Network, IPv4Network]]]:
    if value is True:
        return [IPv4Network('0.0.0.0/0'), IPv6Network('::/0')]

    if not value:
        return []

    converted_networks_ipv4 = []
    converted_networks_ipv6 = []
    for i, network in enumerate(value):
        try:
            if ':' in network:
                converted_networks_ipv6.append(IPv6Network(network))
            else:
                converted_networks_ipv4.append(IPv4Network(network))
        except ValueError:
            raise vol.Invalid("invalid network provided", path=[i])

    if not converted_networks_ipv6:
        return list(collapse_addresses(converted_networks_ipv4))
    elif not converted_networks_ipv4:
        return list(collapse_addresses(converted_networks_ipv6))
    return [
        *collapse_addresses(converted_networks_ipv4),
        *collapse_addresses(converted_networks_ipv6)
    ]
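A minimal sketch of the collapsing behavior the validator above relies on (standard library only; the voluptuous import `vol` is needed just for the error path):

from ipaddress import IPv4Network, collapse_addresses

# Two adjacent /25s collapse into a single /24
nets = [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]
print(list(collapse_addresses(nets)))  # [IPv4Network('192.0.2.0/24')]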
Example #2
def getRKNdata():
    # Get AWS prefixes
    aws_prefixes = downloadJSONdata()

    url = "https://raw.githubusercontent.com/zapret-info/z-i/master/dump.csv"
    data = []

    response = urllib.request.urlopen(url)
    reader = csv.reader(codecs.iterdecode(response, 'cp1251'), delimiter=';')

    for row in reader:
        for rkn_prefix in row[0].split(' | '):
            try:
                data.append(ipaddress.ip_network(rkn_prefix))
            except ValueError:
                # Not an IP in rkn_blocklist row
                continue

    rkn_prefixes = list(ipaddress.collapse_addresses(data))

    data = []

    for aws_prefix in aws_prefixes:
        for rkn_prefix in rkn_prefixes:
            if rkn_prefix.overlaps(aws_prefix):
                data.append(rkn_prefix)
                break

    return list(ipaddress.collapse_addresses(data))
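For illustration, a sketch of the overlaps() filter used above, with hypothetical documentation prefixes standing in for the AWS and RKN data:

import ipaddress

aws_prefix = ipaddress.ip_network('198.51.100.0/24')
rkn_prefix = ipaddress.ip_network('198.51.100.128/25')
print(rkn_prefix.overlaps(aws_prefix))  # True: the /25 lies inside the /24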
Example #3
def _clean_up_networks(strings: list) -> tuple:
    """
    Take a list of strings. Convert elements to objects from the ipaddress
    class. For elements that fail conversion (still string), print an error and
    remove them. Separate remaining items into lists of a single address family:
    one of IPv4 objects and the other of IPv6 objects. Return a tuple of the
    IPv4Network list, IPv6Network list.
    """

    # Get two empty lists ready
    ip4_nets = []
    ip6_nets = []

    # Convert strings to ipaddress objects
    ip_networks = _strings_to_networks(strings)

    # Print an error for each string that did not convert and remove it from
    # the list. Sort IPv4 and IPv6 objects into separate lists.
    for i, item in enumerate(ip_networks):
        if isinstance(item, str):
            # Report strings that failed conversion and skip them; deleting
            # from ip_networks while iterating over it would skip elements.
            print("ValueError: '{}' does not appear to be an IPv4 or IPv6 "
                  "network, ignoring.".format(item))
        elif isinstance(item, ipaddress.IPv4Network):
            ip4_nets.append(item)
        elif isinstance(item, ipaddress.IPv6Network):
            ip6_nets.append(item)
        else:
            raise TypeError("Unexpected type for item {} found at "
            "'ip_networks[{}]'.".format(str(item), i))

    # Return collapsed lists
    return (list(ipaddress.collapse_addresses(ip4_nets)),
            list(ipaddress.collapse_addresses(ip6_nets)))
Example #4
def _parse_address_list(a):
    net4, net6 = [], []
    for addr in a:
        m = sd_regex.match(addr).groupdict()
        if m['ipv4']:
            net4.append(ipaddress.IPv4Network(_unicode(m['ipv4'])))
        elif m['ipv6']:
            net6.append(ipaddress.IPv6Network(_unicode(m['ipv6'])))
        elif m['fqdn']:
            # throw up badly if domain names cannot be resolved
            # ignoring dns.resolver.NXDOMAIN silently here leads to generated
            # rules with missing src/dst filters, which is bad
            r4 = None
            r6 = None
            try:
                r4 = dns.resolver.query(m['fqdn'], dns.rdatatype.A)
                net4.extend(sorted([ipaddress.IPv4Network(_unicode(rr.to_text()))
                                    for rr in r4.rrset]))
            except dns.resolver.NoAnswer:
                pass
            except dns.resolver.NXDOMAIN as e:
                logger.critical("NXDOMAIN on %s" % m['fqdn'])
                raise e
            try:
                r6 = dns.resolver.query(m['fqdn'], dns.rdatatype.AAAA)
                net6.extend(sorted([ipaddress.IPv6Network(_unicode(rr.to_text()))
                                    for rr in r6.rrset]))
            except dns.resolver.NoAnswer:
                pass
            except dns.resolver.NXDOMAIN as e:
                logger.critical("NXDOMAIN on %s" % m['fqdn'])
                raise e
            rtxt = None
            if r4 is None and r6 is None:
                try:
                    rtxt = dns.resolver.query(m['fqdn'], dns.rdatatype.TXT)
                    for rr in rtxt.rrset:
                        txt = rr.to_text()
                        if txt.startswith('"') and txt.endswith('"'):
                            txt = txt[1:-1]
                        txt_net4, txt_net6 = _parse_address_list([txt])
                        net4.extend(txt_net4)
                        net4 = sorted(ipaddress.collapse_addresses(net4))
                        net6.extend(txt_net6)
                        net6 = sorted(ipaddress.collapse_addresses(net6))
                except dns.resolver.NoAnswer:
                    pass
                except dns.resolver.NXDOMAIN as e:
                    logger.critical("NXDOMAIN on %s" % m['fqdn'])
                    raise e

            if r4 is None and r6 is None and rtxt is None:
                raise OinkwallException('No A, AAAA or TXT found for %s' % m['fqdn'])
        else:
            logger.critical('Regular expression for parse_address_list cannot '
                            'deal with %s' % addr)

    return net4, net6
Example #5
 def get_as_prefixes_effective(self, asn):
     """
     Returns the effective address space of given ASN by removing all overlaps among the prefixes
     :return: The effective prefixes resulting from removing overlaps of the given ASN's prefixes
     """
     prefixes = self.get_as_prefixes(asn)
     if not prefixes:  # issue 12
         return None
     non_overlapping_prefixes4 = collapse_addresses([ip_network(i) for i in prefixes if ':' not in i])
     non_overlapping_prefixes6 = collapse_addresses([ip_network(i) for i in prefixes if ':' in i])
     return [i.compressed for i in non_overlapping_prefixes4] + [i.compressed for i in non_overlapping_prefixes6]
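A self-contained sketch of the ':' in i address-family split used above, with made-up prefixes:

from ipaddress import collapse_addresses, ip_network

prefixes = ['10.0.0.0/24', '10.0.1.0/24', '2001:db8::/33', '2001:db8:8000::/33']
v4 = collapse_addresses(ip_network(p) for p in prefixes if ':' not in p)
v6 = collapse_addresses(ip_network(p) for p in prefixes if ':' in p)
print([n.compressed for n in v4] + [n.compressed for n in v6])
# ['10.0.0.0/23', '2001:db8::/32']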
Example #6
 def supernet(self):
     if self._check() == 0:
         prefixlen = self._min_prefixlen()
         newPrefixNwList = [ip_network(x).supernet(new_prefix=prefixlen) for x in self]
         newNetworkList = list(collapse_addresses(newPrefixNwList))
         while len(newNetworkList) > 1:
             prefixlen = prefixlen - 1
             newPrefixNwList = [ip_network(x).supernet(new_prefix=prefixlen) for x in self]
             newNetworkList = list(collapse_addresses(newPrefixNwList))
         return newNetworkList[0]
Example #7
    def get_nets_krnic(self, response):
        """
        The function for parsing network blocks from krnic whois data.

        Args:
            response (:obj:`str`): The response from the krnic server.

        Returns:
            list of dict: Mapping of networks with start and end positions.

            ::

                [{
                    'cidr' (str) - The network routing block
                    'start' (int) - The starting point of the network
                    'end' (int) - The ending point of the network
                }]
        """

        nets = []

        # Iterate through all of the networks found, storing the CIDR value
        # and the start and end positions.
        for match in re.finditer(
                r'^(IPv4 Address)[\s]+:[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+?)'
                r'[^\S\n]\((.+?)\)|.+)$', response, re.MULTILINE):

            try:

                net = copy.deepcopy(BASE_NET)
                net['range'] = match.group(2)

                if match.group(3) and match.group(4):

                    addrs = []
                    addrs.extend(
                        summarize_address_range(
                            ip_address(match.group(3).strip()),
                            ip_address(match.group(4).strip())))

                    cidr = ', '.join(
                        [i.__str__() for i in collapse_addresses(addrs)])

                    net['range'] = '{0} - {1}'.format(match.group(3),
                                                      match.group(4))

                else:

                    cidr = ip_network(match.group(2).strip()).__str__()

                net['cidr'] = cidr
                net['start'] = match.start()
                net['end'] = match.end()
                nets.append(net)

            except (ValueError, TypeError):

                pass

        return nets
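The start/end pairs matched above reduce to CIDR blocks through summarize_address_range() plus collapse_addresses(); a standalone sketch:

from ipaddress import ip_address, summarize_address_range, collapse_addresses

blocks = summarize_address_range(ip_address('192.0.2.0'), ip_address('192.0.3.255'))
print(', '.join(str(n) for n in collapse_addresses(blocks)))  # 192.0.2.0/23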
Example #8
def get_net_in_area(OSW_CFG_TXT):
    ''' return a dict of kind: { area: [ipaddress('prefix/length'),] } '''

    dict_net_in_area = dict()  # { area: [ipaddress('prefix/length'),] }

    parse = c.CiscoConfParse(OSW_CFG_TXT)

    ospf_obj_list = parse.find_objects(r'^router ospf')

    for line in ospf_obj_list[0].ioscfg:
        hl = line.split()
        if hl[0] == 'network':
            net = hl[1]
            net_hostmask = hl[2]
            area = hl[4]
            if area not in dict_net_in_area:
                dict_net_in_area[area] = [
                    ipaddress.IPv4Network(
                        net + '/' + net_hostmask)]
            else:
                dict_net_in_area[area].append(
                    ipaddress.IPv4Network(
                        net + '/' + net_hostmask))
    for area in dict_net_in_area:
        dict_net_in_area[area] = list(
            ipaddress.collapse_addresses(
                dict_net_in_area[area]))
    return dict_net_in_area
Example #9
def parse_blacklist(bl_data, bl_type, regex=None):
    """
    Parses downloaded blacklist to the list of individual blacklist records
    :param bl_data: Blacklist data, which will be parsed
    :param bl_type: Type of blacklist (ip|prefixIP|domain)
    :param regex: Regular expression, which may be used for parsing records
    :return: List of individual blacklist records and length of blacklist (len of prefixIP blacklist needs to be
        calculated before collapsing range)
    """
    bl_records = []
    prefix_bl_length = 0
    if regex:
        cregex = compile_regex(regex)
        bl_records = parse_bl_with_regex(bl_data, bl_type, cregex)
    else:
        bl_records = parse_bl_without_regex(bl_data, bl_type)

    # if the blacklist was a prefix blacklist, try to collapse ranges
    if bl_type == "prefixIP":
        # the length must be taken before collapsing (see docstring)
        prefix_bl_length = len(bl_records)
        print("prefix blacklist length before collapsing: " + str(prefix_bl_length))
        # remove overlaps from range IP blacklists
        bl_records = ipaddress.collapse_addresses(bl_records)

    return bl_records, prefix_bl_length
Example #10
def cidrExclude(list1, list2):
    newAdd = []
    cidrOutput = []
    cidrOutputMerge = []
    i = 0
    while i < len(list1):
        overlap = 0
        newAdd = []
        for j in range(len(list2)):
            if list1[i].overlaps(list2[j]):
                overlap = 1
                if list1[i].supernet_of(list2[j]):
                    newAdd = list(list1[i].address_exclude(list2[j]))
                    newAdd.sort()
                    list1 = list1[:i + 1] + newAdd + list1[i + 1:]
                    break
        if overlap == 0:
            print("Output", list1[i], "\n")
            cidrOutput.append(list1[i])  # append the network itself, not each host address
        i += 1

    cidrOutputMerge = ipaddress.collapse_addresses(cidrOutput)
    with open("cidrOutput.txt", "w") as fileOutput:
        for iprange in cidrOutputMerge:
            fileOutput.write(str(iprange))
            fileOutput.write("\n")
Example #11
def calculate_cidr(start_address, end_address):
    """
    The function to calculate a CIDR range(s) from a start and end IP address.

    Args:
        start_address: The starting IP address in string format.
        end_address: The ending IP address in string format.

    Returns:
        List: A list of calculated CIDR ranges.
    """

    tmp_addrs = []

    try:

        tmp_addrs.extend(summarize_address_range(
            ip_address(start_address),
            ip_address(end_address)))

    except (KeyError, ValueError, TypeError):  # pragma: no cover

        try:

            tmp_addrs.extend(summarize_address_range(
                ip_network(start_address).network_address,
                ip_network(end_address).network_address))

        except AttributeError:  # pragma: no cover

            tmp_addrs.extend(summarize_address_range(
                ip_network(start_address).ip,
                ip_network(end_address).ip))

    return [i.__str__() for i in collapse_addresses(tmp_addrs)]
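A usage sketch: a range that is not aligned to a CIDR boundary summarizes into several blocks, and collapse_addresses() cannot merge them any further:

from ipaddress import ip_address, summarize_address_range, collapse_addresses

blocks = summarize_address_range(ip_address('192.0.2.1'), ip_address('192.0.2.6'))
print([str(n) for n in collapse_addresses(blocks)])
# ['192.0.2.1/32', '192.0.2.2/31', '192.0.2.4/31', '192.0.2.6/32']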
Example #12
def getBlockedIPList(connstr, collapse=True, ipv6=False):
    """
    Complementary function for getting only IP list
    :param connstr: something like "engine://*****:*****@host:port/dbname"
    :param collapse: merge and minimize IPs and networks
    :param ipv6: use ipv6 entities
    :return: The total and the list of ip subnets, using /32 for ips.
    """
    bldt = BlockData(connstr)
    if ipv6:
        ips = [
            ipaddress.ip_network(addr)
            for addr in bldt.getBlockedResourcesSet('ipv6')
        ]
        ipsubs = [
            ipaddress.ip_network(addr)
            for addr in bldt.getBlockedResourcesSet('ipv6subnet')
        ]
    else:
        ips = [
            ipaddress.ip_network(addr)
            for addr in bldt.getBlockedResourcesSet('ip')
        ]
        ipsubs = [
            ipaddress.ip_network(addr)
            for addr in bldt.getBlockedResourcesSet('ipsubnet')
        ]
    ipsall = ips + ipsubs
    if collapse:
        return list(ipaddress.collapse_addresses(ipsall))
    return list(ipsall)
Example #13
def legacy_import(db_user, db_pass, db_host, db_name):
    # init and connect to legacy db
    db_uri = 'postgresql://%s:%s@%s/%s' % (db_user, db_pass, db_host, db_name)
    db_engine = create_engine(db_uri, convert_unicode=True)
    db_metadata.bind = db_engine
    db_con = db_engine.connect()

    users = t_usr.select().execute().fetchall()
    users_len = len(users)
    for i, user in enumerate(users):
        print("\n\n[%d/%d] %s <%s>..." % (i, users_len, user[1], user[3]))
        rows = t_ip.select(t_ip.c.user_id == user[0]).execute().fetchall()
        networks = [IPv4Network(unicode(r[1])) for r in rows]
        collapsed = [c.compressed for c in collapse_addresses(networks)]
        for c in collapsed:
            pool = current_app.config['API_POOL_LEGACY']
            try:
                data = {
                    'description': user[1],
                    'tags': ['legacy','imported']
                }
                get_api().create_prefix_by_cidr(c, user[3], pool, data)

                print("\t* %s saved." % c)
            except NipapDuplicateError:
                print("\t* %s already exists." % c)
Example #14
def sg_network_in_policy(sg, args):
    '''check_network_policy(json ASG entity) - validate network policy'''
    # check for exceptions to policy
    if args.skip is not None and sg['name'] in args.skip:
        return True
    if args.skip_re is not None:
        for r in args.skip_rec:
            if r.match(sg['name']):
                return True
    # collect all addresses, and apply policy to them in aggregate
    ip_n_list = list()
    for sg_rule in sg['rules']:
        if 'destination' not in sg_rule:
            return False
        ip_n_list.extend(compile_networks([sg_rule['destination']]))
    # take aggregate of addresses, and apply policy - this avoids
    # splitting non-compliant policy across several rules to
    # circumvent controls (note that this does not prevent it from
    # being done across multiple policies)
    ip_n_list = list(ipaddress.collapse_addresses(ip_n_list))
    for ip_n in ip_n_list:
        if ip_n.prefixlen < args.min_cidr:
            if args.debug:
                print("%s rule %s fails /%d check" %
                      (sg['name'], ip_n, args.min_cidr))
            return False
        # no referring to banned networks
        if args.network is not None:
            for banned in args.banned_networks:
                if ip_n.overlaps(banned):
                    if args.debug:
                        print("%s rule %s fails banned nets" %
                              (sg['name'], ip_n))
                    return False
    return True  # network policy passed
Example #15
def merge_ip_nets(ip_net):
    ip_counter = 0
    net_break_counter = 0
    nets = list(ipaddress.collapse_addresses(ip_net))
    if not nets:
        return (0, [])  # guard: net_start/net_end would be unbound below
    merged_nets = []
    for n in nets:
        ip_counter += n.num_addresses
        start_ip = n[0]
        end_ip = n[-1]
        if net_break_counter == 0:
            net_start = start_ip
            net_end = end_ip
        # check if the temp ip net can be continue
        if net_break_counter > 0:
            if start_ip == net_end + 1:
                net_end = end_ip
            else:
                # net breaks
                net_break_counter = 0
                merged_nets.append(
                    [net_start.compressed + '-' + net_end.compressed])
                print(merged_nets)
                # reset net_start & net_end
                net_start = start_ip
                net_end = end_ip
        net_break_counter += 1
    merged_nets.append([net_start.compressed + '-' + net_end.compressed])
    print(merged_nets)
    return (ip_counter, merged_nets)
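Assuming the definition above, a hypothetical call showing how collapsed networks are stitched into contiguous ranges (intermediate print output omitted):

import ipaddress

nets = [ipaddress.ip_network('10.0.0.0/24'),
        ipaddress.ip_network('10.0.1.0/24'),
        ipaddress.ip_network('10.0.5.0/24')]
count, merged = merge_ip_nets(nets)
# count == 768
# merged == [['10.0.0.0-10.0.1.255'], ['10.0.5.0-10.0.5.255']]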
Example #16
    def summarize_ipv6(mml_file):
        ipv6_pattern = r'.*prefix\s\d\s*([^\n\r]*)'
        ipv6_list = list()
        with open(mml_file) as fin:
            ip_list = list()
            for lines in fin:
                if lines.strip().startswith('prefix'):
                    ip_list.append(
                        re.findall(pattern=ipv6_pattern, string=lines))
        for ls in ip_list:
            for txt in ls:
                ip_range = txt.strip().split()
                ipv6_list.append(ip_range)
        networks = list()
        for ls in ipv6_list:
            networks.extend(
                ipaddress.summarize_address_range(
                    ipaddress.IPv6Address(ls[0]),
                    ipaddress.IPv6Address(ls[-1])))

        aggregate = [
            str(network) for network in ipaddress.collapse_addresses(networks)
        ]

        return aggregate
Example #17
    def add_new_subnet(self, hosts, name):
        for subnet in self.subnets_lst:
            if subnet.name == name:
                print("Name already exists!")
                return None
        holder = list()

        if self.version == 4:
            (cidr, block) = self.get_cidr_block_from_hosts(hosts)
        else:
            (cidr,
             block) = (64, Statics.get_block_size_from_cidr(64, self.version))

        def add(name, network):
            subnet = Subnet(name, network)
            self.subnets[subnet] = IP_tracker(subnet)
            self.subnets_lst.append(subnet)
            self.subnets_lst = sorted(self.subnets_lst,
                                      key=lambda x: (x.network, x.name))

        def get_split_addr(start, stop, cidr):
            if start == stop:
                return list()  # squeeze and sort address_space
            else:
                holder.append(ipaddress.ip_network("{}/{}".format(start,
                                                                  cidr)))
                return get_split_addr(
                    start +
                    Statics.get_block_size_from_cidr(cidr, self.version), stop,
                    cidr - 1)

        while True:
            if not self.address_space:
                self.unsupported_lst.append("{}, {}".format(name, hosts))
                print("There is no space for requirement '{}'!".format(name))
                break
            else:
                avail_address = self.address_space.pop()
                avail_block = Statics.get_block_size_from_cidr(
                    avail_address.prefixlen, self.version)

                if avail_block == block:
                    add(name, avail_address)
                    break
                elif avail_block > block:
                    new_subnet = ipaddress.ip_network("{}/{}".format(
                        avail_address[0], self.get_cidr_from_block(block)))
                    add(name, new_subnet)
                    get_split_addr(avail_address[0] + block,
                                   avail_address[0] + avail_block, cidr)
                    break
                else:
                    holder.append(avail_address)

        while holder:
            self.address_space.append(holder.pop())
        self.address_space = sorted(list(
            ipaddress.collapse_addresses(self.address_space)),
                                    key=lambda x: (-x.prefixlen))
Example #18
 def get_as_prefixes_effective(self, asn):
     """
     Returns the effective address space of given ASN by removing all overlaps among the prefixes
     :return: The effective prefixes resulting from removing overlaps of the given ASN's prefixes
     """
     prefixes = self.get_as_prefixes(asn)
     non_overlapping_prefixes = collapse_addresses([ip_network(i) for i in prefixes])
     return [i.compressed for i in non_overlapping_prefixes]
Example #19
 def networks(self):
     if not hasattr(self, '_networks'):
         self._networks = []
         for i, block in enumerate(self.blocks):
             self._networks += summarize_address_range(
                 IPv4Address(block['start']),
                 IPv4Address(block['end']))
         self._networks = list(collapse_addresses(self._networks))
     return self._networks
Example #20
def blackhole_routes_as_array():
    ip = IPRoute()
    array = []

    # Blackhole routes are type 6
    for r in ip.get_routes(type=6):
        array.append(ip_network('{}/{}'.format(r['attrs'][1][1],
                                               r['dst_len'])))
    return collapse_addresses(array)
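Contiguous /32 blackhole routes collapse into larger blocks, which is the point of the final collapse_addresses() call; a standalone sketch:

from ipaddress import ip_network, collapse_addresses

routes = [ip_network('203.0.113.0/32'), ip_network('203.0.113.1/32')]
print(list(collapse_addresses(routes)))  # [IPv4Network('203.0.113.0/31')]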
Example #21
 def block_summarize(self, block_prefix=""):
     if self._check() == 0:
         if block_prefix == "":
             prefixlen = self._min_prefixlen()
         else:
             prefixlen = block_prefix
         newPrefixNwList = [ip_network(x).supernet(new_prefix=prefixlen) for x in self]
         return list(collapse_addresses(newPrefixNwList))
Example #22
def gen_routes(srv):
    print('- Generating ccd routes for {}'.format(srv))

    ccd_dir = '/etc/openvpn/ccd-{}'.format(srv)

    if not os.path.exists(ccd_dir):
        print('-- Directory `{}` does not exist, creating a new one..'.format(
            ccd_dir))

        os.makedirs(ccd_dir)

    target_f = ccd_dir + '/DEFAULT'
    target = open(target_f, 'w')

    def keyd_to_list(f):
        content = json.loads(open('../json/' + f + '.json', 'r').read())
        list = []

        for arr in content:
            list.extend(content[arr])

        return list

    for addr in keyd_to_list('dns'):
        print('push "dhcp-option DNS %s"' % addr,
              'push "route %s"' % addr,
              sep="\n",
              file=target)

    print('-- Getting AS\'s nets..')

    as_nets = []

    for asn in keyd_to_list('asn-{}'.format(srv)):
        whois = os.popen('whois -h whois.radb.net -- "-i origin AS' + asn +
                         '" | grep -Eo "([0-9.]+){4}/[0-9]+"').read()

        as_nets.extend(whois.split())

    print('-- Collapsing nets..')

    ip4 = [ipaddress.IPv4Network(addr) for addr in as_nets]
    ip4_collapsed = ipaddress.collapse_addresses(ip4)

    count = 0
    for addr in ip4_collapsed:
        ip4_range = addr.with_netmask.replace('/', ' ')

        print('push "route {}"'.format(ip4_range), file=target)

        count += 1

    print('-- Networks before collapsing: {}, after collapsing: {}'.format(
        len(as_nets), count))

    target.close()
    print('\tSaved in', target_f)
Example #23
    def _get_nets_krnic(self, response):
        """
        The function for parsing network blocks from krnic whois data.

        Args:
            response: The response from the krnic server.

        Returns:
            List: A list of dictionaries containing keys: cidr, start, end.
        """

        nets = []

        # Iterate through all of the networks found, storing the CIDR value
        # and the start and end positions.
        for match in re.finditer(
                r'^(IPv4 Address)[\s]+:[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+?)'
                r'[^\S\n]\((.+?)\)|.+)$',
                response,
                re.MULTILINE
        ):

            try:

                net = copy.deepcopy(BASE_NET)
                net['range'] = match.group(2)

                if match.group(3) and match.group(4):

                    addrs = []
                    addrs.extend(summarize_address_range(
                        ip_address(match.group(3).strip()),
                        ip_address(match.group(4).strip())))

                    cidr = ', '.join(
                        [i.__str__() for i in collapse_addresses(addrs)]
                    )

                    net['range'] = '{0} - {1}'.format(
                        match.group(3), match.group(4)
                    )

                else:

                    cidr = ip_network(match.group(2).strip()).__str__()

                net['cidr'] = cidr
                net['start'] = match.start()
                net['end'] = match.end()
                nets.append(net)

            except (ValueError, TypeError):

                pass

        return nets
Example #24
def run_old():
    testdata = [ip_network(line.strip()) for line in open('testdata.txt')]
    prof = cProfile.Profile()
    prof.enable()
    result = collapse_addresses(testdata)
    prof.disable()
    prof.dump_stats('old-algo.stats')
    resultlist = [str(_) for _ in result]
    with open("old_result.txt", "w") as fp:
        fp.write('\n'.join(resultlist))
Example #25
def format_net(networks):
    networks = collapse_addresses(networks)

    results = {}
    for net in networks:
        address = struct.unpack("!I", net.network_address.packed)[0]
        cidr = net.prefixlen
        prefix = address >> (32 - cidr)
        results[f"{prefix}/{cidr}"] = 1
    return json.dumps(results)
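A sketch of the packing above: the network address is unpacked to an integer and shifted right so only the significant prefix bits remain:

import struct
import ipaddress

net = ipaddress.ip_network('192.168.0.0/16')
address = struct.unpack('!I', net.network_address.packed)[0]  # 3232235520
prefix = address >> (32 - net.prefixlen)                      # top 16 bits
print('{}/{}'.format(prefix, net.prefixlen))                  # 49320/16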
Example #26
 def convert(self):
     self.cidrs = []
     self.collapsedCidrs = []
     for ipRange in self.ipRanges:
         for cidr in netaddr.iprange_to_cidrs(ipRange['Start'],
                                              ipRange['End']):
             self.cidrs.append(ipaddress.ip_network(cidr))
     for cidr in ipaddress.collapse_addresses(self.cidrs):
         self.collapsedCidrs.append(cidr)
     return self
Example #27
def file_as_array(filename):
    print('Reading {} ... '.format(filename))
    array = []
    if filename.exists():
        with filename.open() as f:
            for line in f:
                line = line.strip()
                # keep unique CIDR lines, skipping comments and blanks
                if not any((is_comment(line), is_blank(line))):
                    if is_cidr(line) and line not in array:
                        array.append(line)
    # collapse_addresses() expects network objects, not strings
    return collapse_addresses([ip_network(a) for a in array])
Example #28
def main():
    networks = get_networks_from_asn(ASN_LIST)

    ipv4_networks = [n for n in networks if n.version == 4]
    ipv6_networks = [n for n in networks if n.version == 6]
    print('Received: {} IPv4 networks, {} IPv6 networks.'.format(
        len(ipv4_networks),
        len(ipv6_networks),
    ))

    collapsed_ipv4 = list(ipaddress.collapse_addresses(ipv4_networks))
    collapsed_ipv6 = list(ipaddress.collapse_addresses(ipv6_networks))
    print('After collapsing: {} IPv4 networks, {} IPv6 networks.'.format(
        len(collapsed_ipv4),
        len(collapsed_ipv6),
    ))

    if CCD_FILEPATH:
        write_ovpn_ccd(chain(collapsed_ipv4, collapsed_ipv6), CCD_FILEPATH)
    else:
        print('"HOSTSUPDATE_CCD_FILEPATH" environment variable is not found.')
Example #29
def consolidate_networks(networks):
    # Split to IPv4 and IPv6 ranges
    ipv4_networks = []
    ipv6_networks = []
    for network in networks:
        if isinstance(network, str):
            # Convert string to IpNetwork
            network = ipaddress.ip_network(network)

        if network.version == 4:
            ipv4_networks.append(network)
        else:
            ipv6_networks.append(network)

    # Collapse ranges
    networks_to_keep = list(
        map(str, ipaddress.collapse_addresses(ipv4_networks)))
    networks_to_keep.extend(
        map(str, ipaddress.collapse_addresses(ipv6_networks)))

    return networks_to_keep
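Assuming the definition above, a small usage example mixing strings and network objects:

import ipaddress

mixed = ['10.0.0.0/25', ipaddress.ip_network('10.0.0.128/25'), '2001:db8::/32']
print(consolidate_networks(mixed))  # ['10.0.0.0/24', '2001:db8::/32']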
Example #30
def ip_main(sourcefile, outputfile, *args, **kwargs):
    try:
        with open(sourcefile, 'r', encoding='utf-8') as f:
            allip = f.readlines()
        ip = [ipaddress.IPv4Network(net.strip('\n ')) for net in allip]
        ip2 = list(ipaddress.collapse_addresses(ip))

        with open(outputfile, 'w', encoding='utf-8') as f1:
            for i in ip2:
                f1.write(str(i) + '\n')
    except Exception as e:
        sys.exit(e)
Example #31
    def _get_nets_other(self, response):
        """
        The function for parsing network blocks from generic whois data.

        Args:
            response: The response from the whois/rwhois server.

        Returns:
            List: A list of dictionaries containing keys: cidr, start, end.
        """

        nets = []

        # Iterate through all of the networks found, storing the CIDR value
        # and the start and end positions.
        for match in re.finditer(
            r'^(inetnum|inet6num|route):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|'
                '.+)$',
            response,
            re.MULTILINE
        ):

            try:

                net = copy.deepcopy(BASE_NET)
                net['range'] = match.group(2)

                if match.group(3) and match.group(4):

                    addrs = []
                    addrs.extend(summarize_address_range(
                        ip_address(match.group(3).strip()),
                        ip_address(match.group(4).strip())))

                    cidr = ', '.join(
                        [i.__str__() for i in collapse_addresses(addrs)]
                    )

                else:

                    cidr = ip_network(match.group(2).strip()).__str__()

                net['cidr'] = cidr
                net['start'] = match.start()
                net['end'] = match.end()
                nets.append(net)

            except (ValueError, TypeError):

                pass

        return nets
Example #32
def updateRealmScope(networkEntriesForRealm, logger):
    collapsed = None
    networkList = []
    for entry in networkEntriesForRealm:
        parseEntry(networkList, entry.data, logger)  # insert logger
    ## Get a unique set, then collapse to the smallest number of entries
    collapsed = list(ipaddress.collapse_addresses(set(networkList)))

    ## end updateRealmScope
    return collapsed
Example #33
    def __init__(self, ips):
        """
        Constructor.

        Params:
            ips: An iterable of network specifications in either CIDR notation
                or with the network and subnet masks separated by a slash, for
                example: 127.0.0.0/8 and 127.0.0.0/255.0.0.0 would be
                equivalent.
        """
        self._ipv4 = []
        self._ipv6 = []

        for ip in ips:
            network = ipaddress.ip_network(ip, False)
            if isinstance(network, IPv4Network):
                self._ipv4.append(network)
            else:
                self._ipv6.append(network)

        self._ipv4 = set(ipaddress.collapse_addresses(self._ipv4))
        self._ipv6 = set(ipaddress.collapse_addresses(self._ipv6))
Example #34
def getBlockedIPCount(connstr):
    bldt = BlockData(connstr)
    ips = [
        ipaddress.ip_network(addr)
        for addr in bldt.getBlockedResourcesSet('ip')
    ]
    ipsubs = [
        ipaddress.ip_network(addr)
        for addr in bldt.getBlockedResourcesSet('ipsubnet')
    ]
    ipNum = sum(
        map(lambda x: x.num_addresses,
            ipaddress.collapse_addresses(ips + ipsubs)))
    return ipNum
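The collapse before summing avoids double-counting an ip entry that already falls inside a blocked subnet; a standalone sketch:

import ipaddress

nets = [ipaddress.ip_network('192.0.2.1/32'), ipaddress.ip_network('192.0.2.0/24')]
print(sum(n.num_addresses for n in ipaddress.collapse_addresses(nets)))  # 256, not 257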
Example #35
def spanning_network(networks):
    if not networks:
        raise ValueError("List of networks is empty")
    if len(networks) == 1:
        return networks[0]

    sorter = operator.attrgetter("num_addresses")
    while True:
        networks = sorted(
            ipaddress.collapse_addresses(networks), key=sorter, reverse=True)

        if len(networks) == 1:
            return networks[0]

        networks[-1] = networks[-1].supernet()
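Assuming the definition above: the smallest network is repeatedly widened to its supernet until everything collapses into a single spanning block:

import ipaddress

nets = [ipaddress.ip_network('10.0.0.0/24'), ipaddress.ip_network('10.0.3.0/24')]
print(spanning_network(nets))  # 10.0.0.0/22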
Example #36
def fetch_ip_data():
    print("Fetching data from apnic.net, please wait...", file=sys.stderr)
    url = 'http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest'
    try:
        data = subprocess.check_output(['wget', url, '-O-']).decode()
    except (OSError, AttributeError):
        response = urllib.request.urlopen(url)
        data = response.read().decode("UTF-8")

    cnregex = re.compile(r'^apnic\|cn\|ipv4\|[\d\.]+\|\d+\|\d+\|a\w*$',
                         re.I | re.M)
    cndata = cnregex.findall(data)

    results = []

    for item in cndata:
        unit_items = item.split('|')
        starting_ip = unit_items[3]
        num_ip = int(unit_items[4])

        imask = 0xffffffff ^ (num_ip - 1)
        imask = hex(imask)[2:]

        mask = [imask[i:i + 2] for i in range(0, 8, 2)]
        mask = '.'.join([str(int(i, 16)) for i in mask])

        cidr = 32 - int(math.log(num_ip, 2))

        results.append((starting_ip, mask, cidr))

    print("Collapsing addresses, please wait...", file=sys.stderr)
    networks = []
    for ip, _, mask in results:
        networks.append(ipaddress.ip_network("%s/%s" % (ip, mask)))
    results = []
    for net in ipaddress.collapse_addresses(networks):
        results.append((str(net.network_address),
                        str(net.netmask), net.prefixlen))

    return results
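A sketch of the netmask arithmetic above: APNIC reports an address count per line, which maps to a mask and prefix length like so:

import math

num_ip = 1024
imask = 0xffffffff ^ (num_ip - 1)     # 0xfffffc00 -> 255.255.252.0
cidr = 32 - int(math.log(num_ip, 2))  # 22
print(hex(imask), cidr)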
Example #37
def merge_ip_nets(ip_net):
    summ_net = list(ipaddress.collapse_addresses(ip_net))
    print(summ_net)
    return summ_net
Example #38
    def lookup(self, inc_raw = False):
        """
        The function for retrieving and parsing whois information for an IP address via port 43 (WHOIS).
        
        Args:
            inc_raw: Boolean for whether to include the raw whois results in the returned dictionary.
    
        Returns:
            Dictionary: A dictionary containing the following keys:
                    query (String) - The IP address.
                    asn (String) - The Autonomous System Number.
                    asn_date (String) - The ASN Allocation date.
                    asn_registry (String) - The assigned ASN registry.
                    asn_cidr (String) - The assigned ASN CIDR.
                    asn_country_code (String) - The assigned ASN country code.
                    nets (List) - Dictionaries containing network information which consists of the fields 
                                listed in the NIC_WHOIS dictionary. Certain IPs have more granular network listings, 
                                hence the need for a list object.
                    raw (String) - Raw whois results if the inc_raw parameter is True.
        """
        
        #Attempt to resolve ASN info via Cymru. DNS is faster, so try that first.
        try:
            
            asn_data = self.get_asn_dns()
        
        except ASNLookupError:

            asn_data = self.get_asn_whois()
        
        #Create the return dictionary.   
        results = {
                   'query': self.address_str,
                   'nets': [],
                   'raw': None
        }
        
        #Add the ASN information to the return dictionary.
        results.update(asn_data)
        
        #Retrieve the whois data.
        response = self.get_whois(results['asn_registry'])
        
        #If the inc_raw parameter is True, add the response to the return dictionary.
        if inc_raw:
            
            results['raw'] = response
        
        #Create the network dictionary template. The start and end fields will be removed in the final returned dictionary.
        base_net = {
              'cidr': None,
              'name': None,
              'description': None,
              'country': None,
              'state': None,
              'city': None,
              'address': None,
              'postal_code': None,
              'start': None,
              'end': None
              }
        
        nets = []
        
        if results['asn_registry'] == 'arin': 
            
            #Iterate through all of the networks found, storing the CIDR value and the start and end positions.
            for match in re.finditer(r'^CIDR:[^\S\n]+(.+?,[^\S\n].+|.+)$', response, re.MULTILINE):
                
                try:

                    net = base_net.copy()
                    net['cidr'] = ', '.join([ipaddress.ip_network(c.strip()).__str__() for c in match.group(1).split(', ')])
                    net['start'] = match.start()
                    net['end'] = match.end()
                    nets.append(net)
                    
                except Exception:
                    
                    pass
        
        #Future fix: LACNIC has to be special and shorten inetnum field (no validity testing done for these).
        elif results['asn_registry'] == 'lacnic':
            
            #Iterate through all of the networks found, storing the CIDR value and the start and end positions.
            for match in re.finditer(r'^(inetnum|inet6num):[^\S\n]+(.+?,[^\S\n].+|.+)$', response, re.MULTILINE):
                
                try:
                    
                    cidr = match.group(2).strip()
                        
                    net = base_net.copy()
                    net['cidr'] = cidr
                    net['start'] = match.start()
                    net['end'] = match.end()
                    nets.append(net)
                    
                except Exception:
                    
                    pass

        else:
            
            #Iterate through all of the networks found, storing the CIDR value and the start and end positions.
            for match in re.finditer(r'^(inetnum|inet6num):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|.+)$', response, re.MULTILINE):
                
                try:
                    
                    if match.group(3) and match.group(4):
                        
                        addrs = []
                        addrs.extend(ipaddress.summarize_address_range(ipaddress.ip_address(match.group(3).strip()), ipaddress.ip_address(match.group(4).strip())))
                        
                        cidr = ', '.join([i.__str__() for i in ipaddress.collapse_addresses(addrs)])
                            
                    else:
                        
                        cidr = ipaddress.ip_network(match.group(2).strip()).__str__()
                        
                    net = base_net.copy()
                    net['cidr'] = cidr
                    net['start'] = match.start()
                    net['end'] = match.end()
                    nets.append(net)
                    
                except Exception:
                    
                    pass
        
        #Iterate through all of the network sections and parse out the appropriate fields for each.
        for index, net in enumerate(nets):
                
            end = None
            if index + 1 < len(nets):
                
                end = nets[index + 1]['start']
            
            for field in NIC_WHOIS[results['asn_registry']]['fields']:

                pattern = re.compile(r'' + NIC_WHOIS[results['asn_registry']]['fields'][field], re.MULTILINE)
            
                if end:
                    
                    match = pattern.finditer(response, net['end'], end)
                    
                else:
                    
                    match = pattern.finditer(response, net['end'])
                
                value = ''
                sub_end = None
                for m in match:
                    
                    if sub_end:

                        if sub_end != (m.start()-1):
                            
                            break 
                        
                    if value != '':
                        
                        value += '\n'
                        
                    value += m.group(2).strip()
                    
                    sub_end = m.end()
                    
                if value != '':
                    
                    if field == 'country':
                        
                        value = value.upper()
                        
                    net[field] = value
            
            #The start and end values are no longer needed.
            del net['start'], net['end']
        
        #Add the networks to the return dictionary.  
        results['nets'] = nets

        return results
Example #39
    def push_config_to_apic(self):
        """
        Push the configuration to the APIC

        :return: Requests Response instance indicating success or not
        """
        THROTTLE_SIZE = 500000 / 8
        # Set the tenant name correctly
        if self._tenant_name == '' and self.cdb.has_context_config():
            self.set_tenant_name(self.cdb.get_context_config().tenant_name)
        elif self._tenant_name == '':
            self.set_tenant_name('acitoolkit')

        # Find all the unique contract providers
        logging.debug('Finding the unique contract providers')
        unique_providers = {}
        for provided_policy in self.cdb.get_contract_policies():
            if provided_policy.dst_id not in unique_providers:
                unique_providers[provided_policy.dst_id] = 0
            else:
                unique_providers[provided_policy.dst_id] += 1
        logging.debug('Found %s unique contract providers', len(unique_providers))

        # Find any duplicate contracts that this provider is providing (remove)
        logging.debug('Finding any duplicate contracts')
        duplicate_policies = []
        for provider in unique_providers:
            for provided_policy in self.cdb.get_contract_policies():
                if provided_policy in duplicate_policies:
                    continue
                if provider in provided_policy.dst_ids:
                    for other_policy in self.cdb.get_contract_policies():
                        if other_policy == provided_policy or other_policy in duplicate_policies:
                            continue
                        if other_policy.dst_ids == provided_policy.dst_ids and other_policy.has_same_permissions(provided_policy):
                            provided_policy.src_ids = provided_policy.src_ids + other_policy.src_ids
                            duplicate_policies.append(other_policy)
                            logging.debug('duplicate_policies now has %s entries', len(duplicate_policies))

        logging.debug('Removing duplicate contracts')
        for duplicate_policy in duplicate_policies:
            self.cdb.remove_contract_policy(duplicate_policy)

        if not self.displayonly:
            # Log on to the APIC
            apic_cfg = self.cdb.get_apic_config()
            apic = Session(apic_cfg.url, apic_cfg.user_name, apic_cfg.password)
            resp = apic.login()
            if not resp.ok:
                return resp

        logging.debug('Generating JSON....')
        # Push all of the Contracts
        logging.debug('Pushing contracts. # of Contract policies: %s', len(self.cdb.get_contract_policies()))
        tenant = Tenant(self._tenant_name)
        for contract_policy in self.cdb.get_contract_policies():
            name = contract_policy.src_name + '::' + contract_policy.dst_name
            contract = Contract(name, tenant)
            contract.descr = contract_policy.descr[0:127 - (contract_policy.descr.count('"') + contract_policy.descr.count("'") + contract_policy.descr.count('/'))]
            for whitelist_policy in contract_policy.get_whitelist_policies():
                entry_name = whitelist_policy.proto + '.' + whitelist_policy.port_min + '.' + whitelist_policy.port_max
                if whitelist_policy.proto == '6' or whitelist_policy.proto == '17':
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        dFromPort=whitelist_policy.port_min,
                                        dToPort=whitelist_policy.port_max,
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        sFromPort='unspecified',
                                        sToPort='unspecified',
                                        tcpRules='unspecified',
                                        parent=contract)
                else:
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        parent=contract)
            if not self.displayonly:
                if len(str(tenant.get_json())) > THROTTLE_SIZE:
                    logging.debug('Throttling contracts. Pushing config...')
                    resp = tenant.push_to_apic(apic)
                    if not resp.ok:
                        return resp
                    tenant = Tenant(self._tenant_name)

        if self.displayonly:
            print(json.dumps(tenant.get_json(), indent=4, sort_keys=True))
        else:
            logging.debug('Pushing remaining contracts')
            resp = tenant.push_to_apic(apic)
            if not resp.ok:
                return resp

        # Push all of the EPGs
        logging.debug('Pushing EPGs')
        if not self.displayonly:
            tenant = Tenant(self._tenant_name)
        app = AppProfile(self._app_name, tenant)

        if self._use_ip_epgs:
            # Create a Base EPG
            base_epg = EPG('base', app)
            if self.cdb.has_context_config():
                context_name = self.cdb.get_context_config().name
            else:
                context_name = 'vrf1'
            context = Context(context_name, tenant)
            bd = BridgeDomain('bd', tenant)
            bd.add_context(context)
            base_epg.add_bd(bd)
            if self.displayonly:
                # If display only, just deploy the EPG to leaf 101
                base_epg.add_static_leaf_binding('101', 'vlan', '1', encap_mode='untagged')
            else:
                # Deploy the EPG to all of the leaf switches
                nodes = Node.get(apic)
                for node in nodes:
                    if node.role == 'leaf':
                        base_epg.add_static_leaf_binding(node.node, 'vlan', '1', encap_mode='untagged')

            # Create the Attribute based EPGs
            logging.debug('Creating Attribute Based EPGs')
            for epg_policy in self.cdb.get_epg_policies():
                if not self.displayonly:
                    # Check if we need to throttle very large configs
                    if len(str(tenant.get_json())) > THROTTLE_SIZE:
                        resp = tenant.push_to_apic(apic)
                        if not resp.ok:
                            return resp
                        tenant = Tenant(self._tenant_name)
                        app = AppProfile(self._app_name, tenant)
                        context = Context(context_name, tenant)
                        bd = BridgeDomain('bd', tenant)
                        bd.add_context(context)
                        if self._use_ip_epgs:
                            base_epg = EPG('base', app)
                            base_epg.add_bd(bd)
                epg = EPG(epg_policy.name, app)

                # Check if the policy has the default 0.0.0.0 IP address
                no_default_endpoint = True
                for node_policy in epg_policy.get_node_policies():
                    if node_policy.ip == '0.0.0.0' and node_policy.prefix_len == 0:
                        no_default_endpoint = False
                        epg.add_bd(bd)

                # Add all of the IP addresses
                if no_default_endpoint:
                    epg.is_attributed_based = True
                    epg.set_base_epg(base_epg)
                    criterion = AttributeCriterion('criterion', epg)
                    ipaddrs = []
                    for node_policy in epg_policy.get_node_policies():
                        ipaddr = ipaddress.ip_address(unicode(node_policy.ip))
                        if not ipaddr.is_multicast: # Skip multicast addresses. They cannot be IP based EPGs
                            ipaddrs.append(ipaddr)
                    nets = ipaddress.collapse_addresses(ipaddrs)
                    for net in nets:
                        criterion.add_ip_address(str(net))
                epg.descr = epg_policy.descr[0:127]
                # Consume and provide all of the necessary contracts
                for contract_policy in self.cdb.get_contract_policies():
                    contract = None
                    if epg_policy.id in contract_policy.src_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        contract = Contract(name, tenant)
                        epg.consume(contract)
                    if epg_policy.id in contract_policy.dst_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        if contract is None:
                            contract = Contract(name, tenant)
                        epg.provide(contract)
        else:
            logging.debug('Creating EPGs')
            for epg_policy in self.cdb.get_epg_policies():
                epg = EPG(epg_policy.name, app)
                epg.descr = epg_policy.descr[0:127]
                # Consume and provide all of the necessary contracts
                for contract_policy in self.cdb.get_contract_policies():
                    contract = None
                    if epg_policy.id in contract_policy.src_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        contract = Contract(name, tenant)
                        epg.consume(contract)
                    if epg_policy.id in contract_policy.dst_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        if contract is None:
                            contract = Contract(name, tenant)
                        epg.provide(contract)

        if self.displayonly:
            print(json.dumps(tenant.get_json(), indent=4, sort_keys=True))
        else:
            resp = tenant.push_to_apic(apic)
            return resp
Example #40
    def lookup_rws(self, inc_raw = False):
        """
        The function for retrieving and parsing whois information for an IP address via HTTP (Whois-RWS).
        
        NOTE: This should be faster than IPWhois.lookup(), but may not be as reliable. APNIC, LACNIC, and AFRINIC
            do not have a Whois-RWS service yet. We have to rely on the Ripe RWS service, which does not contain all
            of the data we need.
            
        Args:
            inc_raw: Boolean for whether to include the raw whois results in the returned dictionary.
    
        Returns:
            Dictionary: A dictionary containing the following keys:
                    query (String) - The IP address.
                    asn (String) - The Autonomous System Number.
                    asn_date (String) - The ASN Allocation date.
                    asn_registry (String) - The assigned ASN registry.
                    asn_cidr (String) - The assigned ASN CIDR.
                    asn_country_code (String) - The assigned ASN country code.
                    nets (List) - Dictionaries containing network information which consists of the fields 
                                listed in the NIC_WHOIS dictionary. Certain IPs have more granular network listings, 
                                hence the need for a list object.
                    raw (Dictionary) - Whois results in Json format if the inc_raw parameter is True.
        """
        
        #Attempt to resolve ASN info via Cymru. DNS is faster, so try that first.
        try:
            
            asn_data = self.get_asn_dns()
        
        except ASNLookupError:

            asn_data = self.get_asn_whois()
        
        #Create the return dictionary.   
        results = {
                   'query': self.address_str,
                   'nets': [],
                   'raw': None
        }
        
        #Add the ASN information to the return dictionary.
        results.update(asn_data)
        
        #Retrieve the whois data.
        try:
            
            response = self.get_rws(NIC_WHOIS[results['asn_registry']]['url'].format(self.address_str))
        
        #If the query failed, try the radb-grs source.
        except WhoisLookupError:
            
            response = self.get_rws('http://apps.db.ripe.net/whois/grs-search?query-string={0}&source=radb-grs'.format(self.address_str))

        # If the inc_raw parameter is True, add the response to the return dictionary.
        if inc_raw:
            
            results['raw'] = response
        
        # Create the network dictionary template.
        base_net = {
              'cidr': None,
              'name': None,
              'description': None,
              'country': None,
              'state': None,
              'city': None,
              'address': None,
              'postal_code': None
              }
        
        nets = []
        
        if results['asn_registry'] == 'arin': 
            
            try:
                
                net_list = response['nets']['net']
                
                if not isinstance(net_list, list):
                    
                    net_list = [net_list]
                                    
                for n in net_list:
                    
                    if 'orgRef' in n and n['orgRef']['@handle'] in ('ARIN', 'VR-ARIN'):
                        
                        continue
                            
                    addrs = []
                    addrs.extend(ipaddress.summarize_address_range(
                        ipaddress.ip_address(n['startAddress']['$'].strip()),
                        ipaddress.ip_address(n['endAddress']['$'].strip())))
                        
                    net = base_net.copy()
                    net['cidr'] = ', '.join([i.__str__() for i in ipaddress.collapse_addresses(addrs)])
                    
                    if 'name' in n:
                        
                        net['name'] = n['name']['$'].strip()
                    
                    ref = None
                    if 'customerRef' in n:
                        
                        ref = ['customerRef', 'customer']
                        
                    elif 'orgRef' in n:
                        
                        ref = ['orgRef', 'org']
                        
                    if ref is not None:
                        
                        net['description'] = n[ref[0]]['@name'].strip()
                        ref_url = n[ref[0]]['$'].strip()
                        
                        ref_response = self.get_rws(ref_url)
                        
                        if ref_response:
                            
                            if 'streetAddress' in ref_response[ref[1]]:
                                
                                addr_list = ref_response[ref[1]]['streetAddress']['line']
                
                                if not isinstance(addr_list, list):
                                    
                                    addr_list = [addr_list]
                    
                                net['address'] = '\n'.join([line['$'].strip() for line in addr_list])
                                
                            if 'postalCode' in ref_response[ref[1]]:
                                
                                net['postal_code'] = ref_response[ref[1]]['postalCode']['$']
                                
                            if 'city' in ref_response[ref[1]]:
                                
                                net['city'] = ref_response[ref[1]]['city']['$']
                                
                            if 'iso3166-1' in ref_response[ref[1]]:
                                
                                net['country'] = ref_response[ref[1]]['iso3166-1']['code2']['$']
                                
                            if 'iso3166-2' in ref_response[ref[1]]:
                                
                                net['state'] = ref_response[ref[1]]['iso3166-2']['$']

                    nets.append(net)
                    
            except Exception:
                
                pass
            
        else:
            
            try:
                
                object_list = response['whois-resources']['objects']['object']
                
                if not isinstance(object_list, list):
                    
                    object_list = [object_list]
                    
                for n in object_list:

                    if n['type'] in ('inetnum', 'inet6num', 'route', 'route6'):
                        
                        net = base_net.copy()
                        
                        for attr in n['attributes']['attribute']:
                            
                            if attr['name'] in ('inetnum', 'inet6num'):
                                
                                ipr = attr['value'].strip()
                                ip_range = ipr.split(' - ')
                                
                                try:
                                    
                                    if len(ip_range) > 1:
                                        
                                        addrs = []
                                        addrs.extend(ipaddress.summarize_address_range(
                                            ipaddress.ip_address(ip_range[0]),
                                            ipaddress.ip_address(ip_range[1])))
                                            
                                        cidr = ', '.join([i.__str__() for i in ipaddress.collapse_addresses(addrs)])
                                        
                                    else:
                                        
                                        cidr = ipaddress.ip_network(ip_range[0]).__str__()
                                    
                                    net['cidr'] = cidr
                                    
                                except (ValueError, TypeError):
                                    
                                    pass
                                
                            elif attr['name'] in ('route', 'route6'):
                                
                                ipr = attr['value'].strip()
                                ip_ranges = ipr.split(', ')
                                
                                try:

                                    net['cidr'] = ', '.join(ipaddress.ip_network(r).__str__() for r in ip_ranges)   
                                    
                                except ValueError:
                                    
                                    pass
                                
                            elif attr['name'] == 'netname':
                                
                                net['name'] = attr['value'].strip()
                            
                            elif attr['name'] == 'descr':
                                
                                if net['description']:
                                    
                                    net['description'] += '\n%s' % attr['value'].strip()
                                    
                                else:
                                    
                                    net['description'] = attr['value'].strip()
                                
                            elif attr['name'] == 'country':
                                
                                net['country'] = attr['value'].strip()
                                
                            elif attr['name'] == 'address':
                                
                                if net['address']:
                                    
                                    net['address'] += '\n%s' % attr['value'].strip()
                                    
                                else:
                                    
                                    net['address'] = attr['value'].strip()
                                
                        nets.append(net)
                        
                        break
                    
            except Exception:
                
                pass
            
        # Add the networks to the return dictionary.
        results['nets'] = nets

        return results
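
The core of the ARIN branch above is the range-to-CIDR conversion: summarize_address_range turns a start/end pair into networks and collapse_addresses merges them. A standalone run with a made-up registration range:

import ipaddress

start = ipaddress.ip_address('192.0.2.0')
end = ipaddress.ip_address('192.0.3.255')
addrs = list(ipaddress.summarize_address_range(start, end))
print(', '.join(str(n) for n in ipaddress.collapse_addresses(addrs)))
# -> 192.0.2.0/23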
Example #45
    def lookup(self, inc_raw=False, retry_count=3):
        """
        The function for retrieving and parsing whois information for an IP
        address via port 43 (WHOIS).

        Args:
            inc_raw: Boolean for whether to include the raw whois results in
                the returned dictionary.
            retry_count: The number of times to retry in case socket errors,
                timeouts, connection resets, etc. are encountered.

        Returns:
            Dictionary: A dictionary containing the following keys:
                    query (String) - The IP address.
                    asn (String) - The Autonomous System Number.
                    asn_date (String) - The ASN Allocation date.
                    asn_registry (String) - The assigned ASN registry.
                    asn_cidr (String) - The assigned ASN CIDR.
                    asn_country_code (String) - The assigned ASN country code.
                    nets (List) - Dictionaries containing network information
                        which consists of the fields listed in the NIC_WHOIS
                        dictionary. Certain IPs have more granular network
                        listings, hence the need for a list object.
                    raw (String) - Raw whois results if the inc_raw parameter
                        is True.
        """

        # Initialize the response.
        response = None

        # Attempt to resolve ASN info via Cymru. DNS is faster, try that first.
        try:

            asn_data = self.get_asn_dns()

        except (ASNLookupError, ASNRegistryError):

            try:

                asn_data = self.get_asn_whois(retry_count)

            except (ASNLookupError, ASNRegistryError):

                # Lets attempt to get the ASN registry information from ARIN.
                response = self.get_whois("arin", retry_count)

                asn_data = {
                    "asn_registry": None,
                    "asn": None,
                    "asn_cidr": None,
                    "asn_country_code": None,
                    "asn_date": None,
                }

                matched = False
                for match in re.finditer(r"^ReferralServer:[^\S\n]+(.+)$", response, re.MULTILINE):

                    matched = True

                    try:

                        referral = match.group(1)
                        referral = referral.replace(":43", "")

                        asn_data["asn_registry"] = ASN_REFERRALS[referral]

                    except KeyError:

                        raise ASNRegistryError("ASN registry lookup failed.")

                    break

                if not matched:

                    asn_data["asn_registry"] = "arin"

        # Create the return dictionary.
        results = {"query": self.address_str, "nets": [], "raw": None}

        # Add the ASN information to the return dictionary.
        results.update(asn_data)

        # Only fetch the response if we haven't already.
        if response is None or results["asn_registry"] != "arin":

            # Retrieve the whois data.
            response = self.get_whois(results["asn_registry"], retry_count)

        # If inc_raw parameter is True, add the response to return dictionary.
        if inc_raw:

            results["raw"] = response

        nets = []

        if results["asn_registry"] == "arin":

            # Iterate through all of the networks found, storing the CIDR value
            # and the start and end positions.
            for match in re.finditer(r"^CIDR:[^\S\n]+(.+?,[^\S\n].+|.+)$", response, re.MULTILINE):

                try:

                    net = BASE_NET.copy()
                    net["cidr"] = ", ".join([ip_network(c.strip()).__str__() for c in match.group(1).split(", ")])
                    net["start"] = match.start()
                    net["end"] = match.end()
                    nets.append(net)

                except ValueError:

                    pass

        elif results["asn_registry"] == "lacnic":

            # Iterate through all of the networks found, storing the CIDR value
            # and the start and end positions.
            for match in re.finditer(r"^(inetnum|inet6num):[^\S\n]+(.+?,[^\S\n].+|.+)$", response, re.MULTILINE):

                try:

                    temp = []
                    for addr in match.group(2).strip().split(", "):

                        count = addr.count(".")
                        if count != 0 and count < 4:

                            addr_split = addr.strip().split("/")
                            for i in range(count + 1, 4):
                                addr_split[0] += ".0"

                            addr = "/".join(addr_split)

                        temp.append(ip_network(addr.strip()).__str__())

                    net = BASE_NET.copy()
                    net["cidr"] = ", ".join(temp)
                    net["start"] = match.start()
                    net["end"] = match.end()
                    nets.append(net)

                except ValueError:

                    pass

        else:

            # Iterate through all of the networks found, storing the CIDR value
            # and the start and end positions.
            for match in re.finditer(
                r"^(inetnum|inet6num):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|.+)$", response, re.MULTILINE
            ):

                try:

                    if match.group(3) and match.group(4):

                        addrs = []
                        addrs.extend(
                            summarize_address_range(
                                ip_address(match.group(3).strip()), ip_address(match.group(4).strip())
                            )
                        )

                        cidr = ", ".join([i.__str__() for i in collapse_addresses(addrs)])

                    else:

                        cidr = ip_network(match.group(2).strip()).__str__()

                    net = BASE_NET.copy()
                    net["cidr"] = cidr
                    net["start"] = match.start()
                    net["end"] = match.end()
                    nets.append(net)

                except (ValueError, TypeError):

                    pass

        # Iterate through all of the network sections and parse out the
        # appropriate fields for each.
        for index, net in enumerate(nets):

            section_end = None
            if index + 1 < len(nets):

                section_end = nets[index + 1]["start"]

            for field in NIC_WHOIS[results["asn_registry"]]["fields"]:

                pattern = re.compile(str(NIC_WHOIS[results["asn_registry"]]["fields"][field]), re.MULTILINE)

                if section_end is not None:

                    match = pattern.finditer(response, net["end"], section_end)

                else:

                    match = pattern.finditer(response, net["end"])

                values = []
                sub_section_end = None
                for m in match:

                    if sub_section_end:

                        if field not in ("abuse_emails", "tech_emails", "misc_emails") and (
                            sub_section_end != (m.start() - 1)
                        ):

                            break

                    try:

                        values.append(m.group("val").strip())

                    except AttributeError:

                        values.append(m.group("val2").strip())

                    sub_section_end = m.end()

                if len(values) > 0:

                    try:

                        if field == "country":

                            value = values[0].upper()

                        elif field in ["created", "updated"]:

                            value = datetime.strptime(
                                values[0], str(NIC_WHOIS[results["asn_registry"]]["dt_format"])
                            ).isoformat("T")

                        else:

                            values = list(set(values))
                            value = "\n".join(values)

                    except ValueError:

                        value = None
                        pass

                    net[field] = value

            # The start and end values are no longer needed.
            del net["start"], net["end"]

        # Add the networks to the return dictionary.
        results["nets"] = nets

        return results
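
The LACNIC branch pads truncated dotted quads before handing them to ip_network. The same normalization in isolation, with a fabricated LACNIC-style value:

from ipaddress import ip_network

def pad_ipv4_cidr(addr):
    # LACNIC WHOIS may print '190.0/14'; append '.0' octets until there are 4.
    count = addr.count('.')
    if count != 0 and count < 4:
        addr_split = addr.strip().split('/')
        for _ in range(count + 1, 4):
            addr_split[0] += '.0'
        addr = '/'.join(addr_split)
    return str(ip_network(addr.strip()))

print(pad_ipv4_cidr('190.0/14'))  # 190.0.0.0/14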
Example #46
    def _lookup_rws_apnic(self, response=None):
        """
        The function for retrieving and parsing whois information for an APNIC
        IP address via HTTP (Whois-RWS).

        Args:
            response: The dictionary containing whois information to parse.

        Returns:
            List: Dictionaries containing network information which consists
                of the fields listed in the NIC_WHOIS dictionary. Certain IPs
                have more granular network listings, hence the need for a list
                object.
        """

        addrs = []
        net = BASE_NET.copy()

        try:

            addrs.extend(summarize_address_range(
                ip_address(response['startAddress'].strip()),
                ip_address(response['endAddress'].strip())))

            net['cidr'] = ', '.join(
                [i.__str__() for i in collapse_addresses(addrs)]
            )

        except (KeyError, ValueError, TypeError):

            pass

        try:

            net['country'] = str(response['country']).strip().upper()

        except KeyError:

            pass

        try:

            events = response['events']

            if not isinstance(events, list):

                events = [events]

        except KeyError:

            events = []

        for ev in events:

            try:

                if ev['eventAction'] == 'registration':

                    net['created'] = str(ev['eventDate']).strip()

                elif ev['eventAction'] == 'last changed':

                    net['updated'] = str(ev['eventDate']).strip()

            except (KeyError, ValueError):

                pass

        try:

            entities = response['entities']

            if not isinstance(entities, list):

                entities = [entities]

        except KeyError:

            entities = []

        for en in entities:

            try:

                temp = en['vcardArray'][1]

                for t in temp:

                    if 'administrative' in en['roles'] and t[0] == 'fn':

                        net['name'] = str(t[3]).strip()

                    elif 'administrative' in en['roles'] and t[0] == 'adr':

                        try:

                            net['address'] = str(t[1]['label']).strip()

                        except KeyError:

                            pass

                    elif t[0] == 'email':

                        key = None

                        if (len(en['roles']) > 1 or
                           en['roles'][0] == 'administrative'):

                            key = 'misc_emails'

                        elif en['roles'][0] == 'abuse':

                            key = 'abuse_emails'

                        elif en['roles'][0] == 'technical':

                            key = 'tech_emails'

                        if key is not None:

                            if net[key] is not None:

                                net[key] += '\n%s' % str(t[3]).strip()

                            else:

                                net[key] = str(t[3]).strip()

            except (KeyError, IndexError):

                pass

        try:

            remarks = response['remarks']

            if not isinstance(remarks, list):

                remarks = [remarks]

        except KeyError:

            remarks = []

        for rem in remarks:

            try:

                if rem['title'] == 'description':

                    net['description'] = str('\n'.join(rem['description']))

            except (KeyError, IndexError):

                pass

        return [net]
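
The entity loop above walks RDAP vCard arrays. A reduced version of just the email-routing logic, with a hypothetical entity payload:

def route_emails(entity):
    # Map an entity's email vCard entries to a bucket based on its roles.
    net = {'misc_emails': None, 'abuse_emails': None, 'tech_emails': None}
    for t in entity['vcardArray'][1]:
        if t[0] != 'email':
            continue
        if len(entity['roles']) > 1 or entity['roles'][0] == 'administrative':
            key = 'misc_emails'
        elif entity['roles'][0] == 'abuse':
            key = 'abuse_emails'
        elif entity['roles'][0] == 'technical':
            key = 'tech_emails'
        else:
            continue
        value = str(t[3]).strip()
        net[key] = value if net[key] is None else net[key] + '\n' + value
    return net

entity = {'roles': ['abuse'],
          'vcardArray': ['vcard', [['email', {}, 'text', 'abuse@example.net']]]}
print(route_emails(entity))  # abuse_emails -> 'abuse@example.net'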
Example #47
    def _lookup_rws_lacnic(self, response=None):
        """
        The function for retrieving and parsing whois information for a LACNIC
        IP address via HTTP (Whois-RWS).

        Args:
            response: The dictionary containing whois information to parse.

        Returns:
            List: Dictionaries containing network information which consists
                of the fields listed in the NIC_WHOIS dictionary. Certain IPs
                have more granular network listings, hence the need for a list
                object.
        """

        addrs = []
        net = BASE_NET.copy()

        try:

            addrs.extend(summarize_address_range(
                ip_address(response['startAddress'].strip()),
                ip_address(response['endAddress'].strip())))

            net['cidr'] = ', '.join(
                [i.__str__() for i in collapse_addresses(addrs)]
            )

        except (KeyError, ValueError, TypeError):

            pass

        try:

            net['country'] = str(response['country']).strip().upper()

        except KeyError:

            pass

        try:

            events = response['events']

            if not isinstance(events, list):

                events = [events]

        except KeyError:

            events = []

        for ev in events:

            try:

                if ev['eventAction'] == 'registration':

                    tmp = str(ev['eventDate']).strip()

                    value = datetime.strptime(
                        tmp,
                        str(NIC_WHOIS['lacnic']['dt_rws_format'])
                    ).isoformat('T')

                    net['created'] = value

                elif ev['eventAction'] == 'last changed':

                    tmp = str(ev['eventDate']).strip()

                    value = datetime.strptime(
                        tmp,
                        str(NIC_WHOIS['lacnic']['dt_rws_format'])
                    ).isoformat('T')

                    net['updated'] = value

            except (KeyError, ValueError):

                pass

        try:

            entities = response['entities']

            if not isinstance(entities, list):

                entities = [entities]

        except KeyError:

            entities = []

        for en in entities:

            try:

                if en['roles'][0] == 'registrant':

                    temp = en['vcardArray'][1]

                    for t in temp:

                        if t[0] == 'fn':

                            net['name'] = str(t[3]).strip()

                        elif t[0] == 'org':

                            net['description'] = str(t[3][0]).strip()

                        elif t[0] == 'adr':

                            net['address'] = str(t[1]['label']).strip()

                        elif t[0] == 'email':

                            net['misc_emails'] = str(t[3]).strip()

                elif en['roles'][0] == 'abuse':

                    temp = en['vcardArray'][1]

                    for t in temp:

                        if t[0] == 'email':

                            net['abuse_emails'] = str(t[3]).strip()

                elif en['roles'][0] == 'tech':

                    temp = en['vcardArray'][1]

                    for t in temp:

                        if t[0] == 'email':

                            net['tech_emails'] = str(t[3]).strip()

            except (KeyError, IndexError):

                pass

        return [net]
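
The event handling above reformats LACNIC timestamps with strptime/isoformat. A standalone run; the format string here is a hypothetical stand-in for NIC_WHOIS['lacnic']['dt_rws_format']:

from datetime import datetime

DT_RWS_FORMAT = '%Y%m%d'  # assumption; the real value lives in NIC_WHOIS

value = datetime.strptime('20101109', DT_RWS_FORMAT).isoformat('T')
print(value)  # 2010-11-09T00:00:00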
Example #48
    def _lookup_rws_arin(self, response=None, retry_count=3):
        """
        The function for retrieving and parsing whois information for an ARIN
        IP address via HTTP (Whois-RWS).

        Args:
            response: The dictionary containing whois information to parse.
            retry_count: The number of times to retry in case socket errors,
                timeouts, connection resets, etc. are encountered.

        Returns:
            List: Dictionaries containing network information which consists
                of the fields listed in the NIC_WHOIS dictionary. Certain IPs
                have more granular network listings, hence the need for a list
                object.
        """

        nets = []

        try:

            net_list = response['nets']['net']

            if not isinstance(net_list, list):

                net_list = [net_list]

        except KeyError:

            net_list = []

        for n in net_list:

            if 'orgRef' in n and n['orgRef']['@handle'] in ('ARIN', 'VR-ARIN'):

                continue

            addrs = []
            net = BASE_NET.copy()

            try:

                addrs.extend(summarize_address_range(
                    ip_address(n['startAddress']['$'].strip()),
                    ip_address(n['endAddress']['$'].strip())))

                net['cidr'] = ', '.join(
                    [i.__str__() for i in collapse_addresses(addrs)]
                )

            except (KeyError, ValueError, TypeError):

                pass

            for k, v in {
                'created': 'registrationDate',
                'updated': 'updateDate',
                'name': 'name'
            }.items():

                try:

                    net[k] = str(n[v]['$']).strip()

                except KeyError:

                    pass

            ref = None
            if 'customerRef' in n:

                ref = ['customerRef', 'customer']

            elif 'orgRef' in n:

                ref = ['orgRef', 'org']

            if ref is not None:

                try:

                    net['description'] = str(n[ref[0]]['@name']).strip()

                except KeyError:

                    pass

                try:

                    ref_url = n[ref[0]]['$'].strip() + '?showPocs=true'
                    ref_response = self.get_rws(ref_url, retry_count)

                except (KeyError, WhoisLookupError):

                    nets.append(net)
                    continue

                try:

                    addr_list = (
                        ref_response[ref[1]]['streetAddress']['line']
                    )

                    if not isinstance(addr_list, list):

                        addr_list = [addr_list]

                    net['address'] = '\n'.join(
                        [str(line['$']).strip() for line in addr_list]
                    )

                except KeyError:

                    pass

                for k, v in {
                    'postal_code': 'postalCode',
                    'city': 'city',
                    'state': 'iso3166-2'
                }.items():

                    try:

                        net[k] = str(ref_response[ref[1]][v]['$'])

                    except KeyError:

                        pass

                try:

                    net['country'] = (
                        str(ref_response[ref[1]]['iso3166-1']['code2']['$'])
                    ).upper()

                except KeyError:

                    pass

                try:

                    for poc in (
                        ref_response[ref[1]]['pocs']['pocLinkRef']
                    ):

                        if poc['@description'] in ('Abuse', 'Tech'):

                            poc_url = poc['$']
                            poc_response = self.get_rws(
                                poc_url,
                                retry_count
                            )

                            emails = poc_response['poc']['emails']['email']

                            if not isinstance(emails, list):

                                emails = [emails]

                            temp = []

                            for e in emails:

                                temp.append(str(e['$']).strip())

                            key = '%s_emails' % poc['@description'].lower()

                            net[key] = (
                                '\n'.join(set(temp)) if len(temp) > 0 else None
                            )

                except (KeyError, WhoisLookupError):

                    pass

            nets.append(net)

        return nets
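
The isinstance-or-wrap pattern (net lists, events, entities, emails, address lines) recurs throughout these examples; it factors cleanly into a helper, sketched here:

def as_list(value):
    # XML-to-dict conversions yield a bare dict for one element and a list
    # for several; normalize both shapes to a list.
    if value is None:
        return []
    return value if isinstance(value, list) else [value]

print(as_list({'$': '10.0.0.0'}))         # [{'$': '10.0.0.0'}]
print(as_list([{'$': 'a'}, {'$': 'b'}]))  # unchanged
print(as_list(None))                      # []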
Example #49
    def get_nets_other(self, response):
        """
        The function for parsing network blocks from generic whois data.

        Args:
            response (:obj:`str`): The response from the whois/rwhois server.

        Returns:
            list of dict: Mapping of networks with start and end positions.

            ::

                [{
                    'cidr' (str) - The network routing block
                    'start' (int) - The starting point of the network
                    'end' (int) - The endpoint point of the network
                }]
        """

        nets = []

        # Iterate through all of the networks found, storing the CIDR value
        # and the start and end positions.
        for match in re.finditer(
            r'^(inetnum|inet6num|route):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|.+)$',
            response,
            re.MULTILINE
        ):

            try:

                net = copy.deepcopy(BASE_NET)
                net_range = match.group(2).strip()

                try:

                    net['range'] = '{0} - {1}'.format(
                        ip_network(net_range)[0].__str__(),
                        ip_network(net_range)[-1].__str__()
                    ) if '/' in net_range else net_range

                except ValueError:  # pragma: no cover

                    net['range'] = net_range

                if match.group(3) and match.group(4):

                    addrs = []
                    addrs.extend(summarize_address_range(
                        ip_address(match.group(3).strip()),
                        ip_address(match.group(4).strip())))

                    cidr = ', '.join(
                        [i.__str__() for i in collapse_addresses(addrs)]
                    )

                else:

                    cidr = ip_network(net_range).__str__()

                net['cidr'] = cidr
                net['start'] = match.start()
                net['end'] = match.end()
                nets.append(net)

            except (ValueError, TypeError):

                pass

        return nets
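
A quick run of the pattern above against a fabricated whois fragment shows what get_nets_other extracts:

import re
from ipaddress import collapse_addresses, ip_address, summarize_address_range

response = (
    'inetnum:        203.0.113.0 - 203.0.113.255\n'
    'netname:        EXAMPLE-NET\n'
)

pattern = r'^(inetnum|inet6num|route):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|.+)$'
for match in re.finditer(pattern, response, re.MULTILINE):
    if match.group(3) and match.group(4):
        addrs = summarize_address_range(
            ip_address(match.group(3).strip()),
            ip_address(match.group(4).strip()))
        print(', '.join(str(n) for n in collapse_addresses(addrs)))
# -> 203.0.113.0/24

Note that this example uses copy.deepcopy(BASE_NET) where the others use BASE_NET.copy(); the deep copy matters if BASE_NET ever holds mutable values, since a shallow copy would share them across networks.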
Example #50
    def _lookup_rws_apnic(self, response=None):
        """
        The function for retrieving and parsing whois information for an APNIC
        IP address via HTTP (Whois-RWS).

        Args:
            response: The dictionary containing whois information to parse.

        Returns:
            List: Dictionaries containing network information which consists
                of the fields listed in the NIC_WHOIS dictionary. Certain IPs
                have more granular network listings, hence the need for a list
                object.
        """

        addrs = []
        net = BASE_NET.copy()

        try:

            addrs.extend(
                summarize_address_range(
                    ip_address(response["startAddress"].strip()), ip_address(response["endAddress"].strip())
                )
            )

            net["cidr"] = ", ".join([i.__str__() for i in collapse_addresses(addrs)])

        except (KeyError, ValueError, TypeError):

            pass

        try:

            net["country"] = str(response["country"]).strip().upper()

        except KeyError:

            pass

        try:

            events = response["events"]

            if not isinstance(events, list):

                events = [events]

        except KeyError:

            events = []

        for ev in events:

            try:

                if ev["eventAction"] == "registration":

                    net["created"] = str(ev["eventDate"]).strip()

                elif ev["eventAction"] == "last changed":

                    net["updated"] = str(ev["eventDate"]).strip()

            except (KeyError, ValueError):

                pass

        try:

            entities = response["entities"]

            if not isinstance(entities, list):

                entities = [entities]

        except KeyError:

            entities = []

        for en in entities:

            try:

                temp = en["vcardArray"][1]

                for t in temp:

                    if "administrative" in en["roles"] and t[0] == "fn":

                        net["name"] = str(t[3]).strip()

                    elif "administrative" in en["roles"] and t[0] == "adr":

                        try:

                            net["address"] = str(t[1]["label"]).strip()

                        except KeyError:

                            pass

                    elif t[0] == "email":

                        key = None

                        if len(en["roles"]) > 1 or en["roles"][0] == "administrative":

                            key = "misc_emails"

                        elif en["roles"][0] == "abuse":

                            key = "abuse_emails"

                        elif en["roles"][0] == "technical":

                            key = "tech_emails"

                        if key is not None:

                            if net[key] is not None:

                                net[key] += "\n%s" % str(t[3]).strip()

                            else:

                                net[key] = str(t[3]).strip()

            except (KeyError, IndexError):

                pass

        try:

            remarks = response["remarks"]

            if not isinstance(remarks, list):

                remarks = [remarks]

        except KeyError:

            remarks = []

        for rem in remarks:

            try:

                if rem["title"] == "description":

                    net["description"] = str("\n".join(rem["description"]))

            except (KeyError, IndexError):

                pass

        return [net]
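
The repeated try/except KeyError blocks above can be written more compactly with dict.get(). A sketch of the country/events handling under the same assumed response shape:

def parse_basics(response):
    net = {'country': None, 'created': None, 'updated': None}
    country = response.get('country')
    if country is not None:
        net['country'] = str(country).strip().upper()
    events = response.get('events', [])
    if not isinstance(events, list):
        events = [events]
    for ev in events:
        if ev.get('eventAction') == 'registration':
            net['created'] = str(ev.get('eventDate', '')).strip() or None
        elif ev.get('eventAction') == 'last changed':
            net['updated'] = str(ev.get('eventDate', '')).strip() or None
    return net

print(parse_basics({'country': 'au ', 'events': {'eventAction': 'registration',
                                                 'eventDate': '2008-09-04'}}))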
Example #51
    def _lookup_rws_lacnic(self, response=None):
        """
        The function for retrieving and parsing whois information for a LACNIC
        IP address via HTTP (Whois-RWS).

        Args:
            response: The dictionary containing whois information to parse.

        Returns:
            List: Dictionaries containing network information which consists
                of the fields listed in the NIC_WHOIS dictionary. Certain IPs
                have more granular network listings, hence the need for a list
                object.
        """

        addrs = []
        net = BASE_NET.copy()

        try:

            addrs.extend(
                summarize_address_range(
                    ip_address(response["startAddress"].strip()), ip_address(response["endAddress"].strip())
                )
            )

            net["cidr"] = ", ".join([i.__str__() for i in collapse_addresses(addrs)])

        except (KeyError, ValueError, TypeError):

            pass

        try:

            net["country"] = str(response["country"]).strip().upper()

        except KeyError:

            pass

        try:

            events = response["events"]

            if not isinstance(events, list):

                events = [events]

        except KeyError:

            events = []

        for ev in events:

            try:

                if ev["eventAction"] == "registration":

                    tmp = str(ev["eventDate"]).strip()

                    value = datetime.strptime(tmp, str(NIC_WHOIS["lacnic"]["dt_rws_format"])).isoformat("T")

                    net["created"] = value

                elif ev["eventAction"] == "last changed":

                    tmp = str(ev["eventDate"]).strip()

                    value = datetime.strptime(tmp, str(NIC_WHOIS["lacnic"]["dt_rws_format"])).isoformat("T")

                    net["updated"] = value

            except (KeyError, ValueError):

                pass

        try:

            entities = response["entities"]

            if not isinstance(entities, list):

                entities = [entities]

        except KeyError:

            entities = []

        for en in entities:

            try:

                if en["roles"][0] == "registrant":

                    temp = en["vcardArray"][1]

                    for t in temp:

                        if t[0] == "fn":

                            net["name"] = str(t[3]).strip()

                        elif t[0] == "org":

                            net["description"] = str(t[3][0]).strip()

                        elif t[0] == "adr":

                            net["address"] = str(t[1]["label"]).strip()

                        elif t[0] == "email":

                            net["misc_emails"] = str(t[3]).strip()

                elif en["roles"][0] == "abuse":

                    temp = en["vcardArray"][1]

                    for t in temp:

                        if t[0] == "email":

                            net["abuse_emails"] = str(t[3]).strip()

                elif en["roles"][0] == "tech":

                    temp = en["vcardArray"][1]

                    for t in temp:

                        if t[0] == "email":

                            net["tech_emails"] = str(t[3]).strip()

            except (KeyError, IndexError):

                pass

        return [net]
Example #52
    def _lookup_rws_ripe(self, response=None):
        """
        The function for retrieving and parsing whois information for a RIPE
        IP address via HTTP (Whois-RWS).

        ***
        THIS FUNCTION IS TEMPORARILY BROKEN UNTIL RIPE FIXES THEIR API:
        https://github.com/RIPE-NCC/whois/issues/114
        ***

        Args:
            response: The dictionary containing whois information to parse.

        Returns:
            List: Dictionaries containing network information which consists
                of the fields listed in the NIC_WHOIS dictionary. Certain IPs
                have more granular network listings, hence the need for a list
                object.
        """

        nets = []

        try:

            object_list = response['objects']['object']

        except KeyError:

            object_list = []

        ripe_abuse_emails = []
        ripe_misc_emails = []

        net = BASE_NET.copy()

        for n in object_list:

            try:

                if n['type'] == 'role':

                    for attr in n['attributes']['attribute']:

                        if attr['name'] == 'abuse-mailbox':

                            ripe_abuse_emails.append(str(
                                attr['value']
                            ).strip())

                        elif attr['name'] == 'e-mail':

                            ripe_misc_emails.append(str(attr['value']).strip())

                        elif attr['name'] == 'address':

                            if net['address'] is not None:

                                net['address'] += '\n%s' % (
                                    str(attr['value']).strip()
                                )

                            else:

                                net['address'] = str(attr['value']).strip()

                elif n['type'] in ('inetnum', 'inet6num'):

                    for attr in n['attributes']['attribute']:

                        if attr['name'] in ('inetnum', 'inet6num'):

                            ipr = str(attr['value']).strip()
                            ip_range = ipr.split(' - ')

                            try:

                                if len(ip_range) > 1:

                                    addrs = []
                                    addrs.extend(
                                        summarize_address_range(
                                            ip_address(ip_range[0]),
                                            ip_address(ip_range[1])
                                        )
                                    )

                                    cidr = ', '.join(
                                        [i.__str__()
                                         for i in collapse_addresses(addrs)]
                                    )

                                else:

                                    cidr = ip_network(ip_range[0]).__str__()

                                net['cidr'] = cidr

                            except (ValueError, TypeError):

                                pass

                        elif attr['name'] == 'netname':

                            net['name'] = str(attr['value']).strip()

                        elif attr['name'] == 'descr':

                            if net['description'] is not None:

                                net['description'] += '\n%s' % (
                                    str(attr['value']).strip()
                                )

                            else:

                                net['description'] = str(attr['value']).strip()

                        elif attr['name'] == 'country':

                            net['country'] = str(attr['value']).strip().upper()

            except KeyError:

                pass

        nets.append(net)

        # This is nasty. Since RIPE RWS doesn't provide a granular
        # contact-to-network relationship, we apply the emails to all networks.
        if len(ripe_abuse_emails) > 0 or len(ripe_misc_emails) > 0:

            abuse = (
                '\n'.join(set(ripe_abuse_emails))
                if len(ripe_abuse_emails) > 0 else None
            )
            misc = (
                '\n'.join(set(ripe_misc_emails))
                if len(ripe_misc_emails) > 0 else None
            )

            for net in nets:

                net['abuse_emails'] = abuse
                net['misc_emails'] = misc

        return nets
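
The inetnum/inet6num branch splits the registry's 'start - end' value and summarizes it, exactly as in the earlier examples; the same steps also work for IPv6, shown here with a made-up documentation range:

import ipaddress

ipr = '2001:db8:: - 2001:db8::ffff'
ip_range = ipr.split(' - ')
addrs = ipaddress.summarize_address_range(
    ipaddress.ip_address(ip_range[0]), ipaddress.ip_address(ip_range[1]))
print(', '.join(str(n) for n in ipaddress.collapse_addresses(addrs)))
# -> 2001:db8::/112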
Example #53
    def push_config_to_apic(self):
        """
        Push the configuration to the APIC

        :return: Requests Response instance indicating success or not
        """
        THROTTLE_SIZE = 500000 // 8
        # Set the tenant name correctly
        if self._tenant_name == '' and self.cdb.has_context_config():
            self.set_tenant_name(self.cdb.get_context_config().tenant_name)
        elif self._tenant_name == '':
            self.set_tenant_name('acitoolkit')

        # Find all the unique contract providers
        logging.debug('Finding the unique contract providers')
        unique_providers = {}
        for provided_policy in self.cdb.get_contract_policies():
            if provided_policy.dst_id not in unique_providers:
                unique_providers[provided_policy.dst_id] = 0
            else:
                unique_providers[provided_policy.dst_id] += 1
        logging.debug('Found %s unique contract providers', len(unique_providers))

        # Find any duplicate contracts that this provider is providing (remove)
        logging.debug('Finding any duplicate contracts')
        duplicate_policies = []
        for provider in unique_providers:
            for provided_policy in self.cdb.get_contract_policies():
                if provided_policy in duplicate_policies:
                    continue
                if provider in provided_policy.dst_ids:
                    for other_policy in self.cdb.get_contract_policies():
                        if other_policy == provided_policy or other_policy in duplicate_policies:
                            continue
                        if other_policy.dst_ids == provided_policy.dst_ids and other_policy.has_same_permissions(
                                provided_policy):
                            provided_policy.src_ids = provided_policy.src_ids + other_policy.src_ids
                            duplicate_policies.append(other_policy)
                            logging.debug('duplicate_policies now has %s entries', len(duplicate_policies))

        logging.debug('Removing duplicate contracts')
        for duplicate_policy in duplicate_policies:
            self.cdb.remove_contract_policy(duplicate_policy)

        if not self.displayonly:
            # Log on to the APIC
            apic_cfg = self.cdb.get_apic_config()
            apic = Session(apic_cfg.url, apic_cfg.user_name, apic_cfg.password)
            resp = apic.login()
            if not resp.ok:
                return resp

        tenant_names = []
        tenant_names.append(self._tenant_name)

        # delete all the unwanted epgs
        tenant = Tenant(self._tenant_name)
        existing_epgs = []
        if Tenant.exists(apic, tenant):
            tenants = Tenant.get_deep(
                apic,
                names=tenant_names,
                limit_to=[
                    'fvTenant',
                    'fvAp',
                    'vzFilter',
                    'vzEntry',
                    'vzBrCP',
                    'vzSubj',
                    'vzRsSubjFiltAtt'])
            tenant = tenants[0]
            appProfiles = tenant.get_children(AppProfile)
            app = appProfiles[0]
            existing_epgs = app.get_children(EPG)
        else:

            app = AppProfile(self._app_name, tenant)

        for existing_epg in existing_epgs:
            matched = False
            if existing_epg.name != "base":
                for epg_policy in self.cdb.get_epg_policies():
                    if existing_epg.descr.split(":")[1] == epg_policy.descr.split(":")[1]:
                        matched = True
                if not matched:
                    existing_epg.mark_as_deleted()

        if self.displayonly:
            print(json.dumps(tenant.get_json(), indent=4, sort_keys=True))
        else:
            logging.debug('Pushing EPGS by deleting unwanted epgs ')
            if len(tenant.get_children()) > 0:
                resp = tenant.push_to_apic(apic)
                if not resp.ok:
                    return resp

        # delete all the unwanted contracts
        tenants = Tenant.get_deep(
            apic,
            names=tenant_names,
            limit_to=[
                'fvTenant',
                'fvAp',
                'vzFilter',
                'vzEntry',
                'vzBrCP',
                'vzSubj',
                'vzRsSubjFiltAtt'])
        tenant = tenants[0]
        existing_contracts = tenant.get_children(Contract)
        for existing_contract in existing_contracts:
            matched = False
            for contract_policy in self.cdb.get_contract_policies():
                if existing_contract.descr.split("::")[1] == contract_policy.descr.split("::")[1]:
                    matched = True
            if not matched:
                existing_contract.mark_as_deleted()
                exist_contract_providing_epgs = existing_contract.get_all_providing_epgs()
                for exist_contract_providing_epg in exist_contract_providing_epgs:
                    exist_contract_providing_epg.mark_as_deleted()
                exist_contract_consuming_epgs = existing_contract.get_all_consuming_epgs()
                for exist_contract_consuming_epg in exist_contract_consuming_epgs:
                    exist_contract_consuming_epg.mark_as_deleted()

        if self.displayonly:
            print(json.dumps(tenant.get_json(), indent=4, sort_keys=True))
        else:
            logging.debug('Pushing contracts by deleting unwanted contracts')
            if len(tenant.get_children()) > 0:
                resp = tenant.push_to_apic(apic)
                if not resp.ok:
                    return resp

        filterEntry_list = []

        logging.debug('Generating JSON....')
        # Push all of the Contracts
        logging.debug('Pushing contracts. # of Contract policies: %s', len(self.cdb.get_contract_policies()))
        tenant = Tenant(self._tenant_name)
        if Tenant.exists(apic, tenant):
            tenants = Tenant.get_deep(
                apic,
                names=tenant_names,
                limit_to=[
                    'fvTenant',
                    'vzFilter',
                    'vzEntry',
                    'vzBrCP',
                    'vzSubj',
                    'vzRsSubjFiltAtt'])
            tenant = tenants[0]
            existing_contracts = tenant.get_children(Contract)
        else:
            existing_contracts = tenant.get_children(Contract)
        # removing the unwanted contractsubject filters for each contract subject
        for contract_policy in self.cdb.get_contract_policies():
            name = contract_policy.src_name + '::' + contract_policy.dst_name
            for existing_contract in existing_contracts:
                if existing_contract.descr.split("::")[1] == contract_policy.descr.split("::")[1]:
                    for child_contractSubject in existing_contract.get_children(ContractSubject):
                        for child_filter in child_contractSubject.get_filters():
                            matched = False
                            for whitelist_policy in contract_policy.get_whitelist_policies():
                                entry_name = whitelist_policy.proto + '.' + whitelist_policy.port_min + '.' + whitelist_policy.port_max
                                if child_filter.name == entry_name + '_Filter':
                                    matched = True
                                    continue
                            if not matched:
                                # TBD: need to check this; it is not working.
                                child_contractSubject._remove_relation(child_filter)
                                child_filter._remove_attachment(child_contractSubject)
                                logging.debug('removing filter ' + child_filter.name)

        if self.displayonly:
            print(json.dumps(tenant.get_json(), indent=4, sort_keys=True))
        else:
            logging.debug('Pushing contracts by deleting unwanted filters')
            if len(tenant.get_children()) > 0:
                resp = tenant.push_to_apic(apic)
                if not resp.ok:
                    return resp

        # if num of contract_subjects is 0 then remove it finally
        for contract_policy in self.cdb.get_contract_policies():
            name = contract_policy.src_name + '::' + contract_policy.dst_name
            contract = Contract(name, tenant)
            contract.descr = contract_policy.descr[0:127 -
                                                   (contract_policy.descr.count('"') +
                                                    contract_policy.descr.count("'") +
                                                       contract_policy.descr.count('/'))]
            for whitelist_policy in contract_policy.get_whitelist_policies():
                entry_name = whitelist_policy.proto + '.' + whitelist_policy.port_min + '.' + whitelist_policy.port_max
                if whitelist_policy.proto == '6' or whitelist_policy.proto == '17':
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        dFromPort=whitelist_policy.port_min,
                                        dToPort=whitelist_policy.port_max,
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        sFromPort='unspecified',
                                        sToPort='unspecified',
                                        tcpRules='unspecified',
                                        parent=contract)
                else:
                    entry = FilterEntry(entry_name,
                                        applyToFrag='no',
                                        arpOpc='unspecified',
                                        etherT='ip',
                                        prot=whitelist_policy.proto,
                                        parent=contract)
                filterEntry_list.append(entry_name)
            if not self.displayonly:
                if len(str(tenant.get_json())) > THROTTLE_SIZE:
                    logging.debug('Throttling contracts. Pushing config...')
                    resp = tenant.push_to_apic(apic)
                    if not resp.ok:
                        return resp
                    tenant = Tenant(self._tenant_name)

            if self.displayonly:
                print(json.dumps(tenant.get_json(), indent=4, sort_keys=True))
            else:
                logging.debug('Pushing remaining contracts')
                resp = tenant.push_to_apic(apic)
                if not resp.ok:
                    return resp

        # Push all of the EPGs
        logging.debug('Pushing EPGs')
        if not self.displayonly:
            tenants = Tenant.get_deep(apic, names=tenant_names)
            tenant = tenants[0]
            appProfiles = tenant.get_children(AppProfile)
            app = appProfiles[0]

        if self._use_ip_epgs:
            # Create a Base EPG
            base_epg = EPG('base', app)
            if self.cdb.has_context_config():
                context_name = self.cdb.get_context_config().name
            else:
                context_name = 'vrf1'
            context = Context(context_name, tenant)
            bd = BridgeDomain('bd', tenant)
            bd.add_context(context)
            base_epg.add_bd(bd)
            if self.displayonly:
                # If display only, just deploy the EPG to leaf 101
                base_epg.add_static_leaf_binding('101', 'vlan', '1', encap_mode='untagged')
            else:
                # Deploy the EPG to all of the leaf switches
                nodes = Node.get(apic)
                for node in nodes:
                    if node.role == 'leaf':
                        base_epg.add_static_leaf_binding(node.node, 'vlan', '1', encap_mode='untagged')

            # Create the Attribute based EPGs
            logging.debug('Creating Attribute Based EPGs')
            existing_epgs = app.get_children(EPG)
            for epg_policy in self.cdb.get_epg_policies():
                if not self.displayonly:
                    # Check if we need to throttle very large configs
                    if len(str(tenant.get_json())) > THROTTLE_SIZE:
                        resp = tenant.push_to_apic(apic)
                        if not resp.ok:
                            return resp
                        tenant = Tenant(self._tenant_name)
                        app = AppProfile(self._app_name, tenant)
                        context = Context(context_name, tenant)
                        bd = BridgeDomain('bd', tenant)
                        bd.add_context(context)
                        if self._use_ip_epgs:
                            base_epg = EPG('base', app)
                            base_epg.add_bd(bd)

                matched = False
                for existing_epg in existing_epgs:
                    if existing_epg.name != "base":
                        if existing_epg.descr.split(":")[1] == epg_policy.descr.split(":")[1]:
                            matched = True
                            break

                consumed_contracts = []
                provided_contracts = []
                if matched:
                    consumed_contracts = existing_epg.get_all_consumed()
                    provided_contracts = existing_epg.get_all_provided()
                    epg = existing_epg
                else:
                    epg = EPG(epg_policy.name, app)

                # Check if the policy has the default 0.0.0.0 IP address
                no_default_endpoint = True
                for node_policy in epg_policy.get_node_policies():
                    if node_policy.ip == '0.0.0.0' and node_policy.prefix_len == 0:
                        no_default_endpoint = False
                        epg.add_bd(bd)

                # Add all of the IP addresses
                if no_default_endpoint:
                    epg.is_attributed_based = True
                    epg.set_base_epg(base_epg)
                    criterion = AttributeCriterion('criterion', epg)
                    ipaddrs = []
                    # check if the existing nodes are there in the present config,if not delete them
                    for node_policy in epg_policy.get_node_policies():
                        ipaddr = ipaddress.ip_address(str(node_policy.ip))
                        if not ipaddr.is_multicast:  # Skip multicast addresses. They cannot be IP based EPGs
                            ipaddrs.append(ipaddr)
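                    # collapse_addresses accepts bare addresses as well as
                    # networks; consecutive endpoint addresses are merged into
                    # the smallest covering networks before being added as IP
                    # attribute criteria.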
                    nets = ipaddress.collapse_addresses(ipaddrs)
                    for net in nets:
                        criterion.add_ip_address(str(net))
                epg.descr = epg_policy.descr[0:127]
                # Consume and provide all of the necessary contracts
                for contract_policy in self.cdb.get_contract_policies():
                    contract = None
                    if epg_policy.id in contract_policy.src_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        existing = False
                        for existing_consumed_contract in consumed_contracts:
                            if name == existing_consumed_contract.name:
                                existing = True
                                contract = existing_consumed_contract
                        if not existing:
                            contract = Contract(name, tenant)
                            epg.consume(contract)
                    if epg_policy.id in contract_policy.dst_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        if contract is None:
                            existing = False
                            for existing_provided_contract in provided_contracts:
                                if name == existing_provided_contract.name:
                                    existing = True
                                    contract = existing_provided_contract
                            if not existing:
                                contract = Contract(name, tenant)
                        epg.provide(contract)
        else:
            logging.debug('Creating EPGs')
            tenants = Tenant.get_deep(apic, names=tenant_names)
            tenant = tenants[0]
            appProfiles = tenant.get_children(AppProfile)
            if len(appProfiles) > 0:
                app = appProfiles[0]
            else:
                app = AppProfile(self._app_name, tenant)

            existing_epgs = app.get_children(EPG)

            for epg_policy in self.cdb.get_epg_policies():

                matched = False
                for existing_epg in existing_epgs:
                    if existing_epg.name != "base":
                        if existing_epg.descr.split(":")[1] == epg_policy.descr.split(":")[1]:
                            matched = True
                            break

                consumed_contracts = []
                provided_contracts = []
                if matched is True:
                    consumed_contracts = existing_epg.get_all_consumed()
                    provided_contracts = existing_epg.get_all_provided()
                epg = EPG(epg_policy.name, app)
                epg.descr = epg_policy.descr[0:127]

                # Consume and provide all of the necessary contracts
                for contract_policy in self.cdb.get_contract_policies():
                    contract = None
                    if epg_policy.id in contract_policy.src_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        existing = False
                        for existing_consumed_contract in consumed_contracts:
                            if name == existing_consumed_contract.name:
                                existing = True
                                contract = existing_consumed_contract
                        if not existing:
                            contract = Contract(name, tenant)
                            epg.consume(contract)
                    if epg_policy.id in contract_policy.dst_ids:
                        name = contract_policy.src_name + '::' + contract_policy.dst_name
                        if contract is None:
                            existing = False
                            for existing_provided_contract in provided_contracts:
                                if name == existing_provided_contract.name:
                                    existing = True
                                    contract = existing_provided_contract
                            if not existing:
                                contract = Contract(name, tenant)
                        epg.provide(contract)

        if self.displayonly:
            print(json.dumps(tenant.get_json(), indent=4, sort_keys=True))
        else:
            resp = tenant.push_to_apic(apic)
            if not resp.ok:
                return resp

        # remove the unwanted filters
        existing_filters = tenant.get_children(Filter)
        for existing_filterEntry in existing_filters:
            matched = False
            for filterEntry in filterEntry_list:
                if filterEntry + '_Filter' == existing_filterEntry.name:
                    matched = True
            if not matched:
                existing_filterEntry.mark_as_deleted()
        if self.displayonly:
            print(json.dumps(tenant.get_json(), indent=4, sort_keys=True))
        else:
            resp = tenant.push_to_apic(apic)
            return resp
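The interesting step in this example is how per-endpoint addresses become attribute-criterion subnets: ipaddress.collapse_addresses() also accepts bare address objects and merges runs of consecutive addresses into the smallest covering networks. A minimal standalone sketch (outside the acitoolkit context, with made-up addresses):

from ipaddress import ip_address, collapse_addresses

# Four consecutive host addresses collapse into a single /30
ipaddrs = [ip_address('10.0.0.%d' % i) for i in range(4)]
print([str(net) for net in collapse_addresses(ipaddrs)])
# ['10.0.0.0/30']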
Example #54
    def _lookup_rws_arin(self, response=None, retry_count=3):
        """
        The function for retrieving and parsing whois information for an ARIN
        IP address via HTTP (Whois-RWS).

        Args:
            response: The dictionary containing whois information to parse.
            retry_count: The number of times to retry in case socket errors,
                timeouts, connection resets, etc. are encountered.

        Returns:
            List: Dictionaries containing network information which consists
                of the fields listed in the NIC_WHOIS dictionary. Certain IPs
                have more granular network listings, hence the need for a list
                object.
        """

        nets = []

        try:

            net_list = response["nets"]["net"]

            if not isinstance(net_list, list):

                net_list = [net_list]

        except KeyError:

            net_list = []

        for n in net_list:

            if "orgRef" in n and n["orgRef"]["@handle"] in ("ARIN", "VR-ARIN"):

                continue

            addrs = []
            net = BASE_NET.copy()

            try:

                addrs.extend(
                    summarize_address_range(
                        ip_address(n["startAddress"]["$"].strip()), ip_address(n["endAddress"]["$"].strip())
                    )
                )

                net["cidr"] = ", ".join([i.__str__() for i in collapse_addresses(addrs)])

            except (KeyError, ValueError, TypeError):

                pass

            for k, v in {"created": "registrationDate", "updated": "updateDate", "name": "name"}.items():

                try:

                    net[k] = str(n[v]["$"]).strip()

                except KeyError:

                    pass

            ref = None
            if "customerRef" in n:

                ref = ["customerRef", "customer"]

            elif "orgRef" in n:

                ref = ["orgRef", "org"]

            if ref is not None:

                try:

                    net["description"] = str(n[ref[0]]["@name"]).strip()

                except KeyError:

                    pass

                try:

                    ref_url = n[ref[0]]["$"].strip() + "?showPocs=true"
                    ref_response = self.get_rws(ref_url, retry_count)

                except (KeyError, WhoisLookupError):

                    nets.append(net)
                    continue

                try:

                    addr_list = ref_response[ref[1]]["streetAddress"]["line"]

                    if not isinstance(addr_list, list):

                        addr_list = [addr_list]

                    net["address"] = "\n".join([str(line["$"]).strip() for line in addr_list])

                except KeyError:

                    pass

                for k, v in {"postal_code": "postalCode", "city": "city", "state": "iso3166-2"}.items():

                    try:

                        net[k] = str(ref_response[ref[1]][v]["$"])

                    except KeyError:

                        pass

                try:

                    net["country"] = (str(ref_response[ref[1]]["iso3166-1"]["code2"]["$"])).upper()

                except KeyError:

                    pass

                try:

                    for poc in ref_response[ref[1]]["pocs"]["pocLinkRef"]:

                        if poc["@description"] in ("Abuse", "Tech"):

                            poc_url = poc["$"]
                            poc_response = self.get_rws(poc_url, retry_count)

                            emails = poc_response["poc"]["emails"]["email"]

                            if not isinstance(emails, list):

                                emails = [emails]

                            temp = []

                            for e in emails:

                                temp.append(str(e["$"]).strip())

                            key = "%s_emails" % poc["@description"].lower()

                            net[key] = "\n".join(set(temp)) if len(temp) > 0 else None

                except (KeyError, WhoisLookupError):

                    pass

            nets.append(net)

        return nets
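The cidr field above is built by converting ARIN's start/end address pair into CIDR form. A minimal sketch of that conversion, independent of the ipwhois class (the address values here are made up): summarize_address_range() expands a first/last pair into a list of networks, and collapse_addresses() joins adjacent ones into the shortest CIDR list.

from ipaddress import ip_address, summarize_address_range, collapse_addresses

# Hypothetical ARIN net block, given only as start and end addresses
start = ip_address('192.0.2.0')
end = ip_address('192.0.3.255')

ranges = summarize_address_range(start, end)  # iterator of networks
print(', '.join(str(net) for net in collapse_addresses(ranges)))
# 192.0.2.0/23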