def _normalize_ips_and_network(hosts_str: str) -> list[str] | None:
    """Check if a list of hosts are all ips or ip networks."""
    normalized_hosts = []
    hosts = [host for host in cv.ensure_list_csv(hosts_str) if host != ""]

    for host in sorted(hosts):
        try:
            start, end = host.split("-", 1)
            if "." not in end:
                ip_1, ip_2, ip_3, _ = start.split(".", 3)
                end = ".".join([ip_1, ip_2, ip_3, end])
            summarize_address_range(ip_address(start), ip_address(end))
        except ValueError:
            pass
        else:
            normalized_hosts.append(host)
            continue

        try:
            normalized_hosts.append(str(ip_address(host)))
        except ValueError:
            pass
        else:
            continue

        try:
            normalized_hosts.append(str(ip_network(host)))
        except ValueError:
            return None

    return normalized_hosts
def _group_nets(cidr_list: list, raw_list=None) -> IPv4Network: def remove_items(start, end): for ip in range(int(start), int(end) + 1, 256): raw_list.remove(str(IPv4Address(ip)) + "/24") start = end = curr_super_net = None nets = 1 for net in cidr_list: net_obj = IPv4Network(net) prefix_len = net_obj.prefixlen if prefix_len == 24: address = net_obj.network_address super_net = net_obj.supernet(new_prefix=16) if end and super_net == curr_super_net and (end + 256) == address: nets += 1 else: if nets > 1 and nets >= NETS_LIMIT: if raw_list: remove_items(start, end) yield summarize_address_range(IPv4Address(start), IPv4Address(end + 255)) start = address curr_super_net = super_net nets = 1 end = address else: if nets > 1 and nets >= NETS_LIMIT: if raw_list: remove_items(start, end) yield summarize_address_range(IPv4Address(start), IPv4Address(end + 255))
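# A minimal standalone sketch (not part of _group_nets above) of the
# summarization it relies on: consecutive /24 blocks under one supernet
# collapse into a single larger prefix. NETS_LIMIT and the raw_list
# bookkeeping are assumed to be defined in the surrounding module.
from ipaddress import IPv4Address, summarize_address_range

nets = list(summarize_address_range(IPv4Address("10.0.0.0"),
                                    IPv4Address("10.0.3.255")))
print(nets)  # [IPv4Network('10.0.0.0/22')] - four adjacent /24s become one /22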
def calculate_cidr(start_address, end_address):
    """
    The function to calculate a CIDR range(s) from a start and end IP address.

    Args:
        start_address: The starting IP address in string format.
        end_address: The ending IP address in string format.

    Returns:
        List: A list of calculated CIDR ranges.
    """

    tmp_addrs = []

    try:
        tmp_addrs.extend(summarize_address_range(
            ip_address(start_address),
            ip_address(end_address)))

    except (KeyError, ValueError, TypeError):  # pragma: no cover

        try:
            tmp_addrs.extend(summarize_address_range(
                ip_network(start_address).network_address,
                ip_network(end_address).network_address))

        except AttributeError:  # pragma: no cover
            tmp_addrs.extend(summarize_address_range(
                ip_network(start_address).ip,
                ip_network(end_address).ip))

    return [i.__str__() for i in collapse_addresses(tmp_addrs)]
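# Hedged usage sketch for calculate_cidr above (assumes the ipaddress helpers
# used in the function are imported in this module): a range that does not sit
# on a single CIDR boundary is split into several prefixes.
print(calculate_cidr("192.0.2.0", "192.0.2.255"))
# ['192.0.2.0/24']
print(calculate_cidr("192.0.2.0", "192.0.2.130"))
# ['192.0.2.0/25', '192.0.2.128/31', '192.0.2.130/32']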
def nro_as0_roas(url): delegations = requests.get(url).text.split("\n") # Remove header and summaries delegations.pop(0) # 2|nro|20200214|574416|19821213|20200214|+0000 delegations.pop(0) # nro|*|asn|*|91534|summary delegations.pop(0) # nro|*|ipv4|*|214428|summary delegations.pop(0) # nro|*|ipv6|*|268454|summary delegations.pop() # <last empty line on the file> roas = [] for line in delegations: delegation = line.split("|") type = delegation[2] value = delegation[3] length = int(delegation[4]) status = delegation[6] if status == "available" or status == "ianapool" or status == "ietf" or status == "reserved": # meaning !assigned if type == "ipv4": v4networks = ipaddress.summarize_address_range(ipaddress.IPv4Address(value), ipaddress.IPv4Address(value)+(length-1)) roas += as0_roas_for(v4networks, 32) if type == "ipv6": v6networks = [ipaddress.IPv6Network(value+"/"+str(length))] roas += as0_roas_for(v6networks, 128) return roas
def extract_addresses(raw):
    for addr in raw.split():
        if '-' in addr:
            start, end = addr.split('-', 1)
            yield from ipaddress.summarize_address_range(ipaddress.ip_address(start),
                                                         ipaddress.ip_address(end))
        else:
            yield ipaddress.ip_network(addr)
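# Quick illustrative call for extract_addresses above (values are arbitrary;
# assumes ipaddress is imported in this module as the function expects):
# a dash-separated range yields the CIDR blocks covering it, anything else is
# parsed as a single network.
for net in extract_addresses("10.0.0.0/24 10.0.1.0-10.0.1.3"):
    print(net)
# 10.0.0.0/24
# 10.0.1.0/30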
def ip_in_whitelist(ip): try: logger.debug("client ip request for registry auth is %s" % ip) white_ips = [x.strip() for x in REGISTRY_IP_WHITELIST.split(',')] networks, ranges, ips = [], [], [] for ip_str in white_ips: if ip_str.find('/') >= 0: try: networks.append(ipaddress.ip_network(unicode(ip_str))) except Exception as e: logger.warning("format of ip net %s is invalid" % ip_str) elif ip_str.find('-') >= 0: try: first, last = ip_str.split('-') ranges.append(ipaddress.summarize_address_range( IPv4Address(unicode(first)), IPv4Address(unicode(last)))) except Exception as e: logger.warning("format of ip range %s is invalid" % ip_str) else: ips.append(ip_str) if ip in ips: return True for ip_range in ranges: if IPv4Address(ip) in ip_range: return True for network in networks: if IPv4Address(ip) in network: return True return IPAddress(ip) in IPNetwork(NODE_NETWORK) except Exception, e: logger.error( "Exception parse registry whitelist for ip %s : %s" % (ip, str(e))) return False
def post(self):
    """Handles creation of GeoCreate requests"""
    data = PARSER.parse_args()
    active = True
    if not data['CountryCode']:
        return make_response(
            jsonify(Result={
                "Status": "Error",
                "Message": "Valid ISO required"
            }), 400)
    if GeoModel.exists(data['CountryCode']):
        return make_response(
            jsonify(Result={
                "Status": "Error",
                "Message": "ISO Exists in DB"
            }), 400)
    if data["Active"]:
        # Compare string values with ==, not the identity operator "is",
        # so "false"/"0" and "true"/"1" are matched reliably.
        if data["Active"].lower() == "false" or data["Active"] == "0":
            active = False
        elif data["Active"].lower() == "true" or data["Active"] == "1":
            active = True
    new_geo = GeoModel(lt=self.ltype,
                       cc=data['CountryCode'],
                       start_date=data['Start_Date'],
                       end_date=data['End_Date'],
                       comments=data['Comments'],
                       active=active,
                       remove=False)
    new_geo.save()
    f = open("ipv4geolist.csv")
    try:
        reader = csv.reader(f)
        for row in reader:
            country_code = row[2]
            if country_code == new_geo.cc:
                startIp = ipaddress.ip_address(int(row[0]))
                endIp = ipaddress.ip_address(int(row[1]))
                # Avoid shadowing the built-in range().
                ip_range = [
                    ipaddr for ipaddr in ipaddress.summarize_address_range(
                        startIp, endIp)
                ]
                for item in ip_range:
                    new_ip = IPModel(lt=self.ltype,
                                     ipv4=str(item),
                                     geo=new_geo.id,
                                     remove=False)
                    new_ip.save()
    finally:
        f.close()
    return jsonify(
        Result={
            "Status": "Success",
            "Message": "Geo Added",
            "EntryID": str(new_geo.id)
        })
def dest_ips(self, dest_ips): """Create a list of IP addresses according to the parameter passed in the constructor function""" if dest_ips.find('/') != -1: # If / than is a network address self._strdest_ips = dest_ips.strip() self._dest_ips = list(ip_network(dest_ips.strip()).hosts()) elif dest_ips.find('-') != -1: # if - than is a IP range self._strdest_ips = dest_ips first_ip, last_ip = dest_ips.split('-') # When using IP ranges the 'summarize_address_range' return a list of networks inside the IP range. networks_in_range = list( summarize_address_range(ip_address(first_ip.strip()), ip_address(last_ip.strip()))) ip_range = [] # Convert the list of networks in a list of IP address for network in networks_in_range: ip_range += list(ip_network(network)) self._dest_ips = ip_range else: isfile = True character = 0 # File name must have a alphabetic character to be recognized as txt file, otherwise it's a IP address. while not (dest_ips[character].isalpha()): isfile = False character += 1 break if isfile: # open the txt file and add all lines as IP address in the _dest_ips variable. self._strdest_ips = self._filename self._dest_ips = _read_file(self._filename) else: # when passed a single IP to scan. self._strdest_ips = dest_ips self._dest_ips = ip_address(dest_ips)
def get_all_addresses(self, ips): ip_objects = set() inputs = [ip.strip() for ip in ips.split(',')] for input_ in inputs: try: # Input is in range form ("1.2.3.4 - 5.6.7.8"): if '-' in input_: input_ips = input_.split('-') ranges = set( ipaddress.summarize_address_range( *map(lambda x: ipaddress.IPv4Address(x.strip()), input_ips))) ip_objects.update(ranges) # Input is in CIDR form ("192.168.0.0/24"): elif '/' in input_: network = ipaddress.ip_network(input_) ip_objects.add(network) # Input is a single ip ("1.1.1.1"): else: ip = ipaddress.ip_address(input_) ip_objects.add(ip) except ValueError as e: print(e) # If we get any non-ip value just ignore it pass ip_objects = list(ip_objects) return ip_objects
def _parse_member(settype, member, strict=False): subtypes = settype.split(':')[1].split(',') parts = member.split(' ') parsed_member = [] for i in range(len(subtypes)): subtype = subtypes[i] part = parts[i] if subtype in ['ip', 'net']: try: if '/' in part: part = ipaddress.ip_network(part, strict=strict) elif '-' in part: start, end = list(map(ipaddress.ip_address, part.split('-'))) part = list(ipaddress.summarize_address_range(start, end)) else: part = ipaddress.ip_address(part) except ValueError: pass elif subtype == 'port': part = int(part) parsed_member.append(part) if len(parts) > len(subtypes): parsed_member.append(' '.join(parts[len(subtypes):])) return parsed_member
def iprange_to_cidr(ip_range: str = None) -> list[str]:
    """
    Format an IP range addresses string given as "start to end" format
    ('x.x.x.x-y.y.y.y') to its corresponding CIDR list.

    :param ip_range: IP range addresses string given as "start to end" format ('x.x.x.x-y.y.y.y').
    :return: IP range addresses string as its corresponding CIDR string list.
    :raises ipaddress.AddressValueError: Raised by the ipaddress library when address is incorrect.
    :raises ipaddress.NetmaskValueError: Raised by the ipaddress library when netmask is incorrect.
    :raises ValueError: Raised by the ipaddress library when values are incorrect.
    :raises TypeError: Raised by the ipaddress library when given objects types are incorrect.
    """
    cidr_ip_range_list = []
    ip_start, ip_end = ip_range.split("-")
    try:
        cidr_ip_range_list.extend(
            net.with_prefixlen for net in ipaddress.summarize_address_range(
                ipaddress.IPv4Address(ip_start), ipaddress.IPv4Address(ip_end)))
    except ipaddress.AddressValueError as ave:
        raise ave
    except ipaddress.NetmaskValueError as nve:
        raise nve
    except ValueError as ve:
        raise ve
    except TypeError as te:
        raise te
    return cidr_ip_range_list
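# Hedged example for iprange_to_cidr above; the input is the "start-end"
# string the docstring describes.
print(iprange_to_cidr("172.16.0.0-172.16.7.255"))
# ['172.16.0.0/21']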
def check_objects(input_objects,input_names,ip_address): valid_objects = [] for k,v in input_objects.items(): for item in v: # There are multiple possible configurations. Host, subnet and range. We need to validate if our IP Address is in any of them. if 'host' in item: # This one is simple. Check to see if a host IP matches directly or if it matches a matched name. if item.split()[1] == ip_address: valid_objects.append(k) for name in input_names: if item.split()[1] == name: valid_objects.append(k) if 'subnet' in item: # Here it requires a bit more work. We use the ipaddress library to validate if the IP resides within the network statement. network = unicode(item.split()[1] + "/" + item.split()[2],"utf-8") ipa = unicode(ip_address,"utf-8") if ipaddress.ip_address(ipa) in ipaddress.ip_network(network): valid_objects.append(k) if 'range' in item: # This one was tricky. Since a range doesn't necessarily line up 1-for-1 with subnets, I used a summarization function in the ipaddress # library to generate a list of summaries required to cover the range of addresses provided in the object. I then check our # IP address against that list (ike the block above) to see if it resides in any of the summaries. ipa = unicode(ip_address,"utf-8") first = unicode(item.split()[1],"utf-8") last = unicode(item.split()[2],"utf-8") subnets = [] for ipaddr in ipaddress.summarize_address_range(ipaddress.IPv4Address(first),ipaddress.IPv4Address(last)): if ipaddress.ip_address(ipa) in ipaddr: valid_objects.append(k) return(valid_objects)
def ip_addresses(self):
    """
    Returns the list of IP addresses contained in the current range.
    """
    start = ipaddress.ip_address(self.ip_start)
    end = ipaddress.ip_address(self.ip_end)
    return reduce(
        lambda ips, ipn: ips + list(ipn),
        ipaddress.summarize_address_range(start, end), [])
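# Standalone sketch of what the reduce() above does: each network returned by
# summarize_address_range is expanded into its individual addresses and the
# pieces are concatenated. The ip_start/ip_end attributes are whatever the
# surrounding model defines; the values below are documentation addresses.
import ipaddress
from functools import reduce

start = ipaddress.ip_address("192.0.2.4")
end = ipaddress.ip_address("192.0.2.9")
addresses = reduce(lambda ips, ipn: ips + list(ipn),
                   ipaddress.summarize_address_range(start, end), [])
print([str(a) for a in addresses])
# ['192.0.2.4', '192.0.2.5', '192.0.2.6', '192.0.2.7', '192.0.2.8', '192.0.2.9']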
def check_objects(input_objects, input_names, ip_address): valid_objects = [] for k, v in input_objects.items(): for item in v: # There are multiple possible configurations. Host, subnet and range. We need to validate if our IP Address is in any of them. if 'host' in item: # This one is simple. Check to see if a host IP matches directly or if it matches a matched name. if item.split()[1] == ip_address: valid_objects.append(k) for name in input_names: if item.split()[1] == name: valid_objects.append(k) if 'subnet' in item: # Here it requires a bit more work. We use the ipaddress library to validate if the IP resides within the network statement. network = unicode(item.split()[1] + "/" + item.split()[2], "utf-8") ipa = unicode(ip_address, "utf-8") if ipaddress.ip_address(ipa) in ipaddress.ip_network(network): valid_objects.append(k) if 'range' in item: # This one was tricky. Since a range doesn't necessarily line up 1-for-1 with subnets, I used a summarization function in the ipaddress # library to generate a list of summaries required to cover the range of addresses provided in the object. I then check our # IP address against that list (ike the block above) to see if it resides in any of the summaries. ipa = unicode(ip_address, "utf-8") first = unicode(item.split()[1], "utf-8") last = unicode(item.split()[2], "utf-8") subnets = [] for ipaddr in ipaddress.summarize_address_range( ipaddress.IPv4Address(first), ipaddress.IPv4Address(last)): if ipaddress.ip_address(ipa) in ipaddr: valid_objects.append(k) return (valid_objects)
def get_nets_krnic(self, response): """ The function for parsing network blocks from krnic whois data. Args: response (:obj:`str`): The response from the krnic server. Returns: list of dict: Mapping of networks with start and end positions. :: [{ 'cidr' (str) - The network routing block 'start' (int) - The starting point of the network 'end' (int) - The endpoint point of the network }] """ nets = [] # Iterate through all of the networks found, storing the CIDR value # and the start and end positions. for match in re.finditer( r'^(IPv4 Address)[\s]+:[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+?)' '[^\S\n]\((.+?)\)|.+)$', response, re.MULTILINE): try: net = copy.deepcopy(BASE_NET) net['range'] = match.group(2) if match.group(3) and match.group(4): addrs = [] addrs.extend( summarize_address_range( ip_address(match.group(3).strip()), ip_address(match.group(4).strip()))) cidr = ', '.join( [i.__str__() for i in collapse_addresses(addrs)]) net['range'] = '{0} - {1}'.format(match.group(3), match.group(4)) else: cidr = ip_network(match.group(2).strip()).__str__() net['cidr'] = cidr net['start'] = match.start() net['end'] = match.end() nets.append(net) except (ValueError, TypeError): pass return nets
def all_ips_for_range(ip_list):
    starting = ip_list[0].strip()
    ending_ip = ip_list[1].strip()
    all_up_ips = []
    networks = [
        ipaddr for ipaddr in ipaddress.summarize_address_range(
            ipaddress.IPv4Address(starting), ipaddress.IPv4Address(ending_ip))
    ]
    if not networks:
        print("Unable to get ip range")
        return all_up_ips
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=MAX_WORKERS) as executor:
        # Ping every address in every summarized network, not only the first one.
        future_to_url = {
            executor.submit(ping_ip, ip): ip
            for network in networks for ip in network
        }
        for future in concurrent.futures.as_completed(future_to_url):
            ip = future_to_url[future]
            try:
                data = future.result()
            except Exception:
                pass
            else:
                if data[1]:
                    all_up_ips.append(str(data[0]))
    return all_up_ips
def get(self, request): try: input1 = request.GET.get("input1") input2 = request.GET.get("input2") validate_ipv4_address(input1) validate_ipv4_address(input2) first = ipaddress.IPv4Address(input1) last = ipaddress.IPv4Address(input2) queryset = Router.objects.filter(is_active=True) summary = " ".join(map(str, ipaddress.summarize_address_range(first, last))) final_data = [] for i in queryset: if ipaddress.IPv4Network(i.loopback).__str__() in summary: final_data.append(i) data = RouterSerializer(final_data, many=True) return Response({"data": data.data, "success": True}, status=200) except ValueError as error: traceback.print_exc() return Response({"error": error, "success": False}, status=400) except ValidationError as error: traceback.print_exc() return Response({"error": error, "success": False}, status=400) except Exception as error: traceback.print_exc() return Response({"error": str(error), "success": False}, status=400)
def main(): log(f'{BANNER}\n\n') check_argc() parse_cmdline() check_argv(opts) s = requests.Session() if 'host' in opts['type']: log(f"fetching ipv4 host-ranges for {opts['country']}", 'info') with open(f"{opts['country']}-host.txt", '+a') as f: for i, x in enumerate(host_range(opts['country'], s)): log(x, _type='file', logfile=f) if 'cidr' in opts['type']: log(f"fetching ipv4 cidr-ranges for {opts['country']}", 'info') with open(f"{opts['country']}-cidr.txt", '+a') as f: for i, x in enumerate(host_range(opts['country'], s)): splitted = x.split('-') startip = ipaddress.IPv4Address(splitted[0]) endip = ipaddress.IPv4Address(splitted[1]) for addr in ipaddress.summarize_address_range(startip, endip): log(addr, _type='file', logfile=f) log(f'found {i} ranges for {opts["country"]}', 'good') if 'cidr' in opts['type'] and 'host' in opts['type']: log(f"saved results to: {opts['country']}-*.txt", 'good') else: log(f"saved results to: {opts['country']}-{opts['type'][0]}.txt", 'good') log('game over', 'info') return
def __init__(self, port_num: int, queue_: "return from pipe", start_ip=None, end_ip=None):
    if type(start_ip) != type(end_ip):
        raise ValueError("IP range has to be None or integers")
    if start_ip is None:
        start_ip = ipaddress.IPv4Address('0.0.0.0')
        end_ip = ipaddress.IPv4Address('255.255.255.255')
    else:
        start_ip = ipaddress.IPv4Address(start_ip)
        end_ip = ipaddress.IPv4Address(end_ip)
    # Bind the result queue before it is used to report the skip-list abort below.
    self.__queue = queue_
    fake_network = [net for net in ipaddress.summarize_address_range(start_ip, end_ip)]
    skip_list = SkipList()
    for net in skip_list.skip_list:
        if fake_network[0].overlaps(net):
            self.__queue.put(_END_SIGNAL)
            exit(-1)
    self.__ip_range = range(int(start_ip), int(end_ip))
    self.__prepare_socket_factory(port_num)
    self.__packet = ip_scan_packet.make_dns_packet("one.yumi.ipl.eecs.case.edu")
    self.__udp_spoofing(port_num)
    self.__start_from = ipaddress.IPv4Address(start_ip)
    self.__end_to = ipaddress.IPv4Address(end_ip) - 1
    self.__start_time = time.time()
def getCIDR(sip, eip):
    '''yields a list of one or more CIDR ranges covering the start and end IP range'''
    # is it a qualys range?
    startip = ipaddress.IPv4Address(sip)
    endip = ipaddress.IPv4Address(eip)
    # summarized cidr ranges
    yield [ipaddr for ipaddr in ipaddress.summarize_address_range(startip, endip)]
def parse_ip_ranges(ip_ranges): add_list = [] try: if ip_ranges: if ',' in ip_ranges: ip_ranges = ip_ranges.split(',') else: ip_ranges = [ip_ranges] for address in ip_ranges: if '-' in address: first_addr = ip_address(address.split('-')[0]) last_addr = ip_address(address.split('-')[1]) for ipnet in summarize_address_range( ip_address(first_addr), ip_address(last_addr)): add_list += [ipad.exploded for ipad in ipnet] elif '/' in address: add_list += [ ipad.exploded for ipad in ip_network(address, strict=False) ] else: add_list.append(address) except (AddressValueError, NetmaskValueError, ValueError): raise ValueError(INVALID_SUBNET_ERROR_MESSAGE) return add_list
def rangesplitter(ip_range):
    split_range = ip_range.split('-')
    #print(split_range)
    first_ip = ipaddress.IPv4Address(split_range[0])
    second_ip = ipaddress.IPv4Address(split_range[1])
    #print(ipaddress.summarize_address_range(first_ip, second_ip))
    return [ipaddr for ipaddr in ipaddress.summarize_address_range(first_ip, second_ip)]
def resolve_ranges(self): r = [] # When there is a special if 'others' in self.ranges: if len(self.ranges['others']) > 0: self.members = [] return self.group_type = Address for v4_range in self.ranges['ipv4']: start = v4_range['start'] end = v4_range['end'] start, end = self.correct_v4address(start, end) subnets = list( ipaddress.summarize_address_range(ipaddress.IPv4Address(start), ipaddress.IPv4Address(end))) for net in subnets: cidr = "{}/{}".format(net.network_address, net.prefixlen) name = "RR_{}_{}".format(net.network_address, net.prefixlen) address_d = { "uid": "manual", "name": name, "ipv4-address": cidr, "type": "network" } a = Address(address_d) r.append(a) self.members = r
def request(self, ip_addr): """Send ARP request, ip_addr is either, a single address, or a range of addr list of 2), list of addr (3 or more), or a network. Do not send to oneself""" self.skip_list.append(self.sip) try: if isinstance(ip_addr, list): if len(ip_addr) == 2: # A range if ip_addr[0] < ip_addr[1]: ip_addr = ipaddress.summarize_address_range( ip_addr[0], ip_addr[1] ) # else it is just 2 addresses elif isinstance(ip_addr, ipaddress.IPv4Address): ip_addr = [ip_addr] elif isinstance(ip_addr, ipaddress.IPv4Network,): ip_addr = [ip_addr] else: raise IPAddressingError # Now we have a list for addr in ip_addr: if isinstance(addr, ipaddress.IPv4Address): if addr not in self.skip_list: self.send_arp_request(addr) else: # A network if addr.network_address not in self.skip_list: self.send_arp_request(addr.network_address) for x in addr.hosts(): if x not in self.skip_list: self.send_arp_request(x) if addr.broadcast_address not in self.skip_list: self.send_arp_request(addr.broadcast_address) except: raise IPAddressingError
def __get_port_count(self, port_list):
    port_listlen = len(port_list)
    if port_listlen <= 1:
        return 1

    # search consecutive port
    port_list.sort()
    count = 0
    while (port_listlen >= 2):
        start_index = 0
        end_index = 0
        for i in range(1, port_listlen):
            if (port_list[start_index] + i) == port_list[i]:
                end_index = i
            else:
                break

        # Compare indices and lengths with ==/!=; identity checks ("is") on
        # ints only work by accident for small values.
        if start_index != end_index:
            summarize = list(summarize_address_range(IPv4Address(port_list[start_index]),
                                                     IPv4Address(port_list[end_index])))
            count += len(summarize)
            del port_list[start_index:(end_index + 1)]
        else:
            del port_list[0:2]
            count += 2
        port_listlen = len(port_list)

    # If it is 1 port remaining, count as it is
    if port_listlen == 1:
        count += 1

    return count
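# Illustration of the trick __get_port_count relies on (a sketch, not part of
# the class above): consecutive port numbers are mapped onto IPv4 addresses so
# that summarize_address_range reveals how many aligned blocks they form.
from ipaddress import IPv4Address, summarize_address_range

ports = [80, 81, 82, 83]
blocks = list(summarize_address_range(IPv4Address(ports[0]), IPv4Address(ports[-1])))
print(blocks)  # [IPv4Network('0.0.0.80/30')] -> ports 80-83 count as one block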
def write_ips(): obj = {} for domain,start,end in [ ("reddit","198.41.128.0","198.41.255.255"), ("pornhub","31.192.117.132","31.192.117.135"), ("4chan","141.8.224.0","141.8.231.255"), ("youtube","173.194.0.0","173.194.255.255"), ("netflix","50.16.0.0","50.19.255.255"), ("hulu","77.67.96.0","77.67.99.255"), ("amazon","205.251.192.0","205.251.255.255"), ("ebay","66.211.160.0","66.211.191.255"), ("pandora","208.85.40.0","208.85.47.255"), ("spotify","194.132.196.0","194.132.199.255"), ("twitch","192.16.64.0","192.16.71.255"), ("facebook","173.252.64.0","173.252.127.255"), ("twitter","199.59.148.0","199.59.151.255"), ("myspace","216.178.32.0","216.178.47.255"), ("espn","216.178.32.0","216.178.47.255"), ("nfl","23.0.0.0","23.15.255.255"), ("wsu","216.58.224.0","216.58.255.255"), ("google","173.194.0.0","173.194.255.255"), ("bing","98.136.0.0","98.139.255.255"), ("baidu","220.181.0.0","220.181.255.255"), ("duckDuckgo","50.16.0.0","50.19.255.255"), ("github","192.30.252.0","192.30.255.255"), ("stackoverflow","198.252.206.0","198.252.206.255"), ("hackernews","54.224.0.0","54.239.255.255"), ]: obj[domain] = [ip for ip in ipaddress.summarize_address_range( ipaddress.IPv4Address(start), ipaddress.IPv4Address(end))] return obj
def calc_subnet(firstip, lastip):
    a = ipaddress.IPv4Address(firstip)
    b = ipaddress.IPv4Address(lastip)
    subnets = [ipaddr for ipaddr in ipaddress.summarize_address_range(a, b)]
    # The original loop returned on its first iteration, so only the first
    # summarized subnet is ever produced; make that intent explicit.
    if subnets:
        return subnets[0]
    return subnets
def summarize_ipv6(mml_file): ipv6_pattern = r'.*prefix\s\d\s*([^\n\r]*)' ipv6_list = list() with open(mml_file) as fin: ip_list = list() for lines in fin: if lines.strip().startswith('prefix'): ip_list.append( re.findall(pattern=ipv6_pattern, string=lines)) for ls in ip_list: for txt in ls: ip_range = txt.strip().split() ipv6_list.append(ip_range) networks = list() for ls in ipv6_list: networks.extend( ipaddress.summarize_address_range( ipaddress.IPv6Address(ls[0]), ipaddress.IPv6Address(ls[-1]))) aggregate = [ str(network) for network in ipaddress.collapse_addresses(networks) ] return aggregate
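# Small IPv6 sanity check for the aggregation step above, independent of the
# MML file parsing: a contiguous IPv6 range collapses to a single prefix.
import ipaddress

networks = ipaddress.summarize_address_range(
    ipaddress.IPv6Address("2001:db8::"),
    ipaddress.IPv6Address("2001:db8::ffff"))
print([str(n) for n in ipaddress.collapse_addresses(networks)])
# ['2001:db8::/112']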
def check_allocation_pools_pairing(filedata, pools): global ERROR_COUNT for poolitem in pools: pooldata = filedata[poolitem] LOG.info('Checking allocation pool {}'.format(poolitem)) pool_objs = [ipaddress.summarize_address_range( ipaddress.ip_address(x['start'].decode('utf-8')), ipaddress.ip_address(x['end'].decode('utf-8'))) for x in pooldata] subnet_item = poolitem.split('AllocationPools')[0] + 'NetCidr' try: subnet_obj = ipaddress.ip_network( filedata[subnet_item].decode('utf-8')) except ValueError: LOG.error('Invalid address: %s', subnet_item) ERROR_COUNT += 1 for ranges in pool_objs: for range in ranges: if not subnet_obj.overlaps(range): LOG.error('Allocation pool {} {} outside of subnet' '{}: {}'.format(poolitem, pooldata, subnet_item, subnet_obj)) ERROR_COUNT += 1 break
def get_cidr_list(self):
    cidr_list = []
    for interval in self.interval_set:
        startip = interval.start.address.__class__(interval.start)  # either IPv4Address or IPv6Address
        endip = interval.end.address.__class__(interval.end)
        cidrs = [ipaddr for ipaddr in ipaddress.summarize_address_range(startip, endip)]
        cidr_list += [str(cidr) for cidr in cidrs]
    return cidr_list
def in_Range(IP, IPRange):
    start_ip = ipaddress.ip_address(IPRange[0])
    end_ip = ipaddress.ip_address(IPRange[1])
    subnet_list = ipaddress.summarize_address_range(start_ip, end_ip)
    # Coerce the candidate to a network (a bare address becomes a /32 or /128)
    # so that overlaps() works whether a host or a subnet is passed in.
    target = ipaddress.ip_network(IP, strict=False)
    decision = False
    for subnet in subnet_list:
        decision = decision or subnet.overlaps(target)
    return decision
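# Example calls for in_Range above (documentation addresses, assuming the
# coercion to a network shown in the function).
print(in_Range("203.0.113.5", ("203.0.113.0", "203.0.113.15")))    # True
print(in_Range("203.0.113.200", ("203.0.113.0", "203.0.113.15")))  # False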
def get_cidr(network):
    networks = [str(net) for net in summarize_address_range(
        ip_address(network["start_address"]),
        ip_address(network["end_address"])
    )]
    if len(networks) == 1:
        networks = networks[0]
    return networks
def _parse_address(self, address): """ parse addresses and hostnames, yield only valid addresses and networks :param address: address or network :return: boolean """ address = address.strip() if address.find('/') > -1 and not address.split('/')[-1].isdigit(): # wildcard netmask for idx, item in enumerate( net_wildcard_iterator(address.lstrip('!'))): if idx > 65535: # overflow syslog.syslog(syslog.LOG_ERR, 'alias table %s overflow' % self._name) break yield "!%s" % item if address.startswith('!') else str(item) elif address.find('/') > -1: # provided address could be a network try: ipaddress.ip_network(str(address.lstrip('!')), strict=False) yield address return except (ipaddress.AddressValueError, ValueError): pass else: # check if address is an ipv4/6 address or range try: tmp = str(address).split('-') if len(tmp) > 1: addr1 = ipaddress.ip_address(tmp[0]) # address range (from-to) addr2 = ipaddress.ip_address(tmp[1]) for addr in ipaddress.summarize_address_range( addr1, addr2): yield str(addr) else: ipaddress.ip_address(tmp[0].lstrip('!')) yield address return except (ipaddress.AddressValueError, ValueError): pass # try to resolve provided address could_resolve = False for record_type in ['A', 'AAAA']: try: for rdata in self._dnsResolver.query(address, record_type): yield str(rdata) could_resolve = True except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN, dns.exception.Timeout, dns.resolver.NoNameservers): pass if not could_resolve: # log when none could be found syslog.syslog( syslog.LOG_ERR, 'unable to resolve %s for alias %s' % (address, self._name))
def parse_destination(self, line): temp_line_data = {} temp_line_data['acl_dst_net'] = [] temp_line_data['acl_dst_port'] = None linesplit = line.split() if linesplit[0] == 'host': temp_line_data['acl_dst_net'].append(linesplit[1].split('(')[0] + '/32') line = line.lstrip(' ').lstrip(' ' + linesplit[0]).lstrip(' ').lstrip( linesplit[1]) elif 'any' in linesplit[0]: temp_line_data['acl_dst_net'].append('0.0.0.0/0') line = line.lstrip(' ').lstrip(linesplit[0]) elif re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", linesplit[0]): if '(' in linesplit[1]: temp_line_data['acl_dst_net'].append( str( ipaddress.ip_network(linesplit[0] + '/' + linesplit[1].split('(')[0]))) else: temp_line_data['acl_dst_net'].append( str(ipaddress.ip_network(linesplit[0] + '/' + linesplit[1]))) line = line.lstrip(' ' + linesplit[0]).lstrip(' ').lstrip( linesplit[1]) elif 'v4-object-group' in linesplit[0]: '''get list of objects''' for each in self.object_lookup(object=linesplit[1].replace(( linesplit[1][linesplit[1].rindex('('):]), '')): temp_line_data['acl_dst_net'].append(each) line = line.lstrip(' ' + linesplit[0]).lstrip(linesplit[1]) elif 'object' in linesplit[0]: for each in self.object_lookup(object=linesplit[1].replace(( linesplit[1][linesplit[1].rindex('('):]), '')): temp_line_data['acl_dst_net'].append(each) line = line.lstrip(' ' + linesplit[0]).lstrip(linesplit[1]) elif 'range' in linesplit[0]: rangelist = [] for ipaddr in ipaddress.summarize_address_range( ipaddress.IPv4Address(linesplit[1].split('(')[0]), ipaddress.IPv4Address(linesplit[2].split('(')[0])): for each in ipaddr: rangelist.append(str(each)) line = line.lstrip(' ') \ .lstrip(linesplit[0]). \ lstrip(' '). \ lstrip(linesplit[1]). \ lstrip(' ') \ .lstrip(linesplit[2]) temp_line_data['acl_dst_net'] = rangelist else: print('\r') print(line) print(linesplit) print(temp_line_data) input('unknown destination') return line, temp_line_data
def main(): if len(sys.argv) != 2: print("Usage: python {} <ip-range>|<ip-subnet>".format(sys.argv[0])) print("Example") print("python {} 192.168.10.10-192.168.20.1".format(sys.argv[0])) print("python {} 192.168.10.0/24".format(sys.argv[0])) return 1 if sys.argv[1].find('/') != -1 and sys.argv[1].find('-') != -1: print("uncognized format") return 1 elif sys.argv[1].find('/') != -1: network = ipaddress.ip_network(unicode(sys.argv[1])) for ip in network: try: subprocess.check_call('timeout {} ping -c 1 {} >/dev/null 2>&1'.format(pingTimeout, ip), shell=True) except: continue print("Scanning " + str(ip)) pool = multiprocessing.Pool(processes=nprocs) for res in pool.map(scan, [(str(ip), port) for port in ports]): if res: print(res) pool.close() pool.join() elif sys.argv[1].find('-') != -1: ips = sys.argv[1].split('-') iprange = ipaddress.summarize_address_range(ipaddress.ip_address(unicode(ips[0])), ipaddress.ip_address(unicode(ips[1]))) while True: try: network = iprange.next() for ip in network: try: subprocess.check_call('timeout {} ping -c 1 {} >/dev/null 2>&1'.format(pingTimeout, ip), shell=True) except: continue print("Scanning " + str(ip)) pool = multiprocessing.Pool(processes=nprocs) for res in pool.imap_unordered(scan, [(str(ip), port) for port in ports]): if res: print(res) pool.close() pool.join() except: break else: ip = sys.argv[1] try: subprocess.check_call('timeout {} ping -c 1 {} >/dev/null 2>&1'.format(pingTimeout, ip), shell=True) except: pass print("Scanning " + str(ip)) pool = multiprocessing.Pool(processes=nprocs) for res in pool.imap_unordered(scan, [(str(ip), port) for port in ports]): if res: print(res) pool.close() pool.join()
def networks(self):
    if not hasattr(self, '_networks'):
        self._networks = []
        for i, block in enumerate(self.blocks):
            self._networks += summarize_address_range(
                IPv4Address(block['start']), IPv4Address(block['end']))
        self._networks = list(collapse_addresses(self._networks))
    return self._networks
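# Small sketch of why collapse_addresses is applied above: blocks that were
# summarized separately can still be adjacent, and collapsing merges them.
from ipaddress import IPv4Address, summarize_address_range, collapse_addresses

parts = []
parts += summarize_address_range(IPv4Address("198.51.100.0"), IPv4Address("198.51.100.127"))
parts += summarize_address_range(IPv4Address("198.51.100.128"), IPv4Address("198.51.100.255"))
print([str(n) for n in collapse_addresses(parts)])
# ['198.51.100.0/24']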
def get_baremetal_hosts_list(env): network_data = env.get_network_data() hosts = set() baremetal_net = [x for x in network_data["networks"] if x["name"] == "baremetal"][0] for start, end in baremetal_net["ip_ranges"]: start = ipaddress.ip_address(start) end = ipaddress.ip_address(end) for subnet in ipaddress.summarize_address_range(start, end): for host in subnet: hosts.add(host) start, end = network_data["networking_parameters"]["baremetal_range"] start = ipaddress.ip_address(start) end = ipaddress.ip_address(end) for subnet in ipaddress.summarize_address_range(start, end): for host in subnet: hosts.add(host) hosts.add(ipaddress.ip_address(network_data["networking_parameters"]["baremetal_gateway"])) return hosts
def _get_nets_krnic(self, response): """ The function for parsing network blocks from krnic whois data. Args: response: The response from the krnic server. Returns: List: A of dictionaries containing keys: cidr, start, end. """ nets = [] # Iterate through all of the networks found, storing the CIDR value # and the start and end positions. for match in re.finditer( r'^(IPv4 Address)[\s]+:[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+?)' '[^\S\n]\((.+?)\)|.+)$', response, re.MULTILINE ): try: net = copy.deepcopy(BASE_NET) net['range'] = match.group(2) if match.group(3) and match.group(4): addrs = [] addrs.extend(summarize_address_range( ip_address(match.group(3).strip()), ip_address(match.group(4).strip()))) cidr = ', '.join( [i.__str__() for i in collapse_addresses(addrs)] ) net['range'] = '{0} - {1}'.format( match.group(3), match.group(4) ) else: cidr = ip_network(match.group(2).strip()).__str__() net['cidr'] = cidr net['start'] = match.start() net['end'] = match.end() nets.append(net) except (ValueError, TypeError): pass return nets
def get_baremetal_hosts_list(env): network_data = env.get_network_data() hosts = set() baremetal_net = [x for x in network_data['networks'] if x['name'] == 'baremetal'][0] for start, end in baremetal_net['ip_ranges']: start = ipaddress.ip_address(start) end = ipaddress.ip_address(end) for subnet in ipaddress.summarize_address_range(start, end): for host in subnet: hosts.add(host) start, end = network_data['networking_parameters']['baremetal_range'] start = ipaddress.ip_address(start) end = ipaddress.ip_address(end) for subnet in ipaddress.summarize_address_range(start, end): for host in subnet: hosts.add(host) hosts.add(ipaddress.ip_address(network_data['networking_parameters'][ 'baremetal_gateway'])) return hosts
def read_csv_and_update_database():
    """
    1. read CSV in text mode
    2. process all data
    3. write converted data into database
    """
    print(datetime.now(), end='')
    print(' geoip models_refresh.read_csv_and_update_database(): processing data')
    with open('dbip-country.csv', 'rt') as csv_file:
        csv_obj = csv.reader(csv_file, delimiter=',', quotechar='"')
        counter = 0  # counter
        for row in csv_obj:
            ip_start = ipaddress.ip_address(row[0])
            ip_end = ipaddress.ip_address(row[1])
            # country = row[2]
            if ':' in row[0]:
                db.session.add(Ipv6StrRange(row[0], row[1], row[2]))
                db.session.add(Ipv6IntRange(int(ip_start), int(ip_end), row[2]))
                ip_blocks = ipaddress.summarize_address_range(ip_start, ip_end)
                for ip_block in ip_blocks:
                    db.session.add(Ipv6ClasslessInterDomainRouting(ip_block, row[2]))
            elif '.' in row[0]:
                db.session.add(Ipv4StrRange(row[0], row[1], row[2]))
                db.session.add(Ipv4IntRange(int(ip_start), int(ip_end), row[2]))
                ip_blocks = ipaddress.summarize_address_range(ip_start, ip_end)
                for ip_block in ip_blocks:
                    db.session.add(Ipv4ClasslessInterDomainRouting(str(ip_block), row[2]))
            else:
                print('no insert ', end='')
                print(row)
            counter += 1
            if counter % 100 == 0:
                db.session.commit()
                print(datetime.now(), end='')
                print(' geoip models_refresh.read_csv_and_update_database(): insert data into database ', end='')
                print(counter)
        db.session.commit()
        print(datetime.now(), end='')
        print(' geoip models_refresh.read_csv_and_update_database(): insert data into database ', end='')
        print(counter)
def check_allocation_pools_pairing(self, filedata, pools): for poolitem in pools: pooldata = filedata[poolitem] self.log.info('Checking allocation pool {}'.format(poolitem)) pool_objs = [] for pool in pooldata: try: ip_start = ipaddress.ip_address( six.u(pool['start'])) except ValueError: self.log.error('Invalid address: %s' % ip_start) self.error_count += 1 ip_start = None try: ip_end = ipaddress.ip_address(six.u(pool['end'])) except ValueError: self.log.error('Invalid address: %s' % ip_start) self.error_count += 1 ip_end = None if (ip_start is None) or (ip_end is None): continue try: pool_objs.append(list( ipaddress.summarize_address_range(ip_start, ip_end))) except Exception: self.log.error('Invalid address pool: %s, %s' % (ip_start, ip_end)) self.error_count += 1 subnet_item = poolitem.split('AllocationPools')[0] + 'NetCidr' try: subnet_obj = ipaddress.ip_network( six.u(filedata[subnet_item])) except ValueError: self.log.error('Invalid address: %s', subnet_item) self.error_count += 1 continue for ranges in pool_objs: for range in ranges: if not subnet_obj.overlaps(range): self.log.error( 'Allocation pool {} {} outside of subnet {}: {}' .format(poolitem, pooldata, subnet_item, subnet_obj)) self.error_count += 1 break
def get_ips(self, cidr_notation=False): """ Runs through the source file line by line to parse the lines and return a list `IPv4Network`s. :returns list of textual ipaddress notations :rtype list """ # First check if we need a forced update? if not self.fetcher.file_exists: self.fetcher.update() # Run through the list and gather all textual ipaddresses results = [] linenr = 1 filehandler = open(self.filepath, 'r') for line in filehandler: res = re.search(self.regex, line) if not res: LOG.debug( "{} Line:{} Regular Expression mismatch {}" .format(self.filepath, line, self.__class__.__name__) ) continue if len(res.groups()) == 1: if cidr_notation: entry = IPv4Network(unicode(res.groups()[0])) results.append(entry) else: results.append(res.groups()[0]) elif len(res.groups()) > 1: if cidr_notation: start = IPv4Address(unicode(res.groups()[0])) end = IPv4Address(unicode(res.groups()[1])) networks = list(summarize_address_range(start, end)) results += networks else: results.append( "{}-{}".format(res.groups()[0], res.groups()[1])) linenr += 1 filehandler.close() return list(results)
def get_nets(src_file_path): ip_net = [] with open(src_file_path, newline='') as csvfile: reader = csv.reader(csvfile) for eachline in reader: entry = eachline[0] if '-' in entry: ip_pair = entry.strip().split('-') ip_begin = ipaddress.ip_address(ip_pair[0].strip()) ip_end = ipaddress.ip_address(ip_pair[1].strip()) temp_net = [ ipaddr for ipaddr in ipaddress.summarize_address_range( ip_begin, ip_end)] elif '/' in entry: temp_net = [ipaddress.ip_network(entry.strip())] else: temp_net = [ipaddress.ip_address(entry.strip())] ip_net = ip_net + temp_net print(ip_net) return(ip_net)
def ParseAdditionalParams(parser): options, args = parser.parse_args() prop_array = None properties = options.properties ip_range = options.ip_range logger_severity = options.logger_severity.upper() plot_interval_periods = options.plot_interval if properties is not None: prop_array = properties.split(",") if ip_range is not None: ip_range = ip_range.split("-") ip_range = list( ipaddress.summarize_address_range( ipaddress.ip_address(unicode(ip_range[0], "utf-8")), ipaddress.ip_address(unicode(ip_range[1], "utf-8")) ) ) if plot_interval_periods is not None: plot_interval_periods = HOUR_PERIODS_COUNT * int(plot_interval_periods) if plot_interval_periods < HOUR_PERIODS_COUNT: plot_interval_periods = HOUR_PERIODS_COUNT return logger_severity, prop_array, ip_range, options.file_path, options.learning, plot_interval_periods
def _parse_address(self, address): """ parse addresses and hostnames, yield only valid addresses and networks :param address: address or network :return: boolean """ address = address.strip() if address.find('/') > -1: # provided address could be a network try: ipaddress.ip_network(str(address), strict=False) yield address return except (ipaddress.AddressValueError, ValueError): pass else: # check if address is an ipv4/6 address or range try: tmp = str(address).split('-') addr1 = ipaddress.ip_address(tmp[0]) if len(tmp) > 1: # address range (from-to) addr2 = ipaddress.ip_address(tmp[1]) for addr in ipaddress.summarize_address_range(addr1, addr2): yield str(addr) else: yield address return except (ipaddress.AddressValueError, ValueError): pass # try to resolve provided address for record_type in ['A', 'AAAA']: try: for rdata in self._dnsResolver.query(address, record_type): yield str(rdata) except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN, dns.exception.Timeout, dns.resolver.NoNameservers): pass
def _lookup_rws_lacnic(self, response=None): """ The function for retrieving and parsing whois information for a LACNIC IP address via HTTP (Whois-RWS). Args: response: The dictionary containing whois information to parse. Returns: List: Dictionaries containing network information which consists of the fields listed in the NIC_WHOIS dictionary. Certain IPs have more granular network listings, hence the need for a list object. """ addrs = [] net = BASE_NET.copy() try: addrs.extend( summarize_address_range( ip_address(response["startAddress"].strip()), ip_address(response["endAddress"].strip()) ) ) net["cidr"] = ", ".join([i.__str__() for i in collapse_addresses(addrs)]) except (KeyError, ValueError, TypeError): pass try: net["country"] = str(response["country"]).strip().upper() except KeyError: pass try: events = response["events"] if not isinstance(events, list): events = [events] except KeyError: events = [] for ev in events: try: if ev["eventAction"] == "registration": tmp = str(ev["eventDate"]).strip() value = datetime.strptime(tmp, str(NIC_WHOIS["lacnic"]["dt_rws_format"])).isoformat("T") net["created"] = value elif ev["eventAction"] == "last changed": tmp = str(ev["eventDate"]).strip() value = datetime.strptime(tmp, str(NIC_WHOIS["lacnic"]["dt_rws_format"])).isoformat("T") net["updated"] = value except (KeyError, ValueError): pass try: entities = response["entities"] if not isinstance(entities, list): entities = [entities] except KeyError: entities = [] for en in entities: try: if en["roles"][0] == "registrant": temp = en["vcardArray"][1] for t in temp: if t[0] == "fn": net["name"] = str(t[3]).strip() elif t[0] == "org": net["description"] = str(t[3][0]).strip() elif t[0] == "adr": net["address"] = str(t[1]["label"]).strip() elif t[0] == "email": net["misc_emails"] = str(t[3]).strip() elif en["roles"][0] == "abuse": temp = en["vcardArray"][1] for t in temp: if t[0] == "email": net["abuse_emails"] = str(t[3]).strip() elif en["roles"][0] == "tech": temp = en["vcardArray"][1] for t in temp: if t[0] == "email": net["tech_emails"] = str(t[3]).strip() except (KeyError, IndexError): pass return [net]
def _lookup_rws_arin(self, response=None, retry_count=3): """ The function for retrieving and parsing whois information for an ARIN IP address via HTTP (Whois-RWS). Args: response: The dictionary containing whois information to parse. retry_count: The number of times to retry in case socket errors, timeouts, connection resets, etc. are encountered. Returns: List: Dictionaries containing network information which consists of the fields listed in the NIC_WHOIS dictionary. Certain IPs have more granular network listings, hence the need for a list object. """ nets = [] try: net_list = response["nets"]["net"] if not isinstance(net_list, list): net_list = [net_list] except KeyError: net_list = [] for n in net_list: if "orgRef" in n and n["orgRef"]["@handle"] in ("ARIN", "VR-ARIN"): continue addrs = [] net = BASE_NET.copy() try: addrs.extend( summarize_address_range( ip_address(n["startAddress"]["$"].strip()), ip_address(n["endAddress"]["$"].strip()) ) ) net["cidr"] = ", ".join([i.__str__() for i in collapse_addresses(addrs)]) except (KeyError, ValueError, TypeError): pass for k, v in {"created": "registrationDate", "updated": "updateDate", "name": "name"}.items(): try: net[k] = str(n[v]["$"]).strip() except KeyError: pass ref = None if "customerRef" in n: ref = ["customerRef", "customer"] elif "orgRef" in n: ref = ["orgRef", "org"] if ref is not None: try: net["description"] = str(n[ref[0]]["@name"]).strip() except KeyError: pass try: ref_url = n[ref[0]]["$"].strip() + "?showPocs=true" ref_response = self.get_rws(ref_url, retry_count) except (KeyError, WhoisLookupError): nets.append(net) continue try: addr_list = ref_response[ref[1]]["streetAddress"]["line"] if not isinstance(addr_list, list): addr_list = [addr_list] net["address"] = "\n".join([str(line["$"]).strip() for line in addr_list]) except KeyError: pass for k, v in {"postal_code": "postalCode", "city": "city", "state": "iso3166-2"}.items(): try: net[k] = str(ref_response[ref[1]][v]["$"]) except KeyError: pass try: net["country"] = (str(ref_response[ref[1]]["iso3166-1"]["code2"]["$"])).upper() except KeyError: pass try: for poc in ref_response[ref[1]]["pocs"]["pocLinkRef"]: if poc["@description"] in ("Abuse", "Tech"): poc_url = poc["$"] poc_response = self.get_rws(poc_url, retry_count) emails = poc_response["poc"]["emails"]["email"] if not isinstance(emails, list): emails = [emails] temp = [] for e in emails: temp.append(str(e["$"]).strip()) key = "%s_emails" % poc["@description"].lower() net[key] = "\n".join(set(temp)) if len(temp) > 0 else None except (KeyError, WhoisLookupError): pass nets.append(net) return nets
def _lookup_rws_apnic(self, response=None): """ The function for retrieving and parsing whois information for a APNIC IP address via HTTP (Whois-RWS). Args: response: The dictionary containing whois information to parse. Returns: List: Dictionaries containing network information which consists of the fields listed in the NIC_WHOIS dictionary. Certain IPs have more granular network listings, hence the need for a list object. """ addrs = [] net = BASE_NET.copy() try: addrs.extend( summarize_address_range( ip_address(response["startAddress"].strip()), ip_address(response["endAddress"].strip()) ) ) net["cidr"] = ", ".join([i.__str__() for i in collapse_addresses(addrs)]) except (KeyError, ValueError, TypeError): pass try: net["country"] = str(response["country"]).strip().upper() except KeyError: pass try: events = response["events"] if not isinstance(events, list): events = [events] except KeyError: events = [] for ev in events: try: if ev["eventAction"] == "registration": net["created"] = str(ev["eventDate"]).strip() elif ev["eventAction"] == "last changed": net["updated"] = str(ev["eventDate"]).strip() except (KeyError, ValueError): pass try: entities = response["entities"] if not isinstance(entities, list): entities = [entities] except KeyError: entities = [] for en in entities: try: temp = en["vcardArray"][1] for t in temp: if "administrative" in en["roles"] and t[0] == "fn": net["name"] = str(t[3]).strip() elif "administrative" in en["roles"] and t[0] == "adr": try: net["address"] = str(t[1]["label"]).strip() except KeyError: pass elif t[0] == "email": key = None if len(en["roles"]) > 1 or en["roles"][0] == "administrative": key = "misc_emails" elif en["roles"][0] == "abuse": key = "abuse_emails" elif en["roles"][0] == "technical": key = "tech_emails" if key is not None: if net[key] is not None: net[key] += "\n%s" % str(t[3]).strip() else: net[key] = str(t[3]).strip() except (KeyError, IndexError): pass try: remarks = response["remarks"] if not isinstance(remarks, list): remarks = [remarks] except KeyError: remarks = [] for rem in remarks: try: if rem["title"] == "description": net["description"] = str("\n".join(rem["description"])) except (KeyError, IndexError): pass return [net]
def _lookup_rws_ripe(self, response=None): """ The function for retrieving and parsing whois information for a RIPE IP address via HTTP (Whois-RWS). *** THIS FUNCTION IS TEMPORARILY BROKEN UNTIL RIPE FIXES THEIR API: https://github.com/RIPE-NCC/whois/issues/114 *** Args: response: The dictionary containing whois information to parse. Returns: List: Dictionaries containing network information which consists of the fields listed in the NIC_WHOIS dictionary. Certain IPs have more granular network listings, hence the need for a list object. """ nets = [] try: object_list = response['objects']['object'] except KeyError: object_list = [] ripe_abuse_emails = [] ripe_misc_emails = [] net = BASE_NET.copy() for n in object_list: try: if n['type'] == 'role': for attr in n['attributes']['attribute']: if attr['name'] == 'abuse-mailbox': ripe_abuse_emails.append(str( attr['value'] ).strip()) elif attr['name'] == 'e-mail': ripe_misc_emails.append(str(attr['value']).strip()) elif attr['name'] == 'address': if net['address'] is not None: net['address'] += '\n%s' % ( str(attr['value']).strip() ) else: net['address'] = str(attr['value']).strip() elif n['type'] in ('inetnum', 'inet6num'): for attr in n['attributes']['attribute']: if attr['name'] in ('inetnum', 'inet6num'): ipr = str(attr['value']).strip() ip_range = ipr.split(' - ') try: if len(ip_range) > 1: addrs = [] addrs.extend( summarize_address_range( ip_address(ip_range[0]), ip_address(ip_range[1]) ) ) cidr = ', '.join( [i.__str__() for i in collapse_addresses(addrs)] ) else: cidr = ip_network(ip_range[0]).__str__() net['cidr'] = cidr except (ValueError, TypeError): pass elif attr['name'] == 'netname': net['name'] = str(attr['value']).strip() elif attr['name'] == 'descr': if net['description'] is not None: net['description'] += '\n%s' % ( str(attr['value']).strip() ) else: net['description'] = str(attr['value']).strip() elif attr['name'] == 'country': net['country'] = str(attr['value']).strip().upper() except KeyError: pass nets.append(net) #This is nasty. Since RIPE RWS doesn't provide a granular #contact to network relationship, we apply to all networks. if len(ripe_abuse_emails) > 0 or len(ripe_misc_emails) > 0: abuse = ( '\n'.join(set(ripe_abuse_emails)) if len(ripe_abuse_emails) > 0 else None ) misc = ( '\n'.join(set(ripe_misc_emails)) if len(ripe_misc_emails) > 0 else None ) for net in nets: net['abuse_emails'] = abuse net['misc_emails'] = misc return nets
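# A minimal sketch of the inetnum handling above: a RIPE-style "start - end"
# string is summarized and collapsed into CIDR notation. (The surrounding
# method extracts this value from the RWS response attributes.)
from ipaddress import ip_address, summarize_address_range, collapse_addresses

ipr = "193.0.0.0 - 193.0.7.255"
range_start, range_end = ipr.split(" - ")
addrs = summarize_address_range(ip_address(range_start), ip_address(range_end))
print(", ".join(str(n) for n in collapse_addresses(addrs)))
# 193.0.0.0/21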
def _lookup_rws_apnic(self, response=None): """ The function for retrieving and parsing whois information for a APNIC IP address via HTTP (Whois-RWS). Args: response: The dictionary containing whois information to parse. Returns: List: Dictionaries containing network information which consists of the fields listed in the NIC_WHOIS dictionary. Certain IPs have more granular network listings, hence the need for a list object. """ addrs = [] net = BASE_NET.copy() try: addrs.extend(summarize_address_range( ip_address(response['startAddress'].strip()), ip_address(response['endAddress'].strip()))) net['cidr'] = ', '.join( [i.__str__() for i in collapse_addresses(addrs)] ) except (KeyError, ValueError, TypeError): pass try: net['country'] = str(response['country']).strip().upper() except KeyError: pass try: events = response['events'] if not isinstance(events, list): events = [events] except KeyError: events = [] for ev in events: try: if ev['eventAction'] == 'registration': net['created'] = str(ev['eventDate']).strip() elif ev['eventAction'] == 'last changed': net['updated'] = str(ev['eventDate']).strip() except (KeyError, ValueError): pass try: entities = response['entities'] if not isinstance(entities, list): entities = [entities] except KeyError: entities = [] for en in entities: try: temp = en['vcardArray'][1] for t in temp: if 'administrative' in en['roles'] and t[0] == 'fn': net['name'] = str(t[3]).strip() elif 'administrative' in en['roles'] and t[0] == 'adr': try: net['address'] = str(t[1]['label']).strip() except KeyError: pass elif t[0] == 'email': key = None if (len(en['roles']) > 1 or en['roles'][0] == 'administrative'): key = 'misc_emails' elif en['roles'][0] == 'abuse': key = 'abuse_emails' elif en['roles'][0] == 'technical': key = 'tech_emails' if key is not None: if net[key] is not None: net[key] += '\n%s' % str(t[3]).strip() else: net[key] = str(t[3]).strip() except (KeyError, IndexError): pass try: remarks = response['remarks'] if not isinstance(remarks, list): remarks = [remarks] except KeyError: remarks = [] for rem in remarks: try: if rem['title'] == 'description': net['description'] = str('\n'.join(rem['description'])) except (KeyError, IndexError): pass return [net]
def check(set=None, entry=None, family='ipv4'): ''' Check that an entry exists in the specified set. set The ipset name entry An entry in the ipset. This parameter can be a single IP address, a range of IP addresses, or a subnet block. Example: .. code-block:: cfg 192.168.0.1 192.168.0.2-192.168.0.19 192.168.0.0/25 family IP protocol version: ipv4 or ipv6 CLI Example: .. code-block:: bash salt '*' ipset.check setname '192.168.0.1 comment "Hello"' ''' if not set: return 'Error: Set needs to be specified' if not entry: return 'Error: Entry needs to be specified' settype = _find_set_type(set) if not settype: return 'Error: Set {0} does not exist'.format(set) if isinstance(entry, list): entries = entry else: _entry = entry.split()[0] _entry_extra = entry.split()[1:] if _entry.find('-') != -1 and _entry.count('-') == 1: start, end = _entry.split('-') if settype == 'hash:ip': if _entry_extra: entries = [' '.join([str(ipaddress.ip_address(ip)), ' '.join(_entry_extra)]) for ip in long_range( ipaddress.ip_address(start), ipaddress.ip_address(end) + 1 )] else: entries = [' '.join([str(ipaddress.ip_address(ip))]) for ip in long_range( ipaddress.ip_address(start), ipaddress.ip_address(end) + 1 )] elif settype == 'hash:net': networks = ipaddress.summarize_address_range(ipaddress.ip_address(start), ipaddress.ip_address(end)) entries = [] for network in networks: _network = [str(ip) for ip in ipaddress.ip_network(network)] if len(_network) == 1: if _entry_extra: __network = ' '.join([str(_network[0]), ' '.join(_entry_extra)]) else: __network = ' '.join([str(_network[0])]) else: if _entry_extra: __network = ' '.join([str(network), ' '.join(_entry_extra)]) else: __network = ' '.join([str(network)]) entries.append(__network) else: entries = [entry] elif _entry.find('/') != -1 and _entry.count('/') == 1: if settype == 'hash:ip': if _entry_extra: entries = [' '.join([str(ip), ' '.join(_entry_extra)]) for ip in ipaddress.ip_network(_entry)] else: entries = [' '.join([str(ip)]) for ip in ipaddress.ip_network(_entry)] elif settype == 'hash:net': _entries = [str(ip) for ip in ipaddress.ip_network(_entry)] if len(_entries) == 1: if _entry_extra: entries = [' '.join([_entries[0], ' '.join(_entry_extra)])] else: entries = [' '.join([_entries[0]])] else: entries = [entry] else: entries = [entry] else: entries = [entry] current_members = _find_set_members(set) for entry in entries: if entry not in current_members: return False return True
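# A small sketch of the range expansion the hash:net branch above performs,
# using the docstring's example range (assumes the same ipaddress import as
# the surrounding module): the range is summarized into aligned networks.
networks = ipaddress.summarize_address_range(ipaddress.ip_address('192.168.0.2'),
                                             ipaddress.ip_address('192.168.0.19'))
print([str(n) for n in networks])
# ['192.168.0.2/31', '192.168.0.4/30', '192.168.0.8/29', '192.168.0.16/30']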
def _lookup_rws_lacnic(self, response=None): """ The function for retrieving and parsing whois information for a LACNIC IP address via HTTP (Whois-RWS). Args: response: The dictionary containing whois information to parse. Returns: List: Dictionaries containing network information which consists of the fields listed in the NIC_WHOIS dictionary. Certain IPs have more granular network listings, hence the need for a list object. """ addrs = [] net = BASE_NET.copy() try: addrs.extend(summarize_address_range( ip_address(response['startAddress'].strip()), ip_address(response['endAddress'].strip()))) net['cidr'] = ', '.join( [i.__str__() for i in collapse_addresses(addrs)] ) except (KeyError, ValueError, TypeError): pass try: net['country'] = str(response['country']).strip().upper() except KeyError: pass try: events = response['events'] if not isinstance(events, list): events = [events] except KeyError: events = [] for ev in events: try: if ev['eventAction'] == 'registration': tmp = str(ev['eventDate']).strip() value = datetime.strptime( tmp, str(NIC_WHOIS['lacnic']['dt_rws_format']) ).isoformat('T') net['created'] = value elif ev['eventAction'] == 'last changed': tmp = str(ev['eventDate']).strip() value = datetime.strptime( tmp, str(NIC_WHOIS['lacnic']['dt_rws_format']) ).isoformat('T') net['updated'] = value except (KeyError, ValueError): pass try: entities = response['entities'] if not isinstance(entities, list): entities = [entities] except KeyError: entities = [] for en in entities: try: if en['roles'][0] == 'registrant': temp = en['vcardArray'][1] for t in temp: if t[0] == 'fn': net['name'] = str(t[3]).strip() elif t[0] == 'org': net['description'] = str(t[3][0]).strip() elif t[0] == 'adr': net['address'] = str(t[1]['label']).strip() elif t[0] == 'email': net['misc_emails'] = str(t[3]).strip() elif en['roles'][0] == 'abuse': temp = en['vcardArray'][1] for t in temp: if t[0] == 'email': net['abuse_emails'] = str(t[3]).strip() elif en['roles'][0] == 'tech': temp = en['vcardArray'][1] for t in temp: if t[0] == 'email': net['tech_emails'] = str(t[3]).strip() except (KeyError, IndexError): pass return [net]
def start():
    pyv = python_version_tuple()
    if pyv[0] != "3":
        print("Python 3 is required to run this script")
        sys.exit(1)

    parser = argparse.ArgumentParser(
        description="A tool for adding new server configs to your protozoo setup. "
                    "In future versions you will be able to create servers with this tool."
    )
    parser.add_argument("--ip_range", help="A range of IPs for the servers, in the format 192.168.1.5-192.168.1.33.")
    parser.add_argument("--ip_list", help="A list of IPs of new servers, separated by commas")
    parser.add_argument(
        "--remove_ip", help="If set, the IP list is used to delete servers", required=False, nargs="?", const="1"
    )
    parser.add_argument("--os", help="The operating system of the new servers", required=True)
    parser.add_argument("--domainname", help="The domain name of the new servers", required=True)
    parser.add_argument("--type", help="The type of the servers", required=True)
    parser.add_argument(
        "--save_in_db", help="Save in a database (requires special config)", required=False, nargs="?", const="1"
    )
    parser.add_argument("--profile", help="The profile where the servers are saved", required=True)

    args = parser.parse_args()

    if args.ip_range is None and args.ip_list is None:
        parser.error("You need the --ip_range or --ip_list option")

    arr_ip = []

    if args.ip_range is not None:
        range_ips = args.ip_range.split("-")

        try:
            ipaddress.ip_address(range_ips[0])
        except ValueError:
            parser.error("The first element of the IP range is not a valid IPv4 or IPv6 address")

        try:
            ipaddress.ip_address(range_ips[1])
        except ValueError:
            parser.error("The second element of the IP range is not a valid IPv4 or IPv6 address")

        for ipaddr in ipaddress.summarize_address_range(
            ipaddress.ip_address(range_ips[0]), ipaddress.ip_address(range_ips[1])
        ):
            for ip in ipaddr:
                arr_ip.append(ip)

    elif args.ip_list is not None:
        ip_list = args.ip_list.split(",")
        for ip in ip_list:
            arr_ip.append(ipaddress.ip_address(ip))

    if len(arr_ip) > 0:
        # Save
        if args.save_in_db is None:
            print("Saving new servers in file...")

            old_servers = {}
            check_old = 0

            try:
                servers = import_module("settings." + args.profile)
                for k, server in enumerate(servers.servers):
                    old_servers[server["ip"]] = 1
                check_old = 1
            except ImportError:
                pass

            new_file = "settings/" + args.profile + ".py"

            file_txt = "#!/usr/bin/python3\n"
            file_txt += "servers=[]\n"

            prefix = ""
            if args.profile is not None:
                prefix = "-" + args.profile.replace(".", "-")

            for ip in arr_ip:
                old_servers[str(ip)] = old_servers.get(str(ip), 0)

                if old_servers[str(ip)] == 0:
                    hostname = str(ip).replace(".", "") + prefix + "." + args.domainname
                    file_txt += (
                        "servers.append({'hostname': '" + hostname
                        + "', 'os_codename': '" + str(args.os)
                        + "', 'ip': '" + str(ip)
                        + "', 'name': '" + str(hostname).replace(".", "_") + "'})\n"
                    )
                elif args.remove_ip == "1":
                    old_servers[str(ip)] = 0

            # Add old servers
            if check_old == 1:
                for server in servers.servers:
                    if old_servers[server["ip"]] == 1:
                        file_txt += (
                            "servers.append({'hostname': '" + server["hostname"]
                            + "', 'os_codename': '" + server["os_codename"]
                            + "', 'ip': '" + server["ip"]
                            + "', 'name': '" + server["name"] + "'})\n"
                        )

            # Save file
            with open(new_file, "w+") as file:
                file.write(file_txt)
        else:
            # Import settings for db
            print("Saving new servers in database...")

            new_file = "settings/" + args.profile + ".py"

            try:
                config_db = import_module("settings.config_db")
            except ImportError:
                print(
                    "You need a configuration file called config_db.py to use a database "
                    "for saving server data. You also need the cromosoma module installed."
                )
                print("Exception in user code:")
                print("-" * 60)
                traceback.print_exc(file=sys.stdout)
                print("-" * 60)
                exit(1)

            # Load model
            model = import_module("protozoo.models.servers")

            if args.remove_ip == "1":
                for ip in arr_ip:
                    model.server.conditions = 'WHERE ip="' + str(ip) + '"'
                    model.server.delete()
            else:
                if args.profile is not None:
                    prefix = "-" + args.profile.replace(".", "-")

                for ip in arr_ip:
                    # Check if the server already exists in this profile
                    model.server.conditions = (
                        'WHERE ip="' + str(ip) + '" and profile="' + args.profile + '"'
                    )
                    num_server = model.server.select_count()

                    if num_server == 0:
                        hostname = str(ip).replace(".", "") + prefix + "." + args.domainname
                        arr_server = {
                            "hostname": hostname,
                            "os_codename": str(args.os),
                            "ip": str(ip),
                            "name": str(hostname).replace(".", "_"),
                            "type": args.type,
                            "profile": args.profile,
                        }
                        model.server.insert(arr_server)

                # Save file
                file_txt = "#!/usr/bin/python3\n\n"
                file_txt += "from protozoo.models.servers import server\n\n"
                file_txt += "from settings import config_db\n\n"
                file_txt += "servers=[]\n\n"
                file_txt += "server.conditions='where type=\"" + args.type + '" and profile="' + args.profile + "\"'\n\n"
                file_txt += "cur=server.select()\n\n"
                file_txt += "for row in cur:\n"
                file_txt += "    servers.append(row)\n"

                with open(new_file, "w+") as file:
                    file.write(file_txt)
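Note that start() expands --ip_range by iterating over every network returned by summarize_address_range; since those networks exactly cover the requested range, the expansion yields each individual address. A hedged sketch of just that expansion step, assuming the same "a-b" argument format (the helper name is illustrative):

import ipaddress

def expand_range(ip_range):
    """Expand '192.168.1.5-192.168.1.8' into individual addresses,
    the same way start() builds arr_ip. Hypothetical helper name."""
    start, end = ip_range.split("-")
    addresses = []
    for net in ipaddress.summarize_address_range(
        ipaddress.ip_address(start), ipaddress.ip_address(end)
    ):
        addresses.extend(net)  # iterating a network yields every address in it
    return addresses

print([str(ip) for ip in expand_range("192.168.1.5-192.168.1.8")])
# ['192.168.1.5', '192.168.1.6', '192.168.1.7', '192.168.1.8']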
def _lookup_rws_arin(self, response=None, retry_count=3):
    """
    The function for retrieving and parsing whois information for an
    ARIN IP address via HTTP (Whois-RWS).

    Args:
        response: The dictionary containing whois information to parse.
        retry_count: The number of times to retry in case socket errors,
            timeouts, connection resets, etc. are encountered.

    Returns:
        List: Dictionaries containing network information which consists
            of the fields listed in the NIC_WHOIS dictionary. Certain IPs
            have more granular network listings, hence the need for a
            list object.
    """

    nets = []

    try:
        net_list = response['nets']['net']
        if not isinstance(net_list, list):
            net_list = [net_list]
    except KeyError:
        net_list = []

    for n in net_list:
        if 'orgRef' in n and n['orgRef']['@handle'] in ('ARIN', 'VR-ARIN'):
            continue

        addrs = []
        net = BASE_NET.copy()

        try:
            addrs.extend(summarize_address_range(
                ip_address(n['startAddress']['$'].strip()),
                ip_address(n['endAddress']['$'].strip())))

            net['cidr'] = ', '.join(
                [i.__str__() for i in collapse_addresses(addrs)]
            )
        except (KeyError, ValueError, TypeError):
            pass

        for k, v in {
            'created': 'registrationDate',
            'updated': 'updateDate',
            'name': 'name'
        }.items():
            try:
                net[k] = str(n[v]['$']).strip()
            except KeyError:
                pass

        ref = None
        if 'customerRef' in n:
            ref = ['customerRef', 'customer']
        elif 'orgRef' in n:
            ref = ['orgRef', 'org']

        if ref is not None:
            try:
                net['description'] = str(n[ref[0]]['@name']).strip()
            except KeyError:
                pass

            try:
                ref_url = n[ref[0]]['$'].strip() + '?showPocs=true'
                ref_response = self.get_rws(ref_url, retry_count)
            except (KeyError, WhoisLookupError):
                nets.append(net)
                continue

            try:
                addr_list = (
                    ref_response[ref[1]]['streetAddress']['line']
                )
                if not isinstance(addr_list, list):
                    addr_list = [addr_list]

                net['address'] = '\n'.join(
                    [str(line['$']).strip() for line in addr_list]
                )
            except KeyError:
                pass

            for k, v in {
                'postal_code': 'postalCode',
                'city': 'city',
                'state': 'iso3166-2'
            }.items():
                try:
                    net[k] = str(ref_response[ref[1]][v]['$'])
                except KeyError:
                    pass

            try:
                net['country'] = (
                    str(ref_response[ref[1]]['iso3166-1']['code2']['$'])
                ).upper()
            except KeyError:
                pass

            try:
                for poc in (
                    ref_response[ref[1]]['pocs']['pocLinkRef']
                ):
                    if poc['@description'] in ('Abuse', 'Tech'):
                        poc_url = poc['$']
                        poc_response = self.get_rws(poc_url, retry_count)

                        emails = poc_response['poc']['emails']['email']
                        if not isinstance(emails, list):
                            emails = [emails]

                        temp = []
                        for e in emails:
                            temp.append(str(e['$']).strip())

                        key = '%s_emails' % poc['@description'].lower()
                        net[key] = (
                            '\n'.join(set(temp)) if len(temp) > 0 else None
                        )
            except (KeyError, WhoisLookupError):
                pass

        nets.append(net)

    return nets
def lookup(self, inc_raw=False, retry_count=3):
    """
    The function for retrieving and parsing whois information for an IP
    address via port 43 (WHOIS).

    Args:
        inc_raw: Boolean for whether to include the raw whois results in
            the returned dictionary.
        retry_count: The number of times to retry in case socket errors,
            timeouts, connection resets, etc. are encountered.

    Returns:
        Dictionary: A dictionary containing the following keys:
            query (String) - The IP address.
            asn (String) - The Autonomous System Number.
            asn_date (String) - The ASN Allocation date.
            asn_registry (String) - The assigned ASN registry.
            asn_cidr (String) - The assigned ASN CIDR.
            asn_country_code (String) - The assigned ASN country code.
            nets (List) - Dictionaries containing network information
                which consists of the fields listed in the NIC_WHOIS
                dictionary. Certain IPs have more granular network
                listings, hence the need for a list object.
            raw (String) - Raw whois results if the inc_raw parameter
                is True.
    """

    # Initialize the response.
    response = None

    # Attempt to resolve ASN info via Cymru. DNS is faster, try that first.
    try:
        asn_data = self.get_asn_dns()
    except (ASNLookupError, ASNRegistryError):
        try:
            asn_data = self.get_asn_whois(retry_count)
        except (ASNLookupError, ASNRegistryError):
            # Let's attempt to get the ASN registry information from ARIN.
            response = self.get_whois("arin", retry_count)

            asn_data = {
                "asn_registry": None,
                "asn": None,
                "asn_cidr": None,
                "asn_country_code": None,
                "asn_date": None,
            }

            matched = False

            for match in re.finditer(r"^ReferralServer:[^\S\n]+(.+)$", response, re.MULTILINE):
                matched = True

                try:
                    referral = match.group(1)
                    referral = referral.replace(":43", "")
                    asn_data["asn_registry"] = ASN_REFERRALS[referral]
                except KeyError:
                    raise ASNRegistryError("ASN registry lookup failed.")

                break

            if not matched:
                asn_data["asn_registry"] = "arin"

    # Create the return dictionary.
    results = {"query": self.address_str, "nets": [], "raw": None}

    # Add the ASN information to the return dictionary.
    results.update(asn_data)

    # Only fetch the response if we haven't already.
    if response is None or results["asn_registry"] != "arin":
        # Retrieve the whois data.
        response = self.get_whois(results["asn_registry"], retry_count)

    # If the inc_raw parameter is True, add the response to the return dictionary.
    if inc_raw:
        results["raw"] = response

    nets = []

    if results["asn_registry"] == "arin":
        # Iterate through all of the networks found, storing the CIDR value
        # and the start and end positions.
        for match in re.finditer(r"^CIDR:[^\S\n]+(.+?,[^\S\n].+|.+)$", response, re.MULTILINE):
            try:
                net = BASE_NET.copy()
                net["cidr"] = ", ".join(
                    [ip_network(c.strip()).__str__() for c in match.group(1).split(", ")]
                )
                net["start"] = match.start()
                net["end"] = match.end()
                nets.append(net)
            except ValueError:
                pass

    elif results["asn_registry"] == "lacnic":
        # Iterate through all of the networks found, storing the CIDR value
        # and the start and end positions.
        for match in re.finditer(
            r"^(inetnum|inet6num):[^\S\n]+(.+?,[^\S\n].+|.+)$", response, re.MULTILINE
        ):
            try:
                temp = []
                for addr in match.group(2).strip().split(", "):
                    count = addr.count(".")
                    if count != 0 and count < 4:
                        # Pad truncated dotted prefixes out to four octets.
                        addr_split = addr.strip().split("/")
                        for i in range(count + 1, 4):
                            addr_split[0] += ".0"
                        addr = "/".join(addr_split)
                    temp.append(ip_network(addr.strip()).__str__())

                net = BASE_NET.copy()
                net["cidr"] = ", ".join(temp)
                net["start"] = match.start()
                net["end"] = match.end()
                nets.append(net)
            except ValueError:
                pass

    else:
        # Iterate through all of the networks found, storing the CIDR value
        # and the start and end positions.
        for match in re.finditer(
            r"^(inetnum|inet6num):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|.+)$", response, re.MULTILINE
        ):
            try:
                if match.group(3) and match.group(4):
                    addrs = []
                    addrs.extend(
                        summarize_address_range(
                            ip_address(match.group(3).strip()),
                            ip_address(match.group(4).strip())
                        )
                    )

                    cidr = ", ".join([i.__str__() for i in collapse_addresses(addrs)])
                else:
                    cidr = ip_network(match.group(2).strip()).__str__()

                net = BASE_NET.copy()
                net["cidr"] = cidr
                net["start"] = match.start()
                net["end"] = match.end()
                nets.append(net)
            except (ValueError, TypeError):
                pass

    # Iterate through all of the network sections and parse out the
    # appropriate fields for each.
    for index, net in enumerate(nets):
        section_end = None
        if index + 1 < len(nets):
            section_end = nets[index + 1]["start"]

        for field in NIC_WHOIS[results["asn_registry"]]["fields"]:
            pattern = re.compile(
                str(NIC_WHOIS[results["asn_registry"]]["fields"][field]), re.MULTILINE
            )

            if section_end is not None:
                match = pattern.finditer(response, net["end"], section_end)
            else:
                match = pattern.finditer(response, net["end"])

            values = []
            sub_section_end = None

            for m in match:
                if sub_section_end:
                    if field not in ("abuse_emails", "tech_emails", "misc_emails") and (
                        sub_section_end != (m.start() - 1)
                    ):
                        break

                try:
                    values.append(m.group("val").strip())
                except AttributeError:
                    values.append(m.group("val2").strip())

                sub_section_end = m.end()

            if len(values) > 0:
                try:
                    if field == "country":
                        value = values[0].upper()
                    elif field in ["created", "updated"]:
                        value = datetime.strptime(
                            values[0],
                            str(NIC_WHOIS[results["asn_registry"]]["dt_format"])
                        ).isoformat("T")
                    else:
                        values = list(set(values))
                        value = "\n".join(values)
                except ValueError:
                    value = None

                net[field] = value

        # The start and end values are no longer needed.
        del net["start"], net["end"]

    # Add the networks to the return dictionary.
    results["nets"] = nets

    return results
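The LACNIC branch above pads truncated dotted prefixes (e.g. "187.1/16") out to four octets before handing them to ip_network. A hedged, standalone sketch of that padding step, re-implemented outside the class for illustration:

from ipaddress import ip_network

def pad_lacnic_prefix(addr):
    """Pad a truncated LACNIC inetnum such as '187.1/16' to '187.1.0.0/16'.

    Illustrative re-implementation of the padding loop inside lookup().
    """
    count = addr.count(".")
    if count != 0 and count < 4:
        addr_split = addr.strip().split("/")
        for _ in range(count + 1, 4):
            addr_split[0] += ".0"
        addr = "/".join(addr_split)
    return str(ip_network(addr.strip()))

print(pad_lacnic_prefix("187.1/16"))  # -> 187.1.0.0/16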
def get_nets_other(self, response):
    """
    The function for parsing network blocks from generic whois data.

    Args:
        response (:obj:`str`): The response from the whois/rwhois server.

    Returns:
        list of dict: Mapping of networks with start and end positions.

        ::

            [{
                'cidr' (str) - The network routing block
                'start' (int) - The starting point of the network
                'end' (int) - The endpoint point of the network
            }]
    """

    nets = []

    # Iterate through all of the networks found, storing the CIDR value
    # and the start and end positions.
    for match in re.finditer(
        r'^(inetnum|inet6num|route):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|'
        '.+)$',
        response,
        re.MULTILINE
    ):
        try:
            net = copy.deepcopy(BASE_NET)
            net_range = match.group(2).strip()

            try:
                net['range'] = '{0} - {1}'.format(
                    ip_network(net_range)[0].__str__(),
                    ip_network(net_range)[-1].__str__()
                ) if '/' in net_range else net_range
            except ValueError:  # pragma: no cover
                net['range'] = net_range

            if match.group(3) and match.group(4):
                addrs = []
                addrs.extend(summarize_address_range(
                    ip_address(match.group(3).strip()),
                    ip_address(match.group(4).strip())))

                cidr = ', '.join(
                    [i.__str__() for i in collapse_addresses(addrs)]
                )
            else:
                cidr = ip_network(net_range).__str__()

            net['cidr'] = cidr
            net['start'] = match.start()
            net['end'] = match.end()
            nets.append(net)
        except (ValueError, TypeError):
            pass

    return nets
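As a quick sanity check of the range regex used in get_nets_other(), the following hedged snippet runs the same "start - end" to CIDR conversion over a tiny fabricated whois fragment (the response text is made up purely for illustration):

import re
from ipaddress import ip_address, summarize_address_range, collapse_addresses

sample = (
    "inetnum:        203.0.113.0 - 203.0.113.255\n"
    "netname:        EXAMPLE-NET\n"
)

for match in re.finditer(
    r'^(inetnum|inet6num|route):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|.+)$',
    sample,
    re.MULTILINE
):
    if match.group(3) and match.group(4):
        nets = summarize_address_range(
            ip_address(match.group(3).strip()),
            ip_address(match.group(4).strip())
        )
        print(', '.join(str(n) for n in collapse_addresses(nets)))
        # -> 203.0.113.0/24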